
Merge branch 'master' into post-processors-priority
tristanlatr authored Jul 16, 2023
2 parents 396fd64 + 965ed95 commit d190566
Showing 14 changed files with 265 additions and 158 deletions.
5 changes: 5 additions & 0 deletions README.rst
@@ -82,6 +82,11 @@ in development
scope when possible; when that is not possible, the theoretical runtime scopes are used. A warning can
be reported with option ``-v`` when an annotation name is ambiguous (it can resolve to different names
depending on the scope context).
* Fix presentation of type aliases in string form.
* Improve the AST colorizer to output fewer parentheses when they are not required.
* Fix colorization of dictionary unpacking.
* Improve the class hierarchy so that it links top-level names with intersphinx when possible.
* Add highlighting when clicking on the "View In Hierarchy" link from the class page.
* Recognize variadic generics type variables (PEP 646); a short illustration follows this list.
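For illustration only (a hypothetical module, not part of this diff), here are two of the constructs the entries above refer to: a string-form type alias and a PEP 646 variadic type variable.

    from typing import TypeAlias
    from typing_extensions import TypeVarTuple

    # String-form type alias: now presented as ``dict[str, list[int]]``
    # rather than as a quoted string.
    JsonIndex: TypeAlias = 'dict[str, list[int]]'

    # PEP 646 variadic generics type variable: now documented as a type variable.
    Shape = TypeVarTuple('Shape')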

pydoctor 23.4.1
16 changes: 7 additions & 9 deletions pydoctor/astbuilder.py
@@ -92,18 +92,11 @@ def _isTypeAlias(self, ob: model.Attribute) -> bool:
Return C{True} if the Attribute is a type alias.
"""
if ob.value is not None:

if is_using_annotations(ob.annotation, ('typing.TypeAlias', 'typing_extensions.TypeAlias'), ob):
try:
ob.value = unstring_annotation(ob.value, ob)
except SyntaxError as e:
ob.report(f"invalid type alias: {e}")
return False
if is_using_annotations(ob.annotation, ('typing.TypeAlias',
'typing_extensions.TypeAlias'), ob):
return True

if is_typing_annotation(ob.value, ob.parent):
return True

return False

def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
@@ -117,7 +110,12 @@ def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
return
if self._isTypeAlias(attr) is True:
attr.kind = model.DocumentableKind.TYPE_ALIAS
# unstring type aliases
attr.value = unstring_annotation(
# this cast() is safe because _isTypeAlias() returns True only if value is not None
cast(ast.expr, attr.value), attr, section='type alias')
elif self._isTypeVariable(attr) is True:
# TODO: unstring bound argument of type variables
attr.kind = model.DocumentableKind.TYPE_VARIABLE

visit_AnnAssign = visit_Assign
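As a rough standalone sketch of what the "unstring" step amounts to for a type alias whose value is a string literal (illustration only, not pydoctor code; unstring_annotation() additionally handles nested strings and reports syntax errors through the model):

    import ast

    # The assignment ``Alias: TypeAlias = 'list[int] | dict[str, int]'`` stores
    # its value as a string constant; parse that string into a real expression.
    value = ast.parse("'list[int] | dict[str, int]'", mode="eval").body
    if isinstance(value, ast.Constant) and isinstance(value.value, str):
        value = ast.parse(value.value, mode="eval").body
    print(ast.dump(value))  # a BinOp over two Subscript nodes, not a string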
4 changes: 2 additions & 2 deletions pydoctor/astutils.py
@@ -195,7 +195,7 @@ def is_none_literal(node: ast.expr) -> bool:
"""Does this AST node represent the literal constant None?"""
return isinstance(node, (ast.Constant, ast.NameConstant)) and node.value is None

def unstring_annotation(node: ast.expr, ctx:'model.Documentable') -> ast.expr:
def unstring_annotation(node: ast.expr, ctx:'model.Documentable', section:str='annotation') -> ast.expr:
"""Replace all strings in the given expression by parsed versions.
@return: The unstringed node. If parsing fails, an error is logged
and the original node is returned.
@@ -205,7 +205,7 @@ def unstring_annotation(node: ast.expr, ctx:'model.Documentable') -> ast.expr:
except SyntaxError as ex:
module = ctx.module
assert module is not None
module.report(f'syntax error in annotation: {ex}', lineno_offset=node.lineno)
module.report(f'syntax error in {section}: {ex}', lineno_offset=node.lineno, section=section)
return node
else:
assert isinstance(expr, ast.expr), expr
48 changes: 34 additions & 14 deletions pydoctor/epydoc/markup/_pyval_repr.py
@@ -127,6 +127,13 @@ def visit(self, node: ast.AST) -> ast.AST:
return node

# TODO: add support for comparators when needed.
# _OperatorDelimiter is needed for:
# - IfExp
# - UnaryOp
# - BinOp, needs special handling for power operator
# - Compare
# - BoolOp
# - Lambda
class _OperatorDelimiter:
"""
A context manager that can add enclosing delimiters to nested operators when needed.
@@ -135,7 +142,7 @@ class _OperatorDelimiter:
"""

def __init__(self, colorizer: 'PyvalColorizer', state: _ColorizerState,
node: Union[ast.UnaryOp, ast.BinOp, ast.BoolOp]) -> None:
node: Union[ast.UnaryOp, ast.BinOp, ast.BoolOp],) -> None:

self.discard = True
"""No parenthesis by default."""
@@ -148,12 +155,17 @@ def __init__(self, colorizer: 'PyvalColorizer', state: _ColorizerState,
# See _Parentage class, applied in PyvalColorizer._colorize_ast()
parent_node: Optional[ast.AST] = getattr(node, 'parent', None)

if isinstance(parent_node, (ast.UnaryOp, ast.BinOp, ast.BoolOp)):
if parent_node:
precedence = astor.op_util.get_op_precedence(node.op)
parent_precedence = astor.op_util.get_op_precedence(parent_node.op)
# Add parentheses when precedences are equal to avoid confusion
# and correctly handle the Pow special case without too much annoyance.
if precedence <= parent_precedence:
if isinstance(parent_node, (ast.UnaryOp, ast.BinOp, ast.BoolOp)):
parent_precedence = astor.op_util.get_op_precedence(parent_node.op)
if isinstance(parent_node.op, ast.Pow) or isinstance(parent_node, ast.BoolOp):
parent_precedence+=1
else:
parent_precedence = colorizer.explicit_precedence.get(
node, astor.op_util.Precedence.highest)

if precedence < parent_precedence:
self.discard = False

def __enter__(self) -> '_OperatorDelimiter':
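A rough sketch of the precedence comparison above, simplified to ignore the explicit_precedence map and the Pow/BoolOp adjustment (illustration only, not pydoctor code):

    import ast
    import astor.op_util

    outer = ast.parse("(a + b) * c", mode="eval").body   # BinOp with op=Mult
    inner = outer.left                                    # BinOp with op=Add
    # Add binds less tightly than Mult, so the colorizer keeps the parentheses.
    needs_parens = (astor.op_util.get_op_precedence(inner.op)
                    < astor.op_util.get_op_precedence(outer.op))
    print(needs_parens)  # True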
@@ -261,6 +273,9 @@ def __init__(self, linelen:Optional[int], maxlines:int, linebreakok:bool=True, r
self.maxlines: Union[int, float] = maxlines if maxlines!=0 else float('inf')
self.linebreakok = linebreakok
self.refmap = refmap if refmap is not None else {}
# some edge cases require computing the precedence ahead of time; this can't be
# done easily with access only to the parent node of some operators.
self.explicit_precedence:Dict[ast.AST, int] = {}

#////////////////////////////////////////////////////////////
# Colorization Tags & other constants
@@ -294,6 +309,10 @@ def __init__(self, linelen:Optional[int], maxlines:int, linebreakok:bool=True, r

RE_COMPILE_SIGNATURE = signature(re.compile)

def _set_precedence(self, precedence:int, *node:ast.AST) -> None:
for n in node:
self.explicit_precedence[n] = precedence

def colorize(self, pyval: Any) -> ColorizedPyvalRepr:
"""
Entry Point.
@@ -351,10 +370,6 @@ def _colorize(self, pyval: Any, state: _ColorizerState) -> None:
elif pyvaltype is frozenset:
self._multiline(self._colorize_iter, pyval,
state, prefix='frozenset([', suffix='])')
elif pyvaltype is dict:
self._multiline(self._colorize_dict,
list(pyval.items()),
state, prefix='{', suffix='}')
elif pyvaltype is list:
self._multiline(self._colorize_iter, pyval, state, prefix='[', suffix=']')
elif issubclass(pyvaltype, ast.AST):
@@ -447,15 +462,20 @@ def _colorize_iter(self, pyval: Iterable[Any], state: _ColorizerState,
if suffix is not None:
self._output(suffix, self.GROUP_TAG, state)

def _colorize_dict(self, items: Iterable[Tuple[Any, Any]], state: _ColorizerState, prefix: str, suffix: str) -> None:
def _colorize_ast_dict(self, items: Iterable[Tuple[Optional[ast.AST], ast.AST]],
state: _ColorizerState, prefix: str, suffix: str) -> None:
self._output(prefix, self.GROUP_TAG, state)
indent = state.charpos
for i, (key, val) in enumerate(items):
if i>=1:
self._insert_comma(indent, state)
state.result.append(self.WORD_BREAK_OPPORTUNITY)
self._colorize(key, state)
self._output(': ', self.COLON_TAG, state)
if key:
self._set_precedence(astor.op_util.Precedence.Comma, val)
self._colorize(key, state)
self._output(': ', self.COLON_TAG, state)
else:
self._output('**', None, state)
self._colorize(val, state)
self._output(suffix, self.GROUP_TAG, state)

@@ -546,7 +566,7 @@ def _colorize_ast(self, pyval: ast.AST, state: _ColorizerState) -> None:
self._multiline(self._colorize_iter, pyval.elts, state, prefix='set([', suffix='])')
elif isinstance(pyval, ast.Dict):
items = list(zip(pyval.keys, pyval.values))
self._multiline(self._colorize_dict, items, state, prefix='{', suffix='}')
self._multiline(self._colorize_ast_dict, items, state, prefix='{', suffix='}')
elif isinstance(pyval, ast.Name):
self._colorize_ast_name(pyval, state)
elif isinstance(pyval, ast.Attribute):
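For reference, the AST shape that the new _colorize_ast_dict branch distinguishes: in an ast.Dict node, a ``**something`` entry is stored with a key of None, which is why the code prints ``**`` when the key is missing (small sketch, not pydoctor code):

    import ast

    node = ast.parse("{'a': 1, **extra}", mode="eval").body
    assert isinstance(node, ast.Dict)
    print([k if k is None else type(k).__name__ for k in node.keys])
    # ['Constant', None]  -- the None key corresponds to the **extra entry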
18 changes: 9 additions & 9 deletions pydoctor/linker.py
@@ -45,6 +45,13 @@ def taglink(o: 'model.Documentable', page_url: str,
ret(title=o.fullName())
return ret

def intersphinx_link(label:"Flattenable", url:str) -> Tag:
"""
Create an intersphinx link.
It's special because it uses the 'intersphinx-link' CSS class.
"""
return tags.a(label, href=url, class_='intersphinx-link')

class _EpydocLinker(DocstringLinker):
"""
@@ -92,13 +99,6 @@ def switch_context(self, ob:Optional['model.Documentable']) -> Iterator[None]:
self._page_object = old_page_object
self.reporting_obj = old_reporting_object

@staticmethod
def _create_intersphinx_link(label:"Flattenable", url:str) -> Tag:
"""
Create a link with the special 'intersphinx-link' CSS class.
"""
return tags.a(label, href=url, class_='intersphinx-link')

def look_for_name(self,
name: str,
candidates: Iterable['model.Documentable'],
@@ -139,7 +139,7 @@ def link_to(self, identifier: str, label: "Flattenable") -> Tag:

url = self.look_for_intersphinx(fullID)
if url is not None:
return self._create_intersphinx_link(label, url=url)
return intersphinx_link(label, url=url)

link = tags.transparent(label)
return link
@@ -152,7 +152,7 @@ def link_xref(self, target: str, label: "Flattenable", lineno: int) -> Tag:
xref = label
else:
if isinstance(resolved, str):
xref = self._create_intersphinx_link(label, url=resolved)
xref = intersphinx_link(label, url=resolved)
else:
xref = taglink(resolved, self.page_url, label)

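A hypothetical usage sketch of the now module-level helper (the target URL is illustrative, not from this diff):

    from pydoctor.linker import intersphinx_link

    link = intersphinx_link('datetime.date',
                            url='https://docs.python.org/3/library/datetime.html#datetime.date')
    # The returned Tag flattens to an anchor carrying the 'intersphinx-link'
    # CSS class with 'datetime.date' as its label.

Moving the helper out of _EpydocLinker presumably lets other parts of the code (such as the class-hierarchy page mentioned in the changelog) create intersphinx links without holding a linker instance.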
2 changes: 1 addition & 1 deletion pydoctor/node2stan.py
@@ -156,7 +156,7 @@ def starttag(self, node: nodes.Node, tagname: str, suffix: str = '\n', **attribu
# iterate through attributes one at a time because some
# versions of docutils don't case-normalize attributes.
for attr_dict in attr_dicts:
for key, val in list(attr_dict.items()):
for key, val in tuple(attr_dict.items()):
# Prefix all CSS classes with "rst-"; and prefix all
# names with "rst-" to avoid conflicts.
if key.lower() in ('class', 'id', 'name'):
14 changes: 7 additions & 7 deletions pydoctor/templatewriter/pages/sidebar.py
@@ -70,7 +70,7 @@ def __init__(self, ob: Documentable, documented_ob: Documentable,
self.template_lookup = template_lookup

# Does this sidebar section represent the object itself?
self._represents_documented_ob = self.ob == self.documented_ob
self._represents_documented_ob = self.ob is self.documented_ob

@renderer
def kind(self, request: IRequest, tag: Tag) -> str:
@@ -195,7 +195,7 @@ def docstringToc(self, request: IRequest, tag: Tag) -> Union[Tag, str]:

# Only show the TOC when visiting the object's own page; in other words, the TOC does not show up
# in the object's parent section or any other subsections except the main one.
if toc and self.documented_ob == self.ob:
if toc and self.documented_ob is self.ob:
return tag.fillSlots(titles=toc)
else:
return ""
@@ -288,9 +288,8 @@ def __init__(self, ob: Documentable,
@renderer
def items(self, request: IRequest, tag: Tag) -> Iterator['ContentItem']:

for child in self.children:

yield ContentItem(
return (
ContentItem(
loader=TagLoader(tag),
ob=self.ob,
child=child,
Expand All @@ -299,6 +298,7 @@ def items(self, request: IRequest, tag: Tag) -> Iterator['ContentItem']:
nested_content_loader=self.nested_content_loader,
template_lookup=self.template_lookup,
level_depth=self._level_depth)
for child in self.children )


class ContentItem(Element):
Expand Down Expand Up @@ -329,7 +329,7 @@ def class_(self, request: IRequest, tag: Tag) -> str:
# But I found it a little bit too colorful.
if self.child.isPrivate:
class_ += "private"
if self.child == self.documented_ob:
if self.child is self.documented_ob:
class_ += " thisobject"
return class_

@@ -350,7 +350,7 @@ def expandableItem(self, request: IRequest, tag: Tag) -> Union[str, 'ExpandableI
# pass do_not_expand=True also when an object does not have any members,
# instead of expanding on an empty div.
return ExpandableItem(TagLoader(tag), self.child, self.documented_ob, nested_contents,
do_not_expand=self.child == self.documented_ob or not nested_contents.has_contents)
do_not_expand=self.child is self.documented_ob or not nested_contents.has_contents)
else:
return ""

65 changes: 32 additions & 33 deletions pydoctor/templatewriter/search.py
@@ -3,7 +3,7 @@
"""

from pathlib import Path
from typing import Iterable, Iterator, List, Optional, Tuple, Type, Dict, TYPE_CHECKING
from typing import Iterator, List, Optional, Tuple, Type, Dict, TYPE_CHECKING
import json

import attr
@@ -17,22 +17,27 @@
if TYPE_CHECKING:
from twisted.web.template import Flattenable

def get_all_documents_flattenable(system: model.System) -> List[Dict[str, "Flattenable"]]:
def get_all_documents_flattenable(system: model.System) -> Iterator[Dict[str, "Flattenable"]]:
"""
Get the all data to be writen into ``all-documents.html`` file.
Get a generator for all the data to be written into the ``all-documents.html`` file.
"""
documents: List[Dict[str, "Flattenable"]] = [dict(
id=ob.fullName(),
name=epydoc2stan.insert_break_points(ob.name),
fullName=epydoc2stan.insert_break_points(ob.fullName()),
kind=epydoc2stan.format_kind(ob.kind) if ob.kind else '',
type=str(ob.__class__.__name__),
summary=epydoc2stan.format_summary(ob),
url=ob.url,
privacy=str(ob.privacyClass.name))

for ob in system.allobjects.values() if ob.isVisible]
return documents
# This function accounts for a substantial proportion of pydoctor runtime.
# So it's optimized.
insert_break_points = epydoc2stan.insert_break_points
format_kind = epydoc2stan.format_kind
format_summary = epydoc2stan.format_summary

return ({
'id': ob.fullName(),
'name': ob.name,
'fullName': insert_break_points(ob.fullName()),
'kind': format_kind(ob.kind) if ob.kind else '',
'type': str(ob.__class__.__name__),
'summary': format_summary(ob),
'url': ob.url,
'privacy': str(ob.privacyClass.name)}

for ob in system.allobjects.values() if ob.isVisible)
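The hoisting of insert_break_points / format_kind / format_summary into locals is the usual CPython micro-optimization of avoiding repeated attribute lookups in a hot loop; a tiny unrelated sketch of the pattern (indicative only, not pydoctor code):

    import timeit

    setup = "import math; xs = list(range(1000))"
    attr_lookup  = timeit.timeit("[math.sqrt(x) for x in xs]", setup=setup, number=1000)
    local_lookup = timeit.timeit("sqrt = math.sqrt; [sqrt(x) for x in xs]",
                                 setup=setup, number=1000)
    print(attr_lookup, local_lookup)  # the local-variable version is usually a bit faster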

class AllDocuments(Page):

@@ -42,7 +47,7 @@ def title(self) -> str:
return "All Documents"

@renderer
def documents(self, request: None, tag: Tag) -> Iterable[Tag]:
def documents(self, request: None, tag: Tag) -> Iterator[Tag]:
for doc in get_all_documents_flattenable(self.system):
yield tag.clone().fillSlots(**doc)

@@ -110,23 +115,17 @@ def format_kind(self, ob:model.Documentable) -> str:
return epydoc2stan.format_kind(ob.kind) if ob.kind else ''

def get_corpus(self) -> List[Tuple[Dict[str, Optional[str]], Dict[str, int]]]:

documents: List[Tuple[Dict[str, Optional[str]], Dict[str, int]]] = []

for ob in (o for o in self.system.allobjects.values() if o.isVisible):

documents.append(
(
{
f:self.format(ob, f) for f in self.fields
},
{
"boost": self.get_ob_boost(ob)
}
)
)

return documents
return [
(
{
f:self.format(ob, f) for f in self.fields
},
{
"boost": self.get_ob_boost(ob)
}
)
for ob in (o for o in self.system.allobjects.values() if o.isVisible)
]

def write(self) -> None:

