From de6733491873298dadd438c0113d0506e7eee2ac Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sat, 26 Jun 2021 02:26:23 -0400 Subject: [PATCH 01/60] Add initial code for proper aliases handling. --- docs/epytext_demo/demo_epytext_module.py | 5 +- .../demo_restructuredtext_module.py | 5 +- pydoctor/astbuilder.py | 56 ++++---- pydoctor/epydoc2stan.py | 16 ++- pydoctor/model.py | 84 +++++++++-- pydoctor/templates/attribute-child.html | 2 +- .../templatewriter/pages/attributechild.py | 17 ++- pydoctor/test/test_astbuilder.py | 130 ++++++++++++++++-- 8 files changed, 256 insertions(+), 59 deletions(-) diff --git a/docs/epytext_demo/demo_epytext_module.py b/docs/epytext_demo/demo_epytext_module.py index 3a19998e9..14c51d124 100644 --- a/docs/epytext_demo/demo_epytext_module.py +++ b/docs/epytext_demo/demo_epytext_module.py @@ -8,7 +8,7 @@ from somelib import SomeInterface import zope.interface import zope.schema -from typing import Final, Sequence, Optional +from typing import Final, Sequence, Optional, Protocol LANG = 'Fr' """ @@ -20,6 +20,9 @@ This is also a constant, but annotated with typing.Final. """ +Interface = Protocol +"""Aliases are also documented.""" + def demo_fields_docstring_arguments(m, b): # type: ignore """ Fields are used to describe specific properties of a documented object. diff --git a/docs/restructuredtext_demo/demo_restructuredtext_module.py b/docs/restructuredtext_demo/demo_restructuredtext_module.py index 070be598e..20283d15e 100644 --- a/docs/restructuredtext_demo/demo_restructuredtext_module.py +++ b/docs/restructuredtext_demo/demo_restructuredtext_module.py @@ -7,7 +7,7 @@ from abc import ABC import zope.interface import zope.schema -from typing import Final, Sequence, Optional +from typing import Final, Sequence, Optional, Protocol LANG = 'Fr' """ @@ -19,6 +19,9 @@ This is also a constant, but annotated with typing.Final. """ +Interface = Protocol +"""Aliases are also documented.""" + def demo_fields_docstring_arguments(m, b): # type: ignore """ Fields are used to describe specific properties of a documented object. diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py index d8dcbeac8..f1a9ba06e 100644 --- a/pydoctor/astbuilder.py +++ b/pydoctor/astbuilder.py @@ -46,25 +46,7 @@ def _maybeAttribute(cls: model.Class, name: str) -> bool: inherited) attribute, L{False} otherwise """ obj = cls.find(name) - return obj is None or isinstance(obj, model.Attribute) - - -def _handleAliasing( - ctx: model.CanContainImportsDocumentable, - target: str, - expr: Optional[ast.expr] - ) -> bool: - """If the given expression is a name assigned to a target that is not yet - in use, create an alias. - @return: L{True} iff an alias was created. - """ - if target in ctx.contents: - return False - full_name = node2fullname(expr, ctx) - if full_name is None: - return False - ctx._localNameToFullName_map[target] = full_name - return True + return obj is None or isinstance(obj, model.Attribute) _attrs_decorator_signature = signature(attrs) """Signature of the L{attr.s} class decorator.""" @@ -201,6 +183,9 @@ def extract_final_subscript(annotation: ast.Subscript) -> ast.expr: assert isinstance(ann_slice, ast.expr) return ann_slice +def is_alias(value: ast.expr) -> bool: + return node2dottedname(value) is not None + class ModuleVistor(ast.NodeVisitor): currAttr: Optional[model.Documentable] newAttr: Optional[model.Documentable] @@ -387,7 +372,9 @@ def _importNames(self, modname: str, names: Iterable[ast.alias]) -> None: asname = orgname # Move re-exported objects into current module. 
- if asname in exports and mod is not None: + if asname in exports \ + and mod is not None: # This part of the condition makes if impossible to re-export + # names that are not part of the current system. We could create Aliases instead. try: ob = mod.contents[orgname] except KeyError: @@ -502,6 +489,20 @@ def _handleConstant(self, obj: model.Attribute, value: Optional[ast.expr], linen # Just plain "Final" annotation. # Simply ignore it because it's duplication of information. obj.annotation = _infer_type(value) if value else None + + def _handleAlias(self, obj: model.Attribute, value: Optional[ast.expr], lineno: int) -> bool: + """ + Must be called after obj.setLineNumber() to have the right line number in the warning. + """ + + if is_attribute_overridden(obj, value): + obj.report(f'Assignment to alias "{obj.name}" overrides previous assignment ' + f'at line {obj.linenumber}, the original redirection will be ignored.', + section='ast', lineno_offset=lineno-obj.linenumber) + + obj.kind = model.DocumentableKind.ALIAS + # This will be used to follow the alias redirection. + obj.value = value def _handleModuleVar(self, target: str, @@ -526,8 +527,9 @@ def _handleModuleVar(self, obj.annotation = annotation obj.setLineNumber(lineno) - - if is_constant(obj): + if is_alias(expr): + self._handleAlias(obj=obj, value=expr, lineno=lineno) + elif is_constant(obj): self._handleConstant(obj=obj, value=expr, lineno=lineno) else: obj.kind = model.DocumentableKind.VARIABLE @@ -545,8 +547,7 @@ def _handleAssignmentInModule(self, ) -> None: module = self.builder.current assert isinstance(module, model.Module) - if not _handleAliasing(module, target, expr): - self._handleModuleVar(target, annotation, expr, lineno) + self._handleModuleVar(target, annotation, expr, lineno) def _handleClassVar(self, name: str, @@ -581,7 +582,9 @@ def _handleClassVar(self, obj.annotation = annotation obj.setLineNumber(lineno) - if is_constant(obj): + if is_alias(expr): + self._handleAlias(obj=obj, value=expr, lineno=lineno) + elif is_constant(obj): self._handleConstant(obj=obj, value=expr, lineno=lineno) else: obj.value = expr @@ -630,8 +633,7 @@ def _handleAssignmentInClass(self, ) -> None: cls = self.builder.current assert isinstance(cls, model.Class) - if not _handleAliasing(cls, target, expr): - self._handleClassVar(target, annotation, expr, lineno) + self._handleClassVar(target, annotation, expr, lineno) def _handleDocstringUpdate(self, targetNode: ast.expr, diff --git a/pydoctor/epydoc2stan.py b/pydoctor/epydoc2stan.py index 3f8080285..19ad1a978 100644 --- a/pydoctor/epydoc2stan.py +++ b/pydoctor/epydoc2stan.py @@ -652,7 +652,9 @@ def format_docstring(obj: model.Documentable) -> Tag: ret: Tag = tags.div if pdoc is None: - ret(tags.p(class_='undocumented')("Undocumented")) + # Aliases are generally not documented, so we never mark them as "undocumented". + if obj.kind is not model.DocumentableKind.ALIAS: + ret(tags.p(class_='undocumented')("Undocumented")) else: try: stan = pdoc.to_stan(_EpydocLinker(source)) @@ -698,7 +700,11 @@ def format_summary(obj: model.Documentable) -> Tag: source = obj.parent assert source is not None elif doc is None: - return format_undocumented(obj) + if obj.kind is model.DocumentableKind.ALIAS: + # Aliases are generally not documented, so we never mark them as "undocumented", we simply link the object. + return Tag('', children=format_alias_value(obj).children) + else: + return format_undocumented(obj) else: # Tell mypy that if we found a docstring, we also have its source. 
assert source is not None @@ -836,10 +842,12 @@ def format_kind(kind: model.DocumentableKind, plural: bool = False) -> str: model.DocumentableKind.VARIABLE : 'Variable', model.DocumentableKind.SCHEMA_FIELD : 'Attribute', model.DocumentableKind.CONSTANT : 'Constant', + model.DocumentableKind.ALIAS : 'Alias', } plurals = { model.DocumentableKind.CLASS : 'Classes', model.DocumentableKind.PROPERTY : 'Properties', + model.DocumentableKind.ALIAS : 'Aliases', } if plural: return plurals.get(kind, names[kind] + 's') @@ -872,3 +880,7 @@ def format_constant_value(obj: model.Attribute) -> "Flattenable": """ rows = list(_format_constant_value(obj)) return tags.table(class_='valueTable')(*rows) + +def format_alias_value(obj: model.Attribute) -> "Flattenable": + return tags.p(tags.em("Alias to ", + colorize_inline_pyval(obj.value).to_stan(_EpydocLinker(obj)))) \ No newline at end of file diff --git a/pydoctor/model.py b/pydoctor/model.py index ec6c783f6..8fe346b04 100644 --- a/pydoctor/model.py +++ b/pydoctor/model.py @@ -25,6 +25,7 @@ from pydoctor.epydoc.markup import ParsedDocstring from pydoctor.sphinx import CacheT, SphinxInventory +from pydoctor.astutils import node2dottedname if TYPE_CHECKING: from typing_extensions import Literal @@ -95,6 +96,7 @@ class DocumentableKind(Enum): STATIC_METHOD = 600 METHOD = 500 FUNCTION = 400 + ALIAS = 320 CONSTANT = 310 CLASS_VARIABLE = 300 SCHEMA_FIELD = 220 @@ -266,8 +268,9 @@ def _handle_reparenting_post(self) -> None: def _localNameToFullName(self, name: str) -> str: raise NotImplementedError(self._localNameToFullName) - def expandName(self, name: str) -> str: - """Return a fully qualified name for the possibly-dotted `name`. + def expandName(self, name: str, redirected_from:Optional['Documentable']=None) -> str: + """ + Return a fully qualified name for the possibly-dotted `name`. To explain what this means, consider the following modules: @@ -286,27 +289,72 @@ class E: In the context of mod2.E, expandName("RenamedExternal") should be "external_location.External" and expandName("renamed_mod.Local") - should be "mod1.Local". """ + should be "mod1.Local". + + This method is in charge to follow the aliases when possible! + It will reccursively follow any L{DocumentalbeKind.ALIAS} entry found. + + @param name: The name to expand. + @param redirected_from: In the case of a followed redirection ony. This is + the alias object. This variable is used to prevent infinite loops when doing the lookup. + """ parts = name.split('.') obj: Documentable = self - for i, p in enumerate(parts): - full_name = obj._localNameToFullName(p) - if full_name == p and i != 0: + for i, part in enumerate(parts): + full_name = obj._localNameToFullName(part) + if full_name == part and i != 0: # The local name was not found. - # TODO: Instead of returning the input, _localNameToFullName() - # should probably either return None or raise LookupError. - full_name = f'{obj.fullName()}.{p}' - break + # If we're looking at a class, we try our luck with the inherited members + if isinstance(obj, Class) and obj.find(part) is not None: + full_name = obj.find(part).fullName() + else: + # TODO: Instead of returning the input, _localNameToFullName() + # should probably either return None or raise LookupError. + full_name = f'{obj.fullName()}.{part}' + break nxt = self.system.objForFullName(full_name) if nxt is None: break obj = nxt - return '.'.join([full_name] + parts[i + 1:]) + + expanded_name = '.'.join([full_name] + parts[i + 1:]) + + # We check if the name we resolved is an alias. 
+ # Attribute for all aliases are created now, we can follow the redirection here. + obj = self.system.objForFullName(expanded_name) + if obj is not None and obj.kind is DocumentableKind.ALIAS: + resolved = self._resolveAlias(obj, redirected_from=redirected_from) + expanded_name = resolved or expanded_name + + return expanded_name + + def _resolveAlias(self, alias: 'Attribute', redirected_from:Optional['Attribute']=None) -> Optional[str]: + dottedname = node2dottedname(alias.value) + if dottedname: + name = '.'.join(dottedname) + + ctx = self # should ctx be alias.parent? + + # This checks avoids infinite recursion error + if redirected_from != alias: + # We redirect to the original object instead! + return ctx.expandName(name, redirected_from=alias) + else: + # Issue tracing the alias back to it's original location, found the same alias again. + if ctx.parent: + # We try with the parent scope and redirect to the original object! + # This is used in situations like right here in the System class and it's aliases, + # because they have the same name as the name they are aliasing, it's causing trouble. + return ctx.parent.expandName(name, redirected_from=alias) + return None def resolveName(self, name: str) -> Optional['Documentable']: - """Return the object named by "name" (using Python's lookup rules) in - this context, if any is known to pydoctor.""" - return self.system.objForFullName(self.expandName(name)) + """ + Return the object named by "name" (using Python's lookup rules) in + this context, if any is known to pydoctor. + """ + obj = self.system.objForFullName(self.expandName(name)) + return obj @property def privacyClass(self) -> PrivacyClass: @@ -397,6 +445,10 @@ def setup(self) -> None: def _localNameToFullName(self, name: str) -> str: if name in self.contents: o: Documentable = self.contents[name] + if o.kind is DocumentableKind.ALIAS: + resolved = self._resolveAlias(o) + if resolved: + return resolved return o.fullName() elif name in self._localNameToFullName_map: return self._localNameToFullName_map[name] @@ -472,6 +524,10 @@ def find(self, name: str) -> Optional[Documentable]: def _localNameToFullName(self, name: str) -> str: if name in self.contents: o: Documentable = self.contents[name] + if o.kind is DocumentableKind.ALIAS: + resolved = self._resolveAlias(o, o) # We pass redirected_from value in order to avoid inifite recursion. + if resolved: + return resolved return o.fullName() elif name in self._localNameToFullName_map: return self._localNameToFullName_map[name] diff --git a/pydoctor/templates/attribute-child.html b/pydoctor/templates/attribute-child.html index f02fbd6d9..5811c68bd 100644 --- a/pydoctor/templates/attribute-child.html +++ b/pydoctor/templates/attribute-child.html @@ -21,7 +21,7 @@ Docstring. - + Value of the attribute if it's a constant. 
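For illustration, a minimal input module of the kind the new ALIAS handling covers, mirroring the demo_*_module.py change above (the file name here is made up):

    # demo.py -- hypothetical module fed to pydoctor
    from typing import Protocol

    # The assigned value is a bare dotted name, so is_alias() returns True,
    # the Attribute is created with kind=DocumentableKind.ALIAS, and the
    # attribute page renders it as "Alias to ..." via format_alias_value().
    Interface = Protocol
    """Aliases are also documented."""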
diff --git a/pydoctor/templatewriter/pages/attributechild.py b/pydoctor/templatewriter/pages/attributechild.py index a4a48257b..65ab5c7c1 100644 --- a/pydoctor/templatewriter/pages/attributechild.py +++ b/pydoctor/templatewriter/pages/attributechild.py @@ -78,8 +78,15 @@ def functionDeprecated(self, request: object, tag: Tag) -> "Flattenable": return tags.div(msg, role="alert", class_="deprecationNotice alert alert-warning") @renderer - def constantValue(self, request: object, tag: Tag) -> "Flattenable": - if self.ob.kind is not DocumentableKind.CONSTANT or self.ob.value is None: - return tag.clear() - # Attribute is a constant (with a value), then display it's value - return epydoc2stan.format_constant_value(self.ob) + def value(self, request: object, tag: Tag) -> "Flattenable": + if self.ob.value is not None: + if self.ob.kind is DocumentableKind.CONSTANT: + # Attribute is a constant (with a value), then display it's value + return epydoc2stan.format_constant_value(self.ob) + if self.ob.kind is DocumentableKind.ALIAS: + # Attribute is an alias + return epydoc2stan.format_alias_value(self.ob) + else: + return '' + else: + return '' \ No newline at end of file diff --git a/pydoctor/test/test_astbuilder.py b/pydoctor/test/test_astbuilder.py index 5bb83acb2..7bec20518 100644 --- a/pydoctor/test/test_astbuilder.py +++ b/pydoctor/test/test_astbuilder.py @@ -484,12 +484,12 @@ class D(C): assert mod.contents['D'].bases == ['mod.C'], mod.contents['D'].bases @systemcls_param -def test_documented_no_alias(systemcls: Type[model.System]) -> None: - """A variable that is documented should not be considered an alias.""" - # TODO: We should also verify this for inline docstrings, but the code - # currently doesn't support that. We should perhaps store aliases - # as Documentables as well, so we can change their 'kind' when - # an inline docstring follows the assignment. +def test_documented_alias(systemcls: Type[model.System]) -> None: + """ + All variables that simply points to an attribute or name are now + legit L{Attribute} documentable objects with a special kind: L{DocumentableKind.ALIAS}. + """ + mod = fromText(''' class SimpleClient: pass @@ -498,14 +498,128 @@ class Processor: @ivar clientFactory: Callable that returns a client. """ clientFactory = SimpleClient - ''', systemcls=systemcls) + ''', systemcls=systemcls, modname='mod') P = mod.contents['Processor'] f = P.contents['clientFactory'] assert unwrap(f.parsed_docstring) == """Callable that returns a client.""" assert f.privacyClass is model.PrivacyClass.VISIBLE - assert f.kind is model.DocumentableKind.INSTANCE_VARIABLE + # we now mark aliases with the ALIAS kind! + assert f.kind is model.DocumentableKind.ALIAS assert f.linenumber + # TODO: We should also verify this for inline docstrings, but the code + # currently doesn't support that. We should perhaps store aliases + # as Documentables as well, so we can change their 'kind' when + # an inline docstring follows the assignment. + # mod = fromText(''' + # class SimpleClient: + # pass + # class Processor: + # clientFactory = SimpleClient + # """ + # Callable that returns a client. + # """ + # ''', systemcls=systemcls, modname='mod') + # P = mod.contents['Processor'] + # f = P.contents['clientFactory'] + # assert unwrap(f.parsed_docstring) == """Callable that returns a client.""" + # assert f.privacyClass is model.PrivacyClass.VISIBLE + # # we now mark aliases with the ALIAS kind! 
+ # assert f.kind is model.DocumentableKind.ALIAS + # assert f.linenumber + + +@systemcls_param +def test_resolveName_alias(systemcls: Type[model.System]) -> None: + system = systemcls() + fromText(''' + class BaseClient: + BAR = 1 + FOO = 2 + ''', system=system, modname='base_mod') + mod = fromText(''' + import base_mod as _base + class SimpleClient(_base.BaseClient): + BARS = SimpleClient.FOO + FOOS = _base.BaseClient.BAR + class Processor: + var = 1 + clientFactory = SimpleClient + BARS = _base.BaseClient.FOO + P = Processor + ''', system=system, modname='mod') + Processor = mod.contents['Processor'] + assert mod.expandName('Processor.clientFactory')=="mod.SimpleClient" + assert mod.expandName('Processor.BARS')=="base_mod.BaseClient.FOO" + assert mod.system.allobjects.get("mod.SimpleClient").find("FOO") is not None + assert mod.system.allobjects.get("mod.SimpleClient.FOO") is None + assert mod.contents['P'].kind is model.DocumentableKind.ALIAS + assert mod._resolveAlias(mod.contents['P'])=="mod.Processor" + assert mod._localNameToFullName('P')=="mod.Processor" + assert mod.expandName('P')=="mod.Processor" + assert mod.expandName('P.var')=="mod.Processor.var" + assert mod.expandName('P.clientFactory')=="mod.SimpleClient" + assert mod.expandName('Processor.clientFactory.BARS')=="base_mod.BaseClient.FOO" + assert mod.expandName('Processor.clientFactory.FOOS')=="base_mod.BaseClient.BAR" + assert mod.expandName('P.clientFactory.BARS')=="base_mod.BaseClient.FOO" + assert mod.expandName('P.clientFactory.FOOS')=="base_mod.BaseClient.BAR" + assert Processor.expandName('clientFactory')=="mod.SimpleClient" + assert Processor.expandName('BARS')=="base_mod.BaseClient.FOO" + assert Processor.expandName('clientFactory.BARS')=="base_mod.BaseClient.FOO" + +@systemcls_param +def test_resolveName_alias2(systemcls: Type[model.System]) -> None: + system = systemcls() + base_mod = fromText(''' + class Foo: + _1=1 + _2=2 + _3=3 + foo = Foo._1 + class Attribute: + foo = foo + class Class: + pass + ''', system=system, modname='base_mod') + mod = fromText(''' + from base_mod import Attribute, Class, Foo + class System: + Attribute = Attribute + Class = Class + class SuperSystem: + foo = Foo._3 + class Attribute: + foo = foo + Attribute = Attribute + ''', system=system, modname='mod') + System = mod.contents['System'] + SuperSystem = mod.contents['SuperSystem'] + assert mod.expandName('System.Attribute')=="base_mod.Attribute" + assert mod.expandName('System.Class')=="base_mod.Class" + + assert System.expandName('Attribute')=="base_mod.Attribute" + assert System.expandName('Class')=="base_mod.Class" + + assert mod.expandName('SuperSystem.Attribute')=="mod.SuperSystem.Attribute" + assert SuperSystem.expandName('Attribute')=="mod.SuperSystem.Attribute" + + assert mod.expandName('SuperSystem.Attribute.foo')=="base_mod.Foo._3" + assert SuperSystem.expandName('Attribute.foo')=="base_mod.Foo._3" + + assert base_mod.contents['Attribute'].contents['foo'].kind is model.DocumentableKind.ALIAS + assert mod.contents['System'].contents['Attribute'].kind is model.DocumentableKind.ALIAS + + assert base_mod.contents['Attribute'].contents['foo'].fullName() == 'base_mod.Attribute.foo' + assert 'base_mod.Attribute.foo' in mod.system.allobjects, str(list(mod.system.allobjects)) + + mod.system.objForFullName('base_mod.Attribute.foo').kind is model.DocumentableKind.ALIAS + + assert mod.expandName('System.Attribute.foo')=="base_mod.Foo._1" + assert System.expandName('Attribute.foo')=="base_mod.Foo._1" + + assert 
mod.resolveName('System').contents['Attribute'].kind is model.DocumentableKind.ALIAS + + @systemcls_param def test_subclasses(systemcls: Type[model.System]) -> None: src = ''' From e476cf88ff3640dcae462db451fb74cf408d62c6 Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sat, 26 Jun 2021 03:50:13 -0400 Subject: [PATCH 02/60] Fix a reccursion error and reduce the scope of what is an module-level alias. An module-level alias must be defined at the top level of the module. --- pydoctor/astbuilder.py | 30 +++++++++++++++++++++++++++++- pydoctor/model.py | 19 ++++++++++--------- 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py index f1a9ba06e..f962c96e4 100644 --- a/pydoctor/astbuilder.py +++ b/pydoctor/astbuilder.py @@ -194,6 +194,7 @@ def __init__(self, builder: 'ASTBuilder', module: model.Module): self.builder = builder self.system = builder.system self.module = module + self._moduleLevelAssigns: List[str] = [] def default(self, node: ast.AST) -> None: body: Optional[Sequence[ast.stmt]] = getattr(node, 'body', None) @@ -208,6 +209,21 @@ def default(self, node: ast.AST) -> None: def visit_Module(self, node: ast.Module) -> None: assert self.module.docstring is None + # Build the list of module level variable. This is is used to check if a module assignment + # should be analyzed or not. See _handleAssignmentInModule. + _assigns = list(_findAnyModuleLevelAssign(node)) + for target, value in _assigns: + for a in target: + if isinstance(a, ast.Tuple): + for ele in a.elts: + name = node2dottedname(ele) + if name: + self._moduleLevelAssigns.append('.'.join(name)) + else: + name = node2dottedname(a) + if name: + self._moduleLevelAssigns.append('.'.join(name)) + self.builder.push(self.module, 0) if len(node.body) > 0 and isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Str): self.module.setDocstring(node.body[0].value) @@ -547,7 +563,11 @@ def _handleAssignmentInModule(self, ) -> None: module = self.builder.current assert isinstance(module, model.Module) - self._handleModuleVar(target, annotation, expr, lineno) + # Check if the assignment is on the first level. + # We ignore assignments that are not defined at least once at the module level. + # Meaning that we ignore variables defines in "if" or "try/catch" blocks. + if target in self._moduleLevelAssigns: + self._handleModuleVar(target, annotation, expr, lineno) def _handleClassVar(self, name: str, @@ -703,6 +723,7 @@ def _handleAssignment(self, self._handleDocstringUpdate(value, expr, lineno) elif isinstance(value, ast.Name) and value.id == 'self': self._handleInstanceVar(targetNode.attr, annotation, expr, lineno) + # TODO: Fix https://github.com/twisted/pydoctor/issues/13 def visit_Assign(self, node: ast.Assign) -> None: lineno = node.lineno @@ -1184,6 +1205,13 @@ def parseFile(self, path: Path) -> Optional[ast.Module]: model.System.defaultBuilder = ASTBuilder +def _findAnyModuleLevelAssign(mod_ast: ast.Module) -> Iterator[Tuple[List[ast.expr], ast.Assign]]: + for node in mod_ast.body: + if isinstance(node, (ast.Assign)): + yield (node.targets, node) + elif isinstance(node, ast.AnnAssign): + yield ([node.target], node) + def findModuleLevelAssign(mod_ast: ast.Module) -> Iterator[Tuple[str, ast.Assign]]: """ Find module level Assign. 
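For illustration, a stand-alone sketch of the module-level scan added above (simplified: tuple targets are ignored, and DEBUG is a made-up name):

    import ast

    source = (
        "LANG = 'Fr'\n"
        "if True:\n"
        "    DEBUG = False\n"
    )

    top_level = []
    for node in ast.parse(source).body:      # only direct children of the module
        if isinstance(node, ast.Assign):
            targets = node.targets
        elif isinstance(node, ast.AnnAssign):
            targets = [node.target]
        else:
            continue
        for target in targets:
            if isinstance(target, ast.Name):
                top_level.append(target.id)

    print(top_level)   # ['LANG'] -- DEBUG is skipped, it is not at the top level

Only names collected this way are passed on to _handleModuleVar(); assignments nested in "if" or "try" blocks are left out, which the next commit's test relies on.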
diff --git a/pydoctor/model.py b/pydoctor/model.py index 8fe346b04..fe91366de 100644 --- a/pydoctor/model.py +++ b/pydoctor/model.py @@ -265,7 +265,7 @@ def _handle_reparenting_post(self) -> None: for o in self.contents.values(): o._handle_reparenting_post() - def _localNameToFullName(self, name: str) -> str: + def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: raise NotImplementedError(self._localNameToFullName) def expandName(self, name: str, redirected_from:Optional['Documentable']=None) -> str: @@ -292,7 +292,7 @@ class E: should be "mod1.Local". This method is in charge to follow the aliases when possible! - It will reccursively follow any L{DocumentalbeKind.ALIAS} entry found. + It will reccursively follow any L{DocumentableKind.ALIAS} entry found. @param name: The name to expand. @param redirected_from: In the case of a followed redirection ony. This is @@ -301,7 +301,7 @@ class E: parts = name.split('.') obj: Documentable = self for i, part in enumerate(parts): - full_name = obj._localNameToFullName(part) + full_name = obj._localNameToFullName(part, redirected_from=redirected_from) if full_name == part and i != 0: # The local name was not found. # If we're looking at a class, we try our luck with the inherited members @@ -442,11 +442,11 @@ def setup(self) -> None: self._docformat: Optional[str] = None - def _localNameToFullName(self, name: str) -> str: + def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: if name in self.contents: o: Documentable = self.contents[name] if o.kind is DocumentableKind.ALIAS: - resolved = self._resolveAlias(o) + resolved = self._resolveAlias(o, redirected_from=redirected_from) if resolved: return resolved return o.fullName() @@ -521,11 +521,12 @@ def find(self, name: str) -> Optional[Documentable]: return obj return None - def _localNameToFullName(self, name: str) -> str: + def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: if name in self.contents: o: Documentable = self.contents[name] if o.kind is DocumentableKind.ALIAS: - resolved = self._resolveAlias(o, o) # We pass redirected_from value in order to avoid inifite recursion. + # We pass redirected_from value in order to avoid inifite recursion. + resolved = self._resolveAlias(o, redirected_from=redirected_from) if resolved: return resolved return o.fullName() @@ -563,8 +564,8 @@ def docsources(self) -> Iterator[Documentable]: if self.name in b.contents: yield b.contents[self.name] - def _localNameToFullName(self, name: str) -> str: - return self.parent._localNameToFullName(name) + def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: + return self.parent._localNameToFullName(name, redirected_from=redirected_from) class Function(Inheritable): kind = DocumentableKind.FUNCTION From 5f5fca96e1239513b5159a9a6c3f6d38d513c219 Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sat, 26 Jun 2021 12:50:12 -0400 Subject: [PATCH 03/60] Still register the indirection for aliases defined in try/expect or ifs blocks. Add a test for that. 
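The pattern being covered, taken from the test added below (twisted.internet.ssl is just the name the test fixture uses):

    try:
        from twisted.internet import ssl as _ssl
    except ImportError:
        ssl = None
    else:
        ssl = _ssl

    # No documented 'ssl' attribute is created when the name is never assigned
    # at the top level, but the indirection is still registered, so:
    #   mod.expandName('ssl')  == "twisted.internet.ssl"
    #   mod.expandName('_ssl') == "twisted.internet.ssl"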
--- pydoctor/astbuilder.py | 23 +++++++++++++++++ pydoctor/test/test_astbuilder.py | 42 ++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py index f962c96e4..473ada2ea 100644 --- a/pydoctor/astbuilder.py +++ b/pydoctor/astbuilder.py @@ -509,6 +509,8 @@ def _handleConstant(self, obj: model.Attribute, value: Optional[ast.expr], linen def _handleAlias(self, obj: model.Attribute, value: Optional[ast.expr], lineno: int) -> bool: """ Must be called after obj.setLineNumber() to have the right line number in the warning. + + Create an alias or update an alias. """ if is_attribute_overridden(obj, value): @@ -520,6 +522,24 @@ def _handleAlias(self, obj: model.Attribute, value: Optional[ast.expr], lineno: # This will be used to follow the alias redirection. obj.value = value + def _handleIndirection(self, + ctx: model.CanContainImportsDocumentable, + target: str, + expr: Optional[ast.expr] + ) -> bool: + """ + Aliases declared in "try/except" block or "if" blocks are not documented, but we still track the indirection. + + If the given expression is a name assigned to a target that is not yet in use, register an indirection in the L{_localNameToFullName_map} attriute. + """ + if target in ctx.contents: + return + full_name = node2fullname(expr, ctx) + if full_name is None: + return + ctx._localNameToFullName_map[target] = full_name + + def _handleModuleVar(self, target: str, annotation: Optional[ast.expr], @@ -568,6 +588,9 @@ def _handleAssignmentInModule(self, # Meaning that we ignore variables defines in "if" or "try/catch" blocks. if target in self._moduleLevelAssigns: self._handleModuleVar(target, annotation, expr, lineno) + elif is_alias(expr): + # But we still track the name indirection + self._handleIndirection(module, target, expr) def _handleClassVar(self, name: str, diff --git a/pydoctor/test/test_astbuilder.py b/pydoctor/test/test_astbuilder.py index 7bec20518..755ae9beb 100644 --- a/pydoctor/test/test_astbuilder.py +++ b/pydoctor/test/test_astbuilder.py @@ -619,6 +619,48 @@ class Attribute: assert mod.resolveName('System').contents['Attribute'].kind is model.DocumentableKind.ALIAS +@systemcls_param +def test_resolveName_alias3(systemcls: Type[model.System]) -> None: + """ + We ignore assignments that are not defined at least once at the module level. + Meaning that we ignore variables defines in "if" or "try/catch" blocks. 
+ """ + system = systemcls() + + base_mod = fromText(''' + ssl = 1 + ''', system=system, modname='twisted.internet') + + mod = fromText(''' + try: + from twisted.internet import ssl as _ssl + except ImportError: + ssl = None + else: + ssl = _ssl + ''', system=system, modname='mod') + + assert mod.expandName('ssl')=="twisted.internet.ssl" + assert mod.expandName('_ssl')=="twisted.internet.ssl" + assert ast.literal_eval(mod.resolveName('ssl').value)==1 + assert 'ssl' not in mod.contents + + mod = fromText(''' + ssl = None + try: + from twisted.internet import ssl as _ssl + except ImportError: + ssl = None + else: + ssl = _ssl + ''', system=systemcls(), modname='mod') + + assert mod.expandName('ssl')=="twisted.internet.ssl" + assert mod.expandName('_ssl')=="twisted.internet.ssl" + assert mod.contents['ssl'].kind is model.DocumentableKind.ALIAS + assert mod.resolveName('ssl') is None + assert 'ssl' in mod.contents + @systemcls_param def test_subclasses(systemcls: Type[model.System]) -> None: From da6f4ce596b9a70031b032a8c8ea098a7cdb9483 Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sat, 26 Jun 2021 20:24:12 -0400 Subject: [PATCH 04/60] Cleanup and documenting. --- pydoctor/astbuilder.py | 12 ++++---- pydoctor/epydoc2stan.py | 3 +- pydoctor/model.py | 53 ++++++++++++++++++++++++++------ pydoctor/test/test_astbuilder.py | 45 +++++++++++++++++++-------- 4 files changed, 85 insertions(+), 28 deletions(-) diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py index 473ada2ea..f54de39f2 100644 --- a/pydoctor/astbuilder.py +++ b/pydoctor/astbuilder.py @@ -183,7 +183,7 @@ def extract_final_subscript(annotation: ast.Subscript) -> ast.expr: assert isinstance(ann_slice, ast.expr) return ann_slice -def is_alias(value: ast.expr) -> bool: +def is_alias(value: Optional[ast.expr]) -> bool: return node2dottedname(value) is not None class ModuleVistor(ast.NodeVisitor): @@ -506,7 +506,7 @@ def _handleConstant(self, obj: model.Attribute, value: Optional[ast.expr], linen # Simply ignore it because it's duplication of information. obj.annotation = _infer_type(value) if value else None - def _handleAlias(self, obj: model.Attribute, value: Optional[ast.expr], lineno: int) -> bool: + def _handleAlias(self, obj: model.Attribute, value: Optional[ast.expr], lineno: int) -> None: """ Must be called after obj.setLineNumber() to have the right line number in the warning. @@ -522,11 +522,11 @@ def _handleAlias(self, obj: model.Attribute, value: Optional[ast.expr], lineno: # This will be used to follow the alias redirection. obj.value = value - def _handleIndirection(self, - ctx: model.CanContainImportsDocumentable, + @staticmethod + def _handleIndirection(ctx: model.CanContainImportsDocumentable, target: str, expr: Optional[ast.expr] - ) -> bool: + ) -> None: """ Aliases declared in "try/except" block or "if" blocks are not documented, but we still track the indirection. 
@@ -1228,7 +1228,7 @@ def parseFile(self, path: Path) -> Optional[ast.Module]: model.System.defaultBuilder = ASTBuilder -def _findAnyModuleLevelAssign(mod_ast: ast.Module) -> Iterator[Tuple[List[ast.expr], ast.Assign]]: +def _findAnyModuleLevelAssign(mod_ast: ast.Module) -> Iterator[Tuple[List[ast.expr], Union[ast.Assign, ast.AnnAssign]]]: for node in mod_ast.body: if isinstance(node, (ast.Assign)): yield (node.targets, node) diff --git a/pydoctor/epydoc2stan.py b/pydoctor/epydoc2stan.py index 19ad1a978..018dc6926 100644 --- a/pydoctor/epydoc2stan.py +++ b/pydoctor/epydoc2stan.py @@ -701,6 +701,7 @@ def format_summary(obj: model.Documentable) -> Tag: assert source is not None elif doc is None: if obj.kind is model.DocumentableKind.ALIAS: + assert isinstance(obj, model.Attribute) # Aliases are generally not documented, so we never mark them as "undocumented", we simply link the object. return Tag('', children=format_alias_value(obj).children) else: @@ -881,6 +882,6 @@ def format_constant_value(obj: model.Attribute) -> "Flattenable": rows = list(_format_constant_value(obj)) return tags.table(class_='valueTable')(*rows) -def format_alias_value(obj: model.Attribute) -> "Flattenable": +def format_alias_value(obj: model.Attribute) -> Tag: return tags.p(tags.em("Alias to ", colorize_inline_pyval(obj.value).to_stan(_EpydocLinker(obj)))) \ No newline at end of file diff --git a/pydoctor/model.py b/pydoctor/model.py index fe91366de..4e5525dba 100644 --- a/pydoctor/model.py +++ b/pydoctor/model.py @@ -268,7 +268,7 @@ def _handle_reparenting_post(self) -> None: def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: raise NotImplementedError(self._localNameToFullName) - def expandName(self, name: str, redirected_from:Optional['Documentable']=None) -> str: + def expandName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: """ Return a fully qualified name for the possibly-dotted `name`. @@ -293,36 +293,69 @@ class E: This method is in charge to follow the aliases when possible! It will reccursively follow any L{DocumentableKind.ALIAS} entry found. + + Example: + + mod1.py:: + + import external + class Processor: + spec = external.Processor.more_spec + P = Processor + + mod2.py:: + + from mod1 import P + class Runner: + processor = P + + In the context of mod2, expandName("Runner.processor.spec") should be + "external.Processor.more_spec". @param name: The name to expand. - @param redirected_from: In the case of a followed redirection ony. This is + @param redirected_from: In the case of a followed redirection only. This is the alias object. This variable is used to prevent infinite loops when doing the lookup. + @note: The implementation replies on iterating through the each part of the dotted name, + calling L{_localNameToFullName} for each name in their associated context and incrementally building + the fullName from that. + @since 2021: Lookup members in superclasses when possible and follows L{DocumentableKind.ALIAS}. This mean that L{expandName} will never return the name of an alias, + it will always follow it's indirection to the origin. """ + parts = name.split('.') - obj: Documentable = self + ctx: Documentable = self # The context for the currently processed part of the name. for i, part in enumerate(parts): - full_name = obj._localNameToFullName(part, redirected_from=redirected_from) + full_name = ctx._localNameToFullName(part, redirected_from=redirected_from) if full_name == part and i != 0: # The local name was not found. 
# If we're looking at a class, we try our luck with the inherited members - if isinstance(obj, Class) and obj.find(part) is not None: - full_name = obj.find(part).fullName() - else: + if isinstance(ctx, Class): + ctx.find(part) + f = ctx.find(part) + full_name = f.fullName() if f else full_name + # We don't have a full name + if full_name == part: # TODO: Instead of returning the input, _localNameToFullName() # should probably either return None or raise LookupError. - full_name = f'{obj.fullName()}.{part}' + # Or maybe we should find a way to indicate if the expanded name is "guessed", like in this case. + # or we surely have the the correct fullName. With the cirrent implementation, this would mean checking + # if parts[i + 1:] contains anything. + full_name = f'{ctx.fullName()}.{part}' break nxt = self.system.objForFullName(full_name) if nxt is None: break - obj = nxt + ctx = nxt expanded_name = '.'.join([full_name] + parts[i + 1:]) + # We check if the name we resolved is an alias. # Attribute for all aliases are created now, we can follow the redirection here. obj = self.system.objForFullName(expanded_name) if obj is not None and obj.kind is DocumentableKind.ALIAS: + assert isinstance(obj, Attribute) + # Try to resolve alias, fallback to original value if None. resolved = self._resolveAlias(obj, redirected_from=redirected_from) expanded_name = resolved or expanded_name @@ -446,6 +479,7 @@ def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']= if name in self.contents: o: Documentable = self.contents[name] if o.kind is DocumentableKind.ALIAS: + assert isinstance(o, Attribute) resolved = self._resolveAlias(o, redirected_from=redirected_from) if resolved: return resolved @@ -526,6 +560,7 @@ def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']= o: Documentable = self.contents[name] if o.kind is DocumentableKind.ALIAS: # We pass redirected_from value in order to avoid inifite recursion. + assert isinstance(o, Attribute) resolved = self._resolveAlias(o, redirected_from=redirected_from) if resolved: return resolved diff --git a/pydoctor/test/test_astbuilder.py b/pydoctor/test/test_astbuilder.py index 755ae9beb..2fc716721 100644 --- a/pydoctor/test/test_astbuilder.py +++ b/pydoctor/test/test_astbuilder.py @@ -530,7 +530,10 @@ class Processor: @systemcls_param -def test_resolveName_alias(systemcls: Type[model.System]) -> None: +def test_expandName_alias(systemcls: Type[model.System]) -> None: + """ + expandName now follows all kinds of aliases! + """ system = systemcls() fromText(''' class BaseClient: @@ -551,7 +554,7 @@ class Processor: Processor = mod.contents['Processor'] assert mod.expandName('Processor.clientFactory')=="mod.SimpleClient" assert mod.expandName('Processor.BARS')=="base_mod.BaseClient.FOO" - assert mod.system.allobjects.get("mod.SimpleClient").find("FOO") is not None + assert mod.system.allobjects.get("mod.SimpleClient") is not None assert mod.system.allobjects.get("mod.SimpleClient.FOO") is None assert mod.contents['P'].kind is model.DocumentableKind.ALIAS assert mod._resolveAlias(mod.contents['P'])=="mod.Processor" @@ -568,7 +571,14 @@ class Processor: assert Processor.expandName('clientFactory.BARS')=="base_mod.BaseClient.FOO" @systemcls_param -def test_resolveName_alias2(systemcls: Type[model.System]) -> None: +def test_expandName_alias_same_name_recursion(systemcls: Type[model.System]) -> None: + """ + When the name of the alias is the same as the name contained in it's value, + it can create a recursion error. 
The C{redirected_from} parameter of methods + L{_localNameToFullName}, L{_resolveAlias} and L{expandName} prevent an infinite loop where + the name it beening revolved to the object itself. When this happends, we use the parent object context + to call L{expandName()}, avoiding the infinite recursion. + """ system = systemcls() base_mod = fromText(''' class Foo: @@ -612,25 +622,25 @@ class Attribute: assert base_mod.contents['Attribute'].contents['foo'].fullName() == 'base_mod.Attribute.foo' assert 'base_mod.Attribute.foo' in mod.system.allobjects, str(list(mod.system.allobjects)) - mod.system.objForFullName('base_mod.Attribute.foo').kind is model.DocumentableKind.ALIAS + f = mod.system.objForFullName('base_mod.Attribute.foo') + assert isinstance(f, model.Attribute) + assert f.kind is model.DocumentableKind.ALIAS assert mod.expandName('System.Attribute.foo')=="base_mod.Foo._1" assert System.expandName('Attribute.foo')=="base_mod.Foo._1" - assert mod.resolveName('System').contents['Attribute'].kind is model.DocumentableKind.ALIAS + assert mod.contents['System'].contents['Attribute'].kind is model.DocumentableKind.ALIAS @systemcls_param -def test_resolveName_alias3(systemcls: Type[model.System]) -> None: +def test_expandName_alias_not_module_level(systemcls: Type[model.System]) -> None: """ We ignore assignments that are not defined at least once at the module level. Meaning that we ignore variables defines in "if" or "try/catch" blocks. """ system = systemcls() - base_mod = fromText(''' ssl = 1 ''', system=system, modname='twisted.internet') - mod = fromText(''' try: from twisted.internet import ssl as _ssl @@ -642,24 +652,35 @@ def test_resolveName_alias3(systemcls: Type[model.System]) -> None: assert mod.expandName('ssl')=="twisted.internet.ssl" assert mod.expandName('_ssl')=="twisted.internet.ssl" - assert ast.literal_eval(mod.resolveName('ssl').value)==1 + s = mod.resolveName('ssl') + assert isinstance(s, model.Attribute) + assert s.value is not None + assert ast.literal_eval(s.value)==1 assert 'ssl' not in mod.contents + system = systemcls() + base_mod = fromText(''' + ssl = 1 + ''', system=system, modname='twisted.internet') mod = fromText(''' + # We definied the alias at the module level such that it will be included in the docs ssl = None try: from twisted.internet import ssl as _ssl except ImportError: ssl = None else: + # The last analyzed assignments "wins" ssl = _ssl - ''', system=systemcls(), modname='mod') + ''', system=system, modname='mod') assert mod.expandName('ssl')=="twisted.internet.ssl" assert mod.expandName('_ssl')=="twisted.internet.ssl" + s = mod.resolveName('ssl') + assert isinstance(s, model.Attribute) + assert s.value is not None + assert ast.literal_eval(s.value)==1 assert mod.contents['ssl'].kind is model.DocumentableKind.ALIAS - assert mod.resolveName('ssl') is None - assert 'ssl' in mod.contents @systemcls_param From 100e55bc62a4eadac0c0d75651978e73b6c03083 Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sat, 26 Jun 2021 20:49:25 -0400 Subject: [PATCH 05/60] Fix docstrings --- pydoctor/astbuilder.py | 2 +- pydoctor/model.py | 13 +++++++------ pydoctor/test/test_astbuilder.py | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py index f54de39f2..69b24c9cd 100644 --- a/pydoctor/astbuilder.py +++ b/pydoctor/astbuilder.py @@ -530,7 +530,7 @@ def _handleIndirection(ctx: model.CanContainImportsDocumentable, """ Aliases declared in "try/except" block or "if" blocks are not documented, but we still 
track the indirection. - If the given expression is a name assigned to a target that is not yet in use, register an indirection in the L{_localNameToFullName_map} attriute. + If the given expression is a name assigned to a target that is not yet in use, register an indirection in the L{CanContainImportsDocumentable._localNameToFullName_map} attriute. """ if target in ctx.contents: return diff --git a/pydoctor/model.py b/pydoctor/model.py index 4e5525dba..444c9bdef 100644 --- a/pydoctor/model.py +++ b/pydoctor/model.py @@ -287,9 +287,9 @@ class Local: class E: pass - In the context of mod2.E, expandName("RenamedExternal") should be - "external_location.External" and expandName("renamed_mod.Local") - should be "mod1.Local". + In the context of mod2.E, C{expandName("RenamedExternal")} should be + C{"external_location.External"} and C{expandName("renamed_mod.Local")} + should be C{"mod1.Local"}. This method is in charge to follow the aliases when possible! It will reccursively follow any L{DocumentableKind.ALIAS} entry found. @@ -309,8 +309,8 @@ class Processor: class Runner: processor = P - In the context of mod2, expandName("Runner.processor.spec") should be - "external.Processor.more_spec". + In the context of mod2, C{expandName("Runner.processor.spec")} should be + C{"external.Processor.more_spec"}. @param name: The name to expand. @param redirected_from: In the case of a followed redirection only. This is @@ -318,7 +318,8 @@ class Runner: @note: The implementation replies on iterating through the each part of the dotted name, calling L{_localNameToFullName} for each name in their associated context and incrementally building the fullName from that. - @since 2021: Lookup members in superclasses when possible and follows L{DocumentableKind.ALIAS}. This mean that L{expandName} will never return the name of an alias, + + Lookup members in superclasses when possible and follows L{DocumentableKind.ALIAS}. This mean that L{expandName} will never return the name of an alias, it will always follow it's indirection to the origin. """ diff --git a/pydoctor/test/test_astbuilder.py b/pydoctor/test/test_astbuilder.py index 2fc716721..6b46e5dc7 100644 --- a/pydoctor/test/test_astbuilder.py +++ b/pydoctor/test/test_astbuilder.py @@ -575,9 +575,9 @@ def test_expandName_alias_same_name_recursion(systemcls: Type[model.System]) -> """ When the name of the alias is the same as the name contained in it's value, it can create a recursion error. The C{redirected_from} parameter of methods - L{_localNameToFullName}, L{_resolveAlias} and L{expandName} prevent an infinite loop where + L{CanContainImportsDocumentable._localNameToFullName}, L{Documentable._resolveAlias} and L{Documentable.expandName} prevent an infinite loop where the name it beening revolved to the object itself. When this happends, we use the parent object context - to call L{expandName()}, avoiding the infinite recursion. + to call L{Documentable.expandName()}, avoiding the infinite recursion. 
""" system = systemcls() base_mod = fromText(''' From 275e815a479e83fc69eed35493e27f6fbff07ba3 Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sat, 26 Jun 2021 20:54:17 -0400 Subject: [PATCH 06/60] Import Final from the typing_extensions module --- pydoctor/templatewriter/pages/__init__.py | 3 ++- pydoctor/templatewriter/summary.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pydoctor/templatewriter/pages/__init__.py b/pydoctor/templatewriter/pages/__init__.py index 4d1d0d1be..ca945fc64 100644 --- a/pydoctor/templatewriter/pages/__init__.py +++ b/pydoctor/templatewriter/pages/__init__.py @@ -2,8 +2,9 @@ from typing import ( TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Mapping, Sequence, - Tuple, Type, Union, Final + Tuple, Type, Union ) +from typing_extensions import Final import ast import abc diff --git a/pydoctor/templatewriter/summary.py b/pydoctor/templatewriter/summary.py index 47e8e2c52..75faab794 100644 --- a/pydoctor/templatewriter/summary.py +++ b/pydoctor/templatewriter/summary.py @@ -3,8 +3,9 @@ from collections import defaultdict from typing import ( TYPE_CHECKING, DefaultDict, Dict, Iterable, List, Mapping, MutableSet, - Sequence, Tuple, Type, Union, cast, Final + Sequence, Tuple, Type, Union, cast ) +from typing_extensions import Final from twisted.web.template import Element, Tag, TagLoader, renderer, tags From f56d0506d651b2380c2117ea586acfdf70e36767 Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sat, 26 Jun 2021 20:56:07 -0400 Subject: [PATCH 07/60] Add requirement typing_extensions; python_version < "3.8" --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 179be769e..18af45ebe 100644 --- a/setup.cfg +++ b/setup.cfg @@ -38,6 +38,7 @@ install_requires = astor attrs docutils + typing_extensions; python_version < "3.8" importlib_metadata; python_version < "3.8" importlib_resources; python_version < "3.7" From 2c762751a3d0b6a51861e9b601ee522aa07448b8 Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sun, 27 Jun 2021 22:11:23 -0400 Subject: [PATCH 08/60] Use ._alias_to attribute to store the aliases indirection instead of computing it from AST all the time. Consider variables that are not on the root level as aliases, too. And warns only when they are actually overriden. Move node2fullname to astutils module. Delete overridenInCount from ClassPage since it was unused. Re-export names that are not part of the current system with an alias. Cleanup expandName() and associated. Use directly taglink() in format_alias_value() when possible. Add Documentable.aliases property. This is somewhat working in the tests but not always in real life. Rename the redirected_from parameter to "indirections". Speaking of tests, this commit also adds A LOT of new tests for the expandName() method. 
--- pydoctor/astbuilder.py | 112 +++++---------- pydoctor/astutils.py | 14 +- pydoctor/epydoc2stan.py | 11 +- pydoctor/model.py | 166 ++++++++++++++-------- pydoctor/test/test_astbuilder.py | 213 +++++++++++++++++++++++++++- pydoctor/test/test_zopeinterface.py | 1 + 6 files changed, 373 insertions(+), 144 deletions(-) diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py index 69b24c9cd..6cb6e253d 100644 --- a/pydoctor/astbuilder.py +++ b/pydoctor/astbuilder.py @@ -16,7 +16,7 @@ from pydoctor import epydoc2stan, model, node2stan from pydoctor.epydoc.markup import flatten from pydoctor.epydoc.markup._pyval_repr import colorize_inline_pyval -from pydoctor.astutils import bind_args, node2dottedname +from pydoctor.astutils import bind_args, node2dottedname, node2fullname def parseFile(path: Path) -> ast.Module: """Parse the contents of a Python source file.""" @@ -29,14 +29,6 @@ def parseFile(path: Path) -> ast.Module: else: _parse = ast.parse - -def node2fullname(expr: Optional[ast.expr], ctx: model.Documentable) -> Optional[str]: - dottedname = node2dottedname(expr) - if dottedname is None: - return None - return ctx.expandName('.'.join(dottedname)) - - def _maybeAttribute(cls: model.Class, name: str) -> bool: """Check whether a name is a potential attribute of the given class. This is used to prevent an assignment that wraps a method from @@ -209,21 +201,6 @@ def default(self, node: ast.AST) -> None: def visit_Module(self, node: ast.Module) -> None: assert self.module.docstring is None - # Build the list of module level variable. This is is used to check if a module assignment - # should be analyzed or not. See _handleAssignmentInModule. - _assigns = list(_findAnyModuleLevelAssign(node)) - for target, value in _assigns: - for a in target: - if isinstance(a, ast.Tuple): - for ele in a.elts: - name = node2dottedname(ele) - if name: - self._moduleLevelAssigns.append('.'.join(name)) - else: - name = node2dottedname(a) - if name: - self._moduleLevelAssigns.append('.'.join(name)) - self.builder.push(self.module, 0) if len(node.body) > 0 and isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Str): self.module.setDocstring(node.body[0].value) @@ -388,22 +365,30 @@ def _importNames(self, modname: str, names: Iterable[ast.alias]) -> None: asname = orgname # Move re-exported objects into current module. - if asname in exports \ - and mod is not None: # This part of the condition makes if impossible to re-export - # names that are not part of the current system. We could create Aliases instead. 
- try: - ob = mod.contents[orgname] - except KeyError: - self.builder.warning("cannot find re-exported name", - f'{modname}.{orgname}') + if asname in exports: + if mod is None: + # re-export names that are not part of the current system with an alias + attr = current.contents.get(asname) + if not attr: + attr = self.builder.addAttribute(name=asname, kind=model.DocumentableKind.ALIAS, parent=current) + attr._alias_to = f'{modname}.{orgname}' + # This is only for the HTML repr + attr.value=ast.Name(attr._alias_to) + continue else: - if mod.all is None or orgname not in mod.all: - self.system.msg( - "astbuilder", - "moving %r into %r" % (ob.fullName(), current.fullName()) - ) - ob.reparent(current, asname) - continue + try: + ob = mod.contents[orgname] + except KeyError: + self.builder.warning("cannot find re-exported name", + f'{modname}.{orgname}') + else: + if mod.all is None or orgname not in mod.all: + self.system.msg( + "astbuilder", + "moving %r into %r" % (ob.fullName(), current.fullName()) + ) + ob.reparent(current, asname) + continue # If we're importing from a package, make sure imported modules # are processed (getProcessedModule() ignores non-modules). @@ -513,31 +498,20 @@ def _handleAlias(self, obj: model.Attribute, value: Optional[ast.expr], lineno: Create an alias or update an alias. """ - if is_attribute_overridden(obj, value): - obj.report(f'Assignment to alias "{obj.name}" overrides previous assignment ' - f'at line {obj.linenumber}, the original redirection will be ignored.', + if is_attribute_overridden(obj, value) and is_alias(obj.value): + obj.report(f'Assignment to alias "{obj.name}" overrides previous alias ' + f'at line {obj.linenumber}.', section='ast', lineno_offset=lineno-obj.linenumber) obj.kind = model.DocumentableKind.ALIAS - # This will be used to follow the alias redirection. + # This will be used for HTML repr of the alias. obj.value = value - - @staticmethod - def _handleIndirection(ctx: model.CanContainImportsDocumentable, - target: str, - expr: Optional[ast.expr] - ) -> None: - """ - Aliases declared in "try/except" block or "if" blocks are not documented, but we still track the indirection. - - If the given expression is a name assigned to a target that is not yet in use, register an indirection in the L{CanContainImportsDocumentable._localNameToFullName_map} attriute. - """ - if target in ctx.contents: - return - full_name = node2fullname(expr, ctx) - if full_name is None: - return - ctx._localNameToFullName_map[target] = full_name + dottedname = node2dottedname(value) + # It cannot be None, because we call _handleAlias() only if is_alias() is True. + assert dottedname is not None + name = '.'.join(dottedname) + # Store the alias value as string now, this avoids doing it in _resolveAlias(). + obj._alias_to = name def _handleModuleVar(self, @@ -583,14 +557,7 @@ def _handleAssignmentInModule(self, ) -> None: module = self.builder.current assert isinstance(module, model.Module) - # Check if the assignment is on the first level. - # We ignore assignments that are not defined at least once at the module level. - # Meaning that we ignore variables defines in "if" or "try/catch" blocks. 
- if target in self._moduleLevelAssigns: - self._handleModuleVar(target, annotation, expr, lineno) - elif is_alias(expr): - # But we still track the name indirection - self._handleIndirection(module, target, expr) + self._handleModuleVar(target, annotation, expr, lineno) def _handleClassVar(self, name: str, @@ -944,7 +911,8 @@ def _annotation_from_attrib(self, if typ is not None: return self._unstring_annotation(typ) default = args.arguments.get('default') - return _infer_type(default) + if default is not None: + return _infer_type(default) return None def _annotations_from_function( @@ -1228,12 +1196,6 @@ def parseFile(self, path: Path) -> Optional[ast.Module]: model.System.defaultBuilder = ASTBuilder -def _findAnyModuleLevelAssign(mod_ast: ast.Module) -> Iterator[Tuple[List[ast.expr], Union[ast.Assign, ast.AnnAssign]]]: - for node in mod_ast.body: - if isinstance(node, (ast.Assign)): - yield (node.targets, node) - elif isinstance(node, ast.AnnAssign): - yield ([node.target], node) def findModuleLevelAssign(mod_ast: ast.Module) -> Iterator[Tuple[str, ast.Assign]]: """ diff --git a/pydoctor/astutils.py b/pydoctor/astutils.py index ccc2c5fa2..98871cff3 100644 --- a/pydoctor/astutils.py +++ b/pydoctor/astutils.py @@ -2,10 +2,13 @@ Various bits of reusable code related to L{ast.AST} node processing. """ -from typing import Optional, List +from typing import Optional, List, TYPE_CHECKING, Union from inspect import BoundArguments, Signature import ast +if TYPE_CHECKING: + from pydoctor.model import Documentable + def node2dottedname(node: Optional[ast.expr]) -> Optional[List[str]]: """ Resove expression composed by L{ast.Attribute} and L{ast.Name} nodes to a list of names. @@ -34,3 +37,12 @@ def bind_args(sig: Signature, call: ast.Call) -> BoundArguments: if kw.arg is not None } return sig.bind(*call.args, **kwargs) + +def node2fullname(expr: Optional[Union[ast.expr, str]], ctx: 'Documentable') -> Optional[str]: + """ + Return L{ctx.expandName(name)} if C{expr} is a valid name, or C{None}. + """ + dottedname = node2dottedname(expr) if isinstance(expr, ast.expr) else expr + if dottedname is None: + return None + return ctx.expandName('.'.join(dottedname)) \ No newline at end of file diff --git a/pydoctor/epydoc2stan.py b/pydoctor/epydoc2stan.py index 018dc6926..3fcf6f61b 100644 --- a/pydoctor/epydoc2stan.py +++ b/pydoctor/epydoc2stan.py @@ -883,5 +883,12 @@ def format_constant_value(obj: model.Attribute) -> "Flattenable": return tags.table(class_='valueTable')(*rows) def format_alias_value(obj: model.Attribute) -> Tag: - return tags.p(tags.em("Alias to ", - colorize_inline_pyval(obj.value).to_stan(_EpydocLinker(obj)))) \ No newline at end of file + alias_value = obj._alias_to + assert alias_value is not None + target = obj.resolveName(alias_value) + if target: + # TODO: contextualize the name in the context of the module/class, currently this always shows the fullName of the object. 
+ alias = tags.code(taglink(target, obj.page_object.url)) + else: + alias = colorize_inline_pyval(obj.value).to_stan(_EpydocLinker(obj.parent)) + return tags.p(tags.em("Alias to ", alias)) \ No newline at end of file diff --git a/pydoctor/model.py b/pydoctor/model.py index 444c9bdef..1931dd27b 100644 --- a/pydoctor/model.py +++ b/pydoctor/model.py @@ -25,7 +25,7 @@ from pydoctor.epydoc.markup import ParsedDocstring from pydoctor.sphinx import CacheT, SphinxInventory -from pydoctor.astutils import node2dottedname +from pydoctor.astutils import node2dottedname, node2fullname if TYPE_CHECKING: from typing_extensions import Literal @@ -125,6 +125,8 @@ class Documentable: documentation_location = DocLocation.OWN_PAGE """Page location where we are documented.""" + _RESOLVE_ALIAS_MAX_RECURSE = 4 + def __init__( self, system: 'System', name: str, parent: Optional['Documentable'] = None, @@ -251,6 +253,8 @@ def reparent(self, new_parent: 'Module', new_name: str) -> None: self.name = new_name self._handle_reparenting_post() del old_parent.contents[old_name] + # We could add a special alias insead of using _localNameToFullName_map, + # this would allow to track the original location of the documentable. old_parent._localNameToFullName_map[old_name] = self.fullName() new_parent.contents[new_name] = self self._handle_reparenting_post() @@ -265,10 +269,11 @@ def _handle_reparenting_post(self) -> None: for o in self.contents.values(): o._handle_reparenting_post() - def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: + def _localNameToFullName(self, name: str, indirections:Any=None) -> str: raise NotImplementedError(self._localNameToFullName) - def expandName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: + def expandName(self, name: str, + indirections:Optional[List['Attribute']]=None) -> str: """ Return a fully qualified name for the possibly-dotted `name`. @@ -292,7 +297,8 @@ class E: should be C{"mod1.Local"}. This method is in charge to follow the aliases when possible! - It will reccursively follow any L{DocumentableKind.ALIAS} entry found. + It will reccursively follow any L{DocumentableKind.ALIAS} entry found + up to certain level of complexity. Example: @@ -313,8 +319,7 @@ class Runner: C{"external.Processor.more_spec"}. @param name: The name to expand. - @param redirected_from: In the case of a followed redirection only. This is - the alias object. This variable is used to prevent infinite loops when doing the lookup. + @param indirections: See L{_resolveAlias} @note: The implementation replies on iterating through the each part of the dotted name, calling L{_localNameToFullName} for each name in their associated context and incrementally building the fullName from that. @@ -326,7 +331,7 @@ class Runner: parts = name.split('.') ctx: Documentable = self # The context for the currently processed part of the name. for i, part in enumerate(parts): - full_name = ctx._localNameToFullName(part, redirected_from=redirected_from) + full_name = ctx._localNameToFullName(part, indirections) if full_name == part and i != 0: # The local name was not found. # If we're looking at a class, we try our luck with the inherited members @@ -338,9 +343,8 @@ class Runner: if full_name == part: # TODO: Instead of returning the input, _localNameToFullName() # should probably either return None or raise LookupError. - # Or maybe we should find a way to indicate if the expanded name is "guessed", like in this case. 
-                # or we surely have the the correct fullName. With the cirrent implementation, this would mean checking
-                # if parts[i + 1:] contains anything.
+                # Or maybe we should find a way to indicate if the expanded name is "guessed" or if we have the correct fullName.
+                # With the current implementation, this would mean checking if "parts[i + 1:]" contains anything.
                 full_name = f'{ctx.fullName()}.{part}'
                 break
             nxt = self.system.objForFullName(full_name)
@@ -348,40 +352,54 @@ class Runner:
                 break
             ctx = nxt
 
-        expanded_name = '.'.join([full_name] + parts[i + 1:])
+        return '.'.join([full_name] + parts[i + 1:])
+
+    def _resolveAlias(self, alias: 'Attribute',
+                      indirections:Optional[List['Attribute']]=None) -> str:
+        """
+        Resolve the alias value to its target full name,
+        or fall back to the original alias full name if we know we've exhausted the maximum number of recursions.
+        @param alias: an ALIAS object.
+        @param indirections: Chain of alias objects followed.
+            This variable is used to prevent infinite loops when doing the lookup.
+        """
+        if len(indirections or ()) > self._RESOLVE_ALIAS_MAX_RECURSE:
+            return indirections[0].fullName()
+        # The _alias_to attribute should never be None for ALIAS objects.
+        assert alias.kind is DocumentableKind.ALIAS
+        name = alias._alias_to
+        assert name is not None
 
-        # We check if the name we resolved is an alias.
-        # Attribute for all aliases are created now, we can follow the redirection here.
-        obj = self.system.objForFullName(expanded_name)
-        if obj is not None and obj.kind is DocumentableKind.ALIAS:
-            assert isinstance(obj, Attribute)
-            # Try to resolve alias, fallback to original value if None.
-            resolved = self._resolveAlias(obj, redirected_from=redirected_from)
-            expanded_name = resolved or expanded_name
-
-        return expanded_name
-
-    def _resolveAlias(self, alias: 'Attribute', redirected_from:Optional['Attribute']=None) -> Optional[str]:
-        dottedname = node2dottedname(alias.value)
-        if dottedname:
-            name = '.'.join(dottedname)
-
-            ctx = self # should ctx be alias.parent?
-
-            # This checks avoids infinite recursion error
-            if redirected_from != alias:
-                # We redirect to the original object instead!
-                return ctx.expandName(name, redirected_from=alias)
-            else:
-                # Issue tracing the alias back to it's original location, found the same alias again.
-                if ctx.parent:
-                    # We try with the parent scope and redirect to the original object!
-                    # This is used in situations like right here in the System class and it's aliases,
-                    # because they have the same name as the name they are aliasing, it's causing trouble.
-                    return ctx.parent.expandName(name, redirected_from=alias)
+        # The context is important here: we resolve relative to the alias' parent.
+        ctx = alias.parent
+
+        # This check avoids an infinite recursion error when an alias has the same name as its value.
+        if (indirections and indirections[-1] != alias) or not indirections:
+            # We redirect to the original object instead!
+            return ctx.expandName(name, indirections=(indirections or [])+[alias])
+        else:
+            # Issue tracing the alias back to its original location: found the same alias again.
+            if ctx.parent:
+                # We try with the parent scope and redirect to the original object!
+                # This is used in situations like right here in the System class and its aliases,
+                # because they have the same name as the name they are aliasing, which causes trouble.
+                return ctx.parent.expandName(name, indirections=(indirections or [])+[alias])
 
         return None
 
+    def _resolveDocumentable(self, o: 'Documentable',
+                             indirections:Optional[List['Attribute']]=None) -> str:
+        """
+        Wrapper for L{_resolveAlias}.
+ + If the documentable is an alias, then follow it and return the supposed full name fo the documentable object, + or return the passed object's - C{o} - full name. + """ + if o.kind is DocumentableKind.ALIAS: + assert isinstance(o, Attribute) + return self._resolveAlias(o, indirections) + return o.fullName() + def resolveName(self, name: str) -> Optional['Documentable']: """ Return the object named by "name" (using Python's lookup rules) in @@ -441,7 +459,20 @@ def report(self, descr: str, section: str = 'parsing', lineno_offset: int = 0) - section, f'{self.description}:{linenumber}: {descr}', thresh=-1) + + @property + def aliases(self) -> List['Attribute']: + """ + Return the known aliases of an object. + It seems that the list if not always complete, though. + """ + aliases: List['Attribute'] = [] + for alias in filter(lambda ob: ob.kind is DocumentableKind.ALIAS and isinstance(ob, Attribute), + self.system.allobjects.values()): + if alias.parent._resolveDocumentable(alias) == self.fullName(): + aliases.append(alias) + return aliases class CanContainImportsDocumentable(Documentable): def setup(self) -> None: @@ -476,17 +507,19 @@ def setup(self) -> None: self._docformat: Optional[str] = None - def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: + def _localNameToFullName(self, name: str, indirections:Any=None) -> str: + # Follows aliases if name in self.contents: - o: Documentable = self.contents[name] - if o.kind is DocumentableKind.ALIAS: - assert isinstance(o, Attribute) - resolved = self._resolveAlias(o, redirected_from=redirected_from) - if resolved: - return resolved - return o.fullName() + return self._resolveDocumentable( + self.contents[name], + indirections) elif name in self._localNameToFullName_map: - return self._localNameToFullName_map[name] + resolved = self._localNameToFullName_map[name] + if resolved in self.system.allobjects: + resolved = self._resolveDocumentable( + self.system.allobjects[resolved], + indirections) + return resolved else: return name @@ -556,18 +589,19 @@ def find(self, name: str) -> Optional[Documentable]: return obj return None - def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: + def _localNameToFullName(self, name: str, indirections:Any=None) -> str: + # Follows aliases if name in self.contents: - o: Documentable = self.contents[name] - if o.kind is DocumentableKind.ALIAS: - # We pass redirected_from value in order to avoid inifite recursion. 
- assert isinstance(o, Attribute) - resolved = self._resolveAlias(o, redirected_from=redirected_from) - if resolved: - return resolved - return o.fullName() + return self._resolveDocumentable( + self.contents[name], + indirections) elif name in self._localNameToFullName_map: - return self._localNameToFullName_map[name] + resolved = self._localNameToFullName_map[name] + if resolved in self.system.allobjects: + resolved = self._resolveDocumentable( + self.system.allobjects[resolved], + indirections) + return resolved else: return self.parent._localNameToFullName(name) @@ -600,8 +634,8 @@ def docsources(self) -> Iterator[Documentable]: if self.name in b.contents: yield b.contents[self.name] - def _localNameToFullName(self, name: str, redirected_from:Optional['Attribute']=None) -> str: - return self.parent._localNameToFullName(name, redirected_from=redirected_from) + def _localNameToFullName(self, name: str, indirections:Any=None) -> str: + return self.parent._localNameToFullName(name, indirections) class Function(Inheritable): kind = DocumentableKind.FUNCTION @@ -626,6 +660,16 @@ class Attribute(Inheritable): None value means the value is not initialized at the current point of the the process. """ + _alias_to: Optional[str] = None + """" + We store the alias value here so we don't have to process it all the time. + For aliases, this is the same as:: + + '.'.join(node2dottedname(self.value)) + + For other attributes, this is None. + """ + # Work around the attributes of the same name within the System class. _ModuleT = Module _PackageT = Package diff --git a/pydoctor/test/test_astbuilder.py b/pydoctor/test/test_astbuilder.py index 6b46e5dc7..87de57794 100644 --- a/pydoctor/test/test_astbuilder.py +++ b/pydoctor/test/test_astbuilder.py @@ -6,7 +6,7 @@ from twisted.python._pydoctor import TwistedSystem -from pydoctor import astbuilder, model +from pydoctor import astbuilder, model, astutils from pydoctor.epydoc.markup import DocstringLinker, ParsedDocstring, flatten from pydoctor.epydoc.markup.epytext import Element, ParsedEpytextDocstring from pydoctor.epydoc2stan import format_summary, get_parsed_type @@ -574,7 +574,7 @@ class Processor: def test_expandName_alias_same_name_recursion(systemcls: Type[model.System]) -> None: """ When the name of the alias is the same as the name contained in it's value, - it can create a recursion error. The C{redirected_from} parameter of methods + it can create a recursion error. The C{indirections} parameter of methods L{CanContainImportsDocumentable._localNameToFullName}, L{Documentable._resolveAlias} and L{Documentable.expandName} prevent an infinite loop where the name it beening revolved to the object itself. When this happends, we use the parent object context to call L{Documentable.expandName()}, avoiding the infinite recursion. @@ -631,8 +631,13 @@ class Attribute: assert mod.contents['System'].contents['Attribute'].kind is model.DocumentableKind.ALIAS + # Tests the .aliases property. + + assert [o.fullName() for o in base_mod.contents['Foo'].contents['_1'].aliases] == ['base_mod.foo','base_mod.Attribute.foo'] + assert [o.fullName() for o in base_mod.contents['Foo'].contents['_3'].aliases] == ['mod.SuperSystem.foo', 'mod.SuperSystem.Attribute.foo'] + @systemcls_param -def test_expandName_alias_not_module_level(systemcls: Type[model.System]) -> None: +def test_expandName_alias_not_documentable_module_level(systemcls: Type[model.System]) -> None: """ We ignore assignments that are not defined at least once at the module level. 
Meaning that we ignore variables defines in "if" or "try/catch" blocks. @@ -656,7 +661,10 @@ def test_expandName_alias_not_module_level(systemcls: Type[model.System]) -> Non assert isinstance(s, model.Attribute) assert s.value is not None assert ast.literal_eval(s.value)==1 - assert 'ssl' not in mod.contents + assert mod.contents['ssl'].kind is model.DocumentableKind.ALIAS + +@systemcls_param +def test_expandName_alias_documentable_module_level(systemcls: Type[model.System]) -> None: system = systemcls() base_mod = fromText(''' @@ -670,7 +678,7 @@ def test_expandName_alias_not_module_level(systemcls: Type[model.System]) -> Non except ImportError: ssl = None else: - # The last analyzed assignments "wins" + # The last analyzed assignment "wins" ssl = _ssl ''', system=system, modname='mod') @@ -682,6 +690,201 @@ def test_expandName_alias_not_module_level(systemcls: Type[model.System]) -> Non assert ast.literal_eval(s.value)==1 assert mod.contents['ssl'].kind is model.DocumentableKind.ALIAS +@systemcls_param +def test_expandName_alias_not_documentable_class_level(systemcls: Type[model.System]) -> None: + """ + We ignore assignments that are not defined at least once at the module level. + Meaning that we ignore variables defines in "if" or "try/catch" blocks. + """ + system = systemcls() + mod = fromText(''' + import sys + class A: + if sys.version_info[0] < 3: + alias = B.a + else: + # The last analyzed assignment "wins" + alias = B.b + class B: + a = 3 + b = 4 + ''', system=system, modname='mod') + + s = mod.resolveName('A.alias') + assert isinstance(s, model.Attribute) + assert s.fullName()=="mod.B.b" + assert s.value is not None + assert ast.literal_eval(s.value)==4 + assert mod.contents['A'].contents['alias'].kind is model.DocumentableKind.ALIAS + +@systemcls_param +def test_expandName_alias_documentale_class_level(systemcls: Type[model.System]) -> None: + system = systemcls() + mod = fromText(''' + # We definied the alias at the module level such that it will be included in the docs + import sys + class A: + alias = None + if sys.version_info[0] < 3: + alias = B.a + else: + alias = B.b + class B: + a = 3 + b = 4 + ''', system=system, modname='mod') + + s = mod.resolveName('A.alias') + assert isinstance(s, model.Attribute) + assert s.fullName() == "mod.B.b" + assert s.value is not None + assert ast.literal_eval(s.value)==4 + assert mod.contents['A'].contents['alias'].kind is model.DocumentableKind.ALIAS + +@systemcls_param +def test_aliases_property(systemcls: Type[model.System]) -> None: + base_mod = ''' + class Z: + pass + ''' + src = ''' + import base_mod + from abc import ABC + class A(ABC): + _1=1 + _2=2 + _3=3 + class_ = B # this is funcky + + class B(A): + _1=a_1 + _2=A._2 + _3=A._3 + class_ = a + + a = A + a_1 = A._1 + b = B + bob = b.class_.class_.class_ + lol = b.class_.class_ + blu = b.class_ + mod = base_mod + ''' + system = systemcls() + fromText(base_mod, system=system, modname='base_mod') + fromText(src, system=system) + + A = system.allobjects['.A'] + B = system.allobjects['.B'] + _base_mod = system.allobjects['base_mod'] + + assert isinstance(A, model.Class) + assert A.subclasses == [system.allobjects['.B']] + + assert [o.fullName() for o in A.aliases] == ['.B.class_', '.a', '.bob', '.blu'] + assert [o.fullName() for o in B.aliases] == ['.A.class_', '.b', '.lol'] + assert [o.fullName() for o in A.contents['_1'].aliases] == ['.B._1', '.a_1'] + assert [o.fullName() for o in A.contents['_2'].aliases] == ['.B._2'] + assert [o.fullName() for o in A.contents['_3'].aliases] == 
['.B._3'] + assert [o.fullName() for o in _base_mod.aliases] == ['.mod'] + + # Aliases cannot currently have aliases because resolveName() always follows the aliases. + assert [o.fullName() for o in A.contents['class_'].aliases] == [] + assert [o.fullName() for o in B.contents['class_'].aliases] == [] + +@systemcls_param +def test_aliases_re_export(systemcls: Type[model.System]) -> None: + + src = ''' + # Import and re-export some external lib + + from constantly import NamedConstant, ValueConstant, FlagConstant, Names, Values, Flags + from mylib import core + from mylib.core import Observalbe + from mylib.core._impl import Processor + Patator = core.Patator + + __all__ = ["NamedConstant", "ValueConstant", "FlagConstant", "Names", "Values", "Flags", + "Processor","Patator","Observalbe"] + ''' + system = systemcls() + fromText(src, system=system) + assert system.allobjects['.ValueConstant'].kind is model.DocumentableKind.ALIAS + n = system.allobjects['.NamedConstant'] + assert isinstance(n, model.Attribute) + assert astor.to_source(n.value).strip() == 'constantly.NamedConstant' == astutils.node2fullname(n.value, n.parent) + + n = system.allobjects['.Processor'] + assert isinstance(n, model.Attribute) + assert n.kind is model.DocumentableKind.ALIAS + assert astor.to_source(n.value).strip() == 'mylib.core._impl.Processor' == astutils.node2fullname(n.value, n.parent) + + assert system.allobjects['.ValueConstant'].kind is model.DocumentableKind.ALIAS + n = system.allobjects['.Observalbe'] + assert isinstance(n, model.Attribute) + assert n.kind is model.DocumentableKind.ALIAS + assert astor.to_source(n.value).strip() == 'mylib.core.Observalbe' == astutils.node2fullname(n.value, n.parent) + + n = system.allobjects['.Patator'] + assert isinstance(n, model.Attribute) + assert n.kind is model.DocumentableKind.ALIAS + assert astor.to_source(n.value).strip() == 'core.Patator' + assert astutils.node2fullname(n.value, n.parent) == 'mylib.core.Patator' + +@systemcls_param +def test_exportName_re_exported_aliases(systemcls: Type[model.System]) -> None: + # TODO: fix this test. This is probably related to https://github.com/twisted/pydoctor/issues/295. + base_mod = ''' + class Zoo: + _1=1 + class Hey: + _2=2 + Z = Zoo + H = Hey + ''' + src = ''' + from base_mod import Z, H + __all__ = ["Z", "H"] + ''' + system = systemcls() + fromText(base_mod, system=system, modname='base_mod') + fromText(src, system=system, modname='mod') + + mod = system.allobjects['mod'] + base_mod = system.allobjects['base_mod'] + assert mod.expandName('Z._1')=="Zoo._1" # Should be "base_mod.Zoo._1" + assert base_mod.expandName('Z._1')=="Zoo._1" # Should be "base_mod.Zoo._1" + assert base_mod.expandName('Zoo._1')=="base_mod.Zoo._1" + assert system.allobjects['mod.Z'].kind is model.DocumentableKind.ALIAS + +@systemcls_param +def test_expandName_aliasloops(systemcls: Type[model.System]) -> None: + + src = ''' + from abc import ABC + class A(ABC): + _1=C._2 + _2=2 + + class B(A): + _1=A._2 + _2=A._1 + + class C(A,B): + _1=A._1 + _2=B._2 + # this could crash with an infitine recursion error! 
+ ''' + system = systemcls() + fromText(src, system=system) + A = system.allobjects['.A'] + B = system.allobjects['.B'] + C = system.allobjects['.C'] + + assert A.expandName('_1') == '.A._1' + assert B.expandName('_2') == '.B._2' + assert C.expandName('_2') == '.C._2' + assert C.expandName('_1') == '.C._1' @systemcls_param def test_subclasses(systemcls: Type[model.System]) -> None: diff --git a/pydoctor/test/test_zopeinterface.py b/pydoctor/test/test_zopeinterface.py index 93b31a7ab..5caa089d3 100644 --- a/pydoctor/test/test_zopeinterface.py +++ b/pydoctor/test/test_zopeinterface.py @@ -184,6 +184,7 @@ class IMyInterface(interface.Interface): ''' mod = fromText(src, systemcls=ZopeInterfaceSystem) attr = mod.contents['IMyInterface'].contents['attribute'] + assert mod.contents['IMyInterface'].contents['Attrib'].kind is model.DocumentableKind.ALIAS assert attr.docstring == 'fun in a bun' assert attr.kind is model.DocumentableKind.ATTRIBUTE From fccbd4f8637ab3d481826946faed7374c6767919 Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sun, 27 Jun 2021 22:21:26 -0400 Subject: [PATCH 09/60] Add Add Known aliases for modules and classes. Not fully functional... --- pydoctor/templatewriter/pages/__init__.py | 53 ++++++++++++++--------- pydoctor/templatewriter/summary.py | 8 +++- setup.cfg | 1 - 3 files changed, 39 insertions(+), 23 deletions(-) diff --git a/pydoctor/templatewriter/pages/__init__.py b/pydoctor/templatewriter/pages/__init__.py index ca945fc64..2d8feefb7 100644 --- a/pydoctor/templatewriter/pages/__init__.py +++ b/pydoctor/templatewriter/pages/__init__.py @@ -4,7 +4,13 @@ TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Mapping, Sequence, Tuple, Type, Union ) -from typing_extensions import Final +if TYPE_CHECKING: + from typing_extensions import Final +else: + # Dirty hack to work without the typing_extensions dep at runtime. + from collections import defaultdict + from functools import partial + Final = defaultdict(partial(defaultdict, defaultdict)) import ast import abc @@ -292,6 +298,13 @@ class ModulePage(CommonPage): def extras(self) -> List["Flattenable"]: r = super().extras() + # Add Known aliases, for modules. 
+ aliases = sorted(self.ob.aliases, key=objects_order) + p = assembleList(self.ob.system, "Known aliases: ", + [o.fullName() for o in aliases], self.page_url) + if p is not None: + r.append(tags.p(p)) + sourceHref = util.srclink(self.ob) if sourceHref: r.append(tags.a("(source)", href=sourceHref, class_="sourceLink")) @@ -360,7 +373,6 @@ def assembleList( system: model.System, label: str, lst: Sequence[str], - idbase: str, page_url: str ) -> Optional["Flattenable"]: lst2 = [] @@ -371,20 +383,16 @@ def assembleList( lst = lst2 if not lst: return None - def one(item: str) -> "Flattenable": + r = [] + for i, item in enumerate(lst): + if i>0: + r.append(', ') if item in system.allobjects: - return tags.code(epydoc2stan.taglink(system.allobjects[item], page_url)) + r.append(tags.code(epydoc2stan.taglink(system.allobjects[item], page_url))) else: - return item - def commasep(items: Sequence[str]) -> List["Flattenable"]: - r = [] - for item in items: - r.append(one(item)) - r.append(', ') - del r[-1] - return r + r.append(tags.code(item)) p: List["Flattenable"] = [label] - p.extend(commasep(lst)) + p.extend(r) return p @@ -403,7 +411,6 @@ def __init__(self, attrs = unmasked_attrs(baselist) if attrs: self.baselists.append((baselist, attrs)) - self.overridenInCount = 0 def extras(self) -> List["Flattenable"]: r = super().extras() @@ -424,9 +431,17 @@ def extras(self) -> List["Flattenable"]: if not scs: return r p = assembleList(self.ob.system, "Known subclasses: ", - [o.fullName() for o in scs], "moreSubclasses", self.page_url) + [o.fullName() for o in scs], self.page_url) if p is not None: r.append(tags.p(p)) + + # Add Known aliases, for classes. + aliases = sorted(self.ob.aliases, key=objects_order) + p = assembleList(self.ob.system, "Known aliases: ", + [o.fullName() for o in aliases], self.page_url) + if p is not None: + r.append(tags.p(p)) + return r def classSignature(self) -> "Flattenable": @@ -500,12 +515,11 @@ def functionExtras(self, ob: model.Documentable) -> List["Flattenable"]: break ocs = sorted(overriding_subclasses(self.ob, name), key=objects_order) if ocs: - self.overridenInCount += 1 - idbase = 'overridenIn' + str(self.overridenInCount) l = assembleList(self.ob.system, 'overridden in ', - [o.fullName() for o in ocs], idbase, self.page_url) + [o.fullName() for o in ocs], self.page_url) if l is not None: r.append(tags.div(class_="interfaceinfo")(l)) + # Not adding Known aliases here because it would really be too much information. 
return r @@ -522,8 +536,7 @@ def extras(self) -> List["Flattenable"]: namelist = sorted(self.ob.implements_directly, key=lambda x:x.lower()) label = 'Implements interfaces: ' if namelist: - l = assembleList(self.ob.system, label, namelist, "moreInterface", - self.page_url) + l = assembleList(self.ob.system, label, namelist, self.page_url) if l is not None: r.append(tags.p(l)) return r diff --git a/pydoctor/templatewriter/summary.py b/pydoctor/templatewriter/summary.py index 75faab794..30b7f56a2 100644 --- a/pydoctor/templatewriter/summary.py +++ b/pydoctor/templatewriter/summary.py @@ -1,11 +1,15 @@ """Classes that generate the summary pages.""" -from collections import defaultdict from typing import ( TYPE_CHECKING, DefaultDict, Dict, Iterable, List, Mapping, MutableSet, Sequence, Tuple, Type, Union, cast ) -from typing_extensions import Final +if TYPE_CHECKING: + from typing_extensions import Final +else: + from collections import defaultdict + from functools import partial + Final = defaultdict(partial(defaultdict, defaultdict)) from twisted.web.template import Element, Tag, TagLoader, renderer, tags diff --git a/setup.cfg b/setup.cfg index 18af45ebe..179be769e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -38,7 +38,6 @@ install_requires = astor attrs docutils - typing_extensions; python_version < "3.8" importlib_metadata; python_version < "3.8" importlib_resources; python_version < "3.7" From e666795e389a6cffea18065408efc043f7065b9a Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sun, 27 Jun 2021 22:21:51 -0400 Subject: [PATCH 10/60] Add some aliases in the RST demo --- .../demo_restructuredtext_module.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/restructuredtext_demo/demo_restructuredtext_module.py b/docs/restructuredtext_demo/demo_restructuredtext_module.py index 20283d15e..a8976d434 100644 --- a/docs/restructuredtext_demo/demo_restructuredtext_module.py +++ b/docs/restructuredtext_demo/demo_restructuredtext_module.py @@ -100,6 +100,7 @@ class DemoClass(ABC, _PrivateClass): """ This is the docstring of this class. """ + #FIXME: For some reason, the alias Demo do ont appear in the class page :/ def __init__(self, one: str, two: bytes) -> None: """ @@ -147,6 +148,12 @@ def read_and_write_delete(self) -> None: """ This is a docstring for deleter. """ + pass + + ro = read_only + rw = read_and_write + rwd = read_and_write_delete + class IContact(zope.interface.Interface): """ @@ -163,3 +170,6 @@ class IContact(zope.interface.Interface): def send_email(text: str) -> None: pass + +_Demo = _PrivateClass +Demo = DemoClass \ No newline at end of file From 3d67f953431c823a48d4426b5447478b6f29a673 Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Sun, 2 Jan 2022 16:53:38 -0500 Subject: [PATCH 11/60] broken :/ --- pydoctor/epydoc/markup/__init__.py | 2 +- pydoctor/model.py | 47 ++++++++++++++++-------------- pydoctor/templatewriter/summary.py | 5 +--- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/pydoctor/epydoc/markup/__init__.py b/pydoctor/epydoc/markup/__init__.py index 000c4048b..028d81a74 100644 --- a/pydoctor/epydoc/markup/__init__.py +++ b/pydoctor/epydoc/markup/__init__.py @@ -85,7 +85,7 @@ def get_parser_by_name(docformat: str, obj: Optional['Documentable'] = None) -> """ mod = import_module(f'pydoctor.epydoc.markup.{docformat}') # We can safely ignore this mypy warning, since we can be sure the 'get_parser' function exist and is "correct". 
- return mod.get_parser(obj) # type:ignore[no-any-return, attr-defined] + return mod.get_parser(obj) # type:ignore[no-any-return] ################################################## ## ParsedDocstring diff --git a/pydoctor/model.py b/pydoctor/model.py index 8ae476da5..feb6bc534 100644 --- a/pydoctor/model.py +++ b/pydoctor/model.py @@ -18,14 +18,13 @@ from optparse import Values from pathlib import Path from typing import ( - TYPE_CHECKING, Collection, Dict, Iterable, Iterator, List, Mapping, + TYPE_CHECKING, Any, Collection, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Set, Tuple, Type, TypeVar, Union, overload ) from urllib.parse import quote from pydoctor.epydoc.markup import ParsedDocstring from pydoctor.sphinx import CacheT, SphinxInventory -from pydoctor.astutils import node2dottedname, node2fullname if TYPE_CHECKING: from typing_extensions import Literal @@ -353,7 +352,7 @@ class Runner: return '.'.join([full_name] + parts[i + 1:]) def _resolveAlias(self, alias: 'Attribute', - indirections:Optional[List['Attribute']]=None) -> str: + indirections:Optional[List['Attribute']]=None) -> Optional[str]: """ Resolve the alias value to it's target full name. Or fall back to original alias full name if we know we've exhausted the max recursions. @@ -361,7 +360,7 @@ def _resolveAlias(self, alias: 'Attribute', @param indirections: Chain of alias objects followed. This variable is used to prevent infinite loops when doing the lookup. """ - if len(indirections or ()) > self._RESOLVE_ALIAS_MAX_RECURSE: + if indirections and len(indirections or ()) > self._RESOLVE_ALIAS_MAX_RECURSE: return indirections[0].fullName() # the _alias_to attribute should never be none for ALIAS objects @@ -386,7 +385,7 @@ def _resolveAlias(self, alias: 'Attribute', return None def _resolveDocumentable(self, o: 'Documentable', - indirections:Optional[List['Attribute']]=None) -> str: + indirections:Optional[List['Attribute']]=None) -> Optional[str]: """ Wrapper for L{_resolveAlias}. @@ -463,11 +462,13 @@ def aliases(self) -> List['Attribute']: """ Return the known aliases of an object. - It seems that the list if not always complete, though. + @note: It seems that the list is not always complete, though. 
""" aliases: List['Attribute'] = [] for alias in filter(lambda ob: ob.kind is DocumentableKind.ALIAS and isinstance(ob, Attribute), self.system.allobjects.values()): + assert isinstance(alias, Attribute) + assert alias.parent is not None if alias.parent._resolveDocumentable(alias) == self.fullName(): aliases.append(alias) return aliases @@ -508,18 +509,19 @@ def setup(self) -> None: def _localNameToFullName(self, name: str, indirections:Any=None) -> str: # Follows aliases if name in self.contents: - return self._resolveDocumentable( + resolved = self._resolveDocumentable( self.contents[name], indirections) - elif name in self._localNameToFullName_map: - resolved = self._localNameToFullName_map[name] - if resolved in self.system.allobjects: + if resolved: + return resolved + if name in self._localNameToFullName_map: + if self._localNameToFullName_map[name] in self.system.allobjects: resolved = self._resolveDocumentable( - self.system.allobjects[resolved], + self.system.allobjects[self._localNameToFullName_map[name]], indirections) - return resolved - else: - return name + if resolved: + return resolved + return name @property def module(self) -> 'Module': @@ -590,18 +592,19 @@ def find(self, name: str) -> Optional[Documentable]: def _localNameToFullName(self, name: str, indirections:Any=None) -> str: # Follows aliases if name in self.contents: - return self._resolveDocumentable( + resolved = self._resolveDocumentable( self.contents[name], indirections) - elif name in self._localNameToFullName_map: - resolved = self._localNameToFullName_map[name] - if resolved in self.system.allobjects: + if resolved: + return resolved + if name in self._localNameToFullName_map: + if self._localNameToFullName_map[name] in self.system.allobjects: resolved = self._resolveDocumentable( - self.system.allobjects[resolved], + self.system.allobjects[self._localNameToFullName_map[name]], indirections) - return resolved - else: - return self.parent._localNameToFullName(name) + if resolved: + return resolved + return self.parent._localNameToFullName(name) @property def constructor_params(self) -> Mapping[str, Optional[ast.expr]]: diff --git a/pydoctor/templatewriter/summary.py b/pydoctor/templatewriter/summary.py index 031e97567..2eed7c7de 100644 --- a/pydoctor/templatewriter/summary.py +++ b/pydoctor/templatewriter/summary.py @@ -6,11 +6,8 @@ ) if TYPE_CHECKING: from typing_extensions import Final -else: - from collections import defaultdict - from functools import partial - Final = defaultdict(partial(defaultdict, defaultdict)) +from collections import defaultdict from twisted.web.template import Element, Tag, TagLoader, renderer, tags from pydoctor import epydoc2stan, model From 606e54d8af766d89ec62cf6268fef7b82b074e7e Mon Sep 17 00:00:00 2001 From: tristanlatr Date: Mon, 23 May 2022 12:29:41 -0400 Subject: [PATCH 12/60] merge docs from 'twisted-master' into 'alias' --- docs/epytext_demo/demo_epytext_module.py | 15 ++ docs/numpy_demo/__init__.py | 2 + docs/restructuredtext_demo/__init__.py | 15 ++ .../demo_restructuredtext_module.py | 18 +- docs/source/codedoc.rst | 17 +- docs/source/conf.py | 37 ++-- docs/source/contrib.rst | 49 +++++ .../custom_template_demo/pyproject.toml | 9 + docs/source/customize.rst | 199 ++++++++++++++++-- .../list-restructuredtext-support.rst | 4 +- docs/source/docformat/restructuredtext.rst | 12 +- docs/source/help.rst | 108 +++++++++- docs/source/quickstart.rst | 4 + docs/tests/__init__.py | 8 + docs/tests/test-search.html | 90 ++++++++ docs/tests/test.py | 141 +++++++++---- 
docs/tests/test_python_igraph_docs.py | 27 +++ docs/tests/test_standard_library_docs.py | 26 +++ docs/tests/test_twisted_docs.py | 43 +++- 19 files changed, 730 insertions(+), 94 deletions(-) create mode 100644 docs/source/custom_template_demo/pyproject.toml create mode 100644 docs/tests/test-search.html create mode 100644 docs/tests/test_python_igraph_docs.py create mode 100644 docs/tests/test_standard_library_docs.py diff --git a/docs/epytext_demo/demo_epytext_module.py b/docs/epytext_demo/demo_epytext_module.py index 0c528509b..315c9cac9 100644 --- a/docs/epytext_demo/demo_epytext_module.py +++ b/docs/epytext_demo/demo_epytext_module.py @@ -5,10 +5,14 @@ """ from abc import ABC +import math from typing import AnyStr, Dict, Generator, List, Union, Sequence, Optional, Protocol, TYPE_CHECKING from somelib import SomeInterface import zope.interface import zope.schema +from typing import Sequence, Optional +from incremental import Version +from twisted.python.deprecate import deprecated, deprecatedProperty if TYPE_CHECKING: from typing_extensions import Final @@ -26,6 +30,10 @@ Interface = Protocol """Aliases are also documented.""" +@deprecated(Version("demo", "NEXT", 0, 0), replacement=math.prod) +def demo_product_deprecated(x, y) -> float: # type: ignore + return float(x * y) + def demo_fields_docstring_arguments(m, b): # type: ignore """ Fields are used to describe specific properties of a documented object. @@ -115,6 +123,13 @@ def read_only(self) -> int: """ return 1 + @deprecatedProperty(Version("demo", 1, 3, 0), replacement=read_only) + def read_only_deprecated(self) -> int: + """ + This is a deprecated read-only property. + """ + return 1 + @property def read_and_write(self) -> int: """ diff --git a/docs/numpy_demo/__init__.py b/docs/numpy_demo/__init__.py index 23468395b..943b66a4f 100644 --- a/docs/numpy_demo/__init__.py +++ b/docs/numpy_demo/__init__.py @@ -50,6 +50,8 @@ """ from typing import List, Union +__docformat__ = 'numpy' + module_level_variable1 = 12345 module_level_variable2 = 98765 diff --git a/docs/restructuredtext_demo/__init__.py b/docs/restructuredtext_demo/__init__.py index 8710a4a69..ef0d8c219 100644 --- a/docs/restructuredtext_demo/__init__.py +++ b/docs/restructuredtext_demo/__init__.py @@ -23,6 +23,21 @@ blank lines are often needed where Epytext allowed no blank line after parent element. Indentation is also much more important, lists content and child items must be correctly indented. +Titles +====== + +Level 2 +------- + +Level 3 +~~~~~~~ + +Level 4 +^^^^^^^ + +Level 5 +!!!!!!! + Lists ===== diff --git a/docs/restructuredtext_demo/demo_restructuredtext_module.py b/docs/restructuredtext_demo/demo_restructuredtext_module.py index 0e4eff13a..63b33f80a 100644 --- a/docs/restructuredtext_demo/demo_restructuredtext_module.py +++ b/docs/restructuredtext_demo/demo_restructuredtext_module.py @@ -4,9 +4,12 @@ Most part of this documentation is using Python type hinting. 
""" from abc import ABC +import math import zope.interface import zope.schema -from typing import Final, Sequence, Optional, Protocol, AnyStr, Generator, Union, List, Dict, TYPE_CHECKING +from typing import Sequence, Optional, Protocol, AnyStr, Generator, Union, List, Dict, TYPE_CHECKING +from incremental import Version +from twisted.python.deprecate import deprecated, deprecatedProperty if TYPE_CHECKING: from typing_extensions import Final @@ -24,6 +27,10 @@ Interface = Protocol """Aliases are also documented.""" +@deprecated(Version("demo", "NEXT", 0, 0), replacement=math.prod) +def demo_product_deprecated(x, y) -> float: # type: ignore + return float(x * y) + def demo_fields_docstring_arguments(m, b = 0): # type: ignore """ Fields are used to describe specific properties of a documented object. @@ -133,6 +140,13 @@ def read_only(self) -> int: """ return 1 + @deprecatedProperty(Version("demo", 1, 3, 0), replacement=read_only) + def read_only_deprecated(self) -> int: + """ + This is a deprecated read-only property. + """ + return 1 + @property def read_and_write(self) -> int: """ @@ -188,4 +202,4 @@ def send_email(text: str) -> None: pass _Demo = _PrivateClass -Demo = DemoClass \ No newline at end of file +Demo = DemoClass diff --git a/docs/source/codedoc.rst b/docs/source/codedoc.rst index f170b12c5..090d9c26c 100644 --- a/docs/source/codedoc.rst +++ b/docs/source/codedoc.rst @@ -115,15 +115,15 @@ ReStructuredText fields are written with colons, like ``:field:`` or ``:field ar Here are the supported fields (written with ReStructuredText format, but same fields are supported with Epytext): - - ``:cvar foo:``, document a class variable. Applicable in the context of the docstring of a class. - - ``:ivar foo:``, document a instance variable. Applicable in the context of the docstring of a class. - - ``:var foo:``, document a variable. Applicable in the context of the docstring of a module or class. + - ``:cvar foo:``, document a class variable named ``foo``. Applicable in the context of the docstring of a class. + - ``:ivar foo:``, document a instance variable named ``foo``. Applicable in the context of the docstring of a class. + - ``:var foo:``, document a variable named ``foo``. Applicable in the context of the docstring of a module or class. If used in the context of a class, behaves just like ``@ivar:``. - ``:note:``, add a note section. - - ``:param bar:`` (synonym: ``@arg bar:``), document a function's (or method's) parameter. + - ``:param bar:`` (synonym: ``@arg bar:``), document a function's (or method's) parameter named ``bar``. Applicable in the context of the docstring of a function of method. - ``:keyword:``, document a function's (or method's) keyword parameter (``**kwargs``). - - ``:type bar: C{list}``, document the type of an argument/keyword or variable, depending on the context. + - ``:type bar: C{list}``, document the type of an argument/keyword or variable (``bar`` in this example), depending on the context. - ``:return:`` (synonym: ``@returns:``), document the return type of a function (or method). - ``:rtype:`` (synonym: ``@returntype:``), document the type of the return value of a function (or method). - ``:yield:`` (synonym: ``@yields:``), document the values yielded by a generator function (or method). @@ -147,7 +147,7 @@ Type fields, namely ``type``, ``rtype`` and ``ytype``, can be interpreted, such types can be linked automatically. 
For reStructuredText and Epytext documentation format, enable this behaviour with the option:: - --process-fields + --process-types The type auto-linking is always enabled for Numpy and Google style documentation formats. @@ -293,6 +293,9 @@ Modules, classes and functions of which the name starts with an underscore are c This method is public. """ +.. note:: + Pydoctor actually supports 3 types of privacy: public, private and hidden. + See :ref:`Override objects privacy ` for more informations. Re-exporting ------------ @@ -314,4 +317,4 @@ The content of ``my_project/__init__.py`` includes:: from .core._impl import MyClass - __all__ = ["MyClass"] + __all__ = ("MyClass",) diff --git a/docs/source/conf.py b/docs/source/conf.py index 574c0edee..70cbb90e7 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -35,9 +35,9 @@ extensions = [ "sphinx_rtd_theme", "sphinx.ext.intersphinx", - "pydoctor.sphinx_ext._help_output", "pydoctor.sphinx_ext.build_apidocs", "sphinxcontrib.spelling", + "sphinxarg.ext", ] # Add any paths that contain templates here, relative to this directory. @@ -59,6 +59,8 @@ # Configure intersphinx magic intersphinx_mapping = { 'twisted': ('https://twistedmatrix.com/documents/current/api/', None), + 'configargparse': ('https://bw2.github.io/ConfigArgParse/', None), + 'std': ('https://docs.python.org/3/', None), } # -- Options for HTML output ------------------------------------------------- @@ -90,42 +92,35 @@ _pydoctor_root = pathlib.Path(__file__).parent.parent.parent _common_args = [ f'--html-viewsource-base=https://github.com/twisted/pydoctor/tree/{_git_reference}', - f'--project-base-dir={_pydoctor_root}', - '--intersphinx=https://docs.python.org/3/objects.inv', - '--intersphinx=https://twistedmatrix.com/documents/current/api/objects.inv', - '--intersphinx=https://urllib3.readthedocs.io/en/latest/objects.inv', - '--intersphinx=https://requests.readthedocs.io/en/latest/objects.inv', - '--intersphinx=https://www.attrs.org/en/stable/objects.inv', - '--intersphinx=https://www.sphinx-doc.org/en/stable/objects.inv', - '--intersphinx=https://tristanlatr.github.io/apidocs/docutils/objects.inv', + f'--project-base-dir={_pydoctor_root}', + f'--config={_pydoctor_root}/setup.cfg', ] pydoctor_args = { 'main': [ '--html-output={outdir}/api/', # Make sure to have a trailing delimiter for better usage coverage. '--project-name=pydoctor', f'--project-version={version}', - '--docformat=epytext', + '--docformat=epytext', + '--privacy=HIDDEN:pydoctor.test', '--project-url=../index.html', f'{_pydoctor_root}/pydoctor', ] + _common_args, 'custom_template_demo': [ '--html-output={outdir}/custom_template_demo/', - '--project-name=pydoctor with a twisted theme', f'--project-version={version}', - '--docformat=epytext', - '--project-url=../customize.html', - '--theme=base', f'--template-dir={_pydoctor_root}/docs/sample_template', f'{_pydoctor_root}/pydoctor', - ] + _common_args, + ] + _common_args + + [f'--config={_pydoctor_root}/docs/source/custom_template_demo/pyproject.toml', + '-qqq' ], # we don't want to hear any warnings from this custom template demo. 
    'epydoc_demo': [
         '--html-output={outdir}/docformat/epytext',
         '--project-name=pydoctor-epytext-demo',
         '--project-version=1.3.0',
-        '--docformat=epytext',
-        '--intersphinx=https://zopeschema.readthedocs.io/en/latest/objects.inv',
-        '--intersphinx=https://zopeinterface.readthedocs.io/en/latest/objects.inv',
+        '--docformat=epytext',
+        '--sidebar-toc-depth=3',
         '--project-url=../epytext.html',
+        '--theme=readthedocs',
         f'{_pydoctor_root}/docs/epytext_demo',
     ] + _common_args,
     'restructuredtext_demo': [
@@ -133,16 +128,17 @@
         '--project-name=pydoctor-restructuredtext-demo',
         '--project-version=1.0.0',
         '--docformat=restructuredtext',
+        '--sidebar-toc-depth=3',
         '--project-url=../restructuredtext.html',
         '--process-types',
         f'{_pydoctor_root}/docs/restructuredtext_demo',
     ] + _common_args,
-    'numpy_demo': [
+    'numpy_demo': [ # no need to pass --docformat here, we use __docformat__
         '--html-output={outdir}/docformat/numpy',
         '--project-name=pydoctor-numpy-style-demo',
         '--project-version=1.0.0',
-        '--docformat=numpy',
         '--project-url=../google-numpy.html',
+        '--theme=readthedocs',
         f'{_pydoctor_root}/docs/numpy_demo',
         f'{_pydoctor_root}/pydoctor/napoleon'
     ] + _common_args,
@@ -152,6 +148,7 @@
         '--project-version=1.0.0',
         '--docformat=google',
         '--project-url=../google-numpy.html',
+        '--theme=readthedocs',
         f'{_pydoctor_root}/docs/google_demo',
     ] + _common_args,
 }
diff --git a/docs/source/contrib.rst b/docs/source/contrib.rst
index 69c1b4e3e..2d4156e1c 100644
--- a/docs/source/contrib.rst
+++ b/docs/source/contrib.rst
@@ -74,6 +74,55 @@ The following process ensures correct version management:
 - Update the README file and add an empty placeholder for unreleased changes.
 - Merge the branch
 
+Updating pydoctor for Linux distributions
+-----------------------------------------
+
+The information below covers Debian and its derivative distributions.
+The same principles should be applied for Fedora, Arch, Alpine or any other
+Linux distribution.
+
+There shouldn't be any additional steps needed to get pydoctor updated in
+Debian (and its downstream distributions like Ubuntu).
+As pydoctor is a Python based package, the `Debian Python Team
+`_ usually takes care of
+updating pydoctor in Debian. The DPT is available through the team mailing
+list (``Debian Python List ``), where everyone
+can get in contact by email. If you just want to ask something quickly,
+please use this option.
+
+Debian uses a separate, non-GitHub BTS (Bug Tracking System) to keep track
+of issues. The package maintainers like to use this system in case of more
+specific requests or problems. The preferred and suggested way to open up
+new issues within the Debian BTS is to use the tool `reportbug
+`_, which will do some additional magic
+while collecting the data for the bug report, such as recording the installed
+packages and their versions. ``reportbug`` should be used if you are working
+on a Debian based system.
+
+But you can also use any email client to open up bug reports on the Debian
+BTS by simply writing an email to the address ``submit@bugs.debian.org``.
+
+If you want to help keep the pydoctor package up to date in Debian, the
+DPT is happy to accept your help! Helping out can be done in various ways.
+
+* Keep an eye on `reported issues `_
+  for the pydoctor package and forward them upstream if needed.
+* Also have a look at cross-connected packages and possible build issues
+  regarding their build dependency on pydoctor. These packages are mostly
+  `twisted `_ or
+  `git-buildpackage `_.
+* Ideally taking over some maintainer responsibilities for pydoctor in Debian.
+
+pydoctor and new depending packages
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It might happen that pydoctor requires additional Python libraries
+for new features or to enhance the internal test suite.
+
+Such new packages shouldn't get vendored. They need to be packaged in
+Debian. The best approach is to get in contact with the DPT to talk about new
+requirements and the best way to get things done.
+
 Author Design Notes
 -------------------
 
diff --git a/docs/source/custom_template_demo/pyproject.toml b/docs/source/custom_template_demo/pyproject.toml
new file mode 100644
index 000000000..113b478bd
--- /dev/null
+++ b/docs/source/custom_template_demo/pyproject.toml
@@ -0,0 +1,9 @@
+[tool.pydoctor]
+project-name = 'pydoctor with a twisted theme'
+docformat = 'epytext'
+privacy = 'HIDDEN:pydoctor.test'
+project-url = '../customize.html'
+theme = 'base'
+intersphinx = ["https://docs.python.org/3/objects.inv",
+               "https://twistedmatrix.com/documents/current/api/objects.inv",]
+    # Yes, it's missing a lot of intersphinx links, but that's ok, this is just an example.
\ No newline at end of file
diff --git a/docs/source/customize.rst b/docs/source/customize.rst
index f1b0ec95d..859f2571b 100644
--- a/docs/source/customize.rst
+++ b/docs/source/customize.rst
@@ -1,17 +1,44 @@
+Theming and other customizations
+================================
 
-Customize Output
-================
+Configure sidebar expanding/collapsing
+--------------------------------------
+
+By default, the sidebar only lists one level of objects (always expanded).
+To allow objects to expand/collapse and show their first nested contents, use the following option::
+
+    --sidebar-expand-depth=2
+
+This value describes how many nested modules and classes should be expandable.
+
+.. note::
+    Careful, a value higher than ``1`` (which is the default) can make your HTML files
+    significantly larger if you have many modules or classes.
+
+    To disable the sidebar completely, use the option ``--no-sidebar``.
+
+Theming
+-------
+
+Currently, there are 2 main themes packaged with pydoctor: ``classic`` and ``readthedocs``.
+
+Choose your theme with the following option::
+
+    --theme
+
+.. note::
+    Additionally, the ``base`` theme can be used as a base for customizations.
 
 Tweak HTML templates
 --------------------
 
-They are 3 placeholders designed to be overwritten to include custom HTML and CSS into the pages.
+There are 3 special files designed to be included in specific places of each page.
 
 - ``header.html``: at the very beginning of the body
 - ``subheader.html``: after the main header, before the page title
 - ``extra.css``: extra CSS sheet for layout customization
 
-To override a placeholder, write your custom HTML or CSS files to a directory
+To include a file, write your custom HTML or CSS files to a directory
 and use the following option::
 
     --template-dir=./pydoctor_templates
@@ -29,31 +56,179 @@ HTML templates have their own versioning system and warnings will be triggered w
 
 .. note::
 
-    This example is using new ``pydoctor`` option, ``--theme=base``.
-    This means that bootstrap CSS will not be copied to build directory.
+    This example is using the ``base`` theme.
+
+.. _customize-privacy:
+
+Override objects privacy (show/hide)
+------------------------------------
+
+Pydoctor supports 3 types of privacy.
+Below is the description of each type and the default association:
+
+- ``PRIVATE``: By default for objects whose name starts with an underscore and which are not dunder methods.
+  Rendered in HTML, but hidden via CSS by default.
+
+- ``PUBLIC``: By default everything else that is not private.
+  Always rendered and visible in HTML.
+
+- ``HIDDEN``: Nothing is hidden by default.
+  Not rendered at all and no links can be created to hidden objects.
+  Not present in the search index nor the intersphinx inventory.
+  Basically excluded from API documentation. If a module/package/class is hidden, then all its members are hidden as well.
+
+When the default rules regarding privacy don't fit your use case,
+use the ``--privacy`` command line option.
+It can be used multiple times to define multiple privacy rules::
+
+    --privacy=<PRIVACY>:<PATTERN>
+
+where ``<PRIVACY>`` can be one of ``PUBLIC``, ``PRIVATE`` or ``HIDDEN`` (case insensitive), and ``<PATTERN>`` is an fnmatch-like
+pattern matched against the object's full name.
+
+Privacy tweak examples
+^^^^^^^^^^^^^^^^^^^^^^
+- ``--privacy="PUBLIC:**"``
+  Makes everything public.
+
+- ``--privacy="HIDDEN:twisted.test.*" --privacy="PUBLIC:twisted.test.proto_helpers"``
+  Makes everything under ``twisted.test`` hidden except ``twisted.test.proto_helpers``, which will be public.
+
+- ``--privacy="PRIVATE:**.__*__" --privacy="PUBLIC:**.__init__"``
+  Makes all dunder methods private except ``__init__``.
+
+.. important:: The order of arguments matters. Patterns added last have priority over patterns added before,
+    but an exact match wins over an fnmatch.
+
+.. note:: See :py:mod:`pydoctor.qnmatch` for more information regarding the pattern syntax.
+
+.. note:: Quotation marks should be added around each rule to avoid shell expansions,
+    unless the arguments are passed directly to pydoctor, like in Sphinx's ``conf.py``, in which case you must not quote the privacy rules.
 
 Use a custom system class
 -------------------------
 
-You can subclass the :py:class:`pydoctor.zopeinterface.ZopeInterfaceSystem`
+You can subclass the :py:class:`pydoctor.model.System`
 and pass your custom class dotted name with the following argument::
 
     --system-class=mylib._pydoctor.CustomSystem
 
-System class allows you to dynamically show/hide classes or methods.
-This is also used by the Twisted project to handle deprecation.
+The system class allows you to customize certain aspects of the system and configure the enabled extensions.
+If what you want to achieve has something to do with the state of some objects in the Documentable tree,
+it's very likely that you can do it without the need to override any system method,
+by using the extension mechanism described below.
+
+Brief on pydoctor extensions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The AST builder can now be customized with extension modules.
+This is how we handle Zope Interface declarations and :py:mod:`twisted.python.deprecate` warnings.
 
-See the :py:class:`twisted:twisted.python._pydoctor.TwistedSystem` custom class documentation.
+Each pydoctor extension is a Python module with at least a ``setup_pydoctor_extension()`` function.
+This function is called at initialization of the system with one argument,
+the :py:class:`pydoctor.extensions.ExtRegistrar` object representing the system.
+
+An extension can register multiple kinds of components:
+    - AST builder visitors
+    - Mixin classes for :py:class:`pydoctor.model.Documentable`
+    - Post processors
+
+Take a look at the built-in extensions :py:mod:`pydoctor.extensions.zopeinterface` and :py:mod:`pydoctor.extensions.deprecate`.
 Navigate to the source code for a better overview.
 
+A concrete example
+^^^^^^^^^^^^^^^^^^
+
+Let's say you want to write an extension for simple pydantic classes like this one:
+
+.. 
code:: python + + from typing import ClassVar + from pydantic import BaseModel + class Model(BaseModel): + a: int + b: int = Field(...) + name:str = 'Jane Doe' + kind:ClassVar = 'person' + + +First, we need to create a new module that will hold our extension code: ``mylib._pydoctor``. +This module will contain visitor code that visits ``ast.AnnAssign`` nodes after the main visitor. +It will check if the current context object is a class derived from ``pydantic.BaseModel`` and +transform each class variable into instance variables accordingly. + +.. code:: python + + # Module mylib._pydoctor + + import ast + from pydoctor import astutils, extensions, model + + class PydanticModVisitor(extensions.ModuleVisitorExt): + + def depart_AnnAssign(self, node: ast.AnnAssign) -> None: + """ + Called after an annotated assignment definition is visited. + """ + ctx = self.visitor.builder.current + if not isinstance(ctx, model.Class): + # check if the current context object is a class + return + + if not any(ctx.expandName(b) == 'pydantic.BaseModel' for b in ctx.bases): + # check if the current context object if a class derived from ``pydantic.BaseModel`` + return + + dottedname = astutils.node2dottedname(node.target) + if not dottedname or len(dottedname)!=1: + # check if the assignment is a simple name, otherwise ignore it + return + + # Get the attribute from current context + attr = ctx.contents[dottedname[0]] + + assert isinstance(attr, model.Attribute) + + # All class variables that are not annotated with ClassVar will be transformed to instance variables. + if astutils.is_using_typing_classvar(attr.annotation, attr): + return + + if attr.kind == model.DocumentableKind.CLASS_VARIABLE: + attr.kind = model.DocumentableKind.INSTANCE_VARIABLE + + def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None: + r.register_astbuilder_visitor(PydanticModVisitor) + + class PydanticSystem(model.System): + # Declare that this system should load this additional extension + custom_extensions = ['mylib._pydoctor'] + +Then, we would pass our custom class dotted name with the argument ``--system-class``:: + + --system-class=mylib._pydoctor.PydanticSystem + +Et voilà. + +If this extension mechanism doesn't support the tweak you want, you can consider overriding some +:py:class:`pydoctor.model.System` methods. For instance, overriding :py:meth:`pydoctor.model.System.__init__` method could be useful, +if some want to write a custom :py:class:`pydoctor.sphinx.SphinxInventory`. + + +.. important:: + If you feel like other users of the community might benefit from your extension as well, please + don't hesitate to open a pull request adding your extension module to the package :py:mod:`pydoctor.extensions`. + Use a custom writer class ------------------------- -You can subclass the :py:class:`pydoctor.templatewriter.TemplateWriter` +You can subclass the :py:class:`pydoctor.templatewriter.TemplateWriter` (or the abstract super class :py:class:`pydoctor.templatewriter.IWriter`) and pass your custom class dotted name with the following argument:: + --html-writer=mylib._pydoctor.CustomTemplateWriter - --html-class=mylib._pydoctor.CustomTemplateWriter +The option is actually badly named because, theorically one could write a subclass +of :py:class:`pydoctor.templatewriter.IWriter` (to be used alongside option ``--template-dir``) +that would output Markdown, reStructuredText or JSON. .. warning:: Pydoctor does not have a stable API yet. Code customization is prone to break in future versions. 
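
For completeness, here is a rough sketch of how the remaining component kind, post processors, could be wired into the same ``mylib._pydoctor`` extension module. It assumes that ``ExtRegistrar`` exposes a ``register_post_processor()`` hook for the "Post processors" component kind listed above, and that post processors are plain callables invoked with the fully populated :py:class:`pydoctor.model.System`; treat it as an illustration rather than a confirmed API.

.. code:: python

    # Module mylib._pydoctor (continued) -- illustrative sketch only.

    from pydoctor import extensions, model

    def report_pydantic_models(system: model.System) -> None:
        # Runs once the whole Documentable tree has been built, so every
        # class and its bases are known at this point.
        for cls in system.objectsOfType(model.Class):
            if any(cls.expandName(b) == 'pydantic.BaseModel' for b in cls.bases):
                # report() surfaces a message tied to this object, here under
                # a dedicated "pydantic" section.
                cls.report('this class is a pydantic model', section='pydantic')

    def setup_pydoctor_extension(r: extensions.ExtRegistrar) -> None:
        r.register_astbuilder_visitor(PydanticModVisitor)
        # Assumed registrar hook for the "Post processors" component kind:
        r.register_post_processor(report_pydantic_models)
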
diff --git a/docs/source/docformat/list-restructuredtext-support.rst b/docs/source/docformat/list-restructuredtext-support.rst
index 1aea36796..c4d184d6a 100644
--- a/docs/source/docformat/list-restructuredtext-support.rst
+++ b/docs/source/docformat/list-restructuredtext-support.rst
@@ -60,7 +60,7 @@ List of ReST directives
 
    * - ``.. code::``
      - `docutils `__
-     - Yes
+     - Yes (No options supported)
 
    * - ``.. python::``
      - pydoctor
@@ -144,7 +144,7 @@ List of ReST directives
 
    * - ``.. code-block::``
      - `Sphinx `__
-     - No
+     - Yes (No options supported)
 
    * - ``.. literalinclude::``
      - `Sphinx `__
diff --git a/docs/source/docformat/restructuredtext.rst b/docs/source/docformat/restructuredtext.rst
index ee5fb63f6..7baf4d9f0 100644
--- a/docs/source/docformat/restructuredtext.rst
+++ b/docs/source/docformat/restructuredtext.rst
@@ -88,10 +88,10 @@ Here is a list of the supported ReST directives by package of origin:
 
 - `docutils`: ``.. include::``, ``.. contents::``, ``.. image::``,
   ``.. figure::``, ``.. unicode::``, ``.. raw::``, ``.. math::``,
-  ``.. role::``, ``.. table::``, ``.. warning::``, ``.. note::``
+  ``.. role::``, ``.. table::``, ``.. code::``, ``.. warning::``, ``.. note::``
   and other admonitions, and a few others.
-- `epydoc`: None
-- `Sphinx`: ``.. deprecated::``, ``.. versionchanged::``, ``.. versionadded::``
+- `epydoc`: None yet.
+- `Sphinx`: ``.. deprecated::``, ``.. versionchanged::``, ``.. versionadded::``, ``.. code-block::``
 - `pydoctor`: ``.. python::``
 
 `Full list of supported and unsupported directives `_
@@ -102,8 +102,12 @@ Colorized snippets directive
 
 Using reStructuredText markup it is possible to specify Python snippets in a
 `doctest block `_.
+
 If the Python prompt gets in your way when you try to copy and paste and you are not interested
-in self-testing docstrings, the python directive will let you obtain a simple block of colorized text::
+in self-testing docstrings, the python directive will let you obtain a simple block of colorized text.
+Directives ``.. code::`` and ``.. code-block::`` act exactly the same.
+
+::
 
   .. python::
 
diff --git a/docs/source/help.rst b/docs/source/help.rst
index 14d91863b..440496b9d 100644
--- a/docs/source/help.rst
+++ b/docs/source/help.rst
@@ -1,6 +1,106 @@
-Command Line Options
-====================
+CLI Options and Config File
+===========================
 
-Below are the available command line options:
+Command line options
+--------------------
 
-.. help_output::
+.. argparse::
+    :ref: pydoctor.options.get_parser
+    :prog: pydoctor
+    :nodefault:
+
+Configuration file
+------------------
+
+All arguments can also be set in a config file.
+
+Repeatable arguments must be defined as lists.
+
+Positional arguments can be set with the option ``add-package``.
+
+By convention, the config file resides at the root of your repository.
+
+Pydoctor automatically integrates with common project files ``./pyproject.toml`` or ``./setup.cfg`` and loads the file ``./pydoctor.ini`` if it exists.
+The configuration parser supports `TOML `_ and INI formats.
+
+.. note:: No path processing is done to determine the project root directory; pydoctor only looks at the current working directory.
+    You can set a different config file path with the option ``--config``. This is necessary to load project configuration files from Sphinx's ``conf.py``.
+
+``pydoctor.ini``
+^^^^^^^^^^^^^^^^
+
+Declaring section ``[pydoctor]`` is required.
+
+::
+
+    [pydoctor]
+    add-package =
+        src/mylib
+    intersphinx =
+        https://docs.python.org/3/objects.inv
+        https://twistedmatrix.com/documents/current/api/objects.inv
+    docformat = restructuredtext
+    verbose = 1
+    warnings-as-errors = true
+    privacy =
+        HIDDEN:pydoctor.test
+        PUBLIC:pydoctor._configparser
+
+``pyproject.toml``
+^^^^^^^^^^^^^^^^^^
+
+``pyproject.toml`` files are considered for configuration when they contain a ``[tool.pydoctor]`` table. They must use the TOML format.
+
+::
+
+    [tool.pydoctor]
+    add-package = ["src/mylib"]
+    intersphinx = ["https://docs.python.org/3/objects.inv",
+                   "https://twistedmatrix.com/documents/current/api/objects.inv"]
+    docformat = "restructuredtext"
+    verbose = 1
+    warnings-as-errors = true
+    privacy = ["HIDDEN:pydoctor.test",
+               "PUBLIC:pydoctor._configparser",]
+
+Note that the config file fragment above is also valid INI format and could be parsed from a ``setup.cfg`` file successfully.
+
+``setup.cfg``
+^^^^^^^^^^^^^
+
+``setup.cfg`` can also be used to hold pydoctor configuration if it has a ``[tool:pydoctor]`` section. It must use the ``INI`` format.
+
+::
+
+    [tool:pydoctor]
+    add-package =
+        src/mylib
+    intersphinx =
+        https://docs.python.org/3/objects.inv
+        https://twistedmatrix.com/documents/current/api/objects.inv
+    docformat = restructuredtext
+    verbose = 1
+    warnings-as-errors = true
+    privacy =
+        HIDDEN:pydoctor.test
+        PUBLIC:pydoctor._configparser
+
+.. Note:: If an argument is specified in more than one place,
+    then command line values override config file values, which override defaults.
+    If more than one config file exists, ``pydoctor.ini`` overrides values from
+    ``pyproject.toml``, which overrides ``setup.cfg``. Repeatable options are not
+    merged together; they are overridden as well.
+
+.. Note::
+    The INI parser behaves like :py:class:`configargparse:configargparse.ConfigparserConfigFileParser`, except that it additionally
+    converts plain multiline values to lists: each non-empty line is converted to a list item.
+    If for some reason you need newlines in a string value, just triple quote your string like you would in Python.
+
+    Allowed syntax is that of a :py:class:`std:configparser.ConfigParser` with the default options.
+
+.. Note::
+    Last note: pydoctor has always supported a ``--config`` option, but before 2022, the format was undocumented and rather fragile.
+    This new configuration format breaks compatibility with older config files in three main ways:
+
+    - Option names are now the same as the arguments without the leading ``--`` (e.g. ``project-name`` and not ``projectname``).
+    - Repeatable options are defined with multiline strings or list literals instead of comma-separated strings.
diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst
index 405a1fc2f..4f4c2c9bb 100644
--- a/docs/source/quickstart.rst
+++ b/docs/source/quickstart.rst
@@ -8,6 +8,10 @@ Pydoctor can be installed from PyPI::
 
    $ pip install -U pydoctor
 
+For Debian and derivatives, pydoctor can be installed with ``apt``::
+
+   $ sudo apt install pydoctor
+
 Example
 -------
diff --git a/docs/tests/__init__.py b/docs/tests/__init__.py
index e69de29bb..1287c496e 100644
--- a/docs/tests/__init__.py
+++ b/docs/tests/__init__.py
@@ -0,0 +1,8 @@
+from pathlib import Path
+import os
+
+def get_toxworkdir_subdir(subdir:str) -> Path:
+    dir = Path(os.environ['TOX_WORK_DIR']).joinpath(subdir) \
+        if os.environ.get('TOX_WORK_DIR') else Path(os.getcwd()).joinpath(f'./.tox/{subdir}')
+    assert dir.exists(), f"Looks like {dir} does not exist!"
+ return dir diff --git a/docs/tests/test-search.html b/docs/tests/test-search.html new file mode 100644 index 000000000..290bc904d --- /dev/null +++ b/docs/tests/test-search.html @@ -0,0 +1,90 @@ + + + +

+
+    
+    
+    
+    
+    
+    
+    
+
\ No newline at end of file
diff --git a/docs/tests/test.py b/docs/tests/test.py
index 6f32ff555..1dee900d9 100644
--- a/docs/tests/test.py
+++ b/docs/tests/test.py
@@ -5,6 +5,11 @@
 #
 import os
 import pathlib
+from typing import List
+import xml.etree.ElementTree as ET
+import json
+
+from lunr.index import Index
 
 from sphinx.ext.intersphinx import inspect_main
 
@@ -14,15 +19,6 @@
 BASE_DIR = pathlib.Path(os.environ.get('TOX_INI_DIR', os.getcwd())) / 'build' / 'docs'
 
 
-def test_help_output_extension():
-    """
-    The help output extension will include the CLI help on the Sphinx page.
-    """
-    with open(BASE_DIR / 'help.html', 'r') as stream:
-        page = stream.read()
-        assert '--project-url=PROJECTURL' in page, page
-
-
 def test_rtd_pydoctor_call():
     """
     With the pydoctor Sphinx extension, the pydoctor API HTML files are
@@ -89,29 +85,6 @@ def test_sphinx_object_inventory_version_epytext_demo():
             ), page
 
 
-def test_index_contains_infos():
-    """
-    Test if index.html contains the following informations:
-
-        - meta generator tag
-        - nav and links to modules, classes, names
-        - link to the root package
-        - pydoctor github link in the footer
-    """
-
-    infos = (f'pydoctor, the root package.',
-              'pydoctor',)
-
-    with open(BASE_DIR / 'api' / 'index.html', 'r', encoding='utf-8') as stream:
-        page = stream.read()
-        for i in infos:
-            assert i in page, page
-
 def test_page_contains_infos():
     """
     Test if pydoctor.driver.html contains the following informations:
@@ -122,8 +95,8 @@ def test_page_contains_infos():
         - pydoctor github link in the footer
     """
 
-    infos = (f' None:
+    """
+    Run some searches on the lunr index to test its validity.
+    """
+
+    with (BASE_DIR / 'api' / 'searchindex.json').open() as fobj:
+        index_data = json.load(fobj)
+        index = Index.load(index_data)
+
+        def test_search(query:str, expected:List[str], order_is_important:bool=True) -> None:
+            if order_is_important:
+                assert [r["ref"] for r in index.search(query)] == expected
+            else:
+                assert sorted([r["ref"] for r in index.search(query)]) == sorted(expected)
+
+        test_search('+qname:pydoctor', ['pydoctor'])
+        test_search('+qname:pydoctor.epydoc2stan', ['pydoctor.epydoc2stan'])
+        test_search('_colorize_re_pattern', ['pydoctor.epydoc.markup._pyval_repr.PyvalColorizer._colorize_re_pattern'])
+        
+        test_search('+name:Class', 
+            ['pydoctor.model.Class', 
+             'pydoctor.factory.Factory.Class',
+             'pydoctor.model.DocumentableKind.CLASS',
+             'pydoctor.model.System.Class'])
+        
+        to_stan_results = [
+                    'pydoctor.epydoc.markup.ParsedDocstring.to_stan', 
+                    'pydoctor.epydoc.markup.plaintext.ParsedPlaintextDocstring.to_stan',
+                    'pydoctor.epydoc.markup._types.ParsedTypeDocstring.to_stan',
+                    'pydoctor.epydoc.markup._pyval_repr.ColorizedPyvalRepr.to_stan',
+                    'pydoctor.epydoc2stan.ParsedStanOnly.to_stan'
+                ]
+        test_search('to_stan*', to_stan_results, order_is_important=False)
+        test_search('to_stan', to_stan_results, order_is_important=False)
+
+        to_node_results = [
+                    'pydoctor.epydoc.markup.ParsedDocstring.to_node', 
+                    'pydoctor.epydoc.markup.plaintext.ParsedPlaintextDocstring.to_node',
+                    'pydoctor.epydoc.markup._types.ParsedTypeDocstring.to_node',
+                    'pydoctor.epydoc.markup.restructuredtext.ParsedRstDocstring.to_node',
+                    'pydoctor.epydoc.markup.epytext.ParsedEpytextDocstring.to_node',
+                    'pydoctor.epydoc2stan.ParsedStanOnly.to_node'
+                ]
+        test_search('to_node*', to_node_results, order_is_important=False)
+        test_search('to_node', to_node_results, order_is_important=False)
+        
+        test_search('qname:pydoctor.epydoc.markup.restructuredtext.ParsedRstDocstring', 
+                ['pydoctor.epydoc.markup.restructuredtext.ParsedRstDocstring'])
+        test_search('pydoctor.epydoc.markup.restructuredtext.ParsedRstDocstring', 
+                ['pydoctor.epydoc.markup.restructuredtext.ParsedRstDocstring'])
+
+def test_pydoctor_test_is_hidden():
+    """
+    Test that option --privacy=HIDDEN:pydoctor.test makes everything under pydoctor.test HIDDEN.
+    """
+
+    def getText(node: ET.Element) -> str:
+        return ''.join(node.itertext()).strip()
+
+    with open(BASE_DIR / 'api' / 'all-documents.html', 'r', encoding='utf-8') as stream:
+        document = ET.fromstring(stream.read())
+        for liobj in document.findall('body/div/ul/li[@id]'):
+            
+            if not str(liobj.get("id")).startswith("pydoctor"):
+                continue # not an all-documents list item, maybe in the menu or whatever.
+            
+            # figure obj name
+            fullName = getText(liobj.findall('./div[@class=\'fullName\']')[0])
+            
+            if fullName.startswith("pydoctor.test"):
+                # figure obj privacy
+                privacy = getText(liobj.findall('./div[@class=\'privacy\']')[0])
+                # check that it's indeed private
+                assert privacy == 'HIDDEN'
diff --git a/docs/tests/test_python_igraph_docs.py b/docs/tests/test_python_igraph_docs.py
new file mode 100644
index 000000000..95440eafb
--- /dev/null
+++ b/docs/tests/test_python_igraph_docs.py
@@ -0,0 +1,27 @@
+#
+# Run tests after python-igraph's documentation is executed.
+#
+# These tests are designed to be executed inside tox, after pydoctor is run.
+# Alternatively this can be executed manually from the project root folder like:
+#   pytest docs/tests/test_python_igraph_docs.py
+
+from . import get_toxworkdir_subdir
+
+BASE_DIR = get_toxworkdir_subdir('python-igraph-output')
+
+def test_python_igraph_docs() -> None:
+    """
+    Test for https://github.com/twisted/pydoctor/issues/287
+    """
+
+    with open(BASE_DIR / 'igraph.html') as stream:
+        page = stream.read()
+        assert all(impl in page for impl in ['href="igraph._igraph.html"']), page
+
+    with open(BASE_DIR / 'igraph.Graph.html') as stream:
+        page = stream.read()
+        assert all(impl in page for impl in ['href="igraph._igraph.GraphBase.html"']), page
+
+    with open(BASE_DIR / 'igraph._igraph.GraphBase.html') as stream:
+        page = stream.read()
+        assert all(impl in page for impl in ['href="igraph.Graph.html"']), page
diff --git a/docs/tests/test_standard_library_docs.py b/docs/tests/test_standard_library_docs.py
new file mode 100644
index 000000000..cfae595b0
--- /dev/null
+++ b/docs/tests/test_standard_library_docs.py
@@ -0,0 +1,26 @@
+#
+# Run tests after Python standard library's documentation is executed.
+#
+# These tests are designed to be executed inside tox, after pydoctor is run.
+# Alternatively this can be executed manually from the project root folder like:
+#   pytest docs/tests/test_standard_library_docs.py
+
+from . import get_toxworkdir_subdir
+
+PYTHON_DIR = get_toxworkdir_subdir('cpython')
+BASE_DIR = get_toxworkdir_subdir('cpython-output')
+
+def test_std_lib_docs() -> None:
+    """
+    For each top-level module in the Python standard library, check if there is an associated documentation page.
+    """
+    for entry in PYTHON_DIR.joinpath('Lib').iterdir():
+        if entry.is_file() and entry.suffix=='.py': # Module
+            name = entry.name[0:-3]
+            if name == "__init__": continue
+            assert BASE_DIR.joinpath('Lib.'+name+'.html').exists()
+        
+        elif entry.is_dir() and entry.joinpath('__init__.py').exists(): # Package
+            assert BASE_DIR.joinpath('Lib.'+entry.name+'.html').exists()
+
+    
diff --git a/docs/tests/test_twisted_docs.py b/docs/tests/test_twisted_docs.py
index 8c38318d7..339802219 100644
--- a/docs/tests/test_twisted_docs.py
+++ b/docs/tests/test_twisted_docs.py
@@ -2,24 +2,53 @@
 # Run tests after Twisted's the documentation is executed.
 #
 # These tests are designed to be executed inside tox, after bin/admin/build-apidocs.
-#
+# Alternatively this can be executed manually from the project root folder like:
+#   pytest docs/tests/test_twisted_docs.py
 
-import pathlib
-import os
+from . import get_toxworkdir_subdir
 
-BASE_DIR = pathlib.Path(os.environ.get('TOX_WORK_DIR', os.getcwd())) / 'twisted-apidocs-build'
+BASE_DIR = get_toxworkdir_subdir('twisted-apidocs-build')
 
 # Test for https://github.com/twisted/pydoctor/issues/428
 def test_IPAddress_implementations() -> None:
     """
-    There is a flaw in the logic, currently.
+    This test ensures all important subclasses of IAddress show up in the IAddress class page documentation.
     """
 
-    implementations_that_currently_do_not_show_up = ['twisted.internet.address.IPv4Address', 
+    show_up = ['twisted.internet.address.IPv4Address', 
         'twisted.internet.address.IPv6Address', 
         'twisted.internet.address.HostnameAddress', 
         'twisted.internet.address.UNIXAddress']
 
     with open(BASE_DIR / 'twisted.internet.interfaces.IAddress.html') as stream:
         page = stream.read()
-        assert all(impl in page for impl in implementations_that_currently_do_not_show_up), page
+        assert all(impl in page for impl in show_up), page
+
+# Test for https://github.com/twisted/pydoctor/issues/505
+def test_web_template_api() -> None:
+    """
+    This test ensures all important members of the twisted.web.template 
+    module are documented at the right place
+    """
+
+    exists = ['twisted.web.template.Tag.html', 
+        'twisted.web.template.slot.html', 
+        'twisted.web.template.Comment.html', 
+        'twisted.web.template.CDATA.html',
+        'twisted.web.template.CharRef.html',
+        'twisted.web.template.TagLoader.html',
+        'twisted.web.template.XMLString.html',
+        'twisted.web.template.XMLFile.html',
+        'twisted.web.template.Element.html',]
+    for e in exists:
+        assert (BASE_DIR / e).exists(), f"{e} not found"
+    
+    show_up = [
+        'twisted.web.template.renderer',
+        'twisted.web.template.flatten',
+        'twisted.web.template.flattenString', 
+        'twisted.web.template.renderElement']
+
+    with open(BASE_DIR / 'twisted.web.template.html') as stream:
+        page = stream.read()
+        assert all(impl in page for impl in show_up), page

From 46c1e8eb461394a6a60a7e3297dae2baee646356 Mon Sep 17 00:00:00 2001
From: tristanlatr 
Date: Mon, 23 May 2022 13:33:07 -0400
Subject: [PATCH 13/60] Forgot a meta file.

---
 .coveragerc | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.coveragerc b/.coveragerc
index e5b7c2b61..fd1f02049 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -3,6 +3,7 @@ branch = True
 omit =
     pydoctor/sphinx_ext/*
     pydoctor/test/*
+    pydoctor/epydoc/sre_parse36.py
 source =
     pydoctor
 

From 59d46ee298c6f71ed683ceb0c246a409da243b92 Mon Sep 17 00:00:00 2001
From: tristanlatr 
Date: Mon, 23 May 2022 13:34:53 -0400
Subject: [PATCH 14/60] merge source files 'twisted-master' into 'alias',
 currently fails 21 astbuilder tests, mostly related to alias handling... I'll
 fix that after merging.

---
 pydoctor/_configparser.py                     |  378 ++
 pydoctor/astbuilder.py                        |  300 +-
 pydoctor/astutils.py                          |  131 +-
 pydoctor/driver.py                            |  600 +--
 pydoctor/epydoc/docutils.py                   |   80 +-
 pydoctor/epydoc/markup/__init__.py            |  165 +-
 pydoctor/epydoc/markup/_pyval_repr.py         |   18 +-
 pydoctor/epydoc/markup/_types.py              |    2 +-
 pydoctor/epydoc/markup/epytext.py             |   55 +-
 pydoctor/epydoc/markup/plaintext.py           |   36 +-
 pydoctor/epydoc/markup/restructuredtext.py    |   30 +-
 pydoctor/epydoc/sre_parse36.py                | 1035 +++++
 pydoctor/epydoc2stan.py                       |  455 +--
 pydoctor/extensions/__init__.py               |  160 +
 pydoctor/extensions/deprecate.py              |  157 +
 pydoctor/{ => extensions}/zopeinterface.py    |  127 +-
 pydoctor/factory.py                           |  114 +
 pydoctor/linker.py                            |  430 ++
 pydoctor/model.py                             |  480 ++-
 pydoctor/napoleon/docstring.py                |    4 +-
 pydoctor/node2stan.py                         |   29 +-
 pydoctor/options.py                           |  410 ++
 pydoctor/qnmatch.py                           |   71 +
 pydoctor/sphinx.py                            |    3 +
 pydoctor/sphinx_ext/_help_output.py           |   54 -
 pydoctor/sphinx_ext/build_apidocs.py          |    7 +-
 pydoctor/stanutils.py                         |   11 +-
 pydoctor/templatewriter/__init__.py           |    4 +-
 pydoctor/templatewriter/pages/__init__.py     |  214 +-
 .../templatewriter/pages/attributechild.py    |   20 +-
 .../templatewriter/pages/functionchild.py     |   17 +-
 pydoctor/templatewriter/pages/sidebar.py      |  419 ++
 pydoctor/templatewriter/pages/table.py        |   11 +-
 pydoctor/templatewriter/search.py             |  179 +
 pydoctor/templatewriter/summary.py            |   71 +-
 pydoctor/templatewriter/util.py               |   88 +-
 pydoctor/templatewriter/writer.py             |   25 +-
 pydoctor/test/__init__.py                     |    1 +
 pydoctor/test/epydoc/restructuredtext.doctest |   27 +
 pydoctor/test/epydoc/test_epytext.py          |    3 +
 pydoctor/test/epydoc/test_epytext2html.py     |   90 +-
 pydoctor/test/epydoc/test_pyval_repr.py       |   28 +-
 pydoctor/test/epydoc/test_restructuredtext.py |  135 +-
 pydoctor/test/test_astbuilder.py              |  229 +-
 pydoctor/test/test_commandline.py             |   25 +-
 pydoctor/test/test_configparser.py            |  390 ++
 .../test/test_cyclic_imports_base_classes.py  |   39 +
 pydoctor/test/test_epydoc2stan.py             |  392 +-
 pydoctor/test/test_model.py                   |  240 +-
 pydoctor/test/test_napoleon_docstring.py      |   13 +-
 pydoctor/test/test_node2stan.py               |   73 +-
 pydoctor/test/test_options.py                 |  225 ++
 pydoctor/test/test_packages.py                |   93 +-
 pydoctor/test/test_pydantic_fields.py         |   70 +
 pydoctor/test/test_qnmatch.py                 |  130 +
 pydoctor/test/test_sphinx.py                  |   14 +-
 pydoctor/test/test_templatewriter.py          |  141 +-
 .../test/test_twisted_python_deprecate.py     |  165 +
 pydoctor/test/test_visitor.py                 |  155 +
 pydoctor/test/test_zopeinterface.py           |  149 +-
 .../test/testcustomtemplates/allok/nav.html   |    4 +-
 .../mymod/__init__.py                         |    0
 .../mymod/base.c                              |   42 +
 .../c_module_invalid_text_signature/setup.py  |    8 +
 .../mymod/__init__.py                         |    0
 .../mymod/base.c                              |   31 +
 .../mymod/base.py                             |   13 +
 .../setup.py                                  |    8 +
 .../cyclic_imports_base_classes/__init__.py   |    1 +
 .../cyclic_imports_base_classes/a.py          |    4 +
 .../cyclic_imports_base_classes/b.py          |    4 +
 .../package_module_name_clash/__init__.py     |    0
 .../package_module_name_clash/pack.py         |    1 +
 .../pack/__init__.py                          |    1 +
 .../reparented_module/__init__.py             |    6 +
 .../testpackages/reparented_module/mod.py     |    5 +
 .../reparenting_crash/__init__.py             |    3 +
 .../reparenting_crash/reparenting_crash.py    |    8 +
 .../reparenting_crash_alt/__init__.py         |    3 +
 .../reparenting_crash_alt/_impl.py            |    6 +
 .../reparenting_crash_alt.py                  |    2 +
 .../reparenting_follows_aliases/__init__.py   |    0
 .../_myotherthing.py                          |    4 +
 .../reparenting_follows_aliases/_mything.py   |    3 +
 .../reparenting_follows_aliases/main.py       |   10 +
 pydoctor/themes/base/ajax.js                  |   50 +
 pydoctor/themes/base/all-documents.html       |   28 +
 pydoctor/themes/base/apidocs.css              |  673 +++-
 pydoctor/themes/base/attribute-child.html     |    5 +-
 pydoctor/themes/base/common.html              |   94 +-
 pydoctor/themes/base/fonts/info.svg           |    4 +
 pydoctor/themes/base/fonts/x-circle.svg       |    4 +
 pydoctor/themes/base/footer.html              |   10 +-
 pydoctor/themes/base/function-child.html      |    5 +-
 pydoctor/themes/base/head.html                |    4 +-
 pydoctor/themes/base/index.html               |   33 +-
 pydoctor/themes/base/lunr.js                  | 3475 +++++++++++++++++
 pydoctor/themes/base/nameIndex.html           |    9 +-
 pydoctor/themes/base/nav.html                 |  110 +-
 pydoctor/themes/base/pydoctor.js              |    6 +
 pydoctor/themes/base/search.js                |  458 +++
 pydoctor/themes/base/searchlib.js             |  339 ++
 pydoctor/themes/base/sidebar-list.html        |   38 +
 pydoctor/themes/base/sidebar.html             |   38 +
 pydoctor/themes/base/sidebartoggle.js         |   69 +
 pydoctor/themes/base/summary.html             |    9 +-
 pydoctor/themes/classic/.gitattributes        |    1 +
 pydoctor/themes/classic/head.html             |    4 +-
 pydoctor/themes/readthedocs/common.html       |  144 +
 .../readthedocs/fonts/Roboto-Slab-Bold.woff2  |  Bin 0 -> 67312 bytes
 .../fonts/Roboto-Slab-Regular.woff2           |  Bin 0 -> 66444 bytes
 pydoctor/themes/readthedocs/fonts/book.svg    |    2 +
 pydoctor/themes/readthedocs/fonts/home.svg    |    2 +
 .../themes/readthedocs/fonts/lato-bold.woff2  |  Bin 0 -> 184912 bytes
 .../readthedocs/fonts/lato-normal.woff2       |  Bin 0 -> 182708 bytes
 .../readthedocs/fonts/minus-square-o.svg      |    2 +
 .../readthedocs/fonts/plus-square-o.svg       |    2 +
 pydoctor/themes/readthedocs/footer.html       |   15 +
 pydoctor/themes/readthedocs/head.html         |   14 +
 pydoctor/themes/readthedocs/nav.html          |   42 +
 .../themes/readthedocs/readthedocstheme.css   |  768 ++++
 pydoctor/utils.py                             |  105 +
 pydoctor/visitor.py                           |  325 ++
 123 files changed, 14650 insertions(+), 1621 deletions(-)
 create mode 100644 pydoctor/_configparser.py
 create mode 100644 pydoctor/epydoc/sre_parse36.py
 create mode 100644 pydoctor/extensions/__init__.py
 create mode 100644 pydoctor/extensions/deprecate.py
 rename pydoctor/{ => extensions}/zopeinterface.py (72%)
 create mode 100644 pydoctor/factory.py
 create mode 100644 pydoctor/linker.py
 create mode 100644 pydoctor/options.py
 create mode 100644 pydoctor/qnmatch.py
 delete mode 100644 pydoctor/sphinx_ext/_help_output.py
 create mode 100644 pydoctor/templatewriter/pages/sidebar.py
 create mode 100644 pydoctor/templatewriter/search.py
 create mode 100644 pydoctor/test/test_configparser.py
 create mode 100644 pydoctor/test/test_cyclic_imports_base_classes.py
 create mode 100644 pydoctor/test/test_options.py
 create mode 100644 pydoctor/test/test_pydantic_fields.py
 create mode 100644 pydoctor/test/test_qnmatch.py
 create mode 100644 pydoctor/test/test_twisted_python_deprecate.py
 create mode 100644 pydoctor/test/test_visitor.py
 create mode 100644 pydoctor/test/testpackages/c_module_invalid_text_signature/mymod/__init__.py
 create mode 100644 pydoctor/test/testpackages/c_module_invalid_text_signature/mymod/base.c
 create mode 100644 pydoctor/test/testpackages/c_module_invalid_text_signature/setup.py
 create mode 100644 pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/__init__.py
 create mode 100644 pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/base.c
 create mode 100644 pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/base.py
 create mode 100644 pydoctor/test/testpackages/c_module_python_module_name_clash/setup.py
 create mode 100644 pydoctor/test/testpackages/cyclic_imports_base_classes/__init__.py
 create mode 100644 pydoctor/test/testpackages/cyclic_imports_base_classes/a.py
 create mode 100644 pydoctor/test/testpackages/cyclic_imports_base_classes/b.py
 create mode 100644 pydoctor/test/testpackages/package_module_name_clash/__init__.py
 create mode 100644 pydoctor/test/testpackages/package_module_name_clash/pack.py
 create mode 100644 pydoctor/test/testpackages/package_module_name_clash/pack/__init__.py
 create mode 100644 pydoctor/test/testpackages/reparented_module/__init__.py
 create mode 100644 pydoctor/test/testpackages/reparented_module/mod.py
 create mode 100644 pydoctor/test/testpackages/reparenting_crash/__init__.py
 create mode 100644 pydoctor/test/testpackages/reparenting_crash/reparenting_crash.py
 create mode 100644 pydoctor/test/testpackages/reparenting_crash_alt/__init__.py
 create mode 100644 pydoctor/test/testpackages/reparenting_crash_alt/_impl.py
 create mode 100644 pydoctor/test/testpackages/reparenting_crash_alt/reparenting_crash_alt.py
 create mode 100644 pydoctor/test/testpackages/reparenting_follows_aliases/__init__.py
 create mode 100644 pydoctor/test/testpackages/reparenting_follows_aliases/_myotherthing.py
 create mode 100644 pydoctor/test/testpackages/reparenting_follows_aliases/_mything.py
 create mode 100644 pydoctor/test/testpackages/reparenting_follows_aliases/main.py
 create mode 100644 pydoctor/themes/base/ajax.js
 create mode 100644 pydoctor/themes/base/all-documents.html
 create mode 100644 pydoctor/themes/base/fonts/info.svg
 create mode 100644 pydoctor/themes/base/fonts/x-circle.svg
 create mode 100644 pydoctor/themes/base/lunr.js
 create mode 100644 pydoctor/themes/base/search.js
 create mode 100644 pydoctor/themes/base/searchlib.js
 create mode 100644 pydoctor/themes/base/sidebar-list.html
 create mode 100644 pydoctor/themes/base/sidebar.html
 create mode 100644 pydoctor/themes/base/sidebartoggle.js
 create mode 100644 pydoctor/themes/classic/.gitattributes
 create mode 100644 pydoctor/themes/readthedocs/common.html
 create mode 100644 pydoctor/themes/readthedocs/fonts/Roboto-Slab-Bold.woff2
 create mode 100644 pydoctor/themes/readthedocs/fonts/Roboto-Slab-Regular.woff2
 create mode 100644 pydoctor/themes/readthedocs/fonts/book.svg
 create mode 100644 pydoctor/themes/readthedocs/fonts/home.svg
 create mode 100644 pydoctor/themes/readthedocs/fonts/lato-bold.woff2
 create mode 100644 pydoctor/themes/readthedocs/fonts/lato-normal.woff2
 create mode 100644 pydoctor/themes/readthedocs/fonts/minus-square-o.svg
 create mode 100644 pydoctor/themes/readthedocs/fonts/plus-square-o.svg
 create mode 100644 pydoctor/themes/readthedocs/footer.html
 create mode 100644 pydoctor/themes/readthedocs/head.html
 create mode 100644 pydoctor/themes/readthedocs/nav.html
 create mode 100644 pydoctor/themes/readthedocs/readthedocstheme.css
 create mode 100644 pydoctor/utils.py
 create mode 100644 pydoctor/visitor.py

diff --git a/pydoctor/_configparser.py b/pydoctor/_configparser.py
new file mode 100644
index 000000000..51b4fc6ac
--- /dev/null
+++ b/pydoctor/_configparser.py
@@ -0,0 +1,378 @@
+"""
+Provides L{configargparse.ConfigFileParser} classes to parse C{TOML} and C{INI} files with **mandatory** support for sections.
+Useful to integrate configuration into project files like C{pyproject.toml} or C{setup.cfg}.
+
+L{TomlConfigParser} usage: 
+
+>>> TomlParser = TomlConfigParser(['tool.my_super_tool']) # Simple TOML parser.
+>>> parser = ArgumentParser(..., default_config_files=['./pyproject.toml'], config_file_parser_class=TomlParser)
+
+L{IniConfigParser} works the same way (it also optionally converts multiline strings to lists with the argument C{split_ml_text_to_list}).
+
+L{CompositeConfigParser} usage:
+
+>>> MY_CONFIG_SECTIONS = ['tool.my_super_tool', 'tool:my_super_tool', 'my_super_tool']
+>>> TomlParser =  TomlConfigParser(MY_CONFIG_SECTIONS)
+>>> IniParser = IniConfigParser(MY_CONFIG_SECTIONS, split_ml_text_to_list=True)
+>>> MixedParser = CompositeConfigParser([TomlParser, IniParser]) # This parser supports both TOML and INI formats.
+>>> parser = ArgumentParser(..., default_config_files=['./pyproject.toml', 'setup.cfg', 'my_super_tool.ini'], config_file_parser_class=MixedParser)
+
+"""
+from collections import OrderedDict
+import re
+from typing import Any, Callable, Dict, List, Optional, Tuple, TextIO, Union
+import csv
+import functools
+import configparser
+from ast import literal_eval
+
+from configargparse import ConfigFileParserException, ConfigFileParser
+import toml
+
+# I did not invent these regexes, just put together some stuff from:
+# - https://stackoverflow.com/questions/11859442/how-to-match-string-in-quotes-using-regex
+# - and https://stackoverflow.com/a/41005190
+
+_QUOTED_STR_REGEX = re.compile(r'(^\"(?:\\.|[^\"\\])*\"$)|'
+                               r'(^\'(?:\\.|[^\'\\])*\'$)')
+
+_TRIPLE_QUOTED_STR_REGEX = re.compile(r'(^\"\"\"(\s+)?(([^\"]|\"([^\"]|\"[^\"]))*(\"\"?)?)?(\s+)?(?:\\.|[^\"\\])\"\"\"$)|'
+                                                                                                 # Unescaped quotes at the end of a string generates 
+                                                                                                 # "SyntaxError: EOL while scanning string literal", 
+                                                                                                 # so we don't account for those kind of strings as quoted.
+                                      r'(^\'\'\'(\s+)?(([^\']|\'([^\']|\'[^\']))*(\'\'?)?)?(\s+)?(?:\\.|[^\'\\])\'\'\'$)', flags=re.DOTALL)
+
+@functools.lru_cache(maxsize=256, typed=True)
+def is_quoted(text:str, triple:bool=True) -> bool:
+    """
+    Detect whether a string is a quoted representation. 
+
+    @param triple: Also match triple quoted strings.
+    """
+    return bool(_QUOTED_STR_REGEX.match(text)) or \
+        (triple and bool(_TRIPLE_QUOTED_STR_REGEX.match(text)))
+
+def unquote_str(text:str, triple:bool=True) -> str:
+    """
+    Unquote a maybe quoted string representation. 
+    If the string is not detected as being a quoted representation, it returns the same string as passed.
+    It supports all kinds of python quotes: C{\"\"\"}, C{'''}, C{"} and C{'}.
+
+    @param triple: Also unquote triple quoted strings.
+    @raises ValueError: If the string is detected as being quoted but literal_eval() fails to evaluate it as a string.
+        This would be a bug in the regex. 
+    """
+    if is_quoted(text, triple=triple):
+        try:
+            s = literal_eval(text)
+            assert isinstance(s, str)
+        except Exception as e:
+            raise ValueError(f"Error trying to unquote the quoted string: {text}: {e}") from e
+        return s
+    return text
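+
+# A few illustrative examples of unquote_str() (not part of the original code):
+#   unquote_str('"hello"')   -> 'hello'
+#   unquote_str("'hello'")   -> 'hello'
+#   unquote_str('hello')     -> 'hello'   (not quoted, returned unchanged)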
+
+def parse_toml_section_name(section_name:str) -> Tuple[str, ...]:
+    """
+    Parse a TOML section name to a sequence of strings.
+
+    The following names are all valid: 
+
+    .. python::
+
+        "a.b.c"            # this is best practice -> returns ("a", "b", "c")
+        " d.e.f "          # same as [d.e.f] -> returns ("d", "e", "f")
+        " g .  h  . i "    # same as [g.h.i] -> returns ("g", "h", "i")
+        ' j . "ʞ" . "l" '  # same as [j."ʞ"."l"], double or simple quotes here are supported. -> returns ("j", "ʞ", "l")
+    """
+    section = []
+    for row in csv.reader([section_name], delimiter='.'):
+        for a in row:
+            section.append(unquote_str(a.strip(), triple=False))
+    return tuple(section)
+
+def get_toml_section(data:Dict[str, Any], section:Union[Tuple[str, ...], str]) -> Optional[Dict[str, Any]]:
+    """
+    Given some TOML data (as loaded with C{toml.load()}), returns the requested section of the data.
+    Returns C{None} if the section is not found.
+    """
+    sections = parse_toml_section_name(section) if isinstance(section, str) else section
+    itemdata = data.get(sections[0])
+    if not itemdata:
+        return None
+    sections = sections[1:]
+    if sections:
+        return get_toml_section(itemdata, sections)
+    else:
+        if not isinstance(itemdata, dict):
+            return None
+        return itemdata
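+
+# Illustrative example (not part of the original code):
+#   get_toml_section({'tool': {'pydoctor': {'verbose': 1}}}, 'tool.pydoctor') -> {'verbose': 1}
+#   get_toml_section({'tool': {}}, 'tool.pydoctor') -> None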
+
+class TomlConfigParser(ConfigFileParser):
+    """
+    U{TOML } parser with support for sections.
+
+    This config parser can be used to integrate with C{pyproject.toml} files.
+
+    Example::
+
+        # this is a comment
+        # this is TOML section table:
+        [tool.my-software] 
+        # how to specify a key-value pair (strings must be quoted):
+        format-string = "restructuredtext"
+        # how to set an arg which has action="store_true":
+        warnings-as-errors = true
+        # how to set an arg which has action="count" or type=int:
+        verbosity = 1
+        # how to specify a list arg (eg. arg which has action="append"):
+        repeatable-option = ["https://docs.python.org/3/objects.inv",
+                        "https://twistedmatrix.com/documents/current/api/objects.inv"]
+        # how to specify a multiline text:
+        multi-line-text = '''
+            Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
+            Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc. 
+            Maecenas quis dapibus leo, a pellentesque leo. 
+            '''
+        # how to specify an empty text:
+        empty-text = ''
+        # how to specify an empty list:
+        empty-list = []
+
+    Usage:
+
+    >>> import configargparse
+    >>> parser = configargparse.ArgParser(
+    ...             default_config_files=['pyproject.toml', 'my_super_tool.toml'],
+    ...             config_file_parser_class=configargparse.TomlConfigParser(['tool.my_super_tool']),
+    ...          )
+
+    """
+
+    def __init__(self, sections: List[str]) -> None:
+        super().__init__()
+        self.sections = sections
+    
+    def __call__(self) -> ConfigFileParser:
+        return self
+
+    def parse(self, stream:TextIO) -> Dict[str, Any]:
+        """Parses the keys and values from a TOML config file."""
+        # parse the TOML data with the toml module
+        try:
+            config = toml.load(stream)
+        except Exception as e:
+            raise ConfigFileParserException("Couldn't parse TOML file: %s" % e)
+
+        # convert to dict and filter based on section names
+        result: Dict[str, Any] = OrderedDict()
+
+        for section in self.sections:
+            data = get_toml_section(config, section)
+            if data:
+                # It might seem a little weird, but anything that is not a list is converted to a string;
+                # it will be converted back to boolean, int or whatever afterwards,
+                # because config values are still passed to argparse for computation.
+                for key, value in data.items():
+                    if isinstance(value, list):
+                        result[key] = [str(i) for i in value]
+                    elif value is None:
+                        pass
+                    else:
+                        result[key] = str(value)
+                break
+        
+        return result
+
+    def get_syntax_description(self) -> str:
+        return ("Config file syntax is Tom's Obvious, Minimal Language. "
+                "See https://github.com/toml-lang/toml/blob/v0.5.0/README.md for details.")
+
+class IniConfigParser(ConfigFileParser):
+    """
+    INI parser with support for sections.
+    
+    This parser somewhat resembles L{configargparse.ConfigparserConfigFileParser}.
+    It uses L{configparser} and evaluates values written with Python list syntax.
+
+    With the following changes: 
+        - Must be created with an argument to bind the parser to a list of sections.
+        - Does not convert multiline strings to a single line.
+        - Optional support for converting multiline strings to lists (if ``split_ml_text_to_list=True``). 
+        - Optional support for quoting strings in config file 
+            (useful when text must not be converted to list or when text 
+            should contain trailing whitespaces).
+        - Comments may only appear on their own line (like in configparser).
+
+    This config parser can be used to integrate with ``setup.cfg`` files.
+
+    Example::
+
+        # this is a comment
+        ; also a comment
+        [my_super_tool]
+        # how to specify a key-value pair:
+        format-string: restructuredtext 
+        # whitespace is ignored, so name = value is the same as name=value
+        # this is why you can quote strings (double quotes work just as well)
+        quoted-string = '\thello\tmom...  '
+        # how to set an arg which has action="store_true"
+        warnings-as-errors = true
+        # how to set an arg which has action="count" or type=int
+        verbosity = 1
+        # how to specify a list arg (eg. arg which has action="append")
+        repeatable-option = ["https://docs.python.org/3/objects.inv",
+                        "https://twistedmatrix.com/documents/current/api/objects.inv"]
+        # how to specify a multiline text:
+        multi-line-text = 
+            Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
+            Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc. 
+            Maecenas quis dapibus leo, a pellentesque leo. 
+        # how to specify an empty text:
+        empty-text = 
+        # this also works:
+        empty-text = ''
+        # how to specify an empty list:
+        empty-list = []
+
+    If you use L{IniConfigParser(sections, split_ml_text_to_list=True)}, 
+    the same rules are applicable with the following changes::
+
+        [my-software]
+        # to specify a list arg (eg. arg which has action="append"), 
+        # just enter one value per line (the list literal format can still be used):
+        repeatable-option =
+            https://docs.python.org/3/objects.inv
+            https://twistedmatrix.com/documents/current/api/objects.inv
+        # to specify a multiline text, you have to quote it:
+        multi-line-text = '''
+            Lorem ipsum dolor sit amet, consectetur adipiscing elit. 
+            Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc. 
+            Maecenas quis dapibus leo, a pellentesque leo. 
+            '''
+        # how to specify an empty text:
+        empty-text = ''
+        # how to specify an empty list:
+        empty-list = []
+        # the following empty value would simply be ignored because we can't 
+        # differentiate between a simple value and a list value without any data:
+        totally-ignored-field = 
+
+    Usage:
+
+    >>> import configargparse
+    >>> parser = configargparse.ArgParser(
+    ...             default_config_files=['setup.cfg', 'my_super_tool.ini'],
+    ...             config_file_parser_class=configargparse.IniConfigParser(['tool:my_super_tool', 'my_super_tool']),
+    ...          )
+
+    """
+
+    def __init__(self, sections:List[str], split_ml_text_to_list:bool) -> None:
+        super().__init__()
+        self.sections = sections
+        self.split_ml_text_to_list = split_ml_text_to_list
+
+    def __call__(self) -> ConfigFileParser:
+        return self
+
+    def parse(self, stream:TextIO) -> Dict[str, Any]:
+        """Parses the keys and values from an INI config file."""
+        # parse with configparser to allow multi-line values
+        config = configparser.ConfigParser()
+        try:
+            config.read_string(stream.read())
+        except Exception as e:
+            raise ConfigFileParserException("Couldn't parse INI file: %s" % e)
+
+        # convert to dict and filter based on INI section names
+        result: Dict[str, Union[str, List[str]]] = OrderedDict()
+        for section in config.sections() + [configparser.DEFAULTSECT]:
+            if section not in self.sections:
+                continue
+            for k,value in config[section].items():
+                # value is already stripped by configparser
+                if not value and self.split_ml_text_to_list:
+                    # ignores empty values when split_ml_text_to_list is True
+                    # because we can't differentiate between an empty list and an empty string.
+                    continue
+                # evaluate lists
+                if value.startswith('[') and value.endswith(']'):
+                    try:
+                        l = literal_eval(value)
+                        assert isinstance(l, list)
+                        # Ensure all list values are strings.
+                        result[k] = [str(i) for i in l]
+                    except Exception as e:
+                        # error evaluating object
+                        _triple = 'triple ' if '\n' in value else ''
+                        raise ConfigFileParserException("Error evaluating list: " + str(e) + f". Put {_triple}quotes around your text if it's meant to be a string.") from e
+                else:
+                    if is_quoted(value):
+                        # evaluate quoted string
+                        try:
+                            result[k] = unquote_str(value)
+                        except ValueError as e:
+                            # error unquoting string
+                            raise ConfigFileParserException(str(e)) from e
+                    # split multi-line text into list of strings if split_ml_text_to_list is enabled.
+                    elif self.split_ml_text_to_list and '\n' in value.rstrip('\n'):
+                        result[k] = [i for i in value.split('\n') if i]
+                    else:
+                        result[k] = value
+        return result
+
+    def get_syntax_description(self) -> str:
+        msg = ("Uses configparser module to parse an INI file which allows multi-line values. "
+                "See https://docs.python.org/3/library/configparser.html for details. "
+                "This parser includes support for quoting string literals as well as Python list syntax evaluation. ")
+        if self.split_ml_text_to_list:
+            msg += ("Alternatively lists can be constructed with a plain multiline string, "
+                "each non-empty line will be converted to a list item.")
+        return msg
+
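+
+# Illustrative behaviour, assuming a parser bound to the section 'my_super_tool'
+# and created with split_ml_text_to_list=True (not part of the original code):
+#
+#   [my_super_tool]
+#   verbose = 1
+#   repeatable-option =
+#       https://docs.python.org/3/objects.inv
+#       https://twistedmatrix.com/documents/current/api/objects.inv
+#
+# parses to:
+#   {'verbose': '1',
+#    'repeatable-option': ['https://docs.python.org/3/objects.inv',
+#                          'https://twistedmatrix.com/documents/current/api/objects.inv']}
+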
+class CompositeConfigParser(ConfigFileParser):
+    """
+    A config parser that understands multiple formats.
+
+    This parser will successively try to parse the file with each composite parser until one succeeds;
+    otherwise it fails, showing all encountered error messages.
+
+    The following code will make configargparse understand both TOML and INI formats,
+    making it easy to integrate in both C{pyproject.toml} and C{setup.cfg}.
+
+    >>> import configargparse
+    >>> my_tool_sections = ['tool.my_super_tool', 'tool:my_super_tool', 'my_super_tool']
+    ...                     # pyproject.toml like section, setup.cfg like section, custom section
+    >>> parser = configargparse.ArgParser(
+    ...             default_config_files=['setup.cfg', 'my_super_tool.ini'],
+    ...             config_file_parser_class=configargparse.CompositeConfigParser(
+    ...             [configargparse.TomlConfigParser(my_tool_sections), 
+    ...                 configargparse.IniConfigParser(my_tool_sections, split_ml_text_to_list=True)]
+    ...             ),
+    ...          )
+
+    """
+
+    def __init__(self, config_parser_types: List[Callable[[], ConfigFileParser]]) -> None:
+        super().__init__()
+        self.parsers = [p() for p in config_parser_types]
+
+    def __call__(self) -> ConfigFileParser:
+        return self
+
+    def parse(self, stream:TextIO) -> Dict[str, Any]:
+        errors = []
+        for p in self.parsers:
+            try:
+                return p.parse(stream) # type: ignore[no-any-return]
+            except Exception as e:
+                stream.seek(0)
+                errors.append(e)
+        raise ConfigFileParserException(
+                f"Error parsing config: {', '.join(repr(str(e)) for e in errors)}")
+    
+    def get_syntax_description(self) -> str:
+        msg = "Uses multiple config parser settings (in order): \n"
+        for i, parser in enumerate(self.parsers): 
+            msg += f"[{i+1}] {parser.__class__.__name__}: {parser.get_syntax_description()} \n"
+        return msg
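+
+# A hedged sketch of how these parsers could be combined for pydoctor-style config files
+# (section names taken from the documentation; the actual wiring in pydoctor.options may differ):
+#
+#   pydoctor_config_parser = CompositeConfigParser(
+#       [TomlConfigParser(['tool.pydoctor']),
+#        IniConfigParser(['tool:pydoctor', 'pydoctor'], split_ml_text_to_list=True)])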
diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py
index 84dd95fea..b90098239 100644
--- a/pydoctor/astbuilder.py
+++ b/pydoctor/astbuilder.py
@@ -8,14 +8,14 @@
 from itertools import chain
 from pathlib import Path
 from typing import (
-    Any, Callable, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple,
+    Any, Callable, Collection, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple,
     Type, TypeVar, Union, cast
 )
 
 import astor
 from pydoctor import epydoc2stan, model, node2stan
 from pydoctor.epydoc.markup._pyval_repr import colorize_inline_pyval
-from pydoctor.astutils import bind_args, node2dottedname, node2fullname
+from pydoctor.astutils import bind_args, node2dottedname, node2fullname, is__name__equals__main__, NodeVisitor
 
 def parseFile(path: Path) -> ast.Module:
     """Parse the contents of a Python source file."""
@@ -28,13 +28,14 @@ def parseFile(path: Path) -> ast.Module:
 else:
     _parse = ast.parse
 
+
 def _maybeAttribute(cls: model.Class, name: str) -> bool:
     """Check whether a name is a potential attribute of the given class.
     This is used to prevent an assignment that wraps a method from
     creating an attribute that would overwrite or shadow that method.
 
     @return: L{True} if the name does not exist or is an existing (possibly
-        inherited) attribute, L{False} otherwise
+        inherited) attribute, L{False} if this name defines something other than an L{Attribute}.
     """
     obj = cls.find(name)
     return obj is None or isinstance(obj, model.Attribute)    
@@ -178,25 +179,24 @@ def extract_final_subscript(annotation: ast.Subscript) -> ast.expr:
 def is_alias(value: Optional[ast.expr]) -> bool:
     return node2dottedname(value) is not None
 
-class ModuleVistor(ast.NodeVisitor):
-    currAttr: Optional[model.Documentable]
-    newAttr: Optional[model.Documentable]
+
+class ModuleVistor(NodeVisitor):
 
     def __init__(self, builder: 'ASTBuilder', module: model.Module):
+        super().__init__()
         self.builder = builder
         self.system = builder.system
         self.module = module
         self._moduleLevelAssigns: List[str] = []
 
-    def default(self, node: ast.AST) -> None:
-        body: Optional[Sequence[ast.stmt]] = getattr(node, 'body', None)
-        if body is not None:
-            self.currAttr = None
-            for child in body:
-                self.newAttr = None
-                self.visit(child)
-                self.currAttr = self.newAttr
-            self.newAttr = None
+
+    def visit_If(self, node: ast.If) -> None:
+        if isinstance(node.test, ast.Compare):
+            if is__name__equals__main__(node.test):
+                # skip if __name__ == '__main__': blocks since
+                # whatever is declared in them cannot be imported
+                # and thus is not part of the API
+                raise self.SkipNode()
 
     def visit_Module(self, node: ast.Module) -> None:
         assert self.module.docstring is None
@@ -205,14 +205,15 @@ def visit_Module(self, node: ast.Module) -> None:
         if len(node.body) > 0 and isinstance(node.body[0], ast.Expr) and isinstance(node.body[0].value, ast.Str):
             self.module.setDocstring(node.body[0].value)
             epydoc2stan.extract_fields(self.module)
-        self.default(node)
+
+    def depart_Module(self, node: ast.Module) -> None:
         self.builder.pop(self.module)
 
-    def visit_ClassDef(self, node: ast.ClassDef) -> Optional[model.Class]:
+    def visit_ClassDef(self, node: ast.ClassDef) -> None:
         # Ignore classes within functions.
         parent = self.builder.current
         if isinstance(parent, model.Function):
-            return None
+            raise self.SkipNode()
 
         rawbases = []
         bases = []
@@ -269,10 +270,10 @@ def visit_ClassDef(self, node: ast.ClassDef) -> Optional[model.Class]:
         for b in cls.baseobjects:
             if b is not None:
                 b.subclasses.append(cls)
-        self.default(node)
+
+    def depart_ClassDef(self, node: ast.ClassDef) -> None:
         self.builder.popClass()
 
-        return cls
 
     def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
         ctx = self.builder.current
@@ -334,19 +335,21 @@ def _importAll(self, modname: str) -> None:
                 if not name.startswith('_')
                 ]
 
+        # Fetch names to export.
+        exports = self._getCurrentModuleExports()
+
         # Add imported names to our module namespace.
         assert isinstance(self.builder.current, model.CanContainImportsDocumentable)
         _localNameToFullName = self.builder.current._localNameToFullName_map
         expandName = mod.expandName
         for name in names:
-            _localNameToFullName[name] = expandName(name)
 
-    def _importNames(self, modname: str, names: Iterable[ast.alias]) -> None:
-        """Handle a C{from  import } statement."""
+            if self._handleReExport(exports, name, name, mod) is True:
+                continue
 
-        # Process the module we're importing from.
-        mod = self.system.getProcessedModule(modname)
+            _localNameToFullName[name] = expandName(name)
 
+    def _getCurrentModuleExports(self) -> Collection[str]:
         # Fetch names to export.
         current = self.builder.current
         if isinstance(current, model.Module):
@@ -354,44 +357,103 @@ def _importNames(self, modname: str, names: Iterable[ast.alias]) -> None:
             if exports is None:
                 exports = []
         else:
-            assert isinstance(current, model.CanContainImportsDocumentable)
             # Don't export names imported inside classes or functions.
             exports = []
+        return exports
+
+    def _handleReExport(self, curr_mod_exports:Collection[str], 
+                        origin_name:str, as_name:str,
+                        origin_module:Union[model.Module, str]) -> bool:
+        """
+        Move re-exported objects into current module.
+
+        @param origin_module: The L{model.Module} object, or the module's full name as a string if the module is unknown to this system.
+        @returns: True if the imported name has been successfully re-exported.
+        """
+        # Move re-exported objects into current module.
+        current = self.builder.current
+        if isinstance(origin_module, model.Module):
+            modname = origin_module.fullName()
+            known_module = True
+        else:
+            modname = origin_module
+            known_module = False
+        if as_name in curr_mod_exports:
+            # In case of duplicates names, we can't rely on resolveName,
+            # In case of duplicate names, we can't rely on resolveName,
+            # so we use contents.get first to resolve non-alias names.
+                ob = origin_module.contents.get(origin_name) or origin_module.resolveName(origin_name)
+                if ob is None:
+                    self.builder.warning("cannot resolve re-exported name",
+                                            f'{modname}.{origin_name}')
+                else:
+                    if origin_module.all is None or origin_name not in origin_module.all:
+                        self.system.msg(
+                            "astbuilder",
+                            "moving %r into %r" % (ob.fullName(), current.fullName())
+                            )
+                        # Must be a Module since the exports is set to an empty list if it's not.
+                        assert isinstance(current, model.Module)
+                        ob.reparent(current, as_name)
+                        return True
+            else:
+                # re-export names that are not part of the current system with an alias
+                attr = current.contents.get(as_name)
+                if not attr:
+                    attr = self.builder.addAttribute(name=as_name, kind=model.DocumentableKind.ALIAS, parent=current)
+                assert isinstance(attr, model.Attribute)
+                attr._alias_to = f'{modname}.{origin_name}'
+                # This is only for the HTML repr
+                attr.value=ast.Name(attr._alias_to)
+                return True
+
+        return False
+
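+    # A minimal sketch of the re-export idiom handled by _handleReExport above
+    # (hypothetical package names):
+    #
+    #     # mylib/__init__.py
+    #     from mylib._impl import MyClass
+    #     __all__ = ['MyClass']
+    #
+    # If mylib._impl is part of the system, MyClass is reparented and documented
+    # as mylib.MyClass; otherwise an ALIAS attribute pointing to
+    # 'mylib._impl.MyClass' is created in mylib.
+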
+    def _importNames(self, modname: str, names: Iterable[ast.alias]) -> None:
+        """Handle a C{from  import } statement."""
 
+        # Process the module we're importing from.
+        mod = self.system.getProcessedModule(modname)
+
+        # Fetch names to export.
+        exports = self._getCurrentModuleExports()
+
+        current = self.builder.current
+        assert isinstance(current, model.CanContainImportsDocumentable)
         _localNameToFullName = current._localNameToFullName_map
         for al in names:
             orgname, asname = al.name, al.asname
             if asname is None:
                 asname = orgname
 
-            # Move re-exported objects into current module.
-            if asname in exports:
-                if mod is None: 
-                    # re-export names that are not part of the current system with an alias
-                    attr = current.contents.get(asname)
-                    if not attr:
-                        attr = self.builder.addAttribute(name=asname, kind=model.DocumentableKind.ALIAS, parent=current)
-                    assert isinstance(attr, model.Attribute)
-                    attr._alias_to = f'{modname}.{orgname}'
-                    # This is only for the HTML repr
-                    attr.value=ast.Name(attr._alias_to)
-                    continue
-                else:
-                    try:
-                        ob = mod.contents[orgname]
-                    except KeyError:
-                        self.builder.warning("cannot find re-exported name",
-                                            f'{modname}.{orgname}')
-                    else:
-                        if mod.all is None or orgname not in mod.all:
-                            self.system.msg(
-                                "astbuilder",
-                                "moving %r into %r" % (ob.fullName(), current.fullName())
-                                )
-                            # Must be a Module since the exports is set to an empty list if it's not.
-                            assert isinstance(current, model.Module)
-                            ob.reparent(current, asname)
-                            continue
+            if self._handleReExport(exports, orgname, asname, mod or modname) is True:
+                continue
 
             # If we're importing from a package, make sure imported modules
             # are processed (getProcessedModule() ignores non-modules).
@@ -528,29 +590,42 @@ def _handleModuleVar(self,
             # and therefore doesn't need an Attribute instance.
             return
         parent = self.builder.current
-        obj = parent.resolveName(target)
+        obj = parent.contents.get(target)
         
         if obj is None:
             obj = self.builder.addAttribute(name=target, kind=None, parent=parent)
         
-        if isinstance(obj, model.Attribute):
-            
-            if annotation is None and expr is not None:
-                annotation = _infer_type(expr)
+        # If it's not an Attribute, the name is already defined as a function or class,
+        # which probably means this assignment rebinds it to a bound callable:
+        #
+        #   def func(value, stock) -> int: ...
+        #   var = 2
+        #   func = partial(func, value=var)
+        #
+        # We don't know how to handle this, so we ignore it and keep documenting the
+        # original object. As a consequence we might document arguments that no longer
+        # exist because they are already bound by the partial() call, for instance.
+
+        if not isinstance(obj, model.Attribute):
+            return
             
-            obj.annotation = annotation
-            obj.setLineNumber(lineno)
-            if is_alias(expr):
-                self._handleAlias(obj=obj, value=expr, lineno=lineno)
-            elif is_constant(obj):
-                self._handleConstant(obj=obj, value=expr, lineno=lineno)
-            else:
-                obj.kind = model.DocumentableKind.VARIABLE
-                # We store the expr value for all Attribute in order to be able to 
-                # check if they have been initialized or not.
-                obj.value = expr
+        if annotation is None and expr is not None:
+            annotation = _infer_type(expr)
+        
+        obj.annotation = annotation
+        obj.setLineNumber(lineno)
+        
+        if is_alias(expr):
+            self._handleAlias(obj=obj, value=expr, lineno=lineno)
+        elif is_constant(obj):
+            self._handleConstant(obj=obj, value=expr, lineno=lineno)
+        else:
+            obj.kind = model.DocumentableKind.VARIABLE
+            # We store the expr value for all Attribute in order to be able to 
+            # check if they have been initialized or not.
+            obj.value = expr
 
-            self.newAttr = obj
+        self.builder.currentAttr = obj
 
     def _handleAssignmentInModule(self,
             target: str,
@@ -604,7 +679,7 @@ def _handleClassVar(self,
         else:
             obj.value = expr
 
-        self.newAttr = obj
+        self.builder.currentAttr = obj
 
     def _handleInstanceVar(self,
             name: str,
@@ -621,7 +696,7 @@ def _handleInstanceVar(self,
         if not _maybeAttribute(cls, name):
             return
 
-        # Class variables can only be Attribute, so it's OK to cast
+        # Class variables can only be Attribute, so it's OK to cast because we used _maybeAttribute() above.
         obj = cast(Optional[model.Attribute], cls.contents.get(name))
         if obj is None:
 
@@ -640,7 +715,8 @@ def _handleInstanceVar(self,
         else:
             obj.kind = model.DocumentableKind.INSTANCE_VARIABLE
             obj.value = expr
-        self.newAttr = obj
+        
+        self.builder.currentAttr = obj
 
     def _handleAssignmentInClass(self,
             target: str,
@@ -748,12 +824,13 @@ def visit_AnnAssign(self, node: ast.AnnAssign) -> None:
     def visit_Expr(self, node: ast.Expr) -> None:
         value = node.value
         if isinstance(value, ast.Str):
-            attr = self.currAttr
+            attr = self.builder.currentAttr
             if attr is not None:
                 attr.setDocstring(value)
-
+                self.builder.currentAttr = None
         self.generic_visit(node)
 
+
     def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
         self._handleFunctionDef(node, is_async=True)
 
@@ -767,18 +844,23 @@ def _handleFunctionDef(self,
         # Ignore inner functions.
         parent = self.builder.current
         if isinstance(parent, model.Function):
-            return
+            raise self.SkipNode()
 
         lineno = node.lineno
+
+        # Set the line number from the first decorator, if any.
         if node.decorator_list:
             lineno = node.decorator_list[0].lineno
 
+        # Extract the docstring.
         docstring: Optional[ast.Str] = None
         if len(node.body) > 0 and isinstance(node.body[0], ast.Expr) \
                               and isinstance(node.body[0].value, ast.Str):
             docstring = node.body[0].value
 
         func_name = node.name
+
+        # determine the function's kind
         is_property = False
         is_classmethod = False
         is_staticmethod = False
@@ -802,12 +884,13 @@ def _handleFunctionDef(self,
                     func_name = '.'.join(deco_name[-2:])
 
         if is_property:
+            # handle property and skip child nodes.
             attr = self._handlePropertyDef(node, docstring, lineno)
             if is_classmethod:
                 attr.report(f'{attr.fullName()} is both property and classmethod')
             if is_staticmethod:
                 attr.report(f'{attr.fullName()} is both property and staticmethod')
-            return
+            raise self.SkipNode()
 
         func = self.builder.pushFunction(func_name, lineno)
         func.is_async = is_async
@@ -864,7 +947,11 @@ def add_arg(name: str, kind: Any, default: Optional[ast.expr]) -> None:
 
         func.signature = signature
         func.annotations = self._annotations_from_function(node)
-        self.default(node)
+    
+    def depart_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
+        self.builder.popFunction()
+
+    def depart_FunctionDef(self, node: ast.FunctionDef) -> None:
         self.builder.popFunction()
 
     def _handlePropertyDef(self,
@@ -990,7 +1077,7 @@ def __init__(self, value: Any, ctx: model.Documentable):
         The colorized value as L{ParsedDocstring}.
         """
 
-        self._linker = epydoc2stan._EpydocLinker(ctx)
+        self._linker = ctx.docstring_linker
         """
         Linker.
         """
@@ -1110,26 +1197,41 @@ def _annotation_for_elements(sequence: Iterable[object]) -> Optional[ast.expr]:
 DocumentableT = TypeVar('DocumentableT', bound=model.Documentable)
 
 class ASTBuilder:
+    """
+    Keeps track of the state of the AST build, creates documentables, and adds objects to the system.
+    """
     ModuleVistor = ModuleVistor
 
     def __init__(self, system: model.System):
         self.system = system
-        self.current = cast(model.Documentable, None)
-        self.currentMod: Optional[model.Module] = None
+        
+        self.current = cast(model.Documentable, None) # currently visited object
+        self.currentMod: Optional[model.Module] = None # current module, set when visiting an ast.Module
+        self.currentAttr: Optional[model.Documentable] = None # most recently visited attribute object
+        
         self._stack: List[model.Documentable] = []
         self.ast_cache: Dict[Path, Optional[ast.Module]] = {}
 
+
     def _push(self, cls: Type[DocumentableT], name: str, lineno: int) -> DocumentableT:
+        """
+        Create and enter a new object of the given type and add it to the system.
+        """
         obj = cls(self.system, name, self.current)
         self.system.addObject(obj)
         self.push(obj, lineno)
+        self.currentAttr = None
         return obj
 
     def _pop(self, cls: Type[model.Documentable]) -> None:
         assert isinstance(self.current, cls)
         self.pop(self.current)
+        self.currentAttr = None
 
     def push(self, obj: model.Documentable, lineno: int) -> None:
+        """
+        Enter a documentable.
+        """
         self._stack.append(self.current)
         self.current = obj
         if isinstance(obj, model.Module):
@@ -1146,30 +1248,51 @@ def push(self, obj: model.Documentable, lineno: int) -> None:
             obj.setLineNumber(lineno)
 
     def pop(self, obj: model.Documentable) -> None:
+        """
+        Leave a documentable.
+        """
         assert self.current is obj, f"{self.current!r} is not {obj!r}"
         self.current = self._stack.pop()
         if isinstance(obj, model.Module):
             self.currentMod = None
 
     def pushClass(self, name: str, lineno: int) -> model.Class:
+        """
+        Create and enter a new class in the system.
+        """
         return self._push(self.system.Class, name, lineno)
+
     def popClass(self) -> None:
+        """
+        Leave a class.
+        """
         self._pop(self.system.Class)
 
     def pushFunction(self, name: str, lineno: int) -> model.Function:
+        """
+        Create and enter a new function in the system.
+        """
         return self._push(self.system.Function, name, lineno)
+
     def popFunction(self) -> None:
+        """
+        Leave a function.
+        """
         self._pop(self.system.Function)
 
     def addAttribute(self,
             name: str, kind: Optional[model.DocumentableKind], parent: model.Documentable
             ) -> model.Attribute:
+        """
+        Add a new attribute to the system; attributes cannot be "entered".
+        """
         system = self.system
         parentMod = self.currentMod
         attr = system.Attribute(system, name, parent)
         attr.kind = kind
         attr.parentMod = parentMod
         system.addObject(attr)
+        self.currentAttr = attr
         return attr
 
     def warning(self, message: str, detail: str) -> None:
@@ -1185,7 +1308,10 @@ def processModuleAST(self, mod_ast: ast.Module, mod: model.Module) -> None:
             else:
                 module_var_parser(node, mod)
 
-        self.ModuleVistor(self, mod).visit(mod_ast)
+        vis = self.ModuleVistor(self, mod)
+        vis.extensions.add(*self.system._astbuilder_visitors)
+        vis.extensions.attach_visitor(vis)
+        vis.walkabout(mod_ast)
 
     def parseFile(self, path: Path) -> Optional[ast.Module]:
         try:
@@ -1198,6 +1324,14 @@ def parseFile(self, path: Path) -> Optional[ast.Module]:
                 self.warning("cannot parse", str(path))
             self.ast_cache[path] = mod
             return mod
+    
+    def parseString(self, py_string:str) -> Optional[ast.Module]:
+        mod = None
+        try:
+            mod = _parse(py_string)
+        except (SyntaxError, ValueError):
+            self.warning("cannot parse string: ", py_string)
+        return mod
 
 model.System.defaultBuilder = ASTBuilder
 
diff --git a/pydoctor/astutils.py b/pydoctor/astutils.py
index 98871cff3..0302afd51 100644
--- a/pydoctor/astutils.py
+++ b/pydoctor/astutils.py
@@ -2,14 +2,64 @@
 Various bits of reusable code related to L{ast.AST} node processing.
 """
 
-from typing import Optional, List, TYPE_CHECKING, Union
+from typing import Iterator, Optional, List, Iterable, Sequence, TYPE_CHECKING
+
 from inspect import BoundArguments, Signature
 import ast
 
+import sys
+from numbers import Number
+
+from pydoctor import visitor
+
 if TYPE_CHECKING:
-    from pydoctor.model import Documentable
+    from pydoctor import model
+
+# AST visitors
 
-def node2dottedname(node: Optional[ast.expr]) -> Optional[List[str]]:
+def iter_values(node: ast.AST) -> Iterator[ast.AST]:
+    for _, value in ast.iter_fields(node):
+        if isinstance(value, list):
+            for item in value:
+                if isinstance(item, ast.AST):
+                    yield item
+        elif isinstance(value, ast.AST):
+            yield value
+
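+# Rough sketch of what iter_values() yields: only the direct child nodes.
+#
+#     tree = ast.parse('x = 1')
+#     list(iter_values(tree))  # -> [<ast.Assign object>]
+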
+class NodeVisitor(visitor.PartialVisitor[ast.AST]):
+    """
+    Generic AST node visitor. This class does not work like L{ast.NodeVisitor}:
+    it only visits statements directly within a C{B{body}}, and visitor methods can't return anything.
+
+    :See: L{visitor} for more information.
+    """
+    def generic_visit(self, node: ast.AST) -> None:
+        """
+        Helper method to visit a node by calling C{visit()} on each child of the node.
+        This is useful because this visitor only visits statements inside the C{body} attribute.
+        
+        So if one wants to visit L{ast.Expr} children with their visitor, they should include::
+
+            def visit_Expr(self, node:ast.Expr):
+                self.generic_visit(node)
+        """
+        for v in iter_values(node):
+            self.visit(v)
+    
+    @classmethod
+    def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:
+        """
+        Returns the nested nodes in the body of a node.
+        """
+        body: Optional[Sequence[ast.AST]] = getattr(node, 'body', None)
+        if body is not None:
+            for child in body:
+                yield child
+
+class NodeVisitorExt(visitor.VisitorExt[ast.AST]):
+    ...
+
+def node2dottedname(node: Optional[ast.AST]) -> Optional[List[str]]:
     """
     Resolve an expression composed of L{ast.Attribute} and L{ast.Name} nodes to a list of names. 
     """
@@ -24,6 +74,15 @@ def node2dottedname(node: Optional[ast.expr]) -> Optional[List[str]]:
     parts.reverse()
     return parts
 
+def node2fullname(expr: Optional[ast.AST], ctx: 'model.Documentable') -> Optional[str]:
+    """
+    Returns L{ctx.expandName(name)} if C{expr} is a valid name, or C{None}.
+    """
+    dottedname = node2dottedname(expr)
+    if dottedname is None:
+        return None
+    return ctx.expandName('.'.join(dottedname))
+
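+# Rough usage sketch (hypothetical module 'mymod' that defines a class 'C'):
+#
+#     node2fullname(ast.parse('C', mode='eval').body, mymod)  # -> 'mymod.C'
+#
+# The exact result depends on how expandName() resolves the name in the given context.
+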
 def bind_args(sig: Signature, call: ast.Call) -> BoundArguments:
     """
     Binds the arguments of a function call to that function's signature.
@@ -38,11 +97,65 @@ def bind_args(sig: Signature, call: ast.Call) -> BoundArguments:
         }
     return sig.bind(*call.args, **kwargs)
 
-def node2fullname(expr: Optional[Union[ast.expr, str]], ctx: 'Documentable') -> Optional[str]:
+if sys.version_info[:2] >= (3, 8):
+    # Since Python 3.8 "foo" is parsed as ast.Constant.
+    def get_str_value(expr:ast.expr) -> Optional[str]:
+        if isinstance(expr, ast.Constant) and isinstance(expr.value, str):
+            return expr.value
+        return None
+    def get_num_value(expr:ast.expr) -> Optional[Number]:
+        if isinstance(expr, ast.Constant) and isinstance(expr.value, Number):
+            return expr.value
+        return None
+    def _is_str_constant(expr: ast.expr, s: str) -> bool:
+        return isinstance(expr, ast.Constant) and expr.value == s
+else:
+    # Before Python 3.8 "foo" was parsed as ast.Str.
+    def get_str_value(expr:ast.expr) -> Optional[str]:
+        if isinstance(expr, ast.Str):
+            return expr.s
+        return None
+    def get_num_value(expr:ast.expr) -> Optional[Number]:
+        if isinstance(expr, ast.Num):
+            return expr.n
+        return None
+    def _is_str_constant(expr: ast.expr, s: str) -> bool:
+        return isinstance(expr, ast.Str) and expr.s == s
+
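+# Quick sketch of the helpers above:
+#
+#     get_str_value(ast.parse("'foo'", mode='eval').body)  # -> 'foo'
+#     get_num_value(ast.parse('42', mode='eval').body)     # -> 42
+#     get_str_value(ast.parse('42', mode='eval').body)     # -> None
+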
+def is__name__equals__main__(cmp: ast.Compare) -> bool:
     """
-    Return L{ctx.expandName(name)} if C{expr} is a valid name, or C{None}.
+    Returns whether the given L{ast.Compare} node represents the test C{__name__ == '__main__'}.
     """
-    dottedname = node2dottedname(expr) if isinstance(expr, ast.expr) else expr
-    if dottedname is None:
-        return None
-    return ctx.expandName('.'.join(dottedname))
\ No newline at end of file
+    return isinstance(cmp.left, ast.Name) \
+    and cmp.left.id == '__name__' \
+    and len(cmp.ops) == 1 \
+    and isinstance(cmp.ops[0], ast.Eq) \
+    and len(cmp.comparators) == 1 \
+    and _is_str_constant(cmp.comparators[0], '__main__')
+
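+# For example (sketch):
+#
+#     tree = ast.parse("if __name__ == '__main__':\n    main()")
+#     is__name__equals__main__(tree.body[0].test)  # -> True
+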
+def is_using_typing_final(expr: Optional[ast.AST], 
+                    ctx:'model.Documentable') -> bool:
+    return is_using_annotations(expr, ("typing.Final", "typing_extensions.Final"), ctx)
+
+def is_using_typing_classvar(expr: Optional[ast.AST], 
+                    ctx:'model.Documentable') -> bool:
+    return is_using_annotations(expr, ('typing.ClassVar', "typing_extensions.ClassVar"), ctx)
+
+def is_using_annotations(expr: Optional[ast.AST], 
+                            annotations:Sequence[str], 
+                            ctx:'model.Documentable') -> bool:
+    """
+    Detect whether this expression is one of the specified annotations (or a subscript of one),
+    identified by their full names.
+    """
+    full_name = node2fullname(expr, ctx)
+    if full_name in annotations:
+        return True
+    if isinstance(expr, ast.Subscript):
+        # Final[...] or typing.Final[...] expressions
+        if isinstance(expr.value, (ast.Name, ast.Attribute)):
+            value = expr.value
+            full_name = node2fullname(value, ctx)
+            if full_name in annotations:
+                return True
+    return False
+
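+# Rough sketch, assuming 'Final' is imported from typing in the context module 'mod':
+#
+#     is_using_typing_final(ast.parse('Final', mode='eval').body, mod)       # -> True
+#     is_using_typing_final(ast.parse('Final[int]', mode='eval').body, mod)  # -> True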
diff --git a/pydoctor/driver.py b/pydoctor/driver.py
index df5d1cfbb..30b69bec8 100644
--- a/pydoctor/driver.py
+++ b/pydoctor/driver.py
@@ -1,23 +1,16 @@
-"""The command-line parsing and entry point."""
+"""The entry point."""
 
-from optparse import SUPPRESS_HELP, Option, OptionParser, OptionValueError, Values
-from pathlib import Path
-from typing import TYPE_CHECKING, List, Sequence, Tuple, Type, TypeVar, cast
+from typing import  Sequence
 import datetime
 import os
 import sys
+from pathlib import Path
 
-from pydoctor.themes import get_themes
-from pydoctor import model, zopeinterface, __version__
-from pydoctor.templatewriter import IWriter, TemplateError, TemplateLookup
-from pydoctor.sphinx import (MAX_AGE_HELP, USER_INTERSPHINX_CACHE,
-                             SphinxInventoryWriter, prepareCache)
-from pydoctor.epydoc.markup import get_supported_docformats
-
-if TYPE_CHECKING:
-    from typing_extensions import NoReturn
-else:
-    NoReturn = None
+from pydoctor.options import Options, BUILDTIME_FORMAT
+from pydoctor.utils import error
+from pydoctor import model
+from pydoctor.templatewriter import IWriter, TemplateLookup, TemplateError
+from pydoctor.sphinx import SphinxInventoryWriter, prepareCache
 
 # In newer Python versions, use importlib.resources from the standard library.
 # On older versions, a compatibility package must be installed from PyPI.
@@ -26,298 +19,132 @@
 else:
     import importlib.resources as importlib_resources
 
-BUILDTIME_FORMAT = '%Y-%m-%d %H:%M:%S'
-
-def error(msg: str, *args: object) -> NoReturn:
-    if args:
-        msg = msg%args
-    print(msg, file=sys.stderr)
-    sys.exit(1)
-
-T = TypeVar('T')
-
-def findClassFromDottedName(
-        dottedname: str,
-        optionname: str,
-        base_class: Type[T]
-        ) -> Type[T]:
+def get_system(options: model.Options) -> model.System:
     """
-    Looks up a class by full name.
-    Watch out, prints a message and SystemExits on error!
-    """
-    if '.' not in dottedname:
-        error("%stakes a dotted name", optionname)
-    parts = dottedname.rsplit('.', 1)
-    try:
-        mod = __import__(parts[0], globals(), locals(), parts[1])
-    except ImportError:
-        error("could not import module %s", parts[0])
-    try:
-        cls = getattr(mod, parts[1])
-    except AttributeError:
-        error("did not find %s in module %s", parts[1], parts[0])
-    if not issubclass(cls, base_class):
-        error("%s is not a subclass of %s", cls, base_class)
-    return cast(Type[T], cls)
-
-MAKE_HTML_DEFAULT = object()
-
-def resolve_path(path: str) -> Path:
-    """Parse a given path string to a L{Path} object.
-
-    The path is converted to an absolute path, as required by
-    L{System.setSourceHref()}.
-    The path does not need to exist.
+    Get a system with the defined options. Load packages and modules.
     """
+    cache = prepareCache(clearCache=options.clear_intersphinx_cache,
+                         enableCache=options.enable_intersphinx_cache,
+                         cachePath=options.intersphinx_cache_path,
+                         maxAge=options.intersphinx_cache_max_age)
 
-    # We explicitly make the path relative to the current working dir
-    # because on Windows resolve() does not produce an absolute path
-    # when operating on a non-existing path.
-    return Path(Path.cwd(), path).resolve()
+    # step 1: make/find the system
+    system = options.systemclass(options)
+    system.fetchIntersphinxInventories(cache)
+    cache.close() # Fixes ResourceWarning: unclosed 
 
-def parse_path(option: Option, opt: str, value: str) -> Path:
-    """Parse a path value given to an option to a L{Path} object
-    using L{resolve_path()}.
-    """
+    # TODO: load buildtime with default factory and converter in model.Options
+    # Support source date epoch:
+    # https://reproducible-builds.org/specs/source-date-epoch/
     try:
-        return resolve_path(value)
-    except Exception as ex:
-        raise OptionValueError(f"{opt}: invalid path: {ex}")
-
-class CustomOption(Option):
-    TYPES = Option.TYPES + ("path",)
-    TYPE_CHECKER = dict(Option.TYPE_CHECKER, path=parse_path)
-
-def getparser() -> OptionParser:
-    parser = OptionParser(
-        option_class=CustomOption, version=__version__,
-        usage="usage: %prog [options] SOURCEPATH...")
-    parser.add_option(
-        '-c', '--config', dest='configfile',
-        help=("Use config from this file (any command line"
-              "options override settings from the file)."))
-    parser.add_option(
-        '--system-class', dest='systemclass',
-        help=("A dotted name of the class to use to make a system."))
-    parser.add_option(
-        '--project-name', dest='projectname',
-        help=("The project name, shown at the top of each HTML page."))
-    parser.add_option(
-        '--project-version',
-        dest='projectversion',
-        default='',
-        metavar='VERSION',
-        help=(
-            "The version of the project for which the API docs are generated. "
-            "Defaults to empty string."
-            ))
-    parser.add_option(
-        '--project-url', dest='projecturl',
-        help=("The project url, appears in the html if given."))
-    parser.add_option(
-        '--project-base-dir', dest='projectbasedirectory', type='path',
-        help=("Path to the base directory of the project.  Source links "
-              "will be computed based on this value."), metavar="PATH",)
-    parser.add_option(
-        '--testing', dest='testing', action='store_true',
-        help=("Don't complain if the run doesn't have any effects."))
-    parser.add_option(
-        '--pdb', dest='pdb', action='store_true',
-        help=("Like py.test's --pdb."))
-    parser.add_option(
-        '--make-html', action='store_true', dest='makehtml',
-        default=MAKE_HTML_DEFAULT, help=("Produce html output."
-            " Enabled by default if options '--testing' or '--make-intersphinx' are not specified. "))
-    parser.add_option(
-        '--make-intersphinx', action='store_true', dest='makeintersphinx',
-        default=False, help=("Produce (only) the objects.inv intersphinx file."))
-    parser.add_option(
-        '--add-package', action='append', dest='packages',
-        metavar='PACKAGEDIR', default=[], help=SUPPRESS_HELP)
-    parser.add_option(
-        '--add-module', action='append', dest='modules',
-        metavar='MODULE', default=[], help=SUPPRESS_HELP)
-    parser.add_option(
-        '--prepend-package', action='store', dest='prependedpackage',
-        help=("Pretend that all packages are within this one.  "
-              "Can be used to document part of a package."))
-    _docformat_choices = get_supported_docformats()
-    parser.add_option(
-        '--docformat', dest='docformat', action='store', default='epytext',
-        type="choice", choices=list(_docformat_choices),
-        help=("Format used for parsing docstrings. "
-             f"Supported values: {', '.join(_docformat_choices)}"),
-             metavar='FORMAT')
-    parser.add_option(
-        '--template-dir', action='append',
-        dest='templatedir', default=[],
-        help=("Directory containing custom HTML templates. Can repeat."),
-        metavar='PATH',
-    )
-    parser.add_option('--theme', dest='theme', default='classic', 
-        choices=list(get_themes()) ,
-        help=("The theme to use when building your API documentation. "),
-    )
-    parser.add_option(
-        '--html-subject', dest='htmlsubjects', action='append',
-        help=("The fullName of objects to generate API docs for"
-              " (generates everything by default)."),
-              metavar='PACKAGE/MOD/CLASS')
-    parser.add_option(
-        '--html-summary-pages', dest='htmlsummarypages',
-        action='store_true', default=False,
-        help=("Only generate the summary pages."))
-    parser.add_option(
-        '--html-output', dest='htmloutput', default='apidocs',
-        help=("Directory to save HTML files to (default 'apidocs')"), metavar='PATH',)
-    parser.add_option(
-        '--html-writer', dest='htmlwriter',
-        help=("Dotted name of writer class to use (default "
-              "'pydoctor.templatewriter.TemplateWriter')."), metavar='CLASS',)
-    parser.add_option(
-        '--html-viewsource-base', dest='htmlsourcebase',
-        help=("This should be the path to the trac browser for the top "
-              "of the svn checkout we are documenting part of."), metavar='URL',)
-    parser.add_option(
-        '--process-types', dest='processtypes', action='store_true', 
-        help="Process the 'type' and 'rtype' fields, add links and inline markup automatically. "
-            "This settings should not be enabled when using google or numpy docformat because the types are always processed by default.",)
-    parser.add_option(
-        '--buildtime', dest='buildtime',
-        help=("Use the specified build time over the current time. "
-              "Format: %s" % BUILDTIME_FORMAT), metavar='TIME')
-    parser.add_option(
-        '-W', '--warnings-as-errors', action='store_true',
-        dest='warnings_as_errors', default=False,
-        help=("Return exit code 3 on warnings."))
-    parser.add_option(
-        '-v', '--verbose', action='count', dest='verbosity',
-        default=0,
-        help=("Be noisier.  Can be repeated for more noise."))
-    parser.add_option(
-        '-q', '--quiet', action='count', dest='quietness',
-        default=0,
-        help=("Be quieter."))
-    def verbose_about_callback(option: Option, opt_str: str, value: str, parser: OptionParser) -> None:
-        assert parser.values is not None
-        d = parser.values.verbosity_details
-        d[value] = d.get(value, 0) + 1
-    parser.add_option(
-        '--verbose-about', metavar="stage", action="callback",
-        type=str, default={}, dest='verbosity_details',
-        callback=verbose_about_callback,
-        help=("Be noiser during a particular stage of generation."))
-    parser.add_option(
-        '--introspect-c-modules', default=False, action='store_true',
-        help=("Import and introspect any C modules found."))
-
-    parser.add_option(
-        '--intersphinx', action='append', dest='intersphinx',
-        metavar='URL_TO_OBJECTS.INV', default=[],
-        help=(
-            "Use Sphinx objects inventory to generate links to external "
-            "documentation. Can be repeated."))
-
-    parser.add_option(
-        '--enable-intersphinx-cache',
-        dest='enable_intersphinx_cache_deprecated',
-        action='store_true',
-        default=False,
-        help=SUPPRESS_HELP
-    )
-    parser.add_option(
-        '--disable-intersphinx-cache',
-        dest='enable_intersphinx_cache',
-        action='store_false',
-        default=True,
-        help="Disable Intersphinx cache."
-    )
-    parser.add_option(
-        '--intersphinx-cache-path',
-        dest='intersphinx_cache_path',
-        default=USER_INTERSPHINX_CACHE,
-        help="Where to cache intersphinx objects.inv files.",
-        metavar='PATH',
-    )
-    parser.add_option(
-        '--clear-intersphinx-cache',
-        dest='clear_intersphinx_cache',
-        action='store_true',
-        default=False,
-        help=("Clear the Intersphinx cache "
-              "specified by --intersphinx-cache-path."),
-    )
-    parser.add_option(
-        '--intersphinx-cache-max-age',
-        dest='intersphinx_cache_max_age',
-        default='1d',
-        help=MAX_AGE_HELP,
-        metavar='DURATION',
-    )
-    parser.add_option(
-        '--pyval-repr-maxlines', dest='pyvalreprmaxlines', default=7, type=int,
-        help='Maxinum number of lines for a constant value representation. Use 0 for unlimited.')
-    parser.add_option(
-        '--pyval-repr-linelen', dest='pyvalreprlinelen', default=80, type=int,
-        help='Maxinum number of caracters for a constant value representation line. Use 0 for unlimited.')
-
-    return parser
-
-def readConfigFile(options: Values) -> None:
-    # this is all a bit horrible.  rethink, then rewrite!
-    for i, line in enumerate(open(options.configfile)):
-        line = line.strip()
-        if not line or line.startswith('#'):
-            continue
-        if ':' not in line:
-            error("don't understand line %d of %s",
-                  i+1, options.configfile)
-        k, v = line.split(':', 1)
-        k = k.strip()
-        v = os.path.expanduser(v.strip())
-
-        if not hasattr(options, k):
-            error("invalid option %r on line %d of %s",
-                  k, i+1, options.configfile)
-        pre_v = getattr(options, k)
-        if not pre_v:
-            if isinstance(pre_v, list):
-                setattr(options, k, v.split(','))
-            else:
-                setattr(options, k, v)
-        else:
-            if not isinstance(pre_v, list):
-                setattr(options, k, v)
+        system.buildtime = datetime.datetime.utcfromtimestamp(
+            int(os.environ['SOURCE_DATE_EPOCH']))
+    except ValueError as e:
+        error(str(e))
+    except KeyError:
+        pass
+    # Load custom buildtime
+    if options.buildtime:
+        try:
+            system.buildtime = datetime.datetime.strptime(
+                options.buildtime, BUILDTIME_FORMAT)
+        except ValueError as e:
+            error(str(e))
+    
+    # step 2: add any packages and modules
+
+    prependedpackage = None
+    if options.prependedpackage:
+        for m in options.prependedpackage.split('.'):
+            prependedpackage = system.Package(
+                system, m, prependedpackage)
+            system.addObject(prependedpackage)
+            initmodule = system.Module(system, '__init__', prependedpackage)
+            system.addObject(initmodule)
+    
+    builder = system.systemBuilder(system)
+    try:
+        for path in options.sourcepath:
+            builder.addModule(path)
+    except model.SystemBuildingError as e:
+        error(str(e))
 
-def parse_args(args: Sequence[str]) -> Tuple[Values, List[str]]:
-    parser = getparser()
-    options, args = parser.parse_args(args)
-    options.verbosity -= options.quietness
+    # step 3: move the system to the desired state
 
-    _warn_deprecated_options(options)
+    if system.options.projectname is None:
+        name = '/'.join(system.root_names)
+        system.msg('warning', f"Guessing '{name}' for project name.", thresh=0)
+        system.projectname = name
+    else:
+        system.projectname = system.options.projectname
 
-    return options, args
+    builder.buildModules()
 
+    return system
 
-def _warn_deprecated_options(options: Values) -> None:
+def make(system: model.System) -> None:
     """
-    Check the CLI options and warn on deprecated options.
+    Produce the HTML and intersphinx output, as configured in the system's options.
     """
-    if options.enable_intersphinx_cache_deprecated:
-        print("The --enable-intersphinx-cache option is deprecated; "
-              "the cache is now enabled by default.",
-              file=sys.stderr, flush=True)
-    if options.modules:
-        print("The --add-module option is deprecated; "
-              "pass modules as positional arguments instead.",
-              file=sys.stderr, flush=True)
-    if options.packages:
-        print("The --add-package option is deprecated; "
-              "pass packages as positional arguments instead.",
-              file=sys.stderr, flush=True)
+    options = system.options
+    # step 4: make html, if desired
+
+    if options.makehtml:
+        options.makeintersphinx = True
+        
+        system.msg('html', 'writing html to %s using %s.%s'%(
+            options.htmloutput, options.htmlwriter.__module__,
+            options.htmlwriter.__name__))
+
+        writer: IWriter
+        
+        # Always init the writer with the 'base' set of templates at least.
+        template_lookup = TemplateLookup(
+                            importlib_resources.files('pydoctor.themes') / 'base')
+        
+        # Handle theme selection, 'classic' by default.
+        if system.options.theme != 'base':
+            template_lookup.add_templatedir(
+                importlib_resources.files('pydoctor.themes') / system.options.theme)
+
+        # Handle custom HTML templates
+        if system.options.templatedir:
+            try:
+                for t in system.options.templatedir:
+                    template_lookup.add_templatedir(Path(t))
+            except TemplateError  as e:
+                error(str(e))
 
+        build_directory = Path(options.htmloutput)
 
+        writer = options.htmlwriter(build_directory, template_lookup=template_lookup)
 
+        writer.prepOutputDirectory()
+
+        subjects: Sequence[model.Documentable] = ()
+        if options.htmlsubjects:
+            subjects = [system.allobjects[fn] for fn in options.htmlsubjects]
+        else:
+            writer.writeSummaryPages(system)
+            if not options.htmlsummarypages:
+                subjects = system.rootobjects
+        writer.writeIndividualFiles(subjects)
+        
+    if options.makeintersphinx:
+        if not options.makehtml:
+            subjects = system.rootobjects
+        # Generate Sphinx inventory.
+        sphinx_inventory = SphinxInventoryWriter(
+            logger=system.msg,
+            project_name=system.projectname,
+            project_version=system.options.projectversion,
+            )
+        if not os.path.exists(options.htmloutput):
+            os.makedirs(options.htmloutput)
+        sphinx_inventory.generate(
+            subjects=subjects,
+            basepath=options.htmloutput,
+            )
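+
+# Putting it together, main() below does essentially this (sketch):
+#
+#     options = Options.from_args(['path/to/package'])
+#     system = get_system(options)
+#     make(system)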
 
 def main(args: Sequence[str] = sys.argv[1:]) -> int:
     """
@@ -325,192 +152,41 @@ def main(args: Sequence[str] = sys.argv[1:]) -> int:
 
     @param args: Command line arguments to run the CLI.
     """
-    options, args = parse_args(args)
+    options = Options.from_args(args)
 
     exitcode = 0
 
-    if options.configfile:
-        readConfigFile(options)
-
-    cache = prepareCache(clearCache=options.clear_intersphinx_cache,
-                         enableCache=options.enable_intersphinx_cache,
-                         cachePath=options.intersphinx_cache_path,
-                         maxAge=options.intersphinx_cache_max_age)
-
     try:
-        # step 1: make/find the system
-        if options.systemclass:
-            systemclass = findClassFromDottedName(
-                options.systemclass, '--system-class', model.System)
-        else:
-            systemclass = zopeinterface.ZopeInterfaceSystem
-
-        system = systemclass(options)
-        system.fetchIntersphinxInventories(cache)
-
-        if options.htmlsourcebase:
-            if options.projectbasedirectory is None:
-                error("you must specify --project-base-dir "
-                      "when using --html-viewsource-base")
-            system.sourcebase = options.htmlsourcebase
-
-        # step 1.5: check that we're actually going to accomplish something here
-        args = list(args) + options.modules + options.packages
-
-        if options.makehtml == MAKE_HTML_DEFAULT:
-            if not options.testing and not options.makeintersphinx:
-                options.makehtml = True
-            else:
-                options.makehtml = False
-
-        # Support source date epoch:
-        # https://reproducible-builds.org/specs/source-date-epoch/
-        try:
-            system.buildtime = datetime.datetime.utcfromtimestamp(
-                int(os.environ['SOURCE_DATE_EPOCH']))
-        except ValueError as e:
-            error(str(e))
-        except KeyError:
-            pass
-
-        if options.buildtime:
-            try:
-                system.buildtime = datetime.datetime.strptime(
-                    options.buildtime, BUILDTIME_FORMAT)
-            except ValueError as e:
-                error(str(e))
-
-        # step 2: add any packages and modules
 
-        if args:
-            prependedpackage = None
-            if options.prependedpackage:
-                for m in options.prependedpackage.split('.'):
-                    prependedpackage = system.Package(
-                        system, m, prependedpackage)
-                    system.addObject(prependedpackage)
-                    initmodule = system.Module(system, '__init__', prependedpackage)
-                    system.addObject(initmodule)
-            added_paths = set()
-            for arg in args:
-                path = resolve_path(arg)
-                if path in added_paths:
-                    continue
-                if options.projectbasedirectory is not None:
-                    # Note: Path.is_relative_to() was only added in Python 3.9,
-                    #       so we have to use this workaround for now.
-                    try:
-                        path.relative_to(options.projectbasedirectory)
-                    except ValueError as ex:
-                        error(f"Source path lies outside base directory: {ex}")
-                if path.is_dir():
-                    system.msg('addPackage', f"adding directory {path}")
-                    if not (path / '__init__.py').is_file():
-                        error(f"Source directory lacks __init__.py: {path}")
-                    system.addPackage(path, prependedpackage)
-                elif path.is_file():
-                    system.msg('addModuleFromPath', f"adding module {path}")
-                    system.addModuleFromPath(path, prependedpackage)
-                elif path.exists():
-                    error(f"Source path is neither file nor directory: {path}")
-                else:
-                    error(f"Source path does not exist: {path}")
-                added_paths.add(path)
-        else:
+        # Check that we're actually going to accomplish something here
+        if not options.sourcepath:
             error("No source paths given.")
 
-        # step 3: move the system to the desired state
-
-        if system.options.projectname is None:
-            name = '/'.join(system.root_names)
-            system.msg('warning', f"Guessing '{name}' for project name.", thresh=0)
-            system.projectname = name
-        else:
-            system.projectname = system.options.projectname
-
-        system.process()
-
-        # step 4: make html, if desired
-
-        if options.makehtml:
-            options.makeintersphinx = True
-            from pydoctor import templatewriter
-            if options.htmlwriter:
-                writerclass = findClassFromDottedName(
-                    # ignore mypy error: Only concrete class can be given where "Type[IWriter]" is expected
-                    options.htmlwriter, '--html-writer', IWriter) # type: ignore[misc]
-            else:
-                writerclass = templatewriter.TemplateWriter
+        # Build model
+        system = get_system(options)
+        
+        # Produce output (HTML, json, etc.)
+        make(system)
 
-            system.msg('html', 'writing html to %s using %s.%s'%(
-                options.htmloutput, writerclass.__module__,
-                writerclass.__name__))
+        # Print summary of docstring syntax errors
+        if system.docstring_syntax_errors:
+            exitcode = 2
 
-            writer: IWriter
-            
-            # Always init the writer with the 'base' set of templates at least.
-            template_lookup = TemplateLookup(
-                                importlib_resources.files('pydoctor.themes') / 'base')
-            
-            # Handle theme selection, 'classic' by default.
-            if system.options.theme != 'base':
-                template_lookup.add_templatedir(
-                    importlib_resources.files('pydoctor.themes') / system.options.theme)
-
-
-            # Handle custom HTML templates
-            if system.options.templatedir:
-                try:
-                    for t in system.options.templatedir:
-                        template_lookup.add_templatedir(Path(t))
-                except TemplateError  as e:
-                    error(str(e))
-
-            build_directory = Path(options.htmloutput)
-
-            writer = writerclass(build_directory, template_lookup=template_lookup)
-
-            writer.prepOutputDirectory()
-
-            subjects: Sequence[model.Documentable] = ()
-            if options.htmlsubjects:
-                subjects = [system.allobjects[fn] for fn in options.htmlsubjects]
-            else:
-                writer.writeSummaryPages(system)
-                if not options.htmlsummarypages:
-                    subjects = system.rootobjects
-            writer.writeIndividualFiles(subjects)
-            if system.docstring_syntax_errors:
-                def p(msg: str) -> None:
-                    system.msg('docstring-summary', msg, thresh=-1, topthresh=1)
-                p("these %s objects' docstrings contain syntax errors:"
-                  %(len(system.docstring_syntax_errors),))
-                exitcode = 2
-                for fn in sorted(system.docstring_syntax_errors):
-                    p('    '+fn)
+            def p(msg: str) -> None:
+                system.msg('docstring-summary', msg, thresh=-1, topthresh=1)
+            p("these %s objects' docstrings contain syntax errors:"
+                %(len(system.docstring_syntax_errors),))
+            for fn in sorted(system.docstring_syntax_errors):
+                p('    '+fn)
 
         if system.violations and options.warnings_as_errors:
             # Update exit code if the run has produced warnings.
             exitcode = 3
-
-        if options.makeintersphinx:
-            if not options.makehtml:
-                subjects = system.rootobjects
-            # Generate Sphinx inventory.
-            sphinx_inventory = SphinxInventoryWriter(
-                logger=system.msg,
-                project_name=system.projectname,
-                project_version=system.options.projectversion,
-                )
-            if not os.path.exists(options.htmloutput):
-                os.makedirs(options.htmloutput)
-            sphinx_inventory.generate(
-                subjects=subjects,
-                basepath=options.htmloutput,
-                )
+        
     except:
         if options.pdb:
             import pdb
             pdb.post_mortem(sys.exc_info()[2])
         raise
+    
     return exitcode
diff --git a/pydoctor/epydoc/docutils.py b/pydoctor/epydoc/docutils.py
index 762dafa51..02a20e0b5 100644
--- a/pydoctor/epydoc/docutils.py
+++ b/pydoctor/epydoc/docutils.py
@@ -1,9 +1,10 @@
 """
-Collection of helper functions and classes related to the creation L{docutils} nodes.
+Collection of helper functions and classes related to the creation and processing of L{docutils} nodes.
 """
 from typing import Iterable, Iterator, Optional
 
 from docutils import nodes
+from docutils.transforms import parts
 
 __docformat__ = 'epytext en'
 
@@ -44,6 +45,83 @@ def set_node_attributes(node: nodes.Node,
 
     return node
 
+def build_table_of_content(node: nodes.Node, depth: int, level: int = 0) -> Optional[nodes.Node]:
+    """
+    Simplified version of the docutils Contents transform.
+
+    All section nodes MUST have the 'ids' attribute set to a list of strings.
+    """
+
+    def _copy_and_filter(node: nodes.Node) -> nodes.Node:
+        """Return a copy of a title, with references, images, etc. removed."""
+        visitor = parts.ContentsFilter(node.document)
+        node.walkabout(visitor)
+        return visitor.get_entry_text()
+
+    level += 1
+    sections = [sect for sect in node if isinstance(sect, nodes.section)]
+    entries = []
+    for section in sections:
+        title = section[0]
+        entrytext = _copy_and_filter(title)
+        reference = nodes.reference('', '', refid=section['ids'][0],
+                                    *entrytext)
+        ref_id = node.document.set_id(reference,
+                                    suggested_prefix='toc-entry')
+        entry = nodes.paragraph('', '', reference)
+        item = nodes.list_item('', entry)
+        if title.next_node(nodes.reference) is None:
+            title['refid'] = ref_id
+        if level < depth:
+            subsects = build_table_of_content(section, depth=depth, level=level)
+            item += subsects or []
+        entries.append(item)
+    if entries:
+        contents = nodes.bullet_list('', *entries)
+        return contents
+    else:
+        return None
+
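+# Rough sketch: given a parsed docstring document that contains section titles,
+#
+#     toc = build_table_of_content(parsed_doc.to_node(), depth=6)
+#
+# returns a nodes.bullet_list of references (or None when there are no sections);
+# this is what ParsedDocstring.get_toc() relies on.
+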
+def get_lineno(node: nodes.Node) -> int:
+    """
+    Get the 0-based line number for a docutils `nodes.title_reference`.
+
+    Walk up the tree hierarchy until we find an element with a line number, then
+    count the number of newlines until the reference element is found.
+    """
+    # Fixes https://github.com/twisted/pydoctor/issues/237
+        
+    def get_first_parent_lineno(_node: Optional[nodes.Node]) -> int:
+        if _node is None:
+            return 0
+        
+        if _node.line:
+            # This line points to the start of the containing node
+            # Here we are removing 1 from the result because the ParseError class is zero-based
+            # while the docutils line attribute is 1-based.
+            line:int = _node.line-1
+            # Let's figure out how many newlines we need to add to this number 
+            # to get the right line number.
+            parent_rawsource: Optional[str] = _node.rawsource or None
+            node_rawsource: Optional[str] = node.rawsource or None
+
+            if parent_rawsource is not None and \
+               node_rawsource is not None:
+                if node_rawsource in parent_rawsource:
+                    node_index = parent_rawsource.index(node_rawsource)
+                    # Add the required number of newlines to the result
+                    line += parent_rawsource[:node_index].count('\n')
+        else:
+            line = get_first_parent_lineno(_node.parent)
+        return line
+
+    if node.line:
+        line = node.line
+    else:
+        line = get_first_parent_lineno(node.parent)
+    
+    return line # type:ignore[no-any-return]
+
 class wbr(nodes.inline):
     """
     Word break opportunity.
diff --git a/pydoctor/epydoc/markup/__init__.py b/pydoctor/epydoc/markup/__init__.py
index 028d81a74..ebb6a4748 100644
--- a/pydoctor/epydoc/markup/__init__.py
+++ b/pydoctor/epydoc/markup/__init__.py
@@ -33,12 +33,20 @@
 """
 __docformat__ = 'epytext en'
 
-from importlib import import_module
 from typing import Callable, List, Optional, Sequence, Iterator, TYPE_CHECKING
 import abc
 import sys
+import re
+from importlib import import_module
 from inspect import getmodulename
 
+from docutils import nodes, utils
+from twisted.web.template import Tag, tags
+
+from pydoctor import node2stan
+from pydoctor.epydoc.docutils import set_node_attributes, build_table_of_content
+
+
 # In newer Python versions, use importlib.resources from the standard library.
 # On older versions, a compatibility package must be installed from PyPI.
 if sys.version_info < (3, 9):
@@ -46,15 +54,10 @@
 else:
     import importlib.resources as importlib_resources
 
-from docutils import nodes
-from twisted.web.template import Tag
-
 if TYPE_CHECKING:
     from twisted.web.template import Flattenable
     from pydoctor.model import Documentable
 
-from pydoctor import node2stan
-
 ##################################################
 ## Contents
 ##################################################
@@ -98,6 +101,11 @@ class ParsedDocstring(abc.ABC):
     or L{pydoctor.epydoc.markup.restructuredtext.parse_docstring()}.
 
     Subclasses must implement L{has_body()} and L{to_node()}.
+    
+    A default implementation of the L{to_stan()} method, relying on L{to_node()}, is provided;
+    some subclasses override this behaviour.
+
+    The implementation of L{get_toc()} also relies on L{to_node()}.
     """
 
     def __init__(self, fields: Sequence['Field']):
@@ -108,17 +116,36 @@ def __init__(self, fields: Sequence['Field']):
         """
 
         self._stan: Optional[Tag] = None
+        self._summary: Optional['ParsedDocstring'] = None
+        self._compact = True
 
     @abc.abstractproperty
     def has_body(self) -> bool:
-        """Does this docstring have a non-empty body?
+        """
+        Does this docstring have a non-empty body?
 
         The body is the part of the docstring that remains after the fields
         have been split off.
         """
-        raise NotImplementedError()
+    
+    def get_toc(self, depth: int) -> Optional['ParsedDocstring']:
+        """
+        The table of contents of the docstring if titles are defined, or C{None}.
+        """
+        try:
+            document = self.to_node()
+        except NotImplementedError:
+            return None
+        contents = build_table_of_content(document, depth=depth)
+        docstring_toc = utils.new_document('toc')
+        if contents:
+            docstring_toc.extend(contents)
+            from pydoctor.epydoc.markup.restructuredtext import ParsedRstDocstring
+            return ParsedRstDocstring(docstring_toc, ())
+        else:
+            return None
 
-    def to_stan(self, docstring_linker: 'DocstringLinker') -> Tag:
+    def to_stan(self, docstring_linker: 'DocstringLinker', compact:bool=True) -> Tag:
         """
         Translate this docstring to a Stan tree.
 
@@ -129,20 +156,56 @@ def to_stan(self, docstring_linker: 'DocstringLinker') -> Tag:
             links into and out of the docstring.
         @return: The docstring presented as a stan tree.
         """
+        # The following lines are a hack to still show p tags around docstring
+        # content when there is only a single line of text and the argument
+        # compact=False is passed. We clear the cached stan if required.
+        if compact != self._compact and self._stan is not None:
+            self._stan = None
+        self._compact = compact
+
         if self._stan is not None:
-            return self._stan
-        self._stan = Tag('', children=node2stan.node2stan(self.to_node(), docstring_linker).children)
+            return self._stan      
+
+        docstring_stan = node2stan.node2stan(self.to_node(), 
+                                        docstring_linker, 
+                                        compact=compact)
+
+        self._stan = Tag('', children=docstring_stan.children)
         return self._stan
     
     @abc.abstractmethod
     def to_node(self) -> nodes.document:
         """
-        Translate this docstring to a L{docutils.nodes.document}.
+        Translate this docstring to a L{nodes.document}.
+
+        @return: The docstring presented as a L{nodes.document}.
 
-        @return: The docstring presented as a L{docutils.nodes.document}.
+        @note: Some L{ParsedDocstring} subclasses do not support docutils nodes.
+            This method might raise L{NotImplementedError} in such cases (e.g. L{pydoctor.epydoc.markup._types.ParsedTypeDocstring}).
         """
         raise NotImplementedError()
+    
+    def get_summary(self) -> 'ParsedDocstring':
+        """
+        Returns the summary of this docstring.
+        
+        @note: The summary is cached.
+        """
+        # Avoid rare cyclic import error, see https://github.com/twisted/pydoctor/pull/538#discussion_r845668735
+        from pydoctor import epydoc2stan
+        if self._summary is not None:
+            return self._summary
+        try: 
+            _document = self.to_node()
+            visitor = SummaryExtractor(_document)
+            _document.walk(visitor)
+        except Exception: 
+            self._summary = epydoc2stan.ParsedStanOnly(tags.span(class_='undocumented')("Broken summary"))
+        else:
+            self._summary = visitor.summary or epydoc2stan.ParsedStanOnly(tags.span(class_='undocumented')("No summary"))
+        return self._summary
 
+      
 ##################################################
 ## Fields
 ##################################################
@@ -314,3 +377,79 @@ def __repr__(self) -> str:
             return ''
         else:
             return f''
+
+class SummaryExtractor(nodes.NodeVisitor):
+    """
+    A docutils node visitor that extracts the first sentences from
+    the first paragraph in a document.
+    """
+    def __init__(self, document: nodes.document, maxchars:int=200) -> None:
+        """
+        @param document: The docutils document to extract a summary from.
+        @param maxchars: Maximum number of characters the summary can span.
+            Sentences are not cut in the middle, so the actual length
+            might be longer if you have a large first paragraph.
+        """
+        super().__init__(document)
+        self.summary: Optional['ParsedDocstring'] = None
+        self.other_docs: bool = False
+        self.maxchars = maxchars
+
+    def visit_document(self, node: nodes.Node) -> None:
+        self.summary = None
+
+    _SENTENCE_RE_SPLIT = re.compile(r'( *[\.\?!][\'"\)\]]* *)')
+
+    def visit_paragraph(self, node: nodes.Node) -> None:
+        if self.summary is not None:
+            # found a paragraph after the first one
+            self.other_docs = True
+            raise nodes.StopTraversal()
+
+        summary_doc = utils.new_document('summary')
+        summary_pieces = []
+
+        # Extract the first sentences from the first paragraph until the maximum
+        # number of characters is reached or the end of the paragraph.
+        char_count = 0
+
+        for child in node:
+
+            if char_count > self.maxchars:
+                break
+            
+            if isinstance(child, nodes.Text):
+                text = child.astext().replace('\n', ' ')
+                sentences = [item for item in self._SENTENCE_RE_SPLIT.split(text) if item] # Keep non-empty values only
+                
+                for i,s in enumerate(sentences):
+                    
+                    if char_count > self.maxchars:
+                        # Leave the final period alone.
+                        if not (i == len(sentences)-1 and len(s)==1):
+                            break
+
+                    summary_pieces.append(set_node_attributes(nodes.Text(s), document=summary_doc))
+                    char_count += len(s)
+
+            else:
+                summary_pieces.append(set_node_attributes(child.deepcopy(), document=summary_doc))
+                char_count += len(''.join(node2stan.gettext(child)))
+            
+        if char_count > self.maxchars:
+            if not summary_pieces[-1].astext().endswith('.'):
+                summary_pieces.append(set_node_attributes(nodes.Text('...'), document=summary_doc))
+            self.other_docs = True
+
+        set_node_attributes(summary_doc, children=[
+            set_node_attributes(nodes.paragraph('', ''), document=summary_doc, lineno=1, 
+            children=summary_pieces)])
+
+        from pydoctor.epydoc.markup.restructuredtext import ParsedRstDocstring
+        self.summary = ParsedRstDocstring(summary_doc, fields=[])
+
+    def visit_field(self, node: nodes.Node) -> None:
+        raise nodes.SkipNode()
+
+    def unknown_visit(self, node: nodes.Node) -> None:
+        '''Ignore all unknown nodes'''
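
A minimal sketch (not part of the patch) of how the new C{ParsedDocstring.get_summary()} and C{SummaryExtractor} are expected to behave; it assumes pydoctor is importable and that the restructuredtext C{parse_docstring(docstring, errors)} entry point keeps its current signature.

```python
# Hypothetical usage sketch: only the first paragraph (up to roughly
# maxchars=200 characters, cut at sentence boundaries) ends up in the summary.
from pydoctor.epydoc.markup.restructuredtext import parse_docstring

errors = []  # collects ParseError instances
parsed = parse_docstring(
    "First paragraph, used as the summary.\n\n"
    "Second paragraph, only shown in the full docstring.",
    errors)
summary = parsed.get_summary()      # cached; falls back to a "Broken summary"
                                    # or "No summary" span on failure
print(summary.to_node().astext())   # expected: the first paragraph only
```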
diff --git a/pydoctor/epydoc/markup/_pyval_repr.py b/pydoctor/epydoc/markup/_pyval_repr.py
index 9c729cb3e..daf821bc5 100644
--- a/pydoctor/epydoc/markup/_pyval_repr.py
+++ b/pydoctor/epydoc/markup/_pyval_repr.py
@@ -43,12 +43,12 @@
 from inspect import signature
 from typing import Any, AnyStr, Union, Callable, Dict, Iterable, Sequence, Optional, List, Tuple, cast
 
-import sre_parse36 #https://github.com/tristanlatr/sre_parse36
 import attr
 import astor.op_util
 from docutils import nodes, utils
 from twisted.web.template import Tag
 
+from pydoctor.epydoc import sre_parse36
 from pydoctor.epydoc.markup import DocstringLinker
 from pydoctor.epydoc.markup.restructuredtext import ParsedRstDocstring
 from pydoctor.epydoc.docutils import set_node_attributes, wbr, obj_reference
@@ -190,7 +190,7 @@ def __init__(self, document: nodes.document, is_complete: bool, warnings: List[s
         List of warnings
         """
     
-    def to_stan(self, docstring_linker: DocstringLinker) -> Tag:
+    def to_stan(self, docstring_linker: DocstringLinker, compact:bool=False) -> Tag:
         try:
             return Tag('code')(super().to_stan(docstring_linker))
         except Exception as e:
@@ -236,7 +236,19 @@ def enc(c: str) -> str:
         elif c == "\\": 
             c = r'\\'
         return c
-    return ''.join(map(enc, s))
+
+    # Escape it
+    s = ''.join(map(enc, s))
+
+    # Ensure there are no funky characters (like surrogate unicode strings).
+    try:
+        s.encode('utf-8')
+    except UnicodeEncodeError:
+        # If there are, replace them using the backslashreplace error handler.
+        s = s.encode('utf-8', 'backslashreplace').decode('utf-8')
+    
+    return s
+
 def _bytes_escape(b: bytes) -> str:
     return repr(b)[2:-1]
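
A standalone illustration (not pydoctor code) of the backslashreplace fallback added to the string escaping above, using only the standard library:

```python
# Lone surrogates cannot be encoded to UTF-8, so they are rewritten as
# literal backslash escapes before being rendered.
s = "surrogate: \udc80"
try:
    s.encode('utf-8')
except UnicodeEncodeError:
    s = s.encode('utf-8', 'backslashreplace').decode('utf-8')
print(s)  # surrogate: \udc80  (now plain ASCII text)
```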
 
diff --git a/pydoctor/epydoc/markup/_types.py b/pydoctor/epydoc/markup/_types.py
index bddede9c9..db892a378 100644
--- a/pydoctor/epydoc/markup/_types.py
+++ b/pydoctor/epydoc/markup/_types.py
@@ -48,7 +48,7 @@ def to_node(self) -> nodes.document:
         """
         raise NotImplementedError()
 
-    def to_stan(self, docstring_linker: DocstringLinker) -> Tag:
+    def to_stan(self, docstring_linker: DocstringLinker, compact:bool=False) -> Tag:
         """
         Present the type as a stan tree. 
         """
diff --git a/pydoctor/epydoc/markup/epytext.py b/pydoctor/epydoc/markup/epytext.py
index 4fb389271..e1fc62826 100644
--- a/pydoctor/epydoc/markup/epytext.py
+++ b/pydoctor/epydoc/markup/epytext.py
@@ -132,8 +132,9 @@
 #   4. helpers
 #   5. testing
 
-from typing import Any, Callable, Iterable, List, Optional, Sequence, Union, cast, overload
+from typing import Any, Callable, Iterable, List, Optional, Sequence, Set, Union, cast, overload
 import re
+import unicodedata
 
 from docutils import utils, nodes
 from twisted.web.template import Tag
@@ -143,6 +144,38 @@
 from pydoctor.epydoc.docutils import set_node_attributes
 from pydoctor.model import Documentable
 
+##################################################
+## Helper functions
+##################################################
+
+def gettext(node: Union[str, 'Element', List[Union[str, 'Element']]]) -> List[str]:
+    """Return the text inside the epytext element(s)."""
+    filtered: List[str] = []
+    if isinstance(node, str):
+        filtered.append(node)
+    elif isinstance(node, list):
+        for child in node:
+            filtered.extend(gettext(child))
+    elif isinstance(node, Element):
+        filtered.extend(gettext(node.children))
+    return filtered
+
+def slugify(string:str) -> str:
+    # zacharyvoase/slugify is licensed under the The Unlicense
+    """
+    A generic slugifier utility (currently only for Latin-based scripts).
+    Example:
+        >>> slugify("Héllo Wörld")
+        "hello-world"
+    """
+    return re.sub(r'[-\s]+', '-', 
+                re.sub(rb'[^\w\s-]', b'',
+                    unicodedata.normalize('NFKD', string)
+                    .encode('ascii', 'ignore'))
+                .strip()
+                .lower()
+                .decode())
+
 ##################################################
 ## DOM-Like Encoding
 ##################################################
@@ -1347,6 +1380,7 @@ def __init__(self, body: Optional[Element], fields: Sequence['Field']):
         # Caching:
         self._stan: Optional[Tag] = None
         self._document: Optional[nodes.document] = None
+        self._section_slugs: Set[str] = set()
 
     def __str__(self) -> str:
         return str(self._tree)
@@ -1354,7 +1388,18 @@ def __str__(self) -> str:
     @property
     def has_body(self) -> bool:
         return self._tree is not None
-    
+
+    def _slugify(self, text:str) -> str:
+        # Take special care to ensure we don't generate
+        # the same ID twice for sections.
+        s = slugify(text)
+        i = 1
+        while s in self._section_slugs:
+            s = slugify(f"{text}-{i}")
+            i+=1
+        self._section_slugs.add(s)
+        return s
+
     def to_node(self) -> nodes.document:
 
         if self._document is not None:
@@ -1428,7 +1473,11 @@ def _to_node(self, tree: Element) -> Iterable[nodes.Node]:
             yield set_node_attributes(nodes.doctest_block(tree.children[0], tree.children[0]), document=self._document)
         elif tree.tag in ('fieldlist', 'tag', 'arg'):
             raise AssertionError("There should not be any field lists left")
-        elif tree.tag in ('section', 'epytext'):
+        elif tree.tag == 'section':
+            assert len(tree.children)>0, f"empty section {tree}"
+            yield set_node_attributes(nodes.section('', ids=[self._slugify(' '.join(gettext(tree.children[0])))]), 
+                document=self._document, children=variables)
+        elif tree.tag == 'epytext':
             yield set_node_attributes(nodes.section(''), document=self._document, children=variables)
         elif tree.tag == 'symbol':
             symbol = cast(str, tree.children[0])
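
A quick sketch of the slugification behaviour used for the new section IDs, assuming the helper stays importable from C{pydoctor.epydoc.markup.epytext}; duplicate titles are disambiguated by C{ParsedEpytextDocstring._slugify()}.

```python
from pydoctor.epydoc.markup.epytext import slugify

print(slugify("Héllo Wörld"))          # hello-world
print(slugify("  A Section: Notes "))  # a-section-notes
# ParsedEpytextDocstring._slugify() would turn a second "Notes" section
# into "notes-1", a third into "notes-2", and so on.
```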
diff --git a/pydoctor/epydoc/markup/plaintext.py b/pydoctor/epydoc/markup/plaintext.py
index eb05cb42f..0d94437ef 100644
--- a/pydoctor/epydoc/markup/plaintext.py
+++ b/pydoctor/epydoc/markup/plaintext.py
@@ -13,11 +13,12 @@
 
 from typing import List, Callable, Optional
 
-from docutils import nodes
+from docutils import nodes, utils
 from twisted.web.template import Tag, tags
 
 from pydoctor.epydoc.markup import DocstringLinker, ParsedDocstring, ParseError
 from pydoctor.model import Documentable
+from pydoctor.epydoc.docutils import set_node_attributes
 
 def parse_docstring(docstring: str, errors: List[ParseError], processtypes: bool = False) -> ParsedDocstring:
     """
@@ -42,7 +43,7 @@ def __init__(self, text: str):
         ParsedDocstring.__init__(self, ())
         self._text = text
         # Caching:
-        # self._document: Optional[nodes.document] = None
+        self._document: Optional[nodes.document] = None
 
     @property
     def has_body(self) -> bool:
@@ -52,17 +53,28 @@ def has_body(self) -> bool:
     # We don't want to use docutils to process the plaintext format because we won't 
     # actually use the document tree ,it does not contains any additionnalt information compared to the raw docstring. 
     # Also, the consolidated fields handling in restructuredtext.py relies on this "pre" class.
-    def to_stan(self, docstring_linker: DocstringLinker) -> Tag:
+    def to_stan(self, docstring_linker: DocstringLinker, compact:bool=False) -> Tag:
         return tags.p(self._text, class_='pre')
     
     def to_node(self) -> nodes.document:
-        raise NotImplementedError()
+        # This code is mainly used to generate summaries of plaintext docstrings.
 
-        # TODO: Delete this code when we're sure this is the right thing to do.
-        # if self._document is not None:
-        #     return self._document
-        # else:
-        #     self._document = utils.new_document('plaintext')
-        #     self._document = set_node_attributes(self._document, 
-        #         children=set_nodes_parent((nodes.literal_block(rawsource=self._text, text=self._text)), self._document))
-        #     return self._document
+        if self._document is not None:
+            return self._document
+        else:
+            # create document
+            _document = utils.new_document('plaintext')
+
+            # split text into paragraphs
+            paragraphs = [set_node_attributes(nodes.paragraph('',''), children=[
+                            set_node_attributes(nodes.Text(p.strip('\n')), document=_document, lineno=0)], 
+                            document=_document, lineno=0)
+                                for p in self._text.split('\n\n')] 
+            
+            # assemble document
+            _document = set_node_attributes(_document, 
+                                            children=paragraphs,
+                                            document=_document, lineno=0)
+
+            self._document = _document
+            return self._document
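
A small sketch of the plaintext-to-docutils conversion above, assuming C{ParsedPlaintextDocstring} keeps its text-only constructor: paragraphs are split on blank lines so that summaries can also be extracted from plaintext docstrings.

```python
from pydoctor.epydoc.markup.plaintext import ParsedPlaintextDocstring

parsed = ParsedPlaintextDocstring("First paragraph.\n\nSecond paragraph.")
document = parsed.to_node()
print(len(document.children))  # expected: 2 paragraph nodes
print(document.astext())       # "First paragraph.\n\nSecond paragraph."
```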
diff --git a/pydoctor/epydoc/markup/restructuredtext.py b/pydoctor/epydoc/markup/restructuredtext.py
index d84846807..a189bec46 100644
--- a/pydoctor/epydoc/markup/restructuredtext.py
+++ b/pydoctor/epydoc/markup/restructuredtext.py
@@ -13,7 +13,7 @@
 defined by L{ParsedDocstring}.
 
 L{ParsedRstDocstring} is basically just a L{ParsedDocstring} wrapper
-for the C{docutils.nodes.document} class.
+for the C{nodes.document} class.
 
 B{Creating C{ParsedRstDocstring}s}:
 
@@ -50,7 +50,7 @@
 from docutils.parsers.rst.directives.admonitions import BaseAdmonition # type: ignore[import]
 from docutils.readers.standalone import Reader as StandaloneReader
 from docutils.utils import Reporter, new_document
-from docutils.parsers.rst import Directive, directives #type: ignore[attr-defined]
+from docutils.parsers.rst import Directive, directives # type:ignore[attr-defined]
 from docutils.transforms import Transform, frontmatter
 
 from pydoctor.epydoc.markup import Field, ParseError, ParsedDocstring
@@ -147,7 +147,7 @@ def has_body(self) -> bool:
             isinstance(child, nodes.Text) or child.children
             for child in self._document.children
             )
-    
+
     def to_node(self) -> nodes.document:
         return self._document
 
@@ -482,13 +482,35 @@ class PythonCodeDirective(Directive):
     """
 
     has_content = True
-
+    
     def run(self) -> List[nodes.Node]:
         text = '\n'.join(self.content)
         node = nodes.doctest_block(text, text, codeblock=True)
         return [ node ]
 
+class DocutilsAndSphinxCodeBlockAdapter(PythonCodeDirective):
+    # Docutils and Sphinx code blocks both have one optional argument, 
+    # so we accept it here as well, but do nothing with it.
+    required_arguments = 0
+    optional_arguments = 1
+
+    # List all options that docutils.parsers.rst.directives.body.CodeBlock
+    # and sphinx.directives.code.CodeBlock provide. We don't care about their 
+    # values; we just don't want to see them in self.content.
+    option_spec = {'class': directives.class_option,
+                'name': directives.unchanged,
+                'number-lines': directives.unchanged, # integer or None
+                'force': directives.flag,
+                'linenos': directives.flag,
+                'dedent': directives.unchanged, # integer or None
+                'lineno-start': int,
+                'emphasize-lines': directives.unchanged_required,
+                'caption': directives.unchanged_required,
+    }
+
 directives.register_directive('python', PythonCodeDirective)
+directives.register_directive('code', DocutilsAndSphinxCodeBlockAdapter)
+directives.register_directive('code-block', DocutilsAndSphinxCodeBlockAdapter)
 directives.register_directive('versionadded', VersionChange)
 directives.register_directive('versionchanged', VersionChange)
 directives.register_directive('deprecated', VersionChange)
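
An example (hypothetical module code, not part of the patch) of a reStructuredText docstring that the newly registered 'code' / 'code-block' adapters now parse without an unknown-directive error; the options are accepted and ignored.

```python
def frobnicate(timeout):
    """
    Do the thing, then wait.

    .. code-block:: python
        :linenos:

        frobnicate(timeout=30)
    """
```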
diff --git a/pydoctor/epydoc/sre_parse36.py b/pydoctor/epydoc/sre_parse36.py
new file mode 100644
index 000000000..879fd02dc
--- /dev/null
+++ b/pydoctor/epydoc/sre_parse36.py
@@ -0,0 +1,1035 @@
+# Code copied from Python 3.6 - Python Software Foundation - GNU General Public License v3.0
+#
+# The motivation for adding the ``sre_parse36`` module is to provide a 
+# colorizer for regular expressions that produces the *same* expression 
+# as initially provided (the way epydoc did it). 
+# It's packaged with pydoctor for the simplicity of not having to install another requirement from PyPI.
+#
+# The handling of non-capturing groups changed in Python 3.7, so we can't 
+# reproduce the original regular expression from a ``SubPattern`` 
+# instance anymore. This regression is tracked at https://bugs.python.org/issue45674.
+# It seems that it won't be fixed.
+#
+# The issue is that in Python 3.7 and beyond, it is not possible to
+# differentiate capturing groups from non-capturing ones from a ``SubPattern`` 
+# instance.
+#
+# Demonstration:
+# ```python
+# >>> import sre_parse
+# >>> sre_parse.parse("(?:foo (?:bar) | (?:baz))").dump()
+# BRANCH
+#   LITERAL 102
+#   LITERAL 111
+#   LITERAL 111
+#   LITERAL 32
+#   LITERAL 98
+#   LITERAL 97
+#   LITERAL 114
+#   LITERAL 32
+# OR
+#   LITERAL 32
+#   LITERAL 98
+#   LITERAL 97
+#   LITERAL 122
+# ```
+#
+# Whereas in Python 3.6: 
+# 
+# ```python
+# >>> import sre_parse
+# >>> sre_parse.parse("(?:foo (?:bar) | (?:baz))").dump()
+# SUBPATTERN None 0 0
+#   BRANCH
+#     LITERAL 102
+#     LITERAL 111
+#     LITERAL 111
+#     LITERAL 32
+#     SUBPATTERN None 0 0
+#       LITERAL 98
+#       LITERAL 97
+#       LITERAL 114
+#     LITERAL 32
+#   OR
+#     LITERAL 32
+#     SUBPATTERN None 0 0
+#       LITERAL 98
+#       LITERAL 97
+#       LITERAL 122
+# ```
+#
+#   -------------------------------
+#
+#
+# Secret Labs' Regular Expression Engine
+#
+# convert re-style regular expression to sre pattern
+#
+# Copyright (c) 1998-2001 by Secret Labs AB.  All rights reserved.
+#
+# See the sre.py file for information on usage and redistribution.
+#
+
+"""Internal support module for sre"""
+
+# XXX: show string offset and offending character for all errors
+
+from sre_constants import *
+
+SPECIAL_CHARS = ".\\[{()*+?^$|"
+REPEAT_CHARS = "*+?{"
+
+DIGITS = frozenset("0123456789")
+
+OCTDIGITS = frozenset("01234567")
+HEXDIGITS = frozenset("0123456789abcdefABCDEF")
+ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+WHITESPACE = frozenset(" \t\n\r\v\f")
+
+_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT})
+_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY})
+
+ESCAPES = {
+    r"\a": (LITERAL, ord("\a")),
+    r"\b": (LITERAL, ord("\b")),
+    r"\f": (LITERAL, ord("\f")),
+    r"\n": (LITERAL, ord("\n")),
+    r"\r": (LITERAL, ord("\r")),
+    r"\t": (LITERAL, ord("\t")),
+    r"\v": (LITERAL, ord("\v")),
+    r"\\": (LITERAL, ord("\\"))
+}
+
+CATEGORIES = {
+    r"\A": (AT, AT_BEGINNING_STRING), # start of string
+    r"\b": (AT, AT_BOUNDARY),
+    r"\B": (AT, AT_NON_BOUNDARY),
+    r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
+    r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
+    r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
+    r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
+    r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
+    r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
+    r"\Z": (AT, AT_END_STRING), # end of string
+}
+
+FLAGS = {
+    # standard flags
+    "i": SRE_FLAG_IGNORECASE,
+    "L": SRE_FLAG_LOCALE,
+    "m": SRE_FLAG_MULTILINE,
+    "s": SRE_FLAG_DOTALL,
+    "x": SRE_FLAG_VERBOSE,
+    # extensions
+    "a": SRE_FLAG_ASCII,
+    "t": SRE_FLAG_TEMPLATE,
+    "u": SRE_FLAG_UNICODE,
+}
+
+GLOBAL_FLAGS = (SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE |
+                SRE_FLAG_DEBUG | SRE_FLAG_TEMPLATE)
+
+class Verbose(Exception):
+    pass
+
+class Pattern:
+    # master pattern object.  keeps track of global attributes
+    def __init__(self):
+        self.flags = 0
+        self.groupdict = {}
+        self.groupwidths = [None]  # group 0
+        self.lookbehindgroups = None
+    @property
+    def groups(self):
+        return len(self.groupwidths)
+    def opengroup(self, name=None):
+        gid = self.groups
+        self.groupwidths.append(None)
+        if self.groups > MAXGROUPS:
+            raise error("too many groups")
+        if name is not None:
+            ogid = self.groupdict.get(name, None)
+            if ogid is not None:
+                raise error("redefinition of group name %r as group %d; "
+                            "was group %d" % (name, gid,  ogid))
+            self.groupdict[name] = gid
+        return gid
+    def closegroup(self, gid, p):
+        self.groupwidths[gid] = p.getwidth()
+    def checkgroup(self, gid):
+        return gid < self.groups and self.groupwidths[gid] is not None
+
+    def checklookbehindgroup(self, gid, source):
+        if self.lookbehindgroups is not None:
+            if not self.checkgroup(gid):
+                raise source.error('cannot refer to an open group')
+            if gid >= self.lookbehindgroups:
+                raise source.error('cannot refer to group defined in the same '
+                                   'lookbehind subpattern')
+
+class SubPattern:
+    # a subpattern, in intermediate form
+    def __init__(self, pattern, data=None):
+        self.pattern = pattern
+        if data is None:
+            data = []
+        self.data = data
+        self.width = None
+    def dump(self, level=0):
+        nl = True
+        seqtypes = (tuple, list)
+        for op, av in self.data:
+            print(level*"  " + str(op), end='')
+            if op is IN:
+                # member sublanguage
+                print()
+                for op, a in av:
+                    print((level+1)*"  " + str(op), a)
+            elif op is BRANCH:
+                print()
+                for i, a in enumerate(av[1]):
+                    if i:
+                        print(level*"  " + "OR")
+                    a.dump(level+1)
+            elif op is GROUPREF_EXISTS:
+                condgroup, item_yes, item_no = av
+                print('', condgroup)
+                item_yes.dump(level+1)
+                if item_no:
+                    print(level*"  " + "ELSE")
+                    item_no.dump(level+1)
+            elif isinstance(av, seqtypes):
+                nl = False
+                for a in av:
+                    if isinstance(a, SubPattern):
+                        if not nl:
+                            print()
+                        a.dump(level+1)
+                        nl = True
+                    else:
+                        if not nl:
+                            print(' ', end='')
+                        print(a, end='')
+                        nl = False
+                if not nl:
+                    print()
+            else:
+                print('', av)
+    def __repr__(self):
+        return repr(self.data)
+    def __len__(self):
+        return len(self.data)
+    def __delitem__(self, index):
+        del self.data[index]
+    def __getitem__(self, index):
+        if isinstance(index, slice):
+            return SubPattern(self.pattern, self.data[index])
+        return self.data[index]
+    def __setitem__(self, index, code):
+        self.data[index] = code
+    def insert(self, index, code):
+        self.data.insert(index, code)
+    def append(self, code):
+        self.data.append(code)
+    def getwidth(self):
+        # determine the width (min, max) for this subpattern
+        if self.width is not None:
+            return self.width
+        lo = hi = 0
+        for op, av in self.data:
+            if op is BRANCH:
+                i = MAXREPEAT - 1
+                j = 0
+                for av in av[1]:
+                    l, h = av.getwidth()
+                    i = min(i, l)
+                    j = max(j, h)
+                lo = lo + i
+                hi = hi + j
+            elif op is CALL:
+                i, j = av.getwidth()
+                lo = lo + i
+                hi = hi + j
+            elif op is SUBPATTERN:
+                i, j = av[-1].getwidth()
+                lo = lo + i
+                hi = hi + j
+            elif op in _REPEATCODES:
+                i, j = av[2].getwidth()
+                lo = lo + i * av[0]
+                hi = hi + j * av[1]
+            elif op in _UNITCODES:
+                lo = lo + 1
+                hi = hi + 1
+            elif op is GROUPREF:
+                i, j = self.pattern.groupwidths[av]
+                lo = lo + i
+                hi = hi + j
+            elif op is GROUPREF_EXISTS:
+                i, j = av[1].getwidth()
+                if av[2] is not None:
+                    l, h = av[2].getwidth()
+                    i = min(i, l)
+                    j = max(j, h)
+                else:
+                    i = 0
+                lo = lo + i
+                hi = hi + j
+            elif op is SUCCESS:
+                break
+        self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT)
+        return self.width
+
+class Tokenizer:
+    def __init__(self, string):
+        self.istext = isinstance(string, str)
+        self.string = string
+        if not self.istext:
+            string = str(string, 'latin1')
+        self.decoded_string = string
+        self.index = 0
+        self.next = None
+        self.__next()
+    def __next(self):
+        index = self.index
+        try:
+            char = self.decoded_string[index]
+        except IndexError:
+            self.next = None
+            return
+        if char == "\\":
+            index += 1
+            try:
+                char += self.decoded_string[index]
+            except IndexError:
+                raise error("bad escape (end of pattern)",
+                            self.string, len(self.string) - 1) from None
+        self.index = index + 1
+        self.next = char
+    def match(self, char):
+        if char == self.next:
+            self.__next()
+            return True
+        return False
+    def get(self):
+        this = self.next
+        self.__next()
+        return this
+    def getwhile(self, n, charset):
+        result = ''
+        for _ in range(n):
+            c = self.next
+            if c not in charset:
+                break
+            result += c
+            self.__next()
+        return result
+    def getuntil(self, terminator):
+        result = ''
+        while True:
+            c = self.next
+            self.__next()
+            if c is None:
+                if not result:
+                    raise self.error("missing group name")
+                raise self.error("missing %s, unterminated name" % terminator,
+                                 len(result))
+            if c == terminator:
+                if not result:
+                    raise self.error("missing group name", 1)
+                break
+            result += c
+        return result
+    @property
+    def pos(self):
+        return self.index - len(self.next or '')
+    def tell(self):
+        return self.index - len(self.next or '')
+    def seek(self, index):
+        self.index = index
+        self.__next()
+
+    def error(self, msg, offset=0):
+        return error(msg, self.string, self.tell() - offset)
+
+def _class_escape(source, escape):
+    # handle escape code inside character class
+    code = ESCAPES.get(escape)
+    if code:
+        return code
+    code = CATEGORIES.get(escape)
+    if code and code[0] is IN:
+        return code
+    try:
+        c = escape[1:2]
+        if c == "x":
+            # hexadecimal escape (exactly two digits)
+            escape += source.getwhile(2, HEXDIGITS)
+            if len(escape) != 4:
+                raise source.error("incomplete escape %s" % escape, len(escape))
+            return LITERAL, int(escape[2:], 16)
+        elif c == "u" and source.istext:
+            # unicode escape (exactly four digits)
+            escape += source.getwhile(4, HEXDIGITS)
+            if len(escape) != 6:
+                raise source.error("incomplete escape %s" % escape, len(escape))
+            return LITERAL, int(escape[2:], 16)
+        elif c == "U" and source.istext:
+            # unicode escape (exactly eight digits)
+            escape += source.getwhile(8, HEXDIGITS)
+            if len(escape) != 10:
+                raise source.error("incomplete escape %s" % escape, len(escape))
+            c = int(escape[2:], 16)
+            chr(c) # raise ValueError for invalid code
+            return LITERAL, c
+        elif c in OCTDIGITS:
+            # octal escape (up to three digits)
+            escape += source.getwhile(2, OCTDIGITS)
+            c = int(escape[1:], 8)
+            if c > 0o377:
+                raise source.error('octal escape value %s outside of '
+                                   'range 0-0o377' % escape, len(escape))
+            return LITERAL, c
+        elif c in DIGITS:
+            raise ValueError
+        if len(escape) == 2:
+            if c in ASCIILETTERS:
+                raise source.error('bad escape %s' % escape, len(escape))
+            return LITERAL, ord(escape[1])
+    except ValueError:
+        pass
+    raise source.error("bad escape %s" % escape, len(escape))
+
+def _escape(source, escape, state):
+    # handle escape code in expression
+    code = CATEGORIES.get(escape)
+    if code:
+        return code
+    code = ESCAPES.get(escape)
+    if code:
+        return code
+    try:
+        c = escape[1:2]
+        if c == "x":
+            # hexadecimal escape
+            escape += source.getwhile(2, HEXDIGITS)
+            if len(escape) != 4:
+                raise source.error("incomplete escape %s" % escape, len(escape))
+            return LITERAL, int(escape[2:], 16)
+        elif c == "u" and source.istext:
+            # unicode escape (exactly four digits)
+            escape += source.getwhile(4, HEXDIGITS)
+            if len(escape) != 6:
+                raise source.error("incomplete escape %s" % escape, len(escape))
+            return LITERAL, int(escape[2:], 16)
+        elif c == "U" and source.istext:
+            # unicode escape (exactly eight digits)
+            escape += source.getwhile(8, HEXDIGITS)
+            if len(escape) != 10:
+                raise source.error("incomplete escape %s" % escape, len(escape))
+            c = int(escape[2:], 16)
+            chr(c) # raise ValueError for invalid code
+            return LITERAL, c
+        elif c == "0":
+            # octal escape
+            escape += source.getwhile(2, OCTDIGITS)
+            return LITERAL, int(escape[1:], 8)
+        elif c in DIGITS:
+            # octal escape *or* decimal group reference (sigh)
+            if source.next in DIGITS:
+                escape += source.get()
+                if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
+                    source.next in OCTDIGITS):
+                    # got three octal digits; this is an octal escape
+                    escape += source.get()
+                    c = int(escape[1:], 8)
+                    if c > 0o377:
+                        raise source.error('octal escape value %s outside of '
+                                           'range 0-0o377' % escape,
+                                           len(escape))
+                    return LITERAL, c
+            # not an octal escape, so this is a group reference
+            group = int(escape[1:])
+            if group < state.groups:
+                if not state.checkgroup(group):
+                    raise source.error("cannot refer to an open group",
+                                       len(escape))
+                state.checklookbehindgroup(group, source)
+                return GROUPREF, group
+            raise source.error("invalid group reference %d" % group, len(escape) - 1)
+        if len(escape) == 2:
+            if c in ASCIILETTERS:
+                raise source.error("bad escape %s" % escape, len(escape))
+            return LITERAL, ord(escape[1])
+    except ValueError:
+        pass
+    raise source.error("bad escape %s" % escape, len(escape))
+
+def _parse_sub(source, state, verbose, nested):
+    # parse an alternation: a|b|c
+
+    items = []
+    itemsappend = items.append
+    sourcematch = source.match
+    start = source.tell()
+    while True:
+        itemsappend(_parse(source, state, verbose, nested + 1,
+                           not nested and not items))
+        if not sourcematch("|"):
+            break
+
+    if len(items) == 1:
+        return items[0]
+
+    subpattern = SubPattern(state)
+    subpatternappend = subpattern.append
+
+    # check if all items share a common prefix
+    while True:
+        prefix = None
+        for item in items:
+            if not item:
+                break
+            if prefix is None:
+                prefix = item[0]
+            elif item[0] != prefix:
+                break
+        else:
+            # all subitems start with a common "prefix".
+            # move it out of the branch
+            for item in items:
+                del item[0]
+            subpatternappend(prefix)
+            continue # check next one
+        break
+
+    # check if the branch can be replaced by a character set
+    for item in items:
+        if len(item) != 1 or item[0][0] is not LITERAL:
+            break
+    else:
+        # we can store this as a character set instead of a
+        # branch (the compiler may optimize this even more)
+        subpatternappend((IN, [item[0] for item in items]))
+        return subpattern
+
+    subpattern.append((BRANCH, (None, items)))
+    return subpattern
+
+def _parse_sub_cond(source, state, condgroup, verbose, nested):
+    item_yes = _parse(source, state, verbose, nested + 1)
+    if source.match("|"):
+        item_no = _parse(source, state, verbose, nested + 1)
+        if source.next == "|":
+            raise source.error("conditional backref with more than two branches")
+    else:
+        item_no = None
+    subpattern = SubPattern(state)
+    subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
+    return subpattern
+
+def _parse(source, state, verbose, nested, first=False):
+    # parse a simple pattern
+    subpattern = SubPattern(state)
+
+    # precompute constants into local variables
+    subpatternappend = subpattern.append
+    sourceget = source.get
+    sourcematch = source.match
+    _len = len
+    _ord = ord
+
+    while True:
+
+        this = source.next
+        if this is None:
+            break # end of pattern
+        if this in "|)":
+            break # end of subpattern
+        sourceget()
+
+        if verbose:
+            # skip whitespace and comments
+            if this in WHITESPACE:
+                continue
+            if this == "#":
+                while True:
+                    this = sourceget()
+                    if this is None or this == "\n":
+                        break
+                continue
+
+        if this[0] == "\\":
+            code = _escape(source, this, state)
+            subpatternappend(code)
+
+        elif this not in SPECIAL_CHARS:
+            subpatternappend((LITERAL, _ord(this)))
+
+        elif this == "[":
+            here = source.tell() - 1
+            # character set
+            set = []
+            setappend = set.append
+##          if sourcematch(":"):
+##              pass # handle character classes
+            if sourcematch("^"):
+                setappend((NEGATE, None))
+            # check remaining characters
+            start = set[:]
+            while True:
+                this = sourceget()
+                if this is None:
+                    raise source.error("unterminated character set",
+                                       source.tell() - here)
+                if this == "]" and set != start:
+                    break
+                elif this[0] == "\\":
+                    code1 = _class_escape(source, this)
+                else:
+                    code1 = LITERAL, _ord(this)
+                if sourcematch("-"):
+                    # potential range
+                    that = sourceget()
+                    if that is None:
+                        raise source.error("unterminated character set",
+                                           source.tell() - here)
+                    if that == "]":
+                        if code1[0] is IN:
+                            code1 = code1[1][0]
+                        setappend(code1)
+                        setappend((LITERAL, _ord("-")))
+                        break
+                    if that[0] == "\\":
+                        code2 = _class_escape(source, that)
+                    else:
+                        code2 = LITERAL, _ord(that)
+                    if code1[0] != LITERAL or code2[0] != LITERAL:
+                        msg = "bad character range %s-%s" % (this, that)
+                        raise source.error(msg, len(this) + 1 + len(that))
+                    lo = code1[1]
+                    hi = code2[1]
+                    if hi < lo:
+                        msg = "bad character range %s-%s" % (this, that)
+                        raise source.error(msg, len(this) + 1 + len(that))
+                    setappend((RANGE, (lo, hi)))
+                else:
+                    if code1[0] is IN:
+                        code1 = code1[1][0]
+                    setappend(code1)
+
+            # XXX:  should move set optimization to compiler!
+            if _len(set)==1 and set[0][0] is LITERAL:
+                subpatternappend(set[0]) # optimization
+            elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
+                subpatternappend((NOT_LITERAL, set[1][1])) # optimization
+            else:
+                # XXX:  should add charmap optimization here
+                subpatternappend((IN, set))
+
+        elif this in REPEAT_CHARS:
+            # repeat previous item
+            here = source.tell()
+            if this == "?":
+                min, max = 0, 1
+            elif this == "*":
+                min, max = 0, MAXREPEAT
+
+            elif this == "+":
+                min, max = 1, MAXREPEAT
+            elif this == "{":
+                if source.next == "}":
+                    subpatternappend((LITERAL, _ord(this)))
+                    continue
+                min, max = 0, MAXREPEAT
+                lo = hi = ""
+                while source.next in DIGITS:
+                    lo += sourceget()
+                if sourcematch(","):
+                    while source.next in DIGITS:
+                        hi += sourceget()
+                else:
+                    hi = lo
+                if not sourcematch("}"):
+                    subpatternappend((LITERAL, _ord(this)))
+                    source.seek(here)
+                    continue
+                if lo:
+                    min = int(lo)
+                    if min >= MAXREPEAT:
+                        raise OverflowError("the repetition number is too large")
+                if hi:
+                    max = int(hi)
+                    if max >= MAXREPEAT:
+                        raise OverflowError("the repetition number is too large")
+                    if max < min:
+                        raise source.error("min repeat greater than max repeat",
+                                           source.tell() - here)
+            else:
+                raise AssertionError("unsupported quantifier %r" % (char,))
+            # figure out which item to repeat
+            if subpattern:
+                item = subpattern[-1:]
+            else:
+                item = None
+            if not item or (_len(item) == 1 and item[0][0] is AT):
+                raise source.error("nothing to repeat",
+                                   source.tell() - here + len(this))
+            if item[0][0] in _REPEATCODES:
+                raise source.error("multiple repeat",
+                                   source.tell() - here + len(this))
+            if sourcematch("?"):
+                subpattern[-1] = (MIN_REPEAT, (min, max, item))
+            else:
+                subpattern[-1] = (MAX_REPEAT, (min, max, item))
+
+        elif this == ".":
+            subpatternappend((ANY, None))
+
+        elif this == "(":
+            start = source.tell() - 1
+            group = True
+            name = None
+            condgroup = None
+            add_flags = 0
+            del_flags = 0
+            if sourcematch("?"):
+                # options
+                char = sourceget()
+                if char is None:
+                    raise source.error("unexpected end of pattern")
+                if char == "P":
+                    # python extensions
+                    if sourcematch("<"):
+                        # named group: skip forward to end of name
+                        name = source.getuntil(">")
+                        if not name.isidentifier():
+                            msg = "bad character in group name %r" % name
+                            raise source.error(msg, len(name) + 1)
+                    elif sourcematch("="):
+                        # named backreference
+                        name = source.getuntil(")")
+                        if not name.isidentifier():
+                            msg = "bad character in group name %r" % name
+                            raise source.error(msg, len(name) + 1)
+                        gid = state.groupdict.get(name)
+                        if gid is None:
+                            msg = "unknown group name %r" % name
+                            raise source.error(msg, len(name) + 1)
+                        if not state.checkgroup(gid):
+                            raise source.error("cannot refer to an open group",
+                                               len(name) + 1)
+                        state.checklookbehindgroup(gid, source)
+                        subpatternappend((GROUPREF, gid))
+                        continue
+                    else:
+                        char = sourceget()
+                        if char is None:
+                            raise source.error("unexpected end of pattern")
+                        raise source.error("unknown extension ?P" + char,
+                                           len(char) + 2)
+                elif char == ":":
+                    # non-capturing group
+                    group = None
+                elif char == "#":
+                    # comment
+                    while True:
+                        if source.next is None:
+                            raise source.error("missing ), unterminated comment",
+                                               source.tell() - start)
+                        if sourceget() == ")":
+                            break
+                    continue
+                elif char in "=!<":
+                    # lookahead assertions
+                    dir = 1
+                    if char == "<":
+                        char = sourceget()
+                        if char is None:
+                            raise source.error("unexpected end of pattern")
+                        if char not in "=!":
+                            raise source.error("unknown extension ?<" + char,
+                                               len(char) + 2)
+                        dir = -1 # lookbehind
+                        lookbehindgroups = state.lookbehindgroups
+                        if lookbehindgroups is None:
+                            state.lookbehindgroups = state.groups
+                    p = _parse_sub(source, state, verbose, nested + 1)
+                    if dir < 0:
+                        if lookbehindgroups is None:
+                            state.lookbehindgroups = None
+                    if not sourcematch(")"):
+                        raise source.error("missing ), unterminated subpattern",
+                                           source.tell() - start)
+                    if char == "=":
+                        subpatternappend((ASSERT, (dir, p)))
+                    else:
+                        subpatternappend((ASSERT_NOT, (dir, p)))
+                    continue
+                elif char == "(":
+                    # conditional backreference group
+                    condname = source.getuntil(")")
+                    group = None
+                    if condname.isidentifier():
+                        condgroup = state.groupdict.get(condname)
+                        if condgroup is None:
+                            msg = "unknown group name %r" % condname
+                            raise source.error(msg, len(condname) + 1)
+                    else:
+                        try:
+                            condgroup = int(condname)
+                            if condgroup < 0:
+                                raise ValueError
+                        except ValueError:
+                            msg = "bad character in group name %r" % condname
+                            raise source.error(msg, len(condname) + 1) from None
+                        if not condgroup:
+                            raise source.error("bad group number",
+                                               len(condname) + 1)
+                        if condgroup >= MAXGROUPS:
+                            msg = "invalid group reference %d" % condgroup
+                            raise source.error(msg, len(condname) + 1)
+                    state.checklookbehindgroup(condgroup, source)
+                elif char in FLAGS or char == "-":
+                    # flags
+                    flags = _parse_flags(source, state, char)
+                    if flags is None:  # global flags
+                        if not first or subpattern:
+                            import warnings
+                            warnings.warn(
+                                'Flags not at the start of the expression %r%s' % (
+                                    source.string[:20],  # truncate long regexes
+                                    ' (truncated)' if len(source.string) > 20 else '',
+                                ),
+                                DeprecationWarning, stacklevel=nested + 6
+                            )
+                        if (state.flags & SRE_FLAG_VERBOSE) and not verbose:
+                            raise Verbose
+                        continue
+                    add_flags, del_flags = flags
+                    group = None
+                else:
+                    raise source.error("unknown extension ?" + char,
+                                       len(char) + 1)
+
+            # parse group contents
+            if group is not None:
+                try:
+                    group = state.opengroup(name)
+                except error as err:
+                    raise source.error(err.msg, len(name) + 1) from None
+            if condgroup:
+                p = _parse_sub_cond(source, state, condgroup, verbose, nested + 1)
+            else:
+                sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and
+                               not (del_flags & SRE_FLAG_VERBOSE))
+                p = _parse_sub(source, state, sub_verbose, nested + 1)
+            if not source.match(")"):
+                raise source.error("missing ), unterminated subpattern",
+                                   source.tell() - start)
+            if group is not None:
+                state.closegroup(group, p)
+            subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p)))
+
+        elif this == "^":
+            subpatternappend((AT, AT_BEGINNING))
+
+        elif this == "$":
+            subpattern.append((AT, AT_END))
+
+        else:
+            raise AssertionError("unsupported special character %r" % (char,))
+
+    return subpattern
+
+def _parse_flags(source, state, char):
+    sourceget = source.get
+    add_flags = 0
+    del_flags = 0
+    if char != "-":
+        while True:
+            add_flags |= FLAGS[char]
+            char = sourceget()
+            if char is None:
+                raise source.error("missing -, : or )")
+            if char in ")-:":
+                break
+            if char not in FLAGS:
+                msg = "unknown flag" if char.isalpha() else "missing -, : or )"
+                raise source.error(msg, len(char))
+    if char == ")":
+        state.flags |= add_flags
+        return None
+    if add_flags & GLOBAL_FLAGS:
+        raise source.error("bad inline flags: cannot turn on global flag", 1)
+    if char == "-":
+        char = sourceget()
+        if char is None:
+            raise source.error("missing flag")
+        if char not in FLAGS:
+            msg = "unknown flag" if char.isalpha() else "missing flag"
+            raise source.error(msg, len(char))
+        while True:
+            del_flags |= FLAGS[char]
+            char = sourceget()
+            if char is None:
+                raise source.error("missing :")
+            if char == ":":
+                break
+            if char not in FLAGS:
+                msg = "unknown flag" if char.isalpha() else "missing :"
+                raise source.error(msg, len(char))
+    assert char == ":"
+    if del_flags & GLOBAL_FLAGS:
+        raise source.error("bad inline flags: cannot turn off global flag", 1)
+    if add_flags & del_flags:
+        raise source.error("bad inline flags: flag turned on and off", 1)
+    return add_flags, del_flags
+
+def fix_flags(src, flags):
+    # Check and fix flags according to the type of pattern (str or bytes)
+    if isinstance(src, str):
+        if flags & SRE_FLAG_LOCALE:
+            raise ValueError("cannot use LOCALE flag with a str pattern")
+        if not flags & SRE_FLAG_ASCII:
+            flags |= SRE_FLAG_UNICODE
+        elif flags & SRE_FLAG_UNICODE:
+            raise ValueError("ASCII and UNICODE flags are incompatible")
+    else:
+        if flags & SRE_FLAG_UNICODE:
+            raise ValueError("cannot use UNICODE flag with a bytes pattern")
+        if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII:
+            raise ValueError("ASCII and LOCALE flags are incompatible")
+    return flags
+
+def parse(str, flags=0, pattern=None):
+    # parse 're' pattern into list of (opcode, argument) tuples
+
+    source = Tokenizer(str)
+
+    if pattern is None:
+        pattern = Pattern()
+    pattern.flags = flags
+    pattern.str = str
+
+    try:
+        p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, 0)
+    except Verbose:
+        # the VERBOSE flag was switched on inside the pattern.  to be
+        # on the safe side, we'll parse the whole thing again...
+        pattern = Pattern()
+        pattern.flags = flags | SRE_FLAG_VERBOSE
+        pattern.str = str
+        source.seek(0)
+        p = _parse_sub(source, pattern, True, 0)
+
+    p.pattern.flags = fix_flags(str, p.pattern.flags)
+
+    if source.next is not None:
+        assert source.next == ")"
+        raise source.error("unbalanced parenthesis")
+
+    if flags & SRE_FLAG_DEBUG:
+        p.dump()
+
+    return p
+
+def parse_template(source, pattern):
+    # parse 're' replacement string into list of literals and
+    # group references
+    s = Tokenizer(source)
+    sget = s.get
+    groups = []
+    literals = []
+    literal = []
+    lappend = literal.append
+    def addgroup(index, pos):
+        if index > pattern.groups:
+            raise s.error("invalid group reference %d" % index, pos)
+        if literal:
+            literals.append(''.join(literal))
+            del literal[:]
+        groups.append((len(literals), index))
+        literals.append(None)
+    groupindex = pattern.groupindex
+    while True:
+        this = sget()
+        if this is None:
+            break # end of replacement string
+        if this[0] == "\\":
+            # group
+            c = this[1]
+            if c == "g":
+                name = ""
+                if not s.match("<"):
+                    raise s.error("missing <")
+                name = s.getuntil(">")
+                if name.isidentifier():
+                    try:
+                        index = groupindex[name]
+                    except KeyError:
+                        raise IndexError("unknown group name %r" % name)
+                else:
+                    try:
+                        index = int(name)
+                        if index < 0:
+                            raise ValueError
+                    except ValueError:
+                        raise s.error("bad character in group name %r" % name,
+                                      len(name) + 1) from None
+                    if index >= MAXGROUPS:
+                        raise s.error("invalid group reference %d" % index,
+                                      len(name) + 1)
+                addgroup(index, len(name) + 1)
+            elif c == "0":
+                if s.next in OCTDIGITS:
+                    this += sget()
+                    if s.next in OCTDIGITS:
+                        this += sget()
+                lappend(chr(int(this[1:], 8) & 0xff))
+            elif c in DIGITS:
+                isoctal = False
+                if s.next in DIGITS:
+                    this += sget()
+                    if (c in OCTDIGITS and this[2] in OCTDIGITS and
+                        s.next in OCTDIGITS):
+                        this += sget()
+                        isoctal = True
+                        c = int(this[1:], 8)
+                        if c > 0o377:
+                            raise s.error('octal escape value %s outside of '
+                                          'range 0-0o377' % this, len(this))
+                        lappend(chr(c))
+                if not isoctal:
+                    addgroup(int(this[1:]), len(this) - 1)
+            else:
+                try:
+                    this = chr(ESCAPES[this][1])
+                except KeyError:
+                    if c in ASCIILETTERS:
+                        import warnings
+                        warnings.warn('bad escape %s' % this,
+                                      DeprecationWarning, stacklevel=4)
+                lappend(this)
+        else:
+            lappend(this)
+    if literal:
+        literals.append(''.join(literal))
+    if not isinstance(source, str):
+        # The tokenizer implicitly decodes bytes objects as latin-1, we must
+        # therefore re-encode the final representation.
+        literals = [None if s is None else s.encode('latin-1') for s in literals]
+    return groups, literals
+
+def expand_template(template, match):
+    g = match.group
+    empty = match.string[:0]
+    groups, literals = template
+    literals = literals[:]
+    try:
+        for index, group in groups:
+            literals[index] = g(group) or empty
+    except IndexError:
+        raise error("invalid group reference %d" % index)
+    return empty.join(literals)
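
A short sketch of why the vendored parser matters for the regex colorizer: parsing with the bundled module keeps explicit SUBPATTERN entries for non-capturing groups (as in the Python 3.6 dump shown above), so the original pattern text can be reconstructed.

```python
from pydoctor.epydoc import sre_parse36

# Parse a pattern made of non-capturing groups and inspect its structure.
subpattern = sre_parse36.parse(r"(?:foo (?:bar) | (?:baz))")
subpattern.dump()  # prints SUBPATTERN None 0 0 nodes, unlike sre_parse on Python >= 3.7
```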
diff --git a/pydoctor/epydoc2stan.py b/pydoctor/epydoc2stan.py
index fc2b1ffb5..3b7dc52ad 100644
--- a/pydoctor/epydoc2stan.py
+++ b/pydoctor/epydoc2stan.py
@@ -4,29 +4,40 @@
 
 from collections import defaultdict
 from typing import (
-    TYPE_CHECKING, Callable, ClassVar, DefaultDict, Dict, Generator, Iterable,
-    Iterator, List, Mapping, Optional, Sequence, Tuple, Union
+    TYPE_CHECKING, Any, Callable, ClassVar, DefaultDict, Dict, Generator, 
+    Iterator, List, Mapping, Optional, Sequence, Tuple, 
 )
 import ast
-import itertools
+import re
 
 import attr
 
-from pydoctor import model
+from pydoctor import model, linker
 from pydoctor.epydoc.markup import Field as EpydocField, ParseError, get_parser_by_name
 from twisted.web.template import Tag, tags
-from pydoctor.epydoc.markup import DocstringLinker, ParsedDocstring
+from pydoctor.epydoc.markup import ParsedDocstring
 import pydoctor.epydoc.markup.plaintext
 from pydoctor.epydoc.markup._pyval_repr import colorize_pyval, colorize_inline_pyval
 
 if TYPE_CHECKING:
     from twisted.web.template import Flattenable
 
+taglink = linker.taglink
+"""
+Alias to L{pydoctor.linker.taglink()}.
+"""
+
 def get_parser(obj: model.Documentable) -> Callable[[str, List[ParseError], bool], ParsedDocstring]:
     """
     Get the C{parse_docstring(str, List[ParseError], bool) -> ParsedDocstring} function. 
     """    
-    # Use module's __docformat__ if specified, else use system's.
+    # Use the module's __docformat__ if specified, else use the system's.
+    # Except when the system's docformat is plaintext; in that case, always use plaintext.
+    # See https://github.com/twisted/pydoctor/issues/503 for the reason
+    # behind this behavior.
+    if obj.system.options.docformat == 'plaintext':
+        return pydoctor.epydoc.markup.plaintext.parse_docstring
+    # The docstring should be parsed using the format of the module it was inherited from.
     docformat = obj.module.docformat or obj.system.options.docformat
     
     try:
@@ -50,176 +61,6 @@ def get_docstring(
             return None, source
     return None, None
 
-
-def taglink(o: model.Documentable, page_url: str, label: Optional["Flattenable"] = None) -> Tag:
-    if not o.isVisible:
-        o.system.msg("html", "don't link to %s"%o.fullName())
-
-    if label is None:
-        label = o.fullName()
-
-    url = o.url
-    if url.startswith(page_url + '#'):
-        # When linking to an item on the same page, omit the path.
-        # Besides shortening the HTML, this also avoids the page being reloaded
-        # if the query string is non-empty.
-        url = url[len(page_url):]
-
-    ret: Tag = tags.a(label, href=url)
-    return ret
-
-
-class _EpydocLinker(DocstringLinker):
-
-    def __init__(self, obj: model.Documentable):
-        self.obj = obj
-
-    def look_for_name(self,
-            name: str,
-            candidates: Iterable[model.Documentable],
-            lineno: int
-            ) -> Optional[model.Documentable]:
-        part0 = name.split('.')[0]
-        potential_targets = []
-        for src in candidates:
-            if part0 not in src.contents:
-                continue
-            target = src.resolveName(name)
-            if target is not None and target not in potential_targets:
-                potential_targets.append(target)
-        if len(potential_targets) == 1:
-            return potential_targets[0]
-        elif len(potential_targets) > 1:
-            self.obj.report(
-                "ambiguous ref to %s, could be %s" % (
-                    name,
-                    ', '.join(ob.fullName() for ob in potential_targets)),
-                'resolve_identifier_xref', lineno)
-        return None
-
-    def look_for_intersphinx(self, name: str) -> Optional[str]:
-        """
-        Return link for `name` based on intersphinx inventory.
-
-        Return None if link is not found.
-        """
-        return self.obj.system.intersphinx.getLink(name)
-
-    def link_to(self, identifier: str, label: "Flattenable") -> Tag:
-        fullID = self.obj.expandName(identifier)
-
-        target = self.obj.system.objForFullName(fullID)
-        if target is not None:
-            return taglink(target, self.obj.page_object.url, label)
-
-        url = self.look_for_intersphinx(fullID)
-        if url is not None:
-            return tags.a(label, href=url)
-
-        return tags.transparent(label)
-
-    def link_xref(self, target: str, label: "Flattenable", lineno: int) -> Tag:
-        xref: "Flattenable"
-        try:
-            resolved = self._resolve_identifier_xref(target, lineno)
-        except LookupError:
-            xref = label
-        else:
-            if isinstance(resolved, model.Documentable):
-                xref = taglink(resolved, self.obj.page_object.url, label)
-            else:
-                xref = tags.a(label, href=resolved)
-        ret: Tag = tags.code(xref)
-        return ret
-
-    def resolve_identifier(self, identifier: str) -> Optional[str]:
-        fullID = self.obj.expandName(identifier)
-
-        target = self.obj.system.objForFullName(fullID)
-        if target is not None:
-            return target.url
-
-        return self.look_for_intersphinx(fullID)
-
-    def _resolve_identifier_xref(self,
-            identifier: str,
-            lineno: int
-            ) -> Union[str, model.Documentable]:
-        """
-        Resolve a crossreference link to a Python identifier.
-        This will resolve the identifier to any reasonable target,
-        even if it has to look in places where Python itself would not.
-
-        @param identifier: The name of the Python identifier that
-            should be linked to.
-        @param lineno: The line number within the docstring at which the
-            crossreference is located.
-        @return: The referenced object within our system, or the URL of
-            an external target (found via Intersphinx).
-        @raise LookupError: If C{identifier} could not be resolved.
-        """
-
-        # There is a lot of DWIM here. Look for a global match first,
-        # to reduce the chance of a false positive.
-
-        # Check if 'identifier' is the fullName of an object.
-        target = self.obj.system.objForFullName(identifier)
-        if target is not None:
-            return target
-
-        # Check if the fullID exists in an intersphinx inventory.
-        fullID = self.obj.expandName(identifier)
-        target_url = self.look_for_intersphinx(fullID)
-        if not target_url:
-            # FIXME: https://github.com/twisted/pydoctor/issues/125
-            # expandName is unreliable so in the case fullID fails, we
-            # try our luck with 'identifier'.
-            target_url = self.look_for_intersphinx(identifier)
-        if target_url:
-            return target_url
-
-        # Since there was no global match, go look for the name in the
-        # context where it was used.
-
-        # Check if 'identifier' refers to an object by Python name resolution
-        # in our context. Walk up the object tree and see if 'identifier' refers
-        # to an object by Python name resolution in each context.
-        src: Optional[model.Documentable] = self.obj
-        while src is not None:
-            target = src.resolveName(identifier)
-            if target is not None:
-                return target
-            src = src.parent
-
-        # Walk up the object tree again and see if 'identifier' refers to an
-        # object in an "uncle" object.  (So if p.m1 has a class C, the
-        # docstring for p.m2 can say L{C} to refer to the class in m1).
-        # If at any level 'identifier' refers to more than one object, complain.
-        src = self.obj
-        while src is not None:
-            target = self.look_for_name(identifier, src.contents.values(), lineno)
-            if target is not None:
-                return target
-            src = src.parent
-
-        # Examine every module and package in the system and see if 'identifier'
-        # names an object in each one.  Again, if more than one object is
-        # found, complain.
-        target = self.look_for_name(
-            identifier, self.obj.system.objectsOfType(model.Module), lineno)
-        if target is not None:
-            return target
-
-        message = f'Cannot find link target for "{fullID}"'
-        if identifier != fullID:
-            message = f'{message}, resolved from "{identifier}"'
-        root_idx = fullID.find('.')
-        if root_idx != -1 and fullID[:root_idx] not in self.obj.system.root_names:
-            message += ' (you can link to external docs with --intersphinx)'
-        self.obj.report(message, 'resolve_identifier_xref', lineno)
-        raise LookupError(identifier)
-
-
 @attr.s(auto_attribs=True)
 class FieldDesc:
     """
@@ -248,14 +89,16 @@ def format(self) -> Generator[Tag, None, None]:
         formatted = self.body or self._UNDOCUMENTED
         fieldNameTd: List[Tag] = []
         if self.name:
-            name = self.name
-
             # Add the stars to the params names just before generating the field stan, not before.
-            if isinstance(name, VariableArgument):
-                name = f"*{name}"
-            elif isinstance(name, KeywordArgument):
-                name = f"**{name}"
-            
+            if isinstance(self.name, VariableArgument):
+                prefix = "*"
+            elif isinstance(self.name, KeywordArgument):
+                prefix = "**"
+            else:
+                prefix = ""
+
+            name = tags.transparent(prefix, insert_break_points(self.name))
+
             stan_name = tags.span(class_="fieldArg")(name)
             if self.type:
                 stan_name(":")
@@ -279,7 +122,6 @@ def format(self) -> Generator[Tag, None, None]:
         yield tags.td(tags.code(self.type), class_="fieldArgContainer")
         yield tags.td(self.body or self._UNDOCUMENTED)
 
-
 def format_desc_list(label: str, descs: Sequence[FieldDesc]) -> Iterator[Tag]:
     """
     Format list of L{FieldDesc}. Used for param, returns, raises, etc.
@@ -350,7 +192,7 @@ def from_epydoc(cls, field: EpydocField, source: model.Documentable) -> 'Field':
 
     def format(self) -> Tag:
         """Present this field's body as HTML."""
-        return self.body.to_stan(_EpydocLinker(self.source))
+        return self.body.to_stan(self.source.docstring_linker)
 
     def report(self, message: str) -> None:
         self.source.report(message, lineno_offset=self.lineno, section='docstring')
@@ -396,7 +238,7 @@ class FieldHandler:
 
     def __init__(self, obj: model.Documentable):
         self.obj = obj
-        self._linker = _EpydocLinker(self.obj)
+        self._linker = self.obj.docstring_linker
 
         self.types: Dict[str, Optional[Tag]] = {}
 
@@ -659,14 +501,14 @@ def _is_none_literal(node: ast.expr) -> bool:
     return isinstance(node, (ast.Constant, ast.NameConstant)) and node.value is None
 
 
-def reportErrors(obj: model.Documentable, errs: Sequence[ParseError]) -> None:
+def reportErrors(obj: model.Documentable, errs: Sequence[ParseError], section:str='docstring') -> None:
     if errs and obj.fullName() not in obj.system.docstring_syntax_errors:
         obj.system.docstring_syntax_errors.add(obj.fullName())
         for err in errs:
             obj.report(
-                'bad docstring: ' + err.descr(),
+                f'bad {section}: ' + err.descr(),
                 lineno_offset=(err.linenum() or 1) - 1,
-                section='docstring'
+                section=section
                 )
 
 
@@ -674,16 +516,20 @@ def parse_docstring(
         obj: model.Documentable,
         doc: str,
         source: model.Documentable,
+        markup: Optional[str]=None,
+        section: str='docstring',
         ) -> ParsedDocstring:
     """Parse a docstring.
     @param obj: The object we're parsing the documentation for.
     @param doc: The docstring.
     @param source: The object on which the docstring is defined.
         This can differ from C{obj} if the docstring is inherited.
+    @param markup: Parse the docstring with the given markup, ignoring the system's options.
+        Useful for creating L{ParsedDocstring}s from reStructuredText, for instance.
+    @param section: A custom section name to use when reporting parse errors.
     """
 
-    # the docstring should be parsed using the format of the module it was inherited from
-    parser = get_parser(source)
+    parser = get_parser(source) if not markup else get_parser_by_name(markup, obj)
     errs: List[ParseError] = []
     try:
         parsed_doc = parser(doc, errs, obj.system.options.processtypes)
@@ -691,13 +537,22 @@ def parse_docstring(
         errs.append(ParseError(f'{e.__class__.__name__}: {e}', 1))
         parsed_doc = pydoctor.epydoc.markup.plaintext.parse_docstring(doc, errs)
     if errs:
-        reportErrors(source, errs)
+        reportErrors(source, errs, section=section)
     return parsed_doc
 
+def ensure_parsed_docstring(obj: model.Documentable) -> Optional[model.Documentable]:
+    """
+    Currently, it's not 100% clear at what point the L{Documentable.parsed_docstring} attribute is set:
+    it can be set by the AST builder or by a later processing step.
+
+    This function ensures that the C{parsed_docstring} attribute of a documentable is set to its final value.
 
-def format_docstring(obj: model.Documentable) -> Tag:
-    """Generate an HTML representation of a docstring"""
-
+    @returns: 
+        - If the C{obj.parsed_docstring} is set to a L{ParsedDocstring} instance: 
+          The source object of the docstring (might be different 
+          from C{obj} if the documentation is inherited).
+        - If the object is undocumented: C{None}.
+    """
     doc, source = get_docstring(obj)
 
     # Use cached or split version if possible.
@@ -716,22 +571,78 @@ def format_docstring(obj: model.Documentable) -> Tag:
     if parsed_doc is None and doc is not None:
         parsed_doc = parse_docstring(obj, doc, source)
         obj.parsed_docstring = parsed_doc
+    
+    if obj.parsed_docstring is not None:
+        return source
+    else:
+        return None
+
+
+class ParsedStanOnly(ParsedDocstring):
+    """
+    A L{ParsedDocstring} directly constructed from stan, for caching purposes.
+    
+    The L{to_stan} method simply returns what was given to L{ParsedStanOnly.__init__}.
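+
+    For example (illustrative), C{ParsedStanOnly(tags.p("hello")).to_stan(linker)}
+    returns the exact C{tags.p("hello")} tag that was passed to the constructor;
+    the linker argument is ignored.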
+    """
+    def __init__(self, stan: Tag):
+        super().__init__(fields=[])
+        self._fromstan = stan
+    def has_body(self) -> bool:
+        return True
+    def to_stan(self, docstring_linker: Any, compact:bool=False) -> Tag:
+        return self._fromstan
+    def to_node(self) -> Any:
+        raise NotImplementedError()
+
+def _get_parsed_summary(obj: model.Documentable) -> Tuple[Optional[model.Documentable], ParsedDocstring]:
+    """
+    Ensure that the L{model.Documentable.parsed_summary} attribute of a documentable is set to its final value,
+    without generating the summary twice.
+
+    @returns: A tuple C{(source, parsed docstring)}.
+    """
+    source = ensure_parsed_docstring(obj)
+    
+    if obj.parsed_summary is not None:
+        return (source, obj.parsed_summary)
+
+    if source is None:
+        #  if obj.kind is model.DocumentableKind.ALIAS: 
+        #     assert isinstance(obj, model.Attribute)
+        #     # Aliases are generally not documented, so we never mark them as "undocumented", we simply link the object.
+        #     return Tag('', children=format_alias_value(obj).children)
+        summary_parsed_doc: ParsedDocstring = ParsedStanOnly(format_undocumented(obj))
+    else:
+        # Tell mypy that if we found a docstring, we also have its source.
+        assert obj.parsed_docstring is not None
+        summary_parsed_doc = obj.parsed_docstring.get_summary()
+    
+    assert summary_parsed_doc is not None
+    obj.parsed_summary = summary_parsed_doc
+
+    return (source, summary_parsed_doc)
+
+def format_docstring(obj: model.Documentable) -> Tag:
+    """Generate an HTML representation of a docstring"""
+
+    source = ensure_parsed_docstring(obj)
 
     ret: Tag = tags.div
-    if parsed_doc is None:
+    if source is None:
         # Aliases are generally not documented, so we never mark them as "undocumented".
         if obj.kind is not model.DocumentableKind.ALIAS:
             ret(tags.p(class_='undocumented')("Undocumented"))
     else:
+        assert obj.parsed_docstring is not None, "ensure_parsed_docstring() did not do its job"
         try:
-            stan = parsed_doc.to_stan(_EpydocLinker(source))
+            stan = obj.parsed_docstring.to_stan(source.docstring_linker, compact=False)
         except Exception as e:
             errs = [ParseError(f'{e.__class__.__name__}: {e}', 1)]
-            if doc is None:
+            if source.docstring is None:
                 stan = tags.p(class_="undocumented")('Broken description')
             else:
-                parsed_doc_plain = pydoctor.epydoc.markup.plaintext.parse_docstring(doc, errs)
-                stan = parsed_doc_plain.to_stan(_EpydocLinker(source))
+                parsed_doc_plain = pydoctor.epydoc.markup.plaintext.parse_docstring(source.docstring, errs)
+                stan = parsed_doc_plain.to_stan(source.docstring_linker)
             reportErrors(source, errs)
         if stan.tagName:
             ret(stan)
@@ -741,8 +652,9 @@ def format_docstring(obj: model.Documentable) -> Tag:
     fh = FieldHandler(obj)
     if isinstance(obj, model.Function):
         fh.set_param_types_from_annotations(obj.annotations)
-    if parsed_doc is not None:
-        for field in parsed_doc.fields:
+    if source is not None:
+        assert obj.parsed_docstring is not None, "ensure_parsed_docstring() did not do its job"
+        for field in obj.parsed_docstring.fields:
             fh.handle(Field.from_epydoc(field, source))
     if isinstance(obj, model.Function):
         fh.resolve_types()
@@ -754,51 +666,24 @@ def format_docstring(obj: model.Documentable) -> Tag:
 def format_summary(obj: model.Documentable) -> Tag:
     """Generate an shortened HTML representation of a docstring."""
 
-    doc, source = get_docstring(obj)
-
-    if (doc is None or source is not obj) and isinstance(obj, model.Attribute):
-        # Attributes can be documented as fields in their parent's docstring.
-        parsed_doc = obj.parsed_docstring
-    else:
-        parsed_doc = None
-
-    if parsed_doc is not None:
-        # The docstring was split off from the Attribute's parent docstring.
-        source = obj.parent
-        assert source is not None
-    elif doc is None:
-        if obj.kind is model.DocumentableKind.ALIAS: 
-            assert isinstance(obj, model.Attribute)
-            # Aliases are generally not documented, so we never mark them as "undocumented", we simply link the object.
-            return Tag('', children=format_alias_value(obj).children)
-        else:
-            return format_undocumented(obj)
-    else:
-        # Tell mypy that if we found a docstring, we also have its source.
-        assert source is not None
-        # Use up to three first non-empty lines of doc string as summary.
-        lines = [
-            line.strip()
-            for line in itertools.takewhile(
-                lambda line: line.strip(),
-                itertools.dropwhile(lambda line: not line.strip(), doc.split('\n'))
-                )
-            ]
-        if len(lines) > 3:
-            return tags.span(class_='undocumented')("No summary")
-        parsed_doc = parse_docstring(obj, ' '.join(lines), source)
-
+    source, parsed_doc = _get_parsed_summary(obj)
+    if not source:
+        source = obj
     try:
-        stan = parsed_doc.to_stan(_EpydocLinker(source))
+        # Disallow same_page_optimization in order to make sure we're not
+        # breaking links when including the summaries on other pages.
+        assert isinstance(source.docstring_linker, linker._CachedEpydocLinker)
+        source.docstring_linker.same_page_optimization = False
+        stan = parsed_doc.to_stan(source.docstring_linker)
+        source.docstring_linker.same_page_optimization = True
+    
     except Exception:
         # This problem will likely be reported by the full docstring as well,
         # so don't spam the log.
-        return tags.span(class_='undocumented')("Broken description")
+        stan = tags.span(class_='undocumented')("Broken description")
+        obj.parsed_summary = ParsedStanOnly(stan)
 
-    content: Sequence["Flattenable"] = [stan] if stan.tagName else stan.children
-    if content and isinstance(content[0], Tag) and content[0].tagName == 'p':
-        content = content[0].children
-    return Tag('')(*content)
+    return stan
 
 
 def format_undocumented(obj: model.Documentable) -> Tag:
@@ -838,7 +723,7 @@ def type2stan(obj: model.Documentable) -> Optional[Tag]:
     if parsed_type is None:
         return None
     else:
-        return parsed_type.to_stan(_EpydocLinker(obj))
+        return parsed_type.to_stan(obj.docstring_linker)
 
 def get_parsed_type(obj: model.Documentable) -> Optional[ParsedDocstring]:
     parsed_type = obj.parsed_type
@@ -851,6 +736,17 @@ def get_parsed_type(obj: model.Documentable) -> Optional[ParsedDocstring]:
 
     return None
 
+def format_toc(obj: model.Documentable) -> Optional[Tag]:
+    # Load the parsed_docstring if it's not already done. 
+    ensure_parsed_docstring(obj)
+
+    if obj.parsed_docstring:
+        if obj.system.options.sidebartocdepth > 0:
+            toc = obj.parsed_docstring.get_toc(depth=obj.system.options.sidebartocdepth)
+            if toc:
+                return toc.to_stan(obj.docstring_linker)
+    return None
+
 
 field_name_to_kind = {
     'ivar': model.DocumentableKind.INSTANCE_VARIABLE,
@@ -934,7 +830,7 @@ def _format_constant_value(obj: model.Attribute) -> Iterator["Flattenable"]:
         linelen=obj.system.options.pyvalreprlinelen,
         maxlines=obj.system.options.pyvalreprmaxlines)
     
-    value_repr = doc.to_stan(_EpydocLinker(obj))
+    value_repr = doc.to_stan(obj.docstring_linker)
 
     # Report eventual warnings. It warns when a regex failed to parse or the html2stan() function fails.
     for message in doc.warnings:
@@ -960,5 +856,72 @@ def format_alias_value(obj: model.Attribute) -> Tag:
         # TODO: contextualize the name in the context of the module/class, currently this always shows the fullName of the object.
         alias = tags.code(taglink(target, obj.page_object.url))
     else:
-        alias =  colorize_inline_pyval(obj.value).to_stan(_EpydocLinker(obj.parent))
-    return tags.p(tags.em("Alias to ", alias))
\ No newline at end of file
+        alias = colorize_inline_pyval(obj.value).to_stan(obj.parent.docstring_linker)
+    return tags.p(tags.em("Alias to ", alias))
+
+def _split_indentifier_parts_on_case(indentifier:str) -> List[str]:
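+    """
+    Split an identifier into parts, breaking before underscores and before
+    capitalised words, so that the separators stay attached to the part that
+    follows them.
+
+    Illustrative examples (a sketch of the intended behaviour): C{'my_name'}
+    gives C{['my', '_name']} and C{'myName'} gives C{['my', 'Name']}; leading
+    and trailing underscores stay attached to the first and last parts.
+    """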
+
+    def split(text:str, sep:str) -> List[str]:
+        # We use \u200b as a temporary token so the split keeps the separator attached to the part that follows it.
+        return text.replace(sep, '\u200b'+sep).split('\u200b')
+
+    match = re.match('(_{1,2})?(.*?)(_{1,2})?$', indentifier)
+    assert match is not None # the regex always matches
+    prefix, text, suffix = match.groups(default='')
+    text_parts = []
+    
+    if text.islower() or text.isupper():
+        # We assume snake_case or SCREAMING_SNAKE_CASE.
+        text_parts = split(text, '_')
+    else:
+        # We assume camelCase.  We're not using a regex because we also want it
+        # to work with non-ASCII characters (and the Python re module does not
+        # support checking for Unicode properties using something like \p{Lu}).
+        current_part = ''
+        previous_was_upper = False
+        for c in text:
+
+            if c.isupper() and not previous_was_upper:
+                text_parts.append(current_part)
+                current_part = ''
+            
+            current_part += c
+            previous_was_upper = c.isupper()
+        
+        if current_part:
+            text_parts.append(current_part)
+
+    if not text_parts:  # the name is composed only of underscores
+        text_parts = ['']
+    
+    if prefix:
+        text_parts[0] = prefix + text_parts[0]
+    if suffix:
+        text_parts[-1] = text_parts[-1] + suffix
+
+    return text_parts
+
+def insert_break_points(text: str) -> 'Flattenable':
+    """
+    Browsers aren't smart enough to recognize word-breaking opportunities in
+    snake_case or camelCase identifiers, so this function helps them out by
+    inserting word break opportunities (C{<wbr>} tags).
+
+    @note: It supports full dotted names and also inserts a break opportunity at each dot.
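+
+    For example (illustrative), C{insert_break_points('pydoctor.epydoc2stan')}
+    flattens to roughly C{pydoctor<wbr>.epydoc2stan}, and a name such as
+    C{camelCaseName} gets a break opportunity before each capitalised word.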
+    """
+
+    # We use tags.wbr instead of zero-width spaces because
+    # zero-width spaces can interfere in subtle ways when copy/pasting a name.
+    
+    r: List['Flattenable'] = []
+    parts = text.split('.')
+    for i,t in enumerate(parts):
+        _parts = _split_indentifier_parts_on_case(t)
+        for i_,p in enumerate(_parts):
+            r += [p]
+            if i_ != len(_parts)-1:
+                r += [tags.wbr()]
+        if i != len(parts)-1:
+            r += [tags.wbr(), '.']
+    return tags.transparent(*r)
+
diff --git a/pydoctor/extensions/__init__.py b/pydoctor/extensions/__init__.py
new file mode 100644
index 000000000..aadac69d7
--- /dev/null
+++ b/pydoctor/extensions/__init__.py
@@ -0,0 +1,160 @@
+"""
+Pydoctor's extension system.
+
+An extension can be composed of mixin classes, AST builder visitor extensions and post processors.
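+
+An extension module is expected to define a C{setup_pydoctor_extension()} function
+that receives an L{ExtRegistrar} instance. A minimal sketch (the names are illustrative)::
+
+    from pydoctor import extensions
+
+    class MyVisitor(extensions.ModuleVisitorExt):
+        ...
+
+    def setup_pydoctor_extension(r: extensions.ExtRegistrar) -> None:
+        r.register_astbuilder_visitor(MyVisitor)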
+"""
+import importlib
+import sys
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Type, Union, cast
+
+# In newer Python versions, use importlib.resources from the standard library.
+# On older versions, a compatibility package must be installed from PyPI.
+if sys.version_info < (3, 9):
+    import importlib_resources
+else:
+    import importlib.resources as importlib_resources
+
+import attr
+from pydoctor import model, astutils, astbuilder
+
+class ClassMixin:
+    """Base class for mixins applied to L{model.Class} objects."""
+class ModuleMixin:
+    """Base class for mixins applied to L{model.Module} objects."""
+class PackageMixin:
+    """Base class for mixins applied to L{model.Package} objects."""
+class FunctionMixin:
+    """Base class for mixins applied to L{model.Function} objects."""
+class AttributeMixin:
+    """Base class for mixins applied to L{model.Attribute} objects."""
+class DocumentableMixin(ModuleMixin, ClassMixin, FunctionMixin, AttributeMixin):
+    """Base class for mixins applied to all L{model.Documentable} objects."""
+class CanContainImportsDocumentableMixin(PackageMixin, ModuleMixin, ClassMixin):
+    """Base class for mixins applied to L{model.Class}, L{model.Module} and L{model.Package} objects."""
+class InheritableMixin(FunctionMixin, AttributeMixin):
+    """Base class for mixins applied to L{model.Function} and L{model.Attribute} objects."""
+
+MixinT = Union[ClassMixin, ModuleMixin, PackageMixin, FunctionMixin, AttributeMixin]
+
+def _importlib_resources_contents(package: str) -> Iterable[str]:
+    """Return an iterable of entries in C{package}.
+
+    Note that not all entries are resources.  Specifically, directories are
+    not considered resources. 
+    """
+    return [path.name for path in importlib_resources.files(package).iterdir()]
+
+
+def _importlib_resources_is_resource(package: str, name: str) -> bool:
+    """True if C{name} is a resource inside C{package}.
+
+    Directories are B{not} resources.
+    """
+    resource = name
+    return any(
+        traversable.name == resource and traversable.is_file()
+        for traversable in importlib_resources.files(package).iterdir()
+    )
+
+def _get_submodules(pkg: str) -> Iterator[str]:
+    for name in _importlib_resources_contents(pkg):
+        if (not name.startswith('_') and _importlib_resources_is_resource(pkg, name)) and name.endswith('.py'):
+            name = name[:-len('.py')]
+            yield f"{pkg}.{name}"
+
+def _get_setup_extension_func_from_module(module: str) -> Callable[['ExtRegistrar'], None]:
+    """
+    Look for the special function C{setup_pydoctor_extension} in the provided module.
+
+    @raises AssertionError: If the module does not provide a valid C{setup_pydoctor_extension()} function.
+    @raises ModuleNotFoundError: If the module is not found.
+    @returns: The C{setup_pydoctor_extension()} callable.
+    """
+    mod = importlib.import_module(module)
+    
+    assert hasattr(mod, 'setup_pydoctor_extension'), f"{mod}.setup_pydoctor_extension() function not found."
+    assert callable(mod.setup_pydoctor_extension), f"{mod}.setup_pydoctor_extension should be a callable."
+    return cast('Callable[[ExtRegistrar], None]', mod.setup_pydoctor_extension)
+
+_mixin_to_class_name: Dict[Any, str] = {
+        ClassMixin: 'Class',
+        ModuleMixin: 'Module',
+        PackageMixin: 'Package',
+        FunctionMixin: 'Function',
+        AttributeMixin: 'Attribute',
+    }
+
+def _get_mixins(*mixins: Type[MixinT]) -> Dict[str, List[Type[MixinT]]]:
+    """
+    Transform a list of mixin classes into a dict mapping each
+    concrete class name to the mixins that must be applied to it.
+    This relies on the fact that mixins should extend one of the
+    base mixin classes in the L{pydoctor.extensions} module.
+
+    @raises AssertionError: If a mixin does not extend any of the
+        provided base mixin classes.
+    """
+    mixins_by_name: Dict[str, List[Type[MixinT]]] = {}
+    for mixin in mixins:
+        added = False
+        for k,v in _mixin_to_class_name.items():
+            if isinstance(mixin, type) and issubclass(mixin, k):
+                mixins_by_name.setdefault(v, [])
+                mixins_by_name[v].append(mixin)
+                added = True
+                # Do not break: a mixin may be registered for several concrete
+                # classes if it extends more than one base mixin class.
+        if not added:
+            assert False, f"Invalid mixin {mixin.__name__!r}. Mixins must subclass one of the base class."
+    return mixins_by_name
+
+@attr.s(auto_attribs=True)
+class ExtRegistrar:
+    """
+    The extension registrar class provides utilities to register an extension's components.
+    """
+    system: model.System
+
+    def register_mixin(self, *mixin: Type[MixinT]) -> None:
+        """
+        Register mixin(s) for model objects. Mixins should extend one of the
+        base mixin classes in the L{pydoctor.extensions} module, e.g. L{ClassMixin} or L{DocumentableMixin}.
+        """
+        self.system._factory.add_mixins(**_get_mixins(*mixin))
+
+    def register_astbuilder_visitor(self, 
+            *visitor: Type[astutils.NodeVisitorExt]) -> None:
+        """
+        Register AST visitor(s). Typically, visitor extensions inherit from L{ModuleVisitorExt}.
+        """
+        self.system._astbuilder_visitors.extend(visitor)
+    
+    def register_post_processor(self, 
+            *post_processor: Callable[[model.System], None]) -> None:
+        """
+        Register post processor(s).
+         
+        A post-processor is simply a one-argument callable that receives
+        the processed L{model.System} and operates on the L{model.Documentable} tree.
+        """
+        self.system._post_processors.extend(post_processor)
+
+def load_extension_module(system:'model.System', mod: str) -> None:
+    """
+    Load the pydoctor extension module into the system.
+    """
+    setup_pydoctor_extension = _get_setup_extension_func_from_module(mod)
+    setup_pydoctor_extension(ExtRegistrar(system))
+
+def get_extensions() -> Iterator[str]:
+    """
+    Get the full names of all the pydoctor extension modules.
+    """
+    return _get_submodules('pydoctor.extensions')
+
+class ModuleVisitorExt(astutils.NodeVisitorExt):
+    """
+    Base class to extend the L{astbuilder.ModuleVistor}.
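+
+    Subclasses typically implement C{visit_<NodeType>} and/or C{depart_<NodeType>}
+    methods (for example C{visit_Assign} or C{depart_ClassDef}) and can access the
+    main L{astbuilder.ModuleVistor} through C{self.visitor}.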
+    """
+    when = astutils.NodeVisitorExt.When.AFTER
+    visitor: astbuilder.ModuleVistor
diff --git a/pydoctor/extensions/deprecate.py b/pydoctor/extensions/deprecate.py
new file mode 100644
index 000000000..90ffb87d3
--- /dev/null
+++ b/pydoctor/extensions/deprecate.py
@@ -0,0 +1,157 @@
+
+# Copyright (c) Twisted Matrix Laboratories.
+# Adjusted from file twisted/python/_pydoctor.py
+
+"""
+Support for L{twisted.python.deprecate}.
+"""
+
+import ast
+import inspect
+from numbers import Number
+from typing import Optional, Sequence, Tuple, Union, TYPE_CHECKING
+
+from pydoctor import astbuilder, model, epydoc2stan, astutils, extensions
+
+from twisted.python.deprecate import deprecated
+from incremental import Version
+
+if TYPE_CHECKING:
+    import incremental
+
+def getDeprecated(self:model.Documentable, decorators:Sequence[ast.expr]) -> None:
+    """
+    Given a list of decorators and the object they decorate, attach a
+    deprecation notice to the object's C{extra_info} if any of the decorators
+    is a Twisted deprecation decorator.
+    """
+    for a in decorators:
+        if isinstance(a, ast.Call):
+            fn = astbuilder.node2fullname(a.func, self)
+
+            if fn in (
+                "twisted.python.deprecate.deprecated",
+                "twisted.python.deprecate.deprecatedProperty",
+            ):
+                try:
+                    version, text = deprecatedToUsefulText(self, self.name, a)
+                except Exception as e:
+                    # It's a reference or something that we can't figure out
+                    # from the AST.
+                    self.report(str(e), section='deprecation text')
+                else:
+                    # Add a deprecation notice using the reStructuredText ``.. deprecated::`` directive.
+                    parsed_info = epydoc2stan.parse_docstring(
+                        obj=self,
+                        doc=f".. deprecated:: {version}\n   {text}", 
+                        source=self, 
+                        markup='restructuredtext', 
+                        section='deprecation text',)
+                    self.extra_info.append(parsed_info)
+
+class ModuleVisitor(extensions.ModuleVisitorExt):
+    
+    def depart_ClassDef(self, node:ast.ClassDef) -> None:
+        """
+        Called after a class definition is visited.
+        """
+        try:
+            cls = self.visitor.builder.current.contents[node.name]
+        except KeyError:
+            # Classes inside functions are ignored.
+            return
+        assert isinstance(cls, model.Class)
+        getDeprecated(cls, node.decorator_list)
+
+    def depart_FunctionDef(self, node:ast.FunctionDef) -> None:
+        """
+        Called after a function definition is visited.
+        """
+        try:
+            # Property or Function
+            func = self.visitor.builder.current.contents[node.name]
+        except KeyError:
+            # Inner functions are ignored.
+            return
+        assert isinstance(func, (model.Function, model.Attribute))
+        getDeprecated(func, node.decorator_list)
+
+_incremental_Version_signature = inspect.signature(Version)
+def versionToUsefulObject(version:ast.Call) -> 'incremental.Version':
+    """
+    Change an AST C{Version()} to a real one.
+
+    @note: Only the required arguments are used; the C{release_candidate}, C{prerelease}, C{post} and C{dev} arguments are ignored.
+    @raises ValueError: If the incremental.Version call is invalid.
+    """
+    bound_args = astutils.bind_args(_incremental_Version_signature, version)
+    package = astutils.get_str_value(bound_args.arguments['package'])
+    major: Union[Number, str, None] = astutils.get_num_value(bound_args.arguments['major']) or \
+        astutils.get_str_value(bound_args.arguments['major'])
+    if isinstance(major, str) and major != "NEXT": 
+        raise ValueError("Invalid call to incremental.Version(), 'major' should be an int or 'NEXT'.")
+    return Version(package, major, 
+        minor=astutils.get_num_value(bound_args.arguments['minor']),
+        micro=astutils.get_num_value(bound_args.arguments['micro']),)
+
+_deprecation_text_with_replacement_template = "``{name}`` was deprecated in {package} {version}; please use `{replacement}` instead."
+_deprecation_text_without_replacement_template = "``{name}`` was deprecated in {package} {version}."
+
+_deprecated_signature = inspect.signature(deprecated)
+def deprecatedToUsefulText(ctx:model.Documentable, name:str, deprecated:ast.Call) -> Tuple[str, str]:
+    """
+    Convert a C{@deprecated} decorator call into a display string.
+
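+    For example (illustrative), for an object named C{oldName} decorated with
+    C{@deprecated(Version("Twisted", 21, 2, 0), replacement="newName")}, the
+    returned text reads roughly: ``oldName`` was deprecated in Twisted 21.2.0;
+    please use `newName` instead.
+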
+    @param ctx: The context in which the deprecation is evaluated.
+    @param name: The name of the thing we're deprecating.
+    @param deprecated: AST call to L{twisted.python.deprecate.deprecated} or L{twisted.python.deprecate.deprecatedProperty}.
+    @returns: The version and text to use in the deprecation warning.
+    @raises ValueError or TypeError: If the arguments of the C{@deprecated} call cannot be interpreted.
+    """
+
+    bound_args = astutils.bind_args(_deprecated_signature, deprecated)
+    _version_call = bound_args.arguments['version']
+    if not isinstance(_version_call, ast.Call) or \
+       astbuilder.node2fullname(_version_call.func, ctx) != "incremental.Version":
+        raise ValueError("Invalid call to twisted.python.deprecate.deprecated(), first argument should be a call to incremental.Version()")
+    
+    version = versionToUsefulObject(_version_call)
+    replacement: Optional[str] = None
+
+    replvalue = bound_args.arguments.get('replacement')
+    if replvalue is not None:
+        if astutils.node2dottedname(replvalue) is not None:
+            replacement = astbuilder.node2fullname(replvalue, ctx)
+        else:
+            replacement = astutils.get_str_value(replvalue)
+    _version = version.public()
+    _package = version.package
+
+    # Avoid HTML injection.
+    def validate_identifier(_text:str) -> bool:
+        if not all(p.isidentifier() for p in _text.split('.')):
+            return False
+        return True
+
+    if not validate_identifier(_package):
+        raise ValueError(f"Invalid package name: {_package!r}")
+    if replacement is not None and not validate_identifier(replacement):
+        raise ValueError(f"Invalid replacement name: {replacement!r}")
+    
+    if replacement is not None:
+        text = _deprecation_text_with_replacement_template.format(
+            name=name, 
+            package=_package,
+            version=_version,
+            replacement=replacement
+        )
+    else:
+        text = _deprecation_text_without_replacement_template.format(
+            name=name, 
+            package=_package,
+            version=_version,
+        )
+    return _version, text
+
+def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None:
+    r.register_astbuilder_visitor(ModuleVisitor)
diff --git a/pydoctor/zopeinterface.py b/pydoctor/extensions/zopeinterface.py
similarity index 72%
rename from pydoctor/zopeinterface.py
rename to pydoctor/extensions/zopeinterface.py
index 676991d17..5db6c8b3f 100644
--- a/pydoctor/zopeinterface.py
+++ b/pydoctor/extensions/zopeinterface.py
@@ -4,11 +4,11 @@
 import ast
 import re
 
-from pydoctor import astbuilder
+from pydoctor import astbuilder, astutils, extensions
 from pydoctor import model
 from pydoctor.epydoc.markup._pyval_repr import colorize_inline_pyval
 
-class ZopeInterfaceModule(model.Module):
+class ZopeInterfaceModule(model.Module, extensions.ModuleMixin):
 
     def setup(self) -> None:
         super().setup()
@@ -21,7 +21,7 @@ def allImplementedInterfaces(self) -> Iterable[str]:
         return self.implements_directly
 
 
-class ZopeInterfaceClass(model.Class):
+class ZopeInterfaceClass(model.Class, extensions.ClassMixin):
     isinterface = False
     isschemafield = False
     isinterfaceclass = False
@@ -66,12 +66,12 @@ def _inheritedDocsources(obj: model.Documentable) -> Iterator[model.Documentable
                 if name in io2.contents:
                     yield io2.contents[name]
 
-class ZopeInterfaceFunction(model.Function):
+class ZopeInterfaceFunction(model.Function, extensions.FunctionMixin):
     def docsources(self) -> Iterator[model.Documentable]:
         yield from super().docsources()
         yield from _inheritedDocsources(self)
 
-class ZopeInterfaceAttribute(model.Attribute):
+class ZopeInterfaceAttribute(model.Attribute, extensions.AttributeMixin):
     def docsources(self) -> Iterator[model.Documentable]:
         yield from super().docsources()
         yield from _inheritedDocsources(self)
@@ -124,7 +124,10 @@ def _handle_implemented(
 
         if isinstance(iface, ZopeInterfaceClass):
             if iface.isinterface:
-                iface.implementedby_directly.append(implementer)
+                # The system might be post-processed multiple times during tests,
+                # so check whether the implementer is already registered.
+                if implementer not in iface.implementedby_directly:
+                    iface.implementedby_directly.append(implementer)
             else:
                 implementer.report(
                     'Class "%s" is not an interface' % iface_name,
@@ -165,48 +168,44 @@ def namesInterface(system: model.System, name: str) -> bool:
         return False
     return obj.isinterface
 
-class ZopeInterfaceModuleVisitor(astbuilder.ModuleVistor):
+class ZopeInterfaceModuleVisitor(extensions.ModuleVisitorExt):
 
-    def _handleAssignmentInModule(self,
+    def _handleZopeInterfaceAssignmentInModule(self,
             target: str,
-            annotation: Optional[ast.expr],
             expr: Optional[ast.expr],
             lineno: int
             ) -> None:
-        super()._handleAssignmentInModule(
-                target, annotation, expr, lineno)
-
         if not isinstance(expr, ast.Call):
             return
-        funcName = astbuilder.node2fullname(expr.func, self.builder.current)
+        funcName = astbuilder.node2fullname(expr.func, self.visitor.builder.current)
         if funcName is None:
             return
-        ob = self.system.objForFullName(funcName)
+        ob = self.visitor.system.objForFullName(funcName)
         if isinstance(ob, ZopeInterfaceClass) and ob.isinterfaceclass:
             # TODO: Process 'bases' and '__doc__' arguments.
-            interface = self.builder.pushClass(target, lineno)
+            # TODO: Currently, this implementation will create a duplicate class 
+            # with the same name as the attribute, overriding it.
+            interface = self.visitor.builder.pushClass(target, lineno)
             assert isinstance(interface, ZopeInterfaceClass)
             interface.isinterface = True
             interface.implementedby_directly = []
             interface.bases = []
             interface.baseobjects = []
-            self.builder.popClass()
-            self.newAttr = interface
+            self.visitor.builder.popClass()
+            self.visitor.builder.currentAttr = interface
 
-    def _handleAssignmentInClass(self,
+    def _handleZopeInterfaceAssignmentInClass(self,
             target: str,
-            annotation: Optional[ast.expr],
             expr: Optional[ast.expr],
             lineno: int
             ) -> None:
-        super()._handleAssignmentInClass(target, annotation, expr, lineno)
 
         if not isinstance(expr, ast.Call):
             return
-        attr: Optional[model.Documentable] = self.builder.current.contents.get(target)
+        attr: Optional[model.Documentable] = self.visitor.builder.current.contents.get(target)
         if attr is None:
             return
-        funcName = astbuilder.node2fullname(expr.func, self.builder.current)
+        funcName = astbuilder.node2fullname(expr.func, self.visitor.builder.current)
         if funcName is None:
             return
 
@@ -225,7 +224,7 @@ def _handleAssignmentInClass(self,
                 attr.kind = model.DocumentableKind.SCHEMA_FIELD
 
             else:
-                cls = self.builder.system.objForFullName(funcName)
+                cls = self.visitor.builder.system.objForFullName(funcName)
                 if not (isinstance(cls, ZopeInterfaceClass) and cls.isschemafield):
                     return
                 attr.kind = model.DocumentableKind.SCHEMA_FIELD
@@ -241,9 +240,28 @@ def _handleAssignmentInClass(self,
                 attr.report(
                     'description of field "%s" is not a string literal' % attr.name,
                     section='zopeinterface')
+    
+    def _handleZopeInterfaceAssignment(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
+        for target in node.targets if isinstance(node, ast.Assign) else [node.target]:
+            dottedname = astutils.node2dottedname(target) 
+            if dottedname and len(dottedname)==1:
+                # Here, we consider single name assignment only
+                current = self.visitor.builder.current
+                if isinstance(current, model.Class):
+                    self._handleZopeInterfaceAssignmentInClass(
+                        dottedname[0], node.value, node.lineno
+                    )
+                elif isinstance(current, model.Module):
+                    self._handleZopeInterfaceAssignmentInModule(
+                        dottedname[0], node.value, node.lineno
+                    )
+        
+    def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
+        self._handleZopeInterfaceAssignment(node)
+    visit_AnnAssign = visit_Assign
 
     def visit_Call(self, node: ast.Call) -> None:
-        base = astbuilder.node2fullname(node.func, self.builder.current)
+        base = astbuilder.node2fullname(node.func, self.visitor.builder.current)
         if base is None:
             return
         meth = getattr(self, "visit_Call_" + base.replace('.', '_'), None)
@@ -251,32 +269,30 @@ def visit_Call(self, node: ast.Call) -> None:
             meth(base, node)
 
     def visit_Call_zope_interface_moduleProvides(self, funcName: str, node: ast.Call) -> None:
-        if not isinstance(self.builder.current, ZopeInterfaceModule):
-            self.default(node)
+        if not isinstance(self.visitor.builder.current, ZopeInterfaceModule):
             return
 
-        addInterfaceInfoToModule(self.builder.current, node.args)
+        addInterfaceInfoToModule(self.visitor.builder.current, node.args)
 
     def visit_Call_zope_interface_implements(self, funcName: str, node: ast.Call) -> None:
-        cls = self.builder.current
+        cls = self.visitor.builder.current
         if not isinstance(cls, ZopeInterfaceClass):
-            self.default(node)
             return
         addInterfaceInfoToClass(cls, node.args, cls,
                                 funcName == 'zope.interface.implementsOnly')
     visit_Call_zope_interface_implementsOnly = visit_Call_zope_interface_implements
 
     def visit_Call_zope_interface_classImplements(self, funcName: str, node: ast.Call) -> None:
-        parent = self.builder.current
+        parent = self.visitor.builder.current
         if not node.args:
-            self.builder.system.msg(
+            self.visitor.builder.system.msg(
                 'zopeinterface',
                 f'{parent.description}:{node.lineno}: '
                 f'required argument to classImplements() missing',
                 thresh=-1)
             return
         clsname = astbuilder.node2fullname(node.args[0], parent)
-        cls = None if clsname is None else self.system.allobjects.get(clsname)
+        cls = None if clsname is None else self.visitor.system.allobjects.get(clsname)
         if not isinstance(cls, ZopeInterfaceClass):
             if clsname is None:
                 argdesc = '1'
@@ -284,7 +300,7 @@ def visit_Call_zope_interface_classImplements(self, funcName: str, node: ast.Cal
             else:
                 argdesc = f'"{clsname}"'
                 problem = 'not found' if cls is None else 'is not a class'
-            self.builder.system.msg(
+            self.visitor.builder.system.msg(
                 'zopeinterface',
                 f'{parent.description}:{node.lineno}: '
                 f'argument {argdesc} to classImplements() {problem}',
@@ -294,18 +310,18 @@ def visit_Call_zope_interface_classImplements(self, funcName: str, node: ast.Cal
                                 funcName == 'zope.interface.classImplementsOnly')
     visit_Call_zope_interface_classImplementsOnly = visit_Call_zope_interface_classImplements
 
-    def visit_ClassDef(self, node: ast.ClassDef) -> Optional[ZopeInterfaceClass]:
-        cls = super().visit_ClassDef(node)
-        if cls is None:
-            return None
-        assert isinstance(cls, ZopeInterfaceClass)
+    def depart_ClassDef(self, node: ast.ClassDef) -> None:
+        cls = self.visitor.builder.current.contents.get(node.name)
+        
+        if not isinstance(cls, ZopeInterfaceClass):
+            return
 
-        bases = [self.builder.current.expandName(base) for base in cls.bases]
+        bases = [self.visitor.builder.current.expandName(base) for base in cls.bases]
 
         if 'zope.interface.interface.InterfaceClass' in bases:
             cls.isinterfaceclass = True
 
-        if any(namesInterface(self.system, b) for b in cls.bases):
+        if any(namesInterface(self.visitor.system, b) for b in cls.bases):
             cls.isinterface = True
             cls.kind = model.DocumentableKind.INTERFACE
             cls.implementedby_directly = []
@@ -321,25 +337,18 @@ def visit_ClassDef(self, node: ast.ClassDef) -> Optional[ZopeInterfaceClass]:
                     continue
                 addInterfaceInfoToClass(cls, args, cls.parent, False)
 
-        return cls
-
-
-class ZopeInterfaceASTBuilder(astbuilder.ASTBuilder):
-    ModuleVistor = ZopeInterfaceModuleVisitor
-
-
-class ZopeInterfaceSystem(model.System):
-    Module = ZopeInterfaceModule
-    Class = ZopeInterfaceClass
-    Function = ZopeInterfaceFunction
-    Attribute = ZopeInterfaceAttribute
-    defaultBuilder = ZopeInterfaceASTBuilder
+def postProcess(self:model.System) -> None:
 
-    def postProcess(self) -> None:
-        super().postProcess()
+    for mod in self.objectsOfType(ZopeInterfaceModule):
+        _handle_implemented(mod)
 
-        for mod in self.objectsOfType(ZopeInterfaceModule):
-            _handle_implemented(mod)
+    for cls in self.objectsOfType(ZopeInterfaceClass):
+        _handle_implemented(cls)
 
-        for cls in self.objectsOfType(ZopeInterfaceClass):
-            _handle_implemented(cls)
+def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None:
+    r.register_mixin(ZopeInterfaceModule, 
+                      ZopeInterfaceFunction, 
+                      ZopeInterfaceClass, 
+                      ZopeInterfaceAttribute)
+    r.register_astbuilder_visitor(ZopeInterfaceModuleVisitor)
+    r.register_post_processor(postProcess)
diff --git a/pydoctor/factory.py b/pydoctor/factory.py
new file mode 100644
index 000000000..e56fb44a9
--- /dev/null
+++ b/pydoctor/factory.py
@@ -0,0 +1,114 @@
+"""
+Create customizable model classes. 
+"""
+
+from typing import Dict, List, Tuple, Type, Any, Union, Sequence, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from pydoctor import model
+
+class GenericFactory:
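+    """
+    A generic class factory: it builds classes dynamically with C{type()},
+    combining the registered mixin classes with the configured base classes.
+    """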
+
+    def __init__(self, bases: Dict[str, Type[Any]]) -> None:
+        self.bases = bases
+        self.mixins: Dict[str, List[Type[Any]]] = {}
+        self._class_cache: Dict[Tuple[str, Tuple[Type[Any], ...]], Type[Any]] = {}
+
+    def add_mixin(self, for_class: str, mixin:Type[Any]) -> None:
+        """
+        Add a mixin class to the specified object in the factory. 
+        """
+        try:
+            mixins = self.mixins[for_class]
+        except KeyError:
+            mixins = []
+            self.mixins[for_class] = mixins
+        
+        assert isinstance(mixins, list)
+        mixins.append(mixin)
+
+    def add_mixins(self, **kwargs:Union[Sequence[Type[Any]], Type[Any]]) -> None:
+        """
+        Add mixin classes to objects in the factory. 
+
+        Example::
+
+            class MyClassMixin: ...
+            class MyDataMixin: ...
+            factory = factory.Factory()
+            factory.add_mixins(Class=MyClassMixin, Attribute=MyDataMixin)
+
+        :param kwargs: Mixin class(es) to apply to the named base classes.
+        """
+        for key,value in kwargs.items():
+            if isinstance(value, Sequence):
+                for item in value:
+                    self.add_mixin(key, item)
+            else:
+                self.add_mixin(key, value)
+
+    def get_class(self, name:str) -> Type[Any]:
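+        """
+        Return the class to use for C{name}: a class created dynamically with
+        C{type()} that inherits from the registered mixins (if any) followed by
+        the configured base class. Results are cached per (name, bases) key, so
+        the same class object is returned until new mixins are registered for
+        that name.
+        """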
+        class_id = name, tuple(self.mixins.get(name, [])+[self.bases[name]])
+        cached = self._class_cache.get(class_id)
+        if cached is not None:
+            cls = cached
+        else:
+            cls = type(*class_id, {})
+            self._class_cache[class_id] = cls
+        return cls
+
+class Factory(GenericFactory):
+    """
+    Classes are created dynamically with `type` such that they can inherit from customizable mixin classes.
+    """
+
+    def __init__(self) -> None:
+        # Workaround cyclic import issue.
+        from pydoctor import model
+        self.model = model
+        _bases = {
+            'Class': model.Class,
+            'Function': model.Function,
+            'Module': model.Module,
+            'Package': model.Package,
+            'Attribute': model.Attribute,
+        }
+        super().__init__(bases=_bases)
+
+    def add_mixin(self, for_class: str, mixin: Type[Any]) -> None:
+        super().add_mixin(for_class, mixin)
+
+        # Take care to avoid inconsistent MRO by removing extra model.* classes from the Mixin bases.
+        try:
+            b = list(mixin.__bases__)
+            b.remove(getattr(self.model, for_class))
+            mixin.__bases__ = tuple(b)
+        except ValueError:
+            pass
+
+    @property
+    def Class(self) -> Type['model.Class']:
+        klass = self.get_class('Class')
+        assert issubclass(klass, self.model.Class)
+        return klass
+
+    @property
+    def Function(self) -> Type['model.Function']:
+        func = self.get_class('Function')
+        assert issubclass(func, self.model.Function)
+        return func
+
+    @property
+    def Module(self) -> Type['model.Module']:
+        mod = self.get_class('Module')
+        assert issubclass(mod, self.model.Module)
+        return mod
+    
+    @property
+    def Package(self) -> Type['model.Package']:
+        mod = self.get_class('Package')
+        assert issubclass(mod, self.model.Package)
+        return mod
+
+    @property
+    def Attribute(self) -> Type['model.Attribute']:
+        data = self.get_class('Attribute')
+        assert issubclass(data, self.model.Attribute)
+        return data
diff --git a/pydoctor/linker.py b/pydoctor/linker.py
new file mode 100644
index 000000000..a1edfedbf
--- /dev/null
+++ b/pydoctor/linker.py
@@ -0,0 +1,430 @@
+"""
+This module provides implementations of epydoc's L{DocstringLinker} class.
+"""
+
+from collections import defaultdict
+import attr
+from twisted.web.template import Tag, tags
+from typing import (
+    TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Union, cast
+)
+
+from pydoctor.epydoc.markup import DocstringLinker
+
+if TYPE_CHECKING:
+    from twisted.web.template import Flattenable
+    from typing_extensions import Literal
+    
+    # This import must be kept in the TYPE_CHECKING block for circular references issues.
+    from pydoctor import model
+
+def taglink(o: 'model.Documentable', page_url: str, 
+            label: Optional["Flattenable"] = None, 
+            same_page_optimization:bool=True) -> Tag:
+    """
+    Create a link to an object that exists in the system.
+
+    @param o: The object to link to
+    @param page_url: The URL of the current page
+    @param label: The label to use for the link
+    @param same_page_optimization: Whether to drop the page path and keep only the
+        anchor when C{page_url} matches the URL of the page the object lives on.
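+    @return: An C{<a class="internal-link">} tag; when C{label} differs from the
+        object's full name, a C{title} attribute holding the full name is added as well.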
+    """
+    if not o.isVisible:
+        o.system.msg("html", "don't link to %s"%o.fullName())
+
+    if label is None:
+        label = o.fullName()
+
+    url = o.url
+    if url.startswith(page_url + '#') and same_page_optimization is True:
+        # When linking to an item on the same page, omit the path.
+        # Besides shortening the HTML, this also avoids the page being reloaded
+        # if the query string is non-empty.
+        url = url[len(page_url):]
+
+    ret: Tag = tags.a(label, href=url, class_='internal-link')
+    if label != o.fullName():
+        ret(title=o.fullName())
+    return ret
+
+
+class _EpydocLinker(DocstringLinker):
+    """
+    This linker implements the xref lookup logic.
+    """
+    
+    class LookupFailed(LookupError):
+        """
+        Encapsulate a link tag that is not actually a link because we could not resolve the name.
+
+        Used only if L{_EpydocLinker.strict} is True.
+        """
+        def __init__(self, *args: object, link: Tag) -> None:
+            super().__init__(*args)
+            self.link: Tag = link
+
+    def __init__(self, obj: 'model.Documentable', same_page_optimization:bool, strict:bool=False):
+        self.obj = obj
+        self.same_page_optimization=same_page_optimization
+        self.strict=strict
+
+    @staticmethod
+    def _create_intersphinx_link(label:"Flattenable", url:str) -> Tag:
+        """
+        Create a link with the special 'intersphinx-link' CSS class.
+        """
+        return tags.a(label, href=url, class_='intersphinx-link')
+
+    def look_for_name(self,
+            name: str,
+            candidates: Iterable['model.Documentable'],
+            lineno: int
+            ) -> Optional['model.Documentable']:
+        part0 = name.split('.')[0]
+        potential_targets = []
+        for src in candidates:
+            if part0 not in src.contents:
+                continue
+            target = src.resolveName(name)
+            if target is not None and target not in potential_targets:
+                potential_targets.append(target)
+        if len(potential_targets) == 1:
+            return potential_targets[0]
+        elif len(potential_targets) > 1:
+            self.obj.report(
+                "ambiguous ref to %s, could be %s" % (
+                    name,
+                    ', '.join(ob.fullName() for ob in potential_targets)),
+                'resolve_identifier_xref', lineno)
+        return None
+
+    def look_for_intersphinx(self, name: str) -> Optional[str]:
+        """
+        Return link for `name` based on intersphinx inventory.
+
+        Return None if link is not found.
+        """
+        return self.obj.system.intersphinx.getLink(name)
+
+    def link_to(self, identifier: str, label: "Flattenable") -> Tag:
+        # :Raises _EpydocLinker.LookupFailed: If the identifier cannot be resolved and self.strict is True.
+        # Returns a Tag('a') when the target is found, or a Tag('transparent') when it is not.
+        fullID = self.obj.expandName(identifier)
+
+        target = self.obj.system.objForFullName(fullID)
+        if target is not None:
+            return taglink(target, self.obj.page_object.url, label, 
+                           same_page_optimization=self.same_page_optimization)
+
+        url = self.look_for_intersphinx(fullID)
+        if url is not None:
+            return self._create_intersphinx_link(label, url=url)
+
+        link = tags.transparent(label)
+        if self.strict:
+            raise self.LookupFailed(identifier, link=link)
+        return link
+
+    def link_xref(self, target: str, label: "Flattenable", lineno: int) -> Tag:
+        # :Raises _EpydocLinker.LookupFailed: If the identifier cannot be resolved and self.strict is True.
+        # Otherwise returns a Tag('code'). 
+        # If the target is not found, the code tag will simply contain the label as Flattenable, like:
+        # Tag('code', children=['label as Flattenable'])
+        # If the link is found it gives something like:
+        # Tag('code', children=[Tag('a', href='...', children=['label as Flattenable'])])
+        xref: "Flattenable"
+        try:
+            resolved = self._resolve_identifier_xref(target, lineno)
+        except LookupError as e:
+            xref = label
+            if self.strict:
+                raise self.LookupFailed(str(e), link=tags.code(xref)) from e
+        else:
+            if isinstance(resolved, str):
+                xref = self._create_intersphinx_link(label, url=resolved)
+            else:
+                xref = taglink(resolved, self.obj.page_object.url, label, 
+                           same_page_optimization=self.same_page_optimization)
+                
+        return tags.code(xref)
+
+    def resolve_identifier(self, identifier: str) -> Optional[str]:
+        fullID = self.obj.expandName(identifier)
+
+        target = self.obj.system.objForFullName(fullID)
+        if target is not None:
+            return target.url
+
+        return self.look_for_intersphinx(fullID)
+
+    def _resolve_identifier_xref(self,
+            identifier: str,
+            lineno: int
+            ) -> Union[str, 'model.Documentable']:
+        """
+        Resolve a crossreference link to a Python identifier.
+        This will resolve the identifier to any reasonable target,
+        even if it has to look in places where Python itself would not.
+
+        @param identifier: The name of the Python identifier that
+            should be linked to.
+        @param lineno: The line number within the docstring at which the
+            crossreference is located.
+        @return: The referenced object within our system, or the URL of
+            an external target (found via Intersphinx).
+        @raise LookupError: If C{identifier} could not be resolved.
+        """
+
+        # There is a lot of DWIM here. Look for a global match first,
+        # to reduce the chance of a false positive.
+
+        # Check if 'identifier' is the fullName of an object.
+        target = self.obj.system.objForFullName(identifier)
+        if target is not None:
+            return target
+
+        # Check if the fullID exists in an intersphinx inventory.
+        fullID = self.obj.expandName(identifier)
+        target_url = self.look_for_intersphinx(fullID)
+        if not target_url:
+            # FIXME: https://github.com/twisted/pydoctor/issues/125
+            # expandName is unreliable so in the case fullID fails, we
+            # try our luck with 'identifier'.
+            target_url = self.look_for_intersphinx(identifier)
+        if target_url:
+            return target_url
+
+        # Since there was no global match, go look for the name in the
+        # context where it was used.
+
+        # Check if 'identifier' refers to an object by Python name resolution
+        # in our context. Walk up the object tree and see if 'identifier' refers
+        # to an object by Python name resolution in each context.
+        src: Optional['model.Documentable'] = self.obj
+        while src is not None:
+            target = src.resolveName(identifier)
+            if target is not None:
+                return target
+            src = src.parent
+
+        # Walk up the object tree again and see if 'identifier' refers to an
+        # object in an "uncle" object.  (So if p.m1 has a class C, the
+        # docstring for p.m2 can say L{C} to refer to the class in m1).
+        # If at any level 'identifier' refers to more than one object, complain.
+        src = self.obj
+        while src is not None:
+            target = self.look_for_name(identifier, src.contents.values(), lineno)
+            if target is not None:
+                return target
+            src = src.parent
+
+        # Examine every module and package in the system and see if 'identifier'
+        # names an object in each one.  Again, if more than one object is
+        # found, complain.
+        target = self.look_for_name(
+            # System.objectsOfType now supports passing the type as string.
+            identifier, self.obj.system.objectsOfType('pydoctor.model.Module'), lineno)
+        if target is not None:
+            return target
+
+        message = f'Cannot find link target for "{fullID}"'
+        if identifier != fullID:
+            message = f'{message}, resolved from "{identifier}"'
+        root_idx = fullID.find('.')
+        if root_idx != -1 and fullID[:root_idx] not in self.obj.system.root_names:
+            message += ' (you can link to external docs with --intersphinx)'
+        self.obj.report(message, 'resolve_identifier_xref', lineno)
+        raise LookupError(identifier)
+
+
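As a reading aid, the lookup order implemented by _resolve_identifier_xref above, traced for the p.m1/p.m2 example from its own comments (an illustrative sketch, not part of the patch):

    # A docstring in p.m2 contains L{C}, and only p.m1 defines a class C.
    #  1. 'C' as the fullName of a documented object      -> no match
    #  2. expandName('C') in an intersphinx inventory     -> no match
    #  3. Python name resolution in p.m2, then in p       -> no match
    #  4. "uncle" lookup: p.m1 contains C                 -> resolves to p.m1.C
    # If several sibling modules defined C, look_for_name() would warn about the ambiguity.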
+class _CachedEpydocLinker(_EpydocLinker):
+    """
+    This linker implements smart caching functionality on top of the public methods defined in L{_EpydocLinker}.
+
+    The cache is implemented at the L{Tag} (Stan) level: already resolved URLs are recycled, and the cached
+    L{Tag} instances are transformed to match the label and formatting requested by link_to() and link_xref().
+    """
+    
+    @attr.s(auto_attribs=True)
+    class CacheEntry:
+        name: str
+        label: "Flattenable"
+        link: Tag
+        lookup_failed: bool
+        warned_linenos: Set[int] = attr.ib(factory=set)
+
+    class NewDerivedEntry(Exception):
+        def __init__(self, *args: object, entry:'_CachedEpydocLinker.CacheEntry') -> None:
+            super().__init__(*args)
+            self.entry=entry
+
+    _CacheType = Dict[str, Dict[bool, List['_CachedEpydocLinker.CacheEntry']]]
+    _defaultCache: _CacheType = defaultdict(lambda:{True:[], False:[]})
+
+    def __init__(self, obj: 'model.Documentable', same_page_optimization:bool=True) -> None:
+        super().__init__(obj, same_page_optimization, strict=True)
+        
+        self._link_to_cache: '_CachedEpydocLinker._CacheType' = self._defaultCache.copy()
+        self._link_xref_cache: '_CachedEpydocLinker._CacheType' = self._defaultCache.copy()
+    
+    def _get_cache(self, cache_kind: 'Literal["link_to", "link_xref"]' = "link_to") -> '_CachedEpydocLinker._CacheType':
+        cache_dict = getattr(self, f"_{cache_kind}_cache")
+        assert isinstance(cache_dict, dict)
+        return cast('_CachedEpydocLinker._CacheType', cache_dict)
+
+    def _new_derived_entry(self, 
+                             cached_entry: '_CachedEpydocLinker.CacheEntry', 
+                             label: Optional["Flattenable"], 
+                             cache_kind: 'Literal["link_to", "link_xref"]' = "link_to") -> '_CachedEpydocLinker.CacheEntry':
+
+        # Transform the URL to omit the filename when self.same_page_optimization is True and
+        # add it when self.same_page_optimization is False.
+        link = self._adjust_link(cached_entry.link, 
+                                        # here we clone the link because we need to change the label anyway
+                                        self.same_page_optimization) or (cached_entry.link.clone() if label else cached_entry.link)
+
+        # Change the label if needed.
+        if label:
+            link.children = [label]
+
+        return self._store_in_cache(
+                        cached_entry.name, 
+                        label if label else link.children[0], 
+                        link=link, 
+                        cache_kind=cache_kind,
+                        lookup_failed=cached_entry.lookup_failed,
+                        warned_linenos=cached_entry.warned_linenos # We do not use copy() here by design.
+                    )
+    
+    def _adjust_link(self, link: Tag, use_same_page_optimization:bool) -> Optional[Tag]:
+        # Returns a new link or None if the current link is correct.
+        if use_same_page_optimization is False:
+            if link.attributes.get('href', '').startswith("#"): # type:ignore
+                link = link.clone()
+                link.attributes['href'] = self.obj.page_object.url + link.attributes['href'] # type:ignore
+                assert not link.attributes['href'].startswith("#") # type:ignore
+                return link
+        else:
+            if link.attributes.get('href', '').startswith(self.obj.page_object.url+"#"): # type:ignore
+                link = link.clone()
+                link.attributes['href'] = link.attributes['href'][len(self.obj.page_object.url):] # type:ignore
+                assert link.attributes['href'].startswith("#") # type:ignore
+                return link
+        return None
+
+    def _lookup_cached_entry(self, target:str, label: "Flattenable", 
+                          cache_kind: 'Literal["link_to", "link_xref"]' = "link_to") -> Optional['_CachedEpydocLinker.CacheEntry']:
+        # Look up an entry in the cache. Raise NewDerivedEntry if the exact entry could not be found
+        # but the correct link could be extrapolated from a link we already had in the cache.
+        # Returns None if no corresponding entry has been found in the cache.
+        
+        # For xrefs, we first look into the link_to cache.
+        if cache_kind == "link_xref":
+            cached = self._lookup_cached_entry(target, label, cache_kind="link_to")
+            if cached is not None: return cached
+        
+        # Get the cached entries
+        cache = self._get_cache(cache_kind)
+        not_same_value_for_same_page_optimization = False
+        values = cache[target][self.same_page_optimization]
+        
+        # Fall back to the entries cached with the other value of same_page_optimization.
+        # This is OK because we support transforming the URL either way, see _adjust_link.
+        if not values: 
+            values = cache[target][not self.same_page_optimization]
+            not_same_value_for_same_page_optimization = True
+        
+        # Not found
+        if not values: 
+            return None
+
+        # Here we iterate, but we could transform this into a dict access for more speed.
+        # But at the same time, usually there are not a lot of different labels applied 
+        # to the same link in the same docstring, so the current behaviour is good enough.
+        for entry in values:
+            if entry.label==label: 
+                if not_same_value_for_same_page_optimization:
+                    new_entry = self._new_derived_entry(entry, None, cache_kind)
+                    raise self.NewDerivedEntry('new cache entry', entry=new_entry)
+                return entry
+        else: 
+            # Automatically infer what would be the link 
+            # with a different label
+            entry = values[0]
+            new_entry = self._new_derived_entry(entry, label, cache_kind)
+            raise self.NewDerivedEntry('new cache entry', entry=new_entry)               
+    
+    def _store_in_cache(self, target: str, 
+                        label: "Flattenable", 
+                        link: Tag,  
+                        cache_kind: 'Literal["link_to", "link_xref"]' = "link_to", 
+                        lookup_failed:bool=False, 
+                        warned_linenos: Optional[Set[int]]=None) -> '_CachedEpydocLinker.CacheEntry':
+        # Store a new resolved link in the cache.
+
+        cache = self._get_cache(cache_kind)
+        values = cache[target][self.same_page_optimization]
+        entry = self.CacheEntry(target, label, link=link, lookup_failed=lookup_failed)
+        if warned_linenos:
+            entry.warned_linenos = warned_linenos # We do not use copy() here by design.
+        values.insert(0, entry)
+        return entry
+
+    def _lookup_cached_link_to(self, target: str, label: "Flattenable") -> Optional[Tag]:
+        # Lookup a link_to() cached value.
+        try:
+            cached = self._lookup_cached_entry(target, label, cache_kind="link_to")
+        except self.NewDerivedEntry as e:
+            cached = e.entry
+        if cached:
+            return cached.link
+        return None
+
+    def link_to(self, target: str, label: "Flattenable") -> Tag:
+        link = self._lookup_cached_link_to(target, label)
+        if link is None: 
+            failed=False 
+            try:
+                link = super().link_to(target, label)
+            except self.LookupFailed as e:
+                link = e.link
+                failed=True
+            self._store_in_cache(target, label, link, 
+                                 cache_kind="link_to", 
+                                 lookup_failed=failed)
+        return link
+    
+    def _lookup_cached_link_xref(self, target: str, label: "Flattenable", lineno: int) -> Optional[Tag]:
+        # Lookup a link_xref() cached value. 
+        # Warns if the link is derived from a link that failed the URL lookup.
+        try:
+            cached = self._lookup_cached_entry(target, label, cache_kind="link_xref")
+        except self.NewDerivedEntry as e:            
+            cached = e.entry
+            # Warn only if the line number differs from the values we already have in the cache.
+            if cached.lookup_failed and lineno not in cached.warned_linenos:
+                self.obj.report(f'Cannot find link target for "{cached.name}"', 'resolve_identifier_xref', lineno_offset=lineno)
+                cached.warned_linenos.add(lineno) # Add lineno such that the warning does not trigger again for this line.
+        
+        if cached:
+            return cached.link
+        return None
+
+    def link_xref(self, target: str, label: "Flattenable", lineno: int) -> Tag:
+        link: Optional["Flattenable"] = self._lookup_cached_link_xref(target, label, lineno)
+        if link is None:
+            failed=False 
+            try:
+                link = super().link_xref(target, label, lineno).children[0]
+            except self.LookupFailed as e:
+                link = e.link.children[0]
+                failed=True
+            if not isinstance(link, Tag): 
+                link = tags.transparent(link)
+            new_cached = self._store_in_cache(target, label, link, 
+                                              cache_kind="link_xref", 
+                                              lookup_failed=failed)
+            if failed:
+                new_cached.warned_linenos.add(lineno)
+        return tags.code(link)
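A rough usage sketch of the two linkers added above; obj stands for any Documentable from an already built system, and the target name is arbitrary (illustration only, not part of the patch):

    from pydoctor import linker

    # The cached flavour is what Documentable.docstring_linker returns (see model.py below).
    lnk = linker._CachedEpydocLinker(obj)
    tag1 = lnk.link_xref('socket.socket', 'socket', lineno=1)            # resolved (or warned about) once
    tag2 = lnk.link_xref('socket.socket', 'the socket class', lineno=2)  # derived from the cached entry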
diff --git a/pydoctor/model.py b/pydoctor/model.py
index feb6bc534..dd1c80f80 100644
--- a/pydoctor/model.py
+++ b/pydoctor/model.py
@@ -6,30 +6,32 @@
 being documented -- a System is a bag of Documentables, in some sense.
 """
 
+import abc
 import ast
 import datetime
 import importlib
 import inspect
 import platform
 import sys
+import textwrap
 import types
 from enum import Enum
-from inspect import Signature
-from optparse import Values
+from inspect import signature, Signature
 from pathlib import Path
 from typing import (
-    TYPE_CHECKING, Any, Collection, Dict, Iterable, Iterator, List, Mapping,
-    Optional, Sequence, Set, Tuple, Type, TypeVar, Union, overload
+    TYPE_CHECKING, Any, Callable, Collection, Dict, Iterable, Iterator, List, Mapping,
+    Optional, Sequence, Set, Tuple, Type, TypeVar, Union, cast, overload
 )
 from urllib.parse import quote
 
+from pydoctor.options import Options
+from pydoctor import factory, qnmatch, utils, linker, astutils
 from pydoctor.epydoc.markup import ParsedDocstring
 from pydoctor.sphinx import CacheT, SphinxInventory
 
 if TYPE_CHECKING:
     from typing_extensions import Literal
-    from twisted.web.template import Flattenable
-    from pydoctor.astbuilder import ASTBuilder
+    from pydoctor.astbuilder import ASTBuilder, DocumentableT
 else:
     Literal = {True: bool, False: bool}
     ASTBuilder = object
@@ -74,13 +76,15 @@ class PrivacyClass(Enum):
 
     @cvar HIDDEN: Don't show the object at all.
     @cvar PRIVATE: Show, but de-emphasize the object.
-    @cvar VISIBLE: Show the object as normal.
+    @cvar PUBLIC: Show the object as normal.
     """
 
     HIDDEN = 0
     PRIVATE = 1
-    VISIBLE = 2
-    
+    PUBLIC = 2
+    # For compatibility
+    VISIBLE = PUBLIC
+
 class DocumentableKind(Enum):
     """
     L{Enum} containing values indicating the possible object types.
@@ -115,6 +119,7 @@ class Documentable:
     """
     docstring: Optional[str] = None
     parsed_docstring: Optional[ParsedDocstring] = None
+    parsed_summary: Optional[ParsedDocstring] = None
     parsed_type: Optional[ParsedDocstring] = None
     docstring_lineno = 0
     linenumber = 0
@@ -138,7 +143,10 @@ def __init__(
         self.parent = parent
         self.parentMod: Optional[Module] = None
         self.source_path: Optional[Path] = source_path
-        self._deprecated_info: Optional["Flattenable"] = None
+        self.extra_info: List[ParsedDocstring] = []
+        """
+        A list to store extra information about this documentable, as L{ParsedDocstring} instances.
+        """
         self.setup()
 
     @property
@@ -147,6 +155,7 @@ def doctarget(self) -> 'Documentable':
 
     def setup(self) -> None:
         self.contents: Dict[str, Documentable] = {}
+        self._linker: Optional['linker.DocstringLinker'] = None
 
     def setDocstring(self, node: ast.Str) -> None:
         doc = node.s
@@ -176,7 +185,10 @@ def setLineNumber(self, lineno: int) -> None:
             if parentMod is not None:
                 parentSourceHref = parentMod.sourceHref
                 if parentSourceHref:
-                    self.sourceHref = f'{parentSourceHref}#L{lineno:d}'
+                    self.sourceHref = self.system.options.htmlsourcetemplate.format(
+                        mod_source_href=parentSourceHref,
+                        lineno=str(lineno)
+                    )
 
     @property
     def description(self) -> str:
@@ -209,9 +221,16 @@ def page_object(self) -> 'Documentable':
     def url(self) -> str:
         """Relative URL at which the documentation for this Documentable
         can be found.
+
+        For page objects this method MUST return an C{.html} filename without a
+        URI fragment (because L{pydoctor.templatewriter.writer.TemplateWriter}
+        uses it directly to determine the output filename).
         """
         page_obj = self.page_object
-        page_url = f'{quote(page_obj.fullName())}.html'
+        if list(self.system.root_names) == [page_obj.fullName()]:
+            page_url = 'index.html'
+        else:
+            page_url = f'{quote(page_obj.fullName())}.html'
         if page_obj is self:
             return page_url
         else:
@@ -244,7 +263,7 @@ def reparent(self, new_parent: 'Module', new_name: str) -> None:
         # :/
         self._handle_reparenting_pre()
         old_parent = self.parent
-        assert isinstance(old_parent, Module)
+        assert isinstance(old_parent, CanContainImportsDocumentable)
         old_name = self.name
         self.parent = self.parentMod = new_parent
         self.name = new_name
@@ -333,15 +352,15 @@ class Runner:
                 # The local name was not found.
                 # If we're looking at a class, we try our luck with the inherited members
                 if isinstance(ctx, Class):
-                    ctx.find(part)
-                    f = ctx.find(part)
-                    full_name = f.fullName() if f else full_name
-                # We don't have a full name
+                    inherited = ctx.find(part)
+                    if inherited: 
+                        full_name = inherited.fullName()
                 if full_name == part:
+                    # We don't have a full name
                     # TODO: Instead of returning the input, _localNameToFullName()
                     #       should probably either return None or raise LookupError.
                     # Or maybe we should find a way to indicate if the expanded name is "guessed" or if we have the the correct fullName. 
-                    # With the current implementation, this would mean checking if "parts[i + 1:]" contains anything. 
+                    # With the current implementation, this would mean checking if "parts[i + 1:]" contains anything.
                     full_name = f'{ctx.fullName()}.{part}'
                     break
             nxt = self.system.objForFullName(full_name)
@@ -416,7 +435,11 @@ def isVisible(self) -> bool:
 
         This is just a simple helper which defers to self.privacyClass.
         """
-        return self.privacyClass is not PrivacyClass.HIDDEN
+        isVisible = self.privacyClass is not PrivacyClass.HIDDEN
+        # If a module/package/class is hidden, all its members are hidden as well.
+        if isVisible and self.parent:
+            isVisible = self.parent.isVisible
+        return isVisible
 
     @property
     def isPrivate(self) -> bool:
@@ -424,7 +447,7 @@ def isPrivate(self) -> bool:
 
         This is just a simple helper which defers to self.privacyClass.
         """
-        return self.privacyClass is not PrivacyClass.VISIBLE
+        return self.privacyClass is not PrivacyClass.PUBLIC
 
     @property
     def module(self) -> 'Module':
@@ -442,7 +465,7 @@ def report(self, descr: str, section: str = 'parsing', lineno_offset: int = 0) -
 
         linenumber: object
         if section in ('docstring', 'resolve_identifier_xref'):
-            linenumber = self.docstring_lineno
+            linenumber = self.docstring_lineno or self.linenumber
         else:
             linenumber = self.linenumber
         if linenumber:
@@ -472,6 +495,18 @@ def aliases(self) -> List['Attribute']:
             if alias.parent._resolveDocumentable(alias) == self.fullName():
                 aliases.append(alias)
         return aliases
+    
+    @property
+    def docstring_linker(self) -> 'linker.DocstringLinker':
+        """
+        Returns an instance of L{DocstringLinker} suitable for resolving names
+        in the context of the object scope. 
+        """
+        if self._linker is not None:
+            return self._linker
+        self._linker = linker._CachedEpydocLinker(self)
+        return self._linker
+
 
 class CanContainImportsDocumentable(Documentable):
     def setup(self) -> None:
@@ -493,6 +528,13 @@ def privacyClass(self) -> PrivacyClass:
     def setup(self) -> None:
         super().setup()
 
+        self._is_c_module = False
+        """Whether this module is a C-extension."""
+        self._py_mod: Optional[types.ModuleType] = None
+        """The live module if the module was built from introspection."""
+        self._py_string: Optional[str] = None
+        """The module string if the module was built from text."""
+
         self.all: Optional[Collection[str]] = None
         """Names listed in the C{__all__} variable of this module.
 
@@ -521,6 +563,8 @@ def _localNameToFullName(self, name: str, indirections:Any=None) -> str:
                     indirections)
                 if resolved:
                     return resolved
+            else:
+                return self._localNameToFullName_map[name]
         return name
 
     @property
@@ -547,6 +591,11 @@ def docformat(self) -> Optional[str]:
     def docformat(self, value: str) -> None:
         self._docformat = value
 
+    def submodules(self) -> Iterator['Module']:
+        """Returns an iterator over the visible submodules."""
+        return (m for m in self.contents.values()
+                if isinstance(m, Module) and m.isVisible)
+
 class Package(Module):
     kind = DocumentableKind.PACKAGE
 
@@ -643,7 +692,7 @@ class Function(Inheritable):
     is_async: bool
     annotations: Mapping[str, Optional[ast.expr]]
     decorators: Optional[Sequence[ast.expr]]
-    signature: Signature
+    signature: Optional[Signature]
 
     def setup(self) -> None:
         super().setup()
@@ -677,6 +726,32 @@ class Attribute(Inheritable):
 
 T = TypeVar('T')
 
+def import_mod_from_file_location(module_full_name:str, path: Path) -> types.ModuleType:
+    spec = importlib.util.spec_from_file_location(module_full_name, path)
+    if spec is None: 
+        raise RuntimeError(f"Cannot find spec for module {module_full_name} at {path}")
+    py_mod = importlib.util.module_from_spec(spec)
+    loader = spec.loader
+    assert isinstance(loader, importlib.abc.Loader), loader
+    loader.exec_module(py_mod)
+    return py_mod
+
+
+# Declare the types that we consider as functions (also when they are coming
+# from a C extension)
+func_types: Tuple[Type[Any], ...] = (types.BuiltinFunctionType, types.FunctionType)
+if hasattr(types, "MethodDescriptorType"):
+    # This is Python >= 3.7 only
+    func_types += (types.MethodDescriptorType, )
+else:
+    func_types += (type(str.join), )
+if hasattr(types, "ClassMethodDescriptorType"):
+    # This is Python >= 3.7 only
+    func_types += (types.ClassMethodDescriptorType, )
+else:
+    func_types += (type(dict.__dict__["fromkeys"]), )
+
+_default_extensions = object()
 class System:
     """A collection of related documentable objects.
 
@@ -684,17 +759,25 @@ class System:
     package.
     """
 
-    Class = Class
-    Module = Module
-    Package = Package
-    Function = Function
-    Attribute = Attribute
     # Not assigned here for circularity reasons:
     #defaultBuilder = astbuilder.ASTBuilder
     defaultBuilder: Type[ASTBuilder]
-    sourcebase: Optional[str] = None
+    systemBuilder: Type['ISystemBuilder']
+    options: 'Options'
+    extensions: List[str] = cast('List[str]', _default_extensions)
+    """
+    List of extensions.
+
+    By default, all built-in pydoctor extensions will be loaded.
+    Override this value to cherry-pick extensions. 
+    """
+
+    custom_extensions: List[str] = []
+    """
+    Additional list of extensions to load alongside default extensions.
+    """
 
-    def __init__(self, options: Optional[Values] = None):
+    def __init__(self, options: Optional['Options'] = None):
         self.allobjects: Dict[str, Documentable] = {}
         self.rootobjects: List[_ModuleT] = []
 
@@ -708,8 +791,7 @@ def __init__(self, options: Optional[Values] = None):
         if options:
             self.options = options
         else:
-            from pydoctor.driver import parse_args
-            self.options, _ = parse_args([])
+            self.options = Options.defaults()
             self.options.verbosity = 3
 
         self.projectname = 'my project'
@@ -720,31 +802,68 @@ def __init__(self, options: Optional[Values] = None):
         self.verboselevel = 0
         self.needsnl = False
         self.once_msgs: Set[Tuple[str, str]] = set()
-        self.unprocessed_modules: Set[Module] = set()
+
+        # We keep the module objects themselves, and not their fullName, because modules can
+        # be reparented, which would otherwise generate KeyErrors.
+        self.unprocessed_modules: List[_ModuleT] = []
+
         self.module_count = 0
         self.processing_modules: List[str] = []
         self.buildtime = datetime.datetime.now()
         self.intersphinx = SphinxInventory(logger=self.msg)
 
+        # Since privacy handling now uses fnmatch, we cache results so we don't re-run matches all the time.
+        # We use the fullName of the objects as the dict key in order to bind a full name to a privacy, not an object to a privacy.
+        # This way, we are sure the objects' privacy stays the same even if we reparent them manually.
+        self._privacyClassCache: Dict[str, PrivacyClass] = {}
+        
+        # workaround cyclic import issue
+        from pydoctor import extensions
+
+        # Initialize the extension system
+        self._factory = factory.Factory()
+        self._astbuilder_visitors: List[Type['astutils.NodeVisitorExt']] = []
+        self._post_processors: List[Callable[['System'], None]] = []
+        
+        if self.extensions == _default_extensions:
+            self.extensions = list(extensions.get_extensions())
+        assert isinstance(self.extensions, list)
+        assert isinstance(self.custom_extensions, list)
+        for ext in self.extensions + self.custom_extensions:
+            # Load extensions
+            extensions.load_extension_module(self, ext)
+
+    @property
+    def Class(self) -> Type['Class']:
+        return self._factory.Class
+    @property
+    def Function(self) -> Type['Function']:
+        return self._factory.Function
+    @property
+    def Module(self) -> Type['Module']:
+        return self._factory.Module
+    @property
+    def Package(self) -> Type['Package']:
+        return self._factory.Package
+    @property
+    def Attribute(self) -> Type['Attribute']:
+        return self._factory.Attribute
+
+    @property
+    def sourcebase(self) -> Optional[str]:
+        return self.options.htmlsourcebase
+
     @property
     def root_names(self) -> Collection[str]:
         """The top-level package/module names in this system."""
         return {obj.name for obj in self.rootobjects}
 
-    def verbosity(self, section: Union[str, Iterable[str]]) -> int:
-        if isinstance(section, str):
-            section = (section,)
-        delta: int = max(self.options.verbosity_details.get(sect, 0)
-                         for sect in section)
-        base: int = self.options.verbosity
-        return base + delta
-
     def progress(self, section: str, i: int, n: Optional[int], msg: str) -> None:
         if n is None:
             d = str(i)
         else:
             d = f'{i}/{n}'
-        if self.verbosity(section) == 0 and sys.stdout.isatty():
+        if self.options.verbosity == 0 and sys.stdout.isatty():
             print('\r'+d, msg, end='')
             sys.stdout.flush()
             if d == n:
@@ -762,6 +881,16 @@ def msg(self,
             wantsnl: bool = True,
             once: bool = False
             ) -> None:
+        """
+        Log a message. pydoctor's logging system is a bit messy.
+        
+        @param section: API doc generation step this message belongs to.
+        @param msg: The message.
+        @param thresh: The minimum verbosity level of the system for this message to actually be printed.
+            Meaning that passing thresh=-1 will make the message still display if C{-q} is passed, but not if C{-qq} is.
+            Similarly, passing thresh=1 will make the message appear only if the verbosity level is increased at least once with C{-v}.
+        @param topthresh: The maximum verbosity level of the system for this message to actually be printed.
+        """
         if once:
             if (section, msg) in self.once_msgs:
                 return
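For illustration, how the thresh/topthresh bounds documented above play out for a few calls, assuming the default verbosity of 0 (system is any model.System; the section names are made up):

    system.msg('processModule', 'detailed progress', thresh=1)       # only printed once -v is given
    system.msg('summary', 'important notice', thresh=-1)             # still printed with -q, silenced by -qq
    system.msg('debug', 'default-level only', thresh=0, topthresh=0) # printed by default, dropped once -v raises verbosity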
@@ -774,7 +903,7 @@ def msg(self,
             # on top of the logging system.
             self.violations += 1
 
-        if thresh <= self.verbosity(section) <= topthresh:
+        if thresh <= self.options.verbosity <= topthresh:
             if self.needsnl and wantsnl:
                 print()
             print(msg, end='')
@@ -830,19 +959,49 @@ def _warning(self,
         if self.options.verbosity > 0:
             print(fn, message, detail)
 
-    def objectsOfType(self, cls: Type[T]) -> Iterator[T]:
+    def objectsOfType(self, cls: Union[Type['DocumentableT'], str]) -> Iterator['DocumentableT']:
         """Iterate over all instances of C{cls} present in the system. """
+        if isinstance(cls, str):
+            cls = utils.findClassFromDottedName(cls, 'objectsOfType', 
+                base_class=cast(Type['DocumentableT'], Documentable))
+        assert isinstance(cls, type)
         for o in self.allobjects.values():
             if isinstance(o, cls):
                 yield o
 
     def privacyClass(self, ob: Documentable) -> PrivacyClass:
+        ob_fullName = ob.fullName()
+        cached_privacy = self._privacyClassCache.get(ob_fullName)
+        if cached_privacy is not None:
+            return cached_privacy
+        
+        # kind should not be None, this is probably a relic of a past age of pydoctor.
+        # but keep it just in case.
         if ob.kind is None:
             return PrivacyClass.HIDDEN
+        
+        privacy = PrivacyClass.PUBLIC
         if ob.name.startswith('_') and \
                not (ob.name.startswith('__') and ob.name.endswith('__')):
-            return PrivacyClass.PRIVATE
-        return PrivacyClass.VISIBLE
+            privacy = PrivacyClass.PRIVATE
+        
+        # Precedence: later --privacy CLI arguments win over earlier ones.
+        # Check exact matches first, then qnmatch patterns.
+        _found_exact_match = False
+        for priv, match in reversed(self.options.privacy):
+            if ob_fullName == match:
+                privacy = priv
+                _found_exact_match = True
+                break
+        if not _found_exact_match:
+            for priv, match in reversed(self.options.privacy):
+                if qnmatch.qnmatch(ob_fullName, match):
+                    privacy = priv
+                    break
+
+        # Store in cache
+        self._privacyClassCache[ob_fullName] = privacy
+        return privacy
 
     def addObject(self, obj: Documentable) -> None:
         """Add C{object} to the system."""
@@ -882,6 +1041,7 @@ def setSourceHref(self, mod: _ModuleT, source_path: Path) -> None:
             mod.sourceHref = None
         else:
             projBaseDir = mod.system.options.projectbasedirectory
+            assert projBaseDir is not None
             relative = source_path.relative_to(projBaseDir).as_posix()
             mod.sourceHref = f'{self.sourcebase}/{relative}'
 
@@ -909,26 +1069,76 @@ def analyzeModule(self,
             ) -> _ModuleT:
         factory = self.Package if is_package else self.Module
         mod = factory(self, modname, parentPackage, modpath)
-        self.addObject(mod)
-        self.progress(
-            "analyzeModule", len(self.allobjects),
-            None, "modules and packages discovered")
-        self.unprocessed_modules.add(mod)
-        self.module_count += 1
+        self._addUnprocessedModule(mod)
         self.setSourceHref(mod, modpath)
         return mod
 
+    def _addUnprocessedModule(self, mod: _ModuleT) -> None:
+        """
+        Check for duplicate module names, then add the new module to the
+        unprocessed_modules list and register it with the system.
+        """
+        assert mod.state is ProcessingState.UNPROCESSED
+        first = self.allobjects.get(mod.fullName())
+        if first is not None:
+            # At this step of processing only modules exist
+            assert isinstance(first, Module)
+            self._handleDuplicateModule(first, mod)
+        else:
+            self.unprocessed_modules.append(mod)
+            self.addObject(mod)
+            self.progress(
+                "analyzeModule", len(self.allobjects),
+                None, "modules and packages discovered")        
+            self.module_count += 1
+
+    def _handleDuplicateModule(self, first: _ModuleT, dup: _ModuleT) -> None:
+        """
+        This is called when two modules have the same name. 
+
+        Current rules are the following:
+            - C-modules win over regular Python modules
+            - Packages win over modules
+            - Otherwise, the last added module wins
+        """
+        self._warning(dup.parent, "duplicate", str(first))
+
+        if first._is_c_module and not isinstance(dup, Package):
+            # The C-module wins
+            return
+        elif isinstance(first, Package) and not isinstance(dup, Package):
+            # The package wins
+            return
+        else:
+            # Else, the last added module wins
+            self._remove(first)
+            self.unprocessed_modules.remove(first)
+            self._addUnprocessedModule(dup)
+
     def _introspectThing(self, thing: object, parent: Documentable, parentMod: _ModuleT) -> None:
         for k, v in thing.__dict__.items():
-            if (isinstance(v, (types.BuiltinFunctionType, types.FunctionType))
+            if (isinstance(v, func_types)
                     # In PyPy 7.3.1, functions from extensions are not
-                    # instances of the above abstract types.
-                    or v.__class__.__name__ == 'builtin_function_or_method'):
+                    # instances of the abstract types in func_types
+                    or (hasattr(v, "__class__") and v.__class__.__name__ == 'builtin_function_or_method')):
                 f = self.Function(self, k, parent)
                 f.parentMod = parentMod
                 f.docstring = v.__doc__
                 f.decorators = None
-                f.signature = Signature()
+                try:
+                    f.signature = signature(v)
+                except ValueError:
+                    # function has an invalid signature.
+                    parent.report(f"Cannot parse signature of {parent.fullName()}.{k}")
+                    f.signature = None
+                except TypeError:
+                    # in pypy we get a TypeError calling signature() on classmethods, 
+                    # because apparently, they are not callable :/
+                    f.signature = None
+                        
+                f.is_async = False
+                f.annotations = {name: None for name in f.signature.parameters} if f.signature else {}
                 self.addObject(f)
             elif isinstance(v, type):
                 c = self.Class(self, k, parent)
@@ -951,22 +1161,17 @@ def introspectModule(self,
         else:
             module_full_name = f'{package.fullName()}.{module_name}'
 
-        spec = importlib.util.spec_from_file_location(module_full_name, path)
-        if spec is None: 
-            raise RuntimeError(f"Cannot find spec for module {module_full_name} at {path}")
-        py_mod = importlib.util.module_from_spec(spec)
-        loader = spec.loader
-        assert isinstance(loader, importlib.abc.Loader), loader
-        loader.exec_module(py_mod)
+        py_mod = import_mod_from_file_location(module_full_name, path)
         is_package = py_mod.__package__ == py_mod.__name__
 
         factory = self.Package if is_package else self.Module
         module = factory(self, module_name, package, path)
-        self.addObject(module)
-
+        
         module.docstring = py_mod.__doc__
-        self._introspectThing(py_mod, module, module)
-
+        module._is_c_module = True
+        module._py_mod = py_mod
+        
+        self._addUnprocessedModule(module)
         return module
 
     def addPackage(self, package_path: Path, parentPackage: Optional[_PackageT] = None) -> None:
@@ -992,9 +1197,16 @@ def addModuleFromPath(self, path: Path, package: Optional[_PackageT]) -> None:
             elif suffix in importlib.machinery.SOURCE_SUFFIXES:
                 self.analyzeModule(path, module_name, package)
             break
+    
+    def _remove(self, o: Documentable) -> None:
+        del self.allobjects[o.fullName()]
+        oc = list(o.contents.values())
+        for c in oc:
+            self._remove(c)
 
     def handleDuplicate(self, obj: Documentable) -> None:
-        '''This is called when we see two objects with the same
+        """
+        This is called when we see two objects with the same
         .fullName(), for example::
 
             class C:
@@ -1006,19 +1218,14 @@ def meth(self):
                         implementation 2
 
         The default is that the second definition "wins".
-        '''
+        """
         i = 0
         fullName = obj.fullName()
         while (fullName + ' ' + str(i)) in self.allobjects:
             i += 1
         prev = self.allobjects[fullName]
         self._warning(obj.parent, "duplicate", str(prev))
-        def remove(o: Documentable) -> None:
-            del self.allobjects[o.fullName()]
-            oc = list(o.contents.values())
-            for c in oc:
-                remove(c)
-        remove(prev)
+        self._remove(prev)
         prev.name = obj.name + ' ' + str(i)
         def readd(o: Documentable) -> None:
             self.allobjects[o.fullName()] = o
@@ -1038,25 +1245,38 @@ def getProcessedModule(self, modname: str) -> Optional[_ModuleT]:
         if mod.state is ProcessingState.UNPROCESSED:
             self.processModule(mod)
 
-        assert mod.state in (ProcessingState.PROCESSING, ProcessingState.PROCESSED)
+        assert mod.state in (ProcessingState.PROCESSING, ProcessingState.PROCESSED), mod.state
         return mod
 
-
     def processModule(self, mod: _ModuleT) -> None:
         assert mod.state is ProcessingState.UNPROCESSED
+        assert mod in self.unprocessed_modules
         mod.state = ProcessingState.PROCESSING
+        self.unprocessed_modules.remove(mod)
         if mod.source_path is None:
-            return
-        builder = self.defaultBuilder(self)
-        ast = builder.parseFile(mod.source_path)
-        if ast:
+            assert mod._py_string is not None
+        if mod._is_c_module:
             self.processing_modules.append(mod.fullName())
             self.msg("processModule", "processing %s"%(self.processing_modules), 1)
-            builder.processModuleAST(ast, mod)
+            self._introspectThing(mod._py_mod, mod, mod)
             mod.state = ProcessingState.PROCESSED
             head = self.processing_modules.pop()
             assert head == mod.fullName()
-        self.unprocessed_modules.remove(mod)
+        else:
+            builder = self.defaultBuilder(self)
+            if mod._py_string is not None:
+                ast = builder.parseString(mod._py_string)
+            else:
+                assert mod.source_path is not None
+                ast = builder.parseFile(mod.source_path)
+            if ast:
+                self.processing_modules.append(mod.fullName())
+                if mod._py_string is None:
+                    self.msg("processModule", "processing %s"%(self.processing_modules), 1)
+                builder.processModuleAST(ast, mod)
+                mod.state = ProcessingState.PROCESSED
+                head = self.processing_modules.pop()
+                assert head == mod.fullName()
         self.progress(
             'process',
             self.module_count - len(self.unprocessed_modules),
@@ -1078,7 +1298,8 @@ def postProcess(self) -> None:
         without the risk of drawing incorrect conclusions because modules
         were not fully processed yet.
         """
-        pass
+        for post_processor in self._post_processors:
+            post_processor(self)
 
 
     def fetchIntersphinxInventories(self, cache: CacheT) -> None:
@@ -1087,3 +1308,96 @@ def fetchIntersphinxInventories(self, cache: CacheT) -> None:
         """
         for url in self.options.intersphinx:
             self.intersphinx.update(cache, url)
+
+class SystemBuildingError(Exception):
+    """
+    Raised when there is a (handled) fatal error while adding modules to the builder.
+    """
+
+class ISystemBuilder(abc.ABC):
+    """
+    Interface class for building a system.
+    """
+    @abc.abstractmethod
+    def __init__(self, system: 'System') -> None:
+        """
+        Create the builder.
+        """
+    @abc.abstractmethod
+    def addModule(self, path: Path, parent_name: Optional[str] = None, ) -> None:
+        """
+        Add a module or package from a file system path to the pydoctor system.
+        If the path points to a directory, all submodules are added recursively.
+
+        @raises SystemBuildingError: If there is an error while adding the module/package.
+        """
+    @abc.abstractmethod
+    def addModuleString(self, text: str, modname: str,
+                        parent_name: Optional[str] = None,
+                        is_package: bool = False, ) -> None:
+        """
+        Add a module from text to the system.
+        """
+    @abc.abstractmethod
+    def buildModules(self) -> None:
+        """
+        Build the modules.
+        """
+
+class SystemBuilder(ISystemBuilder):
+    """
+    This class is only an adapter for some System methods related to module building. 
+    """
+    def __init__(self, system: 'System') -> None:
+        self.system = system
+        self._added: Set[Path] = set()
+
+    def addModule(self, path: Path, parent_name: Optional[str] = None, ) -> None:
+        if path in self._added:
+            return
+        # Path validity check
+        if self.system.options.projectbasedirectory is not None:
+            # Note: Path.is_relative_to() was only added in Python 3.9,
+            #       so we have to use this workaround for now.
+            try:
+                path.relative_to(self.system.options.projectbasedirectory)
+            except ValueError as ex:
+                raise SystemBuildingError(f"Source path lies outside base directory: {ex}")
+        parent: Optional[Package] = None
+        if parent_name:
+            _p = self.system.allobjects[parent_name]
+            assert isinstance(_p, Package)
+            parent = _p
+        if path.is_dir():
+            self.system.msg('addPackage', f"adding directory {path}")
+            if not (path / '__init__.py').is_file():
+                raise SystemBuildingError(f"Source directory lacks __init__.py: {path}")
+            self.system.addPackage(path, parent)
+        elif path.is_file():
+            self.system.msg('addModuleFromPath', f"adding module {path}")
+            self.system.addModuleFromPath(path, parent)
+        elif path.exists():
+            raise SystemBuildingError(f"Source path is neither file nor directory: {path}")
+        else:
+            raise SystemBuildingError(f"Source path does not exist: {path}")
+        self._added.add(path)
+
+    def addModuleString(self, text: str, modname: str,
+                        parent_name: Optional[str] = None,
+                        is_package: bool = False, ) -> None:
+        if parent_name is None:
+            parent = None
+        else:
+            # Use the containing package as the parent.
+            parent = self.system.allobjects[parent_name]
+            assert isinstance(parent, Package), f"{parent.fullName()} is not a Package, it's a {parent.kind}"
+        
+        factory = self.system.Package if is_package else self.system.Module
+        mod = factory(self.system, name=modname, parent=parent, source_path=None)
+        mod._py_string = textwrap.dedent(text)
+        self.system._addUnprocessedModule(mod)
+
+    def buildModules(self) -> None:
+        self.system.process()
+
+System.systemBuilder = SystemBuilder
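A minimal sketch of driving the new builder interface (it assumes a fully configured System, i.e. defaultBuilder and the extension machinery wired up as pydoctor normally does; the module name and source text are made up):

    from pydoctor import model

    system = model.System()
    builder = system.systemBuilder(system)
    builder.addModuleString("'''A module docstring.'''\nCONST = 42\n", modname='mymod')
    builder.buildModules()
    print(system.allobjects['mymod'].docstring)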
diff --git a/pydoctor/napoleon/docstring.py b/pydoctor/napoleon/docstring.py
index 378fc21d5..79c152792 100644
--- a/pydoctor/napoleon/docstring.py
+++ b/pydoctor/napoleon/docstring.py
@@ -1556,13 +1556,13 @@ def _parse_numpydoc_see_also_section(self, content: List[str]) -> List[str]:
         """
         items: List[Tuple[str, List[str], Optional[str]]] = []
 
-        def parse_item_name(text: str) -> Tuple[str, str]:
+        def parse_item_name(text: str) -> Tuple[str, Optional[str]]:
             """Match ':role:`name`' or 'name'"""
             m = self._name_rgx.match(text)
             if m:
                 g = m.groups()
                 if g[1] is None:
-                    return g[3], None  # type: ignore [unreachable]
+                    return g[3], None
                 else:
                     return g[2], g[1]
             raise ValueError(f"{text} is not a item name")
diff --git a/pydoctor/node2stan.py b/pydoctor/node2stan.py
index 5b26a7d7f..875734f6e 100644
--- a/pydoctor/node2stan.py
+++ b/pydoctor/node2stan.py
@@ -12,19 +12,20 @@
 if TYPE_CHECKING:
     from twisted.web.template import Flattenable
     from pydoctor.epydoc.markup import DocstringLinker
-    
+
+from pydoctor.epydoc.docutils import get_lineno
 from pydoctor.epydoc.doctest import colorize_codeblock, colorize_doctest
 from pydoctor.stanutils import flatten, html2stan
 
-def node2html(node: nodes.Node, docstring_linker: 'DocstringLinker') -> List[str]:
+def node2html(node: nodes.Node, docstring_linker: 'DocstringLinker', compact:bool=True) -> List[str]:
     """
     Convert a L{docutils.nodes.Node} object to HTML strings.
     """
-    visitor = HTMLTranslator(node.document, docstring_linker)
+    visitor = HTMLTranslator(node.document, docstring_linker, compact=compact)
     node.walkabout(visitor)
     return visitor.body
 
-def node2stan(node: Union[nodes.Node, Iterable[nodes.Node]], docstring_linker: 'DocstringLinker') -> Tag:
+def node2stan(node: Union[nodes.Node, Iterable[nodes.Node]], docstring_linker: 'DocstringLinker', compact:bool=True) -> Tag:
     """
     Convert L{docutils.nodes.Node} objects to a Stan tree.
 
@@ -35,10 +36,10 @@ def node2stan(node: Union[nodes.Node, Iterable[nodes.Node]], docstring_linker: '
     """
     html = []
     if isinstance(node, nodes.Node):
-        html += node2html(node, docstring_linker)
+        html += node2html(node, docstring_linker, compact)
     else:
         for child in node:
-            html += node2html(child, docstring_linker)
+            html += node2html(child, docstring_linker, compact)
     return html2stan(''.join(html))
 
 
@@ -70,7 +71,8 @@ class HTMLTranslator(html4css1.HTMLTranslator):
 
     def __init__(self,
             document: nodes.document,
-            docstring_linker: 'DocstringLinker'
+            docstring_linker: 'DocstringLinker',
+            compact: bool = False, 
             ):
         self._linker = docstring_linker
 
@@ -85,12 +87,11 @@ def __init__(self,
         # don't allow <h1> tags, start at <h2>
         # h1 is reserved for the page nodes.title.
         self.section_level += 1
+        self._compact = compact
 
     # Handle interpreted text (crossreferences)
     def visit_title_reference(self, node: nodes.Node) -> None:
-        # TODO: 'node.line' is None for reStructuredText based docstring for some reason.
-        # https://github.com/twisted/pydoctor/issues/237
-        lineno = node.line or 0
+        lineno = get_lineno(node)
         self._handle_reference(node, link_func=lambda target, label: self._linker.link_xref(target, label, lineno))
 
     # Handle internal references
@@ -118,7 +119,13 @@ def _handle_reference(self, node: nodes.Node, link_func: Callable[[str, "Flatten
         raise nodes.SkipNode()
 
     def should_be_compact_paragraph(self, node: nodes.Node) -> bool:
-        if self.document.children == [node]:
+
+        # HTMLTranslator.should_be_compact_paragraph() used to always remove the
+        # p tag when there is only one element in the document. This is a good behaviour
+        # for colorizing AST values, etc., but for docstrings we want to keep at least
+        # one paragraph (for a better margin), so we use option compact=False.
+
+        if self._compact is True and self.document.children == [node]:
             return True
         else:
             return super().should_be_compact_paragraph(node)  # type: ignore[no-any-return]
diff --git a/pydoctor/options.py b/pydoctor/options.py
new file mode 100644
index 000000000..1c9fe6025
--- /dev/null
+++ b/pydoctor/options.py
@@ -0,0 +1,410 @@
+"""
+The command-line parsing.
+"""
+
+import re
+from typing import Sequence, List, Optional, Type, Tuple, TYPE_CHECKING
+import sys
+import functools
+from pathlib import Path
+from argparse import SUPPRESS, Namespace
+
+from configargparse import ArgumentParser
+import attr
+
+from pydoctor import __version__
+from pydoctor.themes import get_themes
+from pydoctor.epydoc.markup import get_supported_docformats
+from pydoctor.sphinx import MAX_AGE_HELP, USER_INTERSPHINX_CACHE
+from pydoctor.utils import parse_path, findClassFromDottedName, parse_privacy_tuple, error
+from pydoctor._configparser import CompositeConfigParser, IniConfigParser, TomlConfigParser
+
+if TYPE_CHECKING:
+    from pydoctor import model
+    from pydoctor.templatewriter import IWriter
+
+BUILDTIME_FORMAT = '%Y-%m-%d %H:%M:%S'
+BUILDTIME_FORMAT_HELP = 'YYYY-mm-dd HH:MM:SS'
+
+DEFAULT_CONFIG_FILES = ['./pyproject.toml', './setup.cfg', './pydoctor.ini']
+CONFIG_SECTIONS = ['tool.pydoctor', 'tool:pydoctor', 'pydoctor']
+
+DEFAULT_SYSTEM = 'pydoctor.model.System'
+
+__all__ = ("Options", )
+
+# CONFIGURATION PARSING
+
+PydoctorConfigParser = CompositeConfigParser(
+    [TomlConfigParser(CONFIG_SECTIONS),
+     IniConfigParser(CONFIG_SECTIONS, split_ml_text_to_list=True)])
+
+# ARGUMENTS PARSING
+
+def get_parser() -> ArgumentParser:
+    parser = ArgumentParser(
+        prog='pydoctor',
+        description="API doc generator.",
+        usage="pydoctor [options] SOURCEPATH...",
+        default_config_files=DEFAULT_CONFIG_FILES,
+        config_file_parser_class=PydoctorConfigParser,
+        ignore_unknown_config_file_keys=True,)
+    parser.add_argument(
+        '-c', '--config', is_config_file=True,
+        help=("Load config from this file (any command line "
+              "options override settings from the file)."), metavar="PATH",)
+    parser.add_argument(
+        '--project-name', dest='projectname', metavar="PROJECTNAME",
+        help=("The project name, shown at the top of each HTML page."))
+    parser.add_argument(
+        '--project-version',
+        dest='projectversion',
+        default='',
+        metavar='VERSION',
+        help=(
+            "The version of the project for which the API docs are generated. "
+            "Defaults to empty string."
+            ))
+    parser.add_argument(
+        '--project-url', dest='projecturl', metavar="URL",
+        help=("The project url, appears in the html if given."))
+    parser.add_argument(
+        '--project-base-dir', dest='projectbasedirectory',
+        help=("Path to the base directory of the project. Source links "
+              "will be computed based on this value."), metavar="PATH", default='.')
+    parser.add_argument(
+        '--testing', dest='testing', action='store_true',
+        help=("Don't complain if the run doesn't have any effects."))
+    parser.add_argument(
+        '--pdb', dest='pdb', action='store_true',
+        help=("Like py.test's --pdb."))
+    parser.add_argument(
+        '--make-html', action='store_true', dest='makehtml',
+        default=Options.MAKE_HTML_DEFAULT, help=("Produce html output."
+            " Enabled by default if options '--testing' or '--make-intersphinx' are not specified."))
+    parser.add_argument(
+        '--make-intersphinx', action='store_true', dest='makeintersphinx',
+        default=False, help=("Produce (only) the objects.inv intersphinx file."))
+    # Used to pass sourcepath from config file
+    parser.add_argument(
+        '--add-package', '--add-module', action='append', dest='packages',
+        metavar='MODPATH', default=[], help=SUPPRESS)
+    parser.add_argument(
+        '--prepend-package', action='store', dest='prependedpackage',
+        help=("Pretend that all packages are within this one. "
+              "Can be used to document part of a package."), metavar='PACKAGE')
+    _docformat_choices = list(get_supported_docformats())
+    parser.add_argument(
+        '--docformat', dest='docformat', action='store', default='epytext',
+        choices=_docformat_choices,
+        help=("Format used for parsing docstrings. "
+             f"Supported values: {', '.join(_docformat_choices)}"), metavar='FORMAT')
+    parser.add_argument('--theme', dest='theme', default='classic',
+        choices=list(get_themes()),
+        help=("The theme to use when building your API documentation."),
+        metavar='THEME',
+    )
+    parser.add_argument(
+        '--template-dir', action='append',
+        dest='templatedir', default=[],
+        help=("Directory containing custom HTML templates. Can be repeated."),
+        metavar='PATH',
+    )
+    parser.add_argument(
+        '--privacy', action='append', dest='privacy',
+        metavar='<PRIVACY>:<PATTERN>', default=[],
+        help=("Set the privacy of specific objects when the default rules don't fit the use case. "
+              "Format: '<PRIVACY>:<PATTERN>', where <PRIVACY> can be one of 'PUBLIC', 'PRIVATE' or "
+              "'HIDDEN' (case insensitive), and <PATTERN> is a fnmatch-like pattern matching objects' fullName. "
+              "Patterns added last have priority over patterns added before, but an exact match wins over a fnmatch. Can be repeated."))
+    parser.add_argument(
+        '--html-subject', dest='htmlsubjects', action='append',
+        help=("The fullName of objects to generate API docs for"
+              " (generates everything by default)."), metavar='PACKAGE/MOD/CLASS')
+    parser.add_argument(
+        '--html-summary-pages', dest='htmlsummarypages',
+        action='store_true', default=False,
+        help=("Only generate the summary pages."))
+    parser.add_argument(
+        '--html-output', dest='htmloutput', default='apidocs',
+        help=("Directory to save HTML files to (default 'apidocs')"), metavar='PATH')
+    parser.add_argument(
+        '--html-writer', dest='htmlwriter',
+        default='pydoctor.templatewriter.TemplateWriter',
+        help=("Dotted name of HTML writer class to use (default 'pydoctor.templatewriter.TemplateWriter')."),
+        metavar='CLASS', )
+    parser.add_argument(
+        '--html-viewsource-base', dest='htmlsourcebase',
+        help=("This should be the path to the trac browser for the top "
+              "of the svn checkout we are documenting part of."), metavar='URL',)
+    parser.add_argument(
+        '--html-viewsource-template', dest='htmlsourcetemplate',
+        help=("A format string used to generate the source link of documented objects. "
+              "The default behaviour auto detects most common providers like Github, Bitbucket, GitLab or SourceForge. "
+              "But in some cases you might have to override the template string, for instance to make it work with git-web, use: "
+              '--html-viewsource-template="{mod_source_href}#n{lineno}"'), metavar='SOURCETEMPLATE', default=Options.HTML_SOURCE_TEMPLATE_DEFAULT)
+    parser.add_argument(
+        '--buildtime', dest='buildtime',
+        help=("Use the specified build time over the current time. "
+              f"Format: {BUILDTIME_FORMAT_HELP}"), metavar='TIME')
+    parser.add_argument(
+        '--process-types', dest='processtypes', action='store_true',
+        help="Process the 'type' and 'rtype' fields, add links and inline markup automatically. "
+             "This setting should not be enabled when using the google or numpy docformat because the types are always processed by default.",)
+    parser.add_argument(
+        '--warnings-as-errors', '-W', action='store_true',
+        dest='warnings_as_errors', default=False,
+        help=("Return exit code 3 on warnings."))
+    parser.add_argument(
+        '--verbose', '-v', action='count', dest='verbosity',
+        default=0,
+        help=("Be noisier. Can be repeated for more noise."))
+    parser.add_argument(
+        '--quiet', '-q', action='count', dest='quietness',
+        default=0,
+        help=("Be quieter."))
+
+    parser.add_argument(
+        '--introspect-c-modules', default=False, action='store_true',
+        help=("Import and introspect any C modules found."))
+
+    parser.add_argument(
+        '--intersphinx', action='append', dest='intersphinx',
+        metavar='URL_TO_OBJECTS.INV', default=[],
+        help=(
+            "Use Sphinx objects inventory to generate links to external "
+            "documentation. Can be repeated."))
+
+    parser.add_argument(
+        '--enable-intersphinx-cache',
+        dest='enable_intersphinx_cache_deprecated',
+        action='store_true',
+        default=False,
+        help=SUPPRESS
+    )
+    parser.add_argument(
+        '--disable-intersphinx-cache',
+        dest='enable_intersphinx_cache',
+        action='store_false',
+        default=True,
+        help="Disable Intersphinx cache."
+    )
+    parser.add_argument(
+        '--intersphinx-cache-path',
+        dest='intersphinx_cache_path',
+        default=USER_INTERSPHINX_CACHE,
+        help="Where to cache intersphinx objects.inv files.",
+        metavar='PATH',
+    )
+    parser.add_argument(
+        '--clear-intersphinx-cache',
+        dest='clear_intersphinx_cache',
+        action='store_true',
+        default=False,
+        help=("Clear the Intersphinx cache "
+              "specified by --intersphinx-cache-path."),
+    )
+    parser.add_argument(
+        '--intersphinx-cache-max-age',
+        dest='intersphinx_cache_max_age',
+        default='1d',
+        help=MAX_AGE_HELP,
+        metavar='DURATION',
+    )
+    parser.add_argument(
+        '--pyval-repr-maxlines', dest='pyvalreprmaxlines', default=7, type=int, metavar='INT',
+        help='Maximum number of lines for a constant value representation. Use 0 for unlimited.')
+    parser.add_argument(
+        '--pyval-repr-linelen', dest='pyvalreprlinelen', default=80, type=int, metavar='INT',
+        help='Maximum number of characters for a constant value representation line. Use 0 for unlimited.')
+    parser.add_argument(
+        '--sidebar-expand-depth', metavar="INT", type=int, default=1, dest='sidebarexpanddepth',
+        help=("How many nested modules and classes should be expandable; "
+              "the first level is always expanded, nested levels can expand/collapse. Value should be 1 or greater. (default: 1)"))
+    parser.add_argument(
+        '--sidebar-toc-depth', metavar="INT", type=int, default=6, dest='sidebartocdepth',
+        help=("How many nested titles should be listed in the docstring TOC "
+              "(default: 6)"))
+    parser.add_argument(
+        '--no-sidebar', default=False, action='store_true', dest='nosidebar',
+        help=("Do not generate the sidebar at all."))
+
+    parser.add_argument(
+        '--system-class', dest='systemclass', default=DEFAULT_SYSTEM,
+        help=("A dotted name of the class to use to make a system."))
+
+    parser.add_argument('-V', '--version', action='version', version=f'%(prog)s {__version__}')
+
+    parser.add_argument(
+        'sourcepath', metavar='SOURCEPATH',
+        help=("Path to python modules/packages to document."),
+        nargs="*", default=[],
+    )
+    return parser
+
+def parse_args(args: Sequence[str]) -> Namespace:
+    parser = get_parser()
+    options = parser.parse_args(args)
+    assert isinstance(options, Namespace)
+    options.verbosity -= options.quietness
+
+    _warn_deprecated_options(options)
+
+    return options
+
+def _warn_deprecated_options(options: Namespace) -> None:
+    """
+    Check the CLI options and warn on deprecated options.
+ """ + if options.enable_intersphinx_cache_deprecated: + print("The --enable-intersphinx-cache option is deprecated; " + "the cache is now enabled by default.", + file=sys.stderr, flush=True) + +# CONVERTERS + +def _convert_sourcepath(l: List[str]) -> List[Path]: + return list(map(functools.partial(parse_path, opt='SOURCEPATH'), l)) +def _convert_templatedir(l: List[str]) -> List[Path]: + return list(map(functools.partial(parse_path, opt='--template-dir'), l)) +def _convert_projectbasedirectory(s: Optional[str]) -> Optional[Path]: + if s: return parse_path(s, opt='--project-base-dir') + else: return None +def _convert_systemclass(s: str) -> Type['model.System']: + try: + return findClassFromDottedName(s, '--system-class', base_class='pydoctor.model.System') + except ValueError as e: + error(str(e)) +def _convert_htmlwriter(s: str) -> Type['IWriter']: + try: + return findClassFromDottedName(s, '--html-writer', base_class='pydoctor.templatewriter.IWriter') + except ValueError as e: + error(str(e)) +def _convert_privacy(l: List[str]) -> List[Tuple['model.PrivacyClass', str]]: + return list(map(functools.partial(parse_privacy_tuple, opt='--privacy'), l)) + +_RECOGNIZED_SOURCE_HREF = { + # Sourceforge + '{mod_source_href}#l{lineno}': re.compile(r'(^https?:\/\/sourceforge\.net\/)'), + + # Bitbucket + '{mod_source_href}#lines-{lineno}': re.compile(r'(^https?:\/\/bitbucket\.org\/)'), + + # Matches all other plaforms: Github, Gitlab, etc. + # This match should be kept last in the list. + '{mod_source_href}#L{lineno}': re.compile(r'(.*)?') + } + # Since we can't guess git-web platform form URL, + # we have to pass the template string wih option: + # --html-viewsource-template="{mod_source_href}#n{lineno}" + +def _get_viewsource_template(sourcebase: Optional[str]) -> str: + """ + Recognize several version control providers based on option C{--html-viewsource-base}. + """ + if not sourcebase: + return '{mod_source_href}#L{lineno}' + for template, regex in _RECOGNIZED_SOURCE_HREF.items(): + if regex.match(sourcebase): + return template + else: + assert False + +# TYPED OPTIONS CONTAINER + +@attr.s +class Options: + """ + Container for all possible pydoctor options. + + See C{pydoctor --help} for more informations. + """ + MAKE_HTML_DEFAULT = object() + # Avoid to define default values for config options here because it's taken care of by argparse. 
+ + HTML_SOURCE_TEMPLATE_DEFAULT = object() + + sourcepath: List[Path] = attr.ib(converter=_convert_sourcepath) + systemclass: Type['model.System'] = attr.ib(converter=_convert_systemclass) + projectname: Optional[str] = attr.ib() + projectversion: str = attr.ib() + projecturl: Optional[str] = attr.ib() + projectbasedirectory: Path = attr.ib(converter=_convert_projectbasedirectory) + testing: bool = attr.ib() + pdb: bool = attr.ib() # only working via driver.main() + makehtml: bool = attr.ib() + makeintersphinx: bool = attr.ib() + prependedpackage: Optional[str] = attr.ib() + docformat: str = attr.ib() + theme: str = attr.ib() + processtypes: bool = attr.ib() + templatedir: List[Path] = attr.ib(converter=_convert_templatedir) + privacy: List[Tuple['model.PrivacyClass', str]] = attr.ib(converter=_convert_privacy) + htmlsubjects: Optional[List[str]] = attr.ib() + htmlsummarypages: bool = attr.ib() + htmloutput: str = attr.ib() # TODO: make this a Path object once https://github.com/twisted/pydoctor/pull/389/files is merged + htmlwriter: Type['IWriter'] = attr.ib(converter=_convert_htmlwriter) + htmlsourcebase: Optional[str] = attr.ib() + htmlsourcetemplate: str = attr.ib() + buildtime: Optional[str] = attr.ib() + warnings_as_errors: bool = attr.ib() + verbosity: int = attr.ib() + quietness: int = attr.ib() + introspect_c_modules: bool = attr.ib() + intersphinx: List[str] = attr.ib() + enable_intersphinx_cache: bool = attr.ib() + intersphinx_cache_path: str = attr.ib() + clear_intersphinx_cache: bool = attr.ib() + intersphinx_cache_max_age: str = attr.ib() + pyvalreprlinelen: int = attr.ib() + pyvalreprmaxlines: int = attr.ib() + sidebarexpanddepth: int = attr.ib() + sidebartocdepth: int = attr.ib() + nosidebar: int = attr.ib() + + def __attrs_post_init__(self) -> None: + # do some validations... + # check if sidebar related arguments are valid + if self.sidebarexpanddepth < 1: + error("Invalid --sidebar-expand-depth value." + 'The value of --sidebar-expand-depth option should be greater or equal to 1, ' + 'to suppress sidebar generation all together: use --no-sidebar') + if self.sidebartocdepth < 0: + error("Invalid --sidebar-toc-depth value" + 'The value of --sidebar-toc-depth option should be greater or equal to 0, ' + 'to suppress sidebar generation all together: use --no-sidebar') + + # HIGH LEVEL FACTORY METHODS + + @classmethod + def defaults(cls,) -> 'Options': + return cls.from_args([]) + + @classmethod + def from_args(cls, args: Sequence[str]) -> 'Options': + return cls.from_namespace(parse_args(args)) + + @classmethod + def from_namespace(cls, args: Namespace) -> 'Options': + argsdict = vars(args) + + # set correct default for --make-html + if args.makehtml == cls.MAKE_HTML_DEFAULT: + if not args.testing and not args.makeintersphinx: + argsdict['makehtml'] = True + else: + argsdict['makehtml'] = False + + # auto-detect source link template if the default value is used. 
+ if args.htmlsourcetemplate == cls.HTML_SOURCE_TEMPLATE_DEFAULT: + argsdict['htmlsourcetemplate'] = _get_viewsource_template(args.htmlsourcebase) + + # handle deprecated arguments + argsdict['sourcepath'].extend(list(map(functools.partial(parse_path, opt='--add-package'), argsdict.pop('packages')))) + + # remove deprecated arguments + argsdict.pop('enable_intersphinx_cache_deprecated') + + # remove the config argument + argsdict.pop('config') + + return cls(**argsdict) + diff --git a/pydoctor/qnmatch.py b/pydoctor/qnmatch.py new file mode 100644 index 000000000..b140f73bd --- /dev/null +++ b/pydoctor/qnmatch.py @@ -0,0 +1,71 @@ +""" +Provides a modified L{fnmatch} function specialized for python objects fully qualified name pattern matching. + +Special patterns are:: + + ** matches everything (recursive) + * matches everything except "." (one level ony) + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq +""" +import functools +import re +from typing import Any, Callable + +@functools.lru_cache(maxsize=256, typed=True) +def _compile_pattern(pat: str) -> Callable[[str], Any]: + res = translate(pat) + return re.compile(res).match + +def qnmatch(name:str, pattern:str) -> bool: + """Test whether C{name} matches C{pattern}. + """ + match = _compile_pattern(pattern) + return match(name) is not None + +# Barely changed from https://github.com/python/cpython/blob/3.8/Lib/fnmatch.py +# Not using python3.9+ version because implementation is significantly more complex. +def translate(pat:str) -> str: + """Translate a shell PATTERN to a regular expression. + There is no way to quote meta-characters. + """ + i, n = 0, len(pat) + res = '' + while i < n: + c = pat[i] + i = i+1 + if c == '*': + # Changes begins: understands '**'. + if i < n and pat[i] == '*': + res = res + '.*?' + i = i + 1 + else: + res = res + r'[^\.]*?' + # Changes ends. + elif c == '?': + res = res + '.' + elif c == '[': + j = i + if j < n and pat[j] == '!': + j = j+1 + if j < n and pat[j] == ']': + j = j+1 + while j < n and pat[j] != ']': + j = j+1 + if j >= n: + res = res + '\\[' + else: + stuff = pat[i:j] + # Changes begins: simplifications handling backslashes and hyphens not required for fully qualified names. + stuff = stuff.replace('\\', r'\\') + i = j+1 + if stuff[0] == '!': + stuff = '^' + stuff[1:] + elif stuff[0] in ('^', '['): + stuff = '\\' + stuff + res = '%s[%s]' % (res, stuff) + # Changes ends. + else: + res = res + re.escape(c) + return r'(?s:%s)\Z' % res diff --git a/pydoctor/sphinx.py b/pydoctor/sphinx.py index bd4bf0fc6..6e1a8ebad 100644 --- a/pydoctor/sphinx.py +++ b/pydoctor/sphinx.py @@ -25,6 +25,7 @@ class CacheT(Protocol): def get(self, url: str) -> Optional[bytes]: ... + def close(self) -> None: ... else: Documentable = object CacheT = object @@ -401,6 +402,8 @@ def get(self, url: str) -> Optional[bytes]: ) return None + def close(self) -> None: + self._session.close() def prepareCache( clearCache: bool, diff --git a/pydoctor/sphinx_ext/_help_output.py b/pydoctor/sphinx_ext/_help_output.py deleted file mode 100644 index 02b2e1095..000000000 --- a/pydoctor/sphinx_ext/_help_output.py +++ /dev/null @@ -1,54 +0,0 @@ -""" -Private extension that produces the pydoctor help output to be included in the documentation. 
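A quick sketch of how the qualified-name patterns accepted by qnmatch() above behave (illustrative only, not part of the patch; the examples assume the new module is importable as pydoctor.qnmatch):

    from pydoctor.qnmatch import qnmatch

    # '*' stays within a single dotted level, while '**' crosses levels.
    assert qnmatch('pydoctor.model', 'pydoctor.*')
    assert not qnmatch('pydoctor.model.System', 'pydoctor.*')
    assert qnmatch('pydoctor.model.System', 'pydoctor.**')
    assert qnmatch('pydoctor.model.System', '**.System')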
-""" -from docutils import nodes -from docutils.parsers.rst import Directive - -from contextlib import redirect_stdout -from io import StringIO -from typing import Any, Dict, List, TYPE_CHECKING - -from pydoctor import __version__ -from pydoctor.driver import parse_args - - -if TYPE_CHECKING: - from sphinx.application import Sphinx - - -class HelpOutputDirective(Directive): - """ - Directive that will generate the pydoctor help as block literal. - - It takes no options or input value. - """ - has_content = True - - def run(self) -> List[nodes.Node]: - """ - Called by docutils each time the directive is found. - """ - - stream = StringIO() - try: - with redirect_stdout(stream): - parse_args(['--help']) - except SystemExit: - # The stdlib --help handling triggers system exit. - pass - - text = ['pydoctor --help'] + stream.getvalue().splitlines()[1:] - return [nodes.literal_block(text='\n'.join(text), language='text')] - - -def setup(app: 'Sphinx') -> Dict[str, Any]: - """ - Called by Sphinx when the extensions is loaded. - """ - app.add_directive('help_output', HelpOutputDirective) - - return { - 'version': __version__, - 'parallel_read_safe': True, - 'parallel_write_safe': True, - } diff --git a/pydoctor/sphinx_ext/build_apidocs.py b/pydoctor/sphinx_ext/build_apidocs.py index 06da7cef2..a5b828885 100644 --- a/pydoctor/sphinx_ext/build_apidocs.py +++ b/pydoctor/sphinx_ext/build_apidocs.py @@ -33,7 +33,8 @@ from sphinx.util import logging from pydoctor import __version__ -from pydoctor.driver import main, parse_args +from pydoctor.driver import main +from pydoctor.options import parse_args logger = logging.getLogger(__name__) @@ -58,7 +59,7 @@ def on_build_finished(app: Sphinx, exception: Exception) -> None: for key, value in runs.items(): arguments = _get_arguments(value, placeholders) - options, _ = parse_args(arguments) + options = parse_args(arguments) output_path = pathlib.Path(options.htmloutput) sphinx_files = output_path.with_suffix('.sphinx_files') @@ -100,7 +101,7 @@ def on_builder_inited(app: Sphinx) -> None: for key, value in runs.items(): arguments = _get_arguments(value, placeholders) - options, _ = parse_args(arguments) + options = parse_args(arguments) output_path = pathlib.Path(options.htmloutput) temp_path = output_path.with_suffix('.pydoctor_temp') diff --git a/pydoctor/stanutils.py b/pydoctor/stanutils.py index cc3033600..1d4865ed9 100644 --- a/pydoctor/stanutils.py +++ b/pydoctor/stanutils.py @@ -27,9 +27,14 @@ def html2stan(html: Union[bytes, str]) -> Tag: html = html.encode('utf8') html = _RE_CONTROL.sub(lambda m:b'\\x%02x' % ord(m.group()), html) - stan = XMLString(b'
<div>%s</div>' % html).load()[0]
-    assert isinstance(stan, Tag)
-    assert stan.tagName == 'div'
+    if not html.startswith(b'<html'):
+        stan = XMLString(b'<div>%s</div>' % html).load()[0]
+        assert isinstance(stan, Tag)
+        assert stan.tagName == 'div'
+    else:
+        stan = XMLString(b'%s' % html).load()[0]
+        assert isinstance(stan, Tag)
+        assert stan.tagName == 'html'
     stan.tagName = ''
     return stan
diff --git a/pydoctor/templatewriter/__init__.py b/pydoctor/templatewriter/__init__.py
index e6c61175b..bc13ee873 100644
--- a/pydoctor/templatewriter/__init__.py
+++ b/pydoctor/templatewriter/__init__.py
@@ -1,5 +1,5 @@
 """Render pydoctor data as HTML."""
-from typing import Iterable, Iterator, Optional, Union, cast, TYPE_CHECKING
+from typing import Any, Iterable, Iterator, Optional, Union, cast, TYPE_CHECKING
 if TYPE_CHECKING:
     from typing_extensions import Protocol, runtime_checkable
 else:
@@ -17,7 +17,7 @@ def runtime_checkable(f):
     if sys.version_info >= (3, 9):
         from importlib.abc import Traversable
     else:
-        Traversable = Path
+        Traversable = Any
 else:
     Traversable = object
diff --git a/pydoctor/templatewriter/pages/__init__.py b/pydoctor/templatewriter/pages/__init__.py
index 5764aa1de..dca6e686a 100644
--- a/pydoctor/templatewriter/pages/__init__.py
+++ b/pydoctor/templatewriter/pages/__init__.py
@@ -16,12 +16,14 @@
 from twisted.web.iweb import IRenderable, ITemplateLoader, IRequest
 from twisted.web.template import Element, Tag, renderer, tags
 
+from pydoctor.extensions import zopeinterface
 from pydoctor.stanutils import html2stan
-from pydoctor import epydoc2stan, model, zopeinterface, __version__
+from pydoctor import epydoc2stan, model, __version__
 from pydoctor.astbuilder import node2fullname
 from pydoctor.templatewriter import util, TemplateLookup, TemplateElement
 from pydoctor.templatewriter.pages.table import ChildTable
+from pydoctor.templatewriter.pages.sidebar import SideBar
 from pydoctor.epydoc.markup._pyval_repr import colorize_inline_pyval
 
 if TYPE_CHECKING:
@@ -41,7 +43,14 @@ def objects_order(o: model.Documentable) -> Tuple[int, int, str]:
         children = sorted((o for o in ob.contents.values() if o.isVisible), key=objects_order)
     """
-    return (-o.privacyClass.value, -o.kind.value if o.kind else 0, o.fullName().lower())
+
+    def map_kind(kind: model.DocumentableKind) -> model.DocumentableKind:
+        if kind == model.DocumentableKind.PACKAGE:
+            # packages and modules should be listed together
+            return model.DocumentableKind.MODULE
+        return kind
+
+    return (-o.privacyClass.value, -map_kind(o.kind).value if o.kind else 0, o.fullName().lower())
 
 def format_decorators(obj: Union[model.Function, model.Attribute]) -> Iterator["Flattenable"]:
     for dec in obj.decorators or ():
@@ -52,10 +61,10 @@ def format_decorators(obj: Union[model.Function, model.Attribute]) -> Iterator["
             if fn in ("twisted.python.deprecate.deprecated",
                       "twisted.python.deprecate.deprecatedProperty"):
                 break
-
+        # Colorize decorators!
         doc = colorize_inline_pyval(dec)
-        stan = doc.to_stan(epydoc2stan._EpydocLinker(obj))
+        stan = doc.to_stan(obj.docstring_linker)
        # Report eventual warnings. It warns when a regex failed to parse or the html2stan() function fails.
         for message in doc.warnings:
             obj.report(message)
@@ -67,19 +76,7 @@ def format_signature(function: model.Function) -> "Flattenable":
     Return a stan representation of a nicely-formatted source-like function signature for the given L{Function}.
     Arguments default values are linked to the appropriate objects when possible.
""" - return html2stan(str(function.signature)) - -class DocGetter: - """L{epydoc2stan} bridge.""" - def get(self, ob: model.Documentable, summary: bool = False) -> Tag: - if summary: - return epydoc2stan.format_summary(ob) - else: - return epydoc2stan.format_docstring(ob) - def get_type(self, ob: model.Documentable) -> Optional[Tag]: - return epydoc2stan.type2stan(ob) - - + return html2stan(str(function.signature)) if function.signature else "(...)" class Nav(TemplateElement): """ @@ -88,11 +85,6 @@ class Nav(TemplateElement): filename = 'nav.html' - def __init__(self, system: model.System, loader: ITemplateLoader) -> None: - super().__init__(loader) - self.system = system - - class Head(TemplateElement): """ Common metadata. @@ -155,7 +147,7 @@ def head(self, request: IRequest, tag: Tag) -> IRenderable: @renderer def nav(self, request: IRequest, tag: Tag) -> IRenderable: - return Nav(self.system, Nav.lookup_loader(self.template_lookup)) + return Nav(Nav.lookup_loader(self.template_lookup)) @renderer def header(self, request: IRequest, tag: Tag) -> IRenderable: @@ -175,11 +167,11 @@ class CommonPage(Page): filename = 'common.html' ob: model.Documentable - def __init__(self, ob: model.Documentable, template_lookup: TemplateLookup, docgetter: Optional[DocGetter]=None): + def __init__(self, ob: model.Documentable, template_lookup: TemplateLookup, docgetter: Optional[util.DocGetter]=None): super().__init__(ob.system, template_lookup) self.ob = ob if docgetter is None: - docgetter = DocGetter() + docgetter = util.DocGetter() self.docgetter = docgetter @property @@ -211,15 +203,11 @@ def namespace(self, obj: model.Documentable) -> List[Union[Tag, str]]: ob = ob.parent parts.reverse() return parts - @renderer def deprecated(self, request: object, tag: Tag) -> "Flattenable": - msg = self.ob._deprecated_info - if msg is None: - return () - else: - return tags.div(msg, role="alert", class_="deprecationNotice alert alert-warning") - + import warnings + warnings.warn("Renderer 'CommonPage.deprecated' is deprecated, the twisted's deprecation system is now supported by default.") + return '' @renderer def source(self, request: object, tag: Tag) -> "Flattenable": sourceHref = util.srclink(self.ob) @@ -231,8 +219,8 @@ def source(self, request: object, tag: Tag) -> "Flattenable": def inhierarchy(self, request: object, tag: Tag) -> "Flattenable": return () - def extras(self) -> List["Flattenable"]: - return [] + def extras(self) -> List[Tag]: + return self.objectExtras(self.ob) def docstring(self) -> "Flattenable": return self.docgetter.get(self.ob) @@ -240,7 +228,7 @@ def docstring(self) -> "Flattenable": def children(self) -> Sequence[model.Documentable]: return sorted( (o for o in self.ob.contents.values() if o.isVisible), - key=objects_order) + key=util.objects_order) def packageInitTable(self) -> "Flattenable": return () @@ -260,7 +248,7 @@ def mainTable(self) -> "Flattenable": def methods(self) -> Sequence[model.Documentable]: return sorted((o for o in self.ob.contents.values() if o.documentation_location is model.DocLocation.PARENT_PAGE and o.isVisible), - key=objects_order) + key=util.objects_order) def childlist(self) -> List[Union["AttributeChild", "FunctionChild"]]: from pydoctor.templatewriter.pages.attributechild import AttributeChild @@ -273,19 +261,38 @@ def childlist(self) -> List[Union["AttributeChild", "FunctionChild"]]: for c in self.methods(): if isinstance(c, model.Function): - r.append(FunctionChild(self.docgetter, c, self.functionExtras(c), func_loader)) + 
r.append(FunctionChild(self.docgetter, c, self.objectExtras(c), func_loader)) elif isinstance(c, model.Attribute): - r.append(AttributeChild(self.docgetter, c, self.functionExtras(c), attr_loader)) + r.append(AttributeChild(self.docgetter, c, self.objectExtras(c), attr_loader)) else: assert False, type(c) return r - def functionExtras(self, ob: model.Documentable) -> List["Flattenable"]: - return [] + def objectExtras(self, ob: model.Documentable) -> List[Tag]: + """ + Flatten each L{model.Documentable.extra_info} list item. + """ + r: List[Tag] = [] + for extra in ob.extra_info: + r.append(extra.to_stan(ob.docstring_linker, compact=False)) + return r + # Not adding Known aliases here because it would really be too much information. + # TODO: Would it actully be TMI? def functionBody(self, ob: model.Documentable) -> "Flattenable": return self.docgetter.get(ob) + @renderer + def maindivclass(self, request: IRequest, tag: Tag) -> str: + return 'nosidebar' if self.ob.system.options.nosidebar else '' + + @renderer + def sidebarcontainer(self, request: IRequest, tag: Tag) -> Union[Tag, str]: + if self.ob.system.options.nosidebar: + return "" + else: + return tag.fillSlots(sidebar=SideBar(ob=self.ob, template_lookup=self.template_lookup)) + @property def slot_map(self) -> Dict[str, "Flattenable"]: slot_map = super().slot_map @@ -302,8 +309,10 @@ def slot_map(self) -> Dict[str, "Flattenable"]: class ModulePage(CommonPage): - def extras(self) -> List["Flattenable"]: - r = super().extras() + ob: model.Module + + def extras(self) -> List[Tag]: + r: List[Tag] = [] # Add Known aliases, for modules. aliases = sorted(self.ob.aliases, key=objects_order) @@ -316,21 +325,19 @@ def extras(self) -> List["Flattenable"]: if sourceHref: r.append(tags.a("(source)", href=sourceHref, class_="sourceLink")) + r.extend(super().extras()) return r class PackagePage(ModulePage): def children(self) -> Sequence[model.Documentable]: - return sorted( - (o for o in self.ob.contents.values() - if isinstance(o, model.Module) and o.isVisible), - key=objects_order) + return sorted(self.ob.submodules(), key=objects_order) def packageInitTable(self) -> "Flattenable": children = sorted( (o for o in self.ob.contents.values() if not isinstance(o, model.Module) and o.isVisible), - key=objects_order) + key=util.objects_order) if children: loader = ChildTable.lookup_loader(self.template_lookup) return [ @@ -345,43 +352,15 @@ def methods(self) -> Sequence[model.Documentable]: if o.documentation_location is model.DocLocation.PARENT_PAGE and o.isVisible] - -def overriding_subclasses( - c: model.Class, - name: str, - firstcall: bool = True - ) -> Iterator[model.Class]: - if not firstcall and name in c.contents: - yield c - else: - for sc in c.subclasses: - if sc.isVisible: - yield from overriding_subclasses(sc, name, False) - -def nested_bases(b: model.Class) -> Sequence[Tuple[model.Class, ...]]: - r: List[Tuple[model.Class, ...]] = [(b,)] - for b2 in b.baseobjects: - if b2 is None: - continue - for n in nested_bases(b2): - r.append(n + (b,)) - return r - -def unmasked_attrs(baselist: Sequence[model.Documentable]) -> Sequence[model.Documentable]: - maybe_masking = { - o.name - for b in baselist[1:] - for o in b.contents.values() - } - return [o for o in baselist[0].contents.values() - if o.isVisible and o.name not in maybe_masking] - def assembleList( system: model.System, label: str, lst: Sequence[str], page_url: str ) -> Optional["Flattenable"]: + """ + Convert list of object names into a stan tree with clickable links. 
+ """ lst2 = [] for name in lst: o = system.allobjects.get(name) @@ -410,17 +389,17 @@ class ClassPage(CommonPage): def __init__(self, ob: model.Documentable, template_lookup: TemplateLookup, - docgetter: Optional[DocGetter] = None + docgetter: Optional[util.DocGetter] = None ): super().__init__(ob, template_lookup, docgetter) self.baselists = [] - for baselist in nested_bases(self.ob): - attrs = unmasked_attrs(baselist) + for baselist in util.nested_bases(self.ob): + attrs = util.unmasked_attrs(baselist) if attrs: self.baselists.append((baselist, attrs)) - def extras(self) -> List["Flattenable"]: - r = super().extras() + def extras(self) -> List[Tag]: + r: List[Tag] = [] sourceHref = util.srclink(self.ob) source: "Flattenable" @@ -434,44 +413,39 @@ def extras(self) -> List["Flattenable"]: self.classSignature(), ":", source ))) - scs = sorted(self.ob.subclasses, key=objects_order) - if not scs: - return r - p = assembleList(self.ob.system, "Known subclasses: ", - [o.fullName() for o in scs], self.page_url) - if p is not None: - r.append(tags.p(p)) - - # Add Known aliases, for classes. + # Add Known subclasses + subclasses = sorted(self.ob.subclasses, key=util.objects_order) + if subclasses: + p = assembleList(self.ob.system, "Known subclasses: ", + [o.fullName() for o in subclasses], self.page_url) + if p is not None: + r.append(tags.p(p)) + + # Add Known aliases, for classes. TODO: move this to extra_info aliases = sorted(self.ob.aliases, key=objects_order) - p = assembleList(self.ob.system, "Known aliases: ", - [o.fullName() for o in aliases], self.page_url) - if p is not None: - r.append(tags.p(p)) + if aliases: + p = assembleList(self.ob.system, "Known aliases: ", + [o.fullName() for o in aliases], self.page_url) + if p is not None: + r.append(tags.p(p)) + r.extend(super().extras()) return r def classSignature(self) -> "Flattenable": r: List["Flattenable"] = [] - zipped = list(zip(self.ob.rawbases, self.ob.bases, self.ob.baseobjects)) + _linker = self.ob.docstring_linker + zipped = list(zip(self.ob.rawbases, self.ob.bases)) if zipped: r.append('(') - for idx, (name, full_name, base) in enumerate(zipped): + for idx, (name, full_name) in enumerate(zipped): if idx != 0: r.append(', ') - if base is None: - # External class. - url = self.ob.system.intersphinx.getLink(full_name) - else: - # Internal class. 
- url = base.url - - if url is None: - tag = tags.span - else: - tag = tags.a(href=url) - r.append(tag(name, title=full_name)) + # link to external class or internal class + tag = _linker.link_to(full_name, name) + + r.append(tag(title=full_name)) r.append(')') return r @@ -490,7 +464,7 @@ def baseTables(self, request: object, item: Tag) -> "Flattenable": return [item.clone().fillSlots( baseName=self.baseName(b), baseTable=ChildTable(self.docgetter, self.ob, - sorted(attrs, key=objects_order), + sorted(attrs, key=util.objects_order), loader)) for b, attrs in baselists] @@ -509,10 +483,10 @@ def baseName(self, bases: Sequence[model.Class]) -> "Flattenable": r.extend([' (via ', tail, ')']) return r - def functionExtras(self, ob: model.Documentable) -> List["Flattenable"]: + def objectExtras(self, ob: model.Documentable) -> List[Tag]: page_url = self.page_url name = ob.name - r: List["Flattenable"] = [] + r: List[Tag] = [] for b in self.ob.allbases(include_self=False): if name not in b.contents: continue @@ -520,24 +494,24 @@ def functionExtras(self, ob: model.Documentable) -> List["Flattenable"]: r.append(tags.div(class_="interfaceinfo")( 'overrides ', tags.code(epydoc2stan.taglink(overridden, page_url)))) break - ocs = sorted(overriding_subclasses(self.ob, name), key=objects_order) + ocs = sorted(util.overriding_subclasses(self.ob, name), key=util.objects_order) if ocs: l = assembleList(self.ob.system, 'overridden in ', [o.fullName() for o in ocs], self.page_url) if l is not None: r.append(tags.div(class_="interfaceinfo")(l)) - # Not adding Known aliases here because it would really be too much information. + r.extend(super().objectExtras(ob)) return r class ZopeInterfaceClassPage(ClassPage): ob: zopeinterface.ZopeInterfaceClass - def extras(self) -> List["Flattenable"]: + def extras(self) -> List[Tag]: r = super().extras() if self.ob.isinterface: namelist = [o.fullName() for o in - sorted(self.ob.implementedby_directly, key=objects_order)] + sorted(self.ob.implementedby_directly, key=util.objects_order)] label = 'Known implementations: ' else: namelist = sorted(self.ob.implements_directly, key=lambda x:x.lower()) @@ -560,16 +534,16 @@ def interfaceMeth(self, methname: str) -> Optional[model.Documentable]: return method return None - def functionExtras(self, ob: model.Documentable) -> List["Flattenable"]: + def objectExtras(self, ob: model.Documentable) -> List[Tag]: imeth = self.interfaceMeth(ob.name) - r: List["Flattenable"] = [] + r: List[Tag] = [] if imeth: iface = imeth.parent assert iface is not None r.append(tags.div(class_="interfaceinfo")('from ', tags.code( epydoc2stan.taglink(imeth, self.page_url, iface.fullName()) ))) - r.extend(super().functionExtras(ob)) + r.extend(super().objectExtras(ob)) return r commonpages: 'Final[Mapping[str, Type[CommonPage]]]' = { diff --git a/pydoctor/templatewriter/pages/attributechild.py b/pydoctor/templatewriter/pages/attributechild.py index 65ab5c7c1..e4411e86a 100644 --- a/pydoctor/templatewriter/pages/attributechild.py +++ b/pydoctor/templatewriter/pages/attributechild.py @@ -6,7 +6,7 @@ from pydoctor.model import Attribute, DocumentableKind from pydoctor import epydoc2stan from pydoctor.templatewriter import TemplateElement, util -from pydoctor.templatewriter.pages import DocGetter, format_decorators +from pydoctor.templatewriter.pages import format_decorators if TYPE_CHECKING: from twisted.web.template import Flattenable @@ -17,9 +17,9 @@ class AttributeChild(TemplateElement): filename = 'attribute-child.html' def __init__(self, - docgetter: 
DocGetter, + docgetter: util.DocGetter, ob: Attribute, - extras: "Flattenable", + extras: List[Tag], loader: ITemplateLoader ): super().__init__(loader) @@ -62,7 +62,7 @@ def sourceLink(self, request: object, tag: Tag) -> "Flattenable": return () @renderer - def functionExtras(self, request: object, tag: Tag) -> "Flattenable": + def objectExtras(self, request: object, tag: Tag) -> List[Tag]: return self._functionExtras @renderer @@ -70,15 +70,7 @@ def functionBody(self, request: object, tag: Tag) -> "Flattenable": return self.docgetter.get(self.ob) @renderer - def functionDeprecated(self, request: object, tag: Tag) -> "Flattenable": - msg = self.ob._deprecated_info - if msg is None: - return () - else: - return tags.div(msg, role="alert", class_="deprecationNotice alert alert-warning") - - @renderer - def value(self, request: object, tag: Tag) -> "Flattenable": + def constantValue(self, request: object, tag: Tag) -> "Flattenable": if self.ob.value is not None: if self.ob.kind is DocumentableKind.CONSTANT: # Attribute is a constant (with a value), then display it's value @@ -89,4 +81,4 @@ def value(self, request: object, tag: Tag) -> "Flattenable": else: return '' else: - return '' \ No newline at end of file + return '' diff --git a/pydoctor/templatewriter/pages/functionchild.py b/pydoctor/templatewriter/pages/functionchild.py index c42f28df4..c067d6faf 100644 --- a/pydoctor/templatewriter/pages/functionchild.py +++ b/pydoctor/templatewriter/pages/functionchild.py @@ -1,11 +1,11 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, List from twisted.web.iweb import ITemplateLoader from twisted.web.template import Tag, renderer, tags from pydoctor.model import Function from pydoctor.templatewriter import TemplateElement, util -from pydoctor.templatewriter.pages import DocGetter, format_decorators, format_signature +from pydoctor.templatewriter.pages import format_decorators, format_signature if TYPE_CHECKING: from twisted.web.template import Flattenable @@ -16,9 +16,9 @@ class FunctionChild(TemplateElement): filename = 'function-child.html' def __init__(self, - docgetter: DocGetter, + docgetter: util.DocGetter, ob: Function, - extras: "Flattenable", + extras: List[Tag], loader: ITemplateLoader ): super().__init__(loader) @@ -65,17 +65,10 @@ def sourceLink(self, request: object, tag: Tag) -> "Flattenable": return () @renderer - def functionExtras(self, request: object, tag: Tag) -> "Flattenable": + def objectExtras(self, request: object, tag: Tag) -> List[Tag]: return self._functionExtras @renderer def functionBody(self, request: object, tag: Tag) -> "Flattenable": return self.docgetter.get(self.ob) - @renderer - def functionDeprecated(self, request: object, tag: Tag) -> "Flattenable": - msg = self.ob._deprecated_info - if msg is None: - return () - else: - return tags.div(msg, role="alert", class_="deprecationNotice alert alert-warning") diff --git a/pydoctor/templatewriter/pages/sidebar.py b/pydoctor/templatewriter/pages/sidebar.py new file mode 100644 index 000000000..84f9ecc49 --- /dev/null +++ b/pydoctor/templatewriter/pages/sidebar.py @@ -0,0 +1,419 @@ +""" +Classes for the sidebar generation. 
+""" +from typing import Any, Iterable, Iterator, List, Optional, Tuple, Type, Union +from twisted.web.iweb import IRequest, ITemplateLoader +from twisted.web.template import TagLoader, renderer, Tag, Element, tags + +from pydoctor import epydoc2stan +from pydoctor.model import Attribute, Class, Function, Documentable, Module +from pydoctor.templatewriter import util, TemplateLookup, TemplateElement + +class SideBar(TemplateElement): + """ + Sidebar. + + Contains: + - the object docstring table of contents if titles are defined + - for classes: + - information about the contents of the current class and parent module/package. + - for modules/packages: + - information about the contents of the module and parent package. + """ + + filename = 'sidebar.html' + + def __init__(self, ob: Documentable, template_lookup: TemplateLookup): + super().__init__(loader=self.lookup_loader(template_lookup)) + self.ob = ob + self.template_lookup = template_lookup + + + @renderer + def sections(self, request: IRequest, tag: Tag) -> Iterator['SideBarSection']: + """ + Sections are L{SideBarSection} elements. + """ + + # The object itself + yield SideBarSection(loader=TagLoader(tag), ob=self.ob, + documented_ob=self.ob, template_lookup=self.template_lookup) + + parent: Optional[Documentable] = None + if isinstance(self.ob, Module): + # The object is a module, we document the parent package in the second section (if it's not a root module). + if self.ob.parent: + parent = self.ob.parent + else: + # The object is a class/function or attribute, we docuement the module that contains the object, not it's direct parent. + # + parent = self.ob.module + + if parent: + yield SideBarSection(loader=TagLoader(tag), ob=parent, + documented_ob=self.ob, template_lookup=self.template_lookup) +class SideBarSection(Element): + """ + Main sidebar section. + + The sidebar typically contains two C{SideBarSection}: one for the documented object and one for it's parent. + Root modules have only one section. + """ + + def __init__(self, ob: Documentable, documented_ob: Documentable, + loader: ITemplateLoader, template_lookup: TemplateLookup): + super().__init__(loader) + self.ob = ob + self.documented_ob = documented_ob + self.template_lookup = template_lookup + + # Does this sidebar section represents the object itself ? + self._represents_documented_ob = self.ob == self.documented_ob + + @renderer + def kind(self, request: IRequest, tag: Tag) -> str: + return epydoc2stan.format_kind(self.ob.kind) if self.ob.kind else 'Unknown kind' + + @renderer + def name(self, request: IRequest, tag: Tag) -> Tag: + """Craft a block for the title with custom description when hovering. """ + name = self.ob.name + link = epydoc2stan.taglink(self.ob, self.ob.page_object.url, + epydoc2stan.insert_break_points(name)) + tag = tags.code(link(title=self.description())) + if self._represents_documented_ob: + tag(class_='thisobject') + return tag + + def description(self) -> str: + """ + Short description of the sidebar section. 
+ """ + return (f"This {epydoc2stan.format_kind(self.documented_ob.kind).lower() if self.documented_ob.kind else 'object'}" if self._represents_documented_ob + else f"The parent of this {epydoc2stan.format_kind(self.documented_ob.kind).lower() if self.documented_ob.kind else 'object'}" + if self.ob in [self.documented_ob.parent, self.documented_ob.module.parent] else "") + + @renderer + def content(self, request: IRequest, tag: Tag) -> 'ObjContent': + + return ObjContent(ob=self.ob, + loader=TagLoader(tag), + documented_ob=self.documented_ob, + template_lookup=self.template_lookup, + depth=self.ob.system.options.sidebarexpanddepth) + +class ObjContent(Element): + """ + Object content displayed on the sidebar. + + Each L{SideBarSection} object uses one of these in the L{SideBarSection.content} renderer. + This object is also used to represent the contents of nested expandable items. + + Composed by L{ContentList} elements. + """ + + #TODO: Hide the childrenKindTitle if they are all private and show private is off -> need JS + + def __init__(self, loader: ITemplateLoader, ob: Documentable, documented_ob: Documentable, + template_lookup: TemplateLookup, depth: int, level: int = 0): + + super().__init__(loader) + self.ob = ob + self.documented_ob = documented_ob + self.template_lookup = template_lookup + + self._depth = depth + self._level = level + 1 + + self.classList = self._getListOf(Class) + self.functionList = self._getListOf(Function) + self.variableList = self._getListOf(Attribute) + self.subModuleList = self._getListOf(Module) + + self.inheritedFunctionList = self._getListOf(Function, inherited=True) if isinstance(self.ob, Class) else None + self.inheritedVariableList = self._getListOf(Attribute, inherited=True) if isinstance(self.ob, Class) else None + + def _getListOf(self, type_: Type[Documentable], + inherited: bool = False) -> Optional['ContentList']: + children = self._children(inherited=inherited) + if children: + things = [ child for child in children if isinstance(child, type_) ] + return self._getListFrom(things, expand=self._isExpandable(type_)) + else: + return None + + #TODO: ensure not to crash if heterogeneous Documentable types are passed + + def _getListFrom(self, things: List[Documentable], expand: bool) -> Optional['ContentList']: + if things: + assert self.loader is not None + return ContentList(ob=self.ob, children=things, + documented_ob=self.documented_ob, + expand=expand, + nested_content_loader=self.loader, + template_lookup=self.template_lookup, + level_depth=(self._level, self._depth)) + else: + return None + + + def _children(self, inherited: bool = False) -> Optional[List[Documentable]]: + """ + Compute the children of this object. + """ + if inherited: + if isinstance(self.ob, Class): + children : List[Documentable] = [] + for baselist in util.nested_bases(self.ob): + # If the class has super class + if len(baselist) >= 2: + attrs = util.unmasked_attrs(baselist) + if attrs: + children.extend(attrs) + return sorted((o for o in children if o.isVisible), + key=util.objects_order) + else: + return None + else: + return sorted((o for o in self.ob.contents.values() if o.isVisible), + key=util.objects_order) + + def _isExpandable(self, list_type: Type[Documentable]) -> bool: + """ + Should the list items be expandable? + """ + + can_be_expanded = False + + # Classes, modules and packages can be expanded in the sidebar. 
+ if issubclass(list_type, (Class, Module)): + can_be_expanded = True + + return self._level < self._depth and can_be_expanded + + @renderer + def docstringToc(self, request: IRequest, tag: Tag) -> Union[Tag, str]: + + toc = util.DocGetter().get_toc(self.ob) + + # Only show the TOC if visiting the object page itself, in other words, the TOC do dot show up + # in the object's parent section or any other subsections except the main one. + if toc and self.documented_ob == self.ob: + return tag.fillSlots(titles=toc) + else: + return "" + + @renderer + def classesTitle(self, request: IRequest, tag: Tag) -> Union[Tag, str]: + return tag.clear()("Classes") if self.classList else "" + + @renderer + def classes(self, request: IRequest, tag: Tag) -> Union[Element, str]: + return self.classList or "" + + @renderer + def functionsTitle(self, request: IRequest, tag: Tag) -> Union[Tag, str]: + return (tag.clear()("Functions") if not isinstance(self.ob, Class) + else tag.clear()("Methods")) if self.functionList else "" + + @renderer + def functions(self, request: IRequest, tag: Tag) -> Union[Element, str]: + return self.functionList or "" + + @renderer + def inheritedFunctionsTitle(self, request: IRequest, tag: Tag) -> Union[Tag, str]: + return tag.clear()("Inherited Methods") if self.inheritedFunctionList else "" + + @renderer + def inheritedFunctions(self, request: IRequest, tag: Tag) -> Union[Element, str]: + return self.inheritedFunctionList or "" + + @renderer + def variablesTitle(self, request: IRequest, tag: Tag) -> Union[Tag, str]: + return (tag.clear()("Variables") if not isinstance(self.ob, Class) + else tag.clear()("Attributes")) if self.variableList else "" + + @renderer + def variables(self, request: IRequest, tag: Tag) -> Union[Element, str]: + return self.variableList or "" + + @renderer + def inheritedVariablesTitle(self, request: IRequest, tag: Tag) -> Union[Tag, str]: + return tag.clear()("Inherited Attributes") if self.inheritedVariableList else "" + + @renderer + def inheritedVariables(self, request: IRequest, tag: Tag) -> Union[Element, str]: + return self.inheritedVariableList or "" + + @renderer + def subModulesTitle(self, request: IRequest, tag: Tag) -> Union[Tag, str]: + return tag.clear()("Modules") if self.subModuleList else "" + + @renderer + def subModules(self, request: IRequest, tag: Tag) -> Union[Element, str]: + return self.subModuleList or "" + + @property + def has_contents(self) -> bool: + return bool(self.classList or self.functionList or self.variableList or self.subModuleList or self.inheritedFunctionList or self.inheritedVariableList) + +class ContentList(TemplateElement): + """ + List of child objects that share the same type. 
+ + One L{ObjContent} element can have up to six C{ContentList}: + - classes + - functions/methods + - variables/attributes + - modules + - inherited attributes + - inherited methods + """ + # one table per module children types: classes, functions, variables, modules + + filename = 'sidebar-list.html' + + def __init__(self, ob: Documentable, + children: Iterable[Documentable], documented_ob: Documentable, + expand: bool, nested_content_loader: ITemplateLoader, template_lookup: TemplateLookup, + level_depth: Tuple[int, int]): + super().__init__(loader=self.lookup_loader(template_lookup)) + self.ob = ob + self.children = children + self.documented_ob = documented_ob + + self._expand = expand + self._level_depth = level_depth + + self.nested_content_loader = nested_content_loader + self.template_lookup = template_lookup + + @renderer + def items(self, request: IRequest, tag: Tag) -> Iterable['ContentItem']: + + for child in self.children: + + yield ContentItem( + loader=TagLoader(tag), + ob=self.ob, + child=child, + documented_ob=self.documented_ob, + expand=self._expand, + nested_content_loader=self.nested_content_loader, + template_lookup=self.template_lookup, + level_depth=self._level_depth) + + +class ContentItem(Element): + """ + L{ContentList} item. + """ + + + def __init__(self, loader: ITemplateLoader, ob: Documentable, child: Documentable, documented_ob: Documentable, + expand: bool, nested_content_loader: ITemplateLoader, + template_lookup: TemplateLookup, level_depth: Tuple[int, int]): + + super().__init__(loader) + self.child = child + self.ob = ob + self.documented_ob = documented_ob + + self._expand = expand + self._level_depth = level_depth + + self.nested_content_loader = nested_content_loader + self.template_lookup = template_lookup + + @renderer + def class_(self, request: IRequest, tag: Tag) -> str: + class_ = '' + # We could keep same style as in the summary table. + # But I found it a little bit too colorful. + if self.child.isPrivate: + class_ += "private" + if self.child == self.documented_ob: + class_ += " thisobject" + return class_ + + def _contents(self) -> ObjContent: + + return ObjContent(ob=self.child, + loader=self.nested_content_loader, + documented_ob=self.documented_ob, + level=self._level_depth[0], + depth=self._level_depth[1], + template_lookup=self.template_lookup) + + @renderer + def expandableItem(self, request: IRequest, tag: Tag) -> Union[str, 'ExpandableItem']: + if self._expand: + nested_contents = self._contents() + + # pass do_not_expand=True also when an object do not have any members, + # instead of expanding on an empty div. + return ExpandableItem(TagLoader(tag), self.child, self.documented_ob, nested_contents, + do_not_expand=self.child == self.documented_ob or not nested_contents.has_contents) + else: + return "" + + @renderer + def linkOnlyItem(self, request: IRequest, tag: Tag) -> Union[str, 'LinkOnlyItem']: + if not self._expand: + return LinkOnlyItem(TagLoader(tag), self.child, self.documented_ob) + else: + return "" + +class LinkOnlyItem(Element): + """ + Sidebar leaf item: just a link to an object. 
+ + Used by L{ContentItem.linkOnlyItem} + """ + + def __init__(self, loader: ITemplateLoader, child: Documentable, documented_ob: Documentable): + super().__init__(loader) + self.child = child + self.documented_ob = documented_ob + @renderer + def name(self, request: IRequest, tag: Tag) -> Tag: + return tags.code(epydoc2stan.taglink(self.child, self.documented_ob.page_object.url, + epydoc2stan.insert_break_points(self.child.name))) + +class ExpandableItem(LinkOnlyItem): + """ + Sidebar expandable item: link to an object and have a triangle that expand/collapse it's contents + + Used by L{ContentItem.expandableItem} + + @note: ExpandableItem can be created with C{do_not_expand} flag. + This will generate a expandable item with a special C{notExpandable} CSS class. + It differs from L{LinkOnlyItem}, wich do not show the expand button, + here we show it but we make it unusable by assinging an empty CSS ID. + """ + + last_ExpandableItem_id = 0 + + def __init__(self, loader: ITemplateLoader, child: Documentable, documented_ob: Documentable, + contents: ObjContent, do_not_expand: bool = False): + super().__init__(loader, child, documented_ob) + self._contents = contents + self._do_not_expand = do_not_expand + ExpandableItem.last_ExpandableItem_id += 1 + self._id = ExpandableItem.last_ExpandableItem_id + @renderer + def labelClass(self, request: IRequest, tag: Tag) -> str: + assert all(isinstance(child, str) for child in tag.children) + classes: List[Any] = tag.children + if self._do_not_expand: + classes.append('notExpandable') + return ' '.join(classes) + @renderer + def contents(self, request: IRequest, tag: Tag) -> ObjContent: + return self._contents + @renderer + def expandableItemId(self, request: IRequest, tag: Tag) -> str: + return f"expandableItemId{self._id}" + @renderer + def labelForExpandableItemId(self, request: IRequest, tag: Tag) -> str: + return f"expandableItemId{self._id}" if not self._do_not_expand else "" diff --git a/pydoctor/templatewriter/pages/table.py b/pydoctor/templatewriter/pages/table.py index e7a2d5f98..0099eb7df 100644 --- a/pydoctor/templatewriter/pages/table.py +++ b/pydoctor/templatewriter/pages/table.py @@ -9,14 +9,13 @@ if TYPE_CHECKING: from twisted.web.template import Flattenable - from pydoctor.templatewriter.pages import DocGetter class TableRow(Element): def __init__(self, loader: ITemplateLoader, - docgetter: "DocGetter", + docgetter: util.DocGetter, ob: Documentable, child: Documentable, ): @@ -42,12 +41,13 @@ def kind(self, request: object, tag: Tag) -> Tag: # The official name is "coroutine function", but that is both # a bit long and not as widely recognized. 
kind_name = f'Async {kind_name}' + return tag.clear()(kind_name) @renderer def name(self, request: object, tag: Tag) -> Tag: return tag.clear()(tags.code( - epydoc2stan.taglink(self.child, self.ob.url, self.child.name) + epydoc2stan.taglink(self.child, self.ob.url, epydoc2stan.insert_break_points(self.child.name)) )) @renderer @@ -56,22 +56,23 @@ def summaryDoc(self, request: object, tag: Tag) -> Tag: class ChildTable(TemplateElement): + last_id = 0 filename = 'table.html' def __init__(self, - docgetter: "DocGetter", + docgetter: util.DocGetter, ob: Documentable, children: Collection[Documentable], loader: ITemplateLoader, ): super().__init__(loader) - self.docgetter = docgetter self.children = children ChildTable.last_id += 1 self._id = ChildTable.last_id self.ob = ob + self.docgetter = docgetter @renderer def id(self, request: object, tag: Tag) -> str: diff --git a/pydoctor/templatewriter/search.py b/pydoctor/templatewriter/search.py new file mode 100644 index 000000000..62545a6c3 --- /dev/null +++ b/pydoctor/templatewriter/search.py @@ -0,0 +1,179 @@ +""" +Code building ``all-documents.html``, ``searchindex.json`` and ``fullsearchindex.json``. +""" + +from pathlib import Path +from typing import Iterable, Iterator, List, Optional, Tuple, Type, Dict, TYPE_CHECKING +import json + +import attr + +from pydoctor.templatewriter.pages import Page +from pydoctor import model, epydoc2stan, node2stan + +from twisted.web.template import Tag, renderer +from lunr import lunr, get_default_builder, stop_word_filter, stemmer + +if TYPE_CHECKING: + from twisted.web.template import Flattenable + +def get_all_documents_flattenable(system: model.System) -> List[Dict[str, "Flattenable"]]: + """ + Get the all data to be writen into ``all-documents.html`` file. + """ + documents: List[Dict[str, "Flattenable"]] = [dict( + id=ob.fullName(), + name=epydoc2stan.insert_break_points(ob.name), + fullName=epydoc2stan.insert_break_points(ob.fullName()), + kind=epydoc2stan.format_kind(ob.kind) if ob.kind else '', + type=str(ob.__class__.__name__), + summary=epydoc2stan.format_summary(ob), + url=ob.url, + privacy=str(ob.privacyClass.name)) + + for ob in system.allobjects.values() if ob.isVisible] + return documents + +class AllDocuments(Page): + + filename = 'all-documents.html' + + def title(self) -> str: + return "All Documents" + + @renderer + def documents(self, request: None, tag: Tag) -> Iterable[Tag]: + for doc in get_all_documents_flattenable(self.system): + yield tag.clone().fillSlots(**doc) + +@attr.s(auto_attribs=True) +class LunrIndexWriter: + """ + Class to write lunr indexes with configurable fields. + """ + + output_file: Path + system: model.System + fields: List[str] + + _BOOSTS = { + 'name':4, + 'names': 2, + 'qname':1, + 'docstring':1, + 'kind':-1 + } + + @staticmethod + def get_ob_boost(ob: model.Documentable) -> int: + # Advantage container types because they hold more informations. 
+ if isinstance(ob, (model.Class, model.Module)): + return 2 + else: + return 1 + + def format(self, ob: model.Documentable, field:str) -> Optional[str]: + try: + return getattr(self, f'format_{field}')(ob) #type:ignore[no-any-return] + except AttributeError as e: + raise AssertionError() from e + + def format_name(self, ob: model.Documentable) -> str: + return ob.name + + def format_names(self, ob: model.Documentable) -> str: + return ' '.join(stem_identifier(ob.name)) + + def format_qname(self, ob: model.Documentable) -> str: + return ob.fullName() + + def format_docstring(self, ob: model.Documentable) -> Optional[str]: + # sanitize docstring in a proper way to be more easily indexable by lunr. + doc = None + source = epydoc2stan.ensure_parsed_docstring(ob) + if source is not None: + assert ob.parsed_docstring is not None + try: + doc = ' '.join(node2stan.gettext(ob.parsed_docstring.to_node())) + except NotImplementedError: + # some ParsedDocstring subclass raises NotImplementedError on calling to_node() + # Like ParsedPlaintextDocstring. + doc = source.docstring + return doc + + def format_kind(self, ob:model.Documentable) -> str: + return epydoc2stan.format_kind(ob.kind) if ob.kind else '' + + def get_corpus(self) -> List[Tuple[Dict[str, Optional[str]], Dict[str, int]]]: + + documents: List[Tuple[Dict[str, Optional[str]], Dict[str, int]]] = [] + + for ob in (o for o in self.system.allobjects.values() if o.isVisible): + + documents.append( + ( + { + f:self.format(ob, f) for f in self.fields + }, + { + "boost": self.get_ob_boost(ob) + } + ) + ) + + return documents + + def write(self) -> None: + + builder = get_default_builder() + + # Skip some pipelines for better UX + # https://lunr.readthedocs.io/en/latest/customisation.html#skip-a-pipeline-function-for-specific-field-names + + # We want classes named like "For" to be indexed with their name, even if it's matching stop words. + builder.pipeline.skip(stop_word_filter.stop_word_filter, ["qname", "name", "kind", "names"]) + + # We don't want "name" and related fields to be stemmed since the field "names" + # contains all cased breaked combinaisons and will be stemmed. + builder.pipeline.skip(stemmer.stemmer, ["name", "kind", "qname"]) + + # Removing the stemmer from the search pipeline, see https://github.com/yeraydiazdiaz/lunr.py/issues/112 + builder.search_pipeline.remove(stemmer.stemmer) + + index = lunr( + ref='qname', + fields=[{'field_name':name, 'boost':self._BOOSTS[name]} for name in self.fields], + documents=self.get_corpus(), + builder=builder) + + serialized_index = json.dumps(index.serialize()) + + with self.output_file.open('w', encoding='utf-8') as fobj: + fobj.write(serialized_index) + +# https://lunr.readthedocs.io/en/latest/ +def write_lunr_index(output_dir: Path, system: model.System) -> None: + """ + Write ``searchindex.json`` and ``fullsearchindex.json`` to the output directory. + + @arg output_dir: Output directory. + @arg system: System. 
+ """ + LunrIndexWriter(output_dir / "searchindex.json", + system=system, + fields=["name", "names", "qname"] + ).write() + + LunrIndexWriter(output_dir / "fullsearchindex.json", + system=system, + fields=["name", "names", "qname", "docstring", "kind"] + ).write() + +def stem_identifier(identifier: str) -> Iterator[str]: + parts = epydoc2stan._split_indentifier_parts_on_case(identifier) + for p in parts: + p = p.strip('_') + if p and p.lower() not in stop_word_filter.WORDS: + yield p + +searchpages: List[Type[Page]] = [AllDocuments] diff --git a/pydoctor/templatewriter/summary.py b/pydoctor/templatewriter/summary.py index 2eed7c7de..142282c28 100644 --- a/pydoctor/templatewriter/summary.py +++ b/pydoctor/templatewriter/summary.py @@ -12,31 +12,44 @@ from pydoctor import epydoc2stan, model from pydoctor.templatewriter import TemplateLookup -from pydoctor.templatewriter.pages import Page +from pydoctor.templatewriter.pages import Page, objects_order if TYPE_CHECKING: from twisted.web.template import Flattenable - from typing_extensions import Final def moduleSummary(module: model.Module, page_url: str) -> Tag: r: Tag = tags.li( - tags.code(epydoc2stan.taglink(module, page_url)), ' - ', + tags.code(epydoc2stan.taglink(module, page_url, label=module.name)), ' - ', epydoc2stan.format_summary(module) ) if module.isPrivate: r(class_='private') if not isinstance(module, model.Package): return r - contents = [m for m in module.contents.values() - if isinstance(m, model.Module) and m.isVisible] + contents = list(module.submodules()) if not contents: return r + ul = tags.ul() - def fullName(obj: model.Documentable) -> str: - return obj.fullName() - for m in sorted(contents, key=fullName): - ul(moduleSummary(m, page_url)) + + if len(contents) > 50 and not any(any(s.submodules()) for s in contents): + # If there are more than 50 modules and no submodule has + # further submodules we use a more compact presentation. 
+ li = tags.li(class_='compact-modules') + for m in sorted(contents, key=objects_order): + span = tags.span() + span(tags.code(epydoc2stan.taglink(m, m.url, label=m.name))) + span(', ') + if m.isPrivate: + span(class_='private') + li(span) + # remove the last trailing comma + li.children[-1].children.pop() # type: ignore + ul(li) + else: + for m in sorted(contents, key=objects_order): + ul(moduleSummary(m, page_url)) r(ul) return r @@ -279,23 +292,6 @@ class IndexPage(Page): def title(self) -> str: return f"API Documentation for {self.system.projectname}" - @renderer - def onlyIfOneRoot(self, request: object, tag: Tag) -> "Flattenable": - if len(self.system.rootobjects) != 1: - return [] - else: - root, = self.system.rootobjects - return tag.clear()( - "Start at ", tags.code(epydoc2stan.taglink(root, self.filename)), - ", the root ", epydoc2stan.format_kind(root.kind).lower(), ".") - - @renderer - def onlyIfMultipleRoots(self, request: object, tag: Tag) -> "Flattenable": - if len(self.system.rootobjects) == 1: - return [] - else: - return tag - @renderer def roots(self, request: object, tag: Tag) -> "Flattenable": r = [] @@ -307,10 +303,10 @@ def roots(self, request: object, tag: Tag) -> "Flattenable": @renderer def rootkind(self, request: object, tag: Tag) -> Tag: - return tag.clear()('/'.join(sorted( - epydoc2stan.format_kind(o.kind, plural=True).lower() - for o in self.system.rootobjects - ))) + rootkinds = sorted(set([o.kind for o in self.system.rootobjects]), key=lambda k:k.name) + return tag.clear()('/'.join( + epydoc2stan.format_kind(o, plural=True).lower() + for o in rootkinds )) def hasdocstring(ob: model.Documentable) -> bool: @@ -350,10 +346,13 @@ def stuff(self, request: object, tag: Tag) -> Tag: )) return tag -summarypages: 'Final[Iterable[Type[Page]]]' = [ - ModuleIndexPage, - ClassIndexPage, - IndexPage, - NameIndexPage, - UndocumentedSummaryPage, +def summaryPages(system: model.System) -> Iterable[Type[Page]]: + pages = [ + ModuleIndexPage, + ClassIndexPage, + NameIndexPage, + UndocumentedSummaryPage, ] + if len(system.root_names) > 1: + pages.append(IndexPage) + return pages diff --git a/pydoctor/templatewriter/util.py b/pydoctor/templatewriter/util.py index c893afed8..e8fbb4e2d 100644 --- a/pydoctor/templatewriter/util.py +++ b/pydoctor/templatewriter/util.py @@ -1,10 +1,31 @@ """Miscellaneous utilities for the HTML writer.""" import warnings +from typing import (Any, Dict, Generic, Iterable, Iterator, Mapping, + Optional, MutableMapping, Tuple, TypeVar, Union, Sequence) +from pydoctor import epydoc2stan import collections.abc -from typing import Any, Dict, Generic, Iterable, Iterator, Mapping, Optional, MutableMapping, Tuple, TypeVar, Union from pydoctor import model -from pydoctor.epydoc2stan import format_kind + +from twisted.web.template import Tag + +class DocGetter: + """L{epydoc2stan} bridge.""" + def get(self, ob: model.Documentable, summary: bool = False) -> Tag: + if summary: + return epydoc2stan.format_summary(ob) + else: + return epydoc2stan.format_docstring(ob) + def get_type(self, ob: model.Documentable) -> Optional[Tag]: + return epydoc2stan.type2stan(ob) + def get_toc(self, ob: model.Documentable) -> Optional[Tag]: + return epydoc2stan.format_toc(ob) + +def srclink(o: model.Documentable) -> Optional[str]: + """ + Get object source code URL, i.e. hosted on github. 
+    """
+    return o.sourceHref

 def css_class(o: model.Documentable) -> str:
     """
@@ -13,13 +34,68 @@ def css_class(o: model.Documentable) -> str:
     """
     kind = o.kind
     assert kind is not None # if kind is None, object is invisible
-    class_ = format_kind(kind).lower().replace(' ', '')
+    class_ = epydoc2stan.format_kind(kind).lower().replace(' ', '')
     if o.privacyClass is model.PrivacyClass.PRIVATE:
         class_ += ' private'
-    return class_
+    return class_

-def srclink(o: model.Documentable) -> Optional[str]:
-    return o.sourceHref
+def overriding_subclasses(
+        classobj: model.Class,
+        name: str,
+        firstcall: bool = True
+    ) -> Iterator[model.Class]:
+    """
+    Helper function to retrieve the subclasses that override the given name from the parent class object.
+    """
+    if not firstcall and name in classobj.contents:
+        yield classobj
+    else:
+        for subclass in classobj.subclasses:
+            if subclass.isVisible:
+                yield from overriding_subclasses(subclass, name, firstcall=False)
+
+def nested_bases(classobj: model.Class) -> Iterator[Tuple[model.Class, ...]]:
+    """
+    Helper function to retrieve the complete list of base class chains (represented by tuples) for a given Class.
+    A chain of classes is used to compute the member inheritance from the first element to the last element of the chain.
+
+    The first yielded chain only contains the Class itself.
+
+    Then for each of the super-classes:
+        - the next yielded chain contains the super class and the class itself,
+        - the next yielded chain contains the super-super class, the super class and the class itself, etc.
+    """
+    yield (classobj,)
+    for base in classobj.baseobjects:
+        if base is None:
+            continue
+        for nested_base in nested_bases(base):
+            yield (nested_base + (classobj,))
+
+def unmasked_attrs(baselist: Sequence[model.Class]) -> Sequence[model.Documentable]:
+    """
+    Helper function to retrieve the list of inherited children given a base class chain (as yielded by L{nested_bases}).
+    The returned members are inherited from the Class listed first in the chain to the Class listed last: they are not overridden in between.
+    """
+    maybe_masking = {
+        o.name
+        for b in baselist[1:]
+        for o in b.contents.values()
+    }
+    return [o for o in baselist[0].contents.values()
+            if o.isVisible and o.name not in maybe_masking]
+
+def objects_order(o: model.Documentable) -> Tuple[int, int, str]:
+    """
+    Function to use as the value of the standard library's L{sorted} function C{key} argument
+    such that the objects are sorted by: Privacy, Kind and Name.
+ + Example:: + + children = sorted((o for o in ob.contents.values() if o.isVisible), + key=objects_order) + """ + return (-o.privacyClass.value, -o.kind.value if o.kind else 0, o.fullName().lower()) def templatefile(filename: str) -> None: """Deprecated: can be removed once Twisted stops patching this.""" diff --git a/pydoctor/templatewriter/writer.py b/pydoctor/templatewriter/writer.py index b310f913c..0d45231de 100644 --- a/pydoctor/templatewriter/writer.py +++ b/pydoctor/templatewriter/writer.py @@ -1,12 +1,13 @@ """Badly named module that contains the driving code for the rendering.""" +import itertools from pathlib import Path from typing import IO, Iterable, Type, TYPE_CHECKING from pydoctor import model from pydoctor.templatewriter import ( - DOCTYPE, pages, summary, TemplateLookup, IWriter, StaticTemplate + DOCTYPE, pages, summary, search, TemplateLookup, IWriter, StaticTemplate ) from twisted.python.failure import Failure @@ -82,13 +83,31 @@ def writeIndividualFiles(self, obs: Iterable[model.Documentable]) -> None: def writeSummaryPages(self, system: model.System) -> None: import time - for pclass in summary.summarypages: + for pclass in itertools.chain(summary.summaryPages(system), search.searchpages): system.msg('html', 'starting ' + pclass.__name__ + ' ...', nonl=True) T = time.time() page = pclass(system=system, template_lookup=self.template_lookup) with self.build_directory.joinpath(pclass.filename).open('wb') as fobj: flattenToFile(fobj, page) system.msg('html', "took %fs"%(time.time() - T), wantsnl=False) + + # Generate the searchindex.json file + system.msg('html', 'starting lunr search index ...', nonl=True) + T = time.time() + search.write_lunr_index(self.build_directory, system=system) + system.msg('html', "took %fs"%(time.time() - T), wantsnl=False) + + if len(system.root_names) == 1: + # If there is just a single root module it is written to index.html to produce nicer URLs. + # To not break old links we also create a symlink from the full module name to the index.html + # file. This is also good for consistency: every module is accessible by .html + root_module_path = (self.build_directory / (list(system.root_names)[0] + '.html')) + try: + root_module_path.unlink() + # not using missing_ok=True because that was only added in Python 3.8 and we still support Python 3.6 + except FileNotFoundError: + pass + root_module_path.symlink_to('index.html') def _writeDocsFor(self, ob: model.Documentable) -> None: if not ob.isVisible: @@ -97,7 +116,7 @@ def _writeDocsFor(self, ob: model.Documentable) -> None: if self.dry_run: self.total_pages += 1 else: - with self.build_directory.joinpath(f'{ob.fullName()}.html').open('wb') as fobj: + with self.build_directory.joinpath(ob.url).open('wb') as fobj: self._writeDocsForOne(ob, fobj) for o in ob.contents.values(): self._writeDocsFor(o) diff --git a/pydoctor/test/__init__.py b/pydoctor/test/__init__.py index 94dd6ff5a..1748d0ca9 100644 --- a/pydoctor/test/__init__.py +++ b/pydoctor/test/__init__.py @@ -94,3 +94,4 @@ def link_xref(self, target: str, label: "Flattenable", lineno: int) -> Tag: def resolve_identifier(self, identifier: str) -> Optional[str]: return None + \ No newline at end of file diff --git a/pydoctor/test/epydoc/restructuredtext.doctest b/pydoctor/test/epydoc/restructuredtext.doctest index 9438f8c77..a9aee4e37 100644 --- a/pydoctor/test/epydoc/restructuredtext.doctest +++ b/pydoctor/test/epydoc/restructuredtext.doctest @@ -104,3 +104,30 @@ as colorized Python code. 
class Foo: def __init__(self): pass +>>> p = restructuredtext.parse_docstring( +... """The directives options are ignored and do not show up in the HTML. +... +... .. code:: python +... :number-lines: +... :linenos: +... +... # This is some Python code +... def foo(): +... pass +... +... class Foo: +... def __init__(self): +... pass +... """, err) +>>> err +[] +>>> print(flatten(p.to_stan(None))) +

The directives options are ignored and do not show up in the HTML.

+
+# This is some Python code
+def foo():
+    pass
+
+class Foo:
+    def __init__(self):
+        pass
\ No newline at end of file diff --git a/pydoctor/test/epydoc/test_epytext.py b/pydoctor/test/epydoc/test_epytext.py index 4fd98a6c0..3cf042356 100644 --- a/pydoctor/test/epydoc/test_epytext.py +++ b/pydoctor/test/epydoc/test_epytext.py @@ -84,3 +84,6 @@ def test_literal_braces() -> None: assert epytext2html("{1:C{{2:3}}}") == '{1:{2:3}}' assert epytext2html("{{{}{}}{}}") == '{{{}{}}{}}' assert epytext2html("{{E{lb}E{lb}E{lb}}}") == '{{{{{}}' + +def test_slugify() -> None: + assert epytext.slugify("Héllo Wörld 1.2.3") == "hello-world-123" diff --git a/pydoctor/test/epydoc/test_epytext2html.py b/pydoctor/test/epydoc/test_epytext2html.py index f175a1524..7bc5a94cd 100644 --- a/pydoctor/test/epydoc/test_epytext2html.py +++ b/pydoctor/test/epydoc/test_epytext2html.py @@ -150,14 +150,14 @@ def test_epytext_sections() -> None:

This paragraph is not in any section.

-
+

Section 1

This is a paragraph in section 1.

-
+

Section 1.1

@@ -166,7 +166,7 @@ def test_epytext_sections() -> None:

-
+

Section 2

@@ -288,3 +288,87 @@ def test_nested_markup() -> None: ''' assert epytext2html(doc) == squash(expected) + +def test_get_toc() -> None: + + docstring = """ +Titles +====== + +Level 2 +------- + +Level 3 +~~~~~~~ + +Level 4 +^^^^^^^ + +Level 5 +!!!!!!! + +Level 2.2 +--------- + +Level 22 +-------- + +Lists +===== + +Other +===== +""" + + errors: List[ParseError] = [] + parsed = parse_docstring(docstring, errors) + assert not errors, [str(e.descr()) for e in errors] + + toc = parsed.get_toc(4) + assert toc is not None + html = flatten(toc.to_stan(NotFoundLinker())) + + expected_html=""" +
  • +

    + + Titles + +

    + +
  • +
  • + + Lists + +
  • +
  • + + Other + +
  •
+"""
+    assert prettify(html) == prettify(expected_html)
\ No newline at end of file
diff --git a/pydoctor/test/epydoc/test_pyval_repr.py b/pydoctor/test/epydoc/test_pyval_repr.py
index c99ae1037..7866e0fc6 100644
--- a/pydoctor/test/epydoc/test_pyval_repr.py
+++ b/pydoctor/test/epydoc/test_pyval_repr.py
@@ -5,7 +5,7 @@
 from pydoctor.epydoc.markup._pyval_repr import PyvalColorizer
 from pydoctor.test import NotFoundLinker
-from pydoctor.stanutils import flatten
+from pydoctor.stanutils import flatten, flatten_text, html2stan
 from pydoctor.node2stan import gettext

 def color(v: Any, linebreakok:bool=True, maxlines:int=5, linelen:int=40) -> str:
@@ -1432,11 +1432,29 @@ def test_line_wrapping() -> None:
     ...\n"""

-def color2(v: Any) -> str:
-    colorizer = PyvalColorizer(linelen=50, maxlines=5)
+def color2(v: Any, linelen:int=50) -> str:
+    """
+    Plain text colorize.
+    """
+    colorizer = PyvalColorizer(linelen=linelen, maxlines=5)
     colorized = colorizer.colorize(v)
-    text = ''.join(gettext(colorized.to_node()))
-    return text
+    text1 = ''.join(gettext(colorized.to_node()))
+    text2 = flatten_text(html2stan(flatten(colorized.to_stan(NotFoundLinker()))))
+    assert text1 == text2
+    return text2
+
+
+def test_crash_surrogates_not_allowed() -> None:
+    """
+    Test that the colorizer does not make the flatten function crash when passing unicode strings that contain surrogate characters.
+    """
+    assert color2('surrogates:\udc80\udcff') == "'surrogates:\\udc80\\udcff'"
+
+def test_surrogates_cars_in_re() -> None:
+    """
+    Regex strings are escaped their own way. See https://github.com/twisted/pydoctor/pull/493
+    """
+    assert color2(extract_expr(ast.parse("re.compile('surrogates:\\udc80\\udcff')"))) == "re.compile(r'surrogates:\\udc80\\udcff')"

 def test_repr_text() -> None:
     """Test a few representations, with a plain text version.
diff --git a/pydoctor/test/epydoc/test_restructuredtext.py b/pydoctor/test/epydoc/test_restructuredtext.py
index 3f38ebc01..684402b2e 100644
--- a/pydoctor/test/epydoc/test_restructuredtext.py
+++ b/pydoctor/test/epydoc/test_restructuredtext.py
@@ -1,14 +1,15 @@
 from typing import List
 from textwrap import dedent
-from pydoctor.epydoc.markup import DocstringLinker, ParseError, ParsedDocstring
+from pydoctor.epydoc.markup import DocstringLinker, ParseError, ParsedDocstring, get_parser_by_name
 from pydoctor.epydoc.markup.restructuredtext import parse_docstring
 from pydoctor.test import NotFoundLinker
 from pydoctor.node2stan import node2stan
-from pydoctor.stanutils import flatten
+from pydoctor.stanutils import flatten, flatten_text
 from docutils import nodes
 from bs4 import BeautifulSoup
+import pytest

 def prettify(html: str) -> str:
     return BeautifulSoup(html, features="html.parser").prettify()  # type: ignore[no-any-return]
@@ -213,3 +214,133 @@ def test_rst_directive_seealso() -> None:

    Hey

    """ assert prettify(html).strip() == prettify(expected_html).strip(), html + +@pytest.mark.parametrize( + 'markup', ('epytext', 'plaintext', 'restructuredtext', 'numpy', 'google') + ) +def test_summary(markup:str) -> None: + """ + Summaries are generated from the inline text inside the first paragraph. + The text is trimmed as soon as we reach a break point (or another docutils element) after 200 characters. + """ + cases = [ + ("Single line", "Single line"), + ("Single line.", "Single line."), + ("Single line with period.", "Single line with period."), + (""" + Single line with period. + + @type: Also with a tag. + """, "Single line with period."), + ("Other lines with period.\nThis is attached", "Other lines with period. This is attached"), + ("Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. ", + "Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line. Single line..."), + ("Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line. Single line Single line Single line ", + "Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line..."), + ("Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line.", + "Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line Single line."), + (""" + Return a fully qualified name for the possibly-dotted name. + + To explain what this means, consider the following modules... blabla""", + "Return a fully qualified name for the possibly-dotted name.") + ] + for src, summary_text in cases: + errors: List[ParseError] = [] + pdoc = get_parser_by_name(markup)(dedent(src), errors, False) + assert not errors + assert pdoc.get_summary() == pdoc.get_summary() # summary is cached inside ParsedDocstring as well. + assert flatten_text(pdoc.get_summary().to_stan(NotFoundLinker())) == summary_text + + +def test_get_toc() -> None: + + docstring = """ +Titles +====== + +Level 2 +------- + +Level 3 +~~~~~~~ + +Level 4 +^^^^^^^ + +Level 5 +!!!!!!! + +Level 2.2 +--------- + +Level 22 +-------- + +Lists +===== + +Other +===== +""" + + errors: List[ParseError] = [] + parsed = parse_docstring(docstring, errors) + assert not errors, [str(e.descr) for e in errors] + + toc = parsed.get_toc(4) + assert toc is not None + html = flatten(toc.to_stan(NotFoundLinker())) + + expected_html=""" +
  • +

    + + Titles + +

    + +
  • +
  • + + Lists + +
  • +
  • + + Other + +
  • +""" + assert prettify(html) == prettify(expected_html) + diff --git a/pydoctor/test/test_astbuilder.py b/pydoctor/test/test_astbuilder.py index 0790e6492..4044952cb 100644 --- a/pydoctor/test/test_astbuilder.py +++ b/pydoctor/test/test_astbuilder.py @@ -1,79 +1,78 @@ -from typing import Optional, Tuple, Type, overload, cast + +from typing import Optional, Tuple, Type, List, overload, cast import ast -import textwrap import astor -from twisted.python._pydoctor import TwistedSystem from pydoctor import astbuilder, model, astutils from pydoctor.epydoc.markup import DocstringLinker, ParsedDocstring from pydoctor.stanutils import flatten, html2stan, flatten_text from pydoctor.epydoc.markup.epytext import Element, ParsedEpytextDocstring -from pydoctor.epydoc2stan import format_summary, get_parsed_type -from pydoctor.zopeinterface import ZopeInterfaceSystem +from pydoctor.epydoc2stan import ensure_parsed_docstring, format_summary, get_parsed_type from . import CapSys, NotFoundLinker, posonlyargs, typecomment import pytest +class SimpleSystem(model.System): + """ + A system with no extensions. + """ + extensions:List[str] = [] + +class ZopeInterfaceSystem(model.System): + """ + A system with only the zope interface extension enabled. + """ + extensions = ['pydoctor.extensions.zopeinterface'] + +class DeprecateSystem(model.System): + """ + A system with only the twisted deprecated extension enabled. + """ + extensions = ['pydoctor.extensions.deprecate'] + +class PydanticSystem(model.System): + # Add our custom extension as extra + custom_extensions = ['pydoctor.test.test_pydantic_fields'] systemcls_param = pytest.mark.parametrize( - 'systemcls', (model.System, ZopeInterfaceSystem, TwistedSystem) + 'systemcls', (model.System, # system with all extensions enalbed + ZopeInterfaceSystem, # system with zopeinterface extension only + DeprecateSystem, # system with deprecated extension only + SimpleSystem, # system with no extensions + PydanticSystem, + ) ) -def fromAST( - ast: ast.Module, +def fromText( + text: str, + *, modname: str = '', is_package: bool = False, parent_name: Optional[str] = None, system: Optional[model.System] = None, - buildercls: Optional[Type[astbuilder.ASTBuilder]] = None, systemcls: Type[model.System] = model.System ) -> model.Module: - + if system is None: _system = systemcls() else: _system = system - - if buildercls is None: - buildercls = _system.defaultBuilder - builder = buildercls(_system) + assert _system is not None if parent_name is None: full_name = modname else: full_name = f'{parent_name}.{modname}' - # Set containing package as parent. - builder.current = _system.allobjects[parent_name] - - factory = _system.Package if is_package else _system.Module - mod: model.Module = builder._push(factory, modname, 0) - builder._pop(factory) - builder.processModuleAST(ast, mod) - assert mod is _system.allobjects[full_name] - mod.state = model.ProcessingState.PROCESSED - - if system is None: - # Assume that an implicit system will only contain one module, - # so post-process it as a convenience. 
- _system.postProcess() + builder = _system.systemBuilder(_system) + builder.addModuleString(text, modname, parent_name, is_package=is_package) + builder.buildModules() + mod = _system.allobjects[full_name] + assert isinstance(mod, model.Module) return mod -def fromText( - text: str, - *, - modname: str = '', - is_package: bool = False, - parent_name: Optional[str] = None, - system: Optional[model.System] = None, - buildercls: Optional[Type[astbuilder.ASTBuilder]] = None, - systemcls: Type[model.System] = model.System - ) -> model.Module: - ast = astbuilder._parse(textwrap.dedent(text)) - return fromAST(ast, modname, is_package, parent_name, system, buildercls, systemcls) - def unwrap(parsed_docstring: Optional[ParsedDocstring]) -> str: if parsed_docstring is None: @@ -517,31 +516,33 @@ class Processor: P = mod.contents['Processor'] f = P.contents['clientFactory'] assert unwrap(f.parsed_docstring) == """Callable that returns a client.""" - assert f.privacyClass is model.PrivacyClass.VISIBLE + assert f.privacyClass is model.PrivacyClass.PUBLIC # we now mark aliases with the ALIAS kind! assert f.kind is model.DocumentableKind.ALIAS assert f.linenumber - # TODO: We should also verify this for inline docstrings, but the code + # Verify this is working with inline docstrings as well., + # but the code # currently doesn't support that. We should perhaps store aliases # as Documentables as well, so we can change their 'kind' when # an inline docstring follows the assignment. - # mod = fromText(''' - # class SimpleClient: - # pass - # class Processor: - # clientFactory = SimpleClient - # """ - # Callable that returns a client. - # """ - # ''', systemcls=systemcls, modname='mod') - # P = mod.contents['Processor'] - # f = P.contents['clientFactory'] - # assert unwrap(f.parsed_docstring) == """Callable that returns a client.""" - # assert f.privacyClass is model.PrivacyClass.VISIBLE - # # we now mark aliases with the ALIAS kind! - # assert f.kind is model.DocumentableKind.ALIAS - # assert f.linenumber + mod = fromText(''' + class SimpleClient: + pass + class Processor: + clientFactory = SimpleClient + """ + Callable that returns a client. + """ + ''', systemcls=systemcls, modname='mod') + P = mod.contents['Processor'] + f = P.contents['clientFactory'] + ensure_parsed_docstring(f) + assert unwrap(f.parsed_docstring) == """Callable that returns a client.""" + assert f.privacyClass is model.PrivacyClass.VISIBLE + # we now mark aliases with the ALIAS kind! 
+ assert f.kind is model.DocumentableKind.ALIAS + assert f.linenumber @systemcls_param @@ -815,12 +816,12 @@ def test_aliases_re_export(systemcls: Type[model.System]) -> None: from constantly import NamedConstant, ValueConstant, FlagConstant, Names, Values, Flags from mylib import core - from mylib.core import Observalbe + from mylib.core import Observable from mylib.core._impl import Processor Patator = core.Patator __all__ = ["NamedConstant", "ValueConstant", "FlagConstant", "Names", "Values", "Flags", - "Processor","Patator","Observalbe"] + "Processor","Patator","Observable"] ''' system = systemcls() fromText(src, system=system) @@ -835,10 +836,10 @@ def test_aliases_re_export(systemcls: Type[model.System]) -> None: assert astor.to_source(n.value).strip() == 'mylib.core._impl.Processor' == astutils.node2fullname(n.value, n.parent) assert system.allobjects['.ValueConstant'].kind is model.DocumentableKind.ALIAS - n = system.allobjects['.Observalbe'] + n = system.allobjects['.Observable'] assert isinstance(n, model.Attribute) assert n.kind is model.DocumentableKind.ALIAS - assert astor.to_source(n.value).strip() == 'mylib.core.Observalbe' == astutils.node2fullname(n.value, n.parent) + assert astor.to_source(n.value).strip() == 'mylib.core.Observable' == astutils.node2fullname(n.value, n.parent) n = system.allobjects['.Patator'] assert isinstance(n, model.Attribute) @@ -1295,7 +1296,7 @@ def test_import_module_from_package(systemcls: Type[model.System]) -> None: system = systemcls() fromText(''' # This module intentionally left blank. - ''', modname='a', system=system) + ''', modname='a', system=system, is_package=True) mod_b = fromText(''' def f(): pass ''', modname='b', parent_name='a', system=system) @@ -1360,7 +1361,7 @@ def f(self): assert not f.docstring a = C.contents['a'] assert a.docstring == """inline doc for a""" - assert a.privacyClass is model.PrivacyClass.VISIBLE + assert a.privacyClass is model.PrivacyClass.PUBLIC b = C.contents['_b'] assert b.docstring == """inline doc for _b""" assert b.privacyClass is model.PrivacyClass.PRIVATE @@ -1381,7 +1382,7 @@ class C: assert sorted(C.contents.keys()) == ['_b', 'a'] a = C.contents['a'] assert a.docstring == """inline doc for a""" - assert a.privacyClass is model.PrivacyClass.VISIBLE + assert a.privacyClass is model.PrivacyClass.PUBLIC b = C.contents['_b'] assert b.docstring == """inline doc for _b""" assert b.privacyClass is model.PrivacyClass.PRIVATE @@ -1427,7 +1428,7 @@ def set_f(self, value): ] a = C.contents['a'] assert a.docstring == """inline doc for a""" - assert a.privacyClass is model.PrivacyClass.VISIBLE + assert a.privacyClass is model.PrivacyClass.PUBLIC assert a.kind is model.DocumentableKind.INSTANCE_VARIABLE b = C.contents['_b'] assert b.docstring == """inline doc for _b""" @@ -1435,17 +1436,17 @@ def set_f(self, value): assert b.kind is model.DocumentableKind.INSTANCE_VARIABLE c = C.contents['c'] assert c.docstring == """inline doc for c""" - assert c.privacyClass is model.PrivacyClass.VISIBLE + assert c.privacyClass is model.PrivacyClass.PUBLIC assert c.kind is model.DocumentableKind.INSTANCE_VARIABLE d = C.contents['d'] assert d.docstring == """inline doc for d""" - assert d.privacyClass is model.PrivacyClass.VISIBLE + assert d.privacyClass is model.PrivacyClass.PUBLIC assert d.kind is model.DocumentableKind.INSTANCE_VARIABLE e = C.contents['e'] assert not e.docstring f = C.contents['f'] assert f.docstring == """inline doc for f""" - assert f.privacyClass is model.PrivacyClass.VISIBLE + assert f.privacyClass is 
model.PrivacyClass.PUBLIC assert f.kind is model.DocumentableKind.INSTANCE_VARIABLE @systemcls_param @@ -2368,3 +2369,91 @@ def test_constant_override_do_not_warns_when_defined_in_module_docstring(systemc assert ast.literal_eval(attr.value) == 99 captured = capsys.readouterr().out assert not captured + +@systemcls_param +def test__name__equals__main__is_skipped(systemcls: Type[model.System]) -> None: + """ + Code inside of C{if __name__ == '__main__'} should be skipped. + """ + mod = fromText(''' + foo = True + + if __name__ == '__main__': + var = True + + def fun(): + pass + + class Class: + pass + + def bar(): + pass + ''', modname='test', systemcls=systemcls) + assert tuple(mod.contents) == ('foo', 'bar') + +@systemcls_param +def test_variable_named_like_current_module(systemcls: Type[model.System]) -> None: + """ + Test for U{issue #474}. + """ + mod = fromText(''' + example = True + ''', systemcls=systemcls, modname="example") + assert 'example' in mod.contents + +@systemcls_param +def test_package_name_clash(systemcls: Type[model.System]) -> None: + system = systemcls() + builder = system.systemBuilder(system) + + builder.addModuleString('', 'mod', is_package=True) + builder.addModuleString('', 'sub', parent_name='mod', is_package=True) + + assert isinstance(system.allobjects['mod.sub'], model.Module) + + # The following statement completely overrides module 'mod' and all it's submodules. + builder.addModuleString('', 'mod', is_package=True) + + with pytest.raises(KeyError): + system.allobjects['mod.sub'] + + builder.addModuleString('', 'sub2', parent_name='mod', is_package=True) + + assert isinstance(system.allobjects['mod.sub2'], model.Module) + +@systemcls_param +def test_reexport_wildcard(systemcls: Type[model.System]) -> None: + """ + If a target module, + explicitly re-export via C{__all__} a set of names + that were initially imported from a sub-module via a wildcard, + those names are documented as part of the target module. + """ + system = systemcls() + builder = system.systemBuilder(system) + builder.addModuleString(''' + from ._impl import * + from _impl2 import * + __all__ = ['f', 'g', 'h', 'i', 'j'] + ''', modname='top', is_package=True) + + builder.addModuleString(''' + def f(): + pass + def g(): + pass + def h(): + pass + ''', modname='_impl', parent_name='top') + + builder.addModuleString(''' + class i: pass + class j: pass + ''', modname='_impl2') + + builder.buildModules() + + assert system.allobjects['top._impl'].resolveName('f') == system.allobjects['top'].contents['f'] + assert system.allobjects['_impl2'].resolveName('i') == system.allobjects['top'].contents['i'] + assert all(n in system.allobjects['top'].contents for n in ['f', 'g', 'h', 'i', 'j']) diff --git a/pydoctor/test/test_commandline.py b/pydoctor/test/test_commandline.py index dab2c3694..99369a0a5 100644 --- a/pydoctor/test/test_commandline.py +++ b/pydoctor/test/test_commandline.py @@ -5,6 +5,7 @@ from pytest import raises +from pydoctor.options import Options from pydoctor import driver from . 
import CapSys @@ -31,7 +32,7 @@ def geterrtext(*options: str) -> str: def test_invalid_option() -> None: err = geterrtext('--no-such-option') - assert 'no such option' in err + assert 'unrecognized arguments: --no-such-option' in err def test_cannot_advance_blank_system() -> None: err = geterrtext('--make-html') @@ -39,7 +40,7 @@ def test_cannot_advance_blank_system() -> None: def test_no_systemclasses_py3() -> None: err = geterrtext('--system-class') - assert 'requires 1 argument' in err + assert 'expected one argument' in err def test_invalid_systemclasses() -> None: err = geterrtext('--system-class=notdotted') @@ -61,7 +62,8 @@ def test_projectbasedir_absolute(tmp_path: Path) -> None: Using L{Path.samefile()} is reliable, but requires an existing path. """ assert tmp_path.is_absolute() - options, args = driver.parse_args(["--project-base-dir", str(tmp_path)]) + options = Options.from_args(["--project-base-dir", str(tmp_path)]) + assert options.projectbasedirectory is not None assert options.projectbasedirectory.samefile(tmp_path) assert options.projectbasedirectory.is_absolute() @@ -77,7 +79,8 @@ def test_projectbasedir_symlink(tmp_path: Path) -> None: link.symlink_to('target', target_is_directory=True) assert link.samefile(target) - options, args = driver.parse_args(["--project-base-dir", str(link)]) + options = Options.from_args(["--project-base-dir", str(link)]) + assert options.projectbasedirectory is not None assert options.projectbasedirectory.samefile(target) assert options.projectbasedirectory.is_absolute() @@ -89,7 +92,8 @@ def test_projectbasedir_relative() -> None: the options object. """ relative = "projbasedirvalue" - options, args = driver.parse_args(["--project-base-dir", relative]) + options = Options.from_args(["--project-base-dir", relative]) + assert options.projectbasedirectory is not None assert options.projectbasedirectory.is_absolute() assert options.projectbasedirectory.name == relative assert options.projectbasedirectory.parent == Path.cwd() @@ -99,8 +103,7 @@ def test_cache_enabled_by_default() -> None: """ Intersphinx object caching is enabled by default. """ - parser = driver.getparser() - (options, _) = parser.parse_args([]) + options = Options.defaults() assert options.enable_intersphinx_cache @@ -109,10 +112,10 @@ def test_cli_warnings_on_error() -> None: The --warnings-as-errors option is disabled by default. This is the test for the long form of the CLI option. """ - options, args = driver.parse_args([]) + options = Options.defaults() assert options.warnings_as_errors == False - options, args = driver.parse_args(['--warnings-as-errors']) + options = Options.from_args(['--warnings-as-errors']) assert options.warnings_as_errors == True @@ -120,7 +123,7 @@ def test_project_version_default() -> None: """ When no --project-version is provided, it will default empty string. """ - options, args = driver.parse_args([]) + options = Options.defaults() assert options.projectversion == '' @@ -128,7 +131,7 @@ def test_project_version_string() -> None: """ --project-version can be passed as a simple string. 
""" - options, args = driver.parse_args(['--project-version', '1.2.3.rc1']) + options = Options.from_args(['--project-version', '1.2.3.rc1']) assert options.projectversion == '1.2.3.rc1' diff --git a/pydoctor/test/test_configparser.py b/pydoctor/test/test_configparser.py new file mode 100644 index 000000000..f0162a8bb --- /dev/null +++ b/pydoctor/test/test_configparser.py @@ -0,0 +1,390 @@ +from io import StringIO +from typing import Any, Dict, List +import requests + +from pydoctor._configparser import parse_toml_section_name, is_quoted, unquote_str, IniConfigParser, TomlConfigParser + +# Test for the unquote_str() function relies on pydoctor's colorizer because it can generate a tripple +# quoted representation of a string. This has the benefit of testing our colorizer with naughty strings +# as well. But the tests are de-facto coupled with pydoctor's test suite. +from pydoctor.test.epydoc.test_pyval_repr import color2 + +def test_unquote_str() -> None: + + assert unquote_str('string') == 'string' + assert unquote_str('"string') == '"string' + assert unquote_str('string"') == 'string"' + assert unquote_str('"string"') == 'string' + assert unquote_str('\'string\'') == 'string' + assert unquote_str('"""string"""') == 'string' + assert unquote_str('\'\'\'string\'\'\'') == 'string' + assert unquote_str('"""\nstring"""') == '\nstring' + assert unquote_str('\'\'\'string\n\'\'\'') == 'string\n' + assert unquote_str('"""\nstring \n"""') == '\nstring \n' + assert unquote_str('\'\'\'\n string\n\'\'\'') == '\n string\n' + + assert unquote_str('\'\'\'string') == '\'\'\'string' + assert unquote_str('string\'\'\'') == 'string\'\'\'' + assert unquote_str('"""string') == '"""string' + assert unquote_str('string"""') == 'string"""' + assert unquote_str('"""str"""ing"""') == '"""str"""ing"""' + assert unquote_str('str\'ing') == 'str\'ing' + assert unquote_str('""""value""""') == '""""value""""' + +def test_unquote_naughty_quoted_strings() -> None: + # See https://github.com/minimaxir/big-list-of-naughty-strings/blob/master/blns.txt + res = requests.get('https://raw.githubusercontent.com/minimaxir/big-list-of-naughty-strings/master/blns.txt') + text = res.text + for i, string in enumerate(text.split('\n')): + if string.strip().startswith('#'): + continue + + # gerenerate two quoted version of the naughty string + # simply once + naughty_string_quoted = repr(string) + # quoted twice, once with repr, once with our colorizer + # (we insert \n such that we force the colorier to produce tripple quoted strings) + naughty_string_quoted2 = color2(f"\n{string!r}", linelen=0) + assert naughty_string_quoted2.startswith("'''") + + naughty_string_quoted2_alt = repr(f"{string!r}") + + # test unquote that repr + try: + assert unquote_str(naughty_string_quoted) == string + + assert unquote_str(unquote_str(naughty_string_quoted2).strip()) == string + + assert unquote_str(unquote_str(naughty_string_quoted2_alt)) == string + + if is_quoted(string): + assert unquote_str(string) == string[1:-1] + else: + assert unquote_str(string) == string + + except Exception as e: + raise AssertionError(f'error with naughty string at line {i}: {e}') from e + +def test_parse_toml_section_keys() -> None: + assert parse_toml_section_name('tool.pydoctor') == ('tool', 'pydoctor') + assert parse_toml_section_name(' tool.pydoctor ') == ('tool', 'pydoctor') + assert parse_toml_section_name(' "tool".pydoctor ') == ('tool', 'pydoctor') + assert parse_toml_section_name(' tool."pydoctor" ') == ('tool', 'pydoctor') + +INI_SIMPLE_STRINGS: List[Dict[str, 
Any]] = [ + {'line': 'key = value # not_a_comment # not_a_comment', 'expected': ('key', 'value # not_a_comment # not_a_comment', None)}, # that's normal behaviour for configparser + {'line': 'key=value#not_a_comment ', 'expected': ('key', 'value#not_a_comment', None)}, + {'line': 'key=value', 'expected': ('key', 'value', None)}, + {'line': 'key =value', 'expected': ('key', 'value', None)}, + {'line': 'key= value', 'expected': ('key', 'value', None)}, + {'line': 'key = value', 'expected': ('key', 'value', None)}, + {'line': 'key = value', 'expected': ('key', 'value', None)}, + {'line': ' key = value ', 'expected': ('key', 'value', None)}, + {'line': 'key:value', 'expected': ('key', 'value', None)}, + {'line': 'key :value', 'expected': ('key', 'value', None)}, + {'line': 'key: value', 'expected': ('key', 'value', None)}, + {'line': 'key : value', 'expected': ('key', 'value', None)}, + {'line': 'key : value', 'expected': ('key', 'value', None)}, + {'line': ' key : value ', 'expected': ('key', 'value', None)}, +] + +INI_QUOTES_CORNER_CASES: List[Dict[str, Any]] = [ + {'line': 'key="', 'expected': ('key', '"', None)}, + {'line': 'key = "', 'expected': ('key', '"', None)}, + {'line': ' key = " ', 'expected': ('key', '"', None)}, + {'line': 'key = ""value""', 'expected': ('key', '""value""', None)}, # Not a valid python, so we get the original value, which is normal + {'line': 'key = \'\'value\'\'', 'expected': ('key', "''value''", None)}, # Idem +] + +INI_QUOTED_STRINGS: List[Dict[str, Any]] = [ + {'line': 'key="value"', 'expected': ('key', 'value', None)}, + {'line': 'key = "value"', 'expected': ('key', 'value', None)}, + {'line': ' key = "value" ', 'expected': ('key', 'value', None)}, + {'line': 'key=" value "', 'expected': ('key', ' value ', None)}, + {'line': 'key = " value "', 'expected': ('key', ' value ', None)}, + {'line': ' key = " value " ', 'expected': ('key', ' value ', None)}, + {'line': "key='value'", 'expected': ('key', 'value', None)}, + {'line': "key = 'value'", 'expected': ('key', 'value', None)}, + {'line': " key = 'value' ", 'expected': ('key', 'value', None)}, + {'line': "key=' value '", 'expected': ('key', ' value ', None)}, + {'line': "key = ' value '", 'expected': ('key', ' value ', None)}, + {'line': " key = ' value ' ", 'expected': ('key', ' value ', None)}, + {'line': 'key = \'"value"\'', 'expected': ('key', '"value"', None)}, + {'line': 'key = "\'value\'"', 'expected': ('key', "'value'", None)}, +] + +INI_LOOKS_LIKE_QUOTED_STRINGS: List[Dict[str, Any]] = [ + {'line': 'key="value', 'expected': ('key', '"value', None)}, + {'line': 'key = "value', 'expected': ('key', '"value', None)}, + {'line': ' key = "value ', 'expected': ('key', '"value', None)}, + {'line': 'key=value"', 'expected': ('key', 'value"', None)}, + {'line': 'key = value"', 'expected': ('key', 'value"', None)}, + {'line': ' key = value " ', 'expected': ('key', 'value "', None)}, + {'line': "key='value", 'expected': ('key', "'value", None)}, + {'line': "key = 'value", 'expected': ('key', "'value", None)}, + {'line': " key = 'value ", 'expected': ('key', "'value", None)}, + {'line': "key=value'", 'expected': ('key', "value'", None)}, + {'line': "key = value'", 'expected': ('key', "value'", None)}, + {'line': " key = value ' ", 'expected': ('key', "value '", None)}, +] + +INI_BLANK_LINES: List[Dict[str, Any]] = [ + {'line': 'key=', 'expected': ('key', '', None)}, + {'line': 'key =', 'expected': ('key', '', None)}, + {'line': 'key= ', 'expected': ('key', '', None)}, + {'line': 'key = ', 'expected': ('key', '', 
None)}, + {'line': 'key = ', 'expected': ('key', '', None)}, + {'line': ' key = ', 'expected': ('key', '', None)}, + {'line': 'key:', 'expected': ('key', '', None)}, + {'line': 'key :', 'expected': ('key', '', None)}, + {'line': 'key: ', 'expected': ('key', '', None)}, + {'line': 'key : ', 'expected': ('key', '', None)}, + {'line': 'key : ', 'expected': ('key', '', None)}, + {'line': ' key : ', 'expected': ('key', '', None)}, +] + +INI_EQUAL_SIGN_VALUE: List[Dict[str, Any]] = [ + {'line': 'key=:', 'expected': ('key', ':', None)}, + {'line': 'key =:', 'expected': ('key', ':', None)}, + {'line': 'key= :', 'expected': ('key', ':', None)}, + {'line': 'key = :', 'expected': ('key', ':', None)}, + {'line': 'key = :', 'expected': ('key', ':', None)}, + {'line': ' key = : ', 'expected': ('key', ':', None)}, + {'line': 'key:=', 'expected': ('key', '=', None)}, + {'line': 'key :=', 'expected': ('key', '=', None)}, + {'line': 'key: =', 'expected': ('key', '=', None)}, + {'line': 'key : =', 'expected': ('key', '=', None)}, + {'line': 'key : =', 'expected': ('key', '=', None)}, + {'line': ' key : = ', 'expected': ('key', '=', None)}, + {'line': 'key==', 'expected': ('key', '=', None)}, + {'line': 'key ==', 'expected': ('key', '=', None)}, + {'line': 'key= =', 'expected': ('key', '=', None)}, + {'line': 'key = =', 'expected': ('key', '=', None)}, + {'line': 'key = =', 'expected': ('key', '=', None)}, + {'line': ' key = = ', 'expected': ('key', '=', None)}, + {'line': 'key::', 'expected': ('key', ':', None)}, + {'line': 'key ::', 'expected': ('key', ':', None)}, + {'line': 'key: :', 'expected': ('key', ':', None)}, + {'line': 'key : :', 'expected': ('key', ':', None)}, + {'line': 'key : :', 'expected': ('key', ':', None)}, + {'line': ' key : : ', 'expected': ('key', ':', None)}, +] + +INI_NEGATIVE_VALUES: List[Dict[str, Any]] = [ + {'line': 'key = -10', 'expected': ('key', '-10', None)}, + {'line': 'key : -10', 'expected': ('key', '-10', None)}, + # {'line': 'key -10', 'expected': ('key', '-10', None)}, # Not supported + {'line': 'key = "-10"', 'expected': ('key', '-10', None)}, + {'line': "key = '-10'", 'expected': ('key', '-10', None)}, + {'line': 'key=-10', 'expected': ('key', '-10', None)}, +] + +INI_KEY_SYNTAX_EMPTY: List[Dict[str, Any]] = [ + {'line': 'key_underscore=', 'expected': ('key_underscore', '', None)}, + {'line': '_key_underscore=', 'expected': ('_key_underscore', '', None)}, + {'line': 'key_underscore_=', 'expected': ('key_underscore_', '', None)}, + {'line': 'key-dash=', 'expected': ('key-dash', '', None)}, + {'line': 'key@word=', 'expected': ('key@word', '', None)}, + {'line': 'key$word=', 'expected': ('key$word', '', None)}, + {'line': 'key.word=', 'expected': ('key.word', '', None)}, +] + +INI_KEY_SYNTAX: List[Dict[str, Any]] = [ + {'line': 'key_underscore = value', 'expected': ('key_underscore', 'value', None)}, + # {'line': 'key_underscore', 'expected': ('key_underscore', 'true', None)}, # Not supported + {'line': '_key_underscore = value', 'expected': ('_key_underscore', 'value', None)}, + # {'line': '_key_underscore', 'expected': ('_key_underscore', 'true', None)}, # Idem + {'line': 'key_underscore_ = value', 'expected': ('key_underscore_', 'value', None)}, + # {'line': 'key_underscore_', 'expected': ('key_underscore_', 'true', None)}, Idem + {'line': 'key-dash = value', 'expected': ('key-dash', 'value', None)}, + # {'line': 'key-dash', 'expected': ('key-dash', 'true', None)}, # Idem + {'line': 'key@word = value', 'expected': ('key@word', 'value', None)}, + # {'line': 'key@word', 
'expected': ('key@word', 'true', None)}, Idem + {'line': 'key$word = value', 'expected': ('key$word', 'value', None)}, + # {'line': 'key$word', 'expected': ('key$word', 'true', None)}, Idem + {'line': 'key.word = value', 'expected': ('key.word', 'value', None)}, + # {'line': 'key.word', 'expected': ('key.word', 'true', None)}, Idem +] + +INI_LITERAL_LIST: List[Dict[str, Any]] = [ + {'line': 'key = [1,2,3]', 'expected': ('key', ['1','2','3'], None)}, + {'line': 'key = []', 'expected': ('key', [], None)}, + {'line': 'key = ["hello", "world", ]', 'expected': ('key', ["hello", "world"], None)}, + {'line': 'key = [\'hello\', \'world\', ]', 'expected': ('key', ["hello", "world"], None)}, + {'line': 'key = [1,2,3] ', 'expected': ('key', ['1','2','3'], None)}, + {'line': 'key = [\n ] \n', 'expected': ('key', [], None)}, + {'line': 'key = [\n "hello", "world", ] \n\n\n\n', 'expected': ('key', ["hello", "world"], None)}, + {'line': 'key = [\n\n \'hello\', \n \'world\', ]', 'expected': ('key', ["hello", "world"], None)}, + {'line': r'key = "[\"hello\", \"world\", ]"', 'expected': ('key', "[\"hello\", \"world\", ]", None)}, +] + +INI_TRIPPLE_QUOTED_STRINGS: List[Dict[str, Any]] = [ + {'line': 'key="""value"""', 'expected': ('key', 'value', None)}, + {'line': 'key = """value"""', 'expected': ('key', 'value', None)}, + {'line': ' key = """value""" ', 'expected': ('key', 'value', None)}, + {'line': 'key=""" value """', 'expected': ('key', ' value ', None)}, + {'line': 'key = """ value """', 'expected': ('key', ' value ', None)}, + {'line': ' key = """ value """ ', 'expected': ('key', ' value ', None)}, + {'line': "key='''value'''", 'expected': ('key', 'value', None)}, + {'line': "key = '''value'''", 'expected': ('key', 'value', None)}, + {'line': " key = '''value''' ", 'expected': ('key', 'value', None)}, + {'line': "key=''' value '''", 'expected': ('key', ' value ', None)}, + {'line': "key = ''' value '''", 'expected': ('key', ' value ', None)}, + {'line': " key = ''' value ''' ", 'expected': ('key', ' value ', None)}, + {'line': 'key = \'\'\'"value"\'\'\'', 'expected': ('key', '"value"', None)}, + {'line': 'key = """\'value\'"""', 'expected': ('key', "'value'", None)}, + {'line': 'key = """\\"value\\""""', 'expected': ('key', '"value"', None)}, +] + +# These test does not pass with TOML (even if toml support tripple quoted strings) because indentation +# is lost while parsing the config with configparser. The bahaviour is basically the same as +# running textwrap.dedent() on the text. +INI_TRIPPLE_QUOTED_STRINGS_NOT_COMPATIABLE_WITH_TOML: List[Dict[str, Any]] = [ + {'line': 'key = """"value\\""""', 'expected': ('key', '"value"', None)}, # This is valid for ast.literal_eval but not for TOML. + {'line': 'key = """"value" """', 'expected': ('key', '"value" ', None)}, # Idem. + + {'line': 'key = \'\'\'\'value\\\'\'\'\'', 'expected': ('key', "'value'", None)}, # The rest of the test cases are not passing for TOML, + # we get the indented string instead, anyway, it's not onus to test TOML. 
+ {'line': 'key="""\n value\n """', 'expected': ('key', '\nvalue\n', None)}, + {'line': 'key = """\n value\n """', 'expected': ('key', '\nvalue\n', None)}, + {'line': ' key = """\n value\n """ ', 'expected': ('key', '\nvalue\n', None)}, + {'line': "key='''\n value\n '''", 'expected': ('key', '\nvalue\n', None)}, + {'line': "key = '''\n value\n '''", 'expected': ('key', '\nvalue\n', None)}, + {'line': " key = '''\n value\n ''' ", 'expected': ('key', '\nvalue\n', None)}, + {'line': 'key= \'\'\'\n """\n \'\'\'', 'expected': ('key', '\n"""\n', None)}, + {'line': 'key = \'\'\'\n """""\n \'\'\'', 'expected': ('key', '\n"""""\n', None)}, + {'line': ' key = \'\'\'\n ""\n \'\'\' ', 'expected': ('key', '\n""\n', None)}, + {'line': 'key = \'\'\'\n "value"\n \'\'\'', 'expected': ('key', '\n"value"\n', None)}, + {'line': 'key = """\n \'value\'\n """', 'expected': ('key', "\n'value'\n", None)}, + {'line': 'key = """"\n value\\"\n """', 'expected': ('key', '"\nvalue"\n', None)}, + {'line': 'key = """\n \\"value\\"\n """', 'expected': ('key', '\n"value"\n', None)}, + {'line': 'key = """\n "value" \n """', 'expected': ('key', '\n"value"\n', None)}, # trailling white spaces are removed by configparser + {'line': 'key = \'\'\'\n \'value\\\'\n \'\'\'', 'expected': ('key', "\n'value'\n", None)}, + +] + +INI_LOOKS_LIKE_TRIPPLE_QUOTED_STRINGS: List[Dict[str, Any]] = [ + {'line': 'key= """', 'expected': ('key', '"""', None)}, + {'line': 'key = """""', 'expected': ('key', '"""""', None)}, + {'line': ' key = """" ', 'expected': ('key', '""""', None)}, + {'line': 'key = """"value""""', 'expected': ('key', '""""value""""', None)}, # Not a valid python, so we get the original value, which is normal + {'line': 'key = \'\'\'\'value\'\'\'\'', 'expected': ('key', "''''value''''", None)}, # Idem + {'line': 'key="""value', 'expected': ('key', '"""value', None)}, + {'line': 'key = """value', 'expected': ('key', '"""value', None)}, + {'line': ' key = """value ', 'expected': ('key', '"""value', None)}, + {'line': 'key=value"""', 'expected': ('key', 'value"""', None)}, + {'line': 'key = value"""', 'expected': ('key', 'value"""', None)}, + {'line': ' key = value """ ', 'expected': ('key', 'value """', None)}, + {'line': "key='''value", 'expected': ('key', "'''value", None)}, + {'line': "key = '''value", 'expected': ('key', "'''value", None)}, + {'line': " key = '''value ", 'expected': ('key', "'''value", None)}, + {'line': "key=value'''", 'expected': ('key', "value'''", None)}, + {'line': "key = value'''", 'expected': ('key', "value'''", None)}, + {'line': " key = value ''' ", 'expected': ('key', "value '''", None)}, +] + +INI_BLANK_LINES_QUOTED: List[Dict[str, Any]] = [ + {'line': 'key=""', 'expected': ('key', '', None)}, + {'line': 'key =""', 'expected': ('key', '', None)}, + {'line': 'key= ""', 'expected': ('key', '', None)}, + {'line': 'key = ""', 'expected': ('key', '', None)}, + {'line': 'key = \'\'', 'expected': ('key', '', None)}, + {'line': ' key =\'\' ', 'expected': ('key', '', None)}, +] + +INI_BLANK_LINES_QUOTED_COLONS: List[Dict[str, Any]] = [ + {'line': 'key:\'\'', 'expected': ('key', '', None)}, + {'line': 'key :\'\'', 'expected': ('key', '', None)}, + {'line': 'key: \'\'', 'expected': ('key', '', None)}, + {'line': 'key : \'\'', 'expected': ('key', '', None)}, + {'line': 'key :\'\' ', 'expected': ('key', '', None)}, + {'line': ' key : "" ', 'expected': ('key', '', None)}, +] + +INI_MULTILINE_STRING_LIST: List[Dict[str, Any]] = [ + {'line': 'key = \n hello\n hoho', 'expected': ('key', ["hello", "hoho"], None)}, + 
{'line': 'key = hello\n hoho', 'expected': ('key', ["hello", "hoho"], None)}, + {'line': 'key : "hello"\n \'hoho\'', 'expected': ('key', ["\"hello\"", "'hoho'"], None)}, # quotes are kept when converting multine strings to list. + {'line': 'key : \n hello\n hoho\n', 'expected': ('key', ["hello", "hoho"], None)}, + {'line': 'key = \n hello\n hoho\n \n\n ', 'expected': ('key', ["hello", "hoho"], None)}, + {'line': 'key = \n hello\n;comment\n\n hoho\n \n\n ', 'expected': ('key', ["hello", "hoho"], None)}, +] + +def get_IniConfigParser_cases() -> List[Dict[str, Any]]: + return (INI_SIMPLE_STRINGS + + INI_QUOTED_STRINGS + + INI_BLANK_LINES + + INI_NEGATIVE_VALUES + + INI_BLANK_LINES_QUOTED + + INI_BLANK_LINES_QUOTED_COLONS + + INI_KEY_SYNTAX + + INI_KEY_SYNTAX_EMPTY + + INI_LITERAL_LIST + + INI_TRIPPLE_QUOTED_STRINGS + + INI_LOOKS_LIKE_TRIPPLE_QUOTED_STRINGS + + INI_QUOTES_CORNER_CASES + + INI_LOOKS_LIKE_QUOTED_STRINGS) + +def get_IniConfigParser_multiline_text_to_list_cases() -> List[Dict[str, Any]]: + cases = get_IniConfigParser_cases() + for case in INI_BLANK_LINES + INI_KEY_SYNTAX_EMPTY: # when multiline_text_to_list is enabled blank lines are simply ignored. + cases.remove(case) + cases.extend(INI_MULTILINE_STRING_LIST) + return cases + +def get_TomlConfigParser_cases() -> List[Dict[str, Any]]: + return (INI_QUOTED_STRINGS + + INI_BLANK_LINES_QUOTED + + INI_LITERAL_LIST + + INI_TRIPPLE_QUOTED_STRINGS) + +def test_IniConfigParser() -> None: + # Not supported by configparser (currently raises error) + # {'line': 'key value', 'expected': ('key', 'value', None)}, + # {'line': 'key value', 'expected': ('key', 'value', None)}, + # {'line': ' key value ', 'expected': ('key', 'value', None)} + # {'line': 'key ', 'expected': ('key', 'true', None)}, + # {'line': 'key', 'expected': ('key', 'true', None)}, + # {'line': 'key ', 'expected': ('key', 'true', None)}, + # {'line': ' key ', 'expected': ('key', 'true', None)}, + + p = IniConfigParser(['soft'], False) + + for test in get_IniConfigParser_cases(): + try: + parsed_obj = p.parse(StringIO('[soft]\n'+test['line'])) + except Exception as e: + raise AssertionError("Line %r, error: %s" % (test['line'], str(e))) from e + else: + parsed_obj = dict(parsed_obj) + expected = {test['expected'][0]: test['expected'][1]} + assert parsed_obj==expected, "Line %r" % (test['line']) + + +def test_IniConfigParser_multiline_text_to_list() -> None: + + p = IniConfigParser(['soft'], True) + + for test in get_IniConfigParser_multiline_text_to_list_cases(): + try: + parsed_obj = p.parse(StringIO('[soft]\n'+test['line'])) + except Exception as e: + raise AssertionError("Line %r, error: %s" % (test['line'], str(e))) from e + else: + parsed_obj = dict(parsed_obj) + expected = {test['expected'][0]: test['expected'][1]} + assert parsed_obj==expected, "Line %r" % (test['line']) + +def test_TomlConfigParser() -> None: + + p = TomlConfigParser(['soft']) + + for test in get_TomlConfigParser_cases(): + try: + parsed_obj = p.parse(StringIO('[soft]\n'+test['line'])) + except Exception as e: + raise AssertionError("Line %r, error: %s" % (test['line'], str(e))) from e + else: + parsed_obj = dict(parsed_obj) + expected = {test['expected'][0]: test['expected'][1]} + assert parsed_obj==expected, "Line %r" % (test['line']) diff --git a/pydoctor/test/test_cyclic_imports_base_classes.py b/pydoctor/test/test_cyclic_imports_base_classes.py new file mode 100644 index 000000000..d331a7eb2 --- /dev/null +++ b/pydoctor/test/test_cyclic_imports_base_classes.py @@ -0,0 +1,39 @@ +""" +This test case 
is in its own file because it requires the +PYTHONHASHSEED=0 environment variable. See issue #482. +""" + +import os +import subprocess +import sys + +def test_cyclic_imports_base_classes() -> None: + if sys.platform == 'win32': + # Running this script with the following subprocess call fails on Windows + # with an ImportError that isn't actually related to what we want to test. + # So we just skip for Windows. + return + + process = subprocess.Popen( + [sys.executable, os.path.basename(__file__)], + env={'PYTHONHASHSEED': '0'}, + cwd=os.path.dirname(__file__), + ) + assert process.wait() == 0 + + +if __name__ == '__main__': + from test_packages import processPackage, model # type: ignore + + assert os.environ['PYTHONHASHSEED'] == '0' + + def consistent_hash(self: model.Module) -> int: + return hash(self.name) + + if model.Module.__hash__ == object.__hash__: + model.Module.__hash__ = consistent_hash + + system = processPackage('cyclic_imports_base_classes') + b_cls = system.allobjects['cyclic_imports_base_classes.b.B'] + assert isinstance(b_cls, model.Class) + assert b_cls.baseobjects == [system.allobjects['cyclic_imports_base_classes.a.A']] diff --git a/pydoctor/test/test_epydoc2stan.py b/pydoctor/test/test_epydoc2stan.py index fa55dd257..b2d909db3 100644 --- a/pydoctor/test/test_epydoc2stan.py +++ b/pydoctor/test/test_epydoc2stan.py @@ -5,13 +5,15 @@ import pytest from twisted.web.template import Tag, tags -from pydoctor import epydoc2stan, model +from pydoctor import epydoc2stan, model, linker from pydoctor.epydoc.markup import DocstringLinker from pydoctor.stanutils import flatten, flatten_text from pydoctor.epydoc.markup.epytext import ParsedEpytextDocstring from pydoctor.sphinx import SphinxInventory from pydoctor.test.test_astbuilder import fromText, unwrap from pydoctor.test import CapSys +from pydoctor.templatewriter.search import stem_identifier +from pydoctor.utils import partialclass if TYPE_CHECKING: from twisted.web.template import Flattenable @@ -72,7 +74,7 @@ def test_html_empty_module() -> None: mod = fromText(''' """Empty module.""" ''') - assert docstring2html(mod) == "
    Empty module.
    " + assert docstring2html(mod) == "
    \n

    Empty module.

    \n
    " def test_xref_link_not_found() -> None: @@ -86,16 +88,31 @@ def test_xref_link_not_found() -> None: def test_xref_link_same_page() -> None: """A linked name that is documented on the same page is linked using only - a fragment as the URL. + a fragment as the URL. But that does not happend in summaries. """ - mod = fromText(''' + src = ''' """The home of L{local_func}.""" def local_func(): pass - ''', modname='test') + ''' + mod = fromText(src, modname='test') + assert mod.page_object.url == 'index.html' html = docstring2html(mod) assert 'href="#local_func"' in html + html = summary2html(mod) + assert 'href="index.html#local_func"' in html + html = docstring2html(mod) + assert 'href="#local_func"' in html + + mod = fromText(src, modname='test') + html = summary2html(mod) + assert 'href="index.html#local_func"' in html + html = docstring2html(mod) + assert 'href="#local_func"' in html + html = summary2html(mod) + assert 'href="index.html#local_func"' in html + def test_xref_link_other_page() -> None: @@ -390,7 +407,7 @@ def f(): """ ''', modname='test') html = docstring2html(mod.contents['f']).split('\n') - assert 'SpanishInquisition' in html + assert 'SpanishInquisition' in html def test_func_raise_missing_exception_type(capsys: CapSys) -> None: @@ -611,7 +628,7 @@ def single_line_summary(): Ipsum Lorem """ - def no_summary(): + def still_summary_since_2022(): """ Foo Bar @@ -629,7 +646,11 @@ def three_lines_summary(): ''') assert 'Lorem Ipsum' == summary2html(mod.contents['single_line_summary']) assert 'Foo Bar Baz' == summary2html(mod.contents['three_lines_summary']) - assert 'No summary' == summary2html(mod.contents['no_summary']) + + # We get a summary based on the first sentences of the first + # paragraph until reached maximum number characters or the paragraph ends. + # So no matter the number of lines the first paragraph is, we'll always get a summary. + assert 'Foo Bar Baz Qux' == summary2html(mod.contents['still_summary_since_2022']) def test_ivar_overriding_attribute() -> None: @@ -681,7 +702,7 @@ class Sub(Base): sub_a = sub.contents['a'] assert isinstance(sub_a, model.Attribute) assert summary2html(sub_a) == 'sub doc' - assert docstring2html(sub_a) == "
    sub doc
    " + assert docstring2html(sub_a) == "
    \n

    sub doc

    \n
    " sub_b = sub.contents['b'] assert isinstance(sub_b, model.Attribute) assert summary2html(sub_b) == 'not overridden' @@ -760,7 +781,8 @@ def test_EpydocLinker_look_for_intersphinx_no_link() -> None: """ system = model.System() target = model.Module(system, 'ignore-name') - sut = epydoc2stan._EpydocLinker(target) + sut = target.docstring_linker + assert isinstance(sut, linker._CachedEpydocLinker) result = sut.look_for_intersphinx('base.module') @@ -776,12 +798,31 @@ def test_EpydocLinker_look_for_intersphinx_hit() -> None: inventory._links['base.module.other'] = ('http://tm.tld', 'some.html') system.intersphinx = inventory target = model.Module(system, 'ignore-name') - sut = epydoc2stan._EpydocLinker(target) + sut = target.docstring_linker + assert isinstance(sut, linker._CachedEpydocLinker) result = sut.look_for_intersphinx('base.module.other') assert 'http://tm.tld/some.html' == result +def test_EpydocLinker_adds_intersphinx_link_css_class() -> None: + """ + The EpydocLinker return a link with the CSS class 'intersphinx-link' when it's using intersphinx. + """ + system = model.System() + inventory = SphinxInventory(system.msg) + inventory._links['base.module.other'] = ('http://tm.tld', 'some.html') + system.intersphinx = inventory + target = model.Module(system, 'ignore-name') + sut = target.docstring_linker + assert isinstance(sut, linker._CachedEpydocLinker) + + result1 = sut.link_xref('base.module.other', 'base.module.other', 0).children[0] # wrapped in a code tag + result2 = sut.link_to('base.module.other', 'base.module.other') + + res = flatten(result2) + assert flatten(result1) == res + assert 'class="intersphinx-link"' in res def test_EpydocLinker_resolve_identifier_xref_intersphinx_absolute_id() -> None: """ @@ -794,7 +835,8 @@ def test_EpydocLinker_resolve_identifier_xref_intersphinx_absolute_id() -> None: inventory._links['base.module.other'] = ('http://tm.tld', 'some.html') system.intersphinx = inventory target = model.Module(system, 'ignore-name') - sut = epydoc2stan._EpydocLinker(target) + sut = target.docstring_linker + assert isinstance(sut, linker._CachedEpydocLinker) url = sut.resolve_identifier('base.module.other') url_xref = sut._resolve_identifier_xref('base.module.other', 0) @@ -819,7 +861,8 @@ def test_EpydocLinker_resolve_identifier_xref_intersphinx_relative_id() -> None: target.contents['ext_module'] = model.Module( system, 'ext_module', parent=ext_package) - sut = epydoc2stan._EpydocLinker(target) + sut = target.docstring_linker + assert isinstance(sut, linker._CachedEpydocLinker) # This is called for the L{ext_module} markup. url = sut.resolve_identifier('ext_module') @@ -843,7 +886,8 @@ def test_EpydocLinker_resolve_identifier_xref_intersphinx_link_not_found(capsys: ext_package = model.Module(system, 'ext_package') target.contents['ext_module'] = model.Module( system, 'ext_module', parent=ext_package) - sut = epydoc2stan._EpydocLinker(target) + sut = target.docstring_linker + assert isinstance(sut, linker._CachedEpydocLinker) # This is called for the L{ext_module} markup. 
assert sut.resolve_identifier('ext_module') is None @@ -882,10 +926,11 @@ class C: socket = None ''') mod.system.intersphinx = cast(SphinxInventory, InMemoryInventory()) - linker = epydoc2stan._EpydocLinker(mod) + _linker = mod.docstring_linker + assert isinstance(_linker, linker._CachedEpydocLinker) - url = linker.resolve_identifier('socket.socket') - url_xref = linker._resolve_identifier_xref('socket.socket', 0) + url = _linker.resolve_identifier('socket.socket') + url_xref = _linker._resolve_identifier_xref('socket.socket', 0) assert 'https://docs.python.org/3/library/socket.html#socket.socket' == url assert 'https://docs.python.org/3/library/socket.html#socket.socket' == url_xref @@ -904,14 +949,197 @@ class C: # Dummy module that we want to link from. target = model.Module(system, 'ignore-name') - sut = epydoc2stan._EpydocLinker(target) - + sut = target.docstring_linker + assert isinstance(sut, linker._CachedEpydocLinker) url = sut.resolve_identifier('internal_module.C') xref = sut._resolve_identifier_xref('internal_module.C', 0) assert "internal_module.C.html" == url assert int_mod.contents['C'] is xref +def test_CachedEpydocLinker() -> None: + """ + The CachedEpydocLinker returns the same Tag object without resolving the name and re-creating the link tag all the time. + """ + system = model.System() + inventory = SphinxInventory(system.msg) + inventory._links['base.module.other'] = ('http://tm.tld', 'some.html') + system.intersphinx = inventory + target = model.Module(system, 'ignore-name') + + sut = _TestCachedEpydocLinker(target, max_lookups=1) + + result2 = sut.link_to('base.module.other', 'base.module.other') + assert 'base.module.other' in sut._link_to_cache + assert len(sut._link_to_cache['base.module.other'][True])==1 + result1 = sut.link_xref('base.module.other', 'base.module.other', 0).children[0] # wrapped in a code tag + assert len(sut._link_xref_cache['base.module.other'][True])==0 + assert len(sut._link_to_cache['base.module.other'][True])==1 + result3 = sut.link_to('base.module.other', 'other') + assert len(sut._link_to_cache['base.module.other'][True])==2 + result4 = sut.link_xref('base.module.other', 'other', 0).children[0] + assert len(sut._link_to_cache['base.module.other'][True])==2 + assert len(sut._link_xref_cache['base.module.other'][True])==0 + + res = flatten(result2) + assert flatten(result1) == res == 'base.module.other' + assert flatten(result3) == flatten(result4) == 'other' + +class _TestCachedEpydocLinker(linker._CachedEpydocLinker): + """ + Docstring linker for testing the caching of results. + """ + + def __init__(self, obj: model.Documentable, max_lookups:int, same_page_optimization:bool=True) -> None: + super().__init__(obj, same_page_optimization) + self.lookups = 0 + self.max_lookups = max_lookups + + def link_to(self, target: str, label: "Flattenable") -> Tag: + link = self._lookup_cached_link_to(target, label) + if link is None: + if self.lookups Tag: + link = self._lookup_cached_link_xref(target, label, lineno) + if link is None: + if self.lookups None: + """ + A test case for the testing linker L{_TestCachedEpydocLinker}. + The test linker is initialized with a maximum number of non-cached requests it can make + and an AssertionError is raised if it makes too many requests. 
+ """ + system = model.System() + inventory = SphinxInventory(system.msg) + inventory._links['base.module.other'] = ('http://tm.tld', 'some.html') + system.intersphinx = inventory + target = model.Module(system, 'ignore-name') + + sut = _TestCachedEpydocLinker(target, 2) + sut.link_xref('base.module.other', 'other', 1) + assert sut.lookups==1 + assert len(sut._link_xref_cache['base.module.other'][True])==1 + sut.link_xref('notfound', 'notfound', 1) + assert sut.lookups==2 + assert len(sut._link_xref_cache['notfound'][True])==1 + + with pytest.raises(AssertionError): + sut.link_xref('anothername', 'again notfound', 1) + +def test_CachedEpydocLinker_same_page_optimization() -> None: + """ + When _CachedEpydocLinker.same_page_optimization is True, the linker will create URLs with only the anchor + if we're lnking to an object on the same page. + + Otherwise it will always use return a URL with a filename, this is used to generate the summaries. + """ + mod = fromText(''' + base=1 + class someclass: ... + ''', modname='module') + sut = _TestCachedEpydocLinker(mod, 3) # Raise if it makes more than 3 lookups. + assert isinstance(sut, linker._CachedEpydocLinker) + + sut.same_page_optimization=False + assert sut.link_to('base','module.base').attributes['href']=='index.html#base' + assert len(sut._link_to_cache['base'][False])==1, repr(sut._link_to_cache['base'][False]) + assert sut.link_to('base','base').attributes['href']=='index.html#base' + assert len(sut._link_to_cache['base'][False])==2, sut._link_to_cache['base'][False] + assert sut.link_to('someclass','some random name').attributes['href']=='module.someclass.html' + + sut.same_page_optimization=True + assert sut.link_to('base','base').attributes['href']=='#base' + assert sut.link_to('base','base').attributes['href']=='#base' + assert len(sut._link_to_cache['base'][True])==1 + assert sut.link_to('base', tags.transparent('module.base')).attributes['href']=='#base' + assert sut.link_to('base', tags.transparent('module.base')).attributes['href']=='#base' + # Tags are not properly understood right now but that's ok since these are only used + # when inserting a link with nested markup like L{B{driver} } + assert len(sut._link_to_cache['base'][False])==2 + assert len(sut._link_to_cache['base'][True])==3 + + assert sut.link_to('someclass','some other name').attributes['href']=='module.someclass.html' + assert sut.link_to('someclass','a third name').attributes['href']=='module.someclass.html' + assert len(sut._link_to_cache['someclass'][False])==1 + assert len(sut._link_to_cache['someclass'][True])==2 + + assert sut.link_to('notfound', 'notfound').children[0] == 'notfound' + assert sut.link_to('notfound', 'notfound.notfound').children[0] == 'notfound.notfound' + assert len(sut._link_to_cache['notfound'][True])==2 + +def test_CachedEpydocLinker_warnings(capsys: CapSys) -> None: + """ + Warnings should be reported only once per invalid name per line, + no matter the number of times we call summary2html() or docstring2html() or the order we call these functions. 
+ """ + _default_class = linker._CachedEpydocLinker + try: + linker._CachedEpydocLinker = partialclass(_TestCachedEpydocLinker, max_lookups=2) # type:ignore + src = ''' + """ + L{base} L{regular text } L{notfound} + + L{regular text } L{B{look at the base} } L{I{Important class} } L{notfound} + """ + base=1 + ''' + + mod = fromText(src, modname='module') + assert isinstance(mod.docstring_linker, _TestCachedEpydocLinker) + assert mod.docstring_linker.max_lookups==2 + assert 'href="#base"' in docstring2html(mod) + captured = capsys.readouterr().out + + # Here, we can see that the warning got reported only 2 times but + # the error is present 4 times in the docstring. This is because + # links are on the same line. + + # The rationale about xref warnings is now the following: + # - Warns only once per unresolved identifier per line. + + assert captured == 'module:3: Cannot find link target for "notfound"\nmodule:5: Cannot find link target for "notfound"\n' + + assert 'href="index.html#base"' in summary2html(mod) + summary2html(mod); docstring2html(mod) + + captured = capsys.readouterr().out + + # Other warnings are not logged if running summary2html and docstring2html multiple times. + assert captured == '' + + mod = fromText(src, modname='module') + assert isinstance(mod.docstring_linker, _TestCachedEpydocLinker) + assert mod.docstring_linker.max_lookups==2 + assert 'href="index.html#base"' in summary2html(mod) + captured = capsys.readouterr().out + + assert captured == 'module:3: Cannot find link target for "notfound"\n' + + html = docstring2html(mod) + captured = capsys.readouterr().out + assert captured == 'module:5: Cannot find link target for "notfound"\n' + assert 'href="#base"' in html + + docstring2html(mod); summary2html(mod) + captured = capsys.readouterr().out + assert captured == '' + + finally: + linker._CachedEpydocLinker = _default_class # type:ignore def test_xref_not_found_epytext(capsys: CapSys) -> None: """ @@ -937,8 +1165,6 @@ def test_xref_not_found_restructured(capsys: CapSys) -> None: """ When a link in an reStructedText docstring cannot be resolved, the reference and the line number of the link should be reported. - However, currently the best we can do is report the starting line of the - docstring instead. """ system = model.System() @@ -954,11 +1180,47 @@ def test_xref_not_found_restructured(capsys: CapSys) -> None: epydoc2stan.format_docstring(mod) captured = capsys.readouterr().out - # TODO: Should actually be line 5, but I can't get docutils to fill in - # the line number when it calls visit_title_reference(). - # https://github.com/twisted/pydoctor/issues/237 - assert captured == 'test:3: Cannot find link target for "NoSuchName"\n' + assert captured == 'test:5: Cannot find link target for "NoSuchName"\n' + +def test_xref_not_found_restructured_in_para(capsys: CapSys) -> None: + """ + When an invalid link is in the middle of a paragraph, we still report the right line number. + """ + system = model.System() + system.options.docformat = 'restructuredtext' + mod = fromText(''' + """ + A test module. + + blabla bla blabla bla blabla bla blabla bla + blabla blablabla blablabla blablabla blablabla bla blabla bla + blabla blablabla blablabla blablabla blablabla bla + Link to limbo: `NoSuchName`. 
+ """ + ''', modname='test', system=system) + + epydoc2stan.format_docstring(mod) + captured = capsys.readouterr().out + assert captured == 'test:8: Cannot find link target for "NoSuchName"\n' + system = model.System() + system.options.docformat = 'restructuredtext' + mod = fromText(''' + """ + A test module. + + blabla bla blabla bla blabla bla blabla bla + blabla blablabla blablabla blablabla blablabla bla blabla bla + blabla blablabla blablabla blablabla blablabla bla + Link to limbo: `NoSuchName`. blabla bla blabla bla blabla bla blabla bla + blabla blablabla blablabla blablabla blablabla bla blabla bla + blabla blablabla blablabla blablabla blablabla bla + """ + ''', modname='test', system=system) + + epydoc2stan.format_docstring(mod) + captured = capsys.readouterr().out + assert captured == 'test:8: Cannot find link target for "NoSuchName"\n' class RecordingAnnotationLinker(DocstringLinker): """A DocstringLinker implementation that cannot find any links, @@ -1026,13 +1288,13 @@ def test_module_docformat(capsys: CapSys) -> None: """ system = model.System() - system.options.docformat = 'plaintext' + system.options.docformat = 'epytext' mod = fromText(''' """ - Link to pydoctor: U{pydoctor }. + Link to pydoctor: `pydoctor `_. """ - __docformat__ = "epytext" + __docformat__ = "google" ''', modname='test_epy', system=system) epytext_output = epydoc2stan.format_docstring(mod) @@ -1052,11 +1314,8 @@ def test_module_docformat(capsys: CapSys) -> None: captured = capsys.readouterr().out assert not captured - assert ('Link to pydoctor: pydoctor' in flatten(epytext_output)) - - assert ('Link to pydoctor: pydoctor' in flatten(restructuredtext_output)) + assert ('href="https://github.com/twisted/pydoctor"' in flatten(epytext_output)) + assert ('href="https://github.com/twisted/pydoctor"' in flatten(restructuredtext_output)) def test_module_docformat_inheritence(capsys: CapSys) -> None: top_src = ''' @@ -1130,6 +1389,31 @@ def f(self, a: str, b: int): assert ''.join(docstring2html(B_f).splitlines()) == ''.join(docstring2html(A_f).splitlines()) +def test_cli_docformat_plaintext_overrides_module_docformat(capsys: CapSys) -> None: + """ + When System.options.docformat is set to C{plaintext} it + overwrites any specific Module.docformat defined for a module. + + See https://github.com/twisted/pydoctor/issues/503 for the reason + of this behavior. + """ + + system = model.System() + system.options.docformat = 'plaintext' + + mod = fromText(''' + """ + L{unknown} link. + """ + __docformat__ = "epytext" + ''', system=system) + + epytext_output = epydoc2stan.format_docstring(mod) + + captured = capsys.readouterr().out + assert not captured + + assert flatten(epytext_output).startswith('

    ') def test_constant_values_rst(capsys: CapSys) -> None: """ @@ -1158,7 +1442,7 @@ def f(a, b): expected = ('' '
    Value
    ' '
    ('
    -                'f)
    ') + 'f)
    ') attr = mod.contents['CONST'] assert isinstance(attr, model.Attribute) @@ -1218,3 +1502,43 @@ def func(): captured = capsys.readouterr().out assert captured == '' +def insert_break_points(t:str) -> str: + return flatten(epydoc2stan.insert_break_points(t)) + +def test_insert_break_points_identity() -> None: + """ + No break points are introduced for values containing a single world. + """ + assert insert_break_points('test') == 'test' + assert insert_break_points('_test') == '_test' + assert insert_break_points('_test_') == '_test_' + assert insert_break_points('') == '' + assert insert_break_points('____') == '____' + assert insert_break_points('__test__') == '__test__' + assert insert_break_points('__someverylongname__') == '__someverylongname__' + assert insert_break_points('__SOMEVERYLONGNAME__') == '__SOMEVERYLONGNAME__' + +def test_insert_break_points_snake_case() -> None: + assert insert_break_points('__some_very_long_name__') == '__some_very_long_name__' + assert insert_break_points('__SOME_VERY_LONG_NAME__') == '__SOME_VERY_LONG_NAME__' + +def test_insert_break_points_camel_case() -> None: + assert insert_break_points('__someVeryLongName__') == '__someVeryLongName__' + assert insert_break_points('__einÜberlangerName__') == '__einÜberlangerName__' + +def test_insert_break_points_dotted_name() -> None: + assert insert_break_points('mod.__some_very_long_name__') == 'mod.__some_very_long_name__' + assert insert_break_points('_mod.__SOME_VERY_LONG_NAME__') == '_mod.__SOME_VERY_LONG_NAME__' + assert insert_break_points('pack.mod.__someVeryLongName__') == 'pack.mod.__someVeryLongName__' + assert insert_break_points('pack._mod_.__einÜberlangerName__') == 'pack._mod_.__einÜberlangerName__' + +def test_stem_identifier() -> None: + assert list(stem_identifier('__some_very_long_name__')) == [ + 'very', 'long', 'name' # 'some' has been filtered out because it's part of the stop words. + ] + assert list(stem_identifier('__someVeryLongName__')) == [ + 'Very', 'Long', 'Name' + ] + assert list(stem_identifier('_name')) == ['name'] + assert list(stem_identifier('name')) == ['name'] + assert list(stem_identifier('processModuleAST')) == ['process', 'Module', 'AST'] diff --git a/pydoctor/test/test_model.py b/pydoctor/test/test_model.py index ef7c3dbc3..2b8b3bb8b 100644 --- a/pydoctor/test/test_model.py +++ b/pydoctor/test/test_model.py @@ -2,24 +2,32 @@ Unit tests for model. """ -from optparse import Values +import subprocess +import os +from inspect import signature from pathlib import Path, PurePosixPath, PureWindowsPath -from typing import cast +from typing import cast, Optional import zlib - import pytest -from pydoctor import model -from pydoctor.driver import parse_args +from twisted.web.template import Tag + +from pydoctor.options import Options +from pydoctor import model, stanutils +from pydoctor.templatewriter import pages +from pydoctor.utils import parse_privacy_tuple from pydoctor.sphinx import CacheT +from pydoctor.test import CapSys from pydoctor.test.test_astbuilder import fromText +from pydoctor.test.test_packages import processPackage class FakeOptions: """ - A fake options object as if it came from that stupid optparse thing. + A fake options object as if it came from argparse. 
""" sourcehref = None + htmlsourcebase: Optional[str] = None projectbasedirectory: Path docformat = 'epytext' @@ -49,15 +57,53 @@ def test_setSourceHrefOption(projectBaseDir: Path) -> None: options = FakeOptions() options.projectbasedirectory = projectBaseDir - - system = model.System() - system.sourcebase = "http://example.org/trac/browser/trunk" - system.options = cast(Values, options) + options.htmlsourcebase = "http://example.org/trac/browser/trunk" + system = model.System(options) # type:ignore[arg-type] mod.system = system system.setSourceHref(mod, projectBaseDir / "package" / "module.py") assert mod.sourceHref == "http://example.org/trac/browser/trunk/package/module.py" +def test_htmlsourcetemplate_auto_detect() -> None: + """ + Tests for the recognition of different version control providers + that uses differents URL templates to point to line numbers. + + Supported templates are:: + + Github : {}#L{lineno} + Bitbucket: {}#lines-{lineno} + SourceForge : {}#l{lineno} + """ + cases = [ + ("http://example.org/trac/browser/trunk", + "http://example.org/trac/browser/trunk/pydoctor/test/testpackages/basic/mod.py#L7"), + + ("https://sourceforge.net/p/epydoc/code/HEAD/tree/trunk/epydoc", + "https://sourceforge.net/p/epydoc/code/HEAD/tree/trunk/epydoc/pydoctor/test/testpackages/basic/mod.py#l7"), + + ("https://bitbucket.org/user/scripts/src/master", + "https://bitbucket.org/user/scripts/src/master/pydoctor/test/testpackages/basic/mod.py#lines-7"), + ] + for base, var_href in cases: + options = model.Options.from_args([f'--html-viewsource-base={base}', '--project-base-dir=.']) + system = model.System(options) + + processPackage('basic', systemcls=lambda:system) + assert system.allobjects['basic.mod.C'].sourceHref == var_href + +def test_htmlsourcetemplate_custom() -> None: + """ + The links to source code web pages can be customized via an CLI argument. + """ + options = model.Options.from_args([ + '--html-viewsource-base=http://example.org/trac/browser/trunk', + '--project-base-dir=.', + '--html-viewsource-template={mod_source_href}#n{lineno}']) + system = model.System(options) + + processPackage('basic', systemcls=lambda:system) + assert system.allobjects['basic.mod.C'].sourceHref == "http://example.org/trac/browser/trunk/pydoctor/test/testpackages/basic/mod.py#n7" def test_initialization_default() -> None: """ @@ -74,7 +120,7 @@ def test_initialization_options() -> None: """ Can be initialized with options. """ - options = cast(Values, object()) + options = Options.defaults() sut = model.System(options=options) @@ -85,11 +131,11 @@ def test_fetchIntersphinxInventories_empty() -> None: """ Convert option to empty dict. """ - options, _ = parse_args([]) + options = Options.defaults() options.intersphinx = [] sut = model.System(options=options) - sut.fetchIntersphinxInventories({}) + sut.fetchIntersphinxInventories(cast('CacheT', {})) # Use internal state since I don't know how else to # check for SphinxInventory state. @@ -101,7 +147,7 @@ def test_fetchIntersphinxInventories_content() -> None: Download and parse intersphinx inventories for each configured intersphix. 
""" - options, _ = parse_args([]) + options = Options.defaults() options.intersphinx = [ 'http://sphinx/objects.inv', 'file:///twisted/index.inv', @@ -210,22 +256,33 @@ class Dummy: def crash(self) -> None: """Mmm""" + +def dummy_function_with_complex_signature(foo: int, bar: float) -> str: + return "foo" + + def test_introspection_python() -> None: """Find docstrings from this test using introspection on pure Python.""" system = model.System() system.introspectModule(Path(__file__), __name__, None) + system.process() module = system.objForFullName(__name__) assert module is not None assert module.docstring == __doc__ func = module.contents['test_introspection_python'] + assert isinstance(func, model.Function) assert func.docstring == "Find docstrings from this test using introspection on pure Python." + assert func.signature == signature(test_introspection_python) method = system.objForFullName(__name__ + '.Dummy.crash') assert method is not None assert method.docstring == "Mmm" + func = module.contents['dummy_function_with_complex_signature'] + assert isinstance(func, model.Function) + assert func.signature == signature(dummy_function_with_complex_signature) def test_introspection_extension() -> None: """Find docstrings from this test using introspection of an extension.""" @@ -245,6 +302,8 @@ def test_introspection_extension() -> None: Path(cython_test_exception_raiser.raiser.__file__), 'raiser', package) + system.process() + assert not isinstance(module, model.Package) assert system.objForFullName('cython_test_exception_raiser') is package @@ -260,3 +319,156 @@ def test_introspection_extension() -> None: func = module.contents['raiseException'] assert func.docstring is not None assert func.docstring.strip() == "Raise L{RaiserException}." + +testpackages = Path(__file__).parent / 'testpackages' + +@pytest.mark.skipif("platform.python_implementation() == 'PyPy'") +def test_c_module_text_signature(capsys:CapSys) -> None: + + c_module_invalid_text_signature = testpackages / 'c_module_invalid_text_signature' + package_path = c_module_invalid_text_signature / 'mymod' + + # build extension + try: + cwd = os.getcwd() + code, outstr = subprocess.getstatusoutput(f'cd {c_module_invalid_text_signature} && python3 setup.py build_ext --inplace') + os.chdir(cwd) + + assert code==0, outstr + + system = model.System() + system.options.introspect_c_modules = True + + builder = system.systemBuilder(system) + builder.addModule(package_path) + builder.buildModules() + + assert "Cannot parse signature of mymod.base.invalid_text_signature" in capsys.readouterr().out + + mymod_base = system.allobjects['mymod.base'] + assert isinstance(mymod_base, model.Module) + func = mymod_base.contents['invalid_text_signature'] + assert isinstance(func, model.Function) + assert func.signature == None + valid_func = mymod_base.contents['valid_text_signature'] + assert isinstance(valid_func, model.Function) + + assert "(...)" == pages.format_signature(func) + assert "(a='r', b=-3.14)" == stanutils.flatten_text( + cast(Tag, pages.format_signature(valid_func))) + + finally: + # cleanup + subprocess.getoutput(f'rm -f {package_path}/*.so') + +@pytest.mark.skipif("platform.python_implementation() == 'PyPy'") +def test_c_module_python_module_name_clash(capsys:CapSys) -> None: + c_module_python_module_name_clash = testpackages / 'c_module_python_module_name_clash' + package_path = c_module_python_module_name_clash / 'mymod' + + # build extension + try: + cwd = os.getcwd() + code, outstr = subprocess.getstatusoutput(f'cd 
{c_module_python_module_name_clash} && python3 setup.py build_ext --inplace') + os.chdir(cwd) + + assert code==0, outstr + system = model.System() + system.options.introspect_c_modules = True + + system.addPackage(package_path, None) + system.process() + + mod = system.allobjects['mymod.base'] + # there is only one mymod.base module + assert [mod] == list(system.allobjects['mymod'].contents.values()) + assert len(mod.contents) == 1 + assert 'coming_from_c_module' == mod.contents.popitem()[0] + + finally: + # cleanup + subprocess.getoutput(f'rm -f {package_path}/*.so') + +def test_resolve_name_subclass(capsys:CapSys) -> None: + """ + C{Model.resolveName} knows about single inheritance. + """ + m = fromText( + """ + class B: + v=1 + class C(B): + pass + """ + ) + assert m.resolveName('C.v') == m.contents['B'].contents['v'] + +@pytest.mark.parametrize('privacy', [ + (['public:m._public**', 'public:m.tests', 'public:m.tests.helpers', 'private:m._public.private', 'hidden:m._public.hidden', 'hidden:m.tests.*']), + (reversed(['private:**private', 'hidden:**hidden', 'public:**_public', 'hidden:m.tests.test**', ])), +]) +def test_privacy_switch(privacy:object) -> None: + s = model.System() + s.options.privacy = [parse_privacy_tuple(p, '--privacy') for p in privacy] # type:ignore + + fromText( + """ + class _public: + class _still_public: + ... + class private: + ... + class hidden: + ... + + class tests(B): # public + class helpers: # public + ... + class test1: # everything else hidden + ... + class test2: + ... + class test3: + ... + """, system=s, modname='m' + ) + allobjs = s.allobjects + + assert allobjs['m._public'].privacyClass == model.PrivacyClass.PUBLIC + assert allobjs['m._public._still_public'].privacyClass == model.PrivacyClass.PUBLIC + assert allobjs['m._public.private'].privacyClass == model.PrivacyClass.PRIVATE + assert allobjs['m._public.hidden'].privacyClass == model.PrivacyClass.HIDDEN + + assert allobjs['m.tests'].privacyClass == model.PrivacyClass.PUBLIC + assert allobjs['m.tests.helpers'].privacyClass == model.PrivacyClass.PUBLIC + assert allobjs['m.tests.test1'].privacyClass == model.PrivacyClass.HIDDEN + assert allobjs['m.tests.test2'].privacyClass == model.PrivacyClass.HIDDEN + assert allobjs['m.tests.test3'].privacyClass == model.PrivacyClass.HIDDEN + +def test_privacy_reparented() -> None: + """ + Test that the privacy of an object changes if + the name of the object changes (with reparenting). + """ + + system = model.System() + + mod_private = fromText(''' + class _MyClass: + pass + ''', modname='private', system=system) + + mod_export = fromText( + 'from private import _MyClass # not needed for the test to pass', + modname='public', system=system) + + base = mod_private.contents['_MyClass'] + assert base.privacyClass == model.PrivacyClass.PRIVATE + + # Manually reparent MyClass + base.reparent(mod_export, 'MyClass') + assert base.fullName() == 'public.MyClass' + assert '_MyClass' not in mod_private.contents + assert mod_export.resolveName("MyClass") == base + + assert base.privacyClass == model.PrivacyClass.PUBLIC diff --git a/pydoctor/test/test_napoleon_docstring.py b/pydoctor/test/test_napoleon_docstring.py index 115efc34b..51b70139f 100644 --- a/pydoctor/test/test_napoleon_docstring.py +++ b/pydoctor/test/test_napoleon_docstring.py @@ -5,28 +5,19 @@ :license: BSD, see LICENSE for details. 
""" import re -from typing import Type, Union, Any +from typing import Type, Union from unittest import TestCase from textwrap import dedent -import functools from pydoctor.napoleon.docstring import (GoogleDocstring as _GoogleDocstring, NumpyDocstring as _NumpyDocstring, TokenType, TypeDocstring, is_type, is_google_typed_arg) +from pydoctor.utils import partialclass import sphinx.ext.napoleon as sphinx_napoleon __docformat__ = "restructuredtext" -def partialclass(cls: Type[Any], *args: Any, **kwds: Any) -> Type[Any]: - # mypy gets errors: - Variable "cls" is not valid as a type - # - Invalid base class "cls" - class NewCls(cls): #type: ignore - __init__ = functools.partialmethod(cls.__init__, *args, **kwds) #type: ignore - __class__ = cls - assert isinstance(NewCls, type) - return NewCls - sphinx_napoleon_config = sphinx_napoleon.Config( napoleon_use_admonition_for_examples=True, napoleon_use_admonition_for_notes=True, diff --git a/pydoctor/test/test_node2stan.py b/pydoctor/test/test_node2stan.py index 1a7e93eb2..e220e71dc 100644 --- a/pydoctor/test/test_node2stan.py +++ b/pydoctor/test/test_node2stan.py @@ -4,11 +4,13 @@ :See: {test.epydoc.test_epytext2html}, {test.epydoc.test_restructuredtext} """ +from pydoctor.epydoc.docutils import get_lineno +from pydoctor.test import CapSys from pydoctor.test.epydoc.test_epytext2html import epytext2node -from pydoctor.test.epydoc.test_restructuredtext import rst2node +from pydoctor.test.epydoc.test_restructuredtext import rst2node, parse_rst from pydoctor.node2stan import gettext - +from docutils import nodes def test_gettext() -> None: doc = ''' @@ -85,3 +87,70 @@ def test_gettext() -> None: assert gettext(rst2node(doc)) == ['This paragraph is not in any section.', 'mailto:postmaster@example.net', 'This is just a note with nested contents'] + +def count_parents(node:nodes.Node) -> int: + count = 0 + ctx = node + + while not isinstance(ctx, nodes.document): + count += 1 + ctx = ctx.parent + return count + +class TitleReferenceDump(nodes.GenericNodeVisitor): + def default_visit(self, node: nodes.Node) -> None: + if not isinstance(node, nodes.title_reference): + return + print('{}{:<15} line: {}, get_lineno: {}, rawsource: {}'.format( + '|'*count_parents(node), + type(node).__name__, + node.line, + get_lineno(node), + node.rawsource.replace('\n', '\\n'))) + +def test_docutils_get_lineno_title_reference(capsys:CapSys) -> None: + """ + We can get the exact line numbers for all `nodes.title_reference` nodes in a docutils document. + """ + + + parsed_doc = parse_rst(''' +Fizz +==== + +Lorem ipsum `notfound`. + +Buzz +**** + +Lorem ``ipsum`` + +.. code-block:: python + + x = 0 + +.. note:: + + Dolor sit amet + `notfound`. + + .. code-block:: python + + y = 1 + +Dolor sit amet `another link `. +Dolor sit amet `link `. +bla blab balba. + +:var foo: Dolor sit amet `link `. 
+''') + doc = parsed_doc.to_node() + doc.walk(TitleReferenceDump(doc)) + assert capsys.readouterr().out == r'''||title_reference line: None, get_lineno: 4, rawsource: `notfound` +||||title_reference line: None, get_lineno: 18, rawsource: `notfound` +|||title_reference line: None, get_lineno: 24, rawsource: `another link ` +|||title_reference line: None, get_lineno: 25, rawsource: `link ` +''' + parsed_doc.fields[0].body().to_node().walk(TitleReferenceDump(doc)) + assert capsys.readouterr().out == r'''||title_reference line: None, get_lineno: 28, rawsource: `link ` +''' diff --git a/pydoctor/test/test_options.py b/pydoctor/test/test_options.py new file mode 100644 index 000000000..960b0982b --- /dev/null +++ b/pydoctor/test/test_options.py @@ -0,0 +1,225 @@ +import os +from pathlib import Path +import pytest +from io import StringIO + +from pydoctor import model +from pydoctor.options import PydoctorConfigParser, Options + +from pydoctor.test import FixtureRequest, TempPathFactory + +EXAMPLE_TOML_CONF = """ +[tool.poetry] +packages = [ + { include = "my_package" }, + { include = "extra_package" }, +] +name = "awesome" + +[tool.poetry.dependencies] +# These packages are mandatory and form the core of this package’s distribution. +mandatory = "^1.0" + +# A list of all of the optional dependencies, some of which are included in the +# below `extras`. They can be opted into by apps. +psycopg2 = { version = "^2.7", optional = true } +mysqlclient = { version = "^1.3", optional = true } + +[tool.poetry.extras] +mysql = ["mysqlclient"] +pgsql = ["psycopg2"] +""" + +EXAMPLE_INI_CONF = """ +[metadata] +name = setup.cfg +version = 0.9.0.dev +author = Erik M. Bray +author-email = embray@stsci.edu +summary = Reads a distributions's metadata from its setup.cfg file and passes it to setuptools.setup() +description-file = + README.rst + CHANGES.rst +home-page = http://pypi.python.org/pypi/setup.cfg +requires-dist = setuptools +classifier = + Development Status :: 5 - Production/Stable + Environment :: Plugins + Framework :: Setuptools Plugin + Intended Audience :: Developers + License :: OSI Approved :: BSD License + Operating System :: OS Independent + Programming Language :: Python + Programming Language :: Python :: 3 + Topic :: Software Development :: Build Tools + Topic :: Software Development :: Libraries :: Python Modules + Topic :: System :: Archiving :: Packaging + +[files] +packages = + setup + setup.cfg + setup.cfg.extern +extra_files = + CHANGES.rst + LICENSE + ez_setup.py +""" + +PYDOCTOR_SECTIONS = [""" +[pydoctor] +intersphinx = ["https://docs.python.org/3/objects.inv", + "https://twistedmatrix.com/documents/current/api/objects.inv", + "https://urllib3.readthedocs.io/en/latest/objects.inv", + "https://requests.readthedocs.io/en/latest/objects.inv", + "https://www.attrs.org/en/stable/objects.inv", + "https://tristanlatr.github.io/apidocs/docutils/objects.inv"] +docformat = 'restructuredtext' +project-name = 'MyProject' +project-url = "https://github.com/twisted/pydoctor" +privacy = ["HIDDEN:pydoctor.test"] +quiet = 1 +warnings-as-errors = true +""", # toml/ini + +""" +[tool.pydoctor] +intersphinx = ["https://docs.python.org/3/objects.inv", + "https://twistedmatrix.com/documents/current/api/objects.inv", + "https://urllib3.readthedocs.io/en/latest/objects.inv", + "https://requests.readthedocs.io/en/latest/objects.inv", + "https://www.attrs.org/en/stable/objects.inv", + "https://tristanlatr.github.io/apidocs/docutils/objects.inv"] +docformat = "restructuredtext" +project-name = "MyProject" 
+project-url = "https://github.com/twisted/pydoctor" +privacy = ["HIDDEN:pydoctor.test"] +quiet = 1 +warnings-as-errors = true +""", # toml/ini + +""" +[tool:pydoctor] +intersphinx = + https://docs.python.org/3/objects.inv + https://twistedmatrix.com/documents/current/api/objects.inv + https://urllib3.readthedocs.io/en/latest/objects.inv + https://requests.readthedocs.io/en/latest/objects.inv + https://www.attrs.org/en/stable/objects.inv + https://tristanlatr.github.io/apidocs/docutils/objects.inv +docformat = restructuredtext +project-name = MyProject +project-url = https://github.com/twisted/pydoctor +privacy = + HIDDEN:pydoctor.test +quiet = 1 +warnings-as-errors = true +""", # ini only + +""" +[pydoctor] +intersphinx: ["https://docs.python.org/3/objects.inv", + "https://twistedmatrix.com/documents/current/api/objects.inv", + "https://urllib3.readthedocs.io/en/latest/objects.inv", + "https://requests.readthedocs.io/en/latest/objects.inv", + "https://www.attrs.org/en/stable/objects.inv", + "https://tristanlatr.github.io/apidocs/docutils/objects.inv"] +docformat: restructuredtext +project-name: MyProject +project-url: '''https://github.com/twisted/pydoctor''' +privacy = + HIDDEN:pydoctor.test +quiet = 1 +warnings-as-errors = true +""", # ini only +] + +@pytest.fixture(scope='module') +def tempDir(request: FixtureRequest, tmp_path_factory: TempPathFactory) -> Path: + name = request.module.__name__.split('.')[-1] + return tmp_path_factory.mktemp(f'{name}-cache') + +@pytest.mark.parametrize('project_conf', [EXAMPLE_TOML_CONF, EXAMPLE_INI_CONF]) +@pytest.mark.parametrize('pydoctor_conf', PYDOCTOR_SECTIONS) +def test_config_parsers(project_conf:str, pydoctor_conf:str, tempDir:Path) -> None: + + if '[tool:pydoctor]' in pydoctor_conf and '[tool.poetry]' in project_conf: + # colons in section names are not supported in TOML (without quotes) + return + if 'intersphinx:' in pydoctor_conf and '[tool.poetry]' in project_conf: + # colons to defined key pairs are not supported in TOML + return + + parser = PydoctorConfigParser() + stream = StringIO(project_conf + '\n' + pydoctor_conf) + data = parser.parse(stream) + + assert data['docformat'] == 'restructuredtext', data + assert data['project-url'] == 'https://github.com/twisted/pydoctor', data + assert len(data['intersphinx']) == 6, data + + conf_file = (tempDir / "pydoctor_temp_conf") + + with conf_file.open('w') as f: + f.write(project_conf + '\n' + pydoctor_conf) + + options = Options.from_args([f"--config={conf_file}"]) + assert options.verbosity == -1 + assert options.warnings_as_errors == True + assert options.privacy == [(model.PrivacyClass.HIDDEN, 'pydoctor.test')] + assert options.intersphinx[0] == "https://docs.python.org/3/objects.inv" + assert options.intersphinx[-1] == "https://tristanlatr.github.io/apidocs/docutils/objects.inv" + +def test_repeatable_options_multiple_configs_and_args(tempDir:Path) -> None: + config1 = """ +[pydoctor] +intersphinx = ["https://docs.python.org/3/objects.inv"] +verbose = 1 +""" + config2 = """ +[tool.pydoctor] +intersphinx = ["https://twistedmatrix.com/documents/current/api/objects.inv"] +verbose = -1 +project-version = 2050.4C +""" + config3 = """ +[tool:pydoctor] +intersphinx = ["https://requests.readthedocs.io/en/latest/objects.inv"] +verbose = 0 +project-name = "Hello World!" 
+""" + + cwd = os.getcwd() + try: + conf_file1 = (tempDir / "pydoctor.ini") + conf_file2 = (tempDir / "pyproject.toml") + conf_file3 = (tempDir / "setup.cfg") + + for cfg, file in zip([config1, config2, config3],[conf_file1, conf_file2, conf_file3]): + with open(file, 'w') as f: + f.write(cfg) + + os.chdir(tempDir) + options = Options.defaults() + + assert options.verbosity == 1 + assert options.intersphinx == ["https://docs.python.org/3/objects.inv",] + assert options.projectname == "Hello World!" + assert options.projectversion == "2050.4C" + + options = Options.from_args(['-vv']) + + assert options.verbosity == 3 # I would have expected 2 + assert options.intersphinx == ["https://docs.python.org/3/objects.inv",] + assert options.projectname == "Hello World!" + assert options.projectversion == "2050.4C" + + options = Options.from_args(['-vv', '--intersphinx=https://twistedmatrix.com/documents/current/api/objects.inv', '--intersphinx=https://urllib3.readthedocs.io/en/latest/objects.inv']) + + assert options.verbosity == 3 # I would have expected 2 + assert options.intersphinx == ["https://twistedmatrix.com/documents/current/api/objects.inv", "https://urllib3.readthedocs.io/en/latest/objects.inv"] + assert options.projectname == "Hello World!" + assert options.projectversion == "2050.4C" + + finally: + os.chdir(cwd) \ No newline at end of file diff --git a/pydoctor/test/test_packages.py b/pydoctor/test/test_packages.py index 261fa31a4..fe16a9991 100644 --- a/pydoctor/test/test_packages.py +++ b/pydoctor/test/test_packages.py @@ -1,14 +1,16 @@ from pathlib import Path -from typing import Type +from typing import Callable +import pytest from pydoctor import model testpackages = Path(__file__).parent / 'testpackages' -def processPackage(packname: str, systemcls: Type[model.System] = model.System) -> model.System: +def processPackage(packname: str, systemcls: Callable[[], model.System] = model.System) -> model.System: system = systemcls() - system.addPackage(testpackages / packname) - system.process() + builder = system.systemBuilder(system) + builder.addModule(testpackages / packname) + builder.buildModules() return system def test_relative_import() -> None: @@ -82,3 +84,86 @@ def test_cyclic_imports() -> None: assert mod_a.expandName('B') == 'cyclic_imports.b.B' mod_b = system.allobjects['cyclic_imports.b'] assert mod_b.expandName('A') == 'cyclic_imports.a.A' + +def test_package_module_name_clash() -> None: + """ + When a module and a package have the same full name, the package wins. + """ + system = processPackage('package_module_name_clash') + pack = system.allobjects['package_module_name_clash.pack'] + assert 'package' == pack.contents.popitem()[0] + +def test_reparented_module() -> None: + """ + A module that is imported in a package as a different name and exported + in that package under the new name via C{__all__} is presented using the + new name. 
+ """ + system = processPackage('reparented_module') + + mod = system.allobjects['reparented_module.module'] + top = system.allobjects['reparented_module'] + + assert mod.fullName() == 'reparented_module.module' + assert top.resolveName('module') is top.contents['module'] + assert top.resolveName('module.f') is mod.contents['f'] + + # The module old name is not in allobjects + assert 'reparented_module.mod' not in system.allobjects + # But can still be resolved with it's old name + assert top.resolveName('mod') is top.contents['module'] + +def test_reparenting_follows_aliases() -> None: + """ + Test for https://github.com/twisted/pydoctor/issues/505 + + Reparenting process follows aliases. + """ + + system = processPackage('reparenting_follows_aliases') + + # reparenting_follows_aliases.main: imports MyClass from ._myotherthing and re-export it in it's __all__ variable. + # reparenting_follows_aliases._mything: defines class MyClass. + # reparenting_follows_aliases._myotherthing: imports class MyClass from ._mything, but do not export it. + + # Test that we do not get KeyError + klass = system.allobjects['reparenting_follows_aliases.main.MyClass'] + + # Test older names still resolves to reparented object + top = system.allobjects['reparenting_follows_aliases'] + + myotherthing = top.contents['_myotherthing'] + mything = top.contents['_mything'] + + assert isinstance(mything, model.Module) + assert isinstance(myotherthing, model.Module) + + assert mything._localNameToFullName('MyClass') == 'reparenting_follows_aliases.main.MyClass' + assert myotherthing._localNameToFullName('MyClass') == 'reparenting_follows_aliases._mything.MyClass' + + system.find_object('reparenting_follows_aliases._mything.MyClass') == klass + + # This part of the test cannot pass for now since we don't recursively resolve aliases. + # See https://github.com/twisted/pydoctor/pull/414 and https://github.com/twisted/pydoctor/issues/430 + + try: + assert system.find_object('reparenting_follows_aliases._myotherthing.MyClass') == klass + assert myotherthing.resolveName('MyClass') == klass + assert mything.resolveName('MyClass') == klass + assert top.resolveName('_myotherthing.MyClass') == klass + assert top.resolveName('_mything.MyClass') == klass + except (AssertionError, LookupError): + return + else: + raise AssertionError("Congratulation!") + +@pytest.mark.parametrize('modname', ['reparenting_crash','reparenting_crash_alt']) +def test_reparenting_crash(modname: str) -> None: + """ + Test for https://github.com/twisted/pydoctor/issues/513 + """ + system = processPackage(modname) + mod = system.allobjects[modname] + assert isinstance(mod.contents[modname], model.Class) + assert isinstance(mod.contents['reparented_func'], model.Function) + assert isinstance(mod.contents[modname].contents['reparented_func'], model.Function) diff --git a/pydoctor/test/test_pydantic_fields.py b/pydoctor/test/test_pydantic_fields.py new file mode 100644 index 000000000..7a1a0298b --- /dev/null +++ b/pydoctor/test/test_pydantic_fields.py @@ -0,0 +1,70 @@ +import ast +from typing import List, Type +from pydoctor import astutils, extensions, model + +class ModVisitor(extensions.ModuleVisitorExt): + + def depart_AnnAssign(self, node: ast.AnnAssign) -> None: + """ + Called after an annotated assignment definition is visited. 
+ """ + ctx = self.visitor.builder.current + if not isinstance(ctx, model.Class): + # check if the current context object is a class + return + + if not any(ctx.expandName(b) == 'pydantic.BaseModel' for b in ctx.bases): + # check if the current context object if a class derived from ``pydantic.BaseModel`` + return + + dottedname = astutils.node2dottedname(node.target) + if not dottedname or len(dottedname)!=1: + # check if the assignment is a simple name, otherwise ignore it + return + + # Get the attribute from current context + attr = ctx.contents[dottedname[0]] + + assert isinstance(attr, model.Attribute) + + # All class variables that are not annotated with ClassVar will be transformed to instance variables. + if astutils.is_using_typing_classvar(attr.annotation, attr): + return + + if attr.kind == model.DocumentableKind.CLASS_VARIABLE: + attr.kind = model.DocumentableKind.INSTANCE_VARIABLE + +def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None: + r.register_astbuilder_visitor(ModVisitor) + +class PydanticSystem2(model.System): + # Add our custom extension + extensions: List[str] = [] + custom_extensions = ['pydoctor.test.test_pydantic_fields'] + +## Testing code + +import pytest +from pydoctor.test.test_astbuilder import fromText, PydanticSystem + +pydantic_systemcls_param = pytest.mark.parametrize('systemcls', (PydanticSystem, PydanticSystem2)) + +@pydantic_systemcls_param +def test_pydantic_fields(systemcls: Type[model.System]) -> None: + src = ''' + from typing import ClassVar + from pydantic import BaseModel, Field + class Model(BaseModel): + a: int + b: int = Field(...) + name:str = 'Jane Doe' + kind:ClassVar = 'person' + ''' + + mod = fromText(src, modname='mod', systemcls=systemcls) + + assert mod.contents['Model'].contents['a'].kind == model.DocumentableKind.INSTANCE_VARIABLE + assert mod.contents['Model'].contents['b'].kind == model.DocumentableKind.INSTANCE_VARIABLE + assert mod.contents['Model'].contents['name'].kind == model.DocumentableKind.INSTANCE_VARIABLE + assert mod.contents['Model'].contents['kind'].kind == model.DocumentableKind.CLASS_VARIABLE + diff --git a/pydoctor/test/test_qnmatch.py b/pydoctor/test/test_qnmatch.py new file mode 100644 index 000000000..2f4bb4154 --- /dev/null +++ b/pydoctor/test/test_qnmatch.py @@ -0,0 +1,130 @@ +import unittest + +from pydoctor.qnmatch import qnmatch, translate + +def test_qnmatch() -> None: + + assert(qnmatch('site.yml', 'site.yml')) + + assert(not qnmatch('site.yml', '**.site.yml')) + assert(not qnmatch('site.yml', 'site.yml.**')) + assert(not qnmatch('SITE.YML', 'site.yml')) + assert(not qnmatch('SITE.YML', '**.site.yml')) + + assert(qnmatch('images.logo.png', '*.*.png')) + assert(not qnmatch('images.images.logo.png', '*.*.png')) + assert(not qnmatch('images.logo.png', '*.*.*.png')) + assert(qnmatch('images.logo.png', '**.png')) + assert(qnmatch('images.logo.png', '**.*.png')) + assert(qnmatch('images.logo.png', '**png')) + + assert(not qnmatch('images.logo.png', 'images.**.*.png')) + assert(not qnmatch('images.logo.png', '**.images.**.png')) + assert(not qnmatch('images.logo.png', '**.images.**.???')) + assert(not qnmatch('images.logo.png', '**.image?.**.???')) + + assert(qnmatch('images.logo.png', 'images.**.png')) + assert(qnmatch('images.logo.png', 'images.**.png')) + assert(qnmatch('images.logo.png', 'images.**.???')) + assert(qnmatch('images.logo.png', 'image?.**.???')) + + assert(qnmatch('images.gitkeep', '**.*')) + assert(qnmatch('output.gitkeep', '**.*')) + + assert(qnmatch('images.gitkeep', '*.**')) + 
assert(qnmatch('output.gitkeep', '*.**')) + + assert(qnmatch('.hidden', '**.*')) + assert(qnmatch('sub.hidden', '**.*')) + assert(qnmatch('sub.sub.hidden', '**.*')) + + assert(qnmatch('.hidden', '**.hidden')) + assert(qnmatch('sub.hidden', '**.hidden')) + assert(qnmatch('sub.sub.hidden', '**.hidden')) + + assert(qnmatch('site.yml.Class', 'site.yml.*')) + assert(not qnmatch('site.yml.Class.property', 'site.yml.*')) + assert(not qnmatch('site.yml.Class.property', 'site.yml.Class')) + + assert(qnmatch('site.yml.Class.__init__', '**.__*__')) + assert(qnmatch('site._yml.Class.property', '**._*.**')) + assert(qnmatch('site.yml._Class.property', '**._*.**')) + assert(not qnmatch('site.yml.Class.property', '**._*.**')) + assert(not qnmatch('site.yml_.Class.property', '**._*.**')) + assert(not qnmatch('site.yml.Class._property', '**._*.**')) + +class TranslateTestCase(unittest.TestCase): + def test_translate(self) -> None: + self.assertEqual(translate('*'), r'(?s:[^\.]*?)\Z') + self.assertEqual(translate('**'), r'(?s:.*?)\Z') + self.assertEqual(translate('?'), r'(?s:.)\Z') + self.assertEqual(translate('a?b*'), r'(?s:a.b[^\.]*?)\Z') + self.assertEqual(translate('[abc]'), r'(?s:[abc])\Z') + self.assertEqual(translate('[]]'), r'(?s:[]])\Z') + self.assertEqual(translate('[!x]'), r'(?s:[^x])\Z') + self.assertEqual(translate('[^x]'), r'(?s:[\^x])\Z') + self.assertEqual(translate('[x'), r'(?s:\[x)\Z') + +class FnmatchTestCase(unittest.TestCase): + + def check_match(self, filename, pattern, should_match=True, fn=qnmatch) -> None: # type: ignore + if should_match: + self.assertTrue(fn(filename, pattern), + "expected %r to match pattern %r" + % (filename, pattern)) + else: + self.assertFalse(fn(filename, pattern), + "expected %r not to match pattern %r" + % (filename, pattern)) + + def test_fnmatch(self) -> None: + check = self.check_match + check('abc', 'abc') + check('abc', '?*?') + check('abc', '???*') + check('abc', '*???') + check('abc', '???') + check('abc', '*') + check('abc', 'ab[cd]') + check('abc', 'ab[!de]') + check('abc', 'ab[de]', False) + check('a', '??', False) + check('a', 'b', False) + + # these test that '\' is handled correctly in character sets; + # see SF bug #409651 + check('\\', r'[\]') + check('a', r'[!\]') + check('\\', r'[!\]', False) + + # test that filenames with newlines in them are handled correctly. 
+ # http://bugs.python.org/issue6665 + check('foo\nbar', 'foo*') + check('foo\nbar\n', 'foo*') + check('\nfoo', 'foo*', False) + check('\n', '*') + + def test_mix_bytes_str(self) -> None: + self.assertRaises(TypeError, qnmatch, 'test', b'*') + self.assertRaises(TypeError, qnmatch, b'test', '*') + self.assertRaises(TypeError, qnmatch, 'test', b'*') + self.assertRaises(TypeError, qnmatch, b'test', '*') + + def test_fnmatchcase(self) -> None: + check = self.check_match + check('abc', 'abc', True, qnmatch) + check('AbC', 'abc', False, qnmatch) + check('abc', 'AbC', False, qnmatch) + check('AbC', 'AbC', True, qnmatch) + + check('usr/bin', 'usr/bin', True, qnmatch) + check('usr\\bin', 'usr/bin', False, qnmatch) + check('usr/bin', 'usr\\bin', False, qnmatch) + check('usr\\bin', 'usr\\bin', True, qnmatch) + + def test_case(self) -> None: + check = self.check_match + check('abc', 'abc') + check('AbC', 'abc', False) + check('abc', 'AbC', False) + check('AbC', 'AbC') diff --git a/pydoctor/test/test_sphinx.py b/pydoctor/test/test_sphinx.py index fea447db3..e28f8eda8 100644 --- a/pydoctor/test/test_sphinx.py +++ b/pydoctor/test/test_sphinx.py @@ -88,8 +88,10 @@ def inv_writer_nolog() -> sphinx.SphinxInventoryWriter: project_version='2.3.0', ) +class IgnoreSystem: + root_names = () -IGNORE_SYSTEM = cast(model.System, 'ignore-system') +IGNORE_SYSTEM = cast(model.System, IgnoreSystem()) """Passed as a System when we don't want the system to be accessed.""" @@ -365,7 +367,7 @@ def test_update_functional(inv_reader_nolog: sphinx.SphinxInventory) -> None: url = 'http://some.url/api/objects.inv' - inv_reader_nolog.update({url: content}, url) + inv_reader_nolog.update(cast('sphinx.CacheT', {url: content}), url) assert 'http://some.url/api/module1.html' == inv_reader_nolog.getLink('some.module1') assert 'http://some.url/api/module2.html' == inv_reader_nolog.getLink('other.module2') @@ -376,7 +378,7 @@ def test_update_bad_url(inv_reader: InvReader) -> None: Log an error when failing to get base url from url. """ - inv_reader.update({}, 'really.bad.url') + inv_reader.update(cast('sphinx.CacheT', {}), 'really.bad.url') assert inv_reader._links == {} expected_log = [( @@ -390,7 +392,7 @@ def test_update_fail(inv_reader: InvReader) -> None: Log an error when failing to get content from url. 
""" - inv_reader.update({}, 'http://some.tld/o.inv') + inv_reader.update(cast('sphinx.CacheT', {}), 'http://some.tld/o.inv') assert inv_reader._links == {} expected_log = [( @@ -574,7 +576,7 @@ def test_ClosingBytesIO() -> None: assert cbio.closed - assert b''.join(buffer) == data + assert b''.join(buffer) == data # type:ignore[unreachable] class TestIntersphinxCache: @@ -728,7 +730,7 @@ def test_prepareCache( cacheDirectory.mkdir(exist_ok=True) for child in cacheDirectory.iterdir(): child.unlink() - with open(cacheDirectory / cacheDirectoryName, 'w'): + with open(cacheDirectory / cacheDirectoryName, 'w', encoding='utf-8'): pass try: diff --git a/pydoctor/test/test_templatewriter.py b/pydoctor/test/test_templatewriter.py index eb3927c88..8daf0c9a6 100644 --- a/pydoctor/test/test_templatewriter.py +++ b/pydoctor/test/test_templatewriter.py @@ -1,20 +1,22 @@ from io import BytesIO -from typing import Callable, Union, cast, TYPE_CHECKING +from typing import Callable, Union, Any, cast, TYPE_CHECKING import pytest import warnings import sys import tempfile import os from pathlib import Path, PurePath -from pydoctor import model, templatewriter, stanutils -from pydoctor.templatewriter import (FailedToCreateTemplate, StaticTemplate, pages, writer, + +from pydoctor import model, templatewriter, stanutils, __version__ +from pydoctor.templatewriter import (FailedToCreateTemplate, StaticTemplate, pages, writer, util, TemplateLookup, Template, HtmlTemplate, UnsupportedTemplateVersion, OverrideTemplateNotAllowed) from pydoctor.templatewriter.pages.table import ChildTable -from pydoctor.templatewriter.summary import isClassNodePrivate, isPrivate +from pydoctor.templatewriter.summary import isClassNodePrivate, isPrivate, moduleSummary from pydoctor.test.test_astbuilder import fromText -from pydoctor.test.test_packages import processPackage +from pydoctor.test.test_packages import processPackage, testpackages +from pydoctor.themes import get_themes if TYPE_CHECKING: from twisted.web.template import Flattenable @@ -23,7 +25,7 @@ if sys.version_info >= (3, 9): from importlib.abc import Traversable else: - Traversable = Path + Traversable = Any else: Traversable = object @@ -52,6 +54,36 @@ def getHTMLOf(ob: model.Documentable) -> str: return f.getvalue().decode() +def test_sidebar() -> None: + src = ''' + class C: + + def f(): ... + def h(): ... + + class D: + def l(): ... 
+ + ''' + system = model.System(model.Options.from_args( + ['--sidebar-expand-depth=3'])) + + mod = fromText(src, modname='mod', system=system) + + mod_html = getHTMLOf(mod) + + mod_parts = [ + ' None: src = ''' def f(): @@ -63,13 +95,13 @@ def f(): def test_empty_table() -> None: mod = fromText('') - t = ChildTable(pages.DocGetter(), mod, [], ChildTable.lookup_loader(TemplateLookup(template_dir))) + t = ChildTable(util.DocGetter(), mod, [], ChildTable.lookup_loader(TemplateLookup(template_dir))) flattened = flatten(t) assert 'The renderer named' not in flattened def test_nonempty_table() -> None: mod = fromText('def f(): pass') - t = ChildTable(pages.DocGetter(), mod, mod.contents.values(), ChildTable.lookup_loader(TemplateLookup(template_dir))) + t = ChildTable(util.DocGetter(), mod, mod.contents.values(), ChildTable.lookup_loader(TemplateLookup(template_dir))) flattened = flatten(t) assert 'The renderer named' not in flattened @@ -93,8 +125,6 @@ def test_document_code_in_init_module() -> None: def test_basic_package(tmp_path: Path) -> None: system = processPackage("basic") w = writer.TemplateWriter(tmp_path, TemplateLookup(template_dir)) - system.options.htmlusesplitlinks = True - system.options.htmlusesorttable = True w.prepOutputDirectory() root, = system.rootobjects w._writeDocsFor(root) @@ -404,6 +434,17 @@ def test_template_subfolders_write_casing(tmp_path: Path) -> None: assert not test_build_dir.joinpath('Static/Fonts').is_dir() assert test_build_dir.joinpath('static/fonts/bar.svg').is_file() +def test_themes_template_versions() -> None: + """ + All our templates should be up to date. + """ + + for theme in get_themes(): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + lookup = TemplateLookup(importlib_resources.files('pydoctor.themes') / 'base') + lookup.add_templatedir(importlib_resources.files('pydoctor.themes') / theme) + assert len(w) == 0, [str(_w) for _w in w] @pytest.mark.parametrize('func', [isPrivate, isClassNodePrivate]) def test_isPrivate(func: Callable[[model.Class], bool]) -> None: @@ -469,3 +510,83 @@ def func(): r"""\\/:*?"<>|\f\v\t\r\n""" """'))
    @simple_decorator""" """(max_examples=700, deadline=None, option=range(10))
    """) + + +def test_compact_module_summary() -> None: + system = model.System() + + top = fromText('', modname='top', is_package=True, system=system) + for x in range(50): + fromText('', parent_name='top', modname='sub' + str(x), system=system) + + ul = moduleSummary(top, '').children[-1] + assert ul.tagName == 'ul' # type: ignore + assert len(ul.children) == 50 # type: ignore + + # the 51th module triggers the compact summary, no matter if it's a package or module + fromText('', parent_name='top', modname='_yet_another_sub', system=system, is_package=True) + + ul = moduleSummary(top, '').children[-1] + assert ul.tagName == 'ul' # type: ignore + assert len(ul.children) == 1 # type: ignore + + # test that the last module is private + assert 'private' in ul.children[0].children[-1].attributes['class'] # type: ignore + + # for the compact summary no submodule (packages) may have further submodules + fromText('', parent_name='top._yet_another_sub', modname='subsubmodule', system=system) + + ul = moduleSummary(top, '').children[-1] + assert ul.tagName == 'ul' # type: ignore + assert len(ul.children) == 51 # type: ignore + + +def test_index_contains_infos(tmp_path: Path) -> None: + """ + Test if index.html contains the following informations: + + - meta generator tag + - nav and links to modules, classes, names + - link to the root packages + - pydoctor github link in the footer + """ + + infos = (f'
    allgames', + 'basic', + 'pydoctor',) + + system = model.System() + builder = system.systemBuilder(system) + builder.addModule(testpackages / "allgames") + builder.addModule(testpackages / "basic") + builder.buildModules() + w = writer.TemplateWriter(tmp_path, TemplateLookup(template_dir)) + w.writeSummaryPages(system) + + with open(tmp_path / 'index.html', encoding='utf-8') as f: + page = f.read() + for i in infos: + assert i in page, page + +def test_objects_order_mixed_modules_and_packages() -> None: + """ + Packages and modules are mixed when sorting with objects_order. + """ + system = model.System() + + top = fromText('', modname='top', is_package=True, system=system) + fromText('', parent_name='top', modname='aaa', system=system) + fromText('', parent_name='top', modname='bbb', system=system) + fromText('', parent_name='top', modname='aba', system=system, is_package=True) + + _sorted = sorted(top.contents.values(), key=pages.objects_order) + names = [s.name for s in _sorted] + + assert names == ['aaa', 'aba', 'bbb'] + diff --git a/pydoctor/test/test_twisted_python_deprecate.py b/pydoctor/test/test_twisted_python_deprecate.py new file mode 100644 index 000000000..d0822a9ba --- /dev/null +++ b/pydoctor/test/test_twisted_python_deprecate.py @@ -0,0 +1,165 @@ + +import re +from typing import Type + +from pydoctor import model +from pydoctor.stanutils import flatten_text, html2stan +from pydoctor.test import CapSys, test_templatewriter +from pydoctor.test.test_astbuilder import fromText, DeprecateSystem + +import pytest + +_html_template_with_replacement = r'(.*){name} was deprecated in {package} {version}; please use {replacement} instead\.(.*)' +_html_template_without_replacement = r'(.*){name} was deprecated in {package} {version}\.(.*)' + +twisted_deprecated_systemcls_param = pytest.mark.parametrize( + 'systemcls', (model.System, # system with all extensions enabled + DeprecateSystem, # system with deprecated extension only + ) + ) +@twisted_deprecated_systemcls_param +def test_twisted_python_deprecate(capsys: CapSys, systemcls: Type[model.System]) -> None: + """ + It recognizes Twisted deprecation decorators and add the + deprecation info as part of the documentation. + """ + + # Adjusted from Twisted's tests at + # https://github.com/twisted/twisted/blob/3bbe558df65181ed455b0c5cc609c0131d68d265/src/twisted/python/test/test_release.py#L516 + system = systemcls() + system.options.verbosity = -1 + + mod = fromText( + """ + from twisted.python.deprecate import deprecated, deprecatedProperty + from incremental import Version + @deprecated(Version('Twisted', 15, 0, 0), 'Baz') + def foo(): + 'docstring' + from twisted.python import deprecate + import incremental + @deprecate.deprecated(incremental.Version('Twisted', 16, 0, 0)) + def _bar(): + 'should appear' + @deprecated(Version('Twisted', 14, 2, 3), replacement='stuff') + class Baz: + @deprecatedProperty(Version('Twisted', 'NEXT', 0, 0), replacement='faam') + @property + def foom(self): + ... + @property + def faam(self): + ... + class stuff: ... 
+ """, system=system, modname='mod') + + mod_html_text = flatten_text(html2stan(test_templatewriter.getHTMLOf(mod))) + class_html_text = flatten_text(html2stan(test_templatewriter.getHTMLOf(mod.contents['Baz']))) + + assert capsys.readouterr().out == '' + + assert 'docstring' in mod_html_text + assert 'should appear' in mod_html_text + + assert re.match(_html_template_with_replacement.format( + name='foo', package='Twisted', version=r'15\.0\.0', replacement='Baz' + ), mod_html_text, re.DOTALL), mod_html_text + assert re.match(_html_template_without_replacement.format( + name='_bar', package='Twisted', version=r'16\.0\.0' + ), mod_html_text, re.DOTALL), mod_html_text + + _class = mod.contents['Baz'] + assert len(_class.extra_info)==1 + assert re.match(_html_template_with_replacement.format( + name='Baz', package='Twisted', version=r'14\.2\.3', replacement='stuff' + ), flatten_text(_class.extra_info[0].to_stan(mod.docstring_linker, False)).strip(), re.DOTALL) + + assert re.match(_html_template_with_replacement.format( + name='Baz', package='Twisted', version=r'14\.2\.3', replacement='stuff' + ), class_html_text, re.DOTALL), class_html_text + + assert re.match(_html_template_with_replacement.format( + name='foom', package='Twisted', version=r'NEXT', replacement='faam' + ), class_html_text, re.DOTALL), class_html_text + +@twisted_deprecated_systemcls_param +def test_twisted_python_deprecate_security(capsys: CapSys, systemcls: Type[model.System]) -> None: + system = systemcls() + system.options.verbosity = -1 + + mod = fromText( + """ + from twisted.python.deprecate import deprecated + from incremental import Version + @deprecated(Version('Twisted\\n.. raw:: html\\n\\n ', 15, 0, 0), 'Baz') + def foo(): ... + @deprecated(Version('Twisted', 16, 0, 0), replacement='\\n.. raw:: html\\n\\n ') + def _bar(): ... + """, system=system, modname='mod') + + mod_html = test_templatewriter.getHTMLOf(mod) + + assert capsys.readouterr().out == '''mod:4: Invalid package name: 'Twisted\\n.. raw:: html\\n\\n ' +mod:6: Invalid replacement name: '\\n.. raw:: html\\n\\n ' +''', capsys.readouterr().out + assert '' not in mod_html + +@twisted_deprecated_systemcls_param +def test_twisted_python_deprecate_corner_cases(capsys: CapSys, systemcls: Type[model.System]) -> None: + """ + It does not crash and report appropriate warnings while handling Twisted deprecation decorators. + """ + system = systemcls() + system.options.verbosity = -1 + + mod = fromText( + """ + from twisted.python.deprecate import deprecated, deprecatedProperty + from incremental import Version + # wrong incremental.Version() call (missing micro) + @deprecated(Version('Twisted', 15, 0), 'Baz') + def foo(): + 'docstring' + + # wrong incremental.Version() call (argument should be 'NEXT') + @deprecated(Version('Twisted', 'latest', 0, 0)) + def _bar(): + 'should appear' + + # wrong deprecated() call (argument should be incremental.Version() call) + @deprecated('14.2.3', replacement='stuff') + class Baz: + + # bad deprecation text: replacement not found + @deprecatedProperty(Version('Twisted', 'NEXT', 0, 0), replacement='notfound') + @property + def foom(self): + ... + + # replacement as callable works + @deprecatedProperty(Version('Twisted', 'NEXT', 0, 0), replacement=Baz.faam) + @property + def foum(self): + ... + @property + def faam(self): + ... + class stuff: ... 
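+        # The decorators above deliberately misuse the deprecation API (a missing
+        # 'micro' argument, a 'major' that is neither an int nor 'NEXT', a plain
+        # string instead of a Version() call, and an unknown replacement) so the
+        # test can check the warnings pydoctor emits for each case.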
+ """, system=system, modname='mod') + + test_templatewriter.getHTMLOf(mod) + class_html_text = flatten_text(html2stan(test_templatewriter.getHTMLOf(mod.contents['Baz']))) + + assert capsys.readouterr().out=="""mod:5: missing a required argument: 'micro' +mod:10: Invalid call to incremental.Version(), 'major' should be an int or 'NEXT'. +mod:15: Invalid call to twisted.python.deprecate.deprecated(), first argument should be a call to incremental.Version() +mod:20: Cannot find link target for "notfound" +""", capsys.readouterr().out + + assert re.match(_html_template_with_replacement.format( + name='foom', package='Twisted', version='NEXT', replacement='notfound' + ), class_html_text, re.DOTALL), class_html_text + + assert re.match(_html_template_with_replacement.format( + name='foum', package='Twisted', version='NEXT', replacement='mod.Baz.faam' + ), class_html_text, re.DOTALL), class_html_text diff --git a/pydoctor/test/test_visitor.py b/pydoctor/test/test_visitor.py new file mode 100644 index 000000000..1c9fab630 --- /dev/null +++ b/pydoctor/test/test_visitor.py @@ -0,0 +1,155 @@ + +from typing import Iterable +from pydoctor.test import CapSys +from pydoctor.test.epydoc.test_restructuredtext import parse_rst +from pydoctor import visitor +from docutils import nodes + +def dump(node: nodes.Node, text:str='') -> None: + print('{}{:<15} line: {}, rawsource: {}'.format( + text, + type(node).__name__, + node.line, + node.rawsource.replace('\n', '\\n'))) + +class DocutilsNodeVisitor(visitor.Visitor[nodes.Node]): + def unknown_visit(self, ob: nodes.Node) -> None: + pass + + @classmethod + def get_children(cls, ob:nodes.Node) -> Iterable[nodes.Node]: + if isinstance(ob, nodes.Element): + return ob.children # type:ignore[no-any-return] + return [] + +class MainVisitor(DocutilsNodeVisitor): + def visit_title_reference(self, node: nodes.Node) -> None: + raise self.SkipNode() + +class ParagraphDump(visitor.VisitorExt[nodes.Node]): + when = visitor.When.AFTER + def visit_paragraph(self, node: nodes.Node) -> None: + dump(node) + +class TitleReferenceDumpAfter(visitor.VisitorExt[nodes.Node]): + when = visitor.When.AFTER + def visit_title_reference(self, node: nodes.Node) -> None: + dump(node) + +class GenericDump(DocutilsNodeVisitor): + def unknown_visit(self, node: nodes.Node) -> None: + dump(node, '[visit-main] ') + def unknown_departure(self, node: nodes.Node) -> None: + dump(node, '[depart-main] ') + +class GenericDumpAfter(visitor.VisitorExt[nodes.Node]): + when = visitor.When.INNER + def unknown_visit(self, node: nodes.Node) -> None: + dump(node, '[visit-inner] ') + def unknown_departure(self, node: nodes.Node) -> None: + dump(node, '[depart-inner] ') + +class GenericDumpBefore(visitor.VisitorExt[nodes.Node]): + when = visitor.When.OUTTER + def unknown_visit(self, node: nodes.Node) -> None: + dump(node, '[visit-outter] ') + def unknown_departure(self, node: nodes.Node) -> None: + dump(node, '[depart-outter] ') + + +def test_visitor_ext(capsys:CapSys) -> None: + + parsed_doc = parse_rst(''' +Hello +===== + +Dolor sit amet +''') + doc = parsed_doc.to_node() + + vis = GenericDump() + vis.extensions.add(GenericDumpAfter, GenericDumpBefore) + vis.walkabout(doc) + assert capsys.readouterr().out == r'''[visit-outter] document line: None, rawsource: +[visit-main] document line: None, rawsource: +[visit-inner] document line: None, rawsource: +[visit-outter] title line: 3, rawsource: Hello +[visit-main] title line: 3, rawsource: Hello +[visit-inner] title line: 3, rawsource: Hello +[visit-outter] Text 
line: None, rawsource: Hello +[visit-main] Text line: None, rawsource: Hello +[visit-inner] Text line: None, rawsource: Hello +[depart-inner] Text line: None, rawsource: Hello +[depart-main] Text line: None, rawsource: Hello +[depart-outter] Text line: None, rawsource: Hello +[depart-inner] title line: 3, rawsource: Hello +[depart-main] title line: 3, rawsource: Hello +[depart-outter] title line: 3, rawsource: Hello +[visit-outter] paragraph line: 5, rawsource: Dolor sit amet +[visit-main] paragraph line: 5, rawsource: Dolor sit amet +[visit-inner] paragraph line: 5, rawsource: Dolor sit amet +[visit-outter] Text line: None, rawsource: Dolor sit amet +[visit-main] Text line: None, rawsource: Dolor sit amet +[visit-inner] Text line: None, rawsource: Dolor sit amet +[depart-inner] Text line: None, rawsource: Dolor sit amet +[depart-main] Text line: None, rawsource: Dolor sit amet +[depart-outter] Text line: None, rawsource: Dolor sit amet +[depart-inner] paragraph line: 5, rawsource: Dolor sit amet +[depart-main] paragraph line: 5, rawsource: Dolor sit amet +[depart-outter] paragraph line: 5, rawsource: Dolor sit amet +[depart-inner] document line: None, rawsource: +[depart-main] document line: None, rawsource: +[depart-outter] document line: None, rawsource: +''' + + +def test_visitor(capsys:CapSys) -> None: + + parsed_doc = parse_rst(''' +Fizz +==== +Lorem ipsum `notfound`. + +Buzz +**** + +Lorem ``ipsum`` + +.. code-block:: python + + x = 0 + +.. note:: + + Dolor sit amet + `notfound`. + + .. code-block:: python + + y = 1 + +Dolor sit amet `another link `. +Dolor sit amet `link `. +bla blab balba. +''') + doc = parsed_doc.to_node() + + MainVisitor(visitor.ExtList(TitleReferenceDumpAfter)).walk(doc) + assert capsys.readouterr().out == r'''title_reference line: None, rawsource: `notfound` +title_reference line: None, rawsource: `notfound` +title_reference line: None, rawsource: `another link ` +title_reference line: None, rawsource: `link ` +''' + + vis = MainVisitor() + vis.extensions.add(ParagraphDump, TitleReferenceDumpAfter) + vis.walk(doc) + assert capsys.readouterr().out == r'''paragraph line: 4, rawsource: Lorem ipsum `notfound`. +title_reference line: None, rawsource: `notfound` +paragraph line: 9, rawsource: Lorem ``ipsum`` +paragraph line: 17, rawsource: Dolor sit amet\n`notfound`. +title_reference line: None, rawsource: `notfound` +paragraph line: 24, rawsource: Dolor sit amet `another link `.\nDolor sit amet `link `.\nbla blab balba. +title_reference line: None, rawsource: `another link ` +title_reference line: None, rawsource: `link ` +''' diff --git a/pydoctor/test/test_zopeinterface.py b/pydoctor/test/test_zopeinterface.py index 5e86afd77..f79014dcd 100644 --- a/pydoctor/test/test_zopeinterface.py +++ b/pydoctor/test/test_zopeinterface.py @@ -1,19 +1,27 @@ -from typing import cast -from pydoctor.test.test_astbuilder import fromText, type2html +from typing import Type, cast +from pydoctor.test.test_astbuilder import fromText, type2html, ZopeInterfaceSystem from pydoctor.test.test_packages import processPackage -from pydoctor.zopeinterface import ZopeInterfaceClass, ZopeInterfaceSystem +from pydoctor.extensions.zopeinterface import ZopeInterfaceClass from pydoctor.epydoc.markup import ParsedDocstring from pydoctor import model from pydoctor.stanutils import flatten +import pytest + from . 
import CapSys, NotFoundLinker +zope_interface_systemcls_param = pytest.mark.parametrize( + 'systemcls', (model.System, # system with all extensions enalbed + ZopeInterfaceSystem, # system with zopeinterface extension only + ) + ) # we set up the same situation using both implements and # classImplements and run the same tests. -def test_implements() -> None: +@zope_interface_systemcls_param +def test_implements(systemcls: Type[model.System]) -> None: src = ''' import zope.interface @@ -29,9 +37,10 @@ class FooBar(Foo): class OnlyBar(Foo): zope.interface.implementsOnly(IBar) ''' - implements_test(src) + implements_test(src, systemcls) -def test_classImplements() -> None: +@zope_interface_systemcls_param +def test_classImplements(systemcls: Type[model.System]) -> None: src = ''' import zope.interface class IFoo(zope.interface.Interface): @@ -48,9 +57,10 @@ class OnlyBar(Foo): zope.interface.classImplements(FooBar, IBar) zope.interface.classImplementsOnly(OnlyBar, IBar) ''' - implements_test(src) + implements_test(src, systemcls) -def test_implementer() -> None: +@zope_interface_systemcls_param +def test_implementer(systemcls: Type[model.System]) -> None: src = ''' import zope.interface @@ -68,10 +78,10 @@ class FooBar(Foo): class OnlyBar(Foo): zope.interface.implementsOnly(IBar) ''' - implements_test(src) + implements_test(src, systemcls) -def implements_test(src: str) -> None: - mod = fromText(src, modname='zi', systemcls=ZopeInterfaceSystem) +def implements_test(src: str, systemcls: Type[model.System]) -> None: + mod = fromText(src, modname='zi', systemcls=systemcls) ifoo = mod.contents['IFoo'] ibar = mod.contents['IBar'] foo = mod.contents['Foo'] @@ -100,17 +110,18 @@ def implements_test(src: str) -> None: assert ifoo.implementedby_directly == [foo] assert ibar.implementedby_directly == [foobar, onlybar] - -def test_subclass_with_same_name() -> None: +@zope_interface_systemcls_param +def test_subclass_with_same_name(systemcls: Type[model.System]) -> None: src = ''' class A: pass class A(A): pass ''' - fromText(src, modname='zi', systemcls=ZopeInterfaceSystem) + fromText(src, modname='zi', systemcls=systemcls) -def test_multiply_inheriting_interfaces() -> None: +@zope_interface_systemcls_param +def test_multiply_inheriting_interfaces(systemcls: Type[model.System]) -> None: src = ''' from zope.interface import Interface, implements @@ -120,19 +131,20 @@ class One: implements(IOne) class Two: implements(ITwo) class Both(One, Two): pass ''' - mod = fromText(src, modname='zi', systemcls=ZopeInterfaceSystem) + mod = fromText(src, modname='zi', systemcls=systemcls) B = mod.contents['Both'] assert isinstance(B, ZopeInterfaceClass) assert len(list(B.allImplementedInterfaces)) == 2 -def test_attribute(capsys: CapSys) -> None: +@zope_interface_systemcls_param +def test_attribute(capsys: CapSys, systemcls: Type[model.System]) -> None: src = ''' import zope.interface as zi class C(zi.Interface): attr = zi.Attribute("documented attribute") bad_attr = zi.Attribute(0) ''' - mod = fromText(src, modname='mod', systemcls=ZopeInterfaceSystem) + mod = fromText(src, modname='mod', systemcls=systemcls) assert len(mod.contents['C'].contents) == 2 attr = mod.contents['C'].contents['attr'] assert attr.kind is model.DocumentableKind.ATTRIBUTE @@ -145,8 +157,9 @@ class C(zi.Interface): captured = capsys.readouterr().out assert captured == 'mod:5: definition of attribute "bad_attr" should have docstring as its sole argument\n' -def test_interfaceclass() -> None: - system = processPackage('interfaceclass', 
systemcls=ZopeInterfaceSystem) +@zope_interface_systemcls_param +def test_interfaceclass(systemcls: Type[model.System]) -> None: + system = processPackage('interfaceclass', systemcls=systemcls) mod = system.allobjects['interfaceclass.mod'] I = mod.contents['MyInterface'] assert isinstance(I, ZopeInterfaceClass) @@ -157,19 +170,21 @@ def test_interfaceclass() -> None: assert isinstance(J, ZopeInterfaceClass) assert J.isinterface -def test_warnerproofing() -> None: +@zope_interface_systemcls_param +def test_warnerproofing(systemcls: Type[model.System]) -> None: src = ''' from zope import interface Interface = interface.Interface class IMyInterface(Interface): pass ''' - mod = fromText(src, systemcls=ZopeInterfaceSystem) + mod = fromText(src, systemcls=systemcls) I = mod.contents['IMyInterface'] assert isinstance(I, ZopeInterfaceClass) assert I.isinterface -def test_zopeschema(capsys: CapSys) -> None: +@zope_interface_systemcls_param +def test_zopeschema(capsys: CapSys, systemcls: Type[model.System]) -> None: src = ''' from zope import schema, interface class IMyInterface(interface.Interface): @@ -177,7 +192,7 @@ class IMyInterface(interface.Interface): undoc = schema.Bool() bad = schema.ASCII(description=False) ''' - mod = fromText(src, modname='mod', systemcls=ZopeInterfaceSystem) + mod = fromText(src, modname='mod', systemcls=systemcls) text = mod.contents['IMyInterface'].contents['text'] assert text.docstring == 'fun in a bun' assert type2html(text)== "schema.TextLine" @@ -193,20 +208,22 @@ class IMyInterface(interface.Interface): captured = capsys.readouterr().out assert captured == 'mod:6: description of field "bad" is not a string literal\n' -def test_aliasing_in_class() -> None: +@zope_interface_systemcls_param +def test_aliasing_in_class(systemcls: Type[model.System]) -> None: src = ''' from zope import interface class IMyInterface(interface.Interface): Attrib = interface.Attribute attribute = Attrib("fun in a bun") ''' - mod = fromText(src, systemcls=ZopeInterfaceSystem) + mod = fromText(src, systemcls=systemcls) attr = mod.contents['IMyInterface'].contents['attribute'] assert mod.contents['IMyInterface'].contents['Attrib'].kind is model.DocumentableKind.ALIAS assert attr.docstring == 'fun in a bun' assert attr.kind is model.DocumentableKind.ATTRIBUTE -def test_zopeschema_inheritance() -> None: +@zope_interface_systemcls_param +def test_zopeschema_inheritance(systemcls: Type[model.System]) -> None: src = ''' from zope import schema, interface from zope.schema import Int as INTEGERSCHMEMAFIELD @@ -219,7 +236,7 @@ class IMyInterface(interface.Interface): myothertext = MyOtherTextLine(description="fun in another bun") myint = INTEGERSCHMEMAFIELD(description="not as much fun") ''' - mod = fromText(src, modname='mod', systemcls=ZopeInterfaceSystem) + mod = fromText(src, modname='mod', systemcls=systemcls) mytext = mod.contents['IMyInterface'].contents['mytext'] assert mytext.docstring == 'fun in a bun' assert flatten(cast(ParsedDocstring, mytext.parsed_type).to_stan(NotFoundLinker())) == "MyTextLine" @@ -232,7 +249,8 @@ class IMyInterface(interface.Interface): assert flatten(cast(ParsedDocstring, myint.parsed_type).to_stan(NotFoundLinker())) == "INTEGERSCHMEMAFIELD" assert myint.kind is model.DocumentableKind.SCHEMA_FIELD -def test_docsources_includes_interface() -> None: +@zope_interface_systemcls_param +def test_docsources_includes_interface(systemcls: Type[model.System]) -> None: src = ''' from zope import interface class IInterface(interface.Interface): @@ -243,12 +261,13 @@ class 
Implementation: def method(self): pass ''' - mod = fromText(src, systemcls=ZopeInterfaceSystem) + mod = fromText(src, systemcls=systemcls) imethod = mod.contents['IInterface'].contents['method'] method = mod.contents['Implementation'].contents['method'] assert imethod in method.docsources(), list(method.docsources()) -def test_docsources_includes_baseinterface() -> None: +@zope_interface_systemcls_param +def test_docsources_includes_baseinterface(systemcls: Type[model.System]) -> None: src = ''' from zope import interface class IBase(interface.Interface): @@ -261,12 +280,13 @@ class Implementation: def method(self): pass ''' - mod = fromText(src, systemcls=ZopeInterfaceSystem) + mod = fromText(src, systemcls=systemcls) imethod = mod.contents['IBase'].contents['method'] method = mod.contents['Implementation'].contents['method'] assert imethod in method.docsources(), list(method.docsources()) -def test_docsources_interface_attribute() -> None: +@zope_interface_systemcls_param +def test_docsources_interface_attribute(systemcls: Type[model.System]) -> None: src = ''' from zope import interface class IInterface(interface.Interface): @@ -275,12 +295,13 @@ class IInterface(interface.Interface): class Implementation: attr = True ''' - mod = fromText(src, systemcls=ZopeInterfaceSystem) + mod = fromText(src, systemcls=systemcls) iattr = mod.contents['IInterface'].contents['attr'] attr = mod.contents['Implementation'].contents['attr'] assert iattr in list(attr.docsources()) -def test_implementer_decoration() -> None: +@zope_interface_systemcls_param +def test_implementer_decoration(systemcls: Type[model.System]) -> None: src = ''' from zope.interface import Interface, implementer class IMyInterface(Interface): @@ -291,13 +312,14 @@ class Implementation: def method(self): pass ''' - mod = fromText(src, systemcls=ZopeInterfaceSystem) + mod = fromText(src, systemcls=systemcls) iface = mod.contents['IMyInterface'] impl = mod.contents['Implementation'] assert isinstance(impl, ZopeInterfaceClass) assert impl.implements_directly == [iface.fullName()] -def test_docsources_from_moduleprovides() -> None: +@zope_interface_systemcls_param +def test_docsources_from_moduleprovides(systemcls: Type[model.System]) -> None: src = ''' from zope import interface @@ -310,13 +332,14 @@ def bar(): def bar(): pass ''' - mod = fromText(src, systemcls=ZopeInterfaceSystem) + mod = fromText(src, systemcls=systemcls) imethod = mod.contents['IBase'].contents['bar'] function = mod.contents['bar'] assert imethod in function.docsources(), list(function.docsources()) -def test_interfaceallgames() -> None: - system = processPackage('interfaceallgames', systemcls=ZopeInterfaceSystem) +@zope_interface_systemcls_param +def test_interfaceallgames(systemcls: Type[model.System]) -> None: + system = processPackage('interfaceallgames', systemcls=systemcls) mod = system.allobjects['interfaceallgames.interface'] iface = mod.contents['IAnInterface'] assert isinstance(iface, ZopeInterfaceClass) @@ -324,7 +347,8 @@ def test_interfaceallgames() -> None: 'interfaceallgames.implementation.Implementation' ] -def test_implementer_with_star() -> None: +@zope_interface_systemcls_param +def test_implementer_with_star(systemcls: Type[model.System]) -> None: """ If the implementer call contains a split out empty list, don't fail on attempting to process it. 
@@ -340,14 +364,15 @@ class Implementation: def method(self): pass ''' - mod = fromText(src, systemcls=ZopeInterfaceSystem) + mod = fromText(src, systemcls=systemcls) iface = mod.contents['IMyInterface'] impl = mod.contents['Implementation'] assert isinstance(impl, ZopeInterfaceClass) assert isinstance(iface, ZopeInterfaceClass) assert impl.implements_directly == [iface.fullName()] -def test_implementer_nonname(capsys: CapSys) -> None: +@zope_interface_systemcls_param +def test_implementer_nonname(capsys: CapSys, systemcls: Type[model.System]) -> None: """ Non-name arguments passed to @implementer are warned about and then ignored. """ @@ -357,14 +382,15 @@ def test_implementer_nonname(capsys: CapSys) -> None: class Implementation: pass ''' - mod = fromText(src, modname='mod', systemcls=ZopeInterfaceSystem) + mod = fromText(src, modname='mod', systemcls=systemcls) impl = mod.contents['Implementation'] assert isinstance(impl, ZopeInterfaceClass) assert impl.implements_directly == [] captured = capsys.readouterr().out assert captured == 'mod:3: Interface argument 1 does not look like a name\n' -def test_implementer_nonclass(capsys: CapSys) -> None: +@zope_interface_systemcls_param +def test_implementer_nonclass(capsys: CapSys, systemcls: Type[model.System]) -> None: """ Non-class arguments passed to @implementer are warned about but are stored as implemented interfaces. @@ -376,14 +402,15 @@ def test_implementer_nonclass(capsys: CapSys) -> None: class Implementation: pass ''' - mod = fromText(src, modname='mod', systemcls=ZopeInterfaceSystem) + mod = fromText(src, modname='mod', systemcls=systemcls) impl = mod.contents['Implementation'] assert isinstance(impl, ZopeInterfaceClass) assert impl.implements_directly == ['mod.var'] captured = capsys.readouterr().out assert captured == 'mod:4: Supposed interface "mod.var" not detected as a class\n' -def test_implementer_plainclass(capsys: CapSys) -> None: +@zope_interface_systemcls_param +def test_implementer_plainclass(capsys: CapSys, systemcls: Type[model.System]) -> None: """ A non-interface class passed to @implementer will be warned about but will be stored as an implemented interface. @@ -396,7 +423,7 @@ class C: class Implementation: pass ''' - mod = fromText(src, modname='mod', systemcls=ZopeInterfaceSystem) + mod = fromText(src, modname='mod', systemcls=systemcls) C = mod.contents['C'] impl = mod.contents['Implementation'] assert isinstance(impl, ZopeInterfaceClass) @@ -407,7 +434,8 @@ class Implementation: captured = capsys.readouterr().out assert captured == 'mod:5: Class "mod.C" is not an interface\n' -def test_implementer_not_found(capsys: CapSys) -> None: +@zope_interface_systemcls_param +def test_implementer_not_found(capsys: CapSys, systemcls: Type[model.System]) -> None: """ An unknown class passed to @implementer is warned about if its full name is part of our system. @@ -419,17 +447,18 @@ def test_implementer_not_found(capsys: CapSys) -> None: class Implementation: pass ''' - fromText(src, modname='mod', systemcls=ZopeInterfaceSystem) + fromText(src, modname='mod', systemcls=systemcls) captured = capsys.readouterr().out assert captured == 'mod:4: Interface "mod.INoSuchInterface" not found\n' -def test_implementer_reparented() -> None: +@zope_interface_systemcls_param +def test_implementer_reparented(systemcls: Type[model.System]) -> None: """ A class passed to @implementer can be found even when it is moved to a different module. 
""" - system = ZopeInterfaceSystem() + system = systemcls() mod_iface = fromText(''' from zope.interface import Interface @@ -456,13 +485,18 @@ class Implementation: impl = mod_impl.contents['Implementation'] assert isinstance(impl, ZopeInterfaceClass) assert impl.implements_directly == ['_private.IMyInterface'] - assert iface.implementedby_directly == [] + # The system is already post-processed at this time + assert iface.implementedby_directly == [impl] + + # But since we've manually reparent 'IMyInterface' to 'public', + # we need to post-process it again. system.postProcess() assert impl.implements_directly == ['public.IMyInterface'] assert iface.implementedby_directly == [impl] -def test_implementer_nocall(capsys: CapSys) -> None: +@zope_interface_systemcls_param +def test_implementer_nocall(capsys: CapSys, systemcls: Type[model.System]) -> None: """ Report a warning when @implementer is used without calling it. """ @@ -472,11 +506,12 @@ def test_implementer_nocall(capsys: CapSys) -> None: class C: pass ''' - fromText(src, modname='mod', systemcls=ZopeInterfaceSystem) + fromText(src, modname='mod', systemcls=systemcls) captured = capsys.readouterr().out assert captured == "mod:3: @implementer requires arguments\n" -def test_classimplements_badarg(capsys: CapSys) -> None: +@zope_interface_systemcls_param +def test_classimplements_badarg(capsys: CapSys, systemcls: Type[model.System]) -> None: """ Report a warning when the arguments to classImplements() don't make sense. """ @@ -491,7 +526,7 @@ def f(): classImplements(f, IBar) classImplements(g, IBar) ''' - fromText(src, modname='mod', systemcls=ZopeInterfaceSystem) + fromText(src, modname='mod', systemcls=systemcls) captured = capsys.readouterr().out assert captured == ( 'mod:7: required argument to classImplements() missing\n' diff --git a/pydoctor/test/testcustomtemplates/allok/nav.html b/pydoctor/test/testcustomtemplates/allok/nav.html index 61882ff66..adf5be7f5 100644 --- a/pydoctor/test/testcustomtemplates/allok/nav.html +++ b/pydoctor/test/testcustomtemplates/allok/nav.html @@ -1,5 +1,5 @@

    diff --git a/pydoctor/test/testpackages/c_module_invalid_text_signature/mymod/__init__.py b/pydoctor/test/testpackages/c_module_invalid_text_signature/mymod/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pydoctor/test/testpackages/c_module_invalid_text_signature/mymod/base.c b/pydoctor/test/testpackages/c_module_invalid_text_signature/mymod/base.c new file mode 100644 index 000000000..6e5da46c9 --- /dev/null +++ b/pydoctor/test/testpackages/c_module_invalid_text_signature/mymod/base.c @@ -0,0 +1,42 @@ +/* Example of Python c module with an invalid __text_signature__ */ + +#include "Python.h" + +static PyObject* base_valid(PyObject *self, PyObject* args) +{ + printf("Hello World\n"); + return Py_None; +} + +static PyObject* base_invalid(PyObject *self, PyObject* args) +{ + printf("Hello World\n"); + return Py_None; +} + +static PyMethodDef base_methods[] = { + {"valid_text_signature", base_valid, METH_VARARGS, "valid_text_signature($self, a='r', b=-3.14)\n" + "--\n" + "\n" + "Function demonstrating a valid __text_signature__ from C code."}, + + {"invalid_text_signature", base_invalid, METH_VARARGS, "invalid_text_signature(!invalid) -> NotSupported\n" + "--\n" + "\n" + "Function demonstrating an invalid __text_signature__ from C code."}, + + {NULL, NULL, 0, NULL} /* sentinel */ +}; + +static PyModuleDef base_definition = { + PyModuleDef_HEAD_INIT, + "base", + "A Python module demonstrating valid and invalid __text_signature__ from C code.", + -1, + base_methods +}; + +PyObject* PyInit_base(void) { + Py_Initialize(); + return PyModule_Create(&base_definition); +} diff --git a/pydoctor/test/testpackages/c_module_invalid_text_signature/setup.py b/pydoctor/test/testpackages/c_module_invalid_text_signature/setup.py new file mode 100644 index 000000000..7be3cc8ba --- /dev/null +++ b/pydoctor/test/testpackages/c_module_invalid_text_signature/setup.py @@ -0,0 +1,8 @@ +from setuptools import setup, Extension + +cmodule = Extension("mymod.base", sources=["mymod/base.c"],) +setup( + name="mymod", + ext_modules=[cmodule], + packages=['mymod'], +) diff --git a/pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/__init__.py b/pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/base.c b/pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/base.c new file mode 100644 index 000000000..947caf48e --- /dev/null +++ b/pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/base.c @@ -0,0 +1,31 @@ +/* Example of Python c module with an invalid __text_signature__ */ + +#include "Python.h" + +static PyObject* base_valid(PyObject *self, PyObject* args) +{ + printf("Hello World\n"); + return Py_None; +} + +static PyMethodDef base_methods[] = { + {"coming_from_c_module", base_valid, METH_VARARGS, "coming_from_c_module($self, a='r', b=-3.14)\n" + "--\n" + "\n" + "Function demonstrating a valid __text_signature__ from C code."}, + + {NULL, NULL, 0, NULL} /* sentinel */ +}; + +static PyModuleDef base_definition = { + PyModuleDef_HEAD_INIT, + "base", + "Dummy c-module.", + -1, + base_methods +}; + +PyObject* PyInit_base(void) { + Py_Initialize(); + return PyModule_Create(&base_definition); +} diff --git a/pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/base.py b/pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/base.py new file mode 100644 index 000000000..e62ad3d53 
--- /dev/null +++ b/pydoctor/test/testpackages/c_module_python_module_name_clash/mymod/base.py @@ -0,0 +1,13 @@ + +# Example of stub loader generated by setuptools: +# https://github.com/pypa/setuptools/blob/4d64156de17596dae33f2b12aaaea1d6c9327fd9/setuptools/command/build_ext.py#L238-L275 +# We emulate this behaviour with this module. + +def __bootstrap__(): + global __bootstrap__, __loader__, __file__ + import sys, pkg_resources + from importlib.machinery import ExtensionFileLoader + __file__ = pkg_resources.resource_filename(__name__, 'base.cpython-39-darwin.so') + __loader__ = None; del __bootstrap__, __loader__ + ExtensionFileLoader(__name__,__file__).load_module() +__bootstrap__() \ No newline at end of file diff --git a/pydoctor/test/testpackages/c_module_python_module_name_clash/setup.py b/pydoctor/test/testpackages/c_module_python_module_name_clash/setup.py new file mode 100644 index 000000000..7be3cc8ba --- /dev/null +++ b/pydoctor/test/testpackages/c_module_python_module_name_clash/setup.py @@ -0,0 +1,8 @@ +from setuptools import setup, Extension + +cmodule = Extension("mymod.base", sources=["mymod/base.c"],) +setup( + name="mymod", + ext_modules=[cmodule], + packages=['mymod'], +) diff --git a/pydoctor/test/testpackages/cyclic_imports_base_classes/__init__.py b/pydoctor/test/testpackages/cyclic_imports_base_classes/__init__.py new file mode 100644 index 000000000..b02981cc0 --- /dev/null +++ b/pydoctor/test/testpackages/cyclic_imports_base_classes/__init__.py @@ -0,0 +1 @@ +from . import b diff --git a/pydoctor/test/testpackages/cyclic_imports_base_classes/a.py b/pydoctor/test/testpackages/cyclic_imports_base_classes/a.py new file mode 100644 index 000000000..d73aa1276 --- /dev/null +++ b/pydoctor/test/testpackages/cyclic_imports_base_classes/a.py @@ -0,0 +1,4 @@ +from . import x + +class A(object): + pass diff --git a/pydoctor/test/testpackages/cyclic_imports_base_classes/b.py b/pydoctor/test/testpackages/cyclic_imports_base_classes/b.py new file mode 100644 index 000000000..6eb1e3f3e --- /dev/null +++ b/pydoctor/test/testpackages/cyclic_imports_base_classes/b.py @@ -0,0 +1,4 @@ +from . import a + +class B(a.A): + pass diff --git a/pydoctor/test/testpackages/package_module_name_clash/__init__.py b/pydoctor/test/testpackages/package_module_name_clash/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pydoctor/test/testpackages/package_module_name_clash/pack.py b/pydoctor/test/testpackages/package_module_name_clash/pack.py new file mode 100644 index 000000000..d998fb9b3 --- /dev/null +++ b/pydoctor/test/testpackages/package_module_name_clash/pack.py @@ -0,0 +1 @@ +module=True \ No newline at end of file diff --git a/pydoctor/test/testpackages/package_module_name_clash/pack/__init__.py b/pydoctor/test/testpackages/package_module_name_clash/pack/__init__.py new file mode 100644 index 000000000..ad7d733d6 --- /dev/null +++ b/pydoctor/test/testpackages/package_module_name_clash/pack/__init__.py @@ -0,0 +1 @@ +package=True \ No newline at end of file diff --git a/pydoctor/test/testpackages/reparented_module/__init__.py b/pydoctor/test/testpackages/reparented_module/__init__.py new file mode 100644 index 000000000..a7c376d0f --- /dev/null +++ b/pydoctor/test/testpackages/reparented_module/__init__.py @@ -0,0 +1,6 @@ +""" +Here the module C{mod} is made available under an alias name +that is explicitly advertised under the alias name. +""" +from . 
import mod as module
+__all__=('module',)
diff --git a/pydoctor/test/testpackages/reparented_module/mod.py b/pydoctor/test/testpackages/reparented_module/mod.py
new file mode 100644
index 000000000..ef0761303
--- /dev/null
+++ b/pydoctor/test/testpackages/reparented_module/mod.py
@@ -0,0 +1,5 @@
+"""
+This is the "origin" module which, for testing purposes, is used from the C{reparented_module} package.
+"""
+def f():
+    pass
diff --git a/pydoctor/test/testpackages/reparenting_crash/__init__.py b/pydoctor/test/testpackages/reparenting_crash/__init__.py
new file mode 100644
index 000000000..d7b9a5bb0
--- /dev/null
+++ b/pydoctor/test/testpackages/reparenting_crash/__init__.py
@@ -0,0 +1,3 @@
+from .reparenting_crash import reparenting_crash, reparented_func
+
+__all__ = ['reparenting_crash', 'reparented_func']
\ No newline at end of file
diff --git a/pydoctor/test/testpackages/reparenting_crash/reparenting_crash.py b/pydoctor/test/testpackages/reparenting_crash/reparenting_crash.py
new file mode 100644
index 000000000..e5de4bb01
--- /dev/null
+++ b/pydoctor/test/testpackages/reparenting_crash/reparenting_crash.py
@@ -0,0 +1,8 @@
+
+class reparenting_crash:
+    ...
+    def reparented_func():
+        ...
+
+def reparented_func():
+    ...
\ No newline at end of file
diff --git a/pydoctor/test/testpackages/reparenting_crash_alt/__init__.py b/pydoctor/test/testpackages/reparenting_crash_alt/__init__.py
new file mode 100644
index 000000000..9397062c3
--- /dev/null
+++ b/pydoctor/test/testpackages/reparenting_crash_alt/__init__.py
@@ -0,0 +1,3 @@
+from .reparenting_crash_alt import reparenting_crash_alt, reparented_func
+
+__all__ = ['reparenting_crash_alt', 'reparented_func']
diff --git a/pydoctor/test/testpackages/reparenting_crash_alt/_impl.py b/pydoctor/test/testpackages/reparenting_crash_alt/_impl.py
new file mode 100644
index 000000000..fb0a6552a
--- /dev/null
+++ b/pydoctor/test/testpackages/reparenting_crash_alt/_impl.py
@@ -0,0 +1,6 @@
+class reparenting_crash_alt:
+    ...
+    def reparented_func():
+        ...
+def reparented_func():
+    ...
diff --git a/pydoctor/test/testpackages/reparenting_crash_alt/reparenting_crash_alt.py b/pydoctor/test/testpackages/reparenting_crash_alt/reparenting_crash_alt.py
new file mode 100644
index 000000000..119fd4ee8
--- /dev/null
+++ b/pydoctor/test/testpackages/reparenting_crash_alt/reparenting_crash_alt.py
@@ -0,0 +1,2 @@
+
+from ._impl import reparenting_crash_alt, reparented_func
diff --git a/pydoctor/test/testpackages/reparenting_follows_aliases/__init__.py b/pydoctor/test/testpackages/reparenting_follows_aliases/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/pydoctor/test/testpackages/reparenting_follows_aliases/_myotherthing.py b/pydoctor/test/testpackages/reparenting_follows_aliases/_myotherthing.py
new file mode 100644
index 000000000..3aefc4084
--- /dev/null
+++ b/pydoctor/test/testpackages/reparenting_follows_aliases/_myotherthing.py
@@ -0,0 +1,4 @@
+"""
+This module imports a class; it does not re-export it in its __all__ variable.
+"""
+from ._mything import MyClass
diff --git a/pydoctor/test/testpackages/reparenting_follows_aliases/_mything.py b/pydoctor/test/testpackages/reparenting_follows_aliases/_mything.py
new file mode 100644
index 000000000..454fc672b
--- /dev/null
+++ b/pydoctor/test/testpackages/reparenting_follows_aliases/_mything.py
@@ -0,0 +1,3 @@
+"""This module defines a class"""
+class MyClass:
+    ...
diff --git a/pydoctor/test/testpackages/reparenting_follows_aliases/main.py b/pydoctor/test/testpackages/reparenting_follows_aliases/main.py
new file mode 100644
index 000000000..3b97c263d
--- /dev/null
+++ b/pydoctor/test/testpackages/reparenting_follows_aliases/main.py
@@ -0,0 +1,10 @@
+"""
+This module imports MyClass from _myotherthing
+and re-exports it in its __all__ variable.
+
+But _myotherthing.MyClass is an alias to _mything.MyClass,
+so _mything.MyClass should be reparented to main.MyClass.
+"""
+from ._myotherthing import MyClass
+__all__=('myfunc', 'MyClass')
+def myfunc(): ...
diff --git a/pydoctor/themes/base/ajax.js b/pydoctor/themes/base/ajax.js
new file mode 100644
index 000000000..9a951eb13
--- /dev/null
+++ b/pydoctor/themes/base/ajax.js
@@ -0,0 +1,50 @@
+// Implement simple cached AJAX functions.
+
+var _cache = {};
+
+/*
+* Get a promise for the HTTP get responseText.
+*/
+function httpGetPromise(url) {
+  const promise = new Promise((_resolve, _reject) => {
+    httpGet(url, (responseText) => {
+      _resolve(responseText);
+    },
+    (error) => {
+      _reject(error);
+    });
+  });
+  return promise
+}
+
+function httpGet(url, onload, onerror) {
+  if (_cache[url]) {
+    _cachedHttpGet(url, onload, onerror);
+  }
+  else{
+    _httpGet(url, onload, onerror);
+  }
+}
+
+function _cachedHttpGet(url, onload, onerror) {
+  setTimeout(() => { onload(_cache[url]) }, 0);
+}
+
+function _httpGet(url, onload, onerror) {
+
+  var xobj = new XMLHttpRequest();
+  xobj.open('GET', url, true); // Asynchronous
+
+  xobj.onload = function () {
+    // add document to cache.
+    _cache[url] = xobj.responseText;
+    onload(xobj.responseText);
+  };
+
+  xobj.onerror = function (error) {
+    console.log(error)
+    onerror(error)
+  };
+
+  xobj.send(null);
+}
diff --git a/pydoctor/themes/base/all-documents.html b/pydoctor/themes/base/all-documents.html
new file mode 100644
index 000000000..55b2c30de
--- /dev/null
+++ b/pydoctor/themes/base/all-documents.html
@@ -0,0 +1,28 @@
+
+
+
+    Head
+
    + + Nav + +
    + +
    +

    All Documents

    + +
      +
    • + +
      +
      +
      +
      +
      +
      +
    • +
    +
    + + diff --git a/pydoctor/themes/base/apidocs.css b/pydoctor/themes/base/apidocs.css index f0d763b7a..6d7a67d6a 100644 --- a/pydoctor/themes/base/apidocs.css +++ b/pydoctor/themes/base/apidocs.css @@ -5,21 +5,38 @@ body { overflow-y: scroll; } +.container-fluid{ + max-width: 1380px; + width: 100%; + flex: auto; +} + nav.navbar { width:100%; margin-bottom: 0; } -nav.navbar > .navbar-header { - margin-right: 0; - margin-left: 0; - height: 100%; - display: inline-block; +nav.mainnavbar > div.container-fluid { + display: flex; + flex-wrap: wrap; +} + +nav div.layoutOptions { + display: flex; + flex-wrap: wrap; + align-items: end; + margin-left: auto; + padding-top:11px; +} + +nav.navbar .navbar-header { + float: none; + width: 100%; + position: relative; } .page-header { margin-top: 22px; - position: sticky; top: 0; display: flex; flex-wrap: wrap; @@ -29,10 +46,13 @@ nav.navbar > .navbar-header { margin-bottom: 3px; border-bottom: 0; box-shadow: 0 0 8px 8px #fff; + z-index: 99; } .navbar-brand { padding: 0; + margin: 0; + height: auto; } .navbar-brand a, .navbar-brand span { @@ -70,6 +90,7 @@ a.projecthome:hover { padding: 10px 0 10px 15px; } + .navlinks > a:hover { background-color: transparent; text-decoration: none; @@ -99,7 +120,7 @@ footer.navbar { a[name] { position: relative; - bottom: 80px; + bottom: 10px; font-size: 0; } @@ -114,6 +135,11 @@ li { padding-bottom: 5px; } +#summaryTree .compact-modules { + list-style: none; + line-height: 1.8em; +} + li a { text-decoration: none; } @@ -158,8 +184,8 @@ ul ul ul ul ul ul ul { } .functionBody p { - margin-top: 6px; - margin-bottom: 6px; + margin: 0; + padding: 8px 0 6px; } #splitTables > p { @@ -188,7 +214,6 @@ ul ul ul ul ul ul ul { .fieldTable { width: 100%; - display: block; border: 0; } @@ -215,7 +240,7 @@ ul ul ul ul ul ul ul { /* Argument name + type column table */ .fieldTable tr td.fieldArgContainer { - width: 250px; + width: 325px; word-break: break-word; } @@ -245,75 +270,87 @@ ul ul ul ul ul ul ul { /* Attr name column table */ #splitTables > table tr td:nth-child(2) { - width: 200px; + width: 240px; word-break: break-word; } -/* For smaller displays, i.e. half screen or mobile phone */ -@media only screen and (max-width: 1000px) { +/* Fix proportion size of summary table columns */ +#splitTables > table { + table-layout: fixed; +} - /* Fix size of summary table columns */ - #splitTables > table { - table-layout: fixed; - } - - /* Kind column table */ - #splitTables > table tr td:first-child { - border-left: none; - width: 20%; - } +/* For smaller displays, i.e. 
half screen */ +@media only screen and (max-width: 1100px) { /* Attr name column table */ #splitTables > table tr td:nth-child(2) { - width: 35%; + width: 200px; } /* Summary column table */ #splitTables > table tr td:nth-child(3) { - width: 45%; + width: auto; } } -@media only screen and (max-width: 650px) { +@media only screen and (max-width: 820px) { + + /* Kind column table */ + #splitTables > table tr td:first-child { + border-left: none; + width: 20%; + } + + /* Attr name column table */ + #splitTables > table tr td:nth-child(2) { + width: 160px; + } /* Argument name + type column table */ .fieldTable tr td.fieldArgContainer { - width: 175px; + width: 170px; + } + .fieldTable { + table-layout: fixed; } } -@media only screen and (max-width: 400px) { +@media only screen and (max-width: 450px) { + /* Attr name column table */ + #splitTables > table tr td:nth-child(2) { + width: 100px; + } /* Argument name + type column table */ .fieldTable tr td.fieldArgContainer { width: 125px; } } -tr.package { +table .package { background-color: #fff3e0; } -tr.module { +table .module { background-color: #fff8e1; } -tr.class, tr.classvariable, tr.baseclassvariable { +table .class, table .classvariable, table .baseclassvariable { background-color: #fffde7; } -tr.instancevariable, tr.baseinstancevariable, tr.variable, tr.attribute, tr.property { +table .instancevariable, table .baseinstancevariable, table .variable, table .attribute, table .property { background-color: #f3e5f5; } -tr.interface { +table .interface { background-color: #fbe9e7; } -tr.method, tr.function, tr.basemethod, tr.baseclassmethod, tr.classmethod { +table .method, table .function, table .basemethod, table .baseclassmethod, table .classmethod { background-color: #f1f8e9; } -tr.private { +table .private { background-color: #f1f1f1; } @@ -326,10 +363,15 @@ tr.private { margin: 10px; padding: 10px; padding-bottom: 5px; + display: block; + border-left-color: #03a9f4; + border-left-width: 1px; + border-left-style: solid; + background: #fafafa; } .functionBody { - margin-left: 15px; + margin-left: 5px; } .functionBody > #part { @@ -355,9 +397,7 @@ tr.private { /* - Links to class/function/etc names are nested like this: label - This applies to inline docstring content marked up as code, - for example L{foo} in epytext or `bar` in restructuredtext, - but also to links that are present in summary tables. + - 'functionHeader' is used for lines like `def func():` and `var =` */ code, .literal, .pre, #childList > div .functionHeader, @@ -367,10 +407,23 @@ code, .literal, .pre, #childList > div .functionHeader, code, #childList > div .functionHeader, .fieldArg { color: #222222; } -code > a, #childList > div .functionHeader a { + +/* Intersphinx links are not red, but simply blue */ +a.intersphinx-link { + color: #03458a; + background-color: #f0ebe694; +} + +/* Links to objects within the system use this special css. +This applies to inline docstring content marked up as code, + for example L{foo} in epytext or `bar` in restructuredtext, + but also to links that are present in summary tables. +*/ +a.internal-link { color:#c7254e; background-color:#f9f2f4; } + /* top navagation bar */ .page-header > h1 { margin-top: 0; @@ -379,6 +432,13 @@ code > a, #childList > div .functionHeader a { color: #971c3a; } +/* Bootstart 3.x sets font-size to 17.5px which just + looks ridiculously large, so we unset it here. +*/ +blockquote { + font-size: unset; +} + /* This defines the code style, it's black on light gray. 
It also overwrite the default values inherited from bootstrap min @@ -397,12 +457,7 @@ a.sourceLink { } -#childList > div { - border-left-color: #03a9f4; - border-left-width: 1px; - border-left-style: solid; - background: #fafafa; -} + .moduleDocstring { margin: 20px; @@ -425,18 +480,22 @@ pre { body.private-hidden #splitTables .private, body.private-hidden #childList .private, -body.private-hidden #summaryTree .private { - display: none; +body.private-hidden #summaryTree .private, +body.private-hidden nav.sidebar .private, +body.private-hidden #search-results .private, +body.private-hidden .container > .private { + display: none!important; } -/* Show private */ +/* Show private and other options */ #showPrivate:hover { text-decoration: none; } #showPrivate button { - padding: 10px; + padding: 5px; + padding-bottom: 15px; } #showPrivate button:hover { @@ -529,12 +588,508 @@ div.tip, div.hint, div.important { border-color: #bce8f1; } +.sidebarcontainer { + width: 297px; /* Set the width of the sidebar: 290px + 2px for the border + 5px for the padding */ + max-height: 100vh; /* Full-height: remove this if you want "auto" height */ + float: left; + padding: 10px 0px 10px 5px; + margin:24px 20px 20px 0; + border: 1px solid; + border-radius: 4px; + display: flex; + position: sticky; + top: 0; + overflow-wrap: break-word; + overflow-x: none; + overflow-y: scroll; + background-color: #fbfbfb; + border-color: #e7e7e7; + scrollbar-width: thin; + scrollbar-color: rgb(194,194,194) rgb(249,249,249); +} + +.sidebarcontainer::-webkit-scrollbar { + width: 10px; /* Scrollbar width on Chromium-based browsers */ + border: solid 1px rgb(229,229,229); + background-color: rgb(249,249,249); +} + +.sidebarcontainer::-webkit-scrollbar:horizontal { + display: none; +} + +.sidebarcontainer::-webkit-scrollbar-track { + box-shadow: inset 0 0 5px 5px transparent; + border: solid 1px transparent; +} + +.sidebarcontainer::-webkit-scrollbar-thumb { + box-shadow: inset 0 0 5px 5px rgb(194,194,194); + border: solid 2px transparent; + border-radius: 5px; +} + + +/* The sidebar menu */ + +.sidebar { + /*! 
padding-bottom: 10px; */ + width: 100%; +} + +.sidebar > div { + width: 100%; + padding-top: 7px; +} + +.sidebar > div:first-child { + padding-top: 0; + margin-top: -4px; +} + +.sidebar > div:last-child { + padding-bottom: 15px; +} + +.sidebar > div:nth-child(2) { + background-color: RGBA(0,10,10, 0.03); + box-shadow: -5px 5px 0px 10px RGBA(0,10,10, 0.03); + margin-top: 20px; +} + +.sidebar ul { + display: block; + margin: 0 0 5px 0; + padding: 0 0 0 10px; + width: 100%; +} + +.sidebar li { + width: 100%; + padding: 0; + display: flex; + overflow: hidden; + flex-wrap: wrap; + word-break: break-word; +} + +.sidebar li p { + margin: 0; + width: 100%; +} + +.sidebar li ul { + margin: 0 0 2px 0; + padding: 0 0 0 7px; + border: 0; +} + +/* Generated TOC */ +.sidebar ul.rst-simple, .sidebar ul.rst-simple ul { + margin: 0 0 5px 0; + padding: 0 0 0 15px; + margin: 0; + border-left: 1px solid #e7e7e7; +} + +.sidebar li a { + display: inline-block; + width: 100%; + padding-top: 3px; + padding-bottom: 3px; + color: #414141; +} + +.sidebar li a:hover { + color: #C7354E; +} + +.sidebar > div ul > li > .itemName > code, .sidebar > div ul > li > .itemName > code > a { + background-color: transparent; +} + +.sidebar ul > li > .itemName { + width: 100%; +} + +.sidebar > div ul > li > .itemName > code { + padding: 0; + width: 100%; +} + +.sidebar .thingTitle { + margin-bottom: 7px; + margin-top: 7px; + overflow: hidden; + color: #555; + font-size: 18px; + display: flex; + flex-wrap: wrap; + align-items: baseline; + word-break: break-word; + padding: 0 15px 3px 1px; + box-shadow: -10px 12px 0px -11px #888; +} + +.sidebar .thingTitle > span { + margin-right: 7px; +} + +.sidebar .thingTitle > code { + font-size: 16px; + color: #555; + background-color: transparent; + padding-left: 0; + padding-right: 0; + display: flex; +} + +.sidebar .thingTitle > code a { + background-color: transparent; +} + +.sidebar .childrenKindTitle { + color: #414141; + margin-left: 4px; + margin-bottom: 3px; + font-size: 15px; + /*! border-bottom: solid 1px #9d9d9d; */ + box-shadow: -11px 11px 0px -10px #aeaeaec4; + font-style: italic; +} + + +/* Style page content */ +#main { + + /* Same as the width of the sidebar + 20px*/ + display: flex; + flex-direction: column; +} + +/* Special case for the --nosidebar option */ +.nosidebar { + margin-left: 10px!important; +} + +/* For bigger displays, i.e. full screen */ +@media only screen and (min-width: 1330px) { + .sidebarcontainer { + width: 317px; /* Set the width of the sidebar: 310px + 2px for the border + 5px for the scrollbar */ + } +} + +/* For smaller displays, i.e. 
half screen */ +@media only screen and (max-width: 1100px) { + .sidebarcontainer { + width: 257px; /* Set the width of the sidebar: 250px + 2px for the border + 5px for the scrollbar */ + } +} + +/* For smaller displays mobile phone */ +@media only screen and (max-width: 900px) { + .sidebarcontainer { + width: 207px; /* Set the width of the sidebar: 200px + 2px for the border + 5px for the scrollbar */ + } +} + + +nav.foot { + margin-top: 20px; + background-color: #fff; + text-align: center; + border-width: 1px 0 0 0; + border-radius: 0; +} + +nav.foot address { + padding-top: 15px; + text-align: center; +} + +#collapseSideBar { + border-radius: 4px; + color: rgb(68, 68, 68); + font-size: 1.2em; + display: block; + float: left; + width: 0; + padding: 0; + margin: 0; + position: sticky; + top: 0; + right: 0; +} + +#collapseSideBar > a:hover{ + background-color: #e1e1e1; + text-decoration: none; +} + +#collapseSideBar > a { + height: 42px; + width: 15px; + font-size: 1.2em; + color: #333; + padding: 1px; + background-color: #e7e7e7; + border-radius: 0 4px 0 4px; + margin: -11px 0 0 -15px; + text-align: center; + display: flex; + align-items: center; + justify-content: center; + border: solid 1px #e7e7e7; +} + +/* collapsed */ + +body.sidebar-collapsed .sidebar { + display: none; +} + +body.sidebar-collapsed .sidebarcontainer { + border: none; + padding: 0; + width: 5px; + overflow: visible; + background-color: transparent; +} + +body.sidebar-collapsed #main { + margin: 0 0 0 25px!important; +} + +body.sidebar-collapsed #collapseSideBar { + left: 1px; +} + +body.sidebar-collapsed #collapseSideBar > a { + margin-top: -1px; + margin-left: 0; + border-radius: 4px; + background-color: #f8f8f8; +} + +body.sidebar-collapsed #collapseSideBar > a:hover { + background-color: #e7e7e7; +} + +/* On smaller screens, where width is less than 650px, simply hide sidebar */ +@media screen and (max-width: 650px) { + .sidebar { + display: none; + } + #main { + margin: 0; + } + .sidebarcontainer { + display: none!important; + } + #collapseSideBar { + display: none; + } +} + +/* Style for expandable content */ + +input.tocChildrenToggle { + display: none; + } + +.lbl-toggle { + display: block; + width: 18px; + font-weight: bold; + font-family: monospace; + font-size: 12px; + text-transform: uppercase; + text-align: center; + color: #333; + /* background: #0069ff; */ + cursor: pointer; + border-radius: 7px; + transition: all 0.1s ease-out; + margin: 0 0 0 0; + padding: 5px 2px 0 2px; + color: rgb(163, 163, 163); + position: absolute; +} + +.lbl-toggle::before { + content: " "; + display: inline-block; + border-top: 5px solid transparent; + border-bottom: 5px solid transparent; + border-left: 5px solid currentColor; + vertical-align: middle; + margin-right: 0.7rem; + transform: translateY(-2px); + } + +.lbl-toggle:hover { + color: #333; + } + +.tocChildrenToggle:checked + .lbl-toggle::before { + transform: rotate(90deg) translateX(-3px); + } + +.expandableContent { + height: 0px; + overflow: hidden; + flex-basis: 100%; + padding: 0 0 0 8px; + margin-left: 5px; + border-left: 1px solid #e7e7e7; +} + +.expandableContent > div { + margin-top: 5px; +} + +.tocChildrenToggle:checked ~ .expandableContent { + height: auto; +} + +.tocChildrenToggle:not(:checked) ~ .expandableContent .lbl-toggle { + position: relative; +} + +.tocChildrenToggle:checked + .lbl-toggle { + border-bottom-right-radius: 0; + border-bottom-left-radius: 0; + color: #333; + } + +.expandableContent .childrenKindTitle { + font-size: 14px; + /* 
margin-left: 5px; */ +} + +.expandableItem { + display: flex; + flex-wrap: wrap; +} + +.expandableItem > code { + width: calc(100% - 20px)!important; + margin-left: 18px; +} + +/* Special cases to display the current object name in the sidebar */ +.thisobject a { + font-weight: bold; +} +.expandableItem label.notExpandable { + cursor: not-allowed; +} /* Version modified style */ .rst-versionmodified { display: block; font-weight: bold; } +/* Search */ + +/* clears the ‘X’ from search input for Chrome */ +input[type="search"]::-webkit-search-decoration, +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-results-button, +input[type="search"]::-webkit-search-results-decoration { display: none; } + +.navlinks > #search-box-container { + padding: 0 0 8px 15px; + align-self: flex-end; + margin-left: auto; + display: none; +} + +#search-results { + margin-top: 5px; +} + +#search-results tr{ + display:block; + border-bottom: 0.5px solid #CCC; +} + +#search-results tr { + border-bottom: 1px #ddd solid; + padding-bottom: 1px; +} + +#search-results tr td { + border-left: 1px #ddd solid; + padding: 2px; +} + +#search-results tr td:first-child { + width: 120px; +} + +#search-results tr:last-child{ + border-bottom: none; +} + +#search-results tr article, #search-results tr article *{ + display:inline; +} + +#search-results section { + padding: 5px 0 0 8px; +} + +.search-help-hidden #search-help-box{ + display: none!important; +} + +#search-help-button{ + background-color: #e6e6e6; +} + +.search-help-hidden #search-help-button{ + background-color: rgb(255, 255, 255); +} + +.search-help-hidden #search-help-button:hover { + background-color: #e6e6e6; +} + +#search-results-container { + padding: 10px; + width: 100%; + max-width: 850px; + max-height: calc(100vh - 70px); + right: 0; + position: absolute; + overflow-x: hidden; + overflow-y: scroll; + background-color: #fbfbfb; + border: 1px solid #CCC; + border-radius: 4px; + z-index: 500; + margin-top: -9px; + word-break: break-word; +} + +#search-status{ + padding-bottom:2px; +} + +#search-buttons{ + float: right; +} + +#search-buttons > span { + padding: 0.3em 0.4em 0.4em; +} + +#toggle-search-in-docstrings-checkbox{ + margin-top: -2.5px; + cursor: pointer; +} + /* Constant values repr */ pre.constant-value { padding: .5em; } .rst-variable-linewrap { color: #604000; font-weight: bold; } @@ -552,3 +1107,19 @@ pre.constant-value { padding: .5em; } .rst-re-op { color: #fc7844; } .rst-re-group { color: #309078; } .rst-re-ref { color: #890000; } + +/* highlight the targeted item with "#" */ +#childList a:target ~ .functionHeader, #childList a:target ~ .functionBody{ + background-color: rgb(253, 255, 223); +} +#childList a:target ~ .functionHeader{ + box-shadow: 0px 0px 0px 10px rgb(253, 255, 223); +} +#childList a:target ~ .functionBody{ + box-shadow: -2px -8px 0px 13px rgb(253 255 223); +} + +/* deprecations uses a orange text */ +.rst-deprecated > .rst-versionmodified{ + color:#aa6708; +} diff --git a/pydoctor/themes/base/attribute-child.html b/pydoctor/themes/base/attribute-child.html index f45358e59..50ec4e6d4 100644 --- a/pydoctor/themes/base/attribute-child.html +++ b/pydoctor/themes/base/attribute-child.html @@ -1,5 +1,5 @@
    - - + Docstring. diff --git a/pydoctor/themes/base/common.html b/pydoctor/themes/base/common.html index fd7827835..44f4a24dc 100644 --- a/pydoctor/themes/base/common.html +++ b/pydoctor/themes/base/common.html @@ -1,6 +1,6 @@ - + Head @@ -8,57 +8,75 @@
    - Nav + + +
    -
    - -
    - -