From d4ba892baa42cdee8189742c3f27bf63d6094370 Mon Sep 17 00:00:00 2001
From: tristanlatr
Date: Tue, 10 Dec 2024 17:32:02 -0500
Subject: [PATCH 1/5] Add simple black config
---
.black.toml | 12 ++++++++++++
tox.ini | 13 +++++++++++++
2 files changed, 25 insertions(+)
create mode 100644 .black.toml
diff --git a/.black.toml b/.black.toml
new file mode 100644
index 000000000..f946dd780
--- /dev/null
+++ b/.black.toml
@@ -0,0 +1,12 @@
+[tool.black]
+line-length = 120
+skip-string-normalization = 1
+required-version = 24
+target-version = ['py39']
+
+# 'extend-exclude' excludes files or directories in addition to the defaults
+extend-exclude = '''
+(
+ .+/sre_.+.py | .+/testpackages/.+
+)
+'''
\ No newline at end of file
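
A note on the exclude pattern: extend-exclude is a single multi-line regular expression matched against file paths (black compiles multi-line exclude patterns in verbose mode, so the whitespace is ignored), and it mirrors the paths the pyflakes invocations in tox.ini already skip. A quick, hypothetical sanity check of the pattern itself, not of black's full path-matching rules:

    import re
    # Pattern from .black.toml above, with the verbose-mode whitespace removed.
    exclude = re.compile(r".+/sre_.+.py|.+/testpackages/.+")
    assert exclude.match("pydoctor/sre_parse36.py")            # vendored sre module: skipped
    assert exclude.match("pydoctor/test/testpackages/mod.py")  # test fixtures: skipped
    assert not exclude.match("pydoctor/model.py")              # regular source: formatted
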
diff --git a/tox.ini b/tox.ini
index ec3c4accf..1add50a25 100644
--- a/tox.ini
+++ b/tox.ini
@@ -73,6 +73,19 @@ commands =
sh -c "find pydoctor/ -name \*.py ! -path '*/testpackages/*' ! -path '*/sre_parse36.py' ! -path '*/sre_constants36.py' | xargs pyflakes"
sh -c "find docs/ -name \*.py ! -path '*demo/*' | xargs pyflakes"
+[testenv:black]
+description = Run black over the pydoctor code
+deps =
+ black==24.8.0
+commands =
+ black --check --diff --color --config=.black.toml ./pydoctor
+
+[testenv:black-reformat]
+description = Run black over the pydoctor code
+deps =
+ black==24.8.0
+commands =
+ black --color --config=.black.toml ./pydoctor
[testenv:cpython-apidocs]
description = Build CPython 3.11 API documentation
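
With this patch applied, the check and the fix are each one tox invocation away (expected usage, assuming tox is installed and run from the repository root):

    $ tox -e black            # --check --diff: report unformatted files, exit non-zero
    $ tox -e black-reformat   # rewrite files under ./pydoctor in place

Both environments pin black==24.8.0, which satisfies the required-version = 24 constraint in .black.toml.
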
From 38068054e22e32c47afb018624ab1572232af14d Mon Sep 17 00:00:00 2001
From: tristanlatr
Date: Tue, 10 Dec 2024 17:34:11 -0500
Subject: [PATCH 2/5] Rename tox env "black-reformat" to "reformat"
---
tox.ini | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tox.ini b/tox.ini
index 1add50a25..77e218448 100644
--- a/tox.ini
+++ b/tox.ini
@@ -74,14 +74,14 @@ commands =
sh -c "find docs/ -name \*.py ! -path '*demo/*' | xargs pyflakes"
[testenv:black]
-description = Run black over the pydoctor code
+description = Check the format of the code with black
deps =
black==24.8.0
commands =
black --check --diff --color --config=.black.toml ./pydoctor
-[testenv:black-reformat]
-description = Run black over the pydoctor code
+[testenv:reformat]
+description = Reformat the code with black
deps =
black==24.8.0
commands =
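
After the rename the two environments read as a check/fix pair; the underlying black commands are unchanged:

    $ tox -e black      # CI-style check (black --check --diff --color --config=.black.toml ./pydoctor)
    $ tox -e reformat   # apply fixes (black --color --config=.black.toml ./pydoctor)
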
From a10c695097ea21683a1c9d0e1f3da27c284a87fb Mon Sep 17 00:00:00 2001
From: tristanlatr
Date: Thu, 12 Dec 2024 10:08:35 -0500
Subject: [PATCH 3/5] Use the default value for line-length
---
.black.toml | 2 +-
pydoctor/__init__.py | 1 +
pydoctor/_configparser.py | 178 ++-
pydoctor/astbuilder.py | 591 ++++---
pydoctor/astutils.py | 419 ++---
pydoctor/driver.py | 69 +-
pydoctor/epydoc/__init__.py | 1 -
pydoctor/epydoc/doctest.py | 76 +-
pydoctor/epydoc/docutils.py | 66 +-
pydoctor/epydoc/markup/__init__.py | 108 +-
pydoctor/epydoc/markup/_napoleon.py | 25 +-
pydoctor/epydoc/markup/_pyval_repr.py | 483 +++---
pydoctor/epydoc/markup/_types.py | 98 +-
pydoctor/epydoc/markup/epytext.py | 600 ++++---
pydoctor/epydoc/markup/google.py | 1 +
pydoctor/epydoc/markup/numpy.py | 1 +
pydoctor/epydoc/markup/plaintext.py | 35 +-
pydoctor/epydoc/markup/restructuredtext.py | 199 +--
pydoctor/epydoc2stan.py | 392 +++--
pydoctor/extensions/__init__.py | 107 +-
pydoctor/extensions/attrs.py | 78 +-
pydoctor/extensions/deprecate.py | 73 +-
pydoctor/extensions/zopeinterface.py | 136 +-
pydoctor/factory.py | 24 +-
pydoctor/linker.py | 99 +-
pydoctor/model.py | 585 ++++---
pydoctor/mro.py | 4 +-
pydoctor/napoleon/docstring.py | 225 +--
pydoctor/napoleon/iterators.py | 21 +-
pydoctor/node2stan.py | 70 +-
pydoctor/options.py | 554 ++++---
pydoctor/qnmatch.py | 21 +-
pydoctor/sphinx.py | 93 +-
pydoctor/sphinx_ext/build_apidocs.py | 10 +-
pydoctor/stanutils.py | 16 +-
pydoctor/templatewriter/__init__.py | 160 +-
pydoctor/templatewriter/pages/__init__.py | 273 ++--
.../templatewriter/pages/attributechild.py | 11 +-
.../templatewriter/pages/functionchild.py | 10 +-
pydoctor/templatewriter/pages/sidebar.py | 302 ++--
pydoctor/templatewriter/pages/table.py | 42 +-
pydoctor/templatewriter/search.py | 105 +-
pydoctor/templatewriter/summary.py | 132 +-
pydoctor/templatewriter/util.py | 110 +-
pydoctor/templatewriter/writer.py | 46 +-
pydoctor/test/__init__.py | 3 +-
pydoctor/test/epydoc/__init__.py | 7 +-
pydoctor/test/epydoc/test_epytext.py | 66 +-
pydoctor/test/epydoc/test_epytext2html.py | 38 +-
pydoctor/test/epydoc/test_epytext2node.py | 9 +-
pydoctor/test/epydoc/test_google_numpy.py | 43 +-
.../test/epydoc/test_parsed_docstrings.py | 22 +-
pydoctor/test/epydoc/test_pyval_repr.py | 1191 +++++++++++---
pydoctor/test/epydoc/test_restructuredtext.py | 171 +-
pydoctor/test/test_astbuilder.py | 1183 +++++++++-----
pydoctor/test/test_astutils.py | 41 +-
pydoctor/test/test_attrs.py | 70 +-
pydoctor/test/test_colorize.py | 4 +
pydoctor/test/test_commandline.py | 128 +-
pydoctor/test/test_configparser.py | 467 +++---
.../test/test_cyclic_imports_base_classes.py | 3 +-
pydoctor/test/test_epydoc2stan.py | 962 +++++++----
pydoctor/test/test_model.py | 224 +--
pydoctor/test/test_mro.py | 237 ++-
pydoctor/test/test_napoleon_docstring.py | 1414 ++++++++++-------
pydoctor/test/test_napoleon_iterators.py | 4 +
pydoctor/test/test_node2stan.py | 111 +-
pydoctor/test/test_options.py | 75 +-
pydoctor/test/test_packages.py | 16 +-
pydoctor/test/test_pydantic_fields.py | 14 +-
pydoctor/test/test_qnmatch.py | 89 +-
pydoctor/test/test_sphinx.py | 186 +--
pydoctor/test/test_templatewriter.py | 390 +++--
.../test/test_twisted_python_deprecate.py | 135 +-
pydoctor/test/test_type_fields.py | 304 ++--
pydoctor/test/test_utils.py | 11 +-
pydoctor/test/test_visitor.py | 66 +-
pydoctor/test/test_zopeinterface.py | 96 +-
pydoctor/themes/__init__.py | 2 +
pydoctor/utils.py | 30 +-
pydoctor/visitor.py | 312 ++--
81 files changed, 8990 insertions(+), 5786 deletions(-)
diff --git a/.black.toml b/.black.toml
index f946dd780..757d0d0bc 100644
--- a/.black.toml
+++ b/.black.toml
@@ -1,5 +1,5 @@
[tool.black]
-line-length = 120
+line-length = 88
skip-string-normalization = 1
required-version = 24
target-version = ['py39']
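
(88 columns is black's built-in default, so dropping the 120 override brings the project in line with stock black; the large diffstat above is the resulting re-wrap of the codebase.)
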
diff --git a/pydoctor/__init__.py b/pydoctor/__init__.py
index c8f38d605..c5cdb4b3c 100644
--- a/pydoctor/__init__.py
+++ b/pydoctor/__init__.py
@@ -3,6 +3,7 @@
Warning: PyDoctor's API isn't stable YET, custom builds are prone to break!
"""
+
import importlib.metadata as importlib_metadata
__version__ = importlib_metadata.version('pydoctor')
diff --git a/pydoctor/_configparser.py b/pydoctor/_configparser.py
index 90cd73d15..75524f7c6 100644
--- a/pydoctor/_configparser.py
+++ b/pydoctor/_configparser.py
@@ -20,6 +20,7 @@
>>> parser = ArgumentParser(..., default_config_files=['./pyproject.toml', 'setup.cfg', 'my_super_tool.ini'], config_file_parser_class=MixedParser)
"""
+
from __future__ import annotations
import argparse
@@ -38,11 +39,13 @@
if sys.version_info >= (3, 11):
from tomllib import load as _toml_load
import io
- # The tomllib module from the standard library
- # expect a binary IO and will fail if receives otherwise.
+
+ # The tomllib module from the standard library
+ # expect a binary IO and will fail if receives otherwise.
# So we hack a compat function that will work with TextIO and assume the utf-8 encoding.
def toml_load(stream: TextIO) -> Any:
return _toml_load(io.BytesIO(stream.read().encode()))
+
else:
from toml import load as toml_load
@@ -50,34 +53,37 @@ def toml_load(stream: TextIO) -> Any:
# - https://stackoverflow.com/questions/11859442/how-to-match-string-in-quotes-using-regex
# - and https://stackoverflow.com/a/41005190
-_QUOTED_STR_REGEX = re.compile(r'(^\"(?:\\.|[^\"\\])*\"$)|'
- r'(^\'(?:\\.|[^\'\\])*\'$)')
+_QUOTED_STR_REGEX = re.compile(r'(^\"(?:\\.|[^\"\\])*\"$)|' r'(^\'(?:\\.|[^\'\\])*\'$)')
+
+_TRIPLE_QUOTED_STR_REGEX = re.compile(
+ r'(^\"\"\"(\s+)?(([^\"]|\"([^\"]|\"[^\"]))*(\"\"?)?)?(\s+)?(?:\\.|[^\"\\])\"\"\"$)|'
+ # Unescaped quotes at the end of a string generates
+ # "SyntaxError: EOL while scanning string literal",
+ # so we don't account for those kind of strings as quoted.
+ r'(^\'\'\'(\s+)?(([^\']|\'([^\']|\'[^\']))*(\'\'?)?)?(\s+)?(?:\\.|[^\'\\])\'\'\'$)',
+ flags=re.DOTALL,
+)
-_TRIPLE_QUOTED_STR_REGEX = re.compile(r'(^\"\"\"(\s+)?(([^\"]|\"([^\"]|\"[^\"]))*(\"\"?)?)?(\s+)?(?:\\.|[^\"\\])\"\"\"$)|'
- # Unescaped quotes at the end of a string generates
- # "SyntaxError: EOL while scanning string literal",
- # so we don't account for those kind of strings as quoted.
- r'(^\'\'\'(\s+)?(([^\']|\'([^\']|\'[^\']))*(\'\'?)?)?(\s+)?(?:\\.|[^\'\\])\'\'\'$)', flags=re.DOTALL)
@functools.lru_cache(maxsize=256, typed=True)
-def is_quoted(text:str, triple:bool=True) -> bool:
+def is_quoted(text: str, triple: bool = True) -> bool:
"""
- Detect whether a string is a quoted representation.
+ Detect whether a string is a quoted representation.
@param triple: Also match tripple quoted strings.
"""
- return bool(_QUOTED_STR_REGEX.match(text)) or \
- (triple and bool(_TRIPLE_QUOTED_STR_REGEX.match(text)))
+ return bool(_QUOTED_STR_REGEX.match(text)) or (triple and bool(_TRIPLE_QUOTED_STR_REGEX.match(text)))
+
-def unquote_str(text:str, triple:bool=True) -> str:
+def unquote_str(text: str, triple: bool = True) -> str:
"""
- Unquote a maybe quoted string representation.
+ Unquote a maybe quoted string representation.
If the string is not detected as being a quoted representation, it returns the same string as passed.
It supports all kinds of python quotes: C{\"\"\"}, C{'''}, C{"} and C{'}.
@param triple: Also unquote tripple quoted strings.
@raises ValueError: If the string is detected as beeing quoted but literal_eval() fails to evaluate it as string.
- This would be a bug in the regex.
+ This would be a bug in the regex.
"""
if is_quoted(text, triple=triple):
try:
@@ -88,7 +94,8 @@ def unquote_str(text:str, triple:bool=True) -> str:
return s
return text
-def parse_toml_section_name(section_name:str) -> Tuple[str, ...]:
+
+def parse_toml_section_name(section_name: str) -> Tuple[str, ...]:
"""
Parse a TOML section name to a sequence of strings.
@@ -105,7 +112,8 @@ def parse_toml_section_name(section_name:str) -> Tuple[str, ...]:
section.append(unquote_str(a.strip(), triple=False))
return tuple(section)
-def get_toml_section(data:Dict[str, Any], section:Union[Tuple[str, ...], str]) -> Optional[Dict[str, Any]]:
+
+def get_toml_section(data: Dict[str, Any], section: Union[Tuple[str, ...], str]) -> Optional[Dict[str, Any]]:
"""
Given some TOML data (as loaded with C{toml.load()}), returns the requested section of the data.
Returns C{None} if the section is not found.
@@ -122,6 +130,7 @@ def get_toml_section(data:Dict[str, Any], section:Union[Tuple[str, ...], str]) -
return None
return itemdata
+
class TomlConfigParser(ConfigFileParser):
"""
U{TOML } parser with support for sections.
@@ -132,7 +141,7 @@ class TomlConfigParser(ConfigFileParser):
# this is a comment
# this is TOML section table:
- [tool.my-software]
+ [tool.my-software]
# how to specify a key-value pair (strings must be quoted):
format-string = "restructuredtext"
# how to set an arg which has action="store_true":
@@ -144,9 +153,9 @@ class TomlConfigParser(ConfigFileParser):
"https://twistedmatrix.com/documents/current/api/objects.inv"]
# how to specify a multiline text:
multi-line-text = '''
- Lorem ipsum dolor sit amet, consectetur adipiscing elit.
- Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
- Maecenas quis dapibus leo, a pellentesque leo.
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+ Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
+ Maecenas quis dapibus leo, a pellentesque leo.
'''
# how to specify a empty text:
empty-text = ''
@@ -166,11 +175,11 @@ class TomlConfigParser(ConfigFileParser):
def __init__(self, sections: List[str]) -> None:
super().__init__()
self.sections = sections
-
+
def __call__(self) -> ConfigFileParser:
return self
- def parse(self, stream:TextIO) -> Dict[str, Any]:
+ def parse(self, stream: TextIO) -> Dict[str, Any]:
"""Parses the keys and values from a TOML config file."""
# parse with configparser to allow multi-line values
try:
@@ -184,7 +193,7 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
for section in self.sections:
data = get_toml_section(config, section)
if data:
- # Seems a little weird, but anything that is not a list is converted to string,
+ # Seems a little weird, but anything that is not a list is converted to string,
# It will be converted back to boolean, int or whatever after.
# Because config values are still passed to argparser for computation.
for key, value in data.items():
@@ -195,26 +204,29 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
else:
result[key] = str(value)
break
-
+
return result
def get_syntax_description(self) -> str:
- return ("Config file syntax is Tom's Obvious, Minimal Language. "
- "See https://github.com/toml-lang/toml/blob/v0.5.0/README.md for details.")
+ return (
+ "Config file syntax is Tom's Obvious, Minimal Language. "
+ "See https://github.com/toml-lang/toml/blob/v0.5.0/README.md for details."
+ )
+
class IniConfigParser(ConfigFileParser):
"""
INI parser with support for sections.
-
- This parser somewhat ressembles L{configargparse.ConfigparserConfigFileParser}.
- It uses L{configparser} and evaluate values written with python list syntax.
- With the following changes:
+ This parser somewhat ressembles L{configargparse.ConfigparserConfigFileParser}.
+ It uses L{configparser} and evaluate values written with python list syntax.
+
+ With the following changes:
- Must be created with argument to bind the parser to a list of sections.
- Does not convert multiline strings to single line.
- - Optional support for converting multiline strings to list (if ``split_ml_text_to_list=True``).
- - Optional support for quoting strings in config file
- (useful when text must not be converted to list or when text
+ - Optional support for converting multiline strings to list (if ``split_ml_text_to_list=True``).
+ - Optional support for quoting strings in config file
+ (useful when text must not be converted to list or when text
should contain trailing whitespaces).
- Comments may only appear on their own in an otherwise empty line (like in configparser).
@@ -226,7 +238,7 @@ class IniConfigParser(ConfigFileParser):
; also a comment
[my_super_tool]
# how to specify a key-value pair:
- format-string: restructuredtext
+ format-string: restructuredtext
# white space are ignored, so name = value same as name=value
# this is why you can quote strings (double quotes works just as well)
quoted-string = '\thello\tmom... '
@@ -238,39 +250,39 @@ class IniConfigParser(ConfigFileParser):
repeatable-option = ["https://docs.python.org/3/objects.inv",
"https://twistedmatrix.com/documents/current/api/objects.inv"]
# how to specify a multiline text:
- multi-line-text =
- Lorem ipsum dolor sit amet, consectetur adipiscing elit.
- Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
- Maecenas quis dapibus leo, a pellentesque leo.
+ multi-line-text =
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+ Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
+ Maecenas quis dapibus leo, a pellentesque leo.
# how to specify a empty text:
- empty-text =
+ empty-text =
# this also works:
empty-text = ''
# how to specify a empty list:
empty-list = []
- If you use L{IniConfigParser(sections, split_ml_text_to_list=True)},
+ If you use L{IniConfigParser(sections, split_ml_text_to_list=True)},
the same rules are applicable with the following changes::
[my-software]
- # to specify a list arg (eg. arg which has action="append"),
+ # to specify a list arg (eg. arg which has action="append"),
# just enter one value per line (the list literal format can still be used):
repeatable-option =
https://docs.python.org/3/objects.inv
https://twistedmatrix.com/documents/current/api/objects.inv
# to specify a multiline text, you have to quote it:
multi-line-text = '''
- Lorem ipsum dolor sit amet, consectetur adipiscing elit.
- Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
- Maecenas quis dapibus leo, a pellentesque leo.
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+ Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
+ Maecenas quis dapibus leo, a pellentesque leo.
'''
# how to specify a empty text:
empty-text = ''
# how to specify a empty list:
empty-list = []
- # the following empty value would be simply ignored because we can't
+ # the following empty value would be simply ignored because we can't
# differenciate between simple value and list value without any data:
- totally-ignored-field =
+ totally-ignored-field =
Usage:
@@ -282,7 +294,7 @@ class IniConfigParser(ConfigFileParser):
"""
- def __init__(self, sections:List[str], split_ml_text_to_list:bool) -> None:
+ def __init__(self, sections: List[str], split_ml_text_to_list: bool) -> None:
super().__init__()
self.sections = sections
self.split_ml_text_to_list = split_ml_text_to_list
@@ -290,7 +302,7 @@ def __init__(self, sections:List[str], split_ml_text_to_list:bool) -> None:
def __call__(self) -> ConfigFileParser:
return self
- def parse(self, stream:TextIO) -> Dict[str, Any]:
+ def parse(self, stream: TextIO) -> Dict[str, Any]:
"""Parses the keys and values from an INI config file."""
# parse with configparser to allow multi-line values
config = configparser.ConfigParser()
@@ -304,7 +316,7 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
for section in config.sections() + [configparser.DEFAULTSECT]:
if section not in self.sections:
continue
- for k,value in config[section].items():
+ for k, value in config[section].items():
# value is already strip by configparser
if not value and self.split_ml_text_to_list:
# ignores empty values when split_ml_text_to_list is True
@@ -320,7 +332,11 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
except Exception as e:
# error evaluating object
_tripple = 'tripple ' if '\n' in value else ''
- raise ConfigFileParserException("Error evaluating list: " + str(e) + f". Put {_tripple}quotes around your text if it's meant to be a string.") from e
+ raise ConfigFileParserException(
+ "Error evaluating list: "
+ + str(e)
+ + f". Put {_tripple}quotes around your text if it's meant to be a string."
+ ) from e
else:
if is_quoted(value):
# evaluate quoted string
@@ -337,22 +353,27 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
return result
def get_syntax_description(self) -> str:
- msg = ("Uses configparser module to parse an INI file which allows multi-line values. "
- "See https://docs.python.org/3/library/configparser.html for details. "
- "This parser includes support for quoting strings literal as well as python list syntax evaluation. ")
+ msg = (
+ "Uses configparser module to parse an INI file which allows multi-line values. "
+ "See https://docs.python.org/3/library/configparser.html for details. "
+ "This parser includes support for quoting strings literal as well as python list syntax evaluation. "
+ )
if self.split_ml_text_to_list:
- msg += ("Alternatively lists can be constructed with a plain multiline string, "
- "each non-empty line will be converted to a list item.")
+ msg += (
+ "Alternatively lists can be constructed with a plain multiline string, "
+ "each non-empty line will be converted to a list item."
+ )
return msg
+
class CompositeConfigParser(ConfigFileParser):
"""
A config parser that understands multiple formats.
- This parser will successively try to parse the file with each compisite parser, until it succeeds,
+ This parser will successively try to parse the file with each compisite parser, until it succeeds,
else it fails showing all encountered error messages.
- The following code will make configargparse understand both TOML and INI formats.
+ The following code will make configargparse understand both TOML and INI formats.
Making it easy to integrate in both C{pyproject.toml} and C{setup.cfg}.
>>> import configargparse
@@ -361,7 +382,7 @@ class CompositeConfigParser(ConfigFileParser):
>>> parser = configargparse.ArgParser(
... default_config_files=['setup.cfg', 'my_super_tool.ini'],
... config_file_parser_class=configargparse.CompositeConfigParser(
- ... [configargparse.TomlConfigParser(my_tool_sections),
+ ... [configargparse.TomlConfigParser(my_tool_sections),
... configargparse.IniConfigParser(my_tool_sections, split_ml_text_to_list=True)]
... ),
... )
@@ -375,36 +396,36 @@ def __init__(self, config_parser_types: List[Callable[[], ConfigFileParser]]) ->
def __call__(self) -> ConfigFileParser:
return self
- def parse(self, stream:TextIO) -> Dict[str, Any]:
+ def parse(self, stream: TextIO) -> Dict[str, Any]:
errors = []
for p in self.parsers:
try:
- return p.parse(stream) # type: ignore[no-any-return]
+ return p.parse(stream) # type: ignore[no-any-return]
except Exception as e:
stream.seek(0)
errors.append(e)
- raise ConfigFileParserException(
- f"Error parsing config: {', '.join(repr(str(e)) for e in errors)}")
-
+ raise ConfigFileParserException(f"Error parsing config: {', '.join(repr(str(e)) for e in errors)}")
+
def get_syntax_description(self) -> str:
msg = "Uses multiple config parser settings (in order): \n"
- for i, parser in enumerate(self.parsers):
+ for i, parser in enumerate(self.parsers):
msg += f"[{i+1}] {parser.__class__.__name__}: {parser.get_syntax_description()} \n"
return msg
+
class ValidatorParser(ConfigFileParser):
"""
- A parser that warns when unknown options are used.
+ A parser that warns when unknown options are used.
It must be created with a reference to the ArgumentParser object, so like::
parser = ArgumentParser(
prog='mysoft',
config_file_parser_class=ConfigParser,)
-
+
# Add the validator to the config file parser, this is arguably a hack.
parser._config_file_parser = ValidatorParser(parser._config_file_parser, parser)
-
- @note: Using this parser implies acting
+
+ @note: Using this parser implies acting
like L{ArgumentParser}'s option C{ignore_unknown_config_file_keys=True}.
So no need to explicitely mention it.
"""
@@ -413,18 +434,21 @@ def __init__(self, config_parser: ConfigFileParser, argument_parser: ArgumentPar
super().__init__()
self.config_parser = config_parser
self.argument_parser = argument_parser
-
+
def get_syntax_description(self) -> str:
- return self.config_parser.get_syntax_description() #type:ignore[no-any-return]
+ return self.config_parser.get_syntax_description() # type:ignore[no-any-return]
- def parse(self, stream:TextIO) -> Dict[str, Any]:
+ def parse(self, stream: TextIO) -> Dict[str, Any]:
data: Dict[str, Any] = self.config_parser.parse(stream)
# Prepare for checking config file.
- # This code maps all supported config keys to their
+ # This code maps all supported config keys to their
# argparse action counterpart, it will allow more checks to be done down the road.
- known_config_keys: Dict[str, argparse.Action] = {config_key: action for action in self.argument_parser._actions
- for config_key in self.argument_parser.get_possible_config_keys(action)}
+ known_config_keys: Dict[str, argparse.Action] = {
+ config_key: action
+ for action in self.argument_parser._actions
+ for config_key in self.argument_parser.get_possible_config_keys(action)
+ }
# Trigger warning
new_data = {}
@@ -436,5 +460,5 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
# Remove option
else:
new_data[key] = value
-
+
return new_data
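
The _configparser.py hunks above are mechanical black reformatting. For orientation, the quoting helpers they touch behave as described in their docstrings; a hypothetical doctest-style sketch:

    >>> from pydoctor._configparser import is_quoted, unquote_str
    >>> is_quoted('"hello mom"')
    True
    >>> unquote_str('"hello mom"')
    'hello mom'
    >>> unquote_str('not a quoted repr')  # unquoted input comes back unchanged
    'not a quoted repr'
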
diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py
index f80acdcc0..85240ce00 100644
--- a/pydoctor/astbuilder.py
+++ b/pydoctor/astbuilder.py
@@ -1,4 +1,5 @@
"""Convert ASTs into L{pydoctor.model.Documentable} instances."""
+
from __future__ import annotations
import ast
@@ -9,15 +10,47 @@
from inspect import Parameter, Signature
from pathlib import Path
from typing import (
- Any, Callable, Collection, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple,
- Type, TypeVar, Union, Set, cast
+ Any,
+ Callable,
+ Collection,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ Set,
+ cast,
)
from pydoctor import epydoc2stan, model, node2stan, extensions, linker
from pydoctor.epydoc.markup._pyval_repr import colorize_inline_pyval
-from pydoctor.astutils import (is_none_literal, is_typing_annotation, is_using_annotations, is_using_typing_final, node2dottedname, node2fullname,
- is__name__equals__main__, unstring_annotation, upgrade_annotation, iterassign, extract_docstring_linenum, infer_type, get_parents,
- get_docstring_node, get_assign_docstring_node, unparse, NodeVisitor, Parentage, Str)
+from pydoctor.astutils import (
+ is_none_literal,
+ is_typing_annotation,
+ is_using_annotations,
+ is_using_typing_final,
+ node2dottedname,
+ node2fullname,
+ is__name__equals__main__,
+ unstring_annotation,
+ upgrade_annotation,
+ iterassign,
+ extract_docstring_linenum,
+ infer_type,
+ get_parents,
+ get_docstring_node,
+ get_assign_docstring_node,
+ unparse,
+ NodeVisitor,
+ Parentage,
+ Str,
+)
def parseFile(path: Path) -> ast.Module:
@@ -26,29 +59,29 @@ def parseFile(path: Path) -> ast.Module:
src = f.read() + b'\n'
return _parse(src, filename=str(path))
+
_parse = partial(ast.parse, type_comments=True)
+
def _maybeAttribute(cls: model.Class, name: str) -> bool:
"""Check whether a name is a potential attribute of the given class.
This is used to prevent an assignment that wraps a method from
creating an attribute that would overwrite or shadow that method.
@return: L{True} if the name does not exist or is an existing (possibly
- inherited) attribute, L{False} if this name defines something else than an L{Attribute}.
+ inherited) attribute, L{False} if this name defines something else than an L{Attribute}.
"""
obj = cls.find(name)
return obj is None or isinstance(obj, model.Attribute)
+
class IgnoreAssignment(Exception):
"""
A control flow exception meaning that the assignment should not be further proccessed.
"""
-def _handleAliasing(
- ctx: model.CanContainImportsDocumentable,
- target: str,
- expr: Optional[ast.expr]
- ) -> bool:
+
+def _handleAliasing(ctx: model.CanContainImportsDocumentable, target: str, expr: Optional[ast.expr]) -> bool:
"""If the given expression is a name assigned to a target that is not yet
in use, create an alias.
@return: L{True} iff an alias was created.
@@ -62,8 +95,15 @@ def _handleAliasing(
return True
-_CONTROL_FLOW_BLOCKS:Tuple[Type[ast.stmt],...] = (ast.If, ast.While, ast.For, ast.Try,
- ast.AsyncFor, ast.With, ast.AsyncWith)
+_CONTROL_FLOW_BLOCKS: Tuple[Type[ast.stmt], ...] = (
+ ast.If,
+ ast.While,
+ ast.For,
+ ast.Try,
+ ast.AsyncFor,
+ ast.With,
+ ast.AsyncWith,
+)
"""
AST types that introduces a new control flow block, potentially conditionnal.
"""
@@ -72,17 +112,16 @@ def _handleAliasing(
if sys.version_info >= (3, 11):
_CONTROL_FLOW_BLOCKS += (ast.TryStar,)
-def is_constant(obj: model.Attribute,
- annotation:Optional[ast.expr],
- value:Optional[ast.expr]) -> bool:
+
+def is_constant(obj: model.Attribute, annotation: Optional[ast.expr], value: Optional[ast.expr]) -> bool:
"""
- Detect if the given assignment is a constant.
+ Detect if the given assignment is a constant.
- For an assignment to be detected as constant, it should:
+ For an assignment to be detected as constant, it should:
- have all-caps variable name or using L{typing.Final} annotation
- not be overriden
- not be defined in a conditionnal block or any other kind of control flow blocks
-
+
@note: Must be called after setting obj.annotation to detect variables using Final.
"""
if is_using_typing_final(annotation, obj):
@@ -92,27 +131,29 @@ def is_constant(obj: model.Attribute,
return obj.name.isupper()
return False
+
class TypeAliasVisitorExt(extensions.ModuleVisitorExt):
"""
This visitor implements the handling of type aliases and type variables.
"""
+
def _isTypeVariable(self, ob: model.Attribute) -> bool:
if ob.value is not None:
- if isinstance(ob.value, ast.Call) and \
- node2fullname(ob.value.func, ob) in ('typing.TypeVar',
- 'typing_extensions.TypeVar',
- 'typing.TypeVarTuple',
- 'typing_extensions.TypeVarTuple'):
+ if isinstance(ob.value, ast.Call) and node2fullname(ob.value.func, ob) in (
+ 'typing.TypeVar',
+ 'typing_extensions.TypeVar',
+ 'typing.TypeVarTuple',
+ 'typing_extensions.TypeVarTuple',
+ ):
return True
return False
-
+
def _isTypeAlias(self, ob: model.Attribute) -> bool:
"""
Return C{True} if the Attribute is a type alias.
"""
if ob.value is not None:
- if is_using_annotations(ob.annotation, ('typing.TypeAlias',
- 'typing_extensions.TypeAlias'), ob):
+ if is_using_annotations(ob.annotation, ('typing.TypeAlias', 'typing_extensions.TypeAlias'), ob):
return True
if is_typing_annotation(ob.value, ob.parent):
return True
@@ -120,8 +161,8 @@ def _isTypeAlias(self, ob: model.Attribute) -> bool:
def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
current = self.visitor.builder.current
- for dottedname in iterassign(node):
- if dottedname and len(dottedname)==1:
+ for dottedname in iterassign(node):
+ if dottedname and len(dottedname) == 1:
attr = current.contents.get(dottedname[0])
if attr is None:
return
@@ -130,27 +171,36 @@ def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
if self._isTypeAlias(attr) is True:
attr.kind = model.DocumentableKind.TYPE_ALIAS
# unstring type aliases
- attr.value = upgrade_annotation(unstring_annotation(
- # this cast() is safe because _isTypeAlias() return True only if value is not None
- cast(ast.expr, attr.value), attr, section='type alias'), attr, section='type alias')
+ attr.value = upgrade_annotation(
+ unstring_annotation(
+ # this cast() is safe because _isTypeAlias() return True only if value is not None
+ cast(ast.expr, attr.value),
+ attr,
+ section='type alias',
+ ),
+ attr,
+ section='type alias',
+ )
elif self._isTypeVariable(attr) is True:
# TODO: unstring bound argument of type variables
attr.kind = model.DocumentableKind.TYPE_VARIABLE
-
+
visit_AnnAssign = visit_Assign
+
def is_attribute_overridden(obj: model.Attribute, new_value: Optional[ast.expr]) -> bool:
"""
Detect if the optional C{new_value} expression override the one already stored in the L{Attribute.value} attribute.
"""
return obj.value is not None and new_value is not None
+
def extract_final_subscript(annotation: ast.Subscript) -> ast.expr:
"""
Extract the "str" part from annotations like "Final[str]".
@raises ValueError: If the "Final" annotation is not valid.
- """
+ """
ann_slice = annotation.slice
if isinstance(ann_slice, (ast.Slice, ast.Tuple)):
raise ValueError("Annotation is invalid, it should not contain slices.")
@@ -158,6 +208,7 @@ def extract_final_subscript(annotation: ast.Subscript) -> ast.expr:
assert isinstance(ann_slice, ast.expr)
return ann_slice
+
class ModuleVistor(NodeVisitor):
def __init__(self, builder: 'ASTBuilder', module: model.Module):
@@ -166,13 +217,13 @@ def __init__(self, builder: 'ASTBuilder', module: model.Module):
self.system = builder.system
self.module = module
self._override_guard_state: Tuple[Optional[model.Documentable], Set[str]] = (None, set())
-
+
@contextlib.contextmanager
def override_guard(self) -> Iterator[None]:
"""
- Returns a context manager that will make the builder ignore any new
+ Returns a context manager that will make the builder ignore any new
assigments to existing names within the same context. Currently used to visit C{If.orelse} and C{Try.handlers}.
-
+
@note: The list of existing names is generated at the moment of
calling the function, such that new names defined inside these blocks follows the usual override rules.
"""
@@ -186,10 +237,10 @@ def override_guard(self) -> Iterator[None]:
self._override_guard_state = (ctx, set(ctx.localNames()))
yield
self._override_guard_state = ignore_override_init
-
- def _ignore_name(self, ob: model.Documentable, name:str) -> bool:
+
+ def _ignore_name(self, ob: model.Documentable, name: str) -> bool:
"""
- Should this C{name} be ignored because it matches
+ Should this C{name} be ignored because it matches
the override guard in the context of C{ob}?
"""
ctx, names = self._override_guard_state
@@ -201,17 +252,17 @@ def _infer_attr_annotations(self, scope: model.Documentable) -> None:
for attrib in scope.contents.values():
if not isinstance(attrib, model.Attribute):
continue
- # If this attribute has not explicit annotation,
+ # If this attribute has not explicit annotation,
# infer its type from it's ast expression.
if attrib.annotation is None and attrib.value is not None:
# do not override explicit annotation
attrib.annotation = infer_type(attrib.value)
-
+
def _tweak_constants_annotations(self, scope: model.Documentable) -> None:
# tweak constants annotations when we leave the scope so we can still
# check whether the annotation uses Final while we're visiting other nodes.
for attrib in scope.contents.values():
- if not isinstance(attrib, model.Attribute) or attrib.kind is not model.DocumentableKind.CONSTANT :
+ if not isinstance(attrib, model.Attribute) or attrib.kind is not model.DocumentableKind.CONSTANT:
continue
self._tweak_constant_annotation(attrib)
@@ -222,24 +273,24 @@ def visit_If(self, node: ast.If) -> None:
# whatever is declared in them cannot be imported
# and thus is not part of the API
raise self.SkipChildren()
-
+
def depart_If(self, node: ast.If) -> None:
# At this point the body of the If node has already been visited
# Visit the 'orelse' block of the If node, with override guard
with self.override_guard():
for n in node.orelse:
self.walkabout(n)
-
+
def depart_Try(self, node: ast.Try) -> None:
# At this point the body of the Try node has already been visited
# Visit the 'orelse' and 'finalbody' blocks of the Try node.
-
+
for n in node.orelse:
self.walkabout(n)
for n in node.finalbody:
self.walkabout(n)
-
- # Visit the handlers with override guard
+
+ # Visit the handlers with override guard
with self.override_guard():
for h in node.handlers:
for n in h.body:
@@ -277,29 +328,30 @@ def visit_ClassDef(self, node: ast.ClassDef) -> None:
# This handles generics in MRO, by extracting the first
# subscript value::
# class Visitor(MyGeneric[T]):...
- # 'MyGeneric' will be added to rawbases instead
+ # 'MyGeneric' will be added to rawbases instead
# of 'MyGeneric[T]' which cannot resolve to anything.
name_node = base_node
if isinstance(base_node, ast.Subscript):
name_node = base_node.value
-
- str_base = '.'.join(node2dottedname(name_node) or \
- # Fallback on unparse() if the expression is unknown by node2dottedname().
- [unparse(base_node).strip()])
-
+
+ str_base = '.'.join(
+ node2dottedname(name_node) # Fallback on unparse() if the expression is unknown by node2dottedname().
+ or [unparse(base_node).strip()]
+ )
+
# Store the base as string and as ast.expr in rawbases list.
rawbases += [(str_base, base_node)]
-
+
# Try to resolve the base, put None if could not resolve it,
# if we can't resolve it now, it most likely mean that there are
- # import cycles (maybe in TYPE_CHECKING blocks).
+ # import cycles (maybe in TYPE_CHECKING blocks).
# None bases will be re-resolved in post-processing.
expandbase = parent.expandName(str_base)
baseobj = self.system.objForFullName(expandbase)
-
+
if not isinstance(baseobj, model.Class):
baseobj = None
-
+
initialbases.append(expandbase)
initialbaseobjects.append(baseobj)
@@ -319,9 +371,9 @@ def visit_ClassDef(self, node: ast.ClassDef) -> None:
epydoc2stan.extract_fields(cls)
if node.decorator_list:
-
+
cls.raw_decorators = node.decorator_list
-
+
for decnode in node.decorator_list:
args: Optional[Sequence[ast.expr]]
if isinstance(decnode, ast.Call):
@@ -338,18 +390,15 @@ def visit_ClassDef(self, node: ast.ClassDef) -> None:
else:
cls.decorators.append((base, args))
-
- # We're not resolving the subclasses at this point yet because all
+ # We're not resolving the subclasses at this point yet because all
# modules might not have been processed, and since subclasses are only used in the presentation,
# it's better to resolve them in the post-processing instead.
-
def depart_ClassDef(self, node: ast.ClassDef) -> None:
self._tweak_constants_annotations(self.builder.current)
self._infer_attr_annotations(self.builder.current)
self.builder.popClass()
-
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
ctx = self.builder.current
if not isinstance(ctx, model.CanContainImportsDocumentable):
@@ -369,10 +418,7 @@ def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
parent = parent.parent
if parent is None:
assert ctx.parentMod is not None
- ctx.parentMod.report(
- "relative import level (%d) too high" % node.level,
- lineno_offset=node.lineno
- )
+ ctx.parentMod.report("relative import level (%d) too high" % node.level, lineno_offset=node.lineno)
return
if modname is None:
modname = parent.fullName()
@@ -405,8 +451,7 @@ def _importAll(self, modname: str) -> None:
# names that are not private.
names = mod.all
if names is None:
- names = [ name for name in mod.localNames()
- if not name.startswith('_') ]
+ names = [name for name in mod.localNames() if not name.startswith('_')]
# Fetch names to export.
exports = self._getCurrentModuleExports()
@@ -438,9 +483,9 @@ def _getCurrentModuleExports(self) -> Collection[str]:
exports = []
return exports
- def _handleReExport(self, curr_mod_exports:Collection[str],
- origin_name:str, as_name:str,
- origin_module:model.Module) -> bool:
+ def _handleReExport(
+ self, curr_mod_exports: Collection[str], origin_name: str, as_name: str, origin_module: model.Module
+ ) -> bool:
"""
Move re-exported objects into current module.
@@ -451,17 +496,13 @@ def _handleReExport(self, curr_mod_exports:Collection[str],
modname = origin_module.fullName()
if as_name in curr_mod_exports:
# In case of duplicates names, we can't rely on resolveName,
- # So we use content.get first to resolve non-alias names.
+ # So we use content.get first to resolve non-alias names.
ob = origin_module.contents.get(origin_name) or origin_module.resolveName(origin_name)
if ob is None:
- current.report("cannot resolve re-exported name :"
- f'{modname}.{origin_name}', thresh=1)
+ current.report("cannot resolve re-exported name :" f'{modname}.{origin_name}', thresh=1)
else:
if origin_module.all is None or origin_name not in origin_module.all:
- self.system.msg(
- "astbuilder",
- "moving %r into %r" % (ob.fullName(), current.fullName())
- )
+ self.system.msg("astbuilder", "moving %r into %r" % (ob.fullName(), current.fullName()))
# Must be a Module since the exports is set to an empty list if it's not.
assert isinstance(current, model.Module)
ob.reparent(current, as_name)
@@ -484,11 +525,11 @@ def _importNames(self, modname: str, names: Iterable[ast.alias]) -> None:
orgname, asname = al.name, al.asname
if asname is None:
asname = orgname
-
+
# Ignore in override guard
if self._ignore_name(current, asname):
continue
-
+
# If we're importing from a package, make sure imported modules
# are processed (getProcessedModule() ignores non-modules).
if isinstance(mod, model.Package):
@@ -516,7 +557,7 @@ def visit_Import(self, node: ast.Import) -> None:
# processing import statement in odd context
return
_localNameToFullName = current._localNameToFullName_map
-
+
for al in node.names:
targetname, asname = al.name, al.asname
if asname is None:
@@ -537,7 +578,7 @@ def _handleOldSchoolMethodDecoration(self, target: str, expr: Optional[ast.expr]
args = expr.args
if len(args) != 1:
return False
- arg, = args
+ (arg,) = args
if not isinstance(arg, ast.Name):
return False
if target == arg.id and func_name in ['staticmethod', 'classmethod']:
@@ -555,11 +596,14 @@ def _handleOldSchoolMethodDecoration(self, target: str, expr: Optional[ast.expr]
return False
@classmethod
- def _handleConstant(cls, obj:model.Attribute,
- annotation:Optional[ast.expr],
- value:Optional[ast.expr],
- lineno:int,
- defaultKind:model.DocumentableKind) -> None:
+ def _handleConstant(
+ cls,
+ obj: model.Attribute,
+ annotation: Optional[ast.expr],
+ value: Optional[ast.expr],
+ lineno: int,
+ defaultKind: model.DocumentableKind,
+ ) -> None:
if is_constant(obj, annotation=annotation, value=value):
obj.kind = model.DocumentableKind.CONSTANT
# do not call tweak annotation just yet...
@@ -568,7 +612,7 @@ def _handleConstant(cls, obj:model.Attribute,
# declared as constants
if not is_using_typing_final(obj.annotation, obj):
obj.kind = defaultKind
-
+
@staticmethod
def _tweak_constant_annotation(obj: model.Attribute) -> None:
# Display variables annotated with Final with the real type instead.
@@ -578,7 +622,7 @@ def _tweak_constant_annotation(obj: model.Attribute) -> None:
try:
annotation = extract_final_subscript(annotation)
except ValueError as e:
- obj.report(str(e), section='ast', lineno_offset=annotation.lineno-obj.linenumber)
+ obj.report(str(e), section='ast', lineno_offset=annotation.lineno - obj.linenumber)
obj.annotation = infer_type(obj.value) if obj.value else None
else:
# Will not display as "Final[str]" but rather only "str"
@@ -589,35 +633,38 @@ def _tweak_constant_annotation(obj: model.Attribute) -> None:
obj.annotation = infer_type(obj.value) if obj.value else None
@staticmethod
- def _setAttributeAnnotation(obj: model.Attribute,
- annotation: Optional[ast.expr],) -> None:
+ def _setAttributeAnnotation(
+ obj: model.Attribute,
+ annotation: Optional[ast.expr],
+ ) -> None:
if annotation is not None:
# TODO: What to do when an attribute has several explicit annotations?
# (mypy reports a warning in these kind of cases)
obj.annotation = annotation
@staticmethod
- def _storeAttrValue(obj:model.Attribute, new_value:Optional[ast.expr],
- augassign:Optional[ast.operator]=None) -> None:
+ def _storeAttrValue(
+ obj: model.Attribute, new_value: Optional[ast.expr], augassign: Optional[ast.operator] = None
+ ) -> None:
if new_value:
- if augassign:
+ if augassign:
if obj.value:
- # We're storing the value of augmented assignemnt value as binop for the sake
+ # We're storing the value of augmented assignemnt value as binop for the sake
# of correctness, but we're not doing anything special with it at the
# moment, nonethless this could be useful for future developments.
# We don't bother reporting warnings, pydoctor is not a checker.
obj.value = ast.BinOp(left=obj.value, op=augassign, right=new_value)
else:
obj.value = new_value
-
-
- def _handleModuleVar(self,
- target: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator],
- ) -> None:
+
+ def _handleModuleVar(
+ self,
+ target: str,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator],
+ ) -> None:
if target in MODULE_VARIABLES_META_PARSERS:
# This is metadata, not a variable that needs to be documented,
# and therefore doesn't need an Attribute instance.
@@ -627,40 +674,39 @@ def _handleModuleVar(self,
if obj is None:
if augassign:
return
- obj = self.builder.addAttribute(name=target,
- kind=model.DocumentableKind.VARIABLE,
- parent=parent,
- lineno=lineno)
-
- # If it's not an attribute it means that the name is already denifed as function/class
- # probably meaning that this attribute is a bound callable.
+ obj = self.builder.addAttribute(
+ name=target, kind=model.DocumentableKind.VARIABLE, parent=parent, lineno=lineno
+ )
+
+ # If it's not an attribute it means that the name is already denifed as function/class
+ # probably meaning that this attribute is a bound callable.
#
# def func(value, stock) -> int:...
# var = 2
# func = partial(func, value=var)
#
# We don't know how to handle this,
- # so we ignore it to document the original object. This means that we might document arguments
+ # so we ignore it to document the original object. This means that we might document arguments
# that are in reality not existing because they have values in a partial() call for instance.
if not isinstance(obj, model.Attribute):
raise IgnoreAssignment()
-
+
self._setAttributeAnnotation(obj, annotation)
-
+
obj.setLineNumber(lineno)
-
- self._handleConstant(obj, annotation, expr, lineno,
- model.DocumentableKind.VARIABLE)
+
+ self._handleConstant(obj, annotation, expr, lineno, model.DocumentableKind.VARIABLE)
self._storeAttrValue(obj, expr, augassign)
- def _handleAssignmentInModule(self,
- target: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator],
- ) -> None:
+ def _handleAssignmentInModule(
+ self,
+ target: str,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator],
+ ) -> None:
module = self.builder.current
assert isinstance(module, model.Module)
if not _handleAliasing(module, target, expr):
@@ -668,14 +714,15 @@ def _handleAssignmentInModule(self,
else:
raise IgnoreAssignment()
- def _handleClassVar(self,
- name: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator],
- ) -> None:
-
+ def _handleClassVar(
+ self,
+ name: str,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator],
+ ) -> None:
+
cls = self.builder.current
assert isinstance(cls, model.Class)
if not _maybeAttribute(cls, name):
@@ -693,21 +740,16 @@ def _handleClassVar(self,
obj.kind = model.DocumentableKind.CLASS_VARIABLE
self._setAttributeAnnotation(obj, annotation)
-
+
obj.setLineNumber(lineno)
- self._handleConstant(obj, annotation, expr, lineno,
- model.DocumentableKind.CLASS_VARIABLE)
+ self._handleConstant(obj, annotation, expr, lineno, model.DocumentableKind.CLASS_VARIABLE)
self._storeAttrValue(obj, expr, augassign)
-
- def _handleInstanceVar(self,
- name: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int
- ) -> None:
- if not (cls:=self._getClassFromMethodContext()):
+ def _handleInstanceVar(
+ self, name: str, annotation: Optional[ast.expr], expr: Optional[ast.expr], lineno: int
+ ) -> None:
+ if not (cls := self._getClassFromMethodContext()):
raise IgnoreAssignment()
if not _maybeAttribute(cls, name):
raise IgnoreAssignment()
@@ -726,13 +768,14 @@ def _handleInstanceVar(self,
obj.kind = model.DocumentableKind.INSTANCE_VARIABLE
self._storeAttrValue(obj, expr)
- def _handleAssignmentInClass(self,
- target: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator],
- ) -> None:
+ def _handleAssignmentInClass(
+ self,
+ target: str,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator],
+ ) -> None:
cls = self.builder.current
assert isinstance(cls, model.Class)
if not _handleAliasing(cls, target, expr):
@@ -740,11 +783,7 @@ def _handleAssignmentInClass(self,
else:
raise IgnoreAssignment()
- def _handleDocstringUpdate(self,
- targetNode: ast.expr,
- expr: Optional[ast.expr],
- lineno: int
- ) -> None:
+ def _handleDocstringUpdate(self, targetNode: ast.expr, expr: Optional[ast.expr], lineno: int) -> None:
def warn(msg: str) -> None:
module = self.builder.currentMod
assert module is not None
@@ -764,8 +803,9 @@ def warn(msg: str) -> None:
else:
obj = self.system.objForFullName(full_name)
if obj is None:
- warn("Unable to figure out target for __doc__ assignment: "
- "computed full name not found: " + full_name)
+ warn(
+ "Unable to figure out target for __doc__ assignment: " "computed full name not found: " + full_name
+ )
# Determine docstring value.
try:
@@ -775,8 +815,7 @@ def warn(msg: str) -> None:
raise ValueError()
docstring: object = ast.literal_eval(expr)
except ValueError:
- warn("Unable to figure out value for __doc__ assignment, "
- "maybe too complex")
+ warn("Unable to figure out value for __doc__ assignment, " "maybe too complex")
return
if not isinstance(docstring, str):
warn("Ignoring value assigned to __doc__: not a string")
@@ -788,13 +827,14 @@ def warn(msg: str) -> None:
# we have the final docstrings for all objects.
obj.parsed_docstring = None
- def _handleAssignment(self,
- targetNode: ast.expr,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator]=None,
- ) -> None:
+ def _handleAssignment(
+ self,
+ targetNode: ast.expr,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator] = None,
+ ) -> None:
"""
@raises IgnoreAssignment: If the assignemnt should not be further processed.
"""
@@ -826,12 +866,14 @@ def visit_Assign(self, node: ast.Assign) -> None:
if type_comment is None:
annotation = None
else:
- annotation = upgrade_annotation(unstring_annotation(
- ast.Constant(type_comment, lineno=lineno), self.builder.current), self.builder.current)
+ annotation = upgrade_annotation(
+ unstring_annotation(ast.Constant(type_comment, lineno=lineno), self.builder.current),
+ self.builder.current,
+ )
for target in node.targets:
try:
- if isTupleAssignment:=isinstance(target, ast.Tuple):
+ if isTupleAssignment := isinstance(target, ast.Tuple):
# TODO: Only one level of nested tuple is taken into account...
# ideally we would extract al the names declared in the lhs, not
# only the first level ones.
@@ -847,12 +889,13 @@ def visit_Assign(self, node: ast.Assign) -> None:
if not isTupleAssignment:
self._handleInlineDocstrings(node, target)
else:
- for elem in cast(ast.Tuple, target).elts: # mypy is not as smart as pyright yet.
+ for elem in cast(ast.Tuple, target).elts: # mypy is not as smart as pyright yet.
self._handleInlineDocstrings(node, elem)
def visit_AnnAssign(self, node: ast.AnnAssign) -> None:
- annotation = upgrade_annotation(unstring_annotation(
- node.annotation, self.builder.current), self.builder.current)
+ annotation = upgrade_annotation(
+ unstring_annotation(node.annotation, self.builder.current), self.builder.current
+ )
try:
self._handleAssignment(node.target, annotation, node.value, node.lineno)
except IgnoreAssignment:
@@ -868,12 +911,12 @@ def _getClassFromMethodContext(self) -> Optional[model.Class]:
if not isinstance(cls, model.Class):
return None
return cls
-
- def _contextualizeTarget(self, target:ast.expr) -> Tuple[model.Documentable, str]:
+
+ def _contextualizeTarget(self, target: ast.expr) -> Tuple[model.Documentable, str]:
"""
- Find out the documentatble wich is the parent of the assignment's target as well as it's name.
+ Find out the documentatble wich is the parent of the assignment's target as well as it's name.
- @returns: Tuple C{parent, name}.
+ @returns: Tuple C{parent, name}.
@raises ValueError: if the target does not bind a new variable.
"""
dottedname = node2dottedname(target)
@@ -884,7 +927,7 @@ def _contextualizeTarget(self, target:ast.expr) -> Tuple[model.Documentable, str
# an instance variable.
# TODO: This currently only works if the first argument of methods
# is named 'self'.
- if (maybe_cls:=self._getClassFromMethodContext()) is None:
+ if (maybe_cls := self._getClassFromMethodContext()) is None:
raise ValueError('using self in unsupported context')
dottedname = dottedname[1:]
parent = maybe_cls
@@ -894,28 +937,26 @@ def _contextualizeTarget(self, target:ast.expr) -> Tuple[model.Documentable, str
parent = self.builder.current
return parent, dottedname[0]
- def _handleInlineDocstrings(self, assign:Union[ast.Assign, ast.AnnAssign], target:ast.expr) -> None:
+ def _handleInlineDocstrings(self, assign: Union[ast.Assign, ast.AnnAssign], target: ast.expr) -> None:
# Process the inline docstrings
try:
parent, name = self._contextualizeTarget(target)
except ValueError:
return
-
+
docstring_node = get_assign_docstring_node(assign)
if docstring_node:
# fetch the target of the inline docstring
attr = parent.contents.get(name)
if attr:
attr.setDocstring(docstring_node)
-
- def visit_AugAssign(self, node:ast.AugAssign) -> None:
+
+ def visit_AugAssign(self, node: ast.AugAssign) -> None:
try:
- self._handleAssignment(node.target, None, node.value,
- node.lineno, augassign=node.op)
+ self._handleAssignment(node.target, None, node.value, node.lineno, augassign=node.op)
except IgnoreAssignment:
pass
-
def visit_Expr(self, node: ast.Expr) -> None:
# Visit's ast.Expr.value with the visitor, used by extensions to visit top-level calls.
self.generic_visit(node)
@@ -926,10 +967,7 @@ def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
self._handleFunctionDef(node, is_async=False)
- def _handleFunctionDef(self,
- node: Union[ast.AsyncFunctionDef, ast.FunctionDef],
- is_async: bool
- ) -> None:
+ def _handleFunctionDef(self, node: Union[ast.AsyncFunctionDef, ast.FunctionDef], is_async: bool) -> None:
# Ignore inner functions.
parent = self.builder.current
if isinstance(parent, model.Function):
@@ -983,7 +1021,7 @@ def _handleFunctionDef(self,
attr.report(f'{attr.fullName()} is both property and classmethod')
if is_staticmethod:
attr.report(f'{attr.fullName()} is both property and staticmethod')
- raise self.SkipNode() # visitor extensions will still be called.
+ raise self.SkipNode() # visitor extensions will still be called.
# Check if it's a new func or exists with an overload
existing_func = parent.contents.get(func_name)
@@ -993,7 +1031,10 @@ def _handleFunctionDef(self,
# which we do not allow. This also ensures that func will have
# properties set for the primary function and not overloads.
if existing_func.signature and is_overload_func:
- existing_func.report(f'{existing_func.fullName()} overload appeared after primary function', lineno_offset=lineno-existing_func.linenumber)
+ existing_func.report(
+ f'{existing_func.fullName()} overload appeared after primary function',
+ lineno_offset=lineno - existing_func.linenumber,
+ )
raise self.IgnoreNode()
# Do not recreate function object, just re-push it
self.builder.push(existing_func, lineno)
@@ -1006,7 +1047,9 @@ def _handleFunctionDef(self,
# Docstring not allowed on overload
if is_overload_func:
docline = extract_docstring_linenum(doc_node)
- func.report(f'{func.fullName()} overload has docstring, unsupported', lineno_offset=docline-func.linenumber)
+ func.report(
+ f'{func.fullName()} overload has docstring, unsupported', lineno_offset=docline - func.linenumber
+ )
else:
func.setDocstring(doc_node)
func.decorators = node.decorator_list
@@ -1031,10 +1074,15 @@ def get_default(index: int) -> Optional[ast.expr]:
return None if index < 0 else defaults[index]
parameters: List[Parameter] = []
+
def add_arg(name: str, kind: Any, default: Optional[ast.expr]) -> None:
default_val = Parameter.empty if default is None else _ValueFormatter(default, ctx=func)
- # this cast() is safe since we're checking if annotations.get(name) is None first
- annotation = Parameter.empty if annotations.get(name) is None else _AnnotationValueFormatter(cast(ast.expr, annotations[name]), ctx=func)
+ # this cast() is safe since we're checking if annotations.get(name) is None first
+ annotation = (
+ Parameter.empty
+ if annotations.get(name) is None
+ else _AnnotationValueFormatter(cast(ast.expr, annotations[name]), ctx=func)
+ )
parameters.append(Parameter(name, kind, default=default_val, annotation=annotation))
for index, arg in enumerate(posonlyargs):
@@ -1056,7 +1104,11 @@ def add_arg(name: str, kind: Any, default: Optional[ast.expr]) -> None:
add_arg(kwarg.arg, Parameter.VAR_KEYWORD, None)
return_type = annotations.get('return')
- return_annotation = Parameter.empty if return_type is None or is_none_literal(return_type) else _AnnotationValueFormatter(return_type, ctx=func)
+ return_annotation = (
+ Parameter.empty
+ if return_type is None or is_none_literal(return_type)
+ else _AnnotationValueFormatter(return_type, ctx=func)
+ )
try:
signature = Signature(parameters, return_annotation=return_annotation)
except ValueError as ex:
@@ -1067,7 +1119,9 @@ def add_arg(name: str, kind: Any, default: Optional[ast.expr]) -> None:
# Only set main function signature if it is a non-overload
if is_overload_func:
- func.overloads.append(model.FunctionOverload(primary=func, signature=signature, decorators=node.decorator_list))
+ func.overloads.append(
+ model.FunctionOverload(primary=func, signature=signature, decorators=node.decorator_list)
+ )
else:
func.signature = signature
@@ -1077,16 +1131,13 @@ def depart_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
def depart_FunctionDef(self, node: ast.FunctionDef) -> None:
self.builder.popFunction()
- def _handlePropertyDef(self,
- node: Union[ast.AsyncFunctionDef, ast.FunctionDef],
- doc_node: Optional[Str],
- lineno: int
- ) -> model.Attribute:
+ def _handlePropertyDef(
+ self, node: Union[ast.AsyncFunctionDef, ast.FunctionDef], doc_node: Optional[Str], lineno: int
+ ) -> model.Attribute:
- attr = self.builder.addAttribute(name=node.name,
- kind=model.DocumentableKind.PROPERTY,
- parent=self.builder.current,
- lineno=lineno)
+ attr = self.builder.addAttribute(
+ name=node.name, kind=model.DocumentableKind.PROPERTY, parent=self.builder.current, lineno=lineno
+ )
attr.setLineNumber(lineno)
if doc_node is not None:
@@ -1114,14 +1165,15 @@ def _handlePropertyDef(self,
return attr
def _annotations_from_function(
- self, func: Union[ast.AsyncFunctionDef, ast.FunctionDef]
- ) -> Mapping[str, Optional[ast.expr]]:
+ self, func: Union[ast.AsyncFunctionDef, ast.FunctionDef]
+ ) -> Mapping[str, Optional[ast.expr]]:
"""Get annotations from a function definition.
@param func: The function definition's AST.
@return: Mapping from argument name to annotation.
The name C{return} is used for the return type.
Unannotated arguments are omitted.
"""
+
def _get_all_args() -> Iterator[ast.arg]:
base_args = func.args
yield from base_args.posonlyargs
@@ -1135,21 +1187,27 @@ def _get_all_args() -> Iterator[ast.arg]:
if kwargs:
kwargs.arg = epydoc2stan.KeywordArgument(kwargs.arg)
yield kwargs
+
def _get_all_ast_annotations() -> Iterator[Tuple[str, Optional[ast.expr]]]:
for arg in _get_all_args():
yield arg.arg, arg.annotation
returns = func.returns
if returns:
yield 'return', returns
+
return {
# Include parameter names even if they're not annotated, so that
# we can use the key set to know which parameters exist and warn
# when non-existing parameters are documented.
- name: None if value is None else upgrade_annotation(unstring_annotation(
- value, self.builder.current), self.builder.current)
+ name: (
+ None
+ if value is None
+ else upgrade_annotation(unstring_annotation(value, self.builder.current), self.builder.current)
+ )
for name, value in _get_all_ast_annotations()
- }
-
+ }
+
+
class _ValueFormatter:
"""
Class to encapsulate a python value and translate it to HTML when calling L{repr()} on the L{_ValueFormatter}.
@@ -1169,50 +1227,53 @@ def __init__(self, value: ast.expr, ctx: model.Documentable):
def __repr__(self) -> str:
"""
- Present the python value as HTML.
+ Present the python value as HTML.
Without the enclosing tags.
"""
- # Using node2stan.node2html instead of flatten(to_stan()).
- # This avoids calling flatten() twice,
+ # Using node2stan.node2html instead of flatten(to_stan()).
+ # This avoids calling flatten() twice,
# but potential XML parser errors caused by XMLString need to be handled later.
return ''.join(node2stan.node2html(self._colorized.to_node(), self._linker))
+
class _AnnotationValueFormatter(_ValueFormatter):
"""
Special L{_ValueFormatter} for function annotations.
"""
+
def __init__(self, value: ast.expr, ctx: model.Function):
super().__init__(value, ctx)
self._linker = linker._AnnotationLinker(ctx)
-
+
def __repr__(self) -> str:
"""
Present the annotation wrapped inside <code> tags.
"""
return '<code>%s</code>' % super().__repr__()
+
DocumentableT = TypeVar('DocumentableT', bound=model.Documentable)
+
class ASTBuilder:
"""
Keeps track of the state of the AST build, creates documentables and adds objects to the system.
"""
+
ModuleVistor = ModuleVistor
def __init__(self, system: model.System):
self.system = system
-
- self.current = cast(model.Documentable, None) # current visited object.
- self.currentMod: Optional[model.Module] = None # current module, set when visiting ast.Module.
-
+
+ self.current = cast(model.Documentable, None) # currently visited object.
+ self.currentMod: Optional[model.Module] = None # current module, set when visiting ast.Module.
+
self._stack: List[model.Documentable] = []
self.ast_cache: Dict[Path, Optional[ast.Module]] = {}
- def _push(self,
- cls: Type[DocumentableT],
- name: str,
- lineno: int,
- parent:Optional[model.Documentable]=None) -> DocumentableT:
+ def _push(
+ self, cls: Type[DocumentableT], name: str, lineno: int, parent: Optional[model.Documentable] = None
+ ) -> DocumentableT:
"""
Create and enter a new object of the given type and add it to the system.
@@ -1220,7 +1281,7 @@ def _push(self,
Used for attributes declared in methods, typically ``__init__``.
"""
obj = cls(self.system, name, parent or self.current)
- self.push(obj, lineno)
+ self.push(obj, lineno)
# make sure push() is called before addObject() since addObject() can trigger a warning for duplicates
# and this relies on the correct parentMod attribute, which is set in push().
self.system.addObject(obj)
@@ -1282,12 +1343,9 @@ def popFunction(self) -> None:
"""
self._pop(self.system.Function)
- def addAttribute(self,
- name: str,
- kind: Optional[model.DocumentableKind],
- parent: model.Documentable,
- lineno: int
- ) -> model.Attribute:
+ def addAttribute(
+ self, name: str, kind: Optional[model.DocumentableKind], parent: model.Documentable, lineno: int
+ ) -> model.Attribute:
"""
Add a new attribute to the system.
"""
@@ -1296,7 +1354,6 @@ def addAttribute(self,
attr.kind = kind
return attr
-
def processModuleAST(self, mod_ast: ast.Module, mod: model.Module) -> None:
for name, node in findModuleLevelAssign(mod_ast):
@@ -1324,8 +1381,8 @@ def parseFile(self, path: Path, ctx: model.Module) -> Optional[ast.Module]:
self.ast_cache[path] = mod
return mod
-
- def parseString(self, py_string:str, ctx: model.Module) -> Optional[ast.Module]:
+
+ def parseString(self, py_string: str, ctx: model.Module) -> Optional[ast.Module]:
mod = None
try:
mod = _parse(py_string)
@@ -1333,27 +1390,26 @@ def parseString(self, py_string:str, ctx: model.Module) -> Optional[ast.Module]:
ctx.report("cannot parse string")
return mod
+
model.System.defaultBuilder = ASTBuilder
+
def findModuleLevelAssign(mod_ast: ast.Module) -> Iterator[Tuple[str, ast.Assign]]:
"""
- Find module level Assign.
+ Find module level Assign.
Yields tuples containing the assignment name and the Assign node.
"""
for node in mod_ast.body:
- if isinstance(node, ast.Assign) and \
- len(node.targets) == 1 and \
- isinstance(node.targets[0], ast.Name):
- yield (node.targets[0].id, node)
+ if isinstance(node, ast.Assign) and len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
+ yield (node.targets[0].id, node)
+
def parseAll(node: ast.Assign, mod: model.Module) -> None:
- """Find and attempt to parse into a list of names the
+ """Find and attempt to parse into a list of names the
C{__all__} variable of a module's AST and set L{Module.all} accordingly."""
if not isinstance(node.value, (ast.List, ast.Tuple)):
- mod.report(
- 'Cannot parse value assigned to "__all__"',
- section='all', lineno_offset=node.lineno)
+ mod.report('Cannot parse value assigned to "__all__"', section='all', lineno_offset=node.lineno)
return
names = []
@@ -1361,29 +1417,27 @@ def parseAll(node: ast.Assign, mod: model.Module) -> None:
try:
name: object = ast.literal_eval(item)
except ValueError:
- mod.report(
- f'Cannot parse element {idx} of "__all__"',
- section='all', lineno_offset=node.lineno)
+ mod.report(f'Cannot parse element {idx} of "__all__"', section='all', lineno_offset=node.lineno)
else:
if isinstance(name, str):
names.append(name)
else:
mod.report(
- f'Element {idx} of "__all__" has '
- f'type "{type(name).__name__}", expected "str"',
- section='all', lineno_offset=node.lineno)
+ f'Element {idx} of "__all__" has ' f'type "{type(name).__name__}", expected "str"',
+ section='all',
+ lineno_offset=node.lineno,
+ )
if mod.all is not None:
- mod.report(
- 'Assignment to "__all__" overrides previous assignment',
- section='all', lineno_offset=node.lineno)
+ mod.report('Assignment to "__all__" overrides previous assignment', section='all', lineno_offset=node.lineno)
mod.all = names
+
def parseDocformat(node: ast.Assign, mod: model.Module) -> None:
"""
- Find C{__docformat__} variable of this
+ Find C{__docformat__} variable of this
module's AST and set L{Module.docformat} accordingly.
-
+
This is all valid::
__docformat__ = "reStructuredText en"
@@ -1396,37 +1450,46 @@ def parseDocformat(node: ast.Assign, mod: model.Module) -> None:
except ValueError:
mod.report(
'Cannot parse value assigned to "__docformat__": not a string',
- section='docformat', lineno_offset=node.lineno)
+ section='docformat',
+ lineno_offset=node.lineno,
+ )
return
-
+
if not isinstance(value, str):
mod.report(
'Cannot parse value assigned to "__docformat__": not a string',
- section='docformat', lineno_offset=node.lineno)
+ section='docformat',
+ lineno_offset=node.lineno,
+ )
return
-
+
if not value.strip():
mod.report(
'Cannot parse value assigned to "__docformat__": empty value',
- section='docformat', lineno_offset=node.lineno)
+ section='docformat',
+ lineno_offset=node.lineno,
+ )
return
-
+
# Language is ignored and parser name is lowercased.
value = value.split(" ", 1)[0].lower()
if mod._docformat is not None:
mod.report(
'Assignment to "__docformat__" overrides previous assignment',
- section='docformat', lineno_offset=node.lineno)
+ section='docformat',
+ lineno_offset=node.lineno,
+ )
mod.docformat = value
+
MODULE_VARIABLES_META_PARSERS: Mapping[str, Callable[[ast.Assign, model.Module], None]] = {
'__all__': parseAll,
- '__docformat__': parseDocformat
+ '__docformat__': parseDocformat,
}
-def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None:
+def setup_pydoctor_extension(r: extensions.ExtRegistrar) -> None:
r.register_astbuilder_visitor(TypeAliasVisitorExt)
r.register_post_processor(model.defaultPostProcess, priority=200)
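
For reference, a minimal sketch of the module-level metadata that parseAll() and
parseDocformat() above recognize; the module and names here are hypothetical, but
the accepted shapes (literal strings in __all__, a parser name plus an optional,
ignored language token in __docformat__) follow the code in this patch.

    # example_module.py -- hypothetical module exercising both meta parsers.
    __docformat__ = "restructuredtext en"  # language token is ignored, name lowercased
    __all__ = ['public_func']  # only literal strings are parsed into Module.all

    def public_func() -> None:
        """Listed in __all__, so counted as part of the public API."""

    def _helper() -> None:
        """Not listed in __all__."""
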
diff --git a/pydoctor/astutils.py b/pydoctor/astutils.py
index 2163c841b..9a1c89ed5 100644
--- a/pydoctor/astutils.py
+++ b/pydoctor/astutils.py
@@ -1,12 +1,26 @@
"""
Various bits of reusable code related to L{ast.AST} node processing.
"""
+
from __future__ import annotations
import inspect
import sys
from numbers import Number
-from typing import Any, Callable, Collection, Iterator, Optional, List, Iterable, Sequence, TYPE_CHECKING, Tuple, Union, cast
+from typing import (
+ Any,
+ Callable,
+ Collection,
+ Iterator,
+ Optional,
+ List,
+ Iterable,
+ Sequence,
+ TYPE_CHECKING,
+ Tuple,
+ Union,
+ cast,
+)
from inspect import BoundArguments, Signature
import ast
@@ -19,6 +33,7 @@
# AST visitors
+
def iter_values(node: ast.AST) -> Iterator[ast.AST]:
for _, value in ast.iter_fields(node):
if isinstance(value, list):
@@ -28,18 +43,20 @@ def iter_values(node: ast.AST) -> Iterator[ast.AST]:
elif isinstance(value, ast.AST):
yield value
+
class NodeVisitor(visitor.PartialVisitor[ast.AST]):
"""
- Generic AST node visitor. This class does not work like L{ast.NodeVisitor},
+ Generic AST node visitor. This class does not work like L{ast.NodeVisitor},
it only visits statements directly within a C{B{body}}. Also, visitor methods can't return anything.
:See: L{visitor} for more information.
"""
+
def generic_visit(self, node: ast.AST) -> None:
"""
- Helper method to visit a node by calling C{visit()} on each child of the node.
- This is useful because this vistitor only visits statements inside C{.body} attribute.
-
+ Helper method to visit a node by calling C{visit()} on each child of the node.
+ This is useful because this visitor only visits statements inside the C{.body} attribute.
+
So if one wants to visit L{ast.Expr} children with their visitor, they should include::
def visit_Expr(self, node:ast.Expr):
@@ -47,7 +64,7 @@ def visit_Expr(self, node:ast.Expr):
"""
for v in iter_values(node):
self.visit(v)
-
+
@classmethod
def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:
"""
@@ -58,13 +75,16 @@ def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:
for child in body:
yield child
-class NodeVisitorExt(visitor.VisitorExt[ast.AST]):
- ...
+
+class NodeVisitorExt(visitor.VisitorExt[ast.AST]): ...
+
_AssingT = Union[ast.Assign, ast.AnnAssign]
-def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:
+
+
+def iterassign(node: _AssingT) -> Iterator[Optional[List[str]]]:
"""
- Utility function to iterate assignments targets.
+ Utility function to iterate over assignment targets.
Useful for all the following AST assignments:
@@ -82,15 +102,16 @@ def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:
>>> from ast import parse
>>> node = parse('self.var = target = thing[0] = node.astext()').body[0]
>>> list(iterassign(node))
-
+
"""
for target in node.targets if isinstance(node, ast.Assign) else [node.target]:
- dottedname = node2dottedname(target)
+ dottedname = node2dottedname(target)
yield dottedname
+
def node2dottedname(node: Optional[ast.AST]) -> Optional[List[str]]:
"""
- Resove expression composed by L{ast.Attribute} and L{ast.Name} nodes to a list of names.
+ Resolve an expression composed of L{ast.Attribute} and L{ast.Name} nodes to a list of names.
"""
parts = []
while isinstance(node, ast.Attribute):
@@ -103,10 +124,10 @@ def node2dottedname(node: Optional[ast.AST]) -> Optional[List[str]]:
parts.reverse()
return parts
-def node2fullname(expr: Optional[ast.AST],
- ctx: model.Documentable | None = None,
- *,
- expandName:Callable[[str], str] | None = None) -> Optional[str]:
+
+def node2fullname(
+ expr: Optional[ast.AST], ctx: model.Documentable | None = None, *, expandName: Callable[[str], str] | None = None
+) -> Optional[str]:
if expandName is None:
if ctx is None:
raise TypeError('this function takes exactly two arguments')
@@ -119,6 +140,7 @@ def node2fullname(expr: Optional[ast.AST],
return None
return expandName('.'.join(dottedname))
+
def bind_args(sig: Signature, call: ast.Call) -> BoundArguments:
"""
Binds the arguments of a function call to that function's signature.
@@ -130,49 +152,56 @@ def bind_args(sig: Signature, call: ast.Call) -> BoundArguments:
# When keywords are passed using '**kwargs', the 'arg' field will
# be None. We don't currently support keywords passed that way.
if kw.arg is not None
- }
+ }
return sig.bind(*call.args, **kwargs)
-def get_str_value(expr:ast.expr) -> Optional[str]:
+def get_str_value(expr: ast.expr) -> Optional[str]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, str):
return expr.value
return None
-def get_num_value(expr:ast.expr) -> Optional[Number]:
+
+
+def get_num_value(expr: ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, Number):
return expr.value
return None
+
+
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Constant) and expr.value == s
+
def get_int_value(expr: ast.expr) -> Optional[int]:
num = get_num_value(expr)
if isinstance(num, int):
- return num # type:ignore[unreachable]
+ return num # type:ignore[unreachable]
return None
+
def is__name__equals__main__(cmp: ast.Compare) -> bool:
"""
Returns whether or not the given L{ast.Compare} is equal to C{__name__ == '__main__'}.
"""
- return isinstance(cmp.left, ast.Name) \
- and cmp.left.id == '__name__' \
- and len(cmp.ops) == 1 \
- and isinstance(cmp.ops[0], ast.Eq) \
- and len(cmp.comparators) == 1 \
- and _is_str_constant(cmp.comparators[0], '__main__')
+ return (
+ isinstance(cmp.left, ast.Name)
+ and cmp.left.id == '__name__'
+ and len(cmp.ops) == 1
+ and isinstance(cmp.ops[0], ast.Eq)
+ and len(cmp.comparators) == 1
+ and _is_str_constant(cmp.comparators[0], '__main__')
+ )
+
-def is_using_typing_final(expr: Optional[ast.AST],
- ctx:'model.Documentable') -> bool:
+def is_using_typing_final(expr: Optional[ast.AST], ctx: 'model.Documentable') -> bool:
return is_using_annotations(expr, ("typing.Final", "typing_extensions.Final"), ctx)
-def is_using_typing_classvar(expr: Optional[ast.AST],
- ctx:'model.Documentable') -> bool:
+
+def is_using_typing_classvar(expr: Optional[ast.AST], ctx: 'model.Documentable') -> bool:
return is_using_annotations(expr, ('typing.ClassVar', "typing_extensions.ClassVar"), ctx)
-def is_using_annotations(expr: Optional[ast.AST],
- annotations:Sequence[str],
- ctx:'model.Documentable') -> bool:
+
+def is_using_annotations(expr: Optional[ast.AST], annotations: Sequence[str], ctx: 'model.Documentable') -> bool:
"""
Detect whether this expression's outermost name matches one of the specified annotations' full names.
"""
@@ -188,10 +217,11 @@ def is_using_annotations(expr: Optional[ast.AST],
return True
return False
+
def get_node_block(node: ast.AST) -> tuple[ast.AST, str]:
"""
- Tell in wich block the given node lives in.
-
+ Tell which block the given node lives in.
+
A block is defined by a tuple: (parent node, fieldname)
"""
try:
@@ -205,7 +235,8 @@ def get_node_block(node: ast.AST) -> tuple[ast.AST, str]:
raise ValueError(f"node {node} not found in {parent}")
return parent, fieldname
-def get_assign_docstring_node(assign:ast.Assign | ast.AnnAssign) -> Str | None:
+
+def get_assign_docstring_node(assign: ast.Assign | ast.AnnAssign) -> Str | None:
"""
Get the docstring for a L{ast.Assign} or L{ast.AnnAssign} node.
@@ -215,25 +246,26 @@ def get_assign_docstring_node(assign:ast.Assign | ast.AnnAssign) -> Str | None:
# if this call raises a ValueError it means that we're doing something nasty with the ast...
parent_node, fieldname = get_node_block(assign)
statements = getattr(parent_node, fieldname, None)
-
+
if isinstance(statements, Sequence):
- # it must be a sequence if it's not None since an assignment
+ # it must be a sequence if it's not None since an assignment
# can only be a part of a compound statement.
assign_index = statements.index(assign)
try:
- right_sibling = statements[assign_index+1]
+ right_sibling = statements[assign_index + 1]
except IndexError:
return None
- if isinstance(right_sibling, ast.Expr) and \
- get_str_value(right_sibling.value) is not None:
+ if isinstance(right_sibling, ast.Expr) and get_str_value(right_sibling.value) is not None:
return cast(Str, right_sibling.value)
return None
+
def is_none_literal(node: ast.expr) -> bool:
"""Does this AST node represent the literal constant None?"""
return isinstance(node, ast.Constant) and node.value is None
-
-def unstring_annotation(node: ast.expr, ctx:'model.Documentable', section:str='annotation') -> ast.expr:
+
+
+def unstring_annotation(node: ast.expr, ctx: 'model.Documentable', section: str = 'annotation') -> ast.expr:
"""Replace all strings in the given expression by parsed versions.
@return: The unstringed node. If parsing fails, an error is logged
and the original node is returned.
@@ -249,6 +281,7 @@ def unstring_annotation(node: ast.expr, ctx:'model.Documentable', section:str='a
assert isinstance(expr, ast.expr), expr
return expr
+
class _AnnotationStringParser(ast.NodeTransformer):
"""Implementation of L{unstring_annotation()}.
@@ -262,7 +295,7 @@ def _parse_string(self, value: str) -> ast.expr:
statements = ast.parse(value).body
if len(statements) != 1:
raise SyntaxError("expected expression, found multiple statements")
- stmt, = statements
+ (stmt,) = statements
if isinstance(stmt, ast.Expr):
# Expression wrapped in an Expr statement.
expr = self.visit(stmt.value)
@@ -286,7 +319,7 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.Subscript:
def visit_fast(self, node: ast.expr) -> ast.expr:
return node
-
+
visit_Attribute = visit_Name = visit_fast
def visit_Constant(self, node: ast.Constant) -> ast.expr:
@@ -298,29 +331,33 @@ def visit_Constant(self, node: ast.Constant) -> ast.expr:
assert isinstance(const, ast.Constant), const
return const
-def upgrade_annotation(node: ast.expr, ctx: model.Documentable, section:str='annotation') -> ast.expr:
+
+def upgrade_annotation(node: ast.expr, ctx: model.Documentable, section: str = 'annotation') -> ast.expr:
"""
- Transform the annotation to use python 3.10+ syntax.
+ Transform the annotation to use Python 3.10+ syntax.
"""
return _UpgradeDeprecatedAnnotations(ctx).visit(node)
+
class _UpgradeDeprecatedAnnotations(ast.NodeTransformer):
if TYPE_CHECKING:
- def visit(self, node:ast.AST) -> ast.expr:...
+
+ def visit(self, node: ast.AST) -> ast.expr: ...
def __init__(self, ctx: model.Documentable) -> None:
- def _node2fullname(node:ast.expr) -> str | None:
+ def _node2fullname(node: ast.expr) -> str | None:
return node2fullname(node, expandName=ctx.expandAnnotationName)
+
self.node2fullname = _node2fullname
- def _union_args_to_bitor(self, args: list[ast.expr], ctxnode:ast.AST) -> ast.BinOp:
+ def _union_args_to_bitor(self, args: list[ast.expr], ctxnode: ast.AST) -> ast.BinOp:
assert len(args) > 1
*others, right = args
if len(others) == 1:
rnode = ast.BinOp(left=others[0], right=right, op=ast.BitOr())
else:
rnode = ast.BinOp(left=self._union_args_to_bitor(others, ctxnode), right=right, op=ast.BitOr())
-
+
return ast.fix_missing_locations(ast.copy_location(rnode, ctxnode))
def visit_Name(self, node: ast.Name | ast.Attribute) -> Any:
@@ -328,7 +365,7 @@ def visit_Name(self, node: ast.Name | ast.Attribute) -> Any:
if fullName in DEPRECATED_TYPING_ALIAS_BUILTINS:
return ast.Name(id=DEPRECATED_TYPING_ALIAS_BUILTINS[fullName], ctx=ast.Load())
# TODO: Support all deprecated aliases including the ones in the collections.abc module.
- # In order to support that we need to generate the parsed docstring directly and include
+ # In order to support that we need to generate the parsed docstring directly and include
# custom refmap or transform the ast such that missing imports are added.
return node
@@ -338,9 +375,9 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.expr:
node.value = self.visit(node.value)
node.slice = self.visit(node.slice)
fullName = self.node2fullname(node.value)
-
+
if fullName == 'typing.Union':
- # typing.Union can be used with a single type or a
+ # typing.Union can be used with a single type or a
# tuple of types, including a single-element tuple, which is the same
# as directly using the type: Union[x] == Union[(x,)] == x
slice_ = node.slice
@@ -352,7 +389,7 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.expr:
return args[0]
elif isinstance(slice_, (ast.Attribute, ast.Name, ast.Subscript, ast.BinOp)):
return slice_
-
+
elif fullName == 'typing.Optional':
# typing.Optional requires a single type, so we don't process when slice is a tuple.
slice_ = node.slice
@@ -360,15 +397,16 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.expr:
return self._union_args_to_bitor([slice_, ast.Constant(value=None)], node)
return node
-
+
+
DEPRECATED_TYPING_ALIAS_BUILTINS = {
- "typing.Text": 'str',
- "typing.Dict": 'dict',
- "typing.Tuple": 'tuple',
- "typing.Type": 'type',
- "typing.List": 'list',
- "typing.Set": 'set',
- "typing.FrozenSet": 'frozenset',
+ "typing.Text": 'str',
+ "typing.Dict": 'dict',
+ "typing.Tuple": 'tuple',
+ "typing.Type": 'type',
+ "typing.List": 'list',
+ "typing.Set": 'set',
+ "typing.FrozenSet": 'frozenset',
}
# These do not belong in the deprecated builtins aliases, so we make sure it doesn't happen.
@@ -376,100 +414,103 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.expr:
assert 'typing.Optional' not in DEPRECATED_TYPING_ALIAS_BUILTINS
TYPING_ALIAS = (
- "typing.Hashable",
- "typing.Awaitable",
- "typing.Coroutine",
- "typing.AsyncIterable",
- "typing.AsyncIterator",
- "typing.Iterable",
- "typing.Iterator",
- "typing.Reversible",
- "typing.Sized",
- "typing.Container",
- "typing.Collection",
- "typing.Callable",
- "typing.AbstractSet",
- "typing.MutableSet",
- "typing.Mapping",
- "typing.MutableMapping",
- "typing.Sequence",
- "typing.MutableSequence",
- "typing.ByteString",
- "typing.Deque",
- "typing.MappingView",
- "typing.KeysView",
- "typing.ItemsView",
- "typing.ValuesView",
- "typing.ContextManager",
- "typing.AsyncContextManager",
- "typing.DefaultDict",
- "typing.OrderedDict",
- "typing.Counter",
- "typing.ChainMap",
- "typing.Generator",
- "typing.AsyncGenerator",
- "typing.Pattern",
- "typing.Match",
- # Special forms
- "typing.Union",
- "typing.Literal",
- "typing.Optional",
- *DEPRECATED_TYPING_ALIAS_BUILTINS,
- )
+ "typing.Hashable",
+ "typing.Awaitable",
+ "typing.Coroutine",
+ "typing.AsyncIterable",
+ "typing.AsyncIterator",
+ "typing.Iterable",
+ "typing.Iterator",
+ "typing.Reversible",
+ "typing.Sized",
+ "typing.Container",
+ "typing.Collection",
+ "typing.Callable",
+ "typing.AbstractSet",
+ "typing.MutableSet",
+ "typing.Mapping",
+ "typing.MutableMapping",
+ "typing.Sequence",
+ "typing.MutableSequence",
+ "typing.ByteString",
+ "typing.Deque",
+ "typing.MappingView",
+ "typing.KeysView",
+ "typing.ItemsView",
+ "typing.ValuesView",
+ "typing.ContextManager",
+ "typing.AsyncContextManager",
+ "typing.DefaultDict",
+ "typing.OrderedDict",
+ "typing.Counter",
+ "typing.ChainMap",
+ "typing.Generator",
+ "typing.AsyncGenerator",
+ "typing.Pattern",
+ "typing.Match",
+ # Special forms
+ "typing.Union",
+ "typing.Literal",
+ "typing.Optional",
+ *DEPRECATED_TYPING_ALIAS_BUILTINS,
+)
SUBSCRIPTABLE_CLASSES_PEP585 = (
- "tuple",
- "list",
- "dict",
- "set",
- "frozenset",
- "type",
- "builtins.tuple",
- "builtins.list",
- "builtins.dict",
- "builtins.set",
- "builtins.frozenset",
- "builtins.type",
- "collections.deque",
- "collections.defaultdict",
- "collections.OrderedDict",
- "collections.Counter",
- "collections.ChainMap",
- "collections.abc.Awaitable",
- "collections.abc.Coroutine",
- "collections.abc.AsyncIterable",
- "collections.abc.AsyncIterator",
- "collections.abc.AsyncGenerator",
- "collections.abc.Iterable",
- "collections.abc.Iterator",
- "collections.abc.Generator",
- "collections.abc.Reversible",
- "collections.abc.Container",
- "collections.abc.Collection",
- "collections.abc.Callable",
- "collections.abc.Set",
- "collections.abc.MutableSet",
- "collections.abc.Mapping",
- "collections.abc.MutableMapping",
- "collections.abc.Sequence",
- "collections.abc.MutableSequence",
- "collections.abc.ByteString",
- "collections.abc.MappingView",
- "collections.abc.KeysView",
- "collections.abc.ItemsView",
- "collections.abc.ValuesView",
- "contextlib.AbstractContextManager",
- "contextlib.AbstractAsyncContextManager",
- "re.Pattern",
- "re.Match",
- )
+ "tuple",
+ "list",
+ "dict",
+ "set",
+ "frozenset",
+ "type",
+ "builtins.tuple",
+ "builtins.list",
+ "builtins.dict",
+ "builtins.set",
+ "builtins.frozenset",
+ "builtins.type",
+ "collections.deque",
+ "collections.defaultdict",
+ "collections.OrderedDict",
+ "collections.Counter",
+ "collections.ChainMap",
+ "collections.abc.Awaitable",
+ "collections.abc.Coroutine",
+ "collections.abc.AsyncIterable",
+ "collections.abc.AsyncIterator",
+ "collections.abc.AsyncGenerator",
+ "collections.abc.Iterable",
+ "collections.abc.Iterator",
+ "collections.abc.Generator",
+ "collections.abc.Reversible",
+ "collections.abc.Container",
+ "collections.abc.Collection",
+ "collections.abc.Callable",
+ "collections.abc.Set",
+ "collections.abc.MutableSet",
+ "collections.abc.Mapping",
+ "collections.abc.MutableMapping",
+ "collections.abc.Sequence",
+ "collections.abc.MutableSequence",
+ "collections.abc.ByteString",
+ "collections.abc.MappingView",
+ "collections.abc.KeysView",
+ "collections.abc.ItemsView",
+ "collections.abc.ValuesView",
+ "contextlib.AbstractContextManager",
+ "contextlib.AbstractAsyncContextManager",
+ "re.Pattern",
+ "re.Match",
+)
+
def is_typing_annotation(node: ast.AST, ctx: 'model.Documentable') -> bool:
"""
Whether this annotation node refers to a typing alias.
"""
- return is_using_annotations(node, TYPING_ALIAS, ctx) or \
- is_using_annotations(node, SUBSCRIPTABLE_CLASSES_PEP585, ctx)
+ return is_using_annotations(node, TYPING_ALIAS, ctx) or is_using_annotations(
+ node, SUBSCRIPTABLE_CLASSES_PEP585, ctx
+ )
+
def get_docstring_node(node: ast.AST) -> Str | None:
"""
@@ -484,15 +525,17 @@ def get_docstring_node(node: ast.AST) -> Str | None:
return node.value
return None
+
class _StrMeta(type):
def __instancecheck__(self, instance: object) -> bool:
if isinstance(instance, ast.expr):
return get_str_value(instance) is not None
return False
+
class Str(ast.expr, metaclass=_StrMeta):
"""
- Wraps ast.Constant/ast.Str for `isinstance` checks and annotations.
+ Wraps ast.Constant/ast.Str for `isinstance` checks and annotations.
Ensures that the value is actually a string.
Do not try to instantiate this class.
"""
@@ -502,6 +545,7 @@ class Str(ast.expr, metaclass=_StrMeta):
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise TypeError(f'{Str.__qualname__} cannot be instanciated')
+
def extract_docstring_linenum(node: Str) -> int:
r"""
In older CPython versions, the AST only tells us the end line
@@ -522,14 +566,15 @@ def extract_docstring_linenum(node: Str) -> int:
lineno += 1
elif not ch.isspace():
break
-
+
return lineno
+
def extract_docstring(node: Str) -> Tuple[int, str]:
"""
Extract docstring information from an ast node that represents the docstring.
- @returns:
+ @returns:
- The line number of the first non-blank line of the docstring. See L{extract_docstring_linenum}.
- The docstring to be parsed, cleaned by L{inspect.cleandoc}.
"""
@@ -554,6 +599,7 @@ def infer_type(expr: ast.expr) -> Optional[ast.expr]:
else:
return ast.fix_missing_locations(ast.copy_location(ann, expr))
+
def _annotation_for_value(value: object) -> Optional[ast.expr]:
if value is None:
return None
@@ -569,11 +615,10 @@ def _annotation_for_value(value: object) -> Optional[ast.expr]:
if ann_elem is not None:
if name == 'tuple':
ann_elem = ast.Tuple(elts=[ann_elem, ast.Constant(value=...)], ctx=ast.Load())
- return ast.Subscript(value=ast.Name(id=name, ctx=ast.Load()),
- slice=ann_elem,
- ctx=ast.Load())
+ return ast.Subscript(value=ast.Name(id=name, ctx=ast.Load()), slice=ann_elem, ctx=ast.Load())
return ast.Name(id=name, ctx=ast.Load())
+
def _annotation_for_elements(sequence: Iterable[object]) -> Optional[ast.expr]:
names = set()
for elem in sequence:
@@ -590,11 +635,12 @@ def _annotation_for_elements(sequence: Iterable[object]) -> Optional[ast.expr]:
# Empty sequence or no uniform type.
return None
-
+
class Parentage(ast.NodeVisitor):
"""
Add C{parent} attribute to ast nodes instances.
"""
+
def __init__(self) -> None:
self.current: ast.AST | None = None
@@ -606,21 +652,25 @@ def generic_visit(self, node: ast.AST) -> None:
self.generic_visit(child)
self.current = current
-def get_parents(node:ast.AST) -> Iterator[ast.AST]:
+
+def get_parents(node: ast.AST) -> Iterator[ast.AST]:
"""
Once nodes have the C{.parent} attribute set by L{Parentage}, use this function
to get an iterator over all parents of the given node, up to the root module.
"""
- def _yield_parents(n:Optional[ast.AST]) -> Iterator[ast.AST]:
+
+ def _yield_parents(n: Optional[ast.AST]) -> Iterator[ast.AST]:
if n:
yield n
p = cast(ast.AST, getattr(n, 'parent', None))
yield from _yield_parents(p)
+
yield from _yield_parents(getattr(node, 'parent', None))
-#Part of the astor library for Python AST manipulation.
-#License: 3-clause BSD
-#Copyright (c) 2015 Patrick Maupin
+
+# Part of the astor library for Python AST manipulation.
+# License: 3-clause BSD
+# Copyright (c) 2015 Patrick Maupin
_op_data = """
GeneratorExp 1
@@ -691,49 +741,60 @@ def _yield_parents(n:Optional[ast.AST]) -> Iterator[ast.AST]:
Constant 1
"""
-_op_data = [x.split() for x in _op_data.splitlines()] # type:ignore
-_op_data = [[x[0], ' '.join(x[1:-1]), int(x[-1])] for x in _op_data if x] # type:ignore
+_op_data = [x.split() for x in _op_data.splitlines()] # type:ignore
+_op_data = [[x[0], ' '.join(x[1:-1]), int(x[-1])] for x in _op_data if x] # type:ignore
for _index in range(1, len(_op_data)):
- _op_data[_index][2] *= 2 # type:ignore
- _op_data[_index][2] += _op_data[_index - 1][2] # type:ignore
+ _op_data[_index][2] *= 2 # type:ignore
+ _op_data[_index][2] += _op_data[_index - 1][2] # type:ignore
_deprecated: Collection[str] = ()
if sys.version_info >= (3, 12):
_deprecated = ('Num', 'Str', 'Bytes', 'Ellipsis', 'NameConstant')
-_precedence_data = dict((getattr(ast, x, None), z) for x, y, z in _op_data if x not in _deprecated) # type:ignore
-_symbol_data = dict((getattr(ast, x, None), y) for x, y, z in _op_data if x not in _deprecated) # type:ignore
+_precedence_data = dict((getattr(ast, x, None), z) for x, y, z in _op_data if x not in _deprecated) # type:ignore
+_symbol_data = dict((getattr(ast, x, None), y) for x, y, z in _op_data if x not in _deprecated) # type:ignore
+
class op_util:
"""
This class provides data and functions for mapping
AST nodes to symbols and precedences.
"""
+
@classmethod
- def get_op_symbol(cls, obj:ast.operator|ast.boolop|ast.cmpop|ast.unaryop,
- fmt:str='%s',
- symbol_data:dict[type[ast.AST]|None, str]=_symbol_data,
- type:Callable[[object], type[Any]]=type) -> str:
- """Given an AST node object, returns a string containing the symbol.
- """
+ def get_op_symbol(
+ cls,
+ obj: ast.operator | ast.boolop | ast.cmpop | ast.unaryop,
+ fmt: str = '%s',
+ symbol_data: dict[type[ast.AST] | None, str] = _symbol_data,
+ type: Callable[[object], type[Any]] = type,
+ ) -> str:
+ """Given an AST node object, returns a string containing the symbol."""
return fmt % symbol_data[type(obj)]
+
@classmethod
- def get_op_precedence(cls, obj:ast.AST,
- precedence_data:dict[type[ast.AST]|None, int]=_precedence_data,
- type:Callable[[object], type[Any]]=type) -> int:
+ def get_op_precedence(
+ cls,
+ obj: ast.AST,
+ precedence_data: dict[type[ast.AST] | None, int] = _precedence_data,
+ type: Callable[[object], type[Any]] = type,
+ ) -> int:
"""Given an AST node object, returns the precedence.
- @raises KeyError: If the node is not explicitely supported by this function.
+ @raises KeyError: If the node is not explicitly supported by this function.
This is legacy code; all calls to L{get_op_precedence} should be
guarded in a C{try:... except KeyError:...} statement.
"""
return precedence_data[type(obj)]
if not TYPE_CHECKING:
+
class Precedence(object):
vars().update((cast(str, x), z) for x, _, z in _op_data)
highest = max(cast(int, z) for _, _, z in _op_data) + 2
+
else:
Precedence: Any
+
del _op_data, _index, _precedence_data, _symbol_data, _deprecated
# This was part of the astor library for Python AST manipulation.
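
As a sanity check on the _UpgradeDeprecatedAnnotations rewrite above, here is a
standalone sketch (not pydoctor API; it folds the union iteratively where the
patch recurses) of the PEP 604 form produced for typing.Union arguments.

    import ast

    def union_to_bitor(args: list[ast.expr]) -> ast.expr:
        # Fold Union[a, b, c] into the equivalent chain a | b | c of
        # ast.BinOp/ast.BitOr nodes, like _union_args_to_bitor() above.
        node = args[0]
        for right in args[1:]:
            node = ast.BinOp(left=node, op=ast.BitOr(), right=right)
        return ast.fix_missing_locations(node)

    names = [ast.Name(id=n, ctx=ast.Load()) for n in ('int', 'str')]
    expr = union_to_bitor(names + [ast.Constant(value=None)])
    print(ast.unparse(expr))  # int | str | None
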
diff --git a/pydoctor/driver.py b/pydoctor/driver.py
index 221d7de52..972a94ebf 100644
--- a/pydoctor/driver.py
+++ b/pydoctor/driver.py
@@ -1,7 +1,8 @@
"""The entry point."""
+
from __future__ import annotations
-from typing import Sequence
+from typing import Sequence
import datetime
import os
import sys
@@ -17,26 +18,28 @@
# On older versions, a compatibility package must be installed from PyPI.
import importlib.resources as importlib_resources
+
def get_system(options: model.Options) -> model.System:
"""
Get a system with the defined options. Load packages and modules.
"""
- cache = prepareCache(clearCache=options.clear_intersphinx_cache,
- enableCache=options.enable_intersphinx_cache,
- cachePath=options.intersphinx_cache_path,
- maxAge=options.intersphinx_cache_max_age)
+ cache = prepareCache(
+ clearCache=options.clear_intersphinx_cache,
+ enableCache=options.enable_intersphinx_cache,
+ cachePath=options.intersphinx_cache_path,
+ maxAge=options.intersphinx_cache_max_age,
+ )
# step 1: make/find the system
system = options.systemclass(options)
system.fetchIntersphinxInventories(cache)
- cache.close() # Fixes ResourceWarning: unclosed
+ cache.close() # Fixes ResourceWarning: unclosed
# TODO: load buildtime with default factory and converter in model.Options
# Support source date epoch:
# https://reproducible-builds.org/specs/source-date-epoch/
try:
- system.buildtime = datetime.datetime.utcfromtimestamp(
- int(os.environ['SOURCE_DATE_EPOCH']))
+ system.buildtime = datetime.datetime.utcfromtimestamp(int(os.environ['SOURCE_DATE_EPOCH']))
except ValueError as e:
error(str(e))
except KeyError:
@@ -44,11 +47,10 @@ def get_system(options: model.Options) -> model.System:
# Load custom buildtime
if options.buildtime:
try:
- system.buildtime = datetime.datetime.strptime(
- options.buildtime, BUILDTIME_FORMAT)
+ system.buildtime = datetime.datetime.strptime(options.buildtime, BUILDTIME_FORMAT)
except ValueError as e:
error(str(e))
-
+
# step 1.5: create the builder
builderT = system.systemBuilder
@@ -79,37 +81,38 @@ def get_system(options: model.Options) -> model.System:
return system
+
def make(system: model.System) -> None:
"""
- Produce the html/intersphinx output, as configured in the system's options.
+ Produce the html/intersphinx output, as configured in the system's options.
"""
options = system.options
# step 4: make html, if desired
if options.makehtml:
options.makeintersphinx = True
-
- system.msg('html', 'writing html to %s using %s.%s'%(
- options.htmloutput, options.htmlwriter.__module__,
- options.htmlwriter.__name__))
+
+ system.msg(
+ 'html',
+ 'writing html to %s using %s.%s'
+ % (options.htmloutput, options.htmlwriter.__module__, options.htmlwriter.__name__),
+ )
writer: IWriter
-
+
# Always init the writer with the 'base' set of templates at least.
- template_lookup = TemplateLookup(
- importlib_resources.files('pydoctor.themes') / 'base')
-
+ template_lookup = TemplateLookup(importlib_resources.files('pydoctor.themes') / 'base')
+
# Handle theme selection, 'classic' by default.
if system.options.theme != 'base':
- template_lookup.add_templatedir(
- importlib_resources.files('pydoctor.themes') / system.options.theme)
+ template_lookup.add_templatedir(importlib_resources.files('pydoctor.themes') / system.options.theme)
# Handle custom HTML templates
if system.options.templatedir:
try:
for t in system.options.templatedir:
template_lookup.add_templatedir(Path(t))
- except TemplateError as e:
+ except TemplateError as e:
error(str(e))
build_directory = Path(options.htmloutput)
@@ -128,7 +131,7 @@ def make(system: model.System) -> None:
writer.writeIndividualFiles(subjects)
if not options.htmlsubjects:
writer.writeLinks(system)
-
+
if options.makeintersphinx:
if not options.makehtml:
subjects = system.rootobjects
@@ -137,13 +140,14 @@ def make(system: model.System) -> None:
logger=system.msg,
project_name=system.projectname,
project_version=system.options.projectversion,
- )
+ )
if not os.path.exists(options.htmloutput):
os.makedirs(options.htmloutput)
sphinx_inventory.generate(
subjects=subjects,
basepath=options.htmloutput,
- )
+ )
+
def main(args: Sequence[str] = sys.argv[1:]) -> int:
"""
@@ -163,7 +167,7 @@ def main(args: Sequence[str] = sys.argv[1:]) -> int:
# Build model
system = get_system(options)
-
+
# Produce output (HTML, JSON, etc.)
make(system)
@@ -174,10 +178,10 @@ def main(args: Sequence[str] = sys.argv[1:]) -> int:
def p(msg: str) -> None:
system.msg('docstring-summary', msg, thresh=-1, topthresh=1)
- p("these %s objects' docstrings contain syntax errors:"
- %(len(docstring_syntax_errors),))
+
+ p("these %s objects' docstrings contain syntax errors:" % (len(docstring_syntax_errors),))
for fn in sorted(docstring_syntax_errors):
- p(' '+fn)
+ p(' ' + fn)
# If there is any other kind of parse errors, exit with code 2 as well.
# This applies to errors generated from colorizing AST.
@@ -187,11 +191,12 @@ def p(msg: str) -> None:
if system.violations and options.warnings_as_errors:
# Update exit code if the run has produced warnings.
exitcode = 3
-
+
except:
if options.pdb:
import pdb
+
pdb.post_mortem(sys.exc_info()[2])
raise
-
+
return exitcode
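
The SOURCE_DATE_EPOCH handling reformatted above follows the reproducible-builds
convention; a quick sketch of the behavior (the epoch value here is made up):

    import datetime
    import os

    # With SOURCE_DATE_EPOCH set, the build timestamp comes from the
    # environment instead of the wall clock, as in get_system() above.
    os.environ['SOURCE_DATE_EPOCH'] = '1700000000'  # hypothetical value
    buildtime = datetime.datetime.utcfromtimestamp(int(os.environ['SOURCE_DATE_EPOCH']))
    print(buildtime.isoformat())  # 2023-11-14T22:13:20
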
diff --git a/pydoctor/epydoc/__init__.py b/pydoctor/epydoc/__init__.py
index 7976685aa..692f61333 100644
--- a/pydoctor/epydoc/__init__.py
+++ b/pydoctor/epydoc/__init__.py
@@ -62,4 +62,3 @@
# - Add a faq?
# - @type a,b,c: ...
# - new command line option: --command-line-order
-
diff --git a/pydoctor/epydoc/doctest.py b/pydoctor/epydoc/doctest.py
index 442ef131e..196aa50cc 100644
--- a/pydoctor/epydoc/doctest.py
+++ b/pydoctor/epydoc/doctest.py
@@ -21,11 +21,39 @@
#: A list of the names of all Python keywords.
_KEYWORDS = [
- 'and', 'as', 'assert', 'async', 'await', 'break', 'class', 'continue',
- 'def', 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global',
- 'if', 'import', 'in', 'is', 'lambda', 'nonlocal', 'not', 'or', 'pass',
- 'raise', 'return', 'try', 'while', 'with', 'yield'
- ]
+ 'and',
+ 'as',
+ 'assert',
+ 'async',
+ 'await',
+ 'break',
+ 'class',
+ 'continue',
+ 'def',
+ 'del',
+ 'elif',
+ 'else',
+ 'except',
+ 'finally',
+ 'for',
+ 'from',
+ 'global',
+ 'if',
+ 'import',
+ 'in',
+ 'is',
+ 'lambda',
+ 'nonlocal',
+ 'not',
+ 'or',
+ 'pass',
+ 'raise',
+ 'return',
+ 'try',
+ 'while',
+ 'with',
+ 'yield',
+]
# The following are technically keywords since Python 3,
# but we don't want to colorize them as such: 'None', 'True', 'False'.
@@ -40,8 +68,8 @@
#: A regexp group that matches Python strings.
_STRING_GRP = '|'.join(
- [r'("""("""|.*?((?!").)"""))', r'("("|.*?((?!").)"))',
- r"('''('''|.*?[^\\']'''))", r"('('|.*?[^\\']'))"])
+ [r'("""("""|.*?((?!").)"""))', r'("("|.*?((?!").)"))', r"('''('''|.*?[^\\']'''))", r"('('|.*?[^\\']'))"]
+)
#: A regexp group that matches Python comments.
_COMMENT_GRP = '(#.*?$)'
@@ -59,16 +87,13 @@
DEFINE_FUNC_RE = re.compile(r'(?P<def>\w+)(?P<space>\s+)(?P<name>\w+)')
#: A regexp that matches Python prompts
-PROMPT_RE = re.compile(f'({_PROMPT1_GRP}|{_PROMPT2_GRP})',
- re.MULTILINE | re.DOTALL)
+PROMPT_RE = re.compile(f'({_PROMPT1_GRP}|{_PROMPT2_GRP})', re.MULTILINE | re.DOTALL)
#: A regexp that matches Python "..." prompts.
-PROMPT2_RE = re.compile(f'({_PROMPT2_GRP})',
- re.MULTILINE | re.DOTALL)
+PROMPT2_RE = re.compile(f'({_PROMPT2_GRP})', re.MULTILINE | re.DOTALL)
#: A regexp that matches doctest exception blocks.
-EXCEPT_RE = re.compile(r'^[ \t]*Traceback \(most recent call last\):.*',
- re.DOTALL | re.MULTILINE)
+EXCEPT_RE = re.compile(r'^[ \t]*Traceback \(most recent call last\):.*', re.DOTALL | re.MULTILINE)
#: A regexp that matches doctest directives.
DOCTEST_DIRECTIVE_RE = re.compile(r'#[ \t]*doctest:.*')
@@ -77,17 +102,19 @@
#: that should be colored.
DOCTEST_RE = re.compile(
'('
- rf'(?P<STRING>{_STRING_GRP})|(?P<COMMENT>{_COMMENT_GRP})|'
- rf'(?P<DEFINE>{_DEFINE_GRP})|'
- rf'(?P<KEYWORD>{_KEYWORD_GRP})|(?P<BUILTIN>{_BUILTIN_GRP})|'
- rf'(?P<PROMPT1>{_PROMPT1_GRP})|(?P<PROMPT2>{_PROMPT2_GRP})|(?P<EOS>\Z)'
+ rf'(?P<STRING>{_STRING_GRP})|(?P<COMMENT>{_COMMENT_GRP})|'
+ rf'(?P<DEFINE>{_DEFINE_GRP})|'
+ rf'(?P<KEYWORD>{_KEYWORD_GRP})|(?P<BUILTIN>{_BUILTIN_GRP})|'
+ rf'(?P<PROMPT1>{_PROMPT1_GRP})|(?P<PROMPT2>{_PROMPT2_GRP})|(?P<EOS>\Z)'
')',
- re.MULTILINE | re.DOTALL)
+ re.MULTILINE | re.DOTALL,
+)
#: This regular expression is used to find doctest examples in a
#: string. This is copied from the standard Python doctest.py
#: module (after the refactoring in Python 2.4+).
-DOCTEST_EXAMPLE_RE = re.compile(r'''
+DOCTEST_EXAMPLE_RE = re.compile(
+ r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
@@ -98,7 +125,10 @@
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
- ''', re.MULTILINE | re.VERBOSE)
+ ''',
+ re.MULTILINE | re.VERBOSE,
+)
+
def colorize_codeblock(s: str) -> Tag:
"""
@@ -121,6 +151,7 @@ def colorize_codeblock(s: str) -> Tag:
return tags.pre('\n', *colorize_codeblock_body(s), class_='py-doctest')
+
def colorize_doctest(s: str) -> Tag:
"""
Perform syntax highlighting on the given doctest string, and
@@ -136,13 +167,14 @@ def colorize_doctest(s: str) -> Tag:
return tags.pre('\n', *colorize_doctest_body(s), class_='py-doctest')
+
def colorize_doctest_body(s: str) -> Iterator[Union[str, Tag]]:
idx = 0
for match in DOCTEST_EXAMPLE_RE.finditer(s):
# Parse the doctest example:
pysrc, want = match.group('source', 'want')
# Pre-example text:
- yield s[idx:match.start()]
+ yield s[idx : match.start()]
# Example source code:
yield from colorize_codeblock_body(pysrc)
# Example output:
@@ -155,6 +187,7 @@ def colorize_doctest_body(s: str) -> Iterator[Union[str, Tag]]:
# Add any remaining post-example text.
yield s[idx:]
+
def colorize_codeblock_body(s: str) -> Iterator[Union[Tag, str]]:
idx = 0
for match in DOCTEST_RE.finditer(s):
@@ -166,6 +199,7 @@ def colorize_codeblock_body(s: str) -> Iterator[Union[Tag, str]]:
# DOCTEST_RE matches end-of-string.
assert idx == len(s)
+
def subfunc(match: Match[str]) -> Iterator[Union[Tag, str]]:
text = match.group(1)
if match.group('PROMPT1'):
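
A small usage sketch for the example-matching regexp reformatted above; the
'source' and 'want' group names come from the pattern itself, and the snippet
assumes pydoctor is importable.

    from pydoctor.epydoc.doctest import DOCTEST_EXAMPLE_RE

    match = DOCTEST_EXAMPLE_RE.search(">>> 1 + 1\n2\n")
    assert match is not None
    print(repr(match.group('source')))  # '>>> 1 + 1'
    print(repr(match.group('want')))    # '2\n'
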
diff --git a/pydoctor/epydoc/docutils.py b/pydoctor/epydoc/docutils.py
index 66442b2de..91a0a149b 100644
--- a/pydoctor/epydoc/docutils.py
+++ b/pydoctor/epydoc/docutils.py
@@ -1,6 +1,7 @@
"""
Collection of helper functions and classes related to the creation and processing of L{docutils} nodes.
"""
+
from __future__ import annotations
from typing import Iterable, Iterator, Optional, TypeVar, cast
@@ -14,6 +15,7 @@
_DEFAULT_DOCUTILS_SETTINGS: Optional[optparse.Values] = None
+
def new_document(source_path: str, settings: Optional[optparse.Values] = None) -> nodes.document:
"""
Create a new L{nodes.document} using the provided settings or cached default settings.
@@ -23,7 +25,7 @@ def new_document(source_path: str, settings: Optional[optparse.Values] = None) -
global _DEFAULT_DOCUTILS_SETTINGS
# If we have docutils >= 0.19 we use get_default_settings to calculate and cache
# the default settings. Otherwise we let new_document figure it out.
- if settings is None and docutils_version_info >= (0,19):
+ if settings is None and docutils_version_info >= (0, 19):
if _DEFAULT_DOCUTILS_SETTINGS is None:
_DEFAULT_DOCUTILS_SETTINGS = frontend.get_default_settings()
@@ -31,21 +33,27 @@ def new_document(source_path: str, settings: Optional[optparse.Values] = None) -
return utils.new_document(source_path, settings)
+
def _set_nodes_parent(nodes: Iterable[nodes.Node], parent: nodes.Element) -> Iterator[nodes.Node]:
"""
- Set the L{nodes.Node.parent} attribute of the C{nodes} to the defined C{parent}.
-
+ Set the L{nodes.Node.parent} attribute of the C{nodes} to the defined C{parent}.
+
@returns: An iterator containing the modified nodes.
"""
for node in nodes:
node.parent = parent
yield node
+
TNode = TypeVar('TNode', bound=nodes.Node)
-def set_node_attributes(node: TNode,
- document: Optional[nodes.document] = None,
- lineno: Optional[int] = None,
- children: Optional[Iterable[nodes.Node]] = None) -> TNode:
+
+
+def set_node_attributes(
+ node: TNode,
+ document: Optional[nodes.document] = None,
+ lineno: Optional[int] = None,
+ children: Optional[Iterable[nodes.Node]] = None,
+) -> TNode:
"""
Set the attributes of a Node and return the modified node.
This is required to manually construct a docutils document that is consistent.
@@ -53,49 +61,50 @@ def set_node_attributes(node: TNode,
@param node: A node to edit.
@param document: The L{nodes.Node.document} attribute.
@param lineno: The L{nodes.Node.line} attribute.
- @param children: The L{nodes.Element.children} attribute. Special care is taken
- to appropriately set the L{nodes.Node.parent} attribute on the child nodes.
+ @param children: The L{nodes.Element.children} attribute. Special care is taken
+ to appropriately set the L{nodes.Node.parent} attribute on the child nodes.
"""
if lineno is not None:
node.line = lineno
-
+
if document:
node.document = document
if children:
- assert isinstance(node, nodes.Element), (f'Cannot set the children on Text node: "{node.astext()}". '
- f'Children: {children}')
+ assert isinstance(node, nodes.Element), (
+ f'Cannot set the children on Text node: "{node.astext()}". ' f'Children: {children}'
+ )
node.extend(_set_nodes_parent(children, node))
return node
+
def build_table_of_content(node: nodes.Element, depth: int, level: int = 0) -> nodes.Element | None:
"""
- Simplified from docutils Contents transform.
+ Simplified from docutils Contents transform.
All section nodes MUST have set attribute 'ids' to a list of strings.
"""
def _copy_and_filter(node: nodes.Element) -> nodes.Element:
"""Return a copy of a title, with references, images, etc. removed."""
- if (doc:=node.document) is None:
+ if (doc := node.document) is None:
raise AssertionError(f'missing document attribute on {node}')
visitor = parts.ContentsFilter(doc)
node.walkabout(visitor)
# the stubs are currently incomplete, as of 2024.
- return visitor.get_entry_text() # type:ignore
+ return visitor.get_entry_text() # type:ignore
level += 1
sections = [sect for sect in node if isinstance(sect, nodes.section)]
entries = []
- if (doc:=node.document) is None:
+ if (doc := node.document) is None:
raise AssertionError(f'missing document attribute on {node}')
-
+
for section in sections:
- title = cast(nodes.Element, section[0]) # the first element of a section is the header.
+ title = cast(nodes.Element, section[0]) # the first element of a section is the header.
entrytext = _copy_and_filter(title)
- reference = nodes.reference('', '', refid=section['ids'][0],
- *entrytext)
+ reference = nodes.reference('', '', refid=section['ids'][0], *entrytext)
ref_id = doc.set_id(reference, suggested_prefix='toc-entry')
entry = nodes.paragraph('', '', reference)
item = nodes.list_item('', entry)
@@ -111,6 +120,7 @@ def _copy_and_filter(node: nodes.Element) -> nodes.Element:
else:
return None
+
def get_lineno(node: nodes.Element) -> int:
"""
Get the 0-based line number for a docutils `nodes.title_reference`.
@@ -119,23 +129,22 @@ def get_lineno(node: nodes.Element) -> int:
counts the number of newlines until the reference element is found.
"""
# Fixes https://github.com/twisted/pydoctor/issues/237
-
+
def get_first_parent_lineno(_node: nodes.Element | None) -> int:
if _node is None:
return 0
-
+
if _node.line:
# This line points to the start of the containing node
# Here we subtract 1 from the result because the ParseError class is zero-based
# while docutils line attribute is 1-based.
- line:int = _node.line-1
- # Let's figure out how many newlines we need to add to this number
+ line: int = _node.line - 1
+ # Let's figure out how many newlines we need to add to this number
# to get the right line number.
parent_rawsource: Optional[str] = _node.rawsource or None
node_rawsource: Optional[str] = node.rawsource or None
- if parent_rawsource is not None and \
- node_rawsource is not None:
+ if parent_rawsource is not None and node_rawsource is not None:
if node_rawsource in parent_rawsource:
node_index = parent_rawsource.index(node_rawsource)
# Add the required number of newlines to the result
@@ -148,16 +157,19 @@ def get_first_parent_lineno(_node: nodes.Element | None) -> int:
line = node.line
else:
line = get_first_parent_lineno(node.parent)
-
+
return line
+
class wbr(nodes.inline):
"""
Word break opportunity.
"""
+
def __init__(self) -> None:
super().__init__('', '')
+
class obj_reference(nodes.title_reference):
"""
A reference to a documentable object.
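
To illustrate the manual document construction these helpers support, a minimal
sketch using new_document() and set_node_attributes() from above (the source
name 'example' is arbitrary):

    from docutils import nodes
    from pydoctor.epydoc.docutils import new_document, set_node_attributes

    doc = new_document('example')
    # set_node_attributes() sets .parent on the children for us; document and
    # line are set explicitly, mirroring how this patch assembles documents.
    para = set_node_attributes(
        nodes.paragraph('', ''),
        document=doc,
        lineno=1,
        children=[set_node_attributes(nodes.Text('Hello, docutils!'), document=doc)],
    )
    set_node_attributes(doc, children=[para])
    print(doc.astext())  # Hello, docutils!
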
diff --git a/pydoctor/epydoc/markup/__init__.py b/pydoctor/epydoc/markup/__init__.py
index 613443aed..b1654edb9 100644
--- a/pydoctor/epydoc/markup/__init__.py
+++ b/pydoctor/epydoc/markup/__init__.py
@@ -31,6 +31,7 @@
each error.
"""
from __future__ import annotations
+
__docformat__ = 'epytext en'
from typing import Callable, ContextManager, List, Optional, Sequence, Iterator, TYPE_CHECKING
@@ -74,6 +75,7 @@
ParserFunction = Callable[[str, List['ParseError']], 'ParsedDocstring']
+
def get_supported_docformats() -> Iterator[str]:
"""
Get the list of currently supported docformats.
@@ -85,43 +87,47 @@ def get_supported_docformats() -> Iterator[str]:
else:
yield moduleName
+
def get_parser_by_name(docformat: str, objclass: ObjClass | None = None) -> ParserFunction:
"""
- Get the C{parse_docstring(str, List[ParseError], bool) -> ParsedDocstring} function based on a parser name.
+ Get the C{parse_docstring(str, List[ParseError], bool) -> ParsedDocstring} function based on a parser name.
@raises ImportError: If the parser could not be imported, probably meaning that you are missing a dependency,
or that the docformat name does not match any known L{pydoctor.epydoc.markup} submodules.
"""
mod = import_module(f'pydoctor.epydoc.markup.{docformat}')
- # We can be sure the 'get_parser' function exist and is "correct"
+ # We can be sure the 'get_parser' function exists and is "correct"
# since the docformat is validated beforehand.
get_parser: Callable[[ObjClass | None], ParserFunction] = mod.get_parser
return get_parser(objclass)
-def processtypes(parse:ParserFunction) -> ParserFunction:
+
+def processtypes(parse: ParserFunction) -> ParserFunction:
"""
Wraps a docstring parser function to provide option --process-types.
"""
-
+
def _processtypes(doc: 'ParsedDocstring', errs: List['ParseError']) -> None:
"""
- Mutates the type fields of the given parsed docstring to replace
+ Mutates the type fields of the given parsed docstring to replace
their bodies with parsed versions that apply type auto-linking.
"""
from pydoctor.epydoc.markup._types import ParsedTypeDocstring
+
for field in doc.fields:
if field.tag() in ParsedTypeDocstring.FIELDS:
body = ParsedTypeDocstring(field.body().to_node(), lineno=field.lineno)
- append_warnings(body.warnings, errs, lineno=field.lineno+1)
+ append_warnings(body.warnings, errs, lineno=field.lineno + 1)
field.replace_body(body)
-
- def parse_and_processtypes(doc:str, errs:List['ParseError']) -> 'ParsedDocstring':
+
+ def parse_and_processtypes(doc: str, errs: List['ParseError']) -> 'ParsedDocstring':
parsed_doc = parse(doc, errs)
_processtypes(parsed_doc, errs)
return parsed_doc
return parse_and_processtypes
+
##################################################
## ParsedDocstring
##################################################
@@ -133,10 +139,10 @@ class ParsedDocstring(abc.ABC):
or L{pydoctor.epydoc.markup.restructuredtext.parse_docstring()}.
Subclasses must implement L{has_body()} and L{to_node()}.
-
+
A default implementation of the L{to_stan()} method, relying on L{to_node()}, is provided.
But some subclasses override this behaviour.
-
+
Implementation of L{get_toc()} also relies on L{to_node()}.
"""
@@ -158,7 +164,7 @@ def has_body(self) -> bool:
The body is the part of the docstring that remains after the fields
have been split off.
"""
-
+
def get_toc(self, depth: int) -> Optional['ParsedDocstring']:
"""
The table of contents of the docstring if titles are defined or C{None}.
@@ -172,6 +178,7 @@ def get_toc(self, depth: int) -> Optional['ParsedDocstring']:
if contents:
docstring_toc.extend(contents)
from pydoctor.epydoc.markup.restructuredtext import ParsedRstDocstring
+
return ParsedRstDocstring(docstring_toc, ())
else:
return None
@@ -180,7 +187,7 @@ def to_stan(self, docstring_linker: 'DocstringLinker') -> Tag:
"""
Translate this docstring to a Stan tree.
- @note: The default implementation relies on functionalities
+ @note: The default implementation relies on functionality
provided by L{node2stan.node2stan} and L{ParsedDocstring.to_node()}.
@param docstring_linker: An HTML translator for crossreference
@@ -193,7 +200,7 @@ def to_stan(self, docstring_linker: 'DocstringLinker') -> Tag:
return self._stan
self._stan = Tag('', children=node2stan.node2stan(self.to_node(), docstring_linker).children)
return self._stan
-
+
@abc.abstractmethod
def to_node(self) -> nodes.document:
"""
@@ -205,28 +212,31 @@ def to_node(self) -> nodes.document:
This method might raise L{NotImplementedError} in such cases. (i.e. L{pydoctor.epydoc.markup._types.ParsedTypeDocstring})
"""
raise NotImplementedError()
-
+
def get_summary(self) -> 'ParsedDocstring':
"""
Returns the summary of this docstring.
-
+
@note: The summary is cached.
"""
# Avoid rare cyclic import error, see https://github.com/twisted/pydoctor/pull/538#discussion_r845668735
from pydoctor import epydoc2stan
+
if self._summary is not None:
return self._summary
- try:
+ try:
_document = self.to_node()
visitor = SummaryExtractor(_document)
_document.walk(visitor)
- except Exception:
+ except Exception:
self._summary = epydoc2stan.ParsedStanOnly(tags.span(class_='undocumented')("Broken summary"))
else:
- self._summary = visitor.summary or epydoc2stan.ParsedStanOnly(tags.span(class_='undocumented')("No summary"))
+ self._summary = visitor.summary or epydoc2stan.ParsedStanOnly(
+ tags.span(class_='undocumented')("No summary")
+ )
return self._summary
-
+
##################################################
## Fields
##################################################
@@ -269,8 +279,8 @@ def body(self) -> ParsedDocstring:
@return: This field's body.
"""
return self._body
-
- def replace_body(self, newbody:ParsedDocstring) -> None:
+
+ def replace_body(self, newbody: ParsedDocstring) -> None:
self._body = newbody
def __repr__(self) -> str:
@@ -279,6 +289,7 @@ def __repr__(self) -> str:
else:
return f''
+
##################################################
## Docstring Linker (resolves crossreferences)
##################################################
@@ -315,24 +326,26 @@ def link_xref(self, target: str, label: "Flattenable", lineno: int) -> Tag:
In either case, the returned top-level tag will be C{<a>}.
"""
- def switch_context(self, ob:Optional['Documentable']) -> ContextManager[None]:
+ def switch_context(self, ob: Optional['Documentable']) -> ContextManager[None]:
"""
Switch the context of the linker, keeping the same underlying lookup rules.
Useful to resolve links with the right L{Documentable} context but
- create correct - absolute or relative - links to be clicked on from another page
+ create correct - absolute or relative - links to be clicked on from another page
rather than the initial page of the context. "Cannot find link target" errors will be reported
relative to the new context object.
- Pass C{None} to always generate full URLs (for summaries for example),
+ Pass C{None} to always generate full URLs (for summaries for example),
in this case errors will NOT be reported at all.
"""
+
##################################################
## ParseError exceptions
##################################################
-def append_warnings(warns:List[str], errs:List['ParseError'], lineno:int) -> None:
+
+def append_warnings(warns: List[str], errs: List['ParseError'], lineno: int) -> None:
"""
    Utility method to create non-fatal L{ParseError}s and append them to the provided list.
@@ -342,16 +355,13 @@ def append_warnings(warns:List[str], errs:List['ParseError'], lineno:int) -> Non
for warn in warns:
errs.append(ParseError(warn, linenum=lineno, is_fatal=False))
+
class ParseError(Exception):
"""
The base class for errors generated while parsing docstrings.
"""
- def __init__(self,
- descr: str,
- linenum: Optional[int] = None,
- is_fatal: bool = True
- ):
+ def __init__(self, descr: str, linenum: Optional[int] = None, is_fatal: bool = True):
"""
@param descr: A description of the error.
        @param linenum: The line on which the error occurred within
@@ -376,8 +386,10 @@ def linenum(self) -> Optional[int]:
any offset). If the line number is unknown, then return
C{None}.
"""
- if self._linenum is None: return None
- else: return self._linenum + 1
+ if self._linenum is None:
+ return None
+ else:
+ return self._linenum + 1
def descr(self) -> str:
"""
@@ -411,15 +423,17 @@ def __repr__(self) -> str:
else:
return f''
+
class SummaryExtractor(nodes.NodeVisitor):
"""
    A docutils node visitor that extracts the first sentences from
the first paragraph in a document.
"""
- def __init__(self, document: nodes.document, maxchars:int=200) -> None:
+
+ def __init__(self, document: nodes.document, maxchars: int = 200) -> None:
"""
@param document: The docutils document to extract a summary from.
- @param maxchars: Maximum of characters the summary can span.
+        @param maxchars: Maximum number of characters the summary can span.
Sentences are not cut in the middle, so the actual length
            might be longer if you have a large first paragraph.
"""
@@ -442,7 +456,7 @@ def visit_paragraph(self, node: nodes.paragraph) -> None:
summary_doc = new_document('summary')
summary_pieces: list[nodes.Node] = []
- # Extract the first sentences from the first paragraph until maximum number
+        # Extract the first sentences from the first paragraph until the maximum number
         # of characters is reached or until the end of the paragraph.
char_count = 0
@@ -450,16 +464,16 @@ def visit_paragraph(self, node: nodes.paragraph) -> None:
if char_count > self.maxchars:
break
-
+
if isinstance(child, nodes.Text):
text = child.astext().replace('\n', ' ')
- sentences = [item for item in self._SENTENCE_RE_SPLIT.split(text) if item] # Not empty values only
-
- for i,s in enumerate(sentences):
-
+                sentences = [item for item in self._SENTENCE_RE_SPLIT.split(text) if item]  # Keep non-empty values only
+
+ for i, s in enumerate(sentences):
+
if char_count > self.maxchars:
# Leave final point alone.
- if not (i == len(sentences)-1 and len(s)==1):
+ if not (i == len(sentences) - 1 and len(s) == 1):
break
summary_pieces.append(set_node_attributes(nodes.Text(s), document=summary_doc))
@@ -468,17 +482,21 @@ def visit_paragraph(self, node: nodes.paragraph) -> None:
else:
summary_pieces.append(set_node_attributes(child.deepcopy(), document=summary_doc))
char_count += len(''.join(node2stan.gettext(child)))
-
+
if char_count > self.maxchars:
if not summary_pieces[-1].astext().endswith('.'):
summary_pieces.append(set_node_attributes(nodes.Text('...'), document=summary_doc))
self.other_docs = True
- set_node_attributes(summary_doc, children=[
- set_node_attributes(nodes.paragraph('', ''), document=summary_doc, lineno=1,
- children=summary_pieces)])
+ set_node_attributes(
+ summary_doc,
+ children=[
+ set_node_attributes(nodes.paragraph('', ''), document=summary_doc, lineno=1, children=summary_pieces)
+ ],
+ )
from pydoctor.epydoc.markup.restructuredtext import ParsedRstDocstring
+
self.summary = ParsedRstDocstring(summary_doc, fields=[])
def visit_field(self, node: nodes.Node) -> None:
diff --git a/pydoctor/epydoc/markup/_napoleon.py b/pydoctor/epydoc/markup/_napoleon.py
index 78d7bc643..cd995ff19 100644
--- a/pydoctor/epydoc/markup/_napoleon.py
+++ b/pydoctor/epydoc/markup/_napoleon.py
@@ -2,6 +2,7 @@
This module contains a class to wrap shared behaviour between
L{pydoctor.epydoc.markup.numpy} and L{pydoctor.epydoc.markup.google}.
"""
+
from __future__ import annotations
from pydoctor.epydoc.markup import ObjClass, ParsedDocstring, ParseError, processtypes
@@ -27,9 +28,7 @@ def __init__(self, objclass: ObjClass | None = None):
"""
self.objclass = objclass
- def parse_google_docstring(
- self, docstring: str, errors: list[ParseError]
- ) -> ParsedDocstring:
+ def parse_google_docstring(self, docstring: str, errors: list[ParseError]) -> ParsedDocstring:
"""
        Parse the given docstring, which is formatted as a Google-style docstring.
Return a L{ParsedDocstring} representation of its contents.
@@ -39,11 +38,12 @@ def parse_google_docstring(
will be stored.
"""
return self._parse_docstring(
- docstring, errors, GoogleDocstring, )
+ docstring,
+ errors,
+ GoogleDocstring,
+ )
- def parse_numpy_docstring(
- self, docstring: str, errors: list[ParseError]
- ) -> ParsedDocstring:
+ def parse_numpy_docstring(self, docstring: str, errors: list[ParseError]) -> ParsedDocstring:
"""
        Parse the given docstring, which is formatted as a NumPy-style docstring.
Return a L{ParsedDocstring} representation of its contents.
@@ -53,7 +53,10 @@ def parse_numpy_docstring(
will be stored.
"""
return self._parse_docstring(
- docstring, errors, NumpyDocstring, )
+ docstring,
+ errors,
+ NumpyDocstring,
+ )
def _parse_docstring(
self,
@@ -63,16 +66,14 @@ def _parse_docstring(
) -> ParsedDocstring:
docstring_obj = docstring_cls(
- docstring,
+ docstring,
what=self.objclass,
)
return self._parse_docstring_obj(docstring_obj, errors)
@staticmethod
- def _parse_docstring_obj(
- docstring_obj: GoogleDocstring, errors: list[ParseError]
- ) -> ParsedDocstring:
+ def _parse_docstring_obj(docstring_obj: GoogleDocstring, errors: list[ParseError]) -> ParsedDocstring:
"""
Helper method to parse L{GoogleDocstring} or L{NumpyDocstring} objects.
"""
diff --git a/pydoctor/epydoc/markup/_pyval_repr.py b/pydoctor/epydoc/markup/_pyval_repr.py
index 1275385cb..433b802ec 100644
--- a/pydoctor/epydoc/markup/_pyval_repr.py
+++ b/pydoctor/epydoc/markup/_pyval_repr.py
@@ -51,6 +51,7 @@
from pydoctor.epydoc.docutils import set_node_attributes, wbr, obj_reference, new_document
from pydoctor.astutils import node2dottedname, bind_args, Parentage, get_parents, unparse, op_util
+
def decode_with_backslashreplace(s: bytes) -> str:
r"""
Convert the given 8-bit string into unicode, treating any
@@ -63,10 +64,8 @@ def decode_with_backslashreplace(s: bytes) -> str:
# s.encode('string-escape') is not appropriate here, since it
# also adds backslashes to some ascii chars (eg \ and ').
- return (s
- .decode('latin1')
- .encode('ascii', 'backslashreplace')
- .decode('ascii'))
+ return s.decode('latin1').encode('ascii', 'backslashreplace').decode('ascii')
+
@attr.s(auto_attribs=True)
class _MarkedColorizerState:
@@ -76,6 +75,7 @@ class _MarkedColorizerState:
linebreakok: bool
stacklength: int
+
class _ColorizerState:
"""
    An object used to keep track of the current state of the pyval
@@ -83,8 +83,9 @@ class _ColorizerState:
a backup point, and restore back to that backup point. This is
used by several colorization methods that first try colorizing
their object on a single line (setting linebreakok=False); and
- then fall back on a multi-line output if that fails.
+ then fall back on a multi-line output if that fails.
"""
+
def __init__(self) -> None:
self.result: list[nodes.Node] = []
self.charpos = 0
@@ -95,25 +96,25 @@ def __init__(self) -> None:
def mark(self) -> _MarkedColorizerState:
return _MarkedColorizerState(
- length=len(self.result),
- charpos=self.charpos,
- lineno=self.lineno,
- linebreakok=self.linebreakok,
- stacklength=len(self.stack))
+ length=len(self.result),
+ charpos=self.charpos,
+ lineno=self.lineno,
+ linebreakok=self.linebreakok,
+ stacklength=len(self.stack),
+ )
def restore(self, mark: _MarkedColorizerState) -> List[nodes.Node]:
"""
Return what's been trimmed from the result.
"""
- (self.charpos, self.lineno,
- self.linebreakok) = (mark.charpos, mark.lineno,
- mark.linebreakok)
- trimmed = self.result[mark.length:]
- del self.result[mark.length:]
- del self.stack[mark.stacklength:]
+ (self.charpos, self.lineno, self.linebreakok) = (mark.charpos, mark.lineno, mark.linebreakok)
+ trimmed = self.result[mark.length :]
+ del self.result[mark.length :]
+ del self.stack[mark.stacklength :]
return trimmed
-# TODO: add support for comparators when needed.
+
+# TODO: add support for comparators when needed.
# _OperatorDelimitier is needed for:
# - IfExp (TODO)
# - UnaryOp (DONE)
@@ -123,13 +124,17 @@ def restore(self, mark: _MarkedColorizerState) -> List[nodes.Node]:
# - Lambda (TODO)
class _OperatorDelimiter:
"""
- A context manager that can add enclosing delimiters to nested operators when needed.
-
+ A context manager that can add enclosing delimiters to nested operators when needed.
+
    Adapted from the C{astor} library, thanks.
"""
- def __init__(self, colorizer: 'PyvalColorizer', state: _ColorizerState,
- node: ast.expr,) -> None:
+ def __init__(
+ self,
+ colorizer: 'PyvalColorizer',
+ state: _ColorizerState,
+ node: ast.expr,
+ ) -> None:
self.discard = True
"""No parenthesis by default."""
@@ -144,8 +149,8 @@ def __init__(self, colorizer: 'PyvalColorizer', state: _ColorizerState,
parent_node: ast.AST = next(get_parents(node))
except StopIteration:
return
-
- # avoid needless parenthesis, since we now collect parents for every nodes
+
+        # Avoid needless parentheses, since we now collect parents for every node
if isinstance(parent_node, (ast.expr, ast.keyword, ast.comprehension)):
try:
precedence = op_util.get_op_precedence(getattr(node, 'op', node))
@@ -155,11 +160,10 @@ def __init__(self, colorizer: 'PyvalColorizer', state: _ColorizerState,
try:
parent_precedence = op_util.get_op_precedence(getattr(parent_node, 'op', parent_node))
if isinstance(getattr(parent_node, 'op', None), ast.Pow) or isinstance(parent_node, ast.BoolOp):
- parent_precedence+=1
+ parent_precedence += 1
except KeyError:
- parent_precedence = colorizer.explicit_precedence.get(
- node, op_util.Precedence.highest)
-
+ parent_precedence = colorizer.explicit_precedence.get(node, op_util.Precedence.highest)
+
if precedence < parent_precedence:
self.discard = False
@@ -173,20 +177,24 @@ def __exit__(self, *exc_info: Any) -> None:
self.state.result.extend(trimmed)
self.colorizer._output(')', self.colorizer.GROUP_TAG, self.state)
+
class _Maxlines(Exception):
"""A control-flow exception that is raised when PyvalColorizer
    exceeds the maximum number of allowed lines."""
+
class _Linebreak(Exception):
"""A control-flow exception that is raised when PyvalColorizer
generates a string containing a newline, but the state object's
linebreakok variable is False."""
+
class ColorizedPyvalRepr(ParsedRstDocstring):
"""
@ivar is_complete: True if this colorized repr completely describes
the object.
"""
+
def __init__(self, document: nodes.document, is_complete: bool, warnings: List[str]) -> None:
super().__init__(document, ())
self.is_complete = is_complete
@@ -194,52 +202,62 @@ def __init__(self, document: nodes.document, is_complete: bool, warnings: List[s
"""
List of warnings
"""
-
+
def to_stan(self, docstring_linker: DocstringLinker) -> Tag:
return Tag('code')(super().to_stan(docstring_linker))
-def colorize_pyval(pyval: Any, linelen:Optional[int], maxlines:int, linebreakok:bool=True, refmap:Optional[Dict[str, str]]=None) -> ColorizedPyvalRepr:
+
+def colorize_pyval(
+ pyval: Any, linelen: Optional[int], maxlines: int, linebreakok: bool = True, refmap: Optional[Dict[str, str]] = None
+) -> ColorizedPyvalRepr:
"""
- Get a L{ColorizedPyvalRepr} instance for this piece of ast.
+    Get a L{ColorizedPyvalRepr} instance for this piece of AST.
- @param refmap: A mapping that maps local names to full names.
- This can be used to explicitely links some objects by assigning an
+ @param refmap: A mapping that maps local names to full names.
+        This can be used to explicitly link some objects by assigning an
explicit 'refuri' value on the L{obj_reference} node.
        This can be used for cases where the linker might be wrong; obviously this is just a workaround.
@return: A L{ColorizedPyvalRepr} describing the given pyval.
"""
return PyvalColorizer(linelen=linelen, maxlines=maxlines, linebreakok=linebreakok, refmap=refmap).colorize(pyval)
-def colorize_inline_pyval(pyval: Any, refmap:Optional[Dict[str, str]]=None) -> ColorizedPyvalRepr:
+
+def colorize_inline_pyval(pyval: Any, refmap: Optional[Dict[str, str]] = None) -> ColorizedPyvalRepr:
"""
    Used to colorize type annotations and parameters' default values.
@returns: C{L{colorize_pyval}(pyval, linelen=None, linebreakok=False)}
"""
return colorize_pyval(pyval, linelen=None, maxlines=1, linebreakok=False, refmap=refmap)
-def _get_str_func(pyval: AnyStr) -> Callable[[str], AnyStr]:
- func = cast(Callable[[str], AnyStr], str if isinstance(pyval, str) else \
- functools.partial(bytes, encoding='utf-8', errors='replace'))
+
+def _get_str_func(pyval: AnyStr) -> Callable[[str], AnyStr]:
+ func = cast(
+ Callable[[str], AnyStr],
+ str if isinstance(pyval, str) else functools.partial(bytes, encoding='utf-8', errors='replace'),
+ )
return func
+
+
def _str_escape(s: str) -> str:
"""
Encode a string such that it's correctly represented inside simple quotes.
"""
+
+    # Displays unicode characters as-is.
def enc(c: str) -> str:
if c == "'":
c = r"\'"
- elif c == '\t':
+ elif c == '\t':
c = r'\t'
- elif c == '\r':
+ elif c == '\r':
c = r'\r'
- elif c == '\n':
+ elif c == '\n':
c = r'\n'
- elif c == '\f':
+ elif c == '\f':
c = r'\f'
- elif c == '\v':
+ elif c == '\v':
c = r'\v'
- elif c == "\\":
+ elif c == "\\":
c = r'\\'
return c
@@ -252,38 +270,42 @@ def enc(c: str) -> str:
except UnicodeEncodeError:
# Otherwise replace them with backslashreplace
s = s.encode('utf-8', 'backslashreplace').decode('utf-8')
-
+
return s
+
def _bytes_escape(b: bytes) -> str:
return repr(b)[2:-1]
+
class PyvalColorizer:
"""
Syntax highlighter for Python values.
"""
- def __init__(self, linelen:Optional[int], maxlines:int, linebreakok:bool=True, refmap:Optional[Dict[str, str]]=None):
- self.linelen: Optional[int] = linelen if linelen!=0 else None
- self.maxlines: Union[int, float] = maxlines if maxlines!=0 else float('inf')
+ def __init__(
+ self, linelen: Optional[int], maxlines: int, linebreakok: bool = True, refmap: Optional[Dict[str, str]] = None
+ ):
+ self.linelen: Optional[int] = linelen if linelen != 0 else None
+ self.maxlines: Union[int, float] = maxlines if maxlines != 0 else float('inf')
self.linebreakok = linebreakok
self.refmap = refmap if refmap is not None else {}
- # some edge cases require to compute the precedence ahead of time and can't be
+        # Some edge cases require computing the precedence ahead of time, which can't be
# easily done with access only to the parent node of some operators.
- self.explicit_precedence:Dict[ast.AST, int] = {}
+ self.explicit_precedence: Dict[ast.AST, int] = {}
- #////////////////////////////////////////////////////////////
+ # ////////////////////////////////////////////////////////////
# Colorization Tags & other constants
- #////////////////////////////////////////////////////////////
-
- GROUP_TAG = None # was 'variable-group' # e.g., "[" and "]"
- COMMA_TAG = None # was 'variable-op' # The "," that separates elements
- COLON_TAG = None # was 'variable-op' # The ":" in dictionaries
- CONST_TAG = None # None, True, False
- NUMBER_TAG = None # ints, floats, etc
- QUOTE_TAG = 'variable-quote' # Quotes around strings.
- STRING_TAG = 'variable-string' # Body of string literals
- LINK_TAG = 'variable-link' # Links to other documentables, extracted from AST names and attributes.
+ # ////////////////////////////////////////////////////////////
+
+ GROUP_TAG = None # was 'variable-group' # e.g., "[" and "]"
+ COMMA_TAG = None # was 'variable-op' # The "," that separates elements
+ COLON_TAG = None # was 'variable-op' # The ":" in dictionaries
+ CONST_TAG = None # None, True, False
+ NUMBER_TAG = None # ints, floats, etc
+ QUOTE_TAG = 'variable-quote' # Quotes around strings.
+ STRING_TAG = 'variable-string' # Body of string literals
+ LINK_TAG = 'variable-link' # Links to other documentables, extracted from AST names and attributes.
ELLIPSIS_TAG = 'variable-ellipsis'
LINEWRAP_TAG = 'variable-linewrap'
UNKNOWN_TAG = 'variable-unknown'
@@ -304,7 +326,7 @@ def __init__(self, linelen:Optional[int], maxlines:int, linebreakok:bool=True, r
RE_COMPILE_SIGNATURE = signature(re.compile)
- def _set_precedence(self, precedence:int, *node:ast.AST) -> None:
+ def _set_precedence(self, precedence: int, *node: ast.AST) -> None:
for n in node:
self.explicit_precedence[n] = precedence
@@ -331,17 +353,17 @@ def colorize(self, pyval: Any) -> ColorizedPyvalRepr:
is_complete = False
else:
is_complete = True
-
+
# Put it all together.
document = new_document('pyval_repr')
        # This ensures the .parent and .document attributes of the child nodes are set correctly.
set_node_attributes(document, children=[set_node_attributes(node, document=document) for node in state.result])
return ColorizedPyvalRepr(document, is_complete, state.warnings)
-
+
def _colorize(self, pyval: Any, state: _ColorizerState) -> None:
pyvaltype = type(pyval)
-
+
# Individual "is" checks are required here to be sure we don't consider 0 as True and 1 as False!
if pyval is False or pyval is True or pyval is None or pyval is NotImplemented:
# Link built-in constants to the standard library.
@@ -357,14 +379,13 @@ def _colorize(self, pyval: Any, state: _ColorizerState) -> None:
self._colorize_str(pyval, state, b'b', escape_fcn=_bytes_escape)
elif pyvaltype is tuple:
            # Tuples need a trailing comma when they contain only one value.
- self._multiline(self._colorize_iter, pyval, state, prefix='(',
- suffix=(',' if len(pyval) <= 1 else '')+')')
+ self._multiline(
+ self._colorize_iter, pyval, state, prefix='(', suffix=(',' if len(pyval) <= 1 else '') + ')'
+ )
elif pyvaltype is set:
- self._multiline(self._colorize_iter, pyval,
- state, prefix='set([', suffix='])')
+ self._multiline(self._colorize_iter, pyval, state, prefix='set([', suffix='])')
elif pyvaltype is frozenset:
- self._multiline(self._colorize_iter, pyval,
- state, prefix='frozenset([', suffix='])')
+ self._multiline(self._colorize_iter, pyval, state, prefix='frozenset([', suffix='])')
elif pyvaltype is list:
self._multiline(self._colorize_iter, pyval, state, prefix='[', suffix=']')
elif issubclass(pyvaltype, ast.AST):
@@ -374,9 +395,11 @@ def _colorize(self, pyval: Any, state: _ColorizerState) -> None:
try:
pyval_repr = repr(pyval)
if not isinstance(pyval_repr, str):
- pyval_repr = str(pyval_repr) #type: ignore[unreachable]
+ pyval_repr = str(pyval_repr) # type: ignore[unreachable]
except Exception:
- state.warnings.append(f"Cannot colorize object of type '{pyval.__class__.__name__}', repr() raised an exception.")
+ state.warnings.append(
+ f"Cannot colorize object of type '{pyval.__class__.__name__}', repr() raised an exception."
+ )
state.result.append(self.UNKNOWN_REPR)
else:
match = self.GENERIC_OBJECT_RE.search(pyval_repr)
@@ -387,14 +410,14 @@ def _colorize(self, pyval: Any, state: _ColorizerState) -> None:
def _trim_result(self, result: List[nodes.Node], num_chars: int) -> None:
while num_chars > 0:
- if not result:
+ if not result:
return
- if isinstance(r1:=result[-1], nodes.Element):
+ if isinstance(r1 := result[-1], nodes.Element):
if len(r1.children) >= 1:
data = r1[-1].astext()
trim = min(num_chars, len(data))
r1[-1] = nodes.Text(data[:-trim])
- if not r1[-1].astext():
+ if not r1[-1].astext():
if len(r1.children) == 1:
result.pop()
else:
@@ -408,22 +431,24 @@ def _trim_result(self, result: List[nodes.Node], num_chars: int) -> None:
assert isinstance(r1, nodes.Text)
trim = min(num_chars, len(r1))
result[-1] = nodes.Text(r1.astext()[:-trim])
- if not result[-1].astext():
+ if not result[-1].astext():
result.pop()
num_chars -= trim
- #////////////////////////////////////////////////////////////
+ # ////////////////////////////////////////////////////////////
# Object Colorization Functions
- #////////////////////////////////////////////////////////////
+ # ////////////////////////////////////////////////////////////
def _insert_comma(self, indent: int, state: _ColorizerState) -> None:
if state.linebreakok:
self._output(',', self.COMMA_TAG, state)
- self._output('\n'+' '*indent, None, state)
+ self._output('\n' + ' ' * indent, None, state)
else:
self._output(', ', self.COMMA_TAG, state)
- def _multiline(self, func: Callable[..., None], pyval: Iterable[Any], state: _ColorizerState, **kwargs: Any) -> None:
+ def _multiline(
+ self, func: Callable[..., None], pyval: Iterable[Any], state: _ColorizerState, **kwargs: Any
+ ) -> None:
"""
Helper for container-type colorizers. First, try calling
C{func(pyval, state, **kwargs)} with linebreakok set to false;
@@ -443,14 +468,18 @@ def _multiline(self, func: Callable[..., None], pyval: Iterable[Any], state: _Co
state.restore(mark)
func(pyval, state, **kwargs)
- def _colorize_iter(self, pyval: Iterable[Any], state: _ColorizerState,
- prefix: Optional[AnyStr] = None,
- suffix: Optional[AnyStr] = None) -> None:
+ def _colorize_iter(
+ self,
+ pyval: Iterable[Any],
+ state: _ColorizerState,
+ prefix: Optional[AnyStr] = None,
+ suffix: Optional[AnyStr] = None,
+ ) -> None:
if prefix is not None:
self._output(prefix, self.GROUP_TAG, state)
indent = state.charpos
for i, elt in enumerate(pyval):
- if i>=1:
+ if i >= 1:
self._insert_comma(indent, state)
# word break opportunity for inline values
state.result.append(self.WORD_BREAK_OPPORTUNITY)
@@ -458,12 +487,13 @@ def _colorize_iter(self, pyval: Iterable[Any], state: _ColorizerState,
if suffix is not None:
self._output(suffix, self.GROUP_TAG, state)
- def _colorize_ast_dict(self, items: Iterable[Tuple[Optional[ast.AST], ast.AST]],
- state: _ColorizerState, prefix: str, suffix: str) -> None:
+ def _colorize_ast_dict(
+ self, items: Iterable[Tuple[Optional[ast.AST], ast.AST]], state: _ColorizerState, prefix: str, suffix: str
+ ) -> None:
self._output(prefix, self.GROUP_TAG, state)
indent = state.charpos
for i, (key, val) in enumerate(items):
- if i>=1:
+ if i >= 1:
self._insert_comma(indent, state)
state.result.append(self.WORD_BREAK_OPPORTUNITY)
if key:
@@ -474,18 +504,19 @@ def _colorize_ast_dict(self, items: Iterable[Tuple[Optional[ast.AST], ast.AST]],
self._output('**', None, state)
self._colorize(val, state)
self._output(suffix, self.GROUP_TAG, state)
-
- def _colorize_str(self, pyval: AnyStr, state: _ColorizerState, prefix: AnyStr,
- escape_fcn: Callable[[AnyStr], str]) -> None:
-
+
+ def _colorize_str(
+ self, pyval: AnyStr, state: _ColorizerState, prefix: AnyStr, escape_fcn: Callable[[AnyStr], str]
+ ) -> None:
+
str_func = _get_str_func(pyval)
# Decide which quote to use.
if str_func('\n') in pyval and state.linebreakok:
quote = str_func("'''")
- else:
+ else:
quote = str_func("'")
-
+
# Open quote.
self._output(prefix, None, state)
self._output(quote, self.QUOTE_TAG, state)
@@ -497,26 +528,26 @@ def _colorize_str(self, pyval: AnyStr, state: _ColorizerState, prefix: AnyStr,
lines = [pyval]
# Body
for i, line in enumerate(lines):
- if i>0:
+ if i > 0:
self._output(str_func('\n'), None, state)
# It's not redundant when line is bytes
- line = cast(AnyStr, escape_fcn(line)) # type:ignore[redundant-cast]
-
+ line = cast(AnyStr, escape_fcn(line)) # type:ignore[redundant-cast]
+
self._output(line, self.STRING_TAG, state)
# Close quote.
self._output(quote, self.QUOTE_TAG, state)
- #////////////////////////////////////////////////////////////
+ # ////////////////////////////////////////////////////////////
# Support for AST
- #////////////////////////////////////////////////////////////
+ # ////////////////////////////////////////////////////////////
# Nodes not explicitely handled that would be nice to handle.
- # f-strings,
- # comparators,
- # generator expressions,
+ # f-strings,
+ # comparators,
+ # generator expressions,
# Slice and ExtSlice
-
+
def _colorize_ast_constant(self, pyval: ast.Constant, state: _ColorizerState) -> None:
val = pyval.value
        # Handle ellipsis
@@ -533,7 +564,7 @@ def _colorize_ast(self, pyval: ast.AST, state: _ColorizerState) -> None:
except StopIteration:
Parentage().visit(pyval)
- if isinstance(pyval, ast.Constant):
+ if isinstance(pyval, ast.Constant):
self._colorize_ast_constant(pyval, state)
elif isinstance(pyval, ast.UnaryOp):
self._colorize_ast_unary_op(pyval, state)
@@ -571,7 +602,7 @@ def _colorize_ast(self, pyval: ast.AST, state: _ColorizerState) -> None:
else:
self._colorize_ast_generic(pyval, state)
assert state.stack.pop() is pyval
-
+
def _colorize_ast_unary_op(self, pyval: ast.UnaryOp, state: _ColorizerState) -> None:
with _OperatorDelimiter(self, state, pyval):
if isinstance(pyval.op, ast.USub):
@@ -587,7 +618,7 @@ def _colorize_ast_unary_op(self, pyval: ast.UnaryOp, state: _ColorizerState) ->
self._colorize_ast_generic(pyval, state)
self._colorize(pyval.operand, state)
-
+
def _colorize_ast_binary_op(self, pyval: ast.BinOp, state: _ColorizerState) -> None:
with _OperatorDelimiter(self, state, pyval):
# Colorize first operand
@@ -604,10 +635,10 @@ def _colorize_ast_binary_op(self, pyval: ast.BinOp, state: _ColorizerState) -> N
# Colorize second operand
self._colorize(pyval.right, state)
-
+
def _colorize_ast_bool_op(self, pyval: ast.BoolOp, state: _ColorizerState) -> None:
with _OperatorDelimiter(self, state, pyval):
- _maxindex = len(pyval.values)-1
+ _maxindex = len(pyval.values) - 1
for index, value in enumerate(pyval.values):
self._colorize(value, state)
@@ -647,11 +678,11 @@ def _colorize_ast_subscript(self, node: ast.Subscript, state: _ColorizerState) -
else:
state.result.append(self.WORD_BREAK_OPPORTUNITY)
self._colorize(sub, state)
-
+
self._output(']', self.GROUP_TAG, state)
-
+
def _colorize_ast_call(self, node: ast.Call, state: _ColorizerState) -> None:
-
+
if node2dottedname(node.func) == ['re', 'compile']:
# Colorize regexps from re.compile AST arguments.
self._colorize_ast_re(node, state)
@@ -664,21 +695,21 @@ def _colorize_ast_call_generic(self, node: ast.Call, state: _ColorizerState) ->
self._output('(', self.GROUP_TAG, state)
indent = state.charpos
self._multiline(self._colorize_iter, node.args, state)
- if len(node.keywords)>0:
- if len(node.args)>0:
+ if len(node.keywords) > 0:
+ if len(node.args) > 0:
self._insert_comma(indent, state)
self._multiline(self._colorize_iter, node.keywords, state)
self._output(')', self.GROUP_TAG, state)
- def _colorize_ast_re(self, node:ast.Call, state: _ColorizerState) -> None:
-
+ def _colorize_ast_re(self, node: ast.Call, state: _ColorizerState) -> None:
+
try:
# Can raise TypeError
args = bind_args(self.RE_COMPILE_SIGNATURE, node)
except TypeError:
self._colorize_ast_call_generic(node, state)
return
-
+
ast_pattern = args.arguments['pattern']
# Cannot colorize regex
@@ -687,7 +718,7 @@ def _colorize_ast_re(self, node:ast.Call, state: _ColorizerState) -> None:
return
pat = ast_pattern.value
-
+
# Just in case regex pattern is not valid type
if not isinstance(pat, (bytes, str)):
state.warnings.append("Cannot colorize regular expression: pattern must be bytes or str.")
@@ -695,15 +726,15 @@ def _colorize_ast_re(self, node:ast.Call, state: _ColorizerState) -> None:
return
mark = state.mark()
-
+
self._output("re.compile", None, state, link=True)
self._output('(', self.GROUP_TAG, state)
indent = state.charpos
-
+
try:
# Can raise ValueError or re.error
# Value of type variable "AnyStr" cannot be "Union[bytes, str]": Yes it can.
- self._colorize_re_pattern_str(pat, state) #type:ignore[type-var]
+ self._colorize_re_pattern_str(pat, state) # type:ignore[type-var]
except (ValueError, sre_constants.error) as e:
# Make sure not to swallow control flow errors.
# Colorize the ast.Call as any other node if the pattern parsing fails.
@@ -721,30 +752,30 @@ def _colorize_ast_re(self, node:ast.Call, state: _ColorizerState) -> None:
def _colorize_ast_generic(self, pyval: ast.AST, state: _ColorizerState) -> None:
try:
- # Always wrap the expression inside parenthesis because we can't be sure
- # if there are required since we don;t have support for all operators
+            # Always wrap the expression inside parentheses because we can't be sure
+            # if they are required, since we don't have support for all operators.
# See TODO comment in _OperatorDelimiter.
source = unparse(pyval).strip()
- if isinstance(pyval, (ast.IfExp, ast.Compare, ast.Lambda)) and len(state.stack)>1:
+ if isinstance(pyval, (ast.IfExp, ast.Compare, ast.Lambda)) and len(state.stack) > 1:
source = f'({source})'
- except Exception: # No defined handler for node of type
+        except Exception:  # No defined handler for this type of node
state.result.append(self.UNKNOWN_REPR)
else:
# TODO: Maybe try to colorize anyway, without links, with epydoc.doctest ?
self._output(source, None, state)
-
- #////////////////////////////////////////////////////////////
+
+ # ////////////////////////////////////////////////////////////
# Support for Regexes
- #////////////////////////////////////////////////////////////
+ # ////////////////////////////////////////////////////////////
def _colorize_re_pattern_str(self, pat: AnyStr, state: _ColorizerState) -> None:
        # Currently, the colorizer does not render multiline regex patterns correctly because we don't
- # recover the flag values from re.compile() arguments (so we don't know when re.VERBOSE is used for instance).
+ # recover the flag values from re.compile() arguments (so we don't know when re.VERBOSE is used for instance).
# With default flags, newlines are mixed up with literals \n and probably more fun stuff like that.
        # Turns out the sre_parse.parse() function treats characters "\n" and "\\n" the same way.
-
+
        # If the pattern string is composed of multiple lines, simply use the string colorizer instead.
- # It's more informative to have the proper newlines than the fancy regex colors.
+ # It's more informative to have the proper newlines than the fancy regex colors.
        # Note: Maybe this decision is driven by a misunderstanding of regular expressions.
@@ -759,30 +790,29 @@ def _colorize_re_pattern_str(self, pat: AnyStr, state: _ColorizerState) -> None:
self._colorize_re_pattern(pat, state, b'rb')
else:
self._colorize_re_pattern(pat, state, 'r')
-
+
def _colorize_re_pattern(self, pat: AnyStr, state: _ColorizerState, prefix: AnyStr) -> None:
# Parse the regexp pattern.
# The regex pattern strings are always parsed with the default flags.
- # Flag values are displayed as regular ast.Call arguments.
+ # Flag values are displayed as regular ast.Call arguments.
tree: sre_parse36.SubPattern = sre_parse36.parse(pat, 0)
        # From Python 3.8, SubPattern.pattern is renamed SubPattern.state, but we don't care right now because we use sre_parse36.
pattern = tree.pattern
- groups = dict([(num,name) for (name,num) in
- pattern.groupdict.items()])
+ groups = dict([(num, name) for (name, num) in pattern.groupdict.items()])
flags: int = pattern.flags
-
+
        # Open quote. Never triple-quote regex pattern strings; patterns that include a '\n' character are displayed as regular strings anyway.
quote = "'"
self._output(prefix, None, state)
self._output(quote, self.QUOTE_TAG, state)
-
+
if flags != sre_constants.SRE_FLAG_UNICODE:
# If developers included flags in the regex string, display them.
# By default, do not display the '(?u)'
self._colorize_re_flags(flags, state)
-
+
# Colorize it!
self._colorize_re_tree(tree.data, state, True, groups)
@@ -791,13 +821,17 @@ def _colorize_re_pattern(self, pat: AnyStr, state: _ColorizerState, prefix: AnyS
def _colorize_re_flags(self, flags: int, state: _ColorizerState) -> None:
if flags:
- flags_list = [c for (c,n) in sorted(sre_parse36.FLAGS.items())
- if (n&flags)]
+ flags_list = [c for (c, n) in sorted(sre_parse36.FLAGS.items()) if (n & flags)]
flags_str = '(?%s)' % ''.join(flags_list)
self._output(flags_str, self.RE_FLAGS_TAG, state)
- def _colorize_re_tree(self, tree: Sequence[Tuple[sre_constants._NamedIntConstant, Any]],
- state: _ColorizerState, noparen: bool, groups: Dict[int, str]) -> None:
+ def _colorize_re_tree(
+ self,
+ tree: Sequence[Tuple[sre_constants._NamedIntConstant, Any]],
+ state: _ColorizerState,
+ noparen: bool,
+ groups: Dict[int, str],
+ ) -> None:
if len(tree) > 1 and not noparen:
self._output('(', self.RE_GROUP_TAG, state)
@@ -806,89 +840,107 @@ def _colorize_re_tree(self, tree: Sequence[Tuple[sre_constants._NamedIntConstant
op = elt[0]
args = elt[1]
- if op == sre_constants.LITERAL: #type:ignore[attr-defined]
+ if op == sre_constants.LITERAL: # type:ignore[attr-defined]
c = chr(cast(int, args))
# Add any appropriate escaping.
- if c in '.^$\\*+?{}[]|()\'':
+ if c in '.^$\\*+?{}[]|()\'':
c = '\\' + c
- elif c == '\t':
+ elif c == '\t':
c = r'\t'
- elif c == '\r':
+ elif c == '\r':
c = r'\r'
- elif c == '\n':
+ elif c == '\n':
c = r'\n'
- elif c == '\f':
+ elif c == '\f':
c = r'\f'
- elif c == '\v':
+ elif c == '\v':
c = r'\v'
# Keep unicode chars as is, so do nothing if ord(c) > 65535
- elif ord(c) > 255 and ord(c) <= 65535:
- c = rb'\u%04x' % ord(c) # type:ignore[assignment]
- elif (ord(c)<32 or ord(c)>=127) and ord(c) <= 65535:
- c = rb'\x%02x' % ord(c) # type:ignore[assignment]
+ elif ord(c) > 255 and ord(c) <= 65535:
+ c = rb'\u%04x' % ord(c) # type:ignore[assignment]
+ elif (ord(c) < 32 or ord(c) >= 127) and ord(c) <= 65535:
+ c = rb'\x%02x' % ord(c) # type:ignore[assignment]
self._output(c, self.RE_CHAR_TAG, state)
- elif op == sre_constants.ANY: #type:ignore[attr-defined]
+ elif op == sre_constants.ANY: # type:ignore[attr-defined]
self._output('.', self.RE_CHAR_TAG, state)
- elif op == sre_constants.BRANCH: #type:ignore[attr-defined]
+ elif op == sre_constants.BRANCH: # type:ignore[attr-defined]
if args[0] is not None:
- raise ValueError('Branch expected None arg but got %s'
- % args[0])
+ raise ValueError('Branch expected None arg but got %s' % args[0])
for i, item in enumerate(args[1]):
if i > 0:
self._output('|', self.RE_OP_TAG, state)
self._colorize_re_tree(item, state, True, groups)
- elif op == sre_constants.IN: #type:ignore[attr-defined]
- if (len(args) == 1 and args[0][0] == sre_constants.CATEGORY): #type:ignore[attr-defined]
+ elif op == sre_constants.IN: # type:ignore[attr-defined]
+ if len(args) == 1 and args[0][0] == sre_constants.CATEGORY: # type:ignore[attr-defined]
self._colorize_re_tree(args, state, False, groups)
else:
self._output('[', self.RE_GROUP_TAG, state)
self._colorize_re_tree(args, state, True, groups)
self._output(']', self.RE_GROUP_TAG, state)
- elif op == sre_constants.CATEGORY: #type:ignore[attr-defined]
- if args == sre_constants.CATEGORY_DIGIT: val = r'\d' #type:ignore[attr-defined]
- elif args == sre_constants.CATEGORY_NOT_DIGIT: val = r'\D' #type:ignore[attr-defined]
- elif args == sre_constants.CATEGORY_SPACE: val = r'\s' #type:ignore[attr-defined]
- elif args == sre_constants.CATEGORY_NOT_SPACE: val = r'\S' #type:ignore[attr-defined]
- elif args == sre_constants.CATEGORY_WORD: val = r'\w' #type:ignore[attr-defined]
- elif args == sre_constants.CATEGORY_NOT_WORD: val = r'\W' #type:ignore[attr-defined]
- else: raise ValueError('Unknown category %s' % args)
+ elif op == sre_constants.CATEGORY: # type:ignore[attr-defined]
+ if args == sre_constants.CATEGORY_DIGIT:
+ val = r'\d' # type:ignore[attr-defined]
+ elif args == sre_constants.CATEGORY_NOT_DIGIT:
+ val = r'\D' # type:ignore[attr-defined]
+ elif args == sre_constants.CATEGORY_SPACE:
+ val = r'\s' # type:ignore[attr-defined]
+ elif args == sre_constants.CATEGORY_NOT_SPACE:
+ val = r'\S' # type:ignore[attr-defined]
+ elif args == sre_constants.CATEGORY_WORD:
+ val = r'\w' # type:ignore[attr-defined]
+ elif args == sre_constants.CATEGORY_NOT_WORD:
+ val = r'\W' # type:ignore[attr-defined]
+ else:
+ raise ValueError('Unknown category %s' % args)
self._output(val, self.RE_CHAR_TAG, state)
- elif op == sre_constants.AT: #type:ignore[attr-defined]
- if args == sre_constants.AT_BEGINNING_STRING: val = r'\A' #type:ignore[attr-defined]
- elif args == sre_constants.AT_BEGINNING: val = '^' #type:ignore[attr-defined]
- elif args == sre_constants.AT_END: val = '$' #type:ignore[attr-defined]
- elif args == sre_constants.AT_BOUNDARY: val = r'\b' #type:ignore[attr-defined]
- elif args == sre_constants.AT_NON_BOUNDARY: val = r'\B' #type:ignore[attr-defined]
- elif args == sre_constants.AT_END_STRING: val = r'\Z' #type:ignore[attr-defined]
- else: raise ValueError('Unknown position %s' % args)
+ elif op == sre_constants.AT: # type:ignore[attr-defined]
+ if args == sre_constants.AT_BEGINNING_STRING:
+ val = r'\A' # type:ignore[attr-defined]
+ elif args == sre_constants.AT_BEGINNING:
+ val = '^' # type:ignore[attr-defined]
+ elif args == sre_constants.AT_END:
+ val = '$' # type:ignore[attr-defined]
+ elif args == sre_constants.AT_BOUNDARY:
+ val = r'\b' # type:ignore[attr-defined]
+ elif args == sre_constants.AT_NON_BOUNDARY:
+ val = r'\B' # type:ignore[attr-defined]
+ elif args == sre_constants.AT_END_STRING:
+ val = r'\Z' # type:ignore[attr-defined]
+ else:
+ raise ValueError('Unknown position %s' % args)
self._output(val, self.RE_CHAR_TAG, state)
- elif op in (sre_constants.MAX_REPEAT, sre_constants.MIN_REPEAT): #type:ignore[attr-defined]
+ elif op in (sre_constants.MAX_REPEAT, sre_constants.MIN_REPEAT): # type:ignore[attr-defined]
minrpt = args[0]
maxrpt = args[1]
if maxrpt == sre_constants.MAXREPEAT:
- if minrpt == 0: val = '*'
- elif minrpt == 1: val = '+'
- else: val = '{%d,}' % (minrpt)
+ if minrpt == 0:
+ val = '*'
+ elif minrpt == 1:
+ val = '+'
+ else:
+ val = '{%d,}' % (minrpt)
elif minrpt == 0:
- if maxrpt == 1: val = '?'
- else: val = '{,%d}' % (maxrpt)
+ if maxrpt == 1:
+ val = '?'
+ else:
+ val = '{,%d}' % (maxrpt)
elif minrpt == maxrpt:
val = '{%d}' % (maxrpt)
else:
val = '{%d,%d}' % (minrpt, maxrpt)
- if op == sre_constants.MIN_REPEAT: #type:ignore[attr-defined]
+ if op == sre_constants.MIN_REPEAT: # type:ignore[attr-defined]
val += '?'
self._colorize_re_tree(args[2], state, False, groups)
self._output(val, self.RE_OP_TAG, state)
- elif op == sre_constants.SUBPATTERN: #type:ignore[attr-defined]
+ elif op == sre_constants.SUBPATTERN: # type:ignore[attr-defined]
if args[0] is None:
self._output(r'(?:', self.RE_GROUP_TAG, state)
elif args[0] in groups:
@@ -905,20 +957,28 @@ def _colorize_re_tree(self, tree: Sequence[Tuple[sre_constants._NamedIntConstant
self._colorize_re_tree(args[3], state, True, groups)
self._output(')', self.RE_GROUP_TAG, state)
- elif op == sre_constants.GROUPREF: #type:ignore[attr-defined]
+ elif op == sre_constants.GROUPREF: # type:ignore[attr-defined]
self._output('\\%d' % args, self.RE_REF_TAG, state)
- elif op == sre_constants.RANGE: #type:ignore[attr-defined]
- self._colorize_re_tree( ((sre_constants.LITERAL, args[0]),), #type:ignore[attr-defined]
- state, False, groups )
+ elif op == sre_constants.RANGE: # type:ignore[attr-defined]
+ self._colorize_re_tree(
+ ((sre_constants.LITERAL, args[0]),), # type:ignore[attr-defined]
+ state,
+ False,
+ groups,
+ )
self._output('-', self.RE_OP_TAG, state)
- self._colorize_re_tree( ((sre_constants.LITERAL, args[1]),), #type:ignore[attr-defined]
- state, False, groups )
-
- elif op == sre_constants.NEGATE: #type:ignore[attr-defined]
+ self._colorize_re_tree(
+ ((sre_constants.LITERAL, args[1]),), # type:ignore[attr-defined]
+ state,
+ False,
+ groups,
+ )
+
+ elif op == sre_constants.NEGATE: # type:ignore[attr-defined]
self._output('^', self.RE_OP_TAG, state)
- elif op == sre_constants.ASSERT: #type:ignore[attr-defined]
+ elif op == sre_constants.ASSERT: # type:ignore[attr-defined]
if args[0] > 0:
self._output('(?=', self.RE_GROUP_TAG, state)
else:
@@ -926,7 +986,7 @@ def _colorize_re_tree(self, tree: Sequence[Tuple[sre_constants._NamedIntConstant
self._colorize_re_tree(args[1], state, True, groups)
self._output(')', self.RE_GROUP_TAG, state)
- elif op == sre_constants.ASSERT_NOT: #type:ignore[attr-defined]
+ elif op == sre_constants.ASSERT_NOT: # type:ignore[attr-defined]
if args[0] > 0:
self._output('(?!', self.RE_GROUP_TAG, state)
else:
@@ -934,22 +994,25 @@ def _colorize_re_tree(self, tree: Sequence[Tuple[sre_constants._NamedIntConstant
self._colorize_re_tree(args[1], state, True, groups)
self._output(')', self.RE_GROUP_TAG, state)
- elif op == sre_constants.NOT_LITERAL: #type:ignore[attr-defined]
+ elif op == sre_constants.NOT_LITERAL: # type:ignore[attr-defined]
self._output('[^', self.RE_GROUP_TAG, state)
- self._colorize_re_tree( ((sre_constants.LITERAL, args),), #type:ignore[attr-defined]
- state, False, groups )
+ self._colorize_re_tree(
+ ((sre_constants.LITERAL, args),), # type:ignore[attr-defined]
+ state,
+ False,
+ groups,
+ )
self._output(']', self.RE_GROUP_TAG, state)
else:
raise ValueError(f"Unsupported element :{elt}")
if len(tree) > 1 and not noparen:
self._output(')', self.RE_GROUP_TAG, state)
- #////////////////////////////////////////////////////////////
+ # ////////////////////////////////////////////////////////////
# Output function
- #////////////////////////////////////////////////////////////
+ # ////////////////////////////////////////////////////////////
- def _output(self, s: AnyStr, css_class: Optional[str],
- state: _ColorizerState, link: bool = False) -> None:
+ def _output(self, s: AnyStr, css_class: Optional[str], state: _ColorizerState, link: bool = False) -> None:
"""
Add the string C{s} to the result list, tagging its contents
with the specified C{css_class}. Any lines that go beyond L{PyvalColorizer.linelen} will
@@ -969,32 +1032,34 @@ def _output(self, s: AnyStr, css_class: Optional[str],
# If this isn't the first segment, then add a newline to
# split it from the previous segment.
if i > 0:
- if (state.lineno+1) > self.maxlines:
+ if (state.lineno + 1) > self.maxlines:
raise _Maxlines()
if not state.linebreakok:
raise _Linebreak()
state.result.append(self.NEWLINE)
state.lineno += 1
state.charpos = 0
-
- segment_len = len(segment)
+
+ segment_len = len(segment)
# If the segment fits on the current line, then just call
# markup to tag it, and store the result.
            # Don't break links or quotes into separate segments.
element: nodes.Node
- if (self.linelen is None or
- state.charpos + segment_len <= self.linelen
- or link is True
- or css_class in ('variable-quote',)):
+ if (
+ self.linelen is None
+ or state.charpos + segment_len <= self.linelen
+ or link is True
+ or css_class in ('variable-quote',)
+ ):
state.charpos += segment_len
if link is True:
- # Here, we bypass the linker if refmap contains the segment we're linking to.
+ # Here, we bypass the linker if refmap contains the segment we're linking to.
# The linker can be problematic because it has some design blind spots when the same name is declared in the imports and in the module body.
-
- # Note that the argument name is 'refuri', not 'refuid.
+
+                # Note that the argument name is 'refuri', not 'refuid'.
element = obj_reference('', segment, refuri=self.refmap.get(segment, segment))
elif css_class is not None:
element = nodes.inline('', segment, classes=[css_class])
@@ -1010,8 +1075,8 @@ def _output(self, s: AnyStr, css_class: Optional[str],
# next iteration through the loop.)
else:
assert isinstance(self.linelen, int)
- split = self.linelen-state.charpos
- segments.insert(i+1, segment[split:])
+ split = self.linelen - state.charpos
+ segments.insert(i + 1, segment[split:])
segment = segment[:split]
if css_class is not None:
diff --git a/pydoctor/epydoc/markup/_types.py b/pydoctor/epydoc/markup/_types.py
index 8e94243d6..56e6f873f 100644
--- a/pydoctor/epydoc/markup/_types.py
+++ b/pydoctor/epydoc/markup/_types.py
@@ -3,6 +3,7 @@
This module provides yet another L{ParsedDocstring} subclass.
"""
+
from __future__ import annotations
from typing import Any, Callable, Dict, List, Tuple, Union, cast
@@ -14,37 +15,37 @@
from docutils import nodes
from twisted.web.template import Tag, tags
+
class ParsedTypeDocstring(TypeDocstring, ParsedDocstring):
"""
- Add L{ParsedDocstring} interface on top of L{TypeDocstring} and
+    Adds the L{ParsedDocstring} interface on top of L{TypeDocstring} and
     allows parsing types from L{nodes.Node} objects, supporting the C{--process-types} option.
"""
FIELDS = ('type', 'rtype', 'ytype', 'returntype', 'yieldtype')
-
+
# yes this overrides the superclass type!
- _tokens: list[tuple[str | nodes.Node, TokenType]] # type: ignore
+ _tokens: list[tuple[str | nodes.Node, TokenType]] # type: ignore
- def __init__(self, annotation: Union[nodes.document, str],
- warns_on_unknown_tokens: bool = False, lineno: int = 0) -> None:
+ def __init__(
+ self, annotation: Union[nodes.document, str], warns_on_unknown_tokens: bool = False, lineno: int = 0
+ ) -> None:
ParsedDocstring.__init__(self, ())
if isinstance(annotation, nodes.document):
TypeDocstring.__init__(self, '', warns_on_unknown_tokens)
_tokens = self._tokenize_node_type_spec(annotation)
- self._tokens = cast('list[tuple[str | nodes.Node, TokenType]]',
- self._build_tokens(_tokens))
+ self._tokens = cast('list[tuple[str | nodes.Node, TokenType]]', self._build_tokens(_tokens))
self._trigger_warnings()
else:
TypeDocstring.__init__(self, annotation, warns_on_unknown_tokens)
-
-
+
# We need to store the line number because we need to pass it to DocstringLinker.link_xref
self._lineno = lineno
@property
def has_body(self) -> bool:
- return len(self._tokens)>0
+ return len(self._tokens) > 0
def to_node(self) -> nodes.document:
"""
@@ -54,25 +55,27 @@ def to_node(self) -> nodes.document:
def to_stan(self, docstring_linker: DocstringLinker) -> Tag:
"""
- Present the type as a stan tree.
+ Present the type as a stan tree.
"""
return self._convert_type_spec_to_stan(docstring_linker)
def _tokenize_node_type_spec(self, spec: nodes.document) -> List[Union[str, nodes.Node]]:
- def _warn_not_supported(n:nodes.Node) -> None:
- self.warnings.append(f"Unexpected element in type specification field: element '{n.__class__.__name__}'. "
- "This value should only contain text or inline markup.")
+ def _warn_not_supported(n: nodes.Node) -> None:
+ self.warnings.append(
+ f"Unexpected element in type specification field: element '{n.__class__.__name__}'. "
+ "This value should only contain text or inline markup."
+ )
tokens: List[Union[str, nodes.Node]] = []
# Determine if the content is nested inside a paragraph
        # This is generally the case, except for documents generated by consolidated fields.
if spec.children and isinstance(spec.children[0], nodes.paragraph):
- if len(spec.children)>1:
+ if len(spec.children) > 1:
_warn_not_supported(spec.children[1])
children = spec.children[0].children
else:
children = spec.children
-
+
for child in children:
if isinstance(child, nodes.Text):
# Tokenize the Text node with the same method TypeDocstring uses.
@@ -81,20 +84,21 @@ def _warn_not_supported(n:nodes.Node) -> None:
tokens.append(child)
else:
_warn_not_supported(child)
-
+
return tokens
- def _convert_obj_tokens_to_stan(self, tokens: List[Tuple[Any, TokenType]],
- docstring_linker: DocstringLinker) -> list[tuple[Any, TokenType]]:
+ def _convert_obj_tokens_to_stan(
+ self, tokens: List[Tuple[Any, TokenType]], docstring_linker: DocstringLinker
+ ) -> list[tuple[Any, TokenType]]:
"""
- Convert L{TokenType.OBJ} and PEP 484 like L{TokenType.DELIMITER} type to stan, merge them together. Leave the rest untouched.
+        Convert L{TokenType.OBJ} and PEP 484-like L{TokenType.DELIMITER} tokens to stan and merge them together. Leave the rest untouched.
        Example:
>>> tokens = [("list", TokenType.OBJ), ("(", TokenType.DELIMITER), ("int", TokenType.OBJ), (")", TokenType.DELIMITER)]
>>> ann._convert_obj_tokens_to_stan(tokens, NotFoundLinker())
... [(Tag('code', children=['list', '(', 'int', ')']), TokenType.OBJ)]
-
+
@param tokens: List of tuples: C{(token, type)}
"""
@@ -104,24 +108,24 @@ def _convert_obj_tokens_to_stan(self, tokens: List[Tuple[Any, TokenType]],
open_square_braces = 0
for _token, _type in tokens:
- # The actual type of_token is str | Tag | Node.
+            # The actual type of _token is str | Tag | Node.
- if (_type is TokenType.DELIMITER and _token in ('[', '(', ')', ']')) \
- or _type is TokenType.OBJ:
- if _token == "[": open_square_braces += 1
- elif _token == "(": open_parenthesis += 1
+ if (_type is TokenType.DELIMITER and _token in ('[', '(', ')', ']')) or _type is TokenType.OBJ:
+ if _token == "[":
+ open_square_braces += 1
+ elif _token == "(":
+ open_parenthesis += 1
if _type is TokenType.OBJ:
- _token = docstring_linker.link_xref(
- _token, _token, self._lineno)
+ _token = docstring_linker.link_xref(_token, _token, self._lineno)
if open_square_braces + open_parenthesis > 0:
- try: last_processed_token = combined_tokens[-1]
+ try:
+ last_processed_token = combined_tokens[-1]
except IndexError:
combined_tokens.append((_token, _type))
else:
- if last_processed_token[1] is TokenType.OBJ \
- and isinstance(last_processed_token[0], Tag):
+ if last_processed_token[1] is TokenType.OBJ and isinstance(last_processed_token[0], Tag):
# Merge with last Tag
if _type is TokenType.OBJ:
assert isinstance(_token, Tag)
@@ -132,9 +136,11 @@ def _convert_obj_tokens_to_stan(self, tokens: List[Tuple[Any, TokenType]],
combined_tokens.append((_token, _type))
else:
combined_tokens.append((_token, _type))
-
- if _token == "]": open_square_braces -= 1
- elif _token == ")": open_parenthesis -= 1
+
+ if _token == "]":
+ open_square_braces -= 1
+ elif _token == ")":
+ open_parenthesis -= 1
else:
# the token will be processed in _convert_type_spec_to_stan() method.
@@ -152,16 +158,24 @@ def _convert_type_spec_to_stan(self, docstring_linker: DocstringLinker) -> Tag:
warnings: List[ParseError] = []
converters: Dict[TokenType, Callable[[Union[str, Tag]], Union[str, Tag]]] = {
- TokenType.LITERAL: lambda _token: tags.span(_token, class_="literal"),
- TokenType.CONTROL: lambda _token: tags.em(_token),
- # We don't use safe_to_stan() here, if these converter functions raise an exception,
+ TokenType.LITERAL: lambda _token: tags.span(_token, class_="literal"),
+ TokenType.CONTROL: lambda _token: tags.em(_token),
+            # We don't use safe_to_stan() here; if these converter functions raise an exception,
# the whole type docstring will be rendered as plaintext.
            # It does not crash on invalid XML entities.
- TokenType.REFERENCE: lambda _token: get_parser_by_name('restructuredtext')(_token, warnings).to_stan(docstring_linker) if isinstance(_token, str) else _token,
- TokenType.UNKNOWN: lambda _token: get_parser_by_name('restructuredtext')(_token, warnings).to_stan(docstring_linker) if isinstance(_token, str) else _token,
- TokenType.OBJ: lambda _token: _token, # These convertions (OBJ and DELIMITER) are done in _convert_obj_tokens_to_stan().
- TokenType.DELIMITER: lambda _token: _token,
- TokenType.ANY: lambda _token: _token,
+ TokenType.REFERENCE: lambda _token: (
+ get_parser_by_name('restructuredtext')(_token, warnings).to_stan(docstring_linker)
+ if isinstance(_token, str)
+ else _token
+ ),
+ TokenType.UNKNOWN: lambda _token: (
+ get_parser_by_name('restructuredtext')(_token, warnings).to_stan(docstring_linker)
+ if isinstance(_token, str)
+ else _token
+ ),
+            TokenType.OBJ: lambda _token: _token,  # These conversions (OBJ and DELIMITER) are done in _convert_obj_tokens_to_stan().
+ TokenType.DELIMITER: lambda _token: _token,
+ TokenType.ANY: lambda _token: _token,
}
for w in warnings:
diff --git a/pydoctor/epydoc/markup/epytext.py b/pydoctor/epydoc/markup/epytext.py
index 414d8c75d..df10ffdb9 100644
--- a/pydoctor/epydoc/markup/epytext.py
+++ b/pydoctor/epydoc/markup/epytext.py
@@ -146,6 +146,7 @@
## Helper functions
##################################################
+
def gettext(node: Union[str, 'Element', List[Union[str, 'Element']]]) -> List[str]:
"""Return the text inside the epytext element(s)."""
filtered: List[str] = []
@@ -158,7 +159,8 @@ def gettext(node: Union[str, 'Element', List[Union[str, 'Element']]]) -> List[st
filtered.extend(gettext(node.children))
return filtered
-def slugify(string:str) -> str:
+
+def slugify(string: str) -> str:
    # zacharyvoase/slugify is licensed under The Unlicense
"""
A generic slugifier utility (currently only for Latin-based scripts).
@@ -166,18 +168,21 @@ def slugify(string:str) -> str:
>>> slugify("Héllo Wörld")
"hello-world"
"""
- return re.sub(r'[-\s]+', '-',
- re.sub(rb'[^\w\s-]', b'',
- unicodedata.normalize('NFKD', string)
- .encode('ascii', 'ignore'))
- .strip()
- .lower()
- .decode())
+ return re.sub(
+ r'[-\s]+',
+ '-',
+ re.sub(rb'[^\w\s-]', b'', unicodedata.normalize('NFKD', string).encode('ascii', 'ignore'))
+ .strip()
+ .lower()
+ .decode(),
+ )
+
##################################################
## DOM-Like Encoding
##################################################
+
class Element:
"""
A very simple DOM-like representation for parsed epytext
@@ -186,6 +191,7 @@ class Element:
node is marked by a L{tag} and zero or more attributes, L{attribs}. Each
attribute is a mapping from a string key to a string value.
"""
+
def __init__(self, tag: str, *children: Union[str, 'Element'], **attribs: Any):
self.tag = tag
"""A string tag indicating the type of this element."""
@@ -212,6 +218,7 @@ def __repr__(self) -> str:
args = ''.join(f', {c!r}' for c in self.children)
return f'Element({self.tag}{args}{attribs})'
+
##################################################
## Constants
##################################################
@@ -221,44 +228,124 @@ def __repr__(self) -> str:
_HEADING_CHARS = '=-~'
# Escape codes. These should be needed very rarely.
-_ESCAPES = {'lb':'{', 'rb': '}'}
+_ESCAPES = {'lb': '{', 'rb': '}'}
# Symbols. These can be generated via S{...} escapes.
SYMBOLS = [
# Arrows
- '<-', '->', '^', 'v',
-
+ '<-',
+ '->',
+ '^',
+ 'v',
# Greek letters
- 'alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta',
- 'eta', 'theta', 'iota', 'kappa', 'lambda', 'mu',
- 'nu', 'xi', 'omicron', 'pi', 'rho', 'sigma',
- 'tau', 'upsilon', 'phi', 'chi', 'psi', 'omega',
- 'Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon', 'Zeta',
- 'Eta', 'Theta', 'Iota', 'Kappa', 'Lambda', 'Mu',
- 'Nu', 'Xi', 'Omicron', 'Pi', 'Rho', 'Sigma',
- 'Tau', 'Upsilon', 'Phi', 'Chi', 'Psi', 'Omega',
-
+ 'alpha',
+ 'beta',
+ 'gamma',
+ 'delta',
+ 'epsilon',
+ 'zeta',
+ 'eta',
+ 'theta',
+ 'iota',
+ 'kappa',
+ 'lambda',
+ 'mu',
+ 'nu',
+ 'xi',
+ 'omicron',
+ 'pi',
+ 'rho',
+ 'sigma',
+ 'tau',
+ 'upsilon',
+ 'phi',
+ 'chi',
+ 'psi',
+ 'omega',
+ 'Alpha',
+ 'Beta',
+ 'Gamma',
+ 'Delta',
+ 'Epsilon',
+ 'Zeta',
+ 'Eta',
+ 'Theta',
+ 'Iota',
+ 'Kappa',
+ 'Lambda',
+ 'Mu',
+ 'Nu',
+ 'Xi',
+ 'Omicron',
+ 'Pi',
+ 'Rho',
+ 'Sigma',
+ 'Tau',
+ 'Upsilon',
+ 'Phi',
+ 'Chi',
+ 'Psi',
+ 'Omega',
# HTML character entities
- 'larr', 'rarr', 'uarr', 'darr', 'harr', 'crarr',
- 'lArr', 'rArr', 'uArr', 'dArr', 'hArr',
- 'copy', 'times', 'forall', 'exist', 'part',
- 'empty', 'isin', 'notin', 'ni', 'prod', 'sum',
- 'prop', 'infin', 'ang', 'and', 'or', 'cap', 'cup',
- 'int', 'there4', 'sim', 'cong', 'asymp', 'ne',
- 'equiv', 'le', 'ge', 'sub', 'sup', 'nsub',
- 'sube', 'supe', 'oplus', 'otimes', 'perp',
-
+ 'larr',
+ 'rarr',
+ 'uarr',
+ 'darr',
+ 'harr',
+ 'crarr',
+ 'lArr',
+ 'rArr',
+ 'uArr',
+ 'dArr',
+ 'hArr',
+ 'copy',
+ 'times',
+ 'forall',
+ 'exist',
+ 'part',
+ 'empty',
+ 'isin',
+ 'notin',
+ 'ni',
+ 'prod',
+ 'sum',
+ 'prop',
+ 'infin',
+ 'ang',
+ 'and',
+ 'or',
+ 'cap',
+ 'cup',
+ 'int',
+ 'there4',
+ 'sim',
+ 'cong',
+ 'asymp',
+ 'ne',
+ 'equiv',
+ 'le',
+ 'ge',
+ 'sub',
+ 'sup',
+ 'nsub',
+ 'sube',
+ 'supe',
+ 'oplus',
+ 'otimes',
+ 'perp',
# Alternate (long) names
- 'infinity', 'integral', 'product',
- '>=', '<=',
- ]
+ 'infinity',
+ 'integral',
+ 'product',
+ '>=',
+ '<=',
+]
# Convert to a set, for quick lookup
_SYMBOLS = set(SYMBOLS)
# Add symbols to the docstring.
symblist = ' '
-symblist += ';\n '.join(' - C{E{S}{%s}}=S{%s}' % (symbol, symbol)
- for symbol in SYMBOLS)
+symblist += ';\n '.join(' - C{E{S}{%s}}=S{%s}' % (symbol, symbol) for symbol in SYMBOLS)
__doc__ = __doc__.replace('<<>>', symblist)
del symblist
@@ -269,10 +356,10 @@ def __repr__(self) -> str:
'I': 'italic',
'B': 'bold',
'U': 'uri',
- 'L': 'link', # A Python identifier that should be linked to
- 'E': 'escape', # escapes characters or creates symbols
+ 'L': 'link', # A Python identifier that should be linked to
+ 'E': 'escape', # escapes characters or creates symbols
'S': 'symbol',
- }
+}
# Which tags can use "link syntax" (e.g., U{Python})?
_LINK_COLORIZING_TAGS = ['link', 'uri']
@@ -281,6 +368,7 @@ def __repr__(self) -> str:
## Structuring (Top Level)
##################################################
+
def parse(text: str, errors: List[ParseError]) -> Optional[Element]:
"""
Return a DOM tree encoding the contents of an epytext string. Any
@@ -296,7 +384,7 @@ def parse(text: str, errors: List[ParseError]) -> Optional[Element]:
accumulator was provided.
@raise ParseError: If C{errors} is C{None} and an error is
encountered while parsing.
- """
+ """
# Preprocess the string.
text = re.sub('\015\012', '\012', text)
text = text.expandtabs()
@@ -325,7 +413,7 @@ def parse(text: str, errors: List[ParseError]) -> Optional[Element]:
for token in tokens:
# Uncomment this for debugging:
- #print('%s: %s\n%s: %s\n' %
+ # print('%s: %s\n%s: %s\n' %
# (''.join('%-11s' % (t and t.tag) for t in stack),
# token.tag, ''.join('%-11s' % i for i in indent_stack),
# token.indent))
@@ -360,8 +448,7 @@ def parse(text: str, errors: List[ParseError]) -> Optional[Element]:
encountered_field = True
elif encountered_field:
if len(stack) <= 3:
- estr = ("Fields must be the final elements in an "+
- "epytext string.")
+ estr = "Fields must be the final elements in an " + "epytext string."
errors.append(StructuringError(estr, token.startline))
# If there was an error, then signal it!
@@ -373,11 +460,8 @@ def parse(text: str, errors: List[ParseError]) -> Optional[Element]:
# Return the top-level epytext DOM element.
return doc
-def _pop_completed_blocks(
- token: 'Token',
- stack: List[Element],
- indent_stack: List[Optional[int]]
- ) -> None:
+
+def _pop_completed_blocks(token: 'Token', stack: List[Element], indent_stack: List[Optional[int]]) -> None:
"""
Pop any completed blocks off the stack. This includes any
blocks that we have dedented past, as well as any list item
@@ -387,7 +471,7 @@ def _pop_completed_blocks(
"""
indent = token.indent
if indent is not None:
- while (len(stack) > 2):
+ while len(stack) > 2:
pop = False
# Dedent past a block
@@ -398,25 +482,23 @@ def _pop_completed_blocks(
# Dedent to a list item, if it is followed by another list
# item with the same indentation.
- elif (token.tag == 'bullet' and indent==indent_stack[-2] and
- stack[-1].tag in ('li', 'field')): pop = True
+ elif token.tag == 'bullet' and indent == indent_stack[-2] and stack[-1].tag in ('li', 'field'):
+ pop = True
# End of a list (no more list items available)
- elif (stack[-1].tag in ('ulist', 'olist') and
- (token.tag != 'bullet' or token.contents[-1] == ':')):
+ elif stack[-1].tag in ('ulist', 'olist') and (token.tag != 'bullet' or token.contents[-1] == ':'):
pop = True
# Pop the block, if it's complete. Otherwise, we're done.
- if not pop: return
+ if not pop:
+ return
stack.pop()
indent_stack.pop()
+
def _add_para(
- para_token: 'Token',
- stack: List[Element],
- indent_stack: List[Optional[int]],
- errors: List[ParseError]
- ) -> None:
+ para_token: 'Token', stack: List[Element], indent_stack: List[Optional[int]], errors: List[ParseError]
+) -> None:
"""Colorize the given paragraph, and add it to the DOM tree."""
# Check indentation, and update the parent's indentation
# when appropriate.
@@ -430,12 +512,10 @@ def _add_para(
estr = "Improper paragraph indentation."
errors.append(StructuringError(estr, para_token.startline))
+
def _add_section(
- heading_token: 'Token',
- stack: List[Element],
- indent_stack: List[Optional[int]],
- errors: List[ParseError]
- ) -> None:
+ heading_token: 'Token', stack: List[Element], indent_stack: List[Optional[int]], errors: List[ParseError]
+) -> None:
"""Add a new section to the DOM tree, with the given heading."""
if indent_stack[-1] is None:
indent_stack[-1] = heading_token.indent
@@ -469,12 +549,10 @@ def _add_section(
sec.children.append(head)
indent_stack.append(None)
+
def _add_list(
- bullet_token: 'Token',
- stack: List[Element],
- indent_stack: List[Optional[int]],
- errors: List[ParseError]
- ) -> None:
+ bullet_token: 'Token', stack: List[Element], indent_stack: List[Optional[int]], errors: List[ParseError]
+) -> None:
"""
Add a new list item or field to the DOM tree, with the given
bullet or field tag. When necessary, create the associated
@@ -498,8 +576,7 @@ def _add_list(
old_listitem = cast(Element, stack[-1].children[-1])
old_bullet = old_listitem.attribs['bullet'].split('.')[:-1]
new_bullet = bullet_token.contents.split('.')[:-1]
- if (new_bullet[:-1] != old_bullet[:-1] or
- int(new_bullet[-1]) != int(old_bullet[-1])+1):
+ if new_bullet[:-1] != old_bullet[:-1] or int(new_bullet[-1]) != int(old_bullet[-1]) + 1:
newlist = True
# Create the new list.
@@ -517,8 +594,7 @@ def _add_list(
stack.pop()
indent_stack.pop()
- if (list_type != 'fieldlist' and indent_stack[-1] is not None and
- bullet_token.indent == indent_stack[-1]):
+ if list_type != 'fieldlist' and indent_stack[-1] is not None and bullet_token.indent == indent_stack[-1]:
# Ignore this error if there's text on the same line as
# the comment-opening quote -- epydoc can't reliably
# determine the indentation for that line.
@@ -531,8 +607,7 @@ def _add_list(
for tok in stack[2:]:
if tok.tag != 'section':
estr = "Fields must be at the top level."
- errors.append(
- StructuringError(estr, bullet_token.startline))
+ errors.append(StructuringError(estr, bullet_token.startline))
break
stack[2:] = []
indent_stack[2:] = []
@@ -572,10 +647,12 @@ def _add_list(
stack.append(li)
indent_stack.append(None)
+
##################################################
## Tokenization
##################################################
+
class Token:
"""
C{Token}s are an intermediate data structure used while
@@ -636,6 +713,7 @@ class Token:
value is also used for field tag C{Token}s, since fields
function syntactically the same as list items.
"""
+
# The possible token types.
PARA = 'para'
LBLOCK = 'literalblock'
@@ -643,13 +721,7 @@ class Token:
HEADING = 'heading'
BULLET = 'bullet'
- def __init__(self,
- tag: str,
- startline: int,
- contents: str,
- indent: Optional[int],
- level: Optional[int] = None
- ):
+ def __init__(self, tag: str, startline: int, contents: str, indent: Optional[int], level: Optional[int] = None):
"""
Create a new C{Token}.
@@ -685,26 +757,22 @@ def to_dom(self) -> Element:
e.children.append(self.contents)
return e
+
# Construct regular expressions for recognizing bullets. These are
# global so they don't have to be reconstructed each time we tokenize
# a docstring.
_ULIST_BULLET = r'[-]( +|$)'
_OLIST_BULLET = r'(\d+[.])+( +|$)'
_FIELD_BULLET = r'@\w+( [^{}:\n]+)?:'
-_BULLET_RE = re.compile(_ULIST_BULLET + '|' +
- _OLIST_BULLET + '|' +
- _FIELD_BULLET)
+_BULLET_RE = re.compile(_ULIST_BULLET + '|' + _OLIST_BULLET + '|' + _FIELD_BULLET)
_LIST_BULLET_RE = re.compile(_ULIST_BULLET + '|' + _OLIST_BULLET)
_FIELD_BULLET_RE = re.compile(_FIELD_BULLET)
del _ULIST_BULLET, _OLIST_BULLET, _FIELD_BULLET
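For reference, the three patterns combined into _BULLET_RE accept unordered bullets, dotted ordinal bullets, and @field bullets. A standalone sketch (the regex source is copied verbatim from above; the sample strings are illustrative):

    import re

    _BULLET = re.compile(r'[-]( +|$)' '|' r'(\d+[.])+( +|$)' '|' r'@\w+( [^{}:\n]+)?:')
    assert _BULLET.match('- unordered item')
    assert _BULLET.match('1.2. ordered item')
    assert _BULLET.match('@param name: a field')
    assert _BULLET.match('plain prose') is None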
+
def _tokenize_doctest(
- lines: List[str],
- start: int,
- block_indent: int,
- tokens: List[Token],
- errors: List[ParseError]
- ) -> int:
+ lines: List[str], start: int, block_indent: int, tokens: List[Token], errors: List[ParseError]
+) -> int:
"""
Construct a L{Token} containing the doctest block starting at
C{lines[start]}, and append it to C{tokens}. C{block_indent}
@@ -735,7 +803,8 @@ def _tokenize_doctest(
indent = len(line) - len(line.lstrip())
# A blank line ends doctest block.
- if indent == len(line): break
+ if indent == len(line):
+ break
# A Dedent past block_indent is an error.
if indent < block_indent:
@@ -751,13 +820,10 @@ def _tokenize_doctest(
tokens.append(Token(Token.DTBLOCK, start, contents, block_indent))
return linenum
+
def _tokenize_literal(
- lines: List[str],
- start: int,
- block_indent: int,
- tokens: List[Token],
- errors: List[ParseError]
- ) -> int:
+ lines: List[str], start: int, block_indent: int, tokens: List[Token], errors: List[ParseError]
+) -> int:
"""
Construct a L{Token} containing the literal block starting at
C{lines[start]}, and append it to C{tokens}. C{block_indent}
@@ -796,13 +862,10 @@ def _tokenize_literal(
tokens.append(Token(Token.LBLOCK, start, contents, block_indent))
return linenum
+
def _tokenize_listart(
- lines: List[str],
- start: int,
- bullet_indent: int,
- tokens: List[Token],
- errors: List[ParseError]
- ) -> int:
+ lines: List[str], start: int, bullet_indent: int, tokens: List[Token], errors: List[ParseError]
+) -> int:
"""
Construct L{Token}s for the bullet and the first paragraph of the
list item (or field) starting at C{lines[start]}, and append them
@@ -829,7 +892,7 @@ def _tokenize_listart(
match = _BULLET_RE.match(lines[start], bullet_indent)
assert match is not None
para_start = match.end()
- bcontents = lines[start][bullet_indent : para_start].strip()
+ bcontents = lines[start][bullet_indent:para_start].strip()
while linenum < len(lines):
# Find the indentation of this line.
@@ -837,24 +900,31 @@ def _tokenize_listart(
indent = len(line) - len(line.lstrip())
# "::" markers end paragraphs.
- if doublecolon: break
- if line.rstrip()[-2:] == '::': doublecolon = True
+ if doublecolon:
+ break
+ if line.rstrip()[-2:] == '::':
+ doublecolon = True
# A blank line ends the token
- if indent == len(line): break
+ if indent == len(line):
+ break
# Dedenting past bullet_indent ends the list item.
- if indent < bullet_indent: break
+ if indent < bullet_indent:
+ break
# A line beginning with a bullet ends the token.
- if _BULLET_RE.match(line, indent): break
+ if _BULLET_RE.match(line, indent):
+ break
# If this is the second line, set the paragraph indentation, or
# end the token, as appropriate.
- if para_indent is None: para_indent = indent
+ if para_indent is None:
+ para_indent = indent
# A change in indentation ends the token
- if indent != para_indent: break
+ if indent != para_indent:
+ break
# Go on to the next line.
linenum += 1
@@ -864,22 +934,18 @@ def _tokenize_listart(
# Add the paragraph token.
pcontents = ' '.join(
- [lines[start][para_start:].strip()] +
- [ln.strip() for ln in lines[start+1:linenum]]
- ).strip()
+ [lines[start][para_start:].strip()] + [ln.strip() for ln in lines[start + 1 : linenum]]
+ ).strip()
if pcontents:
tokens.append(Token(Token.PARA, start, pcontents, para_indent))
# Return the linenum after the paragraph token ends.
return linenum
+
def _tokenize_para(
- lines: List[str],
- start: int,
- para_indent: int,
- tokens: List[Token],
- errors: List[ParseError]
- ) -> int:
+ lines: List[str], start: int, para_indent: int, tokens: List[Token], errors: List[ParseError]
+) -> int:
"""
Construct a L{Token} containing the paragraph starting at
C{lines[start]}, and append it to C{tokens}. C{para_indent}
@@ -906,17 +972,22 @@ def _tokenize_para(
indent = len(line) - len(line.lstrip())
# "::" markers end paragraphs.
- if doublecolon: break
- if line.rstrip()[-2:] == '::': doublecolon = True
+ if doublecolon:
+ break
+ if line.rstrip()[-2:] == '::':
+ doublecolon = True
# Blank lines end paragraphs
- if indent == len(line): break
+ if indent == len(line):
+ break
# Indentation changes end paragraphs
- if indent != para_indent: break
+ if indent != para_indent:
+ break
# List bullets end paragraphs
- if _BULLET_RE.match(line, indent): break
+ if _BULLET_RE.match(line, indent):
+ break
# Check for mal-formatted field items.
if line[indent] == '@':
@@ -929,9 +1000,7 @@ def _tokenize_para(
contents = [ln.strip() for ln in lines[start:linenum]]
# Does this token look like a heading?
- if ((len(contents) < 2) or
- (contents[1][0] not in _HEADING_CHARS) or
- (abs(len(contents[0])-len(contents[1])) > 5)):
+ if (len(contents) < 2) or (contents[1][0] not in _HEADING_CHARS) or (abs(len(contents[0]) - len(contents[1])) > 5):
looks_like_heading = False
else:
looks_like_heading = True
@@ -942,20 +1011,22 @@ def _tokenize_para(
if looks_like_heading:
if len(contents[0]) != len(contents[1]):
- estr = ("Possible heading typo: the number of "+
- "underline characters must match the "+
- "number of heading characters.")
+ estr = (
+ "Possible heading typo: the number of "
+ + "underline characters must match the "
+ + "number of heading characters."
+ )
errors.append(TokenizationError(estr, start, is_fatal=False))
else:
level = _HEADING_CHARS.index(contents[1][0])
- tokens.append(Token(Token.HEADING, start,
- contents[0], para_indent, level))
- return start+2
+ tokens.append(Token(Token.HEADING, start, contents[0], para_indent, level))
+ return start + 2
# Add the paragraph token, and return the linenum after it ends.
tokens.append(Token(Token.PARA, start, ' '.join(contents), para_indent))
return linenum
+
def _tokenize(text: str, errors: List[ParseError]) -> List[Token]:
"""
Split a given formatted docstring into an ordered list of
@@ -976,20 +1047,18 @@ def _tokenize(text: str, errors: List[ParseError]) -> List[Token]:
while linenum < len(lines):
# Get the current line and its indentation.
line = lines[linenum]
- indent = len(line)-len(line.lstrip())
+ indent = len(line) - len(line.lstrip())
if indent == len(line):
# Ignore blank lines.
linenum += 1
continue
- elif line[indent:indent+4] == '>>> ':
+ elif line[indent : indent + 4] == '>>> ':
# blocks starting with ">>> " are doctest block tokens.
- linenum = _tokenize_doctest(lines, linenum, indent,
- tokens, errors)
+ linenum = _tokenize_doctest(lines, linenum, indent, tokens, errors)
elif _BULLET_RE.match(line, indent):
# blocks starting with a bullet are LI start tokens.
- linenum = _tokenize_listart(lines, linenum, indent,
- tokens, errors)
+ linenum = _tokenize_listart(lines, linenum, indent, tokens, errors)
if tokens[-1].indent is not None:
indent = tokens[-1].indent
else:
@@ -1002,8 +1071,7 @@ def _tokenize(text: str, errors: List[ParseError]) -> List[Token]:
linenum = _tokenize_para(lines, linenum, indent, tokens, errors)
# Paragraph tokens ending in '::' initiate literal blocks.
- if (tokens[-1].tag == Token.PARA and
- tokens[-1].contents[-2:] == '::'):
+ if tokens[-1].tag == Token.PARA and tokens[-1].contents[-2:] == '::':
tokens[-1].contents = tokens[-1].contents[:-1]
linenum = _tokenize_literal(lines, linenum, indent, tokens, errors)
@@ -1018,6 +1086,7 @@ def _tokenize(text: str, errors: List[ParseError]) -> List[Token]:
_BRACE_RE = re.compile(r'{|}')
_TARGET_RE = re.compile(r'^(.*?)\s*<(?:URI:|URL:)?([^<>]+)>$')
+
def _colorize(token: Token, errors: List[ParseError], tagName: str = 'para') -> Element:
"""
Given a string containing the contents of a paragraph, produce a
@@ -1056,7 +1125,8 @@ def _colorize(token: Token, errors: List[ParseError], tagName: str = 'para') ->
start = 0
while 1:
match = _BRACE_RE.search(text, start)
- if match is None: break
+ if match is None:
+ break
end = match.start()
# Open braces start new colorizing elements. When preceded
@@ -1066,15 +1136,15 @@ def _colorize(token: Token, errors: List[ParseError], tagName: str = 'para') ->
# and convert them to literal braces once we find the matching
# close-brace.
if match.group() == '{':
- if (end>0) and 'A' <= text[end-1] <= 'Z':
- if (end-1) > start:
- stack[-1].children.append(text[start:end-1])
- if text[end-1] not in _COLORIZING_TAGS:
+ if (end > 0) and 'A' <= text[end - 1] <= 'Z':
+ if (end - 1) > start:
+ stack[-1].children.append(text[start : end - 1])
+ if text[end - 1] not in _COLORIZING_TAGS:
estr = "Unknown inline markup tag."
- errors.append(ColorizingError(estr, token, end-1))
+ errors.append(ColorizingError(estr, token, end - 1))
stack.append(Element('unknown'))
else:
- tag = _COLORIZING_TAGS[text[end-1]]
+ tag = _COLORIZING_TAGS[text[end - 1]]
stack.append(Element(tag))
else:
if end > start:
@@ -1098,8 +1168,7 @@ def _colorize(token: Token, errors: List[ParseError], tagName: str = 'para') ->
# Special handling for symbols:
if stack[-1].tag == 'symbol':
- if (len(stack[-1].children) != 1 or
- not isinstance(stack[-1].children[0], str)):
+ if len(stack[-1].children) != 1 or not isinstance(stack[-1].children[0], str):
estr = "Invalid symbol code."
errors.append(ColorizingError(estr, token, end))
else:
@@ -1113,8 +1182,7 @@ def _colorize(token: Token, errors: List[ParseError], tagName: str = 'para') ->
# Special handling for escape elements:
if stack[-1].tag == 'escape':
- if (len(stack[-1].children) != 1 or
- not isinstance(stack[-1].children[0], str)):
+ if len(stack[-1].children) != 1 or not isinstance(stack[-1].children[0], str):
estr = "Invalid escape code."
errors.append(ColorizingError(estr, token, end))
else:
@@ -1141,7 +1209,7 @@ def _colorize(token: Token, errors: List[ParseError], tagName: str = 'para') ->
openbrace_stack.pop()
stack.pop()
- start = end+1
+ start = end + 1
# Add any final text.
if start < len(text):
@@ -1153,11 +1221,12 @@ def _colorize(token: Token, errors: List[ParseError], tagName: str = 'para') ->
return stack[0]
+
def _colorize_link(link: Element, token: Token, end: int, errors: List[ParseError]) -> None:
variables = link.children[:]
# If the last child isn't text, we know it's bad.
- if len(variables)==0 or not isinstance(variables[-1], str):
+ if len(variables) == 0 or not isinstance(variables[-1], str):
estr = f"Bad {link.tag} target."
errors.append(ColorizingError(estr, token, end))
return
@@ -1181,13 +1250,13 @@ def _colorize_link(link: Element, token: Token, end: int, errors: List[ParseErro
# Clean up the target. For URIs, assume http or mailto if they
# don't specify (no relative urls)
target = re.sub(r'\s', '', target)
- if link.tag=='uri':
+ if link.tag == 'uri':
if not re.match(r'\w+:', target):
if re.match(r'\w+@(\w+)(\.\w+)*', target):
target = 'mailto:' + target
else:
- target = 'http://'+target
- elif link.tag=='link':
+ target = 'http://' + target
+ elif link.tag == 'link':
# Remove arg lists for functions (e.g., L{_colorize_link()})
target = re.sub(r'\(.*\)$', '', target)
if not re.match(r'^[a-zA-Z_]\w*(\.[a-zA-Z_]\w*)*$', target):
@@ -1201,26 +1270,31 @@ def _colorize_link(link: Element, token: Token, end: int, errors: List[ParseErro
# Add them to the link element.
link.children = [name_elt, target_elt]
+
##################################################
## Parse Errors
##################################################
+
class TokenizationError(ParseError):
"""
An error generated while tokenizing a formatted documentation
string.
"""
+
class StructuringError(ParseError):
"""
An error generated while structuring a formatted documentation
string.
"""
+
class ColorizingError(ParseError):
"""
An error generated while colorizing a paragraph.
"""
+
def __init__(self, descr: str, token: Token, charnum: int, is_fatal: bool = True):
"""
Construct a new colorizing exception.
@@ -1235,23 +1309,25 @@ def __init__(self, descr: str, token: Token, charnum: int, is_fatal: bool = True
self.charnum = charnum
CONTEXT_RANGE = 20
+
def descr(self) -> str:
RANGE = self.CONTEXT_RANGE
if self.charnum <= RANGE:
- left = self.token.contents[0:self.charnum]
+ left = self.token.contents[0 : self.charnum]
else:
- left = '...'+self.token.contents[self.charnum-RANGE:self.charnum]
- if (len(self.token.contents)-self.charnum) <= RANGE:
- right = self.token.contents[self.charnum:]
+ left = '...' + self.token.contents[self.charnum - RANGE : self.charnum]
+ if (len(self.token.contents) - self.charnum) <= RANGE:
+ right = self.token.contents[self.charnum :]
else:
- right = (self.token.contents[self.charnum:self.charnum+RANGE]
- + '...')
+ right = self.token.contents[self.charnum : self.charnum + RANGE] + '...'
return f"{self._descr}\n\n{left}{right}\n{' '*len(left)}^"
+
#################################################################
## SUPPORT FOR EPYDOC
#################################################################
+
def parse_docstring(docstring: str, errors: List[ParseError]) -> ParsedDocstring:
"""
Parse the given docstring, which is formatted using epytext; and
@@ -1279,8 +1355,7 @@ def parse_docstring(docstring: str, errors: List[ParseError]) -> ParsedDocstring
# Get the argument.
if field.children and cast(Element, field.children[0]).tag == 'arg':
- arg: Optional[str] = \
- cast(str, cast(Element, field.children.pop(0)).children[0])
+ arg: Optional[str] = cast(str, cast(Element, field.children.pop(0)).children[0])
else:
arg = None
@@ -1296,57 +1371,124 @@ def parse_docstring(docstring: str, errors: List[ParseError]) -> ParsedDocstring
else:
return ParsedEpytextDocstring(None, fields)
+
def get_parser(_: ObjClass | None) -> ParserFunction:
"""
- Get the L{parse_docstring} function.
+ Get the L{parse_docstring} function.
"""
return parse_docstring
+
class ParsedEpytextDocstring(ParsedDocstring):
SYMBOL_TO_CODEPOINT = {
# Symbols
- '<-': 8592, '->': 8594, '^': 8593, 'v': 8595,
-
+ '<-': 8592,
+ '->': 8594,
+ '^': 8593,
+ 'v': 8595,
# Greek letters
- 'alpha': 945, 'beta': 946, 'gamma': 947,
- 'delta': 948, 'epsilon': 949, 'zeta': 950,
- 'eta': 951, 'theta': 952, 'iota': 953,
- 'kappa': 954, 'lambda': 955, 'mu': 956,
- 'nu': 957, 'xi': 958, 'omicron': 959,
- 'pi': 960, 'rho': 961, 'sigma': 963,
- 'tau': 964, 'upsilon': 965, 'phi': 966,
- 'chi': 967, 'psi': 968, 'omega': 969,
- 'Alpha': 913, 'Beta': 914, 'Gamma': 915,
- 'Delta': 916, 'Epsilon': 917, 'Zeta': 918,
- 'Eta': 919, 'Theta': 920, 'Iota': 921,
- 'Kappa': 922, 'Lambda': 923, 'Mu': 924,
- 'Nu': 925, 'Xi': 926, 'Omicron': 927,
- 'Pi': 928, 'Rho': 929, 'Sigma': 931,
- 'Tau': 932, 'Upsilon': 933, 'Phi': 934,
- 'Chi': 935, 'Psi': 936, 'Omega': 937,
-
+ 'alpha': 945,
+ 'beta': 946,
+ 'gamma': 947,
+ 'delta': 948,
+ 'epsilon': 949,
+ 'zeta': 950,
+ 'eta': 951,
+ 'theta': 952,
+ 'iota': 953,
+ 'kappa': 954,
+ 'lambda': 955,
+ 'mu': 956,
+ 'nu': 957,
+ 'xi': 958,
+ 'omicron': 959,
+ 'pi': 960,
+ 'rho': 961,
+ 'sigma': 963,
+ 'tau': 964,
+ 'upsilon': 965,
+ 'phi': 966,
+ 'chi': 967,
+ 'psi': 968,
+ 'omega': 969,
+ 'Alpha': 913,
+ 'Beta': 914,
+ 'Gamma': 915,
+ 'Delta': 916,
+ 'Epsilon': 917,
+ 'Zeta': 918,
+ 'Eta': 919,
+ 'Theta': 920,
+ 'Iota': 921,
+ 'Kappa': 922,
+ 'Lambda': 923,
+ 'Mu': 924,
+ 'Nu': 925,
+ 'Xi': 926,
+ 'Omicron': 927,
+ 'Pi': 928,
+ 'Rho': 929,
+ 'Sigma': 931,
+ 'Tau': 932,
+ 'Upsilon': 933,
+ 'Phi': 934,
+ 'Chi': 935,
+ 'Psi': 936,
+ 'Omega': 937,
# HTML character entities
- 'larr': 8592, 'rarr': 8594, 'uarr': 8593,
- 'darr': 8595, 'harr': 8596, 'crarr': 8629,
- 'lArr': 8656, 'rArr': 8658, 'uArr': 8657,
- 'dArr': 8659, 'hArr': 8660,
- 'copy': 169, 'times': 215, 'forall': 8704,
- 'exist': 8707, 'part': 8706,
- 'empty': 8709, 'isin': 8712, 'notin': 8713,
- 'ni': 8715, 'prod': 8719, 'sum': 8721,
- 'prop': 8733, 'infin': 8734, 'ang': 8736,
- 'and': 8743, 'or': 8744, 'cap': 8745, 'cup': 8746,
- 'int': 8747, 'there4': 8756, 'sim': 8764,
- 'cong': 8773, 'asymp': 8776, 'ne': 8800,
- 'equiv': 8801, 'le': 8804, 'ge': 8805,
- 'sub': 8834, 'sup': 8835, 'nsub': 8836,
- 'sube': 8838, 'supe': 8839, 'oplus': 8853,
- 'otimes': 8855, 'perp': 8869,
-
+ 'larr': 8592,
+ 'rarr': 8594,
+ 'uarr': 8593,
+ 'darr': 8595,
+ 'harr': 8596,
+ 'crarr': 8629,
+ 'lArr': 8656,
+ 'rArr': 8658,
+ 'uArr': 8657,
+ 'dArr': 8659,
+ 'hArr': 8660,
+ 'copy': 169,
+ 'times': 215,
+ 'forall': 8704,
+ 'exist': 8707,
+ 'part': 8706,
+ 'empty': 8709,
+ 'isin': 8712,
+ 'notin': 8713,
+ 'ni': 8715,
+ 'prod': 8719,
+ 'sum': 8721,
+ 'prop': 8733,
+ 'infin': 8734,
+ 'ang': 8736,
+ 'and': 8743,
+ 'or': 8744,
+ 'cap': 8745,
+ 'cup': 8746,
+ 'int': 8747,
+ 'there4': 8756,
+ 'sim': 8764,
+ 'cong': 8773,
+ 'asymp': 8776,
+ 'ne': 8800,
+ 'equiv': 8801,
+ 'le': 8804,
+ 'ge': 8805,
+ 'sub': 8834,
+ 'sup': 8835,
+ 'nsub': 8836,
+ 'sube': 8838,
+ 'supe': 8839,
+ 'oplus': 8853,
+ 'otimes': 8855,
+ 'perp': 8869,
# Alternate (long) names
- 'infinity': 8734, 'integral': 8747, 'product': 8719,
- '<=': 8804, '>=': 8805,
- }
+ 'infinity': 8734,
+ 'integral': 8747,
+ 'product': 8719,
+ '<=': 8804,
+ '>=': 8805,
+ }
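The values in SYMBOL_TO_CODEPOINT are plain Unicode codepoints, so rendering a symbol is just chr(). A minimal sanity sketch:

    # 8594 is U+2192 (rightwards arrow); 8734 is U+221E (infinity).
    assert chr(8594) == '\u2192'
    assert chr(8734) == '\u221e'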
def __init__(self, body: Optional[Element], fields: Sequence['Field']):
ParsedDocstring.__init__(self, fields)
@@ -1363,14 +1505,14 @@ def __str__(self) -> str:
def has_body(self) -> bool:
return self._tree is not None
- def _slugify(self, text:str) -> str:
- # Takes special care to ensure we don't generate
+ def _slugify(self, text: str) -> str:
+ # Takes special care to ensure we don't generate
# the same ID twice for sections.
s = slugify(text)
i = 1
while s in self._section_slugs:
s = slugify(f"{text}-{i}")
- i+=1
+ i += 1
self._section_slugs.add(s)
return s
@@ -1382,15 +1524,15 @@ def to_node(self) -> nodes.document:
self._document = new_document('epytext')
if self._tree is not None:
- node, = self._to_node(self._tree)
- # The contents is encapsulated inside a section node.
- # Reparent the contents of the second level to the root level.
+ (node,) = self._to_node(self._tree)
+ # The contents are encapsulated inside a section node.
+ # Reparent the contents of the second level to the root level.
self._document = set_node_attributes(self._document, children=node.children)
-
+
return self._document
-
+
def _to_node(self, tree: Element) -> Iterable[nodes.Node]:
-
+
# Process the children first.
variables: List[nodes.Node] = []
for child in tree.children:
@@ -1408,22 +1550,27 @@ def _to_node(self, tree: Element) -> Iterable[nodes.Node]:
yield set_node_attributes(nodes.literal('', ''), document=self._document, children=variables)
elif tree.tag == 'uri':
label, target = variables
- yield set_node_attributes(nodes.reference(
- '', internal=False, refuri=target), document=self._document, children=label.children)
+ yield set_node_attributes(
+ nodes.reference('', internal=False, refuri=target), document=self._document, children=label.children
+ )
elif tree.tag == 'link':
label, target = variables
assert isinstance(target, nodes.Text)
assert isinstance(label, nodes.inline)
- # Figure the line number to warn on precise lines.
+ # Figure the line number to warn on precise lines.
# This is needed only for links currently.
lineno = int(cast(Element, tree.children[1]).attribs['lineno'])
- yield set_node_attributes(nodes.title_reference(
- '', '', refuri=target.astext()), document=self._document, lineno=lineno, children=label.children)
- elif tree.tag == 'name':
+ yield set_node_attributes(
+ nodes.title_reference('', '', refuri=target.astext()),
+ document=self._document,
+ lineno=lineno,
+ children=label.children,
+ )
+ elif tree.tag == 'name':
# name can contain nested inline markup, so we use nodes.inline instead of nodes.Text
yield set_node_attributes(nodes.inline('', ''), document=self._document, children=variables)
elif tree.tag == 'target':
- value, = variables
+ (value,) = variables
if not isinstance(value, nodes.Text):
raise AssertionError("target contents must be a simple text.")
yield set_node_attributes(value, document=self._document)
@@ -1446,15 +1593,18 @@ def _to_node(self, tree: Element) -> Iterable[nodes.Node]:
elif tree.tag == 'literalblock':
yield set_node_attributes(nodes.literal_block('', ''), document=self._document, children=variables)
elif tree.tag == 'doctestblock':
- if not isinstance(contents:=tree.children[0], str):
+ if not isinstance(contents := tree.children[0], str):
raise AssertionError("doctest block contents is not a string")
yield set_node_attributes(nodes.doctest_block(contents, contents), document=self._document)
elif tree.tag in ('fieldlist', 'tag', 'arg'):
raise AssertionError("There should not be any field lists left")
elif tree.tag == 'section':
- assert len(tree.children)>0, f"empty section {tree}"
- yield set_node_attributes(nodes.section('', ids=[self._slugify(' '.join(gettext(tree.children[0])))]),
- document=self._document, children=variables)
+ assert len(tree.children) > 0, f"empty section {tree}"
+ yield set_node_attributes(
+ nodes.section('', ids=[self._slugify(' '.join(gettext(tree.children[0])))]),
+ document=self._document,
+ children=variables,
+ )
elif tree.tag == 'epytext':
yield set_node_attributes(nodes.section(''), document=self._document, children=variables)
elif tree.tag == 'symbol':
diff --git a/pydoctor/epydoc/markup/google.py b/pydoctor/epydoc/markup/google.py
index 41a55a438..7eb61474d 100644
--- a/pydoctor/epydoc/markup/google.py
+++ b/pydoctor/epydoc/markup/google.py
@@ -4,6 +4,7 @@
@See: L{pydoctor.epydoc.markup.numpy}
@See: L{pydoctor.epydoc.markup._napoleon}
"""
+
from __future__ import annotations
from pydoctor.epydoc.markup import ObjClass, ParserFunction
diff --git a/pydoctor/epydoc/markup/numpy.py b/pydoctor/epydoc/markup/numpy.py
index 6a9fa4001..757f8e1f4 100644
--- a/pydoctor/epydoc/markup/numpy.py
+++ b/pydoctor/epydoc/markup/numpy.py
@@ -4,6 +4,7 @@
@See: L{pydoctor.epydoc.markup.google}
@See: L{pydoctor.epydoc.markup._napoleon}
"""
+
from __future__ import annotations
from pydoctor.epydoc.markup import ObjClass, ParserFunction
diff --git a/pydoctor/epydoc/markup/plaintext.py b/pydoctor/epydoc/markup/plaintext.py
index aefb7fe3f..940c6d0c0 100644
--- a/pydoctor/epydoc/markup/plaintext.py
+++ b/pydoctor/epydoc/markup/plaintext.py
@@ -9,6 +9,7 @@
verbatim output, preserving all whitespace.
"""
from __future__ import annotations
+
__docformat__ = 'epytext en'
from typing import List, Optional
@@ -19,6 +20,7 @@
from pydoctor.epydoc.markup import DocstringLinker, ObjClass, ParsedDocstring, ParseError, ParserFunction
from pydoctor.epydoc.docutils import set_node_attributes, new_document
+
def parse_docstring(docstring: str, errors: List[ParseError]) -> ParsedDocstring:
"""
Parse the given docstring, which is formatted as plain text; and
@@ -30,12 +32,14 @@ def parse_docstring(docstring: str, errors: List[ParseError]) -> ParsedDocstring
"""
return ParsedPlaintextDocstring(docstring)
+
def get_parser(_: ObjClass | None) -> ParserFunction:
"""
- Just return the L{parse_docstring} function.
+ Just return the L{parse_docstring} function.
"""
return parse_docstring
+
class ParsedPlaintextDocstring(ParsedDocstring):
def __init__(self, text: str):
@@ -47,14 +51,14 @@ def __init__(self, text: str):
@property
def has_body(self) -> bool:
return bool(self._text)
-
- # plaintext parser overrides the default to_stan() method for performance and design reasons.
- # We don't want to use docutils to process the plaintext format because we won't
- # actually use the document tree ,it does not contains any additionnalt information compared to the raw docstring.
+
+ # plaintext parser overrides the default to_stan() method for performance and design reasons.
+ # We don't want to use docutils to process the plaintext format because we won't
+ # actually use the document tree; it does not contain any additional information compared to the raw docstring.
# Also, the consolidated fields handling in restructuredtext.py relies on this "pre" class.
def to_stan(self, docstring_linker: DocstringLinker) -> Tag:
return tags.p(self._text, class_='pre')
-
+
def to_node(self) -> nodes.document:
# This code is mainly used to generate summaries of plaintext docstrings.
@@ -65,15 +69,18 @@ def to_node(self) -> nodes.document:
_document = new_document('plaintext')
# split text into paragraphs
- paragraphs = [set_node_attributes(nodes.paragraph('',''), children=[
- set_node_attributes(nodes.Text(p.strip('\n')), document=_document, lineno=0)],
- document=_document, lineno=0)
- for p in self._text.split('\n\n')]
-
+ paragraphs = [
+ set_node_attributes(
+ nodes.paragraph('', ''),
+ children=[set_node_attributes(nodes.Text(p.strip('\n')), document=_document, lineno=0)],
+ document=_document,
+ lineno=0,
+ )
+ for p in self._text.split('\n\n')
+ ]
+
# assemble document
- _document = set_node_attributes(_document,
- children=paragraphs,
- document=_document, lineno=0)
+ _document = set_node_attributes(_document, children=paragraphs, document=_document, lineno=0)
self._document = _document
return self._document
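A minimal usage sketch for the paragraph splitting above (sample text is illustrative): blank lines delimit paragraphs, so to_node() yields one paragraph node per chunk:

    from pydoctor.epydoc.markup.plaintext import parse_docstring

    errors = []
    parsed = parse_docstring("First paragraph.\n\nSecond paragraph.", errors)
    document = parsed.to_node()  # two paragraph children, one per chunk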
diff --git a/pydoctor/epydoc/markup/restructuredtext.py b/pydoctor/epydoc/markup/restructuredtext.py
index fd3f47784..56dacb7b3 100644
--- a/pydoctor/epydoc/markup/restructuredtext.py
+++ b/pydoctor/epydoc/markup/restructuredtext.py
@@ -39,12 +39,14 @@
the list.
"""
from __future__ import annotations
+
__docformat__ = 'epytext en'
from typing import TYPE_CHECKING, Any, Iterable, List, Optional, Sequence, Set, cast
+
if TYPE_CHECKING:
from typing import TypeAlias
-
+
import re
from docutils import nodes
@@ -73,7 +75,7 @@
'groups': 'group',
'types': 'type',
'keywords': 'keyword',
- }
+}
#: A list of consolidated fields whose bodies may be specified using a
#: definition list, rather than a bulleted list. For these fields, the
@@ -81,9 +83,11 @@
#: a @type field.
CONSOLIDATED_DEFLIST_FIELDS = ['param', 'arg', 'var', 'ivar', 'cvar', 'keyword']
-def parse_docstring(docstring: str,
- errors: List[ParseError],
- ) -> ParsedDocstring:
+
+def parse_docstring(
+ docstring: str,
+ errors: List[ParseError],
+) -> ParsedDocstring:
"""
Parse the given docstring, which is formatted using
ReStructuredText; and return a L{ParsedDocstring} representation
@@ -94,18 +98,18 @@ def parse_docstring(docstring: str,
will be stored.
"""
writer = _DocumentPseudoWriter()
- reader = _EpydocReader(errors) # Outputs errors to the list.
+ reader = _EpydocReader(errors) # Outputs errors to the list.
# Credits: mhils - Maximilian Hils from the pdoc repository https://github.com/mitmproxy/pdoc
# Strip Sphinx interpreted text roles for code references: :obj:`foo` -> `foo`
- docstring = re.sub(
- r"(:py)?:(mod|func|data|const|class|meth|attr|exc|obj):", "", docstring
- )
+ docstring = re.sub(r"(:py)?:(mod|func|data|const|class|meth|attr|exc|obj):", "", docstring)
- publish_string(docstring, writer=writer, reader=reader,
- settings_overrides={'report_level':10000,
- 'halt_level':10000,
- 'warning_stream':None})
+ publish_string(
+ docstring,
+ writer=writer,
+ reader=reader,
+ settings_overrides={'report_level': 10000, 'halt_level': 10000, 'warning_stream': None},
+ )
document = writer.document
visitor = _SplitFieldsTranslator(document, errors)
@@ -113,21 +117,24 @@ def parse_docstring(docstring: str,
return ParsedRstDocstring(document, visitor.fields)
+
def get_parser(_: ObjClass | None) -> ParserFunction:
"""
- Get the L{parse_docstring} function.
+ Get the L{parse_docstring} function.
"""
return parse_docstring
+
class OptimizedReporter(Reporter):
"""A reporter that ignores all debug messages. This is used to
shave a couple seconds off of epydoc's run time, since docutils
isn't very fast about processing its own debug messages.
"""
- def debug(self, *args: Any, **kwargs: Any) -> None: # type:ignore[override]
+ def debug(self, *args: Any, **kwargs: Any) -> None: # type:ignore[override]
pass
+
class ParsedRstDocstring(ParsedDocstring):
"""
An encoded version of a ReStructuredText docstring. The contents
@@ -139,19 +146,13 @@ def __init__(self, document: nodes.document, fields: Sequence[Field]):
self._document = document
"""A ReStructuredText document, encoding the docstring."""
- document.reporter = OptimizedReporter(
- document.reporter.source,
- report_level=10000, halt_level=10000,
- stream='')
+ document.reporter = OptimizedReporter(document.reporter.source, report_level=10000, halt_level=10000, stream='')
ParsedDocstring.__init__(self, fields)
@property
def has_body(self) -> bool:
- return any(
- isinstance(child, nodes.Text) or child.children
- for child in self._document.children
- )
+ return any(isinstance(child, nodes.Text) or child.children for child in self._document.children)
def to_node(self) -> nodes.document:
return self._document
@@ -159,6 +160,7 @@ def to_node(self) -> nodes.document:
def __repr__(self) -> str:
return ''
+
class _EpydocReader(StandaloneReader):
"""
A reader that captures all errors that are generated by parsing,
@@ -172,8 +174,7 @@ def __init__(self, errors: List[ParseError]):
def get_transforms(self) -> List[Transform]:
# Remove the DocInfo transform, to ensure that :author: fields
# are correctly handled.
- return [t for t in StandaloneReader.get_transforms(self)
- if t != frontmatter.DocInfo]
+ return [t for t in StandaloneReader.get_transforms(self) if t != frontmatter.DocInfo]
def new_document(self) -> nodes.document:
document = new_document(self.source.source_path, self.settings)
@@ -192,11 +193,13 @@ def report(self, error: nodes.system_message) -> None:
self._errors.append(ParseError(msg, linenum, is_fatal))
+
if TYPE_CHECKING:
_StrWriter: TypeAlias = Writer[str]
else:
_StrWriter = Writer
+
class _DocumentPseudoWriter(_StrWriter):
"""
A pseudo-writer for the docutils framework, that can be used to
@@ -212,6 +215,7 @@ class _DocumentPseudoWriter(_StrWriter):
def translate(self) -> None:
self.output = ''
+
class _SplitFieldsTranslator(nodes.NodeVisitor):
"""
A docutils translator that removes all fields from a document, and
@@ -247,16 +251,16 @@ def visit_field(self, node: nodes.field) -> None:
# :param str user_agent: user agent
tag = node[0].astext().split(None, 1)
tagname = tag[0]
- if len(tag)>1:
+ if len(tag) > 1:
arg = tag[1]
- else:
+ else:
arg = None
# Handle special fields:
fbody = node[1]
assert isinstance(fbody, nodes.Element)
if arg is None:
- for (list_tag, entry_tag) in CONSOLIDATED_FIELDS.items():
+ for list_tag, entry_tag in CONSOLIDATED_FIELDS.items():
if tagname.lower() == list_tag:
try:
self.handle_consolidated_field(fbody, entry_tag)
@@ -264,27 +268,21 @@ def visit_field(self, node: nodes.field) -> None:
except ValueError as e:
estr = 'Unable to split consolidated field '
estr += f'"{tagname}" - {e}'
- self._errors.append(ParseError(estr, node.line,
- is_fatal=False))
+ self._errors.append(ParseError(estr, node.line, is_fatal=False))
# Use a @newfield to let it be displayed as-is.
if tagname.lower() not in self._newfields:
- newfield = Field('newfield', tagname.lower(),
- ParsedPlaintextDocstring(tagname),
- (node.line or 1) - 1)
+ newfield = Field(
+ 'newfield', tagname.lower(), ParsedPlaintextDocstring(tagname), (node.line or 1) - 1
+ )
self.fields.append(newfield)
self._newfields.add(tagname.lower())
self._add_field(tagname, arg, fbody, node.line)
- def _add_field(self,
- tagname: str,
- arg: Optional[str],
- fbody: Iterable[nodes.Node],
- lineno: int | None
- ) -> None:
+ def _add_field(self, tagname: str, arg: Optional[str], fbody: Iterable[nodes.Node], lineno: int | None) -> None:
field_doc = self.document.copy()
- for child in fbody:
+ for child in fbody:
field_doc.append(child)
field_parsed_doc = ParsedRstDocstring(field_doc, ())
self.fields.append(Field(tagname, arg, field_parsed_doc, (lineno or 1) - 1))
@@ -300,17 +298,15 @@ def handle_consolidated_field(self, body: nodes.Element, tagname: str) -> None:
"""
if len(body) != 1:
raise ValueError('does not contain a single list.')
- if not isinstance(b0:=body[0], nodes.Element):
+ if not isinstance(b0 := body[0], nodes.Element):
# unfortunate assertion required for typing purposes
raise ValueError('does not contain a list.')
if isinstance(b0, nodes.bullet_list):
self.handle_consolidated_bullet_list(b0, tagname)
- elif (isinstance(b0, nodes.definition_list) and
- tagname in CONSOLIDATED_DEFLIST_FIELDS):
+ elif isinstance(b0, nodes.definition_list) and tagname in CONSOLIDATED_DEFLIST_FIELDS:
self.handle_consolidated_definition_list(b0, tagname)
elif tagname in CONSOLIDATED_DEFLIST_FIELDS:
- raise ValueError('does not contain a bulleted list or '
- 'definition list.')
+ raise ValueError('does not contain a bulleted list or definition list.')
else:
raise ValueError('does not contain a bulleted list.')
@@ -319,19 +315,21 @@ def handle_consolidated_bullet_list(self, items: nodes.bullet_list, tagname: str
# item should have the form:
# - `arg`: description...
n = 0
- _BAD_ITEM = ("list item %d is not well formed. Each item must "
- "consist of a single marked identifier (e.g., `x`), "
- "optionally followed by a colon or dash and a "
- "description.")
+ _BAD_ITEM = (
+ "list item %d is not well formed. Each item must "
+ "consist of a single marked identifier (e.g., `x`), "
+ "optionally followed by a colon or dash and a "
+ "description."
+ )
for item in items:
n += 1
if not isinstance(item, nodes.list_item) or len(item) == 0:
raise ValueError('bad bulleted list (bad child %d).' % n)
- if not isinstance(i0:=item[0], nodes.paragraph):
+ if not isinstance(i0 := item[0], nodes.paragraph):
if isinstance(i0, nodes.definition_list):
- raise ValueError(('list item %d contains a definition '+
- 'list (it\'s probably indented '+
- 'wrong).') % n)
+ raise ValueError(
+ ('list item %d contains a definition list (it\'s probably indented wrong).') % n
+ )
else:
raise ValueError(_BAD_ITEM % n)
if len(i0) == 0:
@@ -341,9 +339,9 @@ def handle_consolidated_bullet_list(self, items: nodes.bullet_list, tagname: str
# Everything looks good; convert to multiple fields.
for item in items:
- assert isinstance(item, nodes.list_item) # for typing
+ assert isinstance(item, nodes.list_item) # for typing
# Extract the arg, item[0][0] is safe since we checked earlier for a malformed list.
- arg = item[0][0].astext() # type: ignore
+ arg = item[0][0].astext() # type: ignore
# Extract the field body, and remove the arg
fbody = cast('list[nodes.Element]', item[:])
@@ -351,8 +349,7 @@ def handle_consolidated_bullet_list(self, items: nodes.bullet_list, tagname: str
fbody[0][:] = cast(nodes.paragraph, item[0])[1:]
# Remove the separating ":", if present
- if (len(fbody[0]) > 0 and
- isinstance(fbody[0][0], nodes.Text)):
+ if len(fbody[0]) > 0 and isinstance(fbody[0][0], nodes.Text):
text = fbody[0][0].astext()
if text[:1] in ':-':
fbody[0][0] = nodes.Text(text[1:].lstrip())
@@ -365,21 +362,27 @@ def handle_consolidated_bullet_list(self, items: nodes.bullet_list, tagname: str
def handle_consolidated_definition_list(self, items: nodes.definition_list, tagname: str) -> None:
# Check the list contents.
n = 0
- _BAD_ITEM = ("item %d is not well formed. Each item's term must "
- "consist of a single marked identifier (e.g., `x`), "
- "optionally followed by a space, colon, space, and "
- "a type description.")
+ _BAD_ITEM = (
+ "item %d is not well formed. Each item's term must "
+ "consist of a single marked identifier (e.g., `x`), "
+ "optionally followed by a space, colon, space, and "
+ "a type description."
+ )
for item in items:
n += 1
- if (not isinstance(item, nodes.definition_list_item) or len(item) < 2 or
- not isinstance(item[-1], nodes.definition) or
- not isinstance(i0:=item[0], nodes.Element)):
+ if (
+ not isinstance(item, nodes.definition_list_item)
+ or len(item) < 2
+ or not isinstance(item[-1], nodes.definition)
+ or not isinstance(i0 := item[0], nodes.Element)
+ ):
raise ValueError('bad definition list (bad child %d).' % n)
if len(item) > 3:
raise ValueError(_BAD_ITEM % n)
- if not ((isinstance(i0[0], nodes.title_reference)) or
- (self.ALLOW_UNMARKED_ARG_IN_CONSOLIDATED_FIELD and
- isinstance(i0[0], nodes.Text))):
+ if not (
+ (isinstance(i0[0], nodes.title_reference))
+ or (self.ALLOW_UNMARKED_ARG_IN_CONSOLIDATED_FIELD and isinstance(i0[0], nodes.Text))
+ ):
raise ValueError(_BAD_ITEM % n)
for child in i0[1:]:
if child.astext() != '':
@@ -387,7 +390,7 @@ def handle_consolidated_definition_list(self, items: nodes.definition_list, tagn
# Extract it.
for item in items:
- assert isinstance(item, nodes.definition_list_item) # for typing
+ assert isinstance(item, nodes.definition_list_item) # for typing
# The basic field.
arg = cast(nodes.Element, item[0])[0].astext()
lineno = item[0].line
@@ -401,28 +404,31 @@ def handle_consolidated_definition_list(self, items: nodes.definition_list, tagn
def unknown_visit(self, node: nodes.Node) -> None:
'Ignore all unknown nodes'
+
versionlabels = {
- 'versionadded': 'New in version %s',
+ 'versionadded': 'New in version %s',
'versionchanged': 'Changed in version %s',
- 'deprecated': 'Deprecated since version %s',
+ 'deprecated': 'Deprecated since version %s',
}
versionlabel_classes = {
- 'versionadded': 'added',
- 'versionchanged': 'changed',
- 'deprecated': 'deprecated',
+ 'versionadded': 'added',
+ 'versionchanged': 'changed',
+ 'deprecated': 'deprecated',
}
+
class VersionChange(Directive):
"""
Directive to describe a change/addition/deprecation in a specific version.
"""
+
class versionmodified(nodes.Admonition, nodes.TextElement):
"""Node for version change entries.
Currently used for "versionadded", "versionchanged" and "deprecated"
directives.
"""
-
+
has_content = True
required_arguments = 1
optional_arguments = 1
@@ -435,8 +441,7 @@ def run(self) -> List[nodes.Node]:
node['version'] = self.arguments[0]
text = versionlabels[self.name] % self.arguments[0]
if len(self.arguments) == 2:
- inodes, messages = self.state.inline_text(self.arguments[1],
- self.lineno + 1)
+ inodes, messages = self.state.inline_text(self.arguments[1], self.lineno + 1)
para = nodes.paragraph(self.arguments[1], '', *inodes)
node.append(para)
else:
@@ -455,25 +460,30 @@ def run(self) -> List[nodes.Node]:
para = cast(nodes.paragraph, node[0])
para.insert(0, nodes.inline('', '%s: ' % text, classes=classes))
else:
- para = nodes.paragraph('', '',
- nodes.inline('', '%s.' % text,
- classes=classes), )
+ para = nodes.paragraph(
+ '',
+ '',
+ nodes.inline('', '%s.' % text, classes=classes),
+ )
node.append(para)
ret = [node] # type: List[nodes.Node]
ret += messages
return ret
-# Do like Sphinx does for the seealso directive.
+
+# Do as Sphinx does for the seealso directive.
class SeeAlso(BaseAdmonition):
"""
An admonition mentioning things to look at as reference.
"""
+
class seealso(nodes.Admonition, nodes.Element):
"""Custom "see also" admonition node."""
node_class = seealso
+
class PythonCodeDirective(Directive):
"""
A custom restructuredtext directive which can be used to display
@@ -486,32 +496,35 @@ class PythonCodeDirective(Directive):
"""
has_content = True
-
+
def run(self) -> List[nodes.Node]:
text = '\n'.join(self.content)
node = nodes.doctest_block(text, text, codeblock=True)
- return [ node ]
+ return [node]
+
class DocutilsAndSphinxCodeBlockAdapter(PythonCodeDirective):
- # Docutils and Sphinx code blocks have both one optional argument,
+ # Docutils and Sphinx code blocks both have one optional argument,
# so we accept it here as well but do nothing with it.
required_arguments = 0
optional_arguments = 1
# Listing all options that docutils.parsers.rst.directives.body.CodeBlock provides
- # And also sphinx.directives.code.CodeBlock. We don't care about their values,
+ # And also sphinx.directives.code.CodeBlock. We don't care about their values,
# we just don't want to see them in self.content.
- option_spec = {'class': directives.class_option,
- 'name': directives.unchanged,
- 'number-lines': directives.unchanged, # integer or None
- 'force': directives.flag,
- 'linenos': directives.flag,
- 'dedent': directives.unchanged, # integer or None
- 'lineno-start': int,
- 'emphasize-lines': directives.unchanged_required,
- 'caption': directives.unchanged_required,
+ option_spec = {
+ 'class': directives.class_option,
+ 'name': directives.unchanged,
+ 'number-lines': directives.unchanged, # integer or None
+ 'force': directives.flag,
+ 'linenos': directives.flag,
+ 'dedent': directives.unchanged, # integer or None
+ 'lineno-start': int,
+ 'emphasize-lines': directives.unchanged_required,
+ 'caption': directives.unchanged_required,
}
+
directives.register_directive('python', PythonCodeDirective)
directives.register_directive('code', DocutilsAndSphinxCodeBlockAdapter)
directives.register_directive('code-block', DocutilsAndSphinxCodeBlockAdapter)
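The role-stripping substitution in parse_docstring() above can be exercised on its own. A standalone sketch (regex copied verbatim from the hunk; the docstring is illustrative):

    import re

    docstring = "Returns a :py:class:`Request` or calls :meth:`close`."
    stripped = re.sub(r"(:py)?:(mod|func|data|const|class|meth|attr|exc|obj):", "", docstring)
    assert stripped == "Returns a `Request` or calls `close`."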
diff --git a/pydoctor/epydoc2stan.py b/pydoctor/epydoc2stan.py
index 6e60a4eaa..555842358 100644
--- a/pydoctor/epydoc2stan.py
+++ b/pydoctor/epydoc2stan.py
@@ -1,13 +1,26 @@
"""
Convert L{pydoctor.epydoc} parsed markup into renderable content.
"""
+
from __future__ import annotations
from collections import defaultdict
import enum
from typing import (
- TYPE_CHECKING, Any, Callable, ClassVar, DefaultDict, Dict, Generator,
- Iterator, List, Mapping, Optional, Sequence, Tuple, Union,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ ClassVar,
+ DefaultDict,
+ Dict,
+ Generator,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
)
import ast
import re
@@ -35,6 +48,7 @@
BROKEN = tags.p(class_="undocumented")('Broken description')
+
def _get_docformat(obj: model.Documentable) -> str:
"""
Returns the docformat to use to parse the docstring of this object.
@@ -49,6 +63,7 @@ def _get_docformat(obj: model.Documentable) -> str:
docformat = obj.module.docformat or obj.system.options.docformat
return docformat
+
@attr.s(auto_attribs=True)
class FieldDesc:
"""
@@ -60,6 +75,7 @@ class FieldDesc:
:type foo: SomeClass
"""
+
_UNDOCUMENTED: ClassVar[Tag] = tags.span(class_='undocumented')("Undocumented")
name: Optional[str] = None
@@ -102,6 +118,7 @@ def format(self) -> Generator[Tag, None, None]:
#
yield tags.td(formatted, colspan="2")
+
@attr.s(auto_attribs=True)
class _SignatureDesc(FieldDesc):
type_origin: Optional['FieldOrigin'] = None
@@ -109,14 +126,18 @@ class _SignatureDesc(FieldDesc):
def is_documented(self) -> bool:
return bool(self.body or self.type_origin is FieldOrigin.FROM_DOCSTRING)
+
@attr.s(auto_attribs=True)
-class ReturnDesc(_SignatureDesc):...
+class ReturnDesc(_SignatureDesc): ...
+
@attr.s(auto_attribs=True)
-class ParamDesc(_SignatureDesc):...
+class ParamDesc(_SignatureDesc): ...
+
@attr.s(auto_attribs=True)
-class KeywordDesc(_SignatureDesc):...
+class KeywordDesc(_SignatureDesc): ...
+
class RaisesDesc(FieldDesc):
"""Description of an exception that can be raised by function/method."""
@@ -126,6 +147,7 @@ def format(self) -> Generator[Tag, None, None]:
yield tags.td(tags.code(self.type), class_="fieldArgContainer")
yield tags.td(self.body or self._UNDOCUMENTED)
+
def format_desc_list(label: str, descs: Sequence[FieldDesc]) -> Iterator[Tag]:
"""
Format list of L{FieldDesc}. Used for param, returns, raises, etc.
@@ -166,6 +188,7 @@ def format_desc_list(label: str, descs: Sequence[FieldDesc]) -> Iterator[Tag]:
row(d.format())
yield row
+
@attr.s(auto_attribs=True)
class Field:
"""Like L{pydoctor.epydoc.markup.Field}, but without the gross accessor
@@ -186,20 +209,18 @@ class Field:
@classmethod
def from_epydoc(cls, field: EpydocField, source: model.Documentable) -> 'Field':
- return cls(
- tag=field.tag(),
- arg=field.arg(),
- source=source,
- lineno=field.lineno,
- body=field.body()
- )
+ return cls(tag=field.tag(), arg=field.arg(), source=source, lineno=field.lineno, body=field.body())
def format(self) -> Tag:
"""Present this field's body as HTML."""
- return safe_to_stan(self.body, self.source.docstring_linker, self.source,
- # the parsed docstring maybe doesn't support to_node(), i.e. ParsedTypeDocstring,
- # so we can only show the broken text.
- fallback=lambda _, __, ___:BROKEN)
+ return safe_to_stan(
+ self.body,
+ self.source.docstring_linker,
+ self.source,
+ # the parsed docstring may not support to_node(), e.g. ParsedTypeDocstring,
+ # so we can only show the broken text.
+ fallback=lambda _, __, ___: BROKEN,
+ )
def report(self, message: str) -> None:
self.source.report(message, lineno_offset=self.lineno, section='docstring')
@@ -231,25 +252,30 @@ def format_field_list(singular: str, plural: str, fields: Sequence[Field]) -> It
row(tags.td(colspan="2")(field.format()))
yield row
+
class VariableArgument(str):
"""
Encapsulate the name of C{vararg} parameters in L{Function.annotations} mapping keys.
"""
+
class KeywordArgument(str):
"""
Encapsulate the name of C{kwarg} parameters in L{Function.annotations} mapping keys.
"""
+
class FieldOrigin(enum.Enum):
FROM_AST = 0
FROM_DOCSTRING = 1
+
@attr.s(auto_attribs=True)
class ParamType:
stan: Tag
origin: FieldOrigin
+
class FieldHandler:
def __init__(self, obj: model.Documentable):
@@ -269,19 +295,27 @@ def __init__(self, obj: model.Documentable):
self.sinces: List[Field] = []
self.unknowns: DefaultDict[str, List[FieldDesc]] = defaultdict(list)
- def set_param_types_from_annotations(
- self, annotations: Mapping[str, Optional[ast.expr]]
- ) -> None:
+ def set_param_types_from_annotations(self, annotations: Mapping[str, Optional[ast.expr]]) -> None:
_linker = linker._AnnotationLinker(self.obj)
formatted_annotations = {
- name: None if value is None
- else ParamType(safe_to_stan(colorize_inline_pyval(value), _linker,
- self.obj, fallback=colorized_pyval_fallback, section='annotation', report=False),
- # don't spam the log, invalid annotation are going to be reported when the signature gets colorized
- origin=FieldOrigin.FROM_AST)
-
+ name: (
+ None
+ if value is None
+ else ParamType(
+ safe_to_stan(
+ colorize_inline_pyval(value),
+ _linker,
+ self.obj,
+ fallback=colorized_pyval_fallback,
+ section='annotation',
+ report=False,
+ ),
+ # don't spam the log, invalid annotations are going to be reported when the signature gets colorized
+ origin=FieldOrigin.FROM_AST,
+ )
+ )
for name, value in annotations.items()
- }
+ }
ret_type = formatted_annotations.pop('return', None)
self.types.update(formatted_annotations)
@@ -295,7 +329,7 @@ def set_param_types_from_annotations(
self.return_desc = ReturnDesc(type=ret_type.stan, type_origin=ret_type.origin)
@staticmethod
- def _report_unexpected_argument(field:Field) -> None:
+ def _report_unexpected_argument(field: Field) -> None:
if field.arg is not None:
field.report('Unexpected argument in %s field' % (field.tag,))
@@ -304,6 +338,7 @@ def handle_return(self, field: Field) -> None:
if not self.return_desc:
self.return_desc = ReturnDesc()
self.return_desc.body = field.format()
+
handle_returns = handle_return
def handle_yield(self, field: Field) -> None:
@@ -311,6 +346,7 @@ def handle_yield(self, field: Field) -> None:
if not self.yields_desc:
self.yields_desc = FieldDesc()
self.yields_desc.body = field.format()
+
handle_yields = handle_yield
def handle_returntype(self, field: Field) -> None:
@@ -319,6 +355,7 @@ def handle_returntype(self, field: Field) -> None:
self.return_desc = ReturnDesc()
self.return_desc.type = field.format()
self.return_desc.type_origin = FieldOrigin.FROM_DOCSTRING
+
handle_rtype = handle_returntype
def handle_yieldtype(self, field: Field) -> None:
@@ -326,6 +363,7 @@ def handle_yieldtype(self, field: Field) -> None:
if not self.yields_desc:
self.yields_desc = FieldDesc()
self.yields_desc.type = field.format()
+
handle_ytype = handle_yieldtype
def _handle_param_name(self, field: Field) -> Optional[str]:
@@ -388,11 +426,16 @@ def handle_type(self, field: Field) -> None:
return
elif isinstance(self.obj, model.Function):
name = self._handle_param_name(field)
- if name is not None and name not in self.types and not any(
+ if (
+ name is not None
+ and name not in self.types
+ and not any(
# Don't warn about keywords or about parameters we already
# reported a warning for.
- desc.name == name for desc in self.parameter_descs
- ):
+ desc.name == name
+ for desc in self.parameter_descs
+ )
+ ):
self._handle_param_not_found(name, field)
else:
# Note: extract_fields() will issue warnings about missing field
@@ -423,7 +466,6 @@ def handle_keyword(self, field: Field) -> None:
if name in self.types:
field.report('Parameter "%s" is documented as keyword' % (name,))
-
def handled_elsewhere(self, field: Field) -> None:
# Some fields are handled by extract_fields below.
pass
@@ -440,6 +482,7 @@ def handle_raises(self, field: Field) -> None:
else:
typ_fmt = self._linker.link_to(name, name)
self.raise_descs.append(RaisesDesc(type=typ_fmt, body=field.format()))
+
handle_raise = handle_raises
handle_except = handle_raises
@@ -455,6 +498,7 @@ def handle_warns(self, field: Field) -> None:
def handle_seealso(self, field: Field) -> None:
self.seealsos.append(field)
+
handle_see = handle_seealso
def handle_note(self, field: Field) -> None:
@@ -468,7 +512,7 @@ def handle_since(self, field: Field) -> None:
def handleUnknownField(self, field: Field) -> None:
name = field.tag
- field.report(f"Unknown field '{name}'" )
+ field.report(f"Unknown field '{name}'")
self.unknowns[name].append(FieldDesc(name=field.arg, body=field.format()))
def handle(self, field: Field) -> None:
@@ -492,14 +536,16 @@ def resolve_types(self) -> None:
if index == 0:
# Strip 'self' or 'cls' from parameter table when it semantically makes sense.
- if name=='self' and self.obj.kind is model.DocumentableKind.METHOD:
+ if name == 'self' and self.obj.kind is model.DocumentableKind.METHOD:
continue
- if name=='cls' and self.obj.kind is model.DocumentableKind.CLASS_METHOD:
+ if name == 'cls' and self.obj.kind is model.DocumentableKind.CLASS_METHOD:
continue
- param = ParamDesc(name=name,
+ param = ParamDesc(
+ name=name,
type=param_type.stan if param_type else None,
- type_origin=param_type.origin if param_type else None,)
+ type_origin=param_type.origin if param_type else None,
+ )
any_info |= param_type is not None
else:
@@ -517,7 +563,7 @@ def resolve_types(self) -> None:
if any_info:
self.parameter_descs = new_parameter_descs
- # loops thought the parameters and remove eventual **kwargs
+ # loops through the parameters and removes the eventual **kwargs
# entry if keywords are specifically documented.
kwargs = None
has_keywords = False
@@ -550,10 +596,12 @@ def format(self) -> Tag:
r += format_desc_list("Raises", self.raise_descs)
r += format_desc_list("Warns", self.warns_desc)
- for s_p_l in (('Author', 'Authors', self.authors),
- ('See Also', 'See Also', self.seealsos),
- ('Present Since', 'Present Since', self.sinces),
- ('Note', 'Notes', self.notes)):
+ for s_p_l in (
+ ('Author', 'Authors', self.authors),
+ ('See Also', 'See Also', self.seealsos),
+ ('Present Since', 'Present Since', self.sinces),
+ ('Note', 'Notes', self.notes),
+ ):
r += format_field_list(*s_p_l)
for kind, fieldlist in self.unknowns.items():
r += format_desc_list(f"Unknown Field: {kind}", fieldlist)
@@ -563,11 +611,13 @@ def format(self) -> Tag:
else:
return tags.transparent
-def reportWarnings(obj: model.Documentable, warns: Sequence[str], **kwargs:Any) -> None:
+
+def reportWarnings(obj: model.Documentable, warns: Sequence[str], **kwargs: Any) -> None:
for message in warns:
obj.report(message, **kwargs)
-def reportErrors(obj: model.Documentable, errs: Sequence[ParseError], section:str='docstring') -> None:
+
+def reportErrors(obj: model.Documentable, errs: Sequence[ParseError], section: str = 'docstring') -> None:
if not errs:
return
@@ -577,11 +627,8 @@ def reportErrors(obj: model.Documentable, errs: Sequence[ParseError], section:st
errors.add(obj.fullName())
for err in errs:
- obj.report(
- f'bad {section}: ' + err.descr(),
- lineno_offset=(err.linenum() or 1) - 1,
- section=section
- )
+ obj.report(f'bad {section}: ' + err.descr(), lineno_offset=(err.linenum() or 1) - 1, section=section)
+
def _objclass(obj: model.Documentable) -> ObjClass | None:
# There is only 4 main kinds of objects
@@ -595,14 +642,17 @@ def _objclass(obj: model.Documentable) -> ObjClass | None:
return 'function'
return None
+
_docformat_skip_processtypes = ('google', 'numpy', 'plaintext')
+
+
def parse_docstring(
- obj: model.Documentable,
- doc: str,
- source: model.Documentable,
- markup: Optional[str]=None,
- section: str='docstring',
- ) -> ParsedDocstring:
+ obj: model.Documentable,
+ doc: str,
+ source: model.Documentable,
+ markup: Optional[str] = None,
+ section: str = 'docstring',
+) -> ParsedDocstring:
"""Parse a docstring.
@param obj: The object we're parsing the documentation for.
@param doc: The docstring.
@@ -619,8 +669,11 @@ def parse_docstring(
try:
parser = get_parser_by_name(docformat, _objclass(obj))
except (ImportError, AttributeError) as e:
- _err = 'Error trying to fetch %r parser:\n\n %s: %s\n\nUsing plain text formatting only.'%(
- docformat, e.__class__.__name__, e)
+ _err = 'Error trying to fetch %r parser:\n\n %s: %s\n\nUsing plain text formatting only.' % (
+ docformat,
+ e.__class__.__name__,
+ e,
+ )
obj.system.msg('epydoc2stan', _err, thresh=-1, once=True)
parser = pydoctor.epydoc.markup.plaintext.parse_docstring
@@ -645,6 +698,7 @@ def parse_docstring(
reportErrors(source, errs, section=section)
return parsed_doc
+
def ensure_parsed_docstring(obj: model.Documentable) -> Optional[model.Documentable]:
"""
Currently, it's not 100% clear at what point the L{Documentable.parsed_docstring} attribute is set.
@@ -688,6 +742,7 @@ class ParsedStanOnly(ParsedDocstring):
L{to_stan} method simply returns back what's given to L{ParsedStanOnly.__init__}.
"""
+
def __init__(self, stan: Tag):
super().__init__(fields=[])
self._fromstan = stan
@@ -702,6 +757,7 @@ def to_stan(self, docstring_linker: Any) -> Tag:
def to_node(self) -> Any:
raise NotImplementedError()
+
def _get_parsed_summary(obj: model.Documentable) -> Tuple[Optional[model.Documentable], ParsedDocstring]:
"""
Ensures that the L{model.Documentable.parsed_summary} attribute of a documentable is set to it's final value.
@@ -725,15 +781,19 @@ def _get_parsed_summary(obj: model.Documentable) -> Tuple[Optional[model.Documen
return (source, summary_parsed_doc)
+
def get_to_stan_error(e: Exception) -> ParseError:
return ParseError(f"{e.__class__.__name__}: {e}", 0)
-def safe_to_stan(parsed_doc: ParsedDocstring,
- linker: 'DocstringLinker',
- ctx: model.Documentable,
- fallback: Callable[[List[ParseError], ParsedDocstring, model.Documentable], Tag],
- report: bool = True,
- section:str='docstring') -> Tag:
+
+def safe_to_stan(
+ parsed_doc: ParsedDocstring,
+ linker: 'DocstringLinker',
+ ctx: model.Documentable,
+ fallback: Callable[[List[ParseError], ParsedDocstring, model.Documentable], Tag],
+ report: bool = True,
+ section: str = 'docstring',
+) -> Tag:
"""
Wraps L{ParsedDocstring.to_stan()} to catch exception and handle them in C{fallback}.
This is used to convert docstrings as well as other colorized AST values to stan.
@@ -757,7 +817,8 @@ def safe_to_stan(parsed_doc: ParsedDocstring,
reportErrors(ctx, errs, section=section)
return stan
-def format_docstring_fallback(errs: List[ParseError], parsed_doc:ParsedDocstring, ctx:model.Documentable) -> Tag:
+
+def format_docstring_fallback(errs: List[ParseError], parsed_doc: ParsedDocstring, ctx: model.Documentable) -> Tag:
if ctx.docstring is None:
stan = BROKEN
else:
@@ -765,9 +826,10 @@ def format_docstring_fallback(errs: List[ParseError], parsed_doc:ParsedDocstring
stan = parsed_doc_plain.to_stan(ctx.docstring_linker)
return stan
-def _wrap_in_paragraph(body:Sequence["Flattenable"]) -> bool:
+
+def _wrap_in_paragraph(body: Sequence["Flattenable"]) -> bool:
"""
- Whether to wrap the given docstring stan body inside a paragraph.
+ Whether to wrap the given docstring stan body inside a paragraph.
"""
has_paragraph = False
for e in body:
@@ -775,12 +837,13 @@ def _wrap_in_paragraph(body:Sequence["Flattenable"]) -> bool:
has_paragraph = True
# only check the first element of the body
break
- return bool(len(body)>0 and not has_paragraph)
+ return bool(len(body) > 0 and not has_paragraph)
-def unwrap_docstring_stan(stan:Tag) -> "Flattenable":
+
+def unwrap_docstring_stan(stan: Tag) -> "Flattenable":
"""
- Unwrap the body of the given C{Tag} instance if it has a non-empty tag name and
- ensure there is at least one paragraph.
+ Unwrap the body of the given C{Tag} instance if it has a non-empty tag name and
+ ensure there is at least one paragraph.
@note: This is the counterpart of what we're doing in L{HTMLTranslator.should_be_compact_paragraph()}.
Since the L{HTMLTranslator} is generic for all parsed docstrings types, it always generates compact paragraphs.
@@ -795,6 +858,7 @@ def unwrap_docstring_stan(stan:Tag) -> "Flattenable":
else:
return body
+
def format_docstring(obj: model.Documentable) -> Tag:
"""Generate an HTML representation of a docstring"""
@@ -820,26 +884,27 @@ def format_docstring(obj: model.Documentable) -> Tag:
ret(fh.format())
return ret
-def format_summary_fallback(errs: List[ParseError], parsed_doc:ParsedDocstring, ctx:model.Documentable) -> Tag:
+
+def format_summary_fallback(errs: List[ParseError], parsed_doc: ParsedDocstring, ctx: model.Documentable) -> Tag:
stan = BROKEN
# override parsed_summary instance variable to remeber this one is broken.
ctx.parsed_summary = ParsedStanOnly(stan)
return stan
+
def format_summary(obj: model.Documentable) -> Tag:
"""Generate an shortened HTML representation of a docstring."""
source, parsed_doc = _get_parsed_summary(obj)
if not source:
source = obj
-
+
# do not optimize url in order to make sure we're always generating full urls.
# avoids breaking links when including the summaries on other pages.
with source.docstring_linker.switch_context(None):
# ParserErrors will likely be reported by the full docstring as well,
# so don't spam the log, pass report=False.
- stan = safe_to_stan(parsed_doc, source.docstring_linker, source, report=False,
- fallback=format_summary_fallback)
+ stan = safe_to_stan(parsed_doc, source.docstring_linker, source, report=False, fallback=format_summary_fallback)
return stan
@@ -848,7 +913,7 @@ def format_undocumented(obj: model.Documentable) -> Tag:
"""Generate an HTML representation for an object lacking a docstring."""
sub_objects_with_docstring_count: DefaultDict[model.DocumentableKind, int] = defaultdict(int)
- sub_objects_total_count: DefaultDict[model.DocumentableKind, int] = defaultdict(int)
+ sub_objects_total_count: DefaultDict[model.DocumentableKind, int] = defaultdict(int)
for sub_ob in obj.contents.values():
kind = sub_ob.kind
if kind is not None:
@@ -860,17 +925,18 @@ def format_undocumented(obj: model.Documentable) -> Tag:
if sub_objects_with_docstring_count:
kind = obj.kind
- assert kind is not None # if kind is None, object is invisible
+ assert kind is not None # if kind is None, object is invisible
tag(
- "No ", format_kind(kind).lower(), " docstring; ",
+ "No ",
+ format_kind(kind).lower(),
+ " docstring; ",
', '.join(
f"{sub_objects_with_docstring_count[kind]}/{sub_objects_total_count[kind]} "
f"{format_kind(kind, plural=sub_objects_with_docstring_count[kind]>=2).lower()}"
-
- for kind in sorted(sub_objects_total_count, key=(lambda x:x.value))
- ),
- " documented"
- )
+ for kind in sorted(sub_objects_total_count, key=(lambda x: x.value))
+ ),
+ " documented",
+ )
else:
tag("Undocumented")
return tag
@@ -886,8 +952,8 @@ def type2stan(obj: model.Documentable) -> Optional[Tag]:
return None
else:
_linker = linker._AnnotationLinker(obj)
- return safe_to_stan(parsed_type, _linker, obj,
- fallback=colorized_pyval_fallback, section='annotation')
+ return safe_to_stan(parsed_type, _linker, obj, fallback=colorized_pyval_fallback, section='annotation')
+
def get_parsed_type(obj: model.Documentable) -> Optional[ParsedDocstring]:
"""
@@ -904,6 +970,7 @@ def get_parsed_type(obj: model.Documentable) -> Optional[ParsedDocstring]:
return None
+
def format_toc(obj: model.Documentable) -> Optional[Tag]:
# Load the parsed_docstring if it's not already done.
ensure_parsed_docstring(obj)
@@ -912,8 +979,7 @@ def format_toc(obj: model.Documentable) -> Optional[Tag]:
if obj.system.options.sidebartocdepth > 0:
toc = obj.parsed_docstring.get_toc(depth=obj.system.options.sidebartocdepth)
if toc:
- return safe_to_stan(toc, obj.docstring_linker, obj, report=False,
- fallback=lambda _,__,___:BROKEN)
+ return safe_to_stan(toc, obj.docstring_linker, obj, report=False, fallback=lambda _, __, ___: BROKEN)
return None
@@ -921,7 +987,7 @@ def format_toc(obj: model.Documentable) -> Optional[Tag]:
'ivar': model.DocumentableKind.INSTANCE_VARIABLE,
'cvar': model.DocumentableKind.CLASS_VARIABLE,
'var': model.DocumentableKind.VARIABLE,
- }
+}
def extract_fields(obj: model.CanContainImportsDocumentable) -> None:
@@ -940,8 +1006,7 @@ def extract_fields(obj: model.CanContainImportsDocumentable) -> None:
if tag in ['ivar', 'cvar', 'var', 'type']:
arg = field.arg()
if arg is None:
- obj.report("Missing field name in @%s" % (tag,),
- 'docstring', field.lineno)
+ obj.report("Missing field name in @%s" % (tag,), 'docstring', field.lineno)
continue
attrobj: Optional[model.Documentable] = obj.contents.get(arg)
if attrobj is None:
@@ -959,46 +1024,49 @@ def extract_fields(obj: model.CanContainImportsDocumentable) -> None:
attrobj.parsed_docstring = field.body()
attrobj.kind = field_name_to_kind[tag]
+
def format_kind(kind: model.DocumentableKind, plural: bool = False) -> str:
"""
Transform a `model.DocumentableKind` Enum value to string.
"""
names = {
- model.DocumentableKind.PACKAGE : 'Package',
- model.DocumentableKind.MODULE : 'Module',
- model.DocumentableKind.INTERFACE : 'Interface',
- model.DocumentableKind.CLASS : 'Class',
- model.DocumentableKind.CLASS_METHOD : 'Class Method',
- model.DocumentableKind.STATIC_METHOD : 'Static Method',
- model.DocumentableKind.METHOD : 'Method',
- model.DocumentableKind.FUNCTION : 'Function',
- model.DocumentableKind.CLASS_VARIABLE : 'Class Variable',
- model.DocumentableKind.ATTRIBUTE : 'Attribute',
- model.DocumentableKind.INSTANCE_VARIABLE : 'Instance Variable',
- model.DocumentableKind.PROPERTY : 'Property',
- model.DocumentableKind.VARIABLE : 'Variable',
- model.DocumentableKind.SCHEMA_FIELD : 'Attribute',
- model.DocumentableKind.CONSTANT : 'Constant',
- model.DocumentableKind.EXCEPTION : 'Exception',
- model.DocumentableKind.TYPE_ALIAS : 'Type Alias',
- model.DocumentableKind.TYPE_VARIABLE : 'Type Variable',
+ model.DocumentableKind.PACKAGE: 'Package',
+ model.DocumentableKind.MODULE: 'Module',
+ model.DocumentableKind.INTERFACE: 'Interface',
+ model.DocumentableKind.CLASS: 'Class',
+ model.DocumentableKind.CLASS_METHOD: 'Class Method',
+ model.DocumentableKind.STATIC_METHOD: 'Static Method',
+ model.DocumentableKind.METHOD: 'Method',
+ model.DocumentableKind.FUNCTION: 'Function',
+ model.DocumentableKind.CLASS_VARIABLE: 'Class Variable',
+ model.DocumentableKind.ATTRIBUTE: 'Attribute',
+ model.DocumentableKind.INSTANCE_VARIABLE: 'Instance Variable',
+ model.DocumentableKind.PROPERTY: 'Property',
+ model.DocumentableKind.VARIABLE: 'Variable',
+ model.DocumentableKind.SCHEMA_FIELD: 'Attribute',
+ model.DocumentableKind.CONSTANT: 'Constant',
+ model.DocumentableKind.EXCEPTION: 'Exception',
+ model.DocumentableKind.TYPE_ALIAS: 'Type Alias',
+ model.DocumentableKind.TYPE_VARIABLE: 'Type Variable',
}
plurals = {
- model.DocumentableKind.CLASS : 'Classes',
- model.DocumentableKind.PROPERTY : 'Properties',
- model.DocumentableKind.TYPE_ALIAS : 'Type Aliases',
+ model.DocumentableKind.CLASS: 'Classes',
+ model.DocumentableKind.PROPERTY: 'Properties',
+ model.DocumentableKind.TYPE_ALIAS: 'Type Aliases',
}
if plural:
return plurals.get(kind, names[kind] + 's')
else:
return names[kind]
-def colorized_pyval_fallback(_: List[ParseError], doc:ParsedDocstring, __:model.Documentable) -> Tag:
+
+def colorized_pyval_fallback(_: List[ParseError], doc: ParsedDocstring, __: model.Documentable) -> Tag:
"""
This fallback function uses L{ParsedDocstring.to_node()}, so it must be used only with L{ParsedDocstring} subclasses that implements C{to_node()}.
"""
return Tag('code')(node2stan.gettext(doc.to_node()))
+
def _format_constant_value(obj: model.Attribute) -> Iterator["Flattenable"]:
# yield the table title, "Value"
@@ -1007,12 +1075,13 @@ def _format_constant_value(obj: model.Attribute) -> Iterator["Flattenable"]:
# yield the first row.
yield row
- doc = colorize_pyval(obj.value,
- linelen=obj.system.options.pyvalreprlinelen,
- maxlines=obj.system.options.pyvalreprmaxlines)
+ doc = colorize_pyval(
+ obj.value, linelen=obj.system.options.pyvalreprlinelen, maxlines=obj.system.options.pyvalreprmaxlines
+ )
- value_repr = safe_to_stan(doc, obj.docstring_linker, obj,
- fallback=colorized_pyval_fallback, section='rendering of constant')
+ value_repr = safe_to_stan(
+ doc, obj.docstring_linker, obj, fallback=colorized_pyval_fallback, section='rendering of constant'
+ )
# Report eventual warnings. It warns when a regex failed to parse.
reportWarnings(obj, doc.warnings, section='colorize constant')
@@ -1022,6 +1091,7 @@ def _format_constant_value(obj: model.Attribute) -> Iterator["Flattenable"]:
row(tags.td(tags.pre(class_='constant-value')(value_repr)))
yield row
+
def format_constant_value(obj: model.Attribute) -> "Flattenable":
"""
Should be only called for L{Attribute} objects that have the L{Attribute.value} property set.
@@ -1029,14 +1099,15 @@ def format_constant_value(obj: model.Attribute) -> "Flattenable":
rows = list(_format_constant_value(obj))
return tags.table(class_='valueTable')(*rows)
-def _split_indentifier_parts_on_case(indentifier:str) -> List[str]:
- def split(text:str, sep:str) -> List[str]:
+def _split_indentifier_parts_on_case(indentifier: str) -> List[str]:
+
+ def split(text: str, sep: str) -> List[str]:
# We use \u200b as temp token to hack a split that passes the tests.
- return text.replace(sep, '\u200b'+sep).split('\u200b')
+ return text.replace(sep, '\u200b' + sep).split('\u200b')
match = re.match('(_{1,2})?(.*?)(_{1,2})?$', indentifier)
- assert match is not None # the regex always matches
+ assert match is not None # the regex always matches
prefix, text, suffix = match.groups(default='')
text_parts = []
@@ -1061,7 +1132,7 @@ def split(text:str, sep:str) -> List[str]:
if current_part:
text_parts.append(current_part)
- if not text_parts: # the name is composed only by underscores
+ if not text_parts: # the name is composed only by underscores
text_parts = ['']
if prefix:
@@ -1071,6 +1142,7 @@ def split(text:str, sep:str) -> List[str]:
return text_parts
+
def insert_break_points(text: str) -> 'Flattenable':
"""
Browsers aren't smart enough to recognize word breaking opportunities in
@@ -1085,58 +1157,60 @@ def insert_break_points(text: str) -> 'Flattenable':
r: List['Flattenable'] = []
parts = text.split('.')
- for i,t in enumerate(parts):
+ for i, t in enumerate(parts):
_parts = _split_indentifier_parts_on_case(t)
- for i_,p in enumerate(_parts):
+ for i_, p in enumerate(_parts):
r += [p]
- if i_ != len(_parts)-1:
+ if i_ != len(_parts) - 1:
r += [tags.wbr()]
- if i != len(parts)-1:
+ if i != len(parts) - 1:
r += [tags.wbr(), '.']
return tags.transparent(*r)
+
def format_constructor_short_text(constructor: model.Function, forclass: model.Class) -> str:
"""
Returns a simplified signature of the constructor.
C{forclass} is not always the function's parent, it can be a subclass.
"""
args = ''
- # for signature with more than 5 parameters,
+    # for signatures with more than 5 parameters,
# we just show the elipsis after the fourth parameter
annotations = constructor.annotations.items()
many_param = len(annotations) > 6
-
+
for index, (name, ann) in enumerate(annotations):
- if name=='return':
+ if name == 'return':
continue
if many_param and index > 4:
args += ', ...'
break
-
+
# Special casing __new__ because it's actually a static method
- if index==0 and (constructor.name in ('__new__', '__init__') or
- constructor.kind is model.DocumentableKind.CLASS_METHOD):
+ if index == 0 and (
+ constructor.name in ('__new__', '__init__') or constructor.kind is model.DocumentableKind.CLASS_METHOD
+ ):
# Omit first argument (self/cls) from simplified signature.
continue
star = ''
if isinstance(name, VariableArgument):
- star='*'
+ star = '*'
elif isinstance(name, KeywordArgument):
- star='**'
-
+ star = '**'
+
if args:
args += ', '
-
+
args += f"{star}{name}"
-
+
# display innner classes with their name starting at the top level class.
- _current:model.CanContainImportsDocumentable = forclass
- class_name = []
+ _current: model.CanContainImportsDocumentable = forclass
+ class_name = []
while isinstance(_current, model.Class):
class_name.append(_current.name)
_current = _current.parent
-
+
callable_name = '.'.join(reversed(class_name))
if constructor.name not in ('__new__', '__init__'):
@@ -1146,41 +1220,35 @@ def format_constructor_short_text(constructor: model.Function, forclass: model.C
return f"{callable_name}({args})"
-def get_constructors_extra(cls:model.Class) -> ParsedDocstring | None:
+
+def get_constructors_extra(cls: model.Class) -> ParsedDocstring | None:
"""
Get an extra docstring to represent Class constructors.
"""
from pydoctor.templatewriter import util
+
constructors = cls.public_constructors
if not constructors:
return None
-
+
document = new_document('constructors')
elements: list[nodes.Node] = []
- plural = 's' if len(constructors)>1 else ''
- elements.append(set_node_attributes(
- nodes.Text(f'Constructor{plural}: '),
- document=document,
- lineno=1))
-
- for i, c in enumerate(sorted(constructors,
- key=util.alphabetical_order_func)):
+ plural = 's' if len(constructors) > 1 else ''
+ elements.append(set_node_attributes(nodes.Text(f'Constructor{plural}: '), document=document, lineno=1))
+
+ for i, c in enumerate(sorted(constructors, key=util.alphabetical_order_func)):
if i != 0:
- elements.append(set_node_attributes(
- nodes.Text(', '),
- document=document,
- lineno=1))
+ elements.append(set_node_attributes(nodes.Text(', '), document=document, lineno=1))
short_text = format_constructor_short_text(c, cls)
- elements.append(set_node_attributes(
- nodes.title_reference('', '', refuri=c.fullName()),
- document=document,
- children=[set_node_attributes(
- nodes.Text(short_text),
- document=document,
- lineno=1
- )],
- lineno=1))
-
+ elements.append(
+ set_node_attributes(
+ nodes.title_reference('', '', refuri=c.fullName()),
+ document=document,
+ children=[set_node_attributes(nodes.Text(short_text), document=document, lineno=1)],
+ lineno=1,
+ )
+ )
+
set_node_attributes(document, children=elements)
return ParsedRstDocstring(document, ())
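
Aside for reviewers of the hunks above: every rewrap in this file follows from black's line-splitting rules at the default 88-column limit this series adopts. A minimal sketch, not part of the patch, assuming black 24.x is importable; the input is a shortened stand-in for the ParamDesc(...) call reformatted above, not the exact pydoctor source:

    import black

    # Shortened stand-in for the ParamDesc(...) call rewrapped in this file.
    src = (
        "param = ParamDesc(name=name,\n"
        "    type=param_type.stan if param_type else None,\n"
        "    type_origin=param_type.origin if param_type else None,)\n"
    )
    # black.Mode() uses the default 88-column line length.
    print(black.format_str(src, mode=black.Mode()))

The trailing comma before the closing parenthesis is black's "magic trailing comma": it forces one argument per line even when the call would fit, which is why several short calls above end up exploded.
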
diff --git a/pydoctor/extensions/__init__.py b/pydoctor/extensions/__init__.py
index 53b8ad256..08def8e8b 100644
--- a/pydoctor/extensions/__init__.py
+++ b/pydoctor/extensions/__init__.py
@@ -3,6 +3,7 @@
An extension can be composed by mixin classes, AST builder visitor extensions and post processors.
"""
+
from __future__ import annotations
import importlib
@@ -18,30 +19,47 @@
import attr
from pydoctor import astutils
+
class ClassMixin:
"""Base class for mixins applied to L{model.Class} objects."""
+
+
class ModuleMixin:
"""Base class for mixins applied to L{model.Module} objects."""
+
+
class PackageMixin:
"""Base class for mixins applied to L{model.Package} objects."""
+
+
class FunctionMixin:
"""Base class for mixins applied to L{model.Function} objects."""
+
+
class AttributeMixin:
"""Base class for mixins applied to L{model.Attribute} objects."""
+
+
class DocumentableMixin(ModuleMixin, ClassMixin, FunctionMixin, AttributeMixin):
"""Base class for mixins applied to all L{model.Documentable} objects."""
+
+
class CanContainImportsDocumentableMixin(PackageMixin, ModuleMixin, ClassMixin):
"""Base class for mixins applied to L{model.Class}, L{model.Module} and L{model.Package} objects."""
+
+
class InheritableMixin(FunctionMixin, AttributeMixin):
"""Base class for mixins applied to L{model.Function} and L{model.Attribute} objects."""
+
MixinT = Union[ClassMixin, ModuleMixin, PackageMixin, FunctionMixin, AttributeMixin]
+
def _importlib_resources_contents(package: str) -> Iterable[str]:
"""Return an iterable of entries in C{package}.
Note that not all entries are resources. Specifically, directories are
- not considered resources.
+ not considered resources.
"""
return [path.name for path in importlib_resources.files(package).iterdir()]
@@ -57,48 +75,52 @@ def _importlib_resources_is_resource(package: str, name: str) -> bool:
for traversable in importlib_resources.files(package).iterdir()
)
+
def _get_submodules(pkg: str) -> Iterator[str]:
for name in _importlib_resources_contents(pkg):
if (not name.startswith('_') and _importlib_resources_is_resource(pkg, name)) and name.endswith('.py'):
- name = name[:-len('.py')]
+ name = name[: -len('.py')]
yield f"{pkg}.{name}"
+
def _get_setup_extension_func_from_module(module: str) -> Callable[['ExtRegistrar'], None]:
"""
Will look for the special function C{setup_pydoctor_extension} in the provided module.
-
+
@Raises AssertionError: if module do not provide a valid setup_pydoctor_extension() function.
@Raises ModuleNotFoundError: if module is not found.
@Returns: a tuple(str, callable): extension module name, setup_pydoctor_extension() function.
"""
mod = importlib.import_module(module)
-
+
assert hasattr(mod, 'setup_pydoctor_extension'), f"{mod}.setup_pydoctor_extension() function not found."
assert callable(mod.setup_pydoctor_extension), f"{mod}.setup_pydoctor_extension should be a callable."
return cast('Callable[[ExtRegistrar], None]', mod.setup_pydoctor_extension)
+
_mixin_to_class_name: Dict[Any, str] = {
- ClassMixin: 'Class',
- ModuleMixin: 'Module',
- PackageMixin: 'Package',
- FunctionMixin: 'Function',
- AttributeMixin: 'Attribute',
- }
+ ClassMixin: 'Class',
+ ModuleMixin: 'Module',
+ PackageMixin: 'Package',
+ FunctionMixin: 'Function',
+ AttributeMixin: 'Attribute',
+}
+
def _get_mixins(*mixins: Type[MixinT]) -> Dict[str, List[Type[MixinT]]]:
"""
- Transform a list of mixins classes to a dict from the
+    Transform a list of mixin classes to a dict from the
concrete class name to the mixins that must be applied to it.
- This relies on the fact that mixins shoud extend one of the
+    This relies on the fact that mixins should extend one of the
base mixin classes in L{pydoctor.extensions} module.
-
- @raises AssertionError: If a mixin does not extends any of the
+
+    @raises AssertionError: If a mixin does not extend any of the
provided base mixin classes.
"""
mixins_by_name: Dict[str, List[Type[MixinT]]] = {}
for mixin in mixins:
added = False
- for k,v in _mixin_to_class_name.items():
+ for k, v in _mixin_to_class_name.items():
if isinstance(mixin, type) and issubclass(mixin, k):
mixins_by_name.setdefault(v, [])
mixins_by_name[v].append(mixin)
@@ -109,34 +131,36 @@ def _get_mixins(*mixins: Type[MixinT]) -> Dict[str, List[Type[MixinT]]]:
assert False, f"Invalid mixin {mixin.__name__!r}. Mixins must subclass one of the base class."
return mixins_by_name
+
# Largely inspired by docutils Transformer class.
DEFAULT_PRIORITY = 100
+
+
class PriorityProcessor:
"""
Stores L{Callable} and applies them to the system based on priority or insertion order.
- The default priority is C{100}, see code source of L{astbuilder.setup_pydoctor_extension},
+    The default priority is C{100}, see the source code of L{astbuilder.setup_pydoctor_extension},
and others C{setup_pydoctor_extension} functions.
Highest priority callables will be called first, when priority is the same it's FIFO order.
One L{PriorityProcessor} should only be run once on the system.
"""
-
- def __init__(self, system:'model.System'):
+
+ def __init__(self, system: 'model.System'):
self.system = system
self.applied: List[Callable[['model.System'], None]] = []
self._post_processors: List[Tuple[object, Callable[['model.System'], None]]] = []
self._counter = 256
"""Internal counter to keep track of the add order of callables."""
-
- def add_post_processor(self, post_processor:Callable[['model.System'], None],
- priority:Optional[int]) -> None:
+
+ def add_post_processor(self, post_processor: Callable[['model.System'], None], priority: Optional[int]) -> None:
if priority is None:
priority = DEFAULT_PRIORITY
priority_key = self._get_priority_key(priority)
self._post_processors.append((priority_key, post_processor))
-
- def _get_priority_key(self, priority:int) -> object:
+
+ def _get_priority_key(self, priority: int) -> object:
"""
Return a tuple, `priority` combined with `self._counter`.
@@ -144,52 +168,51 @@ def _get_priority_key(self, priority:int) -> object:
"""
self._counter -= 1
return (priority, self._counter)
-
+
def apply_processors(self) -> None:
"""Apply all of the stored processors, in priority order."""
if self.applied:
- # this is typically only reached in tests, when we
- # call fromText() several times with the same
+ # this is typically only reached in tests, when we
+ # call fromText() several times with the same
# system or when we manually call System.postProcess()
- self.system.msg('post processing',
- 'warning: multiple post-processing pass detected',
- thresh=-1)
+ self.system.msg('post processing', 'warning: multiple post-processing pass detected', thresh=-1)
self.applied.clear()
-
+
self._post_processors.sort()
for p in reversed(self._post_processors):
_, post_processor = p
post_processor(self.system)
self.applied.append(post_processor)
+
@attr.s(auto_attribs=True)
class ExtRegistrar:
"""
The extension registrar class provides utilites to register an extension's components.
"""
+
system: 'model.System'
def register_mixin(self, *mixin: Type[MixinT]) -> None:
"""
- Register mixin for model objects. Mixins shoud extend one of the
+        Register mixin for model objects. Mixins should extend one of the
base mixin classes in L{pydoctor.extensions} module, i.e. L{ClassMixin} or L{DocumentableMixin}, etc.
"""
self.system._factory.add_mixins(**_get_mixins(*mixin))
- def register_astbuilder_visitor(self,
- *visitor: Type[astutils.NodeVisitorExt]) -> None:
+ def register_astbuilder_visitor(self, *visitor: Type[astutils.NodeVisitorExt]) -> None:
"""
Register AST visitor(s). Typically visitor extensions inherits from L{ModuleVisitorExt}.
"""
self.system._astbuilder_visitors.extend(visitor)
-
- def register_post_processor(self,
- *post_processor: Callable[['model.System'], None],
- priority:Optional[int]=None) -> None:
+
+ def register_post_processor(
+ self, *post_processor: Callable[['model.System'], None], priority: Optional[int] = None
+ ) -> None:
"""
Register post processor(s).
-
- A post-processor is simply a one-argument callable receiving
+
+ A post-processor is simply a one-argument callable receiving
the processed L{model.System} and doing stuff on the L{model.Documentable} tree.
@param priority: See L{PriorityProcessor}.
@@ -197,22 +220,26 @@ def register_post_processor(self,
for p in post_processor:
self.system._post_processor.add_post_processor(p, priority)
-def load_extension_module(system:'model.System', mod: str) -> None:
+
+def load_extension_module(system: 'model.System', mod: str) -> None:
"""
Load the pydoctor extension module into the system.
"""
setup_pydoctor_extension = _get_setup_extension_func_from_module(mod)
setup_pydoctor_extension(ExtRegistrar(system))
+
def get_extensions() -> Iterator[str]:
"""
Get the full names of all the pydoctor extension modules.
"""
return _get_submodules('pydoctor.extensions')
+
class ModuleVisitorExt(astutils.NodeVisitorExt):
"""
Base class to extend the L{astbuilder.ModuleVistor}.
"""
+
when = astutils.NodeVisitorExt.When.AFTER
visitor: 'astbuilder.ModuleVistor'
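
For readers tracing the PriorityProcessor hunks above: the ordering contract is the tuple (priority, insertion counter), sorted ascending and then walked in reverse, so higher priority runs first and equal-priority callables run in insertion (FIFO) order. A tiny self-contained sketch, not part of the patch, with made-up priorities:

    # The counter starts at 256 and decreases per registration, exactly so
    # that reversing the ascending sort preserves FIFO within a priority.
    counter = 256
    entries = []
    for priority, name in [(100, "a"), (200, "b"), (100, "c")]:
        counter -= 1
        entries.append(((priority, counter), name))

    entries.sort()
    print([name for _, name in reversed(entries)])  # -> ['b', 'a', 'c']
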
diff --git a/pydoctor/extensions/attrs.py b/pydoctor/extensions/attrs.py
index 212910f80..a28244c6e 100644
--- a/pydoctor/extensions/attrs.py
+++ b/pydoctor/extensions/attrs.py
@@ -1,6 +1,7 @@
"""
Support for L{attrs}.
"""
+
from __future__ import annotations
import ast
@@ -18,6 +19,7 @@
attrib_signature = inspect.signature(attr.ib)
"""Signature of the L{attr.ib} function for defining class attributes."""
+
def uses_auto_attribs(call: ast.AST, module: model.Module) -> bool:
"""Does the given L{attr.s()} decoration contain C{auto_attribs=True}?
@param call: AST of the call to L{attr.s()}.
@@ -36,10 +38,7 @@ def uses_auto_attribs(call: ast.AST, module: model.Module) -> bool:
args = astutils.bind_args(attrs_decorator_signature, call)
except TypeError as ex:
message = str(ex).replace("'", '"')
- module.report(
- f"Invalid arguments for attr.s(): {message}",
- lineno_offset=call.lineno
- )
+ module.report(f"Invalid arguments for attr.s(): {message}", lineno_offset=call.lineno)
return False
auto_attribs_expr = args.arguments.get('auto_attribs')
@@ -50,51 +49,47 @@ def uses_auto_attribs(call: ast.AST, module: model.Module) -> bool:
value = ast.literal_eval(auto_attribs_expr)
except ValueError:
module.report(
- 'Unable to figure out value for "auto_attribs" argument '
- 'to attr.s(), maybe too complex',
- lineno_offset=call.lineno
- )
+ 'Unable to figure out value for "auto_attribs" argument ' 'to attr.s(), maybe too complex',
+ lineno_offset=call.lineno,
+ )
return False
if not isinstance(value, bool):
module.report(
- f'Value for "auto_attribs" argument to attr.s() '
- f'has type "{type(value).__name__}", expected "bool"',
- lineno_offset=call.lineno
- )
+ f'Value for "auto_attribs" argument to attr.s() ' f'has type "{type(value).__name__}", expected "bool"',
+ lineno_offset=call.lineno,
+ )
return False
return value
+
def is_attrib(expr: Optional[ast.expr], ctx: model.Documentable) -> bool:
"""Does this expression return an C{attr.ib}?"""
return isinstance(expr, ast.Call) and astutils.node2fullname(expr.func, ctx) in (
- 'attr.ib', 'attr.attrib', 'attr.attr'
- )
+ 'attr.ib',
+ 'attr.attrib',
+ 'attr.attr',
+ )
+
def attrib_args(expr: ast.expr, ctx: model.Documentable) -> Optional[inspect.BoundArguments]:
"""Get the arguments passed to an C{attr.ib} definition.
@return: The arguments, or L{None} if C{expr} does not look like
an C{attr.ib} definition or the arguments passed to it are invalid.
"""
- if isinstance(expr, ast.Call) and astutils.node2fullname(expr.func, ctx) in (
- 'attr.ib', 'attr.attrib', 'attr.attr'
- ):
+ if isinstance(expr, ast.Call) and astutils.node2fullname(expr.func, ctx) in ('attr.ib', 'attr.attrib', 'attr.attr'):
try:
return astutils.bind_args(attrib_signature, expr)
except TypeError as ex:
message = str(ex).replace("'", '"')
- ctx.module.report(
- f"Invalid arguments for attr.ib(): {message}",
- lineno_offset=expr.lineno
- )
+ ctx.module.report(f"Invalid arguments for attr.ib(): {message}", lineno_offset=expr.lineno)
return None
+
def annotation_from_attrib(
- self: astbuilder.ModuleVistor,
- expr: ast.expr,
- ctx: model.Documentable
- ) -> Optional[ast.expr]:
+ self: astbuilder.ModuleVistor, expr: ast.expr, ctx: model.Documentable
+) -> Optional[ast.expr]:
"""Get the type of an C{attr.ib} definition.
@param expr: The L{ast.Call} expression's AST.
@param ctx: The context in which this expression is evaluated.
@@ -111,20 +106,21 @@ def annotation_from_attrib(
return astutils.infer_type(default)
return None
+
class ModuleVisitor(extensions.ModuleVisitorExt):
-
- def visit_ClassDef(self, node:ast.ClassDef) -> None:
+
+ def visit_ClassDef(self, node: ast.ClassDef) -> None:
"""
Called when a class definition is visited.
"""
cls = self.visitor.builder.current
- if not isinstance(cls, model.Class) or cls.name!=node.name:
+ if not isinstance(cls, model.Class) or cls.name != node.name:
return
assert isinstance(cls, AttrsClass)
cls.auto_attribs = any(uses_auto_attribs(decnode, cls.module) for decnode in node.decorator_list)
- def _handleAttrsAssignmentInClass(self, target:str, node: Union[ast.Assign, ast.AnnAssign]) -> None:
+ def _handleAttrsAssignmentInClass(self, target: str, node: Union[ast.Assign, ast.AnnAssign]) -> None:
cls = self.visitor.builder.current
assert isinstance(cls, AttrsClass)
@@ -135,32 +131,31 @@ def _handleAttrsAssignmentInClass(self, target:str, node: Union[ast.Assign, ast.
return
annotation = node.annotation if isinstance(node, ast.AnnAssign) else None
-
+
if is_attrib(node.value, cls) or (
- cls.auto_attribs and \
- annotation is not None and \
- not astutils.is_using_typing_classvar(annotation, cls)):
-
+ cls.auto_attribs and annotation is not None and not astutils.is_using_typing_classvar(annotation, cls)
+ ):
+
attr.kind = model.DocumentableKind.INSTANCE_VARIABLE
if annotation is None and node.value is not None:
attr.annotation = annotation_from_attrib(self.visitor, node.value, cls)
def _handleAttrsAssignment(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
for dottedname in astutils.iterassign(node):
- if dottedname and len(dottedname)==1:
+ if dottedname and len(dottedname) == 1:
# Here, we consider single name assignment only
current = self.visitor.builder.current
if isinstance(current, model.Class):
- self._handleAttrsAssignmentInClass(
- dottedname[0], node
- )
-
+ self._handleAttrsAssignmentInClass(dottedname[0], node)
+
def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
self._handleAttrsAssignment(node)
+
visit_AnnAssign = visit_Assign
+
class AttrsClass(extensions.ClassMixin, model.Class):
-
+
def setup(self) -> None:
super().setup()
self.auto_attribs: bool = False
@@ -169,6 +164,7 @@ def setup(self) -> None:
library to automatically convert annotated fields into attributes.
"""
-def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None:
+
+def setup_pydoctor_extension(r: extensions.ExtRegistrar) -> None:
r.register_astbuilder_visitor(ModuleVisitor)
r.register_mixin(AttrsClass)
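
As context for the uses_auto_attribs() hunks: the check boils down to locating the auto_attribs keyword in the @attr.s(...) decorator's AST and evaluating it literally. A simplified sketch, not pydoctor's exact code path (which binds the call against inspect.signature(attr.s) via astutils.bind_args):

    import ast

    tree = ast.parse("@attr.s(auto_attribs=True)\nclass C:\n    x: int = 0\n")
    classdef = tree.body[0]
    assert isinstance(classdef, ast.ClassDef)
    decorator = classdef.decorator_list[0]
    assert isinstance(decorator, ast.Call)

    # literal_eval accepts AST nodes directly, no source round-trip needed.
    flags = {kw.arg: ast.literal_eval(kw.value) for kw in decorator.keywords}
    print(flags.get("auto_attribs", False))  # -> True
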
diff --git a/pydoctor/extensions/deprecate.py b/pydoctor/extensions/deprecate.py
index 02c4fd8e8..a07583aa5 100644
--- a/pydoctor/extensions/deprecate.py
+++ b/pydoctor/extensions/deprecate.py
@@ -1,4 +1,3 @@
-
# Copyright (c) Twisted Matrix Laboratories.
# Adjusted from file twisted/python/_pydoctor.py
@@ -19,7 +18,8 @@
if TYPE_CHECKING:
import incremental
-def getDeprecated(self:model.Documentable, decorators:Sequence[ast.expr]) -> None:
+
+def getDeprecated(self: model.Documentable, decorators: Sequence[ast.expr]) -> None:
"""
With a list of decorators, and the object it is running on, set the
C{_deprecated_info} flag if any of the decorators are a Twisted deprecation
@@ -43,15 +43,17 @@ def getDeprecated(self:model.Documentable, decorators:Sequence[ast.expr]) -> Non
# Add a deprecation info with reStructuredText .. deprecated:: directive.
parsed_info = epydoc2stan.parse_docstring(
obj=self,
- doc=f".. deprecated:: {version}\n {text}",
- source=self,
- markup='restructuredtext',
- section='deprecation text',)
+ doc=f".. deprecated:: {version}\n {text}",
+ source=self,
+ markup='restructuredtext',
+ section='deprecation text',
+ )
self.extra_info.append(parsed_info)
+
class ModuleVisitor(extensions.ModuleVisitorExt):
-
- def depart_ClassDef(self, node:ast.ClassDef) -> None:
+
+ def depart_ClassDef(self, node: ast.ClassDef) -> None:
"""
Called after a class definition is visited.
"""
@@ -63,7 +65,7 @@ def depart_ClassDef(self, node:ast.ClassDef) -> None:
return
getDeprecated(cls, node.decorator_list)
- def depart_FunctionDef(self, node:ast.FunctionDef) -> None:
+ def depart_FunctionDef(self, node: ast.FunctionDef) -> None:
"""
Called after a function definition is visited.
"""
@@ -76,8 +78,11 @@ def depart_FunctionDef(self, node:ast.FunctionDef) -> None:
return
getDeprecated(func, node.decorator_list)
+
_incremental_Version_signature = inspect.signature(Version)
-def versionToUsefulObject(version:ast.Call) -> 'incremental.Version':
+
+
+def versionToUsefulObject(version: ast.Call) -> 'incremental.Version':
"""
Change an AST C{Version()} to a real one.
@@ -86,22 +91,28 @@ def versionToUsefulObject(version:ast.Call) -> 'incremental.Version':
"""
bound_args = astutils.bind_args(_incremental_Version_signature, version)
package = astutils.get_str_value(bound_args.arguments['package'])
- major: Union[int, str, None] = astutils.get_int_value(bound_args.arguments['major']) or \
- astutils.get_str_value(bound_args.arguments['major'])
- if major is None or (isinstance(major, str) and major != "NEXT"):
+ major: Union[int, str, None] = astutils.get_int_value(bound_args.arguments['major']) or astutils.get_str_value(
+ bound_args.arguments['major']
+ )
+ if major is None or (isinstance(major, str) and major != "NEXT"):
raise ValueError("Invalid call to incremental.Version(), 'major' should be an int or 'NEXT'.")
assert isinstance(major, (int, str))
minor = astutils.get_int_value(bound_args.arguments['minor'])
micro = astutils.get_int_value(bound_args.arguments['micro'])
if minor is None or micro is None:
raise ValueError("Invalid call to incremental.Version(), 'minor' and 'micro' should be an ints.")
- return Version(package, major, minor=minor, micro=micro) # type:ignore[arg-type]
+ return Version(package, major, minor=minor, micro=micro) # type:ignore[arg-type]
-_deprecation_text_with_replacement_template = "``{name}`` was deprecated in {package} {version}; please use `{replacement}` instead."
+
+_deprecation_text_with_replacement_template = (
+ "``{name}`` was deprecated in {package} {version}; please use `{replacement}` instead."
+)
_deprecation_text_without_replacement_template = "``{name}`` was deprecated in {package} {version}."
_deprecated_signature = inspect.signature(deprecated)
-def deprecatedToUsefulText(ctx:model.Documentable, name:str, deprecated:ast.Call) -> Tuple[str, str]:
+
+
+def deprecatedToUsefulText(ctx: model.Documentable, name: str, deprecated: ast.Call) -> Tuple[str, str]:
"""
Change a C{@deprecated} to a display string.
@@ -114,12 +125,16 @@ def deprecatedToUsefulText(ctx:model.Documentable, name:str, deprecated:ast.Call
bound_args = astutils.bind_args(_deprecated_signature, deprecated)
_version_call = bound_args.arguments['version']
-
+
# Also support using incremental from twisted.python.versions: https://github.com/twisted/twisted/blob/twisted-22.4.0/src/twisted/python/versions.py
- if not isinstance(_version_call, ast.Call) or \
- astbuilder.node2fullname(_version_call.func, ctx) not in ("incremental.Version", "twisted.python.versions.Version"):
- raise ValueError("Invalid call to twisted.python.deprecate.deprecated(), first argument should be a call to incremental.Version()")
-
+ if not isinstance(_version_call, ast.Call) or astbuilder.node2fullname(_version_call.func, ctx) not in (
+ "incremental.Version",
+ "twisted.python.versions.Version",
+ ):
+ raise ValueError(
+ "Invalid call to twisted.python.deprecate.deprecated(), first argument should be a call to incremental.Version()"
+ )
+
version = versionToUsefulObject(_version_call)
replacement: Optional[str] = None
@@ -133,34 +148,32 @@ def deprecatedToUsefulText(ctx:model.Documentable, name:str, deprecated:ast.Call
_package = version.package
# Avoids html injections
- def validate_identifier(_text:str) -> bool:
+ def validate_identifier(_text: str) -> bool:
if not all(p.isidentifier() for p in _text.split('.')):
return False
return True
if not validate_identifier(_package):
raise ValueError(f"Invalid package name: {_package!r}")
-
+
if replacement is not None and not validate_identifier(replacement):
# The replacement is not an identifier, so don't even try to resolve it.
# By adding extras backtics, we make the replacement a literal text.
replacement = replacement.replace('\n', ' ')
replacement = f"`{replacement}`"
-
+
if replacement is not None:
text = _deprecation_text_with_replacement_template.format(
- name=name,
- package=_package,
- version=_version,
- replacement=replacement
+ name=name, package=_package, version=_version, replacement=replacement
)
else:
text = _deprecation_text_without_replacement_template.format(
- name=name,
+ name=name,
package=_package,
version=_version,
)
return _version, text
-def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None:
+
+def setup_pydoctor_extension(r: extensions.ExtRegistrar) -> None:
r.register_astbuilder_visitor(ModuleVisitor)
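
The versionToUsefulObject() hunk binds raw AST arguments against the real incremental.Version signature before evaluating them. A self-contained sketch of that binding trick; version_stub is a hypothetical stand-in for incremental.Version so the snippet carries no dependency:

    import ast
    import inspect

    def version_stub(package, major, minor, micro):  # stand-in signature only
        ...

    call = ast.parse("Version('Twisted', 21, 2, 0)", mode="eval").body
    assert isinstance(call, ast.Call)
    bound = inspect.signature(version_stub).bind(
        *call.args, **{kw.arg: kw.value for kw in call.keywords}
    )
    # The bound arguments are still AST nodes; evaluate the ones needed.
    print(ast.literal_eval(bound.arguments["major"]))  # -> 21

Because bind() never calls the function, it can validate arity and keyword names against the real signature while the values remain unevaluated AST.
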
diff --git a/pydoctor/extensions/zopeinterface.py b/pydoctor/extensions/zopeinterface.py
index a687184c7..8945472de 100644
--- a/pydoctor/extensions/zopeinterface.py
+++ b/pydoctor/extensions/zopeinterface.py
@@ -1,4 +1,5 @@
"""Support for Zope interfaces."""
+
from __future__ import annotations
from typing import Iterable, Iterator, List, Optional, Union
@@ -9,6 +10,7 @@
from pydoctor import model
from pydoctor.epydoc.markup._pyval_repr import colorize_inline_pyval
+
class ZopeInterfaceModule(model.Module, extensions.ModuleMixin):
def setup(self) -> None:
@@ -17,8 +19,7 @@ def setup(self) -> None:
@property
def allImplementedInterfaces(self) -> Iterable[str]:
- """Return all the interfaces provided by this module
- """
+ """Return all the interfaces provided by this module"""
return self.implements_directly
@@ -55,6 +56,7 @@ def allImplementedInterfaces(self) -> Iterable[str]:
r.append(interface)
return r
+
def _inheritedDocsources(obj: model.Documentable) -> Iterator[model.Documentable]:
if not isinstance(obj.parent, (ZopeInterfaceClass, ZopeInterfaceModule)):
return
@@ -67,21 +69,22 @@ def _inheritedDocsources(obj: model.Documentable) -> Iterator[model.Documentable
if name in io2.contents:
yield io2.contents[name]
+
class ZopeInterfaceFunction(model.Function, extensions.FunctionMixin):
def docsources(self) -> Iterator[model.Documentable]:
yield from super().docsources()
yield from _inheritedDocsources(self)
+
class ZopeInterfaceAttribute(model.Attribute, extensions.AttributeMixin):
def docsources(self) -> Iterator[model.Documentable]:
yield from super().docsources()
yield from _inheritedDocsources(self)
+
def addInterfaceInfoToScope(
- scope: Union[ZopeInterfaceClass, ZopeInterfaceModule],
- interfaceargs: Iterable[ast.expr],
- ctx: model.Documentable
- ) -> None:
+ scope: Union[ZopeInterfaceClass, ZopeInterfaceModule], interfaceargs: Iterable[ast.expr], ctx: model.Documentable
+) -> None:
"""Mark the given class or module as implementing the given interfaces.
@param scope: class or module to modify
@param interfaceargs: AST expressions of interface objects
@@ -95,15 +98,12 @@ def addInterfaceInfoToScope(
fullName = astbuilder.node2fullname(arg, ctx)
if fullName is None:
- scope.report(
- 'Interface argument %d does not look like a name' % (idx + 1),
- section='zopeinterface')
+ scope.report('Interface argument %d does not look like a name' % (idx + 1), section='zopeinterface')
else:
scope.implements_directly.append(fullName)
-def _handle_implemented(
- implementer: Union[ZopeInterfaceClass, ZopeInterfaceModule]
- ) -> None:
+
+def _handle_implemented(implementer: Union[ZopeInterfaceClass, ZopeInterfaceModule]) -> None:
"""This is the counterpart to addInterfaceInfoToScope(), which is called
during post-processing.
"""
@@ -112,9 +112,7 @@ def _handle_implemented(
try:
iface = implementer.system.find_object(iface_name)
except LookupError:
- implementer.report(
- 'Interface "%s" not found' % iface_name,
- section='zopeinterface')
+ implementer.report('Interface "%s" not found' % iface_name, section='zopeinterface')
continue
# Update names of reparented interfaces.
@@ -125,31 +123,23 @@ def _handle_implemented(
if isinstance(iface, ZopeInterfaceClass):
if iface.isinterface:
- # System might be post processed mutilple times during tests,
+                # System might be post-processed multiple times during tests,
# so we check if implementer is already there.
if implementer not in iface.implementedby_directly:
iface.implementedby_directly.append(implementer)
else:
- implementer.report(
- 'Class "%s" is not an interface' % iface_name,
- section='zopeinterface')
+ implementer.report('Class "%s" is not an interface' % iface_name, section='zopeinterface')
elif iface is not None:
- implementer.report(
- 'Supposed interface "%s" not detected as a class' % iface_name,
- section='zopeinterface')
-
-def addInterfaceInfoToModule(
- module: ZopeInterfaceModule,
- interfaceargs: Iterable[ast.expr]
- ) -> None:
+ implementer.report('Supposed interface "%s" not detected as a class' % iface_name, section='zopeinterface')
+
+
+def addInterfaceInfoToModule(module: ZopeInterfaceModule, interfaceargs: Iterable[ast.expr]) -> None:
addInterfaceInfoToScope(module, interfaceargs, module)
+
def addInterfaceInfoToClass(
- cls: ZopeInterfaceClass,
- interfaceargs: Iterable[ast.expr],
- ctx: model.Documentable,
- implementsOnly: bool
- ) -> None:
+ cls: ZopeInterfaceClass, interfaceargs: Iterable[ast.expr], ctx: model.Documentable, implementsOnly: bool
+) -> None:
cls.implementsOnly = implementsOnly
if implementsOnly:
cls.implements_directly = []
@@ -157,9 +147,8 @@ def addInterfaceInfoToClass(
schema_prog = re.compile(r'zope\.schema\.([a-zA-Z_][a-zA-Z0-9_]*)')
-interface_prog = re.compile(
- r'zope\.schema\.interfaces\.([a-zA-Z_][a-zA-Z0-9_]*)'
- r'|zope\.interface\.Interface')
+interface_prog = re.compile(r'zope\.schema\.interfaces\.([a-zA-Z_][a-zA-Z0-9_]*)' r'|zope\.interface\.Interface')
+
def namesInterface(system: model.System, name: str) -> bool:
if interface_prog.match(name):
@@ -169,13 +158,10 @@ def namesInterface(system: model.System, name: str) -> bool:
return False
return obj.isinterface
+
class ZopeInterfaceModuleVisitor(extensions.ModuleVisitorExt):
- def _handleZopeInterfaceAssignmentInModule(self,
- target: str,
- expr: Optional[ast.expr],
- lineno: int
- ) -> None:
+ def _handleZopeInterfaceAssignmentInModule(self, target: str, expr: Optional[ast.expr], lineno: int) -> None:
if not isinstance(expr, ast.Call):
return
funcName = astbuilder.node2fullname(expr.func, self.visitor.builder.current)
@@ -188,12 +174,12 @@ def _handleZopeInterfaceAssignmentInModule(self,
# Fetch older attr documentable
old_attr = self.visitor.builder.current.contents.get(target)
if old_attr:
- self.visitor.builder.system._remove(old_attr) # avoid duplicate warning by simply removing the old item
+ self.visitor.builder.system._remove(old_attr) # avoid duplicate warning by simply removing the old item
interface = self.visitor.builder.pushClass(target, lineno)
assert isinstance(interface, ZopeInterfaceClass)
-
- # the docstring node has already been attached to the documentable
+
+ # the docstring node has already been attached to the documentable
# by the time the zopeinterface extension is run, so we fetch the right docstring info from old documentable.
if old_attr:
interface.docstring = old_attr.docstring
@@ -203,11 +189,7 @@ def _handleZopeInterfaceAssignmentInModule(self,
interface.implementedby_directly = []
self.visitor.builder.popClass()
- def _handleZopeInterfaceAssignmentInClass(self,
- target: str,
- expr: Optional[ast.expr],
- lineno: int
- ) -> None:
+ def _handleZopeInterfaceAssignmentInClass(self, target: str, expr: Optional[ast.expr], lineno: int) -> None:
if not isinstance(expr, ast.Call):
return
@@ -225,9 +207,9 @@ def _handleZopeInterfaceAssignmentInClass(self,
attr.setDocstring(args[0])
else:
attr.report(
- 'definition of attribute "%s" should have docstring '
- 'as its sole argument' % attr.name,
- section='zopeinterface')
+ 'definition of attribute "%s" should have docstring ' 'as its sole argument' % attr.name,
+ section='zopeinterface',
+ )
else:
if schema_prog.match(funcName):
attr.kind = model.DocumentableKind.SCHEMA_FIELD
@@ -246,26 +228,21 @@ def _handleZopeInterfaceAssignmentInClass(self,
if isinstance(descrNode, astutils.Str):
attr.setDocstring(descrNode)
elif descrNode is not None:
- attr.report(
- 'description of field "%s" is not a string literal' % attr.name,
- section='zopeinterface')
-
+ attr.report('description of field "%s" is not a string literal' % attr.name, section='zopeinterface')
+
def _handleZopeInterfaceAssignment(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
for dottedname in astutils.iterassign(node):
- if dottedname and len(dottedname)==1:
+ if dottedname and len(dottedname) == 1:
# Here, we consider single name assignment only
current = self.visitor.builder.current
if isinstance(current, model.Class):
- self._handleZopeInterfaceAssignmentInClass(
- dottedname[0], node.value, node.lineno
- )
+ self._handleZopeInterfaceAssignmentInClass(dottedname[0], node.value, node.lineno)
elif isinstance(current, model.Module):
- self._handleZopeInterfaceAssignmentInModule(
- dottedname[0], node.value, node.lineno
- )
-
+ self._handleZopeInterfaceAssignmentInModule(dottedname[0], node.value, node.lineno)
+
def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
self._handleZopeInterfaceAssignment(node)
+
visit_AnnAssign = visit_Assign
def visit_Call(self, node: ast.Call) -> None:
@@ -286,8 +263,8 @@ def visit_Call_zope_interface_implements(self, funcName: str, node: ast.Call) ->
cls = self.visitor.builder.current
if not isinstance(cls, ZopeInterfaceClass):
return
- addInterfaceInfoToClass(cls, node.args, cls,
- funcName == 'zope.interface.implementsOnly')
+ addInterfaceInfoToClass(cls, node.args, cls, funcName == 'zope.interface.implementsOnly')
+
visit_Call_zope_interface_implementsOnly = visit_Call_zope_interface_implements
def visit_Call_zope_interface_classImplements(self, funcName: str, node: ast.Call) -> None:
@@ -295,9 +272,9 @@ def visit_Call_zope_interface_classImplements(self, funcName: str, node: ast.Cal
if not node.args:
self.visitor.builder.system.msg(
'zopeinterface',
- f'{parent.description}:{node.lineno}: '
- f'required argument to classImplements() missing',
- thresh=-1)
+ f'{parent.description}:{node.lineno}: ' f'required argument to classImplements() missing',
+ thresh=-1,
+ )
return
clsname = astbuilder.node2fullname(node.args[0], parent)
cls = None if clsname is None else self.visitor.system.allobjects.get(clsname)
@@ -310,17 +287,17 @@ def visit_Call_zope_interface_classImplements(self, funcName: str, node: ast.Cal
problem = 'not found' if cls is None else 'is not a class'
self.visitor.builder.system.msg(
'zopeinterface',
- f'{parent.description}:{node.lineno}: '
- f'argument {argdesc} to classImplements() {problem}',
- thresh=-1)
+ f'{parent.description}:{node.lineno}: ' f'argument {argdesc} to classImplements() {problem}',
+ thresh=-1,
+ )
return
- addInterfaceInfoToClass(cls, node.args[1:], parent,
- funcName == 'zope.interface.classImplementsOnly')
+ addInterfaceInfoToClass(cls, node.args[1:], parent, funcName == 'zope.interface.classImplementsOnly')
+
visit_Call_zope_interface_classImplementsOnly = visit_Call_zope_interface_classImplements
def depart_ClassDef(self, node: ast.ClassDef) -> None:
cls = self.visitor.builder.current.contents.get(node.name)
-
+
if not isinstance(cls, ZopeInterfaceClass):
return
@@ -345,7 +322,8 @@ def depart_ClassDef(self, node: ast.ClassDef) -> None:
continue
addInterfaceInfoToClass(cls, args, cls.parent, False)
-def postProcess(self:model.System) -> None:
+
+def postProcess(self: model.System) -> None:
for mod in self.objectsOfType(ZopeInterfaceModule):
_handle_implemented(mod)
@@ -353,10 +331,8 @@ def postProcess(self:model.System) -> None:
for cls in self.objectsOfType(ZopeInterfaceClass):
_handle_implemented(cls)
-def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None:
- r.register_mixin(ZopeInterfaceModule,
- ZopeInterfaceFunction,
- ZopeInterfaceClass,
- ZopeInterfaceAttribute)
+
+def setup_pydoctor_extension(r: extensions.ExtRegistrar) -> None:
+ r.register_mixin(ZopeInterfaceModule, ZopeInterfaceFunction, ZopeInterfaceClass, ZopeInterfaceAttribute)
r.register_astbuilder_visitor(ZopeInterfaceModuleVisitor)
r.register_post_processor(postProcess)
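
A quick illustrative check of the two regexes touched above (schema_prog, and interface_prog now joined onto one line); the pattern strings mirror the patch, the test inputs are invented:

    import re

    schema_prog = re.compile(r"zope\.schema\.([a-zA-Z_][a-zA-Z0-9_]*)")
    interface_prog = re.compile(
        r"zope\.schema\.interfaces\.([a-zA-Z_][a-zA-Z0-9_]*)|zope\.interface\.Interface"
    )

    m = schema_prog.match("zope.schema.TextLine")
    print(m.group(1) if m else None)  # -> TextLine
    print(bool(interface_prog.match("zope.interface.Interface")))  # -> True
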
diff --git a/pydoctor/factory.py b/pydoctor/factory.py
index 57be564f0..1417b4b6f 100644
--- a/pydoctor/factory.py
+++ b/pydoctor/factory.py
@@ -1,6 +1,7 @@
"""
Create customizable model classes.
"""
+
from __future__ import annotations
from typing import Dict, List, Tuple, Type, Any, Union, Sequence, TYPE_CHECKING
@@ -8,6 +9,7 @@
if TYPE_CHECKING:
from pydoctor import model
+
class GenericFactory:
def __init__(self, bases: Dict[str, Type[Any]]) -> None:
@@ -15,22 +17,22 @@ def __init__(self, bases: Dict[str, Type[Any]]) -> None:
self.mixins: Dict[str, List[Type[Any]]] = {}
self._class_cache: Dict[Tuple[str, Tuple[Type[Any], ...]], Type[Any]] = {}
- def add_mixin(self, for_class: str, mixin:Type[Any]) -> None:
+ def add_mixin(self, for_class: str, mixin: Type[Any]) -> None:
"""
- Add a mixin class to the specified object in the factory.
+ Add a mixin class to the specified object in the factory.
"""
try:
mixins = self.mixins[for_class]
except KeyError:
mixins = []
self.mixins[for_class] = mixins
-
+
assert isinstance(mixins, list)
mixins.append(mixin)
- def add_mixins(self, **kwargs:Union[Sequence[Type[Any]], Type[Any]]) -> None:
+ def add_mixins(self, **kwargs: Union[Sequence[Type[Any]], Type[Any]]) -> None:
"""
- Add mixin classes to objects in the factory.
+ Add mixin classes to objects in the factory.
Example::
class MyClassMixin: ...
class MyDataMixin: ...
@@ -38,15 +40,15 @@ class MyDataMixin: ...
factory.add_mixins(Class=MyClassMixin, Attribute=MyDataMixin)
:param kwargs: Minin(s) classes to apply to names.
"""
- for key,value in kwargs.items():
+ for key, value in kwargs.items():
if isinstance(value, Sequence):
for item in value:
self.add_mixin(key, item)
else:
self.add_mixin(key, value)
- def get_class(self, name:str) -> Type[Any]:
- class_id = name, tuple(self.mixins.get(name, [])+[self.bases[name]])
+ def get_class(self, name: str) -> Type[Any]:
+ class_id = name, tuple(self.mixins.get(name, []) + [self.bases[name]])
cached = self._class_cache.get(class_id)
if cached is not None:
cls = cached
@@ -55,14 +57,16 @@ def get_class(self, name:str) -> Type[Any]:
self._class_cache[class_id] = cls
return cls
+
class Factory(GenericFactory):
"""
- Classes are created dynamically with `type` such that they can inherith from customizable mixin classes.
+    Classes are created dynamically with `type` such that they can inherit from customizable mixin classes.
"""
def __init__(self) -> None:
# Workaround cyclic import issue.
from pydoctor import model
+
self.model = model
_bases = {
'Class': model.Class,
@@ -101,7 +105,7 @@ def Module(self) -> Type['model.Module']:
mod = self.get_class('Module')
assert issubclass(mod, self.model.Module)
return mod
-
+
@property
def Package(self) -> Type['model.Package']:
mod = self.get_class('Package')
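
The get_class() hunk keys its cache on the (name, mixins + base) tuple; per the Factory docstring, the class itself is built dynamically with type() so registered mixins precede the base class in the MRO. A toy sketch with hypothetical classes:

    class Base:
        def who(self) -> str:
            return "base"

    class Mixin:
        def who(self) -> str:
            return "mixin"

    # Mixins come first in the bases tuple, so they win attribute lookup.
    cls = type("Class", (Mixin, Base), {})
    print(cls().who())  # -> mixin
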
diff --git a/pydoctor/linker.py b/pydoctor/linker.py
index a569107b2..2814a5584 100644
--- a/pydoctor/linker.py
+++ b/pydoctor/linker.py
@@ -1,25 +1,23 @@
"""
This module provides implementations of epydoc's L{DocstringLinker} class.
"""
+
from __future__ import annotations
import contextlib
from twisted.web.template import Tag, tags
-from typing import (
- TYPE_CHECKING, Iterable, Iterator,
- Optional, Union
-)
+from typing import TYPE_CHECKING, Iterable, Iterator, Optional, Union
from pydoctor.epydoc.markup import DocstringLinker
if TYPE_CHECKING:
from twisted.web.template import Flattenable
-
+
# This import must be kept in the TYPE_CHECKING block for circular references issues.
from pydoctor import model
-def taglink(o: 'model.Documentable', page_url: str,
- label: Optional["Flattenable"] = None) -> Tag:
+
+def taglink(o: 'model.Documentable', page_url: str, label: Optional["Flattenable"] = None) -> Tag:
"""
Create a link to an object that exists in the system.
@@ -29,7 +27,7 @@ def taglink(o: 'model.Documentable', page_url: str,
@param label: The label to use for the link
"""
if not o.isVisible:
- o.system.msg("html", "don't link to %s"%o.fullName())
+ o.system.msg("html", "don't link to %s" % o.fullName())
if label is None:
label = o.fullName()
@@ -39,47 +37,49 @@ def taglink(o: 'model.Documentable', page_url: str,
# When linking to an item on the same page, omit the path.
# Besides shortening the HTML, this also avoids the page being reloaded
# if the query string is non-empty.
- url = url[len(page_url):]
+ url = url[len(page_url) :]
ret: Tag = tags.a(label, href=url, class_='internal-link')
if label != o.fullName():
ret(title=o.fullName())
return ret
-def intersphinx_link(label:"Flattenable", url:str) -> Tag:
+
+def intersphinx_link(label: "Flattenable", url: str) -> Tag:
"""
- Create a intersphinx link.
-
+    Create an intersphinx link.
+
It's special because it uses the 'intersphinx-link' CSS class.
"""
return tags.a(label, href=url, class_='intersphinx-link')
+
class _EpydocLinker(DocstringLinker):
"""
This linker implements the xref lookup logic.
"""
def __init__(self, obj: 'model.Documentable') -> None:
- self.reporting_obj:Optional['model.Documentable'] = obj
+ self.reporting_obj: Optional['model.Documentable'] = obj
"""
Object used for reporting link not found errors. Changed when the linker L{switch_context}.
"""
-
+
self._init_obj = obj
self._page_object: Optional['model.Documentable'] = obj.page_object
-
+
@property
def obj(self) -> 'model.Documentable':
"""
Object used for resolving the target name, it's NOT changed when the linker L{switch_context}.
"""
return self._init_obj
-
+
@property
def page_url(self) -> str:
"""
- URL of the page used to compute the relative links from.
- Can be an empty string to always generate full urls.
+ URL of the page used to compute the relative links from.
+ Can be an empty string to always generate full urls.
"""
pageob = self._page_object
if pageob is not None:
@@ -87,24 +87,22 @@ def page_url(self) -> str:
return ''
@contextlib.contextmanager
- def switch_context(self, ob:Optional['model.Documentable']) -> Iterator[None]:
-
+ def switch_context(self, ob: Optional['model.Documentable']) -> Iterator[None]:
+
old_page_object = self._page_object
old_reporting_object = self.reporting_obj
self._page_object = None if ob is None else ob.page_object
self.reporting_obj = ob
-
+
yield
-
+
self._page_object = old_page_object
self.reporting_obj = old_reporting_object
- def look_for_name(self,
- name: str,
- candidates: Iterable['model.Documentable'],
- lineno: int
- ) -> Optional['model.Documentable']:
+ def look_for_name(
+ self, name: str, candidates: Iterable['model.Documentable'], lineno: int
+ ) -> Optional['model.Documentable']:
part0 = name.split('.')[0]
potential_targets = []
for src in candidates:
@@ -117,10 +115,10 @@ def look_for_name(self,
return potential_targets[0]
elif len(potential_targets) > 1 and self.reporting_obj:
self.reporting_obj.report(
- "ambiguous ref to %s, could be %s" % (
- name,
- ', '.join(ob.fullName() for ob in potential_targets)),
- 'resolve_identifier_xref', lineno)
+ "ambiguous ref to %s, could be %s" % (name, ', '.join(ob.fullName() for ob in potential_targets)),
+ 'resolve_identifier_xref',
+ lineno,
+ )
return None
def look_for_intersphinx(self, name: str) -> Optional[str]:
@@ -159,13 +157,10 @@ def link_xref(self, target: str, label: "Flattenable", lineno: int) -> Tag:
xref = intersphinx_link(label, url=resolved)
else:
xref = taglink(resolved, self.page_url, label)
-
+
return tags.code(xref)
- def _resolve_identifier_xref(self,
- identifier: str,
- lineno: int
- ) -> Union[str, 'model.Documentable']:
+ def _resolve_identifier_xref(self, identifier: str, lineno: int) -> Union[str, 'model.Documentable']:
"""
Resolve a crossreference link to a Python identifier.
This will resolve the identifier to any reasonable target,
@@ -228,7 +223,10 @@ def _resolve_identifier_xref(self,
# found, complain.
target = self.look_for_name(
# System.objectsOfType now supports passing the type as string.
- identifier, self.obj.system.objectsOfType('pydoctor.model.Module'), lineno)
+ identifier,
+ self.obj.system.objectsOfType('pydoctor.model.Module'),
+ lineno,
+ )
if target is not None:
return target
@@ -242,49 +240,52 @@ def _resolve_identifier_xref(self,
self.reporting_obj.report(message, 'resolve_identifier_xref', lineno)
raise LookupError(identifier)
+
class _AnnotationLinker(DocstringLinker):
"""
- Specialized linker to resolve annotations attached to the given L{Documentable}.
+ Specialized linker to resolve annotations attached to the given L{Documentable}.
- Links will be created in the context of C{obj} but
+ Links will be created in the context of C{obj} but
generated with the C{obj.module}'s linker when possible.
"""
- def __init__(self, obj:'model.Documentable') -> None:
+
+ def __init__(self, obj: 'model.Documentable') -> None:
self._obj = obj
self._module = obj.module
self._scope = obj.parent or obj
self._scope_linker = _EpydocLinker(self._scope)
-
+
@property
def obj(self) -> 'model.Documentable':
return self._obj
- def warn_ambiguous_annotation(self, target:str) -> None:
+ def warn_ambiguous_annotation(self, target: str) -> None:
# report a low-level message about ambiguous annotation
mod_ann = self._module.expandName(target)
obj_ann = self._scope.expandName(target)
if mod_ann != obj_ann and '.' in obj_ann and '.' in mod_ann:
self.obj.report(
- f'ambiguous annotation {target!r}, could be interpreted as '
- f'{obj_ann!r} instead of {mod_ann!r}', section='annotation',
- thresh=1
+ f'ambiguous annotation {target!r}, could be interpreted as ' f'{obj_ann!r} instead of {mod_ann!r}',
+ section='annotation',
+ thresh=1,
)
-
+
def link_to(self, target: str, label: "Flattenable") -> Tag:
with self.switch_context(self._obj):
if self._module.isNameDefined(target):
self.warn_ambiguous_annotation(target)
return self._scope_linker.link_to(target, label, is_annotation=True)
-
+
def link_xref(self, target: str, label: "Flattenable", lineno: int) -> Tag:
with self.switch_context(self._obj):
return self.obj.docstring_linker.link_xref(target, label, lineno)
@contextlib.contextmanager
- def switch_context(self, ob:Optional['model.Documentable']) -> Iterator[None]:
+ def switch_context(self, ob: Optional['model.Documentable']) -> Iterator[None]:
with self._scope_linker.switch_context(ob):
yield
+
class NotFoundLinker(DocstringLinker):
"""A DocstringLinker implementation that cannot find any links."""
@@ -293,7 +294,7 @@ def link_to(self, target: str, label: "Flattenable") -> Tag:
def link_xref(self, target: str, label: "Flattenable", lineno: int) -> Tag:
return tags.code(label)
-
+
@contextlib.contextmanager
def switch_context(self, ob: Optional[model.Documentable]) -> Iterator[None]:
yield
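The switch_context managers above follow a save/restore pattern: stash the current page and reporting objects, install the new ones, yield, then put the old ones back. A self-contained sketch of that pattern (hypothetical class; a try/finally is added here so the context is restored even if the with-body raises)::

    import contextlib
    from typing import Iterator, Optional

    class ContextHolder:
        def __init__(self, obj: Optional[str]) -> None:
            self.reporting_obj = obj

        @contextlib.contextmanager
        def switch_context(self, ob: Optional[str]) -> Iterator[None]:
            old = self.reporting_obj   # save the current context
            self.reporting_obj = ob    # install the new one
            try:
                yield
            finally:
                self.reporting_obj = old  # restore on the way out

    holder = ContextHolder("module_a")
    with holder.switch_context("module_b"):
        assert holder.reporting_obj == "module_b"
    assert holder.reporting_obj == "module_a"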
diff --git a/pydoctor/model.py b/pydoctor/model.py
index fceaf21d9..3fc23bf39 100644
--- a/pydoctor/model.py
+++ b/pydoctor/model.py
@@ -5,6 +5,7 @@
system being documented. An instance of L{System} represents the whole system
being documented -- a System is a bag of Documentables, in some sense.
"""
+
from __future__ import annotations
import abc
@@ -20,8 +21,23 @@
from inspect import signature, Signature
from pathlib import Path
from typing import (
- TYPE_CHECKING, Any, Collection, Dict, Iterator, List, Mapping, Callable,
- Optional, Sequence, Set, Tuple, Type, TypeVar, Union, cast, overload
+ TYPE_CHECKING,
+ Any,
+ Collection,
+ Dict,
+ Iterator,
+ List,
+ Mapping,
+ Callable,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ cast,
+ overload,
)
from urllib.parse import quote
@@ -57,14 +73,16 @@
class LineFromAst(int):
"Simple L{int} wrapper for linenumbers coming from ast analysis."
+
class LineFromDocstringField(int):
"Simple L{int} wrapper for linenumbers coming from docstrings."
+
class DocLocation(Enum):
OWN_PAGE = 1
PARENT_PAGE = 2
# Nothing uses this yet. Parameters will one day.
- #UNDER_PARENT_DOCSTRING = 3
+ # UNDER_PARENT_DOCSTRING = 3
class ProcessingState(Enum):
@@ -87,30 +105,33 @@ class PrivacyClass(Enum):
# For compatibility
VISIBLE = PUBLIC
+
class DocumentableKind(Enum):
"""
L{Enum} containing values indicating the possible object types.
@note: Presentation order is derived from the enum values
"""
- PACKAGE = 1000
- MODULE = 900
- CLASS = 800
- INTERFACE = 850
- EXCEPTION = 750
- CLASS_METHOD = 700
- STATIC_METHOD = 600
- METHOD = 500
- FUNCTION = 400
- CONSTANT = 310
- TYPE_VARIABLE = 306
- TYPE_ALIAS = 305
- CLASS_VARIABLE = 300
- SCHEMA_FIELD = 220
- ATTRIBUTE = 210
- INSTANCE_VARIABLE = 200
- PROPERTY = 150
- VARIABLE = 100
+
+ PACKAGE = 1000
+ MODULE = 900
+ CLASS = 800
+ INTERFACE = 850
+ EXCEPTION = 750
+ CLASS_METHOD = 700
+ STATIC_METHOD = 600
+ METHOD = 500
+ FUNCTION = 400
+ CONSTANT = 310
+ TYPE_VARIABLE = 306
+ TYPE_ALIAS = 305
+ CLASS_VARIABLE = 300
+ SCHEMA_FIELD = 220
+ ATTRIBUTE = 210
+ INSTANCE_VARIABLE = 200
+ PROPERTY = 150
+ VARIABLE = 100
+
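Since presentation order is derived from the enum values, sorting members by descending value yields the intended order. A hedged sketch with a trimmed-down enum (not pydoctor's actual sorting code)::

    from enum import Enum

    class Kind(Enum):
        PACKAGE = 1000
        CLASS = 800
        FUNCTION = 400

    members = [Kind.FUNCTION, Kind.PACKAGE, Kind.CLASS]
    # Higher enum value sorts first.
    assert sorted(members, key=lambda k: -k.value) == [Kind.PACKAGE, Kind.CLASS, Kind.FUNCTION]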
class Documentable:
"""An object that can be documented.
@@ -121,6 +142,7 @@ class Documentable:
@ivar system: The system the object is part of.
"""
+
docstring: Optional[str] = None
parsed_docstring: Optional[ParsedDocstring] = None
parsed_summary: Optional[ParsedDocstring] = None
@@ -134,10 +156,8 @@ class Documentable:
"""Page location where we are documented."""
def __init__(
- self, system: 'System', name: str,
- parent: Optional['Documentable'] = None,
- source_path: Optional[Path] = None
- ):
+ self, system: 'System', name: str, parent: Optional['Documentable'] = None, source_path: Optional[Path] = None
+ ):
if source_path is None and parent is not None:
source_path = parent.source_path
self.system = system
@@ -163,16 +183,18 @@ def setDocstring(self, node: astutils.Str) -> None:
lineno, doc = astutils.extract_docstring(node)
self._setDocstringValue(doc, lineno)
- def _setDocstringValue(self, doc:str, lineno:int) -> None:
- if self.docstring or self.parsed_docstring: # some object have a parsed docstring only like the ones coming from ivar fields
+ def _setDocstringValue(self, doc: str, lineno: int) -> None:
+ if (
+ self.docstring or self.parsed_docstring
+ ): # some objects only have a parsed docstring, like the ones coming from ivar fields
msg = 'Existing docstring'
if self.docstring_lineno:
msg += f' at line {self.docstring_lineno}'
msg += ' is overridden'
- self.report(msg, 'docstring', lineno_offset=lineno-self.docstring_lineno)
+ self.report(msg, 'docstring', lineno_offset=lineno - self.docstring_lineno)
self.docstring = doc
self.docstring_lineno = lineno
- # Due to the current process for parsing doc strings, some objects might already have a parsed_docstring populated at this moment.
+ # Due to the current process for parsing doc strings, some objects might already have a parsed_docstring populated at this moment.
# This is an unfortunate behaviour but it’s too big of a refactor for now (see https://github.com/twisted/pydoctor/issues/798).
if self.parsed_docstring:
self.parsed_docstring = None
@@ -186,13 +208,13 @@ def setLineNumber(self, lineno: LineFromDocstringField | LineFromAst | int) -> N
if not from docstring fields as well, the old docstring-based linenumber will be replaced
with the one from ast analysis since this takes precedence.
- @param lineno: The linenumber.
- If the given linenumber is simply an L{int} we'll assume it's coming from the ast builder
+ @param lineno: The linenumber.
+ If the given linenumber is simply an L{int} we'll assume it's coming from the ast builder
and it will be converted to an L{LineFromAst} instance.
"""
if not self.linenumber or (
- isinstance(self.linenumber, LineFromDocstringField)
- and not isinstance(lineno, LineFromDocstringField)):
+ isinstance(self.linenumber, LineFromDocstringField) and not isinstance(lineno, LineFromDocstringField)
+ ):
if not isinstance(lineno, (LineFromAst, LineFromDocstringField)):
lineno = LineFromAst(lineno)
self.linenumber = lineno
@@ -201,8 +223,7 @@ def setLineNumber(self, lineno: LineFromDocstringField | LineFromAst | int) -> N
parentSourceHref = parentMod.sourceHref
if parentSourceHref:
self.sourceHref = self.system.options.htmlsourcetemplate.format(
- mod_source_href=parentSourceHref,
- lineno=str(lineno)
+ mod_source_href=parentSourceHref, lineno=str(lineno)
)
@property
@@ -270,7 +291,6 @@ def docsources(self) -> Iterator['Documentable']:
"""
yield self
-
def reparent(self, new_parent: 'Module', new_name: str) -> None:
# this code attempts to preserve "rather a lot" of
# invariants assumed by various bits of pydoctor
@@ -297,11 +317,11 @@ def _handle_reparenting_post(self) -> None:
self.system.allobjects[self.fullName()] = self
for o in self.contents.values():
o._handle_reparenting_post()
-
+
def _localNameToFullName(self, name: str) -> str:
raise NotImplementedError(self._localNameToFullName)
-
- def isNameDefined(self, name:str) -> bool:
+
+ def isNameDefined(self, name: str) -> bool:
"""
Is the given name defined in the globals/locals of self-context?
Only the first name of a dotted name is checked.
@@ -330,7 +350,7 @@ class E:
In the context of mod2.E, expandName("RenamedExternal") should be
"external_location.External" and expandName("renamed_mod.Local")
- should be "mod1.Local". """
+ should be "mod1.Local"."""
parts = name.split('.')
obj: Documentable = self
for i, p in enumerate(parts):
@@ -340,7 +360,7 @@ class E:
# If we're looking at a class, we try our luck with the inherited members
if isinstance(obj, Class):
inherited = obj.find(p)
- if inherited:
+ if inherited:
full_name = inherited.fullName()
if full_name == p:
# We don't have a full name
@@ -352,11 +372,11 @@ class E:
if nxt is None:
break
obj = nxt
- return '.'.join([full_name] + parts[i + 1:])
+ return '.'.join([full_name] + parts[i + 1 :])
def expandAnnotationName(self, name: str) -> str:
"""
- Like L{expandName} but gives precedence to the module scope when a
+ Like L{expandName} but gives precedence to the module scope when a
name is defined both in the current scope and the module scope.
"""
if self.module.isNameDefined(name):
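The expandName loop above resolves the first dotted-name segment through the local-to-full-name table and re-joins the unresolved remainder. A toy sketch of that behaviour, reusing the table from the docstring example (made-up dictionary, not the real lookup)::

    local_to_full = {
        "RenamedExternal": "external_location.External",
        "renamed_mod": "mod1",
    }

    def expand(name: str) -> str:
        parts = name.split('.')
        # Resolve the first segment, keep the rest as-is.
        full = local_to_full.get(parts[0], parts[0])
        return '.'.join([full] + parts[1:])

    assert expand("RenamedExternal") == "external_location.External"
    assert expand("renamed_mod.Local") == "mod1.Local"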
@@ -406,14 +426,14 @@ def module(self) -> 'Module':
assert parentMod is not None
return parentMod
- def report(self, descr: str, section: str = 'parsing', lineno_offset: int = 0, thresh:int=-1) -> None:
+ def report(self, descr: str, section: str = 'parsing', lineno_offset: int = 0, thresh: int = -1) -> None:
"""
Log an error or warning about this documentable object.
@param descr: The error/warning string
@param section: What the warning is about.
@param lineno_offset: Offset
- @param thresh: Thresh to pass to L{System.msg}, it will use C{-1} by default,
+ @param thresh: Threshold to pass to L{System.msg}; it will use C{-1} by default,
meaning it will count as a violation and will fail the build if option C{-W} is passed.
But this behaviour does not apply if C{thresh} is greater than or equal to zero.
"""
@@ -430,16 +450,13 @@ def report(self, descr: str, section: str = 'parsing', lineno_offset: int = 0, t
else:
linenumber = '???'
- self.system.msg(
- section,
- f'{self.description}:{linenumber}: {descr}',
- thresh=thresh)
+ self.system.msg(section, f'{self.description}:{linenumber}: {descr}', thresh=thresh)
@property
def docstring_linker(self) -> 'linker.DocstringLinker':
"""
Returns an instance of L{DocstringLinker} suitable for resolving names
- in the context of the object.
+ in the context of the object.
"""
if self._linker is not None:
return self._linker
@@ -451,7 +468,7 @@ class CanContainImportsDocumentable(Documentable):
def setup(self) -> None:
super().setup()
self._localNameToFullName_map: Dict[str, str] = {}
-
+
def isNameDefined(self, name: str) -> bool:
name = name.split('.')[0]
if name in self.contents:
@@ -462,10 +479,10 @@ def isNameDefined(self, name: str) -> bool:
return self.module.isNameDefined(name)
else:
return False
-
+
def localNames(self) -> Iterator[str]:
- return chain(self.contents.keys(),
- self._localNameToFullName_map.keys())
+ return chain(self.contents.keys(), self._localNameToFullName_map.keys())
+
class Module(CanContainImportsDocumentable):
kind = DocumentableKind.MODULE
@@ -517,8 +534,8 @@ def module(self) -> 'Module':
@property
def docformat(self) -> Optional[str]:
"""The name of the format to be used for parsing docstrings in this module.
-
- The docformat value are inherited from packages if a C{__docformat__} variable
+
+ The docformat value is inherited from packages if a C{__docformat__} variable
is defined in the C{__init__.py} file.
If no C{__docformat__} variable was found or its
@@ -529,45 +546,95 @@ def docformat(self) -> Optional[str]:
elif isinstance(self.parent, Package):
return self.parent.docformat
return None
-
+
@docformat.setter
def docformat(self, value: str) -> None:
self._docformat = value
def submodules(self) -> Iterator['Module']:
"""Returns an iterator over the visible submodules."""
- return (m for m in self.contents.values()
- if isinstance(m, Module) and m.isVisible)
+ return (m for m in self.contents.values() if isinstance(m, Module) and m.isVisible)
+
class Package(Module):
kind = DocumentableKind.PACKAGE
+
# List of exceptions class names in the standard library, Python 3.8.10
-_STD_LIB_EXCEPTIONS = ('ArithmeticError', 'AssertionError', 'AttributeError',
- 'BaseException', 'BlockingIOError', 'BrokenPipeError',
- 'BufferError', 'BytesWarning', 'ChildProcessError',
- 'ConnectionAbortedError', 'ConnectionError',
- 'ConnectionRefusedError', 'ConnectionResetError',
- 'DeprecationWarning', 'EOFError',
- 'EnvironmentError', 'Exception', 'FileExistsError',
- 'FileNotFoundError', 'FloatingPointError', 'FutureWarning',
- 'GeneratorExit', 'IOError', 'ImportError', 'ImportWarning',
- 'IndentationError', 'IndexError', 'InterruptedError',
- 'IsADirectoryError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
- 'MemoryError', 'ModuleNotFoundError', 'NameError',
- 'NotADirectoryError', 'NotImplementedError',
- 'OSError', 'OverflowError', 'PendingDeprecationWarning', 'PermissionError',
- 'ProcessLookupError', 'RecursionError', 'ReferenceError',
- 'ResourceWarning', 'RuntimeError', 'RuntimeWarning', 'StopAsyncIteration',
- 'StopIteration', 'SyntaxError', 'SyntaxWarning', 'SystemError',
- 'SystemExit', 'TabError', 'TimeoutError', 'TypeError',
- 'UnboundLocalError', 'UnicodeDecodeError', 'UnicodeEncodeError',
- 'UnicodeError', 'UnicodeTranslateError', 'UnicodeWarning', 'UserWarning',
- 'ValueError', 'Warning', 'ZeroDivisionError')
+_STD_LIB_EXCEPTIONS = (
+ 'ArithmeticError',
+ 'AssertionError',
+ 'AttributeError',
+ 'BaseException',
+ 'BlockingIOError',
+ 'BrokenPipeError',
+ 'BufferError',
+ 'BytesWarning',
+ 'ChildProcessError',
+ 'ConnectionAbortedError',
+ 'ConnectionError',
+ 'ConnectionRefusedError',
+ 'ConnectionResetError',
+ 'DeprecationWarning',
+ 'EOFError',
+ 'EnvironmentError',
+ 'Exception',
+ 'FileExistsError',
+ 'FileNotFoundError',
+ 'FloatingPointError',
+ 'FutureWarning',
+ 'GeneratorExit',
+ 'IOError',
+ 'ImportError',
+ 'ImportWarning',
+ 'IndentationError',
+ 'IndexError',
+ 'InterruptedError',
+ 'IsADirectoryError',
+ 'KeyError',
+ 'KeyboardInterrupt',
+ 'LookupError',
+ 'MemoryError',
+ 'ModuleNotFoundError',
+ 'NameError',
+ 'NotADirectoryError',
+ 'NotImplementedError',
+ 'OSError',
+ 'OverflowError',
+ 'PendingDeprecationWarning',
+ 'PermissionError',
+ 'ProcessLookupError',
+ 'RecursionError',
+ 'ReferenceError',
+ 'ResourceWarning',
+ 'RuntimeError',
+ 'RuntimeWarning',
+ 'StopAsyncIteration',
+ 'StopIteration',
+ 'SyntaxError',
+ 'SyntaxWarning',
+ 'SystemError',
+ 'SystemExit',
+ 'TabError',
+ 'TimeoutError',
+ 'TypeError',
+ 'UnboundLocalError',
+ 'UnicodeDecodeError',
+ 'UnicodeEncodeError',
+ 'UnicodeError',
+ 'UnicodeTranslateError',
+ 'UnicodeWarning',
+ 'UserWarning',
+ 'ValueError',
+ 'Warning',
+ 'ZeroDivisionError',
+)
+
+
def is_exception(cls: 'Class') -> bool:
"""
- Whether is class should be considered as
- an exception and be marked with the special
+ Whether this class should be considered
+ an exception and be marked with the special
kind L{DocumentableKind.EXCEPTION}.
"""
for base in cls.mro(True, False):
@@ -575,18 +642,20 @@ def is_exception(cls: 'Class') -> bool:
return True
return False
-def compute_mro(cls:'Class') -> Sequence[Union['Class', str]]:
+
+def compute_mro(cls: 'Class') -> Sequence[Union['Class', str]]:
"""
Compute the method resolution order for this class.
- This function will also set the
- C{_finalbaseobjects} and C{_finalbases} attributes on
+ This function will also set the
+ C{_finalbaseobjects} and C{_finalbases} attributes on
this class and all its superclasses.
"""
- def init_finalbaseobjects(o: 'Class', path:Optional[List['Class']]=None) -> None:
+
+ def init_finalbaseobjects(o: 'Class', path: Optional[List['Class']] = None) -> None:
if not path:
path = []
if o in path:
- cycle_str = " -> ".join([o.fullName() for o in path[path.index(cls):] + [cls]])
+ cycle_str = " -> ".join([o.fullName() for o in path[path.index(cls) :] + [cls]])
raise ValueError(f"Cycle found while computing inheritance hierarchy: {cycle_str}")
path.append(o)
if o._finalbaseobjects is not None:
@@ -594,7 +663,7 @@ def init_finalbaseobjects(o: 'Class', path:Optional[List['Class']]=None) -> None
if o.rawbases:
finalbaseobjects: List[Optional[Class]] = []
finalbases: List[str] = []
- for i,((str_base, _), base) in enumerate(zip(o.rawbases, o._initialbaseobjects)):
+ for i, ((str_base, _), base) in enumerate(zip(o.rawbases, o._initialbaseobjects)):
if base:
finalbaseobjects.append(base)
finalbases.append(base.fullName())
@@ -614,18 +683,18 @@ def init_finalbaseobjects(o: 'Class', path:Optional[List['Class']]=None) -> None
init_finalbaseobjects(base, path.copy())
o._finalbaseobjects = finalbaseobjects
o._finalbases = finalbases
-
- def localbases(o:'Class') -> Iterator[Union['Class', str]]:
+
+ def localbases(o: 'Class') -> Iterator[Union['Class', str]]:
"""
Like L{Class.baseobjects} but fallback to the expanded name if the base is not resolved to a L{Class} object.
"""
- for s,b in zip(o.bases, o.baseobjects):
+ for s, b in zip(o.bases, o.baseobjects):
if isinstance(b, Class):
yield b
else:
yield s
- def getbases(o:Union['Class', str]) -> List[Union['Class', str]]:
+ def getbases(o: Union['Class', str]) -> List[Union['Class', str]]:
if isinstance(o, str):
return []
return list(localbases(o))
@@ -633,12 +702,13 @@ def getbases(o:Union['Class', str]) -> List[Union['Class', str]]:
init_finalbaseobjects(cls)
return mro.mro(cls, getbases)
-def _find_dunder_constructor(cls:'Class') -> Optional['Function']:
+
+def _find_dunder_constructor(cls: 'Class') -> Optional['Function']:
"""
Find a non-default python-powered dunder constructor.
Returns C{None} if neither C{__new__} nor C{__init__} is defined.
- @note: C{__new__} takes precedence orver C{__init__}.
+ @note: C{__new__} takes precedence over C{__init__}.
More info: U{https://docs.python.org/3/reference/datamodel.html#object.__new__}
"""
_new = cls.find('__new__')
@@ -650,7 +720,8 @@ def _find_dunder_constructor(cls:'Class') -> Optional['Function']:
return _init
return None
-def get_constructors(cls:Class) -> Iterator[Function]:
+
+def get_constructors(cls: Class) -> Iterator[Function]:
"""
Look for python language powered constructors or classmethod constructors.
A constructor MUST be a method accessible in the locals of the class.
@@ -673,23 +744,23 @@ def get_constructors(cls:Class) -> Iterator[Function]:
# get return annotation, if it returns the same type as self, it's a constructor method.
if not 'return' in fun.annotations:
# we currently only support constructor detection through explicit annotations.
- continue
+ continue
# annotation should be resolved at the module scope
return_ann = astutils.node2fullname(fun.annotations['return'], cls.module)
# pydoctor understands explicit annotations as well as the Self-Type.
- if return_ann == cls.fullName() or \
- return_ann in ('typing.Self', 'typing_extensions.Self'):
+ if return_ann == cls.fullName() or return_ann in ('typing.Self', 'typing_extensions.Self'):
yield fun
+
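For illustration, the kind of classmethod the detection above is meant to flag as a constructor (hypothetical class; assumes typing_extensions is installed for Self)::

    from typing_extensions import Self

    class Point:
        def __init__(self, x: float, y: float) -> None:
            self.x, self.y = x, y

        @classmethod
        def origin(cls) -> Self:
            # Self-typed return: recognized as a constructor.
            return cls(0.0, 0.0)

        @classmethod
        def parse(cls, s: str) -> "Point":
            # Explicit annotation naming the class itself also qualifies.
            x, y = (float(p) for p in s.split(','))
            return cls(x, y)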
class Class(CanContainImportsDocumentable):
kind = DocumentableKind.CLASS
parent: CanContainImportsDocumentable
decorators: Sequence[Tuple[str, Optional[Sequence[ast.expr]]]]
# set in post-processing:
- _finalbaseobjects: Optional[List[Optional['Class']]] = None
+ _finalbaseobjects: Optional[List[Optional['Class']]] = None
_finalbases: Optional[List[str]] = None
_mro: Optional[Sequence[Union['Class', str]]] = None
@@ -700,7 +771,7 @@ def setup(self) -> None:
self.subclasses: List[Class] = []
self._initialbases: List[str] = []
self._initialbaseobjects: List[Optional['Class']] = []
-
+
def _init_mro(self) -> None:
"""
Compute the correct value of the method resolution order returned by L{mro()}.
@@ -710,14 +781,14 @@ def _init_mro(self) -> None:
except ValueError as e:
self.report(str(e), 'mro')
self._mro = list(self.allbases(True))
-
+
@overload
- def mro(self, include_external:'Literal[True]', include_self:bool=True) -> Sequence[Union['Class', str]]:...
+ def mro(self, include_external: 'Literal[True]', include_self: bool = True) -> Sequence[Union['Class', str]]: ...
@overload
- def mro(self, include_external:'Literal[False]'=False, include_self:bool=True) -> Sequence['Class']:...
- def mro(self, include_external:bool=False, include_self:bool=True) -> Sequence[Union['Class', str]]:
+ def mro(self, include_external: 'Literal[False]' = False, include_self: bool = True) -> Sequence['Class']: ...
+ def mro(self, include_external: bool = False, include_self: bool = True) -> Sequence[Union['Class', str]]:
"""
- Get the method resution order of this class.
+ Get the method resolution order of this class.
@note: The actual correct value is only set in post-processing, if L{mro()} is called
in the AST visitors, it will return the same as C{list(self.allbases(include_self))}.
@@ -738,23 +809,20 @@ def bases(self) -> List[str]:
"""
Fully qualified names of the bases of this class.
"""
- return self._finalbases if \
- self._finalbases is not None else self._initialbases
+ return self._finalbases if self._finalbases is not None else self._initialbases
-
@property
def baseobjects(self) -> List[Optional['Class']]:
"""
Base objects, L{None} value is inserted when the base class could not be found in the system.
-
- @note: This property is currently computed two times, a first time when we're visiting the ClassDef and initially creating the object.
- It's computed another time in post-processing to try to resolve the names that could not be resolved the first time. This is needed when there are import cycles.
-
+
+ @note: This property is currently computed twice: a first time when we're visiting the ClassDef and initially creating the object.
+ It's computed again in post-processing to try to resolve the names that could not be resolved the first time. This is needed when there are import cycles.
+
Meaning depending on the state of the system, this property can return either the initial objects or the final objects
"""
- return self._finalbaseobjects if \
- self._finalbaseobjects is not None else self._initialbaseobjects
-
+ return self._finalbaseobjects if self._finalbaseobjects is not None else self._initialbaseobjects
+
@property
def public_constructors(self) -> Sequence['Function']:
"""
@@ -767,16 +835,16 @@ def public_constructors(self) -> Sequence['Function']:
if not c.isVisible:
continue
args = list(c.annotations)
- try: args.remove('return')
- except ValueError: pass
- if c.kind in (DocumentableKind.CLASS_METHOD,
- DocumentableKind.METHOD):
+ try:
+ args.remove('return')
+ except ValueError:
+ pass
+ if c.kind in (DocumentableKind.CLASS_METHOD, DocumentableKind.METHOD):
try:
args.pop(0)
except IndexError:
pass
- if (len(args)==0 and get_docstring(c)[0] is None and
- c.name in ('__init__', '__new__')):
+ if len(args) == 0 and get_docstring(c)[0] is None and c.name in ('__init__', '__new__'):
continue
r.append(c)
return r
@@ -843,10 +911,11 @@ def docsources(self) -> Iterator[Documentable]:
def _localNameToFullName(self, name: str) -> str:
return self.parent._localNameToFullName(name)
-
+
def isNameDefined(self, name: str) -> bool:
return self.parent.isNameDefined(name)
+
class Function(Inheritable):
kind = DocumentableKind.FUNCTION
is_async: bool
@@ -862,15 +931,18 @@ def setup(self) -> None:
self.signature = None
self.overloads = []
+
@attr.s(auto_attribs=True)
class FunctionOverload:
"""
- @note: This is not an actual documentable type.
+ @note: This is not an actual documentable type.
"""
+
primary: Function
signature: Signature
decorators: Sequence[ast.expr]
+
class Attribute(Inheritable):
kind: Optional[DocumentableKind] = DocumentableKind.ATTRIBUTE
annotation: Optional[ast.expr] = None
@@ -882,15 +954,17 @@ class Attribute(Inheritable):
None value means the value is not initialized at the current point of the process.
"""
+
# Work around the attributes of the same name within the System class.
_ModuleT = Module
_PackageT = Package
T = TypeVar('T')
-def import_mod_from_file_location(module_full_name:str, path: Path) -> types.ModuleType:
+
+def import_mod_from_file_location(module_full_name: str, path: Path) -> types.ModuleType:
spec = importlib.util.spec_from_file_location(module_full_name, path)
- if spec is None:
+ if spec is None:
raise RuntimeError(f"Cannot find spec for module {module_full_name} at {path}")
py_mod = importlib.util.module_from_spec(spec)
loader = spec.loader
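A standalone sketch of the importlib dance used above (build a spec from the path, materialize the module, execute it), with the error handling trimmed; standard library only::

    import importlib.util
    from pathlib import Path
    from types import ModuleType

    def load_module(full_name: str, path: Path) -> ModuleType:
        spec = importlib.util.spec_from_file_location(full_name, path)
        if spec is None or spec.loader is None:
            raise RuntimeError(f"cannot find spec for {full_name} at {path}")
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)  # actually runs the module code
        return module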
@@ -904,16 +978,18 @@ def import_mod_from_file_location(module_full_name:str, path: Path) -> types.Mod
func_types: Tuple[Type[Any], ...] = (types.BuiltinFunctionType, types.FunctionType)
if hasattr(types, "MethodDescriptorType"):
# This is Python >= 3.7 only
- func_types += (types.MethodDescriptorType, )
+ func_types += (types.MethodDescriptorType,)
else:
- func_types += (type(str.join), )
+ func_types += (type(str.join),)
if hasattr(types, "ClassMethodDescriptorType"):
# This is Python >= 3.7 only
- func_types += (types.ClassMethodDescriptorType, )
+ func_types += (types.ClassMethodDescriptorType,)
else:
- func_types += (type(dict.__dict__["fromkeys"]), )
+ func_types += (type(dict.__dict__["fromkeys"]),)
_default_extensions = object()
+
+
class System:
"""A collection of related documentable objects.
@@ -922,7 +998,7 @@ class System:
"""
# Not assigned here for circularity reasons:
- #defaultBuilder = astbuilder.ASTBuilder
+ # defaultBuilder = astbuilder.ASTBuilder
defaultBuilder: Type[ASTBuilder]
systemBuilder: Type['ISystemBuilder']
options: 'Options'
@@ -939,9 +1015,7 @@ class System:
Additional list of extensions to load alongside default extensions.
"""
- show_attr_value = (DocumentableKind.CONSTANT,
- DocumentableKind.TYPE_VARIABLE,
- DocumentableKind.TYPE_ALIAS)
+ show_attr_value = (DocumentableKind.CONSTANT, DocumentableKind.TYPE_VARIABLE, DocumentableKind.TYPE_ALIAS)
"""
The kinds of attributes we should display the value for.
"""
@@ -988,7 +1062,7 @@ def __init__(self, options: Optional['Options'] = None):
# We use the fullName of the objects as the dict key in order to bind a full name to a privacy, not an object to a privacy.
# this way, we are sure the objects' privacy stay true even if we reparent them manually.
self._privacyClassCache: Dict[str, PrivacyClass] = {}
-
+
# workaround cyclic import issue
from pydoctor import extensions
@@ -996,7 +1070,7 @@ def __init__(self, options: Optional['Options'] = None):
self._factory = factory.Factory()
self._astbuilder_visitors: List[Type['astutils.NodeVisitorExt']] = []
self._post_processor = extensions.PriorityProcessor(self)
-
+
if self.extensions == _default_extensions:
self.extensions = list(extensions.get_extensions())
assert isinstance(self.extensions, list)
@@ -1010,15 +1084,19 @@ def __init__(self, options: Optional['Options'] = None):
@property
def Class(self) -> Type['Class']:
return self._factory.Class
+
@property
def Function(self) -> Type['Function']:
return self._factory.Function
+
@property
def Module(self) -> Type['Module']:
return self._factory.Module
+
@property
def Package(self) -> Type['Package']:
return self._factory.Package
+
@property
def Attribute(self) -> Type['Attribute']:
return self._factory.Attribute
@@ -1038,7 +1116,7 @@ def progress(self, section: str, i: int, n: Optional[int], msg: str) -> None:
else:
d = f'{i}/{n}'
if self.options.verbosity == 0 and sys.stdout.isatty():
- print('\r'+d, msg, end='')
+ print('\r' + d, msg, end='')
sys.stdout.flush()
if d == n:
self.needsnl = False
@@ -1046,22 +1124,23 @@ def progress(self, section: str, i: int, n: Optional[int], msg: str) -> None:
else:
self.needsnl = True
- def msg(self,
- section: str,
- msg: str,
- thresh: int = 0,
- topthresh: int = 100,
- nonl: bool = False,
- wantsnl: bool = True,
- once: bool = False
- ) -> None:
+ def msg(
+ self,
+ section: str,
+ msg: str,
+ thresh: int = 0,
+ topthresh: int = 100,
+ nonl: bool = False,
+ wantsnl: bool = True,
+ once: bool = False,
+ ) -> None:
"""
Log a message. pydoctor's logging system is a bit messy.
-
+
@param section: API doc generation step this message belongs to.
@param msg: The message.
@param thresh: The minimum verbosity level of the system for this message to actually be printed.
- Meaning passing thresh=-1 will make message still display if C{-q} is passed but not if C{-qq}.
+ Meaning passing thresh=-1 will make the message still display if C{-q} is passed but not if C{-qq}.
Similarly, passing thresh=1 will make the message only appear if the verbosity level is at least increased once with C{-v}.
Using negative thresh will count this message as a violation and will fail the build if option C{-W} is passed.
@param topthresh: The maximum verbosity level of the system for this message to actually be printed.
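In other words, a message prints only when the system verbosity falls between the message's thresh and topthresh. A tiny model of that filter (hypothetical helper, not pydoctor API)::

    def should_print(verbosity: int, thresh: int = 0, topthresh: int = 100) -> bool:
        # Shown when thresh <= current verbosity <= topthresh.
        return thresh <= verbosity <= topthresh

    assert should_print(verbosity=0, thresh=-1)       # shown by default and with -q
    assert not should_print(verbosity=-2, thresh=-1)  # silenced by -qq
    assert not should_print(verbosity=0, thresh=1)    # needs at least one -v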
@@ -1122,10 +1201,11 @@ def find_object(self, full_name: str) -> Optional[Documentable]:
return None
def objectsOfType(self, cls: Union[Type['DocumentableT'], str]) -> Iterator['DocumentableT']:
- """Iterate over all instances of C{cls} present in the system. """
+ """Iterate over all instances of C{cls} present in the system."""
if isinstance(cls, str):
- cls = utils.findClassFromDottedName(cls, 'objectsOfType',
- base_class=cast(Type['DocumentableT'], Documentable))
+ cls = utils.findClassFromDottedName(
+ cls, 'objectsOfType', base_class=cast(Type['DocumentableT'], Documentable)
+ )
assert isinstance(cls, type)
for o in self.allobjects.values():
if isinstance(o, cls):
@@ -1136,17 +1216,16 @@ def privacyClass(self, ob: Documentable) -> PrivacyClass:
cached_privacy = self._privacyClassCache.get(ob_fullName)
if cached_privacy is not None:
return cached_privacy
-
+
# kind should not be None, this is probably a relic of a past age of pydoctor.
# but keep it just in case.
if ob.kind is None:
return PrivacyClass.HIDDEN
-
+
privacy = PrivacyClass.PUBLIC
- if ob.name.startswith('_') and \
- not (ob.name.startswith('__') and ob.name.endswith('__')):
+ if ob.name.startswith('_') and not (ob.name.startswith('__') and ob.name.endswith('__')):
privacy = PrivacyClass.PRIVATE
-
+
# Precedence order: CLI arguments order
# Check exact matches first, then qnmatch
_found_exact_match = False
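The default privacy heuristic above reads: a leading underscore means private, unless the name is a dunder such as __init__. As a standalone check (hypothetical helper)::

    def default_privacy(name: str) -> str:
        # Private iff underscore-prefixed but not a dunder.
        if name.startswith('_') and not (name.startswith('__') and name.endswith('__')):
            return 'PRIVATE'
        return 'PUBLIC'

    assert default_privacy('helper') == 'PUBLIC'
    assert default_privacy('_helper') == 'PRIVATE'
    assert default_privacy('__init__') == 'PUBLIC'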
@@ -1167,18 +1246,19 @@ def privacyClass(self, ob: Documentable) -> PrivacyClass:
def membersOrder(self, ob: Documentable) -> Callable[[Documentable], Tuple[Any, ...]]:
"""
- Returns a callable suitable to be used with L{sorted} function.
+ Returns a callable suitable to be used with L{sorted} function.
Used to sort the given object's members for presentation.
Users can customize class and module members order independently, or can override this method
with a custom system class for further tweaks.
"""
from pydoctor.templatewriter.util import objects_order
+
if isinstance(ob, Class):
return objects_order(self.options.cls_member_order)
else:
return objects_order(self.options.mod_member_order)
-
+
def addObject(self, obj: Documentable) -> None:
"""Add C{object} to the system."""
@@ -1216,7 +1296,7 @@ def setSourceHref(self, mod: _ModuleT, source_path: Path) -> None:
if self.sourcebase is None:
mod.sourceHref = None
else:
- # pydoctor supports generating documentation covering more than one package,
+ # pydoctor supports generating documentation covering more than one package,
# in which case it is not certain that all of the source is even viewable below a single URL.
# We ignore this limitation by not assigning sourceHref for now, but it would be good to add support for it.
projBaseDir = mod.system.options.projectbasedirectory
@@ -1230,27 +1310,18 @@ def setSourceHref(self, mod: _ModuleT, source_path: Path) -> None:
mod.sourceHref = f'{self.sourcebase}/{relative}'
@overload
- def analyzeModule(self,
- modpath: Path,
- modname: str,
- parentPackage: Optional[_PackageT],
- is_package: Literal[False] = False
- ) -> _ModuleT: ...
+ def analyzeModule(
+ self, modpath: Path, modname: str, parentPackage: Optional[_PackageT], is_package: Literal[False] = False
+ ) -> _ModuleT: ...
@overload
- def analyzeModule(self,
- modpath: Path,
- modname: str,
- parentPackage: Optional[_PackageT],
- is_package: Literal[True]
- ) -> _PackageT: ...
+ def analyzeModule(
+ self, modpath: Path, modname: str, parentPackage: Optional[_PackageT], is_package: Literal[True]
+ ) -> _PackageT: ...
- def analyzeModule(self,
- modpath: Path,
- modname: str,
- parentPackage: Optional[_PackageT] = None,
- is_package: bool = False
- ) -> _ModuleT:
+ def analyzeModule(
+ self, modpath: Path, modname: str, parentPackage: Optional[_PackageT] = None, is_package: bool = False
+ ) -> _ModuleT:
factory = self.Package if is_package else self.Module
mod = factory(self, modname, parentPackage, modpath)
self._addUnprocessedModule(mod)
@@ -1259,8 +1330,8 @@ def analyzeModule(self,
def _addUnprocessedModule(self, mod: _ModuleT) -> None:
"""
- First add the new module into the unprocessed_modules list.
- Handle eventual duplication of module names, and finally add the
+ First add the new module into the unprocessed_modules list.
+ Handle possible duplication of module names, and finally add the
module to the system.
"""
assert mod.state is ProcessingState.UNPROCESSED
@@ -1272,16 +1343,14 @@ def _addUnprocessedModule(self, mod: _ModuleT) -> None:
else:
self.unprocessed_modules.append(mod)
self.addObject(mod)
- self.progress(
- "analyzeModule", len(self.allobjects),
- None, "modules and packages discovered")
+ self.progress("analyzeModule", len(self.allobjects), None, "modules and packages discovered")
self.module_count += 1
def _handleDuplicateModule(self, first: _ModuleT, dup: _ModuleT) -> None:
"""
- This is called when two modules have the same name.
+ This is called when two modules have the same name.
- Current rules are the following:
+ Current rules are the following:
- C-modules win over regular python modules
- Packages win over modules
- Else, the last added module wins
@@ -1302,13 +1371,14 @@ def _handleDuplicateModule(self, first: _ModuleT, dup: _ModuleT) -> None:
def _introspectThing(self, thing: object, parent: CanContainImportsDocumentable, parentMod: _ModuleT) -> None:
for k, v in thing.__dict__.items():
- if (isinstance(v, func_types)
- # In PyPy 7.3.1, functions from extensions are not
- # instances of the abstract types in func_types, it will have the type 'builtin_function_or_method'.
- # Additionnaly cython3 produces function of type 'cython_function_or_method',
- # so se use a heuristic on the class name as a fall back detection.
- or (hasattr(v, "__class__") and
- v.__class__.__name__.endswith('function_or_method'))):
+ if (
+ isinstance(v, func_types)
+ # In PyPy 7.3.1, functions from extensions are not
+ # instances of the abstract types in func_types, it will have the type 'builtin_function_or_method'.
+ # Additionally, cython3 produces functions of type 'cython_function_or_method',
+ # so we use a heuristic on the class name as a fallback detection.
+ or (hasattr(v, "__class__") and v.__class__.__name__.endswith('function_or_method'))
+ ):
f = self.Function(self, k, parent)
f.parentMod = parentMod
f.docstring = v.__doc__
@@ -1320,10 +1390,10 @@ def _introspectThing(self, thing: object, parent: CanContainImportsDocumentable,
parent.report(f"Cannot parse signature of {parent.fullName()}.{k}")
f.signature = None
except TypeError:
- # in pypy we get a TypeError calling signature() on classmethods,
+ # in pypy we get a TypeError calling signature() on classmethods,
# because apparently, they are not callable :/
f.signature = None
-
+
f.is_async = False
f.annotations = {name: None for name in f.signature.parameters} if f.signature else {}
self.addObject(f)
@@ -1335,11 +1405,7 @@ def _introspectThing(self, thing: object, parent: CanContainImportsDocumentable,
self.addObject(c)
self._introspectThing(v, c, parentMod)
- def introspectModule(self,
- path: Path,
- module_name: str,
- package: Optional[_PackageT]
- ) -> _ModuleT:
+ def introspectModule(self, path: Path, module_name: str, package: Optional[_PackageT]) -> _ModuleT:
if package is None:
module_full_name = module_name
@@ -1351,17 +1417,16 @@ def introspectModule(self,
factory = self.Package if is_package else self.Module
module = factory(self, module_name, package, path)
-
+
module.docstring = py_mod.__doc__
module._is_c_module = True
module._py_mod = py_mod
-
+
self._addUnprocessedModule(module)
return module
def addPackage(self, package_path: Path, parentPackage: Optional[_PackageT] = None) -> None:
- package = self.analyzeModule(
- package_path / '__init__.py', package_path.name, parentPackage, is_package=True)
+ package = self.analyzeModule(package_path / '__init__.py', package_path.name, parentPackage, is_package=True)
for path in sorted(package_path.iterdir()):
if path.is_dir():
@@ -1375,14 +1440,14 @@ def addModuleFromPath(self, path: Path, package: Optional[_PackageT]) -> None:
for suffix in importlib.machinery.all_suffixes():
if not name.endswith(suffix):
continue
- module_name = name[:-len(suffix)]
+ module_name = name[: -len(suffix)]
if suffix in importlib.machinery.EXTENSION_SUFFIXES:
if self.options.introspect_c_modules:
self.introspectModule(path, module_name, package)
elif suffix in importlib.machinery.SOURCE_SUFFIXES:
self.analyzeModule(path, module_name, package)
break
-
+
def _remove(self, o: Documentable) -> None:
del self.allobjects[o.fullName()]
oc = list(o.contents.values())
@@ -1412,14 +1477,15 @@ def meth(self):
obj.report(f"duplicate {str(prev)}", thresh=1)
self._remove(prev)
prev.name = obj.name + ' ' + str(i)
+
def readd(o: Documentable) -> None:
self.allobjects[o.fullName()] = o
for c in o.contents.values():
readd(c)
+
readd(prev)
self.allobjects[fullName] = obj
-
def getProcessedModule(self, modname: str) -> Optional[_ModuleT]:
mod = self.allobjects.get(modname)
if mod is None:
@@ -1442,7 +1508,7 @@ def processModule(self, mod: _ModuleT) -> None:
assert mod._py_string is not None
if mod._is_c_module:
self.processing_modules.append(mod.fullName())
- self.msg("processModule", "processing %s"%(self.processing_modules), 1)
+ self.msg("processModule", "processing %s" % (self.processing_modules), 1)
self._introspectThing(mod._py_mod, mod, mod)
mod.state = ProcessingState.PROCESSED
head = self.processing_modules.pop()
@@ -1457,7 +1523,7 @@ def processModule(self, mod: _ModuleT) -> None:
if ast:
self.processing_modules.append(mod.fullName())
if mod._py_string is None:
- self.msg("processModule", "processing %s"%(self.processing_modules), 1)
+ self.msg("processModule", "processing %s" % (self.processing_modules), 1)
builder.processModuleAST(ast, mod)
mod.state = ProcessingState.PROCESSED
head = self.processing_modules.pop()
@@ -1466,8 +1532,8 @@ def processModule(self, mod: _ModuleT) -> None:
'process',
self.module_count - len(self.unprocessed_modules),
self.module_count,
- f"modules processed, {self.violations} warnings")
-
+ f"modules processed, {self.violations} warnings",
+ )
def process(self) -> None:
while self.unprocessed_modules:
@@ -1475,7 +1541,6 @@ def process(self) -> None:
self.processModule(mod)
self.postProcess()
-
def postProcess(self) -> None:
"""Called when there are no more unprocessed modules.
@@ -1494,7 +1559,8 @@ def fetchIntersphinxInventories(self, cache: CacheT) -> None:
for url in self.options.intersphinx:
self.intersphinx.update(cache, url)
-def defaultPostProcess(system:'System') -> None:
+
+def defaultPostProcess(system: 'System') -> None:
for cls in system.objectsOfType(Class):
# Initiate the MROs
cls._init_mro()
@@ -1507,9 +1573,10 @@ def defaultPostProcess(system:'System') -> None:
# Checking whether the class is an exception
if is_exception(cls):
cls.kind = DocumentableKind.EXCEPTION
-
+
for attrib in system.objectsOfType(Attribute):
- _inherits_instance_variable_kind(attrib)
+ _inherits_instance_variable_kind(attrib)
+
def _inherits_instance_variable_kind(attr: Attribute) -> None:
"""
@@ -1525,9 +1592,8 @@ def _inherits_instance_variable_kind(attr: Attribute) -> None:
attr.kind = DocumentableKind.INSTANCE_VARIABLE
break
-def get_docstring(
- obj: Documentable
- ) -> Tuple[Optional[str], Optional[Documentable]]:
+
+def get_docstring(obj: Documentable) -> Tuple[Optional[str], Optional[Documentable]]:
"""
Fetch the docstring for a documentable.
Treat empty docstring as undocumented.
@@ -1546,50 +1612,70 @@ def get_docstring(
return None, source
return None, None
+
class SystemBuildingError(Exception):
"""
Raised when there is a (handled) fatal error while adding modules to the builder.
"""
+
class ISystemBuilder(abc.ABC):
"""
Interface class for building a system.
"""
+
@abc.abstractmethod
def __init__(self, system: 'System') -> None:
"""
Create the builder.
"""
+
@abc.abstractmethod
- def addModule(self, path: Path, parent_name: Optional[str] = None, ) -> None:
+ def addModule(
+ self,
+ path: Path,
+ parent_name: Optional[str] = None,
+ ) -> None:
"""
- Add a module or package from file system path to the pydoctor system.
+ Add a module or package from file system path to the pydoctor system.
If the path points to a directory, adds all submodules recursively.
@raises SystemBuildingError: If there is an error while adding the module/package.
"""
+
@abc.abstractmethod
- def addModuleString(self, text: str, modname: str,
- parent_name: Optional[str] = None,
- is_package: bool = False, ) -> None:
+ def addModuleString(
+ self,
+ text: str,
+ modname: str,
+ parent_name: Optional[str] = None,
+ is_package: bool = False,
+ ) -> None:
"""
Add a module from text to the system.
"""
+
@abc.abstractmethod
def buildModules(self) -> None:
"""
Build the modules.
"""
+
class SystemBuilder(ISystemBuilder):
"""
- This class is only an adapter for some System methods related to module building.
+ This class is only an adapter for some System methods related to module building.
"""
+
def __init__(self, system: 'System') -> None:
self.system = system
self._added: Set[Path] = set()
- def addModule(self, path: Path, parent_name: Optional[str] = None, ) -> None:
+ def addModule(
+ self,
+ path: Path,
+ parent_name: Optional[str] = None,
+ ) -> None:
if path in self._added:
return
# Path validity check
@@ -1600,7 +1686,7 @@ def addModule(self, path: Path, parent_name: Optional[str] = None, ) -> None:
try:
path.relative_to(projBaseDir)
except ValueError:
- if self.system.options.htmlsourcebase:
+ if self.system.options.htmlsourcebase:
# We now support building documentation when the source path is outside of the build directory.
# We simply leave a warning and skip the sourceHref attribute.
# https://github.com/twisted/pydoctor/issues/658
@@ -1625,16 +1711,20 @@ def addModule(self, path: Path, parent_name: Optional[str] = None, ) -> None:
raise SystemBuildingError(f"Source path does not exist: {path}")
self._added.add(path)
- def addModuleString(self, text: str, modname: str,
- parent_name: Optional[str] = None,
- is_package: bool = False, ) -> None:
+ def addModuleString(
+ self,
+ text: str,
+ modname: str,
+ parent_name: Optional[str] = None,
+ is_package: bool = False,
+ ) -> None:
if parent_name is None:
parent = None
else:
# Set containing package as parent.
parent = self.system.allobjects[parent_name]
assert isinstance(parent, Package), f"{parent.fullName()} is not a Package, it's a {parent.kind}"
-
+
factory = self.system.Package if is_package else self.system.Module
mod = factory(self.system, name=modname, parent=parent, source_path=None)
mod._py_string = textwrap.dedent(text)
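A usage sketch for addModuleString, assuming `system` is a freshly created System (module name and source are made up)::

    builder = system.systemBuilder(system)
    builder.addModuleString(
        "def greet(name):\n    return 'hi ' + name\n",
        modname='greetings',
    )
    builder.buildModules()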
@@ -1643,40 +1733,49 @@ def addModuleString(self, text: str, modname: str,
def buildModules(self) -> None:
self.system.process()
+
System.systemBuilder = SystemBuilder
-def prepend_package(builderT:Type[ISystemBuilder], package:str) -> Type[ISystemBuilder]:
+
+def prepend_package(builderT: Type[ISystemBuilder], package: str) -> Type[ISystemBuilder]:
"""
- Get a new system builder class, that extends the original C{builder} such that it will always use a "fake"
+ Get a new system builder class that extends the original C{builderT} such that it will always use a "fake"
C{package} to be the only root object of the system and add new modules under it.
"""
-
- class PrependPackageBuidler(builderT): # type:ignore
+
+ class PrependPackageBuidler(builderT): # type:ignore
"""
Support for option C{--prepend-package}.
"""
- def __init__(self, system: 'System', *, package:str) -> None:
+ def __init__(self, system: 'System', *, package: str) -> None:
super().__init__(system)
-
+
self.package = package
-
+
prependedpackage = None
for m in package.split('.'):
- prependedpackage = system.Package(
- system, m, prependedpackage)
+ prependedpackage = system.Package(system, m, prependedpackage)
system.addObject(prependedpackage)
-
- def addModule(self, path: Path, parent_name: Optional[str] = None, ) -> None:
+
+ def addModule(
+ self,
+ path: Path,
+ parent_name: Optional[str] = None,
+ ) -> None:
if parent_name is None:
parent_name = self.package
super().addModule(path, parent_name)
-
- def addModuleString(self, text: str, modname: str,
- parent_name: Optional[str] = None,
- is_package: bool = False, ) -> None:
+
+ def addModuleString(
+ self,
+ text: str,
+ modname: str,
+ parent_name: Optional[str] = None,
+ is_package: bool = False,
+ ) -> None:
if parent_name is None:
parent_name = self.package
super().addModuleString(text, modname, parent_name, is_package=is_package)
-
+
return utils.partialclass(PrependPackageBuidler, package=package)
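A usage sketch for the wrapper above, assuming `system` is an already configured System instance (the package name is made up)::

    from pydoctor.model import SystemBuilder, prepend_package

    builder_cls = prepend_package(SystemBuilder, package='acme')
    builder = builder_cls(system)
    # Modules added without an explicit parent now land under the
    # fake 'acme' package instead of at the root of the system.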
diff --git a/pydoctor/mro.py b/pydoctor/mro.py
index e8941e2aa..4af45bc30 100644
--- a/pydoctor/mro.py
+++ b/pydoctor/mro.py
@@ -31,6 +31,7 @@
T = TypeVar('T')
+
class Dependency(deque):
@property
def head(self) -> Optional[T]:
@@ -40,7 +41,7 @@ def head(self) -> Optional[T]:
return None
@property
- def tail(self) -> islice:
+ def tail(self) -> islice:
"""
Return an islice object, which suffices for iteration or calling `in`
"""
@@ -57,6 +58,7 @@ class DependencyList:
It's needed so that the merge process preserves the local
precedence order of direct parent classes.
"""
+
def __init__(self, *lists: Tuple[List[T]]) -> None:
self._lists = [Dependency(i) for i in lists]
diff --git a/pydoctor/napoleon/docstring.py b/pydoctor/napoleon/docstring.py
index 0ab64dd59..9b783c1d1 100644
--- a/pydoctor/napoleon/docstring.py
+++ b/pydoctor/napoleon/docstring.py
@@ -8,6 +8,7 @@
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
from __future__ import annotations
import collections
@@ -32,19 +33,19 @@
r'((?::(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:`.+?`)|'
r'(?:``.+?``)|'
# r'(?::meta .+:.*)|' # 'meta' is not a supported field by pydoctor at the moment.
- r'(?:`.+?\s*(?<!:)`))')
+ r'(?:`.+?\s*(?<!:)`))'
+)
_xref_regex = re.compile(r"(?:(?::(?:[a-zA-Z0-9]+[\-_+:.])*[a-zA-Z0-9]+:)?`.+?`)")
_bullet_list_regex = re.compile(r"^(\*|\+|\-)(\s+\S|\s*$)")
_enumerated_list_regex = re.compile(
- r"^(?P\()?"
- r"(\d+|#|[ivxlcdm]+|[IVXLCDM]+|[a-zA-Z])"
- r"(?(paren)\)|\.)(\s+\S|\s*$)"
+ r"^(?P\()?" r"(\d+|#|[ivxlcdm]+|[IVXLCDM]+|[a-zA-Z])" r"(?(paren)\)|\.)(\s+\S|\s*$)"
)
+
@attr.s(auto_attribs=True)
class Field:
"""
- Represent a field with a name and/or a type and/or a description. Commonly a parameter description.
+ Represent a field with a name and/or a type and/or a description. Commonly a parameter description.
It's also used for ``Returns`` section and other sections structured with fields.
This representation does not hold the information about which section the field corresponds to; it depends on the context of usage.
@@ -58,7 +59,7 @@ class Field:
type: str
"""The enventual type of the parameter/return value. """
-
+
content: List[str]
"""The content of the field. """
@@ -67,10 +68,11 @@ class Field:
def __bool__(self) -> bool:
"""
- Returns True if the field has any kind of content.
+ Returns True if the field has any kind of content.
"""
return bool(self.name or self.type or self.content)
+
def is_obj_identifier(string: str) -> bool:
"""
Is this string a Python object(s) identifier?
@@ -78,11 +80,11 @@ def is_obj_identifier(string: str) -> bool:
An object identifier is a valid type string.
But a valid type can be more complex than an object identifier.
"""
- # support detecting "dict-like" as an object type even
+ # support detecting "dict-like" as an object type even
# if dashes are not actually allowed to keep compatibility with
# upstream napoleon.
string = string.replace('-', '_')
-
+
if string.isidentifier() or _xref_regex.match(string):
return True
if all([p.isidentifier() or not p for p in string.split('.')]):
@@ -100,10 +102,7 @@ def is_type(string: str) -> bool:
:see: `TypeDocstring`
"""
- return (
- is_obj_identifier(string)
- or len(TypeDocstring(string, warns_on_unknown_tokens=True).warnings) == 0
- )
+ return is_obj_identifier(string) or len(TypeDocstring(string, warns_on_unknown_tokens=True).warnings) == 0
# Sphinx's implementation allows regular sentences inside type strings.
# But automatically detecting that kind of construct seems technically hard.
# The warns_on_unknown_tokens argument narrows the checks to match only docstrings
@@ -138,14 +137,16 @@ def is_google_typed_arg(string: str, parse_type: bool = True) -> bool:
return True
return False
+
class TokenType(Enum):
- LITERAL = auto()
- OBJ = auto()
- DELIMITER = auto()
- CONTROL = auto()
- REFERENCE = auto()
- UNKNOWN = auto()
- ANY = auto()
+ LITERAL = auto()
+ OBJ = auto()
+ DELIMITER = auto()
+ CONTROL = auto()
+ REFERENCE = auto()
+ UNKNOWN = auto()
+ ANY = auto()
+
@attr.s(auto_attribs=True)
class FreeFormException(Exception):
@@ -183,12 +184,9 @@ class TypeDocstring:
- ``complicated string`` or `strIO `
"""
- _natural_language_delimiters_regex_str = (
- r",\sor\s|\sor\s|\sof\s|:\s|\sto\s|,\sand\s|\sand\s"
- )
- _natural_language_delimiters_regex = re.compile(
- f"({_natural_language_delimiters_regex_str})"
- )
+
+ _natural_language_delimiters_regex_str = r",\sor\s|\sor\s|\sof\s|:\s|\sto\s|,\sand\s|\sand\s"
+ _natural_language_delimiters_regex = re.compile(f"({_natural_language_delimiters_regex_str})")
_ast_like_delimiters_regex_str = r",\s|,|[\[]|[\]]|[\(|\)]"
_ast_like_delimiters_regex = re.compile(f"({_ast_like_delimiters_regex_str})")
@@ -230,7 +228,7 @@ def __str__(self) -> str:
The parsed type in reStructuredText format.
"""
return self._convert_type_spec_to_rst()
-
+
def _trigger_warnings(self) -> None:
"""
Append some warnings.
@@ -239,12 +237,16 @@ def _trigger_warnings(self) -> None:
open_square_braces = 0
for _token, _type in self._tokens:
- if _type is TokenType.DELIMITER and _token in '[]()':
- if _token == "[": open_square_braces += 1
- elif _token == "(": open_parenthesis += 1
- elif _token == "]": open_square_braces -= 1
- elif _token == ")": open_parenthesis -= 1
-
+ if _type is TokenType.DELIMITER and _token in '[]()':
+ if _token == "[":
+ open_square_braces += 1
+ elif _token == "(":
+ open_parenthesis += 1
+ elif _token == "]":
+ open_square_braces -= 1
+ elif _token == ")":
+ open_parenthesis -= 1
+
if open_parenthesis != 0:
self.warnings.append("unbalanced parenthesis in type expression")
if open_square_braces != 0:
@@ -332,12 +334,7 @@ def postprocess(item: str) -> List[str]:
else:
return [item]
- tokens = list(
- item
- for raw_token in cls._token_regex.split(spec)
- for item in postprocess(raw_token)
- if item
- )
+ tokens = list(item for raw_token in cls._token_regex.split(spec) for item in postprocess(raw_token) if item)
return tokens
def _token_type(self, token: Union[str, Any]) -> TokenType:
@@ -354,7 +351,7 @@ def is_numeric(token: str) -> bool:
else:
return True
- # If the token is not a string, it's tagged as 'any',
+ # If the token is not a string, it's tagged as 'any',
# in practice this is used when a docutils.nodes.Element is passed as a token.
if not isinstance(token, str):
type_ = TokenType.ANY
@@ -378,14 +375,10 @@ def is_numeric(token: str) -> bool:
self.warnings.append(f"invalid value set (missing opening brace): {token}")
type_ = TokenType.LITERAL
elif token.startswith("'") or token.startswith('"'):
- self.warnings.append(
- f"malformed string literal (missing closing quote): {token}"
- )
+ self.warnings.append(f"malformed string literal (missing closing quote): {token}")
type_ = TokenType.LITERAL
elif token.endswith("'") or token.endswith('"'):
- self.warnings.append(
- f"malformed string literal (missing opening quote): {token}"
- )
+ self.warnings.append(f"malformed string literal (missing opening quote): {token}")
type_ = TokenType.LITERAL
# keyword supported by the reference implementation (numpydoc)
elif token in (
@@ -426,8 +419,7 @@ def _convert(
# the last token has reST markup:
# we might have to escape
- if not converted_token.startswith(" ") and \
- not converted_token.endswith(" "):
+ if not converted_token.startswith(" ") and not converted_token.endswith(" "):
if _next_token != iter_types.sentinel:
if _next_token[1] in token_type_using_rest_markup:
need_escaped_space = True
@@ -442,8 +434,12 @@ def _convert(
converters: Dict[
TokenType, Callable[[Tuple[str, TokenType], Tuple[str, TokenType], Tuple[str, TokenType]], Union[str, Any]]
] = {
- TokenType.LITERAL: lambda _token, _last_token, _next_token: _convert(_token, _last_token, _next_token, "``%s``"),
- TokenType.CONTROL: lambda _token, _last_token, _next_token: _convert(_token, _last_token, _next_token, "*%s*"),
+ TokenType.LITERAL: lambda _token, _last_token, _next_token: _convert(
+ _token, _last_token, _next_token, "``%s``"
+ ),
+ TokenType.CONTROL: lambda _token, _last_token, _next_token: _convert(
+ _token, _last_token, _next_token, "*%s*"
+ ),
TokenType.DELIMITER: lambda _token, _last_token, _next_token: _convert(_token, _last_token, _next_token),
TokenType.REFERENCE: lambda _token, _last_token, _next_token: _convert(_token, _last_token, _next_token),
TokenType.UNKNOWN: lambda _token, _last_token, _next_token: _convert(_token, _last_token, _next_token),
@@ -519,13 +515,14 @@ class GoogleDocstring:
"""
_name_rgx = re.compile(
- r"^\s*((?::(?P\S+):)?`(?P~?[a-zA-Z0-9_.-]+)`|"
- r" (?P~?[a-zA-Z0-9_.-]+))\s*",
+ r"^\s*((?::(?P\S+):)?`(?P~?[a-zA-Z0-9_.-]+)`|" r" (?P~?[a-zA-Z0-9_.-]+))\s*",
re.X,
)
# overridden
- def __init__(self, docstring: Union[str, List[str]],
+ def __init__(
+ self,
+ docstring: Union[str, List[str]],
what: Literal['function', 'module', 'class', 'attribute'] | None = None,
process_type_fields: bool = False,
) -> None:
@@ -535,7 +532,7 @@ def __init__(self, docstring: Union[str, List[str]],
docstring : str or list of str
The docstring to parse, given either as a string or split into
individual lines.
- what:
+ what:
Optional string representing the type of object we're documenting.
process_type_fields: bool
Whether to process the type fields or to leave them untouched (default) in order to be processed later.
@@ -543,20 +540,17 @@ def __init__(self, docstring: Union[str, List[str]],
"""
self._what = what
self._process_type_fields = process_type_fields
-
+
if isinstance(docstring, str):
lines = docstring.splitlines()
else:
lines = docstring
- self._line_iter: modify_iter[str] = modify_iter(
- lines, modifier=lambda s: s.rstrip()
- )
+ self._line_iter: modify_iter[str] = modify_iter(lines, modifier=lambda s: s.rstrip())
self._parsed_lines = [] # type: List[str]
self._is_in_section = False
self._section_indent = 0
-
self._sections: Dict[str, Callable[[str], List[str]]] = {
"args": self._parse_parameters_section,
"arguments": self._parse_parameters_section,
@@ -580,7 +574,7 @@ def __init__(self, docstring: Union[str, List[str]],
"receives": self._parse_parameters_section, # same as parameters
"return": self._parse_returns_section,
"returns": self._parse_returns_section,
- "yield": self._parse_returns_section, # same process as returns section
+ "yield": self._parse_returns_section, # same process as returns section
"yields": self._parse_returns_section,
"raise": self._parse_raises_section,
"raises": self._parse_raises_section,
@@ -632,20 +626,14 @@ def lines(self) -> List[str]:
def _consume_indented_block(self, indent: int = 1) -> List[str]:
lines = []
line = self._line_iter.peek()
- while not self._is_section_break() and (
- not line or self._is_indented(line, indent)
- ):
+ while not self._is_section_break() and (not line or self._is_indented(line, indent)):
lines.append(next(self._line_iter))
line = self._line_iter.peek()
return lines
def _consume_contiguous(self) -> List[str]:
lines = []
- while (
- self._line_iter.has_next()
- and self._line_iter.peek()
- and not self._is_section_header()
- ):
+ while self._line_iter.has_next() and self._line_iter.peek() and not self._is_section_header():
lines.append(next(self._line_iter))
return lines
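Both loops above follow a peek-then-commit pattern: look at the next line, and only consume it once it is known to belong to the block. A tiny stand-in for ``peek_iter`` (defined in pydoctor/napoleon/iterators.py) makes the pattern explicit:

    from collections import deque

    class Peekable:
        def __init__(self, items):
            self._q = deque(items)
        def peek(self):
            return self._q[0] if self._q else None
        def __next__(self):
            return self._q.popleft()

    it = Peekable(["  a", "  b", "top-level"])
    block = []
    while it.peek() is not None and it.peek().startswith("  "):
        block.append(next(it))  # consume only after the lookahead matched
    print(block)  # -> ['  a', '  b']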
@@ -658,12 +646,7 @@ def _consume_empty(self) -> List[str]:
return lines
# overridden: enforce type pre-processing + made smarter to understand multiline types.
- def _consume_field(
- self,
- parse_type: bool = True,
- prefer_type: bool = False,
- **kwargs: Any
- ) -> Field:
+ def _consume_field(self, parse_type: bool = True, prefer_type: bool = False, **kwargs: Any) -> Field:
line = next(self._line_iter)
indent = self._get_indent(line) + 1
@@ -690,10 +673,7 @@ def _consume_field(
if prefer_type and not _type:
_type, _name = _name, _type
- return Field(name=_name,
- type=_type,
- content=_descs,
- lineno=self._line_iter.counter)
+ return Field(name=_name, type=_type, content=_descs, lineno=self._line_iter.counter)
# overridden: Allow any parameters to be passed to _consume_field with **kwargs
def _consume_fields(
@@ -706,13 +686,12 @@ def _consume_fields(
self._consume_empty()
fields = []
while not self._is_section_break():
- f = self._consume_field(parse_type, prefer_type, **kwargs)
+ f = self._consume_field(parse_type, prefer_type, **kwargs)
if multiple and f.name:
for name in f.name.split(","):
- fields.append(Field(name=name.strip(),
- type=f.type,
- content=f.content,
- lineno=self._line_iter.counter))
+ fields.append(
+ Field(name=name.strip(), type=f.type, content=f.content, lineno=self._line_iter.counter)
+ )
elif f:
fields.append(f)
return fields
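When one field documents several names ("a, b : int"), the loop above fans it out into one ``Field`` per name; a sketch with a stand-in dataclass (the real ``Field`` is pydoctor's own type):

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class Field:  # simplified stand-in
        name: str
        type: str
        content: List[str]
        lineno: int

    f = Field(name="a, b", type="int", content=["shared description"], lineno=3)
    fields = [Field(n.strip(), f.type, f.content, f.lineno) for n in f.name.split(",")]
    print([fld.name for fld in fields])  # -> ['a', 'b']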
@@ -735,8 +714,7 @@ def _consume_returns_section(self) -> List[Field]:
lines = self._dedent(self._consume_to_next_section())
if lines:
- before_colon, colon, _descs = self._partition_multiline_field_on_colon(
- lines, format_validator=is_type)
+ before_colon, colon, _descs = self._partition_multiline_field_on_colon(lines, format_validator=is_type)
_type = ""
if _descs:
@@ -759,10 +737,7 @@ def _consume_returns_section(self) -> List[Field]:
_descs = self.__class__(_descs).lines()
_name = ""
- return [Field(name=_name,
- type=_type,
- content=_descs,
- lineno=self._line_iter.counter)]
+ return [Field(name=_name, type=_type, content=_descs, lineno=self._line_iter.counter)]
else:
return []
@@ -789,18 +764,18 @@ def _consume_to_next_section(self) -> List[str]:
# new method: handle type pre-processing the same way for google and numpy style.
def _convert_type(self, _type: str, is_type_field: bool = True, lineno: int = 0) -> str:
"""
- Tokenize the string type and convert it with additional markup and auto linking,
+ Tokenize the string type and convert it with additional markup and auto linking,
with L{TypeDocstring}.
-
+
Arguments
---------
_type: str
the string type to convert.
is_type_field: bool
- Whether the string is the content of a ``:type:`` or ``rtype`` field.
- If this is ``True`` and `GoogleDocstring`'s ``process_type_fields`` is ``False`` (defaults),
- the type will NOT be converted (instead, it's returned as is) because it will be converted by the code provided by
- ``ParsedTypeDocstring`` class in a later stage of docstring parsing.
+ Whether the string is the content of a ``:type:`` or ``:rtype:`` field.
+ If this is ``True`` and `GoogleDocstring`'s ``process_type_fields`` is ``False`` (the default),
+ the type will NOT be converted (instead it is returned as-is), because it will be converted
+ by the ``ParsedTypeDocstring`` class in a later stage of docstring parsing.
"""
if not is_type_field or self._process_type_fields:
type_spec = TypeDocstring(_type)
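A hedged usage sketch of that gate: when type fields are processed, the string goes through ``TypeDocstring``; otherwise it is passed through verbatim for ``ParsedTypeDocstring`` to handle later (import path assumed, exact rendered output not asserted):

    from pydoctor.napoleon.docstring import TypeDocstring  # path assumed

    raw = "list of int, optional"
    converted = str(TypeDocstring(raw))
    # roughly: names gain crossref markup and control words such as
    # "optional" gain emphasis; delimiters ("of", ",") are kept as-is
    print(converted)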
@@ -851,9 +826,7 @@ def _format_admonition(self, admonition: str, lines: List[str]) -> List[str]:
return [f".. {admonition}::", ""]
# overridden to avoid extra unnecessary blank lines
- def _format_block(
- self, prefix: str, lines: List[str], padding: str = ""
- ) -> List[str]:
+ def _format_block(self, prefix: str, lines: List[str], padding: str = "") -> List[str]:
# remove the last line of the block if it's empty
if not lines[-1]:
lines.pop(-1)
@@ -1002,11 +975,7 @@ def _is_section_break(self) -> bool:
return bool(
not self._line_iter.has_next()
or self._is_section_header()
- or (
- self._is_in_section
- and line
- and not self._is_indented(line, self._section_indent)
- )
+ or (self._is_in_section and line and not self._is_indented(line, self._section_indent))
)
# overridden: call _parse_attribute_docstring if the object is an attribute
@@ -1101,9 +1070,7 @@ def _parse_generic_section(self, section: str) -> List[str]:
# + enforce napoleon_use_keyword = True
def _parse_keyword_arguments_section(self, section: str) -> List[str]:
fields = self._consume_fields()
- return self._format_docutils_params(
- fields, field_role="keyword", type_role="type"
- )
+ return self._format_docutils_params(fields, field_role="keyword", type_role="type")
# overridden: ignore noindex options + hack something that renders OK as-is
def _parse_methods_section(self, section: str) -> List[str]:
@@ -1136,9 +1103,7 @@ def _parse_parameters_section(self, section: str) -> List[str]:
# This allows sections to use a syntax compatible with the raises syntax (but it is not mandatory).
# If prefer_type=False: if there is something in the place of the type
# but no description, assume the type contains the description, and there is no type in the docs.
- def _parse_raises_section(
- self, section: str, field_type: str = "raises", prefer_type: bool = True
- ) -> List[str]:
+ def _parse_raises_section(self, section: str, field_type: str = "raises", prefer_type: bool = True) -> List[str]:
fields = self._consume_fields(parse_type=False, prefer_type=True)
lines = [] # type: List[str]
for field in fields:
@@ -1181,7 +1146,7 @@ def _parse_returns_section(self, section: str) -> List[str]:
if multi:
if lines:
- lines.extend(self._format_block(" "*(len(section)+2)+" * ", field))
+ lines.extend(self._format_block(" " * (len(section) + 2) + " * ", field))
else:
lines.extend(self._format_block(f":{section}: * ", field))
else:
@@ -1200,10 +1165,7 @@ def _parse_see_also_section(self, section: str) -> List[str]:
# overridden: no translation + use a syntax compatible with raises, as well as the standard field syntax.
# This means the :warns: field can have an argument like: :warns ResourceWarning:
def _parse_warns_section(self, section: str) -> List[str]:
- return self._parse_raises_section(
- section, field_type="warns", prefer_type=False
- )
-
+ return self._parse_raises_section(section, field_type="warns", prefer_type=False)
def _partition_field_on_colon(self, line: str) -> Tuple[str, str, str]:
before_colon = []
@@ -1256,9 +1218,7 @@ def _partition_multiline_field_on_colon(
Can contain lines with only white space.
"""
- before_colon, colon, after_colon_start = self._partition_field_on_colon(
- lines[0]
- )
+ before_colon, colon, after_colon_start = self._partition_field_on_colon(lines[0])
# save before colon string
before_colon_start = before_colon
@@ -1271,9 +1231,7 @@ def _partition_multiline_field_on_colon(
# the first line of the field is not complete or malformed.
if raw_descs:
# try to complete type info from next lines.
- partinioned_lines = [
- self._partition_field_on_colon(l) for l in raw_descs
- ]
+ partinioned_lines = [self._partition_field_on_colon(l) for l in raw_descs]
for i, p_line in enumerate(partinioned_lines):
multiline = True
before, colon, after = p_line
@@ -1344,7 +1302,7 @@ class NumpyDocstring(GoogleDocstring):
Example
-------
-
+
.. python::
>>> from pydoctor.napoleon import NumpyDocstring
>>> docstring = '''One line summary.
@@ -1441,10 +1399,7 @@ def _consume_field(
_desc = self._dedent(self._consume_indented_block(indent))
_desc = self.__class__(_desc).lines()
- return Field(name=_name,
- type=_type,
- content=_desc,
- lineno=self._line_iter.counter)
+ return Field(name=_name, type=_type, content=_desc, lineno=self._line_iter.counter)
# The field either does not provide a description, and the data contains the name and type information,
# or the _name and _type variables directly contain the description, i.e.
@@ -1462,10 +1417,7 @@ def _consume_field(
_type = self._convert_type_and_maybe_consume_free_form_field(
_name, _type, allow_free_form=allow_free_form
) # Can raise FreeFormException
- return Field(name=_name,
- type=_type,
- content=[],
- lineno=self._line_iter.counter)
+ return Field(name=_name, type=_type, content=[], lineno=self._line_iter.counter)
# allow passing any args to super()._consume_fields(). Used for allow_free_form=True
def _consume_fields(
@@ -1483,10 +1435,7 @@ def _consume_fields(
**kwargs,
)
except FreeFormException as e:
- return [Field(name="",
- type="",
- content=e.lines,
- lineno=self._line_iter.counter)]
+ return [Field(name="", type="", content=e.lines, lineno=self._line_iter.counter)]
# Pass allow_free_form = True
def _consume_returns_section(self) -> List[Field]:
@@ -1505,11 +1454,7 @@ def _is_section_break(self) -> bool:
not self._line_iter.has_next()
or self._is_section_header()
or ["", ""] == [line1, line2]
- or (
- self._is_in_section
- and line1
- and not self._is_indented(line1, self._section_indent)
- )
+ or (self._is_in_section and line1 and not self._is_indented(line1, self._section_indent))
)
def _is_section_header(self) -> bool:
@@ -1547,9 +1492,9 @@ def _parse_see_also_section(self, section: str) -> List[str]:
def _parse_numpydoc_see_also_section(self, content: List[str]) -> List[str]:
"""
Derived from the NumpyDoc implementation of ``_parse_see_also``.
-
+
Parses this kind of See Also section::
-
+
See Also
--------
func_name : Descriptive text
diff --git a/pydoctor/napoleon/iterators.py b/pydoctor/napoleon/iterators.py
index e7ca2a8e7..d1714f507 100644
--- a/pydoctor/napoleon/iterators.py
+++ b/pydoctor/napoleon/iterators.py
@@ -8,6 +8,7 @@
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
from __future__ import annotations
import collections
@@ -45,9 +46,7 @@ class peek_iter(Generic[T]):
Store and increment line number to report correct lines!
"""
- def __init__(
- self, o: Union[Callable[[], T], Iterable[T]], sentinel: Optional[T] = None
- ) -> None:
+ def __init__(self, o: Union[Callable[[], T], Iterable[T]], sentinel: Optional[T] = None) -> None:
"""
Parameters
----------
@@ -70,9 +69,7 @@ def __init__(
self._iterable = iter(o, sentinel)
else:
if sentinel:
- raise TypeError(
- "If sentinel is given, then o must be a callable object."
- )
+ raise TypeError("If sentinel is given, then o must be a callable object.")
self._iterable = iter(o)
self._cache: Deque[T] = collections.deque()
@@ -118,12 +115,10 @@ def has_next(self) -> bool:
return self.peek() != self.sentinel
@overload
- def next(self, n: int) -> Sequence[T]:
- ...
+ def next(self, n: int) -> Sequence[T]: ...
@overload
- def next(self) -> T:
- ...
+ def next(self) -> T: ...
def next(self, n: Optional[int] = None) -> Union[Sequence[T], T]:
"""
@@ -162,12 +157,10 @@ def next(self, n: Optional[int] = None) -> Union[Sequence[T], T]:
return result
@overload
- def peek(self, n: int) -> Sequence[T]:
- ...
+ def peek(self, n: int) -> Sequence[T]: ...
@overload
- def peek(self) -> T:
- ...
+ def peek(self) -> T: ...
def peek(self, n: Optional[int] = None) -> Union[Sequence[T], T]:
"""Preview the next item or ``n`` items of the iterator.
diff --git a/pydoctor/node2stan.py b/pydoctor/node2stan.py
index 8b2f00665..7b76fe1a3 100644
--- a/pydoctor/node2stan.py
+++ b/pydoctor/node2stan.py
@@ -1,6 +1,7 @@
"""
Helper function to convert L{docutils} nodes to Stan tree.
"""
+
from __future__ import annotations
from itertools import chain
@@ -11,6 +12,7 @@
from docutils import nodes, frontend, __version_info__ as docutils_version_info
from twisted.web.template import Tag
+
if TYPE_CHECKING:
from twisted.web.template import Flattenable
from pydoctor.epydoc.markup import DocstringLinker
@@ -20,23 +22,25 @@
from pydoctor.epydoc.doctest import colorize_codeblock, colorize_doctest
from pydoctor.stanutils import flatten, html2stan
+
def node2html(node: nodes.Node, docstring_linker: 'DocstringLinker') -> List[str]:
"""
Convert a L{docutils.nodes.Node} object to HTML strings.
"""
- if (doc:=node.document) is None:
+ if (doc := node.document) is None:
raise AssertionError(f'missing document attribute on {node}')
visitor = HTMLTranslator(doc, docstring_linker)
node.walkabout(visitor)
return visitor.body
+
def node2stan(node: Union[nodes.Node, Iterable[nodes.Node]], docstring_linker: 'DocstringLinker') -> Tag:
"""
Convert L{docutils.nodes.Node} objects to a Stan tree.
@param node: An docutils document or a fragment of document.
@return: The element as a stan tree.
- @note: Any L{nodes.Node} can be passed to that function, the only requirement is
+ @note: Any L{nodes.Node} can be passed to this function; the only requirement is
that the node's L{nodes.Node.document} attribute is set to a valid L{nodes.document} object.
"""
html = []
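``node2html`` drives a docutils visitor over the tree via ``walkabout``; a minimal sketch of that traversal pattern using stock docutils only (no pydoctor types):

    from docutils import nodes
    from docutils.core import publish_doctree

    class TextCollector(nodes.SparseNodeVisitor):
        # SparseNodeVisitor no-ops on every node type except the
        # handlers defined here, so walkabout() just harvests text
        def __init__(self, document):
            super().__init__(document)
            self.chunks = []
        def visit_Text(self, node):
            self.chunks.append(node.astext())

    doc = publish_doctree("Some *emphasized* text.")
    visitor = TextCollector(doc)
    doc.walkabout(visitor)
    print(visitor.chunks)  # -> ['Some ', 'emphasized', ' text.']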
@@ -62,56 +66,57 @@ def gettext(node: Union[nodes.Node, List[nodes.Node]]) -> List[str]:
_TARGET_RE = re.compile(r'^(.*?)\s*<(?:URI:|URL:)?([^<>]+)>$')
_VALID_IDENTIFIER_RE = re.compile('[^0-9a-zA-Z_]')
+
def _valid_identifier(s: str) -> str:
- """Remove invalid characters to create valid CSS identifiers. """
+ """Remove invalid characters to create valid CSS identifiers."""
return _VALID_IDENTIFIER_RE.sub('', s)
+
class HTMLTranslator(html4css1.HTMLTranslator):
"""
Pydoctor's HTML translator.
"""
-
+
settings: ClassVar[Optional[optparse.Values]] = None
body: List[str]
- def __init__(self,
- document: nodes.document,
- docstring_linker: 'DocstringLinker'
- ):
+ def __init__(self, document: nodes.document, docstring_linker: 'DocstringLinker'):
self._linker = docstring_linker
# Set the document's settings.
if self.settings is None:
- if docutils_version_info >= (0,19):
+ if docutils_version_info >= (0, 19):
# Direct access to OptionParser is deprecated as of Docutils 0.19
settings = frontend.get_default_settings(html4css1.Writer())
else:
- settings = frontend.OptionParser([html4css1.Writer()]).get_default_values() # type: ignore
-
+ settings = frontend.OptionParser([html4css1.Writer()]).get_default_values() # type: ignore
+
# Save default settings as a class attribute so we don't re-compute them every time
self.__class__.settings = settings
else:
# yes "optparse.Values" and "docutils.frontend.Values" are compatible.
- settings = self.settings # type: ignore
-
+ settings = self.settings # type: ignore
+
document.settings = settings
super().__init__(document)
# don't allow