Fix bug in the handling of types inside consolidated fields (#766)
* Fix #765
tristanlatr authored Mar 25, 2024
1 parent 9becf85 commit f835fb4
Showing 3 changed files with 61 additions and 36 deletions.
1 change: 1 addition & 0 deletions README.rst
@@ -83,6 +83,7 @@ This is the last major release to support Python 3.7.
* `ExtRegistrar.register_post_processor()` now supports a `priority` argument that is an int.
Highest priority callables will be called first during post-processing.
* Fix too noisy ``--verbose`` mode (suppress some ambiguous annotation warnings).
* Fix type processing inside restructuredtext consolidated fields.

pydoctor 23.9.1
^^^^^^^^^^^^^^^
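As an aside on the ``ExtRegistrar.register_post_processor()`` changelog entry above, the following is a minimal sketch of how the new ``priority`` argument might be used from an extension module. It is not part of this commit; the entry-point name and the exact signature are assumptions based on the changelog wording, so check ``pydoctor.extensions`` for the real API.

# Hedged sketch, not from this commit: setup_pydoctor_extension and the exact
# register_post_processor() signature are assumptions; consult
# pydoctor.extensions for the real API.
from pydoctor import extensions, model

def collect_stats(system: model.System) -> None:
    # Higher priority: called first during post-processing.
    ...

def final_checks(system: model.System) -> None:
    # Lower priority: called after collect_stats.
    ...

def setup_pydoctor_extension(r: extensions.ExtRegistrar) -> None:
    r.register_post_processor(collect_stats, priority=200)
    r.register_post_processor(final_checks, priority=100)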
58 changes: 23 additions & 35 deletions pydoctor/epydoc/markup/_types.py
@@ -57,42 +57,30 @@ def to_stan(self, docstring_linker: DocstringLinker) -> Tag:
        return self._convert_type_spec_to_stan(docstring_linker)

    def _tokenize_node_type_spec(self, spec: nodes.document) -> List[Union[str, nodes.Node]]:
        def _warn_not_supported(n:nodes.Node) -> None:
            self.warnings.append(f"Unexpected element in type specification field: element '{n.__class__.__name__}'. "
                                 "This value should only contain text or inline markup.")

        tokens: List[Union[str, nodes.Node]] = []
        # Determine if the content is nested inside a paragraph;
        # this is generally the case, except for documents generated by consolidated fields.
        if spec.children and isinstance(spec.children[0], nodes.paragraph):
            if len(spec.children)>1:
                _warn_not_supported(spec.children[1])
            children = spec.children[0].children
        else:
            children = spec.children

        class Tokenizer(nodes.GenericNodeVisitor):

            def __init__(self, document: nodes.document) -> None:
                super().__init__(document)
                self.tokens: List[Union[str, nodes.Node]] = []
                self.rest = nodes.document
                self.warnings: List[str] = []

            def default_visit(self, node: nodes.Node) -> None:
                # Tokenize only the first level text in paragraph only,
                # Simply warn and ignore the rest.

                parent = node.parent
                super_parent = parent.parent if parent else None

                # first level
                if isinstance(parent, nodes.document) and not isinstance(node, nodes.paragraph):
                    self.warnings.append(f"Unexpected element in type specification field: element '{node.__class__.__name__}'. "
                                         "This value should only contain regular paragraphs with text or inline markup.")
                    raise nodes.SkipNode()

                # second level
                if isinstance(super_parent, nodes.document):
                    # only text in paragraph nodes are taken into account
                    if isinstance(node, nodes.Text):
                        # Tokenize the Text node with the same method TypeDocstring uses.
                        self.tokens.extend(TypeDocstring._tokenize_type_spec(node.astext()))
                    else:
                        self.tokens.append(node)
                    raise nodes.SkipNode()

        tokenizer = Tokenizer(spec)
        spec.walk(tokenizer)
        self.warnings.extend(tokenizer.warnings)
        return tokenizer.tokens
        for child in children:
            if isinstance(child, nodes.Text):
                # Tokenize the Text node with the same method TypeDocstring uses.
                tokens.extend(TypeDocstring._tokenize_type_spec(child.astext()))
            elif isinstance(child, nodes.Inline):
                tokens.append(child)
            else:
                _warn_not_supported(child)

        return tokens

    def _convert_obj_tokens_to_stan(self, tokens: List[Tuple[Union[str, nodes.Node], TokenType]],
                                    docstring_linker: DocstringLinker) -> List[Tuple[Union[str, Tag, nodes.Node], TokenType]]:
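For context on the change above: a consolidated field such as ``:Parameters:`` or ``:CVariables:`` produces a small docutils document per type specification whose children are not wrapped in a paragraph node, which is the branch the new code now falls back to. Below is a minimal, hypothetical docstring exercising that code path when type processing (``processtypes``) is enabled; the function and parameter names are made up for illustration.

# Hypothetical example, not part of the commit: the "int" type below reaches
# _tokenize_node_type_spec() without an enclosing paragraph node because it
# comes from a consolidated :Parameters: field.
def frob(value):
    """
    Do something with ``value``.

    :Parameters:
        `value` : int
            The value to frob.
    """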
38 changes: 37 additions & 1 deletion pydoctor/test/test_type_fields.py
@@ -2,6 +2,7 @@
from textwrap import dedent
from pydoctor.epydoc.markup import ParseError, get_parser_by_name
from pydoctor.test.epydoc.test_restructuredtext import prettify
from pydoctor.test.test_templatewriter import getHTMLOfAttribute
from pydoctor.test import NotFoundLinker, CapSys
from pydoctor.test.epydoc import parse_docstring
from pydoctor.test.test_epydoc2stan import docstring2html
@@ -364,10 +365,15 @@ def foo(**args):
- "numpy"
- "google"
h: things
k: stuff
:type h: stuff
>>> python
:type k: a paragraph
another one
"""
'''

@@ -387,4 +393,34 @@ def foo(**args):
warns:13: bad docstring: invalid value set (missing opening brace): 3}
warns:15: bad docstring: malformed string literal (missing closing quote): '2
warns:17: bad docstring: malformed string literal (missing opening quote): 2"
warns:23: bad docstring: Unexpected element in type specification field: element 'doctest_block'. This value should only contain regular paragraphs with text or inline markup.'''
warns:24: bad docstring: Unexpected element in type specification field: element 'doctest_block'. This value should only contain text or inline markup.
warns:28: bad docstring: Unexpected element in type specification field: element 'paragraph'. This value should only contain text or inline markup.'''

def test_process_types_with_consolidated_fields(capsys: CapSys) -> None:
    """
    Test for issue https://github.com/twisted/pydoctor/issues/765
    """
    src = '''
    class V:
        """
        Doc.

        :CVariables:
            `id` : int
                Classvar doc.
        """
    '''
    system = model.System()

    system.options.processtypes = True
    system.options.docformat = 'restructuredtext'

    mod = fromText(src, modname='do_not_warn_please', system=system)
    attr = mod.contents['V'].contents['id']
    assert isinstance(attr, model.Attribute)
    html = getHTMLOfAttribute(attr)
    # Filter docstring linker warnings
    lines = [line for line in capsys.readouterr().out.splitlines() if 'Cannot find link target' not in line]
    assert not lines
    assert '<code>int</code>' in html
