diff --git a/LICENSE.txt b/LICENSE.txt index 46799d31d..414de12e8 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -117,3 +117,67 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + + +Support for token processing and doc-comments has been adapted from the Sphinx project - +as well as many other docstring parsing related helpers and features. +Sphinx is licensed as follows: + + +Copyright (c) 2007-2024 by the Sphinx team (see AUTHORS file). +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +The implementation of numpydoc docstring preprocessor +was derived from Sphinx's which itself is partially derived +from code under the following license: + +------------------------------------------------------------------------------- + +Copyright (C) 2008 Stefan van der Walt , Pauli Virtanen + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/README.rst b/README.rst index 680679920..ceb17517b 100644 --- a/README.rst +++ b/README.rst @@ -74,6 +74,8 @@ in development ^^^^^^^^^^^^^^ * Drop Python 3.7 and support Python 3.13. +* Add support for doc-comments as found in Sphinx. Use the special comment formatting ``#:`` to start the comment instead of just ``#``. 
+ Comments need to be either on a line of their own before the definition, or immediately after the assignment on the same line. * Implement canonical HTML element (````) to help search engines reduce outdated content. Enable this feature by passing the base URL of the API documentation with option ``--html-base-url``. * Improve collection of objects: diff --git a/docs/source/codedoc.rst b/docs/source/codedoc.rst index a8e3d7602..8c35193f6 100644 --- a/docs/source/codedoc.rst +++ b/docs/source/codedoc.rst @@ -83,6 +83,28 @@ Assignments to ``__doc__`` inside functions are ignored by pydoctor. This can be Augmented assignments like ``+=`` are currently ignored as well, but that is an implementation limitation rather than a design decision, so this might change in the future. +Doc-comments +------------ + +Documentation can also be put into a comment with special formatting, using a ``#:`` to start the comment instead of just ``#``. +Comments need to be either on their own before the definition, OR immediately after the assignment on the same line. +The latter form is restricted to one line only.:: + + var = True #: Doc comment for module attribute. + + class Foo: + + #: Doc comment for class attribute Foo.bar. + #: It can have multiple lines. + #: @type: int + bar = 1 + + flox = 1.5 #: Doc comment for Foo.flox. One line only. + + def __init__(self): + #: Doc comment for instance attribute qux. 
+ self.qux = 3 + Constants --------- diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py index aa35626cc..02f2caa2a 100644 --- a/pydoctor/astbuilder.py +++ b/pydoctor/astbuilder.py @@ -2,6 +2,7 @@ from __future__ import annotations import ast +import tokenize import contextlib import sys @@ -17,14 +18,17 @@ from pydoctor.epydoc.markup._pyval_repr import colorize_inline_pyval from pydoctor.astutils import (is_none_literal, is_typing_annotation, is_using_annotations, is_using_typing_final, node2dottedname, node2fullname, is__name__equals__main__, unstring_annotation, upgrade_annotation, iterassign, extract_docstring_linenum, infer_type, get_parents, - get_docstring_node, get_assign_docstring_node, unparse, NodeVisitor, Parentage, Str) + get_docstring_node, get_assign_docstring_node, extract_doc_comment_before, extract_doc_comment_after, unparse, NodeVisitor, Parentage, Str) +def parseFile(path: Path) -> tuple[ast.Module, Sequence[str]]: + """ + Parse the contents of a Python source file. -def parseFile(path: Path) -> ast.Module: - """Parse the contents of a Python source file.""" - with open(path, 'rb') as f: - src = f.read() + b'\n' - return _parse(src, filename=str(path)) + @returns: Tuple: ast module, sequence of source code lines. + """ + with tokenize.open(path) as f: + src = f.read() + '\n' + return _parse(src, filename=str(path)), src.splitlines(keepends=True) if sys.version_info >= (3,8): _parse = partial(ast.parse, type_comments=True) @@ -817,6 +821,25 @@ def _handleAssignment(self, else: raise IgnoreAssignment() + def _handleDocComment(self, node: ast.Assign | ast.AnnAssign, target: ast.expr) -> None: + # Process the doc-comments, this is very similar to the inline docstrings. 
+ try: + parent, name = self._contextualizeTarget(target) + except ValueError: + return + + # fetch the target of the doc-comment + if (attr:=parent.contents.get(name)) is None: + return + + lines = self.builder.lines_collection[self.module] + if lines: + for doc_comment in [extract_doc_comment_before(node, lines), + extract_doc_comment_after(node, lines)]: + if doc_comment: + attr._setDocstringValue(doc_comment[1], doc_comment[0]) + + def visit_Assign(self, node: ast.Assign) -> None: lineno = node.lineno expr = node.value @@ -844,9 +867,11 @@ def visit_Assign(self, node: ast.Assign) -> None: continue else: if not isTupleAssignment: + self._handleDocComment(node, target) self._handleInlineDocstrings(node, target) else: for elem in cast(ast.Tuple, target).elts: # mypy is not as smart as pyright yet. + self._handleDocComment(node, elem) self._handleInlineDocstrings(node, elem) def visit_AnnAssign(self, node: ast.AnnAssign) -> None: @@ -857,6 +882,7 @@ def visit_AnnAssign(self, node: ast.AnnAssign) -> None: except IgnoreAssignment: return else: + self._handleDocComment(node, node.target) self._handleInlineDocstrings(node, node.target) def _getClassFromMethodContext(self) -> Optional[model.Class]: @@ -1206,11 +1232,12 @@ class ASTBuilder: def __init__(self, system: model.System): self.system = system - self.current = cast(model.Documentable, None) # current visited object. - self.currentMod: Optional[model.Module] = None # current module, set when visiting ast.Module. 
+ self.current = cast(model.Documentable, None) #: current visited object + self.currentMod: Optional[model.Module] = None #: module, set when visiting ast.Module self._stack: List[model.Documentable] = [] - self.ast_cache: Dict[Path, Optional[ast.Module]] = {} + self.ast_cache: Dict[Path, Optional[ast.Module]] = {} #: avoids calling parse() twice for the same path + self.lines_collection: dict[model.Module, Sequence[str] | None] = {} #: mapping from modules to source code lines def _push(self, cls: Type[DocumentableT], @@ -1321,20 +1348,24 @@ def parseFile(self, path: Path, ctx: model.Module) -> Optional[ast.Module]: return self.ast_cache[path] except KeyError: mod: Optional[ast.Module] = None + lines: Sequence[str] | None = None try: - mod = parseFile(path) + mod, lines = parseFile(path) except (SyntaxError, ValueError) as e: ctx.report(f"cannot parse file, {e}") self.ast_cache[path] = mod + self.lines_collection[ctx] = lines return mod def parseString(self, py_string:str, ctx: model.Module) -> Optional[ast.Module]: - mod = None + mod: Optional[ast.Module] = None + lines: Sequence[str] | None = None try: - mod = _parse(py_string) + mod, lines = _parse(py_string), py_string.splitlines(keepends=True) except (SyntaxError, ValueError): ctx.report("cannot parse string") + self.lines_collection[ctx] = lines return mod model.System.defaultBuilder = ASTBuilder diff --git a/pydoctor/astutils.py b/pydoctor/astutils.py index 203735ff0..33afaa0f6 100644 --- a/pydoctor/astutils.py +++ b/pydoctor/astutils.py @@ -5,6 +5,7 @@ import inspect import platform +import re import sys from numbers import Number from typing import Any, Callable, Collection, Iterator, Optional, List, Iterable, Sequence, TYPE_CHECKING, Tuple, Union, cast @@ -221,6 +222,8 @@ def get_node_block(node: ast.AST) -> tuple[ast.AST, str]: Tell in wich block the given node lives in. A block is defined by a tuple: (parent node, fieldname) + + @raise ValueError: If the assignment parent is missing or bogus. 
""" try: parent = next(get_parents(node)) @@ -240,7 +243,7 @@ def get_assign_docstring_node(assign:ast.Assign | ast.AnnAssign) -> Str | None: This helper function relies on the non-standard C{.parent} attribute on AST nodes to navigate upward in the tree and determine this node direct siblings. """ - # if this call raises an ValueError it means that we're doing something nasty with the ast... + # this call raises an ValueError if we're doing something nasty with the ast... please report parent_node, fieldname = get_node_block(assign) statements = getattr(parent_node, fieldname, None) @@ -811,3 +814,174 @@ class Precedence(object): del _op_data, _index, _precedence_data, _symbol_data, _deprecated # This was part of the astor library for Python AST manipulation. + + +# Part of the sphinx.pycode.parser module. +# Copyright 2007-2020 by the Sphinx team, see AUTHORS. +# BSD, see LICENSE for details. +from token import DEDENT, INDENT, NAME, NEWLINE, NUMBER, OP, STRING +from tokenize import COMMENT, generate_tokens, tok_name + +class Token: + """Better token wrapper for tokenize module.""" + + def __init__(self, kind: int, value: Any, start: Tuple[int, int], end: Tuple[int, int], + source: str) -> None: + self.kind = kind + self.value = value + self.start = start + self.end = end + self.source = source + + def __eq__(self, other: Any) -> bool: + if isinstance(other, int): + return self.kind == other + elif isinstance(other, str): + return bool(self.value == other) + elif isinstance(other, (list, tuple)): + return [self.kind, self.value] == list(other) + elif other is None: + return False + else: + raise ValueError('Unknown value: %r' % other) + + def match(self, *conditions: Any) -> bool: + return any(self == candidate for candidate in conditions) + + def __repr__(self) -> str: + return '' % (tok_name[self.kind], + self.value.strip()) + +class TokenProcessor: + def __init__(self, buffers: Sequence[str]) -> None: + lines = iter(buffers) + self.buffers = buffers + 
self.tokens = generate_tokens(lambda: next(lines)) + self.current: Token | None = None + self.previous: Token | None = None + + def get_line(self, lineno: int) -> str: + """Returns specified line.""" + return self.buffers[lineno - 1] + + def fetch_token(self) -> Token | None: + """Fetch a next token from source code. + + Returns ``None`` if sequence finished. + """ + try: + self.previous = self.current + self.current = Token(*next(self.tokens)) + except StopIteration: + self.current = None + + return self.current + + def fetch_until(self, condition: Any) -> List[Token]: + """Fetch tokens until specified token appeared. + + .. note:: This also handles parenthesis well. + """ + tokens = [] + while self.fetch_token(): + assert self.current + tokens.append(self.current) + if self.current == condition: + break + elif self.current == [OP, '(']: + tokens += self.fetch_until([OP, ')']) + elif self.current == [OP, '{']: + tokens += self.fetch_until([OP, '}']) + elif self.current == [OP, '[']: + tokens += self.fetch_until([OP, ']']) + + return tokens + + +class AfterCommentParser(TokenProcessor): + """Python source code parser to pick up comment after assignment. + + This parser takes a python code starts with assignment statement, + and returns the comments for variable if exists. 
+ """ + + def __init__(self, lines: Sequence[str]) -> None: + super().__init__(lines) + self.comment: str | None = None + + def fetch_rvalue(self) -> Sequence[Token]: + """Fetch right-hand value of assignment.""" + tokens: list[Token] = [] + while self.fetch_token(): + assert self.current + tokens.append(self.current) + if self.current == [OP, '(']: + tokens += self.fetch_until([OP, ')']) + elif self.current == [OP, '{']: + tokens += self.fetch_until([OP, '}']) + elif self.current == [OP, '[']: + tokens += self.fetch_until([OP, ']']) + elif self.current == INDENT: + tokens += self.fetch_until(DEDENT) + elif self.current == [OP, ';']: + break + elif self.current.kind not in (OP, NAME, NUMBER, STRING): + break + + return tokens + + def parse(self) -> None: + """Parse the code and obtain comment after assignment.""" + # skip lvalue (or whole of AnnAssign) + while (current:=self.fetch_token()) and not current.match([OP, '='], NEWLINE, COMMENT): + assert self.current + + # skip rvalue (if exists) + if self.current == [OP, '=']: + self.fetch_rvalue() + + if self.current == COMMENT: + assert self.current + self.comment = self.current.value + +comment_re = re.compile('^\\s*#: ?(.*)\r?\n?$') +indent_re = re.compile('^\\s*$') + +def extract_doc_comment_after(node: ast.Assign | ast.AnnAssign, lines: Sequence[str]) -> Tuple[int, str] | None: + """ + Support for doc comment as found in sphinx. + + @param node: the assignment node + @param lines: the lines of the source code, as generated by + C{code.splitlines(keepends=True)}. + @returns: A tuple linenumber, docstring or None if the assignment doesn't have a doc comment. 
+ """ + # check doc comments after assignment + current_line = lines[node.lineno - 1] + parser = AfterCommentParser([current_line[node.col_offset:], *lines[node.lineno:]]) + parser.parse() + if parser.comment and comment_re.match(parser.comment): + docstring = comment_re.sub('\\1', parser.comment) + return node.lineno, docstring + + return None + +def extract_doc_comment_before(node: ast.Assign | ast.AnnAssign, lines: Sequence[str]) -> Tuple[int, str] | None: + """ + Same as L{extract_doc_comment_after} but fetch the comment before the assignment. + """ + # check doc comments before assignment + comment_lines = [] + for i in range(node.lineno - 1): + before_line = lines[node.lineno - 2 - i] + if comment_re.match(before_line): + comment_lines.append(comment_re.sub('\\1', before_line)) + else: + break + if comment_lines: + docstring = inspect.cleandoc('\n'.join(reversed(comment_lines))) + return node.lineno - len(comment_lines), docstring + + return None + +# This was part of the sphinx.pycode.parser module. \ No newline at end of file diff --git a/pydoctor/epydoc/markup/restructuredtext.py b/pydoctor/epydoc/markup/restructuredtext.py index 8c11806d7..e7a51a3ac 100644 --- a/pydoctor/epydoc/markup/restructuredtext.py +++ b/pydoctor/epydoc/markup/restructuredtext.py @@ -61,9 +61,6 @@ from pydoctor.epydoc.docutils import new_document from pydoctor.model import Documentable -#: A dictionary whose keys are the "consolidated fields" that are -#: recognized by epydoc; and whose values are the corresponding epydoc -#: field names that should be used for the individual fields. 
CONSOLIDATED_FIELDS = { 'parameters': 'param', 'arguments': 'arg', diff --git a/pydoctor/test/test_astbuilder.py b/pydoctor/test/test_astbuilder.py index bf45246fb..5effad9e4 100644 --- a/pydoctor/test/test_astbuilder.py +++ b/pydoctor/test/test_astbuilder.py @@ -1,3 +1,5 @@ +from __future__ import annotations + from typing import Optional, Tuple, Type, List, overload, cast import ast import sys @@ -3059,6 +3061,84 @@ def test_typealias_unstring(systemcls: Type[model.System]) -> None: # there is not Constant nodes in the type alias anymore next(n for n in ast.walk(typealias.value) if isinstance(n, ast.Constant)) +@systemcls_param +def test_doc_comment(systemcls: Type[model.System], capsys: CapSys) -> None: + """ + Tests for feature https://github.com/twisted/pydoctor/issues/800 + """ + code = ('class Foo(object):\n' + ' """class Foo!"""\n' + ' #: comment before attr1\n' + ' attr1 = None\n' + ' attr2 = None # attribute comment for attr2 (without colon)\n' + ' attr3 = None #: attribute comment for attr3\n' + ' attr4 = None #: long attribute comment\n' + ' #: for attr4\n' + ' #: comment before attr5\n' + ' attr5 = None #: attribute comment for attr5\n' + ' attr6, attr7 = 1, 2 #: this comment is not ignored\n' + '\n' + ' def __init__(self):\n' + ' self.attr8 = None #: first attribute comment (ignored)\n' + ' self.attr8 = None #: attribute comment for attr8\n' + ' #: comment before attr9\n' + ' self.attr9 = None #: comment after attr9\n' + ' "string after attr9"\n' + '\n' + ' def bar(self, arg1, arg2=True, *args, **kwargs):\n' + ' """method Foo.bar"""\n' + ' pass\n' + '\n' + 'def baz():\n' + ' """function baz"""\n' + ' pass\n' + '\n' + 'class Qux: attr1 = 1; attr2 = 2') + + mod = fromText(code, systemcls=systemcls) + + def docs(name: str) -> str | None: + return mod.contents['Foo'].contents[name].docstring + + assert docs('attr1') == 'comment before attr1' + assert docs('attr2') == None # not a doc comment + assert docs('attr3') == 'attribute comment for attr3' + assert 
docs('attr4') == 'long attribute comment' + assert docs('attr4') == 'long attribute comment' + assert docs('attr5') == 'attribute comment for attr5' + assert docs('attr6') == 'this comment is not ignored' + assert docs('attr7') == 'this comment is not ignored' + assert docs('attr8') == 'attribute comment for attr8' + assert docs('attr9') == 'string after attr9' + +@systemcls_param +def test_doc_comment_module_var(systemcls: Type[model.System], capsys: CapSys) -> None: + src = """ + a: int = 42 #: This is a variable. + + #: This is b variable. + b = None + + #: This is c variable. + c: float #: This takes precedence! + + d: None #: This is also ignored. + '''Because I exist!''' + + #: this is not documentation + + e = 43 + """ + mod = fromText(src, systemcls=systemcls) + + def docs(name: str) -> str | None: + return mod.contents[name].docstring + + assert docs('a') == 'This is a variable.' + assert docs('c') == 'This takes precedence!' + assert docs('d') == 'Because I exist!' + assert docs('e') is None + @systemcls_param def test_mutilple_docstrings_warnings(systemcls: Type[model.System], capsys: CapSys) -> None: """ @@ -3090,7 +3170,7 @@ class A: def test_mutilple_docstring_with_doc_comments_warnings(systemcls: Type[model.System], capsys: CapSys) -> None: src = ''' class C: - a: int;"docs" #: re-docs + a: int;"re-docs" #: docs class B: """ @@ -3107,10 +3187,56 @@ class B2: a: int "re-re-docs" ''' - fromText(src, systemcls=systemcls) - # TODO: handle doc comments.x - assert capsys.readouterr().out == ':18: Existing docstring at line 14 is overriden\n' + mod = fromText(src, systemcls=systemcls) + + assert capsys.readouterr().out == ( + ':3: Existing docstring at line 3 is overriden\n' + ':9: Existing docstring at line 7 is overriden\n' + ':16: Existing docstring at line 14 is overriden\n' + ':18: Existing docstring at line 16 is overriden\n') + + assert mod.contents['C'].contents['a'].docstring == 're-docs' + assert mod.contents['B'].contents['a'].docstring == 
're-docs' + assert mod.contents['B2'].contents['a'].docstring == 're-re-docs' +@systemcls_param +def test_doc_comment_multiple_assigments(systemcls: Type[model.System], capsys: CapSys) -> None: + # TODO: this currently does not support nested tuple assignments. + src = ''' + class C: + def __init__(self): + self.x, x = 1, 1 #: x docs + self.y = x = 1 #: y docs + x,y = 1,1 #: x and y docs + v = w = 1 #: v and w docs + ''' + mod = fromText(src, systemcls=systemcls) + assert not capsys.readouterr().out + assert mod.contents['x'].docstring == 'x and y docs' + assert mod.contents['y'].docstring == 'x and y docs' + assert mod.contents['v'].docstring == 'v and w docs' + assert mod.contents['w'].docstring == 'v and w docs' + assert mod.contents['C'].contents['x'].docstring == 'x docs' + assert mod.contents['C'].contents['y'].docstring == 'y docs' + +@systemcls_param +def test_other_encoding(systemcls: Type[model.System], capsys: CapSys) -> None: + # Test for issue https://github.com/twisted/pydoctor/issues/805 + processPackage('coding_not_utf8', + systemcls=lambda: systemcls(model.Options.from_args(['-q']))) + assert not capsys.readouterr().out + +@systemcls_param +def test_alias_resets_attribute_state(systemcls: Type[model.System], capsys:CapSys) -> None: + # from https://github.com/lxml/lxml/blob/a56babb0013dc46baf480f49ebd5cc1ab65bc418/src/lxml/html/builder.py + src = ''' + E = True #: Legit docstring + A = E.a #: trash1 + ABBR = E.abbr #: trash2 + ''' + fromText(src, systemcls=systemcls) + assert not capsys.readouterr().out + @systemcls_param def test_import_all_inside_else_branch_is_processed(systemcls: Type[model.System], capsys: CapSys) -> None: src1 = ''' diff --git a/pydoctor/test/test_astutils.py b/pydoctor/test/test_astutils.py index 8dfe1c912..c3b67fce6 100644 --- a/pydoctor/test/test_astutils.py +++ b/pydoctor/test/test_astutils.py @@ -18,7 +18,6 @@ def test_get_assign_docstring_node() -> None: astutils.Parentage().visit(tree) assert 
astutils.get_str_value(astutils.get_assign_docstring_node(tree.body[0])) == "inline docs" # type:ignore - def test_get_assign_docstring_node_not_in_body() -> None: src = dedent(''' if True: pass diff --git a/pydoctor/test/testpackages/coding_not_utf8/__init__.py b/pydoctor/test/testpackages/coding_not_utf8/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/pydoctor/test/testpackages/coding_not_utf8/ascii_coding.py b/pydoctor/test/testpackages/coding_not_utf8/ascii_coding.py new file mode 100644 index 000000000..bf9667a38 --- /dev/null +++ b/pydoctor/test/testpackages/coding_not_utf8/ascii_coding.py @@ -0,0 +1,3 @@ +# -*- coding: ascii +var = True +'HELUZ H4 - kominy.xml' \ No newline at end of file diff --git a/pydoctor/test/testpackages/coding_not_utf8/other_coding.py b/pydoctor/test/testpackages/coding_not_utf8/other_coding.py new file mode 100644 index 000000000..616b553fc --- /dev/null +++ b/pydoctor/test/testpackages/coding_not_utf8/other_coding.py @@ -0,0 +1,3 @@ +# -*- coding: cp852 +var = True +'HELUZ H4 - kom¡ny.xml' \ No newline at end of file