From f62d92972256e98045f106a3883ed46443331fc0 Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Thu, 18 Aug 2022 10:40:01 +0200 Subject: [PATCH 01/11] features to align assignment operators and dictionary colons --- yapf/pytree/comment_splicer.py | 1 - yapf/pytree/subtype_assigner.py | 16 +- yapf/yapflib/format_decision_state.py | 1 + yapf/yapflib/format_token.py | 89 +++++ yapf/yapflib/reformatter.py | 508 ++++++++++++++++++++++++++ yapf/yapflib/style.py | 24 ++ yapf/yapflib/subtypes.py | 1 + yapftests/format_token_test.py | 34 +- yapftests/reformatter_basic_test.py | 368 ++++++++++++++++++- yapftests/subtype_assigner_test.py | 109 +++++- yapftests/yapf_test.py | 2 +- 11 files changed, 1138 insertions(+), 15 deletions(-) diff --git a/yapf/pytree/comment_splicer.py b/yapf/pytree/comment_splicer.py index ae5ffe66f..9e8f02c48 100644 --- a/yapf/pytree/comment_splicer.py +++ b/yapf/pytree/comment_splicer.py @@ -42,7 +42,6 @@ def SpliceComments(tree): # This is a list because Python 2.x doesn't have 'nonlocal' :) prev_leaf = [None] _AnnotateIndents(tree) - def _VisitNodeRec(node): """Recursively visit each node to splice comments into the AST.""" # This loop may insert into node.children, so we'll iterate over a copy. diff --git a/yapf/pytree/subtype_assigner.py b/yapf/pytree/subtype_assigner.py index dd3ea3d1e..03d7efe1a 100644 --- a/yapf/pytree/subtype_assigner.py +++ b/yapf/pytree/subtype_assigner.py @@ -240,6 +240,7 @@ def Visit_argument(self, node): # pylint: disable=invalid-name # argument ::= # test [comp_for] | test '=' test self._ProcessArgLists(node) + #TODO add a subtype to each argument? 
def Visit_arglist(self, node): # pylint: disable=invalid-name # arglist ::= @@ -300,6 +301,7 @@ def Visit_typedargslist(self, node): # pylint: disable=invalid-name for i in range(1, len(node.children)): prev_child = node.children[i - 1] child = node.children[i] + if prev_child.type == grammar_token.COMMA: _AppendFirstLeafTokenSubtype(child, subtypes.PARAMETER_START) elif child.type == grammar_token.COMMA: @@ -309,6 +311,10 @@ def Visit_typedargslist(self, node): # pylint: disable=invalid-name tname = True _SetArgListSubtype(child, subtypes.TYPED_NAME, subtypes.TYPED_NAME_ARG_LIST) + # NOTE Every element of the tynamme argument list + # should have this list type + _AppendSubtypeRec(child, subtypes.TYPED_NAME_ARG_LIST) + elif child.type == grammar_token.COMMA: tname = False elif child.type == grammar_token.EQUAL and tname: @@ -383,21 +389,25 @@ def HasSubtype(node): for child in node.children: node_name = pytree_utils.NodeName(child) + #TODO exclude it if the first leaf is a comment in appendfirstleaftokensubtype if node_name not in {'atom', 'COMMA'}: _AppendFirstLeafTokenSubtype(child, list_subtype) + def _AppendTokenSubtype(node, subtype): """Append the token's subtype only if it's not already set.""" pytree_utils.AppendNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE, subtype) - +#TODO should exclude comment child to all Appendsubtypes functions def _AppendFirstLeafTokenSubtype(node, subtype): """Append the first leaf token's subtypes.""" + #TODO exclude the comment leaf if isinstance(node, pytree.Leaf): - _AppendTokenSubtype(node, subtype) - return + _AppendTokenSubtype(node, subtype) + return + _AppendFirstLeafTokenSubtype(node.children[0], subtype) diff --git a/yapf/yapflib/format_decision_state.py b/yapf/yapflib/format_decision_state.py index c299d1c85..efcef0ba4 100644 --- a/yapf/yapflib/format_decision_state.py +++ b/yapf/yapflib/format_decision_state.py @@ -978,6 +978,7 @@ def _GetNewlineColumn(self): not 
self.param_list_stack[-1].SplitBeforeClosingBracket( top_of_stack.indent) and top_of_stack.indent == ((self.line.depth + 1) * style.Get('INDENT_WIDTH'))): + # NOTE: comment inside argument list is not excluded in subtype assigner if (subtypes.PARAMETER_START in current.subtypes or (previous.is_comment and subtypes.PARAMETER_START in previous.subtypes)): diff --git a/yapf/yapflib/format_token.py b/yapf/yapflib/format_token.py index 6eea05473..f8658f772 100644 --- a/yapf/yapflib/format_token.py +++ b/yapf/yapflib/format_token.py @@ -14,6 +14,7 @@ """Enhanced token information for formatting.""" import keyword +from operator import sub import re from lib2to3.pgen2 import token @@ -124,6 +125,7 @@ def __init__(self, node, name): self.subtypes = {subtypes.NONE} if not stypes else stypes self.is_pseudo = hasattr(node, 'is_pseudo') and node.is_pseudo + @property def formatted_whitespace_prefix(self): if style.Get('INDENT_BLANK_LINES'): @@ -322,3 +324,90 @@ def is_pytype_comment(self): def is_copybara_comment(self): return self.is_comment and re.match( r'#.*\bcopybara:\s*(strip|insert|replace)', self.value) + + + @property + def is_assign(self): + return subtypes.ASSIGN_OPERATOR in self.subtypes + + @property + def is_dict_colon(self): + # if the token is dictionary colon and + # the dictionary has no comp_for + return self.value == ':' and self.previous_token.is_dict_key + + @property + def is_dict_key(self): + # if the token is dictionary key which is not preceded by doubel stars and + # the dictionary has no comp_for + return subtypes.DICTIONARY_KEY_PART in self.subtypes + + @property + def is_dict_key_start(self): + # if the token is dictionary key start + return subtypes.DICTIONARY_KEY in self.subtypes + + @property + def is_dict_value(self): + return subtypes.DICTIONARY_VALUE in self.subtypes + + @property + def is_augassign(self): + augassigns = {'+=', '-=' , '*=' , '@=' , '/=' , '%=' , '&=' , '|=' , '^=' , + '<<=' , '>>=' , '**=' , '//='} + return self.value in 
augassigns + + @property + def is_argassign(self): + return (subtypes.DEFAULT_OR_NAMED_ASSIGN in self.subtypes + or subtypes.VARARGS_LIST in self.subtypes) + + @property + def is_argname(self): + # it's the argument part before argument assignment operator, + # including tnames and data type + # not the assign operator, + # not the value after the assign operator + + # argument without assignment is also included + # the token is arg part before '=' but not after '=' + if self.is_argname_start: + return True + + # exclude comment inside argument list + if not self.is_comment: + # the token is any element in typed arglist + if subtypes.TYPED_NAME_ARG_LIST in self.subtypes: + return True + + return False + + + @property + def is_argname_start(self): + # return true if it's the start of every argument entry + if self.previous_token: + previous_subtypes = self.previous_token.subtypes + + return ( + (not self.is_comment + and subtypes.DEFAULT_OR_NAMED_ASSIGN not in self.subtypes + and subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in self.subtypes + and subtypes.DEFAULT_OR_NAMED_ASSIGN not in previous_subtypes + and (not subtypes.PARAMETER_STOP in self.subtypes + or subtypes.PARAMETER_START in self.subtypes) + ) + or # if there is comment, the arg after it is the argname start + (not self.is_comment and self.previous_token and self.previous_token.is_comment + and + (subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in previous_subtypes + or subtypes.TYPED_NAME_ARG_LIST in self.subtypes + or subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in self.subtypes)) + ) + + + + + + + diff --git a/yapf/yapflib/reformatter.py b/yapf/yapflib/reformatter.py index 14e0bde70..92aaa950e 100644 --- a/yapf/yapflib/reformatter.py +++ b/yapf/yapflib/reformatter.py @@ -22,6 +22,7 @@ from __future__ import unicode_literals import collections +from distutils.errors import LinkError import heapq import re @@ -102,6 +103,15 @@ def Reformat(llines, verify=False, lines=None): final_lines.append(lline) prev_line = 
lline + + if style.Get('ALIGN_ASSIGNMENT'): + _AlignAssignment(final_lines) + if (style.Get('EACH_DICT_ENTRY_ON_SEPARATE_LINE') + and style.Get('ALIGN_DICT_COLON')): + _AlignDictColon(final_lines) + if style.Get('ALIGN_ARGUMENT_ASSIGNMENT'): + _AlignArgAssign(final_lines) + _AlignTrailingComments(final_lines) return _FormatFinalLines(final_lines, verify) @@ -394,6 +404,504 @@ def _AlignTrailingComments(final_lines): final_lines_index += 1 +def _AlignAssignment(final_lines): + """Align assignment operators and augmented assignment operators to the same column""" + + final_lines_index = 0 + while final_lines_index < len(final_lines): + line = final_lines[final_lines_index] + + assert line.tokens + process_content = False + + for tok in line.tokens: + if tok.is_assign or tok.is_augassign: + # all pre assignment variable lengths in one block of lines + all_pa_variables_lengths = [] + max_variables_length = 0 + + while True: + # EOF + if final_lines_index + len(all_pa_variables_lengths) == len(final_lines): + break + + this_line_index = final_lines_index + len(all_pa_variables_lengths) + this_line = final_lines[this_line_index] + + next_line = None + if this_line_index < len(final_lines) - 1: + next_line = final_lines[final_lines_index + len(all_pa_variables_lengths) + 1 ] + + assert this_line.tokens, next_line.tokens + + # align them differently when there is a blank line in between + if (all_pa_variables_lengths and + this_line.tokens[0].formatted_whitespace_prefix.startswith('\n\n') + ): + break + + # if there is a standalone comment or keyword statement line + # or other lines without assignment in between, break + elif (all_pa_variables_lengths and + True not in [tok.is_assign or tok.is_augassign for tok in this_line.tokens]): + if this_line.tokens[0].is_comment: + if style.Get('NEW_ALIGNMENT_AFTER_COMMENTLINE'): + break + else: break + + if this_line.disable: + all_pa_variables_lengths.append([]) + continue + + variables_content = '' + pa_variables_lengths = [] + 
contain_object = False + line_tokens = this_line.tokens + # only one assignment expression is on each line + for index in range(len(line_tokens)): + line_tok = line_tokens[index] + + prefix = line_tok.formatted_whitespace_prefix + newline_index = prefix.rfind('\n') + if newline_index != -1: + variables_content = '' + prefix = prefix[newline_index + 1:] + + if line_tok.is_assign or line_tok.is_augassign: + next_toks = [line_tokens[i] for i in range(index+1, len(line_tokens))] + # if there is object(list/tuple/dict) with newline entries, break, + # update the alignment so far and start to calulate new alignment + for tok in next_toks: + if tok.value in ['(', '[', '{'] and tok.next_token: + if (tok.next_token.formatted_whitespace_prefix.startswith('\n') + or (tok.next_token.is_comment and tok.next_token.next_token.formatted_whitespace_prefix.startswith('\n'))): + pa_variables_lengths.append(len(variables_content)) + contain_object = True + break + if not contain_object: + if line_tok.is_assign: + pa_variables_lengths.append(len(variables_content)) + # if augassign, add the extra augmented part to the max length caculation + elif line_tok.is_augassign: + pa_variables_lengths.append(len(variables_content) + len(line_tok.value) - 1 ) + # don't add the tokens + # after the assignment operator + break + else: + variables_content += '{}{}'.format(prefix, line_tok.value) + + if pa_variables_lengths: + max_variables_length = max(max_variables_length, max(pa_variables_lengths)) + + all_pa_variables_lengths.append(pa_variables_lengths) + + # after saving this line's max variable length, + # we check if next line has the same depth as this line, + # if not, we don't want to calculate their max variable length together + # so we break the while loop, update alignment so far, and + # then go to next line that has '=' + if next_line: + if this_line.depth != next_line.depth: + break + # if this line contains objects with newline entries, + # start new block alignment + if 
contain_object: + break + + # if no update of max_length, just go to the next block + if max_variables_length == 0: continue + + max_variables_length += 2 + + # Update the assignment token values based on the max variable length + for all_pa_variables_lengths_index, pa_variables_lengths in enumerate( + all_pa_variables_lengths): + if not pa_variables_lengths: + continue + this_line = final_lines[final_lines_index + all_pa_variables_lengths_index] + + # only the first assignment operator on each line + pa_variables_lengths_index = 0 + for line_tok in this_line.tokens: + if line_tok.is_assign or line_tok.is_augassign: + assert pa_variables_lengths[0] < max_variables_length + + if pa_variables_lengths_index < len(pa_variables_lengths): + whitespace = ' ' * ( + max_variables_length - pa_variables_lengths[0] - 1) + + assign_content = '{}{}'.format(whitespace, line_tok.value.strip()) + + existing_whitespace_prefix = \ + line_tok.formatted_whitespace_prefix.lstrip('\n') + + # in case the existing spaces are larger than padded spaces + if (len(whitespace) == 1 or len(whitespace) > 1 and + len(existing_whitespace_prefix)>len(whitespace)): + line_tok.whitespace_prefix = '' + elif assign_content.startswith(existing_whitespace_prefix): + assign_content = assign_content[len(existing_whitespace_prefix):] + + # update the assignment operator value + line_tok.value = assign_content + + pa_variables_lengths_index += 1 + + final_lines_index += len(all_pa_variables_lengths) + + process_content = True + break + + if not process_content: + final_lines_index += 1 + + +def _AlignArgAssign(final_lines): + """Align the assign operators in a argument list to the same column""" + """NOTE One argument list of one function is on one logical line! + But funtion calls/argument lists can be in argument list. 
+ """ + final_lines_index = 0 + while final_lines_index < len(final_lines): + line = final_lines[final_lines_index] + if line.disable: + final_lines_index += 1 + continue + + assert line.tokens + process_content = False + + for tok in line.tokens: + if tok.is_argassign: + + this_line = line + line_tokens = this_line.tokens + + for open_index in range(len(line_tokens)): + line_tok = line_tokens[open_index] + + if (line_tok.value == '(' and not line_tok.is_pseudo + and line_tok.next_token.formatted_whitespace_prefix.startswith('\n')): + index = open_index + # skip the comments in the beginning + index += 1 + line_tok = line_tokens[index] + while not line_tok.is_argname_start and index < len(line_tokens)-1: + index += 1 + line_tok = line_tokens[index] + + # check if the argstart is on newline + if line_tok.is_argname_start and line_tok.formatted_whitespace_prefix.startswith('\n'): + first_arg_index = index + first_arg_column = len(line_tok.formatted_whitespace_prefix.lstrip('\n')) + + closing = False + all_arg_name_lengths = [] + arg_name_lengths = [] + name_content = '' + arg_column = first_arg_column + + # start with the first argument + # that has nextline prefix + while not closing: + # if there is a comment in between, save, reset and continue to calulate new alignment + if (style.Get('NEW_ALIGNMENT_AFTER_COMMENTLINE') + and arg_name_lengths and line_tok.is_comment + and line_tok.formatted_whitespace_prefix.startswith('\n')): + all_arg_name_lengths.append(arg_name_lengths) + arg_name_lengths = [] + index += 1 + line_tok = line_tokens[index] + continue + + prefix = line_tok.formatted_whitespace_prefix + newline_index = prefix.rfind('\n') + + if newline_index != -1: + if line_tok.is_argname_start: + name_content = '' + prefix = prefix[newline_index + 1:] + arg_column = len(prefix) + # if a typed arg name is so long + # that there are newlines inside + # only calulate the last line arg_name that has the assignment + elif line_tok.is_argname: + name_content = '' + 
prefix = prefix[newline_index + 1:] + # if any argument not on newline + elif line_tok.is_argname_start: + name_content = '' + arg_column = line_tok.column + # in case they are formatted into one line in final_line + # but are put in separated lines in original codes + if arg_column == first_arg_column: + arg_column = line_tok.formatted_whitespace_prefix + # on the same argument level + if (line_tok.is_argname_start and arg_name_lengths + and arg_column==first_arg_column): + argname_end = line_tok + while argname_end.is_argname: + argname_end = argname_end.next_token + # argument without assignment in between + if not argname_end.is_argassign: + all_arg_name_lengths.append(arg_name_lengths) + arg_name_lengths = [] + index += 1 + line_tok = line_tokens[index] + continue + + if line_tok.is_argassign and arg_column == first_arg_column: + arg_name_lengths.append(len(name_content)) + elif line_tok.is_argname and arg_column == first_arg_column: + name_content += '{}{}'.format(prefix, line_tok.value) + # add up all token values before the arg assign operator + + index += 1 + if index < len(line_tokens): + line_tok = line_tokens[index] + # when the matching closing bracket never found + # due to edge cases where the closing bracket + # is not indented or dedented + else: + all_arg_name_lengths.append(arg_name_lengths) + break + + # if there is a new object(list/tuple/dict) with its entries on newlines, + # save, reset and continue to calulate new alignment + if (line_tok.value in ['(', '[','{'] and line_tok.next_token + and line_tok.next_token.formatted_whitespace_prefix.startswith('\n')): + if arg_name_lengths: + all_arg_name_lengths.append(arg_name_lengths) + arg_name_lengths = [] + index += 1 + line_tok = line_tokens[index] + continue + + if line_tok.value == ')'and not line_tok.is_pseudo: + if line_tok.formatted_whitespace_prefix.startswith('\n'): + close_column = len(line_tok.formatted_whitespace_prefix.lstrip('\n')) + else: close_column = line_tok.column + if 
close_column < first_arg_column: + if arg_name_lengths: + all_arg_name_lengths.append(arg_name_lengths) + closing = True + + # update the alignment once one full arg list is processed + if all_arg_name_lengths: + # if argument list with only the first argument on newline + if len(all_arg_name_lengths) == 1 and len(all_arg_name_lengths[0]) == 1: + continue + max_name_length = 0 + all_arg_name_lengths_index = 0 + arg_name_lengths = all_arg_name_lengths[all_arg_name_lengths_index] + max_name_length = max(arg_name_lengths or [0]) + 2 + arg_lengths_index = 0 + for token in line_tokens[first_arg_index:index]: + if token.is_argassign: + name_token = token.previous_token + while name_token.is_argname and not name_token.is_argname_start: + name_token = name_token.previous_token + name_column = len(name_token.formatted_whitespace_prefix.lstrip('\n')) + if name_column == first_arg_column: + if all_arg_name_lengths_index < len(all_arg_name_lengths): + if arg_lengths_index == len(arg_name_lengths): + all_arg_name_lengths_index += 1 + arg_name_lengths = all_arg_name_lengths[all_arg_name_lengths_index] + max_name_length = max(arg_name_lengths or [0]) + 2 + arg_lengths_index = 0 + + if arg_lengths_index < len(arg_name_lengths): + + assert arg_name_lengths[arg_lengths_index] < max_name_length + + padded_spaces = ' ' * ( + max_name_length - arg_name_lengths[arg_lengths_index] - 1) + arg_lengths_index += 1 + + assign_content = '{}{}'.format(padded_spaces, token.value.strip()) + existing_whitespace_prefix = \ + token.formatted_whitespace_prefix.lstrip('\n') + + # in case the existing spaces are larger than padded spaces + if (len(padded_spaces)==1 or len(padded_spaces)>1 and + len(existing_whitespace_prefix)>len(padded_spaces)): + token.whitespace_prefix = '' + elif assign_content.startswith(existing_whitespace_prefix): + assign_content = assign_content[len(existing_whitespace_prefix):] + + token.value = assign_content + + final_lines_index += 1 + process_content = True + break + + if 
not process_content: + final_lines_index += 1 + + +def _AlignDictColon(final_lines): + """Align colons in a dict to the same column""" + """NOTE One (nested) dict/list is one logical line!""" + final_lines_index = 0 + while final_lines_index < len(final_lines): + line = final_lines[final_lines_index] + if line.disable: + final_lines_index += 1 + continue + + assert line.tokens + process_content = False + + for tok in line.tokens: + # make sure each dict entry on separate lines and + # the dict has more than one entry + if (tok.is_dict_key and tok.formatted_whitespace_prefix.startswith('\n') and + not tok.is_comment): + + this_line = line + + line_tokens = this_line.tokens + for open_index in range(len(line_tokens)): + line_tok = line_tokens[open_index] + + # check each time if the detected dict is the dict we aim for + if line_tok.value == '{' and line_tok.next_token.formatted_whitespace_prefix.startswith('\n'): + index = open_index + # skip the comments in the beginning + index += 1 + line_tok = line_tokens[index] + while not line_tok.is_dict_key and index < len(line_tokens)-1: + index += 1 + line_tok = line_tokens[index] + # in case empty dict, check if dict key again + if line_tok.is_dict_key and line_tok.formatted_whitespace_prefix.startswith('\n'): + closing = False # the closing bracket in dict '}'. 
+ keys_content = '' + all_dict_keys_lengths = [] + dict_keys_lengths = [] + + # record the column number of the first key + first_key_column = len(line_tok.formatted_whitespace_prefix.lstrip('\n')) + key_column = first_key_column + + # while not closing: + while not closing: + prefix = line_tok.formatted_whitespace_prefix + newline = prefix.startswith('\n') + if newline: + # if comments inbetween, save, reset and continue to caluclate new alignment + if (style.Get('NEW_ALIGNMENT_AFTER_COMMENTLINE') + and dict_keys_lengths and line_tok.is_comment): + all_dict_keys_lengths.append(dict_keys_lengths) + dict_keys_lengths =[] + index += 1 + line_tok = line_tokens[index] + continue + if line_tok.is_dict_key_start: + keys_content = '' + prefix = prefix.lstrip('\n') + key_column = len(prefix) + # if the dict key is so long that it has multi-lines + # only caculate the last line that has the colon + elif line_tok.is_dict_key: + keys_content = '' + prefix = prefix.lstrip('\n') + elif line_tok.is_dict_key_start: + key_column = line_tok.column + + if line_tok.is_dict_colon and key_column == first_key_column: + dict_keys_lengths.append(len(keys_content)) + elif line_tok.is_dict_key and key_column == first_key_column: + keys_content += '{}{}'.format(prefix, line_tok.value) + + index += 1 + if index < len(line_tokens): + line_tok = line_tokens[index] + # when the matching closing bracket never found + # due to edge cases where the closing bracket + # is not indented or dedented, e.g. 
']}', with another bracket before + else: + all_dict_keys_lengths.append(dict_keys_lengths) + break + + # if there is new objects(list/tuple/dict) with its entries on newlines, + # or a function call with any of its arguments on newlines, + # save, reset and continue to calulate new alignment + if (line_tok.value in ['(', '[', '{'] and not line_tok.is_pseudo and line_tok.next_token + and line_tok.next_token.formatted_whitespace_prefix.startswith('\n')): + if dict_keys_lengths: + all_dict_keys_lengths.append(dict_keys_lengths) + dict_keys_lengths = [] + index += 1 + line_tok = line_tokens[index] + continue + # the matching closing bracket is either same indented or dedented + # accordingly to previous level's indentation + # the first found, immediately break the while loop + if line_tok.value == '}': + if line_tok.formatted_whitespace_prefix.startswith('\n'): + close_column = len(line_tok.formatted_whitespace_prefix.lstrip('\n')) + else: close_column = line_tok.column + if close_column < first_key_column: + if dict_keys_lengths: + all_dict_keys_lengths.append(dict_keys_lengths) + closing = True + + # update the alignment once one dict is processed + if all_dict_keys_lengths: + max_keys_length = 0 + all_dict_keys_lengths_index = 0 + dict_keys_lengths = all_dict_keys_lengths[all_dict_keys_lengths_index] + max_keys_length = max(dict_keys_lengths or [0]) + 2 + keys_lengths_index = 0 + for token in line_tokens[open_index+1:index]: + if token.is_dict_colon: + # check if the key has multiple tokens and + # get the first key token in this key + key_token = token.previous_token + while key_token.previous_token.is_dict_key: + key_token = key_token.previous_token + key_column = len(key_token.formatted_whitespace_prefix.lstrip('\n')) + + if key_column == first_key_column: + + if keys_lengths_index == len(dict_keys_lengths): + all_dict_keys_lengths_index += 1 + dict_keys_lengths = all_dict_keys_lengths[all_dict_keys_lengths_index] + max_keys_length = max(dict_keys_lengths or 
[0]) + 2 + keys_lengths_index = 0 + + if keys_lengths_index < len(dict_keys_lengths): + assert dict_keys_lengths[keys_lengths_index] < max_keys_length + + padded_spaces = ' ' * ( + max_keys_length - dict_keys_lengths[keys_lengths_index] - 1) + keys_lengths_index += 1 + #NOTE if the existing whitespaces are larger than padded spaces + existing_whitespace_prefix = \ + token.formatted_whitespace_prefix.lstrip('\n') + colon_content = '{}{}'.format(padded_spaces, token.value.strip()) + + # in case the existing spaces are larger than the paddes spaces + if (len(padded_spaces) == 1 or len(padded_spaces) > 1 + and len(existing_whitespace_prefix) >= len(padded_spaces)): + # remove the existing spaces + token.whitespace_prefix = '' + elif colon_content.startswith(existing_whitespace_prefix): + colon_content = colon_content[len(existing_whitespace_prefix):] + + token.value = colon_content + + final_lines_index += 1 + + process_content = True + break + + if not process_content: + final_lines_index += 1 + + + def _FormatFinalLines(final_lines, verify): """Compose the final output from the finalized lines.""" formatted_code = [] diff --git a/yapf/yapflib/style.py b/yapf/yapflib/style.py index 233a64e6b..a4c54b5f8 100644 --- a/yapf/yapflib/style.py +++ b/yapf/yapflib/style.py @@ -54,6 +54,22 @@ def SetGlobalStyle(style): _STYLE_HELP = dict( ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=textwrap.dedent("""\ Align closing bracket with visual indentation."""), + ALIGN_ASSIGNMENT=textwrap.dedent("""\ + Align assignment or augmented assignment operators. + If there is a blank line or newline comment or objects with newline entries in between, + it will start new block alignment."""), + ALIGN_ARGUMENT_ASSIGNMENT=textwrap.dedent("""\ + Align assignment operators in the argument list if they are all split on newlines. + Arguments without assignment are ignored. + Arguments without assignment in between will initiate new block alignment calulation. 
+ Newline comments or objects with newline entries will also start new block alignment."""), + ALIGN_DICT_COLON=textwrap.dedent("""\ + Align the colons in the dictionary + if all entries in dictionay are split on newlines. + and 'EACH_DICT_ENTRY_ON_SEPERATE_LINE' set True. + """), + NEW_ALIGNMENT_AFTER_COMMENTLINE=textwrap.dedent("""\ + Start new assignment or colon alignment when there is a newline comment in between."""), ALLOW_MULTILINE_LAMBDAS=textwrap.dedent("""\ Allow lambdas to be formatted on more than one line."""), ALLOW_MULTILINE_DICTIONARY_KEYS=textwrap.dedent("""\ @@ -419,6 +435,10 @@ def CreatePEP8Style(): """Create the PEP8 formatting style.""" return dict( ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=True, + ALIGN_ASSIGNMENT=False, + ALIGN_ARGUMENT_ASSIGNMENT=False, + ALIGN_DICT_COLON=False, + NEW_ALIGNMENT_AFTER_COMMENTLINE=False, ALLOW_MULTILINE_LAMBDAS=False, ALLOW_MULTILINE_DICTIONARY_KEYS=False, ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=True, @@ -607,6 +627,10 @@ def _IntOrIntListConverter(s): # Note: this dict has to map all the supported style options. 
_STYLE_OPTION_VALUE_CONVERTER = dict( ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=_BoolConverter, + ALIGN_ASSIGNMENT=_BoolConverter, + ALIGN_DICT_COLON=_BoolConverter, + NEW_ALIGNMENT_AFTER_COMMENTLINE=_BoolConverter, + ALIGN_ARGUMENT_ASSIGNMENT=_BoolConverter, ALLOW_MULTILINE_LAMBDAS=_BoolConverter, ALLOW_MULTILINE_DICTIONARY_KEYS=_BoolConverter, ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=_BoolConverter, diff --git a/yapf/yapflib/subtypes.py b/yapf/yapflib/subtypes.py index b4b7efe75..21ca213ad 100644 --- a/yapf/yapflib/subtypes.py +++ b/yapf/yapflib/subtypes.py @@ -38,3 +38,4 @@ SIMPLE_EXPRESSION = 22 PARAMETER_START = 23 PARAMETER_STOP = 24 + diff --git a/yapftests/format_token_test.py b/yapftests/format_token_test.py index 6ea24af63..3bb1ce9f5 100644 --- a/yapftests/format_token_test.py +++ b/yapftests/format_token_test.py @@ -15,10 +15,11 @@ import unittest -from lib2to3 import pytree +from lib2to3 import pytree, pygram from lib2to3.pgen2 import token from yapf.yapflib import format_token +from yapf.pytree import subtype_assigner class TabbedContinuationAlignPaddingTest(unittest.TestCase): @@ -89,6 +90,37 @@ def testIsMultilineString(self): pytree.Leaf(token.STRING, 'r"""hello"""'), 'STRING') self.assertTrue(tok.is_multiline_string) + #------------test argument names------------ + # fun( + # a='hello world', + # # comment, + # b='') + child1 = pytree.Leaf(token.NAME, 'a') + child2 = pytree.Leaf(token.EQUAL, '=') + child3 = pytree.Leaf(token.STRING, "'hello world'") + child4 = pytree.Leaf(token.COMMA, ',') + child5 = pytree.Leaf(token.COMMENT,'# comment') + child6 = pytree.Leaf(token.COMMA, ',') + child7 = pytree.Leaf(token.NAME, 'b') + child8 = pytree.Leaf(token.EQUAL, '=') + child9 = pytree.Leaf(token.STRING, "''") + node_type = pygram.python_grammar.symbol2number['arglist'] + node = pytree.Node(node_type, [child1, child2, child3, child4, child5, + child6, child7, child8,child9]) + subtype_assigner.AssignSubtypes(node) + + def testIsArgName(self, node=node): + 
tok = format_token.FormatToken(node.children[0],'NAME') + self.assertTrue(tok.is_argname) + + def testIsArgAssign(self, node=node): + tok = format_token.FormatToken(node.children[1], 'EQUAL') + self.assertTrue(tok.is_argassign) + + # test if comment inside is not argname + def testCommentNotIsArgName(self, node=node): + tok = format_token.FormatToken(node.children[4], 'COMMENT') + self.assertFalse(tok.is_argname) if __name__ == '__main__': unittest.main() diff --git a/yapftests/reformatter_basic_test.py b/yapftests/reformatter_basic_test.py index 657d1e246..0c68c8525 100644 --- a/yapftests/reformatter_basic_test.py +++ b/yapftests/reformatter_basic_test.py @@ -1583,18 +1583,20 @@ def testNoSplittingWithinSubscriptList(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) + def testExcessCharacters(self): code = textwrap.dedent("""\ - class foo: + class foo: - def bar(self): - self.write(s=[ - '%s%s %s' % ('many of really', 'long strings', '+ just makes up 81') - ]) - """) # noqa + def bar(self): + self.write(s=[ + '%s%s %s' % ('many of really', 'long strings', '+ just makes up 81') + ]) + """) # noqa llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) + unformatted_code = textwrap.dedent("""\ def _(): if True: @@ -2863,6 +2865,8 @@ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None def function(first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass """) # noqa + # if dedent closing brackets and Align argAssign are true, there will be + # spaces before the argassign expected_formatted_code = textwrap.dedent("""\ def function( first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None @@ -2879,7 +2883,8 @@ def function( try: style.SetGlobalStyle( style.CreateStyleFromConfig('{based_on_style: yapf,' - ' dedent_closing_brackets: True}')) + ' dedent_closing_brackets: True,' + ' align_argument_assignment: 
False}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, @@ -3165,6 +3170,355 @@ def testWalrus(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected, reformatter.Reformat(llines)) + #------tests for alignment functions-------- + def testAlignAssignBlankLineInbetween(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_assignment: true}')) + unformatted_code = textwrap.dedent("""\ + val_first = 1 + val_second += 2 + + val_third = 3 + """) + expected_formatted_code = textwrap.dedent("""\ + val_first = 1 + val_second += 2 + + val_third = 3 + """) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + def testAlignAssignCommentLineInbetween(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_assignment: true,' + 'new_alignment_after_commentline = true}')) + unformatted_code = textwrap.dedent("""\ + val_first = 1 + val_second += 2 + # comment + val_third = 3 + """) + expected_formatted_code = textwrap.dedent("""\ + val_first = 1 + val_second += 2 + # comment + val_third = 3 + """) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + def testAlignAssignDefLineInbetween(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_assignment: true}')) + unformatted_code = textwrap.dedent("""\ + val_first = 1 + val_second += 2 + def fun(): + a = 'example' + abc = '' + val_third = 3 + """) + expected_formatted_code = textwrap.dedent("""\ + val_first = 1 + val_second += 2 + + + def fun(): + a = 'example' + abc = '' + + + val_third = 3 + """) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + 
self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + def testAlignAssignObjectWithNewLineInbetween(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_assignment: true}')) + unformatted_code = textwrap.dedent("""\ + val_first = 1 + val_second += 2 + object = { + entry1:1, + entry2:2, + entry3:3, + } + val_third = 3 + """) + expected_formatted_code = textwrap.dedent("""\ + val_first = 1 + val_second += 2 + object = { + entry1: 1, + entry2: 2, + entry3: 3, + } + val_third = 3 + """) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + def testAlignAssignWithOnlyOneAssignmentLine(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_assignment: true}')) + unformatted_code = textwrap.dedent("""\ + val_first = 1 + """) + expected_formatted_code = unformatted_code + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + ########## for Align_ArgAssign()########### + def testAlignArgAssignTypedName(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_argument_assignment: true,' + 'dedent_closing_brackets: true}')) + unformatted_code = textwrap.dedent("""\ +def f1( + self, + *, + app_name:str="", + server=None, + main_app=None, + db: Optional[NemDB]=None, + root: Optional[str]="", + conf: Optional[dict]={1, 2}, + ini_section: str="" +): pass +""") + expected_formatted_code = textwrap.dedent("""\ +def f1( + self, + *, + app_name: str = "", + server =None, + main_app =None, + db: Optional[NemDB] = None, + root: Optional[str] = "", + conf: Optional[dict] = {1, 2}, + ini_section: str = "" +): + pass +""") + llines = 
yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + # test both object/nested argument list with newlines and + # argument without assignment in between + def testAlignArgAssignNestedArglistInBetween(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_argument_assignment: true,' + 'dedent_closing_brackets: true}')) + unformatted_code = textwrap.dedent("""\ +arglist = test( + first_argument='', + second_argument=fun( + self, role=3, username_id, client_id=1, very_long_long_long_long_long='' + ), + third_argument=3, + fourth_argument=4 +) +""") + expected_formatted_code = textwrap.dedent("""\ +arglist = test( + first_argument ='', + second_argument =fun( + self, + role =3, + username_id, + client_id =1, + very_long_long_long_long_long ='' + ), + third_argument =3, + fourth_argument =4 +) +""") + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + # start new alignment after comment line in between + def testAlignArgAssignCommentLineInBetween(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_argument_assignment: true,' + 'dedent_closing_brackets: true,' + 'new_alignment_after_commentline:true}')) + unformatted_code = textwrap.dedent("""\ +arglist = test( + client_id=0, + username_id=1, + # comment + user_name='xxxxxxxxxxxxxxxxxxxxx' +) +""") + expected_formatted_code = textwrap.dedent("""\ +arglist = test( + client_id =0, + username_id =1, + # comment + user_name ='xxxxxxxxxxxxxxxxxxxxx' +) +""") + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + def 
testAlignArgAssignWithOnlyFirstArgOnNewline(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_argument_assignment: true,' + 'dedent_closing_brackets: true}')) + unformatted_code = textwrap.dedent("""\ +arglist = self.get_data_from_excelsheet( + client_id=0, username_id=1, user_name='xxxxxxxxxxxxxxxxxxxx' +) +""") + expected_formatted_code = unformatted_code + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + def testAlignArgAssignArgumentsCanFitInOneLine(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_argument_assignment: true,' + 'dedent_closing_brackets: true}')) + unformatted_code = textwrap.dedent("""\ +def function( + first_argument_xxxxxx =(0,), + second_argument =None +) -> None: + pass +""") + expected_formatted_code = textwrap.dedent("""\ +def function(first_argument_xxxxxx=(0,), second_argument=None) -> None: + pass +""") + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + ########for align dictionary colons######### + def testAlignDictColonNestedDictInBetween(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_dict_colon: true,' + 'dedent_closing_brackets: true}')) + unformatted_code = textwrap.dedent("""\ +fields = [{"type": "text","required": True,"html": {"attr": 'style="width: 250px;" maxlength="30"',"page": 0,}, + "list" : [1, 2, 3, 4]}] +""") + expected_formatted_code = textwrap.dedent("""\ +fields = [ + { + "type" : "text", + "required" : True, + "html" : { + "attr" : 'style="width: 250px;" maxlength="30"', + "page" : 0, + }, + "list" : [1, 2, 3, 4] + } +] +""") + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + 
reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + def testAlignDictColonCommentLineInBetween(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_dict_colon: true,' + 'dedent_closing_brackets: true,' + 'new_alignment_after_commentline: true}')) + unformatted_code = textwrap.dedent("""\ +fields = [{ + "type": "text", + "required": True, + # comment + "list": [1, 2, 3, 4]}] +""") + expected_formatted_code = textwrap.dedent("""\ +fields = [{ + "type" : "text", + "required" : True, + # comment + "list" : [1, 2, 3, 4] +}] +""") + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + def testAlignDictColonLargerExistingSpacesBefore(self): + try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_dict_colon: true,' + 'dedent_closing_brackets: true}')) + unformatted_code = textwrap.dedent("""\ +fields = [{ + "type" : "text", + "required" : True, + "list" : [1, 2, 3, 4], +}] +""") + expected_formatted_code = textwrap.dedent("""\ +fields = [{ + "type" : "text", + "required" : True, + "list" : [1, 2, 3, 4], +}] +""") + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) + + + + + if __name__ == '__main__': unittest.main() diff --git a/yapftests/subtype_assigner_test.py b/yapftests/subtype_assigner_test.py index 8616169c9..c69d13e4e 100644 --- a/yapftests/subtype_assigner_test.py +++ b/yapftests/subtype_assigner_test.py @@ -123,12 +123,117 @@ def testFuncCallWithDefaultAssign(self): subtypes.NONE, subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, }), - ('=', {subtypes.DEFAULT_OR_NAMED_ASSIGN}), - ("'hello world'", {subtypes.NONE}), + ('=', { + subtypes.DEFAULT_OR_NAMED_ASSIGN + }), + ("'hello world'", { + subtypes.NONE 
+ }), + (')', {subtypes.NONE}), + ], + ]) + + #----test comment subtype inside the argument list---- + def testCommentSubtypesInsideArglist(self): + code = textwrap.dedent("""\ + foo( + # comment + x, + a='hello world') + """) + llines = yapf_test_helper.ParseAndUnwrap(code) + self._CheckFormatTokenSubtypes(llines, [ + [ + ('foo', {subtypes.NONE}), + ('(', {subtypes.NONE}), + ('# comment', {subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST}), + ('x', { + subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + (',', {subtypes.NONE}), + ('a', { + subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + ('=', { + subtypes.DEFAULT_OR_NAMED_ASSIGN + }), + ("'hello world'", { + subtypes.NONE + }), + (')', {subtypes.NONE}), + ], + ]) + + # ----test typed arguments subtypes------ + def testTypedArgumentsInsideArglist(self): + code = textwrap.dedent("""\ +def foo( + self, + preprocess: Callable[[str], str] = identity + ): pass +""") + llines = yapf_test_helper.ParseAndUnwrap(code) + self._CheckFormatTokenSubtypes(llines, [ + [ + ('def', {subtypes.NONE}), + ('foo', {subtypes.FUNC_DEF}), + ('(', {subtypes.NONE}), + ('self', {subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + subtypes.PARAMETER_START, + subtypes.PARAMETER_STOP + }), + (',', {subtypes.NONE}), + ('preprocess', { + subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + subtypes.PARAMETER_START, + subtypes.TYPED_NAME_ARG_LIST + }), + (':', { + subtypes.TYPED_NAME, + subtypes.TYPED_NAME_ARG_LIST, + }), + ('Callable', {subtypes.TYPED_NAME_ARG_LIST + }), + ('[', { + subtypes.SUBSCRIPT_BRACKET, + subtypes.TYPED_NAME_ARG_LIST + }), + ('[', {subtypes.TYPED_NAME_ARG_LIST + }), + ('str', {subtypes.TYPED_NAME_ARG_LIST + }), + (']', {subtypes.TYPED_NAME_ARG_LIST + }), + (',', {subtypes.TYPED_NAME_ARG_LIST + }), + ('str', {subtypes.TYPED_NAME_ARG_LIST + }), + (']', { + subtypes.SUBSCRIPT_BRACKET, + subtypes.TYPED_NAME_ARG_LIST + }), + ('=', { + 
subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + subtypes.TYPED_NAME + }), + ('identity', { + subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + subtypes.PARAMETER_STOP + }), (')', {subtypes.NONE}), + (':', {subtypes.NONE})], + [('pass', {subtypes.NONE}), ], ]) + def testSetComprehension(self): code = textwrap.dedent("""\ def foo(strs): diff --git a/yapftests/yapf_test.py b/yapftests/yapf_test.py index 2330f4e18..4ddaf5c8a 100644 --- a/yapftests/yapf_test.py +++ b/yapftests/yapf_test.py @@ -26,7 +26,7 @@ from lib2to3.pgen2 import tokenize -from yapf.yapflib import errors +from yapf.yapflib import errors, reformatter from yapf.yapflib import py3compat from yapf.yapflib import style from yapf.yapflib import yapf_api From 7a7de673b6fb23ef7097e320f030f213c6d85fac Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Tue, 30 Aug 2022 11:59:59 +0200 Subject: [PATCH 02/11] update the changelog and readme --- CHANGELOG | 4 ++ CONTRIBUTORS | 1 + README.rst | 51 ++++++++++++++++++++++++ yapf/pytree/comment_splicer.py | 1 + yapf/pytree/subtype_assigner.py | 9 +---- yapf/yapflib/format_token.py | 11 ----- yapf/yapflib/reformatter.py | 1 - yapf/yapflib/style.py | 2 +- yapf/yapflib/subtypes.py | 3 +- yapftests/reformatter_basic_test.py | 19 ++++----- yapftests/subtype_assigner_test.py | 62 +++++++++-------------------- yapftests/yapf_test.py | 2 +- 12 files changed, 88 insertions(+), 78 deletions(-) diff --git a/CHANGELOG b/CHANGELOG index 5ac3e6329..9004d6da0 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -2,6 +2,10 @@ # All notable changes to this project will be documented in this file. # This project adheres to [Semantic Versioning](http://semver.org/). +## [0.41.1] 2022-08-30 +### Added +- Add 4 new knobs to align assignment operators and dictionary colons. They are align_assignment, align_argument_assignment, align_dict_colon and new_alignment_after_commentline. 
+ ## [0.40.0] UNRELEASED ### Added - Add a new Python parser to generate logical lines. diff --git a/CONTRIBUTORS b/CONTRIBUTORS index 054ef2652..1852a9133 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -15,3 +15,4 @@ Sam Clegg Łukasz Langa Oleg Butuzov Mauricio Herrera Cuadra +Xiao Wang \ No newline at end of file diff --git a/README.rst b/README.rst index 5734a5d76..39b368c7d 100644 --- a/README.rst +++ b/README.rst @@ -390,6 +390,57 @@ Options:: Knobs ===== +``ALIGN_ASSIGNMENT`` + Align assignment or augmented assignment operators. + If there is a blank line or a newline comment or a multiline object + (e.g. a dictionary, a list, a function call) in between, + it will start new block alignment. Lines in the same block have the same + indentation level. + + .. code-block:: python + a = 1 + abc = 2 + if condition == None: + var += '' + var_long -= 4 + b = 3 + bc = 4 + +``ALIGN_ARGUMENT_ASSIGNMENT`` + Align assignment operators in the argument list if they are all split on newlines. + Arguments without assignment in between will initiate new block alignment calulation; + for example, a comment line. + Multiline objects in between will also initiate a new alignment block. + + .. code-block:: python + rglist = test( + var_first = 0, + var_second = '', + var_dict = { + "key_1" : '', + "key_2" : 2, + "key_3" : True, + }, + var_third = 1, + var_very_long = None ) + +``ALIGN_DICT_COLON`` + Align the colons in the dictionary if all entries in dictionay are split on newlines + or 'EACH_DICT_ENTRY_ON_SEPERATE_LINE' is set True. + A commentline or multi-line object in between will start new alignment block. + .. code-block:: python + fields = + { + "field" : "ediid", + "type" : "text", + # key: value + "required" : True, + } + +``NEW_ALIGNMENT_AFTER_COMMENTLINE`` + Make it optional to whether start a new alignmetn block for assignment + alignment and colon alignment after a comment line. 
+ ``ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT`` Align closing bracket with visual indentation. diff --git a/yapf/pytree/comment_splicer.py b/yapf/pytree/comment_splicer.py index 9e8f02c48..ae5ffe66f 100644 --- a/yapf/pytree/comment_splicer.py +++ b/yapf/pytree/comment_splicer.py @@ -42,6 +42,7 @@ def SpliceComments(tree): # This is a list because Python 2.x doesn't have 'nonlocal' :) prev_leaf = [None] _AnnotateIndents(tree) + def _VisitNodeRec(node): """Recursively visit each node to splice comments into the AST.""" # This loop may insert into node.children, so we'll iterate over a copy. diff --git a/yapf/pytree/subtype_assigner.py b/yapf/pytree/subtype_assigner.py index 03d7efe1a..5ebefc704 100644 --- a/yapf/pytree/subtype_assigner.py +++ b/yapf/pytree/subtype_assigner.py @@ -301,7 +301,6 @@ def Visit_typedargslist(self, node): # pylint: disable=invalid-name for i in range(1, len(node.children)): prev_child = node.children[i - 1] child = node.children[i] - if prev_child.type == grammar_token.COMMA: _AppendFirstLeafTokenSubtype(child, subtypes.PARAMETER_START) elif child.type == grammar_token.COMMA: @@ -311,7 +310,7 @@ def Visit_typedargslist(self, node): # pylint: disable=invalid-name tname = True _SetArgListSubtype(child, subtypes.TYPED_NAME, subtypes.TYPED_NAME_ARG_LIST) - # NOTE Every element of the tynamme argument list + # NOTE Every element of the tynamme argument # should have this list type _AppendSubtypeRec(child, subtypes.TYPED_NAME_ARG_LIST) @@ -389,25 +388,21 @@ def HasSubtype(node): for child in node.children: node_name = pytree_utils.NodeName(child) - #TODO exclude it if the first leaf is a comment in appendfirstleaftokensubtype if node_name not in {'atom', 'COMMA'}: _AppendFirstLeafTokenSubtype(child, list_subtype) - def _AppendTokenSubtype(node, subtype): """Append the token's subtype only if it's not already set.""" pytree_utils.AppendNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE, subtype) -#TODO should exclude comment child to all 
Appendsubtypes functions + def _AppendFirstLeafTokenSubtype(node, subtype): """Append the first leaf token's subtypes.""" - #TODO exclude the comment leaf if isinstance(node, pytree.Leaf): _AppendTokenSubtype(node, subtype) return - _AppendFirstLeafTokenSubtype(node.children[0], subtype) diff --git a/yapf/yapflib/format_token.py b/yapf/yapflib/format_token.py index f8658f772..1618b35af 100644 --- a/yapf/yapflib/format_token.py +++ b/yapf/yapflib/format_token.py @@ -14,7 +14,6 @@ """Enhanced token information for formatting.""" import keyword -from operator import sub import re from lib2to3.pgen2 import token @@ -125,7 +124,6 @@ def __init__(self, node, name): self.subtypes = {subtypes.NONE} if not stypes else stypes self.is_pseudo = hasattr(node, 'is_pseudo') and node.is_pseudo - @property def formatted_whitespace_prefix(self): if style.Get('INDENT_BLANK_LINES'): @@ -325,7 +323,6 @@ def is_copybara_comment(self): return self.is_comment and re.match( r'#.*\bcopybara:\s*(strip|insert|replace)', self.value) - @property def is_assign(self): return subtypes.ASSIGN_OPERATOR in self.subtypes @@ -382,7 +379,6 @@ def is_argname(self): return False - @property def is_argname_start(self): # return true if it's the start of every argument entry @@ -404,10 +400,3 @@ def is_argname_start(self): or subtypes.TYPED_NAME_ARG_LIST in self.subtypes or subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in self.subtypes)) ) - - - - - - - diff --git a/yapf/yapflib/reformatter.py b/yapf/yapflib/reformatter.py index 92aaa950e..4cdaf165c 100644 --- a/yapf/yapflib/reformatter.py +++ b/yapf/yapflib/reformatter.py @@ -103,7 +103,6 @@ def Reformat(llines, verify=False, lines=None): final_lines.append(lline) prev_line = lline - if style.Get('ALIGN_ASSIGNMENT'): _AlignAssignment(final_lines) if (style.Get('EACH_DICT_ENTRY_ON_SEPARATE_LINE') diff --git a/yapf/yapflib/style.py b/yapf/yapflib/style.py index a4c54b5f8..d9e9e5e9e 100644 --- a/yapf/yapflib/style.py +++ b/yapf/yapflib/style.py @@ -66,7 +66,7 @@ 
def SetGlobalStyle(style): ALIGN_DICT_COLON=textwrap.dedent("""\ Align the colons in the dictionary if all entries in dictionay are split on newlines. - and 'EACH_DICT_ENTRY_ON_SEPERATE_LINE' set True. + or 'EACH_DICT_ENTRY_ON_SEPERATE_LINE' is set True. """), NEW_ALIGNMENT_AFTER_COMMENTLINE=textwrap.dedent("""\ Start new assignment or colon alignment when there is a newline comment in between."""), diff --git a/yapf/yapflib/subtypes.py b/yapf/yapflib/subtypes.py index 21ca213ad..2c0431853 100644 --- a/yapf/yapflib/subtypes.py +++ b/yapf/yapflib/subtypes.py @@ -37,5 +37,4 @@ TYPED_NAME_ARG_LIST = 21 SIMPLE_EXPRESSION = 22 PARAMETER_START = 23 -PARAMETER_STOP = 24 - +PARAMETER_STOP = 24 \ No newline at end of file diff --git a/yapftests/reformatter_basic_test.py b/yapftests/reformatter_basic_test.py index 0c68c8525..3371ac339 100644 --- a/yapftests/reformatter_basic_test.py +++ b/yapftests/reformatter_basic_test.py @@ -1583,20 +1583,18 @@ def testNoSplittingWithinSubscriptList(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - def testExcessCharacters(self): code = textwrap.dedent("""\ - class foo: + class foo: - def bar(self): - self.write(s=[ - '%s%s %s' % ('many of really', 'long strings', '+ just makes up 81') - ]) - """) # noqa + def bar(self): + self.write(s=[ + '%s%s %s' % ('many of really', 'long strings', '+ just makes up 81') + ]) + """) # noqa llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ def _(): if True: @@ -2865,8 +2863,6 @@ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None def function(first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass """) # noqa - # if dedent closing brackets and Align argAssign are true, there will be - # spaces before the argassign expected_formatted_code = textwrap.dedent("""\ def function( 
first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None @@ -2883,8 +2879,7 @@ def function( try: style.SetGlobalStyle( style.CreateStyleFromConfig('{based_on_style: yapf,' - ' dedent_closing_brackets: True,' - ' align_argument_assignment: False}')) + ' dedent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, diff --git a/yapftests/subtype_assigner_test.py b/yapftests/subtype_assigner_test.py index c69d13e4e..97f9cd3ac 100644 --- a/yapftests/subtype_assigner_test.py +++ b/yapftests/subtype_assigner_test.py @@ -123,12 +123,8 @@ def testFuncCallWithDefaultAssign(self): subtypes.NONE, subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, }), - ('=', { - subtypes.DEFAULT_OR_NAMED_ASSIGN - }), - ("'hello world'", { - subtypes.NONE - }), + ('=', {subtypes.DEFAULT_OR_NAMED_ASSIGN}), + ("'hello world'", {subtypes.NONE}), (')', {subtypes.NONE}), ], ]) @@ -150,19 +146,13 @@ def testCommentSubtypesInsideArglist(self): subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST}), ('x', { subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST}), (',', {subtypes.NONE}), ('a', { subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - ('=', { - subtypes.DEFAULT_OR_NAMED_ASSIGN - }), - ("'hello world'", { - subtypes.NONE - }), + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST}), + ('=', {subtypes.DEFAULT_OR_NAMED_ASSIGN}), + ("'hello world'", {subtypes.NONE}), (')', {subtypes.NONE}), ], ]) @@ -184,56 +174,42 @@ def foo( ('self', {subtypes.NONE, subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, subtypes.PARAMETER_START, - subtypes.PARAMETER_STOP - }), + subtypes.PARAMETER_STOP}), (',', {subtypes.NONE}), ('preprocess', { subtypes.NONE, subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, subtypes.PARAMETER_START, - subtypes.TYPED_NAME_ARG_LIST - }), + subtypes.TYPED_NAME_ARG_LIST}), (':', { subtypes.TYPED_NAME, - subtypes.TYPED_NAME_ARG_LIST, - }), - ('Callable', 
{subtypes.TYPED_NAME_ARG_LIST - }), + subtypes.TYPED_NAME_ARG_LIST}), + ('Callable', {subtypes.TYPED_NAME_ARG_LIST}), ('[', { subtypes.SUBSCRIPT_BRACKET, - subtypes.TYPED_NAME_ARG_LIST - }), - ('[', {subtypes.TYPED_NAME_ARG_LIST - }), - ('str', {subtypes.TYPED_NAME_ARG_LIST - }), - (']', {subtypes.TYPED_NAME_ARG_LIST - }), - (',', {subtypes.TYPED_NAME_ARG_LIST - }), - ('str', {subtypes.TYPED_NAME_ARG_LIST - }), + subtypes.TYPED_NAME_ARG_LIST}), + ('[', {subtypes.TYPED_NAME_ARG_LIST}), + ('str', {subtypes.TYPED_NAME_ARG_LIST}), + (']', {subtypes.TYPED_NAME_ARG_LIST}), + (',', {subtypes.TYPED_NAME_ARG_LIST}), + ('str', {subtypes.TYPED_NAME_ARG_LIST}), (']', { subtypes.SUBSCRIPT_BRACKET, - subtypes.TYPED_NAME_ARG_LIST - }), + subtypes.TYPED_NAME_ARG_LIST}), ('=', { subtypes.DEFAULT_OR_NAMED_ASSIGN, subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - subtypes.TYPED_NAME - }), + subtypes.TYPED_NAME}), ('identity', { subtypes.NONE, subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - subtypes.PARAMETER_STOP - }), + subtypes.PARAMETER_STOP}), (')', {subtypes.NONE}), (':', {subtypes.NONE})], [('pass', {subtypes.NONE}), ], ]) - def testSetComprehension(self): code = textwrap.dedent("""\ def foo(strs): diff --git a/yapftests/yapf_test.py b/yapftests/yapf_test.py index 4ddaf5c8a..2330f4e18 100644 --- a/yapftests/yapf_test.py +++ b/yapftests/yapf_test.py @@ -26,7 +26,7 @@ from lib2to3.pgen2 import tokenize -from yapf.yapflib import errors, reformatter +from yapf.yapflib import errors from yapf.yapflib import py3compat from yapf.yapflib import style from yapf.yapflib import yapf_api From 94cca9bb5e10bd92999367681cfabf0d8f5d71e3 Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Tue, 30 Aug 2022 12:08:43 +0200 Subject: [PATCH 03/11] small fixes --- README.rst | 2 +- yapf/pytree/subtype_assigner.py | 4 ++-- yapf/yapflib/subtypes.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index 39b368c7d..845ea441d 100644 --- a/README.rst +++ b/README.rst @@ 
-438,7 +438,7 @@ Knobs } ``NEW_ALIGNMENT_AFTER_COMMENTLINE`` - Make it optional to whether start a new alignmetn block for assignment + Make it optional to start a new alignmetn block for assignment alignment and colon alignment after a comment line. ``ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT`` diff --git a/yapf/pytree/subtype_assigner.py b/yapf/pytree/subtype_assigner.py index 5ebefc704..0ee247a82 100644 --- a/yapf/pytree/subtype_assigner.py +++ b/yapf/pytree/subtype_assigner.py @@ -401,8 +401,8 @@ def _AppendTokenSubtype(node, subtype): def _AppendFirstLeafTokenSubtype(node, subtype): """Append the first leaf token's subtypes.""" if isinstance(node, pytree.Leaf): - _AppendTokenSubtype(node, subtype) - return + _AppendTokenSubtype(node, subtype) + return _AppendFirstLeafTokenSubtype(node.children[0], subtype) diff --git a/yapf/yapflib/subtypes.py b/yapf/yapflib/subtypes.py index 2c0431853..b4b7efe75 100644 --- a/yapf/yapflib/subtypes.py +++ b/yapf/yapflib/subtypes.py @@ -37,4 +37,4 @@ TYPED_NAME_ARG_LIST = 21 SIMPLE_EXPRESSION = 22 PARAMETER_START = 23 -PARAMETER_STOP = 24 \ No newline at end of file +PARAMETER_STOP = 24 From 2ecde17df8c6d400d8bb46c16c3217422c7916b6 Mon Sep 17 00:00:00 2001 From: lizawang <56673986+lizawang@users.noreply.github.com> Date: Tue, 30 Aug 2022 13:32:06 +0200 Subject: [PATCH 04/11] Update README.rst --- README.rst | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/README.rst b/README.rst index 845ea441d..71e4497be 100644 --- a/README.rst +++ b/README.rst @@ -398,6 +398,7 @@ Knobs indentation level. .. code-block:: python + a = 1 abc = 2 if condition == None: @@ -413,22 +414,25 @@ Knobs Multiline objects in between will also initiate a new alignment block. .. 
code-block:: python - rglist = test( - var_first = 0, - var_second = '', - var_dict = { - "key_1" : '', - "key_2" : 2, - "key_3" : True, - }, - var_third = 1, - var_very_long = None ) + + rglist = test( + var_first = 0, + var_second = '', + var_dict = { + "key_1" : '', + "key_2" : 2, + "key_3" : True, + }, + var_third = 1, + var_very_long = None ) ``ALIGN_DICT_COLON`` Align the colons in the dictionary if all entries in dictionay are split on newlines or 'EACH_DICT_ENTRY_ON_SEPERATE_LINE' is set True. A commentline or multi-line object in between will start new alignment block. + .. code-block:: python + fields = { "field" : "ediid", From e2181d87c945a88c9314c1cf2cf5dc8b498828d9 Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Tue, 30 Aug 2022 13:57:19 +0200 Subject: [PATCH 05/11] fix readme --- README.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.rst b/README.rst index 845ea441d..c54801650 100644 --- a/README.rst +++ b/README.rst @@ -398,6 +398,7 @@ Knobs indentation level. .. code-block:: python + a = 1 abc = 2 if condition == None: @@ -413,6 +414,7 @@ Knobs Multiline objects in between will also initiate a new alignment block. .. code-block:: python + rglist = test( var_first = 0, var_second = '', @@ -428,7 +430,9 @@ Knobs Align the colons in the dictionary if all entries in dictionay are split on newlines or 'EACH_DICT_ENTRY_ON_SEPERATE_LINE' is set True. A commentline or multi-line object in between will start new alignment block. + .. 
code-block:: python + fields = { "field" : "ediid", From 82f1326b8ff973bc1a91cc5ac7f40679b8b355e1 Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Tue, 13 Sep 2022 18:12:28 +0200 Subject: [PATCH 06/11] fix align_dict_colon --- yapf/yapflib/format_token.py | 1 + yapf/yapflib/reformatter.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/yapf/yapflib/format_token.py b/yapf/yapflib/format_token.py index 1618b35af..070987851 100644 --- a/yapf/yapflib/format_token.py +++ b/yapf/yapflib/format_token.py @@ -382,6 +382,7 @@ def is_argname(self): @property def is_argname_start(self): # return true if it's the start of every argument entry + previous_subtypes = {0} if self.previous_token: previous_subtypes = self.previous_token.subtypes diff --git a/yapf/yapflib/reformatter.py b/yapf/yapflib/reformatter.py index 4cdaf165c..e63917134 100644 --- a/yapf/yapflib/reformatter.py +++ b/yapf/yapflib/reformatter.py @@ -858,7 +858,7 @@ def _AlignDictColon(final_lines): # check if the key has multiple tokens and # get the first key token in this key key_token = token.previous_token - while key_token.previous_token.is_dict_key: + while key_token.is_dict_key and not key_token.is_dict_key_start: key_token = key_token.previous_token key_column = len(key_token.formatted_whitespace_prefix.lstrip('\n')) From 6c273dc210d2514089fa53e1fd526979cd2d599e Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Thu, 15 Sep 2022 10:46:49 +0200 Subject: [PATCH 07/11] add test for the case when the dict starts with a comment --- yapf/yapflib/reformatter.py | 4 +- yapftests/reformatter_basic_test.py | 87 ++++++++++++++++------------- 2 files changed, 50 insertions(+), 41 deletions(-) diff --git a/yapf/yapflib/reformatter.py b/yapf/yapflib/reformatter.py index e63917134..8f8a103f8 100644 --- a/yapf/yapflib/reformatter.py +++ b/yapf/yapflib/reformatter.py @@ -658,7 +658,7 @@ def _AlignArgAssign(final_lines): index += 1 if index < len(line_tokens): line_tok = line_tokens[index] - # when the 
matching closing bracket never found + # when the matching closing bracket is never found # due to edge cases where the closing bracket # is not indented or dedented else: @@ -816,7 +816,7 @@ def _AlignDictColon(final_lines): index += 1 if index < len(line_tokens): line_tok = line_tokens[index] - # when the matching closing bracket never found + # when the matching closing bracket is never found # due to edge cases where the closing bracket # is not indented or dedented, e.g. ']}', with another bracket before else: diff --git a/yapftests/reformatter_basic_test.py b/yapftests/reformatter_basic_test.py index 3371ac339..0eeeefdce 100644 --- a/yapftests/reformatter_basic_test.py +++ b/yapftests/reformatter_basic_test.py @@ -3285,12 +3285,12 @@ def testAlignAssignWithOnlyOneAssignmentLine(self): finally: style.SetGlobalStyle(style.CreateYapfStyle()) - ########## for Align_ArgAssign()########### + ########## for Align_ArgAssign()########### def testAlignArgAssignTypedName(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_argument_assignment: true,' - 'dedent_closing_brackets: true}')) + 'split_before_first_argument: true}')) unformatted_code = textwrap.dedent("""\ def f1( self, @@ -3314,8 +3314,7 @@ def f1( db: Optional[NemDB] = None, root: Optional[str] = "", conf: Optional[dict] = {1, 2}, - ini_section: str = "" -): + ini_section: str = ""): pass """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) @@ -3329,13 +3328,12 @@ def f1( def testAlignArgAssignNestedArglistInBetween(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_argument_assignment: true,' - 'dedent_closing_brackets: true}')) + style.CreateStyleFromConfig('{align_argument_assignment: true}')) unformatted_code = textwrap.dedent("""\ arglist = test( first_argument='', second_argument=fun( - self, role=3, username_id, client_id=1, very_long_long_long_long_long='' + self, role=None, client_name='', client_id=1, very_long_long_long_long_long='' ), 
third_argument=3, fourth_argument=4 @@ -3346,14 +3344,12 @@ def testAlignArgAssignNestedArglistInBetween(self): first_argument ='', second_argument =fun( self, - role =3, - username_id, + role =None, + client_name ='', client_id =1, - very_long_long_long_long_long ='' - ), + very_long_long_long_long_long =''), third_argument =3, - fourth_argument =4 -) + fourth_argument =4) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, @@ -3366,7 +3362,6 @@ def testAlignArgAssignCommentLineInBetween(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_argument_assignment: true,' - 'dedent_closing_brackets: true,' 'new_alignment_after_commentline:true}')) unformatted_code = textwrap.dedent("""\ arglist = test( @@ -3381,8 +3376,7 @@ def testAlignArgAssignCommentLineInBetween(self): client_id =0, username_id =1, # comment - user_name ='xxxxxxxxxxxxxxxxxxxxx' -) + user_name ='xxxxxxxxxxxxxxxxxxxxx') """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, @@ -3393,12 +3387,10 @@ def testAlignArgAssignCommentLineInBetween(self): def testAlignArgAssignWithOnlyFirstArgOnNewline(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_argument_assignment: true,' - 'dedent_closing_brackets: true}')) + style.CreateStyleFromConfig('{align_argument_assignment: true}')) unformatted_code = textwrap.dedent("""\ arglist = self.get_data_from_excelsheet( - client_id=0, username_id=1, user_name='xxxxxxxxxxxxxxxxxxxx' -) + client_id=0, username_id=1, user_name='xxxxxxxxxxxxxxxxxxxx') """) expected_formatted_code = unformatted_code llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) @@ -3410,8 +3402,7 @@ def testAlignArgAssignWithOnlyFirstArgOnNewline(self): def testAlignArgAssignArgumentsCanFitInOneLine(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_argument_assignment: true,' - 'dedent_closing_brackets: true}')) + 
style.CreateStyleFromConfig('{align_argument_assignment: true}')) unformatted_code = textwrap.dedent("""\ def function( first_argument_xxxxxx =(0,), @@ -3433,24 +3424,21 @@ def function(first_argument_xxxxxx=(0,), second_argument=None) -> None: def testAlignDictColonNestedDictInBetween(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_dict_colon: true,' - 'dedent_closing_brackets: true}')) + style.CreateStyleFromConfig('{align_dict_colon: true}')) unformatted_code = textwrap.dedent("""\ fields = [{"type": "text","required": True,"html": {"attr": 'style="width: 250px;" maxlength="30"',"page": 0,}, "list" : [1, 2, 3, 4]}] """) expected_formatted_code = textwrap.dedent("""\ -fields = [ - { - "type" : "text", - "required" : True, - "html" : { - "attr" : 'style="width: 250px;" maxlength="30"', - "page" : 0, - }, - "list" : [1, 2, 3, 4] - } -] +fields = [{ + "type" : "text", + "required" : True, + "html" : { + "attr" : 'style="width: 250px;" maxlength="30"', + "page" : 0, + }, + "list" : [1, 2, 3, 4] +}] """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, @@ -3462,7 +3450,6 @@ def testAlignDictColonCommentLineInBetween(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_dict_colon: true,' - 'dedent_closing_brackets: true,' 'new_alignment_after_commentline: true}')) unformatted_code = textwrap.dedent("""\ fields = [{ @@ -3488,8 +3475,7 @@ def testAlignDictColonCommentLineInBetween(self): def testAlignDictColonLargerExistingSpacesBefore(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_dict_colon: true,' - 'dedent_closing_brackets: true}')) + style.CreateStyleFromConfig('{align_dict_colon: true}')) unformatted_code = textwrap.dedent("""\ fields = [{ "type" : "text", @@ -3510,7 +3496,30 @@ def testAlignDictColonLargerExistingSpacesBefore(self): finally: style.SetGlobalStyle(style.CreateYapfStyle()) - + def testAlignDictColonCommentAfterOpenBracket(self): + 
try: + style.SetGlobalStyle( + style.CreateStyleFromConfig('{align_dict_colon: true}')) + unformatted_code = textwrap.dedent("""\ +fields = [{ + # comment + "type": "text", + "required": True, + "list": [1, 2, 3, 4]}] +""") + expected_formatted_code = textwrap.dedent("""\ +fields = [{ + # comment + "type" : "text", + "required" : True, + "list" : [1, 2, 3, 4] +}] +""") + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) + finally: + style.SetGlobalStyle(style.CreateYapfStyle()) From db002172d591b064f53b62e4123c520745f12043 Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Mon, 3 Oct 2022 14:46:26 +0200 Subject: [PATCH 08/11] branch for assignment alignment --- yapf/__init__.py | 536 +++--- yapf/pyparser/pyparser.py | 143 +- yapf/pyparser/pyparser_utils.py | 92 +- yapf/pyparser/split_penalty_visitor.py | 1776 ++++++++++---------- yapf/pytree/blank_line_calculator.py | 243 +-- yapf/pytree/comment_splicer.py | 526 +++--- yapf/pytree/continuation_splicer.py | 40 +- yapf/pytree/pytree_unwrapper.py | 537 +++--- yapf/pytree/pytree_utils.py | 285 ++-- yapf/pytree/pytree_visitor.py | 90 +- yapf/pytree/split_penalty.py | 1145 ++++++------- yapf/pytree/subtype_assigner.py | 867 +++++----- yapf/third_party/yapf_diff/yapf_diff.py | 197 +-- yapf/yapflib/errors.py | 23 +- yapf/yapflib/file_resources.py | 412 +++-- yapf/yapflib/format_decision_state.py | 2056 ++++++++++++----------- yapf/yapflib/format_token.py | 554 +++--- yapf/yapflib/identify_container.py | 60 +- yapf/yapflib/line_joiner.py | 84 +- yapf/yapflib/logical_line.py | 1146 ++++++------- yapf/yapflib/object_state.py | 303 ++-- yapf/yapflib/py3compat.py | 158 +- yapf/yapflib/reformatter.py | 1718 ++++++++----------- yapf/yapflib/split_penalty.py | 16 +- yapf/yapflib/style.py | 1043 ++++++------ yapf/yapflib/subtypes.py | 48 +- yapf/yapflib/verifier.py | 102 +- yapf/yapflib/yapf_api.py | 369 ++-- yapftests/format_token_test.py | 31 - 
yapftests/reformatter_basic_test.py | 243 +-- yapftests/subtype_assigner_test.py | 81 - 31 files changed, 7155 insertions(+), 7769 deletions(-) diff --git a/yapf/__init__.py b/yapf/__init__.py index 94e445b59..e8825c1cb 100644 --- a/yapf/__init__.py +++ b/yapf/__init__.py @@ -41,8 +41,8 @@ __version__ = '0.32.0' -def main(argv): - """Main program. +def main( argv ): + """Main program. Arguments: argv: command-line arguments, such as sys.argv (including the program name @@ -55,114 +55,116 @@ def main(argv): Raises: YapfError: if none of the supplied files were Python files. """ - parser = _BuildParser() - args = parser.parse_args(argv[1:]) - style_config = args.style - - if args.style_help: - _PrintHelp(args) - return 0 - - if args.lines and len(args.files) > 1: - parser.error('cannot use -l/--lines with more than one file') - - lines = _GetLines(args.lines) if args.lines is not None else None - if not args.files: - # No arguments specified. Read code from stdin. - if args.in_place or args.diff: - parser.error('cannot use --in-place or --diff flags when reading ' - 'from stdin') - - original_source = [] - while True: - # Test that sys.stdin has the "closed" attribute. When using pytest, it - # co-opts sys.stdin, which makes the "main_tests.py" fail. This is gross. - if hasattr(sys.stdin, "closed") and sys.stdin.closed: - break - try: - # Use 'raw_input' instead of 'sys.stdin.read', because otherwise the - # user will need to hit 'Ctrl-D' more than once if they're inputting - # the program by hand. 'raw_input' throws an EOFError exception if - # 'Ctrl-D' is pressed, which makes it easy to bail out of this loop. 
- original_source.append(py3compat.raw_input()) - except EOFError: - break - except KeyboardInterrupt: - return 1 - - if style_config is None and not args.no_local_style: - style_config = file_resources.GetDefaultStyleForDir(os.getcwd()) - - source = [line.rstrip() for line in original_source] - source[0] = py3compat.removeBOM(source[0]) - - try: - reformatted_source, _ = yapf_api.FormatCode( - py3compat.unicode('\n'.join(source) + '\n'), - filename='', - style_config=style_config, - lines=lines, - verify=args.verify) - except errors.YapfError: - raise - except Exception as e: - raise errors.YapfError(errors.FormatErrorMsg(e)) - - file_resources.WriteReformattedCode('', reformatted_source) - return 0 - - # Get additional exclude patterns from ignorefile - exclude_patterns_from_ignore_file = file_resources.GetExcludePatternsForDir( - os.getcwd()) - - files = file_resources.GetCommandLineFiles(args.files, args.recursive, - (args.exclude or []) + - exclude_patterns_from_ignore_file) - if not files: - raise errors.YapfError('input filenames did not match any python files') - - changed = FormatFiles( - files, - lines, - style_config=args.style, - no_local_style=args.no_local_style, - in_place=args.in_place, - print_diff=args.diff, - verify=args.verify, - parallel=args.parallel, - quiet=args.quiet, - verbose=args.verbose) - return 1 if changed and (args.diff or args.quiet) else 0 - - -def _PrintHelp(args): - """Prints the help menu.""" - - if args.style is None and not args.no_local_style: - args.style = file_resources.GetDefaultStyleForDir(os.getcwd()) - style.SetGlobalStyle(style.CreateStyleFromConfig(args.style)) - print('[style]') - for option, docstring in sorted(style.Help().items()): - for line in docstring.splitlines(): - print('#', line and ' ' or '', line, sep='') - option_value = style.Get(option) - if isinstance(option_value, (set, list)): - option_value = ', '.join(map(str, option_value)) - print(option.lower(), '=', option_value, sep='') - print() - - -def 
FormatFiles(filenames, - lines, - style_config=None, - no_local_style=False, - in_place=False, - print_diff=False, - verify=False, - parallel=False, - quiet=False, - verbose=False): - """Format a list of files. + parser = _BuildParser() + args = parser.parse_args( argv[ 1 : ] ) + style_config = args.style + + if args.style_help: + _PrintHelp( args ) + return 0 + + if args.lines and len( args.files ) > 1: + parser.error( 'cannot use -l/--lines with more than one file' ) + + lines = _GetLines( args.lines ) if args.lines is not None else None + if not args.files: + # No arguments specified. Read code from stdin. + if args.in_place or args.diff: + parser.error( + 'cannot use --in-place or --diff flags when reading ' + 'from stdin' ) + + original_source = [] + while True: + # Test that sys.stdin has the "closed" attribute. When using pytest, it + # co-opts sys.stdin, which makes the "main_tests.py" fail. This is gross. + if hasattr( sys.stdin, "closed" ) and sys.stdin.closed: + break + try: + # Use 'raw_input' instead of 'sys.stdin.read', because otherwise the + # user will need to hit 'Ctrl-D' more than once if they're inputting + # the program by hand. 'raw_input' throws an EOFError exception if + # 'Ctrl-D' is pressed, which makes it easy to bail out of this loop. 
+ original_source.append( py3compat.raw_input() ) + except EOFError: + break + except KeyboardInterrupt: + return 1 + + if style_config is None and not args.no_local_style: + style_config = file_resources.GetDefaultStyleForDir( os.getcwd() ) + + source = [ line.rstrip() for line in original_source ] + source[ 0 ] = py3compat.removeBOM( source[ 0 ] ) + + try: + reformatted_source, _ = yapf_api.FormatCode( + py3compat.unicode( '\n'.join( source ) + '\n' ), + filename = '', + style_config = style_config, + lines = lines, + verify = args.verify ) + except errors.YapfError: + raise + except Exception as e: + raise errors.YapfError( errors.FormatErrorMsg( e ) ) + + file_resources.WriteReformattedCode( '', reformatted_source ) + return 0 + + # Get additional exclude patterns from ignorefile + exclude_patterns_from_ignore_file = file_resources.GetExcludePatternsForDir( + os.getcwd() ) + + files = file_resources.GetCommandLineFiles( + args.files, args.recursive, + ( args.exclude or [] ) + exclude_patterns_from_ignore_file ) + if not files: + raise errors.YapfError( 'input filenames did not match any python files' ) + + changed = FormatFiles( + files, + lines, + style_config = args.style, + no_local_style = args.no_local_style, + in_place = args.in_place, + print_diff = args.diff, + verify = args.verify, + parallel = args.parallel, + quiet = args.quiet, + verbose = args.verbose ) + return 1 if changed and ( args.diff or args.quiet ) else 0 + + +def _PrintHelp( args ): + """Prints the help menu.""" + + if args.style is None and not args.no_local_style: + args.style = file_resources.GetDefaultStyleForDir( os.getcwd() ) + style.SetGlobalStyle( style.CreateStyleFromConfig( args.style ) ) + print( '[style]' ) + for option, docstring in sorted( style.Help().items() ): + for line in docstring.splitlines(): + print( '#', line and ' ' or '', line, sep = '' ) + option_value = style.Get( option ) + if isinstance( option_value, ( set, list ) ): + option_value = ', '.join( map( str, 
option_value ) ) + print( option.lower(), '=', option_value, sep = '' ) + print() + + +def FormatFiles( + filenames, + lines, + style_config = None, + no_local_style = False, + in_place = False, + print_diff = False, + verify = False, + parallel = False, + quiet = False, + verbose = False ): + """Format a list of files. Arguments: filenames: (list of unicode) A list of files to reformat. @@ -184,65 +186,68 @@ def FormatFiles(filenames, Returns: True if the source code changed in any of the files being formatted. """ - changed = False - if parallel: - import multiprocessing # pylint: disable=g-import-not-at-top - import concurrent.futures # pylint: disable=g-import-not-at-top - workers = min(multiprocessing.cpu_count(), len(filenames)) - with concurrent.futures.ProcessPoolExecutor(workers) as executor: - future_formats = [ - executor.submit(_FormatFile, filename, lines, style_config, - no_local_style, in_place, print_diff, verify, quiet, - verbose) for filename in filenames - ] - for future in concurrent.futures.as_completed(future_formats): - changed |= future.result() - else: - for filename in filenames: - changed |= _FormatFile(filename, lines, style_config, no_local_style, - in_place, print_diff, verify, quiet, verbose) - return changed - - -def _FormatFile(filename, - lines, - style_config=None, - no_local_style=False, - in_place=False, - print_diff=False, - verify=False, - quiet=False, - verbose=False): - """Format an individual file.""" - if verbose and not quiet: - print('Reformatting %s' % filename) - - if style_config is None and not no_local_style: - style_config = file_resources.GetDefaultStyleForDir( - os.path.dirname(filename)) - - try: - reformatted_code, encoding, has_change = yapf_api.FormatFile( + changed = False + if parallel: + import multiprocessing # pylint: disable=g-import-not-at-top + import concurrent.futures # pylint: disable=g-import-not-at-top + workers = min( multiprocessing.cpu_count(), len( filenames ) ) + with 
concurrent.futures.ProcessPoolExecutor( workers ) as executor: + future_formats = [ + executor.submit( + _FormatFile, filename, lines, style_config, no_local_style, + in_place, print_diff, verify, quiet, verbose ) + for filename in filenames + ] + for future in concurrent.futures.as_completed( future_formats ): + changed |= future.result() + else: + for filename in filenames: + changed |= _FormatFile( + filename, lines, style_config, no_local_style, in_place, print_diff, + verify, quiet, verbose ) + return changed + + +def _FormatFile( filename, - in_place=in_place, - style_config=style_config, - lines=lines, - print_diff=print_diff, - verify=verify, - logger=logging.warning) - except errors.YapfError: - raise - except Exception as e: - raise errors.YapfError(errors.FormatErrorMsg(e)) - - if not in_place and not quiet and reformatted_code: - file_resources.WriteReformattedCode(filename, reformatted_code, encoding, - in_place) - return has_change - - -def _GetLines(line_strings): - """Parses the start and end lines from a line string like 'start-end'. 
+ lines, + style_config = None, + no_local_style = False, + in_place = False, + print_diff = False, + verify = False, + quiet = False, + verbose = False ): + """Format an individual file.""" + if verbose and not quiet: + print( 'Reformatting %s' % filename ) + + if style_config is None and not no_local_style: + style_config = file_resources.GetDefaultStyleForDir( + os.path.dirname( filename ) ) + + try: + reformatted_code, encoding, has_change = yapf_api.FormatFile( + filename, + in_place = in_place, + style_config = style_config, + lines = lines, + print_diff = print_diff, + verify = verify, + logger = logging.warning ) + except errors.YapfError: + raise + except Exception as e: + raise errors.YapfError( errors.FormatErrorMsg( e ) ) + + if not in_place and not quiet and reformatted_code: + file_resources.WriteReformattedCode( + filename, reformatted_code, encoding, in_place ) + return has_change + + +def _GetLines( line_strings ): + """Parses the start and end lines from a line string like 'start-end'. Arguments: line_strings: (array of string) A list of strings representing a line @@ -254,114 +259,117 @@ def _GetLines(line_strings): Raises: ValueError: If the line string failed to parse or was an invalid line range. """ - lines = [] - for line_string in line_strings: - # The 'list' here is needed by Python 3. - line = list(map(int, line_string.split('-', 1))) - if line[0] < 1: - raise errors.YapfError('invalid start of line range: %r' % line) - if line[0] > line[1]: - raise errors.YapfError('end comes before start in line range: %r' % line) - lines.append(tuple(line)) - return lines + lines = [] + for line_string in line_strings: + # The 'list' here is needed by Python 3. 
+ line = list( map( int, line_string.split( '-', 1 ) ) ) + if line[ 0 ] < 1: + raise errors.YapfError( 'invalid start of line range: %r' % line ) + if line[ 0 ] > line[ 1 ]: + raise errors.YapfError( 'end comes before start in line range: %r' % line ) + lines.append( tuple( line ) ) + return lines def _BuildParser(): - """Constructs the parser for the command line arguments. + """Constructs the parser for the command line arguments. Returns: An ArgumentParser instance for the CLI. """ - parser = argparse.ArgumentParser( - prog='yapf', description='Formatter for Python code.') - parser.add_argument( - '-v', - '--version', - action='version', - version='%(prog)s {}'.format(__version__)) - - diff_inplace_quiet_group = parser.add_mutually_exclusive_group() - diff_inplace_quiet_group.add_argument( - '-d', - '--diff', - action='store_true', - help='print the diff for the fixed source') - diff_inplace_quiet_group.add_argument( - '-i', - '--in-place', - action='store_true', - help='make changes to files in place') - diff_inplace_quiet_group.add_argument( - '-q', - '--quiet', - action='store_true', - help='output nothing and set return value') - - lines_recursive_group = parser.add_mutually_exclusive_group() - lines_recursive_group.add_argument( - '-r', - '--recursive', - action='store_true', - help='run recursively over directories') - lines_recursive_group.add_argument( - '-l', - '--lines', - metavar='START-END', - action='append', - default=None, - help='range of lines to reformat, one-based') - - parser.add_argument( - '-e', - '--exclude', - metavar='PATTERN', - action='append', - default=None, - help='patterns for files to exclude from formatting') - parser.add_argument( - '--style', - action='store', - help=('specify formatting style: either a style name (for example "pep8" ' + parser = argparse.ArgumentParser( + prog = 'yapf', description = 'Formatter for Python code.' 
) + parser.add_argument( + '-v', + '--version', + action = 'version', + version = '%(prog)s {}'.format( __version__ ) ) + + diff_inplace_quiet_group = parser.add_mutually_exclusive_group() + diff_inplace_quiet_group.add_argument( + '-d', + '--diff', + action = 'store_true', + help = 'print the diff for the fixed source' ) + diff_inplace_quiet_group.add_argument( + '-i', + '--in-place', + action = 'store_true', + help = 'make changes to files in place' ) + diff_inplace_quiet_group.add_argument( + '-q', + '--quiet', + action = 'store_true', + help = 'output nothing and set return value' ) + + lines_recursive_group = parser.add_mutually_exclusive_group() + lines_recursive_group.add_argument( + '-r', + '--recursive', + action = 'store_true', + help = 'run recursively over directories' ) + lines_recursive_group.add_argument( + '-l', + '--lines', + metavar = 'START-END', + action = 'append', + default = None, + help = 'range of lines to reformat, one-based' ) + + parser.add_argument( + '-e', + '--exclude', + metavar = 'PATTERN', + action = 'append', + default = None, + help = 'patterns for files to exclude from formatting' ) + parser.add_argument( + '--style', + action = 'store', + help = ( + 'specify formatting style: either a style name (for example "pep8" ' 'or "google"), or the name of a file with style settings. The ' 'default is pep8 unless a %s or %s or %s file located in the same ' 'directory as the source or one of its parent directories ' '(for stdin, the current directory is used).' 
% - (style.LOCAL_STYLE, style.SETUP_CONFIG, style.PYPROJECT_TOML))) - parser.add_argument( - '--style-help', - action='store_true', - help=('show style settings and exit; this output can be ' + ( style.LOCAL_STYLE, style.SETUP_CONFIG, style.PYPROJECT_TOML ) ) ) + parser.add_argument( + '--style-help', + action = 'store_true', + help = ( + 'show style settings and exit; this output can be ' 'saved to .style.yapf to make your settings ' - 'permanent')) - parser.add_argument( - '--no-local-style', - action='store_true', - help="don't search for local style definition") - parser.add_argument('--verify', action='store_true', help=argparse.SUPPRESS) - parser.add_argument( - '-p', - '--parallel', - action='store_true', - help=('run YAPF in parallel when formatting multiple files. Requires ' - 'concurrent.futures in Python 2.X')) - parser.add_argument( - '-vv', - '--verbose', - action='store_true', - help='print out file names while processing') - - parser.add_argument( - 'files', nargs='*', help='reads from stdin when no files are specified.') - return parser - - -def run_main(): # pylint: disable=invalid-name - try: - sys.exit(main(sys.argv)) - except errors.YapfError as e: - sys.stderr.write('yapf: ' + str(e) + '\n') - sys.exit(1) + 'permanent' ) ) + parser.add_argument( + '--no-local-style', + action = 'store_true', + help = "don't search for local style definition" ) + parser.add_argument( '--verify', action = 'store_true', help = argparse.SUPPRESS ) + parser.add_argument( + '-p', + '--parallel', + action = 'store_true', + help = ( + 'run YAPF in parallel when formatting multiple files. Requires ' + 'concurrent.futures in Python 2.X' ) ) + parser.add_argument( + '-vv', + '--verbose', + action = 'store_true', + help = 'print out file names while processing' ) + + parser.add_argument( + 'files', nargs = '*', help = 'reads from stdin when no files are specified.' 
) + return parser + + +def run_main(): # pylint: disable=invalid-name + try: + sys.exit( main( sys.argv ) ) + except errors.YapfError as e: + sys.stderr.write( 'yapf: ' + str( e ) + '\n' ) + sys.exit( 1 ) if __name__ == '__main__': - run_main() + run_main() diff --git a/yapf/pyparser/pyparser.py b/yapf/pyparser/pyparser.py index a8a28ebc8..b6b7c50d7 100644 --- a/yapf/pyparser/pyparser.py +++ b/yapf/pyparser/pyparser.py @@ -46,8 +46,8 @@ CONTINUATION = token.N_TOKENS -def ParseCode(unformatted_source, filename=''): - """Parse a string of Python code into logical lines. +def ParseCode( unformatted_source, filename = '' ): + """Parse a string of Python code into logical lines. This provides an alternative entry point to YAPF. @@ -61,27 +61,27 @@ def ParseCode(unformatted_source, filename=''): Raises: An exception is raised if there's an error during AST parsing. """ - if not unformatted_source.endswith(os.linesep): - unformatted_source += os.linesep + if not unformatted_source.endswith( os.linesep ): + unformatted_source += os.linesep - try: - ast_tree = ast.parse(unformatted_source, filename) - ast.fix_missing_locations(ast_tree) - readline = py3compat.StringIO(unformatted_source).readline - tokens = tokenize.generate_tokens(readline) - except Exception: - raise + try: + ast_tree = ast.parse( unformatted_source, filename ) + ast.fix_missing_locations( ast_tree ) + readline = py3compat.StringIO( unformatted_source ).readline + tokens = tokenize.generate_tokens( readline ) + except Exception: + raise - logical_lines = _CreateLogicalLines(tokens) + logical_lines = _CreateLogicalLines( tokens ) - # Process the logical lines. - split_penalty_visitor.SplitPenalty(logical_lines).visit(ast_tree) + # Process the logical lines. + split_penalty_visitor.SplitPenalty( logical_lines ).visit( ast_tree ) - return logical_lines + return logical_lines -def _CreateLogicalLines(tokens): - """Separate tokens into logical lines. 
+def _CreateLogicalLines( tokens ): + """Separate tokens into logical lines. Arguments: tokens: (list of tokenizer.TokenInfo) Tokens generated by tokenizer. @@ -89,57 +89,58 @@ def _CreateLogicalLines(tokens): Returns: A list of LogicalLines. """ - logical_lines = [] - cur_logical_line = [] - prev_tok = None - depth = 0 - - for tok in tokens: - tok = py3compat.TokenInfo(*tok) - if tok.type == tokenize.NEWLINE: - # End of a logical line. - logical_lines.append(logical_line.LogicalLine(depth, cur_logical_line)) - cur_logical_line = [] - prev_tok = None - elif tok.type == tokenize.INDENT: - depth += 1 - elif tok.type == tokenize.DEDENT: - depth -= 1 - elif tok.type not in {tokenize.NL, tokenize.ENDMARKER}: - if (prev_tok and prev_tok.line.rstrip().endswith('\\') and - prev_tok.start[0] < tok.start[0]): - # Insert a token for a line continuation. - ctok = py3compat.TokenInfo( - type=CONTINUATION, - string='\\', - start=(prev_tok.start[0], prev_tok.start[1] + 1), - end=(prev_tok.end[0], prev_tok.end[0] + 2), - line=prev_tok.line) - ctok.lineno = ctok.start[0] - ctok.column = ctok.start[1] - ctok.value = '\\' - cur_logical_line.append(format_token.FormatToken(ctok, 'CONTINUATION')) - tok.lineno = tok.start[0] - tok.column = tok.start[1] - tok.value = tok.string - cur_logical_line.append( - format_token.FormatToken(tok, token.tok_name[tok.type])) - prev_tok = tok - - # Link the FormatTokens in each line together to for a doubly linked list. - for line in logical_lines: - previous = line.first - bracket_stack = [previous] if previous.OpensScope() else [] - for tok in line.tokens[1:]: - tok.previous_token = previous - previous.next_token = tok - previous = tok - - # Set up the "matching_bracket" attribute. 
- if tok.OpensScope(): - bracket_stack.append(tok) - elif tok.ClosesScope(): - bracket_stack[-1].matching_bracket = tok - tok.matching_bracket = bracket_stack.pop() - - return logical_lines + logical_lines = [] + cur_logical_line = [] + prev_tok = None + depth = 0 + + for tok in tokens: + tok = py3compat.TokenInfo( *tok ) + if tok.type == tokenize.NEWLINE: + # End of a logical line. + logical_lines.append( logical_line.LogicalLine( depth, cur_logical_line ) ) + cur_logical_line = [] + prev_tok = None + elif tok.type == tokenize.INDENT: + depth += 1 + elif tok.type == tokenize.DEDENT: + depth -= 1 + elif tok.type not in { tokenize.NL, tokenize.ENDMARKER }: + if ( prev_tok and prev_tok.line.rstrip().endswith( '\\' ) and + prev_tok.start[ 0 ] < tok.start[ 0 ] ): + # Insert a token for a line continuation. + ctok = py3compat.TokenInfo( + type = CONTINUATION, + string = '\\', + start = ( prev_tok.start[ 0 ], prev_tok.start[ 1 ] + 1 ), + end = ( prev_tok.end[ 0 ], prev_tok.end[ 0 ] + 2 ), + line = prev_tok.line ) + ctok.lineno = ctok.start[ 0 ] + ctok.column = ctok.start[ 1 ] + ctok.value = '\\' + cur_logical_line.append( + format_token.FormatToken( ctok, 'CONTINUATION' ) ) + tok.lineno = tok.start[ 0 ] + tok.column = tok.start[ 1 ] + tok.value = tok.string + cur_logical_line.append( + format_token.FormatToken( tok, token.tok_name[ tok.type ] ) ) + prev_tok = tok + + # Link the FormatTokens in each line together to for a doubly linked list. + for line in logical_lines: + previous = line.first + bracket_stack = [ previous ] if previous.OpensScope() else [] + for tok in line.tokens[ 1 : ]: + tok.previous_token = previous + previous.next_token = tok + previous = tok + + # Set up the "matching_bracket" attribute. 
+ if tok.OpensScope(): + bracket_stack.append( tok ) + elif tok.ClosesScope(): + bracket_stack[ -1 ].matching_bracket = tok + tok.matching_bracket = bracket_stack.pop() + + return logical_lines diff --git a/yapf/pyparser/pyparser_utils.py b/yapf/pyparser/pyparser_utils.py index 3f17b15a4..4a37b89a9 100644 --- a/yapf/pyparser/pyparser_utils.py +++ b/yapf/pyparser/pyparser_utils.py @@ -29,68 +29,68 @@ """ -def GetTokens(logical_lines, node): - """Get a list of tokens within the node's range from the logical lines.""" - start = TokenStart(node) - end = TokenEnd(node) - tokens = [] +def GetTokens( logical_lines, node ): + """Get a list of tokens within the node's range from the logical lines.""" + start = TokenStart( node ) + end = TokenEnd( node ) + tokens = [] - for line in logical_lines: - if line.start > end: - break - if line.start <= start or line.end >= end: - tokens.extend(GetTokensInSubRange(line.tokens, node)) + for line in logical_lines: + if line.start > end: + break + if line.start <= start or line.end >= end: + tokens.extend( GetTokensInSubRange( line.tokens, node ) ) - return tokens + return tokens -def GetTokensInSubRange(tokens, node): - """Get a subset of tokens representing the node.""" - start = TokenStart(node) - end = TokenEnd(node) - tokens_in_range = [] +def GetTokensInSubRange( tokens, node ): + """Get a subset of tokens representing the node.""" + start = TokenStart( node ) + end = TokenEnd( node ) + tokens_in_range = [] - for tok in tokens: - tok_range = (tok.lineno, tok.column) - if tok_range >= start and tok_range < end: - tokens_in_range.append(tok) + for tok in tokens: + tok_range = ( tok.lineno, tok.column ) + if tok_range >= start and tok_range < end: + tokens_in_range.append( tok ) - return tokens_in_range + return tokens_in_range -def GetTokenIndex(tokens, pos): - """Get the index of the token at pos.""" - for index, token in enumerate(tokens): - if (token.lineno, token.column) == pos: - return index +def GetTokenIndex( tokens, pos ): 
+ """Get the index of the token at pos.""" + for index, token in enumerate( tokens ): + if ( token.lineno, token.column ) == pos: + return index - return None + return None -def GetNextTokenIndex(tokens, pos): - """Get the index of the next token after pos.""" - for index, token in enumerate(tokens): - if (token.lineno, token.column) >= pos: - return index +def GetNextTokenIndex( tokens, pos ): + """Get the index of the next token after pos.""" + for index, token in enumerate( tokens ): + if ( token.lineno, token.column ) >= pos: + return index - return None + return None -def GetPrevTokenIndex(tokens, pos): - """Get the index of the previous token before pos.""" - for index, token in enumerate(tokens): - if index > 0 and (token.lineno, token.column) >= pos: - return index - 1 +def GetPrevTokenIndex( tokens, pos ): + """Get the index of the previous token before pos.""" + for index, token in enumerate( tokens ): + if index > 0 and ( token.lineno, token.column ) >= pos: + return index - 1 - return None + return None -def TokenStart(node): - return (node.lineno, node.col_offset) +def TokenStart( node ): + return ( node.lineno, node.col_offset ) -def TokenEnd(node): - return (node.end_lineno, node.end_col_offset) +def TokenEnd( node ): + return ( node.end_lineno, node.end_col_offset ) ############################################################################# @@ -98,6 +98,6 @@ def TokenEnd(node): ############################################################################# -def AstDump(node): - import ast - print(ast.dump(node, include_attributes=True, indent=4)) +def AstDump( node ): + import ast + print( ast.dump( node, include_attributes = True, indent = 4 ) ) diff --git a/yapf/pyparser/split_penalty_visitor.py b/yapf/pyparser/split_penalty_visitor.py index 047b48a3d..4d05558ba 100644 --- a/yapf/pyparser/split_penalty_visitor.py +++ b/yapf/pyparser/split_penalty_visitor.py @@ -21,892 +21,896 @@ # This is a skeleton of an AST visitor. 
-class SplitPenalty(ast.NodeVisitor): - """Compute split penalties between tokens.""" - - def __init__(self, logical_lines): - super(SplitPenalty, self).__init__() - self.logical_lines = logical_lines - - # We never want to split before a colon or comma. - for logical_line in logical_lines: - for token in logical_line.tokens: - if token.value in frozenset({',', ':'}): - token.split_penalty = split_penalty.UNBREAKABLE - - def _GetTokens(self, node): - return pyutils.GetTokens(self.logical_lines, node) - - ############################################################################ - # Statements # - ############################################################################ - - def visit_FunctionDef(self, node): - # FunctionDef(name=Name, - # args=arguments( - # posonlyargs=[], - # args=[], - # vararg=[], - # kwonlyargs=[], - # kw_defaults=[], - # defaults=[]), - # body=[...], - # decorator_list=[Call_1, Call_2, ..., Call_n], - # keywords=[]) - tokens = self._GetTokens(node) - for decorator in node.decorator_list: - # The decorator token list begins after the '@'. The body of the decorator - # is formatted like a normal "call." - decorator_range = self._GetTokens(decorator) - # Don't split after the '@'. 
- decorator_range[0].split_penalty = split_penalty.UNBREAKABLE - - for token in tokens[1:]: - if token.value == '(': - break - _SetPenalty(token, split_penalty.UNBREAKABLE) - - if node.returns: - start_index = pyutils.GetTokenIndex(tokens, - pyutils.TokenStart(node.returns)) - _IncreasePenalty(tokens[start_index - 1:start_index + 1], - split_penalty.VERY_STRONGLY_CONNECTED) - end_index = pyutils.GetTokenIndex(tokens, pyutils.TokenEnd(node.returns)) - _IncreasePenalty(tokens[start_index + 1:end_index], - split_penalty.STRONGLY_CONNECTED) - - return self.generic_visit(node) - - def visit_AsyncFunctionDef(self, node): - # AsyncFunctionDef(name=Name, - # args=arguments( - # posonlyargs=[], - # args=[], - # vararg=[], - # kwonlyargs=[], - # kw_defaults=[], - # defaults=[]), - # body=[...], - # decorator_list=[Expr_1, Expr_2, ..., Expr_n], - # keywords=[]) - return self.visit_FunctionDef(node) - - def visit_ClassDef(self, node): - # ClassDef(name=Name, - # bases=[Expr_1, Expr_2, ..., Expr_n], - # keywords=[], - # body=[], - # decorator_list=[Expr_1, Expr_2, ..., Expr_m]) - for base in node.bases: - tokens = self._GetTokens(base) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - for decorator in node.decorator_list: - # Don't split after the '@'. 
- decorator_range = self._GetTokens(decorator) - decorator_range[0].split_penalty = split_penalty.UNBREAKABLE - - return self.generic_visit(node) - - def visit_Return(self, node): - # Return(value=Expr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_Delete(self, node): - # Delete(targets=[Expr_1, Expr_2, ..., Expr_n]) - for target in node.targets: - tokens = self._GetTokens(target) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_Assign(self, node): - # Assign(targets=[Expr_1, Expr_2, ..., Expr_n], - # value=Expr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_AugAssign(self, node): - # AugAssign(target=Name, - # op=Add(), - # value=Expr) - return self.generic_visit(node) - - def visit_AnnAssign(self, node): - # AnnAssign(target=Expr, - # annotation=TypeName, - # value=Expr, - # simple=number) - return self.generic_visit(node) - - def visit_For(self, node): - # For(target=Expr, - # iter=Expr, - # body=[...], - # orelse=[...]) - return self.generic_visit(node) - - def visit_AsyncFor(self, node): - # AsyncFor(target=Expr, - # iter=Expr, - # body=[...], - # orelse=[...]) - return self.generic_visit(node) - - def visit_While(self, node): - # While(test=Expr, - # body=[...], - # orelse=[...]) - return self.generic_visit(node) - - def visit_If(self, node): - # If(test=Expr, - # body=[...], - # orelse=[...]) - return self.generic_visit(node) - - def visit_With(self, node): - # With(items=[withitem_1, withitem_2, ..., withitem_n], - # body=[...]) - return self.generic_visit(node) - - def visit_AsyncWith(self, node): - # AsyncWith(items=[withitem_1, withitem_2, ..., withitem_n], - # body=[...]) - return self.generic_visit(node) - - def visit_Match(self, node): - # Match(subject=Expr, - # cases=[ - # match_case( - # pattern=pattern, - # guard=Expr, - # 
body=[...]), - # ... - # ]) - return self.generic_visit(node) - - def visit_Raise(self, node): - # Raise(exc=Expr) - return self.generic_visit(node) - - def visit_Try(self, node): - # Try(body=[...], - # handlers=[ExceptHandler_1, ExceptHandler_2, ..., ExceptHandler_b], - # orelse=[...], - # finalbody=[...]) - return self.generic_visit(node) - - def visit_Assert(self, node): - # Assert(test=Expr) - return self.generic_visit(node) - - def visit_Import(self, node): - # Import(names=[ - # alias( - # name=Identifier, - # asname=Identifier), - # ... - # ]) - return self.generic_visit(node) - - def visit_ImportFrom(self, node): - # ImportFrom(module=Identifier, - # names=[ - # alias( - # name=Identifier, - # asname=Identifier), - # ... - # ], - # level=num - return self.generic_visit(node) - - def visit_Global(self, node): - # Global(names=[Identifier_1, Identifier_2, ..., Identifier_n]) - return self.generic_visit(node) - - def visit_Nonlocal(self, node): - # Nonlocal(names=[Identifier_1, Identifier_2, ..., Identifier_n]) - return self.generic_visit(node) - - def visit_Expr(self, node): - # Expr(value=Expr) - return self.generic_visit(node) - - def visit_Pass(self, node): - # Pass() - return self.generic_visit(node) - - def visit_Break(self, node): - # Break() - return self.generic_visit(node) - - def visit_Continue(self, node): - # Continue() - return self.generic_visit(node) - - ############################################################################ - # Expressions # - ############################################################################ - - def visit_BoolOp(self, node): - # BoolOp(op=And | Or, - # values=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - # Lower the split penalty to allow splitting before or after the logical - # operator. 
- split_before_operator = style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR') - operator_indices = [ - pyutils.GetNextTokenIndex(tokens, pyutils.TokenEnd(value)) - for value in node.values[:-1] - ] - for operator_index in operator_indices: - if not split_before_operator: - operator_index += 1 - _DecreasePenalty(tokens[operator_index], split_penalty.EXPR * 2) - - return self.generic_visit(node) - - def visit_NamedExpr(self, node): - # NamedExpr(target=Name, - # value=Expr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_BinOp(self, node): - # BinOp(left=LExpr - # op=Add | Sub | Mult | MatMult | Div | Mod | Pow | LShift | - # RShift | BitOr | BitXor | BitAnd | FloorDiv - # right=RExpr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - # Lower the split penalty to allow splitting before or after the arithmetic - # operator. - operator_index = pyutils.GetNextTokenIndex(tokens, - pyutils.TokenEnd(node.left)) - if not style.Get('SPLIT_BEFORE_ARITHMETIC_OPERATOR'): - operator_index += 1 - - _DecreasePenalty(tokens[operator_index], split_penalty.EXPR * 2) - - return self.generic_visit(node) - - def visit_UnaryOp(self, node): - # UnaryOp(op=Not | USub | UAdd | Invert, - # operand=Expr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - _IncreasePenalty(tokens[1], style.Get('SPLIT_PENALTY_AFTER_UNARY_OPERATOR')) - - return self.generic_visit(node) - - def visit_Lambda(self, node): - # Lambda(args=arguments( - # posonlyargs=[arg(...), arg(...), ..., arg(...)], - # args=[arg(...), arg(...), ..., arg(...)], - # kwonlyargs=[arg(...), arg(...), ..., arg(...)], - # kw_defaults=[arg(...), arg(...), ..., arg(...)], - # defaults=[arg(...), arg(...), ..., arg(...)]), - # body=Expr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.LAMBDA) - - if style.Get('ALLOW_MULTILINE_LAMBDAS'): - 
_SetPenalty(self._GetTokens(node.body), split_penalty.MULTIPLINE_LAMBDA) - - return self.generic_visit(node) - - def visit_IfExp(self, node): - # IfExp(test=TestExpr, - # body=BodyExpr, - # orelse=OrElseExpr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_Dict(self, node): - # Dict(keys=[Expr_1, Expr_2, ..., Expr_n], - # values=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens(node) - - # The keys should be on a single line if at all possible. - for key in node.keys: - subrange = pyutils.GetTokensInSubRange(tokens, key) - _IncreasePenalty(subrange[1:], split_penalty.DICT_KEY_EXPR) - - for value in node.values: - subrange = pyutils.GetTokensInSubRange(tokens, value) - _IncreasePenalty(subrange[1:], split_penalty.DICT_VALUE_EXPR) - - return self.generic_visit(node) - - def visit_Set(self, node): - # Set(elts=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens(node) - for element in node.elts: - subrange = pyutils.GetTokensInSubRange(tokens, element) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_ListComp(self, node): - # ListComp(elt=Expr, - # generators=[ - # comprehension( - # target=Expr, - # iter=Expr, - # ifs=[Expr_1, Expr_2, ..., Expr_n], - # is_async=0), - # ... 
- # ]) - tokens = self._GetTokens(node) - element = pyutils.GetTokensInSubRange(tokens, node.elt) - _IncreasePenalty(element[1:], split_penalty.EXPR) - - for comp in node.generators: - subrange = pyutils.GetTokensInSubRange(tokens, comp.iter) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - for if_expr in comp.ifs: - subrange = pyutils.GetTokensInSubRange(tokens, if_expr) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_SetComp(self, node): - # SetComp(elt=Expr, - # generators=[ - # comprehension( - # target=Expr, - # iter=Expr, - # ifs=[Expr_1, Expr_2, ..., Expr_n], - # is_async=0), - # ... - # ]) - tokens = self._GetTokens(node) - element = pyutils.GetTokensInSubRange(tokens, node.elt) - _IncreasePenalty(element[1:], split_penalty.EXPR) - - for comp in node.generators: - subrange = pyutils.GetTokensInSubRange(tokens, comp.iter) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - for if_expr in comp.ifs: - subrange = pyutils.GetTokensInSubRange(tokens, if_expr) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_DictComp(self, node): - # DictComp(key=KeyExpr, - # value=ValExpr, - # generators=[ - # comprehension( - # target=TargetExpr - # iter=IterExpr, - # ifs=[Expr_1, Expr_2, ..., Expr_n]), - # is_async=0)], - # ... 
- # ]) - tokens = self._GetTokens(node) - key = pyutils.GetTokensInSubRange(tokens, node.key) - _IncreasePenalty(key[1:], split_penalty.EXPR) - - value = pyutils.GetTokensInSubRange(tokens, node.value) - _IncreasePenalty(value[1:], split_penalty.EXPR) - - for comp in node.generators: - subrange = pyutils.GetTokensInSubRange(tokens, comp.iter) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - for if_expr in comp.ifs: - subrange = pyutils.GetTokensInSubRange(tokens, if_expr) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_GeneratorExp(self, node): - # GeneratorExp(elt=Expr, - # generators=[ - # comprehension( - # target=Expr, - # iter=Expr, - # ifs=[Expr_1, Expr_2, ..., Expr_n], - # is_async=0), - # ... - # ]) - tokens = self._GetTokens(node) - element = pyutils.GetTokensInSubRange(tokens, node.elt) - _IncreasePenalty(element[1:], split_penalty.EXPR) - - for comp in node.generators: - subrange = pyutils.GetTokensInSubRange(tokens, comp.iter) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - for if_expr in comp.ifs: - subrange = pyutils.GetTokensInSubRange(tokens, if_expr) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_Await(self, node): - # Await(value=Expr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_Yield(self, node): - # Yield(value=Expr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_YieldFrom(self, node): - # YieldFrom(value=Expr) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - tokens[2].split_penalty = split_penalty.UNBREAKABLE - - return self.generic_visit(node) - - def visit_Compare(self, node): - # Compare(left=LExpr, - # ops=[Op_1, Op_2, ..., Op_n], - # comparators=[Expr_1, Expr_2, ..., Expr_n]) - tokens = 
self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.EXPR) - - operator_indices = [ - pyutils.GetNextTokenIndex(tokens, pyutils.TokenEnd(node.left)) - ] + [ - pyutils.GetNextTokenIndex(tokens, pyutils.TokenEnd(comparator)) - for comparator in node.comparators[:-1] - ] - split_before = style.Get('SPLIT_BEFORE_ARITHMETIC_OPERATOR') - - for operator_index in operator_indices: - if not split_before: - operator_index += 1 - _DecreasePenalty(tokens[operator_index], split_penalty.EXPR * 2) - - return self.generic_visit(node) - - def visit_Call(self, node): - # Call(func=Expr, - # args=[Expr_1, Expr_2, ..., Expr_n], - # keywords=[ - # keyword( - # arg='d', - # value=Expr), - # ... - # ]) - tokens = self._GetTokens(node) - - # Don't never split before the opening parenthesis. - paren_index = pyutils.GetNextTokenIndex(tokens, pyutils.TokenEnd(node.func)) - _IncreasePenalty(tokens[paren_index], split_penalty.UNBREAKABLE) - - for arg in node.args: - subrange = pyutils.GetTokensInSubRange(tokens, arg) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - - return self.generic_visit(node) - - def visit_FormattedValue(self, node): - # FormattedValue(value=Expr, - # conversion=-1) - return node # Ignore formatted values. 
- - def visit_JoinedStr(self, node): - # JoinedStr(values=[Expr_1, Expr_2, ..., Expr_n]) - return self.generic_visit(node) - - def visit_Constant(self, node): - # Constant(value=Expr) - return self.generic_visit(node) - - def visit_Attribute(self, node): - # Attribute(value=Expr, - # attr=Identifier) - tokens = self._GetTokens(node) - split_before = style.Get('SPLIT_BEFORE_DOT') - dot_indices = pyutils.GetNextTokenIndex(tokens, - pyutils.TokenEnd(node.value)) - - if not split_before: - dot_indices += 1 - _IncreasePenalty(tokens[dot_indices], split_penalty.VERY_STRONGLY_CONNECTED) - - return self.generic_visit(node) - - def visit_Subscript(self, node): - # Subscript(value=ValueExpr, - # slice=SliceExpr) - tokens = self._GetTokens(node) - - # Don't split before the opening bracket of a subscript. - bracket_index = pyutils.GetNextTokenIndex(tokens, - pyutils.TokenEnd(node.value)) - _IncreasePenalty(tokens[bracket_index], split_penalty.UNBREAKABLE) - - return self.generic_visit(node) - - def visit_Starred(self, node): - # Starred(value=Expr) - return self.generic_visit(node) - - def visit_Name(self, node): - # Name(id=Identifier) - tokens = self._GetTokens(node) - _IncreasePenalty(tokens[1:], split_penalty.UNBREAKABLE) - - return self.generic_visit(node) - - def visit_List(self, node): - # List(elts=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens(node) - - for element in node.elts: - subrange = pyutils.GetTokensInSubRange(tokens, element) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) - - return self.generic_visit(node) - - def visit_Tuple(self, node): - # Tuple(elts=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens(node) - - for element in node.elts: - subrange = pyutils.GetTokensInSubRange(tokens, element) - _IncreasePenalty(subrange[1:], split_penalty.EXPR) - _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) - - return self.generic_visit(node) - - def visit_Slice(self, node): - # 
Slice(lower=Expr, - # upper=Expr, - # step=Expr) - tokens = self._GetTokens(node) - - if hasattr(node, 'lower') and node.lower: - subrange = pyutils.GetTokensInSubRange(tokens, node.lower) - _IncreasePenalty(subrange, split_penalty.EXPR) - _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) - - if hasattr(node, 'upper') and node.upper: - colon_index = pyutils.GetPrevTokenIndex(tokens, - pyutils.TokenStart(node.upper)) - _IncreasePenalty(tokens[colon_index], split_penalty.UNBREAKABLE) - subrange = pyutils.GetTokensInSubRange(tokens, node.upper) - _IncreasePenalty(subrange, split_penalty.EXPR) - _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) - - if hasattr(node, 'step') and node.step: - colon_index = pyutils.GetPrevTokenIndex(tokens, - pyutils.TokenStart(node.step)) - _IncreasePenalty(tokens[colon_index], split_penalty.UNBREAKABLE) - subrange = pyutils.GetTokensInSubRange(tokens, node.step) - _IncreasePenalty(subrange, split_penalty.EXPR) - _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) - - return self.generic_visit(node) - - ############################################################################ - # Expression Context # - ############################################################################ - - def visit_Load(self, node): - # Load() - return self.generic_visit(node) - - def visit_Store(self, node): - # Store() - return self.generic_visit(node) - - def visit_Del(self, node): - # Del() - return self.generic_visit(node) - - ############################################################################ - # Boolean Operators # - ############################################################################ +class SplitPenalty( ast.NodeVisitor ): + """Compute split penalties between tokens.""" + + def __init__( self, logical_lines ): + super( SplitPenalty, self ).__init__() + self.logical_lines = logical_lines + + # We never want to split before a colon or comma. 
+ for logical_line in logical_lines: + for token in logical_line.tokens: + if token.value in frozenset( { ',', ':' } ): + token.split_penalty = split_penalty.UNBREAKABLE + + def _GetTokens( self, node ): + return pyutils.GetTokens( self.logical_lines, node ) + + ############################################################################ + # Statements # + ############################################################################ + + def visit_FunctionDef( self, node ): + # FunctionDef(name=Name, + # args=arguments( + # posonlyargs=[], + # args=[], + # vararg=[], + # kwonlyargs=[], + # kw_defaults=[], + # defaults=[]), + # body=[...], + # decorator_list=[Call_1, Call_2, ..., Call_n], + # keywords=[]) + tokens = self._GetTokens( node ) + for decorator in node.decorator_list: + # The decorator token list begins after the '@'. The body of the decorator + # is formatted like a normal "call." + decorator_range = self._GetTokens( decorator ) + # Don't split after the '@'. + decorator_range[ 0 ].split_penalty = split_penalty.UNBREAKABLE + + for token in tokens[ 1 : ]: + if token.value == '(': + break + _SetPenalty( token, split_penalty.UNBREAKABLE ) + + if node.returns: + start_index = pyutils.GetTokenIndex( + tokens, pyutils.TokenStart( node.returns ) ) + _IncreasePenalty( + tokens[ start_index - 1 : start_index + 1 ], + split_penalty.VERY_STRONGLY_CONNECTED ) + end_index = pyutils.GetTokenIndex( + tokens, pyutils.TokenEnd( node.returns ) ) + _IncreasePenalty( + tokens[ start_index + 1 : end_index ], + split_penalty.STRONGLY_CONNECTED ) + + return self.generic_visit( node ) + + def visit_AsyncFunctionDef( self, node ): + # AsyncFunctionDef(name=Name, + # args=arguments( + # posonlyargs=[], + # args=[], + # vararg=[], + # kwonlyargs=[], + # kw_defaults=[], + # defaults=[]), + # body=[...], + # decorator_list=[Expr_1, Expr_2, ..., Expr_n], + # keywords=[]) + return self.visit_FunctionDef( node ) + + def visit_ClassDef( self, node ): + # ClassDef(name=Name, + # 
bases=[Expr_1, Expr_2, ..., Expr_n], + # keywords=[], + # body=[], + # decorator_list=[Expr_1, Expr_2, ..., Expr_m]) + for base in node.bases: + tokens = self._GetTokens( base ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + for decorator in node.decorator_list: + # Don't split after the '@'. + decorator_range = self._GetTokens( decorator ) + decorator_range[ 0 ].split_penalty = split_penalty.UNBREAKABLE + + return self.generic_visit( node ) + + def visit_Return( self, node ): + # Return(value=Expr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_Delete( self, node ): + # Delete(targets=[Expr_1, Expr_2, ..., Expr_n]) + for target in node.targets: + tokens = self._GetTokens( target ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_Assign( self, node ): + # Assign(targets=[Expr_1, Expr_2, ..., Expr_n], + # value=Expr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_AugAssign( self, node ): + # AugAssign(target=Name, + # op=Add(), + # value=Expr) + return self.generic_visit( node ) + + def visit_AnnAssign( self, node ): + # AnnAssign(target=Expr, + # annotation=TypeName, + # value=Expr, + # simple=number) + return self.generic_visit( node ) + + def visit_For( self, node ): + # For(target=Expr, + # iter=Expr, + # body=[...], + # orelse=[...]) + return self.generic_visit( node ) + + def visit_AsyncFor( self, node ): + # AsyncFor(target=Expr, + # iter=Expr, + # body=[...], + # orelse=[...]) + return self.generic_visit( node ) + + def visit_While( self, node ): + # While(test=Expr, + # body=[...], + # orelse=[...]) + return self.generic_visit( node ) + + def visit_If( self, node ): + # If(test=Expr, + # body=[...], + # orelse=[...]) + return self.generic_visit( node ) + + def visit_With( self, node ): + # 
With(items=[withitem_1, withitem_2, ..., withitem_n], + # body=[...]) + return self.generic_visit( node ) + + def visit_AsyncWith( self, node ): + # AsyncWith(items=[withitem_1, withitem_2, ..., withitem_n], + # body=[...]) + return self.generic_visit( node ) + + def visit_Match( self, node ): + # Match(subject=Expr, + # cases=[ + # match_case( + # pattern=pattern, + # guard=Expr, + # body=[...]), + # ... + # ]) + return self.generic_visit( node ) + + def visit_Raise( self, node ): + # Raise(exc=Expr) + return self.generic_visit( node ) + + def visit_Try( self, node ): + # Try(body=[...], + # handlers=[ExceptHandler_1, ExceptHandler_2, ..., ExceptHandler_b], + # orelse=[...], + # finalbody=[...]) + return self.generic_visit( node ) + + def visit_Assert( self, node ): + # Assert(test=Expr) + return self.generic_visit( node ) + + def visit_Import( self, node ): + # Import(names=[ + # alias( + # name=Identifier, + # asname=Identifier), + # ... + # ]) + return self.generic_visit( node ) + + def visit_ImportFrom( self, node ): + # ImportFrom(module=Identifier, + # names=[ + # alias( + # name=Identifier, + # asname=Identifier), + # ... 
+ # ], + # level=num + return self.generic_visit( node ) + + def visit_Global( self, node ): + # Global(names=[Identifier_1, Identifier_2, ..., Identifier_n]) + return self.generic_visit( node ) + + def visit_Nonlocal( self, node ): + # Nonlocal(names=[Identifier_1, Identifier_2, ..., Identifier_n]) + return self.generic_visit( node ) + + def visit_Expr( self, node ): + # Expr(value=Expr) + return self.generic_visit( node ) + + def visit_Pass( self, node ): + # Pass() + return self.generic_visit( node ) + + def visit_Break( self, node ): + # Break() + return self.generic_visit( node ) + + def visit_Continue( self, node ): + # Continue() + return self.generic_visit( node ) + + ############################################################################ + # Expressions # + ############################################################################ + + def visit_BoolOp( self, node ): + # BoolOp(op=And | Or, + # values=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + # Lower the split penalty to allow splitting before or after the logical + # operator. 
+ split_before_operator = style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ) + operator_indices = [ + pyutils.GetNextTokenIndex( tokens, pyutils.TokenEnd( value ) ) + for value in node.values[ :-1 ] + ] + for operator_index in operator_indices: + if not split_before_operator: + operator_index += 1 + _DecreasePenalty( tokens[ operator_index ], split_penalty.EXPR * 2 ) + + return self.generic_visit( node ) + + def visit_NamedExpr( self, node ): + # NamedExpr(target=Name, + # value=Expr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_BinOp( self, node ): + # BinOp(left=LExpr + # op=Add | Sub | Mult | MatMult | Div | Mod | Pow | LShift | + # RShift | BitOr | BitXor | BitAnd | FloorDiv + # right=RExpr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + # Lower the split penalty to allow splitting before or after the arithmetic + # operator. + operator_index = pyutils.GetNextTokenIndex( + tokens, pyutils.TokenEnd( node.left ) ) + if not style.Get( 'SPLIT_BEFORE_ARITHMETIC_OPERATOR' ): + operator_index += 1 + + _DecreasePenalty( tokens[ operator_index ], split_penalty.EXPR * 2 ) + + return self.generic_visit( node ) + + def visit_UnaryOp( self, node ): + # UnaryOp(op=Not | USub | UAdd | Invert, + # operand=Expr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + _IncreasePenalty( + tokens[ 1 ], style.Get( 'SPLIT_PENALTY_AFTER_UNARY_OPERATOR' ) ) + + return self.generic_visit( node ) + + def visit_Lambda( self, node ): + # Lambda(args=arguments( + # posonlyargs=[arg(...), arg(...), ..., arg(...)], + # args=[arg(...), arg(...), ..., arg(...)], + # kwonlyargs=[arg(...), arg(...), ..., arg(...)], + # kw_defaults=[arg(...), arg(...), ..., arg(...)], + # defaults=[arg(...), arg(...), ..., arg(...)]), + # body=Expr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.LAMBDA 
) + + if style.Get( 'ALLOW_MULTILINE_LAMBDAS' ): + _SetPenalty( self._GetTokens( node.body ), split_penalty.MULTIPLINE_LAMBDA ) + + return self.generic_visit( node ) + + def visit_IfExp( self, node ): + # IfExp(test=TestExpr, + # body=BodyExpr, + # orelse=OrElseExpr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_Dict( self, node ): + # Dict(keys=[Expr_1, Expr_2, ..., Expr_n], + # values=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens( node ) + + # The keys should be on a single line if at all possible. + for key in node.keys: + subrange = pyutils.GetTokensInSubRange( tokens, key ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.DICT_KEY_EXPR ) + + for value in node.values: + subrange = pyutils.GetTokensInSubRange( tokens, value ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.DICT_VALUE_EXPR ) + + return self.generic_visit( node ) + + def visit_Set( self, node ): + # Set(elts=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens( node ) + for element in node.elts: + subrange = pyutils.GetTokensInSubRange( tokens, element ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_ListComp( self, node ): + # ListComp(elt=Expr, + # generators=[ + # comprehension( + # target=Expr, + # iter=Expr, + # ifs=[Expr_1, Expr_2, ..., Expr_n], + # is_async=0), + # ... 
+ # ]) + tokens = self._GetTokens( node ) + element = pyutils.GetTokensInSubRange( tokens, node.elt ) + _IncreasePenalty( element[ 1 : ], split_penalty.EXPR ) + + for comp in node.generators: + subrange = pyutils.GetTokensInSubRange( tokens, comp.iter ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + for if_expr in comp.ifs: + subrange = pyutils.GetTokensInSubRange( tokens, if_expr ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_SetComp( self, node ): + # SetComp(elt=Expr, + # generators=[ + # comprehension( + # target=Expr, + # iter=Expr, + # ifs=[Expr_1, Expr_2, ..., Expr_n], + # is_async=0), + # ... + # ]) + tokens = self._GetTokens( node ) + element = pyutils.GetTokensInSubRange( tokens, node.elt ) + _IncreasePenalty( element[ 1 : ], split_penalty.EXPR ) + + for comp in node.generators: + subrange = pyutils.GetTokensInSubRange( tokens, comp.iter ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + for if_expr in comp.ifs: + subrange = pyutils.GetTokensInSubRange( tokens, if_expr ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_DictComp( self, node ): + # DictComp(key=KeyExpr, + # value=ValExpr, + # generators=[ + # comprehension( + # target=TargetExpr + # iter=IterExpr, + # ifs=[Expr_1, Expr_2, ..., Expr_n]), + # is_async=0)], + # ... 
+ # ]) + tokens = self._GetTokens( node ) + key = pyutils.GetTokensInSubRange( tokens, node.key ) + _IncreasePenalty( key[ 1 : ], split_penalty.EXPR ) + + value = pyutils.GetTokensInSubRange( tokens, node.value ) + _IncreasePenalty( value[ 1 : ], split_penalty.EXPR ) + + for comp in node.generators: + subrange = pyutils.GetTokensInSubRange( tokens, comp.iter ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + for if_expr in comp.ifs: + subrange = pyutils.GetTokensInSubRange( tokens, if_expr ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_GeneratorExp( self, node ): + # GeneratorExp(elt=Expr, + # generators=[ + # comprehension( + # target=Expr, + # iter=Expr, + # ifs=[Expr_1, Expr_2, ..., Expr_n], + # is_async=0), + # ... + # ]) + tokens = self._GetTokens( node ) + element = pyutils.GetTokensInSubRange( tokens, node.elt ) + _IncreasePenalty( element[ 1 : ], split_penalty.EXPR ) + + for comp in node.generators: + subrange = pyutils.GetTokensInSubRange( tokens, comp.iter ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + for if_expr in comp.ifs: + subrange = pyutils.GetTokensInSubRange( tokens, if_expr ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_Await( self, node ): + # Await(value=Expr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_Yield( self, node ): + # Yield(value=Expr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_YieldFrom( self, node ): + # YieldFrom(value=Expr) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + tokens[ 2 ].split_penalty = split_penalty.UNBREAKABLE + + return self.generic_visit( node ) + + def visit_Compare( self, node ): + # Compare(left=LExpr, + # 
ops=[Op_1, Op_2, ..., Op_n], + # comparators=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) + + operator_indices = [ + pyutils.GetNextTokenIndex( tokens, pyutils.TokenEnd( node.left ) ) + ] + [ + pyutils.GetNextTokenIndex( tokens, pyutils.TokenEnd( comparator ) ) + for comparator in node.comparators[ :-1 ] + ] + split_before = style.Get( 'SPLIT_BEFORE_ARITHMETIC_OPERATOR' ) + + for operator_index in operator_indices: + if not split_before: + operator_index += 1 + _DecreasePenalty( tokens[ operator_index ], split_penalty.EXPR * 2 ) + + return self.generic_visit( node ) + + def visit_Call( self, node ): + # Call(func=Expr, + # args=[Expr_1, Expr_2, ..., Expr_n], + # keywords=[ + # keyword( + # arg='d', + # value=Expr), + # ... + # ]) + tokens = self._GetTokens( node ) + + # Don't never split before the opening parenthesis. + paren_index = pyutils.GetNextTokenIndex( tokens, pyutils.TokenEnd( node.func ) ) + _IncreasePenalty( tokens[ paren_index ], split_penalty.UNBREAKABLE ) + + for arg in node.args: + subrange = pyutils.GetTokensInSubRange( tokens, arg ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + + return self.generic_visit( node ) + + def visit_FormattedValue( self, node ): + # FormattedValue(value=Expr, + # conversion=-1) + return node # Ignore formatted values. 
+ + def visit_JoinedStr( self, node ): + # JoinedStr(values=[Expr_1, Expr_2, ..., Expr_n]) + return self.generic_visit( node ) + + def visit_Constant( self, node ): + # Constant(value=Expr) + return self.generic_visit( node ) + + def visit_Attribute( self, node ): + # Attribute(value=Expr, + # attr=Identifier) + tokens = self._GetTokens( node ) + split_before = style.Get( 'SPLIT_BEFORE_DOT' ) + dot_indices = pyutils.GetNextTokenIndex( + tokens, pyutils.TokenEnd( node.value ) ) + + if not split_before: + dot_indices += 1 + _IncreasePenalty( tokens[ dot_indices ], split_penalty.VERY_STRONGLY_CONNECTED ) + + return self.generic_visit( node ) + + def visit_Subscript( self, node ): + # Subscript(value=ValueExpr, + # slice=SliceExpr) + tokens = self._GetTokens( node ) + + # Don't split before the opening bracket of a subscript. + bracket_index = pyutils.GetNextTokenIndex( + tokens, pyutils.TokenEnd( node.value ) ) + _IncreasePenalty( tokens[ bracket_index ], split_penalty.UNBREAKABLE ) + + return self.generic_visit( node ) + + def visit_Starred( self, node ): + # Starred(value=Expr) + return self.generic_visit( node ) + + def visit_Name( self, node ): + # Name(id=Identifier) + tokens = self._GetTokens( node ) + _IncreasePenalty( tokens[ 1 : ], split_penalty.UNBREAKABLE ) + + return self.generic_visit( node ) + + def visit_List( self, node ): + # List(elts=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens( node ) + + for element in node.elts: + subrange = pyutils.GetTokensInSubRange( tokens, element ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + _DecreasePenalty( subrange[ 0 ], split_penalty.EXPR // 2 ) + + return self.generic_visit( node ) + + def visit_Tuple( self, node ): + # Tuple(elts=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens( node ) + + for element in node.elts: + subrange = pyutils.GetTokensInSubRange( tokens, element ) + _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) + _DecreasePenalty( subrange[ 0 ], 
split_penalty.EXPR // 2 ) + + return self.generic_visit( node ) + + def visit_Slice( self, node ): + # Slice(lower=Expr, + # upper=Expr, + # step=Expr) + tokens = self._GetTokens( node ) + + if hasattr( node, 'lower' ) and node.lower: + subrange = pyutils.GetTokensInSubRange( tokens, node.lower ) + _IncreasePenalty( subrange, split_penalty.EXPR ) + _DecreasePenalty( subrange[ 0 ], split_penalty.EXPR // 2 ) + + if hasattr( node, 'upper' ) and node.upper: + colon_index = pyutils.GetPrevTokenIndex( + tokens, pyutils.TokenStart( node.upper ) ) + _IncreasePenalty( tokens[ colon_index ], split_penalty.UNBREAKABLE ) + subrange = pyutils.GetTokensInSubRange( tokens, node.upper ) + _IncreasePenalty( subrange, split_penalty.EXPR ) + _DecreasePenalty( subrange[ 0 ], split_penalty.EXPR // 2 ) + + if hasattr( node, 'step' ) and node.step: + colon_index = pyutils.GetPrevTokenIndex( + tokens, pyutils.TokenStart( node.step ) ) + _IncreasePenalty( tokens[ colon_index ], split_penalty.UNBREAKABLE ) + subrange = pyutils.GetTokensInSubRange( tokens, node.step ) + _IncreasePenalty( subrange, split_penalty.EXPR ) + _DecreasePenalty( subrange[ 0 ], split_penalty.EXPR // 2 ) + + return self.generic_visit( node ) + + ############################################################################ + # Expression Context # + ############################################################################ + + def visit_Load( self, node ): + # Load() + return self.generic_visit( node ) + + def visit_Store( self, node ): + # Store() + return self.generic_visit( node ) + + def visit_Del( self, node ): + # Del() + return self.generic_visit( node ) + + ############################################################################ + # Boolean Operators # + ############################################################################ - def visit_And(self, node): - # And() - return self.generic_visit(node) + def visit_And( self, node ): + # And() + return self.generic_visit( node ) - def visit_Or(self, node): - 
# Or() - return self.generic_visit(node) - - ############################################################################ - # Binary Operators # - ############################################################################ - - def visit_Add(self, node): - # Add() - return self.generic_visit(node) + def visit_Or( self, node ): + # Or() + return self.generic_visit( node ) + + ############################################################################ + # Binary Operators # + ############################################################################ + + def visit_Add( self, node ): + # Add() + return self.generic_visit( node ) - def visit_Sub(self, node): - # Sub() - return self.generic_visit(node) - - def visit_Mult(self, node): - # Mult() - return self.generic_visit(node) - - def visit_MatMult(self, node): - # MatMult() - return self.generic_visit(node) - - def visit_Div(self, node): - # Div() - return self.generic_visit(node) - - def visit_Mod(self, node): - # Mod() - return self.generic_visit(node) - - def visit_Pow(self, node): - # Pow() - return self.generic_visit(node) - - def visit_LShift(self, node): - # LShift() - return self.generic_visit(node) - - def visit_RShift(self, node): - # RShift() - return self.generic_visit(node) - - def visit_BitOr(self, node): - # BitOr() - return self.generic_visit(node) - - def visit_BitXor(self, node): - # BitXor() - return self.generic_visit(node) - - def visit_BitAnd(self, node): - # BitAnd() - return self.generic_visit(node) - - def visit_FloorDiv(self, node): - # FloorDiv() - return self.generic_visit(node) - - ############################################################################ - # Unary Operators # - ############################################################################ - - def visit_Invert(self, node): - # Invert() - return self.generic_visit(node) - - def visit_Not(self, node): - # Not() - return self.generic_visit(node) - - def visit_UAdd(self, node): - # UAdd() - return self.generic_visit(node) - 
- def visit_USub(self, node): - # USub() - return self.generic_visit(node) - - ############################################################################ - # Comparison Operators # - ############################################################################ - - def visit_Eq(self, node): - # Eq() - return self.generic_visit(node) - - def visit_NotEq(self, node): - # NotEq() - return self.generic_visit(node) - - def visit_Lt(self, node): - # Lt() - return self.generic_visit(node) - - def visit_LtE(self, node): - # LtE() - return self.generic_visit(node) - - def visit_Gt(self, node): - # Gt() - return self.generic_visit(node) - - def visit_GtE(self, node): - # GtE() - return self.generic_visit(node) - - def visit_Is(self, node): - # Is() - return self.generic_visit(node) - - def visit_IsNot(self, node): - # IsNot() - return self.generic_visit(node) - - def visit_In(self, node): - # In() - return self.generic_visit(node) - - def visit_NotIn(self, node): - # NotIn() - return self.generic_visit(node) - - ############################################################################ - # Exception Handler # - ############################################################################ - - def visit_ExceptionHandler(self, node): - # ExceptHandler(type=Expr, - # name=Identifier, - # body=[...]) - return self.generic_visit(node) - - ############################################################################ - # Matching Patterns # - ############################################################################ - - def visit_MatchValue(self, node): - # MatchValue(value=Expr) - return self.generic_visit(node) - - def visit_MatchSingleton(self, node): - # MatchSingleton(value=Constant) - return self.generic_visit(node) - - def visit_MatchSequence(self, node): - # MatchSequence(patterns=[pattern_1, pattern_2, ..., pattern_n]) - return self.generic_visit(node) - - def visit_MatchMapping(self, node): - # MatchMapping(keys=[Expr_1, Expr_2, ..., Expr_n], - # patterns=[pattern_1, 
pattern_2, ..., pattern_m], - # rest=Identifier) - return self.generic_visit(node) - - def visit_MatchClass(self, node): - # MatchClass(cls=Expr, - # patterns=[pattern_1, pattern_2, ...], - # kwd_attrs=[Identifier_1, Identifier_2, ...], - # kwd_patterns=[pattern_1, pattern_2, ...]) - return self.generic_visit(node) - - def visit_MatchStar(self, node): - # MatchStar(name=Identifier) - return self.generic_visit(node) - - def visit_MatchAs(self, node): - # MatchAs(pattern=pattern, - # name=Identifier) - return self.generic_visit(node) - - def visit_MatchOr(self, node): - # MatchOr(patterns=[pattern_1, pattern_2, ...]) - return self.generic_visit(node) - - ############################################################################ - # Type Ignore # - ############################################################################ - - def visit_TypeIgnore(self, node): - # TypeIgnore(tag=string) - return self.generic_visit(node) - - ############################################################################ - # Miscellaneous # - ############################################################################ - - def visit_comprehension(self, node): - # comprehension(target=Expr, - # iter=Expr, - # ifs=[Expr_1, Expr_2, ..., Expr_n], - # is_async=0) - return self.generic_visit(node) - - def visit_arguments(self, node): - # arguments(posonlyargs=[arg_1, arg_2, ..., arg_a], - # args=[arg_1, arg_2, ..., arg_b], - # vararg=arg, - # kwonlyargs=[arg_1, arg_2, ..., arg_c], - # kw_defaults=[arg_1, arg_2, ..., arg_d], - # kwarg=arg, - # defaults=[Expr_1, Expr_2, ..., Expr_n]) - return self.generic_visit(node) - - def visit_arg(self, node): - # arg(arg=Identifier, - # annotation=Expr, - # type_comment='') - tokens = self._GetTokens(node) - - # Process any annotations. 
- if hasattr(node, 'annotation') and node.annotation: - annotation = node.annotation - subrange = pyutils.GetTokensInSubRange(tokens, annotation) - _IncreasePenalty(subrange, split_penalty.ANNOTATION) - - return self.generic_visit(node) - - def visit_keyword(self, node): - # keyword(arg=Identifier, - # value=Expr) - return self.generic_visit(node) - - def visit_alias(self, node): - # alias(name=Identifier, - # asname=Identifier) - return self.generic_visit(node) - - def visit_withitem(self, node): - # withitem(context_expr=Expr, - # optional_vars=Expr) - return self.generic_visit(node) - - def visit_match_case(self, node): - # match_case(pattern=pattern, - # guard=Expr, - # body=[...]) - return self.generic_visit(node) - - -def _IncreasePenalty(tokens, amt): - if not isinstance(tokens, list): - tokens = [tokens] - for token in tokens: - token.split_penalty += amt - - -def _DecreasePenalty(tokens, amt): - if not isinstance(tokens, list): - tokens = [tokens] - for token in tokens: - token.split_penalty -= amt - - -def _SetPenalty(tokens, amt): - if not isinstance(tokens, list): - tokens = [tokens] - for token in tokens: - token.split_penalty = amt + def visit_Sub( self, node ): + # Sub() + return self.generic_visit( node ) + + def visit_Mult( self, node ): + # Mult() + return self.generic_visit( node ) + + def visit_MatMult( self, node ): + # MatMult() + return self.generic_visit( node ) + + def visit_Div( self, node ): + # Div() + return self.generic_visit( node ) + + def visit_Mod( self, node ): + # Mod() + return self.generic_visit( node ) + + def visit_Pow( self, node ): + # Pow() + return self.generic_visit( node ) + + def visit_LShift( self, node ): + # LShift() + return self.generic_visit( node ) + + def visit_RShift( self, node ): + # RShift() + return self.generic_visit( node ) + + def visit_BitOr( self, node ): + # BitOr() + return self.generic_visit( node ) + + def visit_BitXor( self, node ): + # BitXor() + return self.generic_visit( node ) + + def 
visit_BitAnd( self, node ): + # BitAnd() + return self.generic_visit( node ) + + def visit_FloorDiv( self, node ): + # FloorDiv() + return self.generic_visit( node ) + + ############################################################################ + # Unary Operators # + ############################################################################ + + def visit_Invert( self, node ): + # Invert() + return self.generic_visit( node ) + + def visit_Not( self, node ): + # Not() + return self.generic_visit( node ) + + def visit_UAdd( self, node ): + # UAdd() + return self.generic_visit( node ) + + def visit_USub( self, node ): + # USub() + return self.generic_visit( node ) + + ############################################################################ + # Comparison Operators # + ############################################################################ + + def visit_Eq( self, node ): + # Eq() + return self.generic_visit( node ) + + def visit_NotEq( self, node ): + # NotEq() + return self.generic_visit( node ) + + def visit_Lt( self, node ): + # Lt() + return self.generic_visit( node ) + + def visit_LtE( self, node ): + # LtE() + return self.generic_visit( node ) + + def visit_Gt( self, node ): + # Gt() + return self.generic_visit( node ) + + def visit_GtE( self, node ): + # GtE() + return self.generic_visit( node ) + + def visit_Is( self, node ): + # Is() + return self.generic_visit( node ) + + def visit_IsNot( self, node ): + # IsNot() + return self.generic_visit( node ) + + def visit_In( self, node ): + # In() + return self.generic_visit( node ) + + def visit_NotIn( self, node ): + # NotIn() + return self.generic_visit( node ) + + ############################################################################ + # Exception Handler # + ############################################################################ + + def visit_ExceptionHandler( self, node ): + # ExceptHandler(type=Expr, + # name=Identifier, + # body=[...]) + return self.generic_visit( node ) + + 
############################################################################ + # Matching Patterns # + ############################################################################ + + def visit_MatchValue( self, node ): + # MatchValue(value=Expr) + return self.generic_visit( node ) + + def visit_MatchSingleton( self, node ): + # MatchSingleton(value=Constant) + return self.generic_visit( node ) + + def visit_MatchSequence( self, node ): + # MatchSequence(patterns=[pattern_1, pattern_2, ..., pattern_n]) + return self.generic_visit( node ) + + def visit_MatchMapping( self, node ): + # MatchMapping(keys=[Expr_1, Expr_2, ..., Expr_n], + # patterns=[pattern_1, pattern_2, ..., pattern_m], + # rest=Identifier) + return self.generic_visit( node ) + + def visit_MatchClass( self, node ): + # MatchClass(cls=Expr, + # patterns=[pattern_1, pattern_2, ...], + # kwd_attrs=[Identifier_1, Identifier_2, ...], + # kwd_patterns=[pattern_1, pattern_2, ...]) + return self.generic_visit( node ) + + def visit_MatchStar( self, node ): + # MatchStar(name=Identifier) + return self.generic_visit( node ) + + def visit_MatchAs( self, node ): + # MatchAs(pattern=pattern, + # name=Identifier) + return self.generic_visit( node ) + + def visit_MatchOr( self, node ): + # MatchOr(patterns=[pattern_1, pattern_2, ...]) + return self.generic_visit( node ) + + ############################################################################ + # Type Ignore # + ############################################################################ + + def visit_TypeIgnore( self, node ): + # TypeIgnore(tag=string) + return self.generic_visit( node ) + + ############################################################################ + # Miscellaneous # + ############################################################################ + + def visit_comprehension( self, node ): + # comprehension(target=Expr, + # iter=Expr, + # ifs=[Expr_1, Expr_2, ..., Expr_n], + # is_async=0) + return self.generic_visit( node ) + + def 
visit_arguments( self, node ): + # arguments(posonlyargs=[arg_1, arg_2, ..., arg_a], + # args=[arg_1, arg_2, ..., arg_b], + # vararg=arg, + # kwonlyargs=[arg_1, arg_2, ..., arg_c], + # kw_defaults=[arg_1, arg_2, ..., arg_d], + # kwarg=arg, + # defaults=[Expr_1, Expr_2, ..., Expr_n]) + return self.generic_visit( node ) + + def visit_arg( self, node ): + # arg(arg=Identifier, + # annotation=Expr, + # type_comment='') + tokens = self._GetTokens( node ) + + # Process any annotations. + if hasattr( node, 'annotation' ) and node.annotation: + annotation = node.annotation + subrange = pyutils.GetTokensInSubRange( tokens, annotation ) + _IncreasePenalty( subrange, split_penalty.ANNOTATION ) + + return self.generic_visit( node ) + + def visit_keyword( self, node ): + # keyword(arg=Identifier, + # value=Expr) + return self.generic_visit( node ) + + def visit_alias( self, node ): + # alias(name=Identifier, + # asname=Identifier) + return self.generic_visit( node ) + + def visit_withitem( self, node ): + # withitem(context_expr=Expr, + # optional_vars=Expr) + return self.generic_visit( node ) + + def visit_match_case( self, node ): + # match_case(pattern=pattern, + # guard=Expr, + # body=[...]) + return self.generic_visit( node ) + + +def _IncreasePenalty( tokens, amt ): + if not isinstance( tokens, list ): + tokens = [ tokens ] + for token in tokens: + token.split_penalty += amt + + +def _DecreasePenalty( tokens, amt ): + if not isinstance( tokens, list ): + tokens = [ tokens ] + for token in tokens: + token.split_penalty -= amt + + +def _SetPenalty( tokens, amt ): + if not isinstance( tokens, list ): + tokens = [ tokens ] + for token in tokens: + token.split_penalty = amt diff --git a/yapf/pytree/blank_line_calculator.py b/yapf/pytree/blank_line_calculator.py index 9d218bf97..141306e07 100644 --- a/yapf/pytree/blank_line_calculator.py +++ b/yapf/pytree/blank_line_calculator.py @@ -29,84 +29,84 @@ from yapf.yapflib import py3compat from yapf.yapflib import style 
-_NO_BLANK_LINES = 1 -_ONE_BLANK_LINE = 2 +_NO_BLANK_LINES = 1 +_ONE_BLANK_LINE = 2 _TWO_BLANK_LINES = 3 -_PYTHON_STATEMENTS = frozenset({ - 'small_stmt', 'expr_stmt', 'print_stmt', 'del_stmt', 'pass_stmt', - 'break_stmt', 'continue_stmt', 'return_stmt', 'raise_stmt', 'yield_stmt', - 'import_stmt', 'global_stmt', 'exec_stmt', 'assert_stmt', 'if_stmt', - 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', 'nonlocal_stmt', - 'async_stmt', 'simple_stmt' -}) +_PYTHON_STATEMENTS = frozenset( + { + 'small_stmt', 'expr_stmt', 'print_stmt', 'del_stmt', 'pass_stmt', 'break_stmt', + 'continue_stmt', 'return_stmt', 'raise_stmt', 'yield_stmt', 'import_stmt', + 'global_stmt', 'exec_stmt', 'assert_stmt', 'if_stmt', 'while_stmt', 'for_stmt', + 'try_stmt', 'with_stmt', 'nonlocal_stmt', 'async_stmt', 'simple_stmt' + } ) -def CalculateBlankLines(tree): - """Run the blank line calculator visitor over the tree. +def CalculateBlankLines( tree ): + """Run the blank line calculator visitor over the tree. This modifies the tree in place. Arguments: tree: the top-level pytree node to annotate with subtypes. 
""" - blank_line_calculator = _BlankLineCalculator() - blank_line_calculator.Visit(tree) - - -class _BlankLineCalculator(pytree_visitor.PyTreeVisitor): - """_BlankLineCalculator - see file-level docstring for a description.""" - - def __init__(self): - self.class_level = 0 - self.function_level = 0 - self.last_comment_lineno = 0 - self.last_was_decorator = False - self.last_was_class_or_function = False - - def Visit_simple_stmt(self, node): # pylint: disable=invalid-name - self.DefaultNodeVisit(node) - if node.children[0].type == grammar_token.COMMENT: - self.last_comment_lineno = node.children[0].lineno - - def Visit_decorator(self, node): # pylint: disable=invalid-name - if (self.last_comment_lineno and - self.last_comment_lineno == node.children[0].lineno - 1): - _SetNumNewlines(node.children[0], _NO_BLANK_LINES) - else: - _SetNumNewlines(node.children[0], self._GetNumNewlines(node)) - for child in node.children: - self.Visit(child) - self.last_was_decorator = True - - def Visit_classdef(self, node): # pylint: disable=invalid-name - self.last_was_class_or_function = False - index = self._SetBlankLinesBetweenCommentAndClassFunc(node) - self.last_was_decorator = False - self.class_level += 1 - for child in node.children[index:]: - self.Visit(child) - self.class_level -= 1 - self.last_was_class_or_function = True - - def Visit_funcdef(self, node): # pylint: disable=invalid-name - self.last_was_class_or_function = False - index = self._SetBlankLinesBetweenCommentAndClassFunc(node) - if _AsyncFunction(node): - index = self._SetBlankLinesBetweenCommentAndClassFunc( - node.prev_sibling.parent) - _SetNumNewlines(node.children[0], None) - else: - index = self._SetBlankLinesBetweenCommentAndClassFunc(node) - self.last_was_decorator = False - self.function_level += 1 - for child in node.children[index:]: - self.Visit(child) - self.function_level -= 1 - self.last_was_class_or_function = True - - def DefaultNodeVisit(self, node): - """Override the default visitor for Node. 
+ blank_line_calculator = _BlankLineCalculator() + blank_line_calculator.Visit( tree ) + + +class _BlankLineCalculator( pytree_visitor.PyTreeVisitor ): + """_BlankLineCalculator - see file-level docstring for a description.""" + + def __init__( self ): + self.class_level = 0 + self.function_level = 0 + self.last_comment_lineno = 0 + self.last_was_decorator = False + self.last_was_class_or_function = False + + def Visit_simple_stmt( self, node ): # pylint: disable=invalid-name + self.DefaultNodeVisit( node ) + if node.children[ 0 ].type == grammar_token.COMMENT: + self.last_comment_lineno = node.children[ 0 ].lineno + + def Visit_decorator( self, node ): # pylint: disable=invalid-name + if ( self.last_comment_lineno and + self.last_comment_lineno == node.children[ 0 ].lineno - 1 ): + _SetNumNewlines( node.children[ 0 ], _NO_BLANK_LINES ) + else: + _SetNumNewlines( node.children[ 0 ], self._GetNumNewlines( node ) ) + for child in node.children: + self.Visit( child ) + self.last_was_decorator = True + + def Visit_classdef( self, node ): # pylint: disable=invalid-name + self.last_was_class_or_function = False + index = self._SetBlankLinesBetweenCommentAndClassFunc( node ) + self.last_was_decorator = False + self.class_level += 1 + for child in node.children[ index : ]: + self.Visit( child ) + self.class_level -= 1 + self.last_was_class_or_function = True + + def Visit_funcdef( self, node ): # pylint: disable=invalid-name + self.last_was_class_or_function = False + index = self._SetBlankLinesBetweenCommentAndClassFunc( node ) + if _AsyncFunction( node ): + index = self._SetBlankLinesBetweenCommentAndClassFunc( + node.prev_sibling.parent ) + _SetNumNewlines( node.children[ 0 ], None ) + else: + index = self._SetBlankLinesBetweenCommentAndClassFunc( node ) + self.last_was_decorator = False + self.function_level += 1 + for child in node.children[ index : ]: + self.Visit( child ) + self.function_level -= 1 + self.last_was_class_or_function = True + + def DefaultNodeVisit( 
self, node ): + """Override the default visitor for Node. This will set the blank lines required if the last entity was a class or function. @@ -114,15 +114,15 @@ def DefaultNodeVisit(self, node): Arguments: node: (pytree.Node) The node to visit. """ - if self.last_was_class_or_function: - if pytree_utils.NodeName(node) in _PYTHON_STATEMENTS: - leaf = pytree_utils.FirstLeafNode(node) - _SetNumNewlines(leaf, self._GetNumNewlines(leaf)) - self.last_was_class_or_function = False - super(_BlankLineCalculator, self).DefaultNodeVisit(node) + if self.last_was_class_or_function: + if pytree_utils.NodeName( node ) in _PYTHON_STATEMENTS: + leaf = pytree_utils.FirstLeafNode( node ) + _SetNumNewlines( leaf, self._GetNumNewlines( leaf ) ) + self.last_was_class_or_function = False + super( _BlankLineCalculator, self ).DefaultNodeVisit( node ) - def _SetBlankLinesBetweenCommentAndClassFunc(self, node): - """Set the number of blanks between a comment and class or func definition. + def _SetBlankLinesBetweenCommentAndClassFunc( self, node ): + """Set the number of blanks between a comment and class or func definition. Class and function definitions have leading comments as children of the classdef and functdef nodes. @@ -133,47 +133,50 @@ def _SetBlankLinesBetweenCommentAndClassFunc(self, node): Returns: The index of the first child past the comment nodes. """ - index = 0 - while pytree_utils.IsCommentStatement(node.children[index]): - # Standalone comments are wrapped in a simple_stmt node with the comment - # node as its only child. 
- self.Visit(node.children[index].children[0]) - if not self.last_was_decorator: - _SetNumNewlines(node.children[index].children[0], _ONE_BLANK_LINE) - index += 1 - if (index and node.children[index].lineno - 1 - == node.children[index - 1].children[0].lineno): - _SetNumNewlines(node.children[index], _NO_BLANK_LINES) - else: - if self.last_comment_lineno + 1 == node.children[index].lineno: - num_newlines = _NO_BLANK_LINES - else: - num_newlines = self._GetNumNewlines(node) - _SetNumNewlines(node.children[index], num_newlines) - return index - - def _GetNumNewlines(self, node): - if self.last_was_decorator: - return _NO_BLANK_LINES - elif self._IsTopLevel(node): - return 1 + style.Get('BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION') - return _ONE_BLANK_LINE - - def _IsTopLevel(self, node): - return (not (self.class_level or self.function_level) and - _StartsInZerothColumn(node)) - - -def _SetNumNewlines(node, num_newlines): - pytree_utils.SetNodeAnnotation(node, pytree_utils.Annotation.NEWLINES, - num_newlines) - - -def _StartsInZerothColumn(node): - return (pytree_utils.FirstLeafNode(node).column == 0 or - (_AsyncFunction(node) and node.prev_sibling.column == 0)) - - -def _AsyncFunction(node): - return (py3compat.PY3 and node.prev_sibling and - node.prev_sibling.type == grammar_token.ASYNC) + index = 0 + while pytree_utils.IsCommentStatement( node.children[ index ] ): + # Standalone comments are wrapped in a simple_stmt node with the comment + # node as its only child. 
+ self.Visit( node.children[ index ].children[ 0 ] ) + if not self.last_was_decorator: + _SetNumNewlines( node.children[ index ].children[ 0 ], _ONE_BLANK_LINE ) + index += 1 + if ( index and node.children[ index ].lineno - 1 + == node.children[ index - 1 ].children[ 0 ].lineno ): + _SetNumNewlines( node.children[ index ], _NO_BLANK_LINES ) + else: + if self.last_comment_lineno + 1 == node.children[ index ].lineno: + num_newlines = _NO_BLANK_LINES + else: + num_newlines = self._GetNumNewlines( node ) + _SetNumNewlines( node.children[ index ], num_newlines ) + return index + + def _GetNumNewlines( self, node ): + if self.last_was_decorator: + return _NO_BLANK_LINES + elif self._IsTopLevel( node ): + return 1 + style.Get( 'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION' ) + return _ONE_BLANK_LINE + + def _IsTopLevel( self, node ): + return ( + not ( self.class_level or self.function_level ) and + _StartsInZerothColumn( node ) ) + + +def _SetNumNewlines( node, num_newlines ): + pytree_utils.SetNodeAnnotation( + node, pytree_utils.Annotation.NEWLINES, num_newlines ) + + +def _StartsInZerothColumn( node ): + return ( + pytree_utils.FirstLeafNode( node ).column == 0 or + ( _AsyncFunction( node ) and node.prev_sibling.column == 0 ) ) + + +def _AsyncFunction( node ): + return ( + py3compat.PY3 and node.prev_sibling and + node.prev_sibling.type == grammar_token.ASYNC ) diff --git a/yapf/pytree/comment_splicer.py b/yapf/pytree/comment_splicer.py index ae5ffe66f..33706ae47 100644 --- a/yapf/pytree/comment_splicer.py +++ b/yapf/pytree/comment_splicer.py @@ -28,8 +28,8 @@ from yapf.pytree import pytree_utils -def SpliceComments(tree): - """Given a pytree, splice comments into nodes of their own right. +def SpliceComments( tree ): + """Given a pytree, splice comments into nodes of their own right. Extract comments from the prefixes where they are housed after parsing. The prefixes that previously housed the comments become empty. 
@@ -38,176 +38,189 @@ def SpliceComments(tree): tree: a pytree.Node - the tree to work on. The tree is modified by this function. """ - # The previous leaf node encountered in the traversal. - # This is a list because Python 2.x doesn't have 'nonlocal' :) - prev_leaf = [None] - _AnnotateIndents(tree) - - def _VisitNodeRec(node): - """Recursively visit each node to splice comments into the AST.""" - # This loop may insert into node.children, so we'll iterate over a copy. - for child in node.children[:]: - if isinstance(child, pytree.Node): - # Nodes don't have prefixes. - _VisitNodeRec(child) - else: - if child.prefix.lstrip().startswith('#'): - # We have a comment prefix in this child, so splicing is needed. - comment_prefix = child.prefix - comment_lineno = child.lineno - comment_prefix.count('\n') - comment_column = child.column - - # Remember the leading indentation of this prefix and clear it. - # Mopping up the prefix is important because we may go over this same - # child in the next iteration... - child_prefix = child.prefix.lstrip('\n') - prefix_indent = child_prefix[:child_prefix.find('#')] - if '\n' in prefix_indent: - prefix_indent = prefix_indent[prefix_indent.rfind('\n') + 1:] - child.prefix = '' - - if child.type == token.NEWLINE: - # If the prefix was on a NEWLINE leaf, it's part of the line so it - # will be inserted after the previously encountered leaf. - # We can't just insert it before the NEWLINE node, because as a - # result of the way pytrees are organized, this node can be under - # an inappropriate parent. - comment_column -= len(comment_prefix.lstrip()) - pytree_utils.InsertNodesAfter( - _CreateCommentsFromPrefix( - comment_prefix, - comment_lineno, - comment_column, - standalone=False), prev_leaf[0]) - elif child.type == token.DEDENT: - # Comment prefixes on DEDENT nodes also deserve special treatment, - # because their final placement depends on their prefix. 
- # We'll look for an ancestor of this child with a matching - # indentation, and insert the comment before it if the ancestor is - # on a DEDENT node and after it otherwise. - # - # lib2to3 places comments that should be separated into the same - # DEDENT node. For example, "comment 1" and "comment 2" will be - # combined. - # - # def _(): - # for x in y: - # pass - # # comment 1 - # - # # comment 2 - # pass - # - # In this case, we need to split them up ourselves. - - # Split into groups of comments at decreasing levels of indentation - comment_groups = [] - comment_column = None - for cmt in comment_prefix.split('\n'): - col = cmt.find('#') - if col < 0: - if comment_column is None: - # Skip empty lines at the top of the first comment group - comment_lineno += 1 - continue - elif comment_column is None or col < comment_column: - comment_column = col - comment_indent = cmt[:comment_column] - comment_groups.append((comment_column, comment_indent, [])) - comment_groups[-1][-1].append(cmt) - - # Insert a node for each group - for comment_column, comment_indent, comment_group in comment_groups: - ancestor_at_indent = _FindAncestorAtIndent(child, comment_indent) - if ancestor_at_indent.type == token.DEDENT: - InsertNodes = pytree_utils.InsertNodesBefore # pylint: disable=invalid-name # noqa - else: - InsertNodes = pytree_utils.InsertNodesAfter # pylint: disable=invalid-name # noqa - InsertNodes( - _CreateCommentsFromPrefix( - '\n'.join(comment_group) + '\n', - comment_lineno, - comment_column, - standalone=True), ancestor_at_indent) - comment_lineno += len(comment_group) - else: - # Otherwise there are two cases. - # - # 1. The comment is on its own line - # 2. The comment is part of an expression. - # - # Unfortunately, it's fairly difficult to distinguish between the - # two in lib2to3 trees. The algorithm here is to determine whether - # child is the first leaf in the statement it belongs to. 
If it is, - # then the comment (which is a prefix) belongs on a separate line. - # If it is not, it means the comment is buried deep in the statement - # and is part of some expression. - stmt_parent = _FindStmtParent(child) - - for leaf_in_parent in stmt_parent.leaves(): - if leaf_in_parent.type == token.NEWLINE: - continue - elif id(leaf_in_parent) == id(child): - # This comment stands on its own line, and it has to be inserted - # into the appropriate parent. We'll have to find a suitable - # parent to insert into. See comments above - # _STANDALONE_LINE_NODES for more details. - node_with_line_parent = _FindNodeWithStandaloneLineParent(child) - - if pytree_utils.NodeName( - node_with_line_parent.parent) in {'funcdef', 'classdef'}: - # Keep a comment that's not attached to a function or class - # next to the object it is attached to. - comment_end = ( - comment_lineno + comment_prefix.rstrip('\n').count('\n')) - if comment_end < node_with_line_parent.lineno - 1: - node_with_line_parent = node_with_line_parent.parent - - pytree_utils.InsertNodesBefore( - _CreateCommentsFromPrefix( - comment_prefix, comment_lineno, 0, standalone=True), - node_with_line_parent) - break - else: - if comment_lineno == prev_leaf[0].lineno: - comment_lines = comment_prefix.splitlines() - value = comment_lines[0].lstrip() - if value.rstrip('\n'): - comment_column = prev_leaf[0].column - comment_column += len(prev_leaf[0].value) - comment_column += ( - len(comment_lines[0]) - len(comment_lines[0].lstrip())) - comment_leaf = pytree.Leaf( - type=token.COMMENT, - value=value.rstrip('\n'), - context=('', (comment_lineno, comment_column))) - pytree_utils.InsertNodesAfter([comment_leaf], prev_leaf[0]) - comment_prefix = '\n'.join(comment_lines[1:]) - comment_lineno += 1 - - rindex = (0 if '\n' not in comment_prefix.rstrip() else - comment_prefix.rstrip().rindex('\n') + 1) - comment_column = ( - len(comment_prefix[rindex:]) - - len(comment_prefix[rindex:].lstrip())) - comments = 
_CreateCommentsFromPrefix( - comment_prefix, - comment_lineno, - comment_column, - standalone=False) - pytree_utils.InsertNodesBefore(comments, child) - break - - prev_leaf[0] = child - - _VisitNodeRec(tree) - - -def _CreateCommentsFromPrefix(comment_prefix, - comment_lineno, - comment_column, - standalone=False): - """Create pytree nodes to represent the given comment prefix. + # The previous leaf node encountered in the traversal. + # This is a list because Python 2.x doesn't have 'nonlocal' :) + prev_leaf = [ None ] + _AnnotateIndents( tree ) + + def _VisitNodeRec( node ): + """Recursively visit each node to splice comments into the AST.""" + # This loop may insert into node.children, so we'll iterate over a copy. + for child in node.children[ : ]: + if isinstance( child, pytree.Node ): + # Nodes don't have prefixes. + _VisitNodeRec( child ) + else: + if child.prefix.lstrip().startswith( '#' ): + # We have a comment prefix in this child, so splicing is needed. + comment_prefix = child.prefix + comment_lineno = child.lineno - comment_prefix.count( '\n' ) + comment_column = child.column + + # Remember the leading indentation of this prefix and clear it. + # Mopping up the prefix is important because we may go over this same + # child in the next iteration... + child_prefix = child.prefix.lstrip( '\n' ) + prefix_indent = child_prefix[ : child_prefix.find( '#' ) ] + if '\n' in prefix_indent: + prefix_indent = prefix_indent[ prefix_indent.rfind( '\n' ) + + 1 : ] + child.prefix = '' + + if child.type == token.NEWLINE: + # If the prefix was on a NEWLINE leaf, it's part of the line so it + # will be inserted after the previously encountered leaf. + # We can't just insert it before the NEWLINE node, because as a + # result of the way pytrees are organized, this node can be under + # an inappropriate parent. 
+ comment_column -= len( comment_prefix.lstrip() ) + pytree_utils.InsertNodesAfter( + _CreateCommentsFromPrefix( + comment_prefix, + comment_lineno, + comment_column, + standalone = False ), prev_leaf[ 0 ] ) + elif child.type == token.DEDENT: + # Comment prefixes on DEDENT nodes also deserve special treatment, + # because their final placement depends on their prefix. + # We'll look for an ancestor of this child with a matching + # indentation, and insert the comment before it if the ancestor is + # on a DEDENT node and after it otherwise. + # + # lib2to3 places comments that should be separated into the same + # DEDENT node. For example, "comment 1" and "comment 2" will be + # combined. + # + # def _(): + # for x in y: + # pass + # # comment 1 + # + # # comment 2 + # pass + # + # In this case, we need to split them up ourselves. + + # Split into groups of comments at decreasing levels of indentation + comment_groups = [] + comment_column = None + for cmt in comment_prefix.split( '\n' ): + col = cmt.find( '#' ) + if col < 0: + if comment_column is None: + # Skip empty lines at the top of the first comment group + comment_lineno += 1 + continue + elif comment_column is None or col < comment_column: + comment_column = col + comment_indent = cmt[ : comment_column ] + comment_groups.append( + ( comment_column, comment_indent, [] ) ) + comment_groups[ -1 ][ -1 ].append( cmt ) + + # Insert a node for each group + for comment_column, comment_indent, comment_group in comment_groups: + ancestor_at_indent = _FindAncestorAtIndent( + child, comment_indent ) + if ancestor_at_indent.type == token.DEDENT: + InsertNodes = pytree_utils.InsertNodesBefore # pylint: disable=invalid-name # noqa + else: + InsertNodes = pytree_utils.InsertNodesAfter # pylint: disable=invalid-name # noqa + InsertNodes( + _CreateCommentsFromPrefix( + '\n'.join( comment_group ) + '\n', + comment_lineno, + comment_column, + standalone = True ), ancestor_at_indent ) + comment_lineno += len( comment_group ) + 
else: + # Otherwise there are two cases. + # + # 1. The comment is on its own line + # 2. The comment is part of an expression. + # + # Unfortunately, it's fairly difficult to distinguish between the + # two in lib2to3 trees. The algorithm here is to determine whether + # child is the first leaf in the statement it belongs to. If it is, + # then the comment (which is a prefix) belongs on a separate line. + # If it is not, it means the comment is buried deep in the statement + # and is part of some expression. + stmt_parent = _FindStmtParent( child ) + + for leaf_in_parent in stmt_parent.leaves(): + if leaf_in_parent.type == token.NEWLINE: + continue + elif id( leaf_in_parent ) == id( child ): + # This comment stands on its own line, and it has to be inserted + # into the appropriate parent. We'll have to find a suitable + # parent to insert into. See comments above + # _STANDALONE_LINE_NODES for more details. + node_with_line_parent = _FindNodeWithStandaloneLineParent( + child ) + + if pytree_utils.NodeName( + node_with_line_parent.parent ) in { 'funcdef', + 'classdef' + }: + # Keep a comment that's not attached to a function or class + # next to the object it is attached to. 
+ comment_end = ( + comment_lineno + + comment_prefix.rstrip( '\n' ).count( '\n' ) ) + if comment_end < node_with_line_parent.lineno - 1: + node_with_line_parent = node_with_line_parent.parent + + pytree_utils.InsertNodesBefore( + _CreateCommentsFromPrefix( + comment_prefix, + comment_lineno, + 0, + standalone = True ), node_with_line_parent ) + break + else: + if comment_lineno == prev_leaf[ 0 ].lineno: + comment_lines = comment_prefix.splitlines() + value = comment_lines[ 0 ].lstrip() + if value.rstrip( '\n' ): + comment_column = prev_leaf[ 0 ].column + comment_column += len( prev_leaf[ 0 ].value ) + comment_column += ( + len( comment_lines[ 0 ] ) - + len( comment_lines[ 0 ].lstrip() ) ) + comment_leaf = pytree.Leaf( + type = token.COMMENT, + value = value.rstrip( '\n' ), + context = ( + '', ( comment_lineno, + comment_column ) ) ) + pytree_utils.InsertNodesAfter( + [ comment_leaf ], prev_leaf[ 0 ] ) + comment_prefix = '\n'.join( + comment_lines[ 1 : ] ) + comment_lineno += 1 + + rindex = ( + 0 if '\n' not in comment_prefix.rstrip() else + comment_prefix.rstrip().rindex( '\n' ) + 1 ) + comment_column = ( + len( comment_prefix[ rindex : ] ) - + len( comment_prefix[ rindex : ].lstrip() ) ) + comments = _CreateCommentsFromPrefix( + comment_prefix, + comment_lineno, + comment_column, + standalone = False ) + pytree_utils.InsertNodesBefore( comments, child ) + break + + prev_leaf[ 0 ] = child + + _VisitNodeRec( tree ) + + +def _CreateCommentsFromPrefix( + comment_prefix, comment_lineno, comment_column, standalone = False ): + """Create pytree nodes to represent the given comment prefix. Args: comment_prefix: (unicode) the text of the comment from the node's prefix. @@ -220,35 +233,35 @@ def _CreateCommentsFromPrefix(comment_prefix, new COMMENT leafs. The prefix may consist of multiple comment blocks, separated by blank lines. Each block gets its own leaf. """ - # The comment is stored in the prefix attribute, with no lineno of its - # own. 
So we only know at which line it ends. To find out at which line it - # starts, look at how many newlines the comment itself contains. - comments = [] - - lines = comment_prefix.split('\n') - index = 0 - while index < len(lines): - comment_block = [] - while index < len(lines) and lines[index].lstrip().startswith('#'): - comment_block.append(lines[index].strip()) - index += 1 - - if comment_block: - new_lineno = comment_lineno + index - 1 - comment_block[0] = comment_block[0].strip() - comment_block[-1] = comment_block[-1].strip() - comment_leaf = pytree.Leaf( - type=token.COMMENT, - value='\n'.join(comment_block), - context=('', (new_lineno, comment_column))) - comment_node = comment_leaf if not standalone else pytree.Node( - pygram.python_symbols.simple_stmt, [comment_leaf]) - comments.append(comment_node) - - while index < len(lines) and not lines[index].lstrip(): - index += 1 - - return comments + # The comment is stored in the prefix attribute, with no lineno of its + # own. So we only know at which line it ends. To find out at which line it + # starts, look at how many newlines the comment itself contains. 
+ comments = [] + + lines = comment_prefix.split( '\n' ) + index = 0 + while index < len( lines ): + comment_block = [] + while index < len( lines ) and lines[ index ].lstrip().startswith( '#' ): + comment_block.append( lines[ index ].strip() ) + index += 1 + + if comment_block: + new_lineno = comment_lineno + index - 1 + comment_block[ 0 ] = comment_block[ 0 ].strip() + comment_block[ -1 ] = comment_block[ -1 ].strip() + comment_leaf = pytree.Leaf( + type = token.COMMENT, + value = '\n'.join( comment_block ), + context = ( '', ( new_lineno, comment_column ) ) ) + comment_node = comment_leaf if not standalone else pytree.Node( + pygram.python_symbols.simple_stmt, [ comment_leaf ] ) + comments.append( comment_node ) + + while index < len( lines ) and not lines[ index ].lstrip(): + index += 1 + + return comments # "Standalone line nodes" are tree nodes that have to start a new line in Python @@ -262,14 +275,15 @@ def _CreateCommentsFromPrefix(comment_prefix, # line, not on the same line with other code), it's important to insert it into # an appropriate parent of the node it's attached to. An appropriate parent # is the first "standalone line node" in the parent chain of a node. -_STANDALONE_LINE_NODES = frozenset([ - 'suite', 'if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', - 'funcdef', 'classdef', 'decorated', 'file_input' -]) +_STANDALONE_LINE_NODES = frozenset( + [ + 'suite', 'if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', + 'funcdef', 'classdef', 'decorated', 'file_input' + ] ) -def _FindNodeWithStandaloneLineParent(node): - """Find a node whose parent is a 'standalone line' node. +def _FindNodeWithStandaloneLineParent( node ): + """Find a node whose parent is a 'standalone line' node. See the comment above _STANDALONE_LINE_NODES for more details. @@ -279,21 +293,21 @@ def _FindNodeWithStandaloneLineParent(node): Returns: Suitable node that's either the node itself or one of its ancestors. 
""" - if pytree_utils.NodeName(node.parent) in _STANDALONE_LINE_NODES: - return node - else: - # This is guaranteed to terminate because 'file_input' is the root node of - # any pytree. - return _FindNodeWithStandaloneLineParent(node.parent) + if pytree_utils.NodeName( node.parent ) in _STANDALONE_LINE_NODES: + return node + else: + # This is guaranteed to terminate because 'file_input' is the root node of + # any pytree. + return _FindNodeWithStandaloneLineParent( node.parent ) # "Statement nodes" are standalone statements. The don't have to start a new # line. -_STATEMENT_NODES = frozenset(['simple_stmt']) | _STANDALONE_LINE_NODES +_STATEMENT_NODES = frozenset( [ 'simple_stmt' ] ) | _STANDALONE_LINE_NODES -def _FindStmtParent(node): - """Find the nearest parent of node that is a statement node. +def _FindStmtParent( node ): + """Find the nearest parent of node that is a statement node. Arguments: node: node to start from @@ -301,14 +315,14 @@ def _FindStmtParent(node): Returns: Nearest parent (or node itself, if suitable). """ - if pytree_utils.NodeName(node) in _STATEMENT_NODES: - return node - else: - return _FindStmtParent(node.parent) + if pytree_utils.NodeName( node ) in _STATEMENT_NODES: + return node + else: + return _FindStmtParent( node.parent ) -def _FindAncestorAtIndent(node, indent): - """Find an ancestor of node with the given indentation. +def _FindAncestorAtIndent( node, indent ): + """Find an ancestor of node with the given indentation. Arguments: node: node to start from. This must not be the tree root. @@ -319,27 +333,27 @@ def _FindAncestorAtIndent(node, indent): An ancestor node with suitable indentation. If no suitable ancestor is found, the closest ancestor to the tree root is returned. """ - if node.parent.parent is None: - # Our parent is the tree root, so there's nowhere else to go. - return node - - # If the parent has an indent annotation, and it's shorter than node's - # indent, this is a suitable ancestor. 
- # The reason for "shorter" rather than "equal" is that comments may be - # improperly indented (i.e. by three spaces, where surrounding statements - # have either zero or two or four), and we don't want to propagate them all - # the way to the root. - parent_indent = pytree_utils.GetNodeAnnotation( - node.parent, pytree_utils.Annotation.CHILD_INDENT) - if parent_indent is not None and indent.startswith(parent_indent): - return node - else: - # Keep looking up the tree. - return _FindAncestorAtIndent(node.parent, indent) - - -def _AnnotateIndents(tree): - """Annotate the tree with child_indent annotations. + if node.parent.parent is None: + # Our parent is the tree root, so there's nowhere else to go. + return node + + # If the parent has an indent annotation, and it's shorter than node's + # indent, this is a suitable ancestor. + # The reason for "shorter" rather than "equal" is that comments may be + # improperly indented (i.e. by three spaces, where surrounding statements + # have either zero or two or four), and we don't want to propagate them all + # the way to the root. + parent_indent = pytree_utils.GetNodeAnnotation( + node.parent, pytree_utils.Annotation.CHILD_INDENT ) + if parent_indent is not None and indent.startswith( parent_indent ): + return node + else: + # Keep looking up the tree. + return _FindAncestorAtIndent( node.parent, indent ) + + +def _AnnotateIndents( tree ): + """Annotate the tree with child_indent annotations. A child_indent annotation on a node specifies the indentation (as a string, like " ") of its children. It is inferred from the INDENT child of a node. @@ -350,16 +364,16 @@ def _AnnotateIndents(tree): Raises: RuntimeError: if the tree is malformed. """ - # Annotate the root of the tree with zero indent. 
- if tree.parent is None: - pytree_utils.SetNodeAnnotation(tree, pytree_utils.Annotation.CHILD_INDENT, - '') - for child in tree.children: - if child.type == token.INDENT: - child_indent = pytree_utils.GetNodeAnnotation( - tree, pytree_utils.Annotation.CHILD_INDENT) - if child_indent is not None and child_indent != child.value: - raise RuntimeError('inconsistent indentation for child', (tree, child)) - pytree_utils.SetNodeAnnotation(tree, pytree_utils.Annotation.CHILD_INDENT, - child.value) - _AnnotateIndents(child) + # Annotate the root of the tree with zero indent. + if tree.parent is None: + pytree_utils.SetNodeAnnotation( tree, pytree_utils.Annotation.CHILD_INDENT, '' ) + for child in tree.children: + if child.type == token.INDENT: + child_indent = pytree_utils.GetNodeAnnotation( + tree, pytree_utils.Annotation.CHILD_INDENT ) + if child_indent is not None and child_indent != child.value: + raise RuntimeError( + 'inconsistent indentation for child', ( tree, child ) ) + pytree_utils.SetNodeAnnotation( + tree, pytree_utils.Annotation.CHILD_INDENT, child.value ) + _AnnotateIndents( child ) diff --git a/yapf/pytree/continuation_splicer.py b/yapf/pytree/continuation_splicer.py index b86188cb5..dea4de29f 100644 --- a/yapf/pytree/continuation_splicer.py +++ b/yapf/pytree/continuation_splicer.py @@ -24,29 +24,29 @@ from yapf.yapflib import format_token -def SpliceContinuations(tree): - """Given a pytree, splice the continuation marker into nodes. +def SpliceContinuations( tree ): + """Given a pytree, splice the continuation marker into nodes. Arguments: tree: (pytree.Node) The tree to work on. The tree is modified by this function. 
""" - def RecSplicer(node): - """Inserts a continuation marker into the node.""" - if isinstance(node, pytree.Leaf): - if node.prefix.lstrip().startswith('\\\n'): - new_lineno = node.lineno - node.prefix.count('\n') - return pytree.Leaf( - type=format_token.CONTINUATION, - value=node.prefix, - context=('', (new_lineno, 0))) - return None - num_inserted = 0 - for index, child in enumerate(node.children[:]): - continuation_node = RecSplicer(child) - if continuation_node: - node.children.insert(index + num_inserted, continuation_node) - num_inserted += 1 - - RecSplicer(tree) + def RecSplicer( node ): + """Inserts a continuation marker into the node.""" + if isinstance( node, pytree.Leaf ): + if node.prefix.lstrip().startswith( '\\\n' ): + new_lineno = node.lineno - node.prefix.count( '\n' ) + return pytree.Leaf( + type = format_token.CONTINUATION, + value = node.prefix, + context = ( '', ( new_lineno, 0 ) ) ) + return None + num_inserted = 0 + for index, child in enumerate( node.children[ : ] ): + continuation_node = RecSplicer( child ) + if continuation_node: + node.children.insert( index + num_inserted, continuation_node ) + num_inserted += 1 + + RecSplicer( tree ) diff --git a/yapf/pytree/pytree_unwrapper.py b/yapf/pytree/pytree_unwrapper.py index 3fe4ade08..89618066b 100644 --- a/yapf/pytree/pytree_unwrapper.py +++ b/yapf/pytree/pytree_unwrapper.py @@ -40,12 +40,12 @@ from yapf.yapflib import style from yapf.yapflib import subtypes -_OPENING_BRACKETS = frozenset({'(', '[', '{'}) -_CLOSING_BRACKETS = frozenset({')', ']', '}'}) +_OPENING_BRACKETS = frozenset( { '(', '[', '{' } ) +_CLOSING_BRACKETS = frozenset( { ')', ']', '}' } ) -def UnwrapPyTree(tree): - """Create and return a list of logical lines from the given pytree. +def UnwrapPyTree( tree ): + """Create and return a list of logical lines from the given pytree. Arguments: tree: the top-level pytree node to unwrap.. @@ -53,22 +53,23 @@ def UnwrapPyTree(tree): Returns: A list of LogicalLine objects. 
""" - unwrapper = PyTreeUnwrapper() - unwrapper.Visit(tree) - llines = unwrapper.GetLogicalLines() - llines.sort(key=lambda x: x.lineno) - return llines + unwrapper = PyTreeUnwrapper() + unwrapper.Visit( tree ) + llines = unwrapper.GetLogicalLines() + llines.sort( key = lambda x: x.lineno ) + return llines # Grammar tokens considered as whitespace for the purpose of unwrapping. -_WHITESPACE_TOKENS = frozenset([ - grammar_token.NEWLINE, grammar_token.DEDENT, grammar_token.INDENT, - grammar_token.ENDMARKER -]) +_WHITESPACE_TOKENS = frozenset( + [ + grammar_token.NEWLINE, grammar_token.DEDENT, grammar_token.INDENT, + grammar_token.ENDMARKER + ] ) -class PyTreeUnwrapper(pytree_visitor.PyTreeVisitor): - """PyTreeUnwrapper - see file-level docstring for detailed description. +class PyTreeUnwrapper( pytree_visitor.PyTreeVisitor ): + """PyTreeUnwrapper - see file-level docstring for detailed description. Note: since this implements PyTreeVisitor and node names in lib2to3 are underscore_separated, the visiting methods of this class are named as @@ -82,77 +83,78 @@ class PyTreeUnwrapper(pytree_visitor.PyTreeVisitor): familiarity with the Python grammar is required. """ - def __init__(self): - # A list of all logical lines finished visiting so far. - self._logical_lines = [] + def __init__( self ): + # A list of all logical lines finished visiting so far. + self._logical_lines = [] - # Builds up a "current" logical line while visiting pytree nodes. Some nodes - # will finish a line and start a new one. - self._cur_logical_line = logical_line.LogicalLine(0) + # Builds up a "current" logical line while visiting pytree nodes. Some nodes + # will finish a line and start a new one. + self._cur_logical_line = logical_line.LogicalLine( 0 ) - # Current indentation depth. - self._cur_depth = 0 + # Current indentation depth. + self._cur_depth = 0 - def GetLogicalLines(self): - """Fetch the result of the tree walk. + def GetLogicalLines( self ): + """Fetch the result of the tree walk. 
Note: only call this after visiting the whole tree. Returns: A list of LogicalLine objects. """ - # Make sure the last line that was being populated is flushed. - self._StartNewLine() - return self._logical_lines + # Make sure the last line that was being populated is flushed. + self._StartNewLine() + return self._logical_lines - def _StartNewLine(self): - """Finish current line and start a new one. + def _StartNewLine( self ): + """Finish current line and start a new one. Place the currently accumulated line into the _logical_lines list and start a new one. """ - if self._cur_logical_line.tokens: - self._logical_lines.append(self._cur_logical_line) - _MatchBrackets(self._cur_logical_line) - _IdentifyParameterLists(self._cur_logical_line) - _AdjustSplitPenalty(self._cur_logical_line) - self._cur_logical_line = logical_line.LogicalLine(self._cur_depth) - - _STMT_TYPES = frozenset({ - 'if_stmt', - 'while_stmt', - 'for_stmt', - 'try_stmt', - 'expect_clause', - 'with_stmt', - 'funcdef', - 'classdef', - }) - - # pylint: disable=invalid-name,missing-docstring - def Visit_simple_stmt(self, node): - # A 'simple_stmt' conveniently represents a non-compound Python statement, - # i.e. a statement that does not contain other statements. - - # When compound nodes have a single statement as their suite, the parser - # can leave it in the tree directly without creating a suite. But we have - # to increase depth in these cases as well. However, don't increase the - # depth of we have a simple_stmt that's a comment node. This represents a - # standalone comment and in the case of it coming directly after the - # funcdef, it is a "top" comment for the whole function. - # TODO(eliben): add more relevant compound statements here. 
- single_stmt_suite = ( - node.parent and pytree_utils.NodeName(node.parent) in self._STMT_TYPES) - is_comment_stmt = pytree_utils.IsCommentStatement(node) - if single_stmt_suite and not is_comment_stmt: - self._cur_depth += 1 - self._StartNewLine() - self.DefaultNodeVisit(node) - if single_stmt_suite and not is_comment_stmt: - self._cur_depth -= 1 - - def _VisitCompoundStatement(self, node, substatement_names): - """Helper for visiting compound statements. + if self._cur_logical_line.tokens: + self._logical_lines.append( self._cur_logical_line ) + _MatchBrackets( self._cur_logical_line ) + _IdentifyParameterLists( self._cur_logical_line ) + _AdjustSplitPenalty( self._cur_logical_line ) + self._cur_logical_line = logical_line.LogicalLine( self._cur_depth ) + + _STMT_TYPES = frozenset( + { + 'if_stmt', + 'while_stmt', + 'for_stmt', + 'try_stmt', + 'expect_clause', + 'with_stmt', + 'funcdef', + 'classdef', + } ) + + # pylint: disable=invalid-name,missing-docstring + def Visit_simple_stmt( self, node ): + # A 'simple_stmt' conveniently represents a non-compound Python statement, + # i.e. a statement that does not contain other statements. + + # When compound nodes have a single statement as their suite, the parser + # can leave it in the tree directly without creating a suite. But we have + # to increase depth in these cases as well. However, don't increase the + # depth of we have a simple_stmt that's a comment node. This represents a + # standalone comment and in the case of it coming directly after the + # funcdef, it is a "top" comment for the whole function. + # TODO(eliben): add more relevant compound statements here. 
+ single_stmt_suite = ( + node.parent and pytree_utils.NodeName( node.parent ) in self._STMT_TYPES ) + is_comment_stmt = pytree_utils.IsCommentStatement( node ) + if single_stmt_suite and not is_comment_stmt: + self._cur_depth += 1 + self._StartNewLine() + self.DefaultNodeVisit( node ) + if single_stmt_suite and not is_comment_stmt: + self._cur_depth -= 1 + + def _VisitCompoundStatement( self, node, substatement_names ): + """Helper for visiting compound statements. Python compound statements serve as containers for other statements. Thus, when we encounter a new compound statement, we start a new logical line. @@ -162,150 +164,150 @@ def _VisitCompoundStatement(self, node, substatement_names): substatement_names: set of node names. A compound statement will be recognized as a NAME node with a name in this set. """ - for child in node.children: - # A pytree is structured in such a way that a single 'if_stmt' node will - # contain all the 'if', 'elif' and 'else' nodes as children (similar - # structure applies to 'while' statements, 'try' blocks, etc). Therefore, - # we visit all children here and create a new line before the requested - # set of nodes. - if (child.type == grammar_token.NAME and - child.value in substatement_names): - self._StartNewLine() - self.Visit(child) + for child in node.children: + # A pytree is structured in such a way that a single 'if_stmt' node will + # contain all the 'if', 'elif' and 'else' nodes as children (similar + # structure applies to 'while' statements, 'try' blocks, etc). Therefore, + # we visit all children here and create a new line before the requested + # set of nodes. 
+ if ( child.type == grammar_token.NAME and + child.value in substatement_names ): + self._StartNewLine() + self.Visit( child ) - _IF_STMT_ELEMS = frozenset({'if', 'else', 'elif'}) + _IF_STMT_ELEMS = frozenset( { 'if', 'else', 'elif' } ) - def Visit_if_stmt(self, node): # pylint: disable=invalid-name - self._VisitCompoundStatement(node, self._IF_STMT_ELEMS) + def Visit_if_stmt( self, node ): # pylint: disable=invalid-name + self._VisitCompoundStatement( node, self._IF_STMT_ELEMS ) - _WHILE_STMT_ELEMS = frozenset({'while', 'else'}) + _WHILE_STMT_ELEMS = frozenset( { 'while', 'else' } ) - def Visit_while_stmt(self, node): # pylint: disable=invalid-name - self._VisitCompoundStatement(node, self._WHILE_STMT_ELEMS) + def Visit_while_stmt( self, node ): # pylint: disable=invalid-name + self._VisitCompoundStatement( node, self._WHILE_STMT_ELEMS ) - _FOR_STMT_ELEMS = frozenset({'for', 'else'}) + _FOR_STMT_ELEMS = frozenset( { 'for', 'else' } ) - def Visit_for_stmt(self, node): # pylint: disable=invalid-name - self._VisitCompoundStatement(node, self._FOR_STMT_ELEMS) + def Visit_for_stmt( self, node ): # pylint: disable=invalid-name + self._VisitCompoundStatement( node, self._FOR_STMT_ELEMS ) - _TRY_STMT_ELEMS = frozenset({'try', 'except', 'else', 'finally'}) + _TRY_STMT_ELEMS = frozenset( { 'try', 'except', 'else', 'finally' } ) - def Visit_try_stmt(self, node): # pylint: disable=invalid-name - self._VisitCompoundStatement(node, self._TRY_STMT_ELEMS) + def Visit_try_stmt( self, node ): # pylint: disable=invalid-name + self._VisitCompoundStatement( node, self._TRY_STMT_ELEMS ) - _EXCEPT_STMT_ELEMS = frozenset({'except'}) + _EXCEPT_STMT_ELEMS = frozenset( { 'except' } ) - def Visit_except_clause(self, node): # pylint: disable=invalid-name - self._VisitCompoundStatement(node, self._EXCEPT_STMT_ELEMS) + def Visit_except_clause( self, node ): # pylint: disable=invalid-name + self._VisitCompoundStatement( node, self._EXCEPT_STMT_ELEMS ) - _FUNC_DEF_ELEMS = frozenset({'def'}) + 
_FUNC_DEF_ELEMS = frozenset( { 'def' } ) - def Visit_funcdef(self, node): # pylint: disable=invalid-name - self._VisitCompoundStatement(node, self._FUNC_DEF_ELEMS) + def Visit_funcdef( self, node ): # pylint: disable=invalid-name + self._VisitCompoundStatement( node, self._FUNC_DEF_ELEMS ) - def Visit_async_funcdef(self, node): # pylint: disable=invalid-name - self._StartNewLine() - index = 0 - for child in node.children: - index += 1 - self.Visit(child) - if child.type == grammar_token.ASYNC: - break - for child in node.children[index].children: - self.Visit(child) + def Visit_async_funcdef( self, node ): # pylint: disable=invalid-name + self._StartNewLine() + index = 0 + for child in node.children: + index += 1 + self.Visit( child ) + if child.type == grammar_token.ASYNC: + break + for child in node.children[ index ].children: + self.Visit( child ) - _CLASS_DEF_ELEMS = frozenset({'class'}) + _CLASS_DEF_ELEMS = frozenset( { 'class' } ) - def Visit_classdef(self, node): # pylint: disable=invalid-name - self._VisitCompoundStatement(node, self._CLASS_DEF_ELEMS) + def Visit_classdef( self, node ): # pylint: disable=invalid-name + self._VisitCompoundStatement( node, self._CLASS_DEF_ELEMS ) - def Visit_async_stmt(self, node): # pylint: disable=invalid-name - self._StartNewLine() - index = 0 - for child in node.children: - index += 1 - self.Visit(child) - if child.type == grammar_token.ASYNC: - break - for child in node.children[index].children: - if child.type == grammar_token.NAME and child.value == 'else': + def Visit_async_stmt( self, node ): # pylint: disable=invalid-name self._StartNewLine() - self.Visit(child) - - def Visit_decorator(self, node): # pylint: disable=invalid-name - for child in node.children: - self.Visit(child) - if child.type == grammar_token.COMMENT and child == node.children[0]: + index = 0 + for child in node.children: + index += 1 + self.Visit( child ) + if child.type == grammar_token.ASYNC: + break + for child in node.children[ index 
].children: + if child.type == grammar_token.NAME and child.value == 'else': + self._StartNewLine() + self.Visit( child ) + + def Visit_decorator( self, node ): # pylint: disable=invalid-name + for child in node.children: + self.Visit( child ) + if child.type == grammar_token.COMMENT and child == node.children[ 0 ]: + self._StartNewLine() + + def Visit_decorators( self, node ): # pylint: disable=invalid-name + for child in node.children: + self._StartNewLine() + self.Visit( child ) + + def Visit_decorated( self, node ): # pylint: disable=invalid-name + for child in node.children: + self._StartNewLine() + self.Visit( child ) + + _WITH_STMT_ELEMS = frozenset( { 'with' } ) + + def Visit_with_stmt( self, node ): # pylint: disable=invalid-name + self._VisitCompoundStatement( node, self._WITH_STMT_ELEMS ) + + def Visit_suite( self, node ): # pylint: disable=invalid-name + # A 'suite' starts a new indentation level in Python. + self._cur_depth += 1 self._StartNewLine() + self.DefaultNodeVisit( node ) + self._cur_depth -= 1 - def Visit_decorators(self, node): # pylint: disable=invalid-name - for child in node.children: - self._StartNewLine() - self.Visit(child) - - def Visit_decorated(self, node): # pylint: disable=invalid-name - for child in node.children: - self._StartNewLine() - self.Visit(child) - - _WITH_STMT_ELEMS = frozenset({'with'}) - - def Visit_with_stmt(self, node): # pylint: disable=invalid-name - self._VisitCompoundStatement(node, self._WITH_STMT_ELEMS) - - def Visit_suite(self, node): # pylint: disable=invalid-name - # A 'suite' starts a new indentation level in Python. 
- self._cur_depth += 1 - self._StartNewLine() - self.DefaultNodeVisit(node) - self._cur_depth -= 1 - - def Visit_listmaker(self, node): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation(node) - self.DefaultNodeVisit(node) + def Visit_listmaker( self, node ): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation( node ) + self.DefaultNodeVisit( node ) - def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation(node) - self.DefaultNodeVisit(node) + def Visit_dictsetmaker( self, node ): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation( node ) + self.DefaultNodeVisit( node ) - def Visit_import_as_names(self, node): # pylint: disable=invalid-name - if node.prev_sibling.value == '(': - _DetermineMustSplitAnnotation(node) - self.DefaultNodeVisit(node) + def Visit_import_as_names( self, node ): # pylint: disable=invalid-name + if node.prev_sibling.value == '(': + _DetermineMustSplitAnnotation( node ) + self.DefaultNodeVisit( node ) - def Visit_testlist_gexp(self, node): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation(node) - self.DefaultNodeVisit(node) + def Visit_testlist_gexp( self, node ): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation( node ) + self.DefaultNodeVisit( node ) - def Visit_arglist(self, node): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation(node) - self.DefaultNodeVisit(node) + def Visit_arglist( self, node ): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation( node ) + self.DefaultNodeVisit( node ) - def Visit_typedargslist(self, node): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation(node) - self.DefaultNodeVisit(node) + def Visit_typedargslist( self, node ): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation( node ) + self.DefaultNodeVisit( node ) - def DefaultLeafVisit(self, leaf): - """Default visitor for tree leaves. + def DefaultLeafVisit( self, leaf ): + """Default visitor for tree leaves. 
A tree leaf is always just gets appended to the current logical line. Arguments: leaf: the leaf to visit. """ - if leaf.type in _WHITESPACE_TOKENS: - self._StartNewLine() - elif leaf.type != grammar_token.COMMENT or leaf.value.strip(): - # Add non-whitespace tokens and comments that aren't empty. - self._cur_logical_line.AppendToken( - format_token.FormatToken(leaf, pytree_utils.NodeName(leaf))) + if leaf.type in _WHITESPACE_TOKENS: + self._StartNewLine() + elif leaf.type != grammar_token.COMMENT or leaf.value.strip(): + # Add non-whitespace tokens and comments that aren't empty. + self._cur_logical_line.AppendToken( + format_token.FormatToken( leaf, pytree_utils.NodeName( leaf ) ) ) -_BRACKET_MATCH = {')': '(', '}': '{', ']': '['} +_BRACKET_MATCH = { ')': '(', '}': '{', ']': '['} -def _MatchBrackets(line): - """Visit the node and match the brackets. +def _MatchBrackets( line ): + """Visit the node and match the brackets. For every open bracket ('[', '{', or '('), find the associated closing bracket and "match" them up. I.e., save in the token a pointer to its associated open @@ -314,23 +316,23 @@ def _MatchBrackets(line): Arguments: line: (LogicalLine) A logical line. 
""" - bracket_stack = [] - for token in line.tokens: - if token.value in _OPENING_BRACKETS: - bracket_stack.append(token) - elif token.value in _CLOSING_BRACKETS: - bracket_stack[-1].matching_bracket = token - token.matching_bracket = bracket_stack[-1] - bracket_stack.pop() + bracket_stack = [] + for token in line.tokens: + if token.value in _OPENING_BRACKETS: + bracket_stack.append( token ) + elif token.value in _CLOSING_BRACKETS: + bracket_stack[ -1 ].matching_bracket = token + token.matching_bracket = bracket_stack[ -1 ] + bracket_stack.pop() - for bracket in bracket_stack: - if id(pytree_utils.GetOpeningBracket(token.node)) == id(bracket.node): - bracket.container_elements.append(token) - token.container_opening = bracket + for bracket in bracket_stack: + if id( pytree_utils.GetOpeningBracket( token.node ) ) == id( bracket.node ): + bracket.container_elements.append( token ) + token.container_opening = bracket -def _IdentifyParameterLists(line): - """Visit the node to create a state for parameter lists. +def _IdentifyParameterLists( line ): + """Visit the node to create a state for parameter lists. For instance, a parameter is considered an "object" with its first and last token uniquely identifying the object. @@ -338,32 +340,32 @@ def _IdentifyParameterLists(line): Arguments: line: (LogicalLine) A logical line. """ - func_stack = [] - param_stack = [] - for tok in line.tokens: - # Identify parameter list objects. - if subtypes.FUNC_DEF in tok.subtypes: - assert tok.next_token.value == '(' - func_stack.append(tok.next_token) - continue + func_stack = [] + param_stack = [] + for tok in line.tokens: + # Identify parameter list objects. 
+ if subtypes.FUNC_DEF in tok.subtypes: + assert tok.next_token.value == '(' + func_stack.append( tok.next_token ) + continue - if func_stack and tok.value == ')': - if tok == func_stack[-1].matching_bracket: - func_stack.pop() - continue + if func_stack and tok.value == ')': + if tok == func_stack[ -1 ].matching_bracket: + func_stack.pop() + continue - # Identify parameter objects. - if subtypes.PARAMETER_START in tok.subtypes: - param_stack.append(tok) + # Identify parameter objects. + if subtypes.PARAMETER_START in tok.subtypes: + param_stack.append( tok ) - # Not "elif", a parameter could be a single token. - if param_stack and subtypes.PARAMETER_STOP in tok.subtypes: - start = param_stack.pop() - func_stack[-1].parameters.append(object_state.Parameter(start, tok)) + # Not "elif", a parameter could be a single token. + if param_stack and subtypes.PARAMETER_STOP in tok.subtypes: + start = param_stack.pop() + func_stack[ -1 ].parameters.append( object_state.Parameter( start, tok ) ) -def _AdjustSplitPenalty(line): - """Visit the node and adjust the split penalties if needed. +def _AdjustSplitPenalty( line ): + """Visit the node and adjust the split penalties if needed. A token shouldn't be split if it's not within a bracket pair. Mark any token that's not within a bracket pair as "unbreakable". @@ -371,57 +373,56 @@ def _AdjustSplitPenalty(line): Arguments: line: (LogicalLine) An logical line. 
""" - bracket_level = 0 - for index, token in enumerate(line.tokens): - if index and not bracket_level: - pytree_utils.SetNodeAnnotation(token.node, - pytree_utils.Annotation.SPLIT_PENALTY, - split_penalty.UNBREAKABLE) - if token.value in _OPENING_BRACKETS: - bracket_level += 1 - elif token.value in _CLOSING_BRACKETS: - bracket_level -= 1 - - -def _DetermineMustSplitAnnotation(node): - """Enforce a split in the list if the list ends with a comma.""" - if style.Get('DISABLE_ENDING_COMMA_HEURISTIC'): - return - if not _ContainsComments(node): - token = next(node.parent.leaves()) - if token.value == '(': - if sum(1 for ch in node.children if ch.type == grammar_token.COMMA) < 2: + bracket_level = 0 + for index, token in enumerate( line.tokens ): + if index and not bracket_level: + pytree_utils.SetNodeAnnotation( + token.node, pytree_utils.Annotation.SPLIT_PENALTY, + split_penalty.UNBREAKABLE ) + if token.value in _OPENING_BRACKETS: + bracket_level += 1 + elif token.value in _CLOSING_BRACKETS: + bracket_level -= 1 + + +def _DetermineMustSplitAnnotation( node ): + """Enforce a split in the list if the list ends with a comma.""" + if style.Get( 'DISABLE_ENDING_COMMA_HEURISTIC' ): return - if (not isinstance(node.children[-1], pytree.Leaf) or - node.children[-1].value != ','): - return - num_children = len(node.children) - index = 0 - _SetMustSplitOnFirstLeaf(node.children[0]) - while index < num_children - 1: - child = node.children[index] - if isinstance(child, pytree.Leaf) and child.value == ',': - next_child = node.children[index + 1] - if next_child.type == grammar_token.COMMENT: + if not _ContainsComments( node ): + token = next( node.parent.leaves() ) + if token.value == '(': + if sum( 1 for ch in node.children if ch.type == grammar_token.COMMA ) < 2: + return + if ( not isinstance( node.children[ -1 ], pytree.Leaf ) or + node.children[ -1 ].value != ',' ): + return + num_children = len( node.children ) + index = 0 + _SetMustSplitOnFirstLeaf( node.children[ 0 ] ) + 
while index < num_children - 1: + child = node.children[ index ] + if isinstance( child, pytree.Leaf ) and child.value == ',': + next_child = node.children[ index + 1 ] + if next_child.type == grammar_token.COMMENT: + index += 1 + if index >= num_children - 1: + break + _SetMustSplitOnFirstLeaf( node.children[ index + 1 ] ) index += 1 - if index >= num_children - 1: - break - _SetMustSplitOnFirstLeaf(node.children[index + 1]) - index += 1 - - -def _ContainsComments(node): - """Return True if the list has a comment in it.""" - if isinstance(node, pytree.Leaf): - return node.type == grammar_token.COMMENT - for child in node.children: - if _ContainsComments(child): - return True - return False - - -def _SetMustSplitOnFirstLeaf(node): - """Set the "must split" annotation on the first leaf node.""" - pytree_utils.SetNodeAnnotation( - pytree_utils.FirstLeafNode(node), pytree_utils.Annotation.MUST_SPLIT, - True) + + +def _ContainsComments( node ): + """Return True if the list has a comment in it.""" + if isinstance( node, pytree.Leaf ): + return node.type == grammar_token.COMMENT + for child in node.children: + if _ContainsComments( child ): + return True + return False + + +def _SetMustSplitOnFirstLeaf( node ): + """Set the "must split" annotation on the first leaf node.""" + pytree_utils.SetNodeAnnotation( + pytree_utils.FirstLeafNode( node ), pytree_utils.Annotation.MUST_SPLIT, True ) diff --git a/yapf/pytree/pytree_utils.py b/yapf/pytree/pytree_utils.py index 66a54e617..710e0082d 100644 --- a/yapf/pytree/pytree_utils.py +++ b/yapf/pytree/pytree_utils.py @@ -37,20 +37,20 @@ # have a better understanding of what information we need from the tree. Then, # these tokens may be filtered out from the tree before the tree gets to the # unwrapper. 
-NONSEMANTIC_TOKENS = frozenset(['DEDENT', 'INDENT', 'NEWLINE', 'ENDMARKER']) +NONSEMANTIC_TOKENS = frozenset( [ 'DEDENT', 'INDENT', 'NEWLINE', 'ENDMARKER' ] ) -class Annotation(object): - """Annotation names associated with pytrees.""" - CHILD_INDENT = 'child_indent' - NEWLINES = 'newlines' - MUST_SPLIT = 'must_split' - SPLIT_PENALTY = 'split_penalty' - SUBTYPE = 'subtype' +class Annotation( object ): + """Annotation names associated with pytrees.""" + CHILD_INDENT = 'child_indent' + NEWLINES = 'newlines' + MUST_SPLIT = 'must_split' + SPLIT_PENALTY = 'split_penalty' + SUBTYPE = 'subtype' -def NodeName(node): - """Produce a string name for a given node. +def NodeName( node ): + """Produce a string name for a given node. For a Leaf this is the token name, and for a Node this is the type. @@ -60,23 +60,23 @@ def NodeName(node): Returns: Name as a string. """ - # Nodes with values < 256 are tokens. Values >= 256 are grammar symbols. - if node.type < 256: - return token.tok_name[node.type] - else: - return pygram.python_grammar.number2symbol[node.type] + # Nodes with values < 256 are tokens. Values >= 256 are grammar symbols. + if node.type < 256: + return token.tok_name[ node.type ] + else: + return pygram.python_grammar.number2symbol[ node.type ] -def FirstLeafNode(node): - if isinstance(node, pytree.Leaf): - return node - return FirstLeafNode(node.children[0]) +def FirstLeafNode( node ): + if isinstance( node, pytree.Leaf ): + return node + return FirstLeafNode( node.children[ 0 ] ) -def LastLeafNode(node): - if isinstance(node, pytree.Leaf): - return node - return LastLeafNode(node.children[-1]) +def LastLeafNode( node ): + if isinstance( node, pytree.Leaf ): + return node + return LastLeafNode( node.children[ -1 ] ) # lib2to3 thoughtfully provides pygram.python_grammar_no_print_statement for @@ -85,14 +85,14 @@ def LastLeafNode(node): # It forgets to do the same for 'exec' though. Luckily, Python is amenable to # monkey-patching. 
_GRAMMAR_FOR_PY3 = pygram.python_grammar_no_print_statement.copy() -del _GRAMMAR_FOR_PY3.keywords['exec'] +del _GRAMMAR_FOR_PY3.keywords[ 'exec' ] _GRAMMAR_FOR_PY2 = pygram.python_grammar.copy() -del _GRAMMAR_FOR_PY2.keywords['nonlocal'] +del _GRAMMAR_FOR_PY2.keywords[ 'nonlocal' ] -def ParseCodeToTree(code): - """Parse the given code to a lib2to3 pytree. +def ParseCodeToTree( code ): + """Parse the given code to a lib2to3 pytree. Arguments: code: a string with the code to parse. @@ -104,35 +104,35 @@ def ParseCodeToTree(code): Returns: The root node of the parsed tree. """ - # This function is tiny, but the incantation for invoking the parser correctly - # is sufficiently magical to be worth abstracting away. - if not code.endswith(os.linesep): - code += os.linesep - - try: - # Try to parse using a Python 3 grammar, which is more permissive (print and - # exec are not keywords). - parser_driver = driver.Driver(_GRAMMAR_FOR_PY3, convert=pytree.convert) - tree = parser_driver.parse_string(code, debug=False) - except parse.ParseError: - # Now try to parse using a Python 2 grammar; If this fails, then - # there's something else wrong with the code. + # This function is tiny, but the incantation for invoking the parser correctly + # is sufficiently magical to be worth abstracting away. + if not code.endswith( os.linesep ): + code += os.linesep + try: - parser_driver = driver.Driver(_GRAMMAR_FOR_PY2, convert=pytree.convert) - tree = parser_driver.parse_string(code, debug=False) + # Try to parse using a Python 3 grammar, which is more permissive (print and + # exec are not keywords). + parser_driver = driver.Driver( _GRAMMAR_FOR_PY3, convert = pytree.convert ) + tree = parser_driver.parse_string( code, debug = False ) except parse.ParseError: - # Raise a syntax error if the code is invalid python syntax. 
- try: - ast.parse(code) - except SyntaxError as e: - raise e - else: - raise - return _WrapEndMarker(tree) - - -def _WrapEndMarker(tree): - """Wrap a single ENDMARKER token in a "file_input" node. + # Now try to parse using a Python 2 grammar; If this fails, then + # there's something else wrong with the code. + try: + parser_driver = driver.Driver( _GRAMMAR_FOR_PY2, convert = pytree.convert ) + tree = parser_driver.parse_string( code, debug = False ) + except parse.ParseError: + # Raise a syntax error if the code is invalid python syntax. + try: + ast.parse( code ) + except SyntaxError as e: + raise e + else: + raise + return _WrapEndMarker( tree ) + + +def _WrapEndMarker( tree ): + """Wrap a single ENDMARKER token in a "file_input" node. Arguments: tree: (pytree.Node) The root node of the parsed tree. @@ -142,13 +142,13 @@ def _WrapEndMarker(tree): then that node is wrapped in a "file_input" node. That will ensure we don't skip comments attached to that node. """ - if isinstance(tree, pytree.Leaf) and tree.type == token.ENDMARKER: - return pytree.Node(pygram.python_symbols.file_input, [tree]) - return tree + if isinstance( tree, pytree.Leaf ) and tree.type == token.ENDMARKER: + return pytree.Node( pygram.python_symbols.file_input, [ tree ] ) + return tree -def InsertNodesBefore(new_nodes, target): - """Insert new_nodes before the given target location in the tree. +def InsertNodesBefore( new_nodes, target ): + """Insert new_nodes before the given target location in the tree. Arguments: new_nodes: a sequence of new nodes to insert (the nodes should not be in the @@ -158,12 +158,12 @@ def InsertNodesBefore(new_nodes, target): Raises: RuntimeError: if the tree is corrupted, or the insertion would corrupt it. """ - for node in new_nodes: - _InsertNodeAt(node, target, after=False) + for node in new_nodes: + _InsertNodeAt( node, target, after = False ) -def InsertNodesAfter(new_nodes, target): - """Insert new_nodes after the given target location in the tree. 
+def InsertNodesAfter( new_nodes, target ): + """Insert new_nodes after the given target location in the tree. Arguments: new_nodes: a sequence of new nodes to insert (the nodes should not be in the @@ -173,12 +173,12 @@ def InsertNodesAfter(new_nodes, target): Raises: RuntimeError: if the tree is corrupted, or the insertion would corrupt it. """ - for node in reversed(new_nodes): - _InsertNodeAt(node, target, after=True) + for node in reversed( new_nodes ): + _InsertNodeAt( node, target, after = True ) -def _InsertNodeAt(new_node, target, after=False): - """Underlying implementation for node insertion. +def _InsertNodeAt( new_node, target, after = False ): + """Underlying implementation for node insertion. Arguments: new_node: a new node to insert (this node should not be in the tree). @@ -193,24 +193,23 @@ def _InsertNodeAt(new_node, target, after=False): RuntimeError: if the tree is corrupted, or the insertion would corrupt it. """ - # Protect against attempts to insert nodes which already belong to some tree. - if new_node.parent is not None: - raise RuntimeError('inserting node which already has a parent', - (new_node, new_node.parent)) + # Protect against attempts to insert nodes which already belong to some tree. 
+ if new_node.parent is not None: + raise RuntimeError( + 'inserting node which already has a parent', ( new_node, new_node.parent ) ) - # The code here is based on pytree.Base.next_sibling - parent_of_target = target.parent - if parent_of_target is None: - raise RuntimeError('expected target node to have a parent', (target,)) + # The code here is based on pytree.Base.next_sibling + parent_of_target = target.parent + if parent_of_target is None: + raise RuntimeError( 'expected target node to have a parent', ( target,) ) - for i, child in enumerate(parent_of_target.children): - if child is target: - insertion_index = i + 1 if after else i - parent_of_target.insert_child(insertion_index, new_node) - return + for i, child in enumerate( parent_of_target.children ): + if child is target: + insertion_index = i + 1 if after else i + parent_of_target.insert_child( insertion_index, new_node ) + return - raise RuntimeError('unable to find insertion point for target node', - (target,)) + raise RuntimeError( 'unable to find insertion point for target node', ( target,) ) # The following constant and functions implement a simple custom annotation @@ -220,20 +219,20 @@ def _InsertNodeAt(new_node, target, after=False): _NODE_ANNOTATION_PREFIX = '_yapf_annotation_' -def CopyYapfAnnotations(src, dst): - """Copy all YAPF annotations from the source node to the destination node. +def CopyYapfAnnotations( src, dst ): + """Copy all YAPF annotations from the source node to the destination node. Arguments: src: the source node. dst: the destination node. """ - for annotation in dir(src): - if annotation.startswith(_NODE_ANNOTATION_PREFIX): - setattr(dst, annotation, getattr(src, annotation, None)) + for annotation in dir( src ): + if annotation.startswith( _NODE_ANNOTATION_PREFIX ): + setattr( dst, annotation, getattr( src, annotation, None ) ) -def GetNodeAnnotation(node, annotation, default=None): - """Get annotation value from a node. 
+def GetNodeAnnotation( node, annotation, default = None ): + """Get annotation value from a node. Arguments: node: the node. @@ -244,48 +243,48 @@ def GetNodeAnnotation(node, annotation, default=None): Value of the annotation in the given node. If the node doesn't have this particular annotation name yet, returns default. """ - return getattr(node, _NODE_ANNOTATION_PREFIX + annotation, default) + return getattr( node, _NODE_ANNOTATION_PREFIX + annotation, default ) -def SetNodeAnnotation(node, annotation, value): - """Set annotation value on a node. +def SetNodeAnnotation( node, annotation, value ): + """Set annotation value on a node. Arguments: node: the node. annotation: annotation name - a string. value: annotation value to set. """ - setattr(node, _NODE_ANNOTATION_PREFIX + annotation, value) + setattr( node, _NODE_ANNOTATION_PREFIX + annotation, value ) -def AppendNodeAnnotation(node, annotation, value): - """Appends an annotation value to a list of annotations on the node. +def AppendNodeAnnotation( node, annotation, value ): + """Appends an annotation value to a list of annotations on the node. Arguments: node: the node. annotation: annotation name - a string. value: annotation value to set. """ - attr = GetNodeAnnotation(node, annotation, set()) - attr.add(value) - SetNodeAnnotation(node, annotation, attr) + attr = GetNodeAnnotation( node, annotation, set() ) + attr.add( value ) + SetNodeAnnotation( node, annotation, attr ) -def RemoveSubtypeAnnotation(node, value): - """Removes an annotation value from the subtype annotations on the node. +def RemoveSubtypeAnnotation( node, value ): + """Removes an annotation value from the subtype annotations on the node. Arguments: node: the node. value: annotation value to remove. 
""" - attr = GetNodeAnnotation(node, Annotation.SUBTYPE) - if attr and value in attr: - attr.remove(value) - SetNodeAnnotation(node, Annotation.SUBTYPE, attr) + attr = GetNodeAnnotation( node, Annotation.SUBTYPE ) + if attr and value in attr: + attr.remove( value ) + SetNodeAnnotation( node, Annotation.SUBTYPE, attr ) -def GetOpeningBracket(node): - """Get opening bracket value from a node. +def GetOpeningBracket( node ): + """Get opening bracket value from a node. Arguments: node: the node. @@ -293,21 +292,21 @@ def GetOpeningBracket(node): Returns: The opening bracket node or None if it couldn't find one. """ - return getattr(node, _NODE_ANNOTATION_PREFIX + 'container_bracket', None) + return getattr( node, _NODE_ANNOTATION_PREFIX + 'container_bracket', None ) -def SetOpeningBracket(node, bracket): - """Set opening bracket value for a node. +def SetOpeningBracket( node, bracket ): + """Set opening bracket value for a node. Arguments: node: the node. bracket: opening bracket to set. """ - setattr(node, _NODE_ANNOTATION_PREFIX + 'container_bracket', bracket) + setattr( node, _NODE_ANNOTATION_PREFIX + 'container_bracket', bracket ) -def DumpNodeToString(node): - """Dump a string representation of the given node. For debugging. +def DumpNodeToString( node ): + """Dump a string representation of the given node. For debugging. Arguments: node: the node. @@ -315,33 +314,35 @@ def DumpNodeToString(node): Returns: The string representation. 
""" - if isinstance(node, pytree.Leaf): - fmt = ('{name}({value}) [lineno={lineno}, column={column}, ' - 'prefix={prefix}, penalty={penalty}]') - return fmt.format( - name=NodeName(node), - value=_PytreeNodeRepr(node), - lineno=node.lineno, - column=node.column, - prefix=repr(node.prefix), - penalty=GetNodeAnnotation(node, Annotation.SPLIT_PENALTY, None)) - else: - fmt = '{node} [{len} children] [child_indent="{indent}"]' - return fmt.format( - node=NodeName(node), - len=len(node.children), - indent=GetNodeAnnotation(node, Annotation.CHILD_INDENT)) - - -def _PytreeNodeRepr(node): - """Like pytree.Node.__repr__, but names instead of numbers for tokens.""" - if isinstance(node, pytree.Node): - return '%s(%s, %r)' % (node.__class__.__name__, NodeName(node), - [_PytreeNodeRepr(c) for c in node.children]) - if isinstance(node, pytree.Leaf): - return '%s(%s, %r)' % (node.__class__.__name__, NodeName(node), node.value) - - -def IsCommentStatement(node): - return (NodeName(node) == 'simple_stmt' and - node.children[0].type == token.COMMENT) + if isinstance( node, pytree.Leaf ): + fmt = ( + '{name}({value}) [lineno={lineno}, column={column}, ' + 'prefix={prefix}, penalty={penalty}]' ) + return fmt.format( + name = NodeName( node ), + value = _PytreeNodeRepr( node ), + lineno = node.lineno, + column = node.column, + prefix = repr( node.prefix ), + penalty = GetNodeAnnotation( node, Annotation.SPLIT_PENALTY, None ) ) + else: + fmt = '{node} [{len} children] [child_indent="{indent}"]' + return fmt.format( + node = NodeName( node ), + len = len( node.children ), + indent = GetNodeAnnotation( node, Annotation.CHILD_INDENT ) ) + + +def _PytreeNodeRepr( node ): + """Like pytree.Node.__repr__, but names instead of numbers for tokens.""" + if isinstance( node, pytree.Node ): + return '%s(%s, %r)' % ( + node.__class__.__name__, NodeName( node ), + [ _PytreeNodeRepr( c ) for c in node.children ] ) + if isinstance( node, pytree.Leaf ): + return '%s(%s, %r)' % ( node.__class__.__name__, 
NodeName( node ), node.value ) + + +def IsCommentStatement( node ): + return ( + NodeName( node ) == 'simple_stmt' and node.children[ 0 ].type == token.COMMENT ) diff --git a/yapf/pytree/pytree_visitor.py b/yapf/pytree/pytree_visitor.py index 314431e84..5b816f3e4 100644 --- a/yapf/pytree/pytree_visitor.py +++ b/yapf/pytree/pytree_visitor.py @@ -31,8 +31,8 @@ from yapf.pytree import pytree_utils -class PyTreeVisitor(object): - """Visitor pattern for pytree trees. +class PyTreeVisitor( object ): + """Visitor pattern for pytree trees. Methods named Visit_XXX will be invoked when a node with type XXX is encountered in the tree. The type is either a token type (for Leaf nodes) or @@ -54,42 +54,42 @@ class PyTreeVisitor(object): that may have children - otherwise the children will not be visited. """ - def Visit(self, node): - """Visit a node.""" - method = 'Visit_{0}'.format(pytree_utils.NodeName(node)) - if hasattr(self, method): - # Found a specific visitor for this node - getattr(self, method)(node) - else: - if isinstance(node, pytree.Leaf): - self.DefaultLeafVisit(node) - else: - self.DefaultNodeVisit(node) - - def DefaultNodeVisit(self, node): - """Default visitor for Node: visits the node's children depth-first. + def Visit( self, node ): + """Visit a node.""" + method = 'Visit_{0}'.format( pytree_utils.NodeName( node ) ) + if hasattr( self, method ): + # Found a specific visitor for this node + getattr( self, method )( node ) + else: + if isinstance( node, pytree.Leaf ): + self.DefaultLeafVisit( node ) + else: + self.DefaultNodeVisit( node ) + + def DefaultNodeVisit( self, node ): + """Default visitor for Node: visits the node's children depth-first. This method is invoked when no specific visitor for the node is defined. Arguments: node: the node to visit """ - for child in node.children: - self.Visit(child) + for child in node.children: + self.Visit( child ) - def DefaultLeafVisit(self, leaf): - """Default visitor for Leaf: no-op. 
+ def DefaultLeafVisit( self, leaf ): + """Default visitor for Leaf: no-op. This method is invoked when no specific visitor for the leaf is defined. Arguments: leaf: the leaf to visit """ - pass + pass -def DumpPyTree(tree, target_stream=sys.stdout): - """Convenience function for dumping a given pytree. +def DumpPyTree( tree, target_stream = sys.stdout ): + """Convenience function for dumping a given pytree. This function presents a very minimal interface. For more configurability (for example, controlling how specific node types are displayed), use PyTreeDumper @@ -100,36 +100,36 @@ def DumpPyTree(tree, target_stream=sys.stdout): target_stream: the stream to dump the tree to. A file-like object. By default will dump into stdout. """ - dumper = PyTreeDumper(target_stream) - dumper.Visit(tree) + dumper = PyTreeDumper( target_stream ) + dumper.Visit( tree ) -class PyTreeDumper(PyTreeVisitor): - """Visitor that dumps the tree to a stream. +class PyTreeDumper( PyTreeVisitor ): + """Visitor that dumps the tree to a stream. Implements the PyTreeVisitor interface. """ - def __init__(self, target_stream=sys.stdout): - """Create a tree dumper. + def __init__( self, target_stream = sys.stdout ): + """Create a tree dumper. Arguments: target_stream: the stream to dump the tree to. A file-like object. By default will dump into stdout. """ - self._target_stream = target_stream - self._current_indent = 0 - - def _DumpString(self, s): - self._target_stream.write('{0}{1}\n'.format(' ' * self._current_indent, s)) - - def DefaultNodeVisit(self, node): - # Dump information about the current node, and then use the generic - # DefaultNodeVisit visitor to dump each of its children. 
- self._DumpString(pytree_utils.DumpNodeToString(node)) - self._current_indent += 2 - super(PyTreeDumper, self).DefaultNodeVisit(node) - self._current_indent -= 2 - - def DefaultLeafVisit(self, leaf): - self._DumpString(pytree_utils.DumpNodeToString(leaf)) + self._target_stream = target_stream + self._current_indent = 0 + + def _DumpString( self, s ): + self._target_stream.write( '{0}{1}\n'.format( ' ' * self._current_indent, s ) ) + + def DefaultNodeVisit( self, node ): + # Dump information about the current node, and then use the generic + # DefaultNodeVisit visitor to dump each of its children. + self._DumpString( pytree_utils.DumpNodeToString( node ) ) + self._current_indent += 2 + super( PyTreeDumper, self ).DefaultNodeVisit( node ) + self._current_indent -= 2 + + def DefaultLeafVisit( self, leaf ): + self._DumpString( pytree_utils.DumpNodeToString( leaf ) ) diff --git a/yapf/pytree/split_penalty.py b/yapf/pytree/split_penalty.py index b53ffbf85..8b5598390 100644 --- a/yapf/pytree/split_penalty.py +++ b/yapf/pytree/split_penalty.py @@ -26,565 +26,574 @@ # TODO(morbo): Document the annotations in a centralized place. E.g., the # README file. 
-UNBREAKABLE = 1000 * 1000 -NAMED_ASSIGN = 15000 -DOTTED_NAME = 4000 +UNBREAKABLE = 1000 * 1000 +NAMED_ASSIGN = 15000 +DOTTED_NAME = 4000 VERY_STRONGLY_CONNECTED = 3500 -STRONGLY_CONNECTED = 3000 -CONNECTED = 500 -TOGETHER = 100 - -OR_TEST = 1000 -AND_TEST = 1100 -NOT_TEST = 1200 -COMPARISON = 1300 -STAR_EXPR = 1300 -EXPR = 1400 -XOR_EXPR = 1500 -AND_EXPR = 1700 -SHIFT_EXPR = 1800 -ARITH_EXPR = 1900 -TERM = 2000 -FACTOR = 2100 -POWER = 2200 -ATOM = 2300 +STRONGLY_CONNECTED = 3000 +CONNECTED = 500 +TOGETHER = 100 + +OR_TEST = 1000 +AND_TEST = 1100 +NOT_TEST = 1200 +COMPARISON = 1300 +STAR_EXPR = 1300 +EXPR = 1400 +XOR_EXPR = 1500 +AND_EXPR = 1700 +SHIFT_EXPR = 1800 +ARITH_EXPR = 1900 +TERM = 2000 +FACTOR = 2100 +POWER = 2200 +ATOM = 2300 ONE_ELEMENT_ARGUMENT = 500 -SUBSCRIPT = 6000 +SUBSCRIPT = 6000 -def ComputeSplitPenalties(tree): - """Compute split penalties on tokens in the given parse tree. +def ComputeSplitPenalties( tree ): + """Compute split penalties on tokens in the given parse tree. Arguments: tree: the top-level pytree node to annotate with penalties. """ - _SplitPenaltyAssigner().Visit(tree) + _SplitPenaltyAssigner().Visit( tree ) -class _SplitPenaltyAssigner(pytree_visitor.PyTreeVisitor): - """Assigns split penalties to tokens, based on parse tree structure. +class _SplitPenaltyAssigner( pytree_visitor.PyTreeVisitor ): + """Assigns split penalties to tokens, based on parse tree structure. Split penalties are attached as annotations to tokens. """ - def Visit(self, node): - if not hasattr(node, 'is_pseudo'): # Ignore pseudo tokens. 
- super(_SplitPenaltyAssigner, self).Visit(node) - - def Visit_import_as_names(self, node): # pyline: disable=invalid-name - # import_as_names ::= import_as_name (',' import_as_name)* [','] - self.DefaultNodeVisit(node) - prev_child = None - for child in node.children: - if (prev_child and isinstance(prev_child, pytree.Leaf) and - prev_child.value == ','): - _SetSplitPenalty(child, style.Get('SPLIT_PENALTY_IMPORT_NAMES')) - prev_child = child - - def Visit_classdef(self, node): # pylint: disable=invalid-name - # classdef ::= 'class' NAME ['(' [arglist] ')'] ':' suite - # - # NAME - _SetUnbreakable(node.children[1]) - if len(node.children) > 4: - # opening '(' - _SetUnbreakable(node.children[2]) - # ':' - _SetUnbreakable(node.children[-2]) - self.DefaultNodeVisit(node) - - def Visit_funcdef(self, node): # pylint: disable=invalid-name - # funcdef ::= 'def' NAME parameters ['->' test] ':' suite - # - # Can't break before the function name and before the colon. The parameters - # are handled by child iteration. - colon_idx = 1 - while pytree_utils.NodeName(node.children[colon_idx]) == 'simple_stmt': - colon_idx += 1 - _SetUnbreakable(node.children[colon_idx]) - arrow_idx = -1 - while colon_idx < len(node.children): - if isinstance(node.children[colon_idx], pytree.Leaf): - if node.children[colon_idx].value == ':': - break - if node.children[colon_idx].value == '->': - arrow_idx = colon_idx - colon_idx += 1 - _SetUnbreakable(node.children[colon_idx]) - self.DefaultNodeVisit(node) - if arrow_idx > 0: - _SetSplitPenalty( - pytree_utils.LastLeafNode(node.children[arrow_idx - 1]), 0) - _SetUnbreakable(node.children[arrow_idx]) - _SetStronglyConnected(node.children[arrow_idx + 1]) - - def Visit_lambdef(self, node): # pylint: disable=invalid-name - # lambdef ::= 'lambda' [varargslist] ':' test - # Loop over the lambda up to and including the colon. 
- allow_multiline_lambdas = style.Get('ALLOW_MULTILINE_LAMBDAS') - if not allow_multiline_lambdas: - for child in node.children: - if child.type == grammar_token.COMMENT: - if re.search(r'pylint:.*disable=.*\bg-long-lambda', child.value): - allow_multiline_lambdas = True - break - - if allow_multiline_lambdas: - _SetExpressionPenalty(node, STRONGLY_CONNECTED) - else: - _SetExpressionPenalty(node, VERY_STRONGLY_CONNECTED) - - def Visit_parameters(self, node): # pylint: disable=invalid-name - # parameters ::= '(' [typedargslist] ')' - self.DefaultNodeVisit(node) - - # Can't break before the opening paren of a parameter list. - _SetUnbreakable(node.children[0]) - if not (style.Get('INDENT_CLOSING_BRACKETS') or - style.Get('DEDENT_CLOSING_BRACKETS')): - _SetStronglyConnected(node.children[-1]) - - def Visit_arglist(self, node): # pylint: disable=invalid-name - # arglist ::= argument (',' argument)* [','] - if node.children[0].type == grammar_token.STAR: - # Python 3 treats a star expression as a specific expression type. - # Process it in that method. 
- self.Visit_star_expr(node) - return - - self.DefaultNodeVisit(node) - - for index in py3compat.range(1, len(node.children)): - child = node.children[index] - if isinstance(child, pytree.Leaf) and child.value == ',': - _SetUnbreakable(child) - - for child in node.children: - if pytree_utils.NodeName(child) == 'atom': - _IncreasePenalty(child, CONNECTED) - - def Visit_argument(self, node): # pylint: disable=invalid-name - # argument ::= test [comp_for] | test '=' test # Really [keyword '='] test - self.DefaultNodeVisit(node) - - for index in py3compat.range(1, len(node.children) - 1): - child = node.children[index] - if isinstance(child, pytree.Leaf) and child.value == '=': - _SetSplitPenalty( - pytree_utils.FirstLeafNode(node.children[index]), NAMED_ASSIGN) - _SetSplitPenalty( - pytree_utils.FirstLeafNode(node.children[index + 1]), NAMED_ASSIGN) - - def Visit_tname(self, node): # pylint: disable=invalid-name - # tname ::= NAME [':' test] - self.DefaultNodeVisit(node) - - for index in py3compat.range(1, len(node.children) - 1): - child = node.children[index] - if isinstance(child, pytree.Leaf) and child.value == ':': - _SetSplitPenalty( - pytree_utils.FirstLeafNode(node.children[index]), NAMED_ASSIGN) - _SetSplitPenalty( - pytree_utils.FirstLeafNode(node.children[index + 1]), NAMED_ASSIGN) - - def Visit_dotted_name(self, node): # pylint: disable=invalid-name - # dotted_name ::= NAME ('.' NAME)* - for child in node.children: - self.Visit(child) - start = 2 if hasattr(node.children[0], 'is_pseudo') else 1 - for i in py3compat.range(start, len(node.children)): - _SetUnbreakable(node.children[i]) - - def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name - # dictsetmaker ::= ( (test ':' test - # (comp_for | (',' test ':' test)* [','])) | - # (test (comp_for | (',' test)* [','])) ) - for child in node.children: - self.Visit(child) - if child.type == grammar_token.COLON: - # This is a key to a dictionary. We don't want to split the key if at - # all possible. 
- _SetStronglyConnected(child) - - def Visit_trailer(self, node): # pylint: disable=invalid-name - # trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME - if node.children[0].value == '.': - before = style.Get('SPLIT_BEFORE_DOT') - _SetSplitPenalty(node.children[0], - VERY_STRONGLY_CONNECTED if before else DOTTED_NAME) - _SetSplitPenalty(node.children[1], - DOTTED_NAME if before else VERY_STRONGLY_CONNECTED) - elif len(node.children) == 2: - # Don't split an empty argument list if at all possible. - _SetSplitPenalty(node.children[1], VERY_STRONGLY_CONNECTED) - elif len(node.children) == 3: - name = pytree_utils.NodeName(node.children[1]) - if name in {'argument', 'comparison'}: - # Don't split an argument list with one element if at all possible. - _SetStronglyConnected(node.children[1]) - if (len(node.children[1].children) > 1 and - pytree_utils.NodeName(node.children[1].children[1]) == 'comp_for'): - # Don't penalize splitting before a comp_for expression. - _SetSplitPenalty(pytree_utils.FirstLeafNode(node.children[1]), 0) + def Visit( self, node ): + if not hasattr( node, 'is_pseudo' ): # Ignore pseudo tokens. 
+ super( _SplitPenaltyAssigner, self ).Visit( node ) + + def Visit_import_as_names( self, node ): # pyline: disable=invalid-name + # import_as_names ::= import_as_name (',' import_as_name)* [','] + self.DefaultNodeVisit( node ) + prev_child = None + for child in node.children: + if ( prev_child and isinstance( prev_child, pytree.Leaf ) and + prev_child.value == ',' ): + _SetSplitPenalty( child, style.Get( 'SPLIT_PENALTY_IMPORT_NAMES' ) ) + prev_child = child + + def Visit_classdef( self, node ): # pylint: disable=invalid-name + # classdef ::= 'class' NAME ['(' [arglist] ')'] ':' suite + # + # NAME + _SetUnbreakable( node.children[ 1 ] ) + if len( node.children ) > 4: + # opening '(' + _SetUnbreakable( node.children[ 2 ] ) + # ':' + _SetUnbreakable( node.children[ -2 ] ) + self.DefaultNodeVisit( node ) + + def Visit_funcdef( self, node ): # pylint: disable=invalid-name + # funcdef ::= 'def' NAME parameters ['->' test] ':' suite + # + # Can't break before the function name and before the colon. The parameters + # are handled by child iteration. + colon_idx = 1 + while pytree_utils.NodeName( node.children[ colon_idx ] ) == 'simple_stmt': + colon_idx += 1 + _SetUnbreakable( node.children[ colon_idx ] ) + arrow_idx = -1 + while colon_idx < len( node.children ): + if isinstance( node.children[ colon_idx ], pytree.Leaf ): + if node.children[ colon_idx ].value == ':': + break + if node.children[ colon_idx ].value == '->': + arrow_idx = colon_idx + colon_idx += 1 + _SetUnbreakable( node.children[ colon_idx ] ) + self.DefaultNodeVisit( node ) + if arrow_idx > 0: + _SetSplitPenalty( + pytree_utils.LastLeafNode( node.children[ arrow_idx - 1 ] ), 0 ) + _SetUnbreakable( node.children[ arrow_idx ] ) + _SetStronglyConnected( node.children[ arrow_idx + 1 ] ) + + def Visit_lambdef( self, node ): # pylint: disable=invalid-name + # lambdef ::= 'lambda' [varargslist] ':' test + # Loop over the lambda up to and including the colon. 
+ allow_multiline_lambdas = style.Get( 'ALLOW_MULTILINE_LAMBDAS' ) + if not allow_multiline_lambdas: + for child in node.children: + if child.type == grammar_token.COMMENT: + if re.search( r'pylint:.*disable=.*\bg-long-lambda', child.value ): + allow_multiline_lambdas = True + break + + if allow_multiline_lambdas: + _SetExpressionPenalty( node, STRONGLY_CONNECTED ) else: - _SetSplitPenalty( - pytree_utils.FirstLeafNode(node.children[1]), - ONE_ELEMENT_ARGUMENT) - elif (node.children[0].type == grammar_token.LSQB and - len(node.children[1].children) > 2 and - (name.endswith('_test') or name.endswith('_expr'))): - _SetStronglyConnected(node.children[1].children[0]) - _SetStronglyConnected(node.children[1].children[2]) - - # Still allow splitting around the operator. - split_before = ((name.endswith('_test') and - style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR')) or - (name.endswith('_expr') and - style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'))) - if split_before: - _SetSplitPenalty( - pytree_utils.LastLeafNode(node.children[1].children[1]), 0) - else: - _SetSplitPenalty( - pytree_utils.FirstLeafNode(node.children[1].children[2]), 0) - - # Don't split the ending bracket of a subscript list. - _RecAnnotate(node.children[-1], pytree_utils.Annotation.SPLIT_PENALTY, - VERY_STRONGLY_CONNECTED) - elif name not in { - 'arglist', 'argument', 'term', 'or_test', 'and_test', 'comparison', - 'atom', 'power' - }: - # Don't split an argument list with one element if at all possible. - stypes = pytree_utils.GetNodeAnnotation( - pytree_utils.FirstLeafNode(node), pytree_utils.Annotation.SUBTYPE) - if stypes and subtypes.SUBSCRIPT_BRACKET in stypes: - _IncreasePenalty(node, SUBSCRIPT) - - # Bump up the split penalty for the first part of a subscript. We - # would rather not split there. 
- _IncreasePenalty(node.children[1], CONNECTED) - else: - _SetStronglyConnected(node.children[1], node.children[2]) - - if name == 'arglist': - _SetStronglyConnected(node.children[-1]) - - self.DefaultNodeVisit(node) - - def Visit_power(self, node): # pylint: disable=invalid-name,missing-docstring - # power ::= atom trailer* ['**' factor] - self.DefaultNodeVisit(node) - - # When atom is followed by a trailer, we can not break between them. - # E.g. arr[idx] - no break allowed between 'arr' and '['. - if (len(node.children) > 1 and - pytree_utils.NodeName(node.children[1]) == 'trailer'): - # children[1] itself is a whole trailer: we don't want to - # mark all of it as unbreakable, only its first token: (, [ or . - first = pytree_utils.FirstLeafNode(node.children[1]) - if first.value != '.': - _SetUnbreakable(node.children[1].children[0]) - - # A special case when there are more trailers in the sequence. Given: - # atom tr1 tr2 - # The last token of tr1 and the first token of tr2 comprise an unbreakable - # region. For example: foo.bar.baz(1) - # We can't put breaks between either of the '.', '(', or '[' and the names - # *preceding* them. - prev_trailer_idx = 1 - while prev_trailer_idx < len(node.children) - 1: - cur_trailer_idx = prev_trailer_idx + 1 - cur_trailer = node.children[cur_trailer_idx] - if pytree_utils.NodeName(cur_trailer) != 'trailer': - break - - # Now we know we have two trailers one after the other - prev_trailer = node.children[prev_trailer_idx] - if prev_trailer.children[-1].value != ')': - # Set the previous node unbreakable if it's not a function call: - # atom tr1() tr2 - # It may be necessary (though undesirable) to split up a previous - # function call's parentheses to the next line. - _SetStronglyConnected(prev_trailer.children[-1]) - _SetStronglyConnected(cur_trailer.children[0]) - prev_trailer_idx = cur_trailer_idx - - # We don't want to split before the last ')' of a function call. 
This also - # takes care of the special case of: - # atom tr1 tr2 ... trn - # where the 'tr#' are trailers that may end in a ')'. - for trailer in node.children[1:]: - if pytree_utils.NodeName(trailer) != 'trailer': - break - if trailer.children[0].value in '([': - if len(trailer.children) > 2: - stypes = pytree_utils.GetNodeAnnotation( - trailer.children[0], pytree_utils.Annotation.SUBTYPE) - if stypes and subtypes.SUBSCRIPT_BRACKET in stypes: - _SetStronglyConnected( - pytree_utils.FirstLeafNode(trailer.children[1])) - - last_child_node = pytree_utils.LastLeafNode(trailer) - if last_child_node.value.strip().startswith('#'): - last_child_node = last_child_node.prev_sibling - if not (style.Get('INDENT_CLOSING_BRACKETS') or - style.Get('DEDENT_CLOSING_BRACKETS')): - last = pytree_utils.LastLeafNode(last_child_node.prev_sibling) - if last.value != ',': - if last_child_node.value == ']': - _SetUnbreakable(last_child_node) - else: - _SetSplitPenalty(last_child_node, VERY_STRONGLY_CONNECTED) - else: - # If the trailer's children are '()', then make it a strongly - # connected region. It's sometimes necessary, though undesirable, to - # split the two. 
- _SetStronglyConnected(trailer.children[-1]) - - def Visit_subscriptlist(self, node): # pylint: disable=invalid-name - # subscriptlist ::= subscript (',' subscript)* [','] - self.DefaultNodeVisit(node) - _SetSplitPenalty(pytree_utils.FirstLeafNode(node), 0) - prev_child = None - for child in node.children: - if prev_child and prev_child.type == grammar_token.COMMA: - _SetSplitPenalty(pytree_utils.FirstLeafNode(child), 0) - prev_child = child - - def Visit_subscript(self, node): # pylint: disable=invalid-name - # subscript ::= test | [test] ':' [test] [sliceop] - _SetStronglyConnected(*node.children) - self.DefaultNodeVisit(node) - - def Visit_comp_for(self, node): # pylint: disable=invalid-name - # comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter] - _SetSplitPenalty(pytree_utils.FirstLeafNode(node), 0) - _SetStronglyConnected(*node.children[1:]) - self.DefaultNodeVisit(node) - - def Visit_old_comp_for(self, node): # pylint: disable=invalid-name - # Python 3.7 - self.Visit_comp_for(node) - - def Visit_comp_if(self, node): # pylint: disable=invalid-name - # comp_if ::= 'if' old_test [comp_iter] - _SetSplitPenalty(node.children[0], - style.Get('SPLIT_PENALTY_BEFORE_IF_EXPR')) - _SetStronglyConnected(*node.children[1:]) - self.DefaultNodeVisit(node) - - def Visit_old_comp_if(self, node): # pylint: disable=invalid-name - # Python 3.7 - self.Visit_comp_if(node) - - def Visit_test(self, node): # pylint: disable=invalid-name - # test ::= or_test ['if' or_test 'else' test] | lambdef - _IncreasePenalty(node, OR_TEST) - self.DefaultNodeVisit(node) - - def Visit_or_test(self, node): # pylint: disable=invalid-name - # or_test ::= and_test ('or' and_test)* - self.DefaultNodeVisit(node) - _IncreasePenalty(node, OR_TEST) - index = 1 - while index + 1 < len(node.children): - if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'): - _DecrementSplitPenalty( - pytree_utils.FirstLeafNode(node.children[index]), OR_TEST) - else: - _DecrementSplitPenalty( - 
pytree_utils.FirstLeafNode(node.children[index + 1]), OR_TEST) - index += 2 - - def Visit_and_test(self, node): # pylint: disable=invalid-name - # and_test ::= not_test ('and' not_test)* - self.DefaultNodeVisit(node) - _IncreasePenalty(node, AND_TEST) - index = 1 - while index + 1 < len(node.children): - if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'): - _DecrementSplitPenalty( - pytree_utils.FirstLeafNode(node.children[index]), AND_TEST) - else: - _DecrementSplitPenalty( - pytree_utils.FirstLeafNode(node.children[index + 1]), AND_TEST) - index += 2 - - def Visit_not_test(self, node): # pylint: disable=invalid-name - # not_test ::= 'not' not_test | comparison - self.DefaultNodeVisit(node) - _IncreasePenalty(node, NOT_TEST) - - def Visit_comparison(self, node): # pylint: disable=invalid-name - # comparison ::= expr (comp_op expr)* - self.DefaultNodeVisit(node) - if len(node.children) == 3 and _StronglyConnectedCompOp(node): - _IncreasePenalty(node.children[1], VERY_STRONGLY_CONNECTED) - _SetSplitPenalty( - pytree_utils.FirstLeafNode(node.children[2]), STRONGLY_CONNECTED) - else: - _IncreasePenalty(node, COMPARISON) - - def Visit_star_expr(self, node): # pylint: disable=invalid-name - # star_expr ::= '*' expr - self.DefaultNodeVisit(node) - _IncreasePenalty(node, STAR_EXPR) - - def Visit_expr(self, node): # pylint: disable=invalid-name - # expr ::= xor_expr ('|' xor_expr)* - self.DefaultNodeVisit(node) - _IncreasePenalty(node, EXPR) - _SetBitwiseOperandPenalty(node, '|') - - def Visit_xor_expr(self, node): # pylint: disable=invalid-name - # xor_expr ::= and_expr ('^' and_expr)* - self.DefaultNodeVisit(node) - _IncreasePenalty(node, XOR_EXPR) - _SetBitwiseOperandPenalty(node, '^') - - def Visit_and_expr(self, node): # pylint: disable=invalid-name - # and_expr ::= shift_expr ('&' shift_expr)* - self.DefaultNodeVisit(node) - _IncreasePenalty(node, AND_EXPR) - _SetBitwiseOperandPenalty(node, '&') - - def Visit_shift_expr(self, node): # pylint: disable=invalid-name - # 
shift_expr ::= arith_expr (('<<'|'>>') arith_expr)* - self.DefaultNodeVisit(node) - _IncreasePenalty(node, SHIFT_EXPR) - - _ARITH_OPS = frozenset({'PLUS', 'MINUS'}) - - def Visit_arith_expr(self, node): # pylint: disable=invalid-name - # arith_expr ::= term (('+'|'-') term)* - self.DefaultNodeVisit(node) - _IncreasePenalty(node, ARITH_EXPR) - _SetExpressionOperandPenalty(node, self._ARITH_OPS) - - _TERM_OPS = frozenset({'STAR', 'AT', 'SLASH', 'PERCENT', 'DOUBLESLASH'}) - - def Visit_term(self, node): # pylint: disable=invalid-name - # term ::= factor (('*'|'@'|'/'|'%'|'//') factor)* - self.DefaultNodeVisit(node) - _IncreasePenalty(node, TERM) - _SetExpressionOperandPenalty(node, self._TERM_OPS) - - def Visit_factor(self, node): # pyline: disable=invalid-name - # factor ::= ('+'|'-'|'~') factor | power - self.DefaultNodeVisit(node) - _IncreasePenalty(node, FACTOR) - - def Visit_atom(self, node): # pylint: disable=invalid-name - # atom ::= ('(' [yield_expr|testlist_gexp] ')' - # '[' [listmaker] ']' | - # '{' [dictsetmaker] '}') - self.DefaultNodeVisit(node) - if (node.children[0].value == '(' and - not hasattr(node.children[0], 'is_pseudo')): - if node.children[-1].value == ')': - if pytree_utils.NodeName(node.parent) == 'if_stmt': - _SetSplitPenalty(node.children[-1], STRONGLY_CONNECTED) + _SetExpressionPenalty( node, VERY_STRONGLY_CONNECTED ) + + def Visit_parameters( self, node ): # pylint: disable=invalid-name + # parameters ::= '(' [typedargslist] ')' + self.DefaultNodeVisit( node ) + + # Can't break before the opening paren of a parameter list. + _SetUnbreakable( node.children[ 0 ] ) + if not ( style.Get( 'INDENT_CLOSING_BRACKETS' ) or + style.Get( 'DEDENT_CLOSING_BRACKETS' ) ): + _SetStronglyConnected( node.children[ -1 ] ) + + def Visit_arglist( self, node ): # pylint: disable=invalid-name + # arglist ::= argument (',' argument)* [','] + if node.children[ 0 ].type == grammar_token.STAR: + # Python 3 treats a star expression as a specific expression type. 
+ # Process it in that method. + self.Visit_star_expr( node ) + return + + self.DefaultNodeVisit( node ) + + for index in py3compat.range( 1, len( node.children ) ): + child = node.children[ index ] + if isinstance( child, pytree.Leaf ) and child.value == ',': + _SetUnbreakable( child ) + + for child in node.children: + if pytree_utils.NodeName( child ) == 'atom': + _IncreasePenalty( child, CONNECTED ) + + def Visit_argument( self, node ): # pylint: disable=invalid-name + # argument ::= test [comp_for] | test '=' test # Really [keyword '='] test + self.DefaultNodeVisit( node ) + + for index in py3compat.range( 1, len( node.children ) - 1 ): + child = node.children[ index ] + if isinstance( child, pytree.Leaf ) and child.value == '=': + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index ] ), NAMED_ASSIGN ) + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), + NAMED_ASSIGN ) + + def Visit_tname( self, node ): # pylint: disable=invalid-name + # tname ::= NAME [':' test] + self.DefaultNodeVisit( node ) + + for index in py3compat.range( 1, len( node.children ) - 1 ): + child = node.children[ index ] + if isinstance( child, pytree.Leaf ) and child.value == ':': + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index ] ), NAMED_ASSIGN ) + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), + NAMED_ASSIGN ) + + def Visit_dotted_name( self, node ): # pylint: disable=invalid-name + # dotted_name ::= NAME ('.' 
NAME)* + for child in node.children: + self.Visit( child ) + start = 2 if hasattr( node.children[ 0 ], 'is_pseudo' ) else 1 + for i in py3compat.range( start, len( node.children ) ): + _SetUnbreakable( node.children[ i ] ) + + def Visit_dictsetmaker( self, node ): # pylint: disable=invalid-name + # dictsetmaker ::= ( (test ':' test + # (comp_for | (',' test ':' test)* [','])) | + # (test (comp_for | (',' test)* [','])) ) + for child in node.children: + self.Visit( child ) + if child.type == grammar_token.COLON: + # This is a key to a dictionary. We don't want to split the key if at + # all possible. + _SetStronglyConnected( child ) + + def Visit_trailer( self, node ): # pylint: disable=invalid-name + # trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME + if node.children[ 0 ].value == '.': + before = style.Get( 'SPLIT_BEFORE_DOT' ) + _SetSplitPenalty( + node.children[ 0 ], VERY_STRONGLY_CONNECTED if before else DOTTED_NAME ) + _SetSplitPenalty( + node.children[ 1 ], DOTTED_NAME if before else VERY_STRONGLY_CONNECTED ) + elif len( node.children ) == 2: + # Don't split an empty argument list if at all possible. + _SetSplitPenalty( node.children[ 1 ], VERY_STRONGLY_CONNECTED ) + elif len( node.children ) == 3: + name = pytree_utils.NodeName( node.children[ 1 ] ) + if name in { 'argument', 'comparison' }: + # Don't split an argument list with one element if at all possible. + _SetStronglyConnected( node.children[ 1 ] ) + if ( len( node.children[ 1 ].children ) > 1 and pytree_utils.NodeName( + node.children[ 1 ].children[ 1 ] ) == 'comp_for' ): + # Don't penalize splitting before a comp_for expression. 
+ _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ 1 ] ), 0 ) + else: + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ 1 ] ), + ONE_ELEMENT_ARGUMENT ) + elif ( node.children[ 0 ].type == grammar_token.LSQB and + len( node.children[ 1 ].children ) > 2 and + ( name.endswith( '_test' ) or name.endswith( '_expr' ) ) ): + _SetStronglyConnected( node.children[ 1 ].children[ 0 ] ) + _SetStronglyConnected( node.children[ 1 ].children[ 2 ] ) + + # Still allow splitting around the operator. + split_before = ( + ( + name.endswith( '_test' ) and + style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ) ) or ( + name.endswith( '_expr' ) and + style.Get( 'SPLIT_BEFORE_BITWISE_OPERATOR' ) ) ) + if split_before: + _SetSplitPenalty( + pytree_utils.LastLeafNode( node.children[ 1 ].children[ 1 ] ), + 0 ) + else: + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ 1 ].children[ 2 ] ), + 0 ) + + # Don't split the ending bracket of a subscript list. + _RecAnnotate( + node.children[ -1 ], pytree_utils.Annotation.SPLIT_PENALTY, + VERY_STRONGLY_CONNECTED ) + elif name not in { 'arglist', 'argument', 'term', 'or_test', 'and_test', + 'comparison', 'atom', 'power' }: + # Don't split an argument list with one element if at all possible. + stypes = pytree_utils.GetNodeAnnotation( + pytree_utils.FirstLeafNode( node ), + pytree_utils.Annotation.SUBTYPE ) + if stypes and subtypes.SUBSCRIPT_BRACKET in stypes: + _IncreasePenalty( node, SUBSCRIPT ) + + # Bump up the split penalty for the first part of a subscript. We + # would rather not split there. 
+ _IncreasePenalty( node.children[ 1 ], CONNECTED ) + else: + _SetStronglyConnected( node.children[ 1 ], node.children[ 2 ] ) + + if name == 'arglist': + _SetStronglyConnected( node.children[ -1 ] ) + + self.DefaultNodeVisit( node ) + + def Visit_power( self, node ): # pylint: disable=invalid-name,missing-docstring + # power ::= atom trailer* ['**' factor] + self.DefaultNodeVisit( node ) + + # When atom is followed by a trailer, we can not break between them. + # E.g. arr[idx] - no break allowed between 'arr' and '['. + if ( len( node.children ) > 1 and + pytree_utils.NodeName( node.children[ 1 ] ) == 'trailer' ): + # children[1] itself is a whole trailer: we don't want to + # mark all of it as unbreakable, only its first token: (, [ or . + first = pytree_utils.FirstLeafNode( node.children[ 1 ] ) + if first.value != '.': + _SetUnbreakable( node.children[ 1 ].children[ 0 ] ) + + # A special case when there are more trailers in the sequence. Given: + # atom tr1 tr2 + # The last token of tr1 and the first token of tr2 comprise an unbreakable + # region. For example: foo.bar.baz(1) + # We can't put breaks between either of the '.', '(', or '[' and the names + # *preceding* them. + prev_trailer_idx = 1 + while prev_trailer_idx < len( node.children ) - 1: + cur_trailer_idx = prev_trailer_idx + 1 + cur_trailer = node.children[ cur_trailer_idx ] + if pytree_utils.NodeName( cur_trailer ) != 'trailer': + break + + # Now we know we have two trailers one after the other + prev_trailer = node.children[ prev_trailer_idx ] + if prev_trailer.children[ -1 ].value != ')': + # Set the previous node unbreakable if it's not a function call: + # atom tr1() tr2 + # It may be necessary (though undesirable) to split up a previous + # function call's parentheses to the next line. + _SetStronglyConnected( prev_trailer.children[ -1 ] ) + _SetStronglyConnected( cur_trailer.children[ 0 ] ) + prev_trailer_idx = cur_trailer_idx + + # We don't want to split before the last ')' of a function call. 
This also + # takes care of the special case of: + # atom tr1 tr2 ... trn + # where the 'tr#' are trailers that may end in a ')'. + for trailer in node.children[ 1 : ]: + if pytree_utils.NodeName( trailer ) != 'trailer': + break + if trailer.children[ 0 ].value in '([': + if len( trailer.children ) > 2: + stypes = pytree_utils.GetNodeAnnotation( + trailer.children[ 0 ], pytree_utils.Annotation.SUBTYPE ) + if stypes and subtypes.SUBSCRIPT_BRACKET in stypes: + _SetStronglyConnected( + pytree_utils.FirstLeafNode( trailer.children[ 1 ] ) ) + + last_child_node = pytree_utils.LastLeafNode( trailer ) + if last_child_node.value.strip().startswith( '#' ): + last_child_node = last_child_node.prev_sibling + if not ( style.Get( 'INDENT_CLOSING_BRACKETS' ) or + style.Get( 'DEDENT_CLOSING_BRACKETS' ) ): + last = pytree_utils.LastLeafNode( last_child_node.prev_sibling ) + if last.value != ',': + if last_child_node.value == ']': + _SetUnbreakable( last_child_node ) + else: + _SetSplitPenalty( + last_child_node, VERY_STRONGLY_CONNECTED ) + else: + # If the trailer's children are '()', then make it a strongly + # connected region. It's sometimes necessary, though undesirable, to + # split the two. 
+ _SetStronglyConnected( trailer.children[ -1 ] ) + + def Visit_subscriptlist( self, node ): # pylint: disable=invalid-name + # subscriptlist ::= subscript (',' subscript)* [','] + self.DefaultNodeVisit( node ) + _SetSplitPenalty( pytree_utils.FirstLeafNode( node ), 0 ) + prev_child = None + for child in node.children: + if prev_child and prev_child.type == grammar_token.COMMA: + _SetSplitPenalty( pytree_utils.FirstLeafNode( child ), 0 ) + prev_child = child + + def Visit_subscript( self, node ): # pylint: disable=invalid-name + # subscript ::= test | [test] ':' [test] [sliceop] + _SetStronglyConnected( *node.children ) + self.DefaultNodeVisit( node ) + + def Visit_comp_for( self, node ): # pylint: disable=invalid-name + # comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter] + _SetSplitPenalty( pytree_utils.FirstLeafNode( node ), 0 ) + _SetStronglyConnected( *node.children[ 1 : ] ) + self.DefaultNodeVisit( node ) + + def Visit_old_comp_for( self, node ): # pylint: disable=invalid-name + # Python 3.7 + self.Visit_comp_for( node ) + + def Visit_comp_if( self, node ): # pylint: disable=invalid-name + # comp_if ::= 'if' old_test [comp_iter] + _SetSplitPenalty( + node.children[ 0 ], style.Get( 'SPLIT_PENALTY_BEFORE_IF_EXPR' ) ) + _SetStronglyConnected( *node.children[ 1 : ] ) + self.DefaultNodeVisit( node ) + + def Visit_old_comp_if( self, node ): # pylint: disable=invalid-name + # Python 3.7 + self.Visit_comp_if( node ) + + def Visit_test( self, node ): # pylint: disable=invalid-name + # test ::= or_test ['if' or_test 'else' test] | lambdef + _IncreasePenalty( node, OR_TEST ) + self.DefaultNodeVisit( node ) + + def Visit_or_test( self, node ): # pylint: disable=invalid-name + # or_test ::= and_test ('or' and_test)* + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, OR_TEST ) + index = 1 + while index + 1 < len( node.children ): + if style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ): + _DecrementSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index ] 
), OR_TEST ) + else: + _DecrementSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), OR_TEST ) + index += 2 + + def Visit_and_test( self, node ): # pylint: disable=invalid-name + # and_test ::= not_test ('and' not_test)* + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, AND_TEST ) + index = 1 + while index + 1 < len( node.children ): + if style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ): + _DecrementSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index ] ), AND_TEST ) + else: + _DecrementSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), AND_TEST ) + index += 2 + + def Visit_not_test( self, node ): # pylint: disable=invalid-name + # not_test ::= 'not' not_test | comparison + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, NOT_TEST ) + + def Visit_comparison( self, node ): # pylint: disable=invalid-name + # comparison ::= expr (comp_op expr)* + self.DefaultNodeVisit( node ) + if len( node.children ) == 3 and _StronglyConnectedCompOp( node ): + _IncreasePenalty( node.children[ 1 ], VERY_STRONGLY_CONNECTED ) + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ 2 ] ), STRONGLY_CONNECTED ) else: - if len(node.children) > 2: - _SetSplitPenalty(pytree_utils.FirstLeafNode(node.children[1]), EXPR) - _SetSplitPenalty(node.children[-1], ATOM) - elif node.children[0].value in '[{' and len(node.children) == 2: - # Keep empty containers together if we can. 
- _SetUnbreakable(node.children[-1]) - - def Visit_testlist_gexp(self, node): # pylint: disable=invalid-name - self.DefaultNodeVisit(node) - prev_was_comma = False - for child in node.children: - if isinstance(child, pytree.Leaf) and child.value == ',': - _SetUnbreakable(child) - prev_was_comma = True - else: - if prev_was_comma: - _SetSplitPenalty(pytree_utils.FirstLeafNode(child), TOGETHER) + _IncreasePenalty( node, COMPARISON ) + + def Visit_star_expr( self, node ): # pylint: disable=invalid-name + # star_expr ::= '*' expr + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, STAR_EXPR ) + + def Visit_expr( self, node ): # pylint: disable=invalid-name + # expr ::= xor_expr ('|' xor_expr)* + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, EXPR ) + _SetBitwiseOperandPenalty( node, '|' ) + + def Visit_xor_expr( self, node ): # pylint: disable=invalid-name + # xor_expr ::= and_expr ('^' and_expr)* + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, XOR_EXPR ) + _SetBitwiseOperandPenalty( node, '^' ) + + def Visit_and_expr( self, node ): # pylint: disable=invalid-name + # and_expr ::= shift_expr ('&' shift_expr)* + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, AND_EXPR ) + _SetBitwiseOperandPenalty( node, '&' ) + + def Visit_shift_expr( self, node ): # pylint: disable=invalid-name + # shift_expr ::= arith_expr (('<<'|'>>') arith_expr)* + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, SHIFT_EXPR ) + + _ARITH_OPS = frozenset( { 'PLUS', 'MINUS' } ) + + def Visit_arith_expr( self, node ): # pylint: disable=invalid-name + # arith_expr ::= term (('+'|'-') term)* + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, ARITH_EXPR ) + _SetExpressionOperandPenalty( node, self._ARITH_OPS ) + + _TERM_OPS = frozenset( { 'STAR', 'AT', 'SLASH', 'PERCENT', 'DOUBLESLASH' } ) + + def Visit_term( self, node ): # pylint: disable=invalid-name + # term ::= factor (('*'|'@'|'/'|'%'|'//') factor)* + self.DefaultNodeVisit( node ) + _IncreasePenalty( 
node, TERM ) + _SetExpressionOperandPenalty( node, self._TERM_OPS ) + + def Visit_factor( self, node ): # pyline: disable=invalid-name + # factor ::= ('+'|'-'|'~') factor | power + self.DefaultNodeVisit( node ) + _IncreasePenalty( node, FACTOR ) + + def Visit_atom( self, node ): # pylint: disable=invalid-name + # atom ::= ('(' [yield_expr|testlist_gexp] ')' + # '[' [listmaker] ']' | + # '{' [dictsetmaker] '}') + self.DefaultNodeVisit( node ) + if ( node.children[ 0 ].value == '(' and + not hasattr( node.children[ 0 ], 'is_pseudo' ) ): + if node.children[ -1 ].value == ')': + if pytree_utils.NodeName( node.parent ) == 'if_stmt': + _SetSplitPenalty( node.children[ -1 ], STRONGLY_CONNECTED ) + else: + if len( node.children ) > 2: + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ 1 ] ), EXPR ) + _SetSplitPenalty( node.children[ -1 ], ATOM ) + elif node.children[ 0 ].value in '[{' and len( node.children ) == 2: + # Keep empty containers together if we can. + _SetUnbreakable( node.children[ -1 ] ) + + def Visit_testlist_gexp( self, node ): # pylint: disable=invalid-name + self.DefaultNodeVisit( node ) prev_was_comma = False + for child in node.children: + if isinstance( child, pytree.Leaf ) and child.value == ',': + _SetUnbreakable( child ) + prev_was_comma = True + else: + if prev_was_comma: + _SetSplitPenalty( pytree_utils.FirstLeafNode( child ), TOGETHER ) + prev_was_comma = False -def _SetUnbreakable(node): - """Set an UNBREAKABLE penalty annotation for the given node.""" - _RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY, UNBREAKABLE) - - -def _SetStronglyConnected(*nodes): - """Set a STRONGLY_CONNECTED penalty annotation for the given nodes.""" - for node in nodes: - _RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY, - STRONGLY_CONNECTED) +def _SetUnbreakable( node ): + """Set an UNBREAKABLE penalty annotation for the given node.""" + _RecAnnotate( node, pytree_utils.Annotation.SPLIT_PENALTY, UNBREAKABLE ) -def 
_SetExpressionPenalty(node, penalty): - """Set a penalty annotation on children nodes.""" +def _SetStronglyConnected( *nodes ): + """Set a STRONGLY_CONNECTED penalty annotation for the given nodes.""" + for node in nodes: + _RecAnnotate( node, pytree_utils.Annotation.SPLIT_PENALTY, STRONGLY_CONNECTED ) - def RecExpression(node, first_child_leaf): - if node is first_child_leaf: - return - if isinstance(node, pytree.Leaf): - if node.value in {'(', 'for', 'if'}: - return - penalty_annotation = pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.SPLIT_PENALTY, default=0) - if penalty_annotation < penalty: - _SetSplitPenalty(node, penalty) - else: - for child in node.children: - RecExpression(child, first_child_leaf) +def _SetExpressionPenalty( node, penalty ): + """Set a penalty annotation on children nodes.""" - RecExpression(node, pytree_utils.FirstLeafNode(node)) + def RecExpression( node, first_child_leaf ): + if node is first_child_leaf: + return + if isinstance( node, pytree.Leaf ): + if node.value in { '(', 'for', 'if' }: + return + penalty_annotation = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.SPLIT_PENALTY, default = 0 ) + if penalty_annotation < penalty: + _SetSplitPenalty( node, penalty ) + else: + for child in node.children: + RecExpression( child, first_child_leaf ) + + RecExpression( node, pytree_utils.FirstLeafNode( node ) ) + + +def _SetBitwiseOperandPenalty( node, op ): + for index in py3compat.range( 1, len( node.children ) - 1 ): + child = node.children[ index ] + if isinstance( child, pytree.Leaf ) and child.value == op: + if style.Get( 'SPLIT_BEFORE_BITWISE_OPERATOR' ): + _SetSplitPenalty( child, style.Get( 'SPLIT_PENALTY_BITWISE_OPERATOR' ) ) + else: + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), + style.Get( 'SPLIT_PENALTY_BITWISE_OPERATOR' ) ) + + +def _SetExpressionOperandPenalty( node, ops ): + for index in py3compat.range( 1, len( node.children ) - 1 ): + child = 
node.children[ index ] + if pytree_utils.NodeName( child ) in ops: + if style.Get( 'SPLIT_BEFORE_ARITHMETIC_OPERATOR' ): + _SetSplitPenalty( + child, style.Get( 'SPLIT_PENALTY_ARITHMETIC_OPERATOR' ) ) + else: + _SetSplitPenalty( + pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), + style.Get( 'SPLIT_PENALTY_ARITHMETIC_OPERATOR' ) ) + + +def _IncreasePenalty( node, amt ): + """Increase a penalty annotation on children nodes.""" + + def RecExpression( node, first_child_leaf ): + if node is first_child_leaf: + return + + if isinstance( node, pytree.Leaf ): + if node.value in { '(', 'for' }: + return + penalty = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.SPLIT_PENALTY, default = 0 ) + _SetSplitPenalty( node, penalty + amt ) + else: + for child in node.children: + RecExpression( child, first_child_leaf ) -def _SetBitwiseOperandPenalty(node, op): - for index in py3compat.range(1, len(node.children) - 1): - child = node.children[index] - if isinstance(child, pytree.Leaf) and child.value == op: - if style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'): - _SetSplitPenalty(child, style.Get('SPLIT_PENALTY_BITWISE_OPERATOR')) - else: - _SetSplitPenalty( - pytree_utils.FirstLeafNode(node.children[index + 1]), - style.Get('SPLIT_PENALTY_BITWISE_OPERATOR')) - - -def _SetExpressionOperandPenalty(node, ops): - for index in py3compat.range(1, len(node.children) - 1): - child = node.children[index] - if pytree_utils.NodeName(child) in ops: - if style.Get('SPLIT_BEFORE_ARITHMETIC_OPERATOR'): - _SetSplitPenalty(child, style.Get('SPLIT_PENALTY_ARITHMETIC_OPERATOR')) - else: - _SetSplitPenalty( - pytree_utils.FirstLeafNode(node.children[index + 1]), - style.Get('SPLIT_PENALTY_ARITHMETIC_OPERATOR')) - - -def _IncreasePenalty(node, amt): - """Increase a penalty annotation on children nodes.""" - - def RecExpression(node, first_child_leaf): - if node is first_child_leaf: - return - - if isinstance(node, pytree.Leaf): - if node.value in {'(', 'for'}: - return - penalty = 
pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.SPLIT_PENALTY, default=0) - _SetSplitPenalty(node, penalty + amt) - else: - for child in node.children: - RecExpression(child, first_child_leaf) - - RecExpression(node, pytree_utils.FirstLeafNode(node)) + RecExpression( node, pytree_utils.FirstLeafNode( node ) ) -def _RecAnnotate(tree, annotate_name, annotate_value): - """Recursively set the given annotation on all leafs of the subtree. +def _RecAnnotate( tree, annotate_name, annotate_value ): + """Recursively set the given annotation on all leafs of the subtree. Takes care to only increase the penalty. If the node already has a higher or equal penalty associated with it, this is a no-op. @@ -594,40 +603,40 @@ def _RecAnnotate(tree, annotate_name, annotate_value): annotate_name: name of the annotation to set annotate_value: value of the annotation to set """ - for child in tree.children: - _RecAnnotate(child, annotate_name, annotate_value) - if isinstance(tree, pytree.Leaf): - cur_annotate = pytree_utils.GetNodeAnnotation( - tree, annotate_name, default=0) - if cur_annotate < annotate_value: - pytree_utils.SetNodeAnnotation(tree, annotate_name, annotate_value) - - -_COMP_OPS = frozenset({'==', '!=', '<=', '<', '>', '>=', '<>', 'in', 'is'}) - - -def _StronglyConnectedCompOp(op): - if (len(op.children[1].children) == 2 and - pytree_utils.NodeName(op.children[1]) == 'comp_op'): - if (pytree_utils.FirstLeafNode(op.children[1]).value == 'not' and - pytree_utils.LastLeafNode(op.children[1]).value == 'in'): - return True - if (pytree_utils.FirstLeafNode(op.children[1]).value == 'is' and - pytree_utils.LastLeafNode(op.children[1]).value == 'not'): - return True - if (isinstance(op.children[1], pytree.Leaf) and - op.children[1].value in _COMP_OPS): - return True - return False - - -def _DecrementSplitPenalty(node, amt): - penalty = pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.SPLIT_PENALTY, default=amt) - penalty = penalty - amt if amt < 
penalty else 0 - _SetSplitPenalty(node, penalty) - - -def _SetSplitPenalty(node, penalty): - pytree_utils.SetNodeAnnotation(node, pytree_utils.Annotation.SPLIT_PENALTY, - penalty) + for child in tree.children: + _RecAnnotate( child, annotate_name, annotate_value ) + if isinstance( tree, pytree.Leaf ): + cur_annotate = pytree_utils.GetNodeAnnotation( + tree, annotate_name, default = 0 ) + if cur_annotate < annotate_value: + pytree_utils.SetNodeAnnotation( tree, annotate_name, annotate_value ) + + +_COMP_OPS = frozenset( { '==', '!=', '<=', '<', '>', '>=', '<>', 'in', 'is' } ) + + +def _StronglyConnectedCompOp( op ): + if ( len( op.children[ 1 ].children ) == 2 and + pytree_utils.NodeName( op.children[ 1 ] ) == 'comp_op' ): + if ( pytree_utils.FirstLeafNode( op.children[ 1 ] ).value == 'not' and + pytree_utils.LastLeafNode( op.children[ 1 ] ).value == 'in' ): + return True + if ( pytree_utils.FirstLeafNode( op.children[ 1 ] ).value == 'is' and + pytree_utils.LastLeafNode( op.children[ 1 ] ).value == 'not' ): + return True + if ( isinstance( op.children[ 1 ], pytree.Leaf ) and + op.children[ 1 ].value in _COMP_OPS ): + return True + return False + + +def _DecrementSplitPenalty( node, amt ): + penalty = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.SPLIT_PENALTY, default = amt ) + penalty = penalty - amt if amt < penalty else 0 + _SetSplitPenalty( node, penalty ) + + +def _SetSplitPenalty( node, penalty ): + pytree_utils.SetNodeAnnotation( + node, pytree_utils.Annotation.SPLIT_PENALTY, penalty ) diff --git a/yapf/pytree/subtype_assigner.py b/yapf/pytree/subtype_assigner.py index 0ee247a82..19c65b323 100644 --- a/yapf/pytree/subtype_assigner.py +++ b/yapf/pytree/subtype_assigner.py @@ -34,14 +34,14 @@ from yapf.yapflib import subtypes -def AssignSubtypes(tree): - """Run the subtype assigner visitor over the tree, modifying it in place. +def AssignSubtypes( tree ): + """Run the subtype assigner visitor over the tree, modifying it in place. 
Arguments: tree: the top-level pytree node to annotate with subtypes. """ - subtype_assigner = _SubtypeAssigner() - subtype_assigner.Visit(tree) + subtype_assigner = _SubtypeAssigner() + subtype_assigner.Visit( tree ) # Map tokens in argument lists to their respective subtype. @@ -53,447 +53,448 @@ def AssignSubtypes(tree): } -class _SubtypeAssigner(pytree_visitor.PyTreeVisitor): - """_SubtypeAssigner - see file-level docstring for detailed description. +class _SubtypeAssigner( pytree_visitor.PyTreeVisitor ): + """_SubtypeAssigner - see file-level docstring for detailed description. The subtype is added as an annotation to the pytree token. """ - def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name - # dictsetmaker ::= (test ':' test (comp_for | - # (',' test ':' test)* [','])) | - # (test (comp_for | (',' test)* [','])) - for child in node.children: - self.Visit(child) - - comp_for = False - dict_maker = False - - for child in node.children: - if pytree_utils.NodeName(child) == 'comp_for': - comp_for = True - _AppendFirstLeafTokenSubtype(child, subtypes.DICT_SET_GENERATOR) - elif child.type in (grammar_token.COLON, grammar_token.DOUBLESTAR): - dict_maker = True - - if not comp_for and dict_maker: - last_was_colon = False - unpacking = False - for child in node.children: - if child.type == grammar_token.DOUBLESTAR: - _AppendFirstLeafTokenSubtype(child, subtypes.KWARGS_STAR_STAR) - if last_was_colon: - if style.Get('INDENT_DICTIONARY_VALUE'): - _InsertPseudoParentheses(child) - else: - _AppendFirstLeafTokenSubtype(child, subtypes.DICTIONARY_VALUE) - elif (isinstance(child, pytree.Node) or - (not child.value.startswith('#') and child.value not in '{:,')): - # Mark the first leaf of a key entry as a DICTIONARY_KEY. We - # normally want to split before them if the dictionary cannot exist - # on a single line. 
- if not unpacking or pytree_utils.FirstLeafNode(child).value == '**': - _AppendFirstLeafTokenSubtype(child, subtypes.DICTIONARY_KEY) - _AppendSubtypeRec(child, subtypes.DICTIONARY_KEY_PART) - last_was_colon = child.type == grammar_token.COLON - if child.type == grammar_token.DOUBLESTAR: - unpacking = True - elif last_was_colon: - unpacking = False - - def Visit_expr_stmt(self, node): # pylint: disable=invalid-name - # expr_stmt ::= testlist_star_expr (augassign (yield_expr|testlist) - # | ('=' (yield_expr|testlist_star_expr))*) - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == '=': - _AppendTokenSubtype(child, subtypes.ASSIGN_OPERATOR) - - def Visit_or_test(self, node): # pylint: disable=invalid-name - # or_test ::= and_test ('or' and_test)* - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == 'or': - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) - - def Visit_and_test(self, node): # pylint: disable=invalid-name - # and_test ::= not_test ('and' not_test)* - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == 'and': - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) - - def Visit_not_test(self, node): # pylint: disable=invalid-name - # not_test ::= 'not' not_test | comparison - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == 'not': - _AppendTokenSubtype(child, subtypes.UNARY_OPERATOR) - - def Visit_comparison(self, node): # pylint: disable=invalid-name - # comparison ::= expr (comp_op expr)* - # comp_op ::= '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not in'|'is'|'is not' - for child in node.children: - self.Visit(child) - if (isinstance(child, pytree.Leaf) and - child.value in {'<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'is'}): - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) - elif pytree_utils.NodeName(child) == 'comp_op': - for 
grandchild in child.children: - _AppendTokenSubtype(grandchild, subtypes.BINARY_OPERATOR) - - def Visit_star_expr(self, node): # pylint: disable=invalid-name - # star_expr ::= '*' expr - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == '*': - _AppendTokenSubtype(child, subtypes.UNARY_OPERATOR) - _AppendTokenSubtype(child, subtypes.VARARGS_STAR) - - def Visit_expr(self, node): # pylint: disable=invalid-name - # expr ::= xor_expr ('|' xor_expr)* - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == '|': - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) - - def Visit_xor_expr(self, node): # pylint: disable=invalid-name - # xor_expr ::= and_expr ('^' and_expr)* - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == '^': - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) - - def Visit_and_expr(self, node): # pylint: disable=invalid-name - # and_expr ::= shift_expr ('&' shift_expr)* - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == '&': - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) - - def Visit_shift_expr(self, node): # pylint: disable=invalid-name - # shift_expr ::= arith_expr (('<<'|'>>') arith_expr)* - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value in {'<<', '>>'}: - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) - - def Visit_arith_expr(self, node): # pylint: disable=invalid-name - # arith_expr ::= term (('+'|'-') term)* - for child in node.children: - self.Visit(child) - if _IsAExprOperator(child): - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) - - if _IsSimpleExpression(node): - for child in node.children: - if _IsAExprOperator(child): - _AppendTokenSubtype(child, subtypes.SIMPLE_EXPRESSION) - - def Visit_term(self, node): # pylint: disable=invalid-name - # term ::= factor 
(('*'|'/'|'%'|'//'|'@') factor)* - for child in node.children: - self.Visit(child) - if _IsMExprOperator(child): - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) - - if _IsSimpleExpression(node): - for child in node.children: - if _IsMExprOperator(child): - _AppendTokenSubtype(child, subtypes.SIMPLE_EXPRESSION) - - def Visit_factor(self, node): # pylint: disable=invalid-name - # factor ::= ('+'|'-'|'~') factor | power - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value in '+-~': - _AppendTokenSubtype(child, subtypes.UNARY_OPERATOR) - - def Visit_power(self, node): # pylint: disable=invalid-name - # power ::= atom trailer* ['**' factor] - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == '**': - _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + def Visit_dictsetmaker( self, node ): # pylint: disable=invalid-name + # dictsetmaker ::= (test ':' test (comp_for | + # (',' test ':' test)* [','])) | + # (test (comp_for | (',' test)* [','])) + for child in node.children: + self.Visit( child ) + + comp_for = False + dict_maker = False + + for child in node.children: + if pytree_utils.NodeName( child ) == 'comp_for': + comp_for = True + _AppendFirstLeafTokenSubtype( child, subtypes.DICT_SET_GENERATOR ) + elif child.type in ( grammar_token.COLON, grammar_token.DOUBLESTAR ): + dict_maker = True + + if not comp_for and dict_maker: + last_was_colon = False + unpacking = False + for child in node.children: + if child.type == grammar_token.DOUBLESTAR: + _AppendFirstLeafTokenSubtype( child, subtypes.KWARGS_STAR_STAR ) + if last_was_colon: + if style.Get( 'INDENT_DICTIONARY_VALUE' ): + _InsertPseudoParentheses( child ) + else: + _AppendFirstLeafTokenSubtype( child, subtypes.DICTIONARY_VALUE ) + elif ( isinstance( child, pytree.Node ) or + ( not child.value.startswith( '#' ) and + child.value not in '{:,' ) ): + # Mark the first leaf of a key entry as a DICTIONARY_KEY. 
We + # normally want to split before them if the dictionary cannot exist + # on a single line. + if not unpacking or pytree_utils.FirstLeafNode( + child ).value == '**': + _AppendFirstLeafTokenSubtype( child, subtypes.DICTIONARY_KEY ) + _AppendSubtypeRec( child, subtypes.DICTIONARY_KEY_PART ) + last_was_colon = child.type == grammar_token.COLON + if child.type == grammar_token.DOUBLESTAR: + unpacking = True + elif last_was_colon: + unpacking = False + + def Visit_expr_stmt( self, node ): # pylint: disable=invalid-name + # expr_stmt ::= testlist_star_expr (augassign (yield_expr|testlist) + # | ('=' (yield_expr|testlist_star_expr))*) + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == '=': + _AppendTokenSubtype( child, subtypes.ASSIGN_OPERATOR ) + + def Visit_or_test( self, node ): # pylint: disable=invalid-name + # or_test ::= and_test ('or' and_test)* + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == 'or': + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + + def Visit_and_test( self, node ): # pylint: disable=invalid-name + # and_test ::= not_test ('and' not_test)* + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == 'and': + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + + def Visit_not_test( self, node ): # pylint: disable=invalid-name + # not_test ::= 'not' not_test | comparison + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == 'not': + _AppendTokenSubtype( child, subtypes.UNARY_OPERATOR ) + + def Visit_comparison( self, node ): # pylint: disable=invalid-name + # comparison ::= expr (comp_op expr)* + # comp_op ::= '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not in'|'is'|'is not' + for child in node.children: + self.Visit( child ) + if ( isinstance( child, pytree.Leaf ) and child.value + in { '<', '>', '==', '>=', '<=', 
'<>', '!=', 'in', 'is' } ): + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + elif pytree_utils.NodeName( child ) == 'comp_op': + for grandchild in child.children: + _AppendTokenSubtype( grandchild, subtypes.BINARY_OPERATOR ) + + def Visit_star_expr( self, node ): # pylint: disable=invalid-name + # star_expr ::= '*' expr + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == '*': + _AppendTokenSubtype( child, subtypes.UNARY_OPERATOR ) + _AppendTokenSubtype( child, subtypes.VARARGS_STAR ) + + def Visit_expr( self, node ): # pylint: disable=invalid-name + # expr ::= xor_expr ('|' xor_expr)* + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == '|': + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + + def Visit_xor_expr( self, node ): # pylint: disable=invalid-name + # xor_expr ::= and_expr ('^' and_expr)* + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == '^': + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + + def Visit_and_expr( self, node ): # pylint: disable=invalid-name + # and_expr ::= shift_expr ('&' shift_expr)* + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == '&': + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + + def Visit_shift_expr( self, node ): # pylint: disable=invalid-name + # shift_expr ::= arith_expr (('<<'|'>>') arith_expr)* + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value in { '<<', '>>' }: + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + + def Visit_arith_expr( self, node ): # pylint: disable=invalid-name + # arith_expr ::= term (('+'|'-') term)* + for child in node.children: + self.Visit( child ) + if _IsAExprOperator( child ): + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + + if _IsSimpleExpression( node ): + 
for child in node.children: + if _IsAExprOperator( child ): + _AppendTokenSubtype( child, subtypes.SIMPLE_EXPRESSION ) + + def Visit_term( self, node ): # pylint: disable=invalid-name + # term ::= factor (('*'|'/'|'%'|'//'|'@') factor)* + for child in node.children: + self.Visit( child ) + if _IsMExprOperator( child ): + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + + if _IsSimpleExpression( node ): + for child in node.children: + if _IsMExprOperator( child ): + _AppendTokenSubtype( child, subtypes.SIMPLE_EXPRESSION ) + + def Visit_factor( self, node ): # pylint: disable=invalid-name + # factor ::= ('+'|'-'|'~') factor | power + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value in '+-~': + _AppendTokenSubtype( child, subtypes.UNARY_OPERATOR ) + + def Visit_power( self, node ): # pylint: disable=invalid-name + # power ::= atom trailer* ['**' factor] + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == '**': + _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) + + def Visit_trailer( self, node ): # pylint: disable=invalid-name + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value in '[]': + _AppendTokenSubtype( child, subtypes.SUBSCRIPT_BRACKET ) + + def Visit_subscript( self, node ): # pylint: disable=invalid-name + # subscript ::= test | [test] ':' [test] [sliceop] + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == ':': + _AppendTokenSubtype( child, subtypes.SUBSCRIPT_COLON ) + + def Visit_sliceop( self, node ): # pylint: disable=invalid-name + # sliceop ::= ':' [test] + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == ':': + _AppendTokenSubtype( child, subtypes.SUBSCRIPT_COLON ) + + def Visit_argument( self, node ): # pylint: disable=invalid-name + # argument ::= + # test 
[comp_for] | test '=' test + self._ProcessArgLists( node ) + #TODO add a subtype to each argument? + + def Visit_arglist( self, node ): # pylint: disable=invalid-name + # arglist ::= + # (argument ',')* (argument [','] + # | '*' test (',' argument)* [',' '**' test] + # | '**' test) + self._ProcessArgLists( node ) + _SetArgListSubtype( + node, subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST ) + + def Visit_tname( self, node ): # pylint: disable=invalid-name + self._ProcessArgLists( node ) + _SetArgListSubtype( + node, subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST ) + + def Visit_decorator( self, node ): # pylint: disable=invalid-name + # decorator ::= + # '@' dotted_name [ '(' [arglist] ')' ] NEWLINE + for child in node.children: + if isinstance( child, pytree.Leaf ) and child.value == '@': + _AppendTokenSubtype( child, subtype = subtypes.DECORATOR ) + self.Visit( child ) + + def Visit_funcdef( self, node ): # pylint: disable=invalid-name + # funcdef ::= + # 'def' NAME parameters ['->' test] ':' suite + for child in node.children: + if child.type == grammar_token.NAME and child.value != 'def': + _AppendTokenSubtype( child, subtypes.FUNC_DEF ) + break + for child in node.children: + self.Visit( child ) + + def Visit_parameters( self, node ): # pylint: disable=invalid-name + # parameters ::= '(' [typedargslist] ')' + self._ProcessArgLists( node ) + if len( node.children ) > 2: + _AppendFirstLeafTokenSubtype( node.children[ 1 ], subtypes.PARAMETER_START ) + _AppendLastLeafTokenSubtype( node.children[ -2 ], subtypes.PARAMETER_STOP ) + + def Visit_typedargslist( self, node ): # pylint: disable=invalid-name + # typedargslist ::= + # ((tfpdef ['=' test] ',')* + # ('*' [tname] (',' tname ['=' test])* [',' '**' tname] + # | '**' tname) + # | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) + self._ProcessArgLists( node ) + _SetArgListSubtype( + node, subtypes.DEFAULT_OR_NAMED_ASSIGN, + 
subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST ) + tname = False + if not node.children: + return + + _AppendFirstLeafTokenSubtype( node.children[ 0 ], subtypes.PARAMETER_START ) + _AppendLastLeafTokenSubtype( node.children[ -1 ], subtypes.PARAMETER_STOP ) + + tname = pytree_utils.NodeName( node.children[ 0 ] ) == 'tname' + for i in range( 1, len( node.children ) ): + prev_child = node.children[ i - 1 ] + child = node.children[ i ] + if prev_child.type == grammar_token.COMMA: + _AppendFirstLeafTokenSubtype( child, subtypes.PARAMETER_START ) + elif child.type == grammar_token.COMMA: + _AppendLastLeafTokenSubtype( prev_child, subtypes.PARAMETER_STOP ) + + if pytree_utils.NodeName( child ) == 'tname': + tname = True + _SetArgListSubtype( + child, subtypes.TYPED_NAME, subtypes.TYPED_NAME_ARG_LIST ) + elif child.type == grammar_token.COMMA: + tname = False + elif child.type == grammar_token.EQUAL and tname: + _AppendTokenSubtype( child, subtype = subtypes.TYPED_NAME ) + tname = False + + def Visit_varargslist( self, node ): # pylint: disable=invalid-name + # varargslist ::= + # ((vfpdef ['=' test] ',')* + # ('*' [vname] (',' vname ['=' test])* [',' '**' vname] + # | '**' vname) + # | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) + self._ProcessArgLists( node ) + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ) and child.value == '=': + _AppendTokenSubtype( child, subtypes.VARARGS_LIST ) + + def Visit_comp_for( self, node ): # pylint: disable=invalid-name + # comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter] + _AppendSubtypeRec( node, subtypes.COMP_FOR ) + # Mark the previous node as COMP_EXPR unless this is a nested comprehension + # as these will have the outer comprehension as their previous node. 
+ attr = pytree_utils.GetNodeAnnotation( + node.parent, pytree_utils.Annotation.SUBTYPE ) + if not attr or subtypes.COMP_FOR not in attr: + _AppendSubtypeRec( node.parent.children[ 0 ], subtypes.COMP_EXPR ) + self.DefaultNodeVisit( node ) + + def Visit_old_comp_for( self, node ): # pylint: disable=invalid-name + # Python 3.7 + self.Visit_comp_for( node ) + + def Visit_comp_if( self, node ): # pylint: disable=invalid-name + # comp_if ::= 'if' old_test [comp_iter] + _AppendSubtypeRec( node, subtypes.COMP_IF ) + self.DefaultNodeVisit( node ) + + def Visit_old_comp_if( self, node ): # pylint: disable=invalid-name + # Python 3.7 + self.Visit_comp_if( node ) + + def _ProcessArgLists( self, node ): + """Common method for processing argument lists.""" + for child in node.children: + self.Visit( child ) + if isinstance( child, pytree.Leaf ): + _AppendTokenSubtype( + child, + subtype = _ARGLIST_TOKEN_TO_SUBTYPE.get( + child.value, subtypes.NONE ) ) + + +def _SetArgListSubtype( node, node_subtype, list_subtype ): + """Set named assign subtype on elements in a arg list.""" + + def HasSubtype( node ): + """Return True if the arg list has a named assign subtype.""" + if isinstance( node, pytree.Leaf ): + return node_subtype in pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.SUBTYPE, set() ) + + for child in node.children: + node_name = pytree_utils.NodeName( child ) + if node_name not in { 'atom', 'arglist', 'power' }: + if HasSubtype( child ): + return True + + return False + + if not HasSubtype( node ): + return - def Visit_trailer(self, node): # pylint: disable=invalid-name for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value in '[]': - _AppendTokenSubtype(child, subtypes.SUBSCRIPT_BRACKET) + node_name = pytree_utils.NodeName( child ) + if node_name not in { 'atom', 'COMMA' }: + _AppendFirstLeafTokenSubtype( child, list_subtype ) - def Visit_subscript(self, node): # pylint: disable=invalid-name - # subscript ::= 
test | [test] ':' [test] [sliceop] - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == ':': - _AppendTokenSubtype(child, subtypes.SUBSCRIPT_COLON) - def Visit_sliceop(self, node): # pylint: disable=invalid-name - # sliceop ::= ':' [test] - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == ':': - _AppendTokenSubtype(child, subtypes.SUBSCRIPT_COLON) - - def Visit_argument(self, node): # pylint: disable=invalid-name - # argument ::= - # test [comp_for] | test '=' test - self._ProcessArgLists(node) - #TODO add a subtype to each argument? - - def Visit_arglist(self, node): # pylint: disable=invalid-name - # arglist ::= - # (argument ',')* (argument [','] - # | '*' test (',' argument)* [',' '**' test] - # | '**' test) - self._ProcessArgLists(node) - _SetArgListSubtype(node, subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) - - def Visit_tname(self, node): # pylint: disable=invalid-name - self._ProcessArgLists(node) - _SetArgListSubtype(node, subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) - - def Visit_decorator(self, node): # pylint: disable=invalid-name - # decorator ::= - # '@' dotted_name [ '(' [arglist] ')' ] NEWLINE - for child in node.children: - if isinstance(child, pytree.Leaf) and child.value == '@': - _AppendTokenSubtype(child, subtype=subtypes.DECORATOR) - self.Visit(child) +def _AppendTokenSubtype( node, subtype ): + """Append the token's subtype only if it's not already set.""" + pytree_utils.AppendNodeAnnotation( node, pytree_utils.Annotation.SUBTYPE, subtype ) - def Visit_funcdef(self, node): # pylint: disable=invalid-name - # funcdef ::= - # 'def' NAME parameters ['->' test] ':' suite - for child in node.children: - if child.type == grammar_token.NAME and child.value != 'def': - _AppendTokenSubtype(child, subtypes.FUNC_DEF) - break - for child in node.children: - self.Visit(child) - - def 
Visit_parameters(self, node): # pylint: disable=invalid-name - # parameters ::= '(' [typedargslist] ')' - self._ProcessArgLists(node) - if len(node.children) > 2: - _AppendFirstLeafTokenSubtype(node.children[1], subtypes.PARAMETER_START) - _AppendLastLeafTokenSubtype(node.children[-2], subtypes.PARAMETER_STOP) - - def Visit_typedargslist(self, node): # pylint: disable=invalid-name - # typedargslist ::= - # ((tfpdef ['=' test] ',')* - # ('*' [tname] (',' tname ['=' test])* [',' '**' tname] - # | '**' tname) - # | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) - self._ProcessArgLists(node) - _SetArgListSubtype(node, subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) - tname = False - if not node.children: - return - - _AppendFirstLeafTokenSubtype(node.children[0], subtypes.PARAMETER_START) - _AppendLastLeafTokenSubtype(node.children[-1], subtypes.PARAMETER_STOP) - - tname = pytree_utils.NodeName(node.children[0]) == 'tname' - for i in range(1, len(node.children)): - prev_child = node.children[i - 1] - child = node.children[i] - if prev_child.type == grammar_token.COMMA: - _AppendFirstLeafTokenSubtype(child, subtypes.PARAMETER_START) - elif child.type == grammar_token.COMMA: - _AppendLastLeafTokenSubtype(prev_child, subtypes.PARAMETER_STOP) - - if pytree_utils.NodeName(child) == 'tname': - tname = True - _SetArgListSubtype(child, subtypes.TYPED_NAME, - subtypes.TYPED_NAME_ARG_LIST) - # NOTE Every element of the tynamme argument - # should have this list type - _AppendSubtypeRec(child, subtypes.TYPED_NAME_ARG_LIST) - - elif child.type == grammar_token.COMMA: - tname = False - elif child.type == grammar_token.EQUAL and tname: - _AppendTokenSubtype(child, subtype=subtypes.TYPED_NAME) - tname = False - def Visit_varargslist(self, node): # pylint: disable=invalid-name - # varargslist ::= - # ((vfpdef ['=' test] ',')* - # ('*' [vname] (',' vname ['=' test])* [',' '**' vname] - # | '**' vname) - # | vfpdef ['=' test] (',' vfpdef ['=' test])* 
[',']) - self._ProcessArgLists(node) - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf) and child.value == '=': - _AppendTokenSubtype(child, subtypes.VARARGS_LIST) - - def Visit_comp_for(self, node): # pylint: disable=invalid-name - # comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter] - _AppendSubtypeRec(node, subtypes.COMP_FOR) - # Mark the previous node as COMP_EXPR unless this is a nested comprehension - # as these will have the outer comprehension as their previous node. - attr = pytree_utils.GetNodeAnnotation(node.parent, - pytree_utils.Annotation.SUBTYPE) - if not attr or subtypes.COMP_FOR not in attr: - _AppendSubtypeRec(node.parent.children[0], subtypes.COMP_EXPR) - self.DefaultNodeVisit(node) - - def Visit_old_comp_for(self, node): # pylint: disable=invalid-name - # Python 3.7 - self.Visit_comp_for(node) - - def Visit_comp_if(self, node): # pylint: disable=invalid-name - # comp_if ::= 'if' old_test [comp_iter] - _AppendSubtypeRec(node, subtypes.COMP_IF) - self.DefaultNodeVisit(node) - - def Visit_old_comp_if(self, node): # pylint: disable=invalid-name - # Python 3.7 - self.Visit_comp_if(node) - - def _ProcessArgLists(self, node): - """Common method for processing argument lists.""" - for child in node.children: - self.Visit(child) - if isinstance(child, pytree.Leaf): - _AppendTokenSubtype( - child, - subtype=_ARGLIST_TOKEN_TO_SUBTYPE.get(child.value, subtypes.NONE)) +def _AppendFirstLeafTokenSubtype( node, subtype ): + """Append the first leaf token's subtypes.""" + if isinstance( node, pytree.Leaf ): + _AppendTokenSubtype( node, subtype ) + return + _AppendFirstLeafTokenSubtype( node.children[ 0 ], subtype ) -def _SetArgListSubtype(node, node_subtype, list_subtype): - """Set named assign subtype on elements in a arg list.""" +def _AppendLastLeafTokenSubtype( node, subtype ): + """Append the last leaf token's subtypes.""" + if isinstance( node, pytree.Leaf ): + _AppendTokenSubtype( node, subtype ) + return + 
_AppendLastLeafTokenSubtype( node.children[ -1 ], subtype ) - def HasSubtype(node): - """Return True if the arg list has a named assign subtype.""" - if isinstance(node, pytree.Leaf): - return node_subtype in pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.SUBTYPE, set()) +def _AppendSubtypeRec( node, subtype, force = True ): + """Append the leafs in the node to the given subtype.""" + if isinstance( node, pytree.Leaf ): + _AppendTokenSubtype( node, subtype ) + return for child in node.children: - node_name = pytree_utils.NodeName(child) - if node_name not in {'atom', 'arglist', 'power'}: - if HasSubtype(child): - return True - - return False - - if not HasSubtype(node): - return - - for child in node.children: - node_name = pytree_utils.NodeName(child) - if node_name not in {'atom', 'COMMA'}: - _AppendFirstLeafTokenSubtype(child, list_subtype) - - -def _AppendTokenSubtype(node, subtype): - """Append the token's subtype only if it's not already set.""" - pytree_utils.AppendNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE, - subtype) - - -def _AppendFirstLeafTokenSubtype(node, subtype): - """Append the first leaf token's subtypes.""" - if isinstance(node, pytree.Leaf): - _AppendTokenSubtype(node, subtype) - return - _AppendFirstLeafTokenSubtype(node.children[0], subtype) - - -def _AppendLastLeafTokenSubtype(node, subtype): - """Append the last leaf token's subtypes.""" - if isinstance(node, pytree.Leaf): - _AppendTokenSubtype(node, subtype) - return - _AppendLastLeafTokenSubtype(node.children[-1], subtype) - - -def _AppendSubtypeRec(node, subtype, force=True): - """Append the leafs in the node to the given subtype.""" - if isinstance(node, pytree.Leaf): - _AppendTokenSubtype(node, subtype) - return - for child in node.children: - _AppendSubtypeRec(child, subtype, force=force) - - -def _InsertPseudoParentheses(node): - """Insert pseudo parentheses so that dicts can be formatted correctly.""" - comment_node = None - if isinstance(node, 
pytree.Node): - if node.children[-1].type == grammar_token.COMMENT: - comment_node = node.children[-1].clone() - node.children[-1].remove() - - first = pytree_utils.FirstLeafNode(node) - last = pytree_utils.LastLeafNode(node) - - if first == last and first.type == grammar_token.COMMENT: - # A comment was inserted before the value, which is a pytree.Leaf. - # Encompass the dictionary's value into an ATOM node. - last = first.next_sibling - last_clone = last.clone() - new_node = pytree.Node(syms.atom, [first.clone(), last_clone]) - for orig_leaf, clone_leaf in zip(last.leaves(), last_clone.leaves()): - pytree_utils.CopyYapfAnnotations(orig_leaf, clone_leaf) - if hasattr(orig_leaf, 'is_pseudo'): - clone_leaf.is_pseudo = orig_leaf.is_pseudo - - node.replace(new_node) - node = new_node - last.remove() - - first = pytree_utils.FirstLeafNode(node) - last = pytree_utils.LastLeafNode(node) - - lparen = pytree.Leaf( - grammar_token.LPAR, - u'(', - context=('', (first.get_lineno(), first.column - 1))) - last_lineno = last.get_lineno() - if last.type == grammar_token.STRING and '\n' in last.value: - last_lineno += last.value.count('\n') - - if last.type == grammar_token.STRING and '\n' in last.value: - last_column = len(last.value.split('\n')[-1]) + 1 - else: - last_column = last.column + len(last.value) + 1 - rparen = pytree.Leaf( - grammar_token.RPAR, u')', context=('', (last_lineno, last_column))) - - lparen.is_pseudo = True - rparen.is_pseudo = True - - if isinstance(node, pytree.Node): - node.insert_child(0, lparen) - node.append_child(rparen) - if comment_node: - node.append_child(comment_node) - _AppendFirstLeafTokenSubtype(node, subtypes.DICTIONARY_VALUE) - else: - clone = node.clone() - for orig_leaf, clone_leaf in zip(node.leaves(), clone.leaves()): - pytree_utils.CopyYapfAnnotations(orig_leaf, clone_leaf) - new_node = pytree.Node(syms.atom, [lparen, clone, rparen]) - node.replace(new_node) - _AppendFirstLeafTokenSubtype(clone, subtypes.DICTIONARY_VALUE) - - -def 
_IsAExprOperator(node): - return isinstance(node, pytree.Leaf) and node.value in {'+', '-'} - - -def _IsMExprOperator(node): - return isinstance(node, - pytree.Leaf) and node.value in {'*', '/', '%', '//', '@'} - - -def _IsSimpleExpression(node): - """A node with only leafs as children.""" - return all(isinstance(child, pytree.Leaf) for child in node.children) + _AppendSubtypeRec( child, subtype, force = force ) + + +def _InsertPseudoParentheses( node ): + """Insert pseudo parentheses so that dicts can be formatted correctly.""" + comment_node = None + if isinstance( node, pytree.Node ): + if node.children[ -1 ].type == grammar_token.COMMENT: + comment_node = node.children[ -1 ].clone() + node.children[ -1 ].remove() + + first = pytree_utils.FirstLeafNode( node ) + last = pytree_utils.LastLeafNode( node ) + + if first == last and first.type == grammar_token.COMMENT: + # A comment was inserted before the value, which is a pytree.Leaf. + # Encompass the dictionary's value into an ATOM node. 
+ last = first.next_sibling + last_clone = last.clone() + new_node = pytree.Node( syms.atom, [ first.clone(), last_clone ] ) + for orig_leaf, clone_leaf in zip( last.leaves(), last_clone.leaves() ): + pytree_utils.CopyYapfAnnotations( orig_leaf, clone_leaf ) + if hasattr( orig_leaf, 'is_pseudo' ): + clone_leaf.is_pseudo = orig_leaf.is_pseudo + + node.replace( new_node ) + node = new_node + last.remove() + + first = pytree_utils.FirstLeafNode( node ) + last = pytree_utils.LastLeafNode( node ) + + lparen = pytree.Leaf( + grammar_token.LPAR, + u'(', + context = ( '', ( first.get_lineno(), first.column - 1 ) ) ) + last_lineno = last.get_lineno() + if last.type == grammar_token.STRING and '\n' in last.value: + last_lineno += last.value.count( '\n' ) + + if last.type == grammar_token.STRING and '\n' in last.value: + last_column = len( last.value.split( '\n' )[ -1 ] ) + 1 + else: + last_column = last.column + len( last.value ) + 1 + rparen = pytree.Leaf( + grammar_token.RPAR, u')', context = ( '', ( last_lineno, last_column ) ) ) + + lparen.is_pseudo = True + rparen.is_pseudo = True + + if isinstance( node, pytree.Node ): + node.insert_child( 0, lparen ) + node.append_child( rparen ) + if comment_node: + node.append_child( comment_node ) + _AppendFirstLeafTokenSubtype( node, subtypes.DICTIONARY_VALUE ) + else: + clone = node.clone() + for orig_leaf, clone_leaf in zip( node.leaves(), clone.leaves() ): + pytree_utils.CopyYapfAnnotations( orig_leaf, clone_leaf ) + new_node = pytree.Node( syms.atom, [ lparen, clone, rparen ] ) + node.replace( new_node ) + _AppendFirstLeafTokenSubtype( clone, subtypes.DICTIONARY_VALUE ) + + +def _IsAExprOperator( node ): + return isinstance( node, pytree.Leaf ) and node.value in { '+', '-' } + + +def _IsMExprOperator( node ): + return isinstance( node, + pytree.Leaf ) and node.value in { '*', '/', '%', '//', '@' } + + +def _IsSimpleExpression( node ): + """A node with only leafs as children.""" + return all( isinstance( child, pytree.Leaf ) 
for child in node.children ) diff --git a/yapf/third_party/yapf_diff/yapf_diff.py b/yapf/third_party/yapf_diff/yapf_diff.py index 810a6a2d4..afd3ebc91 100644 --- a/yapf/third_party/yapf_diff/yapf_diff.py +++ b/yapf/third_party/yapf_diff/yapf_diff.py @@ -33,113 +33,114 @@ import sys if sys.version_info.major >= 3: - from io import StringIO + from io import StringIO else: - from io import BytesIO as StringIO + from io import BytesIO as StringIO def main(): - parser = argparse.ArgumentParser( - description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument( - '-i', - '--in-place', - action='store_true', - default=False, - help='apply edits to files instead of displaying a diff') - parser.add_argument( - '-p', - '--prefix', - metavar='NUM', - default=1, - help='strip the smallest prefix containing P slashes') - parser.add_argument( - '--regex', - metavar='PATTERN', - default=None, - help='custom pattern selecting file paths to reformat ' - '(case sensitive, overrides -iregex)') - parser.add_argument( - '--iregex', - metavar='PATTERN', - default=r'.*\.(py)', - help='custom pattern selecting file paths to reformat ' - '(case insensitive, overridden by -regex)') - parser.add_argument( - '-v', - '--verbose', - action='store_true', - help='be more verbose, ineffective without -i') - parser.add_argument( - '--style', - help='specify formatting style: either a style name (for ' - 'example "pep8" or "google"), or the name of a file with ' - 'style settings. 
The default is pep8 unless a ' - '.style.yapf or setup.cfg file located in one of the ' - 'parent directories of the source file (or current ' - 'directory for stdin)') - parser.add_argument( - '--binary', default='yapf', help='location of binary to use for yapf') - args = parser.parse_args() + parser = argparse.ArgumentParser( + description = __doc__, formatter_class = argparse.RawDescriptionHelpFormatter ) + parser.add_argument( + '-i', + '--in-place', + action = 'store_true', + default = False, + help = 'apply edits to files instead of displaying a diff' ) + parser.add_argument( + '-p', + '--prefix', + metavar = 'NUM', + default = 1, + help = 'strip the smallest prefix containing P slashes' ) + parser.add_argument( + '--regex', + metavar = 'PATTERN', + default = None, + help = 'custom pattern selecting file paths to reformat ' + '(case sensitive, overrides -iregex)' ) + parser.add_argument( + '--iregex', + metavar = 'PATTERN', + default = r'.*\.(py)', + help = 'custom pattern selecting file paths to reformat ' + '(case insensitive, overridden by -regex)' ) + parser.add_argument( + '-v', + '--verbose', + action = 'store_true', + help = 'be more verbose, ineffective without -i' ) + parser.add_argument( + '--style', + help = 'specify formatting style: either a style name (for ' + 'example "pep8" or "google"), or the name of a file with ' + 'style settings. The default is pep8 unless a ' + '.style.yapf or setup.cfg file located in one of the ' + 'parent directories of the source file (or current ' + 'directory for stdin)' ) + parser.add_argument( + '--binary', default = 'yapf', help = 'location of binary to use for yapf' ) + args = parser.parse_args() - # Extract changed lines for each file. - filename = None - lines_by_file = {} - for line in sys.stdin: - match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.prefix, line) - if match: - filename = match.group(2) - if filename is None: - continue + # Extract changed lines for each file. 
+ filename = None + lines_by_file = {} + for line in sys.stdin: + match = re.search( r'^\+\+\+\ (.*?/){%s}(\S*)' % args.prefix, line ) + if match: + filename = match.group( 2 ) + if filename is None: + continue - if args.regex is not None: - if not re.match('^%s$' % args.regex, filename): - continue - elif not re.match('^%s$' % args.iregex, filename, re.IGNORECASE): - continue + if args.regex is not None: + if not re.match( '^%s$' % args.regex, filename ): + continue + elif not re.match( '^%s$' % args.iregex, filename, re.IGNORECASE ): + continue - match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line) - if match: - start_line = int(match.group(1)) - line_count = 1 - if match.group(3): - line_count = int(match.group(3)) - if line_count == 0: - continue - end_line = start_line + line_count - 1 - lines_by_file.setdefault(filename, []).extend( - ['--lines', str(start_line) + '-' + str(end_line)]) + match = re.search( r'^@@.*\+(\d+)(,(\d+))?', line ) + if match: + start_line = int( match.group( 1 ) ) + line_count = 1 + if match.group( 3 ): + line_count = int( match.group( 3 ) ) + if line_count == 0: + continue + end_line = start_line + line_count - 1 + lines_by_file.setdefault( filename, [] ).extend( + [ '--lines', str( start_line ) + '-' + str( end_line ) ] ) - # Reformat files containing changes in place. - for filename, lines in lines_by_file.items(): - if args.in_place and args.verbose: - print('Formatting {}'.format(filename)) - command = [args.binary, filename] - if args.in_place: - command.append('-i') - command.extend(lines) - if args.style: - command.extend(['--style', args.style]) - p = subprocess.Popen( - command, - stdout=subprocess.PIPE, - stderr=None, - stdin=subprocess.PIPE, - universal_newlines=True) - stdout, stderr = p.communicate() - if p.returncode != 0: - sys.exit(p.returncode) + # Reformat files containing changes in place. 
+ for filename, lines in lines_by_file.items(): + if args.in_place and args.verbose: + print( 'Formatting {}'.format( filename ) ) + command = [ args.binary, filename ] + if args.in_place: + command.append( '-i' ) + command.extend( lines ) + if args.style: + command.extend( [ '--style', args.style ] ) + p = subprocess.Popen( + command, + stdout = subprocess.PIPE, + stderr = None, + stdin = subprocess.PIPE, + universal_newlines = True ) + stdout, stderr = p.communicate() + if p.returncode != 0: + sys.exit( p.returncode ) - if not args.in_place: - with open(filename) as f: - code = f.readlines() - formatted_code = StringIO(stdout).readlines() - diff = difflib.unified_diff(code, formatted_code, filename, filename, - '(before formatting)', '(after formatting)') - diff_string = ''.join(diff) - if len(diff_string) > 0: - sys.stdout.write(diff_string) + if not args.in_place: + with open( filename ) as f: + code = f.readlines() + formatted_code = StringIO( stdout ).readlines() + diff = difflib.unified_diff( + code, formatted_code, filename, filename, '(before formatting)', + '(after formatting)' ) + diff_string = ''.join( diff ) + if len( diff_string ) > 0: + sys.stdout.write( diff_string ) if __name__ == '__main__': - main() + main() diff --git a/yapf/yapflib/errors.py b/yapf/yapflib/errors.py index 99e88d9c0..8864b49c6 100644 --- a/yapf/yapflib/errors.py +++ b/yapf/yapflib/errors.py @@ -16,8 +16,8 @@ from lib2to3.pgen2 import tokenize -def FormatErrorMsg(e): - """Convert an exception into a standard format. +def FormatErrorMsg( e ): + """Convert an exception into a standard format. The standard error message format is: @@ -29,18 +29,19 @@ def FormatErrorMsg(e): Returns: A properly formatted error message string. 
""" - if isinstance(e, SyntaxError): - return '{}:{}:{}: {}'.format(e.filename, e.lineno, e.offset, e.msg) - if isinstance(e, tokenize.TokenError): - return '{}:{}:{}: {}'.format(e.filename, e.args[1][0], e.args[1][1], - e.args[0]) - return '{}:{}:{}: {}'.format(e.args[1][0], e.args[1][1], e.args[1][2], e.msg) + if isinstance( e, SyntaxError ): + return '{}:{}:{}: {}'.format( e.filename, e.lineno, e.offset, e.msg ) + if isinstance( e, tokenize.TokenError ): + return '{}:{}:{}: {}'.format( + e.filename, e.args[ 1 ][ 0 ], e.args[ 1 ][ 1 ], e.args[ 0 ] ) + return '{}:{}:{}: {}'.format( + e.args[ 1 ][ 0 ], e.args[ 1 ][ 1 ], e.args[ 1 ][ 2 ], e.msg ) -class YapfError(Exception): - """Parent class for user errors or input errors. +class YapfError( Exception ): + """Parent class for user errors or input errors. Exceptions of this type are handled by the command line tool and result in clear error messages, as opposed to backtraces. """ - pass + pass diff --git a/yapf/yapflib/file_resources.py b/yapf/yapflib/file_resources.py index b5e2612bd..07ee951a2 100644 --- a/yapf/yapflib/file_resources.py +++ b/yapf/yapflib/file_resources.py @@ -25,49 +25,48 @@ from yapf.yapflib import py3compat from yapf.yapflib import style -CR = '\r' -LF = '\n' +CR = '\r' +LF = '\n' CRLF = '\r\n' -def _GetExcludePatternsFromYapfIgnore(filename): - """Get a list of file patterns to ignore from .yapfignore.""" - ignore_patterns = [] - if os.path.isfile(filename) and os.access(filename, os.R_OK): - with open(filename, 'r') as fd: - for line in fd: - if line.strip() and not line.startswith('#'): - ignore_patterns.append(line.strip()) +def _GetExcludePatternsFromYapfIgnore( filename ): + """Get a list of file patterns to ignore from .yapfignore.""" + ignore_patterns = [] + if os.path.isfile( filename ) and os.access( filename, os.R_OK ): + with open( filename, 'r' ) as fd: + for line in fd: + if line.strip() and not line.startswith( '#' ): + ignore_patterns.append( line.strip() ) - if 
any(e.startswith('./') for e in ignore_patterns): - raise errors.YapfError('path in .yapfignore should not start with ./') + if any( e.startswith( './' ) for e in ignore_patterns ): + raise errors.YapfError( 'path in .yapfignore should not start with ./' ) - return ignore_patterns + return ignore_patterns -def _GetExcludePatternsFromPyprojectToml(filename): - """Get a list of file patterns to ignore from pyproject.toml.""" - ignore_patterns = [] - try: - import toml - except ImportError: - raise errors.YapfError( - "toml package is needed for using pyproject.toml as a " - "configuration file") +def _GetExcludePatternsFromPyprojectToml( filename ): + """Get a list of file patterns to ignore from pyproject.toml.""" + ignore_patterns = [] + try: + import toml + except ImportError: + raise errors.YapfError( + "toml package is needed for using pyproject.toml as a " + "configuration file" ) - if os.path.isfile(filename) and os.access(filename, os.R_OK): - pyproject_toml = toml.load(filename) - ignore_patterns = pyproject_toml.get('tool', - {}).get('yapfignore', - {}).get('ignore_patterns', []) - if any(e.startswith('./') for e in ignore_patterns): - raise errors.YapfError('path in pyproject.toml should not start with ./') + if os.path.isfile( filename ) and os.access( filename, os.R_OK ): + pyproject_toml = toml.load( filename ) + ignore_patterns = pyproject_toml.get( 'tool', {} ).get( 'yapfignore', {} ).get( + 'ignore_patterns', [] ) + if any( e.startswith( './' ) for e in ignore_patterns ): + raise errors.YapfError( 'path in pyproject.toml should not start with ./' ) - return ignore_patterns + return ignore_patterns -def GetExcludePatternsForDir(dirname): - """Return patterns of files to exclude from ignorefile in a given directory. +def GetExcludePatternsForDir( dirname ): + """Return patterns of files to exclude from ignorefile in a given directory. Looks for .yapfignore in the directory dirname. 
@@ -78,20 +77,20 @@ def GetExcludePatternsForDir(dirname): A List of file patterns to exclude if ignore file is found, otherwise empty List. """ - ignore_patterns = [] + ignore_patterns = [] - yapfignore_file = os.path.join(dirname, '.yapfignore') - if os.path.exists(yapfignore_file): - ignore_patterns += _GetExcludePatternsFromYapfIgnore(yapfignore_file) + yapfignore_file = os.path.join( dirname, '.yapfignore' ) + if os.path.exists( yapfignore_file ): + ignore_patterns += _GetExcludePatternsFromYapfIgnore( yapfignore_file ) - pyproject_toml_file = os.path.join(dirname, 'pyproject.toml') - if os.path.exists(pyproject_toml_file): - ignore_patterns += _GetExcludePatternsFromPyprojectToml(pyproject_toml_file) - return ignore_patterns + pyproject_toml_file = os.path.join( dirname, 'pyproject.toml' ) + if os.path.exists( pyproject_toml_file ): + ignore_patterns += _GetExcludePatternsFromPyprojectToml( pyproject_toml_file ) + return ignore_patterns -def GetDefaultStyleForDir(dirname, default_style=style.DEFAULT_STYLE): - """Return default style name for a given directory. +def GetDefaultStyleForDir( dirname, default_style = style.DEFAULT_STYLE ): + """Return default style name for a given directory. Looks for .style.yapf or setup.cfg or pyproject.toml in the parent directories. @@ -104,68 +103,65 @@ def GetDefaultStyleForDir(dirname, default_style=style.DEFAULT_STYLE): Returns: The filename if found, otherwise return the default style. """ - dirname = os.path.abspath(dirname) - while True: - # See if we have a .style.yapf file. - style_file = os.path.join(dirname, style.LOCAL_STYLE) - if os.path.exists(style_file): - return style_file - - # See if we have a setup.cfg file with a '[yapf]' section. - config_file = os.path.join(dirname, style.SETUP_CONFIG) - try: - fd = open(config_file) - except IOError: - pass # It's okay if it's not there. 
- else: - with fd: - config = py3compat.ConfigParser() - config.read_file(fd) - if config.has_section('yapf'): - return config_file - - # See if we have a pyproject.toml file with a '[tool.yapf]' section. - config_file = os.path.join(dirname, style.PYPROJECT_TOML) - try: - fd = open(config_file) - except IOError: - pass # It's okay if it's not there. - else: - with fd: + dirname = os.path.abspath( dirname ) + while True: + # See if we have a .style.yapf file. + style_file = os.path.join( dirname, style.LOCAL_STYLE ) + if os.path.exists( style_file ): + return style_file + + # See if we have a setup.cfg file with a '[yapf]' section. + config_file = os.path.join( dirname, style.SETUP_CONFIG ) try: - import toml - except ImportError: - raise errors.YapfError( - "toml package is needed for using pyproject.toml as a " - "configuration file") + fd = open( config_file ) + except IOError: + pass # It's okay if it's not there. + else: + with fd: + config = py3compat.ConfigParser() + config.read_file( fd ) + if config.has_section( 'yapf' ): + return config_file + + # See if we have a pyproject.toml file with a '[tool.yapf]' section. + config_file = os.path.join( dirname, style.PYPROJECT_TOML ) + try: + fd = open( config_file ) + except IOError: + pass # It's okay if it's not there. 
+ else: + with fd: + try: + import toml + except ImportError: + raise errors.YapfError( + "toml package is needed for using pyproject.toml as a " + "configuration file" ) - pyproject_toml = toml.load(config_file) - style_dict = pyproject_toml.get('tool', {}).get('yapf', None) - if style_dict is not None: - return config_file + pyproject_toml = toml.load( config_file ) + style_dict = pyproject_toml.get( 'tool', {} ).get( 'yapf', None ) + if style_dict is not None: + return config_file - if (not dirname or not os.path.basename(dirname) or - dirname == os.path.abspath(os.path.sep)): - break - dirname = os.path.dirname(dirname) + if ( not dirname or not os.path.basename( dirname ) or + dirname == os.path.abspath( os.path.sep ) ): + break + dirname = os.path.dirname( dirname ) - global_file = os.path.expanduser(style.GLOBAL_STYLE) - if os.path.exists(global_file): - return global_file + global_file = os.path.expanduser( style.GLOBAL_STYLE ) + if os.path.exists( global_file ): + return global_file - return default_style + return default_style -def GetCommandLineFiles(command_line_file_list, recursive, exclude): - """Return the list of files specified on the command line.""" - return _FindPythonFiles(command_line_file_list, recursive, exclude) +def GetCommandLineFiles( command_line_file_list, recursive, exclude ): + """Return the list of files specified on the command line.""" + return _FindPythonFiles( command_line_file_list, recursive, exclude ) -def WriteReformattedCode(filename, - reformatted_code, - encoding='', - in_place=False): - """Emit the reformatted code. +def WriteReformattedCode( filename, reformatted_code, encoding = '', in_place = False ): + """Emit the reformatted code. Write the reformatted code into the file, if in_place is True. Otherwise, write to stdout. @@ -176,117 +172,117 @@ def WriteReformattedCode(filename, encoding: (unicode) The encoding of the file. in_place: (bool) If True, then write the reformatted code to the file. 
""" - if in_place: - with py3compat.open_with_encoding( - filename, mode='w', encoding=encoding, newline='') as fd: - fd.write(reformatted_code) - else: - py3compat.EncodeAndWriteToStdout(reformatted_code) - - -def LineEnding(lines): - """Retrieve the line ending of the original source.""" - endings = {CRLF: 0, CR: 0, LF: 0} - for line in lines: - if line.endswith(CRLF): - endings[CRLF] += 1 - elif line.endswith(CR): - endings[CR] += 1 - elif line.endswith(LF): - endings[LF] += 1 - return (sorted(endings, key=endings.get, reverse=True) or [LF])[0] - - -def _FindPythonFiles(filenames, recursive, exclude): - """Find all Python files.""" - if exclude and any(e.startswith('./') for e in exclude): - raise errors.YapfError("path in '--exclude' should not start with ./") - exclude = exclude and [e.rstrip("/" + os.path.sep) for e in exclude] - - python_files = [] - for filename in filenames: - if filename != '.' and exclude and IsIgnored(filename, exclude): - continue - if os.path.isdir(filename): - if not recursive: - raise errors.YapfError( - "directory specified without '--recursive' flag: %s" % filename) - - # TODO(morbo): Look into a version of os.walk that can handle recursion. - excluded_dirs = [] - for dirpath, dirnames, filelist in os.walk(filename): - if dirpath != '.' 
and exclude and IsIgnored(dirpath, exclude): - excluded_dirs.append(dirpath) - continue - elif any(dirpath.startswith(e) for e in excluded_dirs): - continue - for f in filelist: - filepath = os.path.join(dirpath, f) - if exclude and IsIgnored(filepath, exclude): + if in_place: + with py3compat.open_with_encoding( filename, mode = 'w', encoding = encoding, + newline = '' ) as fd: + fd.write( reformatted_code ) + else: + py3compat.EncodeAndWriteToStdout( reformatted_code ) + + +def LineEnding( lines ): + """Retrieve the line ending of the original source.""" + endings = { CRLF: 0, CR: 0, LF: 0} + for line in lines: + if line.endswith( CRLF ): + endings[ CRLF ] += 1 + elif line.endswith( CR ): + endings[ CR ] += 1 + elif line.endswith( LF ): + endings[ LF ] += 1 + return ( sorted( endings, key = endings.get, reverse = True ) or [ LF ] )[ 0 ] + + +def _FindPythonFiles( filenames, recursive, exclude ): + """Find all Python files.""" + if exclude and any( e.startswith( './' ) for e in exclude ): + raise errors.YapfError( "path in '--exclude' should not start with ./" ) + exclude = exclude and [ e.rstrip( "/" + os.path.sep ) for e in exclude ] + + python_files = [] + for filename in filenames: + if filename != '.' and exclude and IsIgnored( filename, exclude ): continue - if IsPythonFile(filepath): - python_files.append(filepath) - # To prevent it from scanning the contents excluded folders, os.walk() - # lets you amend its list of child dirs `dirnames`. These edits must be - # made in-place instead of creating a modified copy of `dirnames`. - # list.remove() is slow and list.pop() is a headache. Instead clear - # `dirnames` then repopulate it. 
- dirnames_ = [dirnames.pop(0) for i in range(len(dirnames))] - for dirname in dirnames_: - dir_ = os.path.join(dirpath, dirname) - if IsIgnored(dir_, exclude): - excluded_dirs.append(dir_) - else: - dirnames.append(dirname) - - elif os.path.isfile(filename): - python_files.append(filename) - - return python_files - - -def IsIgnored(path, exclude): - """Return True if filename matches any patterns in exclude.""" - if exclude is None: - return False - path = path.lstrip(os.path.sep) - while path.startswith('.' + os.path.sep): - path = path[2:] - return any(fnmatch.fnmatch(path, e.rstrip(os.path.sep)) for e in exclude) - - -def IsPythonFile(filename): - """Return True if filename is a Python file.""" - if os.path.splitext(filename)[1] == '.py': - return True - - try: - with open(filename, 'rb') as fd: - encoding = py3compat.detect_encoding(fd.readline)[0] - - # Check for correctness of encoding. - with py3compat.open_with_encoding( - filename, mode='r', encoding=encoding) as fd: - fd.read() - except UnicodeDecodeError: - encoding = 'latin-1' - except (IOError, SyntaxError): - # If we fail to detect encoding (or the encoding cookie is incorrect - which - # will make detect_encoding raise SyntaxError), assume it's not a Python - # file. - return False - - try: - with py3compat.open_with_encoding( - filename, mode='r', encoding=encoding) as fd: - first_line = fd.readline(256) - except IOError: - return False - - return re.match(r'^#!.*\bpython[23]?\b', first_line) - - -def FileEncoding(filename): - """Return the file's encoding.""" - with open(filename, 'rb') as fd: - return py3compat.detect_encoding(fd.readline)[0] + if os.path.isdir( filename ): + if not recursive: + raise errors.YapfError( + "directory specified without '--recursive' flag: %s" % filename ) + + # TODO(morbo): Look into a version of os.walk that can handle recursion. + excluded_dirs = [] + for dirpath, dirnames, filelist in os.walk( filename ): + if dirpath != '.' 
and exclude and IsIgnored( dirpath, exclude ): + excluded_dirs.append( dirpath ) + continue + elif any( dirpath.startswith( e ) for e in excluded_dirs ): + continue + for f in filelist: + filepath = os.path.join( dirpath, f ) + if exclude and IsIgnored( filepath, exclude ): + continue + if IsPythonFile( filepath ): + python_files.append( filepath ) + # To prevent it from scanning the contents excluded folders, os.walk() + # lets you amend its list of child dirs `dirnames`. These edits must be + # made in-place instead of creating a modified copy of `dirnames`. + # list.remove() is slow and list.pop() is a headache. Instead clear + # `dirnames` then repopulate it. + dirnames_ = [ dirnames.pop( 0 ) for i in range( len( dirnames ) ) ] + for dirname in dirnames_: + dir_ = os.path.join( dirpath, dirname ) + if IsIgnored( dir_, exclude ): + excluded_dirs.append( dir_ ) + else: + dirnames.append( dirname ) + + elif os.path.isfile( filename ): + python_files.append( filename ) + + return python_files + + +def IsIgnored( path, exclude ): + """Return True if filename matches any patterns in exclude.""" + if exclude is None: + return False + path = path.lstrip( os.path.sep ) + while path.startswith( '.' + os.path.sep ): + path = path[ 2 : ] + return any( fnmatch.fnmatch( path, e.rstrip( os.path.sep ) ) for e in exclude ) + + +def IsPythonFile( filename ): + """Return True if filename is a Python file.""" + if os.path.splitext( filename )[ 1 ] == '.py': + return True + + try: + with open( filename, 'rb' ) as fd: + encoding = py3compat.detect_encoding( fd.readline )[ 0 ] + + # Check for correctness of encoding. + with py3compat.open_with_encoding( filename, mode = 'r', + encoding = encoding ) as fd: + fd.read() + except UnicodeDecodeError: + encoding = 'latin-1' + except ( IOError, SyntaxError ): + # If we fail to detect encoding (or the encoding cookie is incorrect - which + # will make detect_encoding raise SyntaxError), assume it's not a Python + # file. 
+ return False + + try: + with py3compat.open_with_encoding( filename, mode = 'r', + encoding = encoding ) as fd: + first_line = fd.readline( 256 ) + except IOError: + return False + + return re.match( r'^#!.*\bpython[23]?\b', first_line ) + + +def FileEncoding( filename ): + """Return the file's encoding.""" + with open( filename, 'rb' ) as fd: + return py3compat.detect_encoding( fd.readline )[ 0 ] diff --git a/yapf/yapflib/format_decision_state.py b/yapf/yapflib/format_decision_state.py index efcef0ba4..bd08aa9ba 100644 --- a/yapf/yapflib/format_decision_state.py +++ b/yapf/yapflib/format_decision_state.py @@ -33,8 +33,8 @@ from yapf.yapflib import subtypes -class FormatDecisionState(object): - """The current state when indenting a logical line. +class FormatDecisionState( object ): + """The current state when indenting a logical line. The FormatDecisionState object is meant to be copied instead of referenced. @@ -56,8 +56,8 @@ class FormatDecisionState(object): column_limit: The column limit specified by the style. """ - def __init__(self, line, first_indent): - """Initializer. + def __init__( self, line, first_indent ): + """Initializer. Initializes to the state after placing the first token from 'line' at 'first_indent'. @@ -66,62 +66,64 @@ def __init__(self, line, first_indent): line: (LogicalLine) The logical line we're currently processing. first_indent: (int) The indent of the first token. 
""" - self.next_token = line.first - self.column = first_indent - self.line = line - self.paren_level = 0 - self.lowest_level_on_line = 0 - self.ignore_stack_for_comparison = False - self.stack = [_ParenState(first_indent, first_indent)] - self.comp_stack = [] - self.param_list_stack = [] - self.first_indent = first_indent - self.column_limit = style.Get('COLUMN_LIMIT') - - def Clone(self): - """Clones a FormatDecisionState object.""" - new = FormatDecisionState(self.line, self.first_indent) - new.next_token = self.next_token - new.column = self.column - new.line = self.line - new.paren_level = self.paren_level - new.line.depth = self.line.depth - new.lowest_level_on_line = self.lowest_level_on_line - new.ignore_stack_for_comparison = self.ignore_stack_for_comparison - new.first_indent = self.first_indent - new.stack = [state.Clone() for state in self.stack] - new.comp_stack = [state.Clone() for state in self.comp_stack] - new.param_list_stack = [state.Clone() for state in self.param_list_stack] - return new - - def __eq__(self, other): - # Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous', - # because it shouldn't have a bearing on this comparison. (I.e., it will - # report equal if 'next_token' does.) 
- return (self.next_token == other.next_token and - self.column == other.column and + self.next_token = line.first + self.column = first_indent + self.line = line + self.paren_level = 0 + self.lowest_level_on_line = 0 + self.ignore_stack_for_comparison = False + self.stack = [ _ParenState( first_indent, first_indent ) ] + self.comp_stack = [] + self.param_list_stack = [] + self.first_indent = first_indent + self.column_limit = style.Get( 'COLUMN_LIMIT' ) + + def Clone( self ): + """Clones a FormatDecisionState object.""" + new = FormatDecisionState( self.line, self.first_indent ) + new.next_token = self.next_token + new.column = self.column + new.line = self.line + new.paren_level = self.paren_level + new.line.depth = self.line.depth + new.lowest_level_on_line = self.lowest_level_on_line + new.ignore_stack_for_comparison = self.ignore_stack_for_comparison + new.first_indent = self.first_indent + new.stack = [ state.Clone() for state in self.stack ] + new.comp_stack = [ state.Clone() for state in self.comp_stack ] + new.param_list_stack = [ state.Clone() for state in self.param_list_stack ] + return new + + def __eq__( self, other ): + # Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous', + # because it shouldn't have a bearing on this comparison. (I.e., it will + # report equal if 'next_token' does.) 
+ return ( + self.next_token == other.next_token and self.column == other.column and self.paren_level == other.paren_level and self.line.depth == other.line.depth and - self.lowest_level_on_line == other.lowest_level_on_line and - (self.ignore_stack_for_comparison or - other.ignore_stack_for_comparison or self.stack == other.stack and - self.comp_stack == other.comp_stack and - self.param_list_stack == other.param_list_stack)) + self.lowest_level_on_line == other.lowest_level_on_line and ( + self.ignore_stack_for_comparison or other.ignore_stack_for_comparison or + self.stack == other.stack and self.comp_stack == other.comp_stack and + self.param_list_stack == other.param_list_stack ) ) - def __ne__(self, other): - return not self == other + def __ne__( self, other ): + return not self == other - def __hash__(self): - return hash((self.next_token, self.column, self.paren_level, - self.line.depth, self.lowest_level_on_line)) + def __hash__( self ): + return hash( + ( + self.next_token, self.column, self.paren_level, self.line.depth, + self.lowest_level_on_line ) ) - def __repr__(self): - return ('column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' % - (self.column, repr(self.next_token), self.paren_level, - '\n\t'.join(repr(s) for s in self.stack) + ']')) + def __repr__( self ): + return ( + 'column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' % ( + self.column, repr( self.next_token ), self.paren_level, + '\n\t'.join( repr( s ) for s in self.stack ) + ']' ) ) - def CanSplit(self, must_split): - """Determine if we can split before the next token. + def CanSplit( self, must_split ): + """Determine if we can split before the next token. Arguments: must_split: (bool) A newline was required before this token. @@ -129,436 +131,443 @@ def CanSplit(self, must_split): Returns: True if the line can be split before the next token. 
""" - current = self.next_token - previous = current.previous_token - - if current.is_pseudo: - return False - - if (not must_split and subtypes.DICTIONARY_KEY_PART in current.subtypes and - subtypes.DICTIONARY_KEY not in current.subtypes and - not style.Get('ALLOW_MULTILINE_DICTIONARY_KEYS')): - # In some situations, a dictionary may be multiline, but pylint doesn't - # like it. So don't allow it unless forced to. - return False - - if (not must_split and subtypes.DICTIONARY_VALUE in current.subtypes and - not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')): - return False - - if previous and previous.value == '(' and current.value == ')': - # Don't split an empty function call list if we aren't splitting before - # dict values. - token = previous.previous_token - while token: - prev = token.previous_token - if not prev or prev.name not in {'NAME', 'DOT'}: - break - token = token.previous_token - if token and subtypes.DICTIONARY_VALUE in token.subtypes: - if not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE'): - return False - - if previous and previous.value == '.' and current.value == '.': - return False - - return current.can_break_before - - def MustSplit(self): - """Returns True if the line must split before the next token.""" - current = self.next_token - previous = current.previous_token - - if current.is_pseudo: - return False - - if current.must_break_before: - return True - - if not previous: - return False - - if style.Get('SPLIT_ALL_COMMA_SEPARATED_VALUES') and previous.value == ',': - return True - - if (style.Get('SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES') and - previous.value == ','): - # Avoid breaking in a container that fits in the current line if possible - opening = _GetOpeningBracket(current) - - # Can't find opening bracket, behave the same way as - # SPLIT_ALL_COMMA_SEPARATED_VALUES. - if not opening: - return True - - if current.is_comment: - # Don't require splitting before a comment, since it may be related to - # the current line. 
- return False + current = self.next_token + previous = current.previous_token - # Allow the fallthrough code to handle the closing bracket. - if current != opening.matching_bracket: - # If the container doesn't fit in the current line, must split - return not self._ContainerFitsOnStartLine(opening) - - if (self.stack[-1].split_before_closing_bracket and - (current.value in '}]' and style.Get('SPLIT_BEFORE_CLOSING_BRACKET') or - current.value in '}])' and style.Get('INDENT_CLOSING_BRACKETS'))): - # Split before the closing bracket if we can. - if subtypes.SUBSCRIPT_BRACKET not in current.subtypes: - return current.node_split_penalty != split_penalty.UNBREAKABLE - - if (current.value == ')' and previous.value == ',' and - not _IsSingleElementTuple(current.matching_bracket)): - return True - - # Prevent splitting before the first argument in compound statements - # with the exception of function declarations. - if (style.Get('SPLIT_BEFORE_FIRST_ARGUMENT') and - _IsCompoundStatement(self.line.first) and - not _IsFunctionDef(self.line.first)): - return False - - ########################################################################### - # List Splitting - if (style.Get('DEDENT_CLOSING_BRACKETS') or - style.Get('INDENT_CLOSING_BRACKETS') or - style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')): - bracket = current if current.ClosesScope() else previous - if subtypes.SUBSCRIPT_BRACKET not in bracket.subtypes: - if bracket.OpensScope(): - if style.Get('COALESCE_BRACKETS'): - if current.OpensScope(): - # Prefer to keep all opening brackets together. - return False - - if (not _IsLastScopeInLine(bracket) or - logical_line.IsSurroundedByBrackets(bracket)): - last_token = bracket.matching_bracket - else: - last_token = _LastTokenInLine(bracket.matching_bracket) - - if not self._FitsOnLine(bracket, last_token): - # Split before the first element if the whole list can't fit on a - # single line. 
- self.stack[-1].split_before_closing_bracket = True - return True - - elif (style.Get('DEDENT_CLOSING_BRACKETS') or - style.Get('INDENT_CLOSING_BRACKETS')) and current.ClosesScope(): - # Split before and dedent the closing bracket. - return self.stack[-1].split_before_closing_bracket - - if (style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') and - current.is_name): - # An expression that's surrounded by parens gets split after the opening - # parenthesis. - def SurroundedByParens(token): - """Check if it's an expression surrounded by parentheses.""" - while token: - if token.value == ',': + if current.is_pseudo: return False - if token.value == ')': - return not token.next_token - if token.OpensScope(): - token = token.matching_bracket.next_token - else: - token = token.next_token - return False - if (previous.value == '(' and not previous.is_pseudo and - not logical_line.IsSurroundedByBrackets(previous)): - pptoken = previous.previous_token - if (pptoken and not pptoken.is_name and not pptoken.is_keyword and - SurroundedByParens(current)): - return True - - if (current.is_name or current.is_string) and previous.value == ',': - # If the list has function calls in it and the full list itself cannot - # fit on the line, then we want to split. Otherwise, we'll get something - # like this: - # - # X = [ - # Bar(xxx='some string', - # yyy='another long string', - # zzz='a third long string'), Bar( - # xxx='some string', - # yyy='another long string', - # zzz='a third long string') - # ] - # - # or when a string formatting syntax. 
- func_call_or_string_format = False - tok = current.next_token - if current.is_name: - while tok and (tok.is_name or tok.value == '.'): - tok = tok.next_token - func_call_or_string_format = tok and tok.value == '(' - elif current.is_string: - while tok and tok.is_string: - tok = tok.next_token - func_call_or_string_format = tok and tok.value == '%' - if func_call_or_string_format: - open_bracket = logical_line.IsSurroundedByBrackets(current) - if open_bracket: - if open_bracket.value in '[{': - if not self._FitsOnLine(open_bracket, - open_bracket.matching_bracket): - return True - elif tok.value == '(': - if not self._FitsOnLine(current, tok.matching_bracket): - return True - - if (current.OpensScope() and previous.value == ',' and - subtypes.DICTIONARY_KEY not in current.next_token.subtypes): - # If we have a list of tuples, then we can get a similar look as above. If - # the full list cannot fit on the line, then we want a split. - open_bracket = logical_line.IsSurroundedByBrackets(current) - if (open_bracket and open_bracket.value in '[{' and - subtypes.SUBSCRIPT_BRACKET not in open_bracket.subtypes): - if not self._FitsOnLine(current, current.matching_bracket): - return True - - ########################################################################### - # Dict/Set Splitting - if (style.Get('EACH_DICT_ENTRY_ON_SEPARATE_LINE') and - subtypes.DICTIONARY_KEY in current.subtypes and not current.is_comment): - # Place each dictionary entry onto its own line. - if previous.value == '{' and previous.previous_token: - opening = _GetOpeningBracket(previous.previous_token) - if (opening and opening.value == '(' and opening.previous_token and - opening.previous_token.is_name): - # This is a dictionary that's an argument to a function. 
- if (self._FitsOnLine(previous, previous.matching_bracket) and - previous.matching_bracket.next_token and - (not opening.matching_bracket.next_token or - opening.matching_bracket.next_token.value != '.') and - _ScopeHasNoCommas(previous)): - # Don't split before the key if: - # - The dictionary fits on a line, and - # - The function call isn't part of a builder-style call and - # - The dictionary has one entry and no trailing comma + if ( not must_split and subtypes.DICTIONARY_KEY_PART in current.subtypes and + subtypes.DICTIONARY_KEY not in current.subtypes and + not style.Get( 'ALLOW_MULTILINE_DICTIONARY_KEYS' ) ): + # In some situations, a dictionary may be multiline, but pylint doesn't + # like it. So don't allow it unless forced to. return False - return True - - if (style.Get('SPLIT_BEFORE_DICT_SET_GENERATOR') and - subtypes.DICT_SET_GENERATOR in current.subtypes): - # Split before a dict/set generator. - return True - - if (subtypes.DICTIONARY_VALUE in current.subtypes or - (previous.is_pseudo and previous.value == '(' and - not current.is_comment)): - # Split before the dictionary value if we can't fit every dictionary - # entry on its own line. - if not current.OpensScope(): - opening = _GetOpeningBracket(current) - if not self._EachDictEntryFitsOnOneLine(opening): - return style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE') - - if previous.value == '{': - # Split if the dict/set cannot fit on one line and ends in a comma. 
- closing = previous.matching_bracket - if (not self._FitsOnLine(previous, closing) and - closing.previous_token.value == ','): - self.stack[-1].split_before_closing_bracket = True - return True - - ########################################################################### - # Argument List Splitting - if (style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') and not current.is_comment and - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in current.subtypes): - if (previous.value not in {'=', ':', '*', '**'} and - current.value not in ':=,)' and not _IsFunctionDefinition(previous)): - # If we're going to split the lines because of named arguments, then we - # want to split after the opening bracket as well. But not when this is - # part of a function definition. - if previous.value == '(': - # Make sure we don't split after the opening bracket if the - # continuation indent is greater than the opening bracket: - # - # a( - # b=1, - # c=2) - if (self._FitsOnLine(previous, previous.matching_bracket) and - logical_line.IsSurroundedByBrackets(previous)): - # An argument to a function is a function call with named - # assigns. + + if ( not must_split and subtypes.DICTIONARY_VALUE in current.subtypes and + not style.Get( 'ALLOW_SPLIT_BEFORE_DICT_VALUE' ) ): return False - # Don't split if not required - if (not style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') and - not style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')): + if previous and previous.value == '(' and current.value == ')': + # Don't split an empty function call list if we aren't splitting before + # dict values. + token = previous.previous_token + while token: + prev = token.previous_token + if not prev or prev.name not in { 'NAME', 'DOT' }: + break + token = token.previous_token + if token and subtypes.DICTIONARY_VALUE in token.subtypes: + if not style.Get( 'ALLOW_SPLIT_BEFORE_DICT_VALUE' ): + return False + + if previous and previous.value == '.' 
and current.value == '.': return False - column = self.column - self.stack[-1].last_space - return column > style.Get('CONTINUATION_INDENT_WIDTH') + return current.can_break_before - opening = _GetOpeningBracket(current) - if opening: - return not self._ContainerFitsOnStartLine(opening) + def MustSplit( self ): + """Returns True if the line must split before the next token.""" + current = self.next_token + previous = current.previous_token - if (current.value not in '{)' and previous.value == '(' and - self._ArgumentListHasDictionaryEntry(current)): - return True + if current.is_pseudo: + return False - if style.Get('SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED'): - # Split before arguments in a function call or definition if the - # arguments are terminated by a comma. - opening = _GetOpeningBracket(current) - if opening and opening.previous_token and opening.previous_token.is_name: - if previous.value in '(,': - if opening.matching_bracket.previous_token.value == ',': + if current.must_break_before: return True - if ((current.is_name or current.value in {'*', '**'}) and - previous.value == ','): - # If we have a function call within an argument list and it won't fit on - # the remaining line, but it will fit on a line by itself, then go ahead - # and split before the call. 
- opening = _GetOpeningBracket(current) - if (opening and opening.value == '(' and opening.previous_token and - (opening.previous_token.is_name or - opening.previous_token.value in {'*', '**'})): - is_func_call = False - opening = current - while opening: - if opening.value == '(': - is_func_call = True - break - if (not (opening.is_name or opening.value in {'*', '**'}) and - opening.value != '.'): - break - opening = opening.next_token + if not previous: + return False - if is_func_call: - if (not self._FitsOnLine(current, opening.matching_bracket) or - (opening.matching_bracket.next_token and - opening.matching_bracket.next_token.value != ',' and - not opening.matching_bracket.next_token.ClosesScope())): + if style.Get( 'SPLIT_ALL_COMMA_SEPARATED_VALUES' ) and previous.value == ',': return True - pprevious = previous.previous_token - - # A function call with a dictionary as its first argument may result in - # unreadable formatting if the dictionary spans multiple lines. The - # dictionary itself is formatted just fine, but the remaining arguments are - # indented too far: - # - # function_call({ - # KEY_1: 'value one', - # KEY_2: 'value two', - # }, - # default=False) - if (current.value == '{' and previous.value == '(' and pprevious and - pprevious.is_name): - dict_end = current.matching_bracket - next_token = dict_end.next_token - if next_token.value == ',' and not self._FitsOnLine(current, dict_end): - return True - - if (current.is_name and pprevious and pprevious.is_name and - previous.value == '('): - - if (not self._FitsOnLine(previous, previous.matching_bracket) and - _IsFunctionCallWithArguments(current)): - # There is a function call, with more than 1 argument, where the first - # argument is itself a function call with arguments that does not fit - # into the line. In this specific case, if we split after the first - # argument's opening '(', then the formatting will look bad for the - # rest of the arguments. 
E.g.: - # - # outer_function_call(inner_function_call( - # inner_arg1, inner_arg2), - # outer_arg1, outer_arg2) - # - # Instead, enforce a split before that argument to keep things looking - # good. - if (style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') or - style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')): - return True - - opening = _GetOpeningBracket(current) - if (opening and opening.value == '(' and opening.previous_token and - (opening.previous_token.is_name or - opening.previous_token.value in {'*', '**'})): - is_func_call = False - opening = current - while opening: - if opening.value == '(': - is_func_call = True - break - if (not (opening.is_name or opening.value in {'*', '**'}) and - opening.value != '.'): - break - opening = opening.next_token - - if is_func_call: - if (not self._FitsOnLine(current, opening.matching_bracket) or - (opening.matching_bracket.next_token and - opening.matching_bracket.next_token.value != ',' and - not opening.matching_bracket.next_token.ClosesScope())): - return True - - if (previous.OpensScope() and not current.OpensScope() and - not current.is_comment and - subtypes.SUBSCRIPT_BRACKET not in previous.subtypes): - if pprevious and not pprevious.is_keyword and not pprevious.is_name: - # We want to split if there's a comment in the container. - token = current - while token != previous.matching_bracket: - if token.is_comment: + if ( style.Get( 'SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES' ) and + previous.value == ',' ): + # Avoid breaking in a container that fits in the current line if possible + opening = _GetOpeningBracket( current ) + + # Can't find opening bracket, behave the same way as + # SPLIT_ALL_COMMA_SEPARATED_VALUES. + if not opening: + return True + + if current.is_comment: + # Don't require splitting before a comment, since it may be related to + # the current line. + return False + + # Allow the fallthrough code to handle the closing bracket. 
+ if current != opening.matching_bracket: + # If the container doesn't fit in the current line, must split + return not self._ContainerFitsOnStartLine( opening ) + + if ( self.stack[ -1 ].split_before_closing_bracket and + ( current.value in '}]' and style.Get( 'SPLIT_BEFORE_CLOSING_BRACKET' ) or + current.value in '}])' and style.Get( 'INDENT_CLOSING_BRACKETS' ) ) ): + # Split before the closing bracket if we can. + if subtypes.SUBSCRIPT_BRACKET not in current.subtypes: + return current.node_split_penalty != split_penalty.UNBREAKABLE + + if ( current.value == ')' and previous.value == ',' and + not _IsSingleElementTuple( current.matching_bracket ) ): return True - token = token.next_token - if previous.value == '(': - pptoken = previous.previous_token - if not pptoken or not pptoken.is_name: - # Split after the opening of a tuple if it doesn't fit on the current - # line and it's not a function call. - if self._FitsOnLine(previous, previous.matching_bracket): - return False - elif not self._FitsOnLine(previous, previous.matching_bracket): - if len(previous.container_elements) == 1: + + # Prevent splitting before the first argument in compound statements + # with the exception of function declarations. 
+ if ( style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) and + _IsCompoundStatement( self.line.first ) and + not _IsFunctionDef( self.line.first ) ): return False - elements = previous.container_elements + [previous.matching_bracket] - i = 1 - while i < len(elements): - if (not elements[i - 1].OpensScope() and - not self._FitsOnLine(elements[i - 1], elements[i])): - return True - i += 1 + ########################################################################### + # List Splitting + if ( style.Get( 'DEDENT_CLOSING_BRACKETS' ) or + style.Get( 'INDENT_CLOSING_BRACKETS' ) or + style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) ): + bracket = current if current.ClosesScope() else previous + if subtypes.SUBSCRIPT_BRACKET not in bracket.subtypes: + if bracket.OpensScope(): + if style.Get( 'COALESCE_BRACKETS' ): + if current.OpensScope(): + # Prefer to keep all opening brackets together. + return False + + if ( not _IsLastScopeInLine( bracket ) or + logical_line.IsSurroundedByBrackets( bracket ) ): + last_token = bracket.matching_bracket + else: + last_token = _LastTokenInLine( bracket.matching_bracket ) + + if not self._FitsOnLine( bracket, last_token ): + # Split before the first element if the whole list can't fit on a + # single line. + self.stack[ -1 ].split_before_closing_bracket = True + return True + + elif ( style.Get( 'DEDENT_CLOSING_BRACKETS' ) or + style.Get( 'INDENT_CLOSING_BRACKETS' ) + ) and current.ClosesScope(): + # Split before and dedent the closing bracket. + return self.stack[ -1 ].split_before_closing_bracket + + if ( style.Get( 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN' ) and + current.is_name ): + # An expression that's surrounded by parens gets split after the opening + # parenthesis. 
+ def SurroundedByParens( token ): + """Check if it's an expression surrounded by parentheses.""" + while token: + if token.value == ',': + return False + if token.value == ')': + return not token.next_token + if token.OpensScope(): + token = token.matching_bracket.next_token + else: + token = token.next_token + return False + + if ( previous.value == '(' and not previous.is_pseudo and + not logical_line.IsSurroundedByBrackets( previous ) ): + pptoken = previous.previous_token + if ( pptoken and not pptoken.is_name and not pptoken.is_keyword and + SurroundedByParens( current ) ): + return True + + if ( current.is_name or current.is_string ) and previous.value == ',': + # If the list has function calls in it and the full list itself cannot + # fit on the line, then we want to split. Otherwise, we'll get something + # like this: + # + # X = [ + # Bar(xxx='some string', + # yyy='another long string', + # zzz='a third long string'), Bar( + # xxx='some string', + # yyy='another long string', + # zzz='a third long string') + # ] + # + # or when a string formatting syntax. + func_call_or_string_format = False + tok = current.next_token + if current.is_name: + while tok and ( tok.is_name or tok.value == '.' ): + tok = tok.next_token + func_call_or_string_format = tok and tok.value == '(' + elif current.is_string: + while tok and tok.is_string: + tok = tok.next_token + func_call_or_string_format = tok and tok.value == '%' + if func_call_or_string_format: + open_bracket = logical_line.IsSurroundedByBrackets( current ) + if open_bracket: + if open_bracket.value in '[{': + if not self._FitsOnLine( open_bracket, + open_bracket.matching_bracket ): + return True + elif tok.value == '(': + if not self._FitsOnLine( current, tok.matching_bracket ): + return True + + if ( current.OpensScope() and previous.value == ',' and + subtypes.DICTIONARY_KEY not in current.next_token.subtypes ): + # If we have a list of tuples, then we can get a similar look as above. 
If + # the full list cannot fit on the line, then we want a split. + open_bracket = logical_line.IsSurroundedByBrackets( current ) + if ( open_bracket and open_bracket.value in '[{' and + subtypes.SUBSCRIPT_BRACKET not in open_bracket.subtypes ): + if not self._FitsOnLine( current, current.matching_bracket ): + return True + + ########################################################################### + # Dict/Set Splitting + if ( style.Get( 'EACH_DICT_ENTRY_ON_SEPARATE_LINE' ) and + subtypes.DICTIONARY_KEY in current.subtypes and not current.is_comment ): + # Place each dictionary entry onto its own line. + if previous.value == '{' and previous.previous_token: + opening = _GetOpeningBracket( previous.previous_token ) + if ( opening and opening.value == '(' and opening.previous_token and + opening.previous_token.is_name ): + # This is a dictionary that's an argument to a function. + if ( self._FitsOnLine( previous, previous.matching_bracket ) and + previous.matching_bracket.next_token and + ( not opening.matching_bracket.next_token or + opening.matching_bracket.next_token.value != '.' ) and + _ScopeHasNoCommas( previous ) ): + # Don't split before the key if: + # - The dictionary fits on a line, and + # - The function call isn't part of a builder-style call and + # - The dictionary has one entry and no trailing comma + return False + return True - if (self.column_limit - self.column) / float(self.column_limit) < 0.3: - # Try not to squish all of the arguments off to the right. + if ( style.Get( 'SPLIT_BEFORE_DICT_SET_GENERATOR' ) and + subtypes.DICT_SET_GENERATOR in current.subtypes ): + # Split before a dict/set generator. return True - else: - # Split after the opening of a container if it doesn't fit on the - # current line. 
- if not self._FitsOnLine(previous, previous.matching_bracket): - return True - - ########################################################################### - # Original Formatting Splitting - # These checks rely upon the original formatting. This is in order to - # attempt to keep hand-written code in the same condition as it was before. - # However, this may cause the formatter to fail to be idempotent. - if (style.Get('SPLIT_BEFORE_BITWISE_OPERATOR') and current.value in '&|' and - previous.lineno < current.lineno): - # Retain the split before a bitwise operator. - return True - - if (current.is_comment and - previous.lineno < current.lineno - current.value.count('\n')): - # If a comment comes in the middle of a logical line (like an if - # conditional with comments interspersed), then we want to split if the - # original comments were on a separate line. - return True - return False + if ( subtypes.DICTIONARY_VALUE in current.subtypes or + ( previous.is_pseudo and previous.value == '(' and + not current.is_comment ) ): + # Split before the dictionary value if we can't fit every dictionary + # entry on its own line. + if not current.OpensScope(): + opening = _GetOpeningBracket( current ) + if not self._EachDictEntryFitsOnOneLine( opening ): + return style.Get( 'ALLOW_SPLIT_BEFORE_DICT_VALUE' ) + + if previous.value == '{': + # Split if the dict/set cannot fit on one line and ends in a comma. 
+ closing = previous.matching_bracket + if ( not self._FitsOnLine( previous, closing ) and + closing.previous_token.value == ',' ): + self.stack[ -1 ].split_before_closing_bracket = True + return True + + ########################################################################### + # Argument List Splitting + if ( style.Get( 'SPLIT_BEFORE_NAMED_ASSIGNS' ) and not current.is_comment and + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in current.subtypes ): + if ( previous.value not in { '=', ':', '*', '**' } and + current.value not in ':=,)' and + not _IsFunctionDefinition( previous ) ): + # If we're going to split the lines because of named arguments, then we + # want to split after the opening bracket as well. But not when this is + # part of a function definition. + if previous.value == '(': + # Make sure we don't split after the opening bracket if the + # continuation indent is greater than the opening bracket: + # + # a( + # b=1, + # c=2) + if ( self._FitsOnLine( previous, previous.matching_bracket ) and + logical_line.IsSurroundedByBrackets( previous ) ): + # An argument to a function is a function call with named + # assigns. + return False + + # Don't split if not required + if ( not style.Get( 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN' ) + and not style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) ): + return False + + column = self.column - self.stack[ -1 ].last_space + return column > style.Get( 'CONTINUATION_INDENT_WIDTH' ) + + opening = _GetOpeningBracket( current ) + if opening: + return not self._ContainerFitsOnStartLine( opening ) + + if ( current.value not in '{)' and previous.value == '(' and + self._ArgumentListHasDictionaryEntry( current ) ): + return True - def AddTokenToState(self, newline, dry_run, must_split=False): - """Add a token to the format decision state. + if style.Get( 'SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED' ): + # Split before arguments in a function call or definition if the + # arguments are terminated by a comma. 
+ opening = _GetOpeningBracket( current ) + if opening and opening.previous_token and opening.previous_token.is_name: + if previous.value in '(,': + if opening.matching_bracket.previous_token.value == ',': + return True + + if ( ( current.is_name or current.value in { '*', '**' } ) and + previous.value == ',' ): + # If we have a function call within an argument list and it won't fit on + # the remaining line, but it will fit on a line by itself, then go ahead + # and split before the call. + opening = _GetOpeningBracket( current ) + if ( opening and opening.value == '(' and opening.previous_token and + ( opening.previous_token.is_name or + opening.previous_token.value in { '*', '**' } ) ): + is_func_call = False + opening = current + while opening: + if opening.value == '(': + is_func_call = True + break + if ( not ( opening.is_name or opening.value in { '*', '**' } ) and + opening.value != '.' ): + break + opening = opening.next_token + + if is_func_call: + if ( not self._FitsOnLine( current, opening.matching_bracket ) or + ( opening.matching_bracket.next_token and + opening.matching_bracket.next_token.value != ',' and + not opening.matching_bracket.next_token.ClosesScope() ) ): + return True + + pprevious = previous.previous_token + + # A function call with a dictionary as its first argument may result in + # unreadable formatting if the dictionary spans multiple lines. 
The + # dictionary itself is formatted just fine, but the remaining arguments are + # indented too far: + # + # function_call({ + # KEY_1: 'value one', + # KEY_2: 'value two', + # }, + # default=False) + if ( current.value == '{' and previous.value == '(' and pprevious and + pprevious.is_name ): + dict_end = current.matching_bracket + next_token = dict_end.next_token + if next_token.value == ',' and not self._FitsOnLine( current, dict_end ): + return True + + if ( current.is_name and pprevious and pprevious.is_name and + previous.value == '(' ): + + if ( not self._FitsOnLine( previous, previous.matching_bracket ) and + _IsFunctionCallWithArguments( current ) ): + # There is a function call, with more than 1 argument, where the first + # argument is itself a function call with arguments that does not fit + # into the line. In this specific case, if we split after the first + # argument's opening '(', then the formatting will look bad for the + # rest of the arguments. E.g.: + # + # outer_function_call(inner_function_call( + # inner_arg1, inner_arg2), + # outer_arg1, outer_arg2) + # + # Instead, enforce a split before that argument to keep things looking + # good. + if ( style.Get( 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN' ) or + style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) ): + return True + + opening = _GetOpeningBracket( current ) + if ( opening and opening.value == '(' and opening.previous_token and + ( opening.previous_token.is_name or + opening.previous_token.value in { '*', '**' } ) ): + is_func_call = False + opening = current + while opening: + if opening.value == '(': + is_func_call = True + break + if ( not ( opening.is_name or opening.value in { '*', '**' } ) + and opening.value != '.' 
): + break + opening = opening.next_token + + if is_func_call: + if ( + not self._FitsOnLine( current, + opening.matching_bracket ) or + ( opening.matching_bracket.next_token and + opening.matching_bracket.next_token.value != ',' and + not opening.matching_bracket.next_token.ClosesScope() ) ): + return True + + if ( previous.OpensScope() and not current.OpensScope() and + not current.is_comment and + subtypes.SUBSCRIPT_BRACKET not in previous.subtypes ): + if pprevious and not pprevious.is_keyword and not pprevious.is_name: + # We want to split if there's a comment in the container. + token = current + while token != previous.matching_bracket: + if token.is_comment: + return True + token = token.next_token + if previous.value == '(': + pptoken = previous.previous_token + if not pptoken or not pptoken.is_name: + # Split after the opening of a tuple if it doesn't fit on the current + # line and it's not a function call. + if self._FitsOnLine( previous, previous.matching_bracket ): + return False + elif not self._FitsOnLine( previous, previous.matching_bracket ): + if len( previous.container_elements ) == 1: + return False + + elements = previous.container_elements + [ + previous.matching_bracket + ] + i = 1 + while i < len( elements ): + if ( not elements[ i - 1 ].OpensScope() and + not self._FitsOnLine( elements[ i - 1 ], elements[ i ] ) ): + return True + i += 1 + + if ( self.column_limit - self.column ) / float( + self.column_limit ) < 0.3: + # Try not to squish all of the arguments off to the right. + return True + else: + # Split after the opening of a container if it doesn't fit on the + # current line. + if not self._FitsOnLine( previous, previous.matching_bracket ): + return True + + ########################################################################### + # Original Formatting Splitting + # These checks rely upon the original formatting. This is in order to + # attempt to keep hand-written code in the same condition as it was before. 
+ # However, this may cause the formatter to fail to be idempotent. + if ( style.Get( 'SPLIT_BEFORE_BITWISE_OPERATOR' ) and current.value in '&|' and + previous.lineno < current.lineno ): + # Retain the split before a bitwise operator. + return True + + if ( current.is_comment and + previous.lineno < current.lineno - current.value.count( '\n' ) ): + # If a comment comes in the middle of a logical line (like an if + # conditional with comments interspersed), then we want to split if the + # original comments were on a separate line. + return True + + return False + + def AddTokenToState( self, newline, dry_run, must_split = False ): + """Add a token to the format decision state. Allow the heuristic to try out adding the token with and without a newline. Later on, the algorithm will determine which one has the lowest penalty. @@ -572,21 +581,21 @@ def AddTokenToState(self, newline, dry_run, must_split=False): Returns: The penalty of splitting after the current token. """ - self._PushParameterListState(newline) + self._PushParameterListState( newline ) - penalty = 0 - if newline: - penalty = self._AddTokenOnNewline(dry_run, must_split) - else: - self._AddTokenOnCurrentLine(dry_run) + penalty = 0 + if newline: + penalty = self._AddTokenOnNewline( dry_run, must_split ) + else: + self._AddTokenOnCurrentLine( dry_run ) - penalty += self._CalculateComprehensionState(newline) - penalty += self._CalculateParameterListState(newline) + penalty += self._CalculateComprehensionState( newline ) + penalty += self._CalculateParameterListState( newline ) - return self.MoveStateToNextToken() + penalty + return self.MoveStateToNextToken() + penalty - def _AddTokenOnCurrentLine(self, dry_run): - """Puts the token on the current line. + def _AddTokenOnCurrentLine( self, dry_run ): + """Puts the token on the current line. Appends the next token to the state and updates information necessary for indentation. 
@@ -594,37 +603,37 @@ def _AddTokenOnCurrentLine(self, dry_run): Arguments: dry_run: (bool) Commit whitespace changes to the FormatToken if True. """ - current = self.next_token - previous = current.previous_token - - spaces = current.spaces_required_before - if isinstance(spaces, list): - # Don't set the value here, as we need to look at the lines near - # this one to determine the actual horizontal alignment value. - spaces = 0 - - if not dry_run: - current.AddWhitespacePrefix(newlines_before=0, spaces=spaces) - - if previous.OpensScope(): - if not current.is_comment: - # Align closing scopes that are on a newline with the opening scope: - # - # foo = [a, - # b, - # ] - self.stack[-1].closing_scope_indent = self.column - 1 - if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'): - self.stack[-1].closing_scope_indent += 1 - self.stack[-1].indent = self.column + spaces - else: - self.stack[-1].closing_scope_indent = ( - self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH')) - - self.column += spaces - - def _AddTokenOnNewline(self, dry_run, must_split): - """Adds a line break and necessary indentation. + current = self.next_token + previous = current.previous_token + + spaces = current.spaces_required_before + if isinstance( spaces, list ): + # Don't set the value here, as we need to look at the lines near + # this one to determine the actual horizontal alignment value. 
+ spaces = 0 + + if not dry_run: + current.AddWhitespacePrefix( newlines_before = 0, spaces = spaces ) + + if previous.OpensScope(): + if not current.is_comment: + # Align closing scopes that are on a newline with the opening scope: + # + # foo = [a, + # b, + # ] + self.stack[ -1 ].closing_scope_indent = self.column - 1 + if style.Get( 'ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT' ): + self.stack[ -1 ].closing_scope_indent += 1 + self.stack[ -1 ].indent = self.column + spaces + else: + self.stack[ -1 ].closing_scope_indent = ( + self.stack[ -1 ].indent - style.Get( 'CONTINUATION_INDENT_WIDTH' ) ) + + self.column += spaces + + def _AddTokenOnNewline( self, dry_run, must_split ): + """Adds a line break and necessary indentation. Appends the next token to the state and updates information necessary for indentation. @@ -637,63 +646,63 @@ def _AddTokenOnNewline(self, dry_run, must_split): Returns: The split penalty for splitting after the current state. """ - current = self.next_token - previous = current.previous_token - - self.column = self._GetNewlineColumn() - - if not dry_run: - indent_level = self.line.depth - spaces = self.column - if spaces: - spaces -= indent_level * style.Get('INDENT_WIDTH') - current.AddWhitespacePrefix( - newlines_before=1, spaces=spaces, indent_level=indent_level) - - if not current.is_comment: - self.stack[-1].last_space = self.column - self.lowest_level_on_line = self.paren_level - - if (previous.OpensScope() or - (previous.is_comment and previous.previous_token is not None and - previous.previous_token.OpensScope())): - dedent = (style.Get('CONTINUATION_INDENT_WIDTH'), - 0)[style.Get('INDENT_CLOSING_BRACKETS')] - self.stack[-1].closing_scope_indent = ( - max(0, self.stack[-1].indent - dedent)) - self.stack[-1].split_before_closing_bracket = True - - # Calculate the split penalty. - penalty = current.split_penalty - - if must_split: - # Don't penalize for a must split. 
- return penalty - - if previous.is_pseudo and previous.value == '(': - # Small penalty for splitting after a pseudo paren. - penalty += 50 - - # Add a penalty for each increasing newline we add, but don't penalize for - # splitting before an if-expression or list comprehension. - if current.value not in {'if', 'for'}: - last = self.stack[-1] - last.num_line_splits += 1 - penalty += ( - style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') * - last.num_line_splits) - - if current.OpensScope() and previous.OpensScope(): - # Prefer to keep opening brackets coalesced (unless it's at the beginning - # of a function call). - pprev = previous.previous_token - if not pprev or not pprev.is_name: - penalty += 10 - - return penalty + 10 - - def MoveStateToNextToken(self): - """Calculate format decision state information and move onto the next token. + current = self.next_token + previous = current.previous_token + + self.column = self._GetNewlineColumn() + + if not dry_run: + indent_level = self.line.depth + spaces = self.column + if spaces: + spaces -= indent_level * style.Get( 'INDENT_WIDTH' ) + current.AddWhitespacePrefix( + newlines_before = 1, spaces = spaces, indent_level = indent_level ) + + if not current.is_comment: + self.stack[ -1 ].last_space = self.column + self.lowest_level_on_line = self.paren_level + + if ( previous.OpensScope() or + ( previous.is_comment and previous.previous_token is not None and + previous.previous_token.OpensScope() ) ): + dedent = ( style.Get( 'CONTINUATION_INDENT_WIDTH' ), + 0 )[ style.Get( 'INDENT_CLOSING_BRACKETS' ) ] + self.stack[ -1 ].closing_scope_indent = ( + max( 0, self.stack[ -1 ].indent - dedent ) ) + self.stack[ -1 ].split_before_closing_bracket = True + + # Calculate the split penalty. + penalty = current.split_penalty + + if must_split: + # Don't penalize for a must split. + return penalty + + if previous.is_pseudo and previous.value == '(': + # Small penalty for splitting after a pseudo paren. 
+ penalty += 50 + + # Add a penalty for each increasing newline we add, but don't penalize for + # splitting before an if-expression or list comprehension. + if current.value not in { 'if', 'for' }: + last = self.stack[ -1 ] + last.num_line_splits += 1 + penalty += ( + style.Get( 'SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT' ) * + last.num_line_splits ) + + if current.OpensScope() and previous.OpensScope(): + # Prefer to keep opening brackets coalesced (unless it's at the beginning + # of a function call). + pprev = previous.previous_token + if not pprev or not pprev.is_name: + penalty += 10 + + return penalty + 10 + + def MoveStateToNextToken( self ): + """Calculate format decision state information and move onto the next token. Before moving onto the next token, we first calculate the format decision state given the current token and its formatting decisions. Then the format @@ -702,55 +711,55 @@ def MoveStateToNextToken(self): Returns: The penalty for the number of characters over the column limit. """ - current = self.next_token - if not current.OpensScope() and not current.ClosesScope(): - self.lowest_level_on_line = min(self.lowest_level_on_line, - self.paren_level) - - # If we encounter an opening bracket, we add a level to our stack to prepare - # for the subsequent tokens. - if current.OpensScope(): - last = self.stack[-1] - new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space - - self.stack.append(_ParenState(new_indent, self.stack[-1].last_space)) - self.paren_level += 1 - - # If we encounter a closing bracket, we can remove a level from our - # parenthesis stack. 
- if len(self.stack) > 1 and current.ClosesScope(): - if subtypes.DICTIONARY_KEY_PART in current.subtypes: - self.stack[-2].last_space = self.stack[-2].indent - else: - self.stack[-2].last_space = self.stack[-1].last_space - self.stack.pop() - self.paren_level -= 1 - - is_multiline_string = current.is_string and '\n' in current.value - if is_multiline_string: - # This is a multiline string. Only look at the first line. - self.column += len(current.value.split('\n')[0]) - elif not current.is_pseudo: - self.column += len(current.value) - - self.next_token = self.next_token.next_token - - # Calculate the penalty for overflowing the column limit. - penalty = 0 - if (not current.is_pylint_comment and not current.is_pytype_comment and - not current.is_copybara_comment and self.column > self.column_limit): - excess_characters = self.column - self.column_limit - penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters - - if is_multiline_string: - # If this is a multiline string, the column is actually the - # end of the last line in the string. - self.column = len(current.value.split('\n')[-1]) - - return penalty - - def _CalculateComprehensionState(self, newline): - """Makes required changes to comprehension state. + current = self.next_token + if not current.OpensScope() and not current.ClosesScope(): + self.lowest_level_on_line = min( + self.lowest_level_on_line, self.paren_level ) + + # If we encounter an opening bracket, we add a level to our stack to prepare + # for the subsequent tokens. + if current.OpensScope(): + last = self.stack[ -1 ] + new_indent = style.Get( 'CONTINUATION_INDENT_WIDTH' ) + last.last_space + + self.stack.append( _ParenState( new_indent, self.stack[ -1 ].last_space ) ) + self.paren_level += 1 + + # If we encounter a closing bracket, we can remove a level from our + # parenthesis stack. 
+ if len( self.stack ) > 1 and current.ClosesScope(): + if subtypes.DICTIONARY_KEY_PART in current.subtypes: + self.stack[ -2 ].last_space = self.stack[ -2 ].indent + else: + self.stack[ -2 ].last_space = self.stack[ -1 ].last_space + self.stack.pop() + self.paren_level -= 1 + + is_multiline_string = current.is_string and '\n' in current.value + if is_multiline_string: + # This is a multiline string. Only look at the first line. + self.column += len( current.value.split( '\n' )[ 0 ] ) + elif not current.is_pseudo: + self.column += len( current.value ) + + self.next_token = self.next_token.next_token + + # Calculate the penalty for overflowing the column limit. + penalty = 0 + if ( not current.is_pylint_comment and not current.is_pytype_comment and + not current.is_copybara_comment and self.column > self.column_limit ): + excess_characters = self.column - self.column_limit + penalty += style.Get( 'SPLIT_PENALTY_EXCESS_CHARACTER' ) * excess_characters + + if is_multiline_string: + # If this is a multiline string, the column is actually the + # end of the last line in the string. + self.column = len( current.value.split( '\n' )[ -1 ] ) + + return penalty + + def _CalculateComprehensionState( self, newline ): + """Makes required changes to comprehension state. Args: newline: Whether the current token is to be added on a newline. @@ -759,81 +768,82 @@ def _CalculateComprehensionState(self, newline): The penalty for the token-newline combination given the current comprehension state. """ - current = self.next_token - previous = current.previous_token - top_of_stack = self.comp_stack[-1] if self.comp_stack else None - penalty = 0 - - if top_of_stack is not None: - # Check if the token terminates the current comprehension. - if current == top_of_stack.closing_bracket: - last = self.comp_stack.pop() - # Lightly penalize comprehensions that are split across multiple lines. 
- if last.has_interior_split: - penalty += style.Get('SPLIT_PENALTY_COMPREHENSION') + current = self.next_token + previous = current.previous_token + top_of_stack = self.comp_stack[ -1 ] if self.comp_stack else None + penalty = 0 + + if top_of_stack is not None: + # Check if the token terminates the current comprehension. + if current == top_of_stack.closing_bracket: + last = self.comp_stack.pop() + # Lightly penalize comprehensions that are split across multiple lines. + if last.has_interior_split: + penalty += style.Get( 'SPLIT_PENALTY_COMPREHENSION' ) + + return penalty + + if newline: + top_of_stack.has_interior_split = True + + if ( subtypes.COMP_EXPR in current.subtypes and + subtypes.COMP_EXPR not in previous.subtypes ): + self.comp_stack.append( object_state.ComprehensionState( current ) ) + return penalty + + if current.value == 'for' and subtypes.COMP_FOR in current.subtypes: + if top_of_stack.for_token is not None: + # Treat nested comprehensions like normal comp_if expressions. + # Example: + # my_comp = [ + # a.qux + b.qux + # for a in foo + # --> for b in bar <-- + # if a.zut + b.zut + # ] + if ( style.Get( 'SPLIT_COMPLEX_COMPREHENSION' ) and + top_of_stack.has_split_at_for != newline and + ( top_of_stack.has_split_at_for or + not top_of_stack.HasTrivialExpr() ) ): + penalty += split_penalty.UNBREAKABLE + else: + top_of_stack.for_token = current + top_of_stack.has_split_at_for = newline + + # Try to keep trivial expressions on the same line as the comp_for. + if ( style.Get( 'SPLIT_COMPLEX_COMPREHENSION' ) and newline and + top_of_stack.HasTrivialExpr() ): + penalty += split_penalty.CONNECTED + + if ( subtypes.COMP_IF in current.subtypes and + subtypes.COMP_IF not in previous.subtypes ): + # Penalize breaking at comp_if when it doesn't match the newline structure + # in the rest of the comprehension. 
+ if ( style.Get( 'SPLIT_COMPLEX_COMPREHENSION' ) and + top_of_stack.has_split_at_for != newline and + ( top_of_stack.has_split_at_for or + not top_of_stack.HasTrivialExpr() ) ): + penalty += split_penalty.UNBREAKABLE return penalty - if newline: - top_of_stack.has_interior_split = True - - if (subtypes.COMP_EXPR in current.subtypes and - subtypes.COMP_EXPR not in previous.subtypes): - self.comp_stack.append(object_state.ComprehensionState(current)) - return penalty - - if current.value == 'for' and subtypes.COMP_FOR in current.subtypes: - if top_of_stack.for_token is not None: - # Treat nested comprehensions like normal comp_if expressions. - # Example: - # my_comp = [ - # a.qux + b.qux - # for a in foo - # --> for b in bar <-- - # if a.zut + b.zut - # ] - if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and - top_of_stack.has_split_at_for != newline and - (top_of_stack.has_split_at_for or - not top_of_stack.HasTrivialExpr())): - penalty += split_penalty.UNBREAKABLE - else: - top_of_stack.for_token = current - top_of_stack.has_split_at_for = newline - - # Try to keep trivial expressions on the same line as the comp_for. - if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and newline and - top_of_stack.HasTrivialExpr()): - penalty += split_penalty.CONNECTED - - if (subtypes.COMP_IF in current.subtypes and - subtypes.COMP_IF not in previous.subtypes): - # Penalize breaking at comp_if when it doesn't match the newline structure - # in the rest of the comprehension. - if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and - top_of_stack.has_split_at_for != newline and - (top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr())): - penalty += split_penalty.UNBREAKABLE - - return penalty - - def _PushParameterListState(self, newline): - """Push a new parameter list state for a function definition. + def _PushParameterListState( self, newline ): + """Push a new parameter list state for a function definition. 
Args: newline: Whether the current token is to be added on a newline. """ - current = self.next_token - previous = current.previous_token + current = self.next_token + previous = current.previous_token - if _IsFunctionDefinition(previous): - first_param_column = previous.total_length + self.stack[-2].indent - self.param_list_stack.append( - object_state.ParameterListState(previous, newline, - first_param_column)) + if _IsFunctionDefinition( previous ): + first_param_column = previous.total_length + self.stack[ -2 ].indent + self.param_list_stack.append( + object_state.ParameterListState( + previous, newline, first_param_column ) ) - def _CalculateParameterListState(self, newline): - """Makes required changes to parameter list state. + def _CalculateParameterListState( self, newline ): + """Makes required changes to parameter list state. Args: newline: Whether the current token is to be added on a newline. @@ -842,353 +852,355 @@ def _CalculateParameterListState(self, newline): The penalty for the token-newline combination given the current parameter state. 
""" - current = self.next_token - previous = current.previous_token - penalty = 0 - - if _IsFunctionDefinition(previous): - first_param_column = previous.total_length + self.stack[-2].indent - if not newline: - param_list = self.param_list_stack[-1] - if param_list.parameters and param_list.has_typed_return: - last_param = param_list.parameters[-1].first_token - last_token = _LastTokenInLine(previous.matching_bracket) - total_length = last_token.total_length - total_length -= last_param.total_length - len(last_param.value) - if total_length + self.column > self.column_limit: - # If we need to split before the trailing code of a function - # definition with return types, then also split before the opening - # parameter so that the trailing bit isn't indented on a line by - # itself: - # - # def rrrrrrrrrrrrrrrrrrrrrr(ccccccccccccccccccccccc: Tuple[Text] - # ) -> List[Tuple[Text, Text]]: - # pass - penalty += split_penalty.VERY_STRONGLY_CONNECTED + current = self.next_token + previous = current.previous_token + penalty = 0 + + if _IsFunctionDefinition( previous ): + first_param_column = previous.total_length + self.stack[ -2 ].indent + if not newline: + param_list = self.param_list_stack[ -1 ] + if param_list.parameters and param_list.has_typed_return: + last_param = param_list.parameters[ -1 ].first_token + last_token = _LastTokenInLine( previous.matching_bracket ) + total_length = last_token.total_length + total_length -= last_param.total_length - len( last_param.value ) + if total_length + self.column > self.column_limit: + # If we need to split before the trailing code of a function + # definition with return types, then also split before the opening + # parameter so that the trailing bit isn't indented on a line by + # itself: + # + # def rrrrrrrrrrrrrrrrrrrrrr(ccccccccccccccccccccccc: Tuple[Text] + # ) -> List[Tuple[Text, Text]]: + # pass + penalty += split_penalty.VERY_STRONGLY_CONNECTED + return penalty + + if first_param_column <= self.column: + # Make sure 
we don't split after the opening bracket if the + # continuation indent is greater than the opening bracket: + # + # a( + # b=1, + # c=2) + penalty += split_penalty.VERY_STRONGLY_CONNECTED + return penalty + + if not self.param_list_stack: + return penalty + + param_list = self.param_list_stack[ -1 ] + if current == self.param_list_stack[ -1 ].closing_bracket: + self.param_list_stack.pop() # We're done with this state. + if newline and param_list.has_typed_return: + if param_list.split_before_closing_bracket: + penalty -= split_penalty.STRONGLY_CONNECTED + elif param_list.LastParamFitsOnLine( self.column ): + penalty += split_penalty.STRONGLY_CONNECTED + + if ( not newline and param_list.has_typed_return and + param_list.has_split_before_first_param ): + # Prefer splitting before the closing bracket if there's a return type + # and we've already split before the first parameter. + penalty += split_penalty.STRONGLY_CONNECTED + + return penalty + + if not param_list.parameters: + return penalty + + if newline: + if self._FitsOnLine( param_list.parameters[ 0 ].first_token, + _LastTokenInLine( param_list.closing_bracket ) ): + penalty += split_penalty.STRONGLY_CONNECTED + + if ( not newline and style.Get( 'SPLIT_BEFORE_NAMED_ASSIGNS' ) and + param_list.has_default_values and + current != param_list.parameters[ 0 ].first_token and + current != param_list.closing_bracket and + subtypes.PARAMETER_START in current.subtypes ): + # If we want to split before parameters when there are named assigns, + # then add a penalty for not splitting. 
+ penalty += split_penalty.STRONGLY_CONNECTED + return penalty - if first_param_column <= self.column: - # Make sure we don't split after the opening bracket if the - # continuation indent is greater than the opening bracket: - # - # a( - # b=1, - # c=2) - penalty += split_penalty.VERY_STRONGLY_CONNECTED - return penalty - - if not self.param_list_stack: - return penalty - - param_list = self.param_list_stack[-1] - if current == self.param_list_stack[-1].closing_bracket: - self.param_list_stack.pop() # We're done with this state. - if newline and param_list.has_typed_return: - if param_list.split_before_closing_bracket: - penalty -= split_penalty.STRONGLY_CONNECTED - elif param_list.LastParamFitsOnLine(self.column): - penalty += split_penalty.STRONGLY_CONNECTED - - if (not newline and param_list.has_typed_return and - param_list.has_split_before_first_param): - # Prefer splitting before the closing bracket if there's a return type - # and we've already split before the first parameter. - penalty += split_penalty.STRONGLY_CONNECTED - - return penalty - - if not param_list.parameters: - return penalty - - if newline: - if self._FitsOnLine(param_list.parameters[0].first_token, - _LastTokenInLine(param_list.closing_bracket)): - penalty += split_penalty.STRONGLY_CONNECTED - - if (not newline and style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') and - param_list.has_default_values and - current != param_list.parameters[0].first_token and - current != param_list.closing_bracket and - subtypes.PARAMETER_START in current.subtypes): - # If we want to split before parameters when there are named assigns, - # then add a penalty for not splitting. 
- penalty += split_penalty.STRONGLY_CONNECTED - - return penalty - - def _IndentWithContinuationAlignStyle(self, column): - if column == 0: - return column - align_style = style.Get('CONTINUATION_ALIGN_STYLE') - if align_style == 'FIXED': - return ((self.line.depth * style.Get('INDENT_WIDTH')) + - style.Get('CONTINUATION_INDENT_WIDTH')) - if align_style == 'VALIGN-RIGHT': - indent_width = style.Get('INDENT_WIDTH') - return indent_width * int((column + indent_width - 1) / indent_width) - return column - - def _GetNewlineColumn(self): - """Return the new column on the newline.""" - current = self.next_token - previous = current.previous_token - top_of_stack = self.stack[-1] - - if isinstance(current.spaces_required_before, list): - # Don't set the value here, as we need to look at the lines near - # this one to determine the actual horizontal alignment value. - return 0 - elif current.spaces_required_before > 2 or self.line.disable: - return current.spaces_required_before - - cont_aligned_indent = self._IndentWithContinuationAlignStyle( - top_of_stack.indent) - - if current.OpensScope(): - return cont_aligned_indent if self.paren_level else self.first_indent - - if current.ClosesScope(): - if (previous.OpensScope() or - (previous.is_comment and previous.previous_token is not None and - previous.previous_token.OpensScope())): - return max(0, - top_of_stack.indent - style.Get('CONTINUATION_INDENT_WIDTH')) - return top_of_stack.closing_scope_indent - - if (previous and previous.is_string and current.is_string and - subtypes.DICTIONARY_VALUE in current.subtypes): - return previous.column - - if style.Get('INDENT_DICTIONARY_VALUE'): - if previous and (previous.value == ':' or previous.is_pseudo): - if subtypes.DICTIONARY_VALUE in current.subtypes: - return top_of_stack.indent - - if (not self.param_list_stack and _IsCompoundStatement(self.line.first) and - (not (style.Get('DEDENT_CLOSING_BRACKETS') or - style.Get('INDENT_CLOSING_BRACKETS')) or - 
style.Get('SPLIT_BEFORE_FIRST_ARGUMENT'))): - token_indent = ( - len(self.line.first.whitespace_prefix.split('\n')[-1]) + - style.Get('INDENT_WIDTH')) - if token_indent == top_of_stack.indent: - return token_indent + style.Get('CONTINUATION_INDENT_WIDTH') - - if (self.param_list_stack and - not self.param_list_stack[-1].SplitBeforeClosingBracket( - top_of_stack.indent) and top_of_stack.indent - == ((self.line.depth + 1) * style.Get('INDENT_WIDTH'))): - # NOTE: comment inside argument list is not excluded in subtype assigner - if (subtypes.PARAMETER_START in current.subtypes or - (previous.is_comment and - subtypes.PARAMETER_START in previous.subtypes)): - return top_of_stack.indent + style.Get('CONTINUATION_INDENT_WIDTH') - - return cont_aligned_indent - - def _FitsOnLine(self, start, end): - """Determines if line between start and end can fit on the current line.""" - length = end.total_length - start.total_length - if not start.is_pseudo: - length += len(start.value) - return length + self.column <= self.column_limit - - def _EachDictEntryFitsOnOneLine(self, opening): - """Determine if each dict elems can fit on one line.""" - - def PreviousNonCommentToken(tok): - tok = tok.previous_token - while tok.is_comment: - tok = tok.previous_token - return tok - - def ImplicitStringConcatenation(tok): - num_strings = 0 - if tok.is_pseudo: - tok = tok.next_token - while tok.is_string: - num_strings += 1 - tok = tok.next_token - return num_strings > 1 - - def DictValueIsContainer(opening, closing): - """Return true if the dictionary value is a container.""" - if not opening or not closing: - return False - colon = opening.previous_token - while colon: - if not colon.is_pseudo: - break - colon = colon.previous_token - if not colon or colon.value != ':': - return False - key = colon.previous_token - if not key: - return False - return subtypes.DICTIONARY_KEY_PART in key.subtypes - - closing = opening.matching_bracket - entry_start = opening.next_token - current = 
opening.next_token.next_token - - while current and current != closing: - if subtypes.DICTIONARY_KEY in current.subtypes: - prev = PreviousNonCommentToken(current) - if prev.value == ',': - prev = PreviousNonCommentToken(prev.previous_token) - if not DictValueIsContainer(prev.matching_bracket, prev): - length = prev.total_length - entry_start.total_length - length += len(entry_start.value) - if length + self.stack[-2].indent >= self.column_limit: - return False - entry_start = current - if current.OpensScope(): - if ((current.value == '{' or - (current.is_pseudo and current.next_token.value == '{') and - subtypes.DICTIONARY_VALUE in current.subtypes) or - ImplicitStringConcatenation(current)): - # A dictionary entry that cannot fit on a single line shouldn't matter - # to this calculation. If it can't fit on a single line, then the - # opening should be on the same line as the key and the rest on - # newlines after it. But the other entries should be on single lines - # if possible. - if current.matching_bracket: - current = current.matching_bracket - while current: - if current == closing: - return True + def _IndentWithContinuationAlignStyle( self, column ): + if column == 0: + return column + align_style = style.Get( 'CONTINUATION_ALIGN_STYLE' ) + if align_style == 'FIXED': + return ( + ( self.line.depth * style.Get( 'INDENT_WIDTH' ) ) + + style.Get( 'CONTINUATION_INDENT_WIDTH' ) ) + if align_style == 'VALIGN-RIGHT': + indent_width = style.Get( 'INDENT_WIDTH' ) + return indent_width * int( ( column + indent_width - 1 ) / indent_width ) + return column + + def _GetNewlineColumn( self ): + """Return the new column on the newline.""" + current = self.next_token + previous = current.previous_token + top_of_stack = self.stack[ -1 ] + + if isinstance( current.spaces_required_before, list ): + # Don't set the value here, as we need to look at the lines near + # this one to determine the actual horizontal alignment value. 
+ return 0 + elif current.spaces_required_before > 2 or self.line.disable: + return current.spaces_required_before + + cont_aligned_indent = self._IndentWithContinuationAlignStyle( + top_of_stack.indent ) + + if current.OpensScope(): + return cont_aligned_indent if self.paren_level else self.first_indent + + if current.ClosesScope(): + if ( previous.OpensScope() or + ( previous.is_comment and previous.previous_token is not None and + previous.previous_token.OpensScope() ) ): + return max( + 0, top_of_stack.indent - style.Get( 'CONTINUATION_INDENT_WIDTH' ) ) + return top_of_stack.closing_scope_indent + + if ( previous and previous.is_string and current.is_string and + subtypes.DICTIONARY_VALUE in current.subtypes ): + return previous.column + + if style.Get( 'INDENT_DICTIONARY_VALUE' ): + if previous and ( previous.value == ':' or previous.is_pseudo ): + if subtypes.DICTIONARY_VALUE in current.subtypes: + return top_of_stack.indent + + if ( not self.param_list_stack and _IsCompoundStatement( self.line.first ) and + ( not ( style.Get( 'DEDENT_CLOSING_BRACKETS' ) or + style.Get( 'INDENT_CLOSING_BRACKETS' ) ) or + style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) ) ): + token_indent = ( + len( self.line.first.whitespace_prefix.split( '\n' )[ -1 ] ) + + style.Get( 'INDENT_WIDTH' ) ) + if token_indent == top_of_stack.indent: + return token_indent + style.Get( 'CONTINUATION_INDENT_WIDTH' ) + + if ( self.param_list_stack and + not self.param_list_stack[ -1 ].SplitBeforeClosingBracket( + top_of_stack.indent ) and top_of_stack.indent + == ( ( self.line.depth + 1 ) * style.Get( 'INDENT_WIDTH' ) ) ): + # NOTE: comment inside argument list is not excluded in subtype assigner + if ( subtypes.PARAMETER_START in current.subtypes or + ( previous.is_comment and + subtypes.PARAMETER_START in previous.subtypes ) ): + return top_of_stack.indent + style.Get( 'CONTINUATION_INDENT_WIDTH' ) + + return cont_aligned_indent + + def _FitsOnLine( self, start, end ): + """Determines if line between 
start and end can fit on the current line.""" + length = end.total_length - start.total_length + if not start.is_pseudo: + length += len( start.value ) + return length + self.column <= self.column_limit + + def _EachDictEntryFitsOnOneLine( self, opening ): + """Determine if each dict elems can fit on one line.""" + + def PreviousNonCommentToken( tok ): + tok = tok.previous_token + while tok.is_comment: + tok = tok.previous_token + return tok + + def ImplicitStringConcatenation( tok ): + num_strings = 0 + if tok.is_pseudo: + tok = tok.next_token + while tok.is_string: + num_strings += 1 + tok = tok.next_token + return num_strings > 1 + + def DictValueIsContainer( opening, closing ): + """Return true if the dictionary value is a container.""" + if not opening or not closing: + return False + colon = opening.previous_token + while colon: + if not colon.is_pseudo: + break + colon = colon.previous_token + if not colon or colon.value != ':': + return False + key = colon.previous_token + if not key: + return False + return subtypes.DICTIONARY_KEY_PART in key.subtypes + + closing = opening.matching_bracket + entry_start = opening.next_token + current = opening.next_token.next_token + + while current and current != closing: if subtypes.DICTIONARY_KEY in current.subtypes: - entry_start = current - break - current = current.next_token - else: - current = current.matching_bracket - else: - current = current.next_token - - # At this point, current is the closing bracket. Go back one to get the end - # of the dictionary entry. 
- current = PreviousNonCommentToken(current) - length = current.total_length - entry_start.total_length - length += len(entry_start.value) - return length + self.stack[-2].indent <= self.column_limit - - def _ArgumentListHasDictionaryEntry(self, token): - """Check if the function argument list has a dictionary as an arg.""" - if _IsArgumentToFunction(token): - while token: - if token.value == '{': - length = token.matching_bracket.total_length - token.total_length - return length + self.stack[-2].indent > self.column_limit - if token.ClosesScope(): - break - if token.OpensScope(): - token = token.matching_bracket - token = token.next_token - return False + prev = PreviousNonCommentToken( current ) + if prev.value == ',': + prev = PreviousNonCommentToken( prev.previous_token ) + if not DictValueIsContainer( prev.matching_bracket, prev ): + length = prev.total_length - entry_start.total_length + length += len( entry_start.value ) + if length + self.stack[ -2 ].indent >= self.column_limit: + return False + entry_start = current + if current.OpensScope(): + if ( ( current.value == '{' or + ( current.is_pseudo and current.next_token.value == '{' ) and + subtypes.DICTIONARY_VALUE in current.subtypes ) or + ImplicitStringConcatenation( current ) ): + # A dictionary entry that cannot fit on a single line shouldn't matter + # to this calculation. If it can't fit on a single line, then the + # opening should be on the same line as the key and the rest on + # newlines after it. But the other entries should be on single lines + # if possible. + if current.matching_bracket: + current = current.matching_bracket + while current: + if current == closing: + return True + if subtypes.DICTIONARY_KEY in current.subtypes: + entry_start = current + break + current = current.next_token + else: + current = current.matching_bracket + else: + current = current.next_token + + # At this point, current is the closing bracket. Go back one to get the end + # of the dictionary entry. 
+ current = PreviousNonCommentToken( current ) + length = current.total_length - entry_start.total_length + length += len( entry_start.value ) + return length + self.stack[ -2 ].indent <= self.column_limit + + def _ArgumentListHasDictionaryEntry( self, token ): + """Check if the function argument list has a dictionary as an arg.""" + if _IsArgumentToFunction( token ): + while token: + if token.value == '{': + length = token.matching_bracket.total_length - token.total_length + return length + self.stack[ -2 ].indent > self.column_limit + if token.ClosesScope(): + break + if token.OpensScope(): + token = token.matching_bracket + token = token.next_token + return False - def _ContainerFitsOnStartLine(self, opening): - """Check if the container can fit on its starting line.""" - return (opening.matching_bracket.total_length - opening.total_length + - self.stack[-1].indent) <= self.column_limit + def _ContainerFitsOnStartLine( self, opening ): + """Check if the container can fit on its starting line.""" + return ( + opening.matching_bracket.total_length - opening.total_length + + self.stack[ -1 ].indent ) <= self.column_limit _COMPOUND_STMTS = frozenset( - {'for', 'while', 'if', 'elif', 'with', 'except', 'def', 'class'}) + { 'for', 'while', 'if', 'elif', 'with', 'except', 'def', 'class' } ) -def _IsCompoundStatement(token): - if token.value == 'async': - token = token.next_token - return token.value in _COMPOUND_STMTS +def _IsCompoundStatement( token ): + if token.value == 'async': + token = token.next_token + return token.value in _COMPOUND_STMTS -def _IsFunctionDef(token): - if token.value == 'async': - token = token.next_token - return token.value == 'def' +def _IsFunctionDef( token ): + if token.value == 'async': + token = token.next_token + return token.value == 'def' -def _IsFunctionCallWithArguments(token): - while token: - if token.value == '(': - token = token.next_token - return token and token.value != ')' - elif token.name not in {'NAME', 'DOT', 'EQUAL'}: - 
break - token = token.next_token - return False +def _IsFunctionCallWithArguments( token ): + while token: + if token.value == '(': + token = token.next_token + return token and token.value != ')' + elif token.name not in { 'NAME', 'DOT', 'EQUAL' }: + break + token = token.next_token + return False -def _IsArgumentToFunction(token): - bracket = logical_line.IsSurroundedByBrackets(token) - if not bracket or bracket.value != '(': - return False - previous = bracket.previous_token - return previous and previous.is_name +def _IsArgumentToFunction( token ): + bracket = logical_line.IsSurroundedByBrackets( token ) + if not bracket or bracket.value != '(': + return False + previous = bracket.previous_token + return previous and previous.is_name -def _GetOpeningBracket(current): - """Get the opening bracket containing the current token.""" - if current.matching_bracket and not current.is_pseudo: - return current if current.OpensScope() else current.matching_bracket +def _GetOpeningBracket( current ): + """Get the opening bracket containing the current token.""" + if current.matching_bracket and not current.is_pseudo: + return current if current.OpensScope() else current.matching_bracket - while current: - if current.ClosesScope(): - current = current.matching_bracket - elif current.is_pseudo: - current = current.previous_token - elif current.OpensScope(): - return current - current = current.previous_token - return None + while current: + if current.ClosesScope(): + current = current.matching_bracket + elif current.is_pseudo: + current = current.previous_token + elif current.OpensScope(): + return current + current = current.previous_token + return None -def _LastTokenInLine(current): - while not current.is_comment and current.next_token: - current = current.next_token - return current +def _LastTokenInLine( current ): + while not current.is_comment and current.next_token: + current = current.next_token + return current -def _IsFunctionDefinition(current): - prev = 
current.previous_token - return current.value == '(' and prev and subtypes.FUNC_DEF in prev.subtypes +def _IsFunctionDefinition( current ): + prev = current.previous_token + return current.value == '(' and prev and subtypes.FUNC_DEF in prev.subtypes -def _IsLastScopeInLine(current): - current = current.matching_bracket - while current: - current = current.next_token - if current and current.OpensScope(): - return False - return True +def _IsLastScopeInLine( current ): + current = current.matching_bracket + while current: + current = current.next_token + if current and current.OpensScope(): + return False + return True -def _IsSingleElementTuple(token): - """Check if it's a single-element tuple.""" - close = token.matching_bracket - token = token.next_token - num_commas = 0 - while token != close: - if token.value == ',': - num_commas += 1 - token = token.matching_bracket if token.OpensScope() else token.next_token - return num_commas == 1 +def _IsSingleElementTuple( token ): + """Check if it's a single-element tuple.""" + close = token.matching_bracket + token = token.next_token + num_commas = 0 + while token != close: + if token.value == ',': + num_commas += 1 + token = token.matching_bracket if token.OpensScope() else token.next_token + return num_commas == 1 -def _ScopeHasNoCommas(token): - """Check if the scope has no commas.""" - close = token.matching_bracket - token = token.next_token - while token != close: - if token.value == ',': - return False - token = token.matching_bracket if token.OpensScope() else token.next_token - return True +def _ScopeHasNoCommas( token ): + """Check if the scope has no commas.""" + close = token.matching_bracket + token = token.next_token + while token != close: + if token.value == ',': + return False + token = token.matching_bracket if token.OpensScope() else token.next_token + return True -class _ParenState(object): - """Maintains the state of the bracket enclosures. 
+class _ParenState( object ): + """Maintains the state of the bracket enclosures. A stack of _ParenState objects are kept so that we know how to indent relative to the brackets. @@ -1205,32 +1217,34 @@ class _ParenState(object): Each subsequent line split gets an increasing penalty. """ - # TODO(morbo): This doesn't track "bin packing." - - def __init__(self, indent, last_space): - self.indent = indent - self.last_space = last_space - self.closing_scope_indent = 0 - self.split_before_closing_bracket = False - self.num_line_splits = 0 - - def Clone(self): - state = _ParenState(self.indent, self.last_space) - state.closing_scope_indent = self.closing_scope_indent - state.split_before_closing_bracket = self.split_before_closing_bracket - state.num_line_splits = self.num_line_splits - return state - - def __repr__(self): - return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % ( - self.indent, self.last_space, self.closing_scope_indent) - - def __eq__(self, other): - return hash(self) == hash(other) - - def __ne__(self, other): - return not self == other - - def __hash__(self, *args, **kwargs): - return hash((self.indent, self.last_space, self.closing_scope_indent, - self.split_before_closing_bracket, self.num_line_splits)) + # TODO(morbo): This doesn't track "bin packing." 
+ + def __init__( self, indent, last_space ): + self.indent = indent + self.last_space = last_space + self.closing_scope_indent = 0 + self.split_before_closing_bracket = False + self.num_line_splits = 0 + + def Clone( self ): + state = _ParenState( self.indent, self.last_space ) + state.closing_scope_indent = self.closing_scope_indent + state.split_before_closing_bracket = self.split_before_closing_bracket + state.num_line_splits = self.num_line_splits + return state + + def __repr__( self ): + return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % ( + self.indent, self.last_space, self.closing_scope_indent ) + + def __eq__( self, other ): + return hash( self ) == hash( other ) + + def __ne__( self, other ): + return not self == other + + def __hash__( self, *args, **kwargs ): + return hash( + ( + self.indent, self.last_space, self.closing_scope_indent, + self.split_before_closing_bracket, self.num_line_splits ) ) diff --git a/yapf/yapflib/format_token.py b/yapf/yapflib/format_token.py index 070987851..3dd570ef4 100644 --- a/yapf/yapflib/format_token.py +++ b/yapf/yapflib/format_token.py @@ -25,12 +25,12 @@ CONTINUATION = token.N_TOKENS -_OPENING_BRACKETS = frozenset({'(', '[', '{'}) -_CLOSING_BRACKETS = frozenset({')', ']', '}'}) +_OPENING_BRACKETS = frozenset( { '(', '[', '{' } ) +_CLOSING_BRACKETS = frozenset( { ')', ']', '}' } ) -def _TabbedContinuationAlignPadding(spaces, align_style, tab_width): - """Build padding string for continuation alignment in tabbed indentation. +def _TabbedContinuationAlignPadding( spaces, align_style, tab_width ): + """Build padding string for continuation alignment in tabbed indentation. Arguments: spaces: (int) The number of spaces to place before the token for alignment. @@ -40,15 +40,15 @@ def _TabbedContinuationAlignPadding(spaces, align_style, tab_width): Returns: A padding string for alignment with style specified by align_style option. 
""" - if align_style in ('FIXED', 'VALIGN-RIGHT'): - if spaces > 0: - return '\t' * int((spaces + tab_width - 1) / tab_width) - return '' - return ' ' * spaces + if align_style in ( 'FIXED', 'VALIGN-RIGHT' ): + if spaces > 0: + return '\t' * int( ( spaces + tab_width - 1 ) / tab_width ) + return '' + return ' ' * spaces -class FormatToken(object): - """Enhanced token information for formatting. +class FormatToken( object ): + """Enhanced token information for formatting. This represents the token plus additional information useful for reformatting the code. @@ -83,58 +83,57 @@ class FormatToken(object): newlines: The number of newlines needed before this token. """ - def __init__(self, node, name): - """Constructor. + def __init__( self, node, name ): + """Constructor. Arguments: node: (pytree.Leaf) The node that's being wrapped. name: (string) The name of the node. """ - self.node = node - self.name = name - self.type = node.type - self.column = node.column - self.lineno = node.lineno - self.value = node.value - - if self.is_continuation: - self.value = node.value.rstrip() - - self.next_token = None - self.previous_token = None - self.matching_bracket = None - self.parameters = [] - self.container_opening = None - self.container_elements = [] - self.whitespace_prefix = '' - self.total_length = 0 - self.split_penalty = 0 - self.can_break_before = False - self.must_break_before = pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.MUST_SPLIT, default=False) - self.newlines = pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.NEWLINES) - self.spaces_required_before = 0 - - if self.is_comment: - self.spaces_required_before = style.Get('SPACES_BEFORE_COMMENT') - - stypes = pytree_utils.GetNodeAnnotation(node, - pytree_utils.Annotation.SUBTYPE) - self.subtypes = {subtypes.NONE} if not stypes else stypes - self.is_pseudo = hasattr(node, 'is_pseudo') and node.is_pseudo - - @property - def formatted_whitespace_prefix(self): - if 
style.Get('INDENT_BLANK_LINES'): - without_newlines = self.whitespace_prefix.lstrip('\n') - height = len(self.whitespace_prefix) - len(without_newlines) - if height: - return ('\n' + without_newlines) * height - return self.whitespace_prefix - - def AddWhitespacePrefix(self, newlines_before, spaces=0, indent_level=0): - """Register a token's whitespace prefix. + self.node = node + self.name = name + self.type = node.type + self.column = node.column + self.lineno = node.lineno + self.value = node.value + + if self.is_continuation: + self.value = node.value.rstrip() + + self.next_token = None + self.previous_token = None + self.matching_bracket = None + self.parameters = [] + self.container_opening = None + self.container_elements = [] + self.whitespace_prefix = '' + self.total_length = 0 + self.split_penalty = 0 + self.can_break_before = False + self.must_break_before = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.MUST_SPLIT, default = False ) + self.newlines = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.NEWLINES ) + self.spaces_required_before = 0 + + if self.is_comment: + self.spaces_required_before = style.Get( 'SPACES_BEFORE_COMMENT' ) + + stypes = pytree_utils.GetNodeAnnotation( node, pytree_utils.Annotation.SUBTYPE ) + self.subtypes = { subtypes.NONE } if not stypes else stypes + self.is_pseudo = hasattr( node, 'is_pseudo' ) and node.is_pseudo + + @property + def formatted_whitespace_prefix( self ): + if style.Get( 'INDENT_BLANK_LINES' ): + without_newlines = self.whitespace_prefix.lstrip( '\n' ) + height = len( self.whitespace_prefix ) - len( without_newlines ) + if height: + return ( '\n' + without_newlines ) * height + return self.whitespace_prefix + + def AddWhitespacePrefix( self, newlines_before, spaces = 0, indent_level = 0 ): + """Register a token's whitespace prefix. This is the whitespace that will be output before a token's string. 
@@ -143,261 +142,196 @@ def AddWhitespacePrefix(self, newlines_before, spaces=0, indent_level=0): spaces: (int) The number of spaces to place before the token. indent_level: (int) The indentation level. """ - if style.Get('USE_TABS'): - if newlines_before > 0: - indent_before = '\t' * indent_level + _TabbedContinuationAlignPadding( - spaces, style.Get('CONTINUATION_ALIGN_STYLE'), - style.Get('INDENT_WIDTH')) - else: - indent_before = '\t' * indent_level + ' ' * spaces - else: - indent_before = (' ' * indent_level * style.Get('INDENT_WIDTH') + - ' ' * spaces) - - if self.is_comment: - comment_lines = [s.lstrip() for s in self.value.splitlines()] - self.value = ('\n' + indent_before).join(comment_lines) - - # Update our own value since we are changing node value - self.value = self.value - - if not self.whitespace_prefix: - self.whitespace_prefix = ('\n' * (self.newlines or newlines_before) + - indent_before) - else: - self.whitespace_prefix += indent_before - - def AdjustNewlinesBefore(self, newlines_before): - """Change the number of newlines before this token.""" - self.whitespace_prefix = ('\n' * newlines_before + - self.whitespace_prefix.lstrip('\n')) - - def RetainHorizontalSpacing(self, first_column, depth): - """Retains a token's horizontal spacing.""" - previous = self.previous_token - if not previous: - return - - if previous.is_pseudo: - previous = previous.previous_token - if not previous: - return - - cur_lineno = self.lineno - prev_lineno = previous.lineno - if previous.is_multiline_string: - prev_lineno += previous.value.count('\n') - - if (cur_lineno != prev_lineno or - (previous.is_pseudo and previous.value != ')' and - cur_lineno != previous.previous_token.lineno)): - self.spaces_required_before = ( - self.column - first_column + depth * style.Get('INDENT_WIDTH')) - return - - cur_column = self.column - prev_column = previous.column - prev_len = len(previous.value) - - if previous.is_pseudo and previous.value == ')': - prev_column -= 1 - prev_len = 
0 - - if previous.is_multiline_string: - prev_len = len(previous.value.split('\n')[-1]) - if '\n' in previous.value: - prev_column = 0 # Last line starts in column 0. - - self.spaces_required_before = cur_column - (prev_column + prev_len) - - def OpensScope(self): - return self.value in _OPENING_BRACKETS - - def ClosesScope(self): - return self.value in _CLOSING_BRACKETS - - def AddSubtype(self, subtype): - self.subtypes.add(subtype) - - def __repr__(self): - msg = ('FormatToken(name={0}, value={1}, column={2}, lineno={3}, ' - 'splitpenalty={4}'.format( - 'DOCSTRING' if self.is_docstring else self.name, self.value, - self.column, self.lineno, self.split_penalty)) - msg += ', pseudo)' if self.is_pseudo else ')' - return msg - - @property - def node_split_penalty(self): - """Split penalty attached to the pytree node of this token.""" - return pytree_utils.GetNodeAnnotation( - self.node, pytree_utils.Annotation.SPLIT_PENALTY, default=0) - - @property - def is_binary_op(self): - """Token is a binary operator.""" - return subtypes.BINARY_OPERATOR in self.subtypes - - @property - @py3compat.lru_cache() - def is_arithmetic_op(self): - """Token is an arithmetic operator.""" - return self.value in frozenset({ - '+', # Add - '-', # Subtract - '*', # Multiply - '@', # Matrix Multiply - '/', # Divide - '//', # Floor Divide - '%', # Modulo - '<<', # Left Shift - '>>', # Right Shift - '|', # Bitwise Or - '&', # Bitwise Add - '^', # Bitwise Xor - '**', # Power - }) - - @property - def is_simple_expr(self): - """Token is an operator in a simple expression.""" - return subtypes.SIMPLE_EXPRESSION in self.subtypes - - @property - def is_subscript_colon(self): - """Token is a subscript colon.""" - return subtypes.SUBSCRIPT_COLON in self.subtypes - - @property - def is_comment(self): - return self.type == token.COMMENT - - @property - def is_continuation(self): - return self.type == CONTINUATION - - @property - @py3compat.lru_cache() - def is_keyword(self): - return 
keyword.iskeyword(self.value) - - @property - def is_name(self): - return self.type == token.NAME and not self.is_keyword - - @property - def is_number(self): - return self.type == token.NUMBER - - @property - def is_string(self): - return self.type == token.STRING - - @property - def is_multiline_string(self): - """Test if this string is a multiline string. + if style.Get( 'USE_TABS' ): + if newlines_before > 0: + indent_before = '\t' * indent_level + _TabbedContinuationAlignPadding( + spaces, style.Get( 'CONTINUATION_ALIGN_STYLE' ), + style.Get( 'INDENT_WIDTH' ) ) + else: + indent_before = '\t' * indent_level + ' ' * spaces + else: + indent_before = ( + ' ' * indent_level * style.Get( 'INDENT_WIDTH' ) + ' ' * spaces ) + + if self.is_comment: + comment_lines = [ s.lstrip() for s in self.value.splitlines() ] + self.value = ( '\n' + indent_before ).join( comment_lines ) + + # Update our own value since we are changing node value + self.value = self.value + + if not self.whitespace_prefix: + self.whitespace_prefix = ( + '\n' * ( self.newlines or newlines_before ) + indent_before ) + else: + self.whitespace_prefix += indent_before + + def AdjustNewlinesBefore( self, newlines_before ): + """Change the number of newlines before this token.""" + self.whitespace_prefix = ( + '\n' * newlines_before + self.whitespace_prefix.lstrip( '\n' ) ) + + def RetainHorizontalSpacing( self, first_column, depth ): + """Retains a token's horizontal spacing.""" + previous = self.previous_token + if not previous: + return + + if previous.is_pseudo: + previous = previous.previous_token + if not previous: + return + + cur_lineno = self.lineno + prev_lineno = previous.lineno + if previous.is_multiline_string: + prev_lineno += previous.value.count( '\n' ) + + if ( cur_lineno != prev_lineno or + ( previous.is_pseudo and previous.value != ')' and + cur_lineno != previous.previous_token.lineno ) ): + self.spaces_required_before = ( + self.column - first_column + depth * style.Get( 'INDENT_WIDTH' 
) ) + return + + cur_column = self.column + prev_column = previous.column + prev_len = len( previous.value ) + + if previous.is_pseudo and previous.value == ')': + prev_column -= 1 + prev_len = 0 + + if previous.is_multiline_string: + prev_len = len( previous.value.split( '\n' )[ -1 ] ) + if '\n' in previous.value: + prev_column = 0 # Last line starts in column 0. + + self.spaces_required_before = cur_column - ( prev_column + prev_len ) + + def OpensScope( self ): + return self.value in _OPENING_BRACKETS + + def ClosesScope( self ): + return self.value in _CLOSING_BRACKETS + + def AddSubtype( self, subtype ): + self.subtypes.add( subtype ) + + def __repr__( self ): + msg = ( + 'FormatToken(name={0}, value={1}, column={2}, lineno={3}, ' + 'splitpenalty={4}'.format( + 'DOCSTRING' if self.is_docstring else self.name, self.value, + self.column, self.lineno, self.split_penalty ) ) + msg += ', pseudo)' if self.is_pseudo else ')' + return msg + + @property + def node_split_penalty( self ): + """Split penalty attached to the pytree node of this token.""" + return pytree_utils.GetNodeAnnotation( + self.node, pytree_utils.Annotation.SPLIT_PENALTY, default = 0 ) + + @property + def is_binary_op( self ): + """Token is a binary operator.""" + return subtypes.BINARY_OPERATOR in self.subtypes + + @property + @py3compat.lru_cache() + def is_arithmetic_op( self ): + """Token is an arithmetic operator.""" + return self.value in frozenset( + { + '+', # Add + '-', # Subtract + '*', # Multiply + '@', # Matrix Multiply + '/', # Divide + '//', # Floor Divide + '%', # Modulo + '<<', # Left Shift + '>>', # Right Shift + '|', # Bitwise Or + '&', # Bitwise Add + '^', # Bitwise Xor + '**', # Power + } ) + + @property + def is_simple_expr( self ): + """Token is an operator in a simple expression.""" + return subtypes.SIMPLE_EXPRESSION in self.subtypes + + @property + def is_subscript_colon( self ): + """Token is a subscript colon.""" + return subtypes.SUBSCRIPT_COLON in self.subtypes + + 
@property + def is_comment( self ): + return self.type == token.COMMENT + + @property + def is_continuation( self ): + return self.type == CONTINUATION + + @property + @py3compat.lru_cache() + def is_keyword( self ): + return keyword.iskeyword( self.value ) + + @property + def is_name( self ): + return self.type == token.NAME and not self.is_keyword + + @property + def is_number( self ): + return self.type == token.NUMBER + + @property + def is_string( self ): + return self.type == token.STRING + + @property + def is_multiline_string( self ): + """Test if this string is a multiline string. Returns: A multiline string always ends with triple quotes, so if it is a string token, inspect the last 3 characters and return True if it is a triple double or triple single quote mark. """ - return self.is_string and self.value.endswith(('"""', "'''")) - - @property - def is_docstring(self): - return self.is_string and self.previous_token is None - - @property - def is_pylint_comment(self): - return self.is_comment and re.match(r'#.*\bpylint:\s*(disable|enable)=', - self.value) - - @property - def is_pytype_comment(self): - return self.is_comment and re.match(r'#.*\bpytype:\s*(disable|enable)=', - self.value) - - @property - def is_copybara_comment(self): - return self.is_comment and re.match( - r'#.*\bcopybara:\s*(strip|insert|replace)', self.value) - - @property - def is_assign(self): - return subtypes.ASSIGN_OPERATOR in self.subtypes - - @property - def is_dict_colon(self): - # if the token is dictionary colon and - # the dictionary has no comp_for - return self.value == ':' and self.previous_token.is_dict_key - - @property - def is_dict_key(self): - # if the token is dictionary key which is not preceded by doubel stars and - # the dictionary has no comp_for - return subtypes.DICTIONARY_KEY_PART in self.subtypes - - @property - def is_dict_key_start(self): - # if the token is dictionary key start - return subtypes.DICTIONARY_KEY in self.subtypes - - @property - def 
is_dict_value(self): - return subtypes.DICTIONARY_VALUE in self.subtypes - - @property - def is_augassign(self): - augassigns = {'+=', '-=' , '*=' , '@=' , '/=' , '%=' , '&=' , '|=' , '^=' , - '<<=' , '>>=' , '**=' , '//='} - return self.value in augassigns - - @property - def is_argassign(self): - return (subtypes.DEFAULT_OR_NAMED_ASSIGN in self.subtypes - or subtypes.VARARGS_LIST in self.subtypes) - - @property - def is_argname(self): - # it's the argument part before argument assignment operator, - # including tnames and data type - # not the assign operator, - # not the value after the assign operator - - # argument without assignment is also included - # the token is arg part before '=' but not after '=' - if self.is_argname_start: - return True - - # exclude comment inside argument list - if not self.is_comment: - # the token is any element in typed arglist - if subtypes.TYPED_NAME_ARG_LIST in self.subtypes: - return True - - return False - - @property - def is_argname_start(self): - # return true if it's the start of every argument entry - previous_subtypes = {0} - if self.previous_token: - previous_subtypes = self.previous_token.subtypes - - return ( - (not self.is_comment - and subtypes.DEFAULT_OR_NAMED_ASSIGN not in self.subtypes - and subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in self.subtypes - and subtypes.DEFAULT_OR_NAMED_ASSIGN not in previous_subtypes - and (not subtypes.PARAMETER_STOP in self.subtypes - or subtypes.PARAMETER_START in self.subtypes) - ) - or # if there is comment, the arg after it is the argname start - (not self.is_comment and self.previous_token and self.previous_token.is_comment - and - (subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in previous_subtypes - or subtypes.TYPED_NAME_ARG_LIST in self.subtypes - or subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in self.subtypes)) - ) + return self.is_string and self.value.endswith( ( '"""', "'''" ) ) + + @property + def is_docstring( self ): + return self.is_string and self.previous_token is None 
+ + @property + def is_pylint_comment( self ): + return self.is_comment and re.match( + r'#.*\bpylint:\s*(disable|enable)=', self.value ) + + @property + def is_pytype_comment( self ): + return self.is_comment and re.match( + r'#.*\bpytype:\s*(disable|enable)=', self.value ) + + @property + def is_copybara_comment( self ): + return self.is_comment and re.match( + r'#.*\bcopybara:\s*(strip|insert|replace)', self.value ) + + @property + def is_assign( self ): + return subtypes.ASSIGN_OPERATOR in self.subtypes + + @property + def is_augassign( self ): + augassigns = { + '+=', '-=', '*=', '@=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=', '**=', + '//=' + } + return self.value in augassigns diff --git a/yapf/yapflib/identify_container.py b/yapf/yapflib/identify_container.py index d027cc5d4..049694a77 100644 --- a/yapf/yapflib/identify_container.py +++ b/yapf/yapflib/identify_container.py @@ -25,45 +25,45 @@ from yapf.pytree import pytree_visitor -def IdentifyContainers(tree): - """Run the identify containers visitor over the tree, modifying it in place. +def IdentifyContainers( tree ): + """Run the identify containers visitor over the tree, modifying it in place. Arguments: tree: the top-level pytree node to annotate with subtypes. 
""" - identify_containers = _IdentifyContainers() - identify_containers.Visit(tree) + identify_containers = _IdentifyContainers() + identify_containers.Visit( tree ) -class _IdentifyContainers(pytree_visitor.PyTreeVisitor): - """_IdentifyContainers - see file-level docstring for detailed description.""" +class _IdentifyContainers( pytree_visitor.PyTreeVisitor ): + """_IdentifyContainers - see file-level docstring for detailed description.""" - def Visit_trailer(self, node): # pylint: disable=invalid-name - for child in node.children: - self.Visit(child) + def Visit_trailer( self, node ): # pylint: disable=invalid-name + for child in node.children: + self.Visit( child ) - if len(node.children) != 3: - return - if node.children[0].type != grammar_token.LPAR: - return + if len( node.children ) != 3: + return + if node.children[ 0 ].type != grammar_token.LPAR: + return - if pytree_utils.NodeName(node.children[1]) == 'arglist': - for child in node.children[1].children: - pytree_utils.SetOpeningBracket( - pytree_utils.FirstLeafNode(child), node.children[0]) - else: - pytree_utils.SetOpeningBracket( - pytree_utils.FirstLeafNode(node.children[1]), node.children[0]) + if pytree_utils.NodeName( node.children[ 1 ] ) == 'arglist': + for child in node.children[ 1 ].children: + pytree_utils.SetOpeningBracket( + pytree_utils.FirstLeafNode( child ), node.children[ 0 ] ) + else: + pytree_utils.SetOpeningBracket( + pytree_utils.FirstLeafNode( node.children[ 1 ] ), node.children[ 0 ] ) - def Visit_atom(self, node): # pylint: disable=invalid-name - for child in node.children: - self.Visit(child) + def Visit_atom( self, node ): # pylint: disable=invalid-name + for child in node.children: + self.Visit( child ) - if len(node.children) != 3: - return - if node.children[0].type != grammar_token.LPAR: - return + if len( node.children ) != 3: + return + if node.children[ 0 ].type != grammar_token.LPAR: + return - for child in node.children[1].children: - pytree_utils.SetOpeningBracket( - 
pytree_utils.FirstLeafNode(child), node.children[0]) + for child in node.children[ 1 ].children: + pytree_utils.SetOpeningBracket( + pytree_utils.FirstLeafNode( child ), node.children[ 0 ] ) diff --git a/yapf/yapflib/line_joiner.py b/yapf/yapflib/line_joiner.py index f0acd2f37..8a2911397 100644 --- a/yapf/yapflib/line_joiner.py +++ b/yapf/yapflib/line_joiner.py @@ -36,11 +36,11 @@ from yapf.yapflib import style -_CLASS_OR_FUNC = frozenset({'def', 'class'}) +_CLASS_OR_FUNC = frozenset( { 'def', 'class' } ) -def CanMergeMultipleLines(lines, last_was_merged=False): - """Determine if multiple lines can be joined into one. +def CanMergeMultipleLines( lines, last_was_merged = False ): + """Determine if multiple lines can be joined into one. Arguments: lines: (list of LogicalLine) This is a splice of LogicalLines from the full @@ -51,39 +51,39 @@ def CanMergeMultipleLines(lines, last_was_merged=False): True if two consecutive lines can be joined together. In reality, this will only happen if two consecutive lines can be joined, due to the style guide. """ - # The indentation amount for the starting line (number of spaces). - indent_amt = lines[0].depth * style.Get('INDENT_WIDTH') - if len(lines) == 1 or indent_amt > style.Get('COLUMN_LIMIT'): - return False - - if (len(lines) >= 3 and lines[2].depth >= lines[1].depth and - lines[0].depth != lines[2].depth): - # If lines[2]'s depth is greater than or equal to line[1]'s depth, we're not - # looking at a single statement (e.g., if-then, while, etc.). A following - # line with the same depth as the first line isn't part of the lines we - # would want to combine. - return False # Don't merge more than two lines together. + # The indentation amount for the starting line (number of spaces). 
+ indent_amt = lines[ 0 ].depth * style.Get( 'INDENT_WIDTH' ) + if len( lines ) == 1 or indent_amt > style.Get( 'COLUMN_LIMIT' ): + return False + + if ( len( lines ) >= 3 and lines[ 2 ].depth >= lines[ 1 ].depth and + lines[ 0 ].depth != lines[ 2 ].depth ): + # If lines[2]'s depth is greater than or equal to line[1]'s depth, we're not + # looking at a single statement (e.g., if-then, while, etc.). A following + # line with the same depth as the first line isn't part of the lines we + # would want to combine. + return False # Don't merge more than two lines together. + + if lines[ 0 ].first.value in _CLASS_OR_FUNC: + # Don't join lines onto the starting line of a class or function. + return False + + limit = style.Get( 'COLUMN_LIMIT' ) - indent_amt + if lines[ 0 ].last.total_length < limit: + limit -= lines[ 0 ].last.total_length + + if lines[ 0 ].first.value == 'if': + return _CanMergeLineIntoIfStatement( lines, limit ) + if last_was_merged and lines[ 0 ].first.value in { 'elif', 'else' }: + return _CanMergeLineIntoIfStatement( lines, limit ) + + # TODO(morbo): Other control statements? - if lines[0].first.value in _CLASS_OR_FUNC: - # Don't join lines onto the starting line of a class or function. return False - limit = style.Get('COLUMN_LIMIT') - indent_amt - if lines[0].last.total_length < limit: - limit -= lines[0].last.total_length - - if lines[0].first.value == 'if': - return _CanMergeLineIntoIfStatement(lines, limit) - if last_was_merged and lines[0].first.value in {'elif', 'else'}: - return _CanMergeLineIntoIfStatement(lines, limit) - - # TODO(morbo): Other control statements? - return False - - -def _CanMergeLineIntoIfStatement(lines, limit): - """Determine if we can merge a short if-then statement into one line. +def _CanMergeLineIntoIfStatement( lines, limit ): + """Determine if we can merge a short if-then statement into one line. 
Two lines of an if-then statement can be merged if they were that way in the original source, fit on the line without going over the column limit, and are @@ -97,13 +97,13 @@ def _CanMergeLineIntoIfStatement(lines, limit): Returns: True if the lines can be merged, False otherwise. """ - if len(lines[1].tokens) == 1 and lines[1].last.is_multiline_string: - # This might be part of a multiline shebang. - return True - if lines[0].lineno != lines[1].lineno: - # Don't merge lines if the original lines weren't merged. - return False - if lines[1].last.total_length >= limit: - # Don't merge lines if the result goes over the column limit. - return False - return style.Get('JOIN_MULTIPLE_LINES') + if len( lines[ 1 ].tokens ) == 1 and lines[ 1 ].last.is_multiline_string: + # This might be part of a multiline shebang. + return True + if lines[ 0 ].lineno != lines[ 1 ].lineno: + # Don't merge lines if the original lines weren't merged. + return False + if lines[ 1 ].last.total_length >= limit: + # Don't merge lines if the result goes over the column limit. + return False + return style.Get( 'JOIN_MULTIPLE_LINES' ) diff --git a/yapf/yapflib/logical_line.py b/yapf/yapflib/logical_line.py index 8c84b7ba8..b02e3588b 100644 --- a/yapf/yapflib/logical_line.py +++ b/yapf/yapflib/logical_line.py @@ -29,8 +29,8 @@ from lib2to3.fixer_util import syms as python_symbols -class LogicalLine(object): - """Represents a single logical line in the output. +class LogicalLine( object ): + """Represents a single logical line in the output. Attributes: depth: indentation depth of this line. This is just a numeric value used to @@ -38,8 +38,8 @@ class LogicalLine(object): actual amount of spaces, which is style-dependent. """ - def __init__(self, depth, tokens=None): - """Constructor. + def __init__( self, depth, tokens = None ): + """Constructor. Creates a new logical line with the given depth an initial list of tokens. 
Constructs the doubly-linked lists for format tokens using their built-in @@ -49,108 +49,108 @@ def __init__(self, depth, tokens=None): depth: indentation depth of this line tokens: initial list of tokens """ - self.depth = depth - self._tokens = tokens or [] - self.disable = False - - if self._tokens: - # Set up a doubly linked list. - for index, tok in enumerate(self._tokens[1:]): - # Note, 'index' is the index to the previous token. - tok.previous_token = self._tokens[index] - self._tokens[index].next_token = tok - - def CalculateFormattingInformation(self): - """Calculate the split penalty and total length for the tokens.""" - # Say that the first token in the line should have a space before it. This - # means only that if this logical line is joined with a predecessor line, - # then there will be a space between them. - self.first.spaces_required_before = 1 - self.first.total_length = len(self.first.value) - - prev_token = self.first - prev_length = self.first.total_length - for token in self._tokens[1:]: - if (token.spaces_required_before == 0 and - _SpaceRequiredBetween(prev_token, token, self.disable)): - token.spaces_required_before = 1 - - tok_len = len(token.value) if not token.is_pseudo else 0 - - spaces_required_before = token.spaces_required_before - if isinstance(spaces_required_before, list): - assert token.is_comment, token - - # If here, we are looking at a comment token that appears on a line - # with other tokens (but because it is a comment, it is always the last - # token). Rather than specifying the actual number of spaces here, - # hard code a value of 0 and then set it later. This logic only works - # because this comment token is guaranteed to be the last token in the - # list. - spaces_required_before = 0 - - token.total_length = prev_length + tok_len + spaces_required_before - - # The split penalty has to be computed before {must|can}_break_before, - # because these may use it for their decision. 
- token.split_penalty += _SplitPenalty(prev_token, token) - token.must_break_before = _MustBreakBefore(prev_token, token) - token.can_break_before = ( - token.must_break_before or _CanBreakBefore(prev_token, token)) - - prev_length = token.total_length - prev_token = token - - def Split(self): - """Split the line at semicolons.""" - if not self.has_semicolon or self.disable: - return [self] - - llines = [] - lline = LogicalLine(self.depth) - for tok in self._tokens: - if tok.value == ';': - llines.append(lline) - lline = LogicalLine(self.depth) - else: - lline.AppendToken(tok) - - if lline.tokens: - llines.append(lline) - - for lline in llines: - lline.first.previous_token = None - lline.last.next_token = None - - return llines - - ############################################################################ - # Token Access and Manipulation Methods # - ############################################################################ - - def AppendToken(self, token): - """Append a new FormatToken to the tokens contained in this line.""" - if self._tokens: - token.previous_token = self.last - self.last.next_token = token - self._tokens.append(token) - - @property - def first(self): - """Returns the first non-whitespace token.""" - return self._tokens[0] - - @property - def last(self): - """Returns the last non-whitespace token.""" - return self._tokens[-1] - - ############################################################################ - # Token -> String Methods # - ############################################################################ - - def AsCode(self, indent_per_depth=2): - """Return a "code" representation of this line. + self.depth = depth + self._tokens = tokens or [] + self.disable = False + + if self._tokens: + # Set up a doubly linked list. + for index, tok in enumerate( self._tokens[ 1 : ] ): + # Note, 'index' is the index to the previous token. 
+ tok.previous_token = self._tokens[ index ] + self._tokens[ index ].next_token = tok + + def CalculateFormattingInformation( self ): + """Calculate the split penalty and total length for the tokens.""" + # Say that the first token in the line should have a space before it. This + # means only that if this logical line is joined with a predecessor line, + # then there will be a space between them. + self.first.spaces_required_before = 1 + self.first.total_length = len( self.first.value ) + + prev_token = self.first + prev_length = self.first.total_length + for token in self._tokens[ 1 : ]: + if ( token.spaces_required_before == 0 and + _SpaceRequiredBetween( prev_token, token, self.disable ) ): + token.spaces_required_before = 1 + + tok_len = len( token.value ) if not token.is_pseudo else 0 + + spaces_required_before = token.spaces_required_before + if isinstance( spaces_required_before, list ): + assert token.is_comment, token + + # If here, we are looking at a comment token that appears on a line + # with other tokens (but because it is a comment, it is always the last + # token). Rather than specifying the actual number of spaces here, + # hard code a value of 0 and then set it later. This logic only works + # because this comment token is guaranteed to be the last token in the + # list. + spaces_required_before = 0 + + token.total_length = prev_length + tok_len + spaces_required_before + + # The split penalty has to be computed before {must|can}_break_before, + # because these may use it for their decision. 
+ token.split_penalty += _SplitPenalty( prev_token, token ) + token.must_break_before = _MustBreakBefore( prev_token, token ) + token.can_break_before = ( + token.must_break_before or _CanBreakBefore( prev_token, token ) ) + + prev_length = token.total_length + prev_token = token + + def Split( self ): + """Split the line at semicolons.""" + if not self.has_semicolon or self.disable: + return [ self ] + + llines = [] + lline = LogicalLine( self.depth ) + for tok in self._tokens: + if tok.value == ';': + llines.append( lline ) + lline = LogicalLine( self.depth ) + else: + lline.AppendToken( tok ) + + if lline.tokens: + llines.append( lline ) + + for lline in llines: + lline.first.previous_token = None + lline.last.next_token = None + + return llines + + ############################################################################ + # Token Access and Manipulation Methods # + ############################################################################ + + def AppendToken( self, token ): + """Append a new FormatToken to the tokens contained in this line.""" + if self._tokens: + token.previous_token = self.last + self.last.next_token = token + self._tokens.append( token ) + + @property + def first( self ): + """Returns the first non-whitespace token.""" + return self._tokens[ 0 ] + + @property + def last( self ): + """Returns the last non-whitespace token.""" + return self._tokens[ -1 ] + + ############################################################################ + # Token -> String Methods # + ############################################################################ + + def AsCode( self, indent_per_depth = 2 ): + """Return a "code" representation of this line. The code representation shows how the line would be printed out as code. @@ -164,516 +164,518 @@ def AsCode(self, indent_per_depth=2): Returns: A string representing the line as code. 
""" - indent = ' ' * indent_per_depth * self.depth - tokens_str = ' '.join(tok.value for tok in self._tokens) - return indent + tokens_str + indent = ' ' * indent_per_depth * self.depth + tokens_str = ' '.join( tok.value for tok in self._tokens ) + return indent + tokens_str - def __str__(self): # pragma: no cover - return self.AsCode() + def __str__( self ): # pragma: no cover + return self.AsCode() - def __repr__(self): # pragma: no cover - tokens_repr = ','.join( - '{0}({1!r})'.format(tok.name, tok.value) for tok in self._tokens) - return 'LogicalLine(depth={0}, tokens=[{1}])'.format( - self.depth, tokens_repr) + def __repr__( self ): # pragma: no cover + tokens_repr = ','.join( + '{0}({1!r})'.format( tok.name, tok.value ) for tok in self._tokens ) + return 'LogicalLine(depth={0}, tokens=[{1}])'.format( self.depth, tokens_repr ) - ############################################################################ - # Properties # - ############################################################################ + ############################################################################ + # Properties # + ############################################################################ - @property - def tokens(self): - """Access the tokens contained within this line. + @property + def tokens( self ): + """Access the tokens contained within this line. The caller must not modify the tokens list returned by this method. Returns: List of tokens in this line. """ - return self._tokens + return self._tokens - @property - def lineno(self): - """Return the line number of this logical line. + @property + def lineno( self ): + """Return the line number of this logical line. Returns: The line number of the first token in this logical line. """ - return self.first.lineno + return self.first.lineno - @property - def start(self): - """The start of the logical line. + @property + def start( self ): + """The start of the logical line. Returns: A tuple of the starting line number and column. 
""" - return (self.first.lineno, self.first.column) + return ( self.first.lineno, self.first.column ) - @property - def end(self): - """The end of the logical line. + @property + def end( self ): + """The end of the logical line. Returns: A tuple of the ending line number and column. """ - return (self.last.lineno, self.last.column + len(self.last.value)) + return ( self.last.lineno, self.last.column + len( self.last.value ) ) - @property - def is_comment(self): - return self.first.is_comment + @property + def is_comment( self ): + return self.first.is_comment - @property - def has_semicolon(self): - return any(tok.value == ';' for tok in self._tokens) + @property + def has_semicolon( self ): + return any( tok.value == ';' for tok in self._tokens ) -def _IsIdNumberStringToken(tok): - return tok.is_keyword or tok.is_name or tok.is_number or tok.is_string +def _IsIdNumberStringToken( tok ): + return tok.is_keyword or tok.is_name or tok.is_number or tok.is_string -def _IsUnaryOperator(tok): - return subtypes.UNARY_OPERATOR in tok.subtypes +def _IsUnaryOperator( tok ): + return subtypes.UNARY_OPERATOR in tok.subtypes -def _HasPrecedence(tok): - """Whether a binary operation has precedence within its context.""" - node = tok.node +def _HasPrecedence( tok ): + """Whether a binary operation has precedence within its context.""" + node = tok.node - # We let ancestor be the statement surrounding the operation that tok is the - # operator in. - ancestor = node.parent.parent + # We let ancestor be the statement surrounding the operation that tok is the + # operator in. + ancestor = node.parent.parent - while ancestor is not None: - # Search through the ancestor nodes in the parse tree for operators with - # lower precedence. - predecessor_type = pytree_utils.NodeName(ancestor) - if predecessor_type in ['arith_expr', 'term']: - # An ancestor "arith_expr" or "term" means we have found an operator - # with lower precedence than our tok. 
- return True - if predecessor_type != 'atom': - # We understand the context to look for precedence within as an - # arbitrary nesting of "arith_expr", "term", and "atom" nodes. If we - # leave this context we have not found a lower precedence operator. - return False - # Under normal usage we expect a complete parse tree to be available and - # we will return before we get an AttributeError from the root. - ancestor = ancestor.parent + while ancestor is not None: + # Search through the ancestor nodes in the parse tree for operators with + # lower precedence. + predecessor_type = pytree_utils.NodeName( ancestor ) + if predecessor_type in [ 'arith_expr', 'term' ]: + # An ancestor "arith_expr" or "term" means we have found an operator + # with lower precedence than our tok. + return True + if predecessor_type != 'atom': + # We understand the context to look for precedence within as an + # arbitrary nesting of "arith_expr", "term", and "atom" nodes. If we + # leave this context we have not found a lower precedence operator. + return False + # Under normal usage we expect a complete parse tree to be available and + # we will return before we get an AttributeError from the root. 
+ ancestor = ancestor.parent -def _PriorityIndicatingNoSpace(tok): - """Whether to remove spaces around an operator due to precedence.""" - if not tok.is_arithmetic_op or not tok.is_simple_expr: - # Limit space removal to highest priority arithmetic operators - return False - return _HasPrecedence(tok) +def _PriorityIndicatingNoSpace( tok ): + """Whether to remove spaces around an operator due to precedence.""" + if not tok.is_arithmetic_op or not tok.is_simple_expr: + # Limit space removal to highest priority arithmetic operators + return False + return _HasPrecedence( tok ) -def _IsSubscriptColonAndValuePair(token1, token2): - return (token1.is_number or token1.is_name) and token2.is_subscript_colon +def _IsSubscriptColonAndValuePair( token1, token2 ): + return ( token1.is_number or token1.is_name ) and token2.is_subscript_colon -def _SpaceRequiredBetween(left, right, is_line_disabled): - """Return True if a space is required between the left and right token.""" - lval = left.value - rval = right.value - if (left.is_pseudo and _IsIdNumberStringToken(right) and - left.previous_token and _IsIdNumberStringToken(left.previous_token)): - # Space between keyword... tokens and pseudo parens. - return True - if left.is_pseudo or right.is_pseudo: - # There should be a space after the ':' in a dictionary. - if left.OpensScope(): - return True - # The closing pseudo-paren shouldn't affect spacing. - return False - if left.is_continuation or right.is_continuation: - # The continuation node's value has all of the spaces it needs. - return False - if right.name in pytree_utils.NONSEMANTIC_TOKENS: - # No space before a non-semantic token. - return False - if _IsIdNumberStringToken(left) and _IsIdNumberStringToken(right): - # Spaces between keyword, string, number, and identifier tokens. - return True - if lval == ',' and rval == ':': - # We do want a space between a comma and colon. 
- return True - if style.Get('SPACE_INSIDE_BRACKETS'): - # Supersede the "no space before a colon or comma" check. - if left.OpensScope() and rval == ':': - return True - if right.ClosesScope() and lval == ':': - return True - if (style.Get('SPACES_AROUND_SUBSCRIPT_COLON') and - (_IsSubscriptColonAndValuePair(left, right) or - _IsSubscriptColonAndValuePair(right, left))): - # Supersede the "never want a space before a colon or comma" check. - return True - if rval in ':,': - # Otherwise, we never want a space before a colon or comma. - return False - if lval == ',' and rval in ']})': - # Add a space between ending ',' and closing bracket if requested. - return style.Get('SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET') - if lval == ',': - # We want a space after a comma. - return True - if lval == 'from' and rval == '.': - # Space before the '.' in an import statement. - return True - if lval == '.' and rval == 'import': - # Space after the '.' in an import statement. - return True - if (lval == '=' and rval in {'.', ',,,'} and - subtypes.DEFAULT_OR_NAMED_ASSIGN not in left.subtypes): - # Space between equal and '.' as in "X = ...". - return True - if lval == ':' and rval in {'.', '...'}: - # Space between : and ... - return True - if ((right.is_keyword or right.is_name) and - (left.is_keyword or left.is_name)): - # Don't merge two keywords/identifiers. - return True - if (subtypes.SUBSCRIPT_COLON in left.subtypes or - subtypes.SUBSCRIPT_COLON in right.subtypes): - # A subscript shouldn't have spaces separating its colons. - return False - if (subtypes.TYPED_NAME in left.subtypes or - subtypes.TYPED_NAME in right.subtypes): - # A typed argument should have a space after the colon. - return True - if left.is_string: - if (rval == '=' and - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in right.subtypes): - # If there is a type hint, then we don't want to add a space between the - # equal sign and the hint. - return False - if rval not in '[)]}.' 
and not right.is_binary_op: - # A string followed by something other than a subscript, closing bracket, - # dot, or a binary op should have a space after it. - return True +def _SpaceRequiredBetween( left, right, is_line_disabled ): + """Return True if a space is required between the left and right token.""" + lval = left.value + rval = right.value + if ( left.is_pseudo and _IsIdNumberStringToken( right ) and left.previous_token and + _IsIdNumberStringToken( left.previous_token ) ): + # Space between keyword... tokens and pseudo parens. + return True + if left.is_pseudo or right.is_pseudo: + # There should be a space after the ':' in a dictionary. + if left.OpensScope(): + return True + # The closing pseudo-paren shouldn't affect spacing. + return False + if left.is_continuation or right.is_continuation: + # The continuation node's value has all of the spaces it needs. + return False + if right.name in pytree_utils.NONSEMANTIC_TOKENS: + # No space before a non-semantic token. + return False + if _IsIdNumberStringToken( left ) and _IsIdNumberStringToken( right ): + # Spaces between keyword, string, number, and identifier tokens. + return True + if lval == ',' and rval == ':': + # We do want a space between a comma and colon. + return True + if style.Get( 'SPACE_INSIDE_BRACKETS' ): + # Supersede the "no space before a colon or comma" check. + if left.OpensScope() and rval == ':': + return True + if right.ClosesScope() and lval == ':': + return True + if ( style.Get( 'SPACES_AROUND_SUBSCRIPT_COLON' ) and + ( _IsSubscriptColonAndValuePair( left, right ) or + _IsSubscriptColonAndValuePair( right, left ) ) ): + # Supersede the "never want a space before a colon or comma" check. + return True + if rval in ':,': + # Otherwise, we never want a space before a colon or comma. + return False + if lval == ',' and rval in ']})': + # Add a space between ending ',' and closing bracket if requested. 
+ return style.Get( 'SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET' ) + if lval == ',': + # We want a space after a comma. + return True + if lval == 'from' and rval == '.': + # Space before the '.' in an import statement. + return True + if lval == '.' and rval == 'import': + # Space after the '.' in an import statement. + return True + if ( lval == '=' and rval in { '.', ',,,' } and + subtypes.DEFAULT_OR_NAMED_ASSIGN not in left.subtypes ): + # Space between equal and '.' as in "X = ...". + return True + if lval == ':' and rval in { '.', '...' }: + # Space between : and ... + return True + if ( ( right.is_keyword or right.is_name ) and + ( left.is_keyword or left.is_name ) ): + # Don't merge two keywords/identifiers. + return True + if ( subtypes.SUBSCRIPT_COLON in left.subtypes or + subtypes.SUBSCRIPT_COLON in right.subtypes ): + # A subscript shouldn't have spaces separating its colons. + return False + if ( subtypes.TYPED_NAME in left.subtypes or + subtypes.TYPED_NAME in right.subtypes ): + # A typed argument should have a space after the colon. + return True + if left.is_string: + if ( rval == '=' and + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in right.subtypes ): + # If there is a type hint, then we don't want to add a space between the + # equal sign and the hint. + return False + if rval not in '[)]}.' and not right.is_binary_op: + # A string followed by something other than a subscript, closing bracket, + # dot, or a binary op should have a space after it. + return True + if right.ClosesScope(): + # A string followed by closing brackets should have a space after it + # depending on SPACE_INSIDE_BRACKETS. A string followed by opening + # brackets, however, should not. 
+ return style.Get( 'SPACE_INSIDE_BRACKETS' ) + if subtypes.SUBSCRIPT_BRACKET in right.subtypes: + # It's legal to do this in Python: 'hello'[a] + return False + if left.is_binary_op and lval != '**' and _IsUnaryOperator( right ): + # Space between the binary operator and the unary operator. + return True + if left.is_keyword and _IsUnaryOperator( right ): + # Handle things like "not -3 < x". + return True + if _IsUnaryOperator( left ) and _IsUnaryOperator( right ): + # No space between two unary operators. + return False + if left.is_binary_op or right.is_binary_op: + if lval == '**' or rval == '**': + # Space around the "power" operator. + return style.Get( 'SPACES_AROUND_POWER_OPERATOR' ) + # Enforce spaces around binary operators except the blocked ones. + block_list = style.Get( 'NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS' ) + if lval in block_list or rval in block_list: + return False + if style.Get( 'ARITHMETIC_PRECEDENCE_INDICATION' ): + if _PriorityIndicatingNoSpace( left ) or _PriorityIndicatingNoSpace( + right ): + return False + else: + return True + else: + return True + if ( _IsUnaryOperator( left ) and lval != 'not' and + ( right.is_name or right.is_number or rval == '(' ) ): + # The previous token was a unary op. No space is desired between it and + # the current token. + return False + if ( subtypes.DEFAULT_OR_NAMED_ASSIGN in left.subtypes and + subtypes.TYPED_NAME not in right.subtypes ): + # A named argument or default parameter shouldn't have spaces around it. + return style.Get( 'SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN' ) + if ( subtypes.DEFAULT_OR_NAMED_ASSIGN in right.subtypes and + subtypes.TYPED_NAME not in left.subtypes ): + # A named argument or default parameter shouldn't have spaces around it. 
+ return style.Get( 'SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN' ) + if ( subtypes.VARARGS_LIST in left.subtypes or + subtypes.VARARGS_LIST in right.subtypes ): + return False + if ( subtypes.VARARGS_STAR in left.subtypes or + subtypes.KWARGS_STAR_STAR in left.subtypes ): + # Don't add a space after a vararg's star or a keyword's star-star. + return False + if lval == '@' and subtypes.DECORATOR in left.subtypes: + # Decorators shouldn't be separated from the 'at' sign. + return False + if left.is_keyword and rval == '.': + # Add space between keywords and dots. + return lval not in { 'None', 'print' } + if lval == '.' and right.is_keyword: + # Add space between keywords and dots. + return rval not in { 'None', 'print' } + if lval == '.' or rval == '.': + # Don't place spaces between dots. + return False + if ( ( lval == '(' and rval == ')' ) or ( lval == '[' and rval == ']' ) or + ( lval == '{' and rval == '}' ) ): + # Empty objects shouldn't be separated by spaces. + return False + if not is_line_disabled and ( left.OpensScope() or right.ClosesScope() ): + if ( style.GetOrDefault( 'SPACES_AROUND_DICT_DELIMITERS', False ) and + ( ( lval == '{' and + _IsDictListTupleDelimiterTok( left, is_opening = True ) ) or + ( rval == '}' and + _IsDictListTupleDelimiterTok( right, is_opening = False ) ) ) ): + return True + if ( style.GetOrDefault( 'SPACES_AROUND_LIST_DELIMITERS', False ) and + ( ( lval == '[' and + _IsDictListTupleDelimiterTok( left, is_opening = True ) ) or + ( rval == ']' and + _IsDictListTupleDelimiterTok( right, is_opening = False ) ) ) ): + return True + if ( style.GetOrDefault( 'SPACES_AROUND_TUPLE_DELIMITERS', False ) and + ( ( lval == '(' and + _IsDictListTupleDelimiterTok( left, is_opening = True ) ) or + ( rval == ')' and + _IsDictListTupleDelimiterTok( right, is_opening = False ) ) ) ): + return True + if left.OpensScope() and right.OpensScope(): + # Nested objects' opening brackets shouldn't be separated, unless enabled + # by SPACE_INSIDE_BRACKETS. 
+ return style.Get( 'SPACE_INSIDE_BRACKETS' ) + if left.ClosesScope() and right.ClosesScope(): + # Nested objects' closing brackets shouldn't be separated, unless enabled + # by SPACE_INSIDE_BRACKETS. + return style.Get( 'SPACE_INSIDE_BRACKETS' ) + if left.ClosesScope() and rval in '([': + # A call, set, dictionary, or subscript that has a call or subscript after + # it shouldn't have a space between them. + return False + if left.OpensScope() and _IsIdNumberStringToken( right ): + # Don't separate the opening bracket from the first item, unless enabled + # by SPACE_INSIDE_BRACKETS. + return style.Get( 'SPACE_INSIDE_BRACKETS' ) + if left.is_name and rval in '([': + # Don't separate a call or array access from the name. + return False if right.ClosesScope(): - # A string followed by closing brackets should have a space after it - # depending on SPACE_INSIDE_BRACKETS. A string followed by opening - # brackets, however, should not. - return style.Get('SPACE_INSIDE_BRACKETS') - if subtypes.SUBSCRIPT_BRACKET in right.subtypes: - # It's legal to do this in Python: 'hello'[a] - return False - if left.is_binary_op and lval != '**' and _IsUnaryOperator(right): - # Space between the binary operator and the unary operator. + # Don't separate the closing bracket from the last item, unless enabled + # by SPACE_INSIDE_BRACKETS. + # FIXME(morbo): This might be too permissive. + return style.Get( 'SPACE_INSIDE_BRACKETS' ) + if lval == 'print' and rval == '(': + # Special support for the 'print' function. + return False + if left.OpensScope() and _IsUnaryOperator( right ): + # Don't separate a unary operator from the opening bracket, unless enabled + # by SPACE_INSIDE_BRACKETS. + return style.Get( 'SPACE_INSIDE_BRACKETS' ) + if ( left.OpensScope() and ( subtypes.VARARGS_STAR in right.subtypes or + subtypes.KWARGS_STAR_STAR in right.subtypes ) ): + # Don't separate a '*' or '**' from the opening bracket, unless enabled + # by SPACE_INSIDE_BRACKETS. 
+ return style.Get( 'SPACE_INSIDE_BRACKETS' ) + if rval == ';': + # Avoid spaces before a semicolon. (Why is there a semicolon?!) + return False + if lval == '(' and rval == 'await': + # Special support for the 'await' keyword. Don't separate the 'await' + # keyword from an opening paren, unless enabled by SPACE_INSIDE_BRACKETS. + return style.Get( 'SPACE_INSIDE_BRACKETS' ) return True - if left.is_keyword and _IsUnaryOperator(right): - # Handle things like "not -3 < x". + + +def _MustBreakBefore( prev_token, cur_token ): + """Return True if a line break is required before the current token.""" + if prev_token.is_comment or ( prev_token.previous_token and prev_token.is_pseudo and + prev_token.previous_token.is_comment ): + # Must break if the previous token was a comment. + return True + if ( cur_token.is_string and prev_token.is_string and + IsSurroundedByBrackets( cur_token ) ): + # We want consecutive strings to be on separate lines. This is a + # reasonable assumption, because otherwise they should have written them + # all on the same line, or with a '+'. + return True + return cur_token.must_break_before + + +def _CanBreakBefore( prev_token, cur_token ): + """Return True if a line break may occur before the current token.""" + pval = prev_token.value + cval = cur_token.value + if py3compat.PY3: + if pval == 'yield' and cval == 'from': + # Don't break before a yield argument. + return False + if pval in { 'async', 'await' } and cval in { 'def', 'with', 'for' }: + # Don't break after sync keywords. + return False + if cur_token.split_penalty >= split_penalty.UNBREAKABLE: + return False + if pval == '@': + # Don't break right after the beginning of a decorator. + return False + if cval == ':': + # Don't break before the start of a block of code. + return False + if cval == ',': + # Don't break before a comma. + return False + if prev_token.is_name and cval == '(': + # Don't break in the middle of a function definition or call. 
+ return False + if prev_token.is_name and cval == '[': + # Don't break in the middle of an array dereference. + return False + if cur_token.is_comment and prev_token.lineno == cur_token.lineno: + # Don't break a comment at the end of the line. + return False + if subtypes.UNARY_OPERATOR in prev_token.subtypes: + # Don't break after a unary token. + return False + if not style.Get( 'ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS' ): + if ( subtypes.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes or + subtypes.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes ): + return False return True - if _IsUnaryOperator(left) and _IsUnaryOperator(right): - # No space between two unary operators. - return False - if left.is_binary_op or right.is_binary_op: - if lval == '**' or rval == '**': - # Space around the "power" operator. - return style.Get('SPACES_AROUND_POWER_OPERATOR') - # Enforce spaces around binary operators except the blocked ones. - block_list = style.Get('NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS') - if lval in block_list or rval in block_list: - return False - if style.Get('ARITHMETIC_PRECEDENCE_INDICATION'): - if _PriorityIndicatingNoSpace(left) or _PriorityIndicatingNoSpace(right): + + +def IsSurroundedByBrackets( tok ): + """Return True if the token is surrounded by brackets.""" + paren_count = 0 + brace_count = 0 + sq_bracket_count = 0 + previous_token = tok.previous_token + while previous_token: + if previous_token.value == ')': + paren_count -= 1 + elif previous_token.value == '}': + brace_count -= 1 + elif previous_token.value == ']': + sq_bracket_count -= 1 + + if previous_token.value == '(': + if paren_count == 0: + return previous_token + paren_count += 1 + elif previous_token.value == '{': + if brace_count == 0: + return previous_token + brace_count += 1 + elif previous_token.value == '[': + if sq_bracket_count == 0: + return previous_token + sq_bracket_count += 1 + + previous_token = previous_token.previous_token + return None + + +def 
_IsDictListTupleDelimiterTok( tok, is_opening ): + assert tok + + if tok.matching_bracket is None: return False - else: - return True + + if is_opening: + open_tok = tok + close_tok = tok.matching_bracket else: - return True - if (_IsUnaryOperator(left) and lval != 'not' and - (right.is_name or right.is_number or rval == '(')): - # The previous token was a unary op. No space is desired between it and - # the current token. - return False - if (subtypes.DEFAULT_OR_NAMED_ASSIGN in left.subtypes and - subtypes.TYPED_NAME not in right.subtypes): - # A named argument or default parameter shouldn't have spaces around it. - return style.Get('SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN') - if (subtypes.DEFAULT_OR_NAMED_ASSIGN in right.subtypes and - subtypes.TYPED_NAME not in left.subtypes): - # A named argument or default parameter shouldn't have spaces around it. - return style.Get('SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN') - if (subtypes.VARARGS_LIST in left.subtypes or - subtypes.VARARGS_LIST in right.subtypes): - return False - if (subtypes.VARARGS_STAR in left.subtypes or - subtypes.KWARGS_STAR_STAR in left.subtypes): - # Don't add a space after a vararg's star or a keyword's star-star. - return False - if lval == '@' and subtypes.DECORATOR in left.subtypes: - # Decorators shouldn't be separated from the 'at' sign. - return False - if left.is_keyword and rval == '.': - # Add space between keywords and dots. - return lval not in {'None', 'print'} - if lval == '.' and right.is_keyword: - # Add space between keywords and dots. - return rval not in {'None', 'print'} - if lval == '.' or rval == '.': - # Don't place spaces between dots. - return False - if ((lval == '(' and rval == ')') or (lval == '[' and rval == ']') or - (lval == '{' and rval == '}')): - # Empty objects shouldn't be separated by spaces. 
- return False - if not is_line_disabled and (left.OpensScope() or right.ClosesScope()): - if (style.GetOrDefault('SPACES_AROUND_DICT_DELIMITERS', False) and ( - (lval == '{' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or - (rval == '}' and - _IsDictListTupleDelimiterTok(right, is_opening=False)))): - return True - if (style.GetOrDefault('SPACES_AROUND_LIST_DELIMITERS', False) and ( - (lval == '[' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or - (rval == ']' and - _IsDictListTupleDelimiterTok(right, is_opening=False)))): - return True - if (style.GetOrDefault('SPACES_AROUND_TUPLE_DELIMITERS', False) and ( - (lval == '(' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or - (rval == ')' and - _IsDictListTupleDelimiterTok(right, is_opening=False)))): - return True - if left.OpensScope() and right.OpensScope(): - # Nested objects' opening brackets shouldn't be separated, unless enabled - # by SPACE_INSIDE_BRACKETS. - return style.Get('SPACE_INSIDE_BRACKETS') - if left.ClosesScope() and right.ClosesScope(): - # Nested objects' closing brackets shouldn't be separated, unless enabled - # by SPACE_INSIDE_BRACKETS. - return style.Get('SPACE_INSIDE_BRACKETS') - if left.ClosesScope() and rval in '([': - # A call, set, dictionary, or subscript that has a call or subscript after - # it shouldn't have a space between them. - return False - if left.OpensScope() and _IsIdNumberStringToken(right): - # Don't separate the opening bracket from the first item, unless enabled - # by SPACE_INSIDE_BRACKETS. - return style.Get('SPACE_INSIDE_BRACKETS') - if left.is_name and rval in '([': - # Don't separate a call or array access from the name. - return False - if right.ClosesScope(): - # Don't separate the closing bracket from the last item, unless enabled - # by SPACE_INSIDE_BRACKETS. - # FIXME(morbo): This might be too permissive. 
- return style.Get('SPACE_INSIDE_BRACKETS') - if lval == 'print' and rval == '(': - # Special support for the 'print' function. - return False - if left.OpensScope() and _IsUnaryOperator(right): - # Don't separate a unary operator from the opening bracket, unless enabled - # by SPACE_INSIDE_BRACKETS. - return style.Get('SPACE_INSIDE_BRACKETS') - if (left.OpensScope() and (subtypes.VARARGS_STAR in right.subtypes or - subtypes.KWARGS_STAR_STAR in right.subtypes)): - # Don't separate a '*' or '**' from the opening bracket, unless enabled - # by SPACE_INSIDE_BRACKETS. - return style.Get('SPACE_INSIDE_BRACKETS') - if rval == ';': - # Avoid spaces before a semicolon. (Why is there a semicolon?!) - return False - if lval == '(' and rval == 'await': - # Special support for the 'await' keyword. Don't separate the 'await' - # keyword from an opening paren, unless enabled by SPACE_INSIDE_BRACKETS. - return style.Get('SPACE_INSIDE_BRACKETS') - return True - - -def _MustBreakBefore(prev_token, cur_token): - """Return True if a line break is required before the current token.""" - if prev_token.is_comment or (prev_token.previous_token and - prev_token.is_pseudo and - prev_token.previous_token.is_comment): - # Must break if the previous token was a comment. - return True - if (cur_token.is_string and prev_token.is_string and - IsSurroundedByBrackets(cur_token)): - # We want consecutive strings to be on separate lines. This is a - # reasonable assumption, because otherwise they should have written them - # all on the same line, or with a '+'. - return True - return cur_token.must_break_before - - -def _CanBreakBefore(prev_token, cur_token): - """Return True if a line break may occur before the current token.""" - pval = prev_token.value - cval = cur_token.value - if py3compat.PY3: - if pval == 'yield' and cval == 'from': - # Don't break before a yield argument. - return False - if pval in {'async', 'await'} and cval in {'def', 'with', 'for'}: - # Don't break after sync keywords. 
- return False - if cur_token.split_penalty >= split_penalty.UNBREAKABLE: - return False - if pval == '@': - # Don't break right after the beginning of a decorator. - return False - if cval == ':': - # Don't break before the start of a block of code. - return False - if cval == ',': - # Don't break before a comma. - return False - if prev_token.is_name and cval == '(': - # Don't break in the middle of a function definition or call. - return False - if prev_token.is_name and cval == '[': - # Don't break in the middle of an array dereference. - return False - if cur_token.is_comment and prev_token.lineno == cur_token.lineno: - # Don't break a comment at the end of the line. - return False - if subtypes.UNARY_OPERATOR in prev_token.subtypes: - # Don't break after a unary token. - return False - if not style.Get('ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS'): - if (subtypes.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes or - subtypes.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes): - return False - return True - - -def IsSurroundedByBrackets(tok): - """Return True if the token is surrounded by brackets.""" - paren_count = 0 - brace_count = 0 - sq_bracket_count = 0 - previous_token = tok.previous_token - while previous_token: - if previous_token.value == ')': - paren_count -= 1 - elif previous_token.value == '}': - brace_count -= 1 - elif previous_token.value == ']': - sq_bracket_count -= 1 - - if previous_token.value == '(': - if paren_count == 0: - return previous_token - paren_count += 1 - elif previous_token.value == '{': - if brace_count == 0: - return previous_token - brace_count += 1 - elif previous_token.value == '[': - if sq_bracket_count == 0: - return previous_token - sq_bracket_count += 1 - - previous_token = previous_token.previous_token - return None - - -def _IsDictListTupleDelimiterTok(tok, is_opening): - assert tok - - if tok.matching_bracket is None: - return False - - if is_opening: - open_tok = tok - close_tok = tok.matching_bracket - else: - open_tok 
= tok.matching_bracket - close_tok = tok - - # There must be something in between the tokens - if open_tok.next_token == close_tok: - return False - - assert open_tok.next_token.node - assert open_tok.next_token.node.parent - - return open_tok.next_token.node.parent.type in [ - python_symbols.dictsetmaker, - python_symbols.listmaker, - python_symbols.testlist_gexp, - ] - - -_LOGICAL_OPERATORS = frozenset({'and', 'or'}) -_BITWISE_OPERATORS = frozenset({'&', '|', '^'}) -_ARITHMETIC_OPERATORS = frozenset({'+', '-', '*', '/', '%', '//', '@'}) - - -def _SplitPenalty(prev_token, cur_token): - """Return the penalty for breaking the line before the current token.""" - pval = prev_token.value - cval = cur_token.value - if pval == 'not': - return split_penalty.UNBREAKABLE - - if cur_token.node_split_penalty > 0: - return cur_token.node_split_penalty - - if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'): - # Prefer to split before 'and' and 'or'. - if pval in _LOGICAL_OPERATORS: - return style.Get('SPLIT_PENALTY_LOGICAL_OPERATOR') - if cval in _LOGICAL_OPERATORS: - return 0 - else: - # Prefer to split after 'and' and 'or'. - if pval in _LOGICAL_OPERATORS: - return 0 - if cval in _LOGICAL_OPERATORS: - return style.Get('SPLIT_PENALTY_LOGICAL_OPERATOR') - - if style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'): - # Prefer to split before '&', '|', and '^'. - if pval in _BITWISE_OPERATORS: - return style.Get('SPLIT_PENALTY_BITWISE_OPERATOR') - if cval in _BITWISE_OPERATORS: - return 0 - else: - # Prefer to split after '&', '|', and '^'. - if pval in _BITWISE_OPERATORS: - return 0 - if cval in _BITWISE_OPERATORS: - return style.Get('SPLIT_PENALTY_BITWISE_OPERATOR') - - if (subtypes.COMP_FOR in cur_token.subtypes or - subtypes.COMP_IF in cur_token.subtypes): - # We don't mind breaking before the 'for' or 'if' of a list comprehension. - return 0 - if subtypes.UNARY_OPERATOR in prev_token.subtypes: - # Try not to break after a unary operator. 
- return style.Get('SPLIT_PENALTY_AFTER_UNARY_OPERATOR') - if pval == ',': - # Breaking after a comma is fine, if need be. + open_tok = tok.matching_bracket + close_tok = tok + + # There must be something in between the tokens + if open_tok.next_token == close_tok: + return False + + assert open_tok.next_token.node + assert open_tok.next_token.node.parent + + return open_tok.next_token.node.parent.type in [ + python_symbols.dictsetmaker, + python_symbols.listmaker, + python_symbols.testlist_gexp, + ] + + +_LOGICAL_OPERATORS = frozenset( { 'and', 'or' } ) +_BITWISE_OPERATORS = frozenset( { '&', '|', '^' } ) +_ARITHMETIC_OPERATORS = frozenset( { '+', '-', '*', '/', '%', '//', '@' } ) + + +def _SplitPenalty( prev_token, cur_token ): + """Return the penalty for breaking the line before the current token.""" + pval = prev_token.value + cval = cur_token.value + if pval == 'not': + return split_penalty.UNBREAKABLE + + if cur_token.node_split_penalty > 0: + return cur_token.node_split_penalty + + if style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ): + # Prefer to split before 'and' and 'or'. + if pval in _LOGICAL_OPERATORS: + return style.Get( 'SPLIT_PENALTY_LOGICAL_OPERATOR' ) + if cval in _LOGICAL_OPERATORS: + return 0 + else: + # Prefer to split after 'and' and 'or'. + if pval in _LOGICAL_OPERATORS: + return 0 + if cval in _LOGICAL_OPERATORS: + return style.Get( 'SPLIT_PENALTY_LOGICAL_OPERATOR' ) + + if style.Get( 'SPLIT_BEFORE_BITWISE_OPERATOR' ): + # Prefer to split before '&', '|', and '^'. + if pval in _BITWISE_OPERATORS: + return style.Get( 'SPLIT_PENALTY_BITWISE_OPERATOR' ) + if cval in _BITWISE_OPERATORS: + return 0 + else: + # Prefer to split after '&', '|', and '^'. + if pval in _BITWISE_OPERATORS: + return 0 + if cval in _BITWISE_OPERATORS: + return style.Get( 'SPLIT_PENALTY_BITWISE_OPERATOR' ) + + if ( subtypes.COMP_FOR in cur_token.subtypes or + subtypes.COMP_IF in cur_token.subtypes ): + # We don't mind breaking before the 'for' or 'if' of a list comprehension. 
+ return 0 + if subtypes.UNARY_OPERATOR in prev_token.subtypes: + # Try not to break after a unary operator. + return style.Get( 'SPLIT_PENALTY_AFTER_UNARY_OPERATOR' ) + if pval == ',': + # Breaking after a comma is fine, if need be. + return 0 + if pval == '**' or cval == '**': + return split_penalty.STRONGLY_CONNECTED + if ( subtypes.VARARGS_STAR in prev_token.subtypes or + subtypes.KWARGS_STAR_STAR in prev_token.subtypes ): + # Don't split after a varargs * or kwargs **. + return split_penalty.UNBREAKABLE + if prev_token.OpensScope() and cval != '(': + # Slightly prefer + return style.Get( 'SPLIT_PENALTY_AFTER_OPENING_BRACKET' ) + if cval == ':': + # Don't split before a colon. + return split_penalty.UNBREAKABLE + if cval == '=': + # Don't split before an assignment. + return split_penalty.UNBREAKABLE + if ( subtypes.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes or + subtypes.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes ): + # Don't break before or after an default or named assignment. + return split_penalty.UNBREAKABLE + if cval == '==': + # We would rather not split before an equality operator. + return split_penalty.STRONGLY_CONNECTED + if cur_token.ClosesScope(): + # Give a slight penalty for splitting before the closing scope. + return 100 return 0 - if pval == '**' or cval == '**': - return split_penalty.STRONGLY_CONNECTED - if (subtypes.VARARGS_STAR in prev_token.subtypes or - subtypes.KWARGS_STAR_STAR in prev_token.subtypes): - # Don't split after a varargs * or kwargs **. - return split_penalty.UNBREAKABLE - if prev_token.OpensScope() and cval != '(': - # Slightly prefer - return style.Get('SPLIT_PENALTY_AFTER_OPENING_BRACKET') - if cval == ':': - # Don't split before a colon. - return split_penalty.UNBREAKABLE - if cval == '=': - # Don't split before an assignment. 
- return split_penalty.UNBREAKABLE - if (subtypes.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes or - subtypes.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes): - # Don't break before or after an default or named assignment. - return split_penalty.UNBREAKABLE - if cval == '==': - # We would rather not split before an equality operator. - return split_penalty.STRONGLY_CONNECTED - if cur_token.ClosesScope(): - # Give a slight penalty for splitting before the closing scope. - return 100 - return 0 diff --git a/yapf/yapflib/object_state.py b/yapf/yapflib/object_state.py index ec259e682..58dd6fe18 100644 --- a/yapf/yapflib/object_state.py +++ b/yapf/yapflib/object_state.py @@ -27,8 +27,8 @@ from yapf.yapflib import subtypes -class ComprehensionState(object): - """Maintains the state of list comprehension formatting decisions. +class ComprehensionState( object ): + """Maintains the state of list comprehension formatting decisions. A stack of ComprehensionState objects are kept to ensure that list comprehensions are wrapped with well-defined rules. @@ -44,50 +44,53 @@ class ComprehensionState(object): That is, a split somewhere after expr_token or before closing_bracket. """ - def __init__(self, expr_token): - self.expr_token = expr_token - self.for_token = None - self.has_split_at_for = False - self.has_interior_split = False + def __init__( self, expr_token ): + self.expr_token = expr_token + self.for_token = None + self.has_split_at_for = False + self.has_interior_split = False - def HasTrivialExpr(self): - """Returns whether the comp_expr is "trivial" i.e. is a single token.""" - return self.expr_token.next_token.value == 'for' + def HasTrivialExpr( self ): + """Returns whether the comp_expr is "trivial" i.e. 
is a single token.""" + return self.expr_token.next_token.value == 'for' - @property - def opening_bracket(self): - return self.expr_token.previous_token + @property + def opening_bracket( self ): + return self.expr_token.previous_token - @property - def closing_bracket(self): - return self.opening_bracket.matching_bracket + @property + def closing_bracket( self ): + return self.opening_bracket.matching_bracket - def Clone(self): - clone = ComprehensionState(self.expr_token) - clone.for_token = self.for_token - clone.has_split_at_for = self.has_split_at_for - clone.has_interior_split = self.has_interior_split - return clone + def Clone( self ): + clone = ComprehensionState( self.expr_token ) + clone.for_token = self.for_token + clone.has_split_at_for = self.has_split_at_for + clone.has_interior_split = self.has_interior_split + return clone - def __repr__(self): - return ('[opening_bracket::%s, for_token::%s, has_split_at_for::%s,' - ' has_interior_split::%s, has_trivial_expr::%s]' % - (self.opening_bracket, self.for_token, self.has_split_at_for, - self.has_interior_split, self.HasTrivialExpr())) + def __repr__( self ): + return ( + '[opening_bracket::%s, for_token::%s, has_split_at_for::%s,' + ' has_interior_split::%s, has_trivial_expr::%s]' % ( + self.opening_bracket, self.for_token, self.has_split_at_for, + self.has_interior_split, self.HasTrivialExpr() ) ) - def __eq__(self, other): - return hash(self) == hash(other) + def __eq__( self, other ): + return hash( self ) == hash( other ) - def __ne__(self, other): - return not self == other + def __ne__( self, other ): + return not self == other - def __hash__(self, *args, **kwargs): - return hash((self.expr_token, self.for_token, self.has_split_at_for, - self.has_interior_split)) + def __hash__( self, *args, **kwargs ): + return hash( + ( + self.expr_token, self.for_token, self.has_split_at_for, + self.has_interior_split ) ) -class ParameterListState(object): - """Maintains the state of function parameter list 
formatting decisions. +class ParameterListState( object ): + """Maintains the state of function parameter list formatting decisions. Attributes: opening_bracket: The opening bracket of the parameter list. @@ -104,95 +107,97 @@ class ParameterListState(object): needed if the indentation would collide. """ - def __init__(self, opening_bracket, newline, opening_column): - self.opening_bracket = opening_bracket - self.has_split_before_first_param = newline - self.opening_column = opening_column - self.parameters = opening_bracket.parameters - self.split_before_closing_bracket = False - - @property - def closing_bracket(self): - return self.opening_bracket.matching_bracket - - @property - def has_typed_return(self): - return self.closing_bracket.next_token.value == '->' - - @property - @py3compat.lru_cache() - def has_default_values(self): - return any(param.has_default_value for param in self.parameters) - - @property - @py3compat.lru_cache() - def ends_in_comma(self): - if not self.parameters: - return False - return self.parameters[-1].last_token.next_token.value == ',' - - @property - @py3compat.lru_cache() - def last_token(self): - token = self.opening_bracket.matching_bracket - while not token.is_comment and token.next_token: - token = token.next_token - return token - - @py3compat.lru_cache() - def LastParamFitsOnLine(self, indent): - """Return true if the last parameter fits on a single line.""" - if not self.has_typed_return: - return False - if not self.parameters: - return True - total_length = self.last_token.total_length - last_param = self.parameters[-1].first_token - total_length -= last_param.total_length - len(last_param.value) - return total_length + indent <= style.Get('COLUMN_LIMIT') - - @py3compat.lru_cache() - def SplitBeforeClosingBracket(self, indent): - """Return true if there's a split before the closing bracket.""" - if style.Get('DEDENT_CLOSING_BRACKETS'): - return True - if self.ends_in_comma: - return True - if not self.parameters: - return 
False - total_length = self.last_token.total_length - last_param = self.parameters[-1].first_token - total_length -= last_param.total_length - len(last_param.value) - return total_length + indent > style.Get('COLUMN_LIMIT') - - def Clone(self): - clone = ParameterListState(self.opening_bracket, - self.has_split_before_first_param, - self.opening_column) - clone.split_before_closing_bracket = self.split_before_closing_bracket - clone.parameters = [param.Clone() for param in self.parameters] - return clone - - def __repr__(self): - return ('[opening_bracket::%s, has_split_before_first_param::%s, ' - 'opening_column::%d]' % - (self.opening_bracket, self.has_split_before_first_param, - self.opening_column)) - - def __eq__(self, other): - return hash(self) == hash(other) - - def __ne__(self, other): - return not self == other - - def __hash__(self, *args, **kwargs): - return hash( - (self.opening_bracket, self.has_split_before_first_param, - self.opening_column, (hash(param) for param in self.parameters))) - - -class Parameter(object): - """A parameter in a parameter list. 
+ def __init__( self, opening_bracket, newline, opening_column ): + self.opening_bracket = opening_bracket + self.has_split_before_first_param = newline + self.opening_column = opening_column + self.parameters = opening_bracket.parameters + self.split_before_closing_bracket = False + + @property + def closing_bracket( self ): + return self.opening_bracket.matching_bracket + + @property + def has_typed_return( self ): + return self.closing_bracket.next_token.value == '->' + + @property + @py3compat.lru_cache() + def has_default_values( self ): + return any( param.has_default_value for param in self.parameters ) + + @property + @py3compat.lru_cache() + def ends_in_comma( self ): + if not self.parameters: + return False + return self.parameters[ -1 ].last_token.next_token.value == ',' + + @property + @py3compat.lru_cache() + def last_token( self ): + token = self.opening_bracket.matching_bracket + while not token.is_comment and token.next_token: + token = token.next_token + return token + + @py3compat.lru_cache() + def LastParamFitsOnLine( self, indent ): + """Return true if the last parameter fits on a single line.""" + if not self.has_typed_return: + return False + if not self.parameters: + return True + total_length = self.last_token.total_length + last_param = self.parameters[ -1 ].first_token + total_length -= last_param.total_length - len( last_param.value ) + return total_length + indent <= style.Get( 'COLUMN_LIMIT' ) + + @py3compat.lru_cache() + def SplitBeforeClosingBracket( self, indent ): + """Return true if there's a split before the closing bracket.""" + if style.Get( 'DEDENT_CLOSING_BRACKETS' ): + return True + if self.ends_in_comma: + return True + if not self.parameters: + return False + total_length = self.last_token.total_length + last_param = self.parameters[ -1 ].first_token + total_length -= last_param.total_length - len( last_param.value ) + return total_length + indent > style.Get( 'COLUMN_LIMIT' ) + + def Clone( self ): + clone = 
ParameterListState( + self.opening_bracket, self.has_split_before_first_param, + self.opening_column ) + clone.split_before_closing_bracket = self.split_before_closing_bracket + clone.parameters = [ param.Clone() for param in self.parameters ] + return clone + + def __repr__( self ): + return ( + '[opening_bracket::%s, has_split_before_first_param::%s, ' + 'opening_column::%d]' % ( + self.opening_bracket, self.has_split_before_first_param, + self.opening_column ) ) + + def __eq__( self, other ): + return hash( self ) == hash( other ) + + def __ne__( self, other ): + return not self == other + + def __hash__( self, *args, **kwargs ): + return hash( + ( + self.opening_bracket, self.has_split_before_first_param, + self.opening_column, ( hash( param ) for param in self.parameters ) ) ) + + +class Parameter( object ): + """A parameter in a parameter list. Attributes: first_token: (format_token.FormatToken) First token of parameter. @@ -200,33 +205,33 @@ class Parameter(object): has_default_value: (boolean) True if the parameter has a default value """ - def __init__(self, first_token, last_token): - self.first_token = first_token - self.last_token = last_token + def __init__( self, first_token, last_token ): + self.first_token = first_token + self.last_token = last_token - @property - @py3compat.lru_cache() - def has_default_value(self): - """Returns true if the parameter has a default value.""" - tok = self.first_token - while tok != self.last_token: - if subtypes.DEFAULT_OR_NAMED_ASSIGN in tok.subtypes: - return True - tok = tok.matching_bracket if tok.OpensScope() else tok.next_token - return False + @property + @py3compat.lru_cache() + def has_default_value( self ): + """Returns true if the parameter has a default value.""" + tok = self.first_token + while tok != self.last_token: + if subtypes.DEFAULT_OR_NAMED_ASSIGN in tok.subtypes: + return True + tok = tok.matching_bracket if tok.OpensScope() else tok.next_token + return False - def Clone(self): - return 
Parameter(self.first_token, self.last_token) + def Clone( self ): + return Parameter( self.first_token, self.last_token ) - def __repr__(self): - return '[first_token::%s, last_token:%s]' % (self.first_token, - self.last_token) + def __repr__( self ): + return '[first_token::%s, last_token:%s]' % ( + self.first_token, self.last_token ) - def __eq__(self, other): - return hash(self) == hash(other) + def __eq__( self, other ): + return hash( self ) == hash( other ) - def __ne__(self, other): - return not self == other + def __ne__( self, other ): + return not self == other - def __hash__(self, *args, **kwargs): - return hash((self.first_token, self.last_token)) + def __hash__( self, *args, **kwargs ): + return hash( ( self.first_token, self.last_token ) ) diff --git a/yapf/yapflib/py3compat.py b/yapf/yapflib/py3compat.py index e4cb9788f..143a13c3e 100644 --- a/yapf/yapflib/py3compat.py +++ b/yapf/yapflib/py3compat.py @@ -18,75 +18,75 @@ import os import sys -PY3 = sys.version_info[0] >= 3 -PY36 = sys.version_info[0] >= 3 and sys.version_info[1] >= 6 -PY37 = sys.version_info[0] >= 3 and sys.version_info[1] >= 7 -PY38 = sys.version_info[0] >= 3 and sys.version_info[1] >= 8 +PY3 = sys.version_info[ 0 ] >= 3 +PY36 = sys.version_info[ 0 ] >= 3 and sys.version_info[ 1 ] >= 6 +PY37 = sys.version_info[ 0 ] >= 3 and sys.version_info[ 1 ] >= 7 +PY38 = sys.version_info[ 0 ] >= 3 and sys.version_info[ 1 ] >= 8 if PY3: - StringIO = io.StringIO - BytesIO = io.BytesIO + StringIO = io.StringIO + BytesIO = io.BytesIO - import codecs # noqa: F811 + import codecs # noqa: F811 - def open_with_encoding(filename, mode, encoding, newline=''): # pylint: disable=unused-argument # noqa - return codecs.open(filename, mode=mode, encoding=encoding) + def open_with_encoding( filename, mode, encoding, newline = '' ): # pylint: disable=unused-argument # noqa + return codecs.open( filename, mode = mode, encoding = encoding ) - import functools - lru_cache = functools.lru_cache + import functools + 
lru_cache = functools.lru_cache - range = range - ifilter = filter + range = range + ifilter = filter - def raw_input(): - wrapper = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8') - return wrapper.buffer.raw.readall().decode('utf-8') + def raw_input(): + wrapper = io.TextIOWrapper( sys.stdin.buffer, encoding = 'utf-8' ) + return wrapper.buffer.raw.readall().decode( 'utf-8' ) - import configparser + import configparser - # Mappings from strings to booleans (such as '1' to True, 'false' to False, - # etc.) - CONFIGPARSER_BOOLEAN_STATES = configparser.ConfigParser.BOOLEAN_STATES + # Mappings from strings to booleans (such as '1' to True, 'false' to False, + # etc.) + CONFIGPARSER_BOOLEAN_STATES = configparser.ConfigParser.BOOLEAN_STATES - import tokenize - detect_encoding = tokenize.detect_encoding - TokenInfo = tokenize.TokenInfo + import tokenize + detect_encoding = tokenize.detect_encoding + TokenInfo = tokenize.TokenInfo else: - import __builtin__ - import cStringIO - from itertools import ifilter + import __builtin__ + import cStringIO + from itertools import ifilter - StringIO = BytesIO = cStringIO.StringIO + StringIO = BytesIO = cStringIO.StringIO - open_with_encoding = io.open + open_with_encoding = io.open - # Python 2.7 doesn't have a native LRU cache, so do nothing. - def lru_cache(maxsize=128, typed=False): + # Python 2.7 doesn't have a native LRU cache, so do nothing. 
+ def lru_cache( maxsize = 128, typed = False ): - def fake_wrapper(user_function): - return user_function + def fake_wrapper( user_function ): + return user_function - return fake_wrapper + return fake_wrapper - range = xrange # noqa: F821 + range = xrange # noqa: F821 - raw_input = raw_input + raw_input = raw_input - import ConfigParser as configparser - CONFIGPARSER_BOOLEAN_STATES = configparser.ConfigParser._boolean_states # pylint: disable=protected-access # noqa + import ConfigParser as configparser + CONFIGPARSER_BOOLEAN_STATES = configparser.ConfigParser._boolean_states # pylint: disable=protected-access # noqa - from lib2to3.pgen2 import tokenize - detect_encoding = tokenize.detect_encoding + from lib2to3.pgen2 import tokenize + detect_encoding = tokenize.detect_encoding - import collections + import collections - class TokenInfo( - collections.namedtuple('TokenInfo', 'type string start end line')): - pass + class TokenInfo( collections.namedtuple( 'TokenInfo', + 'type string start end line' ) ): + pass -def EncodeAndWriteToStdout(s, encoding='utf-8'): - """Encode the given string and emit to stdout. +def EncodeAndWriteToStdout( s, encoding = 'utf-8' ): + """Encode the given string and emit to stdout. The string may contain non-ascii characters. This is a problem when stdout is redirected, because then Python doesn't know the encoding and we may get a @@ -96,50 +96,50 @@ def EncodeAndWriteToStdout(s, encoding='utf-8'): s: (string) The string to encode. encoding: (string) The encoding of the string. """ - if PY3: - sys.stdout.buffer.write(s.encode(encoding)) - elif sys.platform == 'win32': - # On python 2 and Windows universal newline transformation will be in - # effect on stdout. Python 2 will not let us avoid the easily because - # it happens based on whether the file handle is opened in O_BINARY or - # O_TEXT state. However we can tell Windows itself to change the current - # mode, and python 2 will follow suit. 
However we must take care to change - # the mode on the actual external stdout not just the current sys.stdout - # which may have been monkey-patched inside the python environment. - import msvcrt # pylint: disable=g-import-not-at-top - if sys.__stdout__ is sys.stdout: - msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) - sys.stdout.write(s.encode(encoding)) - else: - sys.stdout.write(s.encode(encoding)) + if PY3: + sys.stdout.buffer.write( s.encode( encoding ) ) + elif sys.platform == 'win32': + # On python 2 and Windows universal newline transformation will be in + # effect on stdout. Python 2 will not let us avoid the easily because + # it happens based on whether the file handle is opened in O_BINARY or + # O_TEXT state. However we can tell Windows itself to change the current + # mode, and python 2 will follow suit. However we must take care to change + # the mode on the actual external stdout not just the current sys.stdout + # which may have been monkey-patched inside the python environment. + import msvcrt # pylint: disable=g-import-not-at-top + if sys.__stdout__ is sys.stdout: + msvcrt.setmode( sys.stdout.fileno(), os.O_BINARY ) + sys.stdout.write( s.encode( encoding ) ) + else: + sys.stdout.write( s.encode( encoding ) ) if PY3: - basestring = str - unicode = str # pylint: disable=redefined-builtin,invalid-name + basestring = str + unicode = str # pylint: disable=redefined-builtin,invalid-name else: - basestring = basestring + basestring = basestring - def unicode(s): # pylint: disable=invalid-name - """Force conversion of s to unicode.""" - return __builtin__.unicode(s, 'utf-8') + def unicode( s ): # pylint: disable=invalid-name + """Force conversion of s to unicode.""" + return __builtin__.unicode( s, 'utf-8' ) # In Python 3.2+, readfp is deprecated in favor of read_file, which doesn't # exist in Python 2 yet. To avoid deprecation warnings, subclass ConfigParser to # fix this - now read_file works across all Python versions we care about. 
-class ConfigParser(configparser.ConfigParser): - if not PY3: +class ConfigParser( configparser.ConfigParser ): + if not PY3: - def read_file(self, fp, source=None): - self.readfp(fp, filename=source) + def read_file( self, fp, source = None ): + self.readfp( fp, filename = source ) -def removeBOM(source): - """Remove any Byte-order-Mark bytes from the beginning of a file.""" - bom = codecs.BOM_UTF8 - if PY3: - bom = bom.decode('utf-8') - if source.startswith(bom): - return source[len(bom):] - return source +def removeBOM( source ): + """Remove any Byte-order-Mark bytes from the beginning of a file.""" + bom = codecs.BOM_UTF8 + if PY3: + bom = bom.decode( 'utf-8' ) + if source.startswith( bom ): + return source[ len( bom ): ] + return source diff --git a/yapf/yapflib/reformatter.py b/yapf/yapflib/reformatter.py index 8f8a103f8..7e0fdf344 100644 --- a/yapf/yapflib/reformatter.py +++ b/yapf/yapflib/reformatter.py @@ -37,8 +37,8 @@ from yapf.yapflib import verifier -def Reformat(llines, verify=False, lines=None): - """Reformat the logical lines. +def Reformat( llines, verify = False, lines = None ): + """Reformat the logical lines. Arguments: llines: (list of logical_line.LogicalLine) Lines we want to format. @@ -49,144 +49,138 @@ def Reformat(llines, verify=False, lines=None): Returns: A string representing the reformatted code. """ - final_lines = [] - prev_line = None # The previous line. 
- indent_width = style.Get('INDENT_WIDTH') - - for lline in _SingleOrMergedLines(llines): - first_token = lline.first - _FormatFirstToken(first_token, lline.depth, prev_line, final_lines) - - indent_amt = indent_width * lline.depth - state = format_decision_state.FormatDecisionState(lline, indent_amt) - state.MoveStateToNextToken() - - if not lline.disable: - if lline.first.is_comment: - lline.first.value = lline.first.value.rstrip() - elif lline.last.is_comment: - lline.last.value = lline.last.value.rstrip() - if prev_line and prev_line.disable: - # Keep the vertical spacing between a disabled and enabled formatting - # region. - _RetainRequiredVerticalSpacingBetweenTokens(lline.first, prev_line.last, - lines) - if any(tok.is_comment for tok in lline.tokens): - _RetainVerticalSpacingBeforeComments(lline) - - if lline.disable or _LineHasContinuationMarkers(lline): - _RetainHorizontalSpacing(lline) - _RetainRequiredVerticalSpacing(lline, prev_line, lines) - _EmitLineUnformatted(state) - - elif (_LineContainsPylintDisableLineTooLong(lline) or - _LineContainsI18n(lline)): - # Don't modify vertical spacing, but fix any horizontal spacing issues. - _RetainRequiredVerticalSpacing(lline, prev_line, lines) - _EmitLineUnformatted(state) - - elif _CanPlaceOnSingleLine(lline) and not any(tok.must_break_before - for tok in lline.tokens): - # The logical line fits on one line. - while state.next_token: - state.AddTokenToState(newline=False, dry_run=False) - - elif not _AnalyzeSolutionSpace(state): - # Failsafe mode. If there isn't a solution to the line, then just emit - # it as is. 
- state = format_decision_state.FormatDecisionState(lline, indent_amt) - state.MoveStateToNextToken() - _RetainHorizontalSpacing(lline) - _RetainRequiredVerticalSpacing(lline, prev_line, None) - _EmitLineUnformatted(state) - - final_lines.append(lline) - prev_line = lline - - if style.Get('ALIGN_ASSIGNMENT'): - _AlignAssignment(final_lines) - if (style.Get('EACH_DICT_ENTRY_ON_SEPARATE_LINE') - and style.Get('ALIGN_DICT_COLON')): - _AlignDictColon(final_lines) - if style.Get('ALIGN_ARGUMENT_ASSIGNMENT'): - _AlignArgAssign(final_lines) - - _AlignTrailingComments(final_lines) - return _FormatFinalLines(final_lines, verify) - - -def _RetainHorizontalSpacing(line): - """Retain all horizontal spacing between tokens.""" - for tok in line.tokens: - tok.RetainHorizontalSpacing(line.first.column, line.depth) - - -def _RetainRequiredVerticalSpacing(cur_line, prev_line, lines): - """Retain all vertical spacing between lines.""" - prev_tok = None - if prev_line is not None: - prev_tok = prev_line.last - - if cur_line.disable: - # After the first token we are acting on a single line. So if it is - # disabled we must not reformat. - lines = set() - - for cur_tok in cur_line.tokens: - _RetainRequiredVerticalSpacingBetweenTokens(cur_tok, prev_tok, lines) - prev_tok = cur_tok - - -def _RetainRequiredVerticalSpacingBetweenTokens(cur_tok, prev_tok, lines): - """Retain vertical spacing between two tokens if not in editable range.""" - if prev_tok is None: - return - - if prev_tok.is_string: - prev_lineno = prev_tok.lineno + prev_tok.value.count('\n') - elif prev_tok.is_pseudo: - if not prev_tok.previous_token.is_multiline_string: - prev_lineno = prev_tok.previous_token.lineno + final_lines = [] + prev_line = None # The previous line. 
+ indent_width = style.Get( 'INDENT_WIDTH' ) + + for lline in _SingleOrMergedLines( llines ): + first_token = lline.first + _FormatFirstToken( first_token, lline.depth, prev_line, final_lines ) + + indent_amt = indent_width * lline.depth + state = format_decision_state.FormatDecisionState( lline, indent_amt ) + state.MoveStateToNextToken() + + if not lline.disable: + if lline.first.is_comment: + lline.first.value = lline.first.value.rstrip() + elif lline.last.is_comment: + lline.last.value = lline.last.value.rstrip() + if prev_line and prev_line.disable: + # Keep the vertical spacing between a disabled and enabled formatting + # region. + _RetainRequiredVerticalSpacingBetweenTokens( + lline.first, prev_line.last, lines ) + if any( tok.is_comment for tok in lline.tokens ): + _RetainVerticalSpacingBeforeComments( lline ) + + if lline.disable or _LineHasContinuationMarkers( lline ): + _RetainHorizontalSpacing( lline ) + _RetainRequiredVerticalSpacing( lline, prev_line, lines ) + _EmitLineUnformatted( state ) + + elif ( _LineContainsPylintDisableLineTooLong( lline ) or + _LineContainsI18n( lline ) ): + # Don't modify vertical spacing, but fix any horizontal spacing issues. + _RetainRequiredVerticalSpacing( lline, prev_line, lines ) + _EmitLineUnformatted( state ) + + elif _CanPlaceOnSingleLine( lline ) and not any( tok.must_break_before + for tok in lline.tokens ): + # The logical line fits on one line. + while state.next_token: + state.AddTokenToState( newline = False, dry_run = False ) + + elif not _AnalyzeSolutionSpace( state ): + # Failsafe mode. If there isn't a solution to the line, then just emit + # it as is. 
+ state = format_decision_state.FormatDecisionState( lline, indent_amt ) + state.MoveStateToNextToken() + _RetainHorizontalSpacing( lline ) + _RetainRequiredVerticalSpacing( lline, prev_line, None ) + _EmitLineUnformatted( state ) + + final_lines.append( lline ) + prev_line = lline + + if style.Get( 'ALIGN_ASSIGNMENT' ): + _AlignAssignment( final_lines ) + + _AlignTrailingComments( final_lines ) + return _FormatFinalLines( final_lines, verify ) + + +def _RetainHorizontalSpacing( line ): + """Retain all horizontal spacing between tokens.""" + for tok in line.tokens: + tok.RetainHorizontalSpacing( line.first.column, line.depth ) + + +def _RetainRequiredVerticalSpacing( cur_line, prev_line, lines ): + """Retain all vertical spacing between lines.""" + prev_tok = None + if prev_line is not None: + prev_tok = prev_line.last + + if cur_line.disable: + # After the first token we are acting on a single line. So if it is + # disabled we must not reformat. + lines = set() + + for cur_tok in cur_line.tokens: + _RetainRequiredVerticalSpacingBetweenTokens( cur_tok, prev_tok, lines ) + prev_tok = cur_tok + + +def _RetainRequiredVerticalSpacingBetweenTokens( cur_tok, prev_tok, lines ): + """Retain vertical spacing between two tokens if not in editable range.""" + if prev_tok is None: + return + + if prev_tok.is_string: + prev_lineno = prev_tok.lineno + prev_tok.value.count( '\n' ) + elif prev_tok.is_pseudo: + if not prev_tok.previous_token.is_multiline_string: + prev_lineno = prev_tok.previous_token.lineno + else: + prev_lineno = prev_tok.lineno else: - prev_lineno = prev_tok.lineno - else: - prev_lineno = prev_tok.lineno + prev_lineno = prev_tok.lineno - if cur_tok.is_comment: - cur_lineno = cur_tok.lineno - cur_tok.value.count('\n') - else: - cur_lineno = cur_tok.lineno + if cur_tok.is_comment: + cur_lineno = cur_tok.lineno - cur_tok.value.count( '\n' ) + else: + cur_lineno = cur_tok.lineno - if not prev_tok.is_comment and prev_tok.value.endswith('\\'): - prev_lineno += 
prev_tok.value.count('\n') + if not prev_tok.is_comment and prev_tok.value.endswith( '\\' ): + prev_lineno += prev_tok.value.count( '\n' ) - required_newlines = cur_lineno - prev_lineno - if cur_tok.is_comment and not prev_tok.is_comment: - # Don't adjust between a comment and non-comment. - pass - elif lines and lines.intersection(range(prev_lineno, cur_lineno + 1)): - desired_newlines = cur_tok.whitespace_prefix.count('\n') - whitespace_lines = range(prev_lineno + 1, cur_lineno) - deletable_lines = len(lines.intersection(whitespace_lines)) - required_newlines = max(required_newlines - deletable_lines, - desired_newlines) + required_newlines = cur_lineno - prev_lineno + if cur_tok.is_comment and not prev_tok.is_comment: + # Don't adjust between a comment and non-comment. + pass + elif lines and lines.intersection( range( prev_lineno, cur_lineno + 1 ) ): + desired_newlines = cur_tok.whitespace_prefix.count( '\n' ) + whitespace_lines = range( prev_lineno + 1, cur_lineno ) + deletable_lines = len( lines.intersection( whitespace_lines ) ) + required_newlines = max( required_newlines - deletable_lines, desired_newlines ) - cur_tok.AdjustNewlinesBefore(required_newlines) + cur_tok.AdjustNewlinesBefore( required_newlines ) -def _RetainVerticalSpacingBeforeComments(line): - """Retain vertical spacing before comments.""" - prev_token = None - for tok in line.tokens: - if tok.is_comment and prev_token: - if tok.lineno - tok.value.count('\n') - prev_token.lineno > 1: - tok.AdjustNewlinesBefore(ONE_BLANK_LINE) +def _RetainVerticalSpacingBeforeComments( line ): + """Retain vertical spacing before comments.""" + prev_token = None + for tok in line.tokens: + if tok.is_comment and prev_token: + if tok.lineno - tok.value.count( '\n' ) - prev_token.lineno > 1: + tok.AdjustNewlinesBefore( ONE_BLANK_LINE ) - prev_token = tok + prev_token = tok -def _EmitLineUnformatted(state): - """Emit the line without formatting. 
+def _EmitLineUnformatted( state ): + """Emit the line without formatting. The line contains code that if reformatted would break a non-syntactic convention. E.g., i18n comments and function calls are tightly bound by @@ -197,23 +191,23 @@ def _EmitLineUnformatted(state): state: (format_decision_state.FormatDecisionState) The format decision state. """ - while state.next_token: - previous_token = state.next_token.previous_token - previous_lineno = previous_token.lineno + while state.next_token: + previous_token = state.next_token.previous_token + previous_lineno = previous_token.lineno - if previous_token.is_multiline_string or previous_token.is_string: - previous_lineno += previous_token.value.count('\n') + if previous_token.is_multiline_string or previous_token.is_string: + previous_lineno += previous_token.value.count( '\n' ) - if previous_token.is_continuation: - newline = False - else: - newline = state.next_token.lineno > previous_lineno + if previous_token.is_continuation: + newline = False + else: + newline = state.next_token.lineno > previous_lineno - state.AddTokenToState(newline=newline, dry_run=False) + state.AddTokenToState( newline = newline, dry_run = False ) -def _LineContainsI18n(line): - """Return true if there are i18n comments or function calls in the line. +def _LineContainsI18n( line ): + """Return true if there are i18n comments or function calls in the line. I18n comments and pseudo-function calls are closely related. They cannot be moved apart without breaking i18n. @@ -224,33 +218,33 @@ def _LineContainsI18n(line): Returns: True if the line contains i18n comments or function calls. False otherwise. """ - if style.Get('I18N_COMMENT'): - for tok in line.tokens: - if tok.is_comment and re.match(style.Get('I18N_COMMENT'), tok.value): - # Contains an i18n comment. 
- return True - - if style.Get('I18N_FUNCTION_CALL'): - length = len(line.tokens) - for index in range(length - 1): - if (line.tokens[index + 1].value == '(' and - line.tokens[index].value in style.Get('I18N_FUNCTION_CALL')): - return True - return False + if style.Get( 'I18N_COMMENT' ): + for tok in line.tokens: + if tok.is_comment and re.match( style.Get( 'I18N_COMMENT' ), tok.value ): + # Contains an i18n comment. + return True + + if style.Get( 'I18N_FUNCTION_CALL' ): + length = len( line.tokens ) + for index in range( length - 1 ): + if ( line.tokens[ index + 1 ].value == '(' and + line.tokens[ index ].value in style.Get( 'I18N_FUNCTION_CALL' ) ): + return True + return False -def _LineContainsPylintDisableLineTooLong(line): - """Return true if there is a "pylint: disable=line-too-long" comment.""" - return re.search(r'\bpylint:\s+disable=line-too-long\b', line.last.value) +def _LineContainsPylintDisableLineTooLong( line ): + """Return true if there is a "pylint: disable=line-too-long" comment.""" + return re.search( r'\bpylint:\s+disable=line-too-long\b', line.last.value ) -def _LineHasContinuationMarkers(line): - """Return true if the line has continuation markers in it.""" - return any(tok.is_continuation for tok in line.tokens) +def _LineHasContinuationMarkers( line ): + """Return true if the line has continuation markers in it.""" + return any( tok.is_continuation for tok in line.tokens ) -def _CanPlaceOnSingleLine(line): - """Determine if the logical line can go on a single line. +def _CanPlaceOnSingleLine( line ): + """Determine if the logical line can go on a single line. Arguments: line: (logical_line.LogicalLine) The line currently being formatted. @@ -258,673 +252,359 @@ def _CanPlaceOnSingleLine(line): Returns: True if the line can or should be added to a single line. False otherwise. 
""" - token_names = [x.name for x in line.tokens] - if (style.Get('FORCE_MULTILINE_DICT') and 'LBRACE' in token_names): - return False - indent_amt = style.Get('INDENT_WIDTH') * line.depth - last = line.last - last_index = -1 - if (last.is_pylint_comment or last.is_pytype_comment or - last.is_copybara_comment): - last = last.previous_token - last_index = -2 - if last is None: - return True - return (last.total_length + indent_amt <= style.Get('COLUMN_LIMIT') and - not any(tok.is_comment for tok in line.tokens[:last_index])) - - -def _AlignTrailingComments(final_lines): - """Align trailing comments to the same column.""" - final_lines_index = 0 - while final_lines_index < len(final_lines): - line = final_lines[final_lines_index] - assert line.tokens - - processed_content = False - - for tok in line.tokens: - if (tok.is_comment and isinstance(tok.spaces_required_before, list) and - tok.value.startswith('#')): - # All trailing comments and comments that appear on a line by themselves - # in this block should be indented at the same level. The block is - # terminated by an empty line or EOF. Enumerate through each line in - # the block and calculate the max line length. Once complete, use the - # first col value greater than that value and create the necessary for - # each line accordingly. - all_pc_line_lengths = [] # All pre-comment line lengths - max_line_length = 0 - - while True: - # EOF - if final_lines_index + len(all_pc_line_lengths) == len(final_lines): - break - - this_line = final_lines[final_lines_index + len(all_pc_line_lengths)] - - # Blank line - note that content is preformatted so we don't need to - # worry about spaces/tabs; a blank line will always be '\n\n'. - assert this_line.tokens - if (all_pc_line_lengths and - this_line.tokens[0].formatted_whitespace_prefix.startswith('\n\n') - ): - break - - if this_line.disable: - all_pc_line_lengths.append([]) - continue - - # Calculate the length of each line in this logical line. 
- line_content = '' - pc_line_lengths = [] - - for line_tok in this_line.tokens: - whitespace_prefix = line_tok.formatted_whitespace_prefix - - newline_index = whitespace_prefix.rfind('\n') - if newline_index != -1: - max_line_length = max(max_line_length, len(line_content)) - line_content = '' - - whitespace_prefix = whitespace_prefix[newline_index + 1:] - - if line_tok.is_comment: - pc_line_lengths.append(len(line_content)) - else: - line_content += '{}{}'.format(whitespace_prefix, line_tok.value) - - if pc_line_lengths: - max_line_length = max(max_line_length, max(pc_line_lengths)) - - all_pc_line_lengths.append(pc_line_lengths) - - # Calculate the aligned column value - max_line_length += 2 - - aligned_col = None - for potential_col in tok.spaces_required_before: - if potential_col > max_line_length: - aligned_col = potential_col - break - - if aligned_col is None: - aligned_col = max_line_length - - # Update the comment token values based on the aligned values - for all_pc_line_lengths_index, pc_line_lengths in enumerate( - all_pc_line_lengths): - if not pc_line_lengths: - continue + token_names = [ x.name for x in line.tokens ] + if ( style.Get( 'FORCE_MULTILINE_DICT' ) and 'LBRACE' in token_names ): + return False + indent_amt = style.Get( 'INDENT_WIDTH' ) * line.depth + last = line.last + last_index = -1 + if ( last.is_pylint_comment or last.is_pytype_comment or last.is_copybara_comment ): + last = last.previous_token + last_index = -2 + if last is None: + return True + return ( + last.total_length + indent_amt <= style.Get( 'COLUMN_LIMIT' ) and + not any( tok.is_comment for tok in line.tokens[ : last_index ] ) ) + + +def _AlignTrailingComments( final_lines ): + """Align trailing comments to the same column.""" + final_lines_index = 0 + while final_lines_index < len( final_lines ): + line = final_lines[ final_lines_index ] + assert line.tokens + + processed_content = False + + for tok in line.tokens: + if ( tok.is_comment and isinstance( 
tok.spaces_required_before, list ) and + tok.value.startswith( '#' ) ): + # All trailing comments and comments that appear on a line by themselves + # in this block should be indented at the same level. The block is + # terminated by an empty line or EOF. Enumerate through each line in + # the block and calculate the max line length. Once complete, use the + # first col value greater than that value and create the necessary for + # each line accordingly. + all_pc_line_lengths = [] # All pre-comment line lengths + max_line_length = 0 + + while True: + # EOF + if final_lines_index + len( all_pc_line_lengths ) == len( + final_lines ): + break + + this_line = final_lines[ final_lines_index + + len( all_pc_line_lengths ) ] + + # Blank line - note that content is preformatted so we don't need to + # worry about spaces/tabs; a blank line will always be '\n\n'. + assert this_line.tokens + if ( all_pc_line_lengths and + this_line.tokens[ 0 ].formatted_whitespace_prefix.startswith( + '\n\n' ) ): + break + + if this_line.disable: + all_pc_line_lengths.append( [] ) + continue - this_line = final_lines[final_lines_index + all_pc_line_lengths_index] + # Calculate the length of each line in this logical line. + line_content = '' + pc_line_lengths = [] - pc_line_length_index = 0 - for line_tok in this_line.tokens: - if line_tok.is_comment: - assert pc_line_length_index < len(pc_line_lengths) - assert pc_line_lengths[pc_line_length_index] < aligned_col + for line_tok in this_line.tokens: + whitespace_prefix = line_tok.formatted_whitespace_prefix - # Note that there may be newlines embedded in the comments, so - # we need to apply a whitespace prefix to each line. 
- whitespace = ' ' * ( - aligned_col - pc_line_lengths[pc_line_length_index] - 1) - pc_line_length_index += 1 + newline_index = whitespace_prefix.rfind( '\n' ) + if newline_index != -1: + max_line_length = max( + max_line_length, len( line_content ) ) + line_content = '' - line_content = [] + whitespace_prefix = whitespace_prefix[ newline_index + 1 : ] - for comment_line_index, comment_line in enumerate( - line_tok.value.split('\n')): - line_content.append('{}{}'.format(whitespace, - comment_line.strip())) + if line_tok.is_comment: + pc_line_lengths.append( len( line_content ) ) + else: + line_content += '{}{}'.format( + whitespace_prefix, line_tok.value ) - if comment_line_index == 0: - whitespace = ' ' * (aligned_col - 1) + if pc_line_lengths: + max_line_length = max( max_line_length, max( pc_line_lengths ) ) - line_content = '\n'.join(line_content) + all_pc_line_lengths.append( pc_line_lengths ) - # Account for initial whitespace already slated for the - # beginning of the line. - existing_whitespace_prefix = \ - line_tok.formatted_whitespace_prefix.lstrip('\n') + # Calculate the aligned column value + max_line_length += 2 - if line_content.startswith(existing_whitespace_prefix): - line_content = line_content[len(existing_whitespace_prefix):] + aligned_col = None + for potential_col in tok.spaces_required_before: + if potential_col > max_line_length: + aligned_col = potential_col + break - line_tok.value = line_content + if aligned_col is None: + aligned_col = max_line_length - assert pc_line_length_index == len(pc_line_lengths) + # Update the comment token values based on the aligned values + for all_pc_line_lengths_index, pc_line_lengths in enumerate( + all_pc_line_lengths ): + if not pc_line_lengths: + continue - final_lines_index += len(all_pc_line_lengths) + this_line = final_lines[ final_lines_index + + all_pc_line_lengths_index ] - processed_content = True - break + pc_line_length_index = 0 + for line_tok in this_line.tokens: + if line_tok.is_comment: + 
assert pc_line_length_index < len( pc_line_lengths ) + assert pc_line_lengths[ pc_line_length_index ] < aligned_col - if not processed_content: - final_lines_index += 1 + # Note that there may be newlines embedded in the comments, so + # we need to apply a whitespace prefix to each line. + whitespace = ' ' * ( + aligned_col - pc_line_lengths[ pc_line_length_index ] - + 1 ) + pc_line_length_index += 1 + line_content = [] -def _AlignAssignment(final_lines): - """Align assignment operators and augmented assignment operators to the same column""" + for comment_line_index, comment_line in enumerate( + line_tok.value.split( '\n' ) ): + line_content.append( + '{}{}'.format( whitespace, comment_line.strip() ) ) - final_lines_index = 0 - while final_lines_index < len(final_lines): - line = final_lines[final_lines_index] + if comment_line_index == 0: + whitespace = ' ' * ( aligned_col - 1 ) - assert line.tokens - process_content = False + line_content = '\n'.join( line_content ) - for tok in line.tokens: - if tok.is_assign or tok.is_augassign: - # all pre assignment variable lengths in one block of lines - all_pa_variables_lengths = [] - max_variables_length = 0 - - while True: - # EOF - if final_lines_index + len(all_pa_variables_lengths) == len(final_lines): - break + # Account for initial whitespace already slated for the + # beginning of the line. 
+ existing_whitespace_prefix = \ + line_tok.formatted_whitespace_prefix.lstrip('\n') - this_line_index = final_lines_index + len(all_pa_variables_lengths) - this_line = final_lines[this_line_index] + if line_content.startswith( existing_whitespace_prefix ): + line_content = line_content[ + len( existing_whitespace_prefix ): ] - next_line = None - if this_line_index < len(final_lines) - 1: - next_line = final_lines[final_lines_index + len(all_pa_variables_lengths) + 1 ] + line_tok.value = line_content - assert this_line.tokens, next_line.tokens + assert pc_line_length_index == len( pc_line_lengths ) - # align them differently when there is a blank line in between - if (all_pa_variables_lengths and - this_line.tokens[0].formatted_whitespace_prefix.startswith('\n\n') - ): - break + final_lines_index += len( all_pc_line_lengths ) - # if there is a standalone comment or keyword statement line - # or other lines without assignment in between, break - elif (all_pa_variables_lengths and - True not in [tok.is_assign or tok.is_augassign for tok in this_line.tokens]): - if this_line.tokens[0].is_comment: - if style.Get('NEW_ALIGNMENT_AFTER_COMMENTLINE'): + processed_content = True break - else: break - - if this_line.disable: - all_pa_variables_lengths.append([]) - continue - - variables_content = '' - pa_variables_lengths = [] - contain_object = False - line_tokens = this_line.tokens - # only one assignment expression is on each line - for index in range(len(line_tokens)): - line_tok = line_tokens[index] - - prefix = line_tok.formatted_whitespace_prefix - newline_index = prefix.rfind('\n') - if newline_index != -1: - variables_content = '' - prefix = prefix[newline_index + 1:] - - if line_tok.is_assign or line_tok.is_augassign: - next_toks = [line_tokens[i] for i in range(index+1, len(line_tokens))] - # if there is object(list/tuple/dict) with newline entries, break, - # update the alignment so far and start to calulate new alignment - for tok in next_toks: - if tok.value in 
['(', '[', '{'] and tok.next_token: - if (tok.next_token.formatted_whitespace_prefix.startswith('\n') - or (tok.next_token.is_comment and tok.next_token.next_token.formatted_whitespace_prefix.startswith('\n'))): - pa_variables_lengths.append(len(variables_content)) - contain_object = True - break - if not contain_object: - if line_tok.is_assign: - pa_variables_lengths.append(len(variables_content)) - # if augassign, add the extra augmented part to the max length caculation - elif line_tok.is_augassign: - pa_variables_lengths.append(len(variables_content) + len(line_tok.value) - 1 ) - # don't add the tokens - # after the assignment operator - break - else: - variables_content += '{}{}'.format(prefix, line_tok.value) - - if pa_variables_lengths: - max_variables_length = max(max_variables_length, max(pa_variables_lengths)) - - all_pa_variables_lengths.append(pa_variables_lengths) - - # after saving this line's max variable length, - # we check if next line has the same depth as this line, - # if not, we don't want to calculate their max variable length together - # so we break the while loop, update alignment so far, and - # then go to next line that has '=' - if next_line: - if this_line.depth != next_line.depth: - break - # if this line contains objects with newline entries, - # start new block alignment - if contain_object: - break - - # if no update of max_length, just go to the next block - if max_variables_length == 0: continue - - max_variables_length += 2 - # Update the assignment token values based on the max variable length - for all_pa_variables_lengths_index, pa_variables_lengths in enumerate( - all_pa_variables_lengths): - if not pa_variables_lengths: - continue - this_line = final_lines[final_lines_index + all_pa_variables_lengths_index] - - # only the first assignment operator on each line - pa_variables_lengths_index = 0 - for line_tok in this_line.tokens: - if line_tok.is_assign or line_tok.is_augassign: - assert pa_variables_lengths[0] < 
max_variables_length - - if pa_variables_lengths_index < len(pa_variables_lengths): - whitespace = ' ' * ( - max_variables_length - pa_variables_lengths[0] - 1) - - assign_content = '{}{}'.format(whitespace, line_tok.value.strip()) - - existing_whitespace_prefix = \ - line_tok.formatted_whitespace_prefix.lstrip('\n') - - # in case the existing spaces are larger than padded spaces - if (len(whitespace) == 1 or len(whitespace) > 1 and - len(existing_whitespace_prefix)>len(whitespace)): - line_tok.whitespace_prefix = '' - elif assign_content.startswith(existing_whitespace_prefix): - assign_content = assign_content[len(existing_whitespace_prefix):] - - # update the assignment operator value - line_tok.value = assign_content - - pa_variables_lengths_index += 1 - - final_lines_index += len(all_pa_variables_lengths) - - process_content = True - break - - if not process_content: - final_lines_index += 1 - - -def _AlignArgAssign(final_lines): - """Align the assign operators in a argument list to the same column""" - """NOTE One argument list of one function is on one logical line! - But funtion calls/argument lists can be in argument list. 
- """ - final_lines_index = 0 - while final_lines_index < len(final_lines): - line = final_lines[final_lines_index] - if line.disable: - final_lines_index += 1 - continue - - assert line.tokens - process_content = False - - for tok in line.tokens: - if tok.is_argassign: + if not processed_content: + final_lines_index += 1 - this_line = line - line_tokens = this_line.tokens - for open_index in range(len(line_tokens)): - line_tok = line_tokens[open_index] +def _AlignAssignment( final_lines ): + """Align assignment operators and augmented assignment operators to the same column""" + + final_lines_index = 0 + while final_lines_index < len( final_lines ): + line = final_lines[ final_lines_index ] + + assert line.tokens + process_content = False + + for tok in line.tokens: + if tok.is_assign or tok.is_augassign: + # all pre assignment variable lengths in one block of lines + all_pa_variables_lengths = [] + max_variables_length = 0 + + while True: + # EOF + if final_lines_index + len( all_pa_variables_lengths ) == len( + final_lines ): + break + + this_line_index = final_lines_index + len( + all_pa_variables_lengths ) + this_line = final_lines[ this_line_index ] + + next_line = None + if this_line_index < len( final_lines ) - 1: + next_line = final_lines[ final_lines_index + + len( all_pa_variables_lengths ) + 1 ] + + assert this_line.tokens, next_line.tokens + + # align them differently when there is a blank line in between + if ( all_pa_variables_lengths and + this_line.tokens[ 0 ].formatted_whitespace_prefix.startswith( + '\n\n' ) ): + break + + # if there is a standalone comment or keyword statement line + # or other lines without assignment in between, break + elif ( all_pa_variables_lengths and + True not in [ tok.is_assign or tok.is_augassign + for tok in this_line.tokens ] ): + if this_line.tokens[ 0 ].is_comment: + if style.Get( 'NEW_ALIGNMENT_AFTER_COMMENTLINE' ): + break + else: + break + + if this_line.disable: + all_pa_variables_lengths.append( [] ) + 
continue - if (line_tok.value == '(' and not line_tok.is_pseudo - and line_tok.next_token.formatted_whitespace_prefix.startswith('\n')): - index = open_index - # skip the comments in the beginning - index += 1 - line_tok = line_tokens[index] - while not line_tok.is_argname_start and index < len(line_tokens)-1: - index += 1 - line_tok = line_tokens[index] - - # check if the argstart is on newline - if line_tok.is_argname_start and line_tok.formatted_whitespace_prefix.startswith('\n'): - first_arg_index = index - first_arg_column = len(line_tok.formatted_whitespace_prefix.lstrip('\n')) - - closing = False - all_arg_name_lengths = [] - arg_name_lengths = [] - name_content = '' - arg_column = first_arg_column - - # start with the first argument - # that has nextline prefix - while not closing: - # if there is a comment in between, save, reset and continue to calulate new alignment - if (style.Get('NEW_ALIGNMENT_AFTER_COMMENTLINE') - and arg_name_lengths and line_tok.is_comment - and line_tok.formatted_whitespace_prefix.startswith('\n')): - all_arg_name_lengths.append(arg_name_lengths) - arg_name_lengths = [] - index += 1 - line_tok = line_tokens[index] - continue - - prefix = line_tok.formatted_whitespace_prefix - newline_index = prefix.rfind('\n') - - if newline_index != -1: - if line_tok.is_argname_start: - name_content = '' - prefix = prefix[newline_index + 1:] - arg_column = len(prefix) - # if a typed arg name is so long - # that there are newlines inside - # only calulate the last line arg_name that has the assignment - elif line_tok.is_argname: - name_content = '' - prefix = prefix[newline_index + 1:] - # if any argument not on newline - elif line_tok.is_argname_start: - name_content = '' - arg_column = line_tok.column - # in case they are formatted into one line in final_line - # but are put in separated lines in original codes - if arg_column == first_arg_column: - arg_column = line_tok.formatted_whitespace_prefix - # on the same argument level - if 
(line_tok.is_argname_start and arg_name_lengths - and arg_column==first_arg_column): - argname_end = line_tok - while argname_end.is_argname: - argname_end = argname_end.next_token - # argument without assignment in between - if not argname_end.is_argassign: - all_arg_name_lengths.append(arg_name_lengths) - arg_name_lengths = [] - index += 1 - line_tok = line_tokens[index] + variables_content = '' + pa_variables_lengths = [] + contain_object = False + line_tokens = this_line.tokens + # only one assignment expression is on each line + for index in range( len( line_tokens ) ): + line_tok = line_tokens[ index ] + + prefix = line_tok.formatted_whitespace_prefix + newline_index = prefix.rfind( '\n' ) + if newline_index != -1: + variables_content = '' + prefix = prefix[ newline_index + 1 : ] + + if line_tok.is_assign or line_tok.is_augassign: + next_toks = [ + line_tokens[ i ] + for i in range( index + 1, len( line_tokens ) ) + ] + # if there is object(list/tuple/dict) with newline entries, break, + # update the alignment so far and start to calulate new alignment + for tok in next_toks: + if tok.value in [ '(', '[', '{' ] and tok.next_token: + if ( + tok.next_token.formatted_whitespace_prefix + .startswith( '\n' ) or + ( tok.next_token.is_comment and + tok.next_token.next_token. 
+ formatted_whitespace_prefix.startswith( '\n' ) + ) ): + pa_variables_lengths.append( + len( variables_content ) ) + contain_object = True + break + if not contain_object: + if line_tok.is_assign: + pa_variables_lengths.append( + len( variables_content ) ) + # if augassign, add the extra augmented part to the max length caculation + elif line_tok.is_augassign: + pa_variables_lengths.append( + len( variables_content ) + + len( line_tok.value ) - 1 ) + # don't add the tokens + # after the assignment operator + break + else: + variables_content += '{}{}'.format( prefix, line_tok.value ) + + if pa_variables_lengths: + max_variables_length = max( + max_variables_length, max( pa_variables_lengths ) ) + + all_pa_variables_lengths.append( pa_variables_lengths ) + + # after saving this line's max variable length, + # we check if next line has the same depth as this line, + # if not, we don't want to calculate their max variable length together + # so we break the while loop, update alignment so far, and + # then go to next line that has '=' + if next_line: + if this_line.depth != next_line.depth: + break + # if this line contains objects with newline entries, + # start new block alignment + if contain_object: + break + + # if no update of max_length, just go to the next block + if max_variables_length == 0: continue - if line_tok.is_argassign and arg_column == first_arg_column: - arg_name_lengths.append(len(name_content)) - elif line_tok.is_argname and arg_column == first_arg_column: - name_content += '{}{}'.format(prefix, line_tok.value) - # add up all token values before the arg assign operator + max_variables_length += 2 - index += 1 - if index < len(line_tokens): - line_tok = line_tokens[index] - # when the matching closing bracket is never found - # due to edge cases where the closing bracket - # is not indented or dedented - else: - all_arg_name_lengths.append(arg_name_lengths) - break - - # if there is a new object(list/tuple/dict) with its entries on newlines, - # 
save, reset and continue to calulate new alignment - if (line_tok.value in ['(', '[','{'] and line_tok.next_token - and line_tok.next_token.formatted_whitespace_prefix.startswith('\n')): - if arg_name_lengths: - all_arg_name_lengths.append(arg_name_lengths) - arg_name_lengths = [] - index += 1 - line_tok = line_tokens[index] - continue - - if line_tok.value == ')'and not line_tok.is_pseudo: - if line_tok.formatted_whitespace_prefix.startswith('\n'): - close_column = len(line_tok.formatted_whitespace_prefix.lstrip('\n')) - else: close_column = line_tok.column - if close_column < first_arg_column: - if arg_name_lengths: - all_arg_name_lengths.append(arg_name_lengths) - closing = True - - # update the alignment once one full arg list is processed - if all_arg_name_lengths: - # if argument list with only the first argument on newline - if len(all_arg_name_lengths) == 1 and len(all_arg_name_lengths[0]) == 1: - continue - max_name_length = 0 - all_arg_name_lengths_index = 0 - arg_name_lengths = all_arg_name_lengths[all_arg_name_lengths_index] - max_name_length = max(arg_name_lengths or [0]) + 2 - arg_lengths_index = 0 - for token in line_tokens[first_arg_index:index]: - if token.is_argassign: - name_token = token.previous_token - while name_token.is_argname and not name_token.is_argname_start: - name_token = name_token.previous_token - name_column = len(name_token.formatted_whitespace_prefix.lstrip('\n')) - if name_column == first_arg_column: - if all_arg_name_lengths_index < len(all_arg_name_lengths): - if arg_lengths_index == len(arg_name_lengths): - all_arg_name_lengths_index += 1 - arg_name_lengths = all_arg_name_lengths[all_arg_name_lengths_index] - max_name_length = max(arg_name_lengths or [0]) + 2 - arg_lengths_index = 0 - - if arg_lengths_index < len(arg_name_lengths): - - assert arg_name_lengths[arg_lengths_index] < max_name_length - - padded_spaces = ' ' * ( - max_name_length - arg_name_lengths[arg_lengths_index] - 1) - arg_lengths_index += 1 - - assign_content 
= '{}{}'.format(padded_spaces, token.value.strip()) - existing_whitespace_prefix = \ - token.formatted_whitespace_prefix.lstrip('\n') - - # in case the existing spaces are larger than padded spaces - if (len(padded_spaces)==1 or len(padded_spaces)>1 and - len(existing_whitespace_prefix)>len(padded_spaces)): - token.whitespace_prefix = '' - elif assign_content.startswith(existing_whitespace_prefix): - assign_content = assign_content[len(existing_whitespace_prefix):] - - token.value = assign_content - - final_lines_index += 1 - process_content = True - break - - if not process_content: - final_lines_index += 1 - - -def _AlignDictColon(final_lines): - """Align colons in a dict to the same column""" - """NOTE One (nested) dict/list is one logical line!""" - final_lines_index = 0 - while final_lines_index < len(final_lines): - line = final_lines[final_lines_index] - if line.disable: - final_lines_index += 1 - continue - - assert line.tokens - process_content = False + # Update the assignment token values based on the max variable length + for all_pa_variables_lengths_index, pa_variables_lengths in enumerate( + all_pa_variables_lengths ): + if not pa_variables_lengths: + continue + this_line = final_lines[ final_lines_index + + all_pa_variables_lengths_index ] - for tok in line.tokens: - # make sure each dict entry on separate lines and - # the dict has more than one entry - if (tok.is_dict_key and tok.formatted_whitespace_prefix.startswith('\n') and - not tok.is_comment): + # only the first assignment operator on each line + pa_variables_lengths_index = 0 + for line_tok in this_line.tokens: + if line_tok.is_assign or line_tok.is_augassign: + assert pa_variables_lengths[ 0 ] < max_variables_length - this_line = line + if pa_variables_lengths_index < len( pa_variables_lengths ): + whitespace = ' ' * ( + max_variables_length - pa_variables_lengths[ 0 ] - + 1 ) - line_tokens = this_line.tokens - for open_index in range(len(line_tokens)): - line_tok = line_tokens[open_index] 
+ assign_content = '{}{}'.format( + whitespace, line_tok.value.strip() ) - # check each time if the detected dict is the dict we aim for - if line_tok.value == '{' and line_tok.next_token.formatted_whitespace_prefix.startswith('\n'): - index = open_index - # skip the comments in the beginning - index += 1 - line_tok = line_tokens[index] - while not line_tok.is_dict_key and index < len(line_tokens)-1: - index += 1 - line_tok = line_tokens[index] - # in case empty dict, check if dict key again - if line_tok.is_dict_key and line_tok.formatted_whitespace_prefix.startswith('\n'): - closing = False # the closing bracket in dict '}'. - keys_content = '' - all_dict_keys_lengths = [] - dict_keys_lengths = [] - - # record the column number of the first key - first_key_column = len(line_tok.formatted_whitespace_prefix.lstrip('\n')) - key_column = first_key_column - - # while not closing: - while not closing: - prefix = line_tok.formatted_whitespace_prefix - newline = prefix.startswith('\n') - if newline: - # if comments inbetween, save, reset and continue to caluclate new alignment - if (style.Get('NEW_ALIGNMENT_AFTER_COMMENTLINE') - and dict_keys_lengths and line_tok.is_comment): - all_dict_keys_lengths.append(dict_keys_lengths) - dict_keys_lengths =[] - index += 1 - line_tok = line_tokens[index] - continue - if line_tok.is_dict_key_start: - keys_content = '' - prefix = prefix.lstrip('\n') - key_column = len(prefix) - # if the dict key is so long that it has multi-lines - # only caculate the last line that has the colon - elif line_tok.is_dict_key: - keys_content = '' - prefix = prefix.lstrip('\n') - elif line_tok.is_dict_key_start: - key_column = line_tok.column - - if line_tok.is_dict_colon and key_column == first_key_column: - dict_keys_lengths.append(len(keys_content)) - elif line_tok.is_dict_key and key_column == first_key_column: - keys_content += '{}{}'.format(prefix, line_tok.value) - - index += 1 - if index < len(line_tokens): - line_tok = line_tokens[index] - # 
when the matching closing bracket is never found - # due to edge cases where the closing bracket - # is not indented or dedented, e.g. ']}', with another bracket before - else: - all_dict_keys_lengths.append(dict_keys_lengths) - break - - # if there is new objects(list/tuple/dict) with its entries on newlines, - # or a function call with any of its arguments on newlines, - # save, reset and continue to calulate new alignment - if (line_tok.value in ['(', '[', '{'] and not line_tok.is_pseudo and line_tok.next_token - and line_tok.next_token.formatted_whitespace_prefix.startswith('\n')): - if dict_keys_lengths: - all_dict_keys_lengths.append(dict_keys_lengths) - dict_keys_lengths = [] - index += 1 - line_tok = line_tokens[index] - continue - # the matching closing bracket is either same indented or dedented - # accordingly to previous level's indentation - # the first found, immediately break the while loop - if line_tok.value == '}': - if line_tok.formatted_whitespace_prefix.startswith('\n'): - close_column = len(line_tok.formatted_whitespace_prefix.lstrip('\n')) - else: close_column = line_tok.column - if close_column < first_key_column: - if dict_keys_lengths: - all_dict_keys_lengths.append(dict_keys_lengths) - closing = True - - # update the alignment once one dict is processed - if all_dict_keys_lengths: - max_keys_length = 0 - all_dict_keys_lengths_index = 0 - dict_keys_lengths = all_dict_keys_lengths[all_dict_keys_lengths_index] - max_keys_length = max(dict_keys_lengths or [0]) + 2 - keys_lengths_index = 0 - for token in line_tokens[open_index+1:index]: - if token.is_dict_colon: - # check if the key has multiple tokens and - # get the first key token in this key - key_token = token.previous_token - while key_token.is_dict_key and not key_token.is_dict_key_start: - key_token = key_token.previous_token - key_column = len(key_token.formatted_whitespace_prefix.lstrip('\n')) - - if key_column == first_key_column: - - if keys_lengths_index == len(dict_keys_lengths): 
- all_dict_keys_lengths_index += 1 - dict_keys_lengths = all_dict_keys_lengths[all_dict_keys_lengths_index] - max_keys_length = max(dict_keys_lengths or [0]) + 2 - keys_lengths_index = 0 - - if keys_lengths_index < len(dict_keys_lengths): - assert dict_keys_lengths[keys_lengths_index] < max_keys_length - - padded_spaces = ' ' * ( - max_keys_length - dict_keys_lengths[keys_lengths_index] - 1) - keys_lengths_index += 1 - #NOTE if the existing whitespaces are larger than padded spaces - existing_whitespace_prefix = \ - token.formatted_whitespace_prefix.lstrip('\n') - colon_content = '{}{}'.format(padded_spaces, token.value.strip()) + existing_whitespace_prefix = \ + line_tok.formatted_whitespace_prefix.lstrip('\n') - # in case the existing spaces are larger than the paddes spaces - if (len(padded_spaces) == 1 or len(padded_spaces) > 1 - and len(existing_whitespace_prefix) >= len(padded_spaces)): - # remove the existing spaces - token.whitespace_prefix = '' - elif colon_content.startswith(existing_whitespace_prefix): - colon_content = colon_content[len(existing_whitespace_prefix):] + # in case the existing spaces are larger than padded spaces + if ( len( whitespace ) == 1 or len( whitespace ) > 1 and + len( existing_whitespace_prefix ) + > len( whitespace ) ): + line_tok.whitespace_prefix = '' + elif assign_content.startswith( + existing_whitespace_prefix ): + assign_content = assign_content[ + len( existing_whitespace_prefix ): ] - token.value = colon_content + # update the assignment operator value + line_tok.value = assign_content - final_lines_index += 1 + pa_variables_lengths_index += 1 - process_content = True - break + final_lines_index += len( all_pa_variables_lengths ) - if not process_content: - final_lines_index += 1 + process_content = True + break + if not process_content: + final_lines_index += 1 -def _FormatFinalLines(final_lines, verify): - """Compose the final output from the finalized lines.""" - formatted_code = [] - for line in final_lines: - 
formatted_line = [] - for tok in line.tokens: - if not tok.is_pseudo: - formatted_line.append(tok.formatted_whitespace_prefix) - formatted_line.append(tok.value) - elif (not tok.next_token.whitespace_prefix.startswith('\n') and - not tok.next_token.whitespace_prefix.startswith(' ')): - if (tok.previous_token.value == ':' or - tok.next_token.value not in ',}])'): - formatted_line.append(' ') +def _FormatFinalLines( final_lines, verify ): + """Compose the final output from the finalized lines.""" + formatted_code = [] + for line in final_lines: + formatted_line = [] + for tok in line.tokens: + if not tok.is_pseudo: + formatted_line.append( tok.formatted_whitespace_prefix ) + formatted_line.append( tok.value ) + elif ( not tok.next_token.whitespace_prefix.startswith( '\n' ) and + not tok.next_token.whitespace_prefix.startswith( ' ' ) ): + if ( tok.previous_token.value == ':' or + tok.next_token.value not in ',}])' ): + formatted_line.append( ' ' ) - formatted_code.append(''.join(formatted_line)) - if verify: - verifier.VerifyCode(formatted_code[-1]) + formatted_code.append( ''.join( formatted_line ) ) + if verify: + verifier.VerifyCode( formatted_code[ -1 ] ) - return ''.join(formatted_code) + '\n' + return ''.join( formatted_code ) + '\n' -class _StateNode(object): - """An edge in the solution space from 'previous.state' to 'state'. +class _StateNode( object ): + """An edge in the solution space from 'previous.state' to 'state'. Attributes: state: (format_decision_state.FormatDecisionState) The format decision state @@ -934,32 +614,31 @@ class _StateNode(object): previous: (_StateNode) The previous state node in the graph. """ - # TODO(morbo): Add a '__cmp__' method. + # TODO(morbo): Add a '__cmp__' method. 
- def __init__(self, state, newline, previous): - self.state = state.Clone() - self.newline = newline - self.previous = previous + def __init__( self, state, newline, previous ): + self.state = state.Clone() + self.newline = newline + self.previous = previous - def __repr__(self): # pragma: no cover - return 'StateNode(state=[\n{0}\n], newline={1})'.format( - self.state, self.newline) + def __repr__( self ): # pragma: no cover + return 'StateNode(state=[\n{0}\n], newline={1})'.format( + self.state, self.newline ) # A tuple of (penalty, count) that is used to prioritize the BFS. In case of # equal penalties, we prefer states that were inserted first. During state # generation, we make sure that we insert states first that break the line as # late as possible. -_OrderedPenalty = collections.namedtuple('OrderedPenalty', ['penalty', 'count']) +_OrderedPenalty = collections.namedtuple( 'OrderedPenalty', [ 'penalty', 'count' ] ) # An item in the prioritized BFS search queue. The 'StateNode's 'state' has # the given '_OrderedPenalty'. -_QueueItem = collections.namedtuple('QueueItem', - ['ordered_penalty', 'state_node']) +_QueueItem = collections.namedtuple( 'QueueItem', [ 'ordered_penalty', 'state_node' ] ) -def _AnalyzeSolutionSpace(initial_state): - """Analyze the entire solution space starting from initial_state. +def _AnalyzeSolutionSpace( initial_state ): + """Analyze the entire solution space starting from initial_state. This implements a variant of Dijkstra's algorithm on the graph that spans the solution space (LineStates are the nodes). The algorithm tries to find @@ -973,49 +652,49 @@ def _AnalyzeSolutionSpace(initial_state): Returns: True if a formatting solution was found. False otherwise. """ - count = 0 - seen = set() - p_queue = [] - - # Insert start element. 
- node = _StateNode(initial_state, False, None) - heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(0, count), node)) - - count += 1 - while p_queue: - item = p_queue[0] - penalty = item.ordered_penalty.penalty - node = item.state_node - if not node.state.next_token: - break - heapq.heappop(p_queue) - - if count > 10000: - node.state.ignore_stack_for_comparison = True - - # Unconditionally add the state and check if it was present to avoid having - # to hash it twice in the common case (state hashing is expensive). - before_seen_count = len(seen) - seen.add(node.state) - # If seen didn't change size, the state was already present. - if before_seen_count == len(seen): - continue - - # FIXME(morbo): Add a 'decision' element? - - count = _AddNextStateToQueue(penalty, node, False, count, p_queue) - count = _AddNextStateToQueue(penalty, node, True, count, p_queue) - - if not p_queue: - # We weren't able to find a solution. Do nothing. - return False + count = 0 + seen = set() + p_queue = [] + + # Insert start element. + node = _StateNode( initial_state, False, None ) + heapq.heappush( p_queue, _QueueItem( _OrderedPenalty( 0, count ), node ) ) + + count += 1 + while p_queue: + item = p_queue[ 0 ] + penalty = item.ordered_penalty.penalty + node = item.state_node + if not node.state.next_token: + break + heapq.heappop( p_queue ) + + if count > 10000: + node.state.ignore_stack_for_comparison = True + + # Unconditionally add the state and check if it was present to avoid having + # to hash it twice in the common case (state hashing is expensive). + before_seen_count = len( seen ) + seen.add( node.state ) + # If seen didn't change size, the state was already present. + if before_seen_count == len( seen ): + continue + + # FIXME(morbo): Add a 'decision' element? 
- _ReconstructPath(initial_state, heapq.heappop(p_queue).state_node) - return True + count = _AddNextStateToQueue( penalty, node, False, count, p_queue ) + count = _AddNextStateToQueue( penalty, node, True, count, p_queue ) + if not p_queue: + # We weren't able to find a solution. Do nothing. + return False + + _ReconstructPath( initial_state, heapq.heappop( p_queue ).state_node ) + return True -def _AddNextStateToQueue(penalty, previous_node, newline, count, p_queue): - """Add the following state to the analysis queue. + +def _AddNextStateToQueue( penalty, previous_node, newline, count, p_queue ): + """Add the following state to the analysis queue. Assume the current state is 'previous_node' and has been reached with a penalty of 'penalty'. Insert a line break if 'newline' is True. @@ -1031,23 +710,23 @@ def _AddNextStateToQueue(penalty, previous_node, newline, count, p_queue): Returns: The updated number of elements in the queue. """ - must_split = previous_node.state.MustSplit() - if newline and not previous_node.state.CanSplit(must_split): - # Don't add a newline if the token cannot be split. - return count - if not newline and must_split: - # Don't add a token we must split but where we aren't splitting. - return count + must_split = previous_node.state.MustSplit() + if newline and not previous_node.state.CanSplit( must_split ): + # Don't add a newline if the token cannot be split. + return count + if not newline and must_split: + # Don't add a token we must split but where we aren't splitting. 
+ return count - node = _StateNode(previous_node.state, newline, previous_node) - penalty += node.state.AddTokenToState( - newline=newline, dry_run=True, must_split=must_split) - heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(penalty, count), node)) - return count + 1 + node = _StateNode( previous_node.state, newline, previous_node ) + penalty += node.state.AddTokenToState( + newline = newline, dry_run = True, must_split = must_split ) + heapq.heappush( p_queue, _QueueItem( _OrderedPenalty( penalty, count ), node ) ) + return count + 1 -def _ReconstructPath(initial_state, current): - """Reconstruct the path through the queue with lowest penalty. +def _ReconstructPath( initial_state, current ): + """Reconstruct the path through the queue with lowest penalty. Arguments: initial_state: (format_decision_state.FormatDecisionState) The initial state @@ -1055,21 +734,21 @@ def _ReconstructPath(initial_state, current): current: (_StateNode) The node in the decision graph that is the end point of the path with the least penalty. """ - path = collections.deque() + path = collections.deque() - while current.previous: - path.appendleft(current) - current = current.previous + while current.previous: + path.appendleft( current ) + current = current.previous - for node in path: - initial_state.AddTokenToState(newline=node.newline, dry_run=False) + for node in path: + initial_state.AddTokenToState( newline = node.newline, dry_run = False ) NESTED_DEPTH = [] -def _FormatFirstToken(first_token, indent_depth, prev_line, final_lines): - """Format the first token in the logical line. +def _FormatFirstToken( first_token, indent_depth, prev_line, final_lines ): + """Format the first token in the logical line. Add a newline and the required indent before the first token of the logical line. @@ -1082,39 +761,38 @@ def _FormatFirstToken(first_token, indent_depth, prev_line, final_lines): final_lines: (list of logical_line.LogicalLine) The logical lines that have already been processed. 
""" - global NESTED_DEPTH - while NESTED_DEPTH and NESTED_DEPTH[-1] > indent_depth: - NESTED_DEPTH.pop() - - first_nested = False - if _IsClassOrDef(first_token): - if not NESTED_DEPTH: - NESTED_DEPTH = [indent_depth] - elif NESTED_DEPTH[-1] < indent_depth: - first_nested = True - NESTED_DEPTH.append(indent_depth) - - first_token.AddWhitespacePrefix( - _CalculateNumberOfNewlines(first_token, indent_depth, prev_line, - final_lines, first_nested), - indent_level=indent_depth) - - -NO_BLANK_LINES = 1 -ONE_BLANK_LINE = 2 + global NESTED_DEPTH + while NESTED_DEPTH and NESTED_DEPTH[ -1 ] > indent_depth: + NESTED_DEPTH.pop() + + first_nested = False + if _IsClassOrDef( first_token ): + if not NESTED_DEPTH: + NESTED_DEPTH = [ indent_depth ] + elif NESTED_DEPTH[ -1 ] < indent_depth: + first_nested = True + NESTED_DEPTH.append( indent_depth ) + + first_token.AddWhitespacePrefix( + _CalculateNumberOfNewlines( + first_token, indent_depth, prev_line, final_lines, first_nested ), + indent_level = indent_depth ) + + +NO_BLANK_LINES = 1 +ONE_BLANK_LINE = 2 TWO_BLANK_LINES = 3 -def _IsClassOrDef(tok): - if tok.value in {'class', 'def', '@'}: - return True - return (tok.next_token and tok.value == 'async' and - tok.next_token.value == 'def') +def _IsClassOrDef( tok ): + if tok.value in { 'class', 'def', '@' }: + return True + return ( tok.next_token and tok.value == 'async' and tok.next_token.value == 'def' ) -def _CalculateNumberOfNewlines(first_token, indent_depth, prev_line, - final_lines, first_nested): - """Calculate the number of newlines we need to add. +def _CalculateNumberOfNewlines( + first_token, indent_depth, prev_line, final_lines, first_nested ): + """Calculate the number of newlines we need to add. Arguments: first_token: (format_token.FormatToken) The first token in the logical @@ -1129,102 +807,103 @@ def _CalculateNumberOfNewlines(first_token, indent_depth, prev_line, Returns: The number of newlines needed before the first token. 
""" - # TODO(morbo): Special handling for imports. - # TODO(morbo): Create a knob that can tune these. - if prev_line is None: - # The first line in the file. Don't add blank lines. - # FIXME(morbo): Is this correct? - if first_token.newlines is not None: - first_token.newlines = None - return 0 - - if first_token.is_docstring: - if (prev_line.first.value == 'class' and - style.Get('BLANK_LINE_BEFORE_CLASS_DOCSTRING')): - # Enforce a blank line before a class's docstring. - return ONE_BLANK_LINE - elif (prev_line.first.value.startswith('#') and - style.Get('BLANK_LINE_BEFORE_MODULE_DOCSTRING')): - # Enforce a blank line before a module's docstring. - return ONE_BLANK_LINE - # The docstring shouldn't have a newline before it. - return NO_BLANK_LINES - - if first_token.is_name and not indent_depth: - if prev_line.first.value in {'from', 'import'}: - # Support custom number of blank lines between top-level imports and - # variable definitions. - return 1 + style.Get( - 'BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES') - - prev_last_token = prev_line.last - if prev_last_token.is_docstring: - if (not indent_depth and first_token.value in {'class', 'def', 'async'}): - # Separate a class or function from the module-level docstring with - # appropriate number of blank lines. - return 1 + style.Get('BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION') - if (first_nested and - not style.Get('BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF') and - _IsClassOrDef(first_token)): - first_token.newlines = None - return NO_BLANK_LINES - if _NoBlankLinesBeforeCurrentToken(prev_last_token.value, first_token, - prev_last_token): - return NO_BLANK_LINES - else: - return ONE_BLANK_LINE - - if _IsClassOrDef(first_token): - # TODO(morbo): This can go once the blank line calculator is more - # sophisticated. - if not indent_depth: - # This is a top-level class or function. 
- is_inline_comment = prev_last_token.whitespace_prefix.count('\n') == 0 - if (not prev_line.disable and prev_last_token.is_comment and - not is_inline_comment): - # This token follows a non-inline comment. - if _NoBlankLinesBeforeCurrentToken(prev_last_token.value, first_token, - prev_last_token): - # Assume that the comment is "attached" to the current line. - # Therefore, we want two blank lines before the comment. - index = len(final_lines) - 1 - while index > 0: - if not final_lines[index - 1].is_comment: - break - index -= 1 - if final_lines[index - 1].first.value == '@': - final_lines[index].first.AdjustNewlinesBefore(NO_BLANK_LINES) - else: - prev_last_token.AdjustNewlinesBefore( - 1 + style.Get('BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION')) - if first_token.newlines is not None: + # TODO(morbo): Special handling for imports. + # TODO(morbo): Create a knob that can tune these. + if prev_line is None: + # The first line in the file. Don't add blank lines. + # FIXME(morbo): Is this correct? + if first_token.newlines is not None: first_token.newlines = None - return NO_BLANK_LINES - elif _IsClassOrDef(prev_line.first): - if first_nested and not style.Get( - 'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF'): - first_token.newlines = None + return 0 + + if first_token.is_docstring: + if ( prev_line.first.value == 'class' and + style.Get( 'BLANK_LINE_BEFORE_CLASS_DOCSTRING' ) ): + # Enforce a blank line before a class's docstring. + return ONE_BLANK_LINE + elif ( prev_line.first.value.startswith( '#' ) and + style.Get( 'BLANK_LINE_BEFORE_MODULE_DOCSTRING' ) ): + # Enforce a blank line before a module's docstring. + return ONE_BLANK_LINE + # The docstring shouldn't have a newline before it. return NO_BLANK_LINES - # Calculate how many newlines were between the original lines. We want to - # retain that formatting if it doesn't violate one of the style guide rules. 
- if first_token.is_comment: - first_token_lineno = first_token.lineno - first_token.value.count('\n') - else: - first_token_lineno = first_token.lineno + if first_token.is_name and not indent_depth: + if prev_line.first.value in { 'from', 'import' }: + # Support custom number of blank lines between top-level imports and + # variable definitions. + return 1 + style.Get( + 'BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES' ) + + prev_last_token = prev_line.last + if prev_last_token.is_docstring: + if ( not indent_depth and first_token.value in { 'class', 'def', 'async' } ): + # Separate a class or function from the module-level docstring with + # appropriate number of blank lines. + return 1 + style.Get( 'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION' ) + if ( first_nested and + not style.Get( 'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF' ) and + _IsClassOrDef( first_token ) ): + first_token.newlines = None + return NO_BLANK_LINES + if _NoBlankLinesBeforeCurrentToken( prev_last_token.value, first_token, + prev_last_token ): + return NO_BLANK_LINES + else: + return ONE_BLANK_LINE + + if _IsClassOrDef( first_token ): + # TODO(morbo): This can go once the blank line calculator is more + # sophisticated. + if not indent_depth: + # This is a top-level class or function. + is_inline_comment = prev_last_token.whitespace_prefix.count( '\n' ) == 0 + if ( not prev_line.disable and prev_last_token.is_comment and + not is_inline_comment ): + # This token follows a non-inline comment. + if _NoBlankLinesBeforeCurrentToken( prev_last_token.value, first_token, + prev_last_token ): + # Assume that the comment is "attached" to the current line. + # Therefore, we want two blank lines before the comment. 
+ index = len( final_lines ) - 1 + while index > 0: + if not final_lines[ index - 1 ].is_comment: + break + index -= 1 + if final_lines[ index - 1 ].first.value == '@': + final_lines[ index ].first.AdjustNewlinesBefore( + NO_BLANK_LINES ) + else: + prev_last_token.AdjustNewlinesBefore( + 1 + style.Get( 'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION' ) ) + if first_token.newlines is not None: + first_token.newlines = None + return NO_BLANK_LINES + elif _IsClassOrDef( prev_line.first ): + if first_nested and not style.Get( + 'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF' ): + first_token.newlines = None + return NO_BLANK_LINES + + # Calculate how many newlines were between the original lines. We want to + # retain that formatting if it doesn't violate one of the style guide rules. + if first_token.is_comment: + first_token_lineno = first_token.lineno - first_token.value.count( '\n' ) + else: + first_token_lineno = first_token.lineno - prev_last_token_lineno = prev_last_token.lineno - if prev_last_token.is_multiline_string: - prev_last_token_lineno += prev_last_token.value.count('\n') + prev_last_token_lineno = prev_last_token.lineno + if prev_last_token.is_multiline_string: + prev_last_token_lineno += prev_last_token.value.count( '\n' ) - if first_token_lineno - prev_last_token_lineno > 1: - return ONE_BLANK_LINE + if first_token_lineno - prev_last_token_lineno > 1: + return ONE_BLANK_LINE - return NO_BLANK_LINES + return NO_BLANK_LINES -def _SingleOrMergedLines(lines): - """Generate the lines we want to format. +def _SingleOrMergedLines( lines ): + """Generate the lines we want to format. Arguments: lines: (list of logical_line.LogicalLine) Lines we want to format. @@ -1233,46 +912,49 @@ def _SingleOrMergedLines(lines): Either a single line, if the current line cannot be merged with the succeeding line, or the next two lines merged into one line. 
""" - index = 0 - last_was_merged = False - while index < len(lines): - if lines[index].disable: - line = lines[index] - index += 1 - while index < len(lines): - column = line.last.column + 2 - if lines[index].lineno != line.lineno: - break - if line.last.value != ':': - leaf = pytree.Leaf( - type=token.SEMI, value=';', context=('', (line.lineno, column))) - line.AppendToken( - format_token.FormatToken(leaf, pytree_utils.NodeName(leaf))) - for tok in lines[index].tokens: - line.AppendToken(tok) - index += 1 - yield line - elif line_joiner.CanMergeMultipleLines(lines[index:], last_was_merged): - # TODO(morbo): This splice is potentially very slow. Come up with a more - # performance-friendly way of determining if two lines can be merged. - next_line = lines[index + 1] - for tok in next_line.tokens: - lines[index].AppendToken(tok) - if (len(next_line.tokens) == 1 and next_line.first.is_multiline_string): - # This may be a multiline shebang. In that case, we want to retain the - # formatting. Otherwise, it could mess up the shell script's syntax. - lines[index].disable = True - yield lines[index] - index += 2 - last_was_merged = True - else: - yield lines[index] - index += 1 - last_was_merged = False - - -def _NoBlankLinesBeforeCurrentToken(text, cur_token, prev_token): - """Determine if there are no blank lines before the current token. 
+ index = 0 + last_was_merged = False + while index < len( lines ): + if lines[ index ].disable: + line = lines[ index ] + index += 1 + while index < len( lines ): + column = line.last.column + 2 + if lines[ index ].lineno != line.lineno: + break + if line.last.value != ':': + leaf = pytree.Leaf( + type = token.SEMI, + value = ';', + context = ( '', ( line.lineno, column ) ) ) + line.AppendToken( + format_token.FormatToken( leaf, + pytree_utils.NodeName( leaf ) ) ) + for tok in lines[ index ].tokens: + line.AppendToken( tok ) + index += 1 + yield line + elif line_joiner.CanMergeMultipleLines( lines[ index : ], last_was_merged ): + # TODO(morbo): This splice is potentially very slow. Come up with a more + # performance-friendly way of determining if two lines can be merged. + next_line = lines[ index + 1 ] + for tok in next_line.tokens: + lines[ index ].AppendToken( tok ) + if ( len( next_line.tokens ) == 1 and next_line.first.is_multiline_string ): + # This may be a multiline shebang. In that case, we want to retain the + # formatting. Otherwise, it could mess up the shell script's syntax. + lines[ index ].disable = True + yield lines[ index ] + index += 2 + last_was_merged = True + else: + yield lines[ index ] + index += 1 + last_was_merged = False + + +def _NoBlankLinesBeforeCurrentToken( text, cur_token, prev_token ): + """Determine if there are no blank lines before the current token. The previous token is a docstring or comment. The prev_token_lineno is the start of the text of that token. Counting the number of newlines in its text @@ -1290,8 +972,8 @@ def _NoBlankLinesBeforeCurrentToken(text, cur_token, prev_token): Returns: True if there is no blank line before the current token. 
""" - cur_token_lineno = cur_token.lineno - if cur_token.is_comment: - cur_token_lineno -= cur_token.value.count('\n') - num_newlines = text.count('\n') if not prev_token.is_comment else 0 - return prev_token.lineno + num_newlines == cur_token_lineno - 1 + cur_token_lineno = cur_token.lineno + if cur_token.is_comment: + cur_token_lineno -= cur_token.value.count( '\n' ) + num_newlines = text.count( '\n' ) if not prev_token.is_comment else 0 + return prev_token.lineno + num_newlines == cur_token_lineno - 1 diff --git a/yapf/yapflib/split_penalty.py b/yapf/yapflib/split_penalty.py index 79b68edcd..8f93d3ade 100644 --- a/yapf/yapflib/split_penalty.py +++ b/yapf/yapflib/split_penalty.py @@ -15,9 +15,9 @@ from yapf.yapflib import style # Generic split penalties -UNBREAKABLE = 1000**5 +UNBREAKABLE = 1000**5 VERY_STRONGLY_CONNECTED = 5000 -STRONGLY_CONNECTED = 2500 +STRONGLY_CONNECTED = 2500 ############################################################################# # Grammar-specific penalties - should be <= 1000 # @@ -25,15 +25,15 @@ # Lambdas shouldn't be split unless absolutely necessary or if # ALLOW_MULTILINE_LAMBDAS is True. -LAMBDA = 1000 +LAMBDA = 1000 MULTILINE_LAMBDA = 500 ANNOTATION = 100 -ARGUMENT = 25 +ARGUMENT = 25 # TODO: Assign real values. 
-RETURN_TYPE = 1 -DOTTED_NAME = 40 -EXPR = 10 -DICT_KEY_EXPR = 20 +RETURN_TYPE = 1 +DOTTED_NAME = 40 +EXPR = 10 +DICT_KEY_EXPR = 20 DICT_VALUE_EXPR = 11 diff --git a/yapf/yapflib/style.py b/yapf/yapflib/style.py index d9e9e5e9e..684bfb274 100644 --- a/yapf/yapflib/style.py +++ b/yapf/yapflib/style.py @@ -21,71 +21,70 @@ from yapf.yapflib import py3compat -class StyleConfigError(errors.YapfError): - """Raised when there's a problem reading the style configuration.""" - pass +class StyleConfigError( errors.YapfError ): + """Raised when there's a problem reading the style configuration.""" + pass -def Get(setting_name): - """Get a style setting.""" - return _style[setting_name] +def Get( setting_name ): + """Get a style setting.""" + return _style[ setting_name ] -def GetOrDefault(setting_name, default_value): - """Get a style setting or default value if the setting does not exist.""" - return _style.get(setting_name, default_value) +def GetOrDefault( setting_name, default_value ): + """Get a style setting or default value if the setting does not exist.""" + return _style.get( setting_name, default_value ) def Help(): - """Return dict mapping style names to help strings.""" - return _STYLE_HELP + """Return dict mapping style names to help strings.""" + return _STYLE_HELP -def SetGlobalStyle(style): - """Set a style dict.""" - global _style - global _GLOBAL_STYLE_FACTORY - factory = _GetStyleFactory(style) - if factory: - _GLOBAL_STYLE_FACTORY = factory - _style = style +def SetGlobalStyle( style ): + """Set a style dict.""" + global _style + global _GLOBAL_STYLE_FACTORY + factory = _GetStyleFactory( style ) + if factory: + _GLOBAL_STYLE_FACTORY = factory + _style = style _STYLE_HELP = dict( - ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=textwrap.dedent("""\ - Align closing bracket with visual indentation."""), - ALIGN_ASSIGNMENT=textwrap.dedent("""\ + ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT = textwrap.dedent( + """\ + Align closing bracket with visual indentation.""" ), 
+ ALIGN_ASSIGNMENT = textwrap.dedent( + """\ Align assignment or augmented assignment operators. If there is a blank line or newline comment or objects with newline entries in between, - it will start new block alignment."""), - ALIGN_ARGUMENT_ASSIGNMENT=textwrap.dedent("""\ - Align assignment operators in the argument list if they are all split on newlines. - Arguments without assignment are ignored. - Arguments without assignment in between will initiate new block alignment calulation. - Newline comments or objects with newline entries will also start new block alignment."""), - ALIGN_DICT_COLON=textwrap.dedent("""\ - Align the colons in the dictionary - if all entries in dictionay are split on newlines. - or 'EACH_DICT_ENTRY_ON_SEPERATE_LINE' is set True. - """), - NEW_ALIGNMENT_AFTER_COMMENTLINE=textwrap.dedent("""\ - Start new assignment or colon alignment when there is a newline comment in between."""), - ALLOW_MULTILINE_LAMBDAS=textwrap.dedent("""\ - Allow lambdas to be formatted on more than one line."""), - ALLOW_MULTILINE_DICTIONARY_KEYS=textwrap.dedent("""\ + it will start new block alignment.""" ), + NEW_ALIGNMENT_AFTER_COMMENTLINE = textwrap.dedent( + """\ + Start new assignment or colon alignment when there is a newline comment in between.""" + ), + ALLOW_MULTILINE_LAMBDAS = textwrap.dedent( + """\ + Allow lambdas to be formatted on more than one line.""" ), + ALLOW_MULTILINE_DICTIONARY_KEYS = textwrap.dedent( + """\ Allow dictionary keys to exist on multiple lines. For example: x = { ('this is the first element of a tuple', 'this is the second element of a tuple'): value, - }"""), - ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=textwrap.dedent("""\ + }""" ), + ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS = textwrap.dedent( + """\ Allow splitting before a default / named assignment in an argument list. 
- """), - ALLOW_SPLIT_BEFORE_DICT_VALUE=textwrap.dedent("""\ - Allow splits before the dictionary value."""), - ARITHMETIC_PRECEDENCE_INDICATION=textwrap.dedent("""\ + """ ), + ALLOW_SPLIT_BEFORE_DICT_VALUE = textwrap.dedent( + """\ + Allow splits before the dictionary value.""" ), + ARITHMETIC_PRECEDENCE_INDICATION = textwrap.dedent( + """\ Let spacing indicate operator precedence. For example: a = 1 * 2 + 3 / 4 @@ -104,26 +103,32 @@ def SetGlobalStyle(style): e = 1*2 - 3 f = 1 + 2 + 3 + 4 - """), - BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=textwrap.dedent("""\ + """ ), + BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = textwrap.dedent( + """\ Insert a blank line before a 'def' or 'class' immediately nested within another 'def' or 'class'. For example: class Foo: # <------ this blank line def method(): - ..."""), - BLANK_LINE_BEFORE_CLASS_DOCSTRING=textwrap.dedent("""\ - Insert a blank line before a class-level docstring."""), - BLANK_LINE_BEFORE_MODULE_DOCSTRING=textwrap.dedent("""\ - Insert a blank line before a module docstring."""), - BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=textwrap.dedent("""\ + ...""" ), + BLANK_LINE_BEFORE_CLASS_DOCSTRING = textwrap.dedent( + """\ + Insert a blank line before a class-level docstring.""" ), + BLANK_LINE_BEFORE_MODULE_DOCSTRING = textwrap.dedent( + """\ + Insert a blank line before a module docstring.""" ), + BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION = textwrap.dedent( + """\ Number of blank lines surrounding top-level function and class - definitions."""), - BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES=textwrap.dedent("""\ + definitions.""" ), + BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES = textwrap.dedent( + """\ Number of blank lines between top-level imports and variable - definitions."""), - COALESCE_BRACKETS=textwrap.dedent("""\ + definitions.""" ), + COALESCE_BRACKETS = textwrap.dedent( + """\ Do not split consecutive brackets. Only relevant when dedent_closing_brackets is set. 
For example: @@ -139,10 +144,11 @@ def method(): call_func_that_takes_a_dict({ 'key1': 'value1', 'key2': 'value2', - })"""), - COLUMN_LIMIT=textwrap.dedent("""\ - The column limit."""), - CONTINUATION_ALIGN_STYLE=textwrap.dedent("""\ + })""" ), + COLUMN_LIMIT = textwrap.dedent( """\ + The column limit.""" ), + CONTINUATION_ALIGN_STYLE = textwrap.dedent( + """\ The style for continuation alignment. Possible values are: - SPACE: Use spaces for continuation alignment. This is default behavior. @@ -151,10 +157,12 @@ def method(): CONTINUATION_INDENT_WIDTH spaces) for continuation alignment. - VALIGN-RIGHT: Vertically align continuation lines to multiple of INDENT_WIDTH columns. Slightly right (one tab or a few spaces) if - cannot vertically align continuation lines with indent characters."""), - CONTINUATION_INDENT_WIDTH=textwrap.dedent("""\ - Indent width used for line continuations."""), - DEDENT_CLOSING_BRACKETS=textwrap.dedent("""\ + cannot vertically align continuation lines with indent characters.""" ), + CONTINUATION_INDENT_WIDTH = textwrap.dedent( + """\ + Indent width used for line continuations.""" ), + DEDENT_CLOSING_BRACKETS = textwrap.dedent( + """\ Put closing brackets on a separate line, dedented, if the bracketed expression can't fit in a single line. Applies to all kinds of brackets, including function definitions and calls. 
For example: @@ -171,28 +179,34 @@ def method(): start_ts=now()-timedelta(days=3), end_ts=now(), ) # <--- this bracket is dedented and on a separate line - """), - DISABLE_ENDING_COMMA_HEURISTIC=textwrap.dedent("""\ + """ ), + DISABLE_ENDING_COMMA_HEURISTIC = textwrap.dedent( + """\ Disable the heuristic which places each list element on a separate line - if the list is comma-terminated."""), - EACH_DICT_ENTRY_ON_SEPARATE_LINE=textwrap.dedent("""\ - Place each dictionary entry onto its own line."""), - FORCE_MULTILINE_DICT=textwrap.dedent("""\ + if the list is comma-terminated.""" ), + EACH_DICT_ENTRY_ON_SEPARATE_LINE = textwrap.dedent( + """\ + Place each dictionary entry onto its own line.""" ), + FORCE_MULTILINE_DICT = textwrap.dedent( + """\ Require multiline dictionary even if it would normally fit on one line. For example: config = { 'key1': 'value1' - }"""), - I18N_COMMENT=textwrap.dedent("""\ + }""" ), + I18N_COMMENT = textwrap.dedent( + """\ The regex for an i18n comment. The presence of this comment stops reformatting of that line, because the comments are required to be - next to the string they translate."""), - I18N_FUNCTION_CALL=textwrap.dedent("""\ + next to the string they translate.""" ), + I18N_FUNCTION_CALL = textwrap.dedent( + """\ The i18n function call names. The presence of this function stops reformattting on that line, because the string it has cannot be moved - away from the i18n comment."""), - INDENT_CLOSING_BRACKETS=textwrap.dedent("""\ + away from the i18n comment.""" ), + INDENT_CLOSING_BRACKETS = textwrap.dedent( + """\ Put closing brackets on a separate line, indented, if the bracketed expression can't fit in a single line. Applies to all kinds of brackets, including function definitions and calls. 
For example: @@ -209,8 +223,9 @@ def method(): start_ts=now()-timedelta(days=3), end_ts=now(), ) # <--- this bracket is indented and on a separate line - """), - INDENT_DICTIONARY_VALUE=textwrap.dedent("""\ + """ ), + INDENT_DICTIONARY_VALUE = textwrap.dedent( + """\ Indent the dictionary value if it cannot fit on the same line as the dictionary key. For example: @@ -220,14 +235,17 @@ def method(): 'key2': value1 + value2, } - """), - INDENT_WIDTH=textwrap.dedent("""\ - The number of columns to use for indentation."""), - INDENT_BLANK_LINES=textwrap.dedent("""\ - Indent blank lines."""), - JOIN_MULTIPLE_LINES=textwrap.dedent("""\ - Join short lines into one line. E.g., single line 'if' statements."""), - NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=textwrap.dedent("""\ + """ ), + INDENT_WIDTH = textwrap.dedent( + """\ + The number of columns to use for indentation.""" ), + INDENT_BLANK_LINES = textwrap.dedent( """\ + Indent blank lines.""" ), + JOIN_MULTIPLE_LINES = textwrap.dedent( + """\ + Join short lines into one line. E.g., single line 'if' statements.""" ), + NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS = textwrap.dedent( + """\ Do not include spaces around selected binary operators. For example: 1 + 2 * 3 - 4 / 5 @@ -235,22 +253,27 @@ def method(): will be formatted as follows when configured with "*,/": 1 + 2*3 - 4/5 - """), - SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=textwrap.dedent("""\ + """ ), + SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = textwrap.dedent( + """\ Insert a space between the ending comma and closing bracket of a list, - etc."""), - SPACE_INSIDE_BRACKETS=textwrap.dedent("""\ + etc.""" ), + SPACE_INSIDE_BRACKETS = textwrap.dedent( + """\ Use spaces inside brackets, braces, and parentheses. 
For example: method_call( 1 ) my_dict[ 3 ][ 1 ][ get_index( *args, **kwargs ) ] my_set = { 1, 2, 3 } - """), - SPACES_AROUND_POWER_OPERATOR=textwrap.dedent("""\ - Use spaces around the power operator."""), - SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=textwrap.dedent("""\ - Use spaces around default or named assigns."""), - SPACES_AROUND_DICT_DELIMITERS=textwrap.dedent("""\ + """ ), + SPACES_AROUND_POWER_OPERATOR = textwrap.dedent( + """\ + Use spaces around the power operator.""" ), + SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN = textwrap.dedent( + """\ + Use spaces around default or named assigns.""" ), + SPACES_AROUND_DICT_DELIMITERS = textwrap.dedent( + """\ Adds a space after the opening '{' and before the ending '}' dict delimiters. @@ -259,8 +282,9 @@ def method(): will be formatted as: { 1: 2 } - """), - SPACES_AROUND_LIST_DELIMITERS=textwrap.dedent("""\ + """ ), + SPACES_AROUND_LIST_DELIMITERS = textwrap.dedent( + """\ Adds a space after the opening '[' and before the ending ']' list delimiters. @@ -269,13 +293,15 @@ def method(): will be formatted as: [ 1, 2 ] - """), - SPACES_AROUND_SUBSCRIPT_COLON=textwrap.dedent("""\ + """ ), + SPACES_AROUND_SUBSCRIPT_COLON = textwrap.dedent( + """\ Use spaces around the subscript / slice operator. For example: my_list[1 : 10 : 2] - """), - SPACES_AROUND_TUPLE_DELIMITERS=textwrap.dedent("""\ + """ ), + SPACES_AROUND_TUPLE_DELIMITERS = textwrap.dedent( + """\ Adds a space after the opening '(' and before the ending ')' tuple delimiters. @@ -284,8 +310,9 @@ def method(): will be formatted as: ( 1, 2, 3 ) - """), - SPACES_BEFORE_COMMENT=textwrap.dedent("""\ + """ ), + SPACES_BEFORE_COMMENT = textwrap.dedent( + """\ The number of spaces required before a trailing comment. 
This can be a single value (representing the number of spaces before each trailing comment) or list of values (representing @@ -326,33 +353,41 @@ def method(): a_very_long_statement_that_extends_beyond_the_final_column # Comment <-- the end of line comments are aligned based on the line length short # This is a shorter statement - """), # noqa - SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=textwrap.dedent("""\ + """ ), # noqa + SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED = textwrap.dedent( + """\ Split before arguments if the argument list is terminated by a - comma."""), - SPLIT_ALL_COMMA_SEPARATED_VALUES=textwrap.dedent("""\ - Split before arguments"""), - SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES=textwrap.dedent("""\ + comma.""" ), + SPLIT_ALL_COMMA_SEPARATED_VALUES = textwrap.dedent( + """\ + Split before arguments""" ), + SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES = textwrap.dedent( + """\ Split before arguments, but do not split all subexpressions recursively - (unless needed)."""), - SPLIT_BEFORE_ARITHMETIC_OPERATOR=textwrap.dedent("""\ + (unless needed).""" ), + SPLIT_BEFORE_ARITHMETIC_OPERATOR = textwrap.dedent( + """\ Set to True to prefer splitting before '+', '-', '*', '/', '//', or '@' - rather than after."""), - SPLIT_BEFORE_BITWISE_OPERATOR=textwrap.dedent("""\ + rather than after.""" ), + SPLIT_BEFORE_BITWISE_OPERATOR = textwrap.dedent( + """\ Set to True to prefer splitting before '&', '|' or '^' rather than - after."""), - SPLIT_BEFORE_CLOSING_BRACKET=textwrap.dedent("""\ + after.""" ), + SPLIT_BEFORE_CLOSING_BRACKET = textwrap.dedent( + """\ Split before the closing bracket if a list or dict literal doesn't fit on - a single line."""), - SPLIT_BEFORE_DICT_SET_GENERATOR=textwrap.dedent("""\ + a single line.""" ), + SPLIT_BEFORE_DICT_SET_GENERATOR = textwrap.dedent( + """\ Split before a dictionary or set generator (comp_for). For example, note the split before the 'for': foo = { variable: 'Hello world, have a nice day!' 
for variable in bar if variable != 42 - }"""), - SPLIT_BEFORE_DOT=textwrap.dedent("""\ + }""" ), + SPLIT_BEFORE_DOT = textwrap.dedent( + """\ Split before the '.' if we need to split a longer expression: foo = ('This is a really long string: {}, {}, {}, {}'.format(a, b, c, d)) @@ -361,20 +396,25 @@ def method(): foo = ('This is a really long string: {}, {}, {}, {}' .format(a, b, c, d)) - """), # noqa - SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=textwrap.dedent("""\ + """ ), # noqa + SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = textwrap.dedent( + """\ Split after the opening paren which surrounds an expression if it doesn't fit on a single line. - """), - SPLIT_BEFORE_FIRST_ARGUMENT=textwrap.dedent("""\ + """ ), + SPLIT_BEFORE_FIRST_ARGUMENT = textwrap.dedent( + """\ If an argument / parameter list is going to be split, then split before - the first argument."""), - SPLIT_BEFORE_LOGICAL_OPERATOR=textwrap.dedent("""\ + the first argument.""" ), + SPLIT_BEFORE_LOGICAL_OPERATOR = textwrap.dedent( + """\ Set to True to prefer splitting before 'and' or 'or' rather than - after."""), - SPLIT_BEFORE_NAMED_ASSIGNS=textwrap.dedent("""\ - Split named assignments onto individual lines."""), - SPLIT_COMPLEX_COMPREHENSION=textwrap.dedent("""\ + after.""" ), + SPLIT_BEFORE_NAMED_ASSIGNS = textwrap.dedent( + """\ + Split named assignments onto individual lines.""" ), + SPLIT_COMPLEX_COMPREHENSION = textwrap.dedent( + """\ Set to True to split list comprehensions and generators that have non-trivial expressions and multiple clauses before each of these clauses. 
For example: @@ -389,28 +429,37 @@ def method(): a_long_var + 100 for a_long_var in xrange(1000) if a_long_var % 10] - """), - SPLIT_PENALTY_AFTER_OPENING_BRACKET=textwrap.dedent("""\ - The penalty for splitting right after the opening bracket."""), - SPLIT_PENALTY_AFTER_UNARY_OPERATOR=textwrap.dedent("""\ - The penalty for splitting the line after a unary operator."""), - SPLIT_PENALTY_ARITHMETIC_OPERATOR=textwrap.dedent("""\ + """ ), + SPLIT_PENALTY_AFTER_OPENING_BRACKET = textwrap.dedent( + """\ + The penalty for splitting right after the opening bracket.""" ), + SPLIT_PENALTY_AFTER_UNARY_OPERATOR = textwrap.dedent( + """\ + The penalty for splitting the line after a unary operator.""" ), + SPLIT_PENALTY_ARITHMETIC_OPERATOR = textwrap.dedent( + """\ The penalty of splitting the line around the '+', '-', '*', '/', '//', - ``%``, and '@' operators."""), - SPLIT_PENALTY_BEFORE_IF_EXPR=textwrap.dedent("""\ - The penalty for splitting right before an if expression."""), - SPLIT_PENALTY_BITWISE_OPERATOR=textwrap.dedent("""\ + ``%``, and '@' operators.""" ), + SPLIT_PENALTY_BEFORE_IF_EXPR = textwrap.dedent( + """\ + The penalty for splitting right before an if expression.""" ), + SPLIT_PENALTY_BITWISE_OPERATOR = textwrap.dedent( + """\ The penalty of splitting the line around the '&', '|', and '^' - operators."""), - SPLIT_PENALTY_COMPREHENSION=textwrap.dedent("""\ + operators.""" ), + SPLIT_PENALTY_COMPREHENSION = textwrap.dedent( + """\ The penalty for splitting a list comprehension or generator - expression."""), - SPLIT_PENALTY_EXCESS_CHARACTER=textwrap.dedent("""\ - The penalty for characters over the column limit."""), - SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=textwrap.dedent("""\ + expression.""" ), + SPLIT_PENALTY_EXCESS_CHARACTER = textwrap.dedent( + """\ + The penalty for characters over the column limit.""" ), + SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = textwrap.dedent( + """\ The penalty incurred by adding a line split to the logical line. 
The - more line splits added the higher the penalty."""), - SPLIT_PENALTY_IMPORT_NAMES=textwrap.dedent("""\ + more line splits added the higher the penalty.""" ), + SPLIT_PENALTY_IMPORT_NAMES = textwrap.dedent( + """\ The penalty of splitting a list of "import as" names. For example: from a_very_long_or_indented_module_name_yada_yad import (long_argument_1, @@ -421,201 +470,200 @@ def method(): from a_very_long_or_indented_module_name_yada_yad import ( long_argument_1, long_argument_2, long_argument_3) - """), # noqa - SPLIT_PENALTY_LOGICAL_OPERATOR=textwrap.dedent("""\ + """ ), # noqa + SPLIT_PENALTY_LOGICAL_OPERATOR = textwrap.dedent( + """\ The penalty of splitting the line around the 'and' and 'or' - operators."""), - USE_TABS=textwrap.dedent("""\ - Use the Tab character for indentation."""), - # BASED_ON_STYLE='Which predefined style this style is based on', + operators.""" ), + USE_TABS = textwrap.dedent( """\ + Use the Tab character for indentation.""" ), + # BASED_ON_STYLE='Which predefined style this style is based on', ) def CreatePEP8Style(): - """Create the PEP8 formatting style.""" - return dict( - ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=True, - ALIGN_ASSIGNMENT=False, - ALIGN_ARGUMENT_ASSIGNMENT=False, - ALIGN_DICT_COLON=False, - NEW_ALIGNMENT_AFTER_COMMENTLINE=False, - ALLOW_MULTILINE_LAMBDAS=False, - ALLOW_MULTILINE_DICTIONARY_KEYS=False, - ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=True, - ALLOW_SPLIT_BEFORE_DICT_VALUE=True, - ARITHMETIC_PRECEDENCE_INDICATION=False, - BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=True, - BLANK_LINE_BEFORE_CLASS_DOCSTRING=False, - BLANK_LINE_BEFORE_MODULE_DOCSTRING=False, - BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=2, - BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES=1, - COALESCE_BRACKETS=False, - COLUMN_LIMIT=79, - CONTINUATION_ALIGN_STYLE='SPACE', - CONTINUATION_INDENT_WIDTH=4, - DEDENT_CLOSING_BRACKETS=False, - INDENT_CLOSING_BRACKETS=False, - DISABLE_ENDING_COMMA_HEURISTIC=False, - 
EACH_DICT_ENTRY_ON_SEPARATE_LINE=True, - FORCE_MULTILINE_DICT=False, - I18N_COMMENT='', - I18N_FUNCTION_CALL='', - INDENT_DICTIONARY_VALUE=False, - INDENT_WIDTH=4, - INDENT_BLANK_LINES=False, - JOIN_MULTIPLE_LINES=True, - NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=set(), - SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=True, - SPACE_INSIDE_BRACKETS=False, - SPACES_AROUND_POWER_OPERATOR=False, - SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=False, - SPACES_AROUND_DICT_DELIMITERS=False, - SPACES_AROUND_LIST_DELIMITERS=False, - SPACES_AROUND_SUBSCRIPT_COLON=False, - SPACES_AROUND_TUPLE_DELIMITERS=False, - SPACES_BEFORE_COMMENT=2, - SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=False, - SPLIT_ALL_COMMA_SEPARATED_VALUES=False, - SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES=False, - SPLIT_BEFORE_ARITHMETIC_OPERATOR=False, - SPLIT_BEFORE_BITWISE_OPERATOR=True, - SPLIT_BEFORE_CLOSING_BRACKET=True, - SPLIT_BEFORE_DICT_SET_GENERATOR=True, - SPLIT_BEFORE_DOT=False, - SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=False, - SPLIT_BEFORE_FIRST_ARGUMENT=False, - SPLIT_BEFORE_LOGICAL_OPERATOR=True, - SPLIT_BEFORE_NAMED_ASSIGNS=True, - SPLIT_COMPLEX_COMPREHENSION=False, - SPLIT_PENALTY_AFTER_OPENING_BRACKET=300, - SPLIT_PENALTY_AFTER_UNARY_OPERATOR=10000, - SPLIT_PENALTY_ARITHMETIC_OPERATOR=300, - SPLIT_PENALTY_BEFORE_IF_EXPR=0, - SPLIT_PENALTY_BITWISE_OPERATOR=300, - SPLIT_PENALTY_COMPREHENSION=80, - SPLIT_PENALTY_EXCESS_CHARACTER=7000, - SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=30, - SPLIT_PENALTY_IMPORT_NAMES=0, - SPLIT_PENALTY_LOGICAL_OPERATOR=300, - USE_TABS=False, - ) + """Create the PEP8 formatting style.""" + return dict( + ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT = True, + ALIGN_ASSIGNMENT = False, + NEW_ALIGNMENT_AFTER_COMMENTLINE = False, + ALLOW_MULTILINE_LAMBDAS = False, + ALLOW_MULTILINE_DICTIONARY_KEYS = False, + ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS = True, + ALLOW_SPLIT_BEFORE_DICT_VALUE = True, + ARITHMETIC_PRECEDENCE_INDICATION = False, + BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = True, 
+ BLANK_LINE_BEFORE_CLASS_DOCSTRING = False, + BLANK_LINE_BEFORE_MODULE_DOCSTRING = False, + BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION = 2, + BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES = 1, + COALESCE_BRACKETS = False, + COLUMN_LIMIT = 79, + CONTINUATION_ALIGN_STYLE = 'SPACE', + CONTINUATION_INDENT_WIDTH = 4, + DEDENT_CLOSING_BRACKETS = False, + INDENT_CLOSING_BRACKETS = False, + DISABLE_ENDING_COMMA_HEURISTIC = False, + EACH_DICT_ENTRY_ON_SEPARATE_LINE = True, + FORCE_MULTILINE_DICT = False, + I18N_COMMENT = '', + I18N_FUNCTION_CALL = '', + INDENT_DICTIONARY_VALUE = False, + INDENT_WIDTH = 4, + INDENT_BLANK_LINES = False, + JOIN_MULTIPLE_LINES = True, + NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS = set(), + SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True, + SPACE_INSIDE_BRACKETS = False, + SPACES_AROUND_POWER_OPERATOR = False, + SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN = False, + SPACES_AROUND_DICT_DELIMITERS = False, + SPACES_AROUND_LIST_DELIMITERS = False, + SPACES_AROUND_SUBSCRIPT_COLON = False, + SPACES_AROUND_TUPLE_DELIMITERS = False, + SPACES_BEFORE_COMMENT = 2, + SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED = False, + SPLIT_ALL_COMMA_SEPARATED_VALUES = False, + SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES = False, + SPLIT_BEFORE_ARITHMETIC_OPERATOR = False, + SPLIT_BEFORE_BITWISE_OPERATOR = True, + SPLIT_BEFORE_CLOSING_BRACKET = True, + SPLIT_BEFORE_DICT_SET_GENERATOR = True, + SPLIT_BEFORE_DOT = False, + SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = False, + SPLIT_BEFORE_FIRST_ARGUMENT = False, + SPLIT_BEFORE_LOGICAL_OPERATOR = True, + SPLIT_BEFORE_NAMED_ASSIGNS = True, + SPLIT_COMPLEX_COMPREHENSION = False, + SPLIT_PENALTY_AFTER_OPENING_BRACKET = 300, + SPLIT_PENALTY_AFTER_UNARY_OPERATOR = 10000, + SPLIT_PENALTY_ARITHMETIC_OPERATOR = 300, + SPLIT_PENALTY_BEFORE_IF_EXPR = 0, + SPLIT_PENALTY_BITWISE_OPERATOR = 300, + SPLIT_PENALTY_COMPREHENSION = 80, + SPLIT_PENALTY_EXCESS_CHARACTER = 7000, + SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = 30, + 
SPLIT_PENALTY_IMPORT_NAMES = 0, + SPLIT_PENALTY_LOGICAL_OPERATOR = 300, + USE_TABS = False, + ) def CreateGoogleStyle(): - """Create the Google formatting style.""" - style = CreatePEP8Style() - style['ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'] = False - style['COLUMN_LIMIT'] = 80 - style['INDENT_DICTIONARY_VALUE'] = True - style['INDENT_WIDTH'] = 4 - style['I18N_COMMENT'] = r'#\..*' - style['I18N_FUNCTION_CALL'] = ['N_', '_'] - style['JOIN_MULTIPLE_LINES'] = False - style['SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET'] = False - style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False - style['SPLIT_BEFORE_DICT_SET_GENERATOR'] = False - style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False - style['SPLIT_COMPLEX_COMPREHENSION'] = True - style['SPLIT_PENALTY_COMPREHENSION'] = 2100 - return style + """Create the Google formatting style.""" + style = CreatePEP8Style() + style[ 'ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT' ] = False + style[ 'COLUMN_LIMIT' ] = 80 + style[ 'INDENT_DICTIONARY_VALUE' ] = True + style[ 'INDENT_WIDTH' ] = 4 + style[ 'I18N_COMMENT' ] = r'#\..*' + style[ 'I18N_FUNCTION_CALL' ] = [ 'N_', '_' ] + style[ 'JOIN_MULTIPLE_LINES' ] = False + style[ 'SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET' ] = False + style[ 'SPLIT_BEFORE_BITWISE_OPERATOR' ] = False + style[ 'SPLIT_BEFORE_DICT_SET_GENERATOR' ] = False + style[ 'SPLIT_BEFORE_LOGICAL_OPERATOR' ] = False + style[ 'SPLIT_COMPLEX_COMPREHENSION' ] = True + style[ 'SPLIT_PENALTY_COMPREHENSION' ] = 2100 + return style def CreateYapfStyle(): - """Create the YAPF formatting style.""" - style = CreateGoogleStyle() - style['ALLOW_MULTILINE_DICTIONARY_KEYS'] = True - style['ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS'] = False - style['INDENT_WIDTH'] = 2 - style['SPLIT_BEFORE_BITWISE_OPERATOR'] = True - style['SPLIT_BEFORE_DOT'] = True - style['SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN'] = True - return style + """Create the YAPF formatting style.""" + style = CreateGoogleStyle() + style[ 
'ALLOW_MULTILINE_DICTIONARY_KEYS' ] = True + style[ 'ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS' ] = False + style[ 'INDENT_WIDTH' ] = 2 + style[ 'SPLIT_BEFORE_BITWISE_OPERATOR' ] = True + style[ 'SPLIT_BEFORE_DOT' ] = True + style[ 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN' ] = True + return style def CreateFacebookStyle(): - """Create the Facebook formatting style.""" - style = CreatePEP8Style() - style['ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'] = False - style['BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF'] = False - style['COLUMN_LIMIT'] = 80 - style['DEDENT_CLOSING_BRACKETS'] = True - style['INDENT_CLOSING_BRACKETS'] = False - style['INDENT_DICTIONARY_VALUE'] = True - style['JOIN_MULTIPLE_LINES'] = False - style['SPACES_BEFORE_COMMENT'] = 2 - style['SPLIT_PENALTY_AFTER_OPENING_BRACKET'] = 0 - style['SPLIT_PENALTY_BEFORE_IF_EXPR'] = 30 - style['SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT'] = 30 - style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False - style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False - return style + """Create the Facebook formatting style.""" + style = CreatePEP8Style() + style[ 'ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT' ] = False + style[ 'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF' ] = False + style[ 'COLUMN_LIMIT' ] = 80 + style[ 'DEDENT_CLOSING_BRACKETS' ] = True + style[ 'INDENT_CLOSING_BRACKETS' ] = False + style[ 'INDENT_DICTIONARY_VALUE' ] = True + style[ 'JOIN_MULTIPLE_LINES' ] = False + style[ 'SPACES_BEFORE_COMMENT' ] = 2 + style[ 'SPLIT_PENALTY_AFTER_OPENING_BRACKET' ] = 0 + style[ 'SPLIT_PENALTY_BEFORE_IF_EXPR' ] = 30 + style[ 'SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT' ] = 30 + style[ 'SPLIT_BEFORE_LOGICAL_OPERATOR' ] = False + style[ 'SPLIT_BEFORE_BITWISE_OPERATOR' ] = False + return style _STYLE_NAME_TO_FACTORY = dict( - pep8=CreatePEP8Style, - google=CreateGoogleStyle, - facebook=CreateFacebookStyle, - yapf=CreateYapfStyle, + pep8 = CreatePEP8Style, + google = CreateGoogleStyle, + facebook = CreateFacebookStyle, + yapf = CreateYapfStyle, ) 
_DEFAULT_STYLE_TO_FACTORY = [ - (CreateFacebookStyle(), CreateFacebookStyle), - (CreateGoogleStyle(), CreateGoogleStyle), - (CreatePEP8Style(), CreatePEP8Style), - (CreateYapfStyle(), CreateYapfStyle), + ( CreateFacebookStyle(), CreateFacebookStyle ), + ( CreateGoogleStyle(), CreateGoogleStyle ), + ( CreatePEP8Style(), CreatePEP8Style ), + ( CreateYapfStyle(), CreateYapfStyle ), ] -def _GetStyleFactory(style): - for def_style, factory in _DEFAULT_STYLE_TO_FACTORY: - if style == def_style: - return factory - return None +def _GetStyleFactory( style ): + for def_style, factory in _DEFAULT_STYLE_TO_FACTORY: + if style == def_style: + return factory + return None -def _ContinuationAlignStyleStringConverter(s): - """Option value converter for a continuation align style string.""" - accepted_styles = ('SPACE', 'FIXED', 'VALIGN-RIGHT') - if s: - r = s.strip('"\'').replace('_', '-').upper() - if r not in accepted_styles: - raise ValueError('unknown continuation align style: %r' % (s,)) - else: - r = accepted_styles[0] - return r +def _ContinuationAlignStyleStringConverter( s ): + """Option value converter for a continuation align style string.""" + accepted_styles = ( 'SPACE', 'FIXED', 'VALIGN-RIGHT' ) + if s: + r = s.strip( '"\'' ).replace( '_', '-' ).upper() + if r not in accepted_styles: + raise ValueError( 'unknown continuation align style: %r' % ( s,) ) + else: + r = accepted_styles[ 0 ] + return r -def _StringListConverter(s): - """Option value converter for a comma-separated list of strings.""" - return [part.strip() for part in s.split(',')] +def _StringListConverter( s ): + """Option value converter for a comma-separated list of strings.""" + return [ part.strip() for part in s.split( ',' ) ] -def _StringSetConverter(s): - """Option value converter for a comma-separated set of strings.""" - if len(s) > 2 and s[0] in '"\'': - s = s[1:-1] - return {part.strip() for part in s.split(',')} +def _StringSetConverter( s ): + """Option value converter for a comma-separated 
set of strings.""" + if len( s ) > 2 and s[ 0 ] in '"\'': + s = s[ 1 :-1 ] + return { part.strip() for part in s.split( ',' ) } -def _BoolConverter(s): - """Option value converter for a boolean.""" - return py3compat.CONFIGPARSER_BOOLEAN_STATES[s.lower()] +def _BoolConverter( s ): + """Option value converter for a boolean.""" + return py3compat.CONFIGPARSER_BOOLEAN_STATES[ s.lower() ] -def _IntListConverter(s): - """Option value converter for a comma-separated list of integers.""" - s = s.strip() - if s.startswith('[') and s.endswith(']'): - s = s[1:-1] +def _IntListConverter( s ): + """Option value converter for a comma-separated list of integers.""" + s = s.strip() + if s.startswith( '[' ) and s.endswith( ']' ): + s = s[ 1 :-1 ] - return [int(part.strip()) for part in s.split(',') if part.strip()] + return [ int( part.strip() ) for part in s.split( ',' ) if part.strip() ] -def _IntOrIntListConverter(s): - """Option value converter for an integer or list of integers.""" - if len(s) > 2 and s[0] in '"\'': - s = s[1:-1] - return _IntListConverter(s) if ',' in s else int(s) +def _IntOrIntListConverter( s ): + """Option value converter for an integer or list of integers.""" + if len( s ) > 2 and s[ 0 ] in '"\'': + s = s[ 1 :-1 ] + return _IntListConverter( s ) if ',' in s else int( s ) # Different style options need to have their values interpreted differently when @@ -626,75 +674,73 @@ def _IntOrIntListConverter(s): # # Note: this dict has to map all the supported style options. 
_STYLE_OPTION_VALUE_CONVERTER = dict( - ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=_BoolConverter, - ALIGN_ASSIGNMENT=_BoolConverter, - ALIGN_DICT_COLON=_BoolConverter, - NEW_ALIGNMENT_AFTER_COMMENTLINE=_BoolConverter, - ALIGN_ARGUMENT_ASSIGNMENT=_BoolConverter, - ALLOW_MULTILINE_LAMBDAS=_BoolConverter, - ALLOW_MULTILINE_DICTIONARY_KEYS=_BoolConverter, - ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=_BoolConverter, - ALLOW_SPLIT_BEFORE_DICT_VALUE=_BoolConverter, - ARITHMETIC_PRECEDENCE_INDICATION=_BoolConverter, - BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=_BoolConverter, - BLANK_LINE_BEFORE_CLASS_DOCSTRING=_BoolConverter, - BLANK_LINE_BEFORE_MODULE_DOCSTRING=_BoolConverter, - BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=int, - BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES=int, - COALESCE_BRACKETS=_BoolConverter, - COLUMN_LIMIT=int, - CONTINUATION_ALIGN_STYLE=_ContinuationAlignStyleStringConverter, - CONTINUATION_INDENT_WIDTH=int, - DEDENT_CLOSING_BRACKETS=_BoolConverter, - INDENT_CLOSING_BRACKETS=_BoolConverter, - DISABLE_ENDING_COMMA_HEURISTIC=_BoolConverter, - EACH_DICT_ENTRY_ON_SEPARATE_LINE=_BoolConverter, - FORCE_MULTILINE_DICT=_BoolConverter, - I18N_COMMENT=str, - I18N_FUNCTION_CALL=_StringListConverter, - INDENT_DICTIONARY_VALUE=_BoolConverter, - INDENT_WIDTH=int, - INDENT_BLANK_LINES=_BoolConverter, - JOIN_MULTIPLE_LINES=_BoolConverter, - NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=_StringSetConverter, - SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=_BoolConverter, - SPACE_INSIDE_BRACKETS=_BoolConverter, - SPACES_AROUND_POWER_OPERATOR=_BoolConverter, - SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=_BoolConverter, - SPACES_AROUND_DICT_DELIMITERS=_BoolConverter, - SPACES_AROUND_LIST_DELIMITERS=_BoolConverter, - SPACES_AROUND_SUBSCRIPT_COLON=_BoolConverter, - SPACES_AROUND_TUPLE_DELIMITERS=_BoolConverter, - SPACES_BEFORE_COMMENT=_IntOrIntListConverter, - SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=_BoolConverter, - SPLIT_ALL_COMMA_SEPARATED_VALUES=_BoolConverter, - 
SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES=_BoolConverter, - SPLIT_BEFORE_ARITHMETIC_OPERATOR=_BoolConverter, - SPLIT_BEFORE_BITWISE_OPERATOR=_BoolConverter, - SPLIT_BEFORE_CLOSING_BRACKET=_BoolConverter, - SPLIT_BEFORE_DICT_SET_GENERATOR=_BoolConverter, - SPLIT_BEFORE_DOT=_BoolConverter, - SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=_BoolConverter, - SPLIT_BEFORE_FIRST_ARGUMENT=_BoolConverter, - SPLIT_BEFORE_LOGICAL_OPERATOR=_BoolConverter, - SPLIT_BEFORE_NAMED_ASSIGNS=_BoolConverter, - SPLIT_COMPLEX_COMPREHENSION=_BoolConverter, - SPLIT_PENALTY_AFTER_OPENING_BRACKET=int, - SPLIT_PENALTY_AFTER_UNARY_OPERATOR=int, - SPLIT_PENALTY_ARITHMETIC_OPERATOR=int, - SPLIT_PENALTY_BEFORE_IF_EXPR=int, - SPLIT_PENALTY_BITWISE_OPERATOR=int, - SPLIT_PENALTY_COMPREHENSION=int, - SPLIT_PENALTY_EXCESS_CHARACTER=int, - SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=int, - SPLIT_PENALTY_IMPORT_NAMES=int, - SPLIT_PENALTY_LOGICAL_OPERATOR=int, - USE_TABS=_BoolConverter, + ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT = _BoolConverter, + ALIGN_ASSIGNMENT = _BoolConverter, + NEW_ALIGNMENT_AFTER_COMMENTLINE = _BoolConverter, + ALLOW_MULTILINE_LAMBDAS = _BoolConverter, + ALLOW_MULTILINE_DICTIONARY_KEYS = _BoolConverter, + ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS = _BoolConverter, + ALLOW_SPLIT_BEFORE_DICT_VALUE = _BoolConverter, + ARITHMETIC_PRECEDENCE_INDICATION = _BoolConverter, + BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = _BoolConverter, + BLANK_LINE_BEFORE_CLASS_DOCSTRING = _BoolConverter, + BLANK_LINE_BEFORE_MODULE_DOCSTRING = _BoolConverter, + BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION = int, + BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES = int, + COALESCE_BRACKETS = _BoolConverter, + COLUMN_LIMIT = int, + CONTINUATION_ALIGN_STYLE = _ContinuationAlignStyleStringConverter, + CONTINUATION_INDENT_WIDTH = int, + DEDENT_CLOSING_BRACKETS = _BoolConverter, + INDENT_CLOSING_BRACKETS = _BoolConverter, + DISABLE_ENDING_COMMA_HEURISTIC = _BoolConverter, + EACH_DICT_ENTRY_ON_SEPARATE_LINE = _BoolConverter, + 
FORCE_MULTILINE_DICT = _BoolConverter, + I18N_COMMENT = str, + I18N_FUNCTION_CALL = _StringListConverter, + INDENT_DICTIONARY_VALUE = _BoolConverter, + INDENT_WIDTH = int, + INDENT_BLANK_LINES = _BoolConverter, + JOIN_MULTIPLE_LINES = _BoolConverter, + NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS = _StringSetConverter, + SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = _BoolConverter, + SPACE_INSIDE_BRACKETS = _BoolConverter, + SPACES_AROUND_POWER_OPERATOR = _BoolConverter, + SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN = _BoolConverter, + SPACES_AROUND_DICT_DELIMITERS = _BoolConverter, + SPACES_AROUND_LIST_DELIMITERS = _BoolConverter, + SPACES_AROUND_SUBSCRIPT_COLON = _BoolConverter, + SPACES_AROUND_TUPLE_DELIMITERS = _BoolConverter, + SPACES_BEFORE_COMMENT = _IntOrIntListConverter, + SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED = _BoolConverter, + SPLIT_ALL_COMMA_SEPARATED_VALUES = _BoolConverter, + SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES = _BoolConverter, + SPLIT_BEFORE_ARITHMETIC_OPERATOR = _BoolConverter, + SPLIT_BEFORE_BITWISE_OPERATOR = _BoolConverter, + SPLIT_BEFORE_CLOSING_BRACKET = _BoolConverter, + SPLIT_BEFORE_DICT_SET_GENERATOR = _BoolConverter, + SPLIT_BEFORE_DOT = _BoolConverter, + SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = _BoolConverter, + SPLIT_BEFORE_FIRST_ARGUMENT = _BoolConverter, + SPLIT_BEFORE_LOGICAL_OPERATOR = _BoolConverter, + SPLIT_BEFORE_NAMED_ASSIGNS = _BoolConverter, + SPLIT_COMPLEX_COMPREHENSION = _BoolConverter, + SPLIT_PENALTY_AFTER_OPENING_BRACKET = int, + SPLIT_PENALTY_AFTER_UNARY_OPERATOR = int, + SPLIT_PENALTY_ARITHMETIC_OPERATOR = int, + SPLIT_PENALTY_BEFORE_IF_EXPR = int, + SPLIT_PENALTY_BITWISE_OPERATOR = int, + SPLIT_PENALTY_COMPREHENSION = int, + SPLIT_PENALTY_EXCESS_CHARACTER = int, + SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = int, + SPLIT_PENALTY_IMPORT_NAMES = int, + SPLIT_PENALTY_LOGICAL_OPERATOR = int, + USE_TABS = _BoolConverter, ) -def CreateStyleFromConfig(style_config): - """Create a style dict from the given config. 
+def CreateStyleFromConfig( style_config ): + """Create a style dict from the given config. Arguments: style_config: either a style name or a file name. The file is expected to @@ -710,107 +756,108 @@ def CreateStyleFromConfig(style_config): StyleConfigError: if an unknown style option was encountered. """ - def GlobalStyles(): - for style, _ in _DEFAULT_STYLE_TO_FACTORY: - yield style - - def_style = False - if style_config is None: - for style in GlobalStyles(): - if _style == style: - def_style = True - break - if not def_style: - return _style - return _GLOBAL_STYLE_FACTORY() - - if isinstance(style_config, dict): - config = _CreateConfigParserFromConfigDict(style_config) - elif isinstance(style_config, py3compat.basestring): - style_factory = _STYLE_NAME_TO_FACTORY.get(style_config.lower()) - if style_factory is not None: - return style_factory() - if style_config.startswith('{'): - # Most likely a style specification from the command line. - config = _CreateConfigParserFromConfigString(style_config) - else: - # Unknown config name: assume it's a file name then. 
- config = _CreateConfigParserFromConfigFile(style_config) - return _CreateStyleFromConfigParser(config) - - -def _CreateConfigParserFromConfigDict(config_dict): - config = py3compat.ConfigParser() - config.add_section('style') - for key, value in config_dict.items(): - config.set('style', key, str(value)) - return config - - -def _CreateConfigParserFromConfigString(config_string): - """Given a config string from the command line, return a config parser.""" - if config_string[0] != '{' or config_string[-1] != '}': - raise StyleConfigError( - "Invalid style dict syntax: '{}'.".format(config_string)) - config = py3compat.ConfigParser() - config.add_section('style') - for key, value, _ in re.findall( - r'([a-zA-Z0-9_]+)\s*[:=]\s*' - r'(?:' - r'((?P[\'"]).*?(?P=quote)|' - r'[a-zA-Z0-9_]+)' - r')', config_string): # yapf: disable - config.set('style', key, value) - return config - - -def _CreateConfigParserFromConfigFile(config_filename): - """Read the file and return a ConfigParser object.""" - if not os.path.exists(config_filename): - # Provide a more meaningful error here. - raise StyleConfigError( - '"{0}" is not a valid style or file path'.format(config_filename)) - with open(config_filename) as style_file: + def GlobalStyles(): + for style, _ in _DEFAULT_STYLE_TO_FACTORY: + yield style + + def_style = False + if style_config is None: + for style in GlobalStyles(): + if _style == style: + def_style = True + break + if not def_style: + return _style + return _GLOBAL_STYLE_FACTORY() + + if isinstance( style_config, dict ): + config = _CreateConfigParserFromConfigDict( style_config ) + elif isinstance( style_config, py3compat.basestring ): + style_factory = _STYLE_NAME_TO_FACTORY.get( style_config.lower() ) + if style_factory is not None: + return style_factory() + if style_config.startswith( '{' ): + # Most likely a style specification from the command line. 
+ config = _CreateConfigParserFromConfigString( style_config ) + else: + # Unknown config name: assume it's a file name then. + config = _CreateConfigParserFromConfigFile( style_config ) + return _CreateStyleFromConfigParser( config ) + + +def _CreateConfigParserFromConfigDict( config_dict ): config = py3compat.ConfigParser() - if config_filename.endswith(PYPROJECT_TOML): - try: - import toml - except ImportError: - raise errors.YapfError( - "toml package is needed for using pyproject.toml as a " - "configuration file") - - pyproject_toml = toml.load(style_file) - style_dict = pyproject_toml.get("tool", {}).get("yapf", None) - if style_dict is None: - raise StyleConfigError( - 'Unable to find section [tool.yapf] in {0}'.format(config_filename)) - config.add_section('style') - for k, v in style_dict.items(): - config.set('style', k, str(v)) - return config - - config.read_file(style_file) - if config_filename.endswith(SETUP_CONFIG): - if not config.has_section('yapf'): - raise StyleConfigError( - 'Unable to find section [yapf] in {0}'.format(config_filename)) - return config + config.add_section( 'style' ) + for key, value in config_dict.items(): + config.set( 'style', key, str( value ) ) + return config - if config_filename.endswith(LOCAL_STYLE): - if not config.has_section('style'): - raise StyleConfigError( - 'Unable to find section [style] in {0}'.format(config_filename)) - return config - if not config.has_section('style'): - raise StyleConfigError( - 'Unable to find section [style] in {0}'.format(config_filename)) +def _CreateConfigParserFromConfigString( config_string ): + """Given a config string from the command line, return a config parser.""" + if config_string[ 0 ] != '{' or config_string[ -1 ] != '}': + raise StyleConfigError( + "Invalid style dict syntax: '{}'.".format( config_string ) ) + config = py3compat.ConfigParser() + config.add_section( 'style' ) + for key, value, _ in re.findall( + r'([a-zA-Z0-9_]+)\s*[:=]\s*' + r'(?:' + 
r'((?P[\'"]).*?(?P=quote)|' + r'[a-zA-Z0-9_]+)' + r')', config_string): # yapf: disable + config.set( 'style', key, value ) return config -def _CreateStyleFromConfigParser(config): - """Create a style dict from a configuration file. +def _CreateConfigParserFromConfigFile( config_filename ): + """Read the file and return a ConfigParser object.""" + if not os.path.exists( config_filename ): + # Provide a more meaningful error here. + raise StyleConfigError( + '"{0}" is not a valid style or file path'.format( config_filename ) ) + with open( config_filename ) as style_file: + config = py3compat.ConfigParser() + if config_filename.endswith( PYPROJECT_TOML ): + try: + import toml + except ImportError: + raise errors.YapfError( + "toml package is needed for using pyproject.toml as a " + "configuration file" ) + + pyproject_toml = toml.load( style_file ) + style_dict = pyproject_toml.get( "tool", {} ).get( "yapf", None ) + if style_dict is None: + raise StyleConfigError( + 'Unable to find section [tool.yapf] in {0}'.format( + config_filename ) ) + config.add_section( 'style' ) + for k, v in style_dict.items(): + config.set( 'style', k, str( v ) ) + return config + + config.read_file( style_file ) + if config_filename.endswith( SETUP_CONFIG ): + if not config.has_section( 'yapf' ): + raise StyleConfigError( + 'Unable to find section [yapf] in {0}'.format( config_filename ) ) + return config + + if config_filename.endswith( LOCAL_STYLE ): + if not config.has_section( 'style' ): + raise StyleConfigError( + 'Unable to find section [style] in {0}'.format( config_filename ) ) + return config + + if not config.has_section( 'style' ): + raise StyleConfigError( + 'Unable to find section [style] in {0}'.format( config_filename ) ) + return config + + +def _CreateStyleFromConfigParser( config ): + """Create a style dict from a configuration file. Arguments: config: a ConfigParser object. 
@@ -821,45 +868,45 @@ def _CreateStyleFromConfigParser(config): Raises: StyleConfigError: if an unknown style option was encountered. """ - # Initialize the base style. - section = 'yapf' if config.has_section('yapf') else 'style' - if config.has_option('style', 'based_on_style'): - based_on = config.get('style', 'based_on_style').lower() - base_style = _STYLE_NAME_TO_FACTORY[based_on]() - elif config.has_option('yapf', 'based_on_style'): - based_on = config.get('yapf', 'based_on_style').lower() - base_style = _STYLE_NAME_TO_FACTORY[based_on]() - else: - base_style = _GLOBAL_STYLE_FACTORY() - - # Read all options specified in the file and update the style. - for option, value in config.items(section): - if option.lower() == 'based_on_style': - # Now skip this one - we've already handled it and it's not one of the - # recognized style options. - continue - option = option.upper() - if option not in _STYLE_OPTION_VALUE_CONVERTER: - raise StyleConfigError('Unknown style option "{0}"'.format(option)) - try: - base_style[option] = _STYLE_OPTION_VALUE_CONVERTER[option](value) - except ValueError: - raise StyleConfigError("'{}' is not a valid setting for {}.".format( - value, option)) - return base_style + # Initialize the base style. + section = 'yapf' if config.has_section( 'yapf' ) else 'style' + if config.has_option( 'style', 'based_on_style' ): + based_on = config.get( 'style', 'based_on_style' ).lower() + base_style = _STYLE_NAME_TO_FACTORY[ based_on ]() + elif config.has_option( 'yapf', 'based_on_style' ): + based_on = config.get( 'yapf', 'based_on_style' ).lower() + base_style = _STYLE_NAME_TO_FACTORY[ based_on ]() + else: + base_style = _GLOBAL_STYLE_FACTORY() + + # Read all options specified in the file and update the style. + for option, value in config.items( section ): + if option.lower() == 'based_on_style': + # Now skip this one - we've already handled it and it's not one of the + # recognized style options. 
+ continue + option = option.upper() + if option not in _STYLE_OPTION_VALUE_CONVERTER: + raise StyleConfigError( 'Unknown style option "{0}"'.format( option ) ) + try: + base_style[ option ] = _STYLE_OPTION_VALUE_CONVERTER[ option ]( value ) + except ValueError: + raise StyleConfigError( + "'{}' is not a valid setting for {}.".format( value, option ) ) + return base_style # The default style - used if yapf is not invoked without specifically # requesting a formatting style. -DEFAULT_STYLE = 'pep8' +DEFAULT_STYLE = 'pep8' DEFAULT_STYLE_FACTORY = CreatePEP8Style _GLOBAL_STYLE_FACTORY = CreatePEP8Style # The name of the file to use for global style definition. GLOBAL_STYLE = ( os.path.join( - os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), 'yapf', - 'style')) + os.getenv( 'XDG_CONFIG_HOME' ) or os.path.expanduser( '~/.config' ), 'yapf', + 'style' ) ) # The name of the file to use for directory-local style definition. LOCAL_STYLE = '.style.yapf' @@ -876,4 +923,4 @@ def _CreateStyleFromConfigParser(config): # Refactor this so that the style is passed around through yapf rather than # being global. _style = None -SetGlobalStyle(_GLOBAL_STYLE_FACTORY()) +SetGlobalStyle( _GLOBAL_STYLE_FACTORY() ) diff --git a/yapf/yapflib/subtypes.py b/yapf/yapflib/subtypes.py index b4b7efe75..e675c41c1 100644 --- a/yapf/yapflib/subtypes.py +++ b/yapf/yapflib/subtypes.py @@ -13,28 +13,28 @@ # limitations under the License. 
"""Token subtypes used to improve formatting.""" -NONE = 0 -UNARY_OPERATOR = 1 -BINARY_OPERATOR = 2 -SUBSCRIPT_COLON = 3 -SUBSCRIPT_BRACKET = 4 -DEFAULT_OR_NAMED_ASSIGN = 5 +NONE = 0 +UNARY_OPERATOR = 1 +BINARY_OPERATOR = 2 +SUBSCRIPT_COLON = 3 +SUBSCRIPT_BRACKET = 4 +DEFAULT_OR_NAMED_ASSIGN = 5 DEFAULT_OR_NAMED_ASSIGN_ARG_LIST = 6 -VARARGS_LIST = 7 -VARARGS_STAR = 8 -KWARGS_STAR_STAR = 9 -ASSIGN_OPERATOR = 10 -DICTIONARY_KEY = 11 -DICTIONARY_KEY_PART = 12 -DICTIONARY_VALUE = 13 -DICT_SET_GENERATOR = 14 -COMP_EXPR = 15 -COMP_FOR = 16 -COMP_IF = 17 -FUNC_DEF = 18 -DECORATOR = 19 -TYPED_NAME = 20 -TYPED_NAME_ARG_LIST = 21 -SIMPLE_EXPRESSION = 22 -PARAMETER_START = 23 -PARAMETER_STOP = 24 +VARARGS_LIST = 7 +VARARGS_STAR = 8 +KWARGS_STAR_STAR = 9 +ASSIGN_OPERATOR = 10 +DICTIONARY_KEY = 11 +DICTIONARY_KEY_PART = 12 +DICTIONARY_VALUE = 13 +DICT_SET_GENERATOR = 14 +COMP_EXPR = 15 +COMP_FOR = 16 +COMP_IF = 17 +FUNC_DEF = 18 +DECORATOR = 19 +TYPED_NAME = 20 +TYPED_NAME_ARG_LIST = 21 +SIMPLE_EXPRESSION = 22 +PARAMETER_START = 23 +PARAMETER_STOP = 24 diff --git a/yapf/yapflib/verifier.py b/yapf/yapflib/verifier.py index bcbe6fb6b..80cfebc08 100644 --- a/yapf/yapflib/verifier.py +++ b/yapf/yapflib/verifier.py @@ -25,13 +25,13 @@ import textwrap -class InternalError(Exception): - """Internal error in verifying formatted code.""" - pass +class InternalError( Exception ): + """Internal error in verifying formatted code.""" + pass -def VerifyCode(code): - """Verify that the reformatted code is syntactically correct. +def VerifyCode( code ): + """Verify that the reformatted code is syntactically correct. Arguments: code: (unicode) The reformatted code snippet. @@ -39,55 +39,57 @@ def VerifyCode(code): Raises: SyntaxError if the code was reformatted incorrectly. 
""" - try: - compile(textwrap.dedent(code).encode('UTF-8'), '', 'exec') - except SyntaxError: try: - ast.parse(textwrap.dedent(code.lstrip('\n')).lstrip(), '', 'exec') + compile( textwrap.dedent( code ).encode( 'UTF-8' ), '', 'exec' ) except SyntaxError: - try: - normalized_code = _NormalizeCode(code) - compile(normalized_code.encode('UTF-8'), '', 'exec') - except SyntaxError: - raise InternalError(sys.exc_info()[1]) + try: + ast.parse( + textwrap.dedent( code.lstrip( '\n' ) ).lstrip(), '', 'exec' ) + except SyntaxError: + try: + normalized_code = _NormalizeCode( code ) + compile( normalized_code.encode( 'UTF-8' ), '', 'exec' ) + except SyntaxError: + raise InternalError( sys.exc_info()[ 1 ] ) -def _NormalizeCode(code): - """Make sure that the code snippet is compilable.""" - code = textwrap.dedent(code.lstrip('\n')).lstrip() +def _NormalizeCode( code ): + """Make sure that the code snippet is compilable.""" + code = textwrap.dedent( code.lstrip( '\n' ) ).lstrip() - # Split the code to lines and get rid of all leading full-comment lines as - # they can mess up the normalization attempt. - lines = code.split('\n') - i = 0 - for i, line in enumerate(lines): - line = line.strip() - if line and not line.startswith('#'): - break - code = '\n'.join(lines[i:]) + '\n' + # Split the code to lines and get rid of all leading full-comment lines as + # they can mess up the normalization attempt. + lines = code.split( '\n' ) + i = 0 + for i, line in enumerate( lines ): + line = line.strip() + if line and not line.startswith( '#' ): + break + code = '\n'.join( lines[ i : ] ) + '\n' - if re.match(r'(if|while|for|with|def|class|async|await)\b', code): - code += '\n pass' - elif re.match(r'(elif|else)\b', code): - try: - try_code = 'if True:\n pass\n' + code + '\n pass' - ast.parse( - textwrap.dedent(try_code.lstrip('\n')).lstrip(), '', 'exec') - code = try_code - except SyntaxError: - # The assumption here is that the code is on a single line. 
- code = 'if True: pass\n' + code - elif code.startswith('@'): - code += '\ndef _():\n pass' - elif re.match(r'try\b', code): - code += '\n pass\nexcept:\n pass' - elif re.match(r'(except|finally)\b', code): - code = 'try:\n pass\n' + code + '\n pass' - elif re.match(r'(return|yield)\b', code): - code = 'def _():\n ' + code - elif re.match(r'(continue|break)\b', code): - code = 'while True:\n ' + code - elif re.match(r'print\b', code): - code = 'from __future__ import print_function\n' + code + if re.match( r'(if|while|for|with|def|class|async|await)\b', code ): + code += '\n pass' + elif re.match( r'(elif|else)\b', code ): + try: + try_code = 'if True:\n pass\n' + code + '\n pass' + ast.parse( + textwrap.dedent( try_code.lstrip( '\n' ) ).lstrip(), '', + 'exec' ) + code = try_code + except SyntaxError: + # The assumption here is that the code is on a single line. + code = 'if True: pass\n' + code + elif code.startswith( '@' ): + code += '\ndef _():\n pass' + elif re.match( r'try\b', code ): + code += '\n pass\nexcept:\n pass' + elif re.match( r'(except|finally)\b', code ): + code = 'try:\n pass\n' + code + '\n pass' + elif re.match( r'(return|yield)\b', code ): + code = 'def _():\n ' + code + elif re.match( r'(continue|break)\b', code ): + code = 'while True:\n ' + code + elif re.match( r'print\b', code ): + code = 'from __future__ import print_function\n' + code - return code + '\n' + return code + '\n' diff --git a/yapf/yapflib/yapf_api.py b/yapf/yapflib/yapf_api.py index c17451434..e8ae26e87 100644 --- a/yapf/yapflib/yapf_api.py +++ b/yapf/yapflib/yapf_api.py @@ -51,14 +51,15 @@ from yapf.yapflib import style -def FormatFile(filename, - style_config=None, - lines=None, - print_diff=False, - verify=False, - in_place=False, - logger=None): - """Format a single Python file and return the formatted code. 
+def FormatFile( + filename, + style_config = None, + lines = None, + print_diff = False, + verify = False, + in_place = False, + logger = None ): + """Format a single Python file and return the formatted code. Arguments: filename: (unicode) The file to reformat. @@ -84,33 +85,33 @@ def FormatFile(filename, IOError: raised if there was an error reading the file. ValueError: raised if in_place and print_diff are both specified. """ - _CheckPythonVersion() - - if in_place and print_diff: - raise ValueError('Cannot pass both in_place and print_diff.') - - original_source, newline, encoding = ReadFile(filename, logger) - reformatted_source, changed = FormatCode( - original_source, - style_config=style_config, - filename=filename, - lines=lines, - print_diff=print_diff, - verify=verify) - if reformatted_source.rstrip('\n'): - lines = reformatted_source.rstrip('\n').split('\n') - reformatted_source = newline.join(iter(lines)) + newline - if in_place: - if original_source and original_source != reformatted_source: - file_resources.WriteReformattedCode(filename, reformatted_source, - encoding, in_place) - return None, encoding, changed - - return reformatted_source, encoding, changed - - -def FormatTree(tree, style_config=None, lines=None, verify=False): - """Format a parsed lib2to3 pytree. + _CheckPythonVersion() + + if in_place and print_diff: + raise ValueError( 'Cannot pass both in_place and print_diff.' 
) + + original_source, newline, encoding = ReadFile( filename, logger ) + reformatted_source, changed = FormatCode( + original_source, + style_config = style_config, + filename = filename, + lines = lines, + print_diff = print_diff, + verify = verify ) + if reformatted_source.rstrip( '\n' ): + lines = reformatted_source.rstrip( '\n' ).split( '\n' ) + reformatted_source = newline.join( iter( lines ) ) + newline + if in_place: + if original_source and original_source != reformatted_source: + file_resources.WriteReformattedCode( + filename, reformatted_source, encoding, in_place ) + return None, encoding, changed + + return reformatted_source, encoding, changed + + +def FormatTree( tree, style_config = None, lines = None, verify = False ): + """Format a parsed lib2to3 pytree. This provides an alternative entry point to YAPF. @@ -128,33 +129,34 @@ def FormatTree(tree, style_config=None, lines=None, verify=False): Returns: The source formatted according to the given formatting style. """ - _CheckPythonVersion() - style.SetGlobalStyle(style.CreateStyleFromConfig(style_config)) - - # Run passes on the tree, modifying it in place. - comment_splicer.SpliceComments(tree) - continuation_splicer.SpliceContinuations(tree) - subtype_assigner.AssignSubtypes(tree) - identify_container.IdentifyContainers(tree) - split_penalty.ComputeSplitPenalties(tree) - blank_line_calculator.CalculateBlankLines(tree) - - llines = pytree_unwrapper.UnwrapPyTree(tree) - for lline in llines: - lline.CalculateFormattingInformation() - - lines = _LineRangesToSet(lines) - _MarkLinesToFormat(llines, lines) - return reformatter.Reformat(_SplitSemicolons(llines), verify, lines) - - -def FormatCode(unformatted_source, - filename='', - style_config=None, - lines=None, - print_diff=False, - verify=False): - """Format a string of Python code. + _CheckPythonVersion() + style.SetGlobalStyle( style.CreateStyleFromConfig( style_config ) ) + + # Run passes on the tree, modifying it in place. 
+ comment_splicer.SpliceComments( tree ) + continuation_splicer.SpliceContinuations( tree ) + subtype_assigner.AssignSubtypes( tree ) + identify_container.IdentifyContainers( tree ) + split_penalty.ComputeSplitPenalties( tree ) + blank_line_calculator.CalculateBlankLines( tree ) + + llines = pytree_unwrapper.UnwrapPyTree( tree ) + for lline in llines: + lline.CalculateFormattingInformation() + + lines = _LineRangesToSet( lines ) + _MarkLinesToFormat( llines, lines ) + return reformatter.Reformat( _SplitSemicolons( llines ), verify, lines ) + + +def FormatCode( + unformatted_source, + filename = '', + style_config = None, + lines = None, + print_diff = False, + verify = False ): + """Format a string of Python code. This provides an alternative entry point to YAPF. @@ -176,39 +178,39 @@ def FormatCode(unformatted_source, Tuple of (reformatted_source, changed). reformatted_source conforms to the desired formatting style. changed is True if the source changed. """ - try: - tree = pytree_utils.ParseCodeToTree(unformatted_source) - except Exception as e: - e.filename = filename - raise errors.YapfError(errors.FormatErrorMsg(e)) + try: + tree = pytree_utils.ParseCodeToTree( unformatted_source ) + except Exception as e: + e.filename = filename + raise errors.YapfError( errors.FormatErrorMsg( e ) ) - reformatted_source = FormatTree( - tree, style_config=style_config, lines=lines, verify=verify) + reformatted_source = FormatTree( + tree, style_config = style_config, lines = lines, verify = verify ) - if unformatted_source == reformatted_source: - return '' if print_diff else reformatted_source, False + if unformatted_source == reformatted_source: + return '' if print_diff else reformatted_source, False - code_diff = _GetUnifiedDiff( - unformatted_source, reformatted_source, filename=filename) + code_diff = _GetUnifiedDiff( + unformatted_source, reformatted_source, filename = filename ) - if print_diff: - return code_diff, code_diff.strip() != '' # pylint: 
disable=g-explicit-bool-comparison # noqa + if print_diff: + return code_diff, code_diff.strip() != '' # pylint: disable=g-explicit-bool-comparison # noqa - return reformatted_source, True + return reformatted_source, True -def _CheckPythonVersion(): # pragma: no cover - errmsg = 'yapf is only supported for Python 2.7 or 3.6+' - if sys.version_info[0] == 2: - if sys.version_info[1] < 7: - raise RuntimeError(errmsg) - elif sys.version_info[0] == 3: - if sys.version_info[1] < 6: - raise RuntimeError(errmsg) +def _CheckPythonVersion(): # pragma: no cover + errmsg = 'yapf is only supported for Python 2.7 or 3.6+' + if sys.version_info[ 0 ] == 2: + if sys.version_info[ 1 ] < 7: + raise RuntimeError( errmsg ) + elif sys.version_info[ 0 ] == 3: + if sys.version_info[ 1 ] < 6: + raise RuntimeError( errmsg ) -def ReadFile(filename, logger=None): - """Read the contents of the file. +def ReadFile( filename, logger = None ): + """Read the contents of the file. An optional logger can be specified to emit messages to your favorite logging stream. If specified, then no exception is raised. This is external so that it @@ -224,99 +226,106 @@ def ReadFile(filename, logger=None): Raises: IOError: raised if there was an error reading the file. """ - try: - encoding = file_resources.FileEncoding(filename) - - # Preserves line endings. - with py3compat.open_with_encoding( - filename, mode='r', encoding=encoding, newline='') as fd: - lines = fd.readlines() - - line_ending = file_resources.LineEnding(lines) - source = '\n'.join(line.rstrip('\r\n') for line in lines) + '\n' - return source, line_ending, encoding - except IOError as e: # pragma: no cover - if logger: - logger(e) - e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3])) - raise - except UnicodeDecodeError as e: # pragma: no cover - if logger: - logger('Could not parse %s! 
Consider excluding this file with --exclude.', - filename) - logger(e) - e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3])) - raise - - -def _SplitSemicolons(lines): - res = [] - for line in lines: - res.extend(line.Split()) - return res + try: + encoding = file_resources.FileEncoding( filename ) + + # Preserves line endings. + with py3compat.open_with_encoding( filename, mode = 'r', encoding = encoding, + newline = '' ) as fd: + lines = fd.readlines() + + line_ending = file_resources.LineEnding( lines ) + source = '\n'.join( line.rstrip( '\r\n' ) for line in lines ) + '\n' + return source, line_ending, encoding + except IOError as e: # pragma: no cover + if logger: + logger( e ) + e.args = ( + e.args[ 0 ], + ( filename, e.args[ 1 ][ 1 ], e.args[ 1 ][ 2 ], e.args[ 1 ][ 3 ] ) ) + raise + except UnicodeDecodeError as e: # pragma: no cover + if logger: + logger( + 'Could not parse %s! Consider excluding this file with --exclude.', + filename ) + logger( e ) + e.args = ( + e.args[ 0 ], + ( filename, e.args[ 1 ][ 1 ], e.args[ 1 ][ 2 ], e.args[ 1 ][ 3 ] ) ) + raise + + +def _SplitSemicolons( lines ): + res = [] + for line in lines: + res.extend( line.Split() ) + return res DISABLE_PATTERN = r'^#.*\byapf:\s*disable\b' -ENABLE_PATTERN = r'^#.*\byapf:\s*enable\b' - - -def _LineRangesToSet(line_ranges): - """Return a set of lines in the range.""" - - if line_ranges is None: - return None - - line_set = set() - for low, high in sorted(line_ranges): - line_set.update(range(low, high + 1)) - - return line_set - - -def _MarkLinesToFormat(llines, lines): - """Skip sections of code that we shouldn't reformat.""" - if lines: - for uwline in llines: - uwline.disable = not lines.intersection( - range(uwline.lineno, uwline.last.lineno + 1)) - - # Now go through the lines and disable any lines explicitly marked as - # disabled. 
- index = 0 - while index < len(llines): - uwline = llines[index] - if uwline.is_comment: - if _DisableYAPF(uwline.first.value.strip()): +ENABLE_PATTERN = r'^#.*\byapf:\s*enable\b' + + +def _LineRangesToSet( line_ranges ): + """Return a set of lines in the range.""" + + if line_ranges is None: + return None + + line_set = set() + for low, high in sorted( line_ranges ): + line_set.update( range( low, high + 1 ) ) + + return line_set + + +def _MarkLinesToFormat( llines, lines ): + """Skip sections of code that we shouldn't reformat.""" + if lines: + for uwline in llines: + uwline.disable = not lines.intersection( + range( uwline.lineno, uwline.last.lineno + 1 ) ) + + # Now go through the lines and disable any lines explicitly marked as + # disabled. + index = 0 + while index < len( llines ): + uwline = llines[ index ] + if uwline.is_comment: + if _DisableYAPF( uwline.first.value.strip() ): + index += 1 + while index < len( llines ): + uwline = llines[ index ] + line = uwline.first.value.strip() + if uwline.is_comment and _EnableYAPF( line ): + if not _DisableYAPF( line ): + break + uwline.disable = True + index += 1 + elif re.search( DISABLE_PATTERN, uwline.last.value.strip(), re.IGNORECASE ): + uwline.disable = True index += 1 - while index < len(llines): - uwline = llines[index] - line = uwline.first.value.strip() - if uwline.is_comment and _EnableYAPF(line): - if not _DisableYAPF(line): - break - uwline.disable = True - index += 1 - elif re.search(DISABLE_PATTERN, uwline.last.value.strip(), re.IGNORECASE): - uwline.disable = True - index += 1 -def _DisableYAPF(line): - return (re.search(DISABLE_PATTERN, - line.split('\n')[0].strip(), re.IGNORECASE) or - re.search(DISABLE_PATTERN, - line.split('\n')[-1].strip(), re.IGNORECASE)) +def _DisableYAPF( line ): + return ( + re.search( DISABLE_PATTERN, + line.split( '\n' )[ 0 ].strip(), re.IGNORECASE ) or + re.search( DISABLE_PATTERN, + line.split( '\n' )[ -1 ].strip(), re.IGNORECASE ) ) -def _EnableYAPF(line): - return 
(re.search(ENABLE_PATTERN, - line.split('\n')[0].strip(), re.IGNORECASE) or - re.search(ENABLE_PATTERN, - line.split('\n')[-1].strip(), re.IGNORECASE)) +def _EnableYAPF( line ): + return ( + re.search( ENABLE_PATTERN, + line.split( '\n' )[ 0 ].strip(), re.IGNORECASE ) or + re.search( ENABLE_PATTERN, + line.split( '\n' )[ -1 ].strip(), re.IGNORECASE ) ) -def _GetUnifiedDiff(before, after, filename='code'): - """Get a unified diff of the changes. +def _GetUnifiedDiff( before, after, filename = 'code' ): + """Get a unified diff of the changes. Arguments: before: (unicode) The original source code. @@ -326,14 +335,14 @@ def _GetUnifiedDiff(before, after, filename='code'): Returns: The unified diff text. """ - before = before.splitlines() - after = after.splitlines() - return '\n'.join( - difflib.unified_diff( - before, - after, - filename, - filename, - '(original)', - '(reformatted)', - lineterm='')) + '\n' + before = before.splitlines() + after = after.splitlines() + return '\n'.join( + difflib.unified_diff( + before, + after, + filename, + filename, + '(original)', + '(reformatted)', + lineterm = '' ) ) + '\n' diff --git a/yapftests/format_token_test.py b/yapftests/format_token_test.py index 3bb1ce9f5..e73f1ea8a 100644 --- a/yapftests/format_token_test.py +++ b/yapftests/format_token_test.py @@ -90,37 +90,6 @@ def testIsMultilineString(self): pytree.Leaf(token.STRING, 'r"""hello"""'), 'STRING') self.assertTrue(tok.is_multiline_string) - #------------test argument names------------ - # fun( - # a='hello world', - # # comment, - # b='') - child1 = pytree.Leaf(token.NAME, 'a') - child2 = pytree.Leaf(token.EQUAL, '=') - child3 = pytree.Leaf(token.STRING, "'hello world'") - child4 = pytree.Leaf(token.COMMA, ',') - child5 = pytree.Leaf(token.COMMENT,'# comment') - child6 = pytree.Leaf(token.COMMA, ',') - child7 = pytree.Leaf(token.NAME, 'b') - child8 = pytree.Leaf(token.EQUAL, '=') - child9 = pytree.Leaf(token.STRING, "''") - node_type = 
pygram.python_grammar.symbol2number['arglist'] - node = pytree.Node(node_type, [child1, child2, child3, child4, child5, - child6, child7, child8,child9]) - subtype_assigner.AssignSubtypes(node) - - def testIsArgName(self, node=node): - tok = format_token.FormatToken(node.children[0],'NAME') - self.assertTrue(tok.is_argname) - - def testIsArgAssign(self, node=node): - tok = format_token.FormatToken(node.children[1], 'EQUAL') - self.assertTrue(tok.is_argassign) - - # test if comment inside is not argname - def testCommentNotIsArgName(self, node=node): - tok = format_token.FormatToken(node.children[4], 'COMMENT') - self.assertFalse(tok.is_argname) if __name__ == '__main__': unittest.main() diff --git a/yapftests/reformatter_basic_test.py b/yapftests/reformatter_basic_test.py index 0eeeefdce..798dbab9a 100644 --- a/yapftests/reformatter_basic_test.py +++ b/yapftests/reformatter_basic_test.py @@ -3191,8 +3191,9 @@ def testAlignAssignBlankLineInbetween(self): def testAlignAssignCommentLineInbetween(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_assignment: true,' - 'new_alignment_after_commentline = true}')) + style.CreateStyleFromConfig( + '{align_assignment: true,' + 'new_alignment_after_commentline = true}')) unformatted_code = textwrap.dedent("""\ val_first = 1 val_second += 2 @@ -3285,244 +3286,6 @@ def testAlignAssignWithOnlyOneAssignmentLine(self): finally: style.SetGlobalStyle(style.CreateYapfStyle()) - ########## for Align_ArgAssign()########### - def testAlignArgAssignTypedName(self): - try: - style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_argument_assignment: true,' - 'split_before_first_argument: true}')) - unformatted_code = textwrap.dedent("""\ -def f1( - self, - *, - app_name:str="", - server=None, - main_app=None, - db: Optional[NemDB]=None, - root: Optional[str]="", - conf: Optional[dict]={1, 2}, - ini_section: str="" -): pass -""") - expected_formatted_code = textwrap.dedent("""\ -def f1( - self, - *, - app_name: 
str = "", - server =None, - main_app =None, - db: Optional[NemDB] = None, - root: Optional[str] = "", - conf: Optional[dict] = {1, 2}, - ini_section: str = ""): - pass -""") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) - finally: - style.SetGlobalStyle(style.CreateYapfStyle()) - - # test both object/nested argument list with newlines and - # argument without assignment in between - def testAlignArgAssignNestedArglistInBetween(self): - try: - style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_argument_assignment: true}')) - unformatted_code = textwrap.dedent("""\ -arglist = test( - first_argument='', - second_argument=fun( - self, role=None, client_name='', client_id=1, very_long_long_long_long_long='' - ), - third_argument=3, - fourth_argument=4 -) -""") - expected_formatted_code = textwrap.dedent("""\ -arglist = test( - first_argument ='', - second_argument =fun( - self, - role =None, - client_name ='', - client_id =1, - very_long_long_long_long_long =''), - third_argument =3, - fourth_argument =4) -""") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) - finally: - style.SetGlobalStyle(style.CreateYapfStyle()) - - # start new alignment after comment line in between - def testAlignArgAssignCommentLineInBetween(self): - try: - style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_argument_assignment: true,' - 'new_alignment_after_commentline:true}')) - unformatted_code = textwrap.dedent("""\ -arglist = test( - client_id=0, - username_id=1, - # comment - user_name='xxxxxxxxxxxxxxxxxxxxx' -) -""") - expected_formatted_code = textwrap.dedent("""\ -arglist = test( - client_id =0, - username_id =1, - # comment - user_name ='xxxxxxxxxxxxxxxxxxxxx') -""") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - 
reformatter.Reformat(llines)) - finally: - style.SetGlobalStyle(style.CreateYapfStyle()) - - def testAlignArgAssignWithOnlyFirstArgOnNewline(self): - try: - style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_argument_assignment: true}')) - unformatted_code = textwrap.dedent("""\ -arglist = self.get_data_from_excelsheet( - client_id=0, username_id=1, user_name='xxxxxxxxxxxxxxxxxxxx') -""") - expected_formatted_code = unformatted_code - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) - finally: - style.SetGlobalStyle(style.CreateYapfStyle()) - - def testAlignArgAssignArgumentsCanFitInOneLine(self): - try: - style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_argument_assignment: true}')) - unformatted_code = textwrap.dedent("""\ -def function( - first_argument_xxxxxx =(0,), - second_argument =None -) -> None: - pass -""") - expected_formatted_code = textwrap.dedent("""\ -def function(first_argument_xxxxxx=(0,), second_argument=None) -> None: - pass -""") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) - finally: - style.SetGlobalStyle(style.CreateYapfStyle()) - - ########for align dictionary colons######### - def testAlignDictColonNestedDictInBetween(self): - try: - style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_dict_colon: true}')) - unformatted_code = textwrap.dedent("""\ -fields = [{"type": "text","required": True,"html": {"attr": 'style="width: 250px;" maxlength="30"',"page": 0,}, - "list" : [1, 2, 3, 4]}] -""") - expected_formatted_code = textwrap.dedent("""\ -fields = [{ - "type" : "text", - "required" : True, - "html" : { - "attr" : 'style="width: 250px;" maxlength="30"', - "page" : 0, - }, - "list" : [1, 2, 3, 4] -}] -""") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - 
reformatter.Reformat(llines)) - finally: - style.SetGlobalStyle(style.CreateYapfStyle()) - - def testAlignDictColonCommentLineInBetween(self): - try: - style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_dict_colon: true,' - 'new_alignment_after_commentline: true}')) - unformatted_code = textwrap.dedent("""\ -fields = [{ - "type": "text", - "required": True, - # comment - "list": [1, 2, 3, 4]}] -""") - expected_formatted_code = textwrap.dedent("""\ -fields = [{ - "type" : "text", - "required" : True, - # comment - "list" : [1, 2, 3, 4] -}] -""") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) - finally: - style.SetGlobalStyle(style.CreateYapfStyle()) - - def testAlignDictColonLargerExistingSpacesBefore(self): - try: - style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_dict_colon: true}')) - unformatted_code = textwrap.dedent("""\ -fields = [{ - "type" : "text", - "required" : True, - "list" : [1, 2, 3, 4], -}] -""") - expected_formatted_code = textwrap.dedent("""\ -fields = [{ - "type" : "text", - "required" : True, - "list" : [1, 2, 3, 4], -}] -""") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) - finally: - style.SetGlobalStyle(style.CreateYapfStyle()) - - def testAlignDictColonCommentAfterOpenBracket(self): - try: - style.SetGlobalStyle( - style.CreateStyleFromConfig('{align_dict_colon: true}')) - unformatted_code = textwrap.dedent("""\ -fields = [{ - # comment - "type": "text", - "required": True, - "list": [1, 2, 3, 4]}] -""") - expected_formatted_code = textwrap.dedent("""\ -fields = [{ - # comment - "type" : "text", - "required" : True, - "list" : [1, 2, 3, 4] -}] -""") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) - finally: - 
style.SetGlobalStyle(style.CreateYapfStyle()) - - - if __name__ == '__main__': unittest.main() diff --git a/yapftests/subtype_assigner_test.py b/yapftests/subtype_assigner_test.py index 97f9cd3ac..8616169c9 100644 --- a/yapftests/subtype_assigner_test.py +++ b/yapftests/subtype_assigner_test.py @@ -129,87 +129,6 @@ def testFuncCallWithDefaultAssign(self): ], ]) - #----test comment subtype inside the argument list---- - def testCommentSubtypesInsideArglist(self): - code = textwrap.dedent("""\ - foo( - # comment - x, - a='hello world') - """) - llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [ - [ - ('foo', {subtypes.NONE}), - ('(', {subtypes.NONE}), - ('# comment', {subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST}), - ('x', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST}), - (',', {subtypes.NONE}), - ('a', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST}), - ('=', {subtypes.DEFAULT_OR_NAMED_ASSIGN}), - ("'hello world'", {subtypes.NONE}), - (')', {subtypes.NONE}), - ], - ]) - - # ----test typed arguments subtypes------ - def testTypedArgumentsInsideArglist(self): - code = textwrap.dedent("""\ -def foo( - self, - preprocess: Callable[[str], str] = identity - ): pass -""") - llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [ - [ - ('def', {subtypes.NONE}), - ('foo', {subtypes.FUNC_DEF}), - ('(', {subtypes.NONE}), - ('self', {subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - subtypes.PARAMETER_START, - subtypes.PARAMETER_STOP}), - (',', {subtypes.NONE}), - ('preprocess', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - subtypes.PARAMETER_START, - subtypes.TYPED_NAME_ARG_LIST}), - (':', { - subtypes.TYPED_NAME, - subtypes.TYPED_NAME_ARG_LIST}), - ('Callable', {subtypes.TYPED_NAME_ARG_LIST}), - ('[', { - subtypes.SUBSCRIPT_BRACKET, - subtypes.TYPED_NAME_ARG_LIST}), - ('[', {subtypes.TYPED_NAME_ARG_LIST}), - ('str', 
{subtypes.TYPED_NAME_ARG_LIST}), - (']', {subtypes.TYPED_NAME_ARG_LIST}), - (',', {subtypes.TYPED_NAME_ARG_LIST}), - ('str', {subtypes.TYPED_NAME_ARG_LIST}), - (']', { - subtypes.SUBSCRIPT_BRACKET, - subtypes.TYPED_NAME_ARG_LIST}), - ('=', { - subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - subtypes.TYPED_NAME}), - ('identity', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - subtypes.PARAMETER_STOP}), - (')', {subtypes.NONE}), - (':', {subtypes.NONE})], - [('pass', {subtypes.NONE}), - ], - ]) - def testSetComprehension(self): code = textwrap.dedent("""\ def foo(strs): From beec8688466d7c3664a6a214e6e13506c7fc298b Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Mon, 3 Oct 2022 15:53:10 +0200 Subject: [PATCH 09/11] run the assignment align over the yapf codes --- yapf/__init__.py | 542 ++--- yapf/pyparser/pyparser.py | 143 +- yapf/pyparser/pyparser_utils.py | 92 +- yapf/pyparser/split_penalty_visitor.py | 1777 +++++++++-------- yapf/pytree/blank_line_calculator.py | 233 +-- yapf/pytree/comment_splicer.py | 512 +++-- yapf/pytree/continuation_splicer.py | 40 +- yapf/pytree/pytree_unwrapper.py | 531 ++--- yapf/pytree/pytree_utils.py | 289 +-- yapf/pytree/pytree_visitor.py | 90 +- yapf/pytree/split_penalty.py | 1102 ++++++----- yapf/pytree/subtype_assigner.py | 864 ++++---- yapf/third_party/yapf_diff/yapf_diff.py | 198 +- yapf/yapflib/errors.py | 21 +- yapf/yapflib/file_resources.py | 406 ++-- yapf/yapflib/format_decision_state.py | 2075 ++++++++++---------- yapf/yapflib/format_token.py | 489 ++--- yapf/yapflib/identify_container.py | 60 +- yapf/yapflib/line_joiner.py | 84 +- yapf/yapflib/logical_line.py | 1146 ++++++----- yapf/yapflib/object_state.py | 296 +-- yapf/yapflib/py3compat.py | 158 +- yapf/yapflib/reformatter.py | 1436 +++++++------- yapf/yapflib/style.py | 966 ++++----- yapf/yapflib/verifier.py | 102 +- yapf/yapflib/yapf_api.py | 356 ++-- yapftests/blank_line_calculator_test.py | 69 +- 
yapftests/comment_splicer_test.py | 62 +- yapftests/file_resources_test.py | 137 +- yapftests/format_decision_state_test.py | 8 +- yapftests/line_joiner_test.py | 18 +- yapftests/logical_line_test.py | 22 +- yapftests/main_test.py | 11 +- yapftests/pytree_unwrapper_test.py | 273 +-- yapftests/pytree_utils_test.py | 62 +- yapftests/pytree_visitor_test.py | 10 +- yapftests/reformatter_basic_test.py | 983 ++++++---- yapftests/reformatter_buganizer_test.py | 541 +++-- yapftests/reformatter_facebook_test.py | 95 +- yapftests/reformatter_pep8_test.py | 274 ++- yapftests/reformatter_python3_test.py | 109 +- yapftests/reformatter_style_config_test.py | 54 +- yapftests/reformatter_verify_test.py | 26 +- yapftests/split_penalty_test.py | 272 +-- yapftests/style_test.py | 57 +- yapftests/subtype_assigner_test.py | 421 ++-- yapftests/utils.py | 34 +- yapftests/yapf_test.py | 500 +++-- yapftests/yapf_test_helper.py | 2 +- 49 files changed, 9393 insertions(+), 8655 deletions(-) diff --git a/yapf/__init__.py b/yapf/__init__.py index e8825c1cb..2b69c1ddc 100644 --- a/yapf/__init__.py +++ b/yapf/__init__.py @@ -41,8 +41,8 @@ __version__ = '0.32.0' -def main( argv ): - """Main program. +def main(argv): + """Main program. Arguments: argv: command-line arguments, such as sys.argv (including the program name @@ -55,116 +55,116 @@ def main( argv ): Raises: YapfError: if none of the supplied files were Python files. """ - parser = _BuildParser() - args = parser.parse_args( argv[ 1 : ] ) - style_config = args.style - - if args.style_help: - _PrintHelp( args ) - return 0 - - if args.lines and len( args.files ) > 1: - parser.error( 'cannot use -l/--lines with more than one file' ) - - lines = _GetLines( args.lines ) if args.lines is not None else None - if not args.files: - # No arguments specified. Read code from stdin. 
- if args.in_place or args.diff: - parser.error( - 'cannot use --in-place or --diff flags when reading ' - 'from stdin' ) - - original_source = [] - while True: - # Test that sys.stdin has the "closed" attribute. When using pytest, it - # co-opts sys.stdin, which makes the "main_tests.py" fail. This is gross. - if hasattr( sys.stdin, "closed" ) and sys.stdin.closed: - break - try: - # Use 'raw_input' instead of 'sys.stdin.read', because otherwise the - # user will need to hit 'Ctrl-D' more than once if they're inputting - # the program by hand. 'raw_input' throws an EOFError exception if - # 'Ctrl-D' is pressed, which makes it easy to bail out of this loop. - original_source.append( py3compat.raw_input() ) - except EOFError: - break - except KeyboardInterrupt: - return 1 - - if style_config is None and not args.no_local_style: - style_config = file_resources.GetDefaultStyleForDir( os.getcwd() ) - - source = [ line.rstrip() for line in original_source ] - source[ 0 ] = py3compat.removeBOM( source[ 0 ] ) - - try: - reformatted_source, _ = yapf_api.FormatCode( - py3compat.unicode( '\n'.join( source ) + '\n' ), - filename = '', - style_config = style_config, - lines = lines, - verify = args.verify ) - except errors.YapfError: - raise - except Exception as e: - raise errors.YapfError( errors.FormatErrorMsg( e ) ) - - file_resources.WriteReformattedCode( '', reformatted_source ) - return 0 - - # Get additional exclude patterns from ignorefile - exclude_patterns_from_ignore_file = file_resources.GetExcludePatternsForDir( - os.getcwd() ) - - files = file_resources.GetCommandLineFiles( - args.files, args.recursive, - ( args.exclude or [] ) + exclude_patterns_from_ignore_file ) - if not files: - raise errors.YapfError( 'input filenames did not match any python files' ) - - changed = FormatFiles( - files, - lines, - style_config = args.style, - no_local_style = args.no_local_style, - in_place = args.in_place, - print_diff = args.diff, - verify = args.verify, - parallel = 
args.parallel, - quiet = args.quiet, - verbose = args.verbose ) - return 1 if changed and ( args.diff or args.quiet ) else 0 - - -def _PrintHelp( args ): - """Prints the help menu.""" - - if args.style is None and not args.no_local_style: - args.style = file_resources.GetDefaultStyleForDir( os.getcwd() ) - style.SetGlobalStyle( style.CreateStyleFromConfig( args.style ) ) - print( '[style]' ) - for option, docstring in sorted( style.Help().items() ): - for line in docstring.splitlines(): - print( '#', line and ' ' or '', line, sep = '' ) - option_value = style.Get( option ) - if isinstance( option_value, ( set, list ) ): - option_value = ', '.join( map( str, option_value ) ) - print( option.lower(), '=', option_value, sep = '' ) - print() + parser = _BuildParser() + args = parser.parse_args(argv[1:]) + style_config = args.style + + if args.style_help: + _PrintHelp(args) + return 0 + + if args.lines and len(args.files) > 1: + parser.error('cannot use -l/--lines with more than one file') + + lines = _GetLines(args.lines) if args.lines is not None else None + if not args.files: + # No arguments specified. Read code from stdin. + if args.in_place or args.diff: + parser.error( + 'cannot use --in-place or --diff flags when reading ' + 'from stdin') + + original_source = [] + while True: + # Test that sys.stdin has the "closed" attribute. When using pytest, it + # co-opts sys.stdin, which makes the "main_tests.py" fail. This is gross. + if hasattr(sys.stdin, "closed") and sys.stdin.closed: + break + try: + # Use 'raw_input' instead of 'sys.stdin.read', because otherwise the + # user will need to hit 'Ctrl-D' more than once if they're inputting + # the program by hand. 'raw_input' throws an EOFError exception if + # 'Ctrl-D' is pressed, which makes it easy to bail out of this loop. 
+ original_source.append(py3compat.raw_input()) + except EOFError: + break + except KeyboardInterrupt: + return 1 + + if style_config is None and not args.no_local_style: + style_config = file_resources.GetDefaultStyleForDir(os.getcwd()) + + source = [line.rstrip() for line in original_source] + source[0] = py3compat.removeBOM(source[0]) + + try: + reformatted_source, _ = yapf_api.FormatCode( + py3compat.unicode('\n'.join(source) + '\n'), + filename='', + style_config=style_config, + lines=lines, + verify=args.verify) + except errors.YapfError: + raise + except Exception as e: + raise errors.YapfError(errors.FormatErrorMsg(e)) + + file_resources.WriteReformattedCode('', reformatted_source) + return 0 + + # Get additional exclude patterns from ignorefile + exclude_patterns_from_ignore_file = file_resources.GetExcludePatternsForDir( + os.getcwd()) + + files = file_resources.GetCommandLineFiles( + args.files, args.recursive, + (args.exclude or []) + exclude_patterns_from_ignore_file) + if not files: + raise errors.YapfError('input filenames did not match any python files') + + changed = FormatFiles( + files, + lines, + style_config=args.style, + no_local_style=args.no_local_style, + in_place=args.in_place, + print_diff=args.diff, + verify=args.verify, + parallel=args.parallel, + quiet=args.quiet, + verbose=args.verbose) + return 1 if changed and (args.diff or args.quiet) else 0 + + +def _PrintHelp(args): + """Prints the help menu.""" + + if args.style is None and not args.no_local_style: + args.style = file_resources.GetDefaultStyleForDir(os.getcwd()) + style.SetGlobalStyle(style.CreateStyleFromConfig(args.style)) + print('[style]') + for option, docstring in sorted(style.Help().items()): + for line in docstring.splitlines(): + print('#', line and ' ' or '', line, sep='') + option_value = style.Get(option) + if isinstance(option_value, (set, list)): + option_value = ', '.join(map(str, option_value)) + print(option.lower(), '=', option_value, sep='') + print() def 
FormatFiles( - filenames, - lines, - style_config = None, - no_local_style = False, - in_place = False, - print_diff = False, - verify = False, - parallel = False, - quiet = False, - verbose = False ): - """Format a list of files. + filenames, + lines, + style_config=None, + no_local_style=False, + in_place=False, + print_diff=False, + verify=False, + parallel=False, + quiet=False, + verbose=False): + """Format a list of files. Arguments: filenames: (list of unicode) A list of files to reformat. @@ -186,68 +186,68 @@ def FormatFiles( Returns: True if the source code changed in any of the files being formatted. """ - changed = False - if parallel: - import multiprocessing # pylint: disable=g-import-not-at-top - import concurrent.futures # pylint: disable=g-import-not-at-top - workers = min( multiprocessing.cpu_count(), len( filenames ) ) - with concurrent.futures.ProcessPoolExecutor( workers ) as executor: - future_formats = [ - executor.submit( - _FormatFile, filename, lines, style_config, no_local_style, - in_place, print_diff, verify, quiet, verbose ) - for filename in filenames - ] - for future in concurrent.futures.as_completed( future_formats ): - changed |= future.result() - else: - for filename in filenames: - changed |= _FormatFile( - filename, lines, style_config, no_local_style, in_place, print_diff, - verify, quiet, verbose ) - return changed + changed = False + if parallel: + import multiprocessing # pylint: disable=g-import-not-at-top + import concurrent.futures # pylint: disable=g-import-not-at-top + workers = min(multiprocessing.cpu_count(), len(filenames)) + with concurrent.futures.ProcessPoolExecutor(workers) as executor: + future_formats = [ + executor.submit( + _FormatFile, filename, lines, style_config, no_local_style, + in_place, print_diff, verify, quiet, verbose) + for filename in filenames + ] + for future in concurrent.futures.as_completed(future_formats): + changed |= future.result() + else: + for filename in filenames: + changed |= 
_FormatFile( + filename, lines, style_config, no_local_style, in_place, print_diff, + verify, quiet, verbose) + return changed def _FormatFile( + filename, + lines, + style_config=None, + no_local_style=False, + in_place=False, + print_diff=False, + verify=False, + quiet=False, + verbose=False): + """Format an individual file.""" + if verbose and not quiet: + print('Reformatting %s' % filename) + + if style_config is None and not no_local_style: + style_config = file_resources.GetDefaultStyleForDir( + os.path.dirname(filename)) + + try: + reformatted_code, encoding, has_change = yapf_api.FormatFile( filename, - lines, - style_config = None, - no_local_style = False, - in_place = False, - print_diff = False, - verify = False, - quiet = False, - verbose = False ): - """Format an individual file.""" - if verbose and not quiet: - print( 'Reformatting %s' % filename ) - - if style_config is None and not no_local_style: - style_config = file_resources.GetDefaultStyleForDir( - os.path.dirname( filename ) ) - - try: - reformatted_code, encoding, has_change = yapf_api.FormatFile( - filename, - in_place = in_place, - style_config = style_config, - lines = lines, - print_diff = print_diff, - verify = verify, - logger = logging.warning ) - except errors.YapfError: - raise - except Exception as e: - raise errors.YapfError( errors.FormatErrorMsg( e ) ) - - if not in_place and not quiet and reformatted_code: - file_resources.WriteReformattedCode( - filename, reformatted_code, encoding, in_place ) - return has_change - - -def _GetLines( line_strings ): - """Parses the start and end lines from a line string like 'start-end'. 
+ in_place=in_place, + style_config=style_config, + lines=lines, + print_diff=print_diff, + verify=verify, + logger=logging.warning) + except errors.YapfError: + raise + except Exception as e: + raise errors.YapfError(errors.FormatErrorMsg(e)) + + if not in_place and not quiet and reformatted_code: + file_resources.WriteReformattedCode( + filename, reformatted_code, encoding, in_place) + return has_change + + +def _GetLines(line_strings): + """Parses the start and end lines from a line string like 'start-end'. Arguments: line_strings: (array of string) A list of strings representing a line @@ -259,117 +259,117 @@ def _GetLines( line_strings ): Raises: ValueError: If the line string failed to parse or was an invalid line range. """ - lines = [] - for line_string in line_strings: - # The 'list' here is needed by Python 3. - line = list( map( int, line_string.split( '-', 1 ) ) ) - if line[ 0 ] < 1: - raise errors.YapfError( 'invalid start of line range: %r' % line ) - if line[ 0 ] > line[ 1 ]: - raise errors.YapfError( 'end comes before start in line range: %r' % line ) - lines.append( tuple( line ) ) - return lines + lines = [] + for line_string in line_strings: + # The 'list' here is needed by Python 3. + line = list(map(int, line_string.split('-', 1))) + if line[0] < 1: + raise errors.YapfError('invalid start of line range: %r' % line) + if line[0] > line[1]: + raise errors.YapfError('end comes before start in line range: %r' % line) + lines.append(tuple(line)) + return lines def _BuildParser(): - """Constructs the parser for the command line arguments. + """Constructs the parser for the command line arguments. Returns: An ArgumentParser instance for the CLI. """ - parser = argparse.ArgumentParser( - prog = 'yapf', description = 'Formatter for Python code.' 
) - parser.add_argument( - '-v', - '--version', - action = 'version', - version = '%(prog)s {}'.format( __version__ ) ) - - diff_inplace_quiet_group = parser.add_mutually_exclusive_group() - diff_inplace_quiet_group.add_argument( - '-d', - '--diff', - action = 'store_true', - help = 'print the diff for the fixed source' ) - diff_inplace_quiet_group.add_argument( - '-i', - '--in-place', - action = 'store_true', - help = 'make changes to files in place' ) - diff_inplace_quiet_group.add_argument( - '-q', - '--quiet', - action = 'store_true', - help = 'output nothing and set return value' ) - - lines_recursive_group = parser.add_mutually_exclusive_group() - lines_recursive_group.add_argument( - '-r', - '--recursive', - action = 'store_true', - help = 'run recursively over directories' ) - lines_recursive_group.add_argument( - '-l', - '--lines', - metavar = 'START-END', - action = 'append', - default = None, - help = 'range of lines to reformat, one-based' ) - - parser.add_argument( - '-e', - '--exclude', - metavar = 'PATTERN', - action = 'append', - default = None, - help = 'patterns for files to exclude from formatting' ) - parser.add_argument( - '--style', - action = 'store', - help = ( - 'specify formatting style: either a style name (for example "pep8" ' - 'or "google"), or the name of a file with style settings. The ' - 'default is pep8 unless a %s or %s or %s file located in the same ' - 'directory as the source or one of its parent directories ' - '(for stdin, the current directory is used).' 
% - ( style.LOCAL_STYLE, style.SETUP_CONFIG, style.PYPROJECT_TOML ) ) ) - parser.add_argument( - '--style-help', - action = 'store_true', - help = ( - 'show style settings and exit; this output can be ' - 'saved to .style.yapf to make your settings ' - 'permanent' ) ) - parser.add_argument( - '--no-local-style', - action = 'store_true', - help = "don't search for local style definition" ) - parser.add_argument( '--verify', action = 'store_true', help = argparse.SUPPRESS ) - parser.add_argument( - '-p', - '--parallel', - action = 'store_true', - help = ( - 'run YAPF in parallel when formatting multiple files. Requires ' - 'concurrent.futures in Python 2.X' ) ) - parser.add_argument( - '-vv', - '--verbose', - action = 'store_true', - help = 'print out file names while processing' ) - - parser.add_argument( - 'files', nargs = '*', help = 'reads from stdin when no files are specified.' ) - return parser - - -def run_main(): # pylint: disable=invalid-name - try: - sys.exit( main( sys.argv ) ) - except errors.YapfError as e: - sys.stderr.write( 'yapf: ' + str( e ) + '\n' ) - sys.exit( 1 ) + parser = argparse.ArgumentParser( + prog='yapf', description='Formatter for Python code.') + parser.add_argument( + '-v', + '--version', + action='version', + version='%(prog)s {}'.format(__version__)) + + diff_inplace_quiet_group = parser.add_mutually_exclusive_group() + diff_inplace_quiet_group.add_argument( + '-d', + '--diff', + action='store_true', + help='print the diff for the fixed source') + diff_inplace_quiet_group.add_argument( + '-i', + '--in-place', + action='store_true', + help='make changes to files in place') + diff_inplace_quiet_group.add_argument( + '-q', + '--quiet', + action='store_true', + help='output nothing and set return value') + + lines_recursive_group = parser.add_mutually_exclusive_group() + lines_recursive_group.add_argument( + '-r', + '--recursive', + action='store_true', + help='run recursively over directories') + lines_recursive_group.add_argument( + 
'-l', + '--lines', + metavar='START-END', + action='append', + default=None, + help='range of lines to reformat, one-based') + + parser.add_argument( + '-e', + '--exclude', + metavar='PATTERN', + action='append', + default=None, + help='patterns for files to exclude from formatting') + parser.add_argument( + '--style', + action='store', + help=( + 'specify formatting style: either a style name (for example "pep8" ' + 'or "google"), or the name of a file with style settings. The ' + 'default is pep8 unless a %s or %s or %s file located in the same ' + 'directory as the source or one of its parent directories ' + '(for stdin, the current directory is used).' % + (style.LOCAL_STYLE, style.SETUP_CONFIG, style.PYPROJECT_TOML))) + parser.add_argument( + '--style-help', + action='store_true', + help=( + 'show style settings and exit; this output can be ' + 'saved to .style.yapf to make your settings ' + 'permanent')) + parser.add_argument( + '--no-local-style', + action='store_true', + help="don't search for local style definition") + parser.add_argument('--verify', action='store_true', help=argparse.SUPPRESS) + parser.add_argument( + '-p', + '--parallel', + action='store_true', + help=( + 'run YAPF in parallel when formatting multiple files. 
Requires ' + 'concurrent.futures in Python 2.X')) + parser.add_argument( + '-vv', + '--verbose', + action='store_true', + help='print out file names while processing') + + parser.add_argument( + 'files', nargs='*', help='reads from stdin when no files are specified.') + return parser + + +def run_main(): # pylint: disable=invalid-name + try: + sys.exit(main(sys.argv)) + except errors.YapfError as e: + sys.stderr.write('yapf: ' + str(e) + '\n') + sys.exit(1) if __name__ == '__main__': - run_main() + run_main() diff --git a/yapf/pyparser/pyparser.py b/yapf/pyparser/pyparser.py index b6b7c50d7..b2bffa283 100644 --- a/yapf/pyparser/pyparser.py +++ b/yapf/pyparser/pyparser.py @@ -46,8 +46,8 @@ CONTINUATION = token.N_TOKENS -def ParseCode( unformatted_source, filename = '' ): - """Parse a string of Python code into logical lines. +def ParseCode(unformatted_source, filename=''): + """Parse a string of Python code into logical lines. This provides an alternative entry point to YAPF. @@ -61,27 +61,27 @@ def ParseCode( unformatted_source, filename = '' ): Raises: An exception is raised if there's an error during AST parsing. """ - if not unformatted_source.endswith( os.linesep ): - unformatted_source += os.linesep + if not unformatted_source.endswith(os.linesep): + unformatted_source += os.linesep - try: - ast_tree = ast.parse( unformatted_source, filename ) - ast.fix_missing_locations( ast_tree ) - readline = py3compat.StringIO( unformatted_source ).readline - tokens = tokenize.generate_tokens( readline ) - except Exception: - raise + try: + ast_tree = ast.parse(unformatted_source, filename) + ast.fix_missing_locations(ast_tree) + readline = py3compat.StringIO(unformatted_source).readline + tokens = tokenize.generate_tokens(readline) + except Exception: + raise - logical_lines = _CreateLogicalLines( tokens ) + logical_lines = _CreateLogicalLines(tokens) - # Process the logical lines. 
- split_penalty_visitor.SplitPenalty( logical_lines ).visit( ast_tree ) + # Process the logical lines. + split_penalty_visitor.SplitPenalty(logical_lines).visit(ast_tree) - return logical_lines + return logical_lines -def _CreateLogicalLines( tokens ): - """Separate tokens into logical lines. +def _CreateLogicalLines(tokens): + """Separate tokens into logical lines. Arguments: tokens: (list of tokenizer.TokenInfo) Tokens generated by tokenizer. @@ -89,58 +89,57 @@ def _CreateLogicalLines( tokens ): Returns: A list of LogicalLines. """ - logical_lines = [] - cur_logical_line = [] - prev_tok = None - depth = 0 - - for tok in tokens: - tok = py3compat.TokenInfo( *tok ) - if tok.type == tokenize.NEWLINE: - # End of a logical line. - logical_lines.append( logical_line.LogicalLine( depth, cur_logical_line ) ) - cur_logical_line = [] - prev_tok = None - elif tok.type == tokenize.INDENT: - depth += 1 - elif tok.type == tokenize.DEDENT: - depth -= 1 - elif tok.type not in { tokenize.NL, tokenize.ENDMARKER }: - if ( prev_tok and prev_tok.line.rstrip().endswith( '\\' ) and - prev_tok.start[ 0 ] < tok.start[ 0 ] ): - # Insert a token for a line continuation. - ctok = py3compat.TokenInfo( - type = CONTINUATION, - string = '\\', - start = ( prev_tok.start[ 0 ], prev_tok.start[ 1 ] + 1 ), - end = ( prev_tok.end[ 0 ], prev_tok.end[ 0 ] + 2 ), - line = prev_tok.line ) - ctok.lineno = ctok.start[ 0 ] - ctok.column = ctok.start[ 1 ] - ctok.value = '\\' - cur_logical_line.append( - format_token.FormatToken( ctok, 'CONTINUATION' ) ) - tok.lineno = tok.start[ 0 ] - tok.column = tok.start[ 1 ] - tok.value = tok.string - cur_logical_line.append( - format_token.FormatToken( tok, token.tok_name[ tok.type ] ) ) - prev_tok = tok - - # Link the FormatTokens in each line together to for a doubly linked list. 
- for line in logical_lines: - previous = line.first - bracket_stack = [ previous ] if previous.OpensScope() else [] - for tok in line.tokens[ 1 : ]: - tok.previous_token = previous - previous.next_token = tok - previous = tok - - # Set up the "matching_bracket" attribute. - if tok.OpensScope(): - bracket_stack.append( tok ) - elif tok.ClosesScope(): - bracket_stack[ -1 ].matching_bracket = tok - tok.matching_bracket = bracket_stack.pop() - - return logical_lines + logical_lines = [] + cur_logical_line = [] + prev_tok = None + depth = 0 + + for tok in tokens: + tok = py3compat.TokenInfo(*tok) + if tok.type == tokenize.NEWLINE: + # End of a logical line. + logical_lines.append(logical_line.LogicalLine(depth, cur_logical_line)) + cur_logical_line = [] + prev_tok = None + elif tok.type == tokenize.INDENT: + depth += 1 + elif tok.type == tokenize.DEDENT: + depth -= 1 + elif tok.type not in {tokenize.NL, tokenize.ENDMARKER}: + if (prev_tok and prev_tok.line.rstrip().endswith('\\') and + prev_tok.start[0] < tok.start[0]): + # Insert a token for a line continuation. + ctok = py3compat.TokenInfo( + type=CONTINUATION, + string='\\', + start=(prev_tok.start[0], prev_tok.start[1] + 1), + end=(prev_tok.end[0], prev_tok.end[0] + 2), + line=prev_tok.line) + ctok.lineno = ctok.start[0] + ctok.column = ctok.start[1] + ctok.value = '\\' + cur_logical_line.append(format_token.FormatToken(ctok, 'CONTINUATION')) + tok.lineno = tok.start[0] + tok.column = tok.start[1] + tok.value = tok.string + cur_logical_line.append( + format_token.FormatToken(tok, token.tok_name[tok.type])) + prev_tok = tok + + # Link the FormatTokens in each line together to for a doubly linked list. + for line in logical_lines: + previous = line.first + bracket_stack = [previous] if previous.OpensScope() else [] + for tok in line.tokens[1:]: + tok.previous_token = previous + previous.next_token = tok + previous = tok + + # Set up the "matching_bracket" attribute. 
+ if tok.OpensScope(): + bracket_stack.append(tok) + elif tok.ClosesScope(): + bracket_stack[-1].matching_bracket = tok + tok.matching_bracket = bracket_stack.pop() + + return logical_lines diff --git a/yapf/pyparser/pyparser_utils.py b/yapf/pyparser/pyparser_utils.py index 4a37b89a9..149e0a280 100644 --- a/yapf/pyparser/pyparser_utils.py +++ b/yapf/pyparser/pyparser_utils.py @@ -29,68 +29,68 @@ """ -def GetTokens( logical_lines, node ): - """Get a list of tokens within the node's range from the logical lines.""" - start = TokenStart( node ) - end = TokenEnd( node ) - tokens = [] +def GetTokens(logical_lines, node): + """Get a list of tokens within the node's range from the logical lines.""" + start = TokenStart(node) + end = TokenEnd(node) + tokens = [] - for line in logical_lines: - if line.start > end: - break - if line.start <= start or line.end >= end: - tokens.extend( GetTokensInSubRange( line.tokens, node ) ) + for line in logical_lines: + if line.start > end: + break + if line.start <= start or line.end >= end: + tokens.extend(GetTokensInSubRange(line.tokens, node)) - return tokens + return tokens -def GetTokensInSubRange( tokens, node ): - """Get a subset of tokens representing the node.""" - start = TokenStart( node ) - end = TokenEnd( node ) - tokens_in_range = [] +def GetTokensInSubRange(tokens, node): + """Get a subset of tokens representing the node.""" + start = TokenStart(node) + end = TokenEnd(node) + tokens_in_range = [] - for tok in tokens: - tok_range = ( tok.lineno, tok.column ) - if tok_range >= start and tok_range < end: - tokens_in_range.append( tok ) + for tok in tokens: + tok_range = (tok.lineno, tok.column) + if tok_range >= start and tok_range < end: + tokens_in_range.append(tok) - return tokens_in_range + return tokens_in_range -def GetTokenIndex( tokens, pos ): - """Get the index of the token at pos.""" - for index, token in enumerate( tokens ): - if ( token.lineno, token.column ) == pos: - return index +def GetTokenIndex(tokens, pos): 
+ """Get the index of the token at pos.""" + for index, token in enumerate(tokens): + if (token.lineno, token.column) == pos: + return index - return None + return None -def GetNextTokenIndex( tokens, pos ): - """Get the index of the next token after pos.""" - for index, token in enumerate( tokens ): - if ( token.lineno, token.column ) >= pos: - return index +def GetNextTokenIndex(tokens, pos): + """Get the index of the next token after pos.""" + for index, token in enumerate(tokens): + if (token.lineno, token.column) >= pos: + return index - return None + return None -def GetPrevTokenIndex( tokens, pos ): - """Get the index of the previous token before pos.""" - for index, token in enumerate( tokens ): - if index > 0 and ( token.lineno, token.column ) >= pos: - return index - 1 +def GetPrevTokenIndex(tokens, pos): + """Get the index of the previous token before pos.""" + for index, token in enumerate(tokens): + if index > 0 and (token.lineno, token.column) >= pos: + return index - 1 - return None + return None -def TokenStart( node ): - return ( node.lineno, node.col_offset ) +def TokenStart(node): + return (node.lineno, node.col_offset) -def TokenEnd( node ): - return ( node.end_lineno, node.end_col_offset ) +def TokenEnd(node): + return (node.end_lineno, node.end_col_offset) ############################################################################# @@ -98,6 +98,6 @@ def TokenEnd( node ): ############################################################################# -def AstDump( node ): - import ast - print( ast.dump( node, include_attributes = True, indent = 4 ) ) +def AstDump(node): + import ast + print(ast.dump(node, include_attributes=True, indent=4)) diff --git a/yapf/pyparser/split_penalty_visitor.py b/yapf/pyparser/split_penalty_visitor.py index 4d05558ba..946bd949f 100644 --- a/yapf/pyparser/split_penalty_visitor.py +++ b/yapf/pyparser/split_penalty_visitor.py @@ -21,896 +21,893 @@ # This is a skeleton of an AST visitor. 
-class SplitPenalty( ast.NodeVisitor ): - """Compute split penalties between tokens.""" - - def __init__( self, logical_lines ): - super( SplitPenalty, self ).__init__() - self.logical_lines = logical_lines - - # We never want to split before a colon or comma. - for logical_line in logical_lines: - for token in logical_line.tokens: - if token.value in frozenset( { ',', ':' } ): - token.split_penalty = split_penalty.UNBREAKABLE - - def _GetTokens( self, node ): - return pyutils.GetTokens( self.logical_lines, node ) - - ############################################################################ - # Statements # - ############################################################################ - - def visit_FunctionDef( self, node ): - # FunctionDef(name=Name, - # args=arguments( - # posonlyargs=[], - # args=[], - # vararg=[], - # kwonlyargs=[], - # kw_defaults=[], - # defaults=[]), - # body=[...], - # decorator_list=[Call_1, Call_2, ..., Call_n], - # keywords=[]) - tokens = self._GetTokens( node ) - for decorator in node.decorator_list: - # The decorator token list begins after the '@'. The body of the decorator - # is formatted like a normal "call." - decorator_range = self._GetTokens( decorator ) - # Don't split after the '@'. 
- decorator_range[ 0 ].split_penalty = split_penalty.UNBREAKABLE - - for token in tokens[ 1 : ]: - if token.value == '(': - break - _SetPenalty( token, split_penalty.UNBREAKABLE ) - - if node.returns: - start_index = pyutils.GetTokenIndex( - tokens, pyutils.TokenStart( node.returns ) ) - _IncreasePenalty( - tokens[ start_index - 1 : start_index + 1 ], - split_penalty.VERY_STRONGLY_CONNECTED ) - end_index = pyutils.GetTokenIndex( - tokens, pyutils.TokenEnd( node.returns ) ) - _IncreasePenalty( - tokens[ start_index + 1 : end_index ], - split_penalty.STRONGLY_CONNECTED ) - - return self.generic_visit( node ) - - def visit_AsyncFunctionDef( self, node ): - # AsyncFunctionDef(name=Name, - # args=arguments( - # posonlyargs=[], - # args=[], - # vararg=[], - # kwonlyargs=[], - # kw_defaults=[], - # defaults=[]), - # body=[...], - # decorator_list=[Expr_1, Expr_2, ..., Expr_n], - # keywords=[]) - return self.visit_FunctionDef( node ) - - def visit_ClassDef( self, node ): - # ClassDef(name=Name, - # bases=[Expr_1, Expr_2, ..., Expr_n], - # keywords=[], - # body=[], - # decorator_list=[Expr_1, Expr_2, ..., Expr_m]) - for base in node.bases: - tokens = self._GetTokens( base ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - for decorator in node.decorator_list: - # Don't split after the '@'. 
- decorator_range = self._GetTokens( decorator ) - decorator_range[ 0 ].split_penalty = split_penalty.UNBREAKABLE - - return self.generic_visit( node ) - - def visit_Return( self, node ): - # Return(value=Expr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_Delete( self, node ): - # Delete(targets=[Expr_1, Expr_2, ..., Expr_n]) - for target in node.targets: - tokens = self._GetTokens( target ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_Assign( self, node ): - # Assign(targets=[Expr_1, Expr_2, ..., Expr_n], - # value=Expr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_AugAssign( self, node ): - # AugAssign(target=Name, - # op=Add(), - # value=Expr) - return self.generic_visit( node ) - - def visit_AnnAssign( self, node ): - # AnnAssign(target=Expr, - # annotation=TypeName, - # value=Expr, - # simple=number) - return self.generic_visit( node ) - - def visit_For( self, node ): - # For(target=Expr, - # iter=Expr, - # body=[...], - # orelse=[...]) - return self.generic_visit( node ) - - def visit_AsyncFor( self, node ): - # AsyncFor(target=Expr, - # iter=Expr, - # body=[...], - # orelse=[...]) - return self.generic_visit( node ) - - def visit_While( self, node ): - # While(test=Expr, - # body=[...], - # orelse=[...]) - return self.generic_visit( node ) - - def visit_If( self, node ): - # If(test=Expr, - # body=[...], - # orelse=[...]) - return self.generic_visit( node ) - - def visit_With( self, node ): - # With(items=[withitem_1, withitem_2, ..., withitem_n], - # body=[...]) - return self.generic_visit( node ) - - def visit_AsyncWith( self, node ): - # AsyncWith(items=[withitem_1, withitem_2, ..., withitem_n], - # body=[...]) - return self.generic_visit( node ) - - def visit_Match( self, node ): - # 
Match(subject=Expr, - # cases=[ - # match_case( - # pattern=pattern, - # guard=Expr, - # body=[...]), - # ... - # ]) - return self.generic_visit( node ) - - def visit_Raise( self, node ): - # Raise(exc=Expr) - return self.generic_visit( node ) - - def visit_Try( self, node ): - # Try(body=[...], - # handlers=[ExceptHandler_1, ExceptHandler_2, ..., ExceptHandler_b], - # orelse=[...], - # finalbody=[...]) - return self.generic_visit( node ) - - def visit_Assert( self, node ): - # Assert(test=Expr) - return self.generic_visit( node ) - - def visit_Import( self, node ): - # Import(names=[ - # alias( - # name=Identifier, - # asname=Identifier), - # ... - # ]) - return self.generic_visit( node ) - - def visit_ImportFrom( self, node ): - # ImportFrom(module=Identifier, - # names=[ - # alias( - # name=Identifier, - # asname=Identifier), - # ... - # ], - # level=num - return self.generic_visit( node ) - - def visit_Global( self, node ): - # Global(names=[Identifier_1, Identifier_2, ..., Identifier_n]) - return self.generic_visit( node ) - - def visit_Nonlocal( self, node ): - # Nonlocal(names=[Identifier_1, Identifier_2, ..., Identifier_n]) - return self.generic_visit( node ) - - def visit_Expr( self, node ): - # Expr(value=Expr) - return self.generic_visit( node ) - - def visit_Pass( self, node ): - # Pass() - return self.generic_visit( node ) - - def visit_Break( self, node ): - # Break() - return self.generic_visit( node ) - - def visit_Continue( self, node ): - # Continue() - return self.generic_visit( node ) - - ############################################################################ - # Expressions # - ############################################################################ - - def visit_BoolOp( self, node ): - # BoolOp(op=And | Or, - # values=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - # Lower the split penalty to allow splitting before or after the logical - # operator. 
- split_before_operator = style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ) - operator_indices = [ - pyutils.GetNextTokenIndex( tokens, pyutils.TokenEnd( value ) ) - for value in node.values[ :-1 ] - ] - for operator_index in operator_indices: - if not split_before_operator: - operator_index += 1 - _DecreasePenalty( tokens[ operator_index ], split_penalty.EXPR * 2 ) - - return self.generic_visit( node ) - - def visit_NamedExpr( self, node ): - # NamedExpr(target=Name, - # value=Expr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_BinOp( self, node ): - # BinOp(left=LExpr - # op=Add | Sub | Mult | MatMult | Div | Mod | Pow | LShift | - # RShift | BitOr | BitXor | BitAnd | FloorDiv - # right=RExpr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - # Lower the split penalty to allow splitting before or after the arithmetic - # operator. - operator_index = pyutils.GetNextTokenIndex( - tokens, pyutils.TokenEnd( node.left ) ) - if not style.Get( 'SPLIT_BEFORE_ARITHMETIC_OPERATOR' ): - operator_index += 1 - - _DecreasePenalty( tokens[ operator_index ], split_penalty.EXPR * 2 ) - - return self.generic_visit( node ) - - def visit_UnaryOp( self, node ): - # UnaryOp(op=Not | USub | UAdd | Invert, - # operand=Expr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - _IncreasePenalty( - tokens[ 1 ], style.Get( 'SPLIT_PENALTY_AFTER_UNARY_OPERATOR' ) ) - - return self.generic_visit( node ) - - def visit_Lambda( self, node ): - # Lambda(args=arguments( - # posonlyargs=[arg(...), arg(...), ..., arg(...)], - # args=[arg(...), arg(...), ..., arg(...)], - # kwonlyargs=[arg(...), arg(...), ..., arg(...)], - # kw_defaults=[arg(...), arg(...), ..., arg(...)], - # defaults=[arg(...), arg(...), ..., arg(...)]), - # body=Expr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.LAMBDA 
) - - if style.Get( 'ALLOW_MULTILINE_LAMBDAS' ): - _SetPenalty( self._GetTokens( node.body ), split_penalty.MULTIPLINE_LAMBDA ) - - return self.generic_visit( node ) - - def visit_IfExp( self, node ): - # IfExp(test=TestExpr, - # body=BodyExpr, - # orelse=OrElseExpr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_Dict( self, node ): - # Dict(keys=[Expr_1, Expr_2, ..., Expr_n], - # values=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens( node ) - - # The keys should be on a single line if at all possible. - for key in node.keys: - subrange = pyutils.GetTokensInSubRange( tokens, key ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.DICT_KEY_EXPR ) - - for value in node.values: - subrange = pyutils.GetTokensInSubRange( tokens, value ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.DICT_VALUE_EXPR ) - - return self.generic_visit( node ) - - def visit_Set( self, node ): - # Set(elts=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens( node ) - for element in node.elts: - subrange = pyutils.GetTokensInSubRange( tokens, element ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_ListComp( self, node ): - # ListComp(elt=Expr, - # generators=[ - # comprehension( - # target=Expr, - # iter=Expr, - # ifs=[Expr_1, Expr_2, ..., Expr_n], - # is_async=0), - # ... 
- # ]) - tokens = self._GetTokens( node ) - element = pyutils.GetTokensInSubRange( tokens, node.elt ) - _IncreasePenalty( element[ 1 : ], split_penalty.EXPR ) - - for comp in node.generators: - subrange = pyutils.GetTokensInSubRange( tokens, comp.iter ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - for if_expr in comp.ifs: - subrange = pyutils.GetTokensInSubRange( tokens, if_expr ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_SetComp( self, node ): - # SetComp(elt=Expr, - # generators=[ - # comprehension( - # target=Expr, - # iter=Expr, - # ifs=[Expr_1, Expr_2, ..., Expr_n], - # is_async=0), - # ... - # ]) - tokens = self._GetTokens( node ) - element = pyutils.GetTokensInSubRange( tokens, node.elt ) - _IncreasePenalty( element[ 1 : ], split_penalty.EXPR ) - - for comp in node.generators: - subrange = pyutils.GetTokensInSubRange( tokens, comp.iter ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - for if_expr in comp.ifs: - subrange = pyutils.GetTokensInSubRange( tokens, if_expr ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_DictComp( self, node ): - # DictComp(key=KeyExpr, - # value=ValExpr, - # generators=[ - # comprehension( - # target=TargetExpr - # iter=IterExpr, - # ifs=[Expr_1, Expr_2, ..., Expr_n]), - # is_async=0)], - # ... 
- # ]) - tokens = self._GetTokens( node ) - key = pyutils.GetTokensInSubRange( tokens, node.key ) - _IncreasePenalty( key[ 1 : ], split_penalty.EXPR ) - - value = pyutils.GetTokensInSubRange( tokens, node.value ) - _IncreasePenalty( value[ 1 : ], split_penalty.EXPR ) - - for comp in node.generators: - subrange = pyutils.GetTokensInSubRange( tokens, comp.iter ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - for if_expr in comp.ifs: - subrange = pyutils.GetTokensInSubRange( tokens, if_expr ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_GeneratorExp( self, node ): - # GeneratorExp(elt=Expr, - # generators=[ - # comprehension( - # target=Expr, - # iter=Expr, - # ifs=[Expr_1, Expr_2, ..., Expr_n], - # is_async=0), - # ... - # ]) - tokens = self._GetTokens( node ) - element = pyutils.GetTokensInSubRange( tokens, node.elt ) - _IncreasePenalty( element[ 1 : ], split_penalty.EXPR ) - - for comp in node.generators: - subrange = pyutils.GetTokensInSubRange( tokens, comp.iter ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - for if_expr in comp.ifs: - subrange = pyutils.GetTokensInSubRange( tokens, if_expr ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_Await( self, node ): - # Await(value=Expr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_Yield( self, node ): - # Yield(value=Expr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_YieldFrom( self, node ): - # YieldFrom(value=Expr) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - tokens[ 2 ].split_penalty = split_penalty.UNBREAKABLE - - return self.generic_visit( node ) - - def visit_Compare( self, node ): - # Compare(left=LExpr, - # 
ops=[Op_1, Op_2, ..., Op_n], - # comparators=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.EXPR ) - - operator_indices = [ - pyutils.GetNextTokenIndex( tokens, pyutils.TokenEnd( node.left ) ) - ] + [ - pyutils.GetNextTokenIndex( tokens, pyutils.TokenEnd( comparator ) ) - for comparator in node.comparators[ :-1 ] - ] - split_before = style.Get( 'SPLIT_BEFORE_ARITHMETIC_OPERATOR' ) - - for operator_index in operator_indices: - if not split_before: - operator_index += 1 - _DecreasePenalty( tokens[ operator_index ], split_penalty.EXPR * 2 ) - - return self.generic_visit( node ) - - def visit_Call( self, node ): - # Call(func=Expr, - # args=[Expr_1, Expr_2, ..., Expr_n], - # keywords=[ - # keyword( - # arg='d', - # value=Expr), - # ... - # ]) - tokens = self._GetTokens( node ) - - # Don't never split before the opening parenthesis. - paren_index = pyutils.GetNextTokenIndex( tokens, pyutils.TokenEnd( node.func ) ) - _IncreasePenalty( tokens[ paren_index ], split_penalty.UNBREAKABLE ) - - for arg in node.args: - subrange = pyutils.GetTokensInSubRange( tokens, arg ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - - return self.generic_visit( node ) - - def visit_FormattedValue( self, node ): - # FormattedValue(value=Expr, - # conversion=-1) - return node # Ignore formatted values. 
- - def visit_JoinedStr( self, node ): - # JoinedStr(values=[Expr_1, Expr_2, ..., Expr_n]) - return self.generic_visit( node ) - - def visit_Constant( self, node ): - # Constant(value=Expr) - return self.generic_visit( node ) - - def visit_Attribute( self, node ): - # Attribute(value=Expr, - # attr=Identifier) - tokens = self._GetTokens( node ) - split_before = style.Get( 'SPLIT_BEFORE_DOT' ) - dot_indices = pyutils.GetNextTokenIndex( - tokens, pyutils.TokenEnd( node.value ) ) - - if not split_before: - dot_indices += 1 - _IncreasePenalty( tokens[ dot_indices ], split_penalty.VERY_STRONGLY_CONNECTED ) - - return self.generic_visit( node ) - - def visit_Subscript( self, node ): - # Subscript(value=ValueExpr, - # slice=SliceExpr) - tokens = self._GetTokens( node ) - - # Don't split before the opening bracket of a subscript. - bracket_index = pyutils.GetNextTokenIndex( - tokens, pyutils.TokenEnd( node.value ) ) - _IncreasePenalty( tokens[ bracket_index ], split_penalty.UNBREAKABLE ) - - return self.generic_visit( node ) - - def visit_Starred( self, node ): - # Starred(value=Expr) - return self.generic_visit( node ) - - def visit_Name( self, node ): - # Name(id=Identifier) - tokens = self._GetTokens( node ) - _IncreasePenalty( tokens[ 1 : ], split_penalty.UNBREAKABLE ) - - return self.generic_visit( node ) - - def visit_List( self, node ): - # List(elts=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens( node ) - - for element in node.elts: - subrange = pyutils.GetTokensInSubRange( tokens, element ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - _DecreasePenalty( subrange[ 0 ], split_penalty.EXPR // 2 ) - - return self.generic_visit( node ) - - def visit_Tuple( self, node ): - # Tuple(elts=[Expr_1, Expr_2, ..., Expr_n]) - tokens = self._GetTokens( node ) - - for element in node.elts: - subrange = pyutils.GetTokensInSubRange( tokens, element ) - _IncreasePenalty( subrange[ 1 : ], split_penalty.EXPR ) - _DecreasePenalty( subrange[ 0 ], 
split_penalty.EXPR // 2 ) - - return self.generic_visit( node ) - - def visit_Slice( self, node ): - # Slice(lower=Expr, - # upper=Expr, - # step=Expr) - tokens = self._GetTokens( node ) - - if hasattr( node, 'lower' ) and node.lower: - subrange = pyutils.GetTokensInSubRange( tokens, node.lower ) - _IncreasePenalty( subrange, split_penalty.EXPR ) - _DecreasePenalty( subrange[ 0 ], split_penalty.EXPR // 2 ) - - if hasattr( node, 'upper' ) and node.upper: - colon_index = pyutils.GetPrevTokenIndex( - tokens, pyutils.TokenStart( node.upper ) ) - _IncreasePenalty( tokens[ colon_index ], split_penalty.UNBREAKABLE ) - subrange = pyutils.GetTokensInSubRange( tokens, node.upper ) - _IncreasePenalty( subrange, split_penalty.EXPR ) - _DecreasePenalty( subrange[ 0 ], split_penalty.EXPR // 2 ) - - if hasattr( node, 'step' ) and node.step: - colon_index = pyutils.GetPrevTokenIndex( - tokens, pyutils.TokenStart( node.step ) ) - _IncreasePenalty( tokens[ colon_index ], split_penalty.UNBREAKABLE ) - subrange = pyutils.GetTokensInSubRange( tokens, node.step ) - _IncreasePenalty( subrange, split_penalty.EXPR ) - _DecreasePenalty( subrange[ 0 ], split_penalty.EXPR // 2 ) - - return self.generic_visit( node ) - - ############################################################################ - # Expression Context # - ############################################################################ - - def visit_Load( self, node ): - # Load() - return self.generic_visit( node ) - - def visit_Store( self, node ): - # Store() - return self.generic_visit( node ) - - def visit_Del( self, node ): - # Del() - return self.generic_visit( node ) - - ############################################################################ - # Boolean Operators # - ############################################################################ +class SplitPenalty(ast.NodeVisitor): + """Compute split penalties between tokens.""" + + def __init__(self, logical_lines): + super(SplitPenalty, self).__init__() + 
self.logical_lines = logical_lines + + # We never want to split before a colon or comma. + for logical_line in logical_lines: + for token in logical_line.tokens: + if token.value in frozenset({',', ':'}): + token.split_penalty = split_penalty.UNBREAKABLE + + def _GetTokens(self, node): + return pyutils.GetTokens(self.logical_lines, node) + + ############################################################################ + # Statements # + ############################################################################ + + def visit_FunctionDef(self, node): + # FunctionDef(name=Name, + # args=arguments( + # posonlyargs=[], + # args=[], + # vararg=[], + # kwonlyargs=[], + # kw_defaults=[], + # defaults=[]), + # body=[...], + # decorator_list=[Call_1, Call_2, ..., Call_n], + # keywords=[]) + tokens = self._GetTokens(node) + for decorator in node.decorator_list: + # The decorator token list begins after the '@'. The body of the decorator + # is formatted like a normal "call." + decorator_range = self._GetTokens(decorator) + # Don't split after the '@'. 
+ decorator_range[0].split_penalty = split_penalty.UNBREAKABLE + + for token in tokens[1:]: + if token.value == '(': + break + _SetPenalty(token, split_penalty.UNBREAKABLE) + + if node.returns: + start_index = pyutils.GetTokenIndex( + tokens, pyutils.TokenStart(node.returns)) + _IncreasePenalty( + tokens[start_index - 1:start_index + 1], + split_penalty.VERY_STRONGLY_CONNECTED) + end_index = pyutils.GetTokenIndex(tokens, pyutils.TokenEnd(node.returns)) + _IncreasePenalty( + tokens[start_index + 1:end_index], split_penalty.STRONGLY_CONNECTED) + + return self.generic_visit(node) + + def visit_AsyncFunctionDef(self, node): + # AsyncFunctionDef(name=Name, + # args=arguments( + # posonlyargs=[], + # args=[], + # vararg=[], + # kwonlyargs=[], + # kw_defaults=[], + # defaults=[]), + # body=[...], + # decorator_list=[Expr_1, Expr_2, ..., Expr_n], + # keywords=[]) + return self.visit_FunctionDef(node) + + def visit_ClassDef(self, node): + # ClassDef(name=Name, + # bases=[Expr_1, Expr_2, ..., Expr_n], + # keywords=[], + # body=[], + # decorator_list=[Expr_1, Expr_2, ..., Expr_m]) + for base in node.bases: + tokens = self._GetTokens(base) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + for decorator in node.decorator_list: + # Don't split after the '@'. 
+ decorator_range = self._GetTokens(decorator) + decorator_range[0].split_penalty = split_penalty.UNBREAKABLE + + return self.generic_visit(node) + + def visit_Return(self, node): + # Return(value=Expr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_Delete(self, node): + # Delete(targets=[Expr_1, Expr_2, ..., Expr_n]) + for target in node.targets: + tokens = self._GetTokens(target) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_Assign(self, node): + # Assign(targets=[Expr_1, Expr_2, ..., Expr_n], + # value=Expr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_AugAssign(self, node): + # AugAssign(target=Name, + # op=Add(), + # value=Expr) + return self.generic_visit(node) + + def visit_AnnAssign(self, node): + # AnnAssign(target=Expr, + # annotation=TypeName, + # value=Expr, + # simple=number) + return self.generic_visit(node) + + def visit_For(self, node): + # For(target=Expr, + # iter=Expr, + # body=[...], + # orelse=[...]) + return self.generic_visit(node) + + def visit_AsyncFor(self, node): + # AsyncFor(target=Expr, + # iter=Expr, + # body=[...], + # orelse=[...]) + return self.generic_visit(node) + + def visit_While(self, node): + # While(test=Expr, + # body=[...], + # orelse=[...]) + return self.generic_visit(node) + + def visit_If(self, node): + # If(test=Expr, + # body=[...], + # orelse=[...]) + return self.generic_visit(node) + + def visit_With(self, node): + # With(items=[withitem_1, withitem_2, ..., withitem_n], + # body=[...]) + return self.generic_visit(node) + + def visit_AsyncWith(self, node): + # AsyncWith(items=[withitem_1, withitem_2, ..., withitem_n], + # body=[...]) + return self.generic_visit(node) + + def visit_Match(self, node): + # Match(subject=Expr, + # cases=[ + # match_case( + # pattern=pattern, + # guard=Expr, + # 
body=[...]), + # ... + # ]) + return self.generic_visit(node) + + def visit_Raise(self, node): + # Raise(exc=Expr) + return self.generic_visit(node) + + def visit_Try(self, node): + # Try(body=[...], + # handlers=[ExceptHandler_1, ExceptHandler_2, ..., ExceptHandler_b], + # orelse=[...], + # finalbody=[...]) + return self.generic_visit(node) + + def visit_Assert(self, node): + # Assert(test=Expr) + return self.generic_visit(node) + + def visit_Import(self, node): + # Import(names=[ + # alias( + # name=Identifier, + # asname=Identifier), + # ... + # ]) + return self.generic_visit(node) + + def visit_ImportFrom(self, node): + # ImportFrom(module=Identifier, + # names=[ + # alias( + # name=Identifier, + # asname=Identifier), + # ... + # ], + # level=num + return self.generic_visit(node) + + def visit_Global(self, node): + # Global(names=[Identifier_1, Identifier_2, ..., Identifier_n]) + return self.generic_visit(node) + + def visit_Nonlocal(self, node): + # Nonlocal(names=[Identifier_1, Identifier_2, ..., Identifier_n]) + return self.generic_visit(node) + + def visit_Expr(self, node): + # Expr(value=Expr) + return self.generic_visit(node) + + def visit_Pass(self, node): + # Pass() + return self.generic_visit(node) + + def visit_Break(self, node): + # Break() + return self.generic_visit(node) + + def visit_Continue(self, node): + # Continue() + return self.generic_visit(node) + + ############################################################################ + # Expressions # + ############################################################################ + + def visit_BoolOp(self, node): + # BoolOp(op=And | Or, + # values=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + # Lower the split penalty to allow splitting before or after the logical + # operator. 
+ split_before_operator = style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR') + operator_indices = [ + pyutils.GetNextTokenIndex(tokens, pyutils.TokenEnd(value)) + for value in node.values[:-1] + ] + for operator_index in operator_indices: + if not split_before_operator: + operator_index += 1 + _DecreasePenalty(tokens[operator_index], split_penalty.EXPR * 2) + + return self.generic_visit(node) + + def visit_NamedExpr(self, node): + # NamedExpr(target=Name, + # value=Expr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_BinOp(self, node): + # BinOp(left=LExpr + # op=Add | Sub | Mult | MatMult | Div | Mod | Pow | LShift | + # RShift | BitOr | BitXor | BitAnd | FloorDiv + # right=RExpr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + # Lower the split penalty to allow splitting before or after the arithmetic + # operator. + operator_index = pyutils.GetNextTokenIndex( + tokens, pyutils.TokenEnd(node.left)) + if not style.Get('SPLIT_BEFORE_ARITHMETIC_OPERATOR'): + operator_index += 1 + + _DecreasePenalty(tokens[operator_index], split_penalty.EXPR * 2) + + return self.generic_visit(node) + + def visit_UnaryOp(self, node): + # UnaryOp(op=Not | USub | UAdd | Invert, + # operand=Expr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + _IncreasePenalty(tokens[1], style.Get('SPLIT_PENALTY_AFTER_UNARY_OPERATOR')) + + return self.generic_visit(node) + + def visit_Lambda(self, node): + # Lambda(args=arguments( + # posonlyargs=[arg(...), arg(...), ..., arg(...)], + # args=[arg(...), arg(...), ..., arg(...)], + # kwonlyargs=[arg(...), arg(...), ..., arg(...)], + # kw_defaults=[arg(...), arg(...), ..., arg(...)], + # defaults=[arg(...), arg(...), ..., arg(...)]), + # body=Expr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.LAMBDA) + + if style.Get('ALLOW_MULTILINE_LAMBDAS'): + 
_SetPenalty(self._GetTokens(node.body), split_penalty.MULTIPLINE_LAMBDA) + + return self.generic_visit(node) + + def visit_IfExp(self, node): + # IfExp(test=TestExpr, + # body=BodyExpr, + # orelse=OrElseExpr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_Dict(self, node): + # Dict(keys=[Expr_1, Expr_2, ..., Expr_n], + # values=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens(node) + + # The keys should be on a single line if at all possible. + for key in node.keys: + subrange = pyutils.GetTokensInSubRange(tokens, key) + _IncreasePenalty(subrange[1:], split_penalty.DICT_KEY_EXPR) + + for value in node.values: + subrange = pyutils.GetTokensInSubRange(tokens, value) + _IncreasePenalty(subrange[1:], split_penalty.DICT_VALUE_EXPR) + + return self.generic_visit(node) + + def visit_Set(self, node): + # Set(elts=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens(node) + for element in node.elts: + subrange = pyutils.GetTokensInSubRange(tokens, element) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_ListComp(self, node): + # ListComp(elt=Expr, + # generators=[ + # comprehension( + # target=Expr, + # iter=Expr, + # ifs=[Expr_1, Expr_2, ..., Expr_n], + # is_async=0), + # ... 
+ # ]) + tokens = self._GetTokens(node) + element = pyutils.GetTokensInSubRange(tokens, node.elt) + _IncreasePenalty(element[1:], split_penalty.EXPR) + + for comp in node.generators: + subrange = pyutils.GetTokensInSubRange(tokens, comp.iter) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + + for if_expr in comp.ifs: + subrange = pyutils.GetTokensInSubRange(tokens, if_expr) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_SetComp(self, node): + # SetComp(elt=Expr, + # generators=[ + # comprehension( + # target=Expr, + # iter=Expr, + # ifs=[Expr_1, Expr_2, ..., Expr_n], + # is_async=0), + # ... + # ]) + tokens = self._GetTokens(node) + element = pyutils.GetTokensInSubRange(tokens, node.elt) + _IncreasePenalty(element[1:], split_penalty.EXPR) + + for comp in node.generators: + subrange = pyutils.GetTokensInSubRange(tokens, comp.iter) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + + for if_expr in comp.ifs: + subrange = pyutils.GetTokensInSubRange(tokens, if_expr) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_DictComp(self, node): + # DictComp(key=KeyExpr, + # value=ValExpr, + # generators=[ + # comprehension( + # target=TargetExpr + # iter=IterExpr, + # ifs=[Expr_1, Expr_2, ..., Expr_n]), + # is_async=0)], + # ... 
+ # ]) + tokens = self._GetTokens(node) + key = pyutils.GetTokensInSubRange(tokens, node.key) + _IncreasePenalty(key[1:], split_penalty.EXPR) + + value = pyutils.GetTokensInSubRange(tokens, node.value) + _IncreasePenalty(value[1:], split_penalty.EXPR) + + for comp in node.generators: + subrange = pyutils.GetTokensInSubRange(tokens, comp.iter) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + + for if_expr in comp.ifs: + subrange = pyutils.GetTokensInSubRange(tokens, if_expr) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_GeneratorExp(self, node): + # GeneratorExp(elt=Expr, + # generators=[ + # comprehension( + # target=Expr, + # iter=Expr, + # ifs=[Expr_1, Expr_2, ..., Expr_n], + # is_async=0), + # ... + # ]) + tokens = self._GetTokens(node) + element = pyutils.GetTokensInSubRange(tokens, node.elt) + _IncreasePenalty(element[1:], split_penalty.EXPR) + + for comp in node.generators: + subrange = pyutils.GetTokensInSubRange(tokens, comp.iter) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + + for if_expr in comp.ifs: + subrange = pyutils.GetTokensInSubRange(tokens, if_expr) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_Await(self, node): + # Await(value=Expr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_Yield(self, node): + # Yield(value=Expr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + + return self.generic_visit(node) + + def visit_YieldFrom(self, node): + # YieldFrom(value=Expr) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.EXPR) + tokens[2].split_penalty = split_penalty.UNBREAKABLE + + return self.generic_visit(node) + + def visit_Compare(self, node): + # Compare(left=LExpr, + # ops=[Op_1, Op_2, ..., Op_n], + # comparators=[Expr_1, Expr_2, ..., Expr_n]) + tokens = 
self._GetTokens(node)
+    _IncreasePenalty(tokens[1:], split_penalty.EXPR)
+
+    operator_indices = [
+        pyutils.GetNextTokenIndex(tokens, pyutils.TokenEnd(node.left))
+    ] + [
+        pyutils.GetNextTokenIndex(tokens, pyutils.TokenEnd(comparator))
+        for comparator in node.comparators[:-1]
+    ]
+    split_before = style.Get('SPLIT_BEFORE_ARITHMETIC_OPERATOR')
+
+    for operator_index in operator_indices:
+      if not split_before:
+        operator_index += 1
+      _DecreasePenalty(tokens[operator_index], split_penalty.EXPR * 2)
+
+    return self.generic_visit(node)
+
+  def visit_Call(self, node):
+    # Call(func=Expr,
+    #      args=[Expr_1, Expr_2, ..., Expr_n],
+    #      keywords=[
+    #          keyword(
+    #              arg='d',
+    #              value=Expr),
+    #          ...
+    #      ])
+    tokens = self._GetTokens(node)
+
+    # Never split before the opening parenthesis.
+    paren_index = pyutils.GetNextTokenIndex(tokens, pyutils.TokenEnd(node.func))
+    _IncreasePenalty(tokens[paren_index], split_penalty.UNBREAKABLE)
+
+    for arg in node.args:
+      subrange = pyutils.GetTokensInSubRange(tokens, arg)
+      _IncreasePenalty(subrange[1:], split_penalty.EXPR)
+
+    return self.generic_visit(node)
+
+  def visit_FormattedValue(self, node):
+    # FormattedValue(value=Expr,
+    #                conversion=-1)
+    return node  # Ignore formatted values.
+ + def visit_JoinedStr(self, node): + # JoinedStr(values=[Expr_1, Expr_2, ..., Expr_n]) + return self.generic_visit(node) + + def visit_Constant(self, node): + # Constant(value=Expr) + return self.generic_visit(node) + + def visit_Attribute(self, node): + # Attribute(value=Expr, + # attr=Identifier) + tokens = self._GetTokens(node) + split_before = style.Get('SPLIT_BEFORE_DOT') + dot_indices = pyutils.GetNextTokenIndex( + tokens, pyutils.TokenEnd(node.value)) + + if not split_before: + dot_indices += 1 + _IncreasePenalty(tokens[dot_indices], split_penalty.VERY_STRONGLY_CONNECTED) + + return self.generic_visit(node) + + def visit_Subscript(self, node): + # Subscript(value=ValueExpr, + # slice=SliceExpr) + tokens = self._GetTokens(node) + + # Don't split before the opening bracket of a subscript. + bracket_index = pyutils.GetNextTokenIndex( + tokens, pyutils.TokenEnd(node.value)) + _IncreasePenalty(tokens[bracket_index], split_penalty.UNBREAKABLE) + + return self.generic_visit(node) + + def visit_Starred(self, node): + # Starred(value=Expr) + return self.generic_visit(node) + + def visit_Name(self, node): + # Name(id=Identifier) + tokens = self._GetTokens(node) + _IncreasePenalty(tokens[1:], split_penalty.UNBREAKABLE) + + return self.generic_visit(node) + + def visit_List(self, node): + # List(elts=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens(node) + + for element in node.elts: + subrange = pyutils.GetTokensInSubRange(tokens, element) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) + + return self.generic_visit(node) + + def visit_Tuple(self, node): + # Tuple(elts=[Expr_1, Expr_2, ..., Expr_n]) + tokens = self._GetTokens(node) + + for element in node.elts: + subrange = pyutils.GetTokensInSubRange(tokens, element) + _IncreasePenalty(subrange[1:], split_penalty.EXPR) + _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) + + return self.generic_visit(node) + + def visit_Slice(self, node): + # 
Slice(lower=Expr, + # upper=Expr, + # step=Expr) + tokens = self._GetTokens(node) + + if hasattr(node, 'lower') and node.lower: + subrange = pyutils.GetTokensInSubRange(tokens, node.lower) + _IncreasePenalty(subrange, split_penalty.EXPR) + _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) + + if hasattr(node, 'upper') and node.upper: + colon_index = pyutils.GetPrevTokenIndex( + tokens, pyutils.TokenStart(node.upper)) + _IncreasePenalty(tokens[colon_index], split_penalty.UNBREAKABLE) + subrange = pyutils.GetTokensInSubRange(tokens, node.upper) + _IncreasePenalty(subrange, split_penalty.EXPR) + _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) + + if hasattr(node, 'step') and node.step: + colon_index = pyutils.GetPrevTokenIndex( + tokens, pyutils.TokenStart(node.step)) + _IncreasePenalty(tokens[colon_index], split_penalty.UNBREAKABLE) + subrange = pyutils.GetTokensInSubRange(tokens, node.step) + _IncreasePenalty(subrange, split_penalty.EXPR) + _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) + + return self.generic_visit(node) + + ############################################################################ + # Expression Context # + ############################################################################ + + def visit_Load(self, node): + # Load() + return self.generic_visit(node) + + def visit_Store(self, node): + # Store() + return self.generic_visit(node) + + def visit_Del(self, node): + # Del() + return self.generic_visit(node) + + ############################################################################ + # Boolean Operators # + ############################################################################ - def visit_And( self, node ): - # And() - return self.generic_visit( node ) + def visit_And(self, node): + # And() + return self.generic_visit(node) - def visit_Or( self, node ): - # Or() - return self.generic_visit( node ) - - ############################################################################ - # Binary Operators # - 
############################################################################ - - def visit_Add( self, node ): - # Add() - return self.generic_visit( node ) + def visit_Or(self, node): + # Or() + return self.generic_visit(node) + + ############################################################################ + # Binary Operators # + ############################################################################ + + def visit_Add(self, node): + # Add() + return self.generic_visit(node) - def visit_Sub( self, node ): - # Sub() - return self.generic_visit( node ) - - def visit_Mult( self, node ): - # Mult() - return self.generic_visit( node ) - - def visit_MatMult( self, node ): - # MatMult() - return self.generic_visit( node ) - - def visit_Div( self, node ): - # Div() - return self.generic_visit( node ) - - def visit_Mod( self, node ): - # Mod() - return self.generic_visit( node ) - - def visit_Pow( self, node ): - # Pow() - return self.generic_visit( node ) - - def visit_LShift( self, node ): - # LShift() - return self.generic_visit( node ) - - def visit_RShift( self, node ): - # RShift() - return self.generic_visit( node ) - - def visit_BitOr( self, node ): - # BitOr() - return self.generic_visit( node ) - - def visit_BitXor( self, node ): - # BitXor() - return self.generic_visit( node ) - - def visit_BitAnd( self, node ): - # BitAnd() - return self.generic_visit( node ) - - def visit_FloorDiv( self, node ): - # FloorDiv() - return self.generic_visit( node ) - - ############################################################################ - # Unary Operators # - ############################################################################ - - def visit_Invert( self, node ): - # Invert() - return self.generic_visit( node ) - - def visit_Not( self, node ): - # Not() - return self.generic_visit( node ) - - def visit_UAdd( self, node ): - # UAdd() - return self.generic_visit( node ) - - def visit_USub( self, node ): - # USub() - return self.generic_visit( node ) - - 
############################################################################ - # Comparison Operators # - ############################################################################ - - def visit_Eq( self, node ): - # Eq() - return self.generic_visit( node ) - - def visit_NotEq( self, node ): - # NotEq() - return self.generic_visit( node ) - - def visit_Lt( self, node ): - # Lt() - return self.generic_visit( node ) - - def visit_LtE( self, node ): - # LtE() - return self.generic_visit( node ) - - def visit_Gt( self, node ): - # Gt() - return self.generic_visit( node ) - - def visit_GtE( self, node ): - # GtE() - return self.generic_visit( node ) - - def visit_Is( self, node ): - # Is() - return self.generic_visit( node ) - - def visit_IsNot( self, node ): - # IsNot() - return self.generic_visit( node ) - - def visit_In( self, node ): - # In() - return self.generic_visit( node ) - - def visit_NotIn( self, node ): - # NotIn() - return self.generic_visit( node ) - - ############################################################################ - # Exception Handler # - ############################################################################ - - def visit_ExceptionHandler( self, node ): - # ExceptHandler(type=Expr, - # name=Identifier, - # body=[...]) - return self.generic_visit( node ) - - ############################################################################ - # Matching Patterns # - ############################################################################ - - def visit_MatchValue( self, node ): - # MatchValue(value=Expr) - return self.generic_visit( node ) - - def visit_MatchSingleton( self, node ): - # MatchSingleton(value=Constant) - return self.generic_visit( node ) - - def visit_MatchSequence( self, node ): - # MatchSequence(patterns=[pattern_1, pattern_2, ..., pattern_n]) - return self.generic_visit( node ) - - def visit_MatchMapping( self, node ): - # MatchMapping(keys=[Expr_1, Expr_2, ..., Expr_n], - # patterns=[pattern_1, pattern_2, ..., 
pattern_m], - # rest=Identifier) - return self.generic_visit( node ) - - def visit_MatchClass( self, node ): - # MatchClass(cls=Expr, - # patterns=[pattern_1, pattern_2, ...], - # kwd_attrs=[Identifier_1, Identifier_2, ...], - # kwd_patterns=[pattern_1, pattern_2, ...]) - return self.generic_visit( node ) - - def visit_MatchStar( self, node ): - # MatchStar(name=Identifier) - return self.generic_visit( node ) - - def visit_MatchAs( self, node ): - # MatchAs(pattern=pattern, - # name=Identifier) - return self.generic_visit( node ) - - def visit_MatchOr( self, node ): - # MatchOr(patterns=[pattern_1, pattern_2, ...]) - return self.generic_visit( node ) - - ############################################################################ - # Type Ignore # - ############################################################################ - - def visit_TypeIgnore( self, node ): - # TypeIgnore(tag=string) - return self.generic_visit( node ) - - ############################################################################ - # Miscellaneous # - ############################################################################ - - def visit_comprehension( self, node ): - # comprehension(target=Expr, - # iter=Expr, - # ifs=[Expr_1, Expr_2, ..., Expr_n], - # is_async=0) - return self.generic_visit( node ) - - def visit_arguments( self, node ): - # arguments(posonlyargs=[arg_1, arg_2, ..., arg_a], - # args=[arg_1, arg_2, ..., arg_b], - # vararg=arg, - # kwonlyargs=[arg_1, arg_2, ..., arg_c], - # kw_defaults=[arg_1, arg_2, ..., arg_d], - # kwarg=arg, - # defaults=[Expr_1, Expr_2, ..., Expr_n]) - return self.generic_visit( node ) - - def visit_arg( self, node ): - # arg(arg=Identifier, - # annotation=Expr, - # type_comment='') - tokens = self._GetTokens( node ) - - # Process any annotations. 
- if hasattr( node, 'annotation' ) and node.annotation: - annotation = node.annotation - subrange = pyutils.GetTokensInSubRange( tokens, annotation ) - _IncreasePenalty( subrange, split_penalty.ANNOTATION ) - - return self.generic_visit( node ) - - def visit_keyword( self, node ): - # keyword(arg=Identifier, - # value=Expr) - return self.generic_visit( node ) - - def visit_alias( self, node ): - # alias(name=Identifier, - # asname=Identifier) - return self.generic_visit( node ) - - def visit_withitem( self, node ): - # withitem(context_expr=Expr, - # optional_vars=Expr) - return self.generic_visit( node ) - - def visit_match_case( self, node ): - # match_case(pattern=pattern, - # guard=Expr, - # body=[...]) - return self.generic_visit( node ) - - -def _IncreasePenalty( tokens, amt ): - if not isinstance( tokens, list ): - tokens = [ tokens ] - for token in tokens: - token.split_penalty += amt - - -def _DecreasePenalty( tokens, amt ): - if not isinstance( tokens, list ): - tokens = [ tokens ] - for token in tokens: - token.split_penalty -= amt - - -def _SetPenalty( tokens, amt ): - if not isinstance( tokens, list ): - tokens = [ tokens ] - for token in tokens: - token.split_penalty = amt + def visit_Sub(self, node): + # Sub() + return self.generic_visit(node) + + def visit_Mult(self, node): + # Mult() + return self.generic_visit(node) + + def visit_MatMult(self, node): + # MatMult() + return self.generic_visit(node) + + def visit_Div(self, node): + # Div() + return self.generic_visit(node) + + def visit_Mod(self, node): + # Mod() + return self.generic_visit(node) + + def visit_Pow(self, node): + # Pow() + return self.generic_visit(node) + + def visit_LShift(self, node): + # LShift() + return self.generic_visit(node) + + def visit_RShift(self, node): + # RShift() + return self.generic_visit(node) + + def visit_BitOr(self, node): + # BitOr() + return self.generic_visit(node) + + def visit_BitXor(self, node): + # BitXor() + return self.generic_visit(node) + + def 
visit_BitAnd(self, node): + # BitAnd() + return self.generic_visit(node) + + def visit_FloorDiv(self, node): + # FloorDiv() + return self.generic_visit(node) + + ############################################################################ + # Unary Operators # + ############################################################################ + + def visit_Invert(self, node): + # Invert() + return self.generic_visit(node) + + def visit_Not(self, node): + # Not() + return self.generic_visit(node) + + def visit_UAdd(self, node): + # UAdd() + return self.generic_visit(node) + + def visit_USub(self, node): + # USub() + return self.generic_visit(node) + + ############################################################################ + # Comparison Operators # + ############################################################################ + + def visit_Eq(self, node): + # Eq() + return self.generic_visit(node) + + def visit_NotEq(self, node): + # NotEq() + return self.generic_visit(node) + + def visit_Lt(self, node): + # Lt() + return self.generic_visit(node) + + def visit_LtE(self, node): + # LtE() + return self.generic_visit(node) + + def visit_Gt(self, node): + # Gt() + return self.generic_visit(node) + + def visit_GtE(self, node): + # GtE() + return self.generic_visit(node) + + def visit_Is(self, node): + # Is() + return self.generic_visit(node) + + def visit_IsNot(self, node): + # IsNot() + return self.generic_visit(node) + + def visit_In(self, node): + # In() + return self.generic_visit(node) + + def visit_NotIn(self, node): + # NotIn() + return self.generic_visit(node) + + ############################################################################ + # Exception Handler # + ############################################################################ + + def visit_ExceptionHandler(self, node): + # ExceptHandler(type=Expr, + # name=Identifier, + # body=[...]) + return self.generic_visit(node) + + ############################################################################ + 
# Matching Patterns # + ############################################################################ + + def visit_MatchValue(self, node): + # MatchValue(value=Expr) + return self.generic_visit(node) + + def visit_MatchSingleton(self, node): + # MatchSingleton(value=Constant) + return self.generic_visit(node) + + def visit_MatchSequence(self, node): + # MatchSequence(patterns=[pattern_1, pattern_2, ..., pattern_n]) + return self.generic_visit(node) + + def visit_MatchMapping(self, node): + # MatchMapping(keys=[Expr_1, Expr_2, ..., Expr_n], + # patterns=[pattern_1, pattern_2, ..., pattern_m], + # rest=Identifier) + return self.generic_visit(node) + + def visit_MatchClass(self, node): + # MatchClass(cls=Expr, + # patterns=[pattern_1, pattern_2, ...], + # kwd_attrs=[Identifier_1, Identifier_2, ...], + # kwd_patterns=[pattern_1, pattern_2, ...]) + return self.generic_visit(node) + + def visit_MatchStar(self, node): + # MatchStar(name=Identifier) + return self.generic_visit(node) + + def visit_MatchAs(self, node): + # MatchAs(pattern=pattern, + # name=Identifier) + return self.generic_visit(node) + + def visit_MatchOr(self, node): + # MatchOr(patterns=[pattern_1, pattern_2, ...]) + return self.generic_visit(node) + + ############################################################################ + # Type Ignore # + ############################################################################ + + def visit_TypeIgnore(self, node): + # TypeIgnore(tag=string) + return self.generic_visit(node) + + ############################################################################ + # Miscellaneous # + ############################################################################ + + def visit_comprehension(self, node): + # comprehension(target=Expr, + # iter=Expr, + # ifs=[Expr_1, Expr_2, ..., Expr_n], + # is_async=0) + return self.generic_visit(node) + + def visit_arguments(self, node): + # arguments(posonlyargs=[arg_1, arg_2, ..., arg_a], + # args=[arg_1, arg_2, ..., arg_b], + # 
vararg=arg, + # kwonlyargs=[arg_1, arg_2, ..., arg_c], + # kw_defaults=[arg_1, arg_2, ..., arg_d], + # kwarg=arg, + # defaults=[Expr_1, Expr_2, ..., Expr_n]) + return self.generic_visit(node) + + def visit_arg(self, node): + # arg(arg=Identifier, + # annotation=Expr, + # type_comment='') + tokens = self._GetTokens(node) + + # Process any annotations. + if hasattr(node, 'annotation') and node.annotation: + annotation = node.annotation + subrange = pyutils.GetTokensInSubRange(tokens, annotation) + _IncreasePenalty(subrange, split_penalty.ANNOTATION) + + return self.generic_visit(node) + + def visit_keyword(self, node): + # keyword(arg=Identifier, + # value=Expr) + return self.generic_visit(node) + + def visit_alias(self, node): + # alias(name=Identifier, + # asname=Identifier) + return self.generic_visit(node) + + def visit_withitem(self, node): + # withitem(context_expr=Expr, + # optional_vars=Expr) + return self.generic_visit(node) + + def visit_match_case(self, node): + # match_case(pattern=pattern, + # guard=Expr, + # body=[...]) + return self.generic_visit(node) + + +def _IncreasePenalty(tokens, amt): + if not isinstance(tokens, list): + tokens = [tokens] + for token in tokens: + token.split_penalty += amt + + +def _DecreasePenalty(tokens, amt): + if not isinstance(tokens, list): + tokens = [tokens] + for token in tokens: + token.split_penalty -= amt + + +def _SetPenalty(tokens, amt): + if not isinstance(tokens, list): + tokens = [tokens] + for token in tokens: + token.split_penalty = amt diff --git a/yapf/pytree/blank_line_calculator.py b/yapf/pytree/blank_line_calculator.py index 141306e07..8aa20ec0a 100644 --- a/yapf/pytree/blank_line_calculator.py +++ b/yapf/pytree/blank_line_calculator.py @@ -35,78 +35,79 @@ _PYTHON_STATEMENTS = frozenset( { - 'small_stmt', 'expr_stmt', 'print_stmt', 'del_stmt', 'pass_stmt', 'break_stmt', - 'continue_stmt', 'return_stmt', 'raise_stmt', 'yield_stmt', 'import_stmt', - 'global_stmt', 'exec_stmt', 'assert_stmt', 'if_stmt', 
'while_stmt', 'for_stmt', - 'try_stmt', 'with_stmt', 'nonlocal_stmt', 'async_stmt', 'simple_stmt' - } ) + 'small_stmt', 'expr_stmt', 'print_stmt', 'del_stmt', 'pass_stmt', + 'break_stmt', 'continue_stmt', 'return_stmt', 'raise_stmt', + 'yield_stmt', 'import_stmt', 'global_stmt', 'exec_stmt', 'assert_stmt', + 'if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', + 'nonlocal_stmt', 'async_stmt', 'simple_stmt' + }) -def CalculateBlankLines( tree ): - """Run the blank line calculator visitor over the tree. +def CalculateBlankLines(tree): + """Run the blank line calculator visitor over the tree. This modifies the tree in place. Arguments: tree: the top-level pytree node to annotate with subtypes. """ - blank_line_calculator = _BlankLineCalculator() - blank_line_calculator.Visit( tree ) - - -class _BlankLineCalculator( pytree_visitor.PyTreeVisitor ): - """_BlankLineCalculator - see file-level docstring for a description.""" - - def __init__( self ): - self.class_level = 0 - self.function_level = 0 - self.last_comment_lineno = 0 - self.last_was_decorator = False - self.last_was_class_or_function = False - - def Visit_simple_stmt( self, node ): # pylint: disable=invalid-name - self.DefaultNodeVisit( node ) - if node.children[ 0 ].type == grammar_token.COMMENT: - self.last_comment_lineno = node.children[ 0 ].lineno - - def Visit_decorator( self, node ): # pylint: disable=invalid-name - if ( self.last_comment_lineno and - self.last_comment_lineno == node.children[ 0 ].lineno - 1 ): - _SetNumNewlines( node.children[ 0 ], _NO_BLANK_LINES ) - else: - _SetNumNewlines( node.children[ 0 ], self._GetNumNewlines( node ) ) - for child in node.children: - self.Visit( child ) - self.last_was_decorator = True - - def Visit_classdef( self, node ): # pylint: disable=invalid-name - self.last_was_class_or_function = False - index = self._SetBlankLinesBetweenCommentAndClassFunc( node ) - self.last_was_decorator = False - self.class_level += 1 - for child in node.children[ index : ]: 
- self.Visit( child ) - self.class_level -= 1 - self.last_was_class_or_function = True - - def Visit_funcdef( self, node ): # pylint: disable=invalid-name - self.last_was_class_or_function = False - index = self._SetBlankLinesBetweenCommentAndClassFunc( node ) - if _AsyncFunction( node ): - index = self._SetBlankLinesBetweenCommentAndClassFunc( - node.prev_sibling.parent ) - _SetNumNewlines( node.children[ 0 ], None ) - else: - index = self._SetBlankLinesBetweenCommentAndClassFunc( node ) - self.last_was_decorator = False - self.function_level += 1 - for child in node.children[ index : ]: - self.Visit( child ) - self.function_level -= 1 - self.last_was_class_or_function = True - - def DefaultNodeVisit( self, node ): - """Override the default visitor for Node. + blank_line_calculator = _BlankLineCalculator() + blank_line_calculator.Visit(tree) + + +class _BlankLineCalculator(pytree_visitor.PyTreeVisitor): + """_BlankLineCalculator - see file-level docstring for a description.""" + + def __init__(self): + self.class_level = 0 + self.function_level = 0 + self.last_comment_lineno = 0 + self.last_was_decorator = False + self.last_was_class_or_function = False + + def Visit_simple_stmt(self, node): # pylint: disable=invalid-name + self.DefaultNodeVisit(node) + if node.children[0].type == grammar_token.COMMENT: + self.last_comment_lineno = node.children[0].lineno + + def Visit_decorator(self, node): # pylint: disable=invalid-name + if (self.last_comment_lineno and + self.last_comment_lineno == node.children[0].lineno - 1): + _SetNumNewlines(node.children[0], _NO_BLANK_LINES) + else: + _SetNumNewlines(node.children[0], self._GetNumNewlines(node)) + for child in node.children: + self.Visit(child) + self.last_was_decorator = True + + def Visit_classdef(self, node): # pylint: disable=invalid-name + self.last_was_class_or_function = False + index = self._SetBlankLinesBetweenCommentAndClassFunc(node) + self.last_was_decorator = False + self.class_level += 1 + for child in 
node.children[index:]: + self.Visit(child) + self.class_level -= 1 + self.last_was_class_or_function = True + + def Visit_funcdef(self, node): # pylint: disable=invalid-name + self.last_was_class_or_function = False + index = self._SetBlankLinesBetweenCommentAndClassFunc(node) + if _AsyncFunction(node): + index = self._SetBlankLinesBetweenCommentAndClassFunc( + node.prev_sibling.parent) + _SetNumNewlines(node.children[0], None) + else: + index = self._SetBlankLinesBetweenCommentAndClassFunc(node) + self.last_was_decorator = False + self.function_level += 1 + for child in node.children[index:]: + self.Visit(child) + self.function_level -= 1 + self.last_was_class_or_function = True + + def DefaultNodeVisit(self, node): + """Override the default visitor for Node. This will set the blank lines required if the last entity was a class or function. @@ -114,15 +115,15 @@ def DefaultNodeVisit( self, node ): Arguments: node: (pytree.Node) The node to visit. """ - if self.last_was_class_or_function: - if pytree_utils.NodeName( node ) in _PYTHON_STATEMENTS: - leaf = pytree_utils.FirstLeafNode( node ) - _SetNumNewlines( leaf, self._GetNumNewlines( leaf ) ) - self.last_was_class_or_function = False - super( _BlankLineCalculator, self ).DefaultNodeVisit( node ) + if self.last_was_class_or_function: + if pytree_utils.NodeName(node) in _PYTHON_STATEMENTS: + leaf = pytree_utils.FirstLeafNode(node) + _SetNumNewlines(leaf, self._GetNumNewlines(leaf)) + self.last_was_class_or_function = False + super(_BlankLineCalculator, self).DefaultNodeVisit(node) - def _SetBlankLinesBetweenCommentAndClassFunc( self, node ): - """Set the number of blanks between a comment and class or func definition. + def _SetBlankLinesBetweenCommentAndClassFunc(self, node): + """Set the number of blanks between a comment and class or func definition. Class and function definitions have leading comments as children of the classdef and functdef nodes. 
@@ -133,50 +134,50 @@ def _SetBlankLinesBetweenCommentAndClassFunc( self, node ): Returns: The index of the first child past the comment nodes. """ - index = 0 - while pytree_utils.IsCommentStatement( node.children[ index ] ): - # Standalone comments are wrapped in a simple_stmt node with the comment - # node as its only child. - self.Visit( node.children[ index ].children[ 0 ] ) - if not self.last_was_decorator: - _SetNumNewlines( node.children[ index ].children[ 0 ], _ONE_BLANK_LINE ) - index += 1 - if ( index and node.children[ index ].lineno - 1 - == node.children[ index - 1 ].children[ 0 ].lineno ): - _SetNumNewlines( node.children[ index ], _NO_BLANK_LINES ) - else: - if self.last_comment_lineno + 1 == node.children[ index ].lineno: - num_newlines = _NO_BLANK_LINES - else: - num_newlines = self._GetNumNewlines( node ) - _SetNumNewlines( node.children[ index ], num_newlines ) - return index - - def _GetNumNewlines( self, node ): - if self.last_was_decorator: - return _NO_BLANK_LINES - elif self._IsTopLevel( node ): - return 1 + style.Get( 'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION' ) - return _ONE_BLANK_LINE - - def _IsTopLevel( self, node ): - return ( - not ( self.class_level or self.function_level ) and - _StartsInZerothColumn( node ) ) - - -def _SetNumNewlines( node, num_newlines ): - pytree_utils.SetNodeAnnotation( - node, pytree_utils.Annotation.NEWLINES, num_newlines ) - - -def _StartsInZerothColumn( node ): + index = 0 + while pytree_utils.IsCommentStatement(node.children[index]): + # Standalone comments are wrapped in a simple_stmt node with the comment + # node as its only child. 
+ self.Visit(node.children[index].children[0]) + if not self.last_was_decorator: + _SetNumNewlines(node.children[index].children[0], _ONE_BLANK_LINE) + index += 1 + if (index and node.children[index].lineno - 1 + == node.children[index - 1].children[0].lineno): + _SetNumNewlines(node.children[index], _NO_BLANK_LINES) + else: + if self.last_comment_lineno + 1 == node.children[index].lineno: + num_newlines = _NO_BLANK_LINES + else: + num_newlines = self._GetNumNewlines(node) + _SetNumNewlines(node.children[index], num_newlines) + return index + + def _GetNumNewlines(self, node): + if self.last_was_decorator: + return _NO_BLANK_LINES + elif self._IsTopLevel(node): + return 1 + style.Get('BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION') + return _ONE_BLANK_LINE + + def _IsTopLevel(self, node): return ( - pytree_utils.FirstLeafNode( node ).column == 0 or - ( _AsyncFunction( node ) and node.prev_sibling.column == 0 ) ) + not (self.class_level or self.function_level) and + _StartsInZerothColumn(node)) -def _AsyncFunction( node ): - return ( - py3compat.PY3 and node.prev_sibling and - node.prev_sibling.type == grammar_token.ASYNC ) +def _SetNumNewlines(node, num_newlines): + pytree_utils.SetNodeAnnotation( + node, pytree_utils.Annotation.NEWLINES, num_newlines) + + +def _StartsInZerothColumn(node): + return ( + pytree_utils.FirstLeafNode(node).column == 0 or + (_AsyncFunction(node) and node.prev_sibling.column == 0)) + + +def _AsyncFunction(node): + return ( + py3compat.PY3 and node.prev_sibling and + node.prev_sibling.type == grammar_token.ASYNC) diff --git a/yapf/pytree/comment_splicer.py b/yapf/pytree/comment_splicer.py index 33706ae47..01911c896 100644 --- a/yapf/pytree/comment_splicer.py +++ b/yapf/pytree/comment_splicer.py @@ -28,8 +28,8 @@ from yapf.pytree import pytree_utils -def SpliceComments( tree ): - """Given a pytree, splice comments into nodes of their own right. +def SpliceComments(tree): + """Given a pytree, splice comments into nodes of their own right. 
Extract comments from the prefixes where they are housed after parsing. The prefixes that previously housed the comments become empty. @@ -38,189 +38,175 @@ def SpliceComments( tree ): tree: a pytree.Node - the tree to work on. The tree is modified by this function. """ - # The previous leaf node encountered in the traversal. - # This is a list because Python 2.x doesn't have 'nonlocal' :) - prev_leaf = [ None ] - _AnnotateIndents( tree ) - - def _VisitNodeRec( node ): - """Recursively visit each node to splice comments into the AST.""" - # This loop may insert into node.children, so we'll iterate over a copy. - for child in node.children[ : ]: - if isinstance( child, pytree.Node ): - # Nodes don't have prefixes. - _VisitNodeRec( child ) - else: - if child.prefix.lstrip().startswith( '#' ): - # We have a comment prefix in this child, so splicing is needed. - comment_prefix = child.prefix - comment_lineno = child.lineno - comment_prefix.count( '\n' ) - comment_column = child.column - - # Remember the leading indentation of this prefix and clear it. - # Mopping up the prefix is important because we may go over this same - # child in the next iteration... - child_prefix = child.prefix.lstrip( '\n' ) - prefix_indent = child_prefix[ : child_prefix.find( '#' ) ] - if '\n' in prefix_indent: - prefix_indent = prefix_indent[ prefix_indent.rfind( '\n' ) + - 1 : ] - child.prefix = '' - - if child.type == token.NEWLINE: - # If the prefix was on a NEWLINE leaf, it's part of the line so it - # will be inserted after the previously encountered leaf. - # We can't just insert it before the NEWLINE node, because as a - # result of the way pytrees are organized, this node can be under - # an inappropriate parent. 
- comment_column -= len( comment_prefix.lstrip() ) - pytree_utils.InsertNodesAfter( - _CreateCommentsFromPrefix( - comment_prefix, - comment_lineno, - comment_column, - standalone = False ), prev_leaf[ 0 ] ) - elif child.type == token.DEDENT: - # Comment prefixes on DEDENT nodes also deserve special treatment, - # because their final placement depends on their prefix. - # We'll look for an ancestor of this child with a matching - # indentation, and insert the comment before it if the ancestor is - # on a DEDENT node and after it otherwise. - # - # lib2to3 places comments that should be separated into the same - # DEDENT node. For example, "comment 1" and "comment 2" will be - # combined. - # - # def _(): - # for x in y: - # pass - # # comment 1 - # - # # comment 2 - # pass - # - # In this case, we need to split them up ourselves. - - # Split into groups of comments at decreasing levels of indentation - comment_groups = [] - comment_column = None - for cmt in comment_prefix.split( '\n' ): - col = cmt.find( '#' ) - if col < 0: - if comment_column is None: - # Skip empty lines at the top of the first comment group - comment_lineno += 1 - continue - elif comment_column is None or col < comment_column: - comment_column = col - comment_indent = cmt[ : comment_column ] - comment_groups.append( - ( comment_column, comment_indent, [] ) ) - comment_groups[ -1 ][ -1 ].append( cmt ) - - # Insert a node for each group - for comment_column, comment_indent, comment_group in comment_groups: - ancestor_at_indent = _FindAncestorAtIndent( - child, comment_indent ) - if ancestor_at_indent.type == token.DEDENT: - InsertNodes = pytree_utils.InsertNodesBefore # pylint: disable=invalid-name # noqa - else: - InsertNodes = pytree_utils.InsertNodesAfter # pylint: disable=invalid-name # noqa - InsertNodes( - _CreateCommentsFromPrefix( - '\n'.join( comment_group ) + '\n', - comment_lineno, - comment_column, - standalone = True ), ancestor_at_indent ) - comment_lineno += len( comment_group ) - 
else: - # Otherwise there are two cases. - # - # 1. The comment is on its own line - # 2. The comment is part of an expression. - # - # Unfortunately, it's fairly difficult to distinguish between the - # two in lib2to3 trees. The algorithm here is to determine whether - # child is the first leaf in the statement it belongs to. If it is, - # then the comment (which is a prefix) belongs on a separate line. - # If it is not, it means the comment is buried deep in the statement - # and is part of some expression. - stmt_parent = _FindStmtParent( child ) - - for leaf_in_parent in stmt_parent.leaves(): - if leaf_in_parent.type == token.NEWLINE: - continue - elif id( leaf_in_parent ) == id( child ): - # This comment stands on its own line, and it has to be inserted - # into the appropriate parent. We'll have to find a suitable - # parent to insert into. See comments above - # _STANDALONE_LINE_NODES for more details. - node_with_line_parent = _FindNodeWithStandaloneLineParent( - child ) - - if pytree_utils.NodeName( - node_with_line_parent.parent ) in { 'funcdef', - 'classdef' - }: - # Keep a comment that's not attached to a function or class - # next to the object it is attached to. 
- comment_end = ( - comment_lineno + - comment_prefix.rstrip( '\n' ).count( '\n' ) ) - if comment_end < node_with_line_parent.lineno - 1: - node_with_line_parent = node_with_line_parent.parent - - pytree_utils.InsertNodesBefore( - _CreateCommentsFromPrefix( - comment_prefix, - comment_lineno, - 0, - standalone = True ), node_with_line_parent ) - break - else: - if comment_lineno == prev_leaf[ 0 ].lineno: - comment_lines = comment_prefix.splitlines() - value = comment_lines[ 0 ].lstrip() - if value.rstrip( '\n' ): - comment_column = prev_leaf[ 0 ].column - comment_column += len( prev_leaf[ 0 ].value ) - comment_column += ( - len( comment_lines[ 0 ] ) - - len( comment_lines[ 0 ].lstrip() ) ) - comment_leaf = pytree.Leaf( - type = token.COMMENT, - value = value.rstrip( '\n' ), - context = ( - '', ( comment_lineno, - comment_column ) ) ) - pytree_utils.InsertNodesAfter( - [ comment_leaf ], prev_leaf[ 0 ] ) - comment_prefix = '\n'.join( - comment_lines[ 1 : ] ) - comment_lineno += 1 - - rindex = ( - 0 if '\n' not in comment_prefix.rstrip() else - comment_prefix.rstrip().rindex( '\n' ) + 1 ) - comment_column = ( - len( comment_prefix[ rindex : ] ) - - len( comment_prefix[ rindex : ].lstrip() ) ) - comments = _CreateCommentsFromPrefix( - comment_prefix, - comment_lineno, - comment_column, - standalone = False ) - pytree_utils.InsertNodesBefore( comments, child ) - break - - prev_leaf[ 0 ] = child - - _VisitNodeRec( tree ) + # The previous leaf node encountered in the traversal. + # This is a list because Python 2.x doesn't have 'nonlocal' :) + prev_leaf = [None] + _AnnotateIndents(tree) + + def _VisitNodeRec(node): + """Recursively visit each node to splice comments into the AST.""" + # This loop may insert into node.children, so we'll iterate over a copy. + for child in node.children[:]: + if isinstance(child, pytree.Node): + # Nodes don't have prefixes. 
+ _VisitNodeRec(child) + else: + if child.prefix.lstrip().startswith('#'): + # We have a comment prefix in this child, so splicing is needed. + comment_prefix = child.prefix + comment_lineno = child.lineno - comment_prefix.count('\n') + comment_column = child.column + + # Remember the leading indentation of this prefix and clear it. + # Mopping up the prefix is important because we may go over this same + # child in the next iteration... + child_prefix = child.prefix.lstrip('\n') + prefix_indent = child_prefix[:child_prefix.find('#')] + if '\n' in prefix_indent: + prefix_indent = prefix_indent[prefix_indent.rfind('\n') + 1:] + child.prefix = '' + + if child.type == token.NEWLINE: + # If the prefix was on a NEWLINE leaf, it's part of the line so it + # will be inserted after the previously encountered leaf. + # We can't just insert it before the NEWLINE node, because as a + # result of the way pytrees are organized, this node can be under + # an inappropriate parent. + comment_column -= len(comment_prefix.lstrip()) + pytree_utils.InsertNodesAfter( + _CreateCommentsFromPrefix( + comment_prefix, + comment_lineno, + comment_column, + standalone=False), prev_leaf[0]) + elif child.type == token.DEDENT: + # Comment prefixes on DEDENT nodes also deserve special treatment, + # because their final placement depends on their prefix. + # We'll look for an ancestor of this child with a matching + # indentation, and insert the comment before it if the ancestor is + # on a DEDENT node and after it otherwise. + # + # lib2to3 places comments that should be separated into the same + # DEDENT node. For example, "comment 1" and "comment 2" will be + # combined. + # + # def _(): + # for x in y: + # pass + # # comment 1 + # + # # comment 2 + # pass + # + # In this case, we need to split them up ourselves. 
+ + # Split into groups of comments at decreasing levels of indentation + comment_groups = [] + comment_column = None + for cmt in comment_prefix.split('\n'): + col = cmt.find('#') + if col < 0: + if comment_column is None: + # Skip empty lines at the top of the first comment group + comment_lineno += 1 + continue + elif comment_column is None or col < comment_column: + comment_column = col + comment_indent = cmt[:comment_column] + comment_groups.append((comment_column, comment_indent, [])) + comment_groups[-1][-1].append(cmt) + + # Insert a node for each group + for comment_column, comment_indent, comment_group in comment_groups: + ancestor_at_indent = _FindAncestorAtIndent(child, comment_indent) + if ancestor_at_indent.type == token.DEDENT: + InsertNodes = pytree_utils.InsertNodesBefore # pylint: disable=invalid-name # noqa + else: + InsertNodes = pytree_utils.InsertNodesAfter # pylint: disable=invalid-name # noqa + InsertNodes( + _CreateCommentsFromPrefix( + '\n'.join(comment_group) + '\n', + comment_lineno, + comment_column, + standalone=True), ancestor_at_indent) + comment_lineno += len(comment_group) + else: + # Otherwise there are two cases. + # + # 1. The comment is on its own line + # 2. The comment is part of an expression. + # + # Unfortunately, it's fairly difficult to distinguish between the + # two in lib2to3 trees. The algorithm here is to determine whether + # child is the first leaf in the statement it belongs to. If it is, + # then the comment (which is a prefix) belongs on a separate line. + # If it is not, it means the comment is buried deep in the statement + # and is part of some expression. + stmt_parent = _FindStmtParent(child) + + for leaf_in_parent in stmt_parent.leaves(): + if leaf_in_parent.type == token.NEWLINE: + continue + elif id(leaf_in_parent) == id(child): + # This comment stands on its own line, and it has to be inserted + # into the appropriate parent. We'll have to find a suitable + # parent to insert into. 
See comments above + # _STANDALONE_LINE_NODES for more details. + node_with_line_parent = _FindNodeWithStandaloneLineParent(child) + + if pytree_utils.NodeName( + node_with_line_parent.parent) in {'funcdef', 'classdef'}: + # Keep a comment that's not attached to a function or class + # next to the object it is attached to. + comment_end = ( + comment_lineno + comment_prefix.rstrip('\n').count('\n')) + if comment_end < node_with_line_parent.lineno - 1: + node_with_line_parent = node_with_line_parent.parent + + pytree_utils.InsertNodesBefore( + _CreateCommentsFromPrefix( + comment_prefix, comment_lineno, 0, standalone=True), + node_with_line_parent) + break + else: + if comment_lineno == prev_leaf[0].lineno: + comment_lines = comment_prefix.splitlines() + value = comment_lines[0].lstrip() + if value.rstrip('\n'): + comment_column = prev_leaf[0].column + comment_column += len(prev_leaf[0].value) + comment_column += ( + len(comment_lines[0]) - len(comment_lines[0].lstrip())) + comment_leaf = pytree.Leaf( + type=token.COMMENT, + value=value.rstrip('\n'), + context=('', (comment_lineno, comment_column))) + pytree_utils.InsertNodesAfter([comment_leaf], prev_leaf[0]) + comment_prefix = '\n'.join(comment_lines[1:]) + comment_lineno += 1 + + rindex = ( + 0 if '\n' not in comment_prefix.rstrip() else + comment_prefix.rstrip().rindex('\n') + 1) + comment_column = ( + len(comment_prefix[rindex:]) - + len(comment_prefix[rindex:].lstrip())) + comments = _CreateCommentsFromPrefix( + comment_prefix, + comment_lineno, + comment_column, + standalone=False) + pytree_utils.InsertNodesBefore(comments, child) + break + + prev_leaf[0] = child + + _VisitNodeRec(tree) def _CreateCommentsFromPrefix( - comment_prefix, comment_lineno, comment_column, standalone = False ): - """Create pytree nodes to represent the given comment prefix. + comment_prefix, comment_lineno, comment_column, standalone=False): + """Create pytree nodes to represent the given comment prefix. 
Args: comment_prefix: (unicode) the text of the comment from the node's prefix. @@ -233,35 +219,35 @@ def _CreateCommentsFromPrefix( new COMMENT leafs. The prefix may consist of multiple comment blocks, separated by blank lines. Each block gets its own leaf. """ - # The comment is stored in the prefix attribute, with no lineno of its - # own. So we only know at which line it ends. To find out at which line it - # starts, look at how many newlines the comment itself contains. - comments = [] - - lines = comment_prefix.split( '\n' ) - index = 0 - while index < len( lines ): - comment_block = [] - while index < len( lines ) and lines[ index ].lstrip().startswith( '#' ): - comment_block.append( lines[ index ].strip() ) - index += 1 - - if comment_block: - new_lineno = comment_lineno + index - 1 - comment_block[ 0 ] = comment_block[ 0 ].strip() - comment_block[ -1 ] = comment_block[ -1 ].strip() - comment_leaf = pytree.Leaf( - type = token.COMMENT, - value = '\n'.join( comment_block ), - context = ( '', ( new_lineno, comment_column ) ) ) - comment_node = comment_leaf if not standalone else pytree.Node( - pygram.python_symbols.simple_stmt, [ comment_leaf ] ) - comments.append( comment_node ) - - while index < len( lines ) and not lines[ index ].lstrip(): - index += 1 - - return comments + # The comment is stored in the prefix attribute, with no lineno of its + # own. So we only know at which line it ends. To find out at which line it + # starts, look at how many newlines the comment itself contains. 
+ comments = [] + + lines = comment_prefix.split('\n') + index = 0 + while index < len(lines): + comment_block = [] + while index < len(lines) and lines[index].lstrip().startswith('#'): + comment_block.append(lines[index].strip()) + index += 1 + + if comment_block: + new_lineno = comment_lineno + index - 1 + comment_block[0] = comment_block[0].strip() + comment_block[-1] = comment_block[-1].strip() + comment_leaf = pytree.Leaf( + type=token.COMMENT, + value='\n'.join(comment_block), + context=('', (new_lineno, comment_column))) + comment_node = comment_leaf if not standalone else pytree.Node( + pygram.python_symbols.simple_stmt, [comment_leaf]) + comments.append(comment_node) + + while index < len(lines) and not lines[index].lstrip(): + index += 1 + + return comments # "Standalone line nodes" are tree nodes that have to start a new line in Python @@ -279,11 +265,11 @@ def _CreateCommentsFromPrefix( [ 'suite', 'if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', 'funcdef', 'classdef', 'decorated', 'file_input' - ] ) + ]) -def _FindNodeWithStandaloneLineParent( node ): - """Find a node whose parent is a 'standalone line' node. +def _FindNodeWithStandaloneLineParent(node): + """Find a node whose parent is a 'standalone line' node. See the comment above _STANDALONE_LINE_NODES for more details. @@ -293,21 +279,21 @@ def _FindNodeWithStandaloneLineParent( node ): Returns: Suitable node that's either the node itself or one of its ancestors. """ - if pytree_utils.NodeName( node.parent ) in _STANDALONE_LINE_NODES: - return node - else: - # This is guaranteed to terminate because 'file_input' is the root node of - # any pytree. - return _FindNodeWithStandaloneLineParent( node.parent ) + if pytree_utils.NodeName(node.parent) in _STANDALONE_LINE_NODES: + return node + else: + # This is guaranteed to terminate because 'file_input' is the root node of + # any pytree. + return _FindNodeWithStandaloneLineParent(node.parent) # "Statement nodes" are standalone statements. 
The don't have to start a new # line. -_STATEMENT_NODES = frozenset( [ 'simple_stmt' ] ) | _STANDALONE_LINE_NODES +_STATEMENT_NODES = frozenset(['simple_stmt']) | _STANDALONE_LINE_NODES -def _FindStmtParent( node ): - """Find the nearest parent of node that is a statement node. +def _FindStmtParent(node): + """Find the nearest parent of node that is a statement node. Arguments: node: node to start from @@ -315,14 +301,14 @@ def _FindStmtParent( node ): Returns: Nearest parent (or node itself, if suitable). """ - if pytree_utils.NodeName( node ) in _STATEMENT_NODES: - return node - else: - return _FindStmtParent( node.parent ) + if pytree_utils.NodeName(node) in _STATEMENT_NODES: + return node + else: + return _FindStmtParent(node.parent) -def _FindAncestorAtIndent( node, indent ): - """Find an ancestor of node with the given indentation. +def _FindAncestorAtIndent(node, indent): + """Find an ancestor of node with the given indentation. Arguments: node: node to start from. This must not be the tree root. @@ -333,27 +319,27 @@ def _FindAncestorAtIndent( node, indent ): An ancestor node with suitable indentation. If no suitable ancestor is found, the closest ancestor to the tree root is returned. """ - if node.parent.parent is None: - # Our parent is the tree root, so there's nowhere else to go. - return node - - # If the parent has an indent annotation, and it's shorter than node's - # indent, this is a suitable ancestor. - # The reason for "shorter" rather than "equal" is that comments may be - # improperly indented (i.e. by three spaces, where surrounding statements - # have either zero or two or four), and we don't want to propagate them all - # the way to the root. - parent_indent = pytree_utils.GetNodeAnnotation( - node.parent, pytree_utils.Annotation.CHILD_INDENT ) - if parent_indent is not None and indent.startswith( parent_indent ): - return node - else: - # Keep looking up the tree. 
- return _FindAncestorAtIndent( node.parent, indent ) - - -def _AnnotateIndents( tree ): - """Annotate the tree with child_indent annotations. + if node.parent.parent is None: + # Our parent is the tree root, so there's nowhere else to go. + return node + + # If the parent has an indent annotation, and it's shorter than node's + # indent, this is a suitable ancestor. + # The reason for "shorter" rather than "equal" is that comments may be + # improperly indented (i.e. by three spaces, where surrounding statements + # have either zero or two or four), and we don't want to propagate them all + # the way to the root. + parent_indent = pytree_utils.GetNodeAnnotation( + node.parent, pytree_utils.Annotation.CHILD_INDENT) + if parent_indent is not None and indent.startswith(parent_indent): + return node + else: + # Keep looking up the tree. + return _FindAncestorAtIndent(node.parent, indent) + + +def _AnnotateIndents(tree): + """Annotate the tree with child_indent annotations. A child_indent annotation on a node specifies the indentation (as a string, like " ") of its children. It is inferred from the INDENT child of a node. @@ -364,16 +350,16 @@ def _AnnotateIndents( tree ): Raises: RuntimeError: if the tree is malformed. """ - # Annotate the root of the tree with zero indent. - if tree.parent is None: - pytree_utils.SetNodeAnnotation( tree, pytree_utils.Annotation.CHILD_INDENT, '' ) - for child in tree.children: - if child.type == token.INDENT: - child_indent = pytree_utils.GetNodeAnnotation( - tree, pytree_utils.Annotation.CHILD_INDENT ) - if child_indent is not None and child_indent != child.value: - raise RuntimeError( - 'inconsistent indentation for child', ( tree, child ) ) - pytree_utils.SetNodeAnnotation( - tree, pytree_utils.Annotation.CHILD_INDENT, child.value ) - _AnnotateIndents( child ) + # Annotate the root of the tree with zero indent. 
+ if tree.parent is None: + pytree_utils.SetNodeAnnotation( + tree, pytree_utils.Annotation.CHILD_INDENT, '') + for child in tree.children: + if child.type == token.INDENT: + child_indent = pytree_utils.GetNodeAnnotation( + tree, pytree_utils.Annotation.CHILD_INDENT) + if child_indent is not None and child_indent != child.value: + raise RuntimeError('inconsistent indentation for child', (tree, child)) + pytree_utils.SetNodeAnnotation( + tree, pytree_utils.Annotation.CHILD_INDENT, child.value) + _AnnotateIndents(child) diff --git a/yapf/pytree/continuation_splicer.py b/yapf/pytree/continuation_splicer.py index dea4de29f..b86188cb5 100644 --- a/yapf/pytree/continuation_splicer.py +++ b/yapf/pytree/continuation_splicer.py @@ -24,29 +24,29 @@ from yapf.yapflib import format_token -def SpliceContinuations( tree ): - """Given a pytree, splice the continuation marker into nodes. +def SpliceContinuations(tree): + """Given a pytree, splice the continuation marker into nodes. Arguments: tree: (pytree.Node) The tree to work on. The tree is modified by this function. 
""" - def RecSplicer( node ): - """Inserts a continuation marker into the node.""" - if isinstance( node, pytree.Leaf ): - if node.prefix.lstrip().startswith( '\\\n' ): - new_lineno = node.lineno - node.prefix.count( '\n' ) - return pytree.Leaf( - type = format_token.CONTINUATION, - value = node.prefix, - context = ( '', ( new_lineno, 0 ) ) ) - return None - num_inserted = 0 - for index, child in enumerate( node.children[ : ] ): - continuation_node = RecSplicer( child ) - if continuation_node: - node.children.insert( index + num_inserted, continuation_node ) - num_inserted += 1 - - RecSplicer( tree ) + def RecSplicer(node): + """Inserts a continuation marker into the node.""" + if isinstance(node, pytree.Leaf): + if node.prefix.lstrip().startswith('\\\n'): + new_lineno = node.lineno - node.prefix.count('\n') + return pytree.Leaf( + type=format_token.CONTINUATION, + value=node.prefix, + context=('', (new_lineno, 0))) + return None + num_inserted = 0 + for index, child in enumerate(node.children[:]): + continuation_node = RecSplicer(child) + if continuation_node: + node.children.insert(index + num_inserted, continuation_node) + num_inserted += 1 + + RecSplicer(tree) diff --git a/yapf/pytree/pytree_unwrapper.py b/yapf/pytree/pytree_unwrapper.py index 89618066b..835ca60a1 100644 --- a/yapf/pytree/pytree_unwrapper.py +++ b/yapf/pytree/pytree_unwrapper.py @@ -40,12 +40,12 @@ from yapf.yapflib import style from yapf.yapflib import subtypes -_OPENING_BRACKETS = frozenset( { '(', '[', '{' } ) -_CLOSING_BRACKETS = frozenset( { ')', ']', '}' } ) +_OPENING_BRACKETS = frozenset({'(', '[', '{'}) +_CLOSING_BRACKETS = frozenset({')', ']', '}'}) -def UnwrapPyTree( tree ): - """Create and return a list of logical lines from the given pytree. +def UnwrapPyTree(tree): + """Create and return a list of logical lines from the given pytree. Arguments: tree: the top-level pytree node to unwrap.. @@ -53,11 +53,11 @@ def UnwrapPyTree( tree ): Returns: A list of LogicalLine objects. 
""" - unwrapper = PyTreeUnwrapper() - unwrapper.Visit( tree ) - llines = unwrapper.GetLogicalLines() - llines.sort( key = lambda x: x.lineno ) - return llines + unwrapper = PyTreeUnwrapper() + unwrapper.Visit(tree) + llines = unwrapper.GetLogicalLines() + llines.sort(key=lambda x: x.lineno) + return llines # Grammar tokens considered as whitespace for the purpose of unwrapping. @@ -65,11 +65,11 @@ def UnwrapPyTree( tree ): [ grammar_token.NEWLINE, grammar_token.DEDENT, grammar_token.INDENT, grammar_token.ENDMARKER - ] ) + ]) -class PyTreeUnwrapper( pytree_visitor.PyTreeVisitor ): - """PyTreeUnwrapper - see file-level docstring for detailed description. +class PyTreeUnwrapper(pytree_visitor.PyTreeVisitor): + """PyTreeUnwrapper - see file-level docstring for detailed description. Note: since this implements PyTreeVisitor and node names in lib2to3 are underscore_separated, the visiting methods of this class are named as @@ -83,78 +83,78 @@ class PyTreeUnwrapper( pytree_visitor.PyTreeVisitor ): familiarity with the Python grammar is required. """ - def __init__( self ): - # A list of all logical lines finished visiting so far. - self._logical_lines = [] + def __init__(self): + # A list of all logical lines finished visiting so far. + self._logical_lines = [] - # Builds up a "current" logical line while visiting pytree nodes. Some nodes - # will finish a line and start a new one. - self._cur_logical_line = logical_line.LogicalLine( 0 ) + # Builds up a "current" logical line while visiting pytree nodes. Some nodes + # will finish a line and start a new one. + self._cur_logical_line = logical_line.LogicalLine(0) - # Current indentation depth. - self._cur_depth = 0 + # Current indentation depth. + self._cur_depth = 0 - def GetLogicalLines( self ): - """Fetch the result of the tree walk. + def GetLogicalLines(self): + """Fetch the result of the tree walk. Note: only call this after visiting the whole tree. Returns: A list of LogicalLine objects. 
""" - # Make sure the last line that was being populated is flushed. - self._StartNewLine() - return self._logical_lines + # Make sure the last line that was being populated is flushed. + self._StartNewLine() + return self._logical_lines - def _StartNewLine( self ): - """Finish current line and start a new one. + def _StartNewLine(self): + """Finish current line and start a new one. Place the currently accumulated line into the _logical_lines list and start a new one. """ - if self._cur_logical_line.tokens: - self._logical_lines.append( self._cur_logical_line ) - _MatchBrackets( self._cur_logical_line ) - _IdentifyParameterLists( self._cur_logical_line ) - _AdjustSplitPenalty( self._cur_logical_line ) - self._cur_logical_line = logical_line.LogicalLine( self._cur_depth ) - - _STMT_TYPES = frozenset( - { - 'if_stmt', - 'while_stmt', - 'for_stmt', - 'try_stmt', - 'expect_clause', - 'with_stmt', - 'funcdef', - 'classdef', - } ) - - # pylint: disable=invalid-name,missing-docstring - def Visit_simple_stmt( self, node ): - # A 'simple_stmt' conveniently represents a non-compound Python statement, - # i.e. a statement that does not contain other statements. - - # When compound nodes have a single statement as their suite, the parser - # can leave it in the tree directly without creating a suite. But we have - # to increase depth in these cases as well. However, don't increase the - # depth of we have a simple_stmt that's a comment node. This represents a - # standalone comment and in the case of it coming directly after the - # funcdef, it is a "top" comment for the whole function. - # TODO(eliben): add more relevant compound statements here. 
- single_stmt_suite = ( - node.parent and pytree_utils.NodeName( node.parent ) in self._STMT_TYPES ) - is_comment_stmt = pytree_utils.IsCommentStatement( node ) - if single_stmt_suite and not is_comment_stmt: - self._cur_depth += 1 - self._StartNewLine() - self.DefaultNodeVisit( node ) - if single_stmt_suite and not is_comment_stmt: - self._cur_depth -= 1 - - def _VisitCompoundStatement( self, node, substatement_names ): - """Helper for visiting compound statements. + if self._cur_logical_line.tokens: + self._logical_lines.append(self._cur_logical_line) + _MatchBrackets(self._cur_logical_line) + _IdentifyParameterLists(self._cur_logical_line) + _AdjustSplitPenalty(self._cur_logical_line) + self._cur_logical_line = logical_line.LogicalLine(self._cur_depth) + + _STMT_TYPES = frozenset( + { + 'if_stmt', + 'while_stmt', + 'for_stmt', + 'try_stmt', + 'expect_clause', + 'with_stmt', + 'funcdef', + 'classdef', + }) + + # pylint: disable=invalid-name,missing-docstring + def Visit_simple_stmt(self, node): + # A 'simple_stmt' conveniently represents a non-compound Python statement, + # i.e. a statement that does not contain other statements. + + # When compound nodes have a single statement as their suite, the parser + # can leave it in the tree directly without creating a suite. But we have + # to increase depth in these cases as well. However, don't increase the + # depth of we have a simple_stmt that's a comment node. This represents a + # standalone comment and in the case of it coming directly after the + # funcdef, it is a "top" comment for the whole function. + # TODO(eliben): add more relevant compound statements here. 
+ single_stmt_suite = ( + node.parent and pytree_utils.NodeName(node.parent) in self._STMT_TYPES) + is_comment_stmt = pytree_utils.IsCommentStatement(node) + if single_stmt_suite and not is_comment_stmt: + self._cur_depth += 1 + self._StartNewLine() + self.DefaultNodeVisit(node) + if single_stmt_suite and not is_comment_stmt: + self._cur_depth -= 1 + + def _VisitCompoundStatement(self, node, substatement_names): + """Helper for visiting compound statements. Python compound statements serve as containers for other statements. Thus, when we encounter a new compound statement, we start a new logical line. @@ -164,150 +164,150 @@ def _VisitCompoundStatement( self, node, substatement_names ): substatement_names: set of node names. A compound statement will be recognized as a NAME node with a name in this set. """ - for child in node.children: - # A pytree is structured in such a way that a single 'if_stmt' node will - # contain all the 'if', 'elif' and 'else' nodes as children (similar - # structure applies to 'while' statements, 'try' blocks, etc). Therefore, - # we visit all children here and create a new line before the requested - # set of nodes. - if ( child.type == grammar_token.NAME and - child.value in substatement_names ): - self._StartNewLine() - self.Visit( child ) + for child in node.children: + # A pytree is structured in such a way that a single 'if_stmt' node will + # contain all the 'if', 'elif' and 'else' nodes as children (similar + # structure applies to 'while' statements, 'try' blocks, etc). Therefore, + # we visit all children here and create a new line before the requested + # set of nodes. 
+ if (child.type == grammar_token.NAME and + child.value in substatement_names): + self._StartNewLine() + self.Visit(child) - _IF_STMT_ELEMS = frozenset( { 'if', 'else', 'elif' } ) + _IF_STMT_ELEMS = frozenset({'if', 'else', 'elif'}) - def Visit_if_stmt( self, node ): # pylint: disable=invalid-name - self._VisitCompoundStatement( node, self._IF_STMT_ELEMS ) + def Visit_if_stmt(self, node): # pylint: disable=invalid-name + self._VisitCompoundStatement(node, self._IF_STMT_ELEMS) - _WHILE_STMT_ELEMS = frozenset( { 'while', 'else' } ) + _WHILE_STMT_ELEMS = frozenset({'while', 'else'}) - def Visit_while_stmt( self, node ): # pylint: disable=invalid-name - self._VisitCompoundStatement( node, self._WHILE_STMT_ELEMS ) + def Visit_while_stmt(self, node): # pylint: disable=invalid-name + self._VisitCompoundStatement(node, self._WHILE_STMT_ELEMS) - _FOR_STMT_ELEMS = frozenset( { 'for', 'else' } ) + _FOR_STMT_ELEMS = frozenset({'for', 'else'}) - def Visit_for_stmt( self, node ): # pylint: disable=invalid-name - self._VisitCompoundStatement( node, self._FOR_STMT_ELEMS ) + def Visit_for_stmt(self, node): # pylint: disable=invalid-name + self._VisitCompoundStatement(node, self._FOR_STMT_ELEMS) - _TRY_STMT_ELEMS = frozenset( { 'try', 'except', 'else', 'finally' } ) + _TRY_STMT_ELEMS = frozenset({'try', 'except', 'else', 'finally'}) - def Visit_try_stmt( self, node ): # pylint: disable=invalid-name - self._VisitCompoundStatement( node, self._TRY_STMT_ELEMS ) + def Visit_try_stmt(self, node): # pylint: disable=invalid-name + self._VisitCompoundStatement(node, self._TRY_STMT_ELEMS) - _EXCEPT_STMT_ELEMS = frozenset( { 'except' } ) + _EXCEPT_STMT_ELEMS = frozenset({'except'}) - def Visit_except_clause( self, node ): # pylint: disable=invalid-name - self._VisitCompoundStatement( node, self._EXCEPT_STMT_ELEMS ) + def Visit_except_clause(self, node): # pylint: disable=invalid-name + self._VisitCompoundStatement(node, self._EXCEPT_STMT_ELEMS) - _FUNC_DEF_ELEMS = frozenset( { 'def' } ) + 
_FUNC_DEF_ELEMS = frozenset({'def'}) - def Visit_funcdef( self, node ): # pylint: disable=invalid-name - self._VisitCompoundStatement( node, self._FUNC_DEF_ELEMS ) + def Visit_funcdef(self, node): # pylint: disable=invalid-name + self._VisitCompoundStatement(node, self._FUNC_DEF_ELEMS) - def Visit_async_funcdef( self, node ): # pylint: disable=invalid-name - self._StartNewLine() - index = 0 - for child in node.children: - index += 1 - self.Visit( child ) - if child.type == grammar_token.ASYNC: - break - for child in node.children[ index ].children: - self.Visit( child ) + def Visit_async_funcdef(self, node): # pylint: disable=invalid-name + self._StartNewLine() + index = 0 + for child in node.children: + index += 1 + self.Visit(child) + if child.type == grammar_token.ASYNC: + break + for child in node.children[index].children: + self.Visit(child) - _CLASS_DEF_ELEMS = frozenset( { 'class' } ) + _CLASS_DEF_ELEMS = frozenset({'class'}) - def Visit_classdef( self, node ): # pylint: disable=invalid-name - self._VisitCompoundStatement( node, self._CLASS_DEF_ELEMS ) + def Visit_classdef(self, node): # pylint: disable=invalid-name + self._VisitCompoundStatement(node, self._CLASS_DEF_ELEMS) - def Visit_async_stmt( self, node ): # pylint: disable=invalid-name + def Visit_async_stmt(self, node): # pylint: disable=invalid-name + self._StartNewLine() + index = 0 + for child in node.children: + index += 1 + self.Visit(child) + if child.type == grammar_token.ASYNC: + break + for child in node.children[index].children: + if child.type == grammar_token.NAME and child.value == 'else': self._StartNewLine() - index = 0 - for child in node.children: - index += 1 - self.Visit( child ) - if child.type == grammar_token.ASYNC: - break - for child in node.children[ index ].children: - if child.type == grammar_token.NAME and child.value == 'else': - self._StartNewLine() - self.Visit( child ) - - def Visit_decorator( self, node ): # pylint: disable=invalid-name - for child in node.children: - 
self.Visit( child ) - if child.type == grammar_token.COMMENT and child == node.children[ 0 ]: - self._StartNewLine() - - def Visit_decorators( self, node ): # pylint: disable=invalid-name - for child in node.children: - self._StartNewLine() - self.Visit( child ) - - def Visit_decorated( self, node ): # pylint: disable=invalid-name - for child in node.children: - self._StartNewLine() - self.Visit( child ) - - _WITH_STMT_ELEMS = frozenset( { 'with' } ) - - def Visit_with_stmt( self, node ): # pylint: disable=invalid-name - self._VisitCompoundStatement( node, self._WITH_STMT_ELEMS ) - - def Visit_suite( self, node ): # pylint: disable=invalid-name - # A 'suite' starts a new indentation level in Python. - self._cur_depth += 1 + self.Visit(child) + + def Visit_decorator(self, node): # pylint: disable=invalid-name + for child in node.children: + self.Visit(child) + if child.type == grammar_token.COMMENT and child == node.children[0]: self._StartNewLine() - self.DefaultNodeVisit( node ) - self._cur_depth -= 1 - def Visit_listmaker( self, node ): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation( node ) - self.DefaultNodeVisit( node ) + def Visit_decorators(self, node): # pylint: disable=invalid-name + for child in node.children: + self._StartNewLine() + self.Visit(child) + + def Visit_decorated(self, node): # pylint: disable=invalid-name + for child in node.children: + self._StartNewLine() + self.Visit(child) + + _WITH_STMT_ELEMS = frozenset({'with'}) + + def Visit_with_stmt(self, node): # pylint: disable=invalid-name + self._VisitCompoundStatement(node, self._WITH_STMT_ELEMS) + + def Visit_suite(self, node): # pylint: disable=invalid-name + # A 'suite' starts a new indentation level in Python. 
+ self._cur_depth += 1 + self._StartNewLine() + self.DefaultNodeVisit(node) + self._cur_depth -= 1 + + def Visit_listmaker(self, node): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation(node) + self.DefaultNodeVisit(node) - def Visit_dictsetmaker( self, node ): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation( node ) - self.DefaultNodeVisit( node ) + def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation(node) + self.DefaultNodeVisit(node) - def Visit_import_as_names( self, node ): # pylint: disable=invalid-name - if node.prev_sibling.value == '(': - _DetermineMustSplitAnnotation( node ) - self.DefaultNodeVisit( node ) + def Visit_import_as_names(self, node): # pylint: disable=invalid-name + if node.prev_sibling.value == '(': + _DetermineMustSplitAnnotation(node) + self.DefaultNodeVisit(node) - def Visit_testlist_gexp( self, node ): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation( node ) - self.DefaultNodeVisit( node ) + def Visit_testlist_gexp(self, node): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation(node) + self.DefaultNodeVisit(node) - def Visit_arglist( self, node ): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation( node ) - self.DefaultNodeVisit( node ) + def Visit_arglist(self, node): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation(node) + self.DefaultNodeVisit(node) - def Visit_typedargslist( self, node ): # pylint: disable=invalid-name - _DetermineMustSplitAnnotation( node ) - self.DefaultNodeVisit( node ) + def Visit_typedargslist(self, node): # pylint: disable=invalid-name + _DetermineMustSplitAnnotation(node) + self.DefaultNodeVisit(node) - def DefaultLeafVisit( self, leaf ): - """Default visitor for tree leaves. + def DefaultLeafVisit(self, leaf): + """Default visitor for tree leaves. A tree leaf is always just gets appended to the current logical line. Arguments: leaf: the leaf to visit. 
""" - if leaf.type in _WHITESPACE_TOKENS: - self._StartNewLine() - elif leaf.type != grammar_token.COMMENT or leaf.value.strip(): - # Add non-whitespace tokens and comments that aren't empty. - self._cur_logical_line.AppendToken( - format_token.FormatToken( leaf, pytree_utils.NodeName( leaf ) ) ) + if leaf.type in _WHITESPACE_TOKENS: + self._StartNewLine() + elif leaf.type != grammar_token.COMMENT or leaf.value.strip(): + # Add non-whitespace tokens and comments that aren't empty. + self._cur_logical_line.AppendToken( + format_token.FormatToken(leaf, pytree_utils.NodeName(leaf))) -_BRACKET_MATCH = { ')': '(', '}': '{', ']': '['} +_BRACKET_MATCH = {')': '(', '}': '{', ']': '['} -def _MatchBrackets( line ): - """Visit the node and match the brackets. +def _MatchBrackets(line): + """Visit the node and match the brackets. For every open bracket ('[', '{', or '('), find the associated closing bracket and "match" them up. I.e., save in the token a pointer to its associated open @@ -316,23 +316,23 @@ def _MatchBrackets( line ): Arguments: line: (LogicalLine) A logical line. 
""" - bracket_stack = [] - for token in line.tokens: - if token.value in _OPENING_BRACKETS: - bracket_stack.append( token ) - elif token.value in _CLOSING_BRACKETS: - bracket_stack[ -1 ].matching_bracket = token - token.matching_bracket = bracket_stack[ -1 ] - bracket_stack.pop() + bracket_stack = [] + for token in line.tokens: + if token.value in _OPENING_BRACKETS: + bracket_stack.append(token) + elif token.value in _CLOSING_BRACKETS: + bracket_stack[-1].matching_bracket = token + token.matching_bracket = bracket_stack[-1] + bracket_stack.pop() - for bracket in bracket_stack: - if id( pytree_utils.GetOpeningBracket( token.node ) ) == id( bracket.node ): - bracket.container_elements.append( token ) - token.container_opening = bracket + for bracket in bracket_stack: + if id(pytree_utils.GetOpeningBracket(token.node)) == id(bracket.node): + bracket.container_elements.append(token) + token.container_opening = bracket -def _IdentifyParameterLists( line ): - """Visit the node to create a state for parameter lists. +def _IdentifyParameterLists(line): + """Visit the node to create a state for parameter lists. For instance, a parameter is considered an "object" with its first and last token uniquely identifying the object. @@ -340,32 +340,32 @@ def _IdentifyParameterLists( line ): Arguments: line: (LogicalLine) A logical line. """ - func_stack = [] - param_stack = [] - for tok in line.tokens: - # Identify parameter list objects. - if subtypes.FUNC_DEF in tok.subtypes: - assert tok.next_token.value == '(' - func_stack.append( tok.next_token ) - continue + func_stack = [] + param_stack = [] + for tok in line.tokens: + # Identify parameter list objects. 
+ if subtypes.FUNC_DEF in tok.subtypes: + assert tok.next_token.value == '(' + func_stack.append(tok.next_token) + continue - if func_stack and tok.value == ')': - if tok == func_stack[ -1 ].matching_bracket: - func_stack.pop() - continue + if func_stack and tok.value == ')': + if tok == func_stack[-1].matching_bracket: + func_stack.pop() + continue - # Identify parameter objects. - if subtypes.PARAMETER_START in tok.subtypes: - param_stack.append( tok ) + # Identify parameter objects. + if subtypes.PARAMETER_START in tok.subtypes: + param_stack.append(tok) - # Not "elif", a parameter could be a single token. - if param_stack and subtypes.PARAMETER_STOP in tok.subtypes: - start = param_stack.pop() - func_stack[ -1 ].parameters.append( object_state.Parameter( start, tok ) ) + # Not "elif", a parameter could be a single token. + if param_stack and subtypes.PARAMETER_STOP in tok.subtypes: + start = param_stack.pop() + func_stack[-1].parameters.append(object_state.Parameter(start, tok)) -def _AdjustSplitPenalty( line ): - """Visit the node and adjust the split penalties if needed. +def _AdjustSplitPenalty(line): + """Visit the node and adjust the split penalties if needed. A token shouldn't be split if it's not within a bracket pair. Mark any token that's not within a bracket pair as "unbreakable". @@ -373,56 +373,57 @@ def _AdjustSplitPenalty( line ): Arguments: line: (LogicalLine) An logical line. 
""" - bracket_level = 0 - for index, token in enumerate( line.tokens ): - if index and not bracket_level: - pytree_utils.SetNodeAnnotation( - token.node, pytree_utils.Annotation.SPLIT_PENALTY, - split_penalty.UNBREAKABLE ) - if token.value in _OPENING_BRACKETS: - bracket_level += 1 - elif token.value in _CLOSING_BRACKETS: - bracket_level -= 1 - - -def _DetermineMustSplitAnnotation( node ): - """Enforce a split in the list if the list ends with a comma.""" - if style.Get( 'DISABLE_ENDING_COMMA_HEURISTIC' ): + bracket_level = 0 + for index, token in enumerate(line.tokens): + if index and not bracket_level: + pytree_utils.SetNodeAnnotation( + token.node, pytree_utils.Annotation.SPLIT_PENALTY, + split_penalty.UNBREAKABLE) + if token.value in _OPENING_BRACKETS: + bracket_level += 1 + elif token.value in _CLOSING_BRACKETS: + bracket_level -= 1 + + +def _DetermineMustSplitAnnotation(node): + """Enforce a split in the list if the list ends with a comma.""" + if style.Get('DISABLE_ENDING_COMMA_HEURISTIC'): + return + if not _ContainsComments(node): + token = next(node.parent.leaves()) + if token.value == '(': + if sum(1 for ch in node.children if ch.type == grammar_token.COMMA) < 2: return - if not _ContainsComments( node ): - token = next( node.parent.leaves() ) - if token.value == '(': - if sum( 1 for ch in node.children if ch.type == grammar_token.COMMA ) < 2: - return - if ( not isinstance( node.children[ -1 ], pytree.Leaf ) or - node.children[ -1 ].value != ',' ): - return - num_children = len( node.children ) - index = 0 - _SetMustSplitOnFirstLeaf( node.children[ 0 ] ) - while index < num_children - 1: - child = node.children[ index ] - if isinstance( child, pytree.Leaf ) and child.value == ',': - next_child = node.children[ index + 1 ] - if next_child.type == grammar_token.COMMENT: - index += 1 - if index >= num_children - 1: - break - _SetMustSplitOnFirstLeaf( node.children[ index + 1 ] ) + if (not isinstance(node.children[-1], pytree.Leaf) or + 
node.children[-1].value != ','): + return + num_children = len(node.children) + index = 0 + _SetMustSplitOnFirstLeaf(node.children[0]) + while index < num_children - 1: + child = node.children[index] + if isinstance(child, pytree.Leaf) and child.value == ',': + next_child = node.children[index + 1] + if next_child.type == grammar_token.COMMENT: index += 1 - - -def _ContainsComments( node ): - """Return True if the list has a comment in it.""" - if isinstance( node, pytree.Leaf ): - return node.type == grammar_token.COMMENT - for child in node.children: - if _ContainsComments( child ): - return True - return False - - -def _SetMustSplitOnFirstLeaf( node ): - """Set the "must split" annotation on the first leaf node.""" - pytree_utils.SetNodeAnnotation( - pytree_utils.FirstLeafNode( node ), pytree_utils.Annotation.MUST_SPLIT, True ) + if index >= num_children - 1: + break + _SetMustSplitOnFirstLeaf(node.children[index + 1]) + index += 1 + + +def _ContainsComments(node): + """Return True if the list has a comment in it.""" + if isinstance(node, pytree.Leaf): + return node.type == grammar_token.COMMENT + for child in node.children: + if _ContainsComments(child): + return True + return False + + +def _SetMustSplitOnFirstLeaf(node): + """Set the "must split" annotation on the first leaf node.""" + pytree_utils.SetNodeAnnotation( + pytree_utils.FirstLeafNode(node), pytree_utils.Annotation.MUST_SPLIT, + True) diff --git a/yapf/pytree/pytree_utils.py b/yapf/pytree/pytree_utils.py index 710e0082d..415011806 100644 --- a/yapf/pytree/pytree_utils.py +++ b/yapf/pytree/pytree_utils.py @@ -37,20 +37,20 @@ # have a better understanding of what information we need from the tree. Then, # these tokens may be filtered out from the tree before the tree gets to the # unwrapper. 
-NONSEMANTIC_TOKENS = frozenset( [ 'DEDENT', 'INDENT', 'NEWLINE', 'ENDMARKER' ] ) +NONSEMANTIC_TOKENS = frozenset(['DEDENT', 'INDENT', 'NEWLINE', 'ENDMARKER']) -class Annotation( object ): - """Annotation names associated with pytrees.""" - CHILD_INDENT = 'child_indent' - NEWLINES = 'newlines' - MUST_SPLIT = 'must_split' - SPLIT_PENALTY = 'split_penalty' - SUBTYPE = 'subtype' +class Annotation(object): + """Annotation names associated with pytrees.""" + CHILD_INDENT = 'child_indent' + NEWLINES = 'newlines' + MUST_SPLIT = 'must_split' + SPLIT_PENALTY = 'split_penalty' + SUBTYPE = 'subtype' -def NodeName( node ): - """Produce a string name for a given node. +def NodeName(node): + """Produce a string name for a given node. For a Leaf this is the token name, and for a Node this is the type. @@ -60,23 +60,23 @@ def NodeName( node ): Returns: Name as a string. """ - # Nodes with values < 256 are tokens. Values >= 256 are grammar symbols. - if node.type < 256: - return token.tok_name[ node.type ] - else: - return pygram.python_grammar.number2symbol[ node.type ] + # Nodes with values < 256 are tokens. Values >= 256 are grammar symbols. + if node.type < 256: + return token.tok_name[node.type] + else: + return pygram.python_grammar.number2symbol[node.type] -def FirstLeafNode( node ): - if isinstance( node, pytree.Leaf ): - return node - return FirstLeafNode( node.children[ 0 ] ) +def FirstLeafNode(node): + if isinstance(node, pytree.Leaf): + return node + return FirstLeafNode(node.children[0]) -def LastLeafNode( node ): - if isinstance( node, pytree.Leaf ): - return node - return LastLeafNode( node.children[ -1 ] ) +def LastLeafNode(node): + if isinstance(node, pytree.Leaf): + return node + return LastLeafNode(node.children[-1]) # lib2to3 thoughtfully provides pygram.python_grammar_no_print_statement for @@ -85,14 +85,14 @@ def LastLeafNode( node ): # It forgets to do the same for 'exec' though. Luckily, Python is amenable to # monkey-patching. 
_GRAMMAR_FOR_PY3 = pygram.python_grammar_no_print_statement.copy() -del _GRAMMAR_FOR_PY3.keywords[ 'exec' ] +del _GRAMMAR_FOR_PY3.keywords['exec'] _GRAMMAR_FOR_PY2 = pygram.python_grammar.copy() -del _GRAMMAR_FOR_PY2.keywords[ 'nonlocal' ] +del _GRAMMAR_FOR_PY2.keywords['nonlocal'] -def ParseCodeToTree( code ): - """Parse the given code to a lib2to3 pytree. +def ParseCodeToTree(code): + """Parse the given code to a lib2to3 pytree. Arguments: code: a string with the code to parse. @@ -104,35 +104,35 @@ def ParseCodeToTree( code ): Returns: The root node of the parsed tree. """ - # This function is tiny, but the incantation for invoking the parser correctly - # is sufficiently magical to be worth abstracting away. - if not code.endswith( os.linesep ): - code += os.linesep - + # This function is tiny, but the incantation for invoking the parser correctly + # is sufficiently magical to be worth abstracting away. + if not code.endswith(os.linesep): + code += os.linesep + + try: + # Try to parse using a Python 3 grammar, which is more permissive (print and + # exec are not keywords). + parser_driver = driver.Driver(_GRAMMAR_FOR_PY3, convert=pytree.convert) + tree = parser_driver.parse_string(code, debug=False) + except parse.ParseError: + # Now try to parse using a Python 2 grammar; If this fails, then + # there's something else wrong with the code. try: - # Try to parse using a Python 3 grammar, which is more permissive (print and - # exec are not keywords). - parser_driver = driver.Driver( _GRAMMAR_FOR_PY3, convert = pytree.convert ) - tree = parser_driver.parse_string( code, debug = False ) + parser_driver = driver.Driver(_GRAMMAR_FOR_PY2, convert=pytree.convert) + tree = parser_driver.parse_string(code, debug=False) except parse.ParseError: - # Now try to parse using a Python 2 grammar; If this fails, then - # there's something else wrong with the code. 
- try: - parser_driver = driver.Driver( _GRAMMAR_FOR_PY2, convert = pytree.convert ) - tree = parser_driver.parse_string( code, debug = False ) - except parse.ParseError: - # Raise a syntax error if the code is invalid python syntax. - try: - ast.parse( code ) - except SyntaxError as e: - raise e - else: - raise - return _WrapEndMarker( tree ) - - -def _WrapEndMarker( tree ): - """Wrap a single ENDMARKER token in a "file_input" node. + # Raise a syntax error if the code is invalid python syntax. + try: + ast.parse(code) + except SyntaxError as e: + raise e + else: + raise + return _WrapEndMarker(tree) + + +def _WrapEndMarker(tree): + """Wrap a single ENDMARKER token in a "file_input" node. Arguments: tree: (pytree.Node) The root node of the parsed tree. @@ -142,13 +142,13 @@ def _WrapEndMarker( tree ): then that node is wrapped in a "file_input" node. That will ensure we don't skip comments attached to that node. """ - if isinstance( tree, pytree.Leaf ) and tree.type == token.ENDMARKER: - return pytree.Node( pygram.python_symbols.file_input, [ tree ] ) - return tree + if isinstance(tree, pytree.Leaf) and tree.type == token.ENDMARKER: + return pytree.Node(pygram.python_symbols.file_input, [tree]) + return tree -def InsertNodesBefore( new_nodes, target ): - """Insert new_nodes before the given target location in the tree. +def InsertNodesBefore(new_nodes, target): + """Insert new_nodes before the given target location in the tree. Arguments: new_nodes: a sequence of new nodes to insert (the nodes should not be in the @@ -158,12 +158,12 @@ def InsertNodesBefore( new_nodes, target ): Raises: RuntimeError: if the tree is corrupted, or the insertion would corrupt it. """ - for node in new_nodes: - _InsertNodeAt( node, target, after = False ) + for node in new_nodes: + _InsertNodeAt(node, target, after=False) -def InsertNodesAfter( new_nodes, target ): - """Insert new_nodes after the given target location in the tree. 
+def InsertNodesAfter(new_nodes, target): + """Insert new_nodes after the given target location in the tree. Arguments: new_nodes: a sequence of new nodes to insert (the nodes should not be in the @@ -173,12 +173,12 @@ def InsertNodesAfter( new_nodes, target ): Raises: RuntimeError: if the tree is corrupted, or the insertion would corrupt it. """ - for node in reversed( new_nodes ): - _InsertNodeAt( node, target, after = True ) + for node in reversed(new_nodes): + _InsertNodeAt(node, target, after=True) -def _InsertNodeAt( new_node, target, after = False ): - """Underlying implementation for node insertion. +def _InsertNodeAt(new_node, target, after=False): + """Underlying implementation for node insertion. Arguments: new_node: a new node to insert (this node should not be in the tree). @@ -193,23 +193,25 @@ def _InsertNodeAt( new_node, target, after = False ): RuntimeError: if the tree is corrupted, or the insertion would corrupt it. """ - # Protect against attempts to insert nodes which already belong to some tree. - if new_node.parent is not None: - raise RuntimeError( - 'inserting node which already has a parent', ( new_node, new_node.parent ) ) + # Protect against attempts to insert nodes which already belong to some tree. 
+ if new_node.parent is not None: + raise RuntimeError( + 'inserting node which already has a parent', + (new_node, new_node.parent)) - # The code here is based on pytree.Base.next_sibling - parent_of_target = target.parent - if parent_of_target is None: - raise RuntimeError( 'expected target node to have a parent', ( target,) ) + # The code here is based on pytree.Base.next_sibling + parent_of_target = target.parent + if parent_of_target is None: + raise RuntimeError('expected target node to have a parent', (target,)) - for i, child in enumerate( parent_of_target.children ): - if child is target: - insertion_index = i + 1 if after else i - parent_of_target.insert_child( insertion_index, new_node ) - return + for i, child in enumerate(parent_of_target.children): + if child is target: + insertion_index = i + 1 if after else i + parent_of_target.insert_child(insertion_index, new_node) + return - raise RuntimeError( 'unable to find insertion point for target node', ( target,) ) + raise RuntimeError( + 'unable to find insertion point for target node', (target,)) # The following constant and functions implement a simple custom annotation @@ -219,20 +221,20 @@ def _InsertNodeAt( new_node, target, after = False ): _NODE_ANNOTATION_PREFIX = '_yapf_annotation_' -def CopyYapfAnnotations( src, dst ): - """Copy all YAPF annotations from the source node to the destination node. +def CopyYapfAnnotations(src, dst): + """Copy all YAPF annotations from the source node to the destination node. Arguments: src: the source node. dst: the destination node. """ - for annotation in dir( src ): - if annotation.startswith( _NODE_ANNOTATION_PREFIX ): - setattr( dst, annotation, getattr( src, annotation, None ) ) + for annotation in dir(src): + if annotation.startswith(_NODE_ANNOTATION_PREFIX): + setattr(dst, annotation, getattr(src, annotation, None)) -def GetNodeAnnotation( node, annotation, default = None ): - """Get annotation value from a node. 
+def GetNodeAnnotation(node, annotation, default=None): + """Get annotation value from a node. Arguments: node: the node. @@ -243,48 +245,48 @@ def GetNodeAnnotation( node, annotation, default = None ): Value of the annotation in the given node. If the node doesn't have this particular annotation name yet, returns default. """ - return getattr( node, _NODE_ANNOTATION_PREFIX + annotation, default ) + return getattr(node, _NODE_ANNOTATION_PREFIX + annotation, default) -def SetNodeAnnotation( node, annotation, value ): - """Set annotation value on a node. +def SetNodeAnnotation(node, annotation, value): + """Set annotation value on a node. Arguments: node: the node. annotation: annotation name - a string. value: annotation value to set. """ - setattr( node, _NODE_ANNOTATION_PREFIX + annotation, value ) + setattr(node, _NODE_ANNOTATION_PREFIX + annotation, value) -def AppendNodeAnnotation( node, annotation, value ): - """Appends an annotation value to a list of annotations on the node. +def AppendNodeAnnotation(node, annotation, value): + """Appends an annotation value to a list of annotations on the node. Arguments: node: the node. annotation: annotation name - a string. value: annotation value to set. """ - attr = GetNodeAnnotation( node, annotation, set() ) - attr.add( value ) - SetNodeAnnotation( node, annotation, attr ) + attr = GetNodeAnnotation(node, annotation, set()) + attr.add(value) + SetNodeAnnotation(node, annotation, attr) -def RemoveSubtypeAnnotation( node, value ): - """Removes an annotation value from the subtype annotations on the node. +def RemoveSubtypeAnnotation(node, value): + """Removes an annotation value from the subtype annotations on the node. Arguments: node: the node. value: annotation value to remove. 
""" - attr = GetNodeAnnotation( node, Annotation.SUBTYPE ) - if attr and value in attr: - attr.remove( value ) - SetNodeAnnotation( node, Annotation.SUBTYPE, attr ) + attr = GetNodeAnnotation(node, Annotation.SUBTYPE) + if attr and value in attr: + attr.remove(value) + SetNodeAnnotation(node, Annotation.SUBTYPE, attr) -def GetOpeningBracket( node ): - """Get opening bracket value from a node. +def GetOpeningBracket(node): + """Get opening bracket value from a node. Arguments: node: the node. @@ -292,21 +294,21 @@ def GetOpeningBracket( node ): Returns: The opening bracket node or None if it couldn't find one. """ - return getattr( node, _NODE_ANNOTATION_PREFIX + 'container_bracket', None ) + return getattr(node, _NODE_ANNOTATION_PREFIX + 'container_bracket', None) -def SetOpeningBracket( node, bracket ): - """Set opening bracket value for a node. +def SetOpeningBracket(node, bracket): + """Set opening bracket value for a node. Arguments: node: the node. bracket: opening bracket to set. """ - setattr( node, _NODE_ANNOTATION_PREFIX + 'container_bracket', bracket ) + setattr(node, _NODE_ANNOTATION_PREFIX + 'container_bracket', bracket) -def DumpNodeToString( node ): - """Dump a string representation of the given node. For debugging. +def DumpNodeToString(node): + """Dump a string representation of the given node. For debugging. Arguments: node: the node. @@ -314,35 +316,36 @@ def DumpNodeToString( node ): Returns: The string representation. 
""" - if isinstance( node, pytree.Leaf ): - fmt = ( - '{name}({value}) [lineno={lineno}, column={column}, ' - 'prefix={prefix}, penalty={penalty}]' ) - return fmt.format( - name = NodeName( node ), - value = _PytreeNodeRepr( node ), - lineno = node.lineno, - column = node.column, - prefix = repr( node.prefix ), - penalty = GetNodeAnnotation( node, Annotation.SPLIT_PENALTY, None ) ) - else: - fmt = '{node} [{len} children] [child_indent="{indent}"]' - return fmt.format( - node = NodeName( node ), - len = len( node.children ), - indent = GetNodeAnnotation( node, Annotation.CHILD_INDENT ) ) - - -def _PytreeNodeRepr( node ): - """Like pytree.Node.__repr__, but names instead of numbers for tokens.""" - if isinstance( node, pytree.Node ): - return '%s(%s, %r)' % ( - node.__class__.__name__, NodeName( node ), - [ _PytreeNodeRepr( c ) for c in node.children ] ) - if isinstance( node, pytree.Leaf ): - return '%s(%s, %r)' % ( node.__class__.__name__, NodeName( node ), node.value ) - - -def IsCommentStatement( node ): - return ( - NodeName( node ) == 'simple_stmt' and node.children[ 0 ].type == token.COMMENT ) + if isinstance(node, pytree.Leaf): + fmt = ( + '{name}({value}) [lineno={lineno}, column={column}, ' + 'prefix={prefix}, penalty={penalty}]') + return fmt.format( + name=NodeName(node), + value=_PytreeNodeRepr(node), + lineno=node.lineno, + column=node.column, + prefix=repr(node.prefix), + penalty=GetNodeAnnotation(node, Annotation.SPLIT_PENALTY, None)) + else: + fmt = '{node} [{len} children] [child_indent="{indent}"]' + return fmt.format( + node=NodeName(node), + len=len(node.children), + indent=GetNodeAnnotation(node, Annotation.CHILD_INDENT)) + + +def _PytreeNodeRepr(node): + """Like pytree.Node.__repr__, but names instead of numbers for tokens.""" + if isinstance(node, pytree.Node): + return '%s(%s, %r)' % ( + node.__class__.__name__, NodeName(node), + [_PytreeNodeRepr(c) for c in node.children]) + if isinstance(node, pytree.Leaf): + return '%s(%s, %r)' % 
(node.__class__.__name__, NodeName(node), node.value) + + +def IsCommentStatement(node): + return ( + NodeName(node) == 'simple_stmt' and + node.children[0].type == token.COMMENT) diff --git a/yapf/pytree/pytree_visitor.py b/yapf/pytree/pytree_visitor.py index 5b816f3e4..1cc2819f6 100644 --- a/yapf/pytree/pytree_visitor.py +++ b/yapf/pytree/pytree_visitor.py @@ -31,8 +31,8 @@ from yapf.pytree import pytree_utils -class PyTreeVisitor( object ): - """Visitor pattern for pytree trees. +class PyTreeVisitor(object): + """Visitor pattern for pytree trees. Methods named Visit_XXX will be invoked when a node with type XXX is encountered in the tree. The type is either a token type (for Leaf nodes) or @@ -54,42 +54,42 @@ class PyTreeVisitor( object ): that may have children - otherwise the children will not be visited. """ - def Visit( self, node ): - """Visit a node.""" - method = 'Visit_{0}'.format( pytree_utils.NodeName( node ) ) - if hasattr( self, method ): - # Found a specific visitor for this node - getattr( self, method )( node ) - else: - if isinstance( node, pytree.Leaf ): - self.DefaultLeafVisit( node ) - else: - self.DefaultNodeVisit( node ) - - def DefaultNodeVisit( self, node ): - """Default visitor for Node: visits the node's children depth-first. + def Visit(self, node): + """Visit a node.""" + method = 'Visit_{0}'.format(pytree_utils.NodeName(node)) + if hasattr(self, method): + # Found a specific visitor for this node + getattr(self, method)(node) + else: + if isinstance(node, pytree.Leaf): + self.DefaultLeafVisit(node) + else: + self.DefaultNodeVisit(node) + + def DefaultNodeVisit(self, node): + """Default visitor for Node: visits the node's children depth-first. This method is invoked when no specific visitor for the node is defined. Arguments: node: the node to visit """ - for child in node.children: - self.Visit( child ) + for child in node.children: + self.Visit(child) - def DefaultLeafVisit( self, leaf ): - """Default visitor for Leaf: no-op. 
+ def DefaultLeafVisit(self, leaf): + """Default visitor for Leaf: no-op. This method is invoked when no specific visitor for the leaf is defined. Arguments: leaf: the leaf to visit """ - pass + pass -def DumpPyTree( tree, target_stream = sys.stdout ): - """Convenience function for dumping a given pytree. +def DumpPyTree(tree, target_stream=sys.stdout): + """Convenience function for dumping a given pytree. This function presents a very minimal interface. For more configurability (for example, controlling how specific node types are displayed), use PyTreeDumper @@ -100,36 +100,36 @@ def DumpPyTree( tree, target_stream = sys.stdout ): target_stream: the stream to dump the tree to. A file-like object. By default will dump into stdout. """ - dumper = PyTreeDumper( target_stream ) - dumper.Visit( tree ) + dumper = PyTreeDumper(target_stream) + dumper.Visit(tree) -class PyTreeDumper( PyTreeVisitor ): - """Visitor that dumps the tree to a stream. +class PyTreeDumper(PyTreeVisitor): + """Visitor that dumps the tree to a stream. Implements the PyTreeVisitor interface. """ - def __init__( self, target_stream = sys.stdout ): - """Create a tree dumper. + def __init__(self, target_stream=sys.stdout): + """Create a tree dumper. Arguments: target_stream: the stream to dump the tree to. A file-like object. By default will dump into stdout. """ - self._target_stream = target_stream - self._current_indent = 0 - - def _DumpString( self, s ): - self._target_stream.write( '{0}{1}\n'.format( ' ' * self._current_indent, s ) ) - - def DefaultNodeVisit( self, node ): - # Dump information about the current node, and then use the generic - # DefaultNodeVisit visitor to dump each of its children. 
- self._DumpString( pytree_utils.DumpNodeToString( node ) ) - self._current_indent += 2 - super( PyTreeDumper, self ).DefaultNodeVisit( node ) - self._current_indent -= 2 - - def DefaultLeafVisit( self, leaf ): - self._DumpString( pytree_utils.DumpNodeToString( leaf ) ) + self._target_stream = target_stream + self._current_indent = 0 + + def _DumpString(self, s): + self._target_stream.write('{0}{1}\n'.format(' ' * self._current_indent, s)) + + def DefaultNodeVisit(self, node): + # Dump information about the current node, and then use the generic + # DefaultNodeVisit visitor to dump each of its children. + self._DumpString(pytree_utils.DumpNodeToString(node)) + self._current_indent += 2 + super(PyTreeDumper, self).DefaultNodeVisit(node) + self._current_indent -= 2 + + def DefaultLeafVisit(self, leaf): + self._DumpString(pytree_utils.DumpNodeToString(leaf)) diff --git a/yapf/pytree/split_penalty.py b/yapf/pytree/split_penalty.py index 8b5598390..8dc8056d3 100644 --- a/yapf/pytree/split_penalty.py +++ b/yapf/pytree/split_penalty.py @@ -52,548 +52,540 @@ SUBSCRIPT = 6000 -def ComputeSplitPenalties( tree ): - """Compute split penalties on tokens in the given parse tree. +def ComputeSplitPenalties(tree): + """Compute split penalties on tokens in the given parse tree. Arguments: tree: the top-level pytree node to annotate with penalties. """ - _SplitPenaltyAssigner().Visit( tree ) + _SplitPenaltyAssigner().Visit(tree) -class _SplitPenaltyAssigner( pytree_visitor.PyTreeVisitor ): - """Assigns split penalties to tokens, based on parse tree structure. +class _SplitPenaltyAssigner(pytree_visitor.PyTreeVisitor): + """Assigns split penalties to tokens, based on parse tree structure. Split penalties are attached as annotations to tokens. """ - def Visit( self, node ): - if not hasattr( node, 'is_pseudo' ): # Ignore pseudo tokens. 
- super( _SplitPenaltyAssigner, self ).Visit( node ) - - def Visit_import_as_names( self, node ): # pyline: disable=invalid-name - # import_as_names ::= import_as_name (',' import_as_name)* [','] - self.DefaultNodeVisit( node ) - prev_child = None - for child in node.children: - if ( prev_child and isinstance( prev_child, pytree.Leaf ) and - prev_child.value == ',' ): - _SetSplitPenalty( child, style.Get( 'SPLIT_PENALTY_IMPORT_NAMES' ) ) - prev_child = child - - def Visit_classdef( self, node ): # pylint: disable=invalid-name - # classdef ::= 'class' NAME ['(' [arglist] ')'] ':' suite - # - # NAME - _SetUnbreakable( node.children[ 1 ] ) - if len( node.children ) > 4: - # opening '(' - _SetUnbreakable( node.children[ 2 ] ) - # ':' - _SetUnbreakable( node.children[ -2 ] ) - self.DefaultNodeVisit( node ) - - def Visit_funcdef( self, node ): # pylint: disable=invalid-name - # funcdef ::= 'def' NAME parameters ['->' test] ':' suite - # - # Can't break before the function name and before the colon. The parameters - # are handled by child iteration. - colon_idx = 1 - while pytree_utils.NodeName( node.children[ colon_idx ] ) == 'simple_stmt': - colon_idx += 1 - _SetUnbreakable( node.children[ colon_idx ] ) - arrow_idx = -1 - while colon_idx < len( node.children ): - if isinstance( node.children[ colon_idx ], pytree.Leaf ): - if node.children[ colon_idx ].value == ':': - break - if node.children[ colon_idx ].value == '->': - arrow_idx = colon_idx - colon_idx += 1 - _SetUnbreakable( node.children[ colon_idx ] ) - self.DefaultNodeVisit( node ) - if arrow_idx > 0: - _SetSplitPenalty( - pytree_utils.LastLeafNode( node.children[ arrow_idx - 1 ] ), 0 ) - _SetUnbreakable( node.children[ arrow_idx ] ) - _SetStronglyConnected( node.children[ arrow_idx + 1 ] ) - - def Visit_lambdef( self, node ): # pylint: disable=invalid-name - # lambdef ::= 'lambda' [varargslist] ':' test - # Loop over the lambda up to and including the colon. 
- allow_multiline_lambdas = style.Get( 'ALLOW_MULTILINE_LAMBDAS' ) - if not allow_multiline_lambdas: - for child in node.children: - if child.type == grammar_token.COMMENT: - if re.search( r'pylint:.*disable=.*\bg-long-lambda', child.value ): - allow_multiline_lambdas = True - break - - if allow_multiline_lambdas: - _SetExpressionPenalty( node, STRONGLY_CONNECTED ) - else: - _SetExpressionPenalty( node, VERY_STRONGLY_CONNECTED ) - - def Visit_parameters( self, node ): # pylint: disable=invalid-name - # parameters ::= '(' [typedargslist] ')' - self.DefaultNodeVisit( node ) - - # Can't break before the opening paren of a parameter list. - _SetUnbreakable( node.children[ 0 ] ) - if not ( style.Get( 'INDENT_CLOSING_BRACKETS' ) or - style.Get( 'DEDENT_CLOSING_BRACKETS' ) ): - _SetStronglyConnected( node.children[ -1 ] ) - - def Visit_arglist( self, node ): # pylint: disable=invalid-name - # arglist ::= argument (',' argument)* [','] - if node.children[ 0 ].type == grammar_token.STAR: - # Python 3 treats a star expression as a specific expression type. - # Process it in that method. 
- self.Visit_star_expr( node ) - return - - self.DefaultNodeVisit( node ) - - for index in py3compat.range( 1, len( node.children ) ): - child = node.children[ index ] - if isinstance( child, pytree.Leaf ) and child.value == ',': - _SetUnbreakable( child ) - - for child in node.children: - if pytree_utils.NodeName( child ) == 'atom': - _IncreasePenalty( child, CONNECTED ) - - def Visit_argument( self, node ): # pylint: disable=invalid-name - # argument ::= test [comp_for] | test '=' test # Really [keyword '='] test - self.DefaultNodeVisit( node ) - - for index in py3compat.range( 1, len( node.children ) - 1 ): - child = node.children[ index ] - if isinstance( child, pytree.Leaf ) and child.value == '=': - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index ] ), NAMED_ASSIGN ) - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), - NAMED_ASSIGN ) - - def Visit_tname( self, node ): # pylint: disable=invalid-name - # tname ::= NAME [':' test] - self.DefaultNodeVisit( node ) - - for index in py3compat.range( 1, len( node.children ) - 1 ): - child = node.children[ index ] - if isinstance( child, pytree.Leaf ) and child.value == ':': - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index ] ), NAMED_ASSIGN ) - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), - NAMED_ASSIGN ) - - def Visit_dotted_name( self, node ): # pylint: disable=invalid-name - # dotted_name ::= NAME ('.' 
NAME)* - for child in node.children: - self.Visit( child ) - start = 2 if hasattr( node.children[ 0 ], 'is_pseudo' ) else 1 - for i in py3compat.range( start, len( node.children ) ): - _SetUnbreakable( node.children[ i ] ) - - def Visit_dictsetmaker( self, node ): # pylint: disable=invalid-name - # dictsetmaker ::= ( (test ':' test - # (comp_for | (',' test ':' test)* [','])) | - # (test (comp_for | (',' test)* [','])) ) - for child in node.children: - self.Visit( child ) - if child.type == grammar_token.COLON: - # This is a key to a dictionary. We don't want to split the key if at - # all possible. - _SetStronglyConnected( child ) - - def Visit_trailer( self, node ): # pylint: disable=invalid-name - # trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME - if node.children[ 0 ].value == '.': - before = style.Get( 'SPLIT_BEFORE_DOT' ) - _SetSplitPenalty( - node.children[ 0 ], VERY_STRONGLY_CONNECTED if before else DOTTED_NAME ) - _SetSplitPenalty( - node.children[ 1 ], DOTTED_NAME if before else VERY_STRONGLY_CONNECTED ) - elif len( node.children ) == 2: - # Don't split an empty argument list if at all possible. - _SetSplitPenalty( node.children[ 1 ], VERY_STRONGLY_CONNECTED ) - elif len( node.children ) == 3: - name = pytree_utils.NodeName( node.children[ 1 ] ) - if name in { 'argument', 'comparison' }: - # Don't split an argument list with one element if at all possible. - _SetStronglyConnected( node.children[ 1 ] ) - if ( len( node.children[ 1 ].children ) > 1 and pytree_utils.NodeName( - node.children[ 1 ].children[ 1 ] ) == 'comp_for' ): - # Don't penalize splitting before a comp_for expression. 
- _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ 1 ] ), 0 ) - else: - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ 1 ] ), - ONE_ELEMENT_ARGUMENT ) - elif ( node.children[ 0 ].type == grammar_token.LSQB and - len( node.children[ 1 ].children ) > 2 and - ( name.endswith( '_test' ) or name.endswith( '_expr' ) ) ): - _SetStronglyConnected( node.children[ 1 ].children[ 0 ] ) - _SetStronglyConnected( node.children[ 1 ].children[ 2 ] ) - - # Still allow splitting around the operator. - split_before = ( - ( - name.endswith( '_test' ) and - style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ) ) or ( - name.endswith( '_expr' ) and - style.Get( 'SPLIT_BEFORE_BITWISE_OPERATOR' ) ) ) - if split_before: - _SetSplitPenalty( - pytree_utils.LastLeafNode( node.children[ 1 ].children[ 1 ] ), - 0 ) - else: - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ 1 ].children[ 2 ] ), - 0 ) - - # Don't split the ending bracket of a subscript list. - _RecAnnotate( - node.children[ -1 ], pytree_utils.Annotation.SPLIT_PENALTY, - VERY_STRONGLY_CONNECTED ) - elif name not in { 'arglist', 'argument', 'term', 'or_test', 'and_test', - 'comparison', 'atom', 'power' }: - # Don't split an argument list with one element if at all possible. - stypes = pytree_utils.GetNodeAnnotation( - pytree_utils.FirstLeafNode( node ), - pytree_utils.Annotation.SUBTYPE ) - if stypes and subtypes.SUBSCRIPT_BRACKET in stypes: - _IncreasePenalty( node, SUBSCRIPT ) - - # Bump up the split penalty for the first part of a subscript. We - # would rather not split there. 
- _IncreasePenalty( node.children[ 1 ], CONNECTED ) - else: - _SetStronglyConnected( node.children[ 1 ], node.children[ 2 ] ) - - if name == 'arglist': - _SetStronglyConnected( node.children[ -1 ] ) - - self.DefaultNodeVisit( node ) - - def Visit_power( self, node ): # pylint: disable=invalid-name,missing-docstring - # power ::= atom trailer* ['**' factor] - self.DefaultNodeVisit( node ) - - # When atom is followed by a trailer, we can not break between them. - # E.g. arr[idx] - no break allowed between 'arr' and '['. - if ( len( node.children ) > 1 and - pytree_utils.NodeName( node.children[ 1 ] ) == 'trailer' ): - # children[1] itself is a whole trailer: we don't want to - # mark all of it as unbreakable, only its first token: (, [ or . - first = pytree_utils.FirstLeafNode( node.children[ 1 ] ) - if first.value != '.': - _SetUnbreakable( node.children[ 1 ].children[ 0 ] ) - - # A special case when there are more trailers in the sequence. Given: - # atom tr1 tr2 - # The last token of tr1 and the first token of tr2 comprise an unbreakable - # region. For example: foo.bar.baz(1) - # We can't put breaks between either of the '.', '(', or '[' and the names - # *preceding* them. - prev_trailer_idx = 1 - while prev_trailer_idx < len( node.children ) - 1: - cur_trailer_idx = prev_trailer_idx + 1 - cur_trailer = node.children[ cur_trailer_idx ] - if pytree_utils.NodeName( cur_trailer ) != 'trailer': - break - - # Now we know we have two trailers one after the other - prev_trailer = node.children[ prev_trailer_idx ] - if prev_trailer.children[ -1 ].value != ')': - # Set the previous node unbreakable if it's not a function call: - # atom tr1() tr2 - # It may be necessary (though undesirable) to split up a previous - # function call's parentheses to the next line. - _SetStronglyConnected( prev_trailer.children[ -1 ] ) - _SetStronglyConnected( cur_trailer.children[ 0 ] ) - prev_trailer_idx = cur_trailer_idx - - # We don't want to split before the last ')' of a function call. 
This also - # takes care of the special case of: - # atom tr1 tr2 ... trn - # where the 'tr#' are trailers that may end in a ')'. - for trailer in node.children[ 1 : ]: - if pytree_utils.NodeName( trailer ) != 'trailer': - break - if trailer.children[ 0 ].value in '([': - if len( trailer.children ) > 2: - stypes = pytree_utils.GetNodeAnnotation( - trailer.children[ 0 ], pytree_utils.Annotation.SUBTYPE ) - if stypes and subtypes.SUBSCRIPT_BRACKET in stypes: - _SetStronglyConnected( - pytree_utils.FirstLeafNode( trailer.children[ 1 ] ) ) - - last_child_node = pytree_utils.LastLeafNode( trailer ) - if last_child_node.value.strip().startswith( '#' ): - last_child_node = last_child_node.prev_sibling - if not ( style.Get( 'INDENT_CLOSING_BRACKETS' ) or - style.Get( 'DEDENT_CLOSING_BRACKETS' ) ): - last = pytree_utils.LastLeafNode( last_child_node.prev_sibling ) - if last.value != ',': - if last_child_node.value == ']': - _SetUnbreakable( last_child_node ) - else: - _SetSplitPenalty( - last_child_node, VERY_STRONGLY_CONNECTED ) - else: - # If the trailer's children are '()', then make it a strongly - # connected region. It's sometimes necessary, though undesirable, to - # split the two. 
- _SetStronglyConnected( trailer.children[ -1 ] ) - - def Visit_subscriptlist( self, node ): # pylint: disable=invalid-name - # subscriptlist ::= subscript (',' subscript)* [','] - self.DefaultNodeVisit( node ) - _SetSplitPenalty( pytree_utils.FirstLeafNode( node ), 0 ) - prev_child = None - for child in node.children: - if prev_child and prev_child.type == grammar_token.COMMA: - _SetSplitPenalty( pytree_utils.FirstLeafNode( child ), 0 ) - prev_child = child - - def Visit_subscript( self, node ): # pylint: disable=invalid-name - # subscript ::= test | [test] ':' [test] [sliceop] - _SetStronglyConnected( *node.children ) - self.DefaultNodeVisit( node ) - - def Visit_comp_for( self, node ): # pylint: disable=invalid-name - # comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter] - _SetSplitPenalty( pytree_utils.FirstLeafNode( node ), 0 ) - _SetStronglyConnected( *node.children[ 1 : ] ) - self.DefaultNodeVisit( node ) - - def Visit_old_comp_for( self, node ): # pylint: disable=invalid-name - # Python 3.7 - self.Visit_comp_for( node ) - - def Visit_comp_if( self, node ): # pylint: disable=invalid-name - # comp_if ::= 'if' old_test [comp_iter] + def Visit(self, node): + if not hasattr(node, 'is_pseudo'): # Ignore pseudo tokens. 
+ super(_SplitPenaltyAssigner, self).Visit(node) + + def Visit_import_as_names(self, node): # pyline: disable=invalid-name + # import_as_names ::= import_as_name (',' import_as_name)* [','] + self.DefaultNodeVisit(node) + prev_child = None + for child in node.children: + if (prev_child and isinstance(prev_child, pytree.Leaf) and + prev_child.value == ','): + _SetSplitPenalty(child, style.Get('SPLIT_PENALTY_IMPORT_NAMES')) + prev_child = child + + def Visit_classdef(self, node): # pylint: disable=invalid-name + # classdef ::= 'class' NAME ['(' [arglist] ')'] ':' suite + # + # NAME + _SetUnbreakable(node.children[1]) + if len(node.children) > 4: + # opening '(' + _SetUnbreakable(node.children[2]) + # ':' + _SetUnbreakable(node.children[-2]) + self.DefaultNodeVisit(node) + + def Visit_funcdef(self, node): # pylint: disable=invalid-name + # funcdef ::= 'def' NAME parameters ['->' test] ':' suite + # + # Can't break before the function name and before the colon. The parameters + # are handled by child iteration. + colon_idx = 1 + while pytree_utils.NodeName(node.children[colon_idx]) == 'simple_stmt': + colon_idx += 1 + _SetUnbreakable(node.children[colon_idx]) + arrow_idx = -1 + while colon_idx < len(node.children): + if isinstance(node.children[colon_idx], pytree.Leaf): + if node.children[colon_idx].value == ':': + break + if node.children[colon_idx].value == '->': + arrow_idx = colon_idx + colon_idx += 1 + _SetUnbreakable(node.children[colon_idx]) + self.DefaultNodeVisit(node) + if arrow_idx > 0: + _SetSplitPenalty( + pytree_utils.LastLeafNode(node.children[arrow_idx - 1]), 0) + _SetUnbreakable(node.children[arrow_idx]) + _SetStronglyConnected(node.children[arrow_idx + 1]) + + def Visit_lambdef(self, node): # pylint: disable=invalid-name + # lambdef ::= 'lambda' [varargslist] ':' test + # Loop over the lambda up to and including the colon. 
+ allow_multiline_lambdas = style.Get('ALLOW_MULTILINE_LAMBDAS') + if not allow_multiline_lambdas: + for child in node.children: + if child.type == grammar_token.COMMENT: + if re.search(r'pylint:.*disable=.*\bg-long-lambda', child.value): + allow_multiline_lambdas = True + break + + if allow_multiline_lambdas: + _SetExpressionPenalty(node, STRONGLY_CONNECTED) + else: + _SetExpressionPenalty(node, VERY_STRONGLY_CONNECTED) + + def Visit_parameters(self, node): # pylint: disable=invalid-name + # parameters ::= '(' [typedargslist] ')' + self.DefaultNodeVisit(node) + + # Can't break before the opening paren of a parameter list. + _SetUnbreakable(node.children[0]) + if not (style.Get('INDENT_CLOSING_BRACKETS') or + style.Get('DEDENT_CLOSING_BRACKETS')): + _SetStronglyConnected(node.children[-1]) + + def Visit_arglist(self, node): # pylint: disable=invalid-name + # arglist ::= argument (',' argument)* [','] + if node.children[0].type == grammar_token.STAR: + # Python 3 treats a star expression as a specific expression type. + # Process it in that method. 
+ self.Visit_star_expr(node) + return + + self.DefaultNodeVisit(node) + + for index in py3compat.range(1, len(node.children)): + child = node.children[index] + if isinstance(child, pytree.Leaf) and child.value == ',': + _SetUnbreakable(child) + + for child in node.children: + if pytree_utils.NodeName(child) == 'atom': + _IncreasePenalty(child, CONNECTED) + + def Visit_argument(self, node): # pylint: disable=invalid-name + # argument ::= test [comp_for] | test '=' test # Really [keyword '='] test + self.DefaultNodeVisit(node) + + for index in py3compat.range(1, len(node.children) - 1): + child = node.children[index] + if isinstance(child, pytree.Leaf) and child.value == '=': + _SetSplitPenalty( + pytree_utils.FirstLeafNode(node.children[index]), NAMED_ASSIGN) + _SetSplitPenalty( + pytree_utils.FirstLeafNode(node.children[index + 1]), NAMED_ASSIGN) + + def Visit_tname(self, node): # pylint: disable=invalid-name + # tname ::= NAME [':' test] + self.DefaultNodeVisit(node) + + for index in py3compat.range(1, len(node.children) - 1): + child = node.children[index] + if isinstance(child, pytree.Leaf) and child.value == ':': + _SetSplitPenalty( + pytree_utils.FirstLeafNode(node.children[index]), NAMED_ASSIGN) _SetSplitPenalty( - node.children[ 0 ], style.Get( 'SPLIT_PENALTY_BEFORE_IF_EXPR' ) ) - _SetStronglyConnected( *node.children[ 1 : ] ) - self.DefaultNodeVisit( node ) - - def Visit_old_comp_if( self, node ): # pylint: disable=invalid-name - # Python 3.7 - self.Visit_comp_if( node ) - - def Visit_test( self, node ): # pylint: disable=invalid-name - # test ::= or_test ['if' or_test 'else' test] | lambdef - _IncreasePenalty( node, OR_TEST ) - self.DefaultNodeVisit( node ) - - def Visit_or_test( self, node ): # pylint: disable=invalid-name - # or_test ::= and_test ('or' and_test)* - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, OR_TEST ) - index = 1 - while index + 1 < len( node.children ): - if style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ): - 
_DecrementSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index ] ), OR_TEST ) - else: - _DecrementSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), OR_TEST ) - index += 2 - - def Visit_and_test( self, node ): # pylint: disable=invalid-name - # and_test ::= not_test ('and' not_test)* - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, AND_TEST ) - index = 1 - while index + 1 < len( node.children ): - if style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ): - _DecrementSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index ] ), AND_TEST ) - else: - _DecrementSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), AND_TEST ) - index += 2 - - def Visit_not_test( self, node ): # pylint: disable=invalid-name - # not_test ::= 'not' not_test | comparison - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, NOT_TEST ) - - def Visit_comparison( self, node ): # pylint: disable=invalid-name - # comparison ::= expr (comp_op expr)* - self.DefaultNodeVisit( node ) - if len( node.children ) == 3 and _StronglyConnectedCompOp( node ): - _IncreasePenalty( node.children[ 1 ], VERY_STRONGLY_CONNECTED ) - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ 2 ] ), STRONGLY_CONNECTED ) + pytree_utils.FirstLeafNode(node.children[index + 1]), NAMED_ASSIGN) + + def Visit_dotted_name(self, node): # pylint: disable=invalid-name + # dotted_name ::= NAME ('.' NAME)* + for child in node.children: + self.Visit(child) + start = 2 if hasattr(node.children[0], 'is_pseudo') else 1 + for i in py3compat.range(start, len(node.children)): + _SetUnbreakable(node.children[i]) + + def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name + # dictsetmaker ::= ( (test ':' test + # (comp_for | (',' test ':' test)* [','])) | + # (test (comp_for | (',' test)* [','])) ) + for child in node.children: + self.Visit(child) + if child.type == grammar_token.COLON: + # This is a key to a dictionary. 
We don't want to split the key if at + # all possible. + _SetStronglyConnected(child) + + def Visit_trailer(self, node): # pylint: disable=invalid-name + # trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME + if node.children[0].value == '.': + before = style.Get('SPLIT_BEFORE_DOT') + _SetSplitPenalty( + node.children[0], VERY_STRONGLY_CONNECTED if before else DOTTED_NAME) + _SetSplitPenalty( + node.children[1], DOTTED_NAME if before else VERY_STRONGLY_CONNECTED) + elif len(node.children) == 2: + # Don't split an empty argument list if at all possible. + _SetSplitPenalty(node.children[1], VERY_STRONGLY_CONNECTED) + elif len(node.children) == 3: + name = pytree_utils.NodeName(node.children[1]) + if name in {'argument', 'comparison'}: + # Don't split an argument list with one element if at all possible. + _SetStronglyConnected(node.children[1]) + if (len(node.children[1].children) > 1 and + pytree_utils.NodeName(node.children[1].children[1]) == 'comp_for'): + # Don't penalize splitting before a comp_for expression. 
+ _SetSplitPenalty(pytree_utils.FirstLeafNode(node.children[1]), 0) else: - _IncreasePenalty( node, COMPARISON ) - - def Visit_star_expr( self, node ): # pylint: disable=invalid-name - # star_expr ::= '*' expr - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, STAR_EXPR ) - - def Visit_expr( self, node ): # pylint: disable=invalid-name - # expr ::= xor_expr ('|' xor_expr)* - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, EXPR ) - _SetBitwiseOperandPenalty( node, '|' ) - - def Visit_xor_expr( self, node ): # pylint: disable=invalid-name - # xor_expr ::= and_expr ('^' and_expr)* - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, XOR_EXPR ) - _SetBitwiseOperandPenalty( node, '^' ) - - def Visit_and_expr( self, node ): # pylint: disable=invalid-name - # and_expr ::= shift_expr ('&' shift_expr)* - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, AND_EXPR ) - _SetBitwiseOperandPenalty( node, '&' ) - - def Visit_shift_expr( self, node ): # pylint: disable=invalid-name - # shift_expr ::= arith_expr (('<<'|'>>') arith_expr)* - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, SHIFT_EXPR ) - - _ARITH_OPS = frozenset( { 'PLUS', 'MINUS' } ) - - def Visit_arith_expr( self, node ): # pylint: disable=invalid-name - # arith_expr ::= term (('+'|'-') term)* - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, ARITH_EXPR ) - _SetExpressionOperandPenalty( node, self._ARITH_OPS ) - - _TERM_OPS = frozenset( { 'STAR', 'AT', 'SLASH', 'PERCENT', 'DOUBLESLASH' } ) - - def Visit_term( self, node ): # pylint: disable=invalid-name - # term ::= factor (('*'|'@'|'/'|'%'|'//') factor)* - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, TERM ) - _SetExpressionOperandPenalty( node, self._TERM_OPS ) - - def Visit_factor( self, node ): # pyline: disable=invalid-name - # factor ::= ('+'|'-'|'~') factor | power - self.DefaultNodeVisit( node ) - _IncreasePenalty( node, FACTOR ) - - def Visit_atom( self, node ): # pylint: disable=invalid-name - # atom ::= 
('(' [yield_expr|testlist_gexp] ')' - # '[' [listmaker] ']' | - # '{' [dictsetmaker] '}') - self.DefaultNodeVisit( node ) - if ( node.children[ 0 ].value == '(' and - not hasattr( node.children[ 0 ], 'is_pseudo' ) ): - if node.children[ -1 ].value == ')': - if pytree_utils.NodeName( node.parent ) == 'if_stmt': - _SetSplitPenalty( node.children[ -1 ], STRONGLY_CONNECTED ) - else: - if len( node.children ) > 2: - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ 1 ] ), EXPR ) - _SetSplitPenalty( node.children[ -1 ], ATOM ) - elif node.children[ 0 ].value in '[{' and len( node.children ) == 2: - # Keep empty containers together if we can. - _SetUnbreakable( node.children[ -1 ] ) - - def Visit_testlist_gexp( self, node ): # pylint: disable=invalid-name - self.DefaultNodeVisit( node ) + _SetSplitPenalty( + pytree_utils.FirstLeafNode(node.children[1]), + ONE_ELEMENT_ARGUMENT) + elif (node.children[0].type == grammar_token.LSQB and + len(node.children[1].children) > 2 and + (name.endswith('_test') or name.endswith('_expr'))): + _SetStronglyConnected(node.children[1].children[0]) + _SetStronglyConnected(node.children[1].children[2]) + + # Still allow splitting around the operator. + split_before = ( + ( + name.endswith('_test') and + style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR')) or ( + name.endswith('_expr') and + style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'))) + if split_before: + _SetSplitPenalty( + pytree_utils.LastLeafNode(node.children[1].children[1]), 0) + else: + _SetSplitPenalty( + pytree_utils.FirstLeafNode(node.children[1].children[2]), 0) + + # Don't split the ending bracket of a subscript list. + _RecAnnotate( + node.children[-1], pytree_utils.Annotation.SPLIT_PENALTY, + VERY_STRONGLY_CONNECTED) + elif name not in {'arglist', 'argument', 'term', 'or_test', 'and_test', + 'comparison', 'atom', 'power'}: + # Don't split an argument list with one element if at all possible. 
+ stypes = pytree_utils.GetNodeAnnotation( + pytree_utils.FirstLeafNode(node), pytree_utils.Annotation.SUBTYPE) + if stypes and subtypes.SUBSCRIPT_BRACKET in stypes: + _IncreasePenalty(node, SUBSCRIPT) + + # Bump up the split penalty for the first part of a subscript. We + # would rather not split there. + _IncreasePenalty(node.children[1], CONNECTED) + else: + _SetStronglyConnected(node.children[1], node.children[2]) + + if name == 'arglist': + _SetStronglyConnected(node.children[-1]) + + self.DefaultNodeVisit(node) + + def Visit_power(self, node): # pylint: disable=invalid-name,missing-docstring + # power ::= atom trailer* ['**' factor] + self.DefaultNodeVisit(node) + + # When atom is followed by a trailer, we can not break between them. + # E.g. arr[idx] - no break allowed between 'arr' and '['. + if (len(node.children) > 1 and + pytree_utils.NodeName(node.children[1]) == 'trailer'): + # children[1] itself is a whole trailer: we don't want to + # mark all of it as unbreakable, only its first token: (, [ or . + first = pytree_utils.FirstLeafNode(node.children[1]) + if first.value != '.': + _SetUnbreakable(node.children[1].children[0]) + + # A special case when there are more trailers in the sequence. Given: + # atom tr1 tr2 + # The last token of tr1 and the first token of tr2 comprise an unbreakable + # region. For example: foo.bar.baz(1) + # We can't put breaks between either of the '.', '(', or '[' and the names + # *preceding* them. 
+ prev_trailer_idx = 1 + while prev_trailer_idx < len(node.children) - 1: + cur_trailer_idx = prev_trailer_idx + 1 + cur_trailer = node.children[cur_trailer_idx] + if pytree_utils.NodeName(cur_trailer) != 'trailer': + break + + # Now we know we have two trailers one after the other + prev_trailer = node.children[prev_trailer_idx] + if prev_trailer.children[-1].value != ')': + # Set the previous node unbreakable if it's not a function call: + # atom tr1() tr2 + # It may be necessary (though undesirable) to split up a previous + # function call's parentheses to the next line. + _SetStronglyConnected(prev_trailer.children[-1]) + _SetStronglyConnected(cur_trailer.children[0]) + prev_trailer_idx = cur_trailer_idx + + # We don't want to split before the last ')' of a function call. This also + # takes care of the special case of: + # atom tr1 tr2 ... trn + # where the 'tr#' are trailers that may end in a ')'. + for trailer in node.children[1:]: + if pytree_utils.NodeName(trailer) != 'trailer': + break + if trailer.children[0].value in '([': + if len(trailer.children) > 2: + stypes = pytree_utils.GetNodeAnnotation( + trailer.children[0], pytree_utils.Annotation.SUBTYPE) + if stypes and subtypes.SUBSCRIPT_BRACKET in stypes: + _SetStronglyConnected( + pytree_utils.FirstLeafNode(trailer.children[1])) + + last_child_node = pytree_utils.LastLeafNode(trailer) + if last_child_node.value.strip().startswith('#'): + last_child_node = last_child_node.prev_sibling + if not (style.Get('INDENT_CLOSING_BRACKETS') or + style.Get('DEDENT_CLOSING_BRACKETS')): + last = pytree_utils.LastLeafNode(last_child_node.prev_sibling) + if last.value != ',': + if last_child_node.value == ']': + _SetUnbreakable(last_child_node) + else: + _SetSplitPenalty(last_child_node, VERY_STRONGLY_CONNECTED) + else: + # If the trailer's children are '()', then make it a strongly + # connected region. It's sometimes necessary, though undesirable, to + # split the two. 
+ _SetStronglyConnected(trailer.children[-1]) + + def Visit_subscriptlist(self, node): # pylint: disable=invalid-name + # subscriptlist ::= subscript (',' subscript)* [','] + self.DefaultNodeVisit(node) + _SetSplitPenalty(pytree_utils.FirstLeafNode(node), 0) + prev_child = None + for child in node.children: + if prev_child and prev_child.type == grammar_token.COMMA: + _SetSplitPenalty(pytree_utils.FirstLeafNode(child), 0) + prev_child = child + + def Visit_subscript(self, node): # pylint: disable=invalid-name + # subscript ::= test | [test] ':' [test] [sliceop] + _SetStronglyConnected(*node.children) + self.DefaultNodeVisit(node) + + def Visit_comp_for(self, node): # pylint: disable=invalid-name + # comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter] + _SetSplitPenalty(pytree_utils.FirstLeafNode(node), 0) + _SetStronglyConnected(*node.children[1:]) + self.DefaultNodeVisit(node) + + def Visit_old_comp_for(self, node): # pylint: disable=invalid-name + # Python 3.7 + self.Visit_comp_for(node) + + def Visit_comp_if(self, node): # pylint: disable=invalid-name + # comp_if ::= 'if' old_test [comp_iter] + _SetSplitPenalty( + node.children[0], style.Get('SPLIT_PENALTY_BEFORE_IF_EXPR')) + _SetStronglyConnected(*node.children[1:]) + self.DefaultNodeVisit(node) + + def Visit_old_comp_if(self, node): # pylint: disable=invalid-name + # Python 3.7 + self.Visit_comp_if(node) + + def Visit_test(self, node): # pylint: disable=invalid-name + # test ::= or_test ['if' or_test 'else' test] | lambdef + _IncreasePenalty(node, OR_TEST) + self.DefaultNodeVisit(node) + + def Visit_or_test(self, node): # pylint: disable=invalid-name + # or_test ::= and_test ('or' and_test)* + self.DefaultNodeVisit(node) + _IncreasePenalty(node, OR_TEST) + index = 1 + while index + 1 < len(node.children): + if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'): + _DecrementSplitPenalty( + pytree_utils.FirstLeafNode(node.children[index]), OR_TEST) + else: + _DecrementSplitPenalty( + 
pytree_utils.FirstLeafNode(node.children[index + 1]), OR_TEST) + index += 2 + + def Visit_and_test(self, node): # pylint: disable=invalid-name + # and_test ::= not_test ('and' not_test)* + self.DefaultNodeVisit(node) + _IncreasePenalty(node, AND_TEST) + index = 1 + while index + 1 < len(node.children): + if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'): + _DecrementSplitPenalty( + pytree_utils.FirstLeafNode(node.children[index]), AND_TEST) + else: + _DecrementSplitPenalty( + pytree_utils.FirstLeafNode(node.children[index + 1]), AND_TEST) + index += 2 + + def Visit_not_test(self, node): # pylint: disable=invalid-name + # not_test ::= 'not' not_test | comparison + self.DefaultNodeVisit(node) + _IncreasePenalty(node, NOT_TEST) + + def Visit_comparison(self, node): # pylint: disable=invalid-name + # comparison ::= expr (comp_op expr)* + self.DefaultNodeVisit(node) + if len(node.children) == 3 and _StronglyConnectedCompOp(node): + _IncreasePenalty(node.children[1], VERY_STRONGLY_CONNECTED) + _SetSplitPenalty( + pytree_utils.FirstLeafNode(node.children[2]), STRONGLY_CONNECTED) + else: + _IncreasePenalty(node, COMPARISON) + + def Visit_star_expr(self, node): # pylint: disable=invalid-name + # star_expr ::= '*' expr + self.DefaultNodeVisit(node) + _IncreasePenalty(node, STAR_EXPR) + + def Visit_expr(self, node): # pylint: disable=invalid-name + # expr ::= xor_expr ('|' xor_expr)* + self.DefaultNodeVisit(node) + _IncreasePenalty(node, EXPR) + _SetBitwiseOperandPenalty(node, '|') + + def Visit_xor_expr(self, node): # pylint: disable=invalid-name + # xor_expr ::= and_expr ('^' and_expr)* + self.DefaultNodeVisit(node) + _IncreasePenalty(node, XOR_EXPR) + _SetBitwiseOperandPenalty(node, '^') + + def Visit_and_expr(self, node): # pylint: disable=invalid-name + # and_expr ::= shift_expr ('&' shift_expr)* + self.DefaultNodeVisit(node) + _IncreasePenalty(node, AND_EXPR) + _SetBitwiseOperandPenalty(node, '&') + + def Visit_shift_expr(self, node): # pylint: disable=invalid-name + # 
shift_expr ::= arith_expr (('<<'|'>>') arith_expr)* + self.DefaultNodeVisit(node) + _IncreasePenalty(node, SHIFT_EXPR) + + _ARITH_OPS = frozenset({'PLUS', 'MINUS'}) + + def Visit_arith_expr(self, node): # pylint: disable=invalid-name + # arith_expr ::= term (('+'|'-') term)* + self.DefaultNodeVisit(node) + _IncreasePenalty(node, ARITH_EXPR) + _SetExpressionOperandPenalty(node, self._ARITH_OPS) + + _TERM_OPS = frozenset({'STAR', 'AT', 'SLASH', 'PERCENT', 'DOUBLESLASH'}) + + def Visit_term(self, node): # pylint: disable=invalid-name + # term ::= factor (('*'|'@'|'/'|'%'|'//') factor)* + self.DefaultNodeVisit(node) + _IncreasePenalty(node, TERM) + _SetExpressionOperandPenalty(node, self._TERM_OPS) + + def Visit_factor(self, node): # pyline: disable=invalid-name + # factor ::= ('+'|'-'|'~') factor | power + self.DefaultNodeVisit(node) + _IncreasePenalty(node, FACTOR) + + def Visit_atom(self, node): # pylint: disable=invalid-name + # atom ::= ('(' [yield_expr|testlist_gexp] ')' + # '[' [listmaker] ']' | + # '{' [dictsetmaker] '}') + self.DefaultNodeVisit(node) + if (node.children[0].value == '(' and + not hasattr(node.children[0], 'is_pseudo')): + if node.children[-1].value == ')': + if pytree_utils.NodeName(node.parent) == 'if_stmt': + _SetSplitPenalty(node.children[-1], STRONGLY_CONNECTED) + else: + if len(node.children) > 2: + _SetSplitPenalty(pytree_utils.FirstLeafNode(node.children[1]), EXPR) + _SetSplitPenalty(node.children[-1], ATOM) + elif node.children[0].value in '[{' and len(node.children) == 2: + # Keep empty containers together if we can. 
+ _SetUnbreakable(node.children[-1]) + + def Visit_testlist_gexp(self, node): # pylint: disable=invalid-name + self.DefaultNodeVisit(node) + prev_was_comma = False + for child in node.children: + if isinstance(child, pytree.Leaf) and child.value == ',': + _SetUnbreakable(child) + prev_was_comma = True + else: + if prev_was_comma: + _SetSplitPenalty(pytree_utils.FirstLeafNode(child), TOGETHER) prev_was_comma = False - for child in node.children: - if isinstance( child, pytree.Leaf ) and child.value == ',': - _SetUnbreakable( child ) - prev_was_comma = True - else: - if prev_was_comma: - _SetSplitPenalty( pytree_utils.FirstLeafNode( child ), TOGETHER ) - prev_was_comma = False -def _SetUnbreakable( node ): - """Set an UNBREAKABLE penalty annotation for the given node.""" - _RecAnnotate( node, pytree_utils.Annotation.SPLIT_PENALTY, UNBREAKABLE ) +def _SetUnbreakable(node): + """Set an UNBREAKABLE penalty annotation for the given node.""" + _RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY, UNBREAKABLE) -def _SetStronglyConnected( *nodes ): - """Set a STRONGLY_CONNECTED penalty annotation for the given nodes.""" - for node in nodes: - _RecAnnotate( node, pytree_utils.Annotation.SPLIT_PENALTY, STRONGLY_CONNECTED ) +def _SetStronglyConnected(*nodes): + """Set a STRONGLY_CONNECTED penalty annotation for the given nodes.""" + for node in nodes: + _RecAnnotate( + node, pytree_utils.Annotation.SPLIT_PENALTY, STRONGLY_CONNECTED) -def _SetExpressionPenalty( node, penalty ): - """Set a penalty annotation on children nodes.""" +def _SetExpressionPenalty(node, penalty): + """Set a penalty annotation on children nodes.""" - def RecExpression( node, first_child_leaf ): - if node is first_child_leaf: - return + def RecExpression(node, first_child_leaf): + if node is first_child_leaf: + return - if isinstance( node, pytree.Leaf ): - if node.value in { '(', 'for', 'if' }: - return - penalty_annotation = pytree_utils.GetNodeAnnotation( - node, 
pytree_utils.Annotation.SPLIT_PENALTY, default = 0 ) - if penalty_annotation < penalty: - _SetSplitPenalty( node, penalty ) - else: - for child in node.children: - RecExpression( child, first_child_leaf ) - - RecExpression( node, pytree_utils.FirstLeafNode( node ) ) - - -def _SetBitwiseOperandPenalty( node, op ): - for index in py3compat.range( 1, len( node.children ) - 1 ): - child = node.children[ index ] - if isinstance( child, pytree.Leaf ) and child.value == op: - if style.Get( 'SPLIT_BEFORE_BITWISE_OPERATOR' ): - _SetSplitPenalty( child, style.Get( 'SPLIT_PENALTY_BITWISE_OPERATOR' ) ) - else: - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), - style.Get( 'SPLIT_PENALTY_BITWISE_OPERATOR' ) ) - - -def _SetExpressionOperandPenalty( node, ops ): - for index in py3compat.range( 1, len( node.children ) - 1 ): - child = node.children[ index ] - if pytree_utils.NodeName( child ) in ops: - if style.Get( 'SPLIT_BEFORE_ARITHMETIC_OPERATOR' ): - _SetSplitPenalty( - child, style.Get( 'SPLIT_PENALTY_ARITHMETIC_OPERATOR' ) ) - else: - _SetSplitPenalty( - pytree_utils.FirstLeafNode( node.children[ index + 1 ] ), - style.Get( 'SPLIT_PENALTY_ARITHMETIC_OPERATOR' ) ) - - -def _IncreasePenalty( node, amt ): - """Increase a penalty annotation on children nodes.""" - - def RecExpression( node, first_child_leaf ): - if node is first_child_leaf: - return - - if isinstance( node, pytree.Leaf ): - if node.value in { '(', 'for' }: - return - penalty = pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.SPLIT_PENALTY, default = 0 ) - _SetSplitPenalty( node, penalty + amt ) - else: - for child in node.children: - RecExpression( child, first_child_leaf ) + if isinstance(node, pytree.Leaf): + if node.value in {'(', 'for', 'if'}: + return + penalty_annotation = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.SPLIT_PENALTY, default=0) + if penalty_annotation < penalty: + _SetSplitPenalty(node, penalty) + else: + for child in 
node.children: + RecExpression(child, first_child_leaf) + + RecExpression(node, pytree_utils.FirstLeafNode(node)) + + +def _SetBitwiseOperandPenalty(node, op): + for index in py3compat.range(1, len(node.children) - 1): + child = node.children[index] + if isinstance(child, pytree.Leaf) and child.value == op: + if style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'): + _SetSplitPenalty(child, style.Get('SPLIT_PENALTY_BITWISE_OPERATOR')) + else: + _SetSplitPenalty( + pytree_utils.FirstLeafNode(node.children[index + 1]), + style.Get('SPLIT_PENALTY_BITWISE_OPERATOR')) + + +def _SetExpressionOperandPenalty(node, ops): + for index in py3compat.range(1, len(node.children) - 1): + child = node.children[index] + if pytree_utils.NodeName(child) in ops: + if style.Get('SPLIT_BEFORE_ARITHMETIC_OPERATOR'): + _SetSplitPenalty(child, style.Get('SPLIT_PENALTY_ARITHMETIC_OPERATOR')) + else: + _SetSplitPenalty( + pytree_utils.FirstLeafNode(node.children[index + 1]), + style.Get('SPLIT_PENALTY_ARITHMETIC_OPERATOR')) + + +def _IncreasePenalty(node, amt): + """Increase a penalty annotation on children nodes.""" + + def RecExpression(node, first_child_leaf): + if node is first_child_leaf: + return + + if isinstance(node, pytree.Leaf): + if node.value in {'(', 'for'}: + return + penalty = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.SPLIT_PENALTY, default=0) + _SetSplitPenalty(node, penalty + amt) + else: + for child in node.children: + RecExpression(child, first_child_leaf) - RecExpression( node, pytree_utils.FirstLeafNode( node ) ) + RecExpression(node, pytree_utils.FirstLeafNode(node)) -def _RecAnnotate( tree, annotate_name, annotate_value ): - """Recursively set the given annotation on all leafs of the subtree. +def _RecAnnotate(tree, annotate_name, annotate_value): + """Recursively set the given annotation on all leafs of the subtree. Takes care to only increase the penalty. If the node already has a higher or equal penalty associated with it, this is a no-op. 
@@ -603,40 +595,40 @@ def _RecAnnotate( tree, annotate_name, annotate_value ): annotate_name: name of the annotation to set annotate_value: value of the annotation to set """ - for child in tree.children: - _RecAnnotate( child, annotate_name, annotate_value ) - if isinstance( tree, pytree.Leaf ): - cur_annotate = pytree_utils.GetNodeAnnotation( - tree, annotate_name, default = 0 ) - if cur_annotate < annotate_value: - pytree_utils.SetNodeAnnotation( tree, annotate_name, annotate_value ) - - -_COMP_OPS = frozenset( { '==', '!=', '<=', '<', '>', '>=', '<>', 'in', 'is' } ) - - -def _StronglyConnectedCompOp( op ): - if ( len( op.children[ 1 ].children ) == 2 and - pytree_utils.NodeName( op.children[ 1 ] ) == 'comp_op' ): - if ( pytree_utils.FirstLeafNode( op.children[ 1 ] ).value == 'not' and - pytree_utils.LastLeafNode( op.children[ 1 ] ).value == 'in' ): - return True - if ( pytree_utils.FirstLeafNode( op.children[ 1 ] ).value == 'is' and - pytree_utils.LastLeafNode( op.children[ 1 ] ).value == 'not' ): - return True - if ( isinstance( op.children[ 1 ], pytree.Leaf ) and - op.children[ 1 ].value in _COMP_OPS ): - return True - return False - - -def _DecrementSplitPenalty( node, amt ): - penalty = pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.SPLIT_PENALTY, default = amt ) - penalty = penalty - amt if amt < penalty else 0 - _SetSplitPenalty( node, penalty ) - - -def _SetSplitPenalty( node, penalty ): - pytree_utils.SetNodeAnnotation( - node, pytree_utils.Annotation.SPLIT_PENALTY, penalty ) + for child in tree.children: + _RecAnnotate(child, annotate_name, annotate_value) + if isinstance(tree, pytree.Leaf): + cur_annotate = pytree_utils.GetNodeAnnotation( + tree, annotate_name, default=0) + if cur_annotate < annotate_value: + pytree_utils.SetNodeAnnotation(tree, annotate_name, annotate_value) + + +_COMP_OPS = frozenset({'==', '!=', '<=', '<', '>', '>=', '<>', 'in', 'is'}) + + +def _StronglyConnectedCompOp(op): + if (len(op.children[1].children) == 2 
and + pytree_utils.NodeName(op.children[1]) == 'comp_op'): + if (pytree_utils.FirstLeafNode(op.children[1]).value == 'not' and + pytree_utils.LastLeafNode(op.children[1]).value == 'in'): + return True + if (pytree_utils.FirstLeafNode(op.children[1]).value == 'is' and + pytree_utils.LastLeafNode(op.children[1]).value == 'not'): + return True + if (isinstance(op.children[1], pytree.Leaf) and + op.children[1].value in _COMP_OPS): + return True + return False + + +def _DecrementSplitPenalty(node, amt): + penalty = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.SPLIT_PENALTY, default=amt) + penalty = penalty - amt if amt < penalty else 0 + _SetSplitPenalty(node, penalty) + + +def _SetSplitPenalty(node, penalty): + pytree_utils.SetNodeAnnotation( + node, pytree_utils.Annotation.SPLIT_PENALTY, penalty) diff --git a/yapf/pytree/subtype_assigner.py b/yapf/pytree/subtype_assigner.py index 19c65b323..06d1411f8 100644 --- a/yapf/pytree/subtype_assigner.py +++ b/yapf/pytree/subtype_assigner.py @@ -34,14 +34,14 @@ from yapf.yapflib import subtypes -def AssignSubtypes( tree ): - """Run the subtype assigner visitor over the tree, modifying it in place. +def AssignSubtypes(tree): + """Run the subtype assigner visitor over the tree, modifying it in place. Arguments: tree: the top-level pytree node to annotate with subtypes. """ - subtype_assigner = _SubtypeAssigner() - subtype_assigner.Visit( tree ) + subtype_assigner = _SubtypeAssigner() + subtype_assigner.Visit(tree) # Map tokens in argument lists to their respective subtype. @@ -53,448 +53,446 @@ def AssignSubtypes( tree ): } -class _SubtypeAssigner( pytree_visitor.PyTreeVisitor ): - """_SubtypeAssigner - see file-level docstring for detailed description. +class _SubtypeAssigner(pytree_visitor.PyTreeVisitor): + """_SubtypeAssigner - see file-level docstring for detailed description. The subtype is added as an annotation to the pytree token. 
""" - def Visit_dictsetmaker( self, node ): # pylint: disable=invalid-name - # dictsetmaker ::= (test ':' test (comp_for | - # (',' test ':' test)* [','])) | - # (test (comp_for | (',' test)* [','])) - for child in node.children: - self.Visit( child ) - - comp_for = False - dict_maker = False - - for child in node.children: - if pytree_utils.NodeName( child ) == 'comp_for': - comp_for = True - _AppendFirstLeafTokenSubtype( child, subtypes.DICT_SET_GENERATOR ) - elif child.type in ( grammar_token.COLON, grammar_token.DOUBLESTAR ): - dict_maker = True - - if not comp_for and dict_maker: - last_was_colon = False - unpacking = False - for child in node.children: - if child.type == grammar_token.DOUBLESTAR: - _AppendFirstLeafTokenSubtype( child, subtypes.KWARGS_STAR_STAR ) - if last_was_colon: - if style.Get( 'INDENT_DICTIONARY_VALUE' ): - _InsertPseudoParentheses( child ) - else: - _AppendFirstLeafTokenSubtype( child, subtypes.DICTIONARY_VALUE ) - elif ( isinstance( child, pytree.Node ) or - ( not child.value.startswith( '#' ) and - child.value not in '{:,' ) ): - # Mark the first leaf of a key entry as a DICTIONARY_KEY. We - # normally want to split before them if the dictionary cannot exist - # on a single line. 
- if not unpacking or pytree_utils.FirstLeafNode( - child ).value == '**': - _AppendFirstLeafTokenSubtype( child, subtypes.DICTIONARY_KEY ) - _AppendSubtypeRec( child, subtypes.DICTIONARY_KEY_PART ) - last_was_colon = child.type == grammar_token.COLON - if child.type == grammar_token.DOUBLESTAR: - unpacking = True - elif last_was_colon: - unpacking = False - - def Visit_expr_stmt( self, node ): # pylint: disable=invalid-name - # expr_stmt ::= testlist_star_expr (augassign (yield_expr|testlist) - # | ('=' (yield_expr|testlist_star_expr))*) - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == '=': - _AppendTokenSubtype( child, subtypes.ASSIGN_OPERATOR ) - - def Visit_or_test( self, node ): # pylint: disable=invalid-name - # or_test ::= and_test ('or' and_test)* - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == 'or': - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - - def Visit_and_test( self, node ): # pylint: disable=invalid-name - # and_test ::= not_test ('and' not_test)* - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == 'and': - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - - def Visit_not_test( self, node ): # pylint: disable=invalid-name - # not_test ::= 'not' not_test | comparison - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == 'not': - _AppendTokenSubtype( child, subtypes.UNARY_OPERATOR ) - - def Visit_comparison( self, node ): # pylint: disable=invalid-name - # comparison ::= expr (comp_op expr)* - # comp_op ::= '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not in'|'is'|'is not' - for child in node.children: - self.Visit( child ) - if ( isinstance( child, pytree.Leaf ) and child.value - in { '<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'is' } ): - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - elif 
pytree_utils.NodeName( child ) == 'comp_op': - for grandchild in child.children: - _AppendTokenSubtype( grandchild, subtypes.BINARY_OPERATOR ) - - def Visit_star_expr( self, node ): # pylint: disable=invalid-name - # star_expr ::= '*' expr - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == '*': - _AppendTokenSubtype( child, subtypes.UNARY_OPERATOR ) - _AppendTokenSubtype( child, subtypes.VARARGS_STAR ) - - def Visit_expr( self, node ): # pylint: disable=invalid-name - # expr ::= xor_expr ('|' xor_expr)* - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == '|': - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - - def Visit_xor_expr( self, node ): # pylint: disable=invalid-name - # xor_expr ::= and_expr ('^' and_expr)* - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == '^': - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - - def Visit_and_expr( self, node ): # pylint: disable=invalid-name - # and_expr ::= shift_expr ('&' shift_expr)* - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == '&': - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - - def Visit_shift_expr( self, node ): # pylint: disable=invalid-name - # shift_expr ::= arith_expr (('<<'|'>>') arith_expr)* - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value in { '<<', '>>' }: - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - - def Visit_arith_expr( self, node ): # pylint: disable=invalid-name - # arith_expr ::= term (('+'|'-') term)* - for child in node.children: - self.Visit( child ) - if _IsAExprOperator( child ): - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - - if _IsSimpleExpression( node ): - for child in node.children: - if _IsAExprOperator( child ): - _AppendTokenSubtype( child, 
subtypes.SIMPLE_EXPRESSION ) - - def Visit_term( self, node ): # pylint: disable=invalid-name - # term ::= factor (('*'|'/'|'%'|'//'|'@') factor)* - for child in node.children: - self.Visit( child ) - if _IsMExprOperator( child ): - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - - if _IsSimpleExpression( node ): - for child in node.children: - if _IsMExprOperator( child ): - _AppendTokenSubtype( child, subtypes.SIMPLE_EXPRESSION ) - - def Visit_factor( self, node ): # pylint: disable=invalid-name - # factor ::= ('+'|'-'|'~') factor | power - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value in '+-~': - _AppendTokenSubtype( child, subtypes.UNARY_OPERATOR ) - - def Visit_power( self, node ): # pylint: disable=invalid-name - # power ::= atom trailer* ['**' factor] - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == '**': - _AppendTokenSubtype( child, subtypes.BINARY_OPERATOR ) - - def Visit_trailer( self, node ): # pylint: disable=invalid-name - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value in '[]': - _AppendTokenSubtype( child, subtypes.SUBSCRIPT_BRACKET ) - - def Visit_subscript( self, node ): # pylint: disable=invalid-name - # subscript ::= test | [test] ':' [test] [sliceop] - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == ':': - _AppendTokenSubtype( child, subtypes.SUBSCRIPT_COLON ) - - def Visit_sliceop( self, node ): # pylint: disable=invalid-name - # sliceop ::= ':' [test] - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == ':': - _AppendTokenSubtype( child, subtypes.SUBSCRIPT_COLON ) - - def Visit_argument( self, node ): # pylint: disable=invalid-name - # argument ::= - # test [comp_for] | test '=' test - self._ProcessArgLists( node ) - #TODO add a subtype to each 
argument? - - def Visit_arglist( self, node ): # pylint: disable=invalid-name - # arglist ::= - # (argument ',')* (argument [','] - # | '*' test (',' argument)* [',' '**' test] - # | '**' test) - self._ProcessArgLists( node ) - _SetArgListSubtype( - node, subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST ) + def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name + # dictsetmaker ::= (test ':' test (comp_for | + # (',' test ':' test)* [','])) | + # (test (comp_for | (',' test)* [','])) + for child in node.children: + self.Visit(child) - def Visit_tname( self, node ): # pylint: disable=invalid-name - self._ProcessArgLists( node ) - _SetArgListSubtype( - node, subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST ) - - def Visit_decorator( self, node ): # pylint: disable=invalid-name - # decorator ::= - # '@' dotted_name [ '(' [arglist] ')' ] NEWLINE - for child in node.children: - if isinstance( child, pytree.Leaf ) and child.value == '@': - _AppendTokenSubtype( child, subtype = subtypes.DECORATOR ) - self.Visit( child ) - - def Visit_funcdef( self, node ): # pylint: disable=invalid-name - # funcdef ::= - # 'def' NAME parameters ['->' test] ':' suite - for child in node.children: - if child.type == grammar_token.NAME and child.value != 'def': - _AppendTokenSubtype( child, subtypes.FUNC_DEF ) - break - for child in node.children: - self.Visit( child ) - - def Visit_parameters( self, node ): # pylint: disable=invalid-name - # parameters ::= '(' [typedargslist] ')' - self._ProcessArgLists( node ) - if len( node.children ) > 2: - _AppendFirstLeafTokenSubtype( node.children[ 1 ], subtypes.PARAMETER_START ) - _AppendLastLeafTokenSubtype( node.children[ -2 ], subtypes.PARAMETER_STOP ) - - def Visit_typedargslist( self, node ): # pylint: disable=invalid-name - # typedargslist ::= - # ((tfpdef ['=' test] ',')* - # ('*' [tname] (',' tname ['=' test])* [',' '**' tname] - # | '**' tname) - # | tfpdef ['=' test] (',' 
tfpdef ['=' test])* [',']) - self._ProcessArgLists( node ) - _SetArgListSubtype( - node, subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST ) - tname = False - if not node.children: - return - - _AppendFirstLeafTokenSubtype( node.children[ 0 ], subtypes.PARAMETER_START ) - _AppendLastLeafTokenSubtype( node.children[ -1 ], subtypes.PARAMETER_STOP ) - - tname = pytree_utils.NodeName( node.children[ 0 ] ) == 'tname' - for i in range( 1, len( node.children ) ): - prev_child = node.children[ i - 1 ] - child = node.children[ i ] - if prev_child.type == grammar_token.COMMA: - _AppendFirstLeafTokenSubtype( child, subtypes.PARAMETER_START ) - elif child.type == grammar_token.COMMA: - _AppendLastLeafTokenSubtype( prev_child, subtypes.PARAMETER_STOP ) - - if pytree_utils.NodeName( child ) == 'tname': - tname = True - _SetArgListSubtype( - child, subtypes.TYPED_NAME, subtypes.TYPED_NAME_ARG_LIST ) - elif child.type == grammar_token.COMMA: - tname = False - elif child.type == grammar_token.EQUAL and tname: - _AppendTokenSubtype( child, subtype = subtypes.TYPED_NAME ) - tname = False - - def Visit_varargslist( self, node ): # pylint: disable=invalid-name - # varargslist ::= - # ((vfpdef ['=' test] ',')* - # ('*' [vname] (',' vname ['=' test])* [',' '**' vname] - # | '**' vname) - # | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) - self._ProcessArgLists( node ) - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ) and child.value == '=': - _AppendTokenSubtype( child, subtypes.VARARGS_LIST ) - - def Visit_comp_for( self, node ): # pylint: disable=invalid-name - # comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter] - _AppendSubtypeRec( node, subtypes.COMP_FOR ) - # Mark the previous node as COMP_EXPR unless this is a nested comprehension - # as these will have the outer comprehension as their previous node. 
- attr = pytree_utils.GetNodeAnnotation( - node.parent, pytree_utils.Annotation.SUBTYPE ) - if not attr or subtypes.COMP_FOR not in attr: - _AppendSubtypeRec( node.parent.children[ 0 ], subtypes.COMP_EXPR ) - self.DefaultNodeVisit( node ) - - def Visit_old_comp_for( self, node ): # pylint: disable=invalid-name - # Python 3.7 - self.Visit_comp_for( node ) - - def Visit_comp_if( self, node ): # pylint: disable=invalid-name - # comp_if ::= 'if' old_test [comp_iter] - _AppendSubtypeRec( node, subtypes.COMP_IF ) - self.DefaultNodeVisit( node ) - - def Visit_old_comp_if( self, node ): # pylint: disable=invalid-name - # Python 3.7 - self.Visit_comp_if( node ) - - def _ProcessArgLists( self, node ): - """Common method for processing argument lists.""" - for child in node.children: - self.Visit( child ) - if isinstance( child, pytree.Leaf ): - _AppendTokenSubtype( - child, - subtype = _ARGLIST_TOKEN_TO_SUBTYPE.get( - child.value, subtypes.NONE ) ) - - -def _SetArgListSubtype( node, node_subtype, list_subtype ): - """Set named assign subtype on elements in a arg list.""" - - def HasSubtype( node ): - """Return True if the arg list has a named assign subtype.""" - if isinstance( node, pytree.Leaf ): - return node_subtype in pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.SUBTYPE, set() ) - - for child in node.children: - node_name = pytree_utils.NodeName( child ) - if node_name not in { 'atom', 'arglist', 'power' }: - if HasSubtype( child ): - return True - - return False - - if not HasSubtype( node ): - return + comp_for = False + dict_maker = False + + for child in node.children: + if pytree_utils.NodeName(child) == 'comp_for': + comp_for = True + _AppendFirstLeafTokenSubtype(child, subtypes.DICT_SET_GENERATOR) + elif child.type in (grammar_token.COLON, grammar_token.DOUBLESTAR): + dict_maker = True + + if not comp_for and dict_maker: + last_was_colon = False + unpacking = False + for child in node.children: + if child.type == grammar_token.DOUBLESTAR: + 
_AppendFirstLeafTokenSubtype(child, subtypes.KWARGS_STAR_STAR) + if last_was_colon: + if style.Get('INDENT_DICTIONARY_VALUE'): + _InsertPseudoParentheses(child) + else: + _AppendFirstLeafTokenSubtype(child, subtypes.DICTIONARY_VALUE) + elif (isinstance(child, pytree.Node) or + (not child.value.startswith('#') and child.value not in '{:,')): + # Mark the first leaf of a key entry as a DICTIONARY_KEY. We + # normally want to split before them if the dictionary cannot exist + # on a single line. + if not unpacking or pytree_utils.FirstLeafNode(child).value == '**': + _AppendFirstLeafTokenSubtype(child, subtypes.DICTIONARY_KEY) + _AppendSubtypeRec(child, subtypes.DICTIONARY_KEY_PART) + last_was_colon = child.type == grammar_token.COLON + if child.type == grammar_token.DOUBLESTAR: + unpacking = True + elif last_was_colon: + unpacking = False + + def Visit_expr_stmt(self, node): # pylint: disable=invalid-name + # expr_stmt ::= testlist_star_expr (augassign (yield_expr|testlist) + # | ('=' (yield_expr|testlist_star_expr))*) + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == '=': + _AppendTokenSubtype(child, subtypes.ASSIGN_OPERATOR) + + def Visit_or_test(self, node): # pylint: disable=invalid-name + # or_test ::= and_test ('or' and_test)* + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == 'or': + _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + def Visit_and_test(self, node): # pylint: disable=invalid-name + # and_test ::= not_test ('and' not_test)* for child in node.children: - node_name = pytree_utils.NodeName( child ) - if node_name not in { 'atom', 'COMMA' }: - _AppendFirstLeafTokenSubtype( child, list_subtype ) + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == 'and': + _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + def Visit_not_test(self, node): # pylint: disable=invalid-name + # not_test ::= 'not' not_test | comparison + 
for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == 'not': + _AppendTokenSubtype(child, subtypes.UNARY_OPERATOR) -def _AppendTokenSubtype( node, subtype ): - """Append the token's subtype only if it's not already set.""" - pytree_utils.AppendNodeAnnotation( node, pytree_utils.Annotation.SUBTYPE, subtype ) + def Visit_comparison(self, node): # pylint: disable=invalid-name + # comparison ::= expr (comp_op expr)* + # comp_op ::= '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not in'|'is'|'is not' + for child in node.children: + self.Visit(child) + if (isinstance(child, pytree.Leaf) and + child.value in {'<', '>', '==', '>=', '<=', '<>', '!=', 'in', 'is'}): + _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + elif pytree_utils.NodeName(child) == 'comp_op': + for grandchild in child.children: + _AppendTokenSubtype(grandchild, subtypes.BINARY_OPERATOR) + + def Visit_star_expr(self, node): # pylint: disable=invalid-name + # star_expr ::= '*' expr + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == '*': + _AppendTokenSubtype(child, subtypes.UNARY_OPERATOR) + _AppendTokenSubtype(child, subtypes.VARARGS_STAR) + + def Visit_expr(self, node): # pylint: disable=invalid-name + # expr ::= xor_expr ('|' xor_expr)* + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == '|': + _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + + def Visit_xor_expr(self, node): # pylint: disable=invalid-name + # xor_expr ::= and_expr ('^' and_expr)* + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == '^': + _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + + def Visit_and_expr(self, node): # pylint: disable=invalid-name + # and_expr ::= shift_expr ('&' shift_expr)* + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == '&': + 
_AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + + def Visit_shift_expr(self, node): # pylint: disable=invalid-name + # shift_expr ::= arith_expr (('<<'|'>>') arith_expr)* + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value in {'<<', '>>'}: + _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + + def Visit_arith_expr(self, node): # pylint: disable=invalid-name + # arith_expr ::= term (('+'|'-') term)* + for child in node.children: + self.Visit(child) + if _IsAExprOperator(child): + _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + if _IsSimpleExpression(node): + for child in node.children: + if _IsAExprOperator(child): + _AppendTokenSubtype(child, subtypes.SIMPLE_EXPRESSION) -def _AppendFirstLeafTokenSubtype( node, subtype ): - """Append the first leaf token's subtypes.""" - if isinstance( node, pytree.Leaf ): - _AppendTokenSubtype( node, subtype ) - return - _AppendFirstLeafTokenSubtype( node.children[ 0 ], subtype ) + def Visit_term(self, node): # pylint: disable=invalid-name + # term ::= factor (('*'|'/'|'%'|'//'|'@') factor)* + for child in node.children: + self.Visit(child) + if _IsMExprOperator(child): + _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + + if _IsSimpleExpression(node): + for child in node.children: + if _IsMExprOperator(child): + _AppendTokenSubtype(child, subtypes.SIMPLE_EXPRESSION) + + def Visit_factor(self, node): # pylint: disable=invalid-name + # factor ::= ('+'|'-'|'~') factor | power + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value in '+-~': + _AppendTokenSubtype(child, subtypes.UNARY_OPERATOR) + + def Visit_power(self, node): # pylint: disable=invalid-name + # power ::= atom trailer* ['**' factor] + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == '**': + _AppendTokenSubtype(child, subtypes.BINARY_OPERATOR) + + def Visit_trailer(self, node): # pylint: 
disable=invalid-name + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value in '[]': + _AppendTokenSubtype(child, subtypes.SUBSCRIPT_BRACKET) + + def Visit_subscript(self, node): # pylint: disable=invalid-name + # subscript ::= test | [test] ':' [test] [sliceop] + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == ':': + _AppendTokenSubtype(child, subtypes.SUBSCRIPT_COLON) + + def Visit_sliceop(self, node): # pylint: disable=invalid-name + # sliceop ::= ':' [test] + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == ':': + _AppendTokenSubtype(child, subtypes.SUBSCRIPT_COLON) + + def Visit_argument(self, node): # pylint: disable=invalid-name + # argument ::= + # test [comp_for] | test '=' test + self._ProcessArgLists(node) + #TODO add a subtype to each argument? + + def Visit_arglist(self, node): # pylint: disable=invalid-name + # arglist ::= + # (argument ',')* (argument [','] + # | '*' test (',' argument)* [',' '**' test] + # | '**' test) + self._ProcessArgLists(node) + _SetArgListSubtype( + node, subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) + + def Visit_tname(self, node): # pylint: disable=invalid-name + self._ProcessArgLists(node) + _SetArgListSubtype( + node, subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) + + def Visit_decorator(self, node): # pylint: disable=invalid-name + # decorator ::= + # '@' dotted_name [ '(' [arglist] ')' ] NEWLINE + for child in node.children: + if isinstance(child, pytree.Leaf) and child.value == '@': + _AppendTokenSubtype(child, subtype=subtypes.DECORATOR) + self.Visit(child) + + def Visit_funcdef(self, node): # pylint: disable=invalid-name + # funcdef ::= + # 'def' NAME parameters ['->' test] ':' suite + for child in node.children: + if child.type == grammar_token.NAME and child.value != 'def': + 
_AppendTokenSubtype(child, subtypes.FUNC_DEF) + break + for child in node.children: + self.Visit(child) + + def Visit_parameters(self, node): # pylint: disable=invalid-name + # parameters ::= '(' [typedargslist] ')' + self._ProcessArgLists(node) + if len(node.children) > 2: + _AppendFirstLeafTokenSubtype(node.children[1], subtypes.PARAMETER_START) + _AppendLastLeafTokenSubtype(node.children[-2], subtypes.PARAMETER_STOP) + + def Visit_typedargslist(self, node): # pylint: disable=invalid-name + # typedargslist ::= + # ((tfpdef ['=' test] ',')* + # ('*' [tname] (',' tname ['=' test])* [',' '**' tname] + # | '**' tname) + # | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) + self._ProcessArgLists(node) + _SetArgListSubtype( + node, subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) + tname = False + if not node.children: + return + + _AppendFirstLeafTokenSubtype(node.children[0], subtypes.PARAMETER_START) + _AppendLastLeafTokenSubtype(node.children[-1], subtypes.PARAMETER_STOP) + + tname = pytree_utils.NodeName(node.children[0]) == 'tname' + for i in range(1, len(node.children)): + prev_child = node.children[i - 1] + child = node.children[i] + if prev_child.type == grammar_token.COMMA: + _AppendFirstLeafTokenSubtype(child, subtypes.PARAMETER_START) + elif child.type == grammar_token.COMMA: + _AppendLastLeafTokenSubtype(prev_child, subtypes.PARAMETER_STOP) + + if pytree_utils.NodeName(child) == 'tname': + tname = True + _SetArgListSubtype( + child, subtypes.TYPED_NAME, subtypes.TYPED_NAME_ARG_LIST) + elif child.type == grammar_token.COMMA: + tname = False + elif child.type == grammar_token.EQUAL and tname: + _AppendTokenSubtype(child, subtype=subtypes.TYPED_NAME) + tname = False + + def Visit_varargslist(self, node): # pylint: disable=invalid-name + # varargslist ::= + # ((vfpdef ['=' test] ',')* + # ('*' [vname] (',' vname ['=' test])* [',' '**' vname] + # | '**' vname) + # | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) + 
self._ProcessArgLists(node) + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf) and child.value == '=': + _AppendTokenSubtype(child, subtypes.VARARGS_LIST) + + def Visit_comp_for(self, node): # pylint: disable=invalid-name + # comp_for ::= 'for' exprlist 'in' testlist_safe [comp_iter] + _AppendSubtypeRec(node, subtypes.COMP_FOR) + # Mark the previous node as COMP_EXPR unless this is a nested comprehension + # as these will have the outer comprehension as their previous node. + attr = pytree_utils.GetNodeAnnotation( + node.parent, pytree_utils.Annotation.SUBTYPE) + if not attr or subtypes.COMP_FOR not in attr: + _AppendSubtypeRec(node.parent.children[0], subtypes.COMP_EXPR) + self.DefaultNodeVisit(node) + + def Visit_old_comp_for(self, node): # pylint: disable=invalid-name + # Python 3.7 + self.Visit_comp_for(node) + + def Visit_comp_if(self, node): # pylint: disable=invalid-name + # comp_if ::= 'if' old_test [comp_iter] + _AppendSubtypeRec(node, subtypes.COMP_IF) + self.DefaultNodeVisit(node) + + def Visit_old_comp_if(self, node): # pylint: disable=invalid-name + # Python 3.7 + self.Visit_comp_if(node) + + def _ProcessArgLists(self, node): + """Common method for processing argument lists.""" + for child in node.children: + self.Visit(child) + if isinstance(child, pytree.Leaf): + _AppendTokenSubtype( + child, + subtype=_ARGLIST_TOKEN_TO_SUBTYPE.get(child.value, subtypes.NONE)) -def _AppendLastLeafTokenSubtype( node, subtype ): - """Append the last leaf token's subtypes.""" - if isinstance( node, pytree.Leaf ): - _AppendTokenSubtype( node, subtype ) - return - _AppendLastLeafTokenSubtype( node.children[ -1 ], subtype ) +def _SetArgListSubtype(node, node_subtype, list_subtype): + """Set named assign subtype on elements in a arg list.""" + def HasSubtype(node): + """Return True if the arg list has a named assign subtype.""" + if isinstance(node, pytree.Leaf): + return node_subtype in pytree_utils.GetNodeAnnotation( + node, 
pytree_utils.Annotation.SUBTYPE, set()) -def _AppendSubtypeRec( node, subtype, force = True ): - """Append the leafs in the node to the given subtype.""" - if isinstance( node, pytree.Leaf ): - _AppendTokenSubtype( node, subtype ) - return for child in node.children: - _AppendSubtypeRec( child, subtype, force = force ) - - -def _InsertPseudoParentheses( node ): - """Insert pseudo parentheses so that dicts can be formatted correctly.""" - comment_node = None - if isinstance( node, pytree.Node ): - if node.children[ -1 ].type == grammar_token.COMMENT: - comment_node = node.children[ -1 ].clone() - node.children[ -1 ].remove() - - first = pytree_utils.FirstLeafNode( node ) - last = pytree_utils.LastLeafNode( node ) - - if first == last and first.type == grammar_token.COMMENT: - # A comment was inserted before the value, which is a pytree.Leaf. - # Encompass the dictionary's value into an ATOM node. - last = first.next_sibling - last_clone = last.clone() - new_node = pytree.Node( syms.atom, [ first.clone(), last_clone ] ) - for orig_leaf, clone_leaf in zip( last.leaves(), last_clone.leaves() ): - pytree_utils.CopyYapfAnnotations( orig_leaf, clone_leaf ) - if hasattr( orig_leaf, 'is_pseudo' ): - clone_leaf.is_pseudo = orig_leaf.is_pseudo - - node.replace( new_node ) - node = new_node - last.remove() - - first = pytree_utils.FirstLeafNode( node ) - last = pytree_utils.LastLeafNode( node ) - - lparen = pytree.Leaf( - grammar_token.LPAR, - u'(', - context = ( '', ( first.get_lineno(), first.column - 1 ) ) ) - last_lineno = last.get_lineno() - if last.type == grammar_token.STRING and '\n' in last.value: - last_lineno += last.value.count( '\n' ) - - if last.type == grammar_token.STRING and '\n' in last.value: - last_column = len( last.value.split( '\n' )[ -1 ] ) + 1 - else: - last_column = last.column + len( last.value ) + 1 - rparen = pytree.Leaf( - grammar_token.RPAR, u')', context = ( '', ( last_lineno, last_column ) ) ) - - lparen.is_pseudo = True - rparen.is_pseudo = 
True - - if isinstance( node, pytree.Node ): - node.insert_child( 0, lparen ) - node.append_child( rparen ) - if comment_node: - node.append_child( comment_node ) - _AppendFirstLeafTokenSubtype( node, subtypes.DICTIONARY_VALUE ) - else: - clone = node.clone() - for orig_leaf, clone_leaf in zip( node.leaves(), clone.leaves() ): - pytree_utils.CopyYapfAnnotations( orig_leaf, clone_leaf ) - new_node = pytree.Node( syms.atom, [ lparen, clone, rparen ] ) - node.replace( new_node ) - _AppendFirstLeafTokenSubtype( clone, subtypes.DICTIONARY_VALUE ) - - -def _IsAExprOperator( node ): - return isinstance( node, pytree.Leaf ) and node.value in { '+', '-' } - - -def _IsMExprOperator( node ): - return isinstance( node, - pytree.Leaf ) and node.value in { '*', '/', '%', '//', '@' } - - -def _IsSimpleExpression( node ): - """A node with only leafs as children.""" - return all( isinstance( child, pytree.Leaf ) for child in node.children ) + node_name = pytree_utils.NodeName(child) + if node_name not in {'atom', 'arglist', 'power'}: + if HasSubtype(child): + return True + + return False + + if not HasSubtype(node): + return + + for child in node.children: + node_name = pytree_utils.NodeName(child) + if node_name not in {'atom', 'COMMA'}: + _AppendFirstLeafTokenSubtype(child, list_subtype) + + +def _AppendTokenSubtype(node, subtype): + """Append the token's subtype only if it's not already set.""" + pytree_utils.AppendNodeAnnotation( + node, pytree_utils.Annotation.SUBTYPE, subtype) + + +def _AppendFirstLeafTokenSubtype(node, subtype): + """Append the first leaf token's subtypes.""" + if isinstance(node, pytree.Leaf): + _AppendTokenSubtype(node, subtype) + return + _AppendFirstLeafTokenSubtype(node.children[0], subtype) + + +def _AppendLastLeafTokenSubtype(node, subtype): + """Append the last leaf token's subtypes.""" + if isinstance(node, pytree.Leaf): + _AppendTokenSubtype(node, subtype) + return + _AppendLastLeafTokenSubtype(node.children[-1], subtype) + + +def 
_AppendSubtypeRec(node, subtype, force=True): + """Append the leafs in the node to the given subtype.""" + if isinstance(node, pytree.Leaf): + _AppendTokenSubtype(node, subtype) + return + for child in node.children: + _AppendSubtypeRec(child, subtype, force=force) + + +def _InsertPseudoParentheses(node): + """Insert pseudo parentheses so that dicts can be formatted correctly.""" + comment_node = None + if isinstance(node, pytree.Node): + if node.children[-1].type == grammar_token.COMMENT: + comment_node = node.children[-1].clone() + node.children[-1].remove() + + first = pytree_utils.FirstLeafNode(node) + last = pytree_utils.LastLeafNode(node) + + if first == last and first.type == grammar_token.COMMENT: + # A comment was inserted before the value, which is a pytree.Leaf. + # Encompass the dictionary's value into an ATOM node. + last = first.next_sibling + last_clone = last.clone() + new_node = pytree.Node(syms.atom, [first.clone(), last_clone]) + for orig_leaf, clone_leaf in zip(last.leaves(), last_clone.leaves()): + pytree_utils.CopyYapfAnnotations(orig_leaf, clone_leaf) + if hasattr(orig_leaf, 'is_pseudo'): + clone_leaf.is_pseudo = orig_leaf.is_pseudo + + node.replace(new_node) + node = new_node + last.remove() + + first = pytree_utils.FirstLeafNode(node) + last = pytree_utils.LastLeafNode(node) + + lparen = pytree.Leaf( + grammar_token.LPAR, + u'(', + context=('', (first.get_lineno(), first.column - 1))) + last_lineno = last.get_lineno() + if last.type == grammar_token.STRING and '\n' in last.value: + last_lineno += last.value.count('\n') + + if last.type == grammar_token.STRING and '\n' in last.value: + last_column = len(last.value.split('\n')[-1]) + 1 + else: + last_column = last.column + len(last.value) + 1 + rparen = pytree.Leaf( + grammar_token.RPAR, u')', context=('', (last_lineno, last_column))) + + lparen.is_pseudo = True + rparen.is_pseudo = True + + if isinstance(node, pytree.Node): + node.insert_child(0, lparen) + node.append_child(rparen) + if 
comment_node: + node.append_child(comment_node) + _AppendFirstLeafTokenSubtype(node, subtypes.DICTIONARY_VALUE) + else: + clone = node.clone() + for orig_leaf, clone_leaf in zip(node.leaves(), clone.leaves()): + pytree_utils.CopyYapfAnnotations(orig_leaf, clone_leaf) + new_node = pytree.Node(syms.atom, [lparen, clone, rparen]) + node.replace(new_node) + _AppendFirstLeafTokenSubtype(clone, subtypes.DICTIONARY_VALUE) + + +def _IsAExprOperator(node): + return isinstance(node, pytree.Leaf) and node.value in {'+', '-'} + + +def _IsMExprOperator(node): + return isinstance(node, + pytree.Leaf) and node.value in {'*', '/', '%', '//', '@'} + + +def _IsSimpleExpression(node): + """A node with only leafs as children.""" + return all(isinstance(child, pytree.Leaf) for child in node.children) diff --git a/yapf/third_party/yapf_diff/yapf_diff.py b/yapf/third_party/yapf_diff/yapf_diff.py index afd3ebc91..f069aedcb 100644 --- a/yapf/third_party/yapf_diff/yapf_diff.py +++ b/yapf/third_party/yapf_diff/yapf_diff.py @@ -33,114 +33,114 @@ import sys if sys.version_info.major >= 3: - from io import StringIO + from io import StringIO else: - from io import BytesIO as StringIO + from io import BytesIO as StringIO def main(): - parser = argparse.ArgumentParser( - description = __doc__, formatter_class = argparse.RawDescriptionHelpFormatter ) - parser.add_argument( - '-i', - '--in-place', - action = 'store_true', - default = False, - help = 'apply edits to files instead of displaying a diff' ) - parser.add_argument( - '-p', - '--prefix', - metavar = 'NUM', - default = 1, - help = 'strip the smallest prefix containing P slashes' ) - parser.add_argument( - '--regex', - metavar = 'PATTERN', - default = None, - help = 'custom pattern selecting file paths to reformat ' - '(case sensitive, overrides -iregex)' ) - parser.add_argument( - '--iregex', - metavar = 'PATTERN', - default = r'.*\.(py)', - help = 'custom pattern selecting file paths to reformat ' - '(case insensitive, overridden by 
-regex)' ) - parser.add_argument( - '-v', - '--verbose', - action = 'store_true', - help = 'be more verbose, ineffective without -i' ) - parser.add_argument( - '--style', - help = 'specify formatting style: either a style name (for ' - 'example "pep8" or "google"), or the name of a file with ' - 'style settings. The default is pep8 unless a ' - '.style.yapf or setup.cfg file located in one of the ' - 'parent directories of the source file (or current ' - 'directory for stdin)' ) - parser.add_argument( - '--binary', default = 'yapf', help = 'location of binary to use for yapf' ) - args = parser.parse_args() + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) + parser.add_argument( + '-i', + '--in-place', + action='store_true', + default=False, + help='apply edits to files instead of displaying a diff') + parser.add_argument( + '-p', + '--prefix', + metavar='NUM', + default=1, + help='strip the smallest prefix containing P slashes') + parser.add_argument( + '--regex', + metavar='PATTERN', + default=None, + help='custom pattern selecting file paths to reformat ' + '(case sensitive, overrides -iregex)') + parser.add_argument( + '--iregex', + metavar='PATTERN', + default=r'.*\.(py)', + help='custom pattern selecting file paths to reformat ' + '(case insensitive, overridden by -regex)') + parser.add_argument( + '-v', + '--verbose', + action='store_true', + help='be more verbose, ineffective without -i') + parser.add_argument( + '--style', + help='specify formatting style: either a style name (for ' + 'example "pep8" or "google"), or the name of a file with ' + 'style settings. The default is pep8 unless a ' + '.style.yapf or setup.cfg file located in one of the ' + 'parent directories of the source file (or current ' + 'directory for stdin)') + parser.add_argument( + '--binary', default='yapf', help='location of binary to use for yapf') + args = parser.parse_args() - # Extract changed lines for each file. 
- filename = None - lines_by_file = {} - for line in sys.stdin: - match = re.search( r'^\+\+\+\ (.*?/){%s}(\S*)' % args.prefix, line ) - if match: - filename = match.group( 2 ) - if filename is None: - continue + # Extract changed lines for each file. + filename = None + lines_by_file = {} + for line in sys.stdin: + match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.prefix, line) + if match: + filename = match.group(2) + if filename is None: + continue - if args.regex is not None: - if not re.match( '^%s$' % args.regex, filename ): - continue - elif not re.match( '^%s$' % args.iregex, filename, re.IGNORECASE ): - continue + if args.regex is not None: + if not re.match('^%s$' % args.regex, filename): + continue + elif not re.match('^%s$' % args.iregex, filename, re.IGNORECASE): + continue - match = re.search( r'^@@.*\+(\d+)(,(\d+))?', line ) - if match: - start_line = int( match.group( 1 ) ) - line_count = 1 - if match.group( 3 ): - line_count = int( match.group( 3 ) ) - if line_count == 0: - continue - end_line = start_line + line_count - 1 - lines_by_file.setdefault( filename, [] ).extend( - [ '--lines', str( start_line ) + '-' + str( end_line ) ] ) + match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line) + if match: + start_line = int(match.group(1)) + line_count = 1 + if match.group(3): + line_count = int(match.group(3)) + if line_count == 0: + continue + end_line = start_line + line_count - 1 + lines_by_file.setdefault(filename, []).extend( + ['--lines', str(start_line) + '-' + str(end_line)]) - # Reformat files containing changes in place. 
- for filename, lines in lines_by_file.items(): - if args.in_place and args.verbose: - print( 'Formatting {}'.format( filename ) ) - command = [ args.binary, filename ] - if args.in_place: - command.append( '-i' ) - command.extend( lines ) - if args.style: - command.extend( [ '--style', args.style ] ) - p = subprocess.Popen( - command, - stdout = subprocess.PIPE, - stderr = None, - stdin = subprocess.PIPE, - universal_newlines = True ) - stdout, stderr = p.communicate() - if p.returncode != 0: - sys.exit( p.returncode ) + # Reformat files containing changes in place. + for filename, lines in lines_by_file.items(): + if args.in_place and args.verbose: + print('Formatting {}'.format(filename)) + command = [args.binary, filename] + if args.in_place: + command.append('-i') + command.extend(lines) + if args.style: + command.extend(['--style', args.style]) + p = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=None, + stdin=subprocess.PIPE, + universal_newlines=True) + stdout, stderr = p.communicate() + if p.returncode != 0: + sys.exit(p.returncode) - if not args.in_place: - with open( filename ) as f: - code = f.readlines() - formatted_code = StringIO( stdout ).readlines() - diff = difflib.unified_diff( - code, formatted_code, filename, filename, '(before formatting)', - '(after formatting)' ) - diff_string = ''.join( diff ) - if len( diff_string ) > 0: - sys.stdout.write( diff_string ) + if not args.in_place: + with open(filename) as f: + code = f.readlines() + formatted_code = StringIO(stdout).readlines() + diff = difflib.unified_diff( + code, formatted_code, filename, filename, '(before formatting)', + '(after formatting)') + diff_string = ''.join(diff) + if len(diff_string) > 0: + sys.stdout.write(diff_string) if __name__ == '__main__': - main() + main() diff --git a/yapf/yapflib/errors.py b/yapf/yapflib/errors.py index 8864b49c6..cb8694d2c 100644 --- a/yapf/yapflib/errors.py +++ b/yapf/yapflib/errors.py @@ -16,8 +16,8 @@ from lib2to3.pgen2 import 
tokenize -def FormatErrorMsg( e ): - """Convert an exception into a standard format. +def FormatErrorMsg(e): + """Convert an exception into a standard format. The standard error message format is: @@ -29,19 +29,18 @@ def FormatErrorMsg( e ): Returns: A properly formatted error message string. """ - if isinstance( e, SyntaxError ): - return '{}:{}:{}: {}'.format( e.filename, e.lineno, e.offset, e.msg ) - if isinstance( e, tokenize.TokenError ): - return '{}:{}:{}: {}'.format( - e.filename, e.args[ 1 ][ 0 ], e.args[ 1 ][ 1 ], e.args[ 0 ] ) + if isinstance(e, SyntaxError): + return '{}:{}:{}: {}'.format(e.filename, e.lineno, e.offset, e.msg) + if isinstance(e, tokenize.TokenError): return '{}:{}:{}: {}'.format( - e.args[ 1 ][ 0 ], e.args[ 1 ][ 1 ], e.args[ 1 ][ 2 ], e.msg ) + e.filename, e.args[1][0], e.args[1][1], e.args[0]) + return '{}:{}:{}: {}'.format(e.args[1][0], e.args[1][1], e.args[1][2], e.msg) -class YapfError( Exception ): - """Parent class for user errors or input errors. +class YapfError(Exception): + """Parent class for user errors or input errors. Exceptions of this type are handled by the command line tool and result in clear error messages, as opposed to backtraces. 
""" - pass + pass diff --git a/yapf/yapflib/file_resources.py b/yapf/yapflib/file_resources.py index 07ee951a2..9c071db3d 100644 --- a/yapf/yapflib/file_resources.py +++ b/yapf/yapflib/file_resources.py @@ -30,43 +30,44 @@ CRLF = '\r\n' -def _GetExcludePatternsFromYapfIgnore( filename ): - """Get a list of file patterns to ignore from .yapfignore.""" - ignore_patterns = [] - if os.path.isfile( filename ) and os.access( filename, os.R_OK ): - with open( filename, 'r' ) as fd: - for line in fd: - if line.strip() and not line.startswith( '#' ): - ignore_patterns.append( line.strip() ) +def _GetExcludePatternsFromYapfIgnore(filename): + """Get a list of file patterns to ignore from .yapfignore.""" + ignore_patterns = [] + if os.path.isfile(filename) and os.access(filename, os.R_OK): + with open(filename, 'r') as fd: + for line in fd: + if line.strip() and not line.startswith('#'): + ignore_patterns.append(line.strip()) - if any( e.startswith( './' ) for e in ignore_patterns ): - raise errors.YapfError( 'path in .yapfignore should not start with ./' ) + if any(e.startswith('./') for e in ignore_patterns): + raise errors.YapfError('path in .yapfignore should not start with ./') - return ignore_patterns + return ignore_patterns -def _GetExcludePatternsFromPyprojectToml( filename ): - """Get a list of file patterns to ignore from pyproject.toml.""" - ignore_patterns = [] - try: - import toml - except ImportError: - raise errors.YapfError( - "toml package is needed for using pyproject.toml as a " - "configuration file" ) +def _GetExcludePatternsFromPyprojectToml(filename): + """Get a list of file patterns to ignore from pyproject.toml.""" + ignore_patterns = [] + try: + import toml + except ImportError: + raise errors.YapfError( + "toml package is needed for using pyproject.toml as a " + "configuration file") - if os.path.isfile( filename ) and os.access( filename, os.R_OK ): - pyproject_toml = toml.load( filename ) - ignore_patterns = pyproject_toml.get( 'tool', {} ).get( 
'yapfignore', {} ).get( - 'ignore_patterns', [] ) - if any( e.startswith( './' ) for e in ignore_patterns ): - raise errors.YapfError( 'path in pyproject.toml should not start with ./' ) + if os.path.isfile(filename) and os.access(filename, os.R_OK): + pyproject_toml = toml.load(filename) + ignore_patterns = pyproject_toml.get('tool', + {}).get('yapfignore', + {}).get('ignore_patterns', []) + if any(e.startswith('./') for e in ignore_patterns): + raise errors.YapfError('path in pyproject.toml should not start with ./') - return ignore_patterns + return ignore_patterns -def GetExcludePatternsForDir( dirname ): - """Return patterns of files to exclude from ignorefile in a given directory. +def GetExcludePatternsForDir(dirname): + """Return patterns of files to exclude from ignorefile in a given directory. Looks for .yapfignore in the directory dirname. @@ -77,20 +78,20 @@ def GetExcludePatternsForDir( dirname ): A List of file patterns to exclude if ignore file is found, otherwise empty List. """ - ignore_patterns = [] + ignore_patterns = [] - yapfignore_file = os.path.join( dirname, '.yapfignore' ) - if os.path.exists( yapfignore_file ): - ignore_patterns += _GetExcludePatternsFromYapfIgnore( yapfignore_file ) + yapfignore_file = os.path.join(dirname, '.yapfignore') + if os.path.exists(yapfignore_file): + ignore_patterns += _GetExcludePatternsFromYapfIgnore(yapfignore_file) - pyproject_toml_file = os.path.join( dirname, 'pyproject.toml' ) - if os.path.exists( pyproject_toml_file ): - ignore_patterns += _GetExcludePatternsFromPyprojectToml( pyproject_toml_file ) - return ignore_patterns + pyproject_toml_file = os.path.join(dirname, 'pyproject.toml') + if os.path.exists(pyproject_toml_file): + ignore_patterns += _GetExcludePatternsFromPyprojectToml(pyproject_toml_file) + return ignore_patterns -def GetDefaultStyleForDir( dirname, default_style = style.DEFAULT_STYLE ): - """Return default style name for a given directory. 
+def GetDefaultStyleForDir(dirname, default_style=style.DEFAULT_STYLE): + """Return default style name for a given directory. Looks for .style.yapf or setup.cfg or pyproject.toml in the parent directories. @@ -103,65 +104,66 @@ def GetDefaultStyleForDir( dirname, default_style = style.DEFAULT_STYLE ): Returns: The filename if found, otherwise return the default style. """ - dirname = os.path.abspath( dirname ) - while True: - # See if we have a .style.yapf file. - style_file = os.path.join( dirname, style.LOCAL_STYLE ) - if os.path.exists( style_file ): - return style_file - - # See if we have a setup.cfg file with a '[yapf]' section. - config_file = os.path.join( dirname, style.SETUP_CONFIG ) - try: - fd = open( config_file ) - except IOError: - pass # It's okay if it's not there. - else: - with fd: - config = py3compat.ConfigParser() - config.read_file( fd ) - if config.has_section( 'yapf' ): - return config_file - - # See if we have a pyproject.toml file with a '[tool.yapf]' section. - config_file = os.path.join( dirname, style.PYPROJECT_TOML ) + dirname = os.path.abspath(dirname) + while True: + # See if we have a .style.yapf file. + style_file = os.path.join(dirname, style.LOCAL_STYLE) + if os.path.exists(style_file): + return style_file + + # See if we have a setup.cfg file with a '[yapf]' section. + config_file = os.path.join(dirname, style.SETUP_CONFIG) + try: + fd = open(config_file) + except IOError: + pass # It's okay if it's not there. + else: + with fd: + config = py3compat.ConfigParser() + config.read_file(fd) + if config.has_section('yapf'): + return config_file + + # See if we have a pyproject.toml file with a '[tool.yapf]' section. + config_file = os.path.join(dirname, style.PYPROJECT_TOML) + try: + fd = open(config_file) + except IOError: + pass # It's okay if it's not there. + else: + with fd: try: - fd = open( config_file ) - except IOError: - pass # It's okay if it's not there. 
- else: - with fd: - try: - import toml - except ImportError: - raise errors.YapfError( - "toml package is needed for using pyproject.toml as a " - "configuration file" ) + import toml + except ImportError: + raise errors.YapfError( + "toml package is needed for using pyproject.toml as a " + "configuration file") - pyproject_toml = toml.load( config_file ) - style_dict = pyproject_toml.get( 'tool', {} ).get( 'yapf', None ) - if style_dict is not None: - return config_file + pyproject_toml = toml.load(config_file) + style_dict = pyproject_toml.get('tool', {}).get('yapf', None) + if style_dict is not None: + return config_file - if ( not dirname or not os.path.basename( dirname ) or - dirname == os.path.abspath( os.path.sep ) ): - break - dirname = os.path.dirname( dirname ) + if (not dirname or not os.path.basename(dirname) or + dirname == os.path.abspath(os.path.sep)): + break + dirname = os.path.dirname(dirname) - global_file = os.path.expanduser( style.GLOBAL_STYLE ) - if os.path.exists( global_file ): - return global_file + global_file = os.path.expanduser(style.GLOBAL_STYLE) + if os.path.exists(global_file): + return global_file - return default_style + return default_style -def GetCommandLineFiles( command_line_file_list, recursive, exclude ): - """Return the list of files specified on the command line.""" - return _FindPythonFiles( command_line_file_list, recursive, exclude ) +def GetCommandLineFiles(command_line_file_list, recursive, exclude): + """Return the list of files specified on the command line.""" + return _FindPythonFiles(command_line_file_list, recursive, exclude) -def WriteReformattedCode( filename, reformatted_code, encoding = '', in_place = False ): - """Emit the reformatted code. +def WriteReformattedCode( + filename, reformatted_code, encoding='', in_place=False): + """Emit the reformatted code. Write the reformatted code into the file, if in_place is True. Otherwise, write to stdout. 
@@ -172,117 +174,117 @@ def WriteReformattedCode( filename, reformatted_code, encoding = '', in_place = encoding: (unicode) The encoding of the file. in_place: (bool) If True, then write the reformatted code to the file. """ - if in_place: - with py3compat.open_with_encoding( filename, mode = 'w', encoding = encoding, - newline = '' ) as fd: - fd.write( reformatted_code ) - else: - py3compat.EncodeAndWriteToStdout( reformatted_code ) - - -def LineEnding( lines ): - """Retrieve the line ending of the original source.""" - endings = { CRLF: 0, CR: 0, LF: 0} - for line in lines: - if line.endswith( CRLF ): - endings[ CRLF ] += 1 - elif line.endswith( CR ): - endings[ CR ] += 1 - elif line.endswith( LF ): - endings[ LF ] += 1 - return ( sorted( endings, key = endings.get, reverse = True ) or [ LF ] )[ 0 ] - - -def _FindPythonFiles( filenames, recursive, exclude ): - """Find all Python files.""" - if exclude and any( e.startswith( './' ) for e in exclude ): - raise errors.YapfError( "path in '--exclude' should not start with ./" ) - exclude = exclude and [ e.rstrip( "/" + os.path.sep ) for e in exclude ] - - python_files = [] - for filename in filenames: - if filename != '.' 
and exclude and IsIgnored( filename, exclude ): + if in_place: + with py3compat.open_with_encoding(filename, mode='w', encoding=encoding, + newline='') as fd: + fd.write(reformatted_code) + else: + py3compat.EncodeAndWriteToStdout(reformatted_code) + + +def LineEnding(lines): + """Retrieve the line ending of the original source.""" + endings = {CRLF: 0, CR: 0, LF: 0} + for line in lines: + if line.endswith(CRLF): + endings[CRLF] += 1 + elif line.endswith(CR): + endings[CR] += 1 + elif line.endswith(LF): + endings[LF] += 1 + return (sorted(endings, key=endings.get, reverse=True) or [LF])[0] + + +def _FindPythonFiles(filenames, recursive, exclude): + """Find all Python files.""" + if exclude and any(e.startswith('./') for e in exclude): + raise errors.YapfError("path in '--exclude' should not start with ./") + exclude = exclude and [e.rstrip("/" + os.path.sep) for e in exclude] + + python_files = [] + for filename in filenames: + if filename != '.' and exclude and IsIgnored(filename, exclude): + continue + if os.path.isdir(filename): + if not recursive: + raise errors.YapfError( + "directory specified without '--recursive' flag: %s" % filename) + + # TODO(morbo): Look into a version of os.walk that can handle recursion. + excluded_dirs = [] + for dirpath, dirnames, filelist in os.walk(filename): + if dirpath != '.' and exclude and IsIgnored(dirpath, exclude): + excluded_dirs.append(dirpath) + continue + elif any(dirpath.startswith(e) for e in excluded_dirs): + continue + for f in filelist: + filepath = os.path.join(dirpath, f) + if exclude and IsIgnored(filepath, exclude): continue - if os.path.isdir( filename ): - if not recursive: - raise errors.YapfError( - "directory specified without '--recursive' flag: %s" % filename ) - - # TODO(morbo): Look into a version of os.walk that can handle recursion. - excluded_dirs = [] - for dirpath, dirnames, filelist in os.walk( filename ): - if dirpath != '.' 
and exclude and IsIgnored( dirpath, exclude ): - excluded_dirs.append( dirpath ) - continue - elif any( dirpath.startswith( e ) for e in excluded_dirs ): - continue - for f in filelist: - filepath = os.path.join( dirpath, f ) - if exclude and IsIgnored( filepath, exclude ): - continue - if IsPythonFile( filepath ): - python_files.append( filepath ) - # To prevent it from scanning the contents excluded folders, os.walk() - # lets you amend its list of child dirs `dirnames`. These edits must be - # made in-place instead of creating a modified copy of `dirnames`. - # list.remove() is slow and list.pop() is a headache. Instead clear - # `dirnames` then repopulate it. - dirnames_ = [ dirnames.pop( 0 ) for i in range( len( dirnames ) ) ] - for dirname in dirnames_: - dir_ = os.path.join( dirpath, dirname ) - if IsIgnored( dir_, exclude ): - excluded_dirs.append( dir_ ) - else: - dirnames.append( dirname ) - - elif os.path.isfile( filename ): - python_files.append( filename ) - - return python_files - - -def IsIgnored( path, exclude ): - """Return True if filename matches any patterns in exclude.""" - if exclude is None: - return False - path = path.lstrip( os.path.sep ) - while path.startswith( '.' + os.path.sep ): - path = path[ 2 : ] - return any( fnmatch.fnmatch( path, e.rstrip( os.path.sep ) ) for e in exclude ) - - -def IsPythonFile( filename ): - """Return True if filename is a Python file.""" - if os.path.splitext( filename )[ 1 ] == '.py': - return True - - try: - with open( filename, 'rb' ) as fd: - encoding = py3compat.detect_encoding( fd.readline )[ 0 ] - - # Check for correctness of encoding. - with py3compat.open_with_encoding( filename, mode = 'r', - encoding = encoding ) as fd: - fd.read() - except UnicodeDecodeError: - encoding = 'latin-1' - except ( IOError, SyntaxError ): - # If we fail to detect encoding (or the encoding cookie is incorrect - which - # will make detect_encoding raise SyntaxError), assume it's not a Python - # file. 
- return False - - try: - with py3compat.open_with_encoding( filename, mode = 'r', - encoding = encoding ) as fd: - first_line = fd.readline( 256 ) - except IOError: - return False - - return re.match( r'^#!.*\bpython[23]?\b', first_line ) - - -def FileEncoding( filename ): - """Return the file's encoding.""" - with open( filename, 'rb' ) as fd: - return py3compat.detect_encoding( fd.readline )[ 0 ] + if IsPythonFile(filepath): + python_files.append(filepath) + # To prevent it from scanning the contents excluded folders, os.walk() + # lets you amend its list of child dirs `dirnames`. These edits must be + # made in-place instead of creating a modified copy of `dirnames`. + # list.remove() is slow and list.pop() is a headache. Instead clear + # `dirnames` then repopulate it. + dirnames_ = [dirnames.pop(0) for i in range(len(dirnames))] + for dirname in dirnames_: + dir_ = os.path.join(dirpath, dirname) + if IsIgnored(dir_, exclude): + excluded_dirs.append(dir_) + else: + dirnames.append(dirname) + + elif os.path.isfile(filename): + python_files.append(filename) + + return python_files + + +def IsIgnored(path, exclude): + """Return True if filename matches any patterns in exclude.""" + if exclude is None: + return False + path = path.lstrip(os.path.sep) + while path.startswith('.' + os.path.sep): + path = path[2:] + return any(fnmatch.fnmatch(path, e.rstrip(os.path.sep)) for e in exclude) + + +def IsPythonFile(filename): + """Return True if filename is a Python file.""" + if os.path.splitext(filename)[1] == '.py': + return True + + try: + with open(filename, 'rb') as fd: + encoding = py3compat.detect_encoding(fd.readline)[0] + + # Check for correctness of encoding. 
+ with py3compat.open_with_encoding(filename, mode='r', + encoding=encoding) as fd: + fd.read() + except UnicodeDecodeError: + encoding = 'latin-1' + except (IOError, SyntaxError): + # If we fail to detect encoding (or the encoding cookie is incorrect - which + # will make detect_encoding raise SyntaxError), assume it's not a Python + # file. + return False + + try: + with py3compat.open_with_encoding(filename, mode='r', + encoding=encoding) as fd: + first_line = fd.readline(256) + except IOError: + return False + + return re.match(r'^#!.*\bpython[23]?\b', first_line) + + +def FileEncoding(filename): + """Return the file's encoding.""" + with open(filename, 'rb') as fd: + return py3compat.detect_encoding(fd.readline)[0] diff --git a/yapf/yapflib/format_decision_state.py b/yapf/yapflib/format_decision_state.py index bd08aa9ba..40bf5e25b 100644 --- a/yapf/yapflib/format_decision_state.py +++ b/yapf/yapflib/format_decision_state.py @@ -33,8 +33,8 @@ from yapf.yapflib import subtypes -class FormatDecisionState( object ): - """The current state when indenting a logical line. +class FormatDecisionState(object): + """The current state when indenting a logical line. The FormatDecisionState object is meant to be copied instead of referenced. @@ -56,8 +56,8 @@ class FormatDecisionState( object ): column_limit: The column limit specified by the style. """ - def __init__( self, line, first_indent ): - """Initializer. + def __init__(self, line, first_indent): + """Initializer. Initializes to the state after placing the first token from 'line' at 'first_indent'. @@ -66,64 +66,65 @@ def __init__( self, line, first_indent ): line: (LogicalLine) The logical line we're currently processing. first_indent: (int) The indent of the first token. 
""" - self.next_token = line.first - self.column = first_indent - self.line = line - self.paren_level = 0 - self.lowest_level_on_line = 0 - self.ignore_stack_for_comparison = False - self.stack = [ _ParenState( first_indent, first_indent ) ] - self.comp_stack = [] - self.param_list_stack = [] - self.first_indent = first_indent - self.column_limit = style.Get( 'COLUMN_LIMIT' ) - - def Clone( self ): - """Clones a FormatDecisionState object.""" - new = FormatDecisionState( self.line, self.first_indent ) - new.next_token = self.next_token - new.column = self.column - new.line = self.line - new.paren_level = self.paren_level - new.line.depth = self.line.depth - new.lowest_level_on_line = self.lowest_level_on_line - new.ignore_stack_for_comparison = self.ignore_stack_for_comparison - new.first_indent = self.first_indent - new.stack = [ state.Clone() for state in self.stack ] - new.comp_stack = [ state.Clone() for state in self.comp_stack ] - new.param_list_stack = [ state.Clone() for state in self.param_list_stack ] - return new - - def __eq__( self, other ): - # Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous', - # because it shouldn't have a bearing on this comparison. (I.e., it will - # report equal if 'next_token' does.) 
- return ( - self.next_token == other.next_token and self.column == other.column and - self.paren_level == other.paren_level and - self.line.depth == other.line.depth and - self.lowest_level_on_line == other.lowest_level_on_line and ( - self.ignore_stack_for_comparison or other.ignore_stack_for_comparison or - self.stack == other.stack and self.comp_stack == other.comp_stack and - self.param_list_stack == other.param_list_stack ) ) - - def __ne__( self, other ): - return not self == other - - def __hash__( self ): - return hash( - ( - self.next_token, self.column, self.paren_level, self.line.depth, - self.lowest_level_on_line ) ) - - def __repr__( self ): - return ( - 'column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' % ( - self.column, repr( self.next_token ), self.paren_level, - '\n\t'.join( repr( s ) for s in self.stack ) + ']' ) ) - - def CanSplit( self, must_split ): - """Determine if we can split before the next token. + self.next_token = line.first + self.column = first_indent + self.line = line + self.paren_level = 0 + self.lowest_level_on_line = 0 + self.ignore_stack_for_comparison = False + self.stack = [_ParenState(first_indent, first_indent)] + self.comp_stack = [] + self.param_list_stack = [] + self.first_indent = first_indent + self.column_limit = style.Get('COLUMN_LIMIT') + + def Clone(self): + """Clones a FormatDecisionState object.""" + new = FormatDecisionState(self.line, self.first_indent) + new.next_token = self.next_token + new.column = self.column + new.line = self.line + new.paren_level = self.paren_level + new.line.depth = self.line.depth + new.lowest_level_on_line = self.lowest_level_on_line + new.ignore_stack_for_comparison = self.ignore_stack_for_comparison + new.first_indent = self.first_indent + new.stack = [state.Clone() for state in self.stack] + new.comp_stack = [state.Clone() for state in self.comp_stack] + new.param_list_stack = [state.Clone() for state in self.param_list_stack] + return new + + def __eq__(self, other): 
+ # Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous', + # because it shouldn't have a bearing on this comparison. (I.e., it will + # report equal if 'next_token' does.) + return ( + self.next_token == other.next_token and self.column == other.column and + self.paren_level == other.paren_level and + self.line.depth == other.line.depth and + self.lowest_level_on_line == other.lowest_level_on_line and ( + self.ignore_stack_for_comparison or + other.ignore_stack_for_comparison or self.stack == other.stack and + self.comp_stack == other.comp_stack and + self.param_list_stack == other.param_list_stack)) + + def __ne__(self, other): + return not self == other + + def __hash__(self): + return hash( + ( + self.next_token, self.column, self.paren_level, self.line.depth, + self.lowest_level_on_line)) + + def __repr__(self): + return ( + 'column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' % ( + self.column, repr(self.next_token), self.paren_level, + '\n\t'.join(repr(s) for s in self.stack) + ']')) + + def CanSplit(self, must_split): + """Determine if we can split before the next token. Arguments: must_split: (bool) A newline was required before this token. @@ -131,443 +132,436 @@ def CanSplit( self, must_split ): Returns: True if the line can be split before the next token. """ - current = self.next_token - previous = current.previous_token + current = self.next_token + previous = current.previous_token + + if current.is_pseudo: + return False + + if (not must_split and subtypes.DICTIONARY_KEY_PART in current.subtypes and + subtypes.DICTIONARY_KEY not in current.subtypes and + not style.Get('ALLOW_MULTILINE_DICTIONARY_KEYS')): + # In some situations, a dictionary may be multiline, but pylint doesn't + # like it. So don't allow it unless forced to. 
+ return False + + if (not must_split and subtypes.DICTIONARY_VALUE in current.subtypes and + not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE')): + return False + + if previous and previous.value == '(' and current.value == ')': + # Don't split an empty function call list if we aren't splitting before + # dict values. + token = previous.previous_token + while token: + prev = token.previous_token + if not prev or prev.name not in {'NAME', 'DOT'}: + break + token = token.previous_token + if token and subtypes.DICTIONARY_VALUE in token.subtypes: + if not style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE'): + return False + + if previous and previous.value == '.' and current.value == '.': + return False + + return current.can_break_before + + def MustSplit(self): + """Returns True if the line must split before the next token.""" + current = self.next_token + previous = current.previous_token + + if current.is_pseudo: + return False + + if current.must_break_before: + return True + + if not previous: + return False + + if style.Get('SPLIT_ALL_COMMA_SEPARATED_VALUES') and previous.value == ',': + return True + + if (style.Get('SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES') and + previous.value == ','): + # Avoid breaking in a container that fits in the current line if possible + opening = _GetOpeningBracket(current) + + # Can't find opening bracket, behave the same way as + # SPLIT_ALL_COMMA_SEPARATED_VALUES. + if not opening: + return True + + if current.is_comment: + # Don't require splitting before a comment, since it may be related to + # the current line. + return False - if current.is_pseudo: - return False + # Allow the fallthrough code to handle the closing bracket. 
+ if current != opening.matching_bracket: + # If the container doesn't fit in the current line, must split + return not self._ContainerFitsOnStartLine(opening) + + if (self.stack[-1].split_before_closing_bracket and + (current.value in '}]' and style.Get('SPLIT_BEFORE_CLOSING_BRACKET') or + current.value in '}])' and style.Get('INDENT_CLOSING_BRACKETS'))): + # Split before the closing bracket if we can. + if subtypes.SUBSCRIPT_BRACKET not in current.subtypes: + return current.node_split_penalty != split_penalty.UNBREAKABLE + + if (current.value == ')' and previous.value == ',' and + not _IsSingleElementTuple(current.matching_bracket)): + return True + + # Prevent splitting before the first argument in compound statements + # with the exception of function declarations. + if (style.Get('SPLIT_BEFORE_FIRST_ARGUMENT') and + _IsCompoundStatement(self.line.first) and + not _IsFunctionDef(self.line.first)): + return False + + ########################################################################### + # List Splitting + if (style.Get('DEDENT_CLOSING_BRACKETS') or + style.Get('INDENT_CLOSING_BRACKETS') or + style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')): + bracket = current if current.ClosesScope() else previous + if subtypes.SUBSCRIPT_BRACKET not in bracket.subtypes: + if bracket.OpensScope(): + if style.Get('COALESCE_BRACKETS'): + if current.OpensScope(): + # Prefer to keep all opening brackets together. + return False + + if (not _IsLastScopeInLine(bracket) or + logical_line.IsSurroundedByBrackets(bracket)): + last_token = bracket.matching_bracket + else: + last_token = _LastTokenInLine(bracket.matching_bracket) + + if not self._FitsOnLine(bracket, last_token): + # Split before the first element if the whole list can't fit on a + # single line. 
+ self.stack[-1].split_before_closing_bracket = True + return True - if ( not must_split and subtypes.DICTIONARY_KEY_PART in current.subtypes and - subtypes.DICTIONARY_KEY not in current.subtypes and - not style.Get( 'ALLOW_MULTILINE_DICTIONARY_KEYS' ) ): - # In some situations, a dictionary may be multiline, but pylint doesn't - # like it. So don't allow it unless forced to. + elif (style.Get('DEDENT_CLOSING_BRACKETS') or + style.Get('INDENT_CLOSING_BRACKETS')) and current.ClosesScope(): + # Split before and dedent the closing bracket. + return self.stack[-1].split_before_closing_bracket + + if (style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') and + current.is_name): + # An expression that's surrounded by parens gets split after the opening + # parenthesis. + def SurroundedByParens(token): + """Check if it's an expression surrounded by parentheses.""" + while token: + if token.value == ',': return False + if token.value == ')': + return not token.next_token + if token.OpensScope(): + token = token.matching_bracket.next_token + else: + token = token.next_token + return False - if ( not must_split and subtypes.DICTIONARY_VALUE in current.subtypes and - not style.Get( 'ALLOW_SPLIT_BEFORE_DICT_VALUE' ) ): + if (previous.value == '(' and not previous.is_pseudo and + not logical_line.IsSurroundedByBrackets(previous)): + pptoken = previous.previous_token + if (pptoken and not pptoken.is_name and not pptoken.is_keyword and + SurroundedByParens(current)): + return True + + if (current.is_name or current.is_string) and previous.value == ',': + # If the list has function calls in it and the full list itself cannot + # fit on the line, then we want to split. Otherwise, we'll get something + # like this: + # + # X = [ + # Bar(xxx='some string', + # yyy='another long string', + # zzz='a third long string'), Bar( + # xxx='some string', + # yyy='another long string', + # zzz='a third long string') + # ] + # + # or when a string formatting syntax. 
+ func_call_or_string_format = False + tok = current.next_token + if current.is_name: + while tok and (tok.is_name or tok.value == '.'): + tok = tok.next_token + func_call_or_string_format = tok and tok.value == '(' + elif current.is_string: + while tok and tok.is_string: + tok = tok.next_token + func_call_or_string_format = tok and tok.value == '%' + if func_call_or_string_format: + open_bracket = logical_line.IsSurroundedByBrackets(current) + if open_bracket: + if open_bracket.value in '[{': + if not self._FitsOnLine(open_bracket, + open_bracket.matching_bracket): + return True + elif tok.value == '(': + if not self._FitsOnLine(current, tok.matching_bracket): + return True + + if (current.OpensScope() and previous.value == ',' and + subtypes.DICTIONARY_KEY not in current.next_token.subtypes): + # If we have a list of tuples, then we can get a similar look as above. If + # the full list cannot fit on the line, then we want a split. + open_bracket = logical_line.IsSurroundedByBrackets(current) + if (open_bracket and open_bracket.value in '[{' and + subtypes.SUBSCRIPT_BRACKET not in open_bracket.subtypes): + if not self._FitsOnLine(current, current.matching_bracket): + return True + + ########################################################################### + # Dict/Set Splitting + if (style.Get('EACH_DICT_ENTRY_ON_SEPARATE_LINE') and + subtypes.DICTIONARY_KEY in current.subtypes and not current.is_comment): + # Place each dictionary entry onto its own line. + if previous.value == '{' and previous.previous_token: + opening = _GetOpeningBracket(previous.previous_token) + if (opening and opening.value == '(' and opening.previous_token and + opening.previous_token.is_name): + # This is a dictionary that's an argument to a function. 
+ if (self._FitsOnLine(previous, previous.matching_bracket) and + previous.matching_bracket.next_token and + (not opening.matching_bracket.next_token or + opening.matching_bracket.next_token.value != '.') and + _ScopeHasNoCommas(previous)): + # Don't split before the key if: + # - The dictionary fits on a line, and + # - The function call isn't part of a builder-style call and + # - The dictionary has one entry and no trailing comma return False - - if previous and previous.value == '(' and current.value == ')': - # Don't split an empty function call list if we aren't splitting before - # dict values. - token = previous.previous_token - while token: - prev = token.previous_token - if not prev or prev.name not in { 'NAME', 'DOT' }: - break - token = token.previous_token - if token and subtypes.DICTIONARY_VALUE in token.subtypes: - if not style.Get( 'ALLOW_SPLIT_BEFORE_DICT_VALUE' ): - return False - - if previous and previous.value == '.' and current.value == '.': + return True + + if (style.Get('SPLIT_BEFORE_DICT_SET_GENERATOR') and + subtypes.DICT_SET_GENERATOR in current.subtypes): + # Split before a dict/set generator. + return True + + if (subtypes.DICTIONARY_VALUE in current.subtypes or + (previous.is_pseudo and previous.value == '(' and + not current.is_comment)): + # Split before the dictionary value if we can't fit every dictionary + # entry on its own line. + if not current.OpensScope(): + opening = _GetOpeningBracket(current) + if not self._EachDictEntryFitsOnOneLine(opening): + return style.Get('ALLOW_SPLIT_BEFORE_DICT_VALUE') + + if previous.value == '{': + # Split if the dict/set cannot fit on one line and ends in a comma. 
+ closing = previous.matching_bracket + if (not self._FitsOnLine(previous, closing) and + closing.previous_token.value == ','): + self.stack[-1].split_before_closing_bracket = True + return True + + ########################################################################### + # Argument List Splitting + if (style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') and not current.is_comment and + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in current.subtypes): + if (previous.value not in {'=', ':', '*', '**'} and + current.value not in ':=,)' and not _IsFunctionDefinition(previous)): + # If we're going to split the lines because of named arguments, then we + # want to split after the opening bracket as well. But not when this is + # part of a function definition. + if previous.value == '(': + # Make sure we don't split after the opening bracket if the + # continuation indent is greater than the opening bracket: + # + # a( + # b=1, + # c=2) + if (self._FitsOnLine(previous, previous.matching_bracket) and + logical_line.IsSurroundedByBrackets(previous)): + # An argument to a function is a function call with named + # assigns. 
return False - return current.can_break_before - - def MustSplit( self ): - """Returns True if the line must split before the next token.""" - current = self.next_token - previous = current.previous_token - - if current.is_pseudo: + # Don't split if not required + if (not style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') and + not style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')): return False - if current.must_break_before: - return True - - if not previous: - return False - - if style.Get( 'SPLIT_ALL_COMMA_SEPARATED_VALUES' ) and previous.value == ',': - return True + column = self.column - self.stack[-1].last_space + return column > style.Get('CONTINUATION_INDENT_WIDTH') - if ( style.Get( 'SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES' ) and - previous.value == ',' ): - # Avoid breaking in a container that fits in the current line if possible - opening = _GetOpeningBracket( current ) - - # Can't find opening bracket, behave the same way as - # SPLIT_ALL_COMMA_SEPARATED_VALUES. - if not opening: - return True - - if current.is_comment: - # Don't require splitting before a comment, since it may be related to - # the current line. - return False - - # Allow the fallthrough code to handle the closing bracket. - if current != opening.matching_bracket: - # If the container doesn't fit in the current line, must split - return not self._ContainerFitsOnStartLine( opening ) - - if ( self.stack[ -1 ].split_before_closing_bracket and - ( current.value in '}]' and style.Get( 'SPLIT_BEFORE_CLOSING_BRACKET' ) or - current.value in '}])' and style.Get( 'INDENT_CLOSING_BRACKETS' ) ) ): - # Split before the closing bracket if we can. 
- if subtypes.SUBSCRIPT_BRACKET not in current.subtypes: - return current.node_split_penalty != split_penalty.UNBREAKABLE - - if ( current.value == ')' and previous.value == ',' and - not _IsSingleElementTuple( current.matching_bracket ) ): - return True + opening = _GetOpeningBracket(current) + if opening: + return not self._ContainerFitsOnStartLine(opening) - # Prevent splitting before the first argument in compound statements - # with the exception of function declarations. - if ( style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) and - _IsCompoundStatement( self.line.first ) and - not _IsFunctionDef( self.line.first ) ): - return False + if (current.value not in '{)' and previous.value == '(' and + self._ArgumentListHasDictionaryEntry(current)): + return True - ########################################################################### - # List Splitting - if ( style.Get( 'DEDENT_CLOSING_BRACKETS' ) or - style.Get( 'INDENT_CLOSING_BRACKETS' ) or - style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) ): - bracket = current if current.ClosesScope() else previous - if subtypes.SUBSCRIPT_BRACKET not in bracket.subtypes: - if bracket.OpensScope(): - if style.Get( 'COALESCE_BRACKETS' ): - if current.OpensScope(): - # Prefer to keep all opening brackets together. - return False - - if ( not _IsLastScopeInLine( bracket ) or - logical_line.IsSurroundedByBrackets( bracket ) ): - last_token = bracket.matching_bracket - else: - last_token = _LastTokenInLine( bracket.matching_bracket ) - - if not self._FitsOnLine( bracket, last_token ): - # Split before the first element if the whole list can't fit on a - # single line. - self.stack[ -1 ].split_before_closing_bracket = True - return True - - elif ( style.Get( 'DEDENT_CLOSING_BRACKETS' ) or - style.Get( 'INDENT_CLOSING_BRACKETS' ) - ) and current.ClosesScope(): - # Split before and dedent the closing bracket. 
- return self.stack[ -1 ].split_before_closing_bracket - - if ( style.Get( 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN' ) and - current.is_name ): - # An expression that's surrounded by parens gets split after the opening - # parenthesis. - def SurroundedByParens( token ): - """Check if it's an expression surrounded by parentheses.""" - while token: - if token.value == ',': - return False - if token.value == ')': - return not token.next_token - if token.OpensScope(): - token = token.matching_bracket.next_token - else: - token = token.next_token - return False - - if ( previous.value == '(' and not previous.is_pseudo and - not logical_line.IsSurroundedByBrackets( previous ) ): - pptoken = previous.previous_token - if ( pptoken and not pptoken.is_name and not pptoken.is_keyword and - SurroundedByParens( current ) ): - return True - - if ( current.is_name or current.is_string ) and previous.value == ',': - # If the list has function calls in it and the full list itself cannot - # fit on the line, then we want to split. Otherwise, we'll get something - # like this: - # - # X = [ - # Bar(xxx='some string', - # yyy='another long string', - # zzz='a third long string'), Bar( - # xxx='some string', - # yyy='another long string', - # zzz='a third long string') - # ] - # - # or when a string formatting syntax. - func_call_or_string_format = False - tok = current.next_token - if current.is_name: - while tok and ( tok.is_name or tok.value == '.' 
): - tok = tok.next_token - func_call_or_string_format = tok and tok.value == '(' - elif current.is_string: - while tok and tok.is_string: - tok = tok.next_token - func_call_or_string_format = tok and tok.value == '%' - if func_call_or_string_format: - open_bracket = logical_line.IsSurroundedByBrackets( current ) - if open_bracket: - if open_bracket.value in '[{': - if not self._FitsOnLine( open_bracket, - open_bracket.matching_bracket ): - return True - elif tok.value == '(': - if not self._FitsOnLine( current, tok.matching_bracket ): - return True - - if ( current.OpensScope() and previous.value == ',' and - subtypes.DICTIONARY_KEY not in current.next_token.subtypes ): - # If we have a list of tuples, then we can get a similar look as above. If - # the full list cannot fit on the line, then we want a split. - open_bracket = logical_line.IsSurroundedByBrackets( current ) - if ( open_bracket and open_bracket.value in '[{' and - subtypes.SUBSCRIPT_BRACKET not in open_bracket.subtypes ): - if not self._FitsOnLine( current, current.matching_bracket ): - return True - - ########################################################################### - # Dict/Set Splitting - if ( style.Get( 'EACH_DICT_ENTRY_ON_SEPARATE_LINE' ) and - subtypes.DICTIONARY_KEY in current.subtypes and not current.is_comment ): - # Place each dictionary entry onto its own line. - if previous.value == '{' and previous.previous_token: - opening = _GetOpeningBracket( previous.previous_token ) - if ( opening and opening.value == '(' and opening.previous_token and - opening.previous_token.is_name ): - # This is a dictionary that's an argument to a function. - if ( self._FitsOnLine( previous, previous.matching_bracket ) and - previous.matching_bracket.next_token and - ( not opening.matching_bracket.next_token or - opening.matching_bracket.next_token.value != '.' 
) and - _ScopeHasNoCommas( previous ) ): - # Don't split before the key if: - # - The dictionary fits on a line, and - # - The function call isn't part of a builder-style call and - # - The dictionary has one entry and no trailing comma - return False + if style.Get('SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED'): + # Split before arguments in a function call or definition if the + # arguments are terminated by a comma. + opening = _GetOpeningBracket(current) + if opening and opening.previous_token and opening.previous_token.is_name: + if previous.value in '(,': + if opening.matching_bracket.previous_token.value == ',': return True - if ( style.Get( 'SPLIT_BEFORE_DICT_SET_GENERATOR' ) and - subtypes.DICT_SET_GENERATOR in current.subtypes ): - # Split before a dict/set generator. - return True + if ((current.is_name or current.value in {'*', '**'}) and + previous.value == ','): + # If we have a function call within an argument list and it won't fit on + # the remaining line, but it will fit on a line by itself, then go ahead + # and split before the call. + opening = _GetOpeningBracket(current) + if (opening and opening.value == '(' and opening.previous_token and + (opening.previous_token.is_name or + opening.previous_token.value in {'*', '**'})): + is_func_call = False + opening = current + while opening: + if opening.value == '(': + is_func_call = True + break + if (not (opening.is_name or opening.value in {'*', '**'}) and + opening.value != '.'): + break + opening = opening.next_token - if ( subtypes.DICTIONARY_VALUE in current.subtypes or - ( previous.is_pseudo and previous.value == '(' and - not current.is_comment ) ): - # Split before the dictionary value if we can't fit every dictionary - # entry on its own line. 
- if not current.OpensScope(): - opening = _GetOpeningBracket( current ) - if not self._EachDictEntryFitsOnOneLine( opening ): - return style.Get( 'ALLOW_SPLIT_BEFORE_DICT_VALUE' ) - - if previous.value == '{': - # Split if the dict/set cannot fit on one line and ends in a comma. - closing = previous.matching_bracket - if ( not self._FitsOnLine( previous, closing ) and - closing.previous_token.value == ',' ): - self.stack[ -1 ].split_before_closing_bracket = True - return True - - ########################################################################### - # Argument List Splitting - if ( style.Get( 'SPLIT_BEFORE_NAMED_ASSIGNS' ) and not current.is_comment and - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in current.subtypes ): - if ( previous.value not in { '=', ':', '*', '**' } and - current.value not in ':=,)' and - not _IsFunctionDefinition( previous ) ): - # If we're going to split the lines because of named arguments, then we - # want to split after the opening bracket as well. But not when this is - # part of a function definition. - if previous.value == '(': - # Make sure we don't split after the opening bracket if the - # continuation indent is greater than the opening bracket: - # - # a( - # b=1, - # c=2) - if ( self._FitsOnLine( previous, previous.matching_bracket ) and - logical_line.IsSurroundedByBrackets( previous ) ): - # An argument to a function is a function call with named - # assigns. 
- return False - - # Don't split if not required - if ( not style.Get( 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN' ) - and not style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) ): - return False - - column = self.column - self.stack[ -1 ].last_space - return column > style.Get( 'CONTINUATION_INDENT_WIDTH' ) - - opening = _GetOpeningBracket( current ) - if opening: - return not self._ContainerFitsOnStartLine( opening ) - - if ( current.value not in '{)' and previous.value == '(' and - self._ArgumentListHasDictionaryEntry( current ) ): + if is_func_call: + if (not self._FitsOnLine(current, opening.matching_bracket) or + (opening.matching_bracket.next_token and + opening.matching_bracket.next_token.value != ',' and + not opening.matching_bracket.next_token.ClosesScope())): return True - if style.Get( 'SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED' ): - # Split before arguments in a function call or definition if the - # arguments are terminated by a comma. - opening = _GetOpeningBracket( current ) - if opening and opening.previous_token and opening.previous_token.is_name: - if previous.value in '(,': - if opening.matching_bracket.previous_token.value == ',': - return True - - if ( ( current.is_name or current.value in { '*', '**' } ) and - previous.value == ',' ): - # If we have a function call within an argument list and it won't fit on - # the remaining line, but it will fit on a line by itself, then go ahead - # and split before the call. - opening = _GetOpeningBracket( current ) - if ( opening and opening.value == '(' and opening.previous_token and - ( opening.previous_token.is_name or - opening.previous_token.value in { '*', '**' } ) ): - is_func_call = False - opening = current - while opening: - if opening.value == '(': - is_func_call = True - break - if ( not ( opening.is_name or opening.value in { '*', '**' } ) and - opening.value != '.' 
): - break - opening = opening.next_token - - if is_func_call: - if ( not self._FitsOnLine( current, opening.matching_bracket ) or - ( opening.matching_bracket.next_token and - opening.matching_bracket.next_token.value != ',' and - not opening.matching_bracket.next_token.ClosesScope() ) ): - return True - - pprevious = previous.previous_token - - # A function call with a dictionary as its first argument may result in - # unreadable formatting if the dictionary spans multiple lines. The - # dictionary itself is formatted just fine, but the remaining arguments are - # indented too far: + pprevious = previous.previous_token + + # A function call with a dictionary as its first argument may result in + # unreadable formatting if the dictionary spans multiple lines. The + # dictionary itself is formatted just fine, but the remaining arguments are + # indented too far: + # + # function_call({ + # KEY_1: 'value one', + # KEY_2: 'value two', + # }, + # default=False) + if (current.value == '{' and previous.value == '(' and pprevious and + pprevious.is_name): + dict_end = current.matching_bracket + next_token = dict_end.next_token + if next_token.value == ',' and not self._FitsOnLine(current, dict_end): + return True + + if (current.is_name and pprevious and pprevious.is_name and + previous.value == '('): + + if (not self._FitsOnLine(previous, previous.matching_bracket) and + _IsFunctionCallWithArguments(current)): + # There is a function call, with more than 1 argument, where the first + # argument is itself a function call with arguments that does not fit + # into the line. In this specific case, if we split after the first + # argument's opening '(', then the formatting will look bad for the + # rest of the arguments. 
E.g.: + # + # outer_function_call(inner_function_call( + # inner_arg1, inner_arg2), + # outer_arg1, outer_arg2) # - # function_call({ - # KEY_1: 'value one', - # KEY_2: 'value two', - # }, - # default=False) - if ( current.value == '{' and previous.value == '(' and pprevious and - pprevious.is_name ): - dict_end = current.matching_bracket - next_token = dict_end.next_token - if next_token.value == ',' and not self._FitsOnLine( current, dict_end ): - return True - - if ( current.is_name and pprevious and pprevious.is_name and - previous.value == '(' ): - - if ( not self._FitsOnLine( previous, previous.matching_bracket ) and - _IsFunctionCallWithArguments( current ) ): - # There is a function call, with more than 1 argument, where the first - # argument is itself a function call with arguments that does not fit - # into the line. In this specific case, if we split after the first - # argument's opening '(', then the formatting will look bad for the - # rest of the arguments. E.g.: - # - # outer_function_call(inner_function_call( - # inner_arg1, inner_arg2), - # outer_arg1, outer_arg2) - # - # Instead, enforce a split before that argument to keep things looking - # good. - if ( style.Get( 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN' ) or - style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) ): - return True - - opening = _GetOpeningBracket( current ) - if ( opening and opening.value == '(' and opening.previous_token and - ( opening.previous_token.is_name or - opening.previous_token.value in { '*', '**' } ) ): - is_func_call = False - opening = current - while opening: - if opening.value == '(': - is_func_call = True - break - if ( not ( opening.is_name or opening.value in { '*', '**' } ) - and opening.value != '.' 
): - break - opening = opening.next_token - - if is_func_call: - if ( - not self._FitsOnLine( current, - opening.matching_bracket ) or - ( opening.matching_bracket.next_token and - opening.matching_bracket.next_token.value != ',' and - not opening.matching_bracket.next_token.ClosesScope() ) ): - return True - - if ( previous.OpensScope() and not current.OpensScope() and - not current.is_comment and - subtypes.SUBSCRIPT_BRACKET not in previous.subtypes ): - if pprevious and not pprevious.is_keyword and not pprevious.is_name: - # We want to split if there's a comment in the container. - token = current - while token != previous.matching_bracket: - if token.is_comment: - return True - token = token.next_token - if previous.value == '(': - pptoken = previous.previous_token - if not pptoken or not pptoken.is_name: - # Split after the opening of a tuple if it doesn't fit on the current - # line and it's not a function call. - if self._FitsOnLine( previous, previous.matching_bracket ): - return False - elif not self._FitsOnLine( previous, previous.matching_bracket ): - if len( previous.container_elements ) == 1: - return False - - elements = previous.container_elements + [ - previous.matching_bracket - ] - i = 1 - while i < len( elements ): - if ( not elements[ i - 1 ].OpensScope() and - not self._FitsOnLine( elements[ i - 1 ], elements[ i ] ) ): - return True - i += 1 - - if ( self.column_limit - self.column ) / float( - self.column_limit ) < 0.3: - # Try not to squish all of the arguments off to the right. - return True - else: - # Split after the opening of a container if it doesn't fit on the - # current line. - if not self._FitsOnLine( previous, previous.matching_bracket ): - return True - - ########################################################################### - # Original Formatting Splitting - # These checks rely upon the original formatting. This is in order to - # attempt to keep hand-written code in the same condition as it was before. 
- # However, this may cause the formatter to fail to be idempotent. - if ( style.Get( 'SPLIT_BEFORE_BITWISE_OPERATOR' ) and current.value in '&|' and - previous.lineno < current.lineno ): - # Retain the split before a bitwise operator. + # Instead, enforce a split before that argument to keep things looking + # good. + if (style.Get('SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN') or + style.Get('SPLIT_BEFORE_FIRST_ARGUMENT')): + return True + + opening = _GetOpeningBracket(current) + if (opening and opening.value == '(' and opening.previous_token and + (opening.previous_token.is_name or + opening.previous_token.value in {'*', '**'})): + is_func_call = False + opening = current + while opening: + if opening.value == '(': + is_func_call = True + break + if (not (opening.is_name or opening.value in {'*', '**'}) and + opening.value != '.'): + break + opening = opening.next_token + + if is_func_call: + if (not self._FitsOnLine(current, opening.matching_bracket) or + (opening.matching_bracket.next_token and + opening.matching_bracket.next_token.value != ',' and + not opening.matching_bracket.next_token.ClosesScope())): + return True + + if (previous.OpensScope() and not current.OpensScope() and + not current.is_comment and + subtypes.SUBSCRIPT_BRACKET not in previous.subtypes): + if pprevious and not pprevious.is_keyword and not pprevious.is_name: + # We want to split if there's a comment in the container. + token = current + while token != previous.matching_bracket: + if token.is_comment: return True + token = token.next_token + if previous.value == '(': + pptoken = previous.previous_token + if not pptoken or not pptoken.is_name: + # Split after the opening of a tuple if it doesn't fit on the current + # line and it's not a function call. 
+ if self._FitsOnLine(previous, previous.matching_bracket): + return False + elif not self._FitsOnLine(previous, previous.matching_bracket): + if len(previous.container_elements) == 1: + return False - if ( current.is_comment and - previous.lineno < current.lineno - current.value.count( '\n' ) ): - # If a comment comes in the middle of a logical line (like an if - # conditional with comments interspersed), then we want to split if the - # original comments were on a separate line. + elements = previous.container_elements + [previous.matching_bracket] + i = 1 + while i < len(elements): + if (not elements[i - 1].OpensScope() and + not self._FitsOnLine(elements[i - 1], elements[i])): + return True + i += 1 + + if (self.column_limit - self.column) / float(self.column_limit) < 0.3: + # Try not to squish all of the arguments off to the right. return True + else: + # Split after the opening of a container if it doesn't fit on the + # current line. + if not self._FitsOnLine(previous, previous.matching_bracket): + return True + + ########################################################################### + # Original Formatting Splitting + # These checks rely upon the original formatting. This is in order to + # attempt to keep hand-written code in the same condition as it was before. + # However, this may cause the formatter to fail to be idempotent. + if (style.Get('SPLIT_BEFORE_BITWISE_OPERATOR') and current.value in '&|' and + previous.lineno < current.lineno): + # Retain the split before a bitwise operator. + return True + + if (current.is_comment and + previous.lineno < current.lineno - current.value.count('\n')): + # If a comment comes in the middle of a logical line (like an if + # conditional with comments interspersed), then we want to split if the + # original comments were on a separate line. + return True - return False + return False - def AddTokenToState( self, newline, dry_run, must_split = False ): - """Add a token to the format decision state. 
+ def AddTokenToState(self, newline, dry_run, must_split=False): + """Add a token to the format decision state. Allow the heuristic to try out adding the token with and without a newline. Later on, the algorithm will determine which one has the lowest penalty. @@ -581,21 +575,21 @@ def AddTokenToState( self, newline, dry_run, must_split = False ): Returns: The penalty of splitting after the current token. """ - self._PushParameterListState( newline ) + self._PushParameterListState(newline) - penalty = 0 - if newline: - penalty = self._AddTokenOnNewline( dry_run, must_split ) - else: - self._AddTokenOnCurrentLine( dry_run ) + penalty = 0 + if newline: + penalty = self._AddTokenOnNewline(dry_run, must_split) + else: + self._AddTokenOnCurrentLine(dry_run) - penalty += self._CalculateComprehensionState( newline ) - penalty += self._CalculateParameterListState( newline ) + penalty += self._CalculateComprehensionState(newline) + penalty += self._CalculateParameterListState(newline) - return self.MoveStateToNextToken() + penalty + return self.MoveStateToNextToken() + penalty - def _AddTokenOnCurrentLine( self, dry_run ): - """Puts the token on the current line. + def _AddTokenOnCurrentLine(self, dry_run): + """Puts the token on the current line. Appends the next token to the state and updates information necessary for indentation. @@ -603,37 +597,37 @@ def _AddTokenOnCurrentLine( self, dry_run ): Arguments: dry_run: (bool) Commit whitespace changes to the FormatToken if True. """ - current = self.next_token - previous = current.previous_token - - spaces = current.spaces_required_before - if isinstance( spaces, list ): - # Don't set the value here, as we need to look at the lines near - # this one to determine the actual horizontal alignment value. 
- spaces = 0 - - if not dry_run: - current.AddWhitespacePrefix( newlines_before = 0, spaces = spaces ) - - if previous.OpensScope(): - if not current.is_comment: - # Align closing scopes that are on a newline with the opening scope: - # - # foo = [a, - # b, - # ] - self.stack[ -1 ].closing_scope_indent = self.column - 1 - if style.Get( 'ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT' ): - self.stack[ -1 ].closing_scope_indent += 1 - self.stack[ -1 ].indent = self.column + spaces - else: - self.stack[ -1 ].closing_scope_indent = ( - self.stack[ -1 ].indent - style.Get( 'CONTINUATION_INDENT_WIDTH' ) ) - - self.column += spaces - - def _AddTokenOnNewline( self, dry_run, must_split ): - """Adds a line break and necessary indentation. + current = self.next_token + previous = current.previous_token + + spaces = current.spaces_required_before + if isinstance(spaces, list): + # Don't set the value here, as we need to look at the lines near + # this one to determine the actual horizontal alignment value. + spaces = 0 + + if not dry_run: + current.AddWhitespacePrefix(newlines_before=0, spaces=spaces) + + if previous.OpensScope(): + if not current.is_comment: + # Align closing scopes that are on a newline with the opening scope: + # + # foo = [a, + # b, + # ] + self.stack[-1].closing_scope_indent = self.column - 1 + if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'): + self.stack[-1].closing_scope_indent += 1 + self.stack[-1].indent = self.column + spaces + else: + self.stack[-1].closing_scope_indent = ( + self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH')) + + self.column += spaces + + def _AddTokenOnNewline(self, dry_run, must_split): + """Adds a line break and necessary indentation. Appends the next token to the state and updates information necessary for indentation. @@ -646,63 +640,63 @@ def _AddTokenOnNewline( self, dry_run, must_split ): Returns: The split penalty for splitting after the current state. 
""" - current = self.next_token - previous = current.previous_token - - self.column = self._GetNewlineColumn() - - if not dry_run: - indent_level = self.line.depth - spaces = self.column - if spaces: - spaces -= indent_level * style.Get( 'INDENT_WIDTH' ) - current.AddWhitespacePrefix( - newlines_before = 1, spaces = spaces, indent_level = indent_level ) - - if not current.is_comment: - self.stack[ -1 ].last_space = self.column - self.lowest_level_on_line = self.paren_level - - if ( previous.OpensScope() or - ( previous.is_comment and previous.previous_token is not None and - previous.previous_token.OpensScope() ) ): - dedent = ( style.Get( 'CONTINUATION_INDENT_WIDTH' ), - 0 )[ style.Get( 'INDENT_CLOSING_BRACKETS' ) ] - self.stack[ -1 ].closing_scope_indent = ( - max( 0, self.stack[ -1 ].indent - dedent ) ) - self.stack[ -1 ].split_before_closing_bracket = True - - # Calculate the split penalty. - penalty = current.split_penalty - - if must_split: - # Don't penalize for a must split. - return penalty - - if previous.is_pseudo and previous.value == '(': - # Small penalty for splitting after a pseudo paren. - penalty += 50 - - # Add a penalty for each increasing newline we add, but don't penalize for - # splitting before an if-expression or list comprehension. - if current.value not in { 'if', 'for' }: - last = self.stack[ -1 ] - last.num_line_splits += 1 - penalty += ( - style.Get( 'SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT' ) * - last.num_line_splits ) - - if current.OpensScope() and previous.OpensScope(): - # Prefer to keep opening brackets coalesced (unless it's at the beginning - # of a function call). - pprev = previous.previous_token - if not pprev or not pprev.is_name: - penalty += 10 - - return penalty + 10 - - def MoveStateToNextToken( self ): - """Calculate format decision state information and move onto the next token. 
+ current = self.next_token + previous = current.previous_token + + self.column = self._GetNewlineColumn() + + if not dry_run: + indent_level = self.line.depth + spaces = self.column + if spaces: + spaces -= indent_level * style.Get('INDENT_WIDTH') + current.AddWhitespacePrefix( + newlines_before=1, spaces=spaces, indent_level=indent_level) + + if not current.is_comment: + self.stack[-1].last_space = self.column + self.lowest_level_on_line = self.paren_level + + if (previous.OpensScope() or + (previous.is_comment and previous.previous_token is not None and + previous.previous_token.OpensScope())): + dedent = (style.Get('CONTINUATION_INDENT_WIDTH'), + 0)[style.Get('INDENT_CLOSING_BRACKETS')] + self.stack[-1].closing_scope_indent = ( + max(0, self.stack[-1].indent - dedent)) + self.stack[-1].split_before_closing_bracket = True + + # Calculate the split penalty. + penalty = current.split_penalty + + if must_split: + # Don't penalize for a must split. + return penalty + + if previous.is_pseudo and previous.value == '(': + # Small penalty for splitting after a pseudo paren. + penalty += 50 + + # Add a penalty for each increasing newline we add, but don't penalize for + # splitting before an if-expression or list comprehension. + if current.value not in {'if', 'for'}: + last = self.stack[-1] + last.num_line_splits += 1 + penalty += ( + style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') * + last.num_line_splits) + + if current.OpensScope() and previous.OpensScope(): + # Prefer to keep opening brackets coalesced (unless it's at the beginning + # of a function call). + pprev = previous.previous_token + if not pprev or not pprev.is_name: + penalty += 10 + + return penalty + 10 + + def MoveStateToNextToken(self): + """Calculate format decision state information and move onto the next token. Before moving onto the next token, we first calculate the format decision state given the current token and its formatting decisions. 
Then the format @@ -711,55 +705,55 @@ def MoveStateToNextToken( self ): Returns: The penalty for the number of characters over the column limit. """ - current = self.next_token - if not current.OpensScope() and not current.ClosesScope(): - self.lowest_level_on_line = min( - self.lowest_level_on_line, self.paren_level ) - - # If we encounter an opening bracket, we add a level to our stack to prepare - # for the subsequent tokens. - if current.OpensScope(): - last = self.stack[ -1 ] - new_indent = style.Get( 'CONTINUATION_INDENT_WIDTH' ) + last.last_space - - self.stack.append( _ParenState( new_indent, self.stack[ -1 ].last_space ) ) - self.paren_level += 1 - - # If we encounter a closing bracket, we can remove a level from our - # parenthesis stack. - if len( self.stack ) > 1 and current.ClosesScope(): - if subtypes.DICTIONARY_KEY_PART in current.subtypes: - self.stack[ -2 ].last_space = self.stack[ -2 ].indent - else: - self.stack[ -2 ].last_space = self.stack[ -1 ].last_space - self.stack.pop() - self.paren_level -= 1 - - is_multiline_string = current.is_string and '\n' in current.value - if is_multiline_string: - # This is a multiline string. Only look at the first line. - self.column += len( current.value.split( '\n' )[ 0 ] ) - elif not current.is_pseudo: - self.column += len( current.value ) - - self.next_token = self.next_token.next_token - - # Calculate the penalty for overflowing the column limit. - penalty = 0 - if ( not current.is_pylint_comment and not current.is_pytype_comment and - not current.is_copybara_comment and self.column > self.column_limit ): - excess_characters = self.column - self.column_limit - penalty += style.Get( 'SPLIT_PENALTY_EXCESS_CHARACTER' ) * excess_characters - - if is_multiline_string: - # If this is a multiline string, the column is actually the - # end of the last line in the string. 
- self.column = len( current.value.split( '\n' )[ -1 ] ) - - return penalty - - def _CalculateComprehensionState( self, newline ): - """Makes required changes to comprehension state. + current = self.next_token + if not current.OpensScope() and not current.ClosesScope(): + self.lowest_level_on_line = min( + self.lowest_level_on_line, self.paren_level) + + # If we encounter an opening bracket, we add a level to our stack to prepare + # for the subsequent tokens. + if current.OpensScope(): + last = self.stack[-1] + new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space + + self.stack.append(_ParenState(new_indent, self.stack[-1].last_space)) + self.paren_level += 1 + + # If we encounter a closing bracket, we can remove a level from our + # parenthesis stack. + if len(self.stack) > 1 and current.ClosesScope(): + if subtypes.DICTIONARY_KEY_PART in current.subtypes: + self.stack[-2].last_space = self.stack[-2].indent + else: + self.stack[-2].last_space = self.stack[-1].last_space + self.stack.pop() + self.paren_level -= 1 + + is_multiline_string = current.is_string and '\n' in current.value + if is_multiline_string: + # This is a multiline string. Only look at the first line. + self.column += len(current.value.split('\n')[0]) + elif not current.is_pseudo: + self.column += len(current.value) + + self.next_token = self.next_token.next_token + + # Calculate the penalty for overflowing the column limit. + penalty = 0 + if (not current.is_pylint_comment and not current.is_pytype_comment and + not current.is_copybara_comment and self.column > self.column_limit): + excess_characters = self.column - self.column_limit + penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters + + if is_multiline_string: + # If this is a multiline string, the column is actually the + # end of the last line in the string. 
+ self.column = len(current.value.split('\n')[-1]) + + return penalty + + def _CalculateComprehensionState(self, newline): + """Makes required changes to comprehension state. Args: newline: Whether the current token is to be added on a newline. @@ -768,82 +762,81 @@ def _CalculateComprehensionState( self, newline ): The penalty for the token-newline combination given the current comprehension state. """ - current = self.next_token - previous = current.previous_token - top_of_stack = self.comp_stack[ -1 ] if self.comp_stack else None - penalty = 0 - - if top_of_stack is not None: - # Check if the token terminates the current comprehension. - if current == top_of_stack.closing_bracket: - last = self.comp_stack.pop() - # Lightly penalize comprehensions that are split across multiple lines. - if last.has_interior_split: - penalty += style.Get( 'SPLIT_PENALTY_COMPREHENSION' ) - - return penalty - - if newline: - top_of_stack.has_interior_split = True - - if ( subtypes.COMP_EXPR in current.subtypes and - subtypes.COMP_EXPR not in previous.subtypes ): - self.comp_stack.append( object_state.ComprehensionState( current ) ) - return penalty - - if current.value == 'for' and subtypes.COMP_FOR in current.subtypes: - if top_of_stack.for_token is not None: - # Treat nested comprehensions like normal comp_if expressions. - # Example: - # my_comp = [ - # a.qux + b.qux - # for a in foo - # --> for b in bar <-- - # if a.zut + b.zut - # ] - if ( style.Get( 'SPLIT_COMPLEX_COMPREHENSION' ) and - top_of_stack.has_split_at_for != newline and - ( top_of_stack.has_split_at_for or - not top_of_stack.HasTrivialExpr() ) ): - penalty += split_penalty.UNBREAKABLE - else: - top_of_stack.for_token = current - top_of_stack.has_split_at_for = newline - - # Try to keep trivial expressions on the same line as the comp_for. 
- if ( style.Get( 'SPLIT_COMPLEX_COMPREHENSION' ) and newline and - top_of_stack.HasTrivialExpr() ): - penalty += split_penalty.CONNECTED - - if ( subtypes.COMP_IF in current.subtypes and - subtypes.COMP_IF not in previous.subtypes ): - # Penalize breaking at comp_if when it doesn't match the newline structure - # in the rest of the comprehension. - if ( style.Get( 'SPLIT_COMPLEX_COMPREHENSION' ) and - top_of_stack.has_split_at_for != newline and - ( top_of_stack.has_split_at_for or - not top_of_stack.HasTrivialExpr() ) ): - penalty += split_penalty.UNBREAKABLE + current = self.next_token + previous = current.previous_token + top_of_stack = self.comp_stack[-1] if self.comp_stack else None + penalty = 0 + + if top_of_stack is not None: + # Check if the token terminates the current comprehension. + if current == top_of_stack.closing_bracket: + last = self.comp_stack.pop() + # Lightly penalize comprehensions that are split across multiple lines. + if last.has_interior_split: + penalty += style.Get('SPLIT_PENALTY_COMPREHENSION') return penalty - def _PushParameterListState( self, newline ): - """Push a new parameter list state for a function definition. + if newline: + top_of_stack.has_interior_split = True + + if (subtypes.COMP_EXPR in current.subtypes and + subtypes.COMP_EXPR not in previous.subtypes): + self.comp_stack.append(object_state.ComprehensionState(current)) + return penalty + + if current.value == 'for' and subtypes.COMP_FOR in current.subtypes: + if top_of_stack.for_token is not None: + # Treat nested comprehensions like normal comp_if expressions. 
+ # Example: + # my_comp = [ + # a.qux + b.qux + # for a in foo + # --> for b in bar <-- + # if a.zut + b.zut + # ] + if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and + top_of_stack.has_split_at_for != newline and + (top_of_stack.has_split_at_for or + not top_of_stack.HasTrivialExpr())): + penalty += split_penalty.UNBREAKABLE + else: + top_of_stack.for_token = current + top_of_stack.has_split_at_for = newline + + # Try to keep trivial expressions on the same line as the comp_for. + if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and newline and + top_of_stack.HasTrivialExpr()): + penalty += split_penalty.CONNECTED + + if (subtypes.COMP_IF in current.subtypes and + subtypes.COMP_IF not in previous.subtypes): + # Penalize breaking at comp_if when it doesn't match the newline structure + # in the rest of the comprehension. + if (style.Get('SPLIT_COMPLEX_COMPREHENSION') and + top_of_stack.has_split_at_for != newline and + (top_of_stack.has_split_at_for or not top_of_stack.HasTrivialExpr())): + penalty += split_penalty.UNBREAKABLE + + return penalty + + def _PushParameterListState(self, newline): + """Push a new parameter list state for a function definition. Args: newline: Whether the current token is to be added on a newline. """ - current = self.next_token - previous = current.previous_token + current = self.next_token + previous = current.previous_token - if _IsFunctionDefinition( previous ): - first_param_column = previous.total_length + self.stack[ -2 ].indent - self.param_list_stack.append( - object_state.ParameterListState( - previous, newline, first_param_column ) ) + if _IsFunctionDefinition(previous): + first_param_column = previous.total_length + self.stack[-2].indent + self.param_list_stack.append( + object_state.ParameterListState( + previous, newline, first_param_column)) - def _CalculateParameterListState( self, newline ): - """Makes required changes to parameter list state. 
+ def _CalculateParameterListState(self, newline): + """Makes required changes to parameter list state. Args: newline: Whether the current token is to be added on a newline. @@ -852,355 +845,355 @@ def _CalculateParameterListState( self, newline ): The penalty for the token-newline combination given the current parameter state. """ - current = self.next_token - previous = current.previous_token - penalty = 0 - - if _IsFunctionDefinition( previous ): - first_param_column = previous.total_length + self.stack[ -2 ].indent - if not newline: - param_list = self.param_list_stack[ -1 ] - if param_list.parameters and param_list.has_typed_return: - last_param = param_list.parameters[ -1 ].first_token - last_token = _LastTokenInLine( previous.matching_bracket ) - total_length = last_token.total_length - total_length -= last_param.total_length - len( last_param.value ) - if total_length + self.column > self.column_limit: - # If we need to split before the trailing code of a function - # definition with return types, then also split before the opening - # parameter so that the trailing bit isn't indented on a line by - # itself: - # - # def rrrrrrrrrrrrrrrrrrrrrr(ccccccccccccccccccccccc: Tuple[Text] - # ) -> List[Tuple[Text, Text]]: - # pass - penalty += split_penalty.VERY_STRONGLY_CONNECTED - return penalty - - if first_param_column <= self.column: - # Make sure we don't split after the opening bracket if the - # continuation indent is greater than the opening bracket: - # - # a( - # b=1, - # c=2) - penalty += split_penalty.VERY_STRONGLY_CONNECTED - return penalty - - if not self.param_list_stack: - return penalty - - param_list = self.param_list_stack[ -1 ] - if current == self.param_list_stack[ -1 ].closing_bracket: - self.param_list_stack.pop() # We're done with this state. 
- if newline and param_list.has_typed_return: - if param_list.split_before_closing_bracket: - penalty -= split_penalty.STRONGLY_CONNECTED - elif param_list.LastParamFitsOnLine( self.column ): - penalty += split_penalty.STRONGLY_CONNECTED - - if ( not newline and param_list.has_typed_return and - param_list.has_split_before_first_param ): - # Prefer splitting before the closing bracket if there's a return type - # and we've already split before the first parameter. - penalty += split_penalty.STRONGLY_CONNECTED - - return penalty - - if not param_list.parameters: - return penalty - - if newline: - if self._FitsOnLine( param_list.parameters[ 0 ].first_token, - _LastTokenInLine( param_list.closing_bracket ) ): - penalty += split_penalty.STRONGLY_CONNECTED - - if ( not newline and style.Get( 'SPLIT_BEFORE_NAMED_ASSIGNS' ) and - param_list.has_default_values and - current != param_list.parameters[ 0 ].first_token and - current != param_list.closing_bracket and - subtypes.PARAMETER_START in current.subtypes ): - # If we want to split before parameters when there are named assigns, - # then add a penalty for not splitting. 
- penalty += split_penalty.STRONGLY_CONNECTED - + current = self.next_token + previous = current.previous_token + penalty = 0 + + if _IsFunctionDefinition(previous): + first_param_column = previous.total_length + self.stack[-2].indent + if not newline: + param_list = self.param_list_stack[-1] + if param_list.parameters and param_list.has_typed_return: + last_param = param_list.parameters[-1].first_token + last_token = _LastTokenInLine(previous.matching_bracket) + total_length = last_token.total_length + total_length -= last_param.total_length - len(last_param.value) + if total_length + self.column > self.column_limit: + # If we need to split before the trailing code of a function + # definition with return types, then also split before the opening + # parameter so that the trailing bit isn't indented on a line by + # itself: + # + # def rrrrrrrrrrrrrrrrrrrrrr(ccccccccccccccccccccccc: Tuple[Text] + # ) -> List[Tuple[Text, Text]]: + # pass + penalty += split_penalty.VERY_STRONGLY_CONNECTED return penalty - def _IndentWithContinuationAlignStyle( self, column ): - if column == 0: - return column - align_style = style.Get( 'CONTINUATION_ALIGN_STYLE' ) - if align_style == 'FIXED': - return ( - ( self.line.depth * style.Get( 'INDENT_WIDTH' ) ) + - style.Get( 'CONTINUATION_INDENT_WIDTH' ) ) - if align_style == 'VALIGN-RIGHT': - indent_width = style.Get( 'INDENT_WIDTH' ) - return indent_width * int( ( column + indent_width - 1 ) / indent_width ) - return column - - def _GetNewlineColumn( self ): - """Return the new column on the newline.""" - current = self.next_token - previous = current.previous_token - top_of_stack = self.stack[ -1 ] - - if isinstance( current.spaces_required_before, list ): - # Don't set the value here, as we need to look at the lines near - # this one to determine the actual horizontal alignment value. 
- return 0 - elif current.spaces_required_before > 2 or self.line.disable: - return current.spaces_required_before - - cont_aligned_indent = self._IndentWithContinuationAlignStyle( - top_of_stack.indent ) - - if current.OpensScope(): - return cont_aligned_indent if self.paren_level else self.first_indent - - if current.ClosesScope(): - if ( previous.OpensScope() or - ( previous.is_comment and previous.previous_token is not None and - previous.previous_token.OpensScope() ) ): - return max( - 0, top_of_stack.indent - style.Get( 'CONTINUATION_INDENT_WIDTH' ) ) - return top_of_stack.closing_scope_indent - - if ( previous and previous.is_string and current.is_string and - subtypes.DICTIONARY_VALUE in current.subtypes ): - return previous.column - - if style.Get( 'INDENT_DICTIONARY_VALUE' ): - if previous and ( previous.value == ':' or previous.is_pseudo ): - if subtypes.DICTIONARY_VALUE in current.subtypes: - return top_of_stack.indent - - if ( not self.param_list_stack and _IsCompoundStatement( self.line.first ) and - ( not ( style.Get( 'DEDENT_CLOSING_BRACKETS' ) or - style.Get( 'INDENT_CLOSING_BRACKETS' ) ) or - style.Get( 'SPLIT_BEFORE_FIRST_ARGUMENT' ) ) ): - token_indent = ( - len( self.line.first.whitespace_prefix.split( '\n' )[ -1 ] ) + - style.Get( 'INDENT_WIDTH' ) ) - if token_indent == top_of_stack.indent: - return token_indent + style.Get( 'CONTINUATION_INDENT_WIDTH' ) - - if ( self.param_list_stack and - not self.param_list_stack[ -1 ].SplitBeforeClosingBracket( - top_of_stack.indent ) and top_of_stack.indent - == ( ( self.line.depth + 1 ) * style.Get( 'INDENT_WIDTH' ) ) ): - # NOTE: comment inside argument list is not excluded in subtype assigner - if ( subtypes.PARAMETER_START in current.subtypes or - ( previous.is_comment and - subtypes.PARAMETER_START in previous.subtypes ) ): - return top_of_stack.indent + style.Get( 'CONTINUATION_INDENT_WIDTH' ) - - return cont_aligned_indent - - def _FitsOnLine( self, start, end ): - """Determines if line between 
start and end can fit on the current line.""" - length = end.total_length - start.total_length - if not start.is_pseudo: - length += len( start.value ) - return length + self.column <= self.column_limit - - def _EachDictEntryFitsOnOneLine( self, opening ): - """Determine if each dict elems can fit on one line.""" - - def PreviousNonCommentToken( tok ): - tok = tok.previous_token - while tok.is_comment: - tok = tok.previous_token - return tok - - def ImplicitStringConcatenation( tok ): - num_strings = 0 - if tok.is_pseudo: - tok = tok.next_token - while tok.is_string: - num_strings += 1 - tok = tok.next_token - return num_strings > 1 - - def DictValueIsContainer( opening, closing ): - """Return true if the dictionary value is a container.""" - if not opening or not closing: - return False - colon = opening.previous_token - while colon: - if not colon.is_pseudo: - break - colon = colon.previous_token - if not colon or colon.value != ':': - return False - key = colon.previous_token - if not key: - return False - return subtypes.DICTIONARY_KEY_PART in key.subtypes - - closing = opening.matching_bracket - entry_start = opening.next_token - current = opening.next_token.next_token - - while current and current != closing: - if subtypes.DICTIONARY_KEY in current.subtypes: - prev = PreviousNonCommentToken( current ) - if prev.value == ',': - prev = PreviousNonCommentToken( prev.previous_token ) - if not DictValueIsContainer( prev.matching_bracket, prev ): - length = prev.total_length - entry_start.total_length - length += len( entry_start.value ) - if length + self.stack[ -2 ].indent >= self.column_limit: - return False - entry_start = current - if current.OpensScope(): - if ( ( current.value == '{' or - ( current.is_pseudo and current.next_token.value == '{' ) and - subtypes.DICTIONARY_VALUE in current.subtypes ) or - ImplicitStringConcatenation( current ) ): - # A dictionary entry that cannot fit on a single line shouldn't matter - # to this calculation. 
If it can't fit on a single line, then the - # opening should be on the same line as the key and the rest on - # newlines after it. But the other entries should be on single lines - # if possible. - if current.matching_bracket: - current = current.matching_bracket - while current: - if current == closing: - return True - if subtypes.DICTIONARY_KEY in current.subtypes: - entry_start = current - break - current = current.next_token - else: - current = current.matching_bracket - else: - current = current.next_token - - # At this point, current is the closing bracket. Go back one to get the end - # of the dictionary entry. - current = PreviousNonCommentToken( current ) - length = current.total_length - entry_start.total_length - length += len( entry_start.value ) - return length + self.stack[ -2 ].indent <= self.column_limit - - def _ArgumentListHasDictionaryEntry( self, token ): - """Check if the function argument list has a dictionary as an arg.""" - if _IsArgumentToFunction( token ): - while token: - if token.value == '{': - length = token.matching_bracket.total_length - token.total_length - return length + self.stack[ -2 ].indent > self.column_limit - if token.ClosesScope(): - break - if token.OpensScope(): - token = token.matching_bracket - token = token.next_token + if first_param_column <= self.column: + # Make sure we don't split after the opening bracket if the + # continuation indent is greater than the opening bracket: + # + # a( + # b=1, + # c=2) + penalty += split_penalty.VERY_STRONGLY_CONNECTED + return penalty + + if not self.param_list_stack: + return penalty + + param_list = self.param_list_stack[-1] + if current == self.param_list_stack[-1].closing_bracket: + self.param_list_stack.pop() # We're done with this state. 
+ if newline and param_list.has_typed_return: + if param_list.split_before_closing_bracket: + penalty -= split_penalty.STRONGLY_CONNECTED + elif param_list.LastParamFitsOnLine(self.column): + penalty += split_penalty.STRONGLY_CONNECTED + + if (not newline and param_list.has_typed_return and + param_list.has_split_before_first_param): + # Prefer splitting before the closing bracket if there's a return type + # and we've already split before the first parameter. + penalty += split_penalty.STRONGLY_CONNECTED + + return penalty + + if not param_list.parameters: + return penalty + + if newline: + if self._FitsOnLine(param_list.parameters[0].first_token, + _LastTokenInLine(param_list.closing_bracket)): + penalty += split_penalty.STRONGLY_CONNECTED + + if (not newline and style.Get('SPLIT_BEFORE_NAMED_ASSIGNS') and + param_list.has_default_values and + current != param_list.parameters[0].first_token and + current != param_list.closing_bracket and + subtypes.PARAMETER_START in current.subtypes): + # If we want to split before parameters when there are named assigns, + # then add a penalty for not splitting. + penalty += split_penalty.STRONGLY_CONNECTED + + return penalty + + def _IndentWithContinuationAlignStyle(self, column): + if column == 0: + return column + align_style = style.Get('CONTINUATION_ALIGN_STYLE') + if align_style == 'FIXED': + return ( + (self.line.depth * style.Get('INDENT_WIDTH')) + + style.Get('CONTINUATION_INDENT_WIDTH')) + if align_style == 'VALIGN-RIGHT': + indent_width = style.Get('INDENT_WIDTH') + return indent_width * int((column + indent_width - 1) / indent_width) + return column + + def _GetNewlineColumn(self): + """Return the new column on the newline.""" + current = self.next_token + previous = current.previous_token + top_of_stack = self.stack[-1] + + if isinstance(current.spaces_required_before, list): + # Don't set the value here, as we need to look at the lines near + # this one to determine the actual horizontal alignment value. 
+ return 0 + elif current.spaces_required_before > 2 or self.line.disable: + return current.spaces_required_before + + cont_aligned_indent = self._IndentWithContinuationAlignStyle( + top_of_stack.indent) + + if current.OpensScope(): + return cont_aligned_indent if self.paren_level else self.first_indent + + if current.ClosesScope(): + if (previous.OpensScope() or + (previous.is_comment and previous.previous_token is not None and + previous.previous_token.OpensScope())): + return max( + 0, top_of_stack.indent - style.Get('CONTINUATION_INDENT_WIDTH')) + return top_of_stack.closing_scope_indent + + if (previous and previous.is_string and current.is_string and + subtypes.DICTIONARY_VALUE in current.subtypes): + return previous.column + + if style.Get('INDENT_DICTIONARY_VALUE'): + if previous and (previous.value == ':' or previous.is_pseudo): + if subtypes.DICTIONARY_VALUE in current.subtypes: + return top_of_stack.indent + + if (not self.param_list_stack and _IsCompoundStatement(self.line.first) and + (not (style.Get('DEDENT_CLOSING_BRACKETS') or + style.Get('INDENT_CLOSING_BRACKETS')) or + style.Get('SPLIT_BEFORE_FIRST_ARGUMENT'))): + token_indent = ( + len(self.line.first.whitespace_prefix.split('\n')[-1]) + + style.Get('INDENT_WIDTH')) + if token_indent == top_of_stack.indent: + return token_indent + style.Get('CONTINUATION_INDENT_WIDTH') + + if (self.param_list_stack and + not self.param_list_stack[-1].SplitBeforeClosingBracket( + top_of_stack.indent) and top_of_stack.indent + == ((self.line.depth + 1) * style.Get('INDENT_WIDTH'))): + # NOTE: comment inside argument list is not excluded in subtype assigner + if (subtypes.PARAMETER_START in current.subtypes or + (previous.is_comment and + subtypes.PARAMETER_START in previous.subtypes)): + return top_of_stack.indent + style.Get('CONTINUATION_INDENT_WIDTH') + + return cont_aligned_indent + + def _FitsOnLine(self, start, end): + """Determines if line between start and end can fit on the current line.""" + length = 
end.total_length - start.total_length
+    if not start.is_pseudo:
+      length += len(start.value)
+    return length + self.column <= self.column_limit
+
+  def _EachDictEntryFitsOnOneLine(self, opening):
+    """Determine if each dict element can fit on one line."""
+
+    def PreviousNonCommentToken(tok):
+      tok = tok.previous_token
+      while tok.is_comment:
+        tok = tok.previous_token
+      return tok
+
+    def ImplicitStringConcatenation(tok):
+      num_strings = 0
+      if tok.is_pseudo:
+        tok = tok.next_token
+      while tok.is_string:
+        num_strings += 1
+        tok = tok.next_token
+      return num_strings > 1
+
+    def DictValueIsContainer(opening, closing):
+      """Return true if the dictionary value is a container."""
+      if not opening or not closing:
+        return False
+      colon = opening.previous_token
+      while colon:
+        if not colon.is_pseudo:
+          break
+        colon = colon.previous_token
+      if not colon or colon.value != ':':
+        return False
+      key = colon.previous_token
+      if not key:
+        return False
+      return subtypes.DICTIONARY_KEY_PART in key.subtypes
+
+    closing = opening.matching_bracket
+    entry_start = opening.next_token
+    current = opening.next_token.next_token
+
+    while current and current != closing:
+      if subtypes.DICTIONARY_KEY in current.subtypes:
+        prev = PreviousNonCommentToken(current)
+        if prev.value == ',':
+          prev = PreviousNonCommentToken(prev.previous_token)
+        if not DictValueIsContainer(prev.matching_bracket, prev):
+          length = prev.total_length - entry_start.total_length
+          length += len(entry_start.value)
+          if length + self.stack[-2].indent >= self.column_limit:
+            return False
+        entry_start = current
+      if current.OpensScope():
+        if ((current.value == '{' or
+             (current.is_pseudo and current.next_token.value == '{') and
+             subtypes.DICTIONARY_VALUE in current.subtypes) or
+            ImplicitStringConcatenation(current)):
+          # A dictionary entry that cannot fit on a single line shouldn't matter
+          # to this calculation. 
If it can't fit on a single line, then the + # opening should be on the same line as the key and the rest on + # newlines after it. But the other entries should be on single lines + # if possible. + if current.matching_bracket: + current = current.matching_bracket + while current: + if current == closing: + return True + if subtypes.DICTIONARY_KEY in current.subtypes: + entry_start = current + break + current = current.next_token + else: + current = current.matching_bracket + else: + current = current.next_token - def _ContainerFitsOnStartLine( self, opening ): - """Check if the container can fit on its starting line.""" - return ( - opening.matching_bracket.total_length - opening.total_length + - self.stack[ -1 ].indent ) <= self.column_limit + # At this point, current is the closing bracket. Go back one to get the end + # of the dictionary entry. + current = PreviousNonCommentToken(current) + length = current.total_length - entry_start.total_length + length += len(entry_start.value) + return length + self.stack[-2].indent <= self.column_limit + + def _ArgumentListHasDictionaryEntry(self, token): + """Check if the function argument list has a dictionary as an arg.""" + if _IsArgumentToFunction(token): + while token: + if token.value == '{': + length = token.matching_bracket.total_length - token.total_length + return length + self.stack[-2].indent > self.column_limit + if token.ClosesScope(): + break + if token.OpensScope(): + token = token.matching_bracket + token = token.next_token + return False + + def _ContainerFitsOnStartLine(self, opening): + """Check if the container can fit on its starting line.""" + return ( + opening.matching_bracket.total_length - opening.total_length + + self.stack[-1].indent) <= self.column_limit _COMPOUND_STMTS = frozenset( - { 'for', 'while', 'if', 'elif', 'with', 'except', 'def', 'class' } ) + {'for', 'while', 'if', 'elif', 'with', 'except', 'def', 'class'}) -def _IsCompoundStatement( token ): - if token.value == 'async': - token = 
token.next_token - return token.value in _COMPOUND_STMTS +def _IsCompoundStatement(token): + if token.value == 'async': + token = token.next_token + return token.value in _COMPOUND_STMTS -def _IsFunctionDef( token ): - if token.value == 'async': - token = token.next_token - return token.value == 'def' +def _IsFunctionDef(token): + if token.value == 'async': + token = token.next_token + return token.value == 'def' -def _IsFunctionCallWithArguments( token ): - while token: - if token.value == '(': - token = token.next_token - return token and token.value != ')' - elif token.name not in { 'NAME', 'DOT', 'EQUAL' }: - break - token = token.next_token - return False +def _IsFunctionCallWithArguments(token): + while token: + if token.value == '(': + token = token.next_token + return token and token.value != ')' + elif token.name not in {'NAME', 'DOT', 'EQUAL'}: + break + token = token.next_token + return False -def _IsArgumentToFunction( token ): - bracket = logical_line.IsSurroundedByBrackets( token ) - if not bracket or bracket.value != '(': - return False - previous = bracket.previous_token - return previous and previous.is_name +def _IsArgumentToFunction(token): + bracket = logical_line.IsSurroundedByBrackets(token) + if not bracket or bracket.value != '(': + return False + previous = bracket.previous_token + return previous and previous.is_name -def _GetOpeningBracket( current ): - """Get the opening bracket containing the current token.""" - if current.matching_bracket and not current.is_pseudo: - return current if current.OpensScope() else current.matching_bracket +def _GetOpeningBracket(current): + """Get the opening bracket containing the current token.""" + if current.matching_bracket and not current.is_pseudo: + return current if current.OpensScope() else current.matching_bracket - while current: - if current.ClosesScope(): - current = current.matching_bracket - elif current.is_pseudo: - current = current.previous_token - elif current.OpensScope(): - return 
current - current = current.previous_token - return None + while current: + if current.ClosesScope(): + current = current.matching_bracket + elif current.is_pseudo: + current = current.previous_token + elif current.OpensScope(): + return current + current = current.previous_token + return None -def _LastTokenInLine( current ): - while not current.is_comment and current.next_token: - current = current.next_token - return current +def _LastTokenInLine(current): + while not current.is_comment and current.next_token: + current = current.next_token + return current -def _IsFunctionDefinition( current ): - prev = current.previous_token - return current.value == '(' and prev and subtypes.FUNC_DEF in prev.subtypes +def _IsFunctionDefinition(current): + prev = current.previous_token + return current.value == '(' and prev and subtypes.FUNC_DEF in prev.subtypes -def _IsLastScopeInLine( current ): - current = current.matching_bracket - while current: - current = current.next_token - if current and current.OpensScope(): - return False - return True +def _IsLastScopeInLine(current): + current = current.matching_bracket + while current: + current = current.next_token + if current and current.OpensScope(): + return False + return True -def _IsSingleElementTuple( token ): - """Check if it's a single-element tuple.""" - close = token.matching_bracket - token = token.next_token - num_commas = 0 - while token != close: - if token.value == ',': - num_commas += 1 - token = token.matching_bracket if token.OpensScope() else token.next_token - return num_commas == 1 +def _IsSingleElementTuple(token): + """Check if it's a single-element tuple.""" + close = token.matching_bracket + token = token.next_token + num_commas = 0 + while token != close: + if token.value == ',': + num_commas += 1 + token = token.matching_bracket if token.OpensScope() else token.next_token + return num_commas == 1 -def _ScopeHasNoCommas( token ): - """Check if the scope has no commas.""" - close = 
token.matching_bracket - token = token.next_token - while token != close: - if token.value == ',': - return False - token = token.matching_bracket if token.OpensScope() else token.next_token - return True +def _ScopeHasNoCommas(token): + """Check if the scope has no commas.""" + close = token.matching_bracket + token = token.next_token + while token != close: + if token.value == ',': + return False + token = token.matching_bracket if token.OpensScope() else token.next_token + return True -class _ParenState( object ): - """Maintains the state of the bracket enclosures. +class _ParenState(object): + """Maintains the state of the bracket enclosures. A stack of _ParenState objects are kept so that we know how to indent relative to the brackets. @@ -1217,34 +1210,34 @@ class _ParenState( object ): Each subsequent line split gets an increasing penalty. """ - # TODO(morbo): This doesn't track "bin packing." - - def __init__( self, indent, last_space ): - self.indent = indent - self.last_space = last_space - self.closing_scope_indent = 0 - self.split_before_closing_bracket = False - self.num_line_splits = 0 - - def Clone( self ): - state = _ParenState( self.indent, self.last_space ) - state.closing_scope_indent = self.closing_scope_indent - state.split_before_closing_bracket = self.split_before_closing_bracket - state.num_line_splits = self.num_line_splits - return state - - def __repr__( self ): - return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % ( - self.indent, self.last_space, self.closing_scope_indent ) - - def __eq__( self, other ): - return hash( self ) == hash( other ) - - def __ne__( self, other ): - return not self == other - - def __hash__( self, *args, **kwargs ): - return hash( - ( - self.indent, self.last_space, self.closing_scope_indent, - self.split_before_closing_bracket, self.num_line_splits ) ) + # TODO(morbo): This doesn't track "bin packing." 
+ + def __init__(self, indent, last_space): + self.indent = indent + self.last_space = last_space + self.closing_scope_indent = 0 + self.split_before_closing_bracket = False + self.num_line_splits = 0 + + def Clone(self): + state = _ParenState(self.indent, self.last_space) + state.closing_scope_indent = self.closing_scope_indent + state.split_before_closing_bracket = self.split_before_closing_bracket + state.num_line_splits = self.num_line_splits + return state + + def __repr__(self): + return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % ( + self.indent, self.last_space, self.closing_scope_indent) + + def __eq__(self, other): + return hash(self) == hash(other) + + def __ne__(self, other): + return not self == other + + def __hash__(self, *args, **kwargs): + return hash( + ( + self.indent, self.last_space, self.closing_scope_indent, + self.split_before_closing_bracket, self.num_line_splits)) diff --git a/yapf/yapflib/format_token.py b/yapf/yapflib/format_token.py index 3dd570ef4..382f5f938 100644 --- a/yapf/yapflib/format_token.py +++ b/yapf/yapflib/format_token.py @@ -25,12 +25,12 @@ CONTINUATION = token.N_TOKENS -_OPENING_BRACKETS = frozenset( { '(', '[', '{' } ) -_CLOSING_BRACKETS = frozenset( { ')', ']', '}' } ) +_OPENING_BRACKETS = frozenset({'(', '[', '{'}) +_CLOSING_BRACKETS = frozenset({')', ']', '}'}) -def _TabbedContinuationAlignPadding( spaces, align_style, tab_width ): - """Build padding string for continuation alignment in tabbed indentation. +def _TabbedContinuationAlignPadding(spaces, align_style, tab_width): + """Build padding string for continuation alignment in tabbed indentation. Arguments: spaces: (int) The number of spaces to place before the token for alignment. @@ -40,15 +40,15 @@ def _TabbedContinuationAlignPadding( spaces, align_style, tab_width ): Returns: A padding string for alignment with style specified by align_style option. 
""" - if align_style in ( 'FIXED', 'VALIGN-RIGHT' ): - if spaces > 0: - return '\t' * int( ( spaces + tab_width - 1 ) / tab_width ) - return '' - return ' ' * spaces + if align_style in ('FIXED', 'VALIGN-RIGHT'): + if spaces > 0: + return '\t' * int((spaces + tab_width - 1) / tab_width) + return '' + return ' ' * spaces -class FormatToken( object ): - """Enhanced token information for formatting. +class FormatToken(object): + """Enhanced token information for formatting. This represents the token plus additional information useful for reformatting the code. @@ -83,57 +83,58 @@ class FormatToken( object ): newlines: The number of newlines needed before this token. """ - def __init__( self, node, name ): - """Constructor. + def __init__(self, node, name): + """Constructor. Arguments: node: (pytree.Leaf) The node that's being wrapped. name: (string) The name of the node. """ - self.node = node - self.name = name - self.type = node.type - self.column = node.column - self.lineno = node.lineno - self.value = node.value - - if self.is_continuation: - self.value = node.value.rstrip() - - self.next_token = None - self.previous_token = None - self.matching_bracket = None - self.parameters = [] - self.container_opening = None - self.container_elements = [] - self.whitespace_prefix = '' - self.total_length = 0 - self.split_penalty = 0 - self.can_break_before = False - self.must_break_before = pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.MUST_SPLIT, default = False ) - self.newlines = pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.NEWLINES ) - self.spaces_required_before = 0 - - if self.is_comment: - self.spaces_required_before = style.Get( 'SPACES_BEFORE_COMMENT' ) - - stypes = pytree_utils.GetNodeAnnotation( node, pytree_utils.Annotation.SUBTYPE ) - self.subtypes = { subtypes.NONE } if not stypes else stypes - self.is_pseudo = hasattr( node, 'is_pseudo' ) and node.is_pseudo - - @property - def formatted_whitespace_prefix( self ): - if 
style.Get( 'INDENT_BLANK_LINES' ): - without_newlines = self.whitespace_prefix.lstrip( '\n' ) - height = len( self.whitespace_prefix ) - len( without_newlines ) - if height: - return ( '\n' + without_newlines ) * height - return self.whitespace_prefix - - def AddWhitespacePrefix( self, newlines_before, spaces = 0, indent_level = 0 ): - """Register a token's whitespace prefix. + self.node = node + self.name = name + self.type = node.type + self.column = node.column + self.lineno = node.lineno + self.value = node.value + + if self.is_continuation: + self.value = node.value.rstrip() + + self.next_token = None + self.previous_token = None + self.matching_bracket = None + self.parameters = [] + self.container_opening = None + self.container_elements = [] + self.whitespace_prefix = '' + self.total_length = 0 + self.split_penalty = 0 + self.can_break_before = False + self.must_break_before = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.MUST_SPLIT, default=False) + self.newlines = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.NEWLINES) + self.spaces_required_before = 0 + + if self.is_comment: + self.spaces_required_before = style.Get('SPACES_BEFORE_COMMENT') + + stypes = pytree_utils.GetNodeAnnotation( + node, pytree_utils.Annotation.SUBTYPE) + self.subtypes = {subtypes.NONE} if not stypes else stypes + self.is_pseudo = hasattr(node, 'is_pseudo') and node.is_pseudo + + @property + def formatted_whitespace_prefix(self): + if style.Get('INDENT_BLANK_LINES'): + without_newlines = self.whitespace_prefix.lstrip('\n') + height = len(self.whitespace_prefix) - len(without_newlines) + if height: + return ('\n' + without_newlines) * height + return self.whitespace_prefix + + def AddWhitespacePrefix(self, newlines_before, spaces=0, indent_level=0): + """Register a token's whitespace prefix. This is the whitespace that will be output before a token's string. 
@@ -142,196 +143,196 @@ def AddWhitespacePrefix( self, newlines_before, spaces = 0, indent_level = 0 ): spaces: (int) The number of spaces to place before the token. indent_level: (int) The indentation level. """ - if style.Get( 'USE_TABS' ): - if newlines_before > 0: - indent_before = '\t' * indent_level + _TabbedContinuationAlignPadding( - spaces, style.Get( 'CONTINUATION_ALIGN_STYLE' ), - style.Get( 'INDENT_WIDTH' ) ) - else: - indent_before = '\t' * indent_level + ' ' * spaces - else: - indent_before = ( - ' ' * indent_level * style.Get( 'INDENT_WIDTH' ) + ' ' * spaces ) - - if self.is_comment: - comment_lines = [ s.lstrip() for s in self.value.splitlines() ] - self.value = ( '\n' + indent_before ).join( comment_lines ) - - # Update our own value since we are changing node value - self.value = self.value - - if not self.whitespace_prefix: - self.whitespace_prefix = ( - '\n' * ( self.newlines or newlines_before ) + indent_before ) - else: - self.whitespace_prefix += indent_before - - def AdjustNewlinesBefore( self, newlines_before ): - """Change the number of newlines before this token.""" - self.whitespace_prefix = ( - '\n' * newlines_before + self.whitespace_prefix.lstrip( '\n' ) ) - - def RetainHorizontalSpacing( self, first_column, depth ): - """Retains a token's horizontal spacing.""" - previous = self.previous_token - if not previous: - return - - if previous.is_pseudo: - previous = previous.previous_token - if not previous: - return - - cur_lineno = self.lineno - prev_lineno = previous.lineno - if previous.is_multiline_string: - prev_lineno += previous.value.count( '\n' ) - - if ( cur_lineno != prev_lineno or - ( previous.is_pseudo and previous.value != ')' and - cur_lineno != previous.previous_token.lineno ) ): - self.spaces_required_before = ( - self.column - first_column + depth * style.Get( 'INDENT_WIDTH' ) ) - return - - cur_column = self.column - prev_column = previous.column - prev_len = len( previous.value ) - - if previous.is_pseudo and 
previous.value == ')': - prev_column -= 1 - prev_len = 0 - - if previous.is_multiline_string: - prev_len = len( previous.value.split( '\n' )[ -1 ] ) - if '\n' in previous.value: - prev_column = 0 # Last line starts in column 0. - - self.spaces_required_before = cur_column - ( prev_column + prev_len ) - - def OpensScope( self ): - return self.value in _OPENING_BRACKETS - - def ClosesScope( self ): - return self.value in _CLOSING_BRACKETS - - def AddSubtype( self, subtype ): - self.subtypes.add( subtype ) - - def __repr__( self ): - msg = ( - 'FormatToken(name={0}, value={1}, column={2}, lineno={3}, ' - 'splitpenalty={4}'.format( - 'DOCSTRING' if self.is_docstring else self.name, self.value, - self.column, self.lineno, self.split_penalty ) ) - msg += ', pseudo)' if self.is_pseudo else ')' - return msg - - @property - def node_split_penalty( self ): - """Split penalty attached to the pytree node of this token.""" - return pytree_utils.GetNodeAnnotation( - self.node, pytree_utils.Annotation.SPLIT_PENALTY, default = 0 ) - - @property - def is_binary_op( self ): - """Token is a binary operator.""" - return subtypes.BINARY_OPERATOR in self.subtypes - - @property - @py3compat.lru_cache() - def is_arithmetic_op( self ): - """Token is an arithmetic operator.""" - return self.value in frozenset( - { - '+', # Add - '-', # Subtract - '*', # Multiply - '@', # Matrix Multiply - '/', # Divide - '//', # Floor Divide - '%', # Modulo - '<<', # Left Shift - '>>', # Right Shift - '|', # Bitwise Or - '&', # Bitwise Add - '^', # Bitwise Xor - '**', # Power - } ) - - @property - def is_simple_expr( self ): - """Token is an operator in a simple expression.""" - return subtypes.SIMPLE_EXPRESSION in self.subtypes - - @property - def is_subscript_colon( self ): - """Token is a subscript colon.""" - return subtypes.SUBSCRIPT_COLON in self.subtypes - - @property - def is_comment( self ): - return self.type == token.COMMENT - - @property - def is_continuation( self ): - return self.type == 
CONTINUATION - - @property - @py3compat.lru_cache() - def is_keyword( self ): - return keyword.iskeyword( self.value ) - - @property - def is_name( self ): - return self.type == token.NAME and not self.is_keyword - - @property - def is_number( self ): - return self.type == token.NUMBER - - @property - def is_string( self ): - return self.type == token.STRING - - @property - def is_multiline_string( self ): - """Test if this string is a multiline string. + if style.Get('USE_TABS'): + if newlines_before > 0: + indent_before = '\t' * indent_level + _TabbedContinuationAlignPadding( + spaces, style.Get('CONTINUATION_ALIGN_STYLE'), + style.Get('INDENT_WIDTH')) + else: + indent_before = '\t' * indent_level + ' ' * spaces + else: + indent_before = ( + ' ' * indent_level * style.Get('INDENT_WIDTH') + ' ' * spaces) + + if self.is_comment: + comment_lines = [s.lstrip() for s in self.value.splitlines()] + self.value = ('\n' + indent_before).join(comment_lines) + + # Update our own value since we are changing node value + self.value = self.value + + if not self.whitespace_prefix: + self.whitespace_prefix = ( + '\n' * (self.newlines or newlines_before) + indent_before) + else: + self.whitespace_prefix += indent_before + + def AdjustNewlinesBefore(self, newlines_before): + """Change the number of newlines before this token.""" + self.whitespace_prefix = ( + '\n' * newlines_before + self.whitespace_prefix.lstrip('\n')) + + def RetainHorizontalSpacing(self, first_column, depth): + """Retains a token's horizontal spacing.""" + previous = self.previous_token + if not previous: + return + + if previous.is_pseudo: + previous = previous.previous_token + if not previous: + return + + cur_lineno = self.lineno + prev_lineno = previous.lineno + if previous.is_multiline_string: + prev_lineno += previous.value.count('\n') + + if (cur_lineno != prev_lineno or + (previous.is_pseudo and previous.value != ')' and + cur_lineno != previous.previous_token.lineno)): + self.spaces_required_before = ( 
+ self.column - first_column + depth * style.Get('INDENT_WIDTH')) + return + + cur_column = self.column + prev_column = previous.column + prev_len = len(previous.value) + + if previous.is_pseudo and previous.value == ')': + prev_column -= 1 + prev_len = 0 + + if previous.is_multiline_string: + prev_len = len(previous.value.split('\n')[-1]) + if '\n' in previous.value: + prev_column = 0 # Last line starts in column 0. + + self.spaces_required_before = cur_column - (prev_column + prev_len) + + def OpensScope(self): + return self.value in _OPENING_BRACKETS + + def ClosesScope(self): + return self.value in _CLOSING_BRACKETS + + def AddSubtype(self, subtype): + self.subtypes.add(subtype) + + def __repr__(self): + msg = ( + 'FormatToken(name={0}, value={1}, column={2}, lineno={3}, ' + 'splitpenalty={4}'.format( + 'DOCSTRING' if self.is_docstring else self.name, self.value, + self.column, self.lineno, self.split_penalty)) + msg += ', pseudo)' if self.is_pseudo else ')' + return msg + + @property + def node_split_penalty(self): + """Split penalty attached to the pytree node of this token.""" + return pytree_utils.GetNodeAnnotation( + self.node, pytree_utils.Annotation.SPLIT_PENALTY, default=0) + + @property + def is_binary_op(self): + """Token is a binary operator.""" + return subtypes.BINARY_OPERATOR in self.subtypes + + @property + @py3compat.lru_cache() + def is_arithmetic_op(self): + """Token is an arithmetic operator.""" + return self.value in frozenset( + { + '+', # Add + '-', # Subtract + '*', # Multiply + '@', # Matrix Multiply + '/', # Divide + '//', # Floor Divide + '%', # Modulo + '<<', # Left Shift + '>>', # Right Shift + '|', # Bitwise Or + '&', # Bitwise Add + '^', # Bitwise Xor + '**', # Power + }) + + @property + def is_simple_expr(self): + """Token is an operator in a simple expression.""" + return subtypes.SIMPLE_EXPRESSION in self.subtypes + + @property + def is_subscript_colon(self): + """Token is a subscript colon.""" + return subtypes.SUBSCRIPT_COLON 
in self.subtypes + + @property + def is_comment(self): + return self.type == token.COMMENT + + @property + def is_continuation(self): + return self.type == CONTINUATION + + @property + @py3compat.lru_cache() + def is_keyword(self): + return keyword.iskeyword(self.value) + + @property + def is_name(self): + return self.type == token.NAME and not self.is_keyword + + @property + def is_number(self): + return self.type == token.NUMBER + + @property + def is_string(self): + return self.type == token.STRING + + @property + def is_multiline_string(self): + """Test if this string is a multiline string. Returns: A multiline string always ends with triple quotes, so if it is a string token, inspect the last 3 characters and return True if it is a triple double or triple single quote mark. """ - return self.is_string and self.value.endswith( ( '"""', "'''" ) ) - - @property - def is_docstring( self ): - return self.is_string and self.previous_token is None - - @property - def is_pylint_comment( self ): - return self.is_comment and re.match( - r'#.*\bpylint:\s*(disable|enable)=', self.value ) - - @property - def is_pytype_comment( self ): - return self.is_comment and re.match( - r'#.*\bpytype:\s*(disable|enable)=', self.value ) - - @property - def is_copybara_comment( self ): - return self.is_comment and re.match( - r'#.*\bcopybara:\s*(strip|insert|replace)', self.value ) - - @property - def is_assign( self ): - return subtypes.ASSIGN_OPERATOR in self.subtypes - - @property - def is_augassign( self ): - augassigns = { - '+=', '-=', '*=', '@=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=', '**=', - '//=' - } - return self.value in augassigns + return self.is_string and self.value.endswith(('"""', "'''")) + + @property + def is_docstring(self): + return self.is_string and self.previous_token is None + + @property + def is_pylint_comment(self): + return self.is_comment and re.match( + r'#.*\bpylint:\s*(disable|enable)=', self.value) + + @property + def is_pytype_comment(self): + 
return self.is_comment and re.match( + r'#.*\bpytype:\s*(disable|enable)=', self.value) + + @property + def is_copybara_comment(self): + return self.is_comment and re.match( + r'#.*\bcopybara:\s*(strip|insert|replace)', self.value) + + @property + def is_assign(self): + return subtypes.ASSIGN_OPERATOR in self.subtypes + + @property + def is_augassign(self): + augassigns = { + '+=', '-=', '*=', '@=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=', + '**=', '//=' + } + return self.value in augassigns diff --git a/yapf/yapflib/identify_container.py b/yapf/yapflib/identify_container.py index 049694a77..d027cc5d4 100644 --- a/yapf/yapflib/identify_container.py +++ b/yapf/yapflib/identify_container.py @@ -25,45 +25,45 @@ from yapf.pytree import pytree_visitor -def IdentifyContainers( tree ): - """Run the identify containers visitor over the tree, modifying it in place. +def IdentifyContainers(tree): + """Run the identify containers visitor over the tree, modifying it in place. Arguments: tree: the top-level pytree node to annotate with subtypes. 
""" - identify_containers = _IdentifyContainers() - identify_containers.Visit( tree ) + identify_containers = _IdentifyContainers() + identify_containers.Visit(tree) -class _IdentifyContainers( pytree_visitor.PyTreeVisitor ): - """_IdentifyContainers - see file-level docstring for detailed description.""" +class _IdentifyContainers(pytree_visitor.PyTreeVisitor): + """_IdentifyContainers - see file-level docstring for detailed description.""" - def Visit_trailer( self, node ): # pylint: disable=invalid-name - for child in node.children: - self.Visit( child ) + def Visit_trailer(self, node): # pylint: disable=invalid-name + for child in node.children: + self.Visit(child) - if len( node.children ) != 3: - return - if node.children[ 0 ].type != grammar_token.LPAR: - return + if len(node.children) != 3: + return + if node.children[0].type != grammar_token.LPAR: + return - if pytree_utils.NodeName( node.children[ 1 ] ) == 'arglist': - for child in node.children[ 1 ].children: - pytree_utils.SetOpeningBracket( - pytree_utils.FirstLeafNode( child ), node.children[ 0 ] ) - else: - pytree_utils.SetOpeningBracket( - pytree_utils.FirstLeafNode( node.children[ 1 ] ), node.children[ 0 ] ) + if pytree_utils.NodeName(node.children[1]) == 'arglist': + for child in node.children[1].children: + pytree_utils.SetOpeningBracket( + pytree_utils.FirstLeafNode(child), node.children[0]) + else: + pytree_utils.SetOpeningBracket( + pytree_utils.FirstLeafNode(node.children[1]), node.children[0]) - def Visit_atom( self, node ): # pylint: disable=invalid-name - for child in node.children: - self.Visit( child ) + def Visit_atom(self, node): # pylint: disable=invalid-name + for child in node.children: + self.Visit(child) - if len( node.children ) != 3: - return - if node.children[ 0 ].type != grammar_token.LPAR: - return + if len(node.children) != 3: + return + if node.children[0].type != grammar_token.LPAR: + return - for child in node.children[ 1 ].children: - pytree_utils.SetOpeningBracket( - 
pytree_utils.FirstLeafNode( child ), node.children[ 0 ] ) + for child in node.children[1].children: + pytree_utils.SetOpeningBracket( + pytree_utils.FirstLeafNode(child), node.children[0]) diff --git a/yapf/yapflib/line_joiner.py b/yapf/yapflib/line_joiner.py index 8a2911397..f0acd2f37 100644 --- a/yapf/yapflib/line_joiner.py +++ b/yapf/yapflib/line_joiner.py @@ -36,11 +36,11 @@ from yapf.yapflib import style -_CLASS_OR_FUNC = frozenset( { 'def', 'class' } ) +_CLASS_OR_FUNC = frozenset({'def', 'class'}) -def CanMergeMultipleLines( lines, last_was_merged = False ): - """Determine if multiple lines can be joined into one. +def CanMergeMultipleLines(lines, last_was_merged=False): + """Determine if multiple lines can be joined into one. Arguments: lines: (list of LogicalLine) This is a splice of LogicalLines from the full @@ -51,39 +51,39 @@ def CanMergeMultipleLines( lines, last_was_merged = False ): True if two consecutive lines can be joined together. In reality, this will only happen if two consecutive lines can be joined, due to the style guide. """ - # The indentation amount for the starting line (number of spaces). - indent_amt = lines[ 0 ].depth * style.Get( 'INDENT_WIDTH' ) - if len( lines ) == 1 or indent_amt > style.Get( 'COLUMN_LIMIT' ): - return False - - if ( len( lines ) >= 3 and lines[ 2 ].depth >= lines[ 1 ].depth and - lines[ 0 ].depth != lines[ 2 ].depth ): - # If lines[2]'s depth is greater than or equal to line[1]'s depth, we're not - # looking at a single statement (e.g., if-then, while, etc.). A following - # line with the same depth as the first line isn't part of the lines we - # would want to combine. - return False # Don't merge more than two lines together. - - if lines[ 0 ].first.value in _CLASS_OR_FUNC: - # Don't join lines onto the starting line of a class or function. 
- return False - - limit = style.Get( 'COLUMN_LIMIT' ) - indent_amt - if lines[ 0 ].last.total_length < limit: - limit -= lines[ 0 ].last.total_length - - if lines[ 0 ].first.value == 'if': - return _CanMergeLineIntoIfStatement( lines, limit ) - if last_was_merged and lines[ 0 ].first.value in { 'elif', 'else' }: - return _CanMergeLineIntoIfStatement( lines, limit ) - - # TODO(morbo): Other control statements? + # The indentation amount for the starting line (number of spaces). + indent_amt = lines[0].depth * style.Get('INDENT_WIDTH') + if len(lines) == 1 or indent_amt > style.Get('COLUMN_LIMIT'): + return False + + if (len(lines) >= 3 and lines[2].depth >= lines[1].depth and + lines[0].depth != lines[2].depth): + # If lines[2]'s depth is greater than or equal to line[1]'s depth, we're not + # looking at a single statement (e.g., if-then, while, etc.). A following + # line with the same depth as the first line isn't part of the lines we + # would want to combine. + return False # Don't merge more than two lines together. + if lines[0].first.value in _CLASS_OR_FUNC: + # Don't join lines onto the starting line of a class or function. return False + limit = style.Get('COLUMN_LIMIT') - indent_amt + if lines[0].last.total_length < limit: + limit -= lines[0].last.total_length + + if lines[0].first.value == 'if': + return _CanMergeLineIntoIfStatement(lines, limit) + if last_was_merged and lines[0].first.value in {'elif', 'else'}: + return _CanMergeLineIntoIfStatement(lines, limit) + + # TODO(morbo): Other control statements? -def _CanMergeLineIntoIfStatement( lines, limit ): - """Determine if we can merge a short if-then statement into one line. + return False + + +def _CanMergeLineIntoIfStatement(lines, limit): + """Determine if we can merge a short if-then statement into one line. 
Two lines of an if-then statement can be merged if they were that way in the original source, fit on the line without going over the column limit, and are @@ -97,13 +97,13 @@ def _CanMergeLineIntoIfStatement( lines, limit ): Returns: True if the lines can be merged, False otherwise. """ - if len( lines[ 1 ].tokens ) == 1 and lines[ 1 ].last.is_multiline_string: - # This might be part of a multiline shebang. - return True - if lines[ 0 ].lineno != lines[ 1 ].lineno: - # Don't merge lines if the original lines weren't merged. - return False - if lines[ 1 ].last.total_length >= limit: - # Don't merge lines if the result goes over the column limit. - return False - return style.Get( 'JOIN_MULTIPLE_LINES' ) + if len(lines[1].tokens) == 1 and lines[1].last.is_multiline_string: + # This might be part of a multiline shebang. + return True + if lines[0].lineno != lines[1].lineno: + # Don't merge lines if the original lines weren't merged. + return False + if lines[1].last.total_length >= limit: + # Don't merge lines if the result goes over the column limit. + return False + return style.Get('JOIN_MULTIPLE_LINES') diff --git a/yapf/yapflib/logical_line.py b/yapf/yapflib/logical_line.py index b02e3588b..477d4d625 100644 --- a/yapf/yapflib/logical_line.py +++ b/yapf/yapflib/logical_line.py @@ -29,8 +29,8 @@ from lib2to3.fixer_util import syms as python_symbols -class LogicalLine( object ): - """Represents a single logical line in the output. +class LogicalLine(object): + """Represents a single logical line in the output. Attributes: depth: indentation depth of this line. This is just a numeric value used to @@ -38,8 +38,8 @@ class LogicalLine( object ): actual amount of spaces, which is style-dependent. """ - def __init__( self, depth, tokens = None ): - """Constructor. + def __init__(self, depth, tokens=None): + """Constructor. Creates a new logical line with the given depth an initial list of tokens. 
Constructs the doubly-linked lists for format tokens using their built-in @@ -49,108 +49,108 @@ def __init__( self, depth, tokens = None ): depth: indentation depth of this line tokens: initial list of tokens """ - self.depth = depth - self._tokens = tokens or [] - self.disable = False - - if self._tokens: - # Set up a doubly linked list. - for index, tok in enumerate( self._tokens[ 1 : ] ): - # Note, 'index' is the index to the previous token. - tok.previous_token = self._tokens[ index ] - self._tokens[ index ].next_token = tok - - def CalculateFormattingInformation( self ): - """Calculate the split penalty and total length for the tokens.""" - # Say that the first token in the line should have a space before it. This - # means only that if this logical line is joined with a predecessor line, - # then there will be a space between them. - self.first.spaces_required_before = 1 - self.first.total_length = len( self.first.value ) - - prev_token = self.first - prev_length = self.first.total_length - for token in self._tokens[ 1 : ]: - if ( token.spaces_required_before == 0 and - _SpaceRequiredBetween( prev_token, token, self.disable ) ): - token.spaces_required_before = 1 - - tok_len = len( token.value ) if not token.is_pseudo else 0 - - spaces_required_before = token.spaces_required_before - if isinstance( spaces_required_before, list ): - assert token.is_comment, token - - # If here, we are looking at a comment token that appears on a line - # with other tokens (but because it is a comment, it is always the last - # token). Rather than specifying the actual number of spaces here, - # hard code a value of 0 and then set it later. This logic only works - # because this comment token is guaranteed to be the last token in the - # list. - spaces_required_before = 0 - - token.total_length = prev_length + tok_len + spaces_required_before - - # The split penalty has to be computed before {must|can}_break_before, - # because these may use it for their decision. 
- token.split_penalty += _SplitPenalty( prev_token, token ) - token.must_break_before = _MustBreakBefore( prev_token, token ) - token.can_break_before = ( - token.must_break_before or _CanBreakBefore( prev_token, token ) ) - - prev_length = token.total_length - prev_token = token - - def Split( self ): - """Split the line at semicolons.""" - if not self.has_semicolon or self.disable: - return [ self ] - - llines = [] - lline = LogicalLine( self.depth ) - for tok in self._tokens: - if tok.value == ';': - llines.append( lline ) - lline = LogicalLine( self.depth ) - else: - lline.AppendToken( tok ) - - if lline.tokens: - llines.append( lline ) - - for lline in llines: - lline.first.previous_token = None - lline.last.next_token = None - - return llines - - ############################################################################ - # Token Access and Manipulation Methods # - ############################################################################ - - def AppendToken( self, token ): - """Append a new FormatToken to the tokens contained in this line.""" - if self._tokens: - token.previous_token = self.last - self.last.next_token = token - self._tokens.append( token ) - - @property - def first( self ): - """Returns the first non-whitespace token.""" - return self._tokens[ 0 ] - - @property - def last( self ): - """Returns the last non-whitespace token.""" - return self._tokens[ -1 ] - - ############################################################################ - # Token -> String Methods # - ############################################################################ - - def AsCode( self, indent_per_depth = 2 ): - """Return a "code" representation of this line. + self.depth = depth + self._tokens = tokens or [] + self.disable = False + + if self._tokens: + # Set up a doubly linked list. + for index, tok in enumerate(self._tokens[1:]): + # Note, 'index' is the index to the previous token. 
+ tok.previous_token = self._tokens[index] + self._tokens[index].next_token = tok + + def CalculateFormattingInformation(self): + """Calculate the split penalty and total length for the tokens.""" + # Say that the first token in the line should have a space before it. This + # means only that if this logical line is joined with a predecessor line, + # then there will be a space between them. + self.first.spaces_required_before = 1 + self.first.total_length = len(self.first.value) + + prev_token = self.first + prev_length = self.first.total_length + for token in self._tokens[1:]: + if (token.spaces_required_before == 0 and + _SpaceRequiredBetween(prev_token, token, self.disable)): + token.spaces_required_before = 1 + + tok_len = len(token.value) if not token.is_pseudo else 0 + + spaces_required_before = token.spaces_required_before + if isinstance(spaces_required_before, list): + assert token.is_comment, token + + # If here, we are looking at a comment token that appears on a line + # with other tokens (but because it is a comment, it is always the last + # token). Rather than specifying the actual number of spaces here, + # hard code a value of 0 and then set it later. This logic only works + # because this comment token is guaranteed to be the last token in the + # list. + spaces_required_before = 0 + + token.total_length = prev_length + tok_len + spaces_required_before + + # The split penalty has to be computed before {must|can}_break_before, + # because these may use it for their decision. 
+ token.split_penalty += _SplitPenalty(prev_token, token) + token.must_break_before = _MustBreakBefore(prev_token, token) + token.can_break_before = ( + token.must_break_before or _CanBreakBefore(prev_token, token)) + + prev_length = token.total_length + prev_token = token + + def Split(self): + """Split the line at semicolons.""" + if not self.has_semicolon or self.disable: + return [self] + + llines = [] + lline = LogicalLine(self.depth) + for tok in self._tokens: + if tok.value == ';': + llines.append(lline) + lline = LogicalLine(self.depth) + else: + lline.AppendToken(tok) + + if lline.tokens: + llines.append(lline) + + for lline in llines: + lline.first.previous_token = None + lline.last.next_token = None + + return llines + + ############################################################################ + # Token Access and Manipulation Methods # + ############################################################################ + + def AppendToken(self, token): + """Append a new FormatToken to the tokens contained in this line.""" + if self._tokens: + token.previous_token = self.last + self.last.next_token = token + self._tokens.append(token) + + @property + def first(self): + """Returns the first non-whitespace token.""" + return self._tokens[0] + + @property + def last(self): + """Returns the last non-whitespace token.""" + return self._tokens[-1] + + ############################################################################ + # Token -> String Methods # + ############################################################################ + + def AsCode(self, indent_per_depth=2): + """Return a "code" representation of this line. The code representation shows how the line would be printed out as code. @@ -164,518 +164,516 @@ def AsCode( self, indent_per_depth = 2 ): Returns: A string representing the line as code. 
""" - indent = ' ' * indent_per_depth * self.depth - tokens_str = ' '.join( tok.value for tok in self._tokens ) - return indent + tokens_str + indent = ' ' * indent_per_depth * self.depth + tokens_str = ' '.join(tok.value for tok in self._tokens) + return indent + tokens_str - def __str__( self ): # pragma: no cover - return self.AsCode() + def __str__(self): # pragma: no cover + return self.AsCode() - def __repr__( self ): # pragma: no cover - tokens_repr = ','.join( - '{0}({1!r})'.format( tok.name, tok.value ) for tok in self._tokens ) - return 'LogicalLine(depth={0}, tokens=[{1}])'.format( self.depth, tokens_repr ) + def __repr__(self): # pragma: no cover + tokens_repr = ','.join( + '{0}({1!r})'.format(tok.name, tok.value) for tok in self._tokens) + return 'LogicalLine(depth={0}, tokens=[{1}])'.format( + self.depth, tokens_repr) - ############################################################################ - # Properties # - ############################################################################ + ############################################################################ + # Properties # + ############################################################################ - @property - def tokens( self ): - """Access the tokens contained within this line. + @property + def tokens(self): + """Access the tokens contained within this line. The caller must not modify the tokens list returned by this method. Returns: List of tokens in this line. """ - return self._tokens + return self._tokens - @property - def lineno( self ): - """Return the line number of this logical line. + @property + def lineno(self): + """Return the line number of this logical line. Returns: The line number of the first token in this logical line. """ - return self.first.lineno + return self.first.lineno - @property - def start( self ): - """The start of the logical line. + @property + def start(self): + """The start of the logical line. Returns: A tuple of the starting line number and column. 
""" - return ( self.first.lineno, self.first.column ) + return (self.first.lineno, self.first.column) - @property - def end( self ): - """The end of the logical line. + @property + def end(self): + """The end of the logical line. Returns: A tuple of the ending line number and column. """ - return ( self.last.lineno, self.last.column + len( self.last.value ) ) + return (self.last.lineno, self.last.column + len(self.last.value)) - @property - def is_comment( self ): - return self.first.is_comment + @property + def is_comment(self): + return self.first.is_comment - @property - def has_semicolon( self ): - return any( tok.value == ';' for tok in self._tokens ) + @property + def has_semicolon(self): + return any(tok.value == ';' for tok in self._tokens) -def _IsIdNumberStringToken( tok ): - return tok.is_keyword or tok.is_name or tok.is_number or tok.is_string +def _IsIdNumberStringToken(tok): + return tok.is_keyword or tok.is_name or tok.is_number or tok.is_string -def _IsUnaryOperator( tok ): - return subtypes.UNARY_OPERATOR in tok.subtypes +def _IsUnaryOperator(tok): + return subtypes.UNARY_OPERATOR in tok.subtypes -def _HasPrecedence( tok ): - """Whether a binary operation has precedence within its context.""" - node = tok.node +def _HasPrecedence(tok): + """Whether a binary operation has precedence within its context.""" + node = tok.node - # We let ancestor be the statement surrounding the operation that tok is the - # operator in. - ancestor = node.parent.parent + # We let ancestor be the statement surrounding the operation that tok is the + # operator in. + ancestor = node.parent.parent - while ancestor is not None: - # Search through the ancestor nodes in the parse tree for operators with - # lower precedence. - predecessor_type = pytree_utils.NodeName( ancestor ) - if predecessor_type in [ 'arith_expr', 'term' ]: - # An ancestor "arith_expr" or "term" means we have found an operator - # with lower precedence than our tok. 
- return True - if predecessor_type != 'atom': - # We understand the context to look for precedence within as an - # arbitrary nesting of "arith_expr", "term", and "atom" nodes. If we - # leave this context we have not found a lower precedence operator. - return False - # Under normal usage we expect a complete parse tree to be available and - # we will return before we get an AttributeError from the root. - ancestor = ancestor.parent + while ancestor is not None: + # Search through the ancestor nodes in the parse tree for operators with + # lower precedence. + predecessor_type = pytree_utils.NodeName(ancestor) + if predecessor_type in ['arith_expr', 'term']: + # An ancestor "arith_expr" or "term" means we have found an operator + # with lower precedence than our tok. + return True + if predecessor_type != 'atom': + # We understand the context to look for precedence within as an + # arbitrary nesting of "arith_expr", "term", and "atom" nodes. If we + # leave this context we have not found a lower precedence operator. + return False + # Under normal usage we expect a complete parse tree to be available and + # we will return before we get an AttributeError from the root. 
+ ancestor = ancestor.parent -def _PriorityIndicatingNoSpace( tok ): - """Whether to remove spaces around an operator due to precedence.""" - if not tok.is_arithmetic_op or not tok.is_simple_expr: - # Limit space removal to highest priority arithmetic operators - return False - return _HasPrecedence( tok ) +def _PriorityIndicatingNoSpace(tok): + """Whether to remove spaces around an operator due to precedence.""" + if not tok.is_arithmetic_op or not tok.is_simple_expr: + # Limit space removal to highest priority arithmetic operators + return False + return _HasPrecedence(tok) -def _IsSubscriptColonAndValuePair( token1, token2 ): - return ( token1.is_number or token1.is_name ) and token2.is_subscript_colon +def _IsSubscriptColonAndValuePair(token1, token2): + return (token1.is_number or token1.is_name) and token2.is_subscript_colon -def _SpaceRequiredBetween( left, right, is_line_disabled ): - """Return True if a space is required between the left and right token.""" - lval = left.value - rval = right.value - if ( left.is_pseudo and _IsIdNumberStringToken( right ) and left.previous_token and - _IsIdNumberStringToken( left.previous_token ) ): - # Space between keyword... tokens and pseudo parens. - return True - if left.is_pseudo or right.is_pseudo: - # There should be a space after the ':' in a dictionary. - if left.OpensScope(): - return True - # The closing pseudo-paren shouldn't affect spacing. - return False - if left.is_continuation or right.is_continuation: - # The continuation node's value has all of the spaces it needs. - return False - if right.name in pytree_utils.NONSEMANTIC_TOKENS: - # No space before a non-semantic token. - return False - if _IsIdNumberStringToken( left ) and _IsIdNumberStringToken( right ): - # Spaces between keyword, string, number, and identifier tokens. - return True - if lval == ',' and rval == ':': - # We do want a space between a comma and colon. 
- return True - if style.Get( 'SPACE_INSIDE_BRACKETS' ): - # Supersede the "no space before a colon or comma" check. - if left.OpensScope() and rval == ':': - return True - if right.ClosesScope() and lval == ':': - return True - if ( style.Get( 'SPACES_AROUND_SUBSCRIPT_COLON' ) and - ( _IsSubscriptColonAndValuePair( left, right ) or - _IsSubscriptColonAndValuePair( right, left ) ) ): - # Supersede the "never want a space before a colon or comma" check. - return True - if rval in ':,': - # Otherwise, we never want a space before a colon or comma. - return False - if lval == ',' and rval in ']})': - # Add a space between ending ',' and closing bracket if requested. - return style.Get( 'SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET' ) - if lval == ',': - # We want a space after a comma. - return True - if lval == 'from' and rval == '.': - # Space before the '.' in an import statement. - return True - if lval == '.' and rval == 'import': - # Space after the '.' in an import statement. - return True - if ( lval == '=' and rval in { '.', ',,,' } and - subtypes.DEFAULT_OR_NAMED_ASSIGN not in left.subtypes ): - # Space between equal and '.' as in "X = ...". - return True - if lval == ':' and rval in { '.', '...' }: - # Space between : and ... - return True - if ( ( right.is_keyword or right.is_name ) and - ( left.is_keyword or left.is_name ) ): - # Don't merge two keywords/identifiers. - return True - if ( subtypes.SUBSCRIPT_COLON in left.subtypes or - subtypes.SUBSCRIPT_COLON in right.subtypes ): - # A subscript shouldn't have spaces separating its colons. - return False - if ( subtypes.TYPED_NAME in left.subtypes or - subtypes.TYPED_NAME in right.subtypes ): - # A typed argument should have a space after the colon. - return True - if left.is_string: - if ( rval == '=' and - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in right.subtypes ): - # If there is a type hint, then we don't want to add a space between the - # equal sign and the hint. 
- return False - if rval not in '[)]}.' and not right.is_binary_op: - # A string followed by something other than a subscript, closing bracket, - # dot, or a binary op should have a space after it. - return True - if right.ClosesScope(): - # A string followed by closing brackets should have a space after it - # depending on SPACE_INSIDE_BRACKETS. A string followed by opening - # brackets, however, should not. - return style.Get( 'SPACE_INSIDE_BRACKETS' ) - if subtypes.SUBSCRIPT_BRACKET in right.subtypes: - # It's legal to do this in Python: 'hello'[a] - return False - if left.is_binary_op and lval != '**' and _IsUnaryOperator( right ): - # Space between the binary operator and the unary operator. - return True - if left.is_keyword and _IsUnaryOperator( right ): - # Handle things like "not -3 < x". - return True - if _IsUnaryOperator( left ) and _IsUnaryOperator( right ): - # No space between two unary operators. - return False - if left.is_binary_op or right.is_binary_op: - if lval == '**' or rval == '**': - # Space around the "power" operator. - return style.Get( 'SPACES_AROUND_POWER_OPERATOR' ) - # Enforce spaces around binary operators except the blocked ones. - block_list = style.Get( 'NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS' ) - if lval in block_list or rval in block_list: - return False - if style.Get( 'ARITHMETIC_PRECEDENCE_INDICATION' ): - if _PriorityIndicatingNoSpace( left ) or _PriorityIndicatingNoSpace( - right ): - return False - else: - return True - else: - return True - if ( _IsUnaryOperator( left ) and lval != 'not' and - ( right.is_name or right.is_number or rval == '(' ) ): - # The previous token was a unary op. No space is desired between it and - # the current token. - return False - if ( subtypes.DEFAULT_OR_NAMED_ASSIGN in left.subtypes and - subtypes.TYPED_NAME not in right.subtypes ): - # A named argument or default parameter shouldn't have spaces around it. 
- return style.Get( 'SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN' ) - if ( subtypes.DEFAULT_OR_NAMED_ASSIGN in right.subtypes and - subtypes.TYPED_NAME not in left.subtypes ): - # A named argument or default parameter shouldn't have spaces around it. - return style.Get( 'SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN' ) - if ( subtypes.VARARGS_LIST in left.subtypes or - subtypes.VARARGS_LIST in right.subtypes ): - return False - if ( subtypes.VARARGS_STAR in left.subtypes or - subtypes.KWARGS_STAR_STAR in left.subtypes ): - # Don't add a space after a vararg's star or a keyword's star-star. - return False - if lval == '@' and subtypes.DECORATOR in left.subtypes: - # Decorators shouldn't be separated from the 'at' sign. - return False - if left.is_keyword and rval == '.': - # Add space between keywords and dots. - return lval not in { 'None', 'print' } - if lval == '.' and right.is_keyword: - # Add space between keywords and dots. - return rval not in { 'None', 'print' } - if lval == '.' or rval == '.': - # Don't place spaces between dots. - return False - if ( ( lval == '(' and rval == ')' ) or ( lval == '[' and rval == ']' ) or - ( lval == '{' and rval == '}' ) ): - # Empty objects shouldn't be separated by spaces. 
- return False - if not is_line_disabled and ( left.OpensScope() or right.ClosesScope() ): - if ( style.GetOrDefault( 'SPACES_AROUND_DICT_DELIMITERS', False ) and - ( ( lval == '{' and - _IsDictListTupleDelimiterTok( left, is_opening = True ) ) or - ( rval == '}' and - _IsDictListTupleDelimiterTok( right, is_opening = False ) ) ) ): - return True - if ( style.GetOrDefault( 'SPACES_AROUND_LIST_DELIMITERS', False ) and - ( ( lval == '[' and - _IsDictListTupleDelimiterTok( left, is_opening = True ) ) or - ( rval == ']' and - _IsDictListTupleDelimiterTok( right, is_opening = False ) ) ) ): - return True - if ( style.GetOrDefault( 'SPACES_AROUND_TUPLE_DELIMITERS', False ) and - ( ( lval == '(' and - _IsDictListTupleDelimiterTok( left, is_opening = True ) ) or - ( rval == ')' and - _IsDictListTupleDelimiterTok( right, is_opening = False ) ) ) ): - return True - if left.OpensScope() and right.OpensScope(): - # Nested objects' opening brackets shouldn't be separated, unless enabled - # by SPACE_INSIDE_BRACKETS. - return style.Get( 'SPACE_INSIDE_BRACKETS' ) - if left.ClosesScope() and right.ClosesScope(): - # Nested objects' closing brackets shouldn't be separated, unless enabled - # by SPACE_INSIDE_BRACKETS. - return style.Get( 'SPACE_INSIDE_BRACKETS' ) - if left.ClosesScope() and rval in '([': - # A call, set, dictionary, or subscript that has a call or subscript after - # it shouldn't have a space between them. - return False - if left.OpensScope() and _IsIdNumberStringToken( right ): - # Don't separate the opening bracket from the first item, unless enabled - # by SPACE_INSIDE_BRACKETS. - return style.Get( 'SPACE_INSIDE_BRACKETS' ) - if left.is_name and rval in '([': - # Don't separate a call or array access from the name. 
- return False +def _SpaceRequiredBetween(left, right, is_line_disabled): + """Return True if a space is required between the left and right token.""" + lval = left.value + rval = right.value + if (left.is_pseudo and _IsIdNumberStringToken(right) and + left.previous_token and _IsIdNumberStringToken(left.previous_token)): + # Space between keyword... tokens and pseudo parens. + return True + if left.is_pseudo or right.is_pseudo: + # There should be a space after the ':' in a dictionary. + if left.OpensScope(): + return True + # The closing pseudo-paren shouldn't affect spacing. + return False + if left.is_continuation or right.is_continuation: + # The continuation node's value has all of the spaces it needs. + return False + if right.name in pytree_utils.NONSEMANTIC_TOKENS: + # No space before a non-semantic token. + return False + if _IsIdNumberStringToken(left) and _IsIdNumberStringToken(right): + # Spaces between keyword, string, number, and identifier tokens. + return True + if lval == ',' and rval == ':': + # We do want a space between a comma and colon. + return True + if style.Get('SPACE_INSIDE_BRACKETS'): + # Supersede the "no space before a colon or comma" check. + if left.OpensScope() and rval == ':': + return True + if right.ClosesScope() and lval == ':': + return True + if (style.Get('SPACES_AROUND_SUBSCRIPT_COLON') and + (_IsSubscriptColonAndValuePair(left, right) or + _IsSubscriptColonAndValuePair(right, left))): + # Supersede the "never want a space before a colon or comma" check. + return True + if rval in ':,': + # Otherwise, we never want a space before a colon or comma. + return False + if lval == ',' and rval in ']})': + # Add a space between ending ',' and closing bracket if requested. + return style.Get('SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET') + if lval == ',': + # We want a space after a comma. + return True + if lval == 'from' and rval == '.': + # Space before the '.' in an import statement. + return True + if lval == '.' 
and rval == 'import': + # Space after the '.' in an import statement. + return True + if (lval == '=' and rval in {'.', ',,,'} and + subtypes.DEFAULT_OR_NAMED_ASSIGN not in left.subtypes): + # Space between equal and '.' as in "X = ...". + return True + if lval == ':' and rval in {'.', '...'}: + # Space between : and ... + return True + if ((right.is_keyword or right.is_name) and + (left.is_keyword or left.is_name)): + # Don't merge two keywords/identifiers. + return True + if (subtypes.SUBSCRIPT_COLON in left.subtypes or + subtypes.SUBSCRIPT_COLON in right.subtypes): + # A subscript shouldn't have spaces separating its colons. + return False + if (subtypes.TYPED_NAME in left.subtypes or + subtypes.TYPED_NAME in right.subtypes): + # A typed argument should have a space after the colon. + return True + if left.is_string: + if (rval == '=' and + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in right.subtypes): + # If there is a type hint, then we don't want to add a space between the + # equal sign and the hint. + return False + if rval not in '[)]}.' and not right.is_binary_op: + # A string followed by something other than a subscript, closing bracket, + # dot, or a binary op should have a space after it. + return True if right.ClosesScope(): - # Don't separate the closing bracket from the last item, unless enabled - # by SPACE_INSIDE_BRACKETS. - # FIXME(morbo): This might be too permissive. - return style.Get( 'SPACE_INSIDE_BRACKETS' ) - if lval == 'print' and rval == '(': - # Special support for the 'print' function. - return False - if left.OpensScope() and _IsUnaryOperator( right ): - # Don't separate a unary operator from the opening bracket, unless enabled - # by SPACE_INSIDE_BRACKETS. - return style.Get( 'SPACE_INSIDE_BRACKETS' ) - if ( left.OpensScope() and ( subtypes.VARARGS_STAR in right.subtypes or - subtypes.KWARGS_STAR_STAR in right.subtypes ) ): - # Don't separate a '*' or '**' from the opening bracket, unless enabled - # by SPACE_INSIDE_BRACKETS. 
- return style.Get( 'SPACE_INSIDE_BRACKETS' ) - if rval == ';': - # Avoid spaces before a semicolon. (Why is there a semicolon?!) - return False - if lval == '(' and rval == 'await': - # Special support for the 'await' keyword. Don't separate the 'await' - # keyword from an opening paren, unless enabled by SPACE_INSIDE_BRACKETS. - return style.Get( 'SPACE_INSIDE_BRACKETS' ) + # A string followed by closing brackets should have a space after it + # depending on SPACE_INSIDE_BRACKETS. A string followed by opening + # brackets, however, should not. + return style.Get('SPACE_INSIDE_BRACKETS') + if subtypes.SUBSCRIPT_BRACKET in right.subtypes: + # It's legal to do this in Python: 'hello'[a] + return False + if left.is_binary_op and lval != '**' and _IsUnaryOperator(right): + # Space between the binary operator and the unary operator. return True - - -def _MustBreakBefore( prev_token, cur_token ): - """Return True if a line break is required before the current token.""" - if prev_token.is_comment or ( prev_token.previous_token and prev_token.is_pseudo and - prev_token.previous_token.is_comment ): - # Must break if the previous token was a comment. - return True - if ( cur_token.is_string and prev_token.is_string and - IsSurroundedByBrackets( cur_token ) ): - # We want consecutive strings to be on separate lines. This is a - # reasonable assumption, because otherwise they should have written them - # all on the same line, or with a '+'. - return True - return cur_token.must_break_before - - -def _CanBreakBefore( prev_token, cur_token ): - """Return True if a line break may occur before the current token.""" - pval = prev_token.value - cval = cur_token.value - if py3compat.PY3: - if pval == 'yield' and cval == 'from': - # Don't break before a yield argument. - return False - if pval in { 'async', 'await' } and cval in { 'def', 'with', 'for' }: - # Don't break after sync keywords. 
- return False - if cur_token.split_penalty >= split_penalty.UNBREAKABLE: - return False - if pval == '@': - # Don't break right after the beginning of a decorator. - return False - if cval == ':': - # Don't break before the start of a block of code. - return False - if cval == ',': - # Don't break before a comma. - return False - if prev_token.is_name and cval == '(': - # Don't break in the middle of a function definition or call. - return False - if prev_token.is_name and cval == '[': - # Don't break in the middle of an array dereference. - return False - if cur_token.is_comment and prev_token.lineno == cur_token.lineno: - # Don't break a comment at the end of the line. - return False - if subtypes.UNARY_OPERATOR in prev_token.subtypes: - # Don't break after a unary token. - return False - if not style.Get( 'ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS' ): - if ( subtypes.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes or - subtypes.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes ): - return False + if left.is_keyword and _IsUnaryOperator(right): + # Handle things like "not -3 < x". 
return True - - -def IsSurroundedByBrackets( tok ): - """Return True if the token is surrounded by brackets.""" - paren_count = 0 - brace_count = 0 - sq_bracket_count = 0 - previous_token = tok.previous_token - while previous_token: - if previous_token.value == ')': - paren_count -= 1 - elif previous_token.value == '}': - brace_count -= 1 - elif previous_token.value == ']': - sq_bracket_count -= 1 - - if previous_token.value == '(': - if paren_count == 0: - return previous_token - paren_count += 1 - elif previous_token.value == '{': - if brace_count == 0: - return previous_token - brace_count += 1 - elif previous_token.value == '[': - if sq_bracket_count == 0: - return previous_token - sq_bracket_count += 1 - - previous_token = previous_token.previous_token - return None - - -def _IsDictListTupleDelimiterTok( tok, is_opening ): - assert tok - - if tok.matching_bracket is None: - return False - - if is_opening: - open_tok = tok - close_tok = tok.matching_bracket - else: - open_tok = tok.matching_bracket - close_tok = tok - - # There must be something in between the tokens - if open_tok.next_token == close_tok: + if _IsUnaryOperator(left) and _IsUnaryOperator(right): + # No space between two unary operators. + return False + if left.is_binary_op or right.is_binary_op: + if lval == '**' or rval == '**': + # Space around the "power" operator. + return style.Get('SPACES_AROUND_POWER_OPERATOR') + # Enforce spaces around binary operators except the blocked ones. 
+ block_list = style.Get('NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS') + if lval in block_list or rval in block_list: + return False + if style.Get('ARITHMETIC_PRECEDENCE_INDICATION'): + if _PriorityIndicatingNoSpace(left) or _PriorityIndicatingNoSpace(right): return False - - assert open_tok.next_token.node - assert open_tok.next_token.node.parent - - return open_tok.next_token.node.parent.type in [ - python_symbols.dictsetmaker, - python_symbols.listmaker, - python_symbols.testlist_gexp, - ] - - -_LOGICAL_OPERATORS = frozenset( { 'and', 'or' } ) -_BITWISE_OPERATORS = frozenset( { '&', '|', '^' } ) -_ARITHMETIC_OPERATORS = frozenset( { '+', '-', '*', '/', '%', '//', '@' } ) - - -def _SplitPenalty( prev_token, cur_token ): - """Return the penalty for breaking the line before the current token.""" - pval = prev_token.value - cval = cur_token.value - if pval == 'not': - return split_penalty.UNBREAKABLE - - if cur_token.node_split_penalty > 0: - return cur_token.node_split_penalty - - if style.Get( 'SPLIT_BEFORE_LOGICAL_OPERATOR' ): - # Prefer to split before 'and' and 'or'. - if pval in _LOGICAL_OPERATORS: - return style.Get( 'SPLIT_PENALTY_LOGICAL_OPERATOR' ) - if cval in _LOGICAL_OPERATORS: - return 0 - else: - # Prefer to split after 'and' and 'or'. - if pval in _LOGICAL_OPERATORS: - return 0 - if cval in _LOGICAL_OPERATORS: - return style.Get( 'SPLIT_PENALTY_LOGICAL_OPERATOR' ) - - if style.Get( 'SPLIT_BEFORE_BITWISE_OPERATOR' ): - # Prefer to split before '&', '|', and '^'. - if pval in _BITWISE_OPERATORS: - return style.Get( 'SPLIT_PENALTY_BITWISE_OPERATOR' ) - if cval in _BITWISE_OPERATORS: - return 0 + else: + return True else: - # Prefer to split after '&', '|', and '^'. 
- if pval in _BITWISE_OPERATORS: - return 0 - if cval in _BITWISE_OPERATORS: - return style.Get( 'SPLIT_PENALTY_BITWISE_OPERATOR' ) - - if ( subtypes.COMP_FOR in cur_token.subtypes or - subtypes.COMP_IF in cur_token.subtypes ): - # We don't mind breaking before the 'for' or 'if' of a list comprehension. - return 0 - if subtypes.UNARY_OPERATOR in prev_token.subtypes: - # Try not to break after a unary operator. - return style.Get( 'SPLIT_PENALTY_AFTER_UNARY_OPERATOR' ) - if pval == ',': - # Breaking after a comma is fine, if need be. - return 0 - if pval == '**' or cval == '**': - return split_penalty.STRONGLY_CONNECTED - if ( subtypes.VARARGS_STAR in prev_token.subtypes or - subtypes.KWARGS_STAR_STAR in prev_token.subtypes ): - # Don't split after a varargs * or kwargs **. - return split_penalty.UNBREAKABLE - if prev_token.OpensScope() and cval != '(': - # Slightly prefer - return style.Get( 'SPLIT_PENALTY_AFTER_OPENING_BRACKET' ) - if cval == ':': - # Don't split before a colon. - return split_penalty.UNBREAKABLE - if cval == '=': - # Don't split before an assignment. - return split_penalty.UNBREAKABLE - if ( subtypes.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes or - subtypes.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes ): - # Don't break before or after an default or named assignment. - return split_penalty.UNBREAKABLE - if cval == '==': - # We would rather not split before an equality operator. - return split_penalty.STRONGLY_CONNECTED - if cur_token.ClosesScope(): - # Give a slight penalty for splitting before the closing scope. - return 100 + return True + if (_IsUnaryOperator(left) and lval != 'not' and + (right.is_name or right.is_number or rval == '(')): + # The previous token was a unary op. No space is desired between it and + # the current token. + return False + if (subtypes.DEFAULT_OR_NAMED_ASSIGN in left.subtypes and + subtypes.TYPED_NAME not in right.subtypes): + # A named argument or default parameter shouldn't have spaces around it. 
+ return style.Get('SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN') + if (subtypes.DEFAULT_OR_NAMED_ASSIGN in right.subtypes and + subtypes.TYPED_NAME not in left.subtypes): + # A named argument or default parameter shouldn't have spaces around it. + return style.Get('SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN') + if (subtypes.VARARGS_LIST in left.subtypes or + subtypes.VARARGS_LIST in right.subtypes): + return False + if (subtypes.VARARGS_STAR in left.subtypes or + subtypes.KWARGS_STAR_STAR in left.subtypes): + # Don't add a space after a vararg's star or a keyword's star-star. + return False + if lval == '@' and subtypes.DECORATOR in left.subtypes: + # Decorators shouldn't be separated from the 'at' sign. + return False + if left.is_keyword and rval == '.': + # Add space between keywords and dots. + return lval not in {'None', 'print'} + if lval == '.' and right.is_keyword: + # Add space between keywords and dots. + return rval not in {'None', 'print'} + if lval == '.' or rval == '.': + # Don't place spaces between dots. + return False + if ((lval == '(' and rval == ')') or (lval == '[' and rval == ']') or + (lval == '{' and rval == '}')): + # Empty objects shouldn't be separated by spaces. 
+ return False + if not is_line_disabled and (left.OpensScope() or right.ClosesScope()): + if (style.GetOrDefault('SPACES_AROUND_DICT_DELIMITERS', False) and ( + (lval == '{' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or + (rval == '}' and + _IsDictListTupleDelimiterTok(right, is_opening=False)))): + return True + if (style.GetOrDefault('SPACES_AROUND_LIST_DELIMITERS', False) and ( + (lval == '[' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or + (rval == ']' and + _IsDictListTupleDelimiterTok(right, is_opening=False)))): + return True + if (style.GetOrDefault('SPACES_AROUND_TUPLE_DELIMITERS', False) and ( + (lval == '(' and _IsDictListTupleDelimiterTok(left, is_opening=True)) or + (rval == ')' and + _IsDictListTupleDelimiterTok(right, is_opening=False)))): + return True + if left.OpensScope() and right.OpensScope(): + # Nested objects' opening brackets shouldn't be separated, unless enabled + # by SPACE_INSIDE_BRACKETS. + return style.Get('SPACE_INSIDE_BRACKETS') + if left.ClosesScope() and right.ClosesScope(): + # Nested objects' closing brackets shouldn't be separated, unless enabled + # by SPACE_INSIDE_BRACKETS. + return style.Get('SPACE_INSIDE_BRACKETS') + if left.ClosesScope() and rval in '([': + # A call, set, dictionary, or subscript that has a call or subscript after + # it shouldn't have a space between them. + return False + if left.OpensScope() and _IsIdNumberStringToken(right): + # Don't separate the opening bracket from the first item, unless enabled + # by SPACE_INSIDE_BRACKETS. + return style.Get('SPACE_INSIDE_BRACKETS') + if left.is_name and rval in '([': + # Don't separate a call or array access from the name. + return False + if right.ClosesScope(): + # Don't separate the closing bracket from the last item, unless enabled + # by SPACE_INSIDE_BRACKETS. + # FIXME(morbo): This might be too permissive. 
+ return style.Get('SPACE_INSIDE_BRACKETS') + if lval == 'print' and rval == '(': + # Special support for the 'print' function. + return False + if left.OpensScope() and _IsUnaryOperator(right): + # Don't separate a unary operator from the opening bracket, unless enabled + # by SPACE_INSIDE_BRACKETS. + return style.Get('SPACE_INSIDE_BRACKETS') + if (left.OpensScope() and (subtypes.VARARGS_STAR in right.subtypes or + subtypes.KWARGS_STAR_STAR in right.subtypes)): + # Don't separate a '*' or '**' from the opening bracket, unless enabled + # by SPACE_INSIDE_BRACKETS. + return style.Get('SPACE_INSIDE_BRACKETS') + if rval == ';': + # Avoid spaces before a semicolon. (Why is there a semicolon?!) + return False + if lval == '(' and rval == 'await': + # Special support for the 'await' keyword. Don't separate the 'await' + # keyword from an opening paren, unless enabled by SPACE_INSIDE_BRACKETS. + return style.Get('SPACE_INSIDE_BRACKETS') + return True + + +def _MustBreakBefore(prev_token, cur_token): + """Return True if a line break is required before the current token.""" + if prev_token.is_comment or (prev_token.previous_token and + prev_token.is_pseudo and + prev_token.previous_token.is_comment): + # Must break if the previous token was a comment. + return True + if (cur_token.is_string and prev_token.is_string and + IsSurroundedByBrackets(cur_token)): + # We want consecutive strings to be on separate lines. This is a + # reasonable assumption, because otherwise they should have written them + # all on the same line, or with a '+'. + return True + return cur_token.must_break_before + + +def _CanBreakBefore(prev_token, cur_token): + """Return True if a line break may occur before the current token.""" + pval = prev_token.value + cval = cur_token.value + if py3compat.PY3: + if pval == 'yield' and cval == 'from': + # Don't break before a yield argument. + return False + if pval in {'async', 'await'} and cval in {'def', 'with', 'for'}: + # Don't break after sync keywords. 
+ return False + if cur_token.split_penalty >= split_penalty.UNBREAKABLE: + return False + if pval == '@': + # Don't break right after the beginning of a decorator. + return False + if cval == ':': + # Don't break before the start of a block of code. + return False + if cval == ',': + # Don't break before a comma. + return False + if prev_token.is_name and cval == '(': + # Don't break in the middle of a function definition or call. + return False + if prev_token.is_name and cval == '[': + # Don't break in the middle of an array dereference. + return False + if cur_token.is_comment and prev_token.lineno == cur_token.lineno: + # Don't break a comment at the end of the line. + return False + if subtypes.UNARY_OPERATOR in prev_token.subtypes: + # Don't break after a unary token. + return False + if not style.Get('ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS'): + if (subtypes.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes or + subtypes.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes): + return False + return True + + +def IsSurroundedByBrackets(tok): + """Return True if the token is surrounded by brackets.""" + paren_count = 0 + brace_count = 0 + sq_bracket_count = 0 + previous_token = tok.previous_token + while previous_token: + if previous_token.value == ')': + paren_count -= 1 + elif previous_token.value == '}': + brace_count -= 1 + elif previous_token.value == ']': + sq_bracket_count -= 1 + + if previous_token.value == '(': + if paren_count == 0: + return previous_token + paren_count += 1 + elif previous_token.value == '{': + if brace_count == 0: + return previous_token + brace_count += 1 + elif previous_token.value == '[': + if sq_bracket_count == 0: + return previous_token + sq_bracket_count += 1 + + previous_token = previous_token.previous_token + return None + + +def _IsDictListTupleDelimiterTok(tok, is_opening): + assert tok + + if tok.matching_bracket is None: + return False + + if is_opening: + open_tok = tok + close_tok = tok.matching_bracket + else: + open_tok 
= tok.matching_bracket + close_tok = tok + + # There must be something in between the tokens + if open_tok.next_token == close_tok: + return False + + assert open_tok.next_token.node + assert open_tok.next_token.node.parent + + return open_tok.next_token.node.parent.type in [ + python_symbols.dictsetmaker, + python_symbols.listmaker, + python_symbols.testlist_gexp, + ] + + +_LOGICAL_OPERATORS = frozenset({'and', 'or'}) +_BITWISE_OPERATORS = frozenset({'&', '|', '^'}) +_ARITHMETIC_OPERATORS = frozenset({'+', '-', '*', '/', '%', '//', '@'}) + + +def _SplitPenalty(prev_token, cur_token): + """Return the penalty for breaking the line before the current token.""" + pval = prev_token.value + cval = cur_token.value + if pval == 'not': + return split_penalty.UNBREAKABLE + + if cur_token.node_split_penalty > 0: + return cur_token.node_split_penalty + + if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'): + # Prefer to split before 'and' and 'or'. + if pval in _LOGICAL_OPERATORS: + return style.Get('SPLIT_PENALTY_LOGICAL_OPERATOR') + if cval in _LOGICAL_OPERATORS: + return 0 + else: + # Prefer to split after 'and' and 'or'. + if pval in _LOGICAL_OPERATORS: + return 0 + if cval in _LOGICAL_OPERATORS: + return style.Get('SPLIT_PENALTY_LOGICAL_OPERATOR') + + if style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'): + # Prefer to split before '&', '|', and '^'. + if pval in _BITWISE_OPERATORS: + return style.Get('SPLIT_PENALTY_BITWISE_OPERATOR') + if cval in _BITWISE_OPERATORS: + return 0 + else: + # Prefer to split after '&', '|', and '^'. + if pval in _BITWISE_OPERATORS: + return 0 + if cval in _BITWISE_OPERATORS: + return style.Get('SPLIT_PENALTY_BITWISE_OPERATOR') + + if (subtypes.COMP_FOR in cur_token.subtypes or + subtypes.COMP_IF in cur_token.subtypes): + # We don't mind breaking before the 'for' or 'if' of a list comprehension. + return 0 + if subtypes.UNARY_OPERATOR in prev_token.subtypes: + # Try not to break after a unary operator. 
+ return style.Get('SPLIT_PENALTY_AFTER_UNARY_OPERATOR') + if pval == ',': + # Breaking after a comma is fine, if need be. return 0 + if pval == '**' or cval == '**': + return split_penalty.STRONGLY_CONNECTED + if (subtypes.VARARGS_STAR in prev_token.subtypes or + subtypes.KWARGS_STAR_STAR in prev_token.subtypes): + # Don't split after a varargs * or kwargs **. + return split_penalty.UNBREAKABLE + if prev_token.OpensScope() and cval != '(': + # Slightly prefer + return style.Get('SPLIT_PENALTY_AFTER_OPENING_BRACKET') + if cval == ':': + # Don't split before a colon. + return split_penalty.UNBREAKABLE + if cval == '=': + # Don't split before an assignment. + return split_penalty.UNBREAKABLE + if (subtypes.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes or + subtypes.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes): + # Don't break before or after an default or named assignment. + return split_penalty.UNBREAKABLE + if cval == '==': + # We would rather not split before an equality operator. + return split_penalty.STRONGLY_CONNECTED + if cur_token.ClosesScope(): + # Give a slight penalty for splitting before the closing scope. + return 100 + return 0 diff --git a/yapf/yapflib/object_state.py b/yapf/yapflib/object_state.py index 58dd6fe18..0afdb6041 100644 --- a/yapf/yapflib/object_state.py +++ b/yapf/yapflib/object_state.py @@ -27,8 +27,8 @@ from yapf.yapflib import subtypes -class ComprehensionState( object ): - """Maintains the state of list comprehension formatting decisions. +class ComprehensionState(object): + """Maintains the state of list comprehension formatting decisions. A stack of ComprehensionState objects are kept to ensure that list comprehensions are wrapped with well-defined rules. @@ -44,53 +44,53 @@ class ComprehensionState( object ): That is, a split somewhere after expr_token or before closing_bracket. 
""" - def __init__( self, expr_token ): - self.expr_token = expr_token - self.for_token = None - self.has_split_at_for = False - self.has_interior_split = False + def __init__(self, expr_token): + self.expr_token = expr_token + self.for_token = None + self.has_split_at_for = False + self.has_interior_split = False - def HasTrivialExpr( self ): - """Returns whether the comp_expr is "trivial" i.e. is a single token.""" - return self.expr_token.next_token.value == 'for' + def HasTrivialExpr(self): + """Returns whether the comp_expr is "trivial" i.e. is a single token.""" + return self.expr_token.next_token.value == 'for' - @property - def opening_bracket( self ): - return self.expr_token.previous_token + @property + def opening_bracket(self): + return self.expr_token.previous_token - @property - def closing_bracket( self ): - return self.opening_bracket.matching_bracket + @property + def closing_bracket(self): + return self.opening_bracket.matching_bracket - def Clone( self ): - clone = ComprehensionState( self.expr_token ) - clone.for_token = self.for_token - clone.has_split_at_for = self.has_split_at_for - clone.has_interior_split = self.has_interior_split - return clone + def Clone(self): + clone = ComprehensionState(self.expr_token) + clone.for_token = self.for_token + clone.has_split_at_for = self.has_split_at_for + clone.has_interior_split = self.has_interior_split + return clone - def __repr__( self ): - return ( - '[opening_bracket::%s, for_token::%s, has_split_at_for::%s,' - ' has_interior_split::%s, has_trivial_expr::%s]' % ( - self.opening_bracket, self.for_token, self.has_split_at_for, - self.has_interior_split, self.HasTrivialExpr() ) ) + def __repr__(self): + return ( + '[opening_bracket::%s, for_token::%s, has_split_at_for::%s,' + ' has_interior_split::%s, has_trivial_expr::%s]' % ( + self.opening_bracket, self.for_token, self.has_split_at_for, + self.has_interior_split, self.HasTrivialExpr())) - def __eq__( self, other ): - return hash( self ) == hash( 
other ) + def __eq__(self, other): + return hash(self) == hash(other) - def __ne__( self, other ): - return not self == other + def __ne__(self, other): + return not self == other - def __hash__( self, *args, **kwargs ): - return hash( - ( - self.expr_token, self.for_token, self.has_split_at_for, - self.has_interior_split ) ) + def __hash__(self, *args, **kwargs): + return hash( + ( + self.expr_token, self.for_token, self.has_split_at_for, + self.has_interior_split)) -class ParameterListState( object ): - """Maintains the state of function parameter list formatting decisions. +class ParameterListState(object): + """Maintains the state of function parameter list formatting decisions. Attributes: opening_bracket: The opening bracket of the parameter list. @@ -107,97 +107,97 @@ class ParameterListState( object ): needed if the indentation would collide. """ - def __init__( self, opening_bracket, newline, opening_column ): - self.opening_bracket = opening_bracket - self.has_split_before_first_param = newline - self.opening_column = opening_column - self.parameters = opening_bracket.parameters - self.split_before_closing_bracket = False - - @property - def closing_bracket( self ): - return self.opening_bracket.matching_bracket - - @property - def has_typed_return( self ): - return self.closing_bracket.next_token.value == '->' - - @property - @py3compat.lru_cache() - def has_default_values( self ): - return any( param.has_default_value for param in self.parameters ) - - @property - @py3compat.lru_cache() - def ends_in_comma( self ): - if not self.parameters: - return False - return self.parameters[ -1 ].last_token.next_token.value == ',' - - @property - @py3compat.lru_cache() - def last_token( self ): - token = self.opening_bracket.matching_bracket - while not token.is_comment and token.next_token: - token = token.next_token - return token - - @py3compat.lru_cache() - def LastParamFitsOnLine( self, indent ): - """Return true if the last parameter fits on a single 
line.""" - if not self.has_typed_return: - return False - if not self.parameters: - return True - total_length = self.last_token.total_length - last_param = self.parameters[ -1 ].first_token - total_length -= last_param.total_length - len( last_param.value ) - return total_length + indent <= style.Get( 'COLUMN_LIMIT' ) - - @py3compat.lru_cache() - def SplitBeforeClosingBracket( self, indent ): - """Return true if there's a split before the closing bracket.""" - if style.Get( 'DEDENT_CLOSING_BRACKETS' ): - return True - if self.ends_in_comma: - return True - if not self.parameters: - return False - total_length = self.last_token.total_length - last_param = self.parameters[ -1 ].first_token - total_length -= last_param.total_length - len( last_param.value ) - return total_length + indent > style.Get( 'COLUMN_LIMIT' ) - - def Clone( self ): - clone = ParameterListState( + def __init__(self, opening_bracket, newline, opening_column): + self.opening_bracket = opening_bracket + self.has_split_before_first_param = newline + self.opening_column = opening_column + self.parameters = opening_bracket.parameters + self.split_before_closing_bracket = False + + @property + def closing_bracket(self): + return self.opening_bracket.matching_bracket + + @property + def has_typed_return(self): + return self.closing_bracket.next_token.value == '->' + + @property + @py3compat.lru_cache() + def has_default_values(self): + return any(param.has_default_value for param in self.parameters) + + @property + @py3compat.lru_cache() + def ends_in_comma(self): + if not self.parameters: + return False + return self.parameters[-1].last_token.next_token.value == ',' + + @property + @py3compat.lru_cache() + def last_token(self): + token = self.opening_bracket.matching_bracket + while not token.is_comment and token.next_token: + token = token.next_token + return token + + @py3compat.lru_cache() + def LastParamFitsOnLine(self, indent): + """Return true if the last parameter fits on a single line.""" + 
if not self.has_typed_return: + return False + if not self.parameters: + return True + total_length = self.last_token.total_length + last_param = self.parameters[-1].first_token + total_length -= last_param.total_length - len(last_param.value) + return total_length + indent <= style.Get('COLUMN_LIMIT') + + @py3compat.lru_cache() + def SplitBeforeClosingBracket(self, indent): + """Return true if there's a split before the closing bracket.""" + if style.Get('DEDENT_CLOSING_BRACKETS'): + return True + if self.ends_in_comma: + return True + if not self.parameters: + return False + total_length = self.last_token.total_length + last_param = self.parameters[-1].first_token + total_length -= last_param.total_length - len(last_param.value) + return total_length + indent > style.Get('COLUMN_LIMIT') + + def Clone(self): + clone = ParameterListState( + self.opening_bracket, self.has_split_before_first_param, + self.opening_column) + clone.split_before_closing_bracket = self.split_before_closing_bracket + clone.parameters = [param.Clone() for param in self.parameters] + return clone + + def __repr__(self): + return ( + '[opening_bracket::%s, has_split_before_first_param::%s, ' + 'opening_column::%d]' % ( self.opening_bracket, self.has_split_before_first_param, - self.opening_column ) - clone.split_before_closing_bracket = self.split_before_closing_bracket - clone.parameters = [ param.Clone() for param in self.parameters ] - return clone + self.opening_column)) - def __repr__( self ): - return ( - '[opening_bracket::%s, has_split_before_first_param::%s, ' - 'opening_column::%d]' % ( - self.opening_bracket, self.has_split_before_first_param, - self.opening_column ) ) + def __eq__(self, other): + return hash(self) == hash(other) - def __eq__( self, other ): - return hash( self ) == hash( other ) + def __ne__(self, other): + return not self == other - def __ne__( self, other ): - return not self == other - - def __hash__( self, *args, **kwargs ): - return hash( - ( - 
self.opening_bracket, self.has_split_before_first_param, - self.opening_column, ( hash( param ) for param in self.parameters ) ) ) + def __hash__(self, *args, **kwargs): + return hash( + ( + self.opening_bracket, self.has_split_before_first_param, + self.opening_column, (hash(param) for param in self.parameters))) -class Parameter( object ): - """A parameter in a parameter list. +class Parameter(object): + """A parameter in a parameter list. Attributes: first_token: (format_token.FormatToken) First token of parameter. @@ -205,33 +205,33 @@ class Parameter( object ): has_default_value: (boolean) True if the parameter has a default value """ - def __init__( self, first_token, last_token ): - self.first_token = first_token - self.last_token = last_token + def __init__(self, first_token, last_token): + self.first_token = first_token + self.last_token = last_token - @property - @py3compat.lru_cache() - def has_default_value( self ): - """Returns true if the parameter has a default value.""" - tok = self.first_token - while tok != self.last_token: - if subtypes.DEFAULT_OR_NAMED_ASSIGN in tok.subtypes: - return True - tok = tok.matching_bracket if tok.OpensScope() else tok.next_token - return False + @property + @py3compat.lru_cache() + def has_default_value(self): + """Returns true if the parameter has a default value.""" + tok = self.first_token + while tok != self.last_token: + if subtypes.DEFAULT_OR_NAMED_ASSIGN in tok.subtypes: + return True + tok = tok.matching_bracket if tok.OpensScope() else tok.next_token + return False - def Clone( self ): - return Parameter( self.first_token, self.last_token ) + def Clone(self): + return Parameter(self.first_token, self.last_token) - def __repr__( self ): - return '[first_token::%s, last_token:%s]' % ( - self.first_token, self.last_token ) + def __repr__(self): + return '[first_token::%s, last_token:%s]' % ( + self.first_token, self.last_token) - def __eq__( self, other ): - return hash( self ) == hash( other ) + def 
__eq__(self, other): + return hash(self) == hash(other) - def __ne__( self, other ): - return not self == other + def __ne__(self, other): + return not self == other - def __hash__( self, *args, **kwargs ): - return hash( ( self.first_token, self.last_token ) ) + def __hash__(self, *args, **kwargs): + return hash((self.first_token, self.last_token)) diff --git a/yapf/yapflib/py3compat.py b/yapf/yapflib/py3compat.py index 143a13c3e..2ea5910d1 100644 --- a/yapf/yapflib/py3compat.py +++ b/yapf/yapflib/py3compat.py @@ -18,75 +18,75 @@ import os import sys -PY3 = sys.version_info[ 0 ] >= 3 -PY36 = sys.version_info[ 0 ] >= 3 and sys.version_info[ 1 ] >= 6 -PY37 = sys.version_info[ 0 ] >= 3 and sys.version_info[ 1 ] >= 7 -PY38 = sys.version_info[ 0 ] >= 3 and sys.version_info[ 1 ] >= 8 +PY3 = sys.version_info[0] >= 3 +PY36 = sys.version_info[0] >= 3 and sys.version_info[1] >= 6 +PY37 = sys.version_info[0] >= 3 and sys.version_info[1] >= 7 +PY38 = sys.version_info[0] >= 3 and sys.version_info[1] >= 8 if PY3: - StringIO = io.StringIO - BytesIO = io.BytesIO + StringIO = io.StringIO + BytesIO = io.BytesIO - import codecs # noqa: F811 + import codecs # noqa: F811 - def open_with_encoding( filename, mode, encoding, newline = '' ): # pylint: disable=unused-argument # noqa - return codecs.open( filename, mode = mode, encoding = encoding ) + def open_with_encoding(filename, mode, encoding, newline=''): # pylint: disable=unused-argument # noqa + return codecs.open(filename, mode=mode, encoding=encoding) - import functools - lru_cache = functools.lru_cache + import functools + lru_cache = functools.lru_cache - range = range - ifilter = filter + range = range + ifilter = filter - def raw_input(): - wrapper = io.TextIOWrapper( sys.stdin.buffer, encoding = 'utf-8' ) - return wrapper.buffer.raw.readall().decode( 'utf-8' ) + def raw_input(): + wrapper = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8') + return wrapper.buffer.raw.readall().decode('utf-8') - import configparser + 
import configparser - # Mappings from strings to booleans (such as '1' to True, 'false' to False, - # etc.) - CONFIGPARSER_BOOLEAN_STATES = configparser.ConfigParser.BOOLEAN_STATES + # Mappings from strings to booleans (such as '1' to True, 'false' to False, + # etc.) + CONFIGPARSER_BOOLEAN_STATES = configparser.ConfigParser.BOOLEAN_STATES - import tokenize - detect_encoding = tokenize.detect_encoding - TokenInfo = tokenize.TokenInfo + import tokenize + detect_encoding = tokenize.detect_encoding + TokenInfo = tokenize.TokenInfo else: - import __builtin__ - import cStringIO - from itertools import ifilter + import __builtin__ + import cStringIO + from itertools import ifilter - StringIO = BytesIO = cStringIO.StringIO + StringIO = BytesIO = cStringIO.StringIO - open_with_encoding = io.open + open_with_encoding = io.open - # Python 2.7 doesn't have a native LRU cache, so do nothing. - def lru_cache( maxsize = 128, typed = False ): + # Python 2.7 doesn't have a native LRU cache, so do nothing. 
+ def lru_cache(maxsize=128, typed=False): - def fake_wrapper( user_function ): - return user_function + def fake_wrapper(user_function): + return user_function - return fake_wrapper + return fake_wrapper - range = xrange # noqa: F821 + range = xrange # noqa: F821 - raw_input = raw_input + raw_input = raw_input - import ConfigParser as configparser - CONFIGPARSER_BOOLEAN_STATES = configparser.ConfigParser._boolean_states # pylint: disable=protected-access # noqa + import ConfigParser as configparser + CONFIGPARSER_BOOLEAN_STATES = configparser.ConfigParser._boolean_states # pylint: disable=protected-access # noqa - from lib2to3.pgen2 import tokenize - detect_encoding = tokenize.detect_encoding + from lib2to3.pgen2 import tokenize + detect_encoding = tokenize.detect_encoding - import collections + import collections - class TokenInfo( collections.namedtuple( 'TokenInfo', - 'type string start end line' ) ): - pass + class TokenInfo(collections.namedtuple('TokenInfo', + 'type string start end line')): + pass -def EncodeAndWriteToStdout( s, encoding = 'utf-8' ): - """Encode the given string and emit to stdout. +def EncodeAndWriteToStdout(s, encoding='utf-8'): + """Encode the given string and emit to stdout. The string may contain non-ascii characters. This is a problem when stdout is redirected, because then Python doesn't know the encoding and we may get a @@ -96,50 +96,50 @@ def EncodeAndWriteToStdout( s, encoding = 'utf-8' ): s: (string) The string to encode. encoding: (string) The encoding of the string. """ - if PY3: - sys.stdout.buffer.write( s.encode( encoding ) ) - elif sys.platform == 'win32': - # On python 2 and Windows universal newline transformation will be in - # effect on stdout. Python 2 will not let us avoid the easily because - # it happens based on whether the file handle is opened in O_BINARY or - # O_TEXT state. However we can tell Windows itself to change the current - # mode, and python 2 will follow suit. 
However we must take care to change - # the mode on the actual external stdout not just the current sys.stdout - # which may have been monkey-patched inside the python environment. - import msvcrt # pylint: disable=g-import-not-at-top - if sys.__stdout__ is sys.stdout: - msvcrt.setmode( sys.stdout.fileno(), os.O_BINARY ) - sys.stdout.write( s.encode( encoding ) ) - else: - sys.stdout.write( s.encode( encoding ) ) + if PY3: + sys.stdout.buffer.write(s.encode(encoding)) + elif sys.platform == 'win32': + # On python 2 and Windows universal newline transformation will be in + # effect on stdout. Python 2 will not let us avoid the easily because + # it happens based on whether the file handle is opened in O_BINARY or + # O_TEXT state. However we can tell Windows itself to change the current + # mode, and python 2 will follow suit. However we must take care to change + # the mode on the actual external stdout not just the current sys.stdout + # which may have been monkey-patched inside the python environment. + import msvcrt # pylint: disable=g-import-not-at-top + if sys.__stdout__ is sys.stdout: + msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) + sys.stdout.write(s.encode(encoding)) + else: + sys.stdout.write(s.encode(encoding)) if PY3: - basestring = str - unicode = str # pylint: disable=redefined-builtin,invalid-name + basestring = str + unicode = str # pylint: disable=redefined-builtin,invalid-name else: - basestring = basestring + basestring = basestring - def unicode( s ): # pylint: disable=invalid-name - """Force conversion of s to unicode.""" - return __builtin__.unicode( s, 'utf-8' ) + def unicode(s): # pylint: disable=invalid-name + """Force conversion of s to unicode.""" + return __builtin__.unicode(s, 'utf-8') # In Python 3.2+, readfp is deprecated in favor of read_file, which doesn't # exist in Python 2 yet. To avoid deprecation warnings, subclass ConfigParser to # fix this - now read_file works across all Python versions we care about. 
-class ConfigParser( configparser.ConfigParser ): - if not PY3: +class ConfigParser(configparser.ConfigParser): + if not PY3: - def read_file( self, fp, source = None ): - self.readfp( fp, filename = source ) + def read_file(self, fp, source=None): + self.readfp(fp, filename=source) -def removeBOM( source ): - """Remove any Byte-order-Mark bytes from the beginning of a file.""" - bom = codecs.BOM_UTF8 - if PY3: - bom = bom.decode( 'utf-8' ) - if source.startswith( bom ): - return source[ len( bom ): ] - return source +def removeBOM(source): + """Remove any Byte-order-Mark bytes from the beginning of a file.""" + bom = codecs.BOM_UTF8 + if PY3: + bom = bom.decode('utf-8') + if source.startswith(bom): + return source[len(bom):] + return source diff --git a/yapf/yapflib/reformatter.py b/yapf/yapflib/reformatter.py index 7e0fdf344..ec196d8b3 100644 --- a/yapf/yapflib/reformatter.py +++ b/yapf/yapflib/reformatter.py @@ -37,8 +37,8 @@ from yapf.yapflib import verifier -def Reformat( llines, verify = False, lines = None ): - """Reformat the logical lines. +def Reformat(llines, verify=False, lines=None): + """Reformat the logical lines. Arguments: llines: (list of logical_line.LogicalLine) Lines we want to format. @@ -49,138 +49,139 @@ def Reformat( llines, verify = False, lines = None ): Returns: A string representing the reformatted code. """ - final_lines = [] - prev_line = None # The previous line. 
- indent_width = style.Get( 'INDENT_WIDTH' ) - - for lline in _SingleOrMergedLines( llines ): - first_token = lline.first - _FormatFirstToken( first_token, lline.depth, prev_line, final_lines ) - - indent_amt = indent_width * lline.depth - state = format_decision_state.FormatDecisionState( lline, indent_amt ) - state.MoveStateToNextToken() - - if not lline.disable: - if lline.first.is_comment: - lline.first.value = lline.first.value.rstrip() - elif lline.last.is_comment: - lline.last.value = lline.last.value.rstrip() - if prev_line and prev_line.disable: - # Keep the vertical spacing between a disabled and enabled formatting - # region. - _RetainRequiredVerticalSpacingBetweenTokens( - lline.first, prev_line.last, lines ) - if any( tok.is_comment for tok in lline.tokens ): - _RetainVerticalSpacingBeforeComments( lline ) - - if lline.disable or _LineHasContinuationMarkers( lline ): - _RetainHorizontalSpacing( lline ) - _RetainRequiredVerticalSpacing( lline, prev_line, lines ) - _EmitLineUnformatted( state ) - - elif ( _LineContainsPylintDisableLineTooLong( lline ) or - _LineContainsI18n( lline ) ): - # Don't modify vertical spacing, but fix any horizontal spacing issues. - _RetainRequiredVerticalSpacing( lline, prev_line, lines ) - _EmitLineUnformatted( state ) - - elif _CanPlaceOnSingleLine( lline ) and not any( tok.must_break_before - for tok in lline.tokens ): - # The logical line fits on one line. - while state.next_token: - state.AddTokenToState( newline = False, dry_run = False ) - - elif not _AnalyzeSolutionSpace( state ): - # Failsafe mode. If there isn't a solution to the line, then just emit - # it as is. 
- state = format_decision_state.FormatDecisionState( lline, indent_amt ) - state.MoveStateToNextToken() - _RetainHorizontalSpacing( lline ) - _RetainRequiredVerticalSpacing( lline, prev_line, None ) - _EmitLineUnformatted( state ) - - final_lines.append( lline ) - prev_line = lline - - if style.Get( 'ALIGN_ASSIGNMENT' ): - _AlignAssignment( final_lines ) - - _AlignTrailingComments( final_lines ) - return _FormatFinalLines( final_lines, verify ) - - -def _RetainHorizontalSpacing( line ): - """Retain all horizontal spacing between tokens.""" - for tok in line.tokens: - tok.RetainHorizontalSpacing( line.first.column, line.depth ) - - -def _RetainRequiredVerticalSpacing( cur_line, prev_line, lines ): - """Retain all vertical spacing between lines.""" - prev_tok = None - if prev_line is not None: - prev_tok = prev_line.last - - if cur_line.disable: - # After the first token we are acting on a single line. So if it is - # disabled we must not reformat. - lines = set() - - for cur_tok in cur_line.tokens: - _RetainRequiredVerticalSpacingBetweenTokens( cur_tok, prev_tok, lines ) - prev_tok = cur_tok - - -def _RetainRequiredVerticalSpacingBetweenTokens( cur_tok, prev_tok, lines ): - """Retain vertical spacing between two tokens if not in editable range.""" - if prev_tok is None: - return - - if prev_tok.is_string: - prev_lineno = prev_tok.lineno + prev_tok.value.count( '\n' ) - elif prev_tok.is_pseudo: - if not prev_tok.previous_token.is_multiline_string: - prev_lineno = prev_tok.previous_token.lineno - else: - prev_lineno = prev_tok.lineno + final_lines = [] + prev_line = None # The previous line. 
+ indent_width = style.Get('INDENT_WIDTH') + + for lline in _SingleOrMergedLines(llines): + first_token = lline.first + _FormatFirstToken(first_token, lline.depth, prev_line, final_lines) + + indent_amt = indent_width * lline.depth + state = format_decision_state.FormatDecisionState(lline, indent_amt) + state.MoveStateToNextToken() + + if not lline.disable: + if lline.first.is_comment: + lline.first.value = lline.first.value.rstrip() + elif lline.last.is_comment: + lline.last.value = lline.last.value.rstrip() + if prev_line and prev_line.disable: + # Keep the vertical spacing between a disabled and enabled formatting + # region. + _RetainRequiredVerticalSpacingBetweenTokens( + lline.first, prev_line.last, lines) + if any(tok.is_comment for tok in lline.tokens): + _RetainVerticalSpacingBeforeComments(lline) + + if lline.disable or _LineHasContinuationMarkers(lline): + _RetainHorizontalSpacing(lline) + _RetainRequiredVerticalSpacing(lline, prev_line, lines) + _EmitLineUnformatted(state) + + elif (_LineContainsPylintDisableLineTooLong(lline) or + _LineContainsI18n(lline)): + # Don't modify vertical spacing, but fix any horizontal spacing issues. + _RetainRequiredVerticalSpacing(lline, prev_line, lines) + _EmitLineUnformatted(state) + + elif _CanPlaceOnSingleLine(lline) and not any(tok.must_break_before + for tok in lline.tokens): + # The logical line fits on one line. + while state.next_token: + state.AddTokenToState(newline=False, dry_run=False) + + elif not _AnalyzeSolutionSpace(state): + # Failsafe mode. If there isn't a solution to the line, then just emit + # it as is. 
+ state = format_decision_state.FormatDecisionState(lline, indent_amt) + state.MoveStateToNextToken() + _RetainHorizontalSpacing(lline) + _RetainRequiredVerticalSpacing(lline, prev_line, None) + _EmitLineUnformatted(state) + + final_lines.append(lline) + prev_line = lline + + if style.Get('ALIGN_ASSIGNMENT'): + _AlignAssignment(final_lines) + + _AlignTrailingComments(final_lines) + return _FormatFinalLines(final_lines, verify) + + +def _RetainHorizontalSpacing(line): + """Retain all horizontal spacing between tokens.""" + for tok in line.tokens: + tok.RetainHorizontalSpacing(line.first.column, line.depth) + + +def _RetainRequiredVerticalSpacing(cur_line, prev_line, lines): + """Retain all vertical spacing between lines.""" + prev_tok = None + if prev_line is not None: + prev_tok = prev_line.last + + if cur_line.disable: + # After the first token we are acting on a single line. So if it is + # disabled we must not reformat. + lines = set() + + for cur_tok in cur_line.tokens: + _RetainRequiredVerticalSpacingBetweenTokens(cur_tok, prev_tok, lines) + prev_tok = cur_tok + + +def _RetainRequiredVerticalSpacingBetweenTokens(cur_tok, prev_tok, lines): + """Retain vertical spacing between two tokens if not in editable range.""" + if prev_tok is None: + return + + if prev_tok.is_string: + prev_lineno = prev_tok.lineno + prev_tok.value.count('\n') + elif prev_tok.is_pseudo: + if not prev_tok.previous_token.is_multiline_string: + prev_lineno = prev_tok.previous_token.lineno else: - prev_lineno = prev_tok.lineno + prev_lineno = prev_tok.lineno + else: + prev_lineno = prev_tok.lineno - if cur_tok.is_comment: - cur_lineno = cur_tok.lineno - cur_tok.value.count( '\n' ) - else: - cur_lineno = cur_tok.lineno + if cur_tok.is_comment: + cur_lineno = cur_tok.lineno - cur_tok.value.count('\n') + else: + cur_lineno = cur_tok.lineno - if not prev_tok.is_comment and prev_tok.value.endswith( '\\' ): - prev_lineno += prev_tok.value.count( '\n' ) + if not prev_tok.is_comment and 
prev_tok.value.endswith('\\'): + prev_lineno += prev_tok.value.count('\n') - required_newlines = cur_lineno - prev_lineno - if cur_tok.is_comment and not prev_tok.is_comment: - # Don't adjust between a comment and non-comment. - pass - elif lines and lines.intersection( range( prev_lineno, cur_lineno + 1 ) ): - desired_newlines = cur_tok.whitespace_prefix.count( '\n' ) - whitespace_lines = range( prev_lineno + 1, cur_lineno ) - deletable_lines = len( lines.intersection( whitespace_lines ) ) - required_newlines = max( required_newlines - deletable_lines, desired_newlines ) + required_newlines = cur_lineno - prev_lineno + if cur_tok.is_comment and not prev_tok.is_comment: + # Don't adjust between a comment and non-comment. + pass + elif lines and lines.intersection(range(prev_lineno, cur_lineno + 1)): + desired_newlines = cur_tok.whitespace_prefix.count('\n') + whitespace_lines = range(prev_lineno + 1, cur_lineno) + deletable_lines = len(lines.intersection(whitespace_lines)) + required_newlines = max( + required_newlines - deletable_lines, desired_newlines) - cur_tok.AdjustNewlinesBefore( required_newlines ) + cur_tok.AdjustNewlinesBefore(required_newlines) -def _RetainVerticalSpacingBeforeComments( line ): - """Retain vertical spacing before comments.""" - prev_token = None - for tok in line.tokens: - if tok.is_comment and prev_token: - if tok.lineno - tok.value.count( '\n' ) - prev_token.lineno > 1: - tok.AdjustNewlinesBefore( ONE_BLANK_LINE ) +def _RetainVerticalSpacingBeforeComments(line): + """Retain vertical spacing before comments.""" + prev_token = None + for tok in line.tokens: + if tok.is_comment and prev_token: + if tok.lineno - tok.value.count('\n') - prev_token.lineno > 1: + tok.AdjustNewlinesBefore(ONE_BLANK_LINE) - prev_token = tok + prev_token = tok -def _EmitLineUnformatted( state ): - """Emit the line without formatting. +def _EmitLineUnformatted(state): + """Emit the line without formatting. 
The line contains code that if reformatted would break a non-syntactic convention. E.g., i18n comments and function calls are tightly bound by @@ -191,23 +192,23 @@ def _EmitLineUnformatted( state ): state: (format_decision_state.FormatDecisionState) The format decision state. """ - while state.next_token: - previous_token = state.next_token.previous_token - previous_lineno = previous_token.lineno + while state.next_token: + previous_token = state.next_token.previous_token + previous_lineno = previous_token.lineno - if previous_token.is_multiline_string or previous_token.is_string: - previous_lineno += previous_token.value.count( '\n' ) + if previous_token.is_multiline_string or previous_token.is_string: + previous_lineno += previous_token.value.count('\n') - if previous_token.is_continuation: - newline = False - else: - newline = state.next_token.lineno > previous_lineno + if previous_token.is_continuation: + newline = False + else: + newline = state.next_token.lineno > previous_lineno - state.AddTokenToState( newline = newline, dry_run = False ) + state.AddTokenToState(newline=newline, dry_run=False) -def _LineContainsI18n( line ): - """Return true if there are i18n comments or function calls in the line. +def _LineContainsI18n(line): + """Return true if there are i18n comments or function calls in the line. I18n comments and pseudo-function calls are closely related. They cannot be moved apart without breaking i18n. @@ -218,33 +219,33 @@ def _LineContainsI18n( line ): Returns: True if the line contains i18n comments or function calls. False otherwise. """ - if style.Get( 'I18N_COMMENT' ): - for tok in line.tokens: - if tok.is_comment and re.match( style.Get( 'I18N_COMMENT' ), tok.value ): - # Contains an i18n comment. 
- return True - - if style.Get( 'I18N_FUNCTION_CALL' ): - length = len( line.tokens ) - for index in range( length - 1 ): - if ( line.tokens[ index + 1 ].value == '(' and - line.tokens[ index ].value in style.Get( 'I18N_FUNCTION_CALL' ) ): - return True - return False + if style.Get('I18N_COMMENT'): + for tok in line.tokens: + if tok.is_comment and re.match(style.Get('I18N_COMMENT'), tok.value): + # Contains an i18n comment. + return True + + if style.Get('I18N_FUNCTION_CALL'): + length = len(line.tokens) + for index in range(length - 1): + if (line.tokens[index + 1].value == '(' and + line.tokens[index].value in style.Get('I18N_FUNCTION_CALL')): + return True + return False -def _LineContainsPylintDisableLineTooLong( line ): - """Return true if there is a "pylint: disable=line-too-long" comment.""" - return re.search( r'\bpylint:\s+disable=line-too-long\b', line.last.value ) +def _LineContainsPylintDisableLineTooLong(line): + """Return true if there is a "pylint: disable=line-too-long" comment.""" + return re.search(r'\bpylint:\s+disable=line-too-long\b', line.last.value) -def _LineHasContinuationMarkers( line ): - """Return true if the line has continuation markers in it.""" - return any( tok.is_continuation for tok in line.tokens ) +def _LineHasContinuationMarkers(line): + """Return true if the line has continuation markers in it.""" + return any(tok.is_continuation for tok in line.tokens) -def _CanPlaceOnSingleLine( line ): - """Determine if the logical line can go on a single line. +def _CanPlaceOnSingleLine(line): + """Determine if the logical line can go on a single line. Arguments: line: (logical_line.LogicalLine) The line currently being formatted. @@ -252,359 +253,342 @@ def _CanPlaceOnSingleLine( line ): Returns: True if the line can or should be added to a single line. False otherwise. 
""" - token_names = [ x.name for x in line.tokens ] - if ( style.Get( 'FORCE_MULTILINE_DICT' ) and 'LBRACE' in token_names ): - return False - indent_amt = style.Get( 'INDENT_WIDTH' ) * line.depth - last = line.last - last_index = -1 - if ( last.is_pylint_comment or last.is_pytype_comment or last.is_copybara_comment ): - last = last.previous_token - last_index = -2 - if last is None: - return True - return ( - last.total_length + indent_amt <= style.Get( 'COLUMN_LIMIT' ) and - not any( tok.is_comment for tok in line.tokens[ : last_index ] ) ) - - -def _AlignTrailingComments( final_lines ): - """Align trailing comments to the same column.""" - final_lines_index = 0 - while final_lines_index < len( final_lines ): - line = final_lines[ final_lines_index ] - assert line.tokens - - processed_content = False - - for tok in line.tokens: - if ( tok.is_comment and isinstance( tok.spaces_required_before, list ) and - tok.value.startswith( '#' ) ): - # All trailing comments and comments that appear on a line by themselves - # in this block should be indented at the same level. The block is - # terminated by an empty line or EOF. Enumerate through each line in - # the block and calculate the max line length. Once complete, use the - # first col value greater than that value and create the necessary for - # each line accordingly. - all_pc_line_lengths = [] # All pre-comment line lengths - max_line_length = 0 - - while True: - # EOF - if final_lines_index + len( all_pc_line_lengths ) == len( - final_lines ): - break - - this_line = final_lines[ final_lines_index + - len( all_pc_line_lengths ) ] - - # Blank line - note that content is preformatted so we don't need to - # worry about spaces/tabs; a blank line will always be '\n\n'. 
- assert this_line.tokens - if ( all_pc_line_lengths and - this_line.tokens[ 0 ].formatted_whitespace_prefix.startswith( - '\n\n' ) ): - break - - if this_line.disable: - all_pc_line_lengths.append( [] ) - continue - - # Calculate the length of each line in this logical line. - line_content = '' - pc_line_lengths = [] - - for line_tok in this_line.tokens: - whitespace_prefix = line_tok.formatted_whitespace_prefix - - newline_index = whitespace_prefix.rfind( '\n' ) - if newline_index != -1: - max_line_length = max( - max_line_length, len( line_content ) ) - line_content = '' - - whitespace_prefix = whitespace_prefix[ newline_index + 1 : ] - - if line_tok.is_comment: - pc_line_lengths.append( len( line_content ) ) - else: - line_content += '{}{}'.format( - whitespace_prefix, line_tok.value ) - - if pc_line_lengths: - max_line_length = max( max_line_length, max( pc_line_lengths ) ) - - all_pc_line_lengths.append( pc_line_lengths ) - - # Calculate the aligned column value - max_line_length += 2 - - aligned_col = None - for potential_col in tok.spaces_required_before: - if potential_col > max_line_length: - aligned_col = potential_col - break - - if aligned_col is None: - aligned_col = max_line_length - - # Update the comment token values based on the aligned values - for all_pc_line_lengths_index, pc_line_lengths in enumerate( - all_pc_line_lengths ): - if not pc_line_lengths: - continue - - this_line = final_lines[ final_lines_index + - all_pc_line_lengths_index ] - - pc_line_length_index = 0 - for line_tok in this_line.tokens: - if line_tok.is_comment: - assert pc_line_length_index < len( pc_line_lengths ) - assert pc_line_lengths[ pc_line_length_index ] < aligned_col - - # Note that there may be newlines embedded in the comments, so - # we need to apply a whitespace prefix to each line. 
- whitespace = ' ' * ( - aligned_col - pc_line_lengths[ pc_line_length_index ] - - 1 ) - pc_line_length_index += 1 - - line_content = [] - - for comment_line_index, comment_line in enumerate( - line_tok.value.split( '\n' ) ): - line_content.append( - '{}{}'.format( whitespace, comment_line.strip() ) ) - - if comment_line_index == 0: - whitespace = ' ' * ( aligned_col - 1 ) - - line_content = '\n'.join( line_content ) - - # Account for initial whitespace already slated for the - # beginning of the line. - existing_whitespace_prefix = \ - line_tok.formatted_whitespace_prefix.lstrip('\n') + token_names = [x.name for x in line.tokens] + if (style.Get('FORCE_MULTILINE_DICT') and 'LBRACE' in token_names): + return False + indent_amt = style.Get('INDENT_WIDTH') * line.depth + last = line.last + last_index = -1 + if (last.is_pylint_comment or last.is_pytype_comment or + last.is_copybara_comment): + last = last.previous_token + last_index = -2 + if last is None: + return True + return ( + last.total_length + indent_amt <= style.Get('COLUMN_LIMIT') and + not any(tok.is_comment for tok in line.tokens[:last_index])) - if line_content.startswith( existing_whitespace_prefix ): - line_content = line_content[ - len( existing_whitespace_prefix ): ] - line_tok.value = line_content +def _AlignTrailingComments(final_lines): + """Align trailing comments to the same column.""" + final_lines_index = 0 + while final_lines_index < len(final_lines): + line = final_lines[final_lines_index] + assert line.tokens - assert pc_line_length_index == len( pc_line_lengths ) + processed_content = False - final_lines_index += len( all_pc_line_lengths ) + for tok in line.tokens: + if (tok.is_comment and isinstance(tok.spaces_required_before, list) and + tok.value.startswith('#')): + # All trailing comments and comments that appear on a line by themselves + # in this block should be indented at the same level. The block is + # terminated by an empty line or EOF. 
Enumerate through each line in + # the block and calculate the max line length. Once complete, use the + # first col value greater than that value and create the necessary for + # each line accordingly. + all_pc_line_lengths = [] # All pre-comment line lengths + max_line_length = 0 + + while True: + # EOF + if final_lines_index + len(all_pc_line_lengths) == len(final_lines): + break - processed_content = True - break + this_line = final_lines[final_lines_index + len(all_pc_line_lengths)] + + # Blank line - note that content is preformatted so we don't need to + # worry about spaces/tabs; a blank line will always be '\n\n'. + assert this_line.tokens + if (all_pc_line_lengths and + this_line.tokens[0].formatted_whitespace_prefix.startswith('\n\n') + ): + break + + if this_line.disable: + all_pc_line_lengths.append([]) + continue + + # Calculate the length of each line in this logical line. + line_content = '' + pc_line_lengths = [] + + for line_tok in this_line.tokens: + whitespace_prefix = line_tok.formatted_whitespace_prefix + + newline_index = whitespace_prefix.rfind('\n') + if newline_index != -1: + max_line_length = max(max_line_length, len(line_content)) + line_content = '' + + whitespace_prefix = whitespace_prefix[newline_index + 1:] + + if line_tok.is_comment: + pc_line_lengths.append(len(line_content)) + else: + line_content += '{}{}'.format(whitespace_prefix, line_tok.value) + + if pc_line_lengths: + max_line_length = max(max_line_length, max(pc_line_lengths)) + + all_pc_line_lengths.append(pc_line_lengths) + + # Calculate the aligned column value + max_line_length += 2 + + aligned_col = None + for potential_col in tok.spaces_required_before: + if potential_col > max_line_length: + aligned_col = potential_col + break + + if aligned_col is None: + aligned_col = max_line_length + + # Update the comment token values based on the aligned values + for all_pc_line_lengths_index, pc_line_lengths in enumerate( + all_pc_line_lengths): + if not pc_line_lengths: + 
continue + + this_line = final_lines[final_lines_index + all_pc_line_lengths_index] + + pc_line_length_index = 0 + for line_tok in this_line.tokens: + if line_tok.is_comment: + assert pc_line_length_index < len(pc_line_lengths) + assert pc_line_lengths[pc_line_length_index] < aligned_col + + # Note that there may be newlines embedded in the comments, so + # we need to apply a whitespace prefix to each line. + whitespace = ' ' * ( + aligned_col - pc_line_lengths[pc_line_length_index] - 1) + pc_line_length_index += 1 + + line_content = [] + + for comment_line_index, comment_line in enumerate( + line_tok.value.split('\n')): + line_content.append( + '{}{}'.format(whitespace, comment_line.strip())) + + if comment_line_index == 0: + whitespace = ' ' * (aligned_col - 1) + + line_content = '\n'.join(line_content) + + # Account for initial whitespace already slated for the + # beginning of the line. + existing_whitespace_prefix = \ + line_tok.formatted_whitespace_prefix.lstrip('\n') + + if line_content.startswith(existing_whitespace_prefix): + line_content = line_content[len(existing_whitespace_prefix):] + + line_tok.value = line_content + + assert pc_line_length_index == len(pc_line_lengths) + + final_lines_index += len(all_pc_line_lengths) + + processed_content = True + break + + if not processed_content: + final_lines_index += 1 - if not processed_content: - final_lines_index += 1 - - -def _AlignAssignment( final_lines ): - """Align assignment operators and augmented assignment operators to the same column""" - - final_lines_index = 0 - while final_lines_index < len( final_lines ): - line = final_lines[ final_lines_index ] - - assert line.tokens - process_content = False - - for tok in line.tokens: - if tok.is_assign or tok.is_augassign: - # all pre assignment variable lengths in one block of lines - all_pa_variables_lengths = [] - max_variables_length = 0 - - while True: - # EOF - if final_lines_index + len( all_pa_variables_lengths ) == len( - final_lines ): - break - 
- this_line_index = final_lines_index + len( - all_pa_variables_lengths ) - this_line = final_lines[ this_line_index ] - - next_line = None - if this_line_index < len( final_lines ) - 1: - next_line = final_lines[ final_lines_index + - len( all_pa_variables_lengths ) + 1 ] - - assert this_line.tokens, next_line.tokens - - # align them differently when there is a blank line in between - if ( all_pa_variables_lengths and - this_line.tokens[ 0 ].formatted_whitespace_prefix.startswith( - '\n\n' ) ): - break - - # if there is a standalone comment or keyword statement line - # or other lines without assignment in between, break - elif ( all_pa_variables_lengths and - True not in [ tok.is_assign or tok.is_augassign - for tok in this_line.tokens ] ): - if this_line.tokens[ 0 ].is_comment: - if style.Get( 'NEW_ALIGNMENT_AFTER_COMMENTLINE' ): - break - else: - break - - if this_line.disable: - all_pa_variables_lengths.append( [] ) - continue - - variables_content = '' - pa_variables_lengths = [] - contain_object = False - line_tokens = this_line.tokens - # only one assignment expression is on each line - for index in range( len( line_tokens ) ): - line_tok = line_tokens[ index ] - - prefix = line_tok.formatted_whitespace_prefix - newline_index = prefix.rfind( '\n' ) - if newline_index != -1: - variables_content = '' - prefix = prefix[ newline_index + 1 : ] - - if line_tok.is_assign or line_tok.is_augassign: - next_toks = [ - line_tokens[ i ] - for i in range( index + 1, len( line_tokens ) ) - ] - # if there is object(list/tuple/dict) with newline entries, break, - # update the alignment so far and start to calulate new alignment - for tok in next_toks: - if tok.value in [ '(', '[', '{' ] and tok.next_token: - if ( - tok.next_token.formatted_whitespace_prefix - .startswith( '\n' ) or - ( tok.next_token.is_comment and - tok.next_token.next_token. 
- formatted_whitespace_prefix.startswith( '\n' ) - ) ): - pa_variables_lengths.append( - len( variables_content ) ) - contain_object = True - break - if not contain_object: - if line_tok.is_assign: - pa_variables_lengths.append( - len( variables_content ) ) - # if augassign, add the extra augmented part to the max length caculation - elif line_tok.is_augassign: - pa_variables_lengths.append( - len( variables_content ) + - len( line_tok.value ) - 1 ) - # don't add the tokens - # after the assignment operator - break - else: - variables_content += '{}{}'.format( prefix, line_tok.value ) - - if pa_variables_lengths: - max_variables_length = max( - max_variables_length, max( pa_variables_lengths ) ) - - all_pa_variables_lengths.append( pa_variables_lengths ) - - # after saving this line's max variable length, - # we check if next line has the same depth as this line, - # if not, we don't want to calculate their max variable length together - # so we break the while loop, update alignment so far, and - # then go to next line that has '=' - if next_line: - if this_line.depth != next_line.depth: - break - # if this line contains objects with newline entries, - # start new block alignment - if contain_object: - break - - # if no update of max_length, just go to the next block - if max_variables_length == 0: - continue - - max_variables_length += 2 - - # Update the assignment token values based on the max variable length - for all_pa_variables_lengths_index, pa_variables_lengths in enumerate( - all_pa_variables_lengths ): - if not pa_variables_lengths: - continue - this_line = final_lines[ final_lines_index + - all_pa_variables_lengths_index ] - - # only the first assignment operator on each line - pa_variables_lengths_index = 0 - for line_tok in this_line.tokens: - if line_tok.is_assign or line_tok.is_augassign: - assert pa_variables_lengths[ 0 ] < max_variables_length - - if pa_variables_lengths_index < len( pa_variables_lengths ): - whitespace = ' ' * ( - 
max_variables_length - pa_variables_lengths[ 0 ] - - 1 ) - - assign_content = '{}{}'.format( - whitespace, line_tok.value.strip() ) - - existing_whitespace_prefix = \ - line_tok.formatted_whitespace_prefix.lstrip('\n') - - # in case the existing spaces are larger than padded spaces - if ( len( whitespace ) == 1 or len( whitespace ) > 1 and - len( existing_whitespace_prefix ) - > len( whitespace ) ): - line_tok.whitespace_prefix = '' - elif assign_content.startswith( - existing_whitespace_prefix ): - assign_content = assign_content[ - len( existing_whitespace_prefix ): ] - - # update the assignment operator value - line_tok.value = assign_content - - pa_variables_lengths_index += 1 - - final_lines_index += len( all_pa_variables_lengths ) - - process_content = True + +def _AlignAssignment(final_lines): + """Align assignment operators and augmented assignment operators to the same column""" + + final_lines_index = 0 + while final_lines_index < len(final_lines): + line = final_lines[final_lines_index] + + assert line.tokens + process_content = False + + for tok in line.tokens: + if tok.is_assign or tok.is_augassign: + # all pre assignment variable lengths in one block of lines + all_pa_variables_lengths = [] + max_variables_length = 0 + + while True: + # EOF + if final_lines_index + len(all_pa_variables_lengths) == len( + final_lines): + break + + this_line_index = final_lines_index + len(all_pa_variables_lengths) + this_line = final_lines[this_line_index] + + next_line = None + if this_line_index < len(final_lines) - 1: + next_line = final_lines[final_lines_index + + len(all_pa_variables_lengths) + 1] + + assert this_line.tokens, next_line.tokens + + # align them differently when there is a blank line in between + if (all_pa_variables_lengths and + this_line.tokens[0].formatted_whitespace_prefix.startswith('\n\n') + ): + break + + # if there is a standalone comment or keyword statement line + # or other lines without assignment in between, break + elif 
(all_pa_variables_lengths and + True not in [tok.is_assign or tok.is_augassign + for tok in this_line.tokens]): + if this_line.tokens[0].is_comment: + if style.Get('NEW_ALIGNMENT_AFTER_COMMENTLINE'): break + else: + break + + if this_line.disable: + all_pa_variables_lengths.append([]) + continue + + variables_content = '' + pa_variables_lengths = [] + contain_object = False + line_tokens = this_line.tokens + # only one assignment expression is on each line + for index in range(len(line_tokens)): + line_tok = line_tokens[index] + + prefix = line_tok.formatted_whitespace_prefix + newline_index = prefix.rfind('\n') + if newline_index != -1: + variables_content = '' + prefix = prefix[newline_index + 1:] + + if line_tok.is_assign or line_tok.is_augassign: + next_toks = [ + line_tokens[i] for i in range(index + 1, len(line_tokens)) + ] + # if there is object(list/tuple/dict) with newline entries, break, + # update the alignment so far and start to calulate new alignment + for tok in next_toks: + if tok.value in ['(', '[', '{'] and tok.next_token: + if (tok.next_token.formatted_whitespace_prefix.startswith( + '\n') or + (tok.next_token.is_comment and tok.next_token.next_token + .formatted_whitespace_prefix.startswith('\n'))): + pa_variables_lengths.append(len(variables_content)) + contain_object = True + break + if not contain_object: + if line_tok.is_assign: + pa_variables_lengths.append(len(variables_content)) + # if augassign, add the extra augmented part to the max length caculation + elif line_tok.is_augassign: + pa_variables_lengths.append( + len(variables_content) + len(line_tok.value) - 1) + # don't add the tokens + # after the assignment operator + break + else: + variables_content += '{}{}'.format(prefix, line_tok.value) + + if pa_variables_lengths: + max_variables_length = max( + max_variables_length, max(pa_variables_lengths)) + + all_pa_variables_lengths.append(pa_variables_lengths) + + # after saving this line's max variable length, + # we check if next line 
has the same depth as this line, + # if not, we don't want to calculate their max variable length together + # so we break the while loop, update alignment so far, and + # then go to next line that has '=' + if next_line: + if this_line.depth != next_line.depth: + break + # if this line contains objects with newline entries, + # start new block alignment + if contain_object: + break + + # if no update of max_length, just go to the next block + if max_variables_length == 0: + continue + + max_variables_length += 2 + + # Update the assignment token values based on the max variable length + for all_pa_variables_lengths_index, pa_variables_lengths in enumerate( + all_pa_variables_lengths): + if not pa_variables_lengths: + continue + this_line = final_lines[final_lines_index + + all_pa_variables_lengths_index] + + # only the first assignment operator on each line + pa_variables_lengths_index = 0 + for line_tok in this_line.tokens: + if line_tok.is_assign or line_tok.is_augassign: + assert pa_variables_lengths[0] < max_variables_length + + if pa_variables_lengths_index < len(pa_variables_lengths): + whitespace = ' ' * ( + max_variables_length - pa_variables_lengths[0] - 1) + + assign_content = '{}{}'.format( + whitespace, line_tok.value.strip()) + + existing_whitespace_prefix = \ + line_tok.formatted_whitespace_prefix.lstrip('\n') + + # in case the existing spaces are larger than padded spaces + if (len(whitespace) == 1 or len(whitespace) > 1 and + len(existing_whitespace_prefix) > len(whitespace)): + line_tok.whitespace_prefix = '' + elif assign_content.startswith(existing_whitespace_prefix): + assign_content = assign_content[ + len(existing_whitespace_prefix):] - if not process_content: - final_lines_index += 1 + # update the assignment operator value + line_tok.value = assign_content + pa_variables_lengths_index += 1 -def _FormatFinalLines( final_lines, verify ): - """Compose the final output from the finalized lines.""" - formatted_code = [] - for line in 
final_lines: - formatted_line = [] - for tok in line.tokens: - if not tok.is_pseudo: - formatted_line.append( tok.formatted_whitespace_prefix ) - formatted_line.append( tok.value ) - elif ( not tok.next_token.whitespace_prefix.startswith( '\n' ) and - not tok.next_token.whitespace_prefix.startswith( ' ' ) ): - if ( tok.previous_token.value == ':' or - tok.next_token.value not in ',}])' ): - formatted_line.append( ' ' ) + final_lines_index += len(all_pa_variables_lengths) - formatted_code.append( ''.join( formatted_line ) ) - if verify: - verifier.VerifyCode( formatted_code[ -1 ] ) + process_content = True + break - return ''.join( formatted_code ) + '\n' + if not process_content: + final_lines_index += 1 -class _StateNode( object ): - """An edge in the solution space from 'previous.state' to 'state'. +def _FormatFinalLines(final_lines, verify): + """Compose the final output from the finalized lines.""" + formatted_code = [] + for line in final_lines: + formatted_line = [] + for tok in line.tokens: + if not tok.is_pseudo: + formatted_line.append(tok.formatted_whitespace_prefix) + formatted_line.append(tok.value) + elif (not tok.next_token.whitespace_prefix.startswith('\n') and + not tok.next_token.whitespace_prefix.startswith(' ')): + if (tok.previous_token.value == ':' or + tok.next_token.value not in ',}])'): + formatted_line.append(' ') + + formatted_code.append(''.join(formatted_line)) + if verify: + verifier.VerifyCode(formatted_code[-1]) + + return ''.join(formatted_code) + '\n' + + +class _StateNode(object): + """An edge in the solution space from 'previous.state' to 'state'. Attributes: state: (format_decision_state.FormatDecisionState) The format decision state @@ -614,31 +598,32 @@ class _StateNode( object ): previous: (_StateNode) The previous state node in the graph. """ - # TODO(morbo): Add a '__cmp__' method. + # TODO(morbo): Add a '__cmp__' method. 
- def __init__( self, state, newline, previous ): - self.state = state.Clone() - self.newline = newline - self.previous = previous + def __init__(self, state, newline, previous): + self.state = state.Clone() + self.newline = newline + self.previous = previous - def __repr__( self ): # pragma: no cover - return 'StateNode(state=[\n{0}\n], newline={1})'.format( - self.state, self.newline ) + def __repr__(self): # pragma: no cover + return 'StateNode(state=[\n{0}\n], newline={1})'.format( + self.state, self.newline) # A tuple of (penalty, count) that is used to prioritize the BFS. In case of # equal penalties, we prefer states that were inserted first. During state # generation, we make sure that we insert states first that break the line as # late as possible. -_OrderedPenalty = collections.namedtuple( 'OrderedPenalty', [ 'penalty', 'count' ] ) +_OrderedPenalty = collections.namedtuple('OrderedPenalty', ['penalty', 'count']) # An item in the prioritized BFS search queue. The 'StateNode's 'state' has # the given '_OrderedPenalty'. -_QueueItem = collections.namedtuple( 'QueueItem', [ 'ordered_penalty', 'state_node' ] ) +_QueueItem = collections.namedtuple( + 'QueueItem', ['ordered_penalty', 'state_node']) -def _AnalyzeSolutionSpace( initial_state ): - """Analyze the entire solution space starting from initial_state. +def _AnalyzeSolutionSpace(initial_state): + """Analyze the entire solution space starting from initial_state. This implements a variant of Dijkstra's algorithm on the graph that spans the solution space (LineStates are the nodes). The algorithm tries to find @@ -652,49 +637,49 @@ def _AnalyzeSolutionSpace( initial_state ): Returns: True if a formatting solution was found. False otherwise. """ - count = 0 - seen = set() - p_queue = [] - - # Insert start element. 
- node = _StateNode( initial_state, False, None ) - heapq.heappush( p_queue, _QueueItem( _OrderedPenalty( 0, count ), node ) ) - - count += 1 - while p_queue: - item = p_queue[ 0 ] - penalty = item.ordered_penalty.penalty - node = item.state_node - if not node.state.next_token: - break - heapq.heappop( p_queue ) - - if count > 10000: - node.state.ignore_stack_for_comparison = True - - # Unconditionally add the state and check if it was present to avoid having - # to hash it twice in the common case (state hashing is expensive). - before_seen_count = len( seen ) - seen.add( node.state ) - # If seen didn't change size, the state was already present. - if before_seen_count == len( seen ): - continue - - # FIXME(morbo): Add a 'decision' element? - - count = _AddNextStateToQueue( penalty, node, False, count, p_queue ) - count = _AddNextStateToQueue( penalty, node, True, count, p_queue ) - - if not p_queue: - # We weren't able to find a solution. Do nothing. - return False + count = 0 + seen = set() + p_queue = [] + + # Insert start element. + node = _StateNode(initial_state, False, None) + heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(0, count), node)) + + count += 1 + while p_queue: + item = p_queue[0] + penalty = item.ordered_penalty.penalty + node = item.state_node + if not node.state.next_token: + break + heapq.heappop(p_queue) + + if count > 10000: + node.state.ignore_stack_for_comparison = True + + # Unconditionally add the state and check if it was present to avoid having + # to hash it twice in the common case (state hashing is expensive). + before_seen_count = len(seen) + seen.add(node.state) + # If seen didn't change size, the state was already present. + if before_seen_count == len(seen): + continue + + # FIXME(morbo): Add a 'decision' element? + + count = _AddNextStateToQueue(penalty, node, False, count, p_queue) + count = _AddNextStateToQueue(penalty, node, True, count, p_queue) + + if not p_queue: + # We weren't able to find a solution. Do nothing. 
+ return False - _ReconstructPath( initial_state, heapq.heappop( p_queue ).state_node ) - return True + _ReconstructPath(initial_state, heapq.heappop(p_queue).state_node) + return True -def _AddNextStateToQueue( penalty, previous_node, newline, count, p_queue ): - """Add the following state to the analysis queue. +def _AddNextStateToQueue(penalty, previous_node, newline, count, p_queue): + """Add the following state to the analysis queue. Assume the current state is 'previous_node' and has been reached with a penalty of 'penalty'. Insert a line break if 'newline' is True. @@ -710,23 +695,23 @@ def _AddNextStateToQueue( penalty, previous_node, newline, count, p_queue ): Returns: The updated number of elements in the queue. """ - must_split = previous_node.state.MustSplit() - if newline and not previous_node.state.CanSplit( must_split ): - # Don't add a newline if the token cannot be split. - return count - if not newline and must_split: - # Don't add a token we must split but where we aren't splitting. - return count + must_split = previous_node.state.MustSplit() + if newline and not previous_node.state.CanSplit(must_split): + # Don't add a newline if the token cannot be split. + return count + if not newline and must_split: + # Don't add a token we must split but where we aren't splitting. + return count - node = _StateNode( previous_node.state, newline, previous_node ) - penalty += node.state.AddTokenToState( - newline = newline, dry_run = True, must_split = must_split ) - heapq.heappush( p_queue, _QueueItem( _OrderedPenalty( penalty, count ), node ) ) - return count + 1 + node = _StateNode(previous_node.state, newline, previous_node) + penalty += node.state.AddTokenToState( + newline=newline, dry_run=True, must_split=must_split) + heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(penalty, count), node)) + return count + 1 -def _ReconstructPath( initial_state, current ): - """Reconstruct the path through the queue with lowest penalty. 
+def _ReconstructPath(initial_state, current): + """Reconstruct the path through the queue with lowest penalty. Arguments: initial_state: (format_decision_state.FormatDecisionState) The initial state @@ -734,21 +719,21 @@ def _ReconstructPath( initial_state, current ): current: (_StateNode) The node in the decision graph that is the end point of the path with the least penalty. """ - path = collections.deque() + path = collections.deque() - while current.previous: - path.appendleft( current ) - current = current.previous + while current.previous: + path.appendleft(current) + current = current.previous - for node in path: - initial_state.AddTokenToState( newline = node.newline, dry_run = False ) + for node in path: + initial_state.AddTokenToState(newline=node.newline, dry_run=False) NESTED_DEPTH = [] -def _FormatFirstToken( first_token, indent_depth, prev_line, final_lines ): - """Format the first token in the logical line. +def _FormatFirstToken(first_token, indent_depth, prev_line, final_lines): + """Format the first token in the logical line. Add a newline and the required indent before the first token of the logical line. @@ -761,22 +746,22 @@ def _FormatFirstToken( first_token, indent_depth, prev_line, final_lines ): final_lines: (list of logical_line.LogicalLine) The logical lines that have already been processed. 
""" - global NESTED_DEPTH - while NESTED_DEPTH and NESTED_DEPTH[ -1 ] > indent_depth: - NESTED_DEPTH.pop() + global NESTED_DEPTH + while NESTED_DEPTH and NESTED_DEPTH[-1] > indent_depth: + NESTED_DEPTH.pop() - first_nested = False - if _IsClassOrDef( first_token ): - if not NESTED_DEPTH: - NESTED_DEPTH = [ indent_depth ] - elif NESTED_DEPTH[ -1 ] < indent_depth: - first_nested = True - NESTED_DEPTH.append( indent_depth ) + first_nested = False + if _IsClassOrDef(first_token): + if not NESTED_DEPTH: + NESTED_DEPTH = [indent_depth] + elif NESTED_DEPTH[-1] < indent_depth: + first_nested = True + NESTED_DEPTH.append(indent_depth) - first_token.AddWhitespacePrefix( - _CalculateNumberOfNewlines( - first_token, indent_depth, prev_line, final_lines, first_nested ), - indent_level = indent_depth ) + first_token.AddWhitespacePrefix( + _CalculateNumberOfNewlines( + first_token, indent_depth, prev_line, final_lines, first_nested), + indent_level=indent_depth) NO_BLANK_LINES = 1 @@ -784,15 +769,16 @@ def _FormatFirstToken( first_token, indent_depth, prev_line, final_lines ): TWO_BLANK_LINES = 3 -def _IsClassOrDef( tok ): - if tok.value in { 'class', 'def', '@' }: - return True - return ( tok.next_token and tok.value == 'async' and tok.next_token.value == 'def' ) +def _IsClassOrDef(tok): + if tok.value in {'class', 'def', '@'}: + return True + return ( + tok.next_token and tok.value == 'async' and tok.next_token.value == 'def') def _CalculateNumberOfNewlines( - first_token, indent_depth, prev_line, final_lines, first_nested ): - """Calculate the number of newlines we need to add. + first_token, indent_depth, prev_line, final_lines, first_nested): + """Calculate the number of newlines we need to add. Arguments: first_token: (format_token.FormatToken) The first token in the logical @@ -807,103 +793,102 @@ def _CalculateNumberOfNewlines( Returns: The number of newlines needed before the first token. """ - # TODO(morbo): Special handling for imports. 
- # TODO(morbo): Create a knob that can tune these. - if prev_line is None: - # The first line in the file. Don't add blank lines. - # FIXME(morbo): Is this correct? - if first_token.newlines is not None: + # TODO(morbo): Special handling for imports. + # TODO(morbo): Create a knob that can tune these. + if prev_line is None: + # The first line in the file. Don't add blank lines. + # FIXME(morbo): Is this correct? + if first_token.newlines is not None: + first_token.newlines = None + return 0 + + if first_token.is_docstring: + if (prev_line.first.value == 'class' and + style.Get('BLANK_LINE_BEFORE_CLASS_DOCSTRING')): + # Enforce a blank line before a class's docstring. + return ONE_BLANK_LINE + elif (prev_line.first.value.startswith('#') and + style.Get('BLANK_LINE_BEFORE_MODULE_DOCSTRING')): + # Enforce a blank line before a module's docstring. + return ONE_BLANK_LINE + # The docstring shouldn't have a newline before it. + return NO_BLANK_LINES + + if first_token.is_name and not indent_depth: + if prev_line.first.value in {'from', 'import'}: + # Support custom number of blank lines between top-level imports and + # variable definitions. + return 1 + style.Get( + 'BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES') + + prev_last_token = prev_line.last + if prev_last_token.is_docstring: + if (not indent_depth and first_token.value in {'class', 'def', 'async'}): + # Separate a class or function from the module-level docstring with + # appropriate number of blank lines. + return 1 + style.Get('BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION') + if (first_nested and + not style.Get('BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF') and + _IsClassOrDef(first_token)): + first_token.newlines = None + return NO_BLANK_LINES + if _NoBlankLinesBeforeCurrentToken(prev_last_token.value, first_token, + prev_last_token): + return NO_BLANK_LINES + else: + return ONE_BLANK_LINE + + if _IsClassOrDef(first_token): + # TODO(morbo): This can go once the blank line calculator is more + # sophisticated. 
+ if not indent_depth: + # This is a top-level class or function. + is_inline_comment = prev_last_token.whitespace_prefix.count('\n') == 0 + if (not prev_line.disable and prev_last_token.is_comment and + not is_inline_comment): + # This token follows a non-inline comment. + if _NoBlankLinesBeforeCurrentToken(prev_last_token.value, first_token, + prev_last_token): + # Assume that the comment is "attached" to the current line. + # Therefore, we want two blank lines before the comment. + index = len(final_lines) - 1 + while index > 0: + if not final_lines[index - 1].is_comment: + break + index -= 1 + if final_lines[index - 1].first.value == '@': + final_lines[index].first.AdjustNewlinesBefore(NO_BLANK_LINES) + else: + prev_last_token.AdjustNewlinesBefore( + 1 + style.Get('BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION')) + if first_token.newlines is not None: first_token.newlines = None - return 0 - - if first_token.is_docstring: - if ( prev_line.first.value == 'class' and - style.Get( 'BLANK_LINE_BEFORE_CLASS_DOCSTRING' ) ): - # Enforce a blank line before a class's docstring. - return ONE_BLANK_LINE - elif ( prev_line.first.value.startswith( '#' ) and - style.Get( 'BLANK_LINE_BEFORE_MODULE_DOCSTRING' ) ): - # Enforce a blank line before a module's docstring. - return ONE_BLANK_LINE - # The docstring shouldn't have a newline before it. + return NO_BLANK_LINES + elif _IsClassOrDef(prev_line.first): + if first_nested and not style.Get( + 'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF'): + first_token.newlines = None return NO_BLANK_LINES - if first_token.is_name and not indent_depth: - if prev_line.first.value in { 'from', 'import' }: - # Support custom number of blank lines between top-level imports and - # variable definitions. 
- return 1 + style.Get( - 'BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES' ) - - prev_last_token = prev_line.last - if prev_last_token.is_docstring: - if ( not indent_depth and first_token.value in { 'class', 'def', 'async' } ): - # Separate a class or function from the module-level docstring with - # appropriate number of blank lines. - return 1 + style.Get( 'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION' ) - if ( first_nested and - not style.Get( 'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF' ) and - _IsClassOrDef( first_token ) ): - first_token.newlines = None - return NO_BLANK_LINES - if _NoBlankLinesBeforeCurrentToken( prev_last_token.value, first_token, - prev_last_token ): - return NO_BLANK_LINES - else: - return ONE_BLANK_LINE - - if _IsClassOrDef( first_token ): - # TODO(morbo): This can go once the blank line calculator is more - # sophisticated. - if not indent_depth: - # This is a top-level class or function. - is_inline_comment = prev_last_token.whitespace_prefix.count( '\n' ) == 0 - if ( not prev_line.disable and prev_last_token.is_comment and - not is_inline_comment ): - # This token follows a non-inline comment. - if _NoBlankLinesBeforeCurrentToken( prev_last_token.value, first_token, - prev_last_token ): - # Assume that the comment is "attached" to the current line. - # Therefore, we want two blank lines before the comment. 
- index = len( final_lines ) - 1 - while index > 0: - if not final_lines[ index - 1 ].is_comment: - break - index -= 1 - if final_lines[ index - 1 ].first.value == '@': - final_lines[ index ].first.AdjustNewlinesBefore( - NO_BLANK_LINES ) - else: - prev_last_token.AdjustNewlinesBefore( - 1 + style.Get( 'BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION' ) ) - if first_token.newlines is not None: - first_token.newlines = None - return NO_BLANK_LINES - elif _IsClassOrDef( prev_line.first ): - if first_nested and not style.Get( - 'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF' ): - first_token.newlines = None - return NO_BLANK_LINES - - # Calculate how many newlines were between the original lines. We want to - # retain that formatting if it doesn't violate one of the style guide rules. - if first_token.is_comment: - first_token_lineno = first_token.lineno - first_token.value.count( '\n' ) - else: - first_token_lineno = first_token.lineno + # Calculate how many newlines were between the original lines. We want to + # retain that formatting if it doesn't violate one of the style guide rules. + if first_token.is_comment: + first_token_lineno = first_token.lineno - first_token.value.count('\n') + else: + first_token_lineno = first_token.lineno - prev_last_token_lineno = prev_last_token.lineno - if prev_last_token.is_multiline_string: - prev_last_token_lineno += prev_last_token.value.count( '\n' ) + prev_last_token_lineno = prev_last_token.lineno + if prev_last_token.is_multiline_string: + prev_last_token_lineno += prev_last_token.value.count('\n') - if first_token_lineno - prev_last_token_lineno > 1: - return ONE_BLANK_LINE + if first_token_lineno - prev_last_token_lineno > 1: + return ONE_BLANK_LINE - return NO_BLANK_LINES + return NO_BLANK_LINES -def _SingleOrMergedLines( lines ): - """Generate the lines we want to format. +def _SingleOrMergedLines(lines): + """Generate the lines we want to format. Arguments: lines: (list of logical_line.LogicalLine) Lines we want to format. 
@@ -912,49 +897,46 @@ def _SingleOrMergedLines( lines ): Either a single line, if the current line cannot be merged with the succeeding line, or the next two lines merged into one line. """ - index = 0 - last_was_merged = False - while index < len( lines ): - if lines[ index ].disable: - line = lines[ index ] - index += 1 - while index < len( lines ): - column = line.last.column + 2 - if lines[ index ].lineno != line.lineno: - break - if line.last.value != ':': - leaf = pytree.Leaf( - type = token.SEMI, - value = ';', - context = ( '', ( line.lineno, column ) ) ) - line.AppendToken( - format_token.FormatToken( leaf, - pytree_utils.NodeName( leaf ) ) ) - for tok in lines[ index ].tokens: - line.AppendToken( tok ) - index += 1 - yield line - elif line_joiner.CanMergeMultipleLines( lines[ index : ], last_was_merged ): - # TODO(morbo): This splice is potentially very slow. Come up with a more - # performance-friendly way of determining if two lines can be merged. - next_line = lines[ index + 1 ] - for tok in next_line.tokens: - lines[ index ].AppendToken( tok ) - if ( len( next_line.tokens ) == 1 and next_line.first.is_multiline_string ): - # This may be a multiline shebang. In that case, we want to retain the - # formatting. Otherwise, it could mess up the shell script's syntax. - lines[ index ].disable = True - yield lines[ index ] - index += 2 - last_was_merged = True - else: - yield lines[ index ] - index += 1 - last_was_merged = False - - -def _NoBlankLinesBeforeCurrentToken( text, cur_token, prev_token ): - """Determine if there are no blank lines before the current token. 
+ index = 0 + last_was_merged = False + while index < len(lines): + if lines[index].disable: + line = lines[index] + index += 1 + while index < len(lines): + column = line.last.column + 2 + if lines[index].lineno != line.lineno: + break + if line.last.value != ':': + leaf = pytree.Leaf( + type=token.SEMI, value=';', context=('', (line.lineno, column))) + line.AppendToken( + format_token.FormatToken(leaf, pytree_utils.NodeName(leaf))) + for tok in lines[index].tokens: + line.AppendToken(tok) + index += 1 + yield line + elif line_joiner.CanMergeMultipleLines(lines[index:], last_was_merged): + # TODO(morbo): This splice is potentially very slow. Come up with a more + # performance-friendly way of determining if two lines can be merged. + next_line = lines[index + 1] + for tok in next_line.tokens: + lines[index].AppendToken(tok) + if (len(next_line.tokens) == 1 and next_line.first.is_multiline_string): + # This may be a multiline shebang. In that case, we want to retain the + # formatting. Otherwise, it could mess up the shell script's syntax. + lines[index].disable = True + yield lines[index] + index += 2 + last_was_merged = True + else: + yield lines[index] + index += 1 + last_was_merged = False + + +def _NoBlankLinesBeforeCurrentToken(text, cur_token, prev_token): + """Determine if there are no blank lines before the current token. The previous token is a docstring or comment. The prev_token_lineno is the start of the text of that token. Counting the number of newlines in its text @@ -972,8 +954,8 @@ def _NoBlankLinesBeforeCurrentToken( text, cur_token, prev_token ): Returns: True if there is no blank line before the current token. 
""" - cur_token_lineno = cur_token.lineno - if cur_token.is_comment: - cur_token_lineno -= cur_token.value.count( '\n' ) - num_newlines = text.count( '\n' ) if not prev_token.is_comment else 0 - return prev_token.lineno + num_newlines == cur_token_lineno - 1 + cur_token_lineno = cur_token.lineno + if cur_token.is_comment: + cur_token_lineno -= cur_token.value.count('\n') + num_newlines = text.count('\n') if not prev_token.is_comment else 0 + return prev_token.lineno + num_newlines == cur_token_lineno - 1 diff --git a/yapf/yapflib/style.py b/yapf/yapflib/style.py index 684bfb274..820952492 100644 --- a/yapf/yapflib/style.py +++ b/yapf/yapflib/style.py @@ -21,53 +21,53 @@ from yapf.yapflib import py3compat -class StyleConfigError( errors.YapfError ): - """Raised when there's a problem reading the style configuration.""" - pass +class StyleConfigError(errors.YapfError): + """Raised when there's a problem reading the style configuration.""" + pass -def Get( setting_name ): - """Get a style setting.""" - return _style[ setting_name ] +def Get(setting_name): + """Get a style setting.""" + return _style[setting_name] -def GetOrDefault( setting_name, default_value ): - """Get a style setting or default value if the setting does not exist.""" - return _style.get( setting_name, default_value ) +def GetOrDefault(setting_name, default_value): + """Get a style setting or default value if the setting does not exist.""" + return _style.get(setting_name, default_value) def Help(): - """Return dict mapping style names to help strings.""" - return _STYLE_HELP + """Return dict mapping style names to help strings.""" + return _STYLE_HELP -def SetGlobalStyle( style ): - """Set a style dict.""" - global _style - global _GLOBAL_STYLE_FACTORY - factory = _GetStyleFactory( style ) - if factory: - _GLOBAL_STYLE_FACTORY = factory - _style = style +def SetGlobalStyle(style): + """Set a style dict.""" + global _style + global _GLOBAL_STYLE_FACTORY + factory = _GetStyleFactory(style) + if 
factory: + _GLOBAL_STYLE_FACTORY = factory + _style = style _STYLE_HELP = dict( - ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT = textwrap.dedent( + ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=textwrap.dedent( """\ - Align closing bracket with visual indentation.""" ), - ALIGN_ASSIGNMENT = textwrap.dedent( + Align closing bracket with visual indentation."""), + ALIGN_ASSIGNMENT=textwrap.dedent( """\ Align assignment or augmented assignment operators. If there is a blank line or newline comment or objects with newline entries in between, - it will start new block alignment.""" ), - NEW_ALIGNMENT_AFTER_COMMENTLINE = textwrap.dedent( + it will start new block alignment."""), + NEW_ALIGNMENT_AFTER_COMMENTLINE=textwrap.dedent( """\ Start new assignment or colon alignment when there is a newline comment in between.""" ), - ALLOW_MULTILINE_LAMBDAS = textwrap.dedent( + ALLOW_MULTILINE_LAMBDAS=textwrap.dedent( """\ - Allow lambdas to be formatted on more than one line.""" ), - ALLOW_MULTILINE_DICTIONARY_KEYS = textwrap.dedent( + Allow lambdas to be formatted on more than one line."""), + ALLOW_MULTILINE_DICTIONARY_KEYS=textwrap.dedent( """\ Allow dictionary keys to exist on multiple lines. For example: @@ -75,15 +75,15 @@ def SetGlobalStyle( style ): ('this is the first element of a tuple', 'this is the second element of a tuple'): value, - }""" ), - ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS = textwrap.dedent( + }"""), + ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=textwrap.dedent( """\ Allow splitting before a default / named assignment in an argument list. - """ ), - ALLOW_SPLIT_BEFORE_DICT_VALUE = textwrap.dedent( + """), + ALLOW_SPLIT_BEFORE_DICT_VALUE=textwrap.dedent( """\ - Allow splits before the dictionary value.""" ), - ARITHMETIC_PRECEDENCE_INDICATION = textwrap.dedent( + Allow splits before the dictionary value."""), + ARITHMETIC_PRECEDENCE_INDICATION=textwrap.dedent( """\ Let spacing indicate operator precedence. 
For example: @@ -103,8 +103,8 @@ def SetGlobalStyle( style ): e = 1*2 - 3 f = 1 + 2 + 3 + 4 - """ ), - BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = textwrap.dedent( + """), + BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=textwrap.dedent( """\ Insert a blank line before a 'def' or 'class' immediately nested within another 'def' or 'class'. For example: @@ -112,22 +112,22 @@ def SetGlobalStyle( style ): class Foo: # <------ this blank line def method(): - ...""" ), - BLANK_LINE_BEFORE_CLASS_DOCSTRING = textwrap.dedent( + ..."""), + BLANK_LINE_BEFORE_CLASS_DOCSTRING=textwrap.dedent( """\ - Insert a blank line before a class-level docstring.""" ), - BLANK_LINE_BEFORE_MODULE_DOCSTRING = textwrap.dedent( + Insert a blank line before a class-level docstring."""), + BLANK_LINE_BEFORE_MODULE_DOCSTRING=textwrap.dedent( """\ - Insert a blank line before a module docstring.""" ), - BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION = textwrap.dedent( + Insert a blank line before a module docstring."""), + BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=textwrap.dedent( """\ Number of blank lines surrounding top-level function and class - definitions.""" ), - BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES = textwrap.dedent( + definitions."""), + BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES=textwrap.dedent( """\ Number of blank lines between top-level imports and variable - definitions.""" ), - COALESCE_BRACKETS = textwrap.dedent( + definitions."""), + COALESCE_BRACKETS=textwrap.dedent( """\ Do not split consecutive brackets. Only relevant when dedent_closing_brackets is set. For example: @@ -144,10 +144,10 @@ def method(): call_func_that_takes_a_dict({ 'key1': 'value1', 'key2': 'value2', - })""" ), - COLUMN_LIMIT = textwrap.dedent( """\ - The column limit.""" ), - CONTINUATION_ALIGN_STYLE = textwrap.dedent( + })"""), + COLUMN_LIMIT=textwrap.dedent("""\ + The column limit."""), + CONTINUATION_ALIGN_STYLE=textwrap.dedent( """\ The style for continuation alignment. 
Possible values are: @@ -157,11 +157,11 @@ def method(): CONTINUATION_INDENT_WIDTH spaces) for continuation alignment. - VALIGN-RIGHT: Vertically align continuation lines to multiple of INDENT_WIDTH columns. Slightly right (one tab or a few spaces) if - cannot vertically align continuation lines with indent characters.""" ), - CONTINUATION_INDENT_WIDTH = textwrap.dedent( + cannot vertically align continuation lines with indent characters."""), + CONTINUATION_INDENT_WIDTH=textwrap.dedent( """\ - Indent width used for line continuations.""" ), - DEDENT_CLOSING_BRACKETS = textwrap.dedent( + Indent width used for line continuations."""), + DEDENT_CLOSING_BRACKETS=textwrap.dedent( """\ Put closing brackets on a separate line, dedented, if the bracketed expression can't fit in a single line. Applies to all kinds of brackets, @@ -179,33 +179,33 @@ def method(): start_ts=now()-timedelta(days=3), end_ts=now(), ) # <--- this bracket is dedented and on a separate line - """ ), - DISABLE_ENDING_COMMA_HEURISTIC = textwrap.dedent( + """), + DISABLE_ENDING_COMMA_HEURISTIC=textwrap.dedent( """\ Disable the heuristic which places each list element on a separate line - if the list is comma-terminated.""" ), - EACH_DICT_ENTRY_ON_SEPARATE_LINE = textwrap.dedent( + if the list is comma-terminated."""), + EACH_DICT_ENTRY_ON_SEPARATE_LINE=textwrap.dedent( """\ - Place each dictionary entry onto its own line.""" ), - FORCE_MULTILINE_DICT = textwrap.dedent( + Place each dictionary entry onto its own line."""), + FORCE_MULTILINE_DICT=textwrap.dedent( """\ Require multiline dictionary even if it would normally fit on one line. For example: config = { 'key1': 'value1' - }""" ), - I18N_COMMENT = textwrap.dedent( + }"""), + I18N_COMMENT=textwrap.dedent( """\ The regex for an i18n comment. 
The presence of this comment stops reformatting of that line, because the comments are required to be - next to the string they translate.""" ), - I18N_FUNCTION_CALL = textwrap.dedent( + next to the string they translate."""), + I18N_FUNCTION_CALL=textwrap.dedent( """\ The i18n function call names. The presence of this function stops reformattting on that line, because the string it has cannot be moved - away from the i18n comment.""" ), - INDENT_CLOSING_BRACKETS = textwrap.dedent( + away from the i18n comment."""), + INDENT_CLOSING_BRACKETS=textwrap.dedent( """\ Put closing brackets on a separate line, indented, if the bracketed expression can't fit in a single line. Applies to all kinds of brackets, @@ -223,8 +223,8 @@ def method(): start_ts=now()-timedelta(days=3), end_ts=now(), ) # <--- this bracket is indented and on a separate line - """ ), - INDENT_DICTIONARY_VALUE = textwrap.dedent( + """), + INDENT_DICTIONARY_VALUE=textwrap.dedent( """\ Indent the dictionary value if it cannot fit on the same line as the dictionary key. For example: @@ -235,16 +235,16 @@ def method(): 'key2': value1 + value2, } - """ ), - INDENT_WIDTH = textwrap.dedent( + """), + INDENT_WIDTH=textwrap.dedent( """\ - The number of columns to use for indentation.""" ), - INDENT_BLANK_LINES = textwrap.dedent( """\ - Indent blank lines.""" ), - JOIN_MULTIPLE_LINES = textwrap.dedent( + The number of columns to use for indentation."""), + INDENT_BLANK_LINES=textwrap.dedent("""\ + Indent blank lines."""), + JOIN_MULTIPLE_LINES=textwrap.dedent( """\ - Join short lines into one line. E.g., single line 'if' statements.""" ), - NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS = textwrap.dedent( + Join short lines into one line. E.g., single line 'if' statements."""), + NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=textwrap.dedent( """\ Do not include spaces around selected binary operators. 
For example: @@ -253,26 +253,26 @@ def method(): will be formatted as follows when configured with "*,/": 1 + 2*3 - 4/5 - """ ), - SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = textwrap.dedent( + """), + SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=textwrap.dedent( """\ Insert a space between the ending comma and closing bracket of a list, - etc.""" ), - SPACE_INSIDE_BRACKETS = textwrap.dedent( + etc."""), + SPACE_INSIDE_BRACKETS=textwrap.dedent( """\ Use spaces inside brackets, braces, and parentheses. For example: method_call( 1 ) my_dict[ 3 ][ 1 ][ get_index( *args, **kwargs ) ] my_set = { 1, 2, 3 } - """ ), - SPACES_AROUND_POWER_OPERATOR = textwrap.dedent( + """), + SPACES_AROUND_POWER_OPERATOR=textwrap.dedent( """\ - Use spaces around the power operator.""" ), - SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN = textwrap.dedent( + Use spaces around the power operator."""), + SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=textwrap.dedent( """\ - Use spaces around default or named assigns.""" ), - SPACES_AROUND_DICT_DELIMITERS = textwrap.dedent( + Use spaces around default or named assigns."""), + SPACES_AROUND_DICT_DELIMITERS=textwrap.dedent( """\ Adds a space after the opening '{' and before the ending '}' dict delimiters. @@ -282,8 +282,8 @@ def method(): will be formatted as: { 1: 2 } - """ ), - SPACES_AROUND_LIST_DELIMITERS = textwrap.dedent( + """), + SPACES_AROUND_LIST_DELIMITERS=textwrap.dedent( """\ Adds a space after the opening '[' and before the ending ']' list delimiters. @@ -293,14 +293,14 @@ def method(): will be formatted as: [ 1, 2 ] - """ ), - SPACES_AROUND_SUBSCRIPT_COLON = textwrap.dedent( + """), + SPACES_AROUND_SUBSCRIPT_COLON=textwrap.dedent( """\ Use spaces around the subscript / slice operator. For example: my_list[1 : 10 : 2] - """ ), - SPACES_AROUND_TUPLE_DELIMITERS = textwrap.dedent( + """), + SPACES_AROUND_TUPLE_DELIMITERS=textwrap.dedent( """\ Adds a space after the opening '(' and before the ending ')' tuple delimiters. 
@@ -310,8 +310,8 @@ def method(): will be formatted as: ( 1, 2, 3 ) - """ ), - SPACES_BEFORE_COMMENT = textwrap.dedent( + """), + SPACES_BEFORE_COMMENT=textwrap.dedent( """\ The number of spaces required before a trailing comment. This can be a single value (representing the number of spaces @@ -353,31 +353,31 @@ def method(): a_very_long_statement_that_extends_beyond_the_final_column # Comment <-- the end of line comments are aligned based on the line length short # This is a shorter statement - """ ), # noqa - SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED = textwrap.dedent( + """), # noqa + SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=textwrap.dedent( """\ Split before arguments if the argument list is terminated by a - comma.""" ), - SPLIT_ALL_COMMA_SEPARATED_VALUES = textwrap.dedent( + comma."""), + SPLIT_ALL_COMMA_SEPARATED_VALUES=textwrap.dedent( """\ - Split before arguments""" ), - SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES = textwrap.dedent( + Split before arguments"""), + SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES=textwrap.dedent( """\ Split before arguments, but do not split all subexpressions recursively - (unless needed).""" ), - SPLIT_BEFORE_ARITHMETIC_OPERATOR = textwrap.dedent( + (unless needed)."""), + SPLIT_BEFORE_ARITHMETIC_OPERATOR=textwrap.dedent( """\ Set to True to prefer splitting before '+', '-', '*', '/', '//', or '@' - rather than after.""" ), - SPLIT_BEFORE_BITWISE_OPERATOR = textwrap.dedent( + rather than after."""), + SPLIT_BEFORE_BITWISE_OPERATOR=textwrap.dedent( """\ Set to True to prefer splitting before '&', '|' or '^' rather than - after.""" ), - SPLIT_BEFORE_CLOSING_BRACKET = textwrap.dedent( + after."""), + SPLIT_BEFORE_CLOSING_BRACKET=textwrap.dedent( """\ Split before the closing bracket if a list or dict literal doesn't fit on - a single line.""" ), - SPLIT_BEFORE_DICT_SET_GENERATOR = textwrap.dedent( + a single line."""), + SPLIT_BEFORE_DICT_SET_GENERATOR=textwrap.dedent( """\ Split before a dictionary or set generator (comp_for). 
For example, note the split before the 'for': @@ -385,8 +385,8 @@ def method(): foo = { variable: 'Hello world, have a nice day!' for variable in bar if variable != 42 - }""" ), - SPLIT_BEFORE_DOT = textwrap.dedent( + }"""), + SPLIT_BEFORE_DOT=textwrap.dedent( """\ Split before the '.' if we need to split a longer expression: @@ -396,24 +396,24 @@ def method(): foo = ('This is a really long string: {}, {}, {}, {}' .format(a, b, c, d)) - """ ), # noqa - SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = textwrap.dedent( + """), # noqa + SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=textwrap.dedent( """\ Split after the opening paren which surrounds an expression if it doesn't fit on a single line. - """ ), - SPLIT_BEFORE_FIRST_ARGUMENT = textwrap.dedent( + """), + SPLIT_BEFORE_FIRST_ARGUMENT=textwrap.dedent( """\ If an argument / parameter list is going to be split, then split before - the first argument.""" ), - SPLIT_BEFORE_LOGICAL_OPERATOR = textwrap.dedent( + the first argument."""), + SPLIT_BEFORE_LOGICAL_OPERATOR=textwrap.dedent( """\ Set to True to prefer splitting before 'and' or 'or' rather than - after.""" ), - SPLIT_BEFORE_NAMED_ASSIGNS = textwrap.dedent( + after."""), + SPLIT_BEFORE_NAMED_ASSIGNS=textwrap.dedent( """\ - Split named assignments onto individual lines.""" ), - SPLIT_COMPLEX_COMPREHENSION = textwrap.dedent( + Split named assignments onto individual lines."""), + SPLIT_COMPLEX_COMPREHENSION=textwrap.dedent( """\ Set to True to split list comprehensions and generators that have non-trivial expressions and multiple clauses before each of these @@ -429,36 +429,36 @@ def method(): a_long_var + 100 for a_long_var in xrange(1000) if a_long_var % 10] - """ ), - SPLIT_PENALTY_AFTER_OPENING_BRACKET = textwrap.dedent( + """), + SPLIT_PENALTY_AFTER_OPENING_BRACKET=textwrap.dedent( """\ - The penalty for splitting right after the opening bracket.""" ), - SPLIT_PENALTY_AFTER_UNARY_OPERATOR = textwrap.dedent( + The penalty for splitting right after the opening 
bracket."""), + SPLIT_PENALTY_AFTER_UNARY_OPERATOR=textwrap.dedent( """\ - The penalty for splitting the line after a unary operator.""" ), - SPLIT_PENALTY_ARITHMETIC_OPERATOR = textwrap.dedent( + The penalty for splitting the line after a unary operator."""), + SPLIT_PENALTY_ARITHMETIC_OPERATOR=textwrap.dedent( """\ The penalty of splitting the line around the '+', '-', '*', '/', '//', - ``%``, and '@' operators.""" ), - SPLIT_PENALTY_BEFORE_IF_EXPR = textwrap.dedent( + ``%``, and '@' operators."""), + SPLIT_PENALTY_BEFORE_IF_EXPR=textwrap.dedent( """\ - The penalty for splitting right before an if expression.""" ), - SPLIT_PENALTY_BITWISE_OPERATOR = textwrap.dedent( + The penalty for splitting right before an if expression."""), + SPLIT_PENALTY_BITWISE_OPERATOR=textwrap.dedent( """\ The penalty of splitting the line around the '&', '|', and '^' - operators.""" ), - SPLIT_PENALTY_COMPREHENSION = textwrap.dedent( + operators."""), + SPLIT_PENALTY_COMPREHENSION=textwrap.dedent( """\ The penalty for splitting a list comprehension or generator - expression.""" ), - SPLIT_PENALTY_EXCESS_CHARACTER = textwrap.dedent( + expression."""), + SPLIT_PENALTY_EXCESS_CHARACTER=textwrap.dedent( """\ - The penalty for characters over the column limit.""" ), - SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = textwrap.dedent( + The penalty for characters over the column limit."""), + SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=textwrap.dedent( """\ The penalty incurred by adding a line split to the logical line. The - more line splits added the higher the penalty.""" ), - SPLIT_PENALTY_IMPORT_NAMES = textwrap.dedent( + more line splits added the higher the penalty."""), + SPLIT_PENALTY_IMPORT_NAMES=textwrap.dedent( """\ The penalty of splitting a list of "import as" names. 
For example: @@ -470,200 +470,201 @@ def method(): from a_very_long_or_indented_module_name_yada_yad import ( long_argument_1, long_argument_2, long_argument_3) - """ ), # noqa - SPLIT_PENALTY_LOGICAL_OPERATOR = textwrap.dedent( + """), # noqa + SPLIT_PENALTY_LOGICAL_OPERATOR=textwrap.dedent( """\ The penalty of splitting the line around the 'and' and 'or' - operators.""" ), - USE_TABS = textwrap.dedent( """\ - Use the Tab character for indentation.""" ), - # BASED_ON_STYLE='Which predefined style this style is based on', + operators."""), + USE_TABS=textwrap.dedent( + """\ + Use the Tab character for indentation."""), + # BASED_ON_STYLE='Which predefined style this style is based on', ) def CreatePEP8Style(): - """Create the PEP8 formatting style.""" - return dict( - ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT = True, - ALIGN_ASSIGNMENT = False, - NEW_ALIGNMENT_AFTER_COMMENTLINE = False, - ALLOW_MULTILINE_LAMBDAS = False, - ALLOW_MULTILINE_DICTIONARY_KEYS = False, - ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS = True, - ALLOW_SPLIT_BEFORE_DICT_VALUE = True, - ARITHMETIC_PRECEDENCE_INDICATION = False, - BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = True, - BLANK_LINE_BEFORE_CLASS_DOCSTRING = False, - BLANK_LINE_BEFORE_MODULE_DOCSTRING = False, - BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION = 2, - BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES = 1, - COALESCE_BRACKETS = False, - COLUMN_LIMIT = 79, - CONTINUATION_ALIGN_STYLE = 'SPACE', - CONTINUATION_INDENT_WIDTH = 4, - DEDENT_CLOSING_BRACKETS = False, - INDENT_CLOSING_BRACKETS = False, - DISABLE_ENDING_COMMA_HEURISTIC = False, - EACH_DICT_ENTRY_ON_SEPARATE_LINE = True, - FORCE_MULTILINE_DICT = False, - I18N_COMMENT = '', - I18N_FUNCTION_CALL = '', - INDENT_DICTIONARY_VALUE = False, - INDENT_WIDTH = 4, - INDENT_BLANK_LINES = False, - JOIN_MULTIPLE_LINES = True, - NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS = set(), - SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True, - SPACE_INSIDE_BRACKETS = False, - 
SPACES_AROUND_POWER_OPERATOR = False, - SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN = False, - SPACES_AROUND_DICT_DELIMITERS = False, - SPACES_AROUND_LIST_DELIMITERS = False, - SPACES_AROUND_SUBSCRIPT_COLON = False, - SPACES_AROUND_TUPLE_DELIMITERS = False, - SPACES_BEFORE_COMMENT = 2, - SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED = False, - SPLIT_ALL_COMMA_SEPARATED_VALUES = False, - SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES = False, - SPLIT_BEFORE_ARITHMETIC_OPERATOR = False, - SPLIT_BEFORE_BITWISE_OPERATOR = True, - SPLIT_BEFORE_CLOSING_BRACKET = True, - SPLIT_BEFORE_DICT_SET_GENERATOR = True, - SPLIT_BEFORE_DOT = False, - SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = False, - SPLIT_BEFORE_FIRST_ARGUMENT = False, - SPLIT_BEFORE_LOGICAL_OPERATOR = True, - SPLIT_BEFORE_NAMED_ASSIGNS = True, - SPLIT_COMPLEX_COMPREHENSION = False, - SPLIT_PENALTY_AFTER_OPENING_BRACKET = 300, - SPLIT_PENALTY_AFTER_UNARY_OPERATOR = 10000, - SPLIT_PENALTY_ARITHMETIC_OPERATOR = 300, - SPLIT_PENALTY_BEFORE_IF_EXPR = 0, - SPLIT_PENALTY_BITWISE_OPERATOR = 300, - SPLIT_PENALTY_COMPREHENSION = 80, - SPLIT_PENALTY_EXCESS_CHARACTER = 7000, - SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = 30, - SPLIT_PENALTY_IMPORT_NAMES = 0, - SPLIT_PENALTY_LOGICAL_OPERATOR = 300, - USE_TABS = False, - ) + """Create the PEP8 formatting style.""" + return dict( + ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=True, + ALIGN_ASSIGNMENT=False, + NEW_ALIGNMENT_AFTER_COMMENTLINE=False, + ALLOW_MULTILINE_LAMBDAS=False, + ALLOW_MULTILINE_DICTIONARY_KEYS=False, + ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=True, + ALLOW_SPLIT_BEFORE_DICT_VALUE=True, + ARITHMETIC_PRECEDENCE_INDICATION=False, + BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=True, + BLANK_LINE_BEFORE_CLASS_DOCSTRING=False, + BLANK_LINE_BEFORE_MODULE_DOCSTRING=False, + BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=2, + BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES=1, + COALESCE_BRACKETS=False, + COLUMN_LIMIT=79, + CONTINUATION_ALIGN_STYLE='SPACE', + CONTINUATION_INDENT_WIDTH=4, + 
DEDENT_CLOSING_BRACKETS=False, + INDENT_CLOSING_BRACKETS=False, + DISABLE_ENDING_COMMA_HEURISTIC=False, + EACH_DICT_ENTRY_ON_SEPARATE_LINE=True, + FORCE_MULTILINE_DICT=False, + I18N_COMMENT='', + I18N_FUNCTION_CALL='', + INDENT_DICTIONARY_VALUE=False, + INDENT_WIDTH=4, + INDENT_BLANK_LINES=False, + JOIN_MULTIPLE_LINES=True, + NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=set(), + SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=True, + SPACE_INSIDE_BRACKETS=False, + SPACES_AROUND_POWER_OPERATOR=False, + SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=False, + SPACES_AROUND_DICT_DELIMITERS=False, + SPACES_AROUND_LIST_DELIMITERS=False, + SPACES_AROUND_SUBSCRIPT_COLON=False, + SPACES_AROUND_TUPLE_DELIMITERS=False, + SPACES_BEFORE_COMMENT=2, + SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=False, + SPLIT_ALL_COMMA_SEPARATED_VALUES=False, + SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES=False, + SPLIT_BEFORE_ARITHMETIC_OPERATOR=False, + SPLIT_BEFORE_BITWISE_OPERATOR=True, + SPLIT_BEFORE_CLOSING_BRACKET=True, + SPLIT_BEFORE_DICT_SET_GENERATOR=True, + SPLIT_BEFORE_DOT=False, + SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=False, + SPLIT_BEFORE_FIRST_ARGUMENT=False, + SPLIT_BEFORE_LOGICAL_OPERATOR=True, + SPLIT_BEFORE_NAMED_ASSIGNS=True, + SPLIT_COMPLEX_COMPREHENSION=False, + SPLIT_PENALTY_AFTER_OPENING_BRACKET=300, + SPLIT_PENALTY_AFTER_UNARY_OPERATOR=10000, + SPLIT_PENALTY_ARITHMETIC_OPERATOR=300, + SPLIT_PENALTY_BEFORE_IF_EXPR=0, + SPLIT_PENALTY_BITWISE_OPERATOR=300, + SPLIT_PENALTY_COMPREHENSION=80, + SPLIT_PENALTY_EXCESS_CHARACTER=7000, + SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=30, + SPLIT_PENALTY_IMPORT_NAMES=0, + SPLIT_PENALTY_LOGICAL_OPERATOR=300, + USE_TABS=False, + ) def CreateGoogleStyle(): - """Create the Google formatting style.""" - style = CreatePEP8Style() - style[ 'ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT' ] = False - style[ 'COLUMN_LIMIT' ] = 80 - style[ 'INDENT_DICTIONARY_VALUE' ] = True - style[ 'INDENT_WIDTH' ] = 4 - style[ 'I18N_COMMENT' ] = r'#\..*' - style[ 'I18N_FUNCTION_CALL' ] = [ 
'N_', '_' ] - style[ 'JOIN_MULTIPLE_LINES' ] = False - style[ 'SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET' ] = False - style[ 'SPLIT_BEFORE_BITWISE_OPERATOR' ] = False - style[ 'SPLIT_BEFORE_DICT_SET_GENERATOR' ] = False - style[ 'SPLIT_BEFORE_LOGICAL_OPERATOR' ] = False - style[ 'SPLIT_COMPLEX_COMPREHENSION' ] = True - style[ 'SPLIT_PENALTY_COMPREHENSION' ] = 2100 - return style + """Create the Google formatting style.""" + style = CreatePEP8Style() + style['ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'] = False + style['COLUMN_LIMIT'] = 80 + style['INDENT_DICTIONARY_VALUE'] = True + style['INDENT_WIDTH'] = 4 + style['I18N_COMMENT'] = r'#\..*' + style['I18N_FUNCTION_CALL'] = ['N_', '_'] + style['JOIN_MULTIPLE_LINES'] = False + style['SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET'] = False + style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False + style['SPLIT_BEFORE_DICT_SET_GENERATOR'] = False + style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False + style['SPLIT_COMPLEX_COMPREHENSION'] = True + style['SPLIT_PENALTY_COMPREHENSION'] = 2100 + return style def CreateYapfStyle(): - """Create the YAPF formatting style.""" - style = CreateGoogleStyle() - style[ 'ALLOW_MULTILINE_DICTIONARY_KEYS' ] = True - style[ 'ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS' ] = False - style[ 'INDENT_WIDTH' ] = 2 - style[ 'SPLIT_BEFORE_BITWISE_OPERATOR' ] = True - style[ 'SPLIT_BEFORE_DOT' ] = True - style[ 'SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN' ] = True - return style + """Create the YAPF formatting style.""" + style = CreateGoogleStyle() + style['ALLOW_MULTILINE_DICTIONARY_KEYS'] = True + style['ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS'] = False + style['INDENT_WIDTH'] = 2 + style['SPLIT_BEFORE_BITWISE_OPERATOR'] = True + style['SPLIT_BEFORE_DOT'] = True + style['SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN'] = True + return style def CreateFacebookStyle(): - """Create the Facebook formatting style.""" - style = CreatePEP8Style() - style[ 'ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT' ] = 
False - style[ 'BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF' ] = False - style[ 'COLUMN_LIMIT' ] = 80 - style[ 'DEDENT_CLOSING_BRACKETS' ] = True - style[ 'INDENT_CLOSING_BRACKETS' ] = False - style[ 'INDENT_DICTIONARY_VALUE' ] = True - style[ 'JOIN_MULTIPLE_LINES' ] = False - style[ 'SPACES_BEFORE_COMMENT' ] = 2 - style[ 'SPLIT_PENALTY_AFTER_OPENING_BRACKET' ] = 0 - style[ 'SPLIT_PENALTY_BEFORE_IF_EXPR' ] = 30 - style[ 'SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT' ] = 30 - style[ 'SPLIT_BEFORE_LOGICAL_OPERATOR' ] = False - style[ 'SPLIT_BEFORE_BITWISE_OPERATOR' ] = False - return style + """Create the Facebook formatting style.""" + style = CreatePEP8Style() + style['ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'] = False + style['BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF'] = False + style['COLUMN_LIMIT'] = 80 + style['DEDENT_CLOSING_BRACKETS'] = True + style['INDENT_CLOSING_BRACKETS'] = False + style['INDENT_DICTIONARY_VALUE'] = True + style['JOIN_MULTIPLE_LINES'] = False + style['SPACES_BEFORE_COMMENT'] = 2 + style['SPLIT_PENALTY_AFTER_OPENING_BRACKET'] = 0 + style['SPLIT_PENALTY_BEFORE_IF_EXPR'] = 30 + style['SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT'] = 30 + style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False + style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False + return style _STYLE_NAME_TO_FACTORY = dict( - pep8 = CreatePEP8Style, - google = CreateGoogleStyle, - facebook = CreateFacebookStyle, - yapf = CreateYapfStyle, + pep8=CreatePEP8Style, + google=CreateGoogleStyle, + facebook=CreateFacebookStyle, + yapf=CreateYapfStyle, ) _DEFAULT_STYLE_TO_FACTORY = [ - ( CreateFacebookStyle(), CreateFacebookStyle ), - ( CreateGoogleStyle(), CreateGoogleStyle ), - ( CreatePEP8Style(), CreatePEP8Style ), - ( CreateYapfStyle(), CreateYapfStyle ), + (CreateFacebookStyle(), CreateFacebookStyle), + (CreateGoogleStyle(), CreateGoogleStyle), + (CreatePEP8Style(), CreatePEP8Style), + (CreateYapfStyle(), CreateYapfStyle), ] -def _GetStyleFactory( style ): - for def_style, factory in _DEFAULT_STYLE_TO_FACTORY: - if 
style == def_style: - return factory - return None +def _GetStyleFactory(style): + for def_style, factory in _DEFAULT_STYLE_TO_FACTORY: + if style == def_style: + return factory + return None -def _ContinuationAlignStyleStringConverter( s ): - """Option value converter for a continuation align style string.""" - accepted_styles = ( 'SPACE', 'FIXED', 'VALIGN-RIGHT' ) - if s: - r = s.strip( '"\'' ).replace( '_', '-' ).upper() - if r not in accepted_styles: - raise ValueError( 'unknown continuation align style: %r' % ( s,) ) - else: - r = accepted_styles[ 0 ] - return r +def _ContinuationAlignStyleStringConverter(s): + """Option value converter for a continuation align style string.""" + accepted_styles = ('SPACE', 'FIXED', 'VALIGN-RIGHT') + if s: + r = s.strip('"\'').replace('_', '-').upper() + if r not in accepted_styles: + raise ValueError('unknown continuation align style: %r' % (s,)) + else: + r = accepted_styles[0] + return r -def _StringListConverter( s ): - """Option value converter for a comma-separated list of strings.""" - return [ part.strip() for part in s.split( ',' ) ] +def _StringListConverter(s): + """Option value converter for a comma-separated list of strings.""" + return [part.strip() for part in s.split(',')] -def _StringSetConverter( s ): - """Option value converter for a comma-separated set of strings.""" - if len( s ) > 2 and s[ 0 ] in '"\'': - s = s[ 1 :-1 ] - return { part.strip() for part in s.split( ',' ) } +def _StringSetConverter(s): + """Option value converter for a comma-separated set of strings.""" + if len(s) > 2 and s[0] in '"\'': + s = s[1:-1] + return {part.strip() for part in s.split(',')} -def _BoolConverter( s ): - """Option value converter for a boolean.""" - return py3compat.CONFIGPARSER_BOOLEAN_STATES[ s.lower() ] +def _BoolConverter(s): + """Option value converter for a boolean.""" + return py3compat.CONFIGPARSER_BOOLEAN_STATES[s.lower()] -def _IntListConverter( s ): - """Option value converter for a comma-separated list of 
integers.""" - s = s.strip() - if s.startswith( '[' ) and s.endswith( ']' ): - s = s[ 1 :-1 ] +def _IntListConverter(s): + """Option value converter for a comma-separated list of integers.""" + s = s.strip() + if s.startswith('[') and s.endswith(']'): + s = s[1:-1] - return [ int( part.strip() ) for part in s.split( ',' ) if part.strip() ] + return [int(part.strip()) for part in s.split(',') if part.strip()] -def _IntOrIntListConverter( s ): - """Option value converter for an integer or list of integers.""" - if len( s ) > 2 and s[ 0 ] in '"\'': - s = s[ 1 :-1 ] - return _IntListConverter( s ) if ',' in s else int( s ) +def _IntOrIntListConverter(s): + """Option value converter for an integer or list of integers.""" + if len(s) > 2 and s[0] in '"\'': + s = s[1:-1] + return _IntListConverter(s) if ',' in s else int(s) # Different style options need to have their values interpreted differently when @@ -674,73 +675,73 @@ def _IntOrIntListConverter( s ): # # Note: this dict has to map all the supported style options. 
_STYLE_OPTION_VALUE_CONVERTER = dict( - ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT = _BoolConverter, - ALIGN_ASSIGNMENT = _BoolConverter, - NEW_ALIGNMENT_AFTER_COMMENTLINE = _BoolConverter, - ALLOW_MULTILINE_LAMBDAS = _BoolConverter, - ALLOW_MULTILINE_DICTIONARY_KEYS = _BoolConverter, - ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS = _BoolConverter, - ALLOW_SPLIT_BEFORE_DICT_VALUE = _BoolConverter, - ARITHMETIC_PRECEDENCE_INDICATION = _BoolConverter, - BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = _BoolConverter, - BLANK_LINE_BEFORE_CLASS_DOCSTRING = _BoolConverter, - BLANK_LINE_BEFORE_MODULE_DOCSTRING = _BoolConverter, - BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION = int, - BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES = int, - COALESCE_BRACKETS = _BoolConverter, - COLUMN_LIMIT = int, - CONTINUATION_ALIGN_STYLE = _ContinuationAlignStyleStringConverter, - CONTINUATION_INDENT_WIDTH = int, - DEDENT_CLOSING_BRACKETS = _BoolConverter, - INDENT_CLOSING_BRACKETS = _BoolConverter, - DISABLE_ENDING_COMMA_HEURISTIC = _BoolConverter, - EACH_DICT_ENTRY_ON_SEPARATE_LINE = _BoolConverter, - FORCE_MULTILINE_DICT = _BoolConverter, - I18N_COMMENT = str, - I18N_FUNCTION_CALL = _StringListConverter, - INDENT_DICTIONARY_VALUE = _BoolConverter, - INDENT_WIDTH = int, - INDENT_BLANK_LINES = _BoolConverter, - JOIN_MULTIPLE_LINES = _BoolConverter, - NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS = _StringSetConverter, - SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = _BoolConverter, - SPACE_INSIDE_BRACKETS = _BoolConverter, - SPACES_AROUND_POWER_OPERATOR = _BoolConverter, - SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN = _BoolConverter, - SPACES_AROUND_DICT_DELIMITERS = _BoolConverter, - SPACES_AROUND_LIST_DELIMITERS = _BoolConverter, - SPACES_AROUND_SUBSCRIPT_COLON = _BoolConverter, - SPACES_AROUND_TUPLE_DELIMITERS = _BoolConverter, - SPACES_BEFORE_COMMENT = _IntOrIntListConverter, - SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED = _BoolConverter, - SPLIT_ALL_COMMA_SEPARATED_VALUES = _BoolConverter, - 
SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES = _BoolConverter, - SPLIT_BEFORE_ARITHMETIC_OPERATOR = _BoolConverter, - SPLIT_BEFORE_BITWISE_OPERATOR = _BoolConverter, - SPLIT_BEFORE_CLOSING_BRACKET = _BoolConverter, - SPLIT_BEFORE_DICT_SET_GENERATOR = _BoolConverter, - SPLIT_BEFORE_DOT = _BoolConverter, - SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = _BoolConverter, - SPLIT_BEFORE_FIRST_ARGUMENT = _BoolConverter, - SPLIT_BEFORE_LOGICAL_OPERATOR = _BoolConverter, - SPLIT_BEFORE_NAMED_ASSIGNS = _BoolConverter, - SPLIT_COMPLEX_COMPREHENSION = _BoolConverter, - SPLIT_PENALTY_AFTER_OPENING_BRACKET = int, - SPLIT_PENALTY_AFTER_UNARY_OPERATOR = int, - SPLIT_PENALTY_ARITHMETIC_OPERATOR = int, - SPLIT_PENALTY_BEFORE_IF_EXPR = int, - SPLIT_PENALTY_BITWISE_OPERATOR = int, - SPLIT_PENALTY_COMPREHENSION = int, - SPLIT_PENALTY_EXCESS_CHARACTER = int, - SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT = int, - SPLIT_PENALTY_IMPORT_NAMES = int, - SPLIT_PENALTY_LOGICAL_OPERATOR = int, - USE_TABS = _BoolConverter, + ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=_BoolConverter, + ALIGN_ASSIGNMENT=_BoolConverter, + NEW_ALIGNMENT_AFTER_COMMENTLINE=_BoolConverter, + ALLOW_MULTILINE_LAMBDAS=_BoolConverter, + ALLOW_MULTILINE_DICTIONARY_KEYS=_BoolConverter, + ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=_BoolConverter, + ALLOW_SPLIT_BEFORE_DICT_VALUE=_BoolConverter, + ARITHMETIC_PRECEDENCE_INDICATION=_BoolConverter, + BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=_BoolConverter, + BLANK_LINE_BEFORE_CLASS_DOCSTRING=_BoolConverter, + BLANK_LINE_BEFORE_MODULE_DOCSTRING=_BoolConverter, + BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=int, + BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES=int, + COALESCE_BRACKETS=_BoolConverter, + COLUMN_LIMIT=int, + CONTINUATION_ALIGN_STYLE=_ContinuationAlignStyleStringConverter, + CONTINUATION_INDENT_WIDTH=int, + DEDENT_CLOSING_BRACKETS=_BoolConverter, + INDENT_CLOSING_BRACKETS=_BoolConverter, + DISABLE_ENDING_COMMA_HEURISTIC=_BoolConverter, + EACH_DICT_ENTRY_ON_SEPARATE_LINE=_BoolConverter, 
+ FORCE_MULTILINE_DICT=_BoolConverter, + I18N_COMMENT=str, + I18N_FUNCTION_CALL=_StringListConverter, + INDENT_DICTIONARY_VALUE=_BoolConverter, + INDENT_WIDTH=int, + INDENT_BLANK_LINES=_BoolConverter, + JOIN_MULTIPLE_LINES=_BoolConverter, + NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=_StringSetConverter, + SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=_BoolConverter, + SPACE_INSIDE_BRACKETS=_BoolConverter, + SPACES_AROUND_POWER_OPERATOR=_BoolConverter, + SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=_BoolConverter, + SPACES_AROUND_DICT_DELIMITERS=_BoolConverter, + SPACES_AROUND_LIST_DELIMITERS=_BoolConverter, + SPACES_AROUND_SUBSCRIPT_COLON=_BoolConverter, + SPACES_AROUND_TUPLE_DELIMITERS=_BoolConverter, + SPACES_BEFORE_COMMENT=_IntOrIntListConverter, + SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=_BoolConverter, + SPLIT_ALL_COMMA_SEPARATED_VALUES=_BoolConverter, + SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES=_BoolConverter, + SPLIT_BEFORE_ARITHMETIC_OPERATOR=_BoolConverter, + SPLIT_BEFORE_BITWISE_OPERATOR=_BoolConverter, + SPLIT_BEFORE_CLOSING_BRACKET=_BoolConverter, + SPLIT_BEFORE_DICT_SET_GENERATOR=_BoolConverter, + SPLIT_BEFORE_DOT=_BoolConverter, + SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=_BoolConverter, + SPLIT_BEFORE_FIRST_ARGUMENT=_BoolConverter, + SPLIT_BEFORE_LOGICAL_OPERATOR=_BoolConverter, + SPLIT_BEFORE_NAMED_ASSIGNS=_BoolConverter, + SPLIT_COMPLEX_COMPREHENSION=_BoolConverter, + SPLIT_PENALTY_AFTER_OPENING_BRACKET=int, + SPLIT_PENALTY_AFTER_UNARY_OPERATOR=int, + SPLIT_PENALTY_ARITHMETIC_OPERATOR=int, + SPLIT_PENALTY_BEFORE_IF_EXPR=int, + SPLIT_PENALTY_BITWISE_OPERATOR=int, + SPLIT_PENALTY_COMPREHENSION=int, + SPLIT_PENALTY_EXCESS_CHARACTER=int, + SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=int, + SPLIT_PENALTY_IMPORT_NAMES=int, + SPLIT_PENALTY_LOGICAL_OPERATOR=int, + USE_TABS=_BoolConverter, ) -def CreateStyleFromConfig( style_config ): - """Create a style dict from the given config. +def CreateStyleFromConfig(style_config): + """Create a style dict from the given config. 
Arguments: style_config: either a style name or a file name. The file is expected to @@ -756,108 +757,107 @@ def CreateStyleFromConfig( style_config ): StyleConfigError: if an unknown style option was encountered. """ - def GlobalStyles(): - for style, _ in _DEFAULT_STYLE_TO_FACTORY: - yield style - - def_style = False - if style_config is None: - for style in GlobalStyles(): - if _style == style: - def_style = True - break - if not def_style: - return _style - return _GLOBAL_STYLE_FACTORY() - - if isinstance( style_config, dict ): - config = _CreateConfigParserFromConfigDict( style_config ) - elif isinstance( style_config, py3compat.basestring ): - style_factory = _STYLE_NAME_TO_FACTORY.get( style_config.lower() ) - if style_factory is not None: - return style_factory() - if style_config.startswith( '{' ): - # Most likely a style specification from the command line. - config = _CreateConfigParserFromConfigString( style_config ) - else: - # Unknown config name: assume it's a file name then. - config = _CreateConfigParserFromConfigFile( style_config ) - return _CreateStyleFromConfigParser( config ) - - -def _CreateConfigParserFromConfigDict( config_dict ): + def GlobalStyles(): + for style, _ in _DEFAULT_STYLE_TO_FACTORY: + yield style + + def_style = False + if style_config is None: + for style in GlobalStyles(): + if _style == style: + def_style = True + break + if not def_style: + return _style + return _GLOBAL_STYLE_FACTORY() + + if isinstance(style_config, dict): + config = _CreateConfigParserFromConfigDict(style_config) + elif isinstance(style_config, py3compat.basestring): + style_factory = _STYLE_NAME_TO_FACTORY.get(style_config.lower()) + if style_factory is not None: + return style_factory() + if style_config.startswith('{'): + # Most likely a style specification from the command line. + config = _CreateConfigParserFromConfigString(style_config) + else: + # Unknown config name: assume it's a file name then. 
+ config = _CreateConfigParserFromConfigFile(style_config) + return _CreateStyleFromConfigParser(config) + + +def _CreateConfigParserFromConfigDict(config_dict): + config = py3compat.ConfigParser() + config.add_section('style') + for key, value in config_dict.items(): + config.set('style', key, str(value)) + return config + + +def _CreateConfigParserFromConfigString(config_string): + """Given a config string from the command line, return a config parser.""" + if config_string[0] != '{' or config_string[-1] != '}': + raise StyleConfigError( + "Invalid style dict syntax: '{}'.".format(config_string)) + config = py3compat.ConfigParser() + config.add_section('style') + for key, value, _ in re.findall( + r'([a-zA-Z0-9_]+)\s*[:=]\s*' + r'(?:' + r'((?P[\'"]).*?(?P=quote)|' + r'[a-zA-Z0-9_]+)' + r')', config_string): # yapf: disable + config.set('style', key, value) + return config + + +def _CreateConfigParserFromConfigFile(config_filename): + """Read the file and return a ConfigParser object.""" + if not os.path.exists(config_filename): + # Provide a more meaningful error here. 
+ raise StyleConfigError( + '"{0}" is not a valid style or file path'.format(config_filename)) + with open(config_filename) as style_file: config = py3compat.ConfigParser() - config.add_section( 'style' ) - for key, value in config_dict.items(): - config.set( 'style', key, str( value ) ) - return config - + if config_filename.endswith(PYPROJECT_TOML): + try: + import toml + except ImportError: + raise errors.YapfError( + "toml package is needed for using pyproject.toml as a " + "configuration file") + + pyproject_toml = toml.load(style_file) + style_dict = pyproject_toml.get("tool", {}).get("yapf", None) + if style_dict is None: + raise StyleConfigError( + 'Unable to find section [tool.yapf] in {0}'.format(config_filename)) + config.add_section('style') + for k, v in style_dict.items(): + config.set('style', k, str(v)) + return config + + config.read_file(style_file) + if config_filename.endswith(SETUP_CONFIG): + if not config.has_section('yapf'): + raise StyleConfigError( + 'Unable to find section [yapf] in {0}'.format(config_filename)) + return config -def _CreateConfigParserFromConfigString( config_string ): - """Given a config string from the command line, return a config parser.""" - if config_string[ 0 ] != '{' or config_string[ -1 ] != '}': + if config_filename.endswith(LOCAL_STYLE): + if not config.has_section('style'): raise StyleConfigError( - "Invalid style dict syntax: '{}'.".format( config_string ) ) - config = py3compat.ConfigParser() - config.add_section( 'style' ) - for key, value, _ in re.findall( - r'([a-zA-Z0-9_]+)\s*[:=]\s*' - r'(?:' - r'((?P[\'"]).*?(?P=quote)|' - r'[a-zA-Z0-9_]+)' - r')', config_string): # yapf: disable - config.set( 'style', key, value ) + 'Unable to find section [style] in {0}'.format(config_filename)) + return config + + if not config.has_section('style'): + raise StyleConfigError( + 'Unable to find section [style] in {0}'.format(config_filename)) return config -def _CreateConfigParserFromConfigFile( config_filename ): - 
"""Read the file and return a ConfigParser object.""" - if not os.path.exists( config_filename ): - # Provide a more meaningful error here. - raise StyleConfigError( - '"{0}" is not a valid style or file path'.format( config_filename ) ) - with open( config_filename ) as style_file: - config = py3compat.ConfigParser() - if config_filename.endswith( PYPROJECT_TOML ): - try: - import toml - except ImportError: - raise errors.YapfError( - "toml package is needed for using pyproject.toml as a " - "configuration file" ) - - pyproject_toml = toml.load( style_file ) - style_dict = pyproject_toml.get( "tool", {} ).get( "yapf", None ) - if style_dict is None: - raise StyleConfigError( - 'Unable to find section [tool.yapf] in {0}'.format( - config_filename ) ) - config.add_section( 'style' ) - for k, v in style_dict.items(): - config.set( 'style', k, str( v ) ) - return config - - config.read_file( style_file ) - if config_filename.endswith( SETUP_CONFIG ): - if not config.has_section( 'yapf' ): - raise StyleConfigError( - 'Unable to find section [yapf] in {0}'.format( config_filename ) ) - return config - - if config_filename.endswith( LOCAL_STYLE ): - if not config.has_section( 'style' ): - raise StyleConfigError( - 'Unable to find section [style] in {0}'.format( config_filename ) ) - return config - - if not config.has_section( 'style' ): - raise StyleConfigError( - 'Unable to find section [style] in {0}'.format( config_filename ) ) - return config - - -def _CreateStyleFromConfigParser( config ): - """Create a style dict from a configuration file. +def _CreateStyleFromConfigParser(config): + """Create a style dict from a configuration file. Arguments: config: a ConfigParser object. @@ -868,32 +868,32 @@ def _CreateStyleFromConfigParser( config ): Raises: StyleConfigError: if an unknown style option was encountered. """ - # Initialize the base style. 
- section = 'yapf' if config.has_section( 'yapf' ) else 'style' - if config.has_option( 'style', 'based_on_style' ): - based_on = config.get( 'style', 'based_on_style' ).lower() - base_style = _STYLE_NAME_TO_FACTORY[ based_on ]() - elif config.has_option( 'yapf', 'based_on_style' ): - based_on = config.get( 'yapf', 'based_on_style' ).lower() - base_style = _STYLE_NAME_TO_FACTORY[ based_on ]() - else: - base_style = _GLOBAL_STYLE_FACTORY() - - # Read all options specified in the file and update the style. - for option, value in config.items( section ): - if option.lower() == 'based_on_style': - # Now skip this one - we've already handled it and it's not one of the - # recognized style options. - continue - option = option.upper() - if option not in _STYLE_OPTION_VALUE_CONVERTER: - raise StyleConfigError( 'Unknown style option "{0}"'.format( option ) ) - try: - base_style[ option ] = _STYLE_OPTION_VALUE_CONVERTER[ option ]( value ) - except ValueError: - raise StyleConfigError( - "'{}' is not a valid setting for {}.".format( value, option ) ) - return base_style + # Initialize the base style. + section = 'yapf' if config.has_section('yapf') else 'style' + if config.has_option('style', 'based_on_style'): + based_on = config.get('style', 'based_on_style').lower() + base_style = _STYLE_NAME_TO_FACTORY[based_on]() + elif config.has_option('yapf', 'based_on_style'): + based_on = config.get('yapf', 'based_on_style').lower() + base_style = _STYLE_NAME_TO_FACTORY[based_on]() + else: + base_style = _GLOBAL_STYLE_FACTORY() + + # Read all options specified in the file and update the style. + for option, value in config.items(section): + if option.lower() == 'based_on_style': + # Now skip this one - we've already handled it and it's not one of the + # recognized style options. 
+ continue + option = option.upper() + if option not in _STYLE_OPTION_VALUE_CONVERTER: + raise StyleConfigError('Unknown style option "{0}"'.format(option)) + try: + base_style[option] = _STYLE_OPTION_VALUE_CONVERTER[option](value) + except ValueError: + raise StyleConfigError( + "'{}' is not a valid setting for {}.".format(value, option)) + return base_style # The default style - used if yapf is not invoked without specifically @@ -905,8 +905,8 @@ def _CreateStyleFromConfigParser( config ): # The name of the file to use for global style definition. GLOBAL_STYLE = ( os.path.join( - os.getenv( 'XDG_CONFIG_HOME' ) or os.path.expanduser( '~/.config' ), 'yapf', - 'style' ) ) + os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'), 'yapf', + 'style')) # The name of the file to use for directory-local style definition. LOCAL_STYLE = '.style.yapf' @@ -923,4 +923,4 @@ def _CreateStyleFromConfigParser( config ): # Refactor this so that the style is passed around through yapf rather than # being global. _style = None -SetGlobalStyle( _GLOBAL_STYLE_FACTORY() ) +SetGlobalStyle(_GLOBAL_STYLE_FACTORY()) diff --git a/yapf/yapflib/verifier.py b/yapf/yapflib/verifier.py index 80cfebc08..01dccc0b0 100644 --- a/yapf/yapflib/verifier.py +++ b/yapf/yapflib/verifier.py @@ -25,13 +25,13 @@ import textwrap -class InternalError( Exception ): - """Internal error in verifying formatted code.""" - pass +class InternalError(Exception): + """Internal error in verifying formatted code.""" + pass -def VerifyCode( code ): - """Verify that the reformatted code is syntactically correct. +def VerifyCode(code): + """Verify that the reformatted code is syntactically correct. Arguments: code: (unicode) The reformatted code snippet. @@ -39,57 +39,55 @@ def VerifyCode( code ): Raises: SyntaxError if the code was reformatted incorrectly. 
""" + try: + compile(textwrap.dedent(code).encode('UTF-8'), '', 'exec') + except SyntaxError: try: - compile( textwrap.dedent( code ).encode( 'UTF-8' ), '', 'exec' ) + ast.parse(textwrap.dedent(code.lstrip('\n')).lstrip(), '', 'exec') except SyntaxError: - try: - ast.parse( - textwrap.dedent( code.lstrip( '\n' ) ).lstrip(), '', 'exec' ) - except SyntaxError: - try: - normalized_code = _NormalizeCode( code ) - compile( normalized_code.encode( 'UTF-8' ), '', 'exec' ) - except SyntaxError: - raise InternalError( sys.exc_info()[ 1 ] ) + try: + normalized_code = _NormalizeCode(code) + compile(normalized_code.encode('UTF-8'), '', 'exec') + except SyntaxError: + raise InternalError(sys.exc_info()[1]) -def _NormalizeCode( code ): - """Make sure that the code snippet is compilable.""" - code = textwrap.dedent( code.lstrip( '\n' ) ).lstrip() +def _NormalizeCode(code): + """Make sure that the code snippet is compilable.""" + code = textwrap.dedent(code.lstrip('\n')).lstrip() - # Split the code to lines and get rid of all leading full-comment lines as - # they can mess up the normalization attempt. - lines = code.split( '\n' ) - i = 0 - for i, line in enumerate( lines ): - line = line.strip() - if line and not line.startswith( '#' ): - break - code = '\n'.join( lines[ i : ] ) + '\n' + # Split the code to lines and get rid of all leading full-comment lines as + # they can mess up the normalization attempt. + lines = code.split('\n') + i = 0 + for i, line in enumerate(lines): + line = line.strip() + if line and not line.startswith('#'): + break + code = '\n'.join(lines[i:]) + '\n' - if re.match( r'(if|while|for|with|def|class|async|await)\b', code ): - code += '\n pass' - elif re.match( r'(elif|else)\b', code ): - try: - try_code = 'if True:\n pass\n' + code + '\n pass' - ast.parse( - textwrap.dedent( try_code.lstrip( '\n' ) ).lstrip(), '', - 'exec' ) - code = try_code - except SyntaxError: - # The assumption here is that the code is on a single line. 
- code = 'if True: pass\n' + code - elif code.startswith( '@' ): - code += '\ndef _():\n pass' - elif re.match( r'try\b', code ): - code += '\n pass\nexcept:\n pass' - elif re.match( r'(except|finally)\b', code ): - code = 'try:\n pass\n' + code + '\n pass' - elif re.match( r'(return|yield)\b', code ): - code = 'def _():\n ' + code - elif re.match( r'(continue|break)\b', code ): - code = 'while True:\n ' + code - elif re.match( r'print\b', code ): - code = 'from __future__ import print_function\n' + code + if re.match(r'(if|while|for|with|def|class|async|await)\b', code): + code += '\n pass' + elif re.match(r'(elif|else)\b', code): + try: + try_code = 'if True:\n pass\n' + code + '\n pass' + ast.parse( + textwrap.dedent(try_code.lstrip('\n')).lstrip(), '', 'exec') + code = try_code + except SyntaxError: + # The assumption here is that the code is on a single line. + code = 'if True: pass\n' + code + elif code.startswith('@'): + code += '\ndef _():\n pass' + elif re.match(r'try\b', code): + code += '\n pass\nexcept:\n pass' + elif re.match(r'(except|finally)\b', code): + code = 'try:\n pass\n' + code + '\n pass' + elif re.match(r'(return|yield)\b', code): + code = 'def _():\n ' + code + elif re.match(r'(continue|break)\b', code): + code = 'while True:\n ' + code + elif re.match(r'print\b', code): + code = 'from __future__ import print_function\n' + code - return code + '\n' + return code + '\n' diff --git a/yapf/yapflib/yapf_api.py b/yapf/yapflib/yapf_api.py index e8ae26e87..e0098ddd2 100644 --- a/yapf/yapflib/yapf_api.py +++ b/yapf/yapflib/yapf_api.py @@ -52,14 +52,14 @@ def FormatFile( - filename, - style_config = None, - lines = None, - print_diff = False, - verify = False, - in_place = False, - logger = None ): - """Format a single Python file and return the formatted code. + filename, + style_config=None, + lines=None, + print_diff=False, + verify=False, + in_place=False, + logger=None): + """Format a single Python file and return the formatted code. 
Arguments: filename: (unicode) The file to reformat. @@ -85,33 +85,33 @@ def FormatFile( IOError: raised if there was an error reading the file. ValueError: raised if in_place and print_diff are both specified. """ - _CheckPythonVersion() - - if in_place and print_diff: - raise ValueError( 'Cannot pass both in_place and print_diff.' ) - - original_source, newline, encoding = ReadFile( filename, logger ) - reformatted_source, changed = FormatCode( - original_source, - style_config = style_config, - filename = filename, - lines = lines, - print_diff = print_diff, - verify = verify ) - if reformatted_source.rstrip( '\n' ): - lines = reformatted_source.rstrip( '\n' ).split( '\n' ) - reformatted_source = newline.join( iter( lines ) ) + newline - if in_place: - if original_source and original_source != reformatted_source: - file_resources.WriteReformattedCode( - filename, reformatted_source, encoding, in_place ) - return None, encoding, changed - - return reformatted_source, encoding, changed - - -def FormatTree( tree, style_config = None, lines = None, verify = False ): - """Format a parsed lib2to3 pytree. + _CheckPythonVersion() + + if in_place and print_diff: + raise ValueError('Cannot pass both in_place and print_diff.') + + original_source, newline, encoding = ReadFile(filename, logger) + reformatted_source, changed = FormatCode( + original_source, + style_config=style_config, + filename=filename, + lines=lines, + print_diff=print_diff, + verify=verify) + if reformatted_source.rstrip('\n'): + lines = reformatted_source.rstrip('\n').split('\n') + reformatted_source = newline.join(iter(lines)) + newline + if in_place: + if original_source and original_source != reformatted_source: + file_resources.WriteReformattedCode( + filename, reformatted_source, encoding, in_place) + return None, encoding, changed + + return reformatted_source, encoding, changed + + +def FormatTree(tree, style_config=None, lines=None, verify=False): + """Format a parsed lib2to3 pytree. 
This provides an alternative entry point to YAPF. @@ -129,34 +129,34 @@ def FormatTree( tree, style_config = None, lines = None, verify = False ): Returns: The source formatted according to the given formatting style. """ - _CheckPythonVersion() - style.SetGlobalStyle( style.CreateStyleFromConfig( style_config ) ) + _CheckPythonVersion() + style.SetGlobalStyle(style.CreateStyleFromConfig(style_config)) - # Run passes on the tree, modifying it in place. - comment_splicer.SpliceComments( tree ) - continuation_splicer.SpliceContinuations( tree ) - subtype_assigner.AssignSubtypes( tree ) - identify_container.IdentifyContainers( tree ) - split_penalty.ComputeSplitPenalties( tree ) - blank_line_calculator.CalculateBlankLines( tree ) + # Run passes on the tree, modifying it in place. + comment_splicer.SpliceComments(tree) + continuation_splicer.SpliceContinuations(tree) + subtype_assigner.AssignSubtypes(tree) + identify_container.IdentifyContainers(tree) + split_penalty.ComputeSplitPenalties(tree) + blank_line_calculator.CalculateBlankLines(tree) - llines = pytree_unwrapper.UnwrapPyTree( tree ) - for lline in llines: - lline.CalculateFormattingInformation() + llines = pytree_unwrapper.UnwrapPyTree(tree) + for lline in llines: + lline.CalculateFormattingInformation() - lines = _LineRangesToSet( lines ) - _MarkLinesToFormat( llines, lines ) - return reformatter.Reformat( _SplitSemicolons( llines ), verify, lines ) + lines = _LineRangesToSet(lines) + _MarkLinesToFormat(llines, lines) + return reformatter.Reformat(_SplitSemicolons(llines), verify, lines) def FormatCode( - unformatted_source, - filename = '', - style_config = None, - lines = None, - print_diff = False, - verify = False ): - """Format a string of Python code. + unformatted_source, + filename='', + style_config=None, + lines=None, + print_diff=False, + verify=False): + """Format a string of Python code. This provides an alternative entry point to YAPF. 
@@ -178,39 +178,39 @@ def FormatCode( Tuple of (reformatted_source, changed). reformatted_source conforms to the desired formatting style. changed is True if the source changed. """ - try: - tree = pytree_utils.ParseCodeToTree( unformatted_source ) - except Exception as e: - e.filename = filename - raise errors.YapfError( errors.FormatErrorMsg( e ) ) + try: + tree = pytree_utils.ParseCodeToTree(unformatted_source) + except Exception as e: + e.filename = filename + raise errors.YapfError(errors.FormatErrorMsg(e)) - reformatted_source = FormatTree( - tree, style_config = style_config, lines = lines, verify = verify ) + reformatted_source = FormatTree( + tree, style_config=style_config, lines=lines, verify=verify) - if unformatted_source == reformatted_source: - return '' if print_diff else reformatted_source, False + if unformatted_source == reformatted_source: + return '' if print_diff else reformatted_source, False - code_diff = _GetUnifiedDiff( - unformatted_source, reformatted_source, filename = filename ) + code_diff = _GetUnifiedDiff( + unformatted_source, reformatted_source, filename=filename) - if print_diff: - return code_diff, code_diff.strip() != '' # pylint: disable=g-explicit-bool-comparison # noqa + if print_diff: + return code_diff, code_diff.strip() != '' # pylint: disable=g-explicit-bool-comparison # noqa - return reformatted_source, True + return reformatted_source, True -def _CheckPythonVersion(): # pragma: no cover - errmsg = 'yapf is only supported for Python 2.7 or 3.6+' - if sys.version_info[ 0 ] == 2: - if sys.version_info[ 1 ] < 7: - raise RuntimeError( errmsg ) - elif sys.version_info[ 0 ] == 3: - if sys.version_info[ 1 ] < 6: - raise RuntimeError( errmsg ) +def _CheckPythonVersion(): # pragma: no cover + errmsg = 'yapf is only supported for Python 2.7 or 3.6+' + if sys.version_info[0] == 2: + if sys.version_info[1] < 7: + raise RuntimeError(errmsg) + elif sys.version_info[0] == 3: + if sys.version_info[1] < 6: + raise RuntimeError(errmsg) 
-def ReadFile( filename, logger = None ): - """Read the contents of the file. +def ReadFile(filename, logger=None): + """Read the contents of the file. An optional logger can be specified to emit messages to your favorite logging stream. If specified, then no exception is raised. This is external so that it @@ -226,106 +226,102 @@ def ReadFile( filename, logger = None ): Raises: IOError: raised if there was an error reading the file. """ - try: - encoding = file_resources.FileEncoding( filename ) - - # Preserves line endings. - with py3compat.open_with_encoding( filename, mode = 'r', encoding = encoding, - newline = '' ) as fd: - lines = fd.readlines() - - line_ending = file_resources.LineEnding( lines ) - source = '\n'.join( line.rstrip( '\r\n' ) for line in lines ) + '\n' - return source, line_ending, encoding - except IOError as e: # pragma: no cover - if logger: - logger( e ) - e.args = ( - e.args[ 0 ], - ( filename, e.args[ 1 ][ 1 ], e.args[ 1 ][ 2 ], e.args[ 1 ][ 3 ] ) ) - raise - except UnicodeDecodeError as e: # pragma: no cover - if logger: - logger( - 'Could not parse %s! Consider excluding this file with --exclude.', - filename ) - logger( e ) - e.args = ( - e.args[ 0 ], - ( filename, e.args[ 1 ][ 1 ], e.args[ 1 ][ 2 ], e.args[ 1 ][ 3 ] ) ) - raise - - -def _SplitSemicolons( lines ): - res = [] - for line in lines: - res.extend( line.Split() ) - return res + try: + encoding = file_resources.FileEncoding(filename) + + # Preserves line endings. 
+ with py3compat.open_with_encoding(filename, mode='r', encoding=encoding, + newline='') as fd: + lines = fd.readlines() + + line_ending = file_resources.LineEnding(lines) + source = '\n'.join(line.rstrip('\r\n') for line in lines) + '\n' + return source, line_ending, encoding + except IOError as e: # pragma: no cover + if logger: + logger(e) + e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3])) + raise + except UnicodeDecodeError as e: # pragma: no cover + if logger: + logger( + 'Could not parse %s! Consider excluding this file with --exclude.', + filename) + logger(e) + e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3])) + raise + + +def _SplitSemicolons(lines): + res = [] + for line in lines: + res.extend(line.Split()) + return res DISABLE_PATTERN = r'^#.*\byapf:\s*disable\b' ENABLE_PATTERN = r'^#.*\byapf:\s*enable\b' -def _LineRangesToSet( line_ranges ): - """Return a set of lines in the range.""" - - if line_ranges is None: - return None - - line_set = set() - for low, high in sorted( line_ranges ): - line_set.update( range( low, high + 1 ) ) - - return line_set - - -def _MarkLinesToFormat( llines, lines ): - """Skip sections of code that we shouldn't reformat.""" - if lines: - for uwline in llines: - uwline.disable = not lines.intersection( - range( uwline.lineno, uwline.last.lineno + 1 ) ) - - # Now go through the lines and disable any lines explicitly marked as - # disabled. 
- index = 0 - while index < len( llines ): - uwline = llines[ index ] - if uwline.is_comment: - if _DisableYAPF( uwline.first.value.strip() ): - index += 1 - while index < len( llines ): - uwline = llines[ index ] - line = uwline.first.value.strip() - if uwline.is_comment and _EnableYAPF( line ): - if not _DisableYAPF( line ): - break - uwline.disable = True - index += 1 - elif re.search( DISABLE_PATTERN, uwline.last.value.strip(), re.IGNORECASE ): - uwline.disable = True - index += 1 +def _LineRangesToSet(line_ranges): + """Return a set of lines in the range.""" + if line_ranges is None: + return None -def _DisableYAPF( line ): - return ( - re.search( DISABLE_PATTERN, - line.split( '\n' )[ 0 ].strip(), re.IGNORECASE ) or - re.search( DISABLE_PATTERN, - line.split( '\n' )[ -1 ].strip(), re.IGNORECASE ) ) + line_set = set() + for low, high in sorted(line_ranges): + line_set.update(range(low, high + 1)) + return line_set -def _EnableYAPF( line ): - return ( - re.search( ENABLE_PATTERN, - line.split( '\n' )[ 0 ].strip(), re.IGNORECASE ) or - re.search( ENABLE_PATTERN, - line.split( '\n' )[ -1 ].strip(), re.IGNORECASE ) ) +def _MarkLinesToFormat(llines, lines): + """Skip sections of code that we shouldn't reformat.""" + if lines: + for uwline in llines: + uwline.disable = not lines.intersection( + range(uwline.lineno, uwline.last.lineno + 1)) -def _GetUnifiedDiff( before, after, filename = 'code' ): - """Get a unified diff of the changes. + # Now go through the lines and disable any lines explicitly marked as + # disabled. 
+ index = 0 + while index < len(llines): + uwline = llines[index] + if uwline.is_comment: + if _DisableYAPF(uwline.first.value.strip()): + index += 1 + while index < len(llines): + uwline = llines[index] + line = uwline.first.value.strip() + if uwline.is_comment and _EnableYAPF(line): + if not _DisableYAPF(line): + break + uwline.disable = True + index += 1 + elif re.search(DISABLE_PATTERN, uwline.last.value.strip(), re.IGNORECASE): + uwline.disable = True + index += 1 + + +def _DisableYAPF(line): + return ( + re.search(DISABLE_PATTERN, + line.split('\n')[0].strip(), re.IGNORECASE) or + re.search(DISABLE_PATTERN, + line.split('\n')[-1].strip(), re.IGNORECASE)) + + +def _EnableYAPF(line): + return ( + re.search(ENABLE_PATTERN, + line.split('\n')[0].strip(), re.IGNORECASE) or + re.search(ENABLE_PATTERN, + line.split('\n')[-1].strip(), re.IGNORECASE)) + + +def _GetUnifiedDiff(before, after, filename='code'): + """Get a unified diff of the changes. Arguments: before: (unicode) The original source code. @@ -335,14 +331,14 @@ def _GetUnifiedDiff( before, after, filename = 'code' ): Returns: The unified diff text. 
""" - before = before.splitlines() - after = after.splitlines() - return '\n'.join( - difflib.unified_diff( - before, - after, - filename, - filename, - '(original)', - '(reformatted)', - lineterm = '' ) ) + '\n' + before = before.splitlines() + after = after.splitlines() + return '\n'.join( + difflib.unified_diff( + before, + after, + filename, + filename, + '(original)', + '(reformatted)', + lineterm='')) + '\n' diff --git a/yapftests/blank_line_calculator_test.py b/yapftests/blank_line_calculator_test.py index 18fa83e0b..d5d97d794 100644 --- a/yapftests/blank_line_calculator_test.py +++ b/yapftests/blank_line_calculator_test.py @@ -30,13 +30,15 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreateYapfStyle()) def testDecorators(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ @bork() def foo(): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ @bork() def foo(): pass @@ -45,7 +47,8 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testComplexDecorators(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import sys @bork() @@ -60,7 +63,8 @@ class moo(object): def method(self): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import sys @@ -81,7 +85,8 @@ def method(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testCodeAfterFunctionsAndClasses(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): pass top_level_code = True @@ -97,7 +102,8 @@ def method_2(self): except Error as error: pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): pass @@ -126,7 +132,8 @@ def method_2(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) 
def testCommentSpacing(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ # This is the first comment # And it's multiline @@ -155,7 +162,8 @@ def foo(self): # comment pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ # This is the first comment # And it's multiline @@ -192,7 +200,8 @@ def foo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testCommentBeforeMethod(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class foo(object): # pylint: disable=invalid-name @@ -203,7 +212,8 @@ def f(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsBeforeClassDefs(self): - code = textwrap.dedent('''\ + code = textwrap.dedent( + '''\ """Test.""" # Comment @@ -216,7 +226,8 @@ class Foo(object): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsBeforeDecorator(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ # The @foo operator adds bork to a(). 
@foo() def a(): @@ -225,7 +236,8 @@ def a(): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ # Hello world @@ -237,7 +249,8 @@ def a(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsAfterDecorator(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): def _(): @@ -254,7 +267,8 @@ def test_unicode_filename_in_sdist(self, sdist_unicode, tmpdir, monkeypatch): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testInnerClasses(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class DeployAPIClient(object): class Error(Exception): pass @@ -262,7 +276,8 @@ class TaskValidationError(Error): pass class DeployAPIHTTPError(Error): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class DeployAPIClient(object): class Error(Exception): @@ -278,7 +293,8 @@ class DeployAPIHTTPError(Error): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testLinesOnRangeBoundary(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ def A(): pass @@ -292,7 +308,8 @@ def D(): # 9 def E(): pass """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ def A(): pass @@ -315,7 +332,8 @@ def E(): self.assertTrue(changed) def testLinesRangeBoundaryNotOutside(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ def A(): pass @@ -329,7 +347,8 @@ def B(): # 6 def C(): pass """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ def A(): pass @@ -348,7 +367,8 @@ def C(): self.assertFalse(changed) def testLinesRangeRemove(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + 
u"""\ def A(): pass @@ -363,7 +383,8 @@ def B(): # 6 def C(): pass """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ def A(): pass @@ -382,7 +403,8 @@ def C(): self.assertTrue(changed) def testLinesRangeRemoveSome(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ def A(): pass @@ -398,7 +420,8 @@ def B(): # 7 def C(): pass """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ def A(): pass diff --git a/yapftests/comment_splicer_test.py b/yapftests/comment_splicer_test.py index 2e0141bd4..985ea88b7 100644 --- a/yapftests/comment_splicer_test.py +++ b/yapftests/comment_splicer_test.py @@ -38,9 +38,8 @@ def _AssertNodeIsComment(self, node, text_in_comment=None): self.assertIn(text_in_comment, node_value) def _FindNthChildNamed(self, node, name, n=1): - for i, child in enumerate( - py3compat.ifilter(lambda c: pytree_utils.NodeName(c) == name, - node.pre_order())): + for i, child in enumerate(py3compat.ifilter( + lambda c: pytree_utils.NodeName(c) == name, node.pre_order())): if i == n - 1: return child raise RuntimeError('No Nth child for n={0}'.format(n)) @@ -59,7 +58,8 @@ def testSimpleInline(self): self._AssertNodeIsComment(comment_node, '# and a comment') def testSimpleSeparateLine(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' foo = 1 # first comment bar = 2 @@ -74,7 +74,8 @@ def testSimpleSeparateLine(self): self._AssertNodeIsComment(comment_node) def testTwoLineComment(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' foo = 1 # first comment # second comment @@ -88,7 +89,8 @@ def testTwoLineComment(self): self._AssertNodeIsComment(tree.children[1]) def testCommentIsFirstChildInCompound(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' if x: # a comment foo = 1 @@ -104,7 +106,8 @@ def testCommentIsFirstChildInCompound(self): 
self._AssertNodeIsComment(if_suite.children[1]) def testCommentIsLastChildInCompound(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' if x: foo = 1 # a comment @@ -120,7 +123,8 @@ def testCommentIsLastChildInCompound(self): self._AssertNodeIsComment(if_suite.children[-2]) def testInlineAfterSeparateLine(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' bar = 1 # line comment foo = 1 # inline comment @@ -133,12 +137,13 @@ def testInlineAfterSeparateLine(self): sep_comment_node = tree.children[1] self._AssertNodeIsComment(sep_comment_node, '# line comment') - expr = tree.children[2].children[0] + expr = tree.children[2].children[0] inline_comment_node = expr.children[-1] self._AssertNodeIsComment(inline_comment_node, '# inline comment') def testSeparateLineAfterInline(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' bar = 1 foo = 1 # inline comment # line comment @@ -151,12 +156,13 @@ def testSeparateLineAfterInline(self): sep_comment_node = tree.children[-2] self._AssertNodeIsComment(sep_comment_node, '# line comment') - expr = tree.children[1].children[0] + expr = tree.children[1].children[0] inline_comment_node = expr.children[-1] self._AssertNodeIsComment(inline_comment_node, '# inline comment') def testCommentBeforeDedent(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' if bar: z = 1 # a comment @@ -171,7 +177,8 @@ def testCommentBeforeDedent(self): self._AssertNodeType('DEDENT', if_suite.children[-1]) def testCommentBeforeDedentTwoLevel(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' if foo: if bar: z = 1 @@ -188,7 +195,8 @@ def testCommentBeforeDedentTwoLevel(self): self._AssertNodeType('DEDENT', if_suite.children[-1]) def testCommentBeforeDedentTwoLevelImproperlyIndented(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' if foo: if bar: z = 1 @@ -208,7 +216,8 @@ def testCommentBeforeDedentTwoLevelImproperlyIndented(self): 
self._AssertNodeType('DEDENT', if_suite.children[-1]) def testCommentBeforeDedentThreeLevel(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' if foo: if bar: z = 1 @@ -235,7 +244,8 @@ def testCommentBeforeDedentThreeLevel(self): self._AssertNodeType('DEDENT', if_suite_2.children[-1]) def testCommentsInClass(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' class Foo: """docstring abc...""" # top-level comment @@ -246,18 +256,19 @@ def foo(): pass tree = pytree_utils.ParseCodeToTree(code) comment_splicer.SpliceComments(tree) - class_suite = tree.children[0].children[3] + class_suite = tree.children[0].children[3] another_comment = class_suite.children[-2] self._AssertNodeIsComment(another_comment, '# another') # It's OK for the comment to be a child of funcdef, as long as it's # the first child and thus comes before the 'def'. - funcdef = class_suite.children[3] + funcdef = class_suite.children[3] toplevel_comment = funcdef.children[0] self._AssertNodeIsComment(toplevel_comment, '# top-level') def testMultipleBlockComments(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' # Block comment number 1 # Block comment number 2 @@ -268,7 +279,7 @@ def f(): tree = pytree_utils.ParseCodeToTree(code) comment_splicer.SpliceComments(tree) - funcdef = tree.children[0] + funcdef = tree.children[0] block_comment_1 = funcdef.children[0] self._AssertNodeIsComment(block_comment_1, '# Block comment number 1') @@ -276,7 +287,8 @@ def f(): self._AssertNodeIsComment(block_comment_2, '# Block comment number 2') def testCommentsOnDedents(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' class Foo(object): # A comment for qux. 
def qux(self): @@ -291,7 +303,7 @@ def mux(self): tree = pytree_utils.ParseCodeToTree(code) comment_splicer.SpliceComments(tree) - classdef = tree.children[0] + classdef = tree.children[0] class_suite = classdef.children[6] qux_comment = class_suite.children[1] self._AssertNodeIsComment(qux_comment, '# A comment for qux.') @@ -300,7 +312,8 @@ def mux(self): self._AssertNodeIsComment(interim_comment, '# Interim comment.') def testExprComments(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' foo( # Request fractions of an hour. 948.0/3600, 20) ''') @@ -312,7 +325,8 @@ def testExprComments(self): self._AssertNodeIsComment(comment, '# Request fractions of an hour.') def testMultipleCommentsInOneExpr(self): - code = textwrap.dedent(r''' + code = textwrap.dedent( + r''' foo( # com 1 948.0/3600, # com 2 20 + 12 # com 3 diff --git a/yapftests/file_resources_test.py b/yapftests/file_resources_test.py index 31184c4a3..9e8c568ea 100644 --- a/yapftests/file_resources_test.py +++ b/yapftests/file_resources_test.py @@ -56,7 +56,7 @@ def tearDown(self): # pylint: disable=g-missing-super-call def test_get_exclude_file_patterns_from_yapfignore(self): local_ignore_file = os.path.join(self.test_tmpdir, '.yapfignore') - ignore_patterns = ['temp/**/*.py', 'temp2/*.py'] + ignore_patterns = ['temp/**/*.py', 'temp2/*.py'] with open(local_ignore_file, 'w') as f: f.writelines('\n'.join(ignore_patterns)) @@ -66,7 +66,7 @@ def test_get_exclude_file_patterns_from_yapfignore(self): def test_get_exclude_file_patterns_from_yapfignore_with_wrong_syntax(self): local_ignore_file = os.path.join(self.test_tmpdir, '.yapfignore') - ignore_patterns = ['temp/**/*.py', './wrong/syntax/*.py'] + ignore_patterns = ['temp/**/*.py', './wrong/syntax/*.py'] with open(local_ignore_file, 'w') as f: f.writelines('\n'.join(ignore_patterns)) @@ -79,7 +79,7 @@ def test_get_exclude_file_patterns_from_pyproject(self): except ImportError: return local_ignore_file = os.path.join(self.test_tmpdir, 
'pyproject.toml') - ignore_patterns = ['temp/**/*.py', 'temp2/*.py'] + ignore_patterns = ['temp/**/*.py', 'temp2/*.py'] with open(local_ignore_file, 'w') as f: f.write('[tool.yapfignore]\n') f.write('ignore_patterns=[') @@ -97,7 +97,7 @@ def test_get_exclude_file_patterns_from_pyproject_with_wrong_syntax(self): except ImportError: return local_ignore_file = os.path.join(self.test_tmpdir, 'pyproject.toml') - ignore_patterns = ['temp/**/*.py', './wrong/syntax/*.py'] + ignore_patterns = ['temp/**/*.py', './wrong/syntax/*.py'] with open(local_ignore_file, 'w') as f: f.write('[tool.yapfignore]\n') f.write('ignore_patterns=[') @@ -113,7 +113,7 @@ def test_get_exclude_file_patterns_from_pyproject_no_ignore_section(self): except ImportError: return local_ignore_file = os.path.join(self.test_tmpdir, 'pyproject.toml') - ignore_patterns = [] + ignore_patterns = [] open(local_ignore_file, 'w').close() self.assertEqual( @@ -126,7 +126,7 @@ def test_get_exclude_file_patterns_from_pyproject_ignore_section_empty(self): except ImportError: return local_ignore_file = os.path.join(self.test_tmpdir, 'pyproject.toml') - ignore_patterns = [] + ignore_patterns = [] with open(local_ignore_file, 'w') as f: f.write('[tool.yapfignore]\n') @@ -151,12 +151,12 @@ def tearDown(self): # pylint: disable=g-missing-super-call shutil.rmtree(self.test_tmpdir) def test_no_local_style(self): - test_file = os.path.join(self.test_tmpdir, 'file.py') + test_file = os.path.join(self.test_tmpdir, 'file.py') style_name = file_resources.GetDefaultStyleForDir(test_file) self.assertEqual(style_name, 'pep8') def test_no_local_style_custom_default(self): - test_file = os.path.join(self.test_tmpdir, 'file.py') + test_file = os.path.join(self.test_tmpdir, 'file.py') style_name = file_resources.GetDefaultStyleForDir( test_file, default_style='custom-default') self.assertEqual(style_name, 'custom-default') @@ -167,27 +167,27 @@ def test_with_local_style(self): open(style_file, 'w').close() test_filename = 
os.path.join(self.test_tmpdir, 'file.py') - self.assertEqual(style_file, - file_resources.GetDefaultStyleForDir(test_filename)) + self.assertEqual( + style_file, file_resources.GetDefaultStyleForDir(test_filename)) test_filename = os.path.join(self.test_tmpdir, 'dir1', 'file.py') - self.assertEqual(style_file, - file_resources.GetDefaultStyleForDir(test_filename)) + self.assertEqual( + style_file, file_resources.GetDefaultStyleForDir(test_filename)) def test_setup_config(self): # An empty setup.cfg file should not be used setup_config = os.path.join(self.test_tmpdir, 'setup.cfg') open(setup_config, 'w').close() - test_dir = os.path.join(self.test_tmpdir, 'dir1') + test_dir = os.path.join(self.test_tmpdir, 'dir1') style_name = file_resources.GetDefaultStyleForDir(test_dir) self.assertEqual(style_name, 'pep8') # One with a '[yapf]' section should be used with open(setup_config, 'w') as f: f.write('[yapf]\n') - self.assertEqual(setup_config, - file_resources.GetDefaultStyleForDir(test_dir)) + self.assertEqual( + setup_config, file_resources.GetDefaultStyleForDir(test_dir)) def test_pyproject_toml(self): # An empty pyproject.toml file should not be used @@ -199,20 +199,20 @@ def test_pyproject_toml(self): pyproject_toml = os.path.join(self.test_tmpdir, 'pyproject.toml') open(pyproject_toml, 'w').close() - test_dir = os.path.join(self.test_tmpdir, 'dir1') + test_dir = os.path.join(self.test_tmpdir, 'dir1') style_name = file_resources.GetDefaultStyleForDir(test_dir) self.assertEqual(style_name, 'pep8') # One with a '[tool.yapf]' section should be used with open(pyproject_toml, 'w') as f: f.write('[tool.yapf]\n') - self.assertEqual(pyproject_toml, - file_resources.GetDefaultStyleForDir(test_dir)) + self.assertEqual( + pyproject_toml, file_resources.GetDefaultStyleForDir(test_dir)) def test_local_style_at_root(self): # Test behavior of files located on the root, and under root. 
- rootdir = os.path.abspath(os.path.sep) - test_dir_at_root = os.path.join(rootdir, 'dir1') + rootdir = os.path.abspath(os.path.sep) + test_dir_at_root = os.path.join(rootdir, 'dir1') test_dir_under_root = os.path.join(rootdir, 'dir1', 'dir2') # Fake placing only a style file at the root by mocking `os.path.exists`. @@ -241,7 +241,7 @@ class GetCommandLineFilesTest(unittest.TestCase): def setUp(self): # pylint: disable=g-missing-super-call self.test_tmpdir = tempfile.mkdtemp() - self.old_dir = os.getcwd() + self.old_dir = os.getcwd() def tearDown(self): # pylint: disable=g-missing-super-call os.chdir(self.old_dir) @@ -260,13 +260,11 @@ def test_find_files_not_dirs(self): _touch_files([file1, file2]) self.assertEqual( - file_resources.GetCommandLineFiles([file1, file2], - recursive=False, - exclude=None), [file1, file2]) + file_resources.GetCommandLineFiles( + [file1, file2], recursive=False, exclude=None), [file1, file2]) self.assertEqual( - file_resources.GetCommandLineFiles([file1, file2], - recursive=True, - exclude=None), [file1, file2]) + file_resources.GetCommandLineFiles( + [file1, file2], recursive=True, exclude=None), [file1, file2]) def test_nonrecursive_find_in_dir(self): tdir1 = self._make_test_dir('test1') @@ -295,9 +293,9 @@ def test_recursive_find_in_dir(self): self.assertEqual( sorted( - file_resources.GetCommandLineFiles([self.test_tmpdir], - recursive=True, - exclude=None)), sorted(files)) + file_resources.GetCommandLineFiles( + [self.test_tmpdir], recursive=True, exclude=None)), + sorted(files)) def test_recursive_find_in_dir_with_exclude(self): tdir1 = self._make_test_dir('test1') @@ -312,13 +310,13 @@ def test_recursive_find_in_dir_with_exclude(self): self.assertEqual( sorted( - file_resources.GetCommandLineFiles([self.test_tmpdir], - recursive=True, - exclude=['*test*3.py'])), - sorted([ - os.path.join(tdir1, 'testfile1.py'), - os.path.join(tdir2, 'testfile2.py'), - ])) + file_resources.GetCommandLineFiles( + [self.test_tmpdir], 
recursive=True, exclude=['*test*3.py'])), + sorted( + [ + os.path.join(tdir1, 'testfile1.py'), + os.path.join(tdir2, 'testfile2.py'), + ])) def test_find_with_excluded_hidden_dirs(self): tdir1 = self._make_test_dir('.test1') @@ -331,16 +329,16 @@ def test_find_with_excluded_hidden_dirs(self): ] _touch_files(files) - actual = file_resources.GetCommandLineFiles([self.test_tmpdir], - recursive=True, - exclude=['*.test1*']) + actual = file_resources.GetCommandLineFiles( + [self.test_tmpdir], recursive=True, exclude=['*.test1*']) self.assertEqual( sorted(actual), - sorted([ - os.path.join(tdir2, 'testfile2.py'), - os.path.join(tdir3, 'testfile3.py'), - ])) + sorted( + [ + os.path.join(tdir2, 'testfile2.py'), + os.path.join(tdir3, 'testfile3.py'), + ])) def test_find_with_excluded_hidden_dirs_relative(self): """Test find with excluded hidden dirs. @@ -375,14 +373,15 @@ def test_find_with_excluded_hidden_dirs_relative(self): self.assertEqual( sorted(actual), - sorted([ - os.path.join( - os.path.relpath(self.test_tmpdir), os.path.basename(tdir2), - 'testfile2.py'), - os.path.join( - os.path.relpath(self.test_tmpdir), os.path.basename(tdir3), - 'testfile3.py'), - ])) + sorted( + [ + os.path.join( + os.path.relpath(self.test_tmpdir), + os.path.basename(tdir2), 'testfile2.py'), + os.path.join( + os.path.relpath(self.test_tmpdir), + os.path.basename(tdir3), 'testfile3.py'), + ])) def test_find_with_excluded_dirs(self): tdir1 = self._make_test_dir('test1') @@ -398,23 +397,23 @@ def test_find_with_excluded_dirs(self): os.chdir(self.test_tmpdir) found = sorted( - file_resources.GetCommandLineFiles(['test1', 'test2', 'test3'], - recursive=True, - exclude=[ - 'test1', - 'test2/testinner/', - ])) + file_resources.GetCommandLineFiles( + ['test1', 'test2', 'test3'], + recursive=True, + exclude=[ + 'test1', + 'test2/testinner/', + ])) self.assertEqual( found, ['test3/foo/bar/bas/xxx/testfile3.py'.replace("/", os.path.sep)]) found = sorted( - file_resources.GetCommandLineFiles(['.'], - 
recursive=True, - exclude=[ - 'test1', - 'test3', - ])) + file_resources.GetCommandLineFiles( + ['.'], recursive=True, exclude=[ + 'test1', + 'test3', + ])) self.assertEqual( found, ['./test2/testinner/testfile2.py'.replace("/", os.path.sep)]) @@ -517,7 +516,7 @@ def test_write_to_file(self): self.assertEqual(f2.read(), s) def test_write_to_stdout(self): - s = u'foobar' + s = u'foobar' stream = BufferedByteStream() if py3compat.PY3 else py3compat.StringIO() with utils.stdout_redirector(stream): file_resources.WriteReformattedCode( @@ -525,7 +524,7 @@ def test_write_to_stdout(self): self.assertEqual(stream.getvalue(), s) def test_write_encoded_to_stdout(self): - s = '\ufeff# -*- coding: utf-8 -*-\nresult = "passed"\n' # pylint: disable=anomalous-unicode-escape-in-string # noqa + s = '\ufeff# -*- coding: utf-8 -*-\nresult = "passed"\n' # pylint: disable=anomalous-unicode-escape-in-string # noqa stream = BufferedByteStream() if py3compat.PY3 else py3compat.StringIO() with utils.stdout_redirector(stream): file_resources.WriteReformattedCode( @@ -536,17 +535,17 @@ def test_write_encoded_to_stdout(self): class LineEndingTest(unittest.TestCase): def test_line_ending_linefeed(self): - lines = ['spam\n', 'spam\n'] + lines = ['spam\n', 'spam\n'] actual = file_resources.LineEnding(lines) self.assertEqual(actual, '\n') def test_line_ending_carriage_return(self): - lines = ['spam\r', 'spam\r'] + lines = ['spam\r', 'spam\r'] actual = file_resources.LineEnding(lines) self.assertEqual(actual, '\r') def test_line_ending_combo(self): - lines = ['spam\r\n', 'spam\r\n'] + lines = ['spam\r\n', 'spam\r\n'] actual = file_resources.LineEnding(lines) self.assertEqual(actual, '\r\n') diff --git a/yapftests/format_decision_state_test.py b/yapftests/format_decision_state_test.py index 63961f332..d9cdefe8c 100644 --- a/yapftests/format_decision_state_test.py +++ b/yapftests/format_decision_state_test.py @@ -32,12 +32,12 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreateYapfStyle()) def 
testSimpleFunctionDefWithNoSplitting(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" def f(a, b): pass """) llines = yapf_test_helper.ParseAndUnwrap(code) - lline = logical_line.LogicalLine(0, _FilterLine(llines[0])) + lline = logical_line.LogicalLine(0, _FilterLine(llines[0])) lline.CalculateFormattingInformation() # Add: 'f' @@ -86,12 +86,12 @@ def f(a, b): self.assertEqual(repr(state), repr(clone)) def testSimpleFunctionDefWithSplitting(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" def f(a, b): pass """) llines = yapf_test_helper.ParseAndUnwrap(code) - lline = logical_line.LogicalLine(0, _FilterLine(llines[0])) + lline = logical_line.LogicalLine(0, _FilterLine(llines[0])) lline.CalculateFormattingInformation() # Add: 'f' diff --git a/yapftests/line_joiner_test.py b/yapftests/line_joiner_test.py index 2eaf16478..ea6186693 100644 --- a/yapftests/line_joiner_test.py +++ b/yapftests/line_joiner_test.py @@ -39,20 +39,23 @@ def _CheckLineJoining(self, code, join_lines): self.assertCodeEqual(line_joiner.CanMergeMultipleLines(llines), join_lines) def testSimpleSingleLineStatement(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ if isinstance(a, int): continue """) self._CheckLineJoining(code, join_lines=True) def testSimpleMultipleLineStatement(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ if isinstance(b, int): continue """) self._CheckLineJoining(code, join_lines=False) def testSimpleMultipleLineComplexStatement(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ if isinstance(c, int): while True: continue @@ -60,19 +63,22 @@ def testSimpleMultipleLineComplexStatement(self): self._CheckLineJoining(code, join_lines=False) def testSimpleMultipleLineStatementWithComment(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ if isinstance(d, int): continue # We're pleased that d's an int. 
""") self._CheckLineJoining(code, join_lines=True) def testSimpleMultipleLineStatementWithLargeIndent(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ if isinstance(e, int): continue """) self._CheckLineJoining(code, join_lines=True) def testOverColumnLimit(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ if instance(bbbbbbbbbbbbbbbbbbbbbbbbb, int): cccccccccccccccccccccccccc = ddddddddddddddddddddd """) # noqa self._CheckLineJoining(code, join_lines=False) diff --git a/yapftests/logical_line_test.py b/yapftests/logical_line_test.py index d18262a7c..695f88bd5 100644 --- a/yapftests/logical_line_test.py +++ b/yapftests/logical_line_test.py @@ -29,25 +29,29 @@ class LogicalLineBasicTest(unittest.TestCase): def testConstruction(self): - toks = _MakeFormatTokenList([(token.DOT, '.', 'DOT'), - (token.VBAR, '|', 'VBAR')]) + toks = _MakeFormatTokenList( + [(token.DOT, '.', 'DOT'), (token.VBAR, '|', 'VBAR')]) lline = logical_line.LogicalLine(20, toks) self.assertEqual(20, lline.depth) self.assertEqual(['DOT', 'VBAR'], [tok.name for tok in lline.tokens]) def testFirstLast(self): - toks = _MakeFormatTokenList([(token.DOT, '.', 'DOT'), - (token.LPAR, '(', 'LPAR'), - (token.VBAR, '|', 'VBAR')]) + toks = _MakeFormatTokenList( + [ + (token.DOT, '.', 'DOT'), (token.LPAR, '(', 'LPAR'), + (token.VBAR, '|', 'VBAR') + ]) lline = logical_line.LogicalLine(20, toks) self.assertEqual(20, lline.depth) self.assertEqual('DOT', lline.first.name) self.assertEqual('VBAR', lline.last.name) def testAsCode(self): - toks = _MakeFormatTokenList([(token.DOT, '.', 'DOT'), - (token.LPAR, '(', 'LPAR'), - (token.VBAR, '|', 'VBAR')]) + toks = _MakeFormatTokenList( + [ + (token.DOT, '.', 'DOT'), (token.LPAR, '(', 'LPAR'), + (token.VBAR, '|', 'VBAR') + ]) lline = logical_line.LogicalLine(2, toks) self.assertEqual(' . 
( |', lline.AsCode()) @@ -61,7 +65,7 @@ def testAppendToken(self): class LogicalLineFormattingInformationTest(yapf_test_helper.YAPFTest): def testFuncDef(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" def f(a, b): pass """) diff --git a/yapftests/main_test.py b/yapftests/main_test.py index c83b8b66a..ea6892f5a 100644 --- a/yapftests/main_test.py +++ b/yapftests/main_test.py @@ -78,7 +78,7 @@ def patch_raw_input(lines=lines()): return next(lines) try: - orig_raw_import = yapf.py3compat.raw_input + orig_raw_import = yapf.py3compat.raw_input yapf.py3compat.raw_input = patch_raw_input yield finally: @@ -90,7 +90,7 @@ class RunMainTest(yapf_test_helper.YAPFTest): def testShouldHandleYapfError(self): """run_main should handle YapfError and sys.exit(1).""" expected_message = 'yapf: input filenames did not match any python files\n' - sys.argv = ['yapf', 'foo.c'] + sys.argv = ['yapf', 'foo.c'] with captured_output() as (out, err): with self.assertRaises(SystemExit): yapf.run_main() @@ -114,7 +114,7 @@ def testEchoInput(self): self.assertEqual(out.getvalue(), code) def testEchoInputWithStyle(self): - code = 'def f(a = 1\n\n):\n return 2*a\n' + code = 'def f(a = 1\n\n):\n return 2*a\n' yapf_code = 'def f(a=1):\n return 2 * a\n' with patched_input(code): with captured_output() as (out, _): @@ -135,5 +135,6 @@ def testHelp(self): self.assertEqual(ret, 0) help_message = out.getvalue() self.assertIn('indent_width=4', help_message) - self.assertIn('The number of spaces required before a trailing comment.', - help_message) + self.assertIn( + 'The number of spaces required before a trailing comment.', + help_message) diff --git a/yapftests/pytree_unwrapper_test.py b/yapftests/pytree_unwrapper_test.py index 525278def..cd67e0de1 100644 --- a/yapftests/pytree_unwrapper_test.py +++ b/yapftests/pytree_unwrapper_test.py @@ -43,69 +43,79 @@ def _CheckLogicalLines(self, llines, list_of_expected): self.assertEqual(list_of_expected, actual) def testSimpleFileScope(self): - 
code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" x = 1 # a comment y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['x', '=', '1']), - (0, ['# a comment']), - (0, ['y', '=', '2']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['x', '=', '1']), + (0, ['# a comment']), + (0, ['y', '=', '2']), + ]) def testSimpleMultilineStatement(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" y = (1 + x) """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['y', '=', '(', '1', '+', 'x', ')']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['y', '=', '(', '1', '+', 'x', ')']), + ]) def testFileScopeWithInlineComment(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" x = 1 # a comment y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['x', '=', '1', '# a comment']), - (0, ['y', '=', '2']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['x', '=', '1', '# a comment']), + (0, ['y', '=', '2']), + ]) def testSimpleIf(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" if foo: x = 1 y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['if', 'foo', ':']), - (1, ['x', '=', '1']), - (1, ['y', '=', '2']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['if', 'foo', ':']), + (1, ['x', '=', '1']), + (1, ['y', '=', '2']), + ]) def testSimpleIfWithComments(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" # c1 if foo: # c2 x = 1 y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['# c1']), - (0, ['if', 'foo', ':', '# c2']), - (1, ['x', '=', '1']), - (1, ['y', '=', '2']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['# c1']), + (0, ['if', 'foo', ':', '# c2']), + (1, ['x', '=', '1']), + (1, ['y', '=', '2']), + ]) def testIfWithCommentsInside(self): - code = 
textwrap.dedent(r""" + code = textwrap.dedent( + r""" if foo: # c1 x = 1 # c2 @@ -113,16 +123,18 @@ def testIfWithCommentsInside(self): y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['if', 'foo', ':']), - (1, ['# c1']), - (1, ['x', '=', '1', '# c2']), - (1, ['# c3']), - (1, ['y', '=', '2']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['if', 'foo', ':']), + (1, ['# c1']), + (1, ['x', '=', '1', '# c2']), + (1, ['# c3']), + (1, ['y', '=', '2']), + ]) def testIfElifElse(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" if x: x = 1 # c1 elif y: # c2 @@ -132,18 +144,20 @@ def testIfElifElse(self): z = 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['if', 'x', ':']), - (1, ['x', '=', '1', '# c1']), - (0, ['elif', 'y', ':', '# c2']), - (1, ['y', '=', '1']), - (0, ['else', ':']), - (1, ['# c3']), - (1, ['z', '=', '1']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['if', 'x', ':']), + (1, ['x', '=', '1', '# c1']), + (0, ['elif', 'y', ':', '# c2']), + (1, ['y', '=', '1']), + (0, ['else', ':']), + (1, ['# c3']), + (1, ['z', '=', '1']), + ]) def testNestedCompoundTwoLevel(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" if x: x = 1 # c1 while t: @@ -152,30 +166,34 @@ def testNestedCompoundTwoLevel(self): k = 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['if', 'x', ':']), - (1, ['x', '=', '1', '# c1']), - (1, ['while', 't', ':']), - (2, ['# c2']), - (2, ['j', '=', '1']), - (1, ['k', '=', '1']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['if', 'x', ':']), + (1, ['x', '=', '1', '# c1']), + (1, ['while', 't', ':']), + (2, ['# c2']), + (2, ['j', '=', '1']), + (1, ['k', '=', '1']), + ]) def testSimpleWhile(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" while x > 1: # c1 # c2 x = 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - 
self._CheckLogicalLines(llines, [ - (0, ['while', 'x', '>', '1', ':', '# c1']), - (1, ['# c2']), - (1, ['x', '=', '1']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['while', 'x', '>', '1', ':', '# c1']), + (1, ['# c2']), + (1, ['x', '=', '1']), + ]) def testSimpleTry(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" try: pass except: @@ -188,34 +206,38 @@ def testSimpleTry(self): pass """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['try', ':']), - (1, ['pass']), - (0, ['except', ':']), - (1, ['pass']), - (0, ['except', ':']), - (1, ['pass']), - (0, ['else', ':']), - (1, ['pass']), - (0, ['finally', ':']), - (1, ['pass']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['try', ':']), + (1, ['pass']), + (0, ['except', ':']), + (1, ['pass']), + (0, ['except', ':']), + (1, ['pass']), + (0, ['else', ':']), + (1, ['pass']), + (0, ['finally', ':']), + (1, ['pass']), + ]) def testSimpleFuncdef(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" def foo(x): # c1 # c2 return x """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['def', 'foo', '(', 'x', ')', ':', '# c1']), - (1, ['# c2']), - (1, ['return', 'x']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['def', 'foo', '(', 'x', ')', ':', '# c1']), + (1, ['# c2']), + (1, ['return', 'x']), + ]) def testTwoFuncDefs(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" def foo(x): # c1 # c2 return x @@ -225,40 +247,45 @@ def bar(): # c3 return x """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['def', 'foo', '(', 'x', ')', ':', '# c1']), - (1, ['# c2']), - (1, ['return', 'x']), - (0, ['def', 'bar', '(', ')', ':', '# c3']), - (1, ['# c4']), - (1, ['return', 'x']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['def', 'foo', '(', 'x', ')', ':', '# c1']), + (1, ['# c2']), + (1, ['return', 'x']), + (0, ['def', 'bar', '(', ')', 
':', '# c3']), + (1, ['# c4']), + (1, ['return', 'x']), + ]) def testSimpleClassDef(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" class Klass: # c1 # c2 p = 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['class', 'Klass', ':', '# c1']), - (1, ['# c2']), - (1, ['p', '=', '1']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['class', 'Klass', ':', '# c1']), + (1, ['# c2']), + (1, ['p', '=', '1']), + ]) def testSingleLineStmtInFunc(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" def f(): return 37 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['def', 'f', '(', ')', ':']), - (1, ['return', '37']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['def', 'f', '(', ')', ':']), + (1, ['return', '37']), + ]) def testMultipleComments(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" # Comment #1 # Comment #2 @@ -266,15 +293,17 @@ def f(): pass """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [ - (0, ['# Comment #1']), - (0, ['# Comment #2']), - (0, ['def', 'f', '(', ')', ':']), - (1, ['pass']), - ]) + self._CheckLogicalLines( + llines, [ + (0, ['# Comment #1']), + (0, ['# Comment #2']), + (0, ['def', 'f', '(', ')', ':']), + (1, ['pass']), + ]) def testSplitListWithComment(self): - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" a = [ 'a', 'b', @@ -282,9 +311,14 @@ def testSplitListWithComment(self): ] """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines(llines, [(0, [ - 'a', '=', '[', "'a'", ',', "'b'", ',', "'c'", ',', '# hello world', ']' - ])]) + self._CheckLogicalLines( + llines, [ + ( + 0, [ + 'a', '=', '[', "'a'", ',', "'b'", ',', "'c'", ',', + '# hello world', ']' + ]) + ]) class MatchBracketsTest(yapf_test_helper.YAPFTest): @@ -300,9 +334,11 @@ def _CheckMatchingBrackets(self, llines, list_of_expected): """ actual = [] for lline 
in llines: - filtered_values = [(ft, ft.matching_bracket) - for ft in lline.tokens - if ft.name not in pytree_utils.NONSEMANTIC_TOKENS] + filtered_values = [ + (ft, ft.matching_bracket) + for ft in lline.tokens + if ft.name not in pytree_utils.NONSEMANTIC_TOKENS + ] if filtered_values: actual.append(filtered_values) @@ -317,7 +353,8 @@ def _CheckMatchingBrackets(self, llines, list_of_expected): self.assertEqual(lline[close_bracket][0], lline[open_bracket][1]) def testFunctionDef(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def foo(a, b=['w','d'], c=[42, 37]): pass """) @@ -328,7 +365,8 @@ def foo(a, b=['w','d'], c=[42, 37]): ]) def testDecorator(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ @bar() def foo(a, b, c): pass @@ -341,7 +379,8 @@ def foo(a, b, c): ]) def testClassDef(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class A(B, C, D): pass """) diff --git a/yapftests/pytree_utils_test.py b/yapftests/pytree_utils_test.py index c175f833e..ec61f75d2 100644 --- a/yapftests/pytree_utils_test.py +++ b/yapftests/pytree_utils_test.py @@ -25,7 +25,7 @@ # module. 
_GRAMMAR_SYMBOL2NUMBER = pygram.python_grammar.symbol2number -_FOO = 'foo' +_FOO = 'foo' _FOO1 = 'foo1' _FOO2 = 'foo2' _FOO3 = 'foo3' @@ -87,12 +87,12 @@ def _BuildSimpleTree(self): # simple_stmt: # NAME('foo') # - lpar1 = pytree.Leaf(token.LPAR, '(') - lpar2 = pytree.Leaf(token.LPAR, '(') - simple_stmt = pytree.Node(_GRAMMAR_SYMBOL2NUMBER['simple_stmt'], - [pytree.Leaf(token.NAME, 'foo')]) - return pytree.Node(_GRAMMAR_SYMBOL2NUMBER['suite'], - [lpar1, lpar2, simple_stmt]) + lpar1 = pytree.Leaf(token.LPAR, '(') + lpar2 = pytree.Leaf(token.LPAR, '(') + simple_stmt = pytree.Node( + _GRAMMAR_SYMBOL2NUMBER['simple_stmt'], [pytree.Leaf(token.NAME, 'foo')]) + return pytree.Node( + _GRAMMAR_SYMBOL2NUMBER['suite'], [lpar1, lpar2, simple_stmt]) def _MakeNewNodeRPAR(self): return pytree.Leaf(token.RPAR, ')') @@ -102,18 +102,18 @@ def setUp(self): def testInsertNodesBefore(self): # Insert before simple_stmt and make sure it went to the right place - pytree_utils.InsertNodesBefore([self._MakeNewNodeRPAR()], - self._simple_tree.children[2]) + pytree_utils.InsertNodesBefore( + [self._MakeNewNodeRPAR()], self._simple_tree.children[2]) self.assertEqual(4, len(self._simple_tree.children)) - self.assertEqual('RPAR', - pytree_utils.NodeName(self._simple_tree.children[2])) - self.assertEqual('simple_stmt', - pytree_utils.NodeName(self._simple_tree.children[3])) + self.assertEqual( + 'RPAR', pytree_utils.NodeName(self._simple_tree.children[2])) + self.assertEqual( + 'simple_stmt', pytree_utils.NodeName(self._simple_tree.children[3])) def testInsertNodesBeforeFirstChild(self): # Insert before the first child of its parent simple_stmt = self._simple_tree.children[2] - foo_child = simple_stmt.children[0] + foo_child = simple_stmt.children[0] pytree_utils.InsertNodesBefore([self._MakeNewNodeRPAR()], foo_child) self.assertEqual(3, len(self._simple_tree.children)) self.assertEqual(2, len(simple_stmt.children)) @@ -122,18 +122,18 @@ def testInsertNodesBeforeFirstChild(self): def 
testInsertNodesAfter(self): # Insert after and make sure it went to the right place - pytree_utils.InsertNodesAfter([self._MakeNewNodeRPAR()], - self._simple_tree.children[2]) + pytree_utils.InsertNodesAfter( + [self._MakeNewNodeRPAR()], self._simple_tree.children[2]) self.assertEqual(4, len(self._simple_tree.children)) - self.assertEqual('simple_stmt', - pytree_utils.NodeName(self._simple_tree.children[2])) - self.assertEqual('RPAR', - pytree_utils.NodeName(self._simple_tree.children[3])) + self.assertEqual( + 'simple_stmt', pytree_utils.NodeName(self._simple_tree.children[2])) + self.assertEqual( + 'RPAR', pytree_utils.NodeName(self._simple_tree.children[3])) def testInsertNodesAfterLastChild(self): # Insert after the last child of its parent simple_stmt = self._simple_tree.children[2] - foo_child = simple_stmt.children[0] + foo_child = simple_stmt.children[0] pytree_utils.InsertNodesAfter([self._MakeNewNodeRPAR()], foo_child) self.assertEqual(3, len(self._simple_tree.children)) self.assertEqual(2, len(simple_stmt.children)) @@ -143,16 +143,16 @@ def testInsertNodesAfterLastChild(self): def testInsertNodesWhichHasParent(self): # Try to insert an existing tree node into another place and fail. 
with self.assertRaises(RuntimeError): - pytree_utils.InsertNodesAfter([self._simple_tree.children[1]], - self._simple_tree.children[0]) + pytree_utils.InsertNodesAfter( + [self._simple_tree.children[1]], self._simple_tree.children[0]) class AnnotationsTest(unittest.TestCase): def setUp(self): self._leaf = pytree.Leaf(token.LPAR, '(') - self._node = pytree.Node(_GRAMMAR_SYMBOL2NUMBER['simple_stmt'], - [pytree.Leaf(token.NAME, 'foo')]) + self._node = pytree.Node( + _GRAMMAR_SYMBOL2NUMBER['simple_stmt'], [pytree.Leaf(token.NAME, 'foo')]) def testGetWhenNone(self): self.assertIsNone(pytree_utils.GetNodeAnnotation(self._leaf, _FOO)) @@ -183,18 +183,18 @@ def testMultiple(self): self.assertEqual(pytree_utils.GetNodeAnnotation(self._leaf, _FOO5), 5) def testSubtype(self): - pytree_utils.AppendNodeAnnotation(self._leaf, - pytree_utils.Annotation.SUBTYPE, _FOO) + pytree_utils.AppendNodeAnnotation( + self._leaf, pytree_utils.Annotation.SUBTYPE, _FOO) self.assertSetEqual( - pytree_utils.GetNodeAnnotation(self._leaf, - pytree_utils.Annotation.SUBTYPE), {_FOO}) + pytree_utils.GetNodeAnnotation( + self._leaf, pytree_utils.Annotation.SUBTYPE), {_FOO}) pytree_utils.RemoveSubtypeAnnotation(self._leaf, _FOO) self.assertSetEqual( - pytree_utils.GetNodeAnnotation(self._leaf, - pytree_utils.Annotation.SUBTYPE), set()) + pytree_utils.GetNodeAnnotation( + self._leaf, pytree_utils.Annotation.SUBTYPE), set()) def testSetOnNode(self): pytree_utils.SetNodeAnnotation(self._node, _FOO, 20) diff --git a/yapftests/pytree_visitor_test.py b/yapftests/pytree_visitor_test.py index 45a83b113..231183030 100644 --- a/yapftests/pytree_visitor_test.py +++ b/yapftests/pytree_visitor_test.py @@ -31,7 +31,7 @@ class _NodeNameCollector(pytree_visitor.PyTreeVisitor): """ def __init__(self): - self.all_node_names = [] + self.all_node_names = [] self.name_node_values = [] def DefaultNodeVisit(self, node): @@ -61,7 +61,7 @@ def Visit_NAME(self, leaf): class PytreeVisitorTest(unittest.TestCase): def 
testCollectAllNodeNamesSimpleCode(self): - tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) + tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) collector = _NodeNameCollector() collector.Visit(tree) expected_names = [ @@ -76,7 +76,7 @@ def testCollectAllNodeNamesSimpleCode(self): self.assertEqual(expected_name_node_values, collector.name_node_values) def testCollectAllNodeNamesNestedCode(self): - tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_NESTED_CODE) + tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_NESTED_CODE) collector = _NodeNameCollector() collector.Visit(tree) expected_names = [ @@ -95,7 +95,7 @@ def testCollectAllNodeNamesNestedCode(self): def testDumper(self): # PyTreeDumper is mainly a debugging utility, so only do basic sanity # checking. - tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) + tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) stream = py3compat.StringIO() pytree_visitor.PyTreeDumper(target_stream=stream).Visit(tree) @@ -106,7 +106,7 @@ def testDumper(self): def testDumpPyTree(self): # Similar sanity checking for the convenience wrapper DumpPyTree - tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) + tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) stream = py3compat.StringIO() pytree_visitor.DumpPyTree(tree, target_stream=stream) diff --git a/yapftests/reformatter_basic_test.py b/yapftests/reformatter_basic_test.py index 798dbab9a..7f1e1a43e 100644 --- a/yapftests/reformatter_basic_test.py +++ b/yapftests/reformatter_basic_test.py @@ -33,10 +33,12 @@ def testSplittingAllArgs(self): style.SetGlobalStyle( style.CreateStyleFromConfig( '{split_all_comma_separated_values: true, column_limit: 40}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ responseDict = {"timestamp": timestamp, "someValue": value, "whatever": 120} """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = 
textwrap.dedent( + """\ responseDict = { "timestamp": timestamp, "someValue": value, @@ -46,10 +48,12 @@ def testSplittingAllArgs(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ yes = { 'yes': 'no', 'no': 'yes', } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ yes = { 'yes': 'no', 'no': 'yes', @@ -57,11 +61,13 @@ def testSplittingAllArgs(self): """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(long_arg, really_long_arg, really_really_long_arg, cant_keep_all_these_args): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(long_arg, really_long_arg, really_really_long_arg, @@ -70,10 +76,12 @@ def foo(long_arg, """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ foo_tuple = [long_arg, really_long_arg, really_really_long_arg, cant_keep_all_these_args] """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ foo_tuple = [ long_arg, really_long_arg, @@ -83,21 +91,25 @@ def foo(long_arg, """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ foo_tuple = [short, arg] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ foo_tuple = 
[short, arg] """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # There is a test for split_all_top_level_comma_separated_values, with # different expected value - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ someLongFunction(this_is_a_very_long_parameter, abc=(a, this_will_just_fit_xxxxxxx)) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ someLongFunction( this_is_a_very_long_parameter, abc=(a, @@ -112,10 +124,12 @@ def testSplittingTopLevelAllArgs(self): '{split_all_top_level_comma_separated_values: true, ' 'column_limit: 40}')) # Works the same way as split_all_comma_separated_values - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ responseDict = {"timestamp": timestamp, "someValue": value, "whatever": 120} """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ responseDict = { "timestamp": timestamp, "someValue": value, @@ -125,11 +139,13 @@ def testSplittingTopLevelAllArgs(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Works the same way as split_all_comma_separated_values - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(long_arg, really_long_arg, really_really_long_arg, cant_keep_all_these_args): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(long_arg, really_long_arg, really_really_long_arg, @@ -139,10 +155,12 @@ def foo(long_arg, llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Works the same way as split_all_comma_separated_values - unformatted_code = textwrap.dedent("""\ + 
unformatted_code = textwrap.dedent( + """\ foo_tuple = [long_arg, really_long_arg, really_really_long_arg, cant_keep_all_these_args] """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ foo_tuple = [ long_arg, really_long_arg, @@ -153,35 +171,41 @@ def foo(long_arg, llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Works the same way as split_all_comma_separated_values - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ foo_tuple = [short, arg] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ foo_tuple = [short, arg] """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # There is a test for split_all_comma_separated_values, with different # expected value - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ someLongFunction(this_is_a_very_long_parameter, abc=(a, this_will_just_fit_xxxxxxx)) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ someLongFunction( this_is_a_very_long_parameter, abc=(a, this_will_just_fit_xxxxxxx)) """) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) actual_formatted_code = reformatter.Reformat(llines) self.assertEqual(40, len(actual_formatted_code.splitlines()[-1])) self.assertCodeEqual(expected_formatted_code, actual_formatted_code) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ someLongFunction(this_is_a_very_long_parameter, abc=(a, this_will_not_fit_xxxxxxxxx)) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ someLongFunction( this_is_a_very_long_parameter, 
abc=(a, @@ -191,11 +215,13 @@ def foo(long_arg, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Exercise the case where there's no opening bracket (for a, b) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a, b = f( a_very_long_parameter, yet_another_one, and_another) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a, b = f( a_very_long_parameter, yet_another_one, and_another) """) @@ -203,7 +229,8 @@ def foo(long_arg, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Don't require splitting before comments. - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ KO = { 'ABC': Abc, # abc 'DEF': Def, # def @@ -212,7 +239,8 @@ def foo(long_arg, 'JKL': Jkl, } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ KO = { 'ABC': Abc, # abc 'DEF': Def, # def @@ -225,7 +253,8 @@ def foo(long_arg, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSimpleFunctionsWithTrailingComments(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def g(): # Trailing comment if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -237,7 +266,8 @@ def f( # Intermediate comment xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def g(): # Trailing comment if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -254,11 +284,13 @@ def f( # Intermediate comment self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankLinesBetweenTopLevelImportsAndVariables(self): - 
unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import foo as bar VAR = 'baz' """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import foo as bar VAR = 'baz' @@ -266,12 +298,14 @@ def testBlankLinesBetweenTopLevelImportsAndVariables(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import foo as bar VAR = 'baz' """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import foo as bar @@ -283,28 +317,32 @@ def testBlankLinesBetweenTopLevelImportsAndVariables(self): '{based_on_style: yapf, ' 'blank_lines_between_top_level_imports_and_variables: 2}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import foo as bar # Some comment """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import foo as bar # Some comment """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import foo as bar class Baz(): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import foo as bar @@ -314,12 +352,14 @@ class Baz(): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = 
textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import foo as bar def foobar(): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import foo as bar @@ -329,12 +369,14 @@ def foobar(): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foobar(): from foo import Bar Bar.baz() """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foobar(): from foo import Bar Bar.baz() @@ -343,34 +385,39 @@ def foobar(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankLinesAtEndOfFile(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foobar(): # foo pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foobar(): # foo pass """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ x = { 'a':37,'b':42, 'c':927} """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ x = {'a': 37, 'b': 42, 'c': 927} """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testIndentBlankLines(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class foo(object): def foobar(self): @@ -398,18 +445,19 @@ class foo(object):\n \n def foobar(self):\n \n pass\n \n def barfoo(se '{based_on_style: yapf, indent_blank_lines: true}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - 
self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) - unformatted_code, expected_formatted_code = (expected_formatted_code, - unformatted_code) + unformatted_code, expected_formatted_code = ( + expected_formatted_code, unformatted_code) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultipleUgliness(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ x = { 'a':37,'b':42, 'c':927} @@ -425,7 +473,8 @@ def g(self, x,y=42): def f ( a ) : return 37+-+a[42-x : y**3] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ x = {'a': 37, 'b': 42, 'c': 927} y = 'hello ' 'world' @@ -449,7 +498,8 @@ def f(a): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testComments(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class Foo(object): pass @@ -471,7 +521,8 @@ class Baz(object): class Qux(object): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class Foo(object): pass @@ -502,16 +553,18 @@ class Qux(object): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSingleComment(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ # Thing 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsWithTrailingSpaces(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ # Thing 1 \n# Thing 2 \n""") - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ # Thing 1 # Thing 2 """) @@ -519,7 +572,8 @@ def 
testCommentsWithTrailingSpaces(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testCommentsInDataLiteral(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): return collections.OrderedDict({ # First comment. @@ -536,7 +590,8 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testEndingWhitespaceAfterSimpleStatement(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ import foo as bar # Thing 1 # Thing 2 @@ -545,7 +600,8 @@ def testEndingWhitespaceAfterSimpleStatement(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDocstrings(self): - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ u"""Module-level docstring.""" import os class Foo(object): @@ -562,7 +618,8 @@ def qux(self): print('hello {}'.format('world')) return 42 ''') - expected_formatted_code = textwrap.dedent('''\ + expected_formatted_code = textwrap.dedent( + '''\ u"""Module-level docstring.""" import os @@ -583,7 +640,8 @@ def qux(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDocstringAndMultilineComment(self): - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ """Hello world""" # A multiline # comment @@ -597,7 +655,8 @@ def foo(self): # comment pass ''') - expected_formatted_code = textwrap.dedent('''\ + expected_formatted_code = textwrap.dedent( + '''\ """Hello world""" @@ -618,7 +677,8 @@ def foo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultilineDocstringAndMultilineComment(self): - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ """Hello world RIP Dennis Richie. @@ -641,7 +701,8 @@ def foo(self): # comment pass ''') - expected_formatted_code = textwrap.dedent('''\ + expected_formatted_code = textwrap.dedent( + '''\ """Hello world RIP Dennis Richie. 
@@ -671,24 +732,26 @@ def foo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testTupleCommaBeforeLastParen(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ a = ( 1, ) """) expected_formatted_code = textwrap.dedent("""\ a = (1,) """) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoBreakOutsideOfBracket(self): # FIXME(morbo): How this is formatted is not correct. But it's syntactically # correct. - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def f(): assert port >= minimum, \ 'Unexpected port %d when minimum was %d.' % (port, minimum) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def f(): assert port >= minimum, 'Unexpected port %d when minimum was %d.' 
% (port, minimum) @@ -697,7 +760,8 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankLinesBeforeDecorators(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ @foo() class A(object): @bar() @@ -705,7 +769,8 @@ class A(object): def x(self): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ @foo() class A(object): @@ -718,14 +783,16 @@ def x(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testCommentBetweenDecorators(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ @foo() # frob @bar def x (self): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ @foo() # frob @bar @@ -736,12 +803,14 @@ def x(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testListComprehension(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def given(y): [k for k in () if k in y] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def given(y): [k for k in () if k in y] """) @@ -749,14 +818,16 @@ def given(y): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testListComprehensionPreferOneLine(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def given(y): long_variable_name = [ long_var_name + 1 for long_var_name in () if long_var_name == 2] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def given(y): long_variable_name = [ long_var_name + 1 for long_var_name in () if long_var_name == 2 @@ -766,12 +837,14 @@ def given(y): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def 
testListComprehensionPreferOneLineOverArithmeticSplit(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def given(used_identifiers): return (sum(len(identifier) for identifier in used_identifiers) / len(used_identifiers)) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def given(used_identifiers): return (sum(len(identifier) for identifier in used_identifiers) / len(used_identifiers)) @@ -780,14 +853,16 @@ def given(used_identifiers): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testListComprehensionPreferThreeLinesForLineWrap(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def given(y): long_variable_name = [ long_var_name + 1 for long_var_name, number_two in () if long_var_name == 2 and number_two == 3] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def given(y): long_variable_name = [ long_var_name + 1 @@ -799,14 +874,16 @@ def given(y): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testListComprehensionPreferNoBreakForTrivialExpression(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def given(y): long_variable_name = [ long_var_name for long_var_name, number_two in () if long_var_name == 2 and number_two == 3] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def given(y): long_variable_name = [ long_var_name for long_var_name, number_two in () @@ -817,7 +894,7 @@ def given(y): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testOpeningAndClosingBrackets(self): - unformatted_code = """\ + unformatted_code = """\ foo( (1, ) ) foo( ( 1, 2, 3 ) ) foo( ( 1, 2, 3, ) ) @@ -831,14 +908,16 @@ def testOpeningAndClosingBrackets(self): 3, )) """ - llines = 
yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSingleLineFunctions(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): return 42 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): return 42 """) @@ -849,21 +928,23 @@ def testNoQueueSeletionInMiddleOfLine(self): # If the queue isn't properly constructed, then a token in the middle of the # line may be selected as the one with least penalty. The tokens after that # one are then splatted at the end of the line with no formatting. - unformatted_code = """\ + unformatted_code = """\ find_symbol(node.type) + "< " + " ".join(find_pattern(n) for n in node.child) + " >" """ # noqa expected_formatted_code = """\ find_symbol(node.type) + "< " + " ".join( find_pattern(n) for n in node.child) + " >" """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoSpacesBetweenSubscriptsAndCalls(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ aaaaaaaaaa = bbbbbbbb.ccccccccc() [42] (a, 2) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ aaaaaaaaaa = bbbbbbbb.ccccccccc()[42](a, 2) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) @@ -871,21 +952,25 @@ def testNoSpacesBetweenSubscriptsAndCalls(self): def testNoSpacesBetweenOpeningBracketAndStartingOperator(self): # Unary operator. 
- unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ aaaaaaaaaa = bbbbbbbb.ccccccccc[ -1 ]( -42 ) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ aaaaaaaaaa = bbbbbbbb.ccccccccc[-1](-42) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Varargs and kwargs. - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ aaaaaaaaaa = bbbbbbbb.ccccccccc( *varargs ) aaaaaaaaaa = bbbbbbbb.ccccccccc( **kwargs ) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ aaaaaaaaaa = bbbbbbbb.ccccccccc(*varargs) aaaaaaaaaa = bbbbbbbb.ccccccccc(**kwargs) """) @@ -893,13 +978,15 @@ def testNoSpacesBetweenOpeningBracketAndStartingOperator(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultilineCommentReformatted(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: # This is a multiline # comment. pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: # This is a multiline # comment. 
@@ -909,7 +996,8 @@ def testMultilineCommentReformatted(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDictionaryMakerFormatting(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ _PYTHON_STATEMENTS = frozenset({ lambda x, y: 'simple_stmt': 'small_stmt', 'expr_stmt': 'print_stmt', 'del_stmt': 'pass_stmt', lambda: 'break_stmt': 'continue_stmt', 'return_stmt': 'raise_stmt', @@ -917,7 +1005,8 @@ def testDictionaryMakerFormatting(self): 'if_stmt', 'while_stmt': 'for_stmt', }) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ _PYTHON_STATEMENTS = frozenset({ lambda x, y: 'simple_stmt': 'small_stmt', 'expr_stmt': 'print_stmt', @@ -934,14 +1023,16 @@ def testDictionaryMakerFormatting(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSimpleMultilineCode(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: aaaaaaaaaaaaaa.bbbbbbbbbbbbbb.ccccccc(zzzzzzzzzzzz, \ xxxxxxxxxxx, yyyyyyyyyyyy, vvvvvvvvv) aaaaaaaaaaaaaa.bbbbbbbbbbbbbb.ccccccc(zzzzzzzzzzzz, \ xxxxxxxxxxx, yyyyyyyyyyyy, vvvvvvvvv) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: aaaaaaaaaaaaaa.bbbbbbbbbbbbbb.ccccccc(zzzzzzzzzzzz, xxxxxxxxxxx, yyyyyyyyyyyy, vvvvvvvvv) @@ -952,7 +1043,8 @@ def testSimpleMultilineCode(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultilineComment(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if Foo: # Hello world # Yo man. 
@@ -965,14 +1057,15 @@ def testMultilineComment(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testSpaceBetweenStringAndParentheses(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ b = '0' ('hello') """) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMultilineString(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ code = textwrap.dedent('''\ if Foo: # Hello world @@ -986,7 +1079,8 @@ def testMultilineString(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ def f(): email_text += """This is a really long docstring that goes over the column limit and is multi-line.

Czar: """+despot["Nicholas"]+"""
@@ -995,7 +1089,8 @@ def f(): """ ''') # noqa - expected_formatted_code = textwrap.dedent('''\ + expected_formatted_code = textwrap.dedent( + '''\ def f(): email_text += """This is a really long docstring that goes over the column limit and is multi-line.

Czar: """ + despot["Nicholas"] + """
@@ -1008,7 +1103,8 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSimpleMultilineWithComments(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if ( # This is the first comment a and # This is the second comment # This is the third comment @@ -1020,12 +1116,14 @@ def testSimpleMultilineWithComments(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMatchingParenSplittingMatching(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def f(): raise RuntimeError('unable to find insertion point for target node', (target,)) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def f(): raise RuntimeError('unable to find insertion point for target node', (target,)) @@ -1034,7 +1132,8 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testContinuationIndent(self): - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ class F: def _ProcessArgLists(self, node): """Common method for processing argument lists.""" @@ -1044,7 +1143,8 @@ def _ProcessArgLists(self, node): child, subtype=_ARGLIST_TOKEN_TO_SUBTYPE.get( child.value, format_token.Subtype.NONE)) ''') - expected_formatted_code = textwrap.dedent('''\ + expected_formatted_code = textwrap.dedent( + '''\ class F: def _ProcessArgLists(self, node): @@ -1060,12 +1160,14 @@ def _ProcessArgLists(self, node): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testTrailingCommaAndBracket(self): - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ a = { 42, } b = ( 42, ) c = [ 42, ] ''') - expected_formatted_code = textwrap.dedent('''\ + expected_formatted_code = textwrap.dedent( + '''\ a = { 42, } @@ -1078,20 +1180,23 @@ def testTrailingCommaAndBracket(self): self.assertCodeEqual(expected_formatted_code, 
reformatter.Reformat(llines)) def testI18n(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ N_('Some years ago - never mind how long precisely - having little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world.') # A comment is here. """) # noqa llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ foo('Fake function call') #. Some years ago - never mind how long precisely - having little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world. """) # noqa llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testI18nCommentsInDataLiteral(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): return collections.OrderedDict({ #. First i18n comment. 
@@ -1105,7 +1210,8 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testClosingBracketIndent(self): - code = textwrap.dedent('''\ + code = textwrap.dedent( + '''\ def f(): def g(): @@ -1118,7 +1224,8 @@ def g(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testClosingBracketsInlinedInCall(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class Foo(object): def bar(self): @@ -1132,7 +1239,8 @@ def bar(self): "porkporkpork": 5, }) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class Foo(object): def bar(self): @@ -1150,7 +1258,8 @@ def bar(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testLineWrapInForExpression(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class A: def x(self, node, name, n=1): @@ -1163,7 +1272,7 @@ def x(self, node, name, n=1): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionCallContinuationLine(self): - code = """\ + code = """\ class foo: def bar(self, node, name, n=1): @@ -1177,7 +1286,8 @@ def bar(self, node, name, n=1): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testI18nNonFormatting(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class F(object): def __init__(self, fieldname, @@ -1189,7 +1299,8 @@ def __init__(self, fieldname, self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSpaceBetweenUnaryOpAndOpeningParen(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if ~(a or b): pass """) @@ -1197,7 +1308,8 @@ def testNoSpaceBetweenUnaryOpAndOpeningParen(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentBeforeFuncDef(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class Foo(object): a = 42 @@ -1215,7 +1327,8 @@ def __init__(self, self.assertCodeEqual(code, reformatter.Reformat(llines)) def 
testExcessLineCountWithDefaultKeywords(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class Fnord(object): def Moo(self): aaaaaaaaaaaaaaaa = self._bbbbbbbbbbbbbbbbbbbbbbb( @@ -1223,7 +1336,8 @@ def Moo(self): fffff=fffff, ggggggg=ggggggg, hhhhhhhhhhhhh=hhhhhhhhhhhhh, iiiiiii=iiiiiiiiiiiiii) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class Fnord(object): def Moo(self): @@ -1240,7 +1354,8 @@ def Moo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSpaceAfterNotOperator(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if not (this and that): pass """) @@ -1248,7 +1363,8 @@ def testSpaceAfterNotOperator(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoPenaltySplitting(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): if True: if True: @@ -1261,7 +1377,8 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testExpressionPenalties(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): if ((left.value == '(' and right.value == ')') or (left.value == '[' and right.value == ']') or @@ -1272,14 +1389,16 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testLineDepthOfSingleLineStatement(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ while True: continue for x in range(3): continue try: a = 42 except: b = 42 with open(a) as fd: a = fd.read() """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ while True: continue for x in range(3): @@ -1295,11 +1414,13 @@ def testLineDepthOfSingleLineStatement(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplitListWithTerminatingComma(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = 
textwrap.dedent( + """\ FOO = ['bar', 'baz', 'mux', 'qux', 'quux', 'quuux', 'quuuux', 'quuuuux', 'quuuuuux', 'quuuuuuux', lambda a, b: 37,] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ FOO = [ 'bar', 'baz', @@ -1318,7 +1439,8 @@ def testSplitListWithTerminatingComma(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplitListWithInterspersedComments(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ FOO = [ 'bar', # bar 'baz', # baz @@ -1337,7 +1459,7 @@ def testSplitListWithInterspersedComments(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testRelativeImportStatements(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ from ... import bork """) llines = yapf_test_helper.ParseAndUnwrap(code) @@ -1345,13 +1467,15 @@ def testRelativeImportStatements(self): def testSingleLineList(self): # A list on a single line should prefer to remain contiguous. - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb = aaaaaaaaaaa( ("...", "."), "..", ".............................................." 
) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb = aaaaaaaaaaa( ("...", "."), "..", "..............................................") """) # noqa @@ -1359,7 +1483,8 @@ def testSingleLineList(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankLinesBeforeFunctionsNotInColumnZero(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import signal @@ -1374,7 +1499,8 @@ def timeout(seconds=1): except: pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import signal try: @@ -1393,7 +1519,8 @@ def timeout(seconds=1): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoKeywordArgumentBreakage(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class A(object): def b(self): @@ -1405,7 +1532,7 @@ def b(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testTrailerOnSingleLine(self): - code = """\ + code = """\ urlpatterns = patterns('', url(r'^$', 'homepage_view'), url(r'^/login/$', 'login_view'), url(r'^/login/$', 'logout_view'), @@ -1415,7 +1542,8 @@ def testTrailerOnSingleLine(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testIfConditionalParens(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class Foo: def bar(): @@ -1428,7 +1556,8 @@ def bar(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testContinuationMarkers(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. "\\ "Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur "\\ "ante hendrerit. Donec et mollis dolor. 
Praesent et diam eget libero egestas mattis "\\ @@ -1438,14 +1567,16 @@ def testContinuationMarkers(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ from __future__ import nested_scopes, generators, division, absolute_import, with_statement, \\ print_function, unicode_literals """) # noqa llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if aaaaaaaaa == 42 and bbbbbbbbbbbbbb == 42 and \\ cccccccc == 42: pass @@ -1454,7 +1585,8 @@ def testContinuationMarkers(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsWithContinuationMarkers(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def fn(arg): v = fn2(key1=True, #c1 @@ -1465,7 +1597,8 @@ def fn(arg): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMultipleContinuationMarkers(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ xyz = \\ \\ some_thing() @@ -1474,7 +1607,7 @@ def testMultipleContinuationMarkers(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testContinuationMarkerAfterStringWithContinuation(self): - code = """\ + code = """\ s = 'foo \\ bar' \\ .format() @@ -1483,7 +1616,8 @@ def testContinuationMarkerAfterStringWithContinuation(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testEmptyContainers(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ flags.DEFINE_list( 'output_dirs', [], 'Lorem ipsum dolor sit amet, consetetur adipiscing elit. Donec a diam lectus. 
' @@ -1493,10 +1627,12 @@ def testEmptyContainers(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testSplitStringsIfSurroundedByParens(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a = foo.bar({'xxxxxxxxxxxxxxxxxxxxxxx' 'yyyyyyyyyyyyyyyyyyyyyyyyyy': baz[42]} + 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbb' 'cccccccccccccccccccccccccccccccc' 'ddddddddddddddddddddddddddddd') """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a = foo.bar({'xxxxxxxxxxxxxxxxxxxxxxx' 'yyyyyyyyyyyyyyyyyyyyyyyyyy': baz[42]} + 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' @@ -1507,7 +1643,8 @@ def testSplitStringsIfSurroundedByParens(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ a = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' \ 'bbbbbbbbbbbbbbbbbbbbbbbbbb' 'cccccccccccccccccccccccccccccccc' \ 'ddddddddddddddddddddddddddddd' @@ -1516,7 +1653,8 @@ def testSplitStringsIfSurroundedByParens(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMultilineShebang(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ #!/bin/sh if "true" : '''\' then @@ -1536,7 +1674,8 @@ def testMultilineShebang(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSplittingAroundTermOperators(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ a_very_long_function_call_yada_yada_etc_etc_etc(long_arg1, long_arg2 / long_arg3) """) @@ -1544,7 +1683,8 @@ def testNoSplittingAroundTermOperators(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSplittingAroundCompOperators(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa is not 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa in bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa not in bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) @@ -1552,7 +1692,8 @@ def testNoSplittingAroundCompOperators(self): c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa is bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa <= bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) """) # noqa - expected_code = textwrap.dedent("""\ + expected_code = textwrap.dedent( + """\ c = ( aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa is not bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) @@ -1574,7 +1715,8 @@ def testNoSplittingAroundCompOperators(self): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testNoSplittingWithinSubscriptList(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ somequitelongvariablename.somemember[(a, b)] = { 'somelongkey': 1, 'someotherlongkey': 2 @@ -1584,7 +1726,8 @@ def testNoSplittingWithinSubscriptList(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testExcessCharacters(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class foo: def bar(self): @@ -1595,14 +1738,16 @@ def bar(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): if True: if True: if contract == allow_contract and attr_dict.get(if_attribute) == has_value: return True """) # noqa - expected_code = textwrap.dedent("""\ + expected_code = textwrap.dedent( + """\ def _(): if True: if True: @@ -1614,7 +1759,8 @@ def _(): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testDictSetGenerator(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ foo = { variable: 'hello world. How are you today?' 
for variable in fnord @@ -1625,7 +1771,8 @@ def testDictSetGenerator(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testUnaryOpInDictionaryValue(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ beta = "123" test = {'alpha': beta[-1]} @@ -1636,7 +1783,8 @@ def testUnaryOpInDictionaryValue(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testUnaryNotOperator(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if True: if True: if True: @@ -1648,7 +1796,7 @@ def testUnaryNotOperator(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testRelaxArraySubscriptAffinity(self): - code = """\ + code = """\ class A(object): def f(self, aaaaaaaaa, bbbbbbbbbbbbb, row): @@ -1664,17 +1812,18 @@ def f(self, aaaaaaaaa, bbbbbbbbbbbbb, row): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionCallInDict(self): - code = "a = {'a': b(c=d, **e)}\n" + code = "a = {'a': b(c=d, **e)}\n" llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionCallInNestedDict(self): - code = "a = {'a': {'a': {'a': b(c=d, **e)}}}\n" + code = "a = {'a': {'a': {'a': b(c=d, **e)}}}\n" llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testUnbreakableNot(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def test(): if not "Foooooooooooooooooooooooooooooo" or "Foooooooooooooooooooooooooooooo" == "Foooooooooooooooooooooooooooooo": pass @@ -1683,7 +1832,8 @@ def test(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testSplitListWithComment(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ a = [ 'a', 'b', @@ -1694,7 +1844,8 @@ def testSplitListWithComment(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testOverColumnLimit(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + 
"""\ class Test: def testSomething(self): @@ -1704,7 +1855,8 @@ def testSomething(self): ('aaaaaaaaaaaaa', 'bbbb'): 'ccccccccccccccccccccccccccccccccccccccccccc', } """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class Test: def testSomething(self): @@ -1721,7 +1873,8 @@ def testSomething(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testEndingComment(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ a = f( a="something", b="something requiring comment which is quite long", # comment about b (pushes line over 79) @@ -1731,7 +1884,8 @@ def testEndingComment(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testContinuationSpaceRetention(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def fn(): return module \\ .method(Object(data, @@ -1742,7 +1896,8 @@ def fn(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testIfExpressionWithFunctionCall(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if x or z.y( a, c, @@ -1754,7 +1909,8 @@ def testIfExpressionWithFunctionCall(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testUnformattedAfterMultilineString(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def foo(): com_text = \\ ''' @@ -1765,7 +1921,8 @@ def foo(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSpacesAroundKeywordDefaultValues(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ sources = { 'json': request.get_json(silent=True) or {}, 'json2': request.get_json(silent=True), @@ -1776,12 +1933,14 @@ def testNoSpacesAroundKeywordDefaultValues(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSplittingBeforeEndingSubscriptBracket(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: if True: status = 
cf.describe_stacks(StackName=stackname)[u'Stacks'][0][u'StackStatus'] """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: if True: status = cf.describe_stacks( @@ -1791,7 +1950,8 @@ def testNoSplittingBeforeEndingSubscriptBracket(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoSplittingOnSingleArgument(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ xxxxxxxxxxxxxx = (re.search(r'(\\d+\\.\\d+\\.\\d+\\.)\\d+', aaaaaaa.bbbbbbbbbbbb).group(1) + re.search(r'\\d+\\.\\d+\\.\\d+\\.(\\d+)', @@ -1801,7 +1961,8 @@ def testNoSplittingOnSingleArgument(self): re.search(r'\\d+\\.\\d+\\.\\d+\\.(\\d+)', ccccccc).group(c.d)) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ xxxxxxxxxxxxxx = ( re.search(r'(\\d+\\.\\d+\\.\\d+\\.)\\d+', aaaaaaa.bbbbbbbbbbbb).group(1) + re.search(r'\\d+\\.\\d+\\.\\d+\\.(\\d+)', ccccccc).group(1)) @@ -1813,13 +1974,15 @@ def testNoSplittingOnSingleArgument(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplittingArraysSensibly(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ while True: while True: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = list['bbbbbbbbbbbbbbbbbbbbbbbbb'].split(',') aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = list('bbbbbbbbbbbbbbbbbbbbbbbbb').split(',') """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ while True: while True: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = list[ @@ -1831,13 +1994,15 @@ def testSplittingArraysSensibly(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testComprehensionForAndIf(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class f: def __repr__(self): tokens_repr = 
','.join(['{0}({1!r})'.format(tok.name, tok.value) for tok in self._tokens]) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class f: def __repr__(self): @@ -1848,7 +2013,8 @@ def __repr__(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testFunctionCallArguments(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def f(): if True: pytree_utils.InsertNodesBefore(_CreateCommentsFromPrefix( @@ -1858,7 +2024,8 @@ def f(): comment_prefix, comment_lineno, comment_column, standalone=True)) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def f(): if True: pytree_utils.InsertNodesBefore( @@ -1873,18 +2040,21 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBinaryOperators(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a = b ** 37 c = (20 ** -3) / (_GRID_ROWS ** (code_length - 10)) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a = b**37 c = (20**-3) / (_GRID_ROWS**(code_length - 10)) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): if True: if (self.stack[-1].split_before_closing_bracket and @@ -1897,7 +2067,8 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testContiguousList(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ [retval1, retval2] = a_very_long_function(argument_1, argument2, argument_3, argument_4) """) # noqa @@ -1905,7 +2076,8 @@ def testContiguousList(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testArgsAndKwargsFormatting(self): - code = textwrap.dedent("""\ + code = 
textwrap.dedent( + """\ a(a=aaaaaaaaaaaaaaaaaaaaa, b=aaaaaaaaaaaaaaaaaaaaaaaa, c=aaaaaaaaaaaaaaaaaa, @@ -1915,7 +2087,8 @@ def testArgsAndKwargsFormatting(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def foo(): return [ Bar(xxx='some string', @@ -1927,7 +2100,8 @@ def foo(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentColumnLimitOverflow(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): if True: TaskManager.get_tags = MagicMock( @@ -1940,7 +2114,8 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMultilineLambdas(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class SomeClass(object): do_something = True @@ -1951,7 +2126,8 @@ def succeeded(self, dddddddddddddd): d.addCallback(lambda _: self.aaaaaa.bbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccccccc(dddddddddddddd)) return d """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class SomeClass(object): do_something = True @@ -1969,13 +2145,14 @@ def succeeded(self, dddddddddddddd): style.CreateStyleFromConfig( '{based_on_style: yapf, allow_multiline_lambdas: true}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testMultilineDictionaryKeys(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ MAP_WITH_LONG_KEYS = { ('lorem ipsum', 'dolor sit amet'): 1, @@ -1985,7 +2162,8 @@ def testMultilineDictionaryKeys(self): 3 } """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ 
MAP_WITH_LONG_KEYS = { ('lorem ipsum', 'dolor sit amet'): 1, @@ -1999,16 +2177,18 @@ def testMultilineDictionaryKeys(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: yapf, ' - 'allow_multiline_dictionary_keys: true}')) + style.CreateStyleFromConfig( + '{based_on_style: yapf, ' + 'allow_multiline_dictionary_keys: true}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testStableDictionaryFormatting(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class A(object): def method(self): @@ -2025,15 +2205,16 @@ def method(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: pep8, indent_width: 2, ' - 'continuation_indent_width: 4, ' - 'indent_dictionary_value: True}')) + style.CreateStyleFromConfig( + '{based_on_style: pep8, indent_width: 2, ' + 'continuation_indent_width: 4, ' + 'indent_dictionary_value: True}')) - llines = yapf_test_helper.ParseAndUnwrap(code) + llines = yapf_test_helper.ParseAndUnwrap(code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) finally: @@ -2042,12 +2223,14 @@ def method(self): def testStableInlinedDictionaryFormatting(self): try: style.SetGlobalStyle(style.CreatePEP8Style()) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): url = "http://{0}/axis-cgi/admin/param.cgi?{1}".format( value, urllib.urlencode({'action': 'update', 'parameter': value})) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = 
textwrap.dedent( + """\ def _(): url = "http://{0}/axis-cgi/admin/param.cgi?{1}".format( value, urllib.urlencode({ @@ -2056,23 +2239,25 @@ def _(): })) """) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testDontSplitKeywordValueArguments(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def mark_game_scored(gid): _connect.execute(_games.update().where(_games.c.gid == gid).values( scored=True)) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def mark_game_scored(gid): _connect.execute( _games.update().where(_games.c.gid == gid).values(scored=True)) @@ -2081,7 +2266,8 @@ def mark_game_scored(gid): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDontAddBlankLineAfterMultilineString(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ query = '''SELECT id FROM table WHERE day in {}''' @@ -2091,7 +2277,8 @@ def testDontAddBlankLineAfterMultilineString(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFormattingListComprehensions(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def a(): if True: if True: @@ -2105,7 +2292,8 @@ def a(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSplittingWhenBinPacking(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ a_very_long_function_name( long_argument_name_1=1, long_argument_name_2=2, @@ -2127,23 +2315,25 @@ def 
testNoSplittingWhenBinPacking(self): 'dedent_closing_brackets: True, ' 'split_before_named_assigns: False}')) - llines = yapf_test_helper.ParseAndUnwrap(code) + llines = yapf_test_helper.ParseAndUnwrap(code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testNotSplittingAfterSubscript(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if not aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.b(c == d[ 'eeeeee']).ffffff(): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if not aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.b( c == d['eeeeee']).ffffff(): pass @@ -2152,7 +2342,8 @@ def testNotSplittingAfterSubscript(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplittingOneArgumentList(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): if True: if True: @@ -2161,7 +2352,8 @@ def _(): if True: boxes[id_] = np.concatenate((points.min(axis=0), qoints.max(axis=0))) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def _(): if True: if True: @@ -2175,7 +2367,8 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplittingBeforeFirstElementListArgument(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class _(): @classmethod def _pack_results_for_constraint_or(cls, combination, constraints): @@ -2188,7 +2381,8 @@ def _pack_results_for_constraint_or(cls, combination, constraints): ), constraints, 
InvestigationResult.OR ) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class _(): @classmethod @@ -2204,7 +2398,8 @@ def _pack_results_for_constraint_or(cls, combination, constraints): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplittingArgumentsTerminatedByComma(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ function_name(argument_name_1=1, argument_name_2=2, argument_name_3=3) function_name(argument_name_1=1, argument_name_2=2, argument_name_3=3,) @@ -2215,7 +2410,8 @@ def testSplittingArgumentsTerminatedByComma(self): r =f0 (1, 2,3,) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ function_name(argument_name_1=1, argument_name_2=2, argument_name_3=3) function_name( @@ -2250,18 +2446,19 @@ def testSplittingArgumentsTerminatedByComma(self): '{based_on_style: yapf, ' 'split_arguments_when_comma_terminated: True}')) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testImportAsList(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ from toto import titi, tata, tutu # noqa from toto import titi, tata, tutu from toto import (titi, tata, tutu) @@ -2270,7 +2467,8 @@ def testImportAsList(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDictionaryValuesOnOwnLines(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = 
textwrap.dedent( + """\ a = { 'aaaaaaaaaaaaaaaaaaaaaaaa': Check('ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ', '=', True), @@ -2294,7 +2492,8 @@ def testDictionaryValuesOnOwnLines(self): Check('QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ', '=', False), } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a = { 'aaaaaaaaaaaaaaaaaaaaaaaa': Check('ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ', '=', True), @@ -2322,27 +2521,31 @@ def testDictionaryValuesOnOwnLines(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDictionaryOnOwnLine(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ doc = test_utils.CreateTestDocumentViaController( content={ 'a': 'b' }, branch_key=branch.key, collection_key=collection.key) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ doc = test_utils.CreateTestDocumentViaController( content={'a': 'b'}, branch_key=branch.key, collection_key=collection.key) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ doc = test_utils.CreateTestDocumentViaController( content={ 'a': 'b' }, branch_key=branch.key, collection_key=collection.key, collection_key2=collection.key2) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ doc = test_utils.CreateTestDocumentViaController( content={'a': 'b'}, branch_key=branch.key, @@ -2353,7 +2556,8 @@ def testDictionaryOnOwnLine(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNestedListsInDictionary(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ _A = { 'cccccccccc': ('^^1',), 'rrrrrrrrrrrrrrrrrrrrrrrrr': ('^7913', # 
AAAAAAAAAAAAAA. @@ -2382,7 +2586,8 @@ def testNestedListsInDictionary(self): ), } """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ _A = { 'cccccccccc': ('^^1',), 'rrrrrrrrrrrrrrrrrrrrrrrrr': ( @@ -2420,7 +2625,8 @@ def testNestedListsInDictionary(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNestedDictionary(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class _(): def _(): breadcrumbs = [{'name': 'Admin', @@ -2430,7 +2636,8 @@ def _(): 'url': url_for(".home")}, {'title': title}] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class _(): def _(): breadcrumbs = [ @@ -2448,7 +2655,8 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDictionaryElementsOnOneLine(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): @mock.patch.dict( @@ -2468,10 +2676,12 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNotInParams(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ list("a long line to break the line. a long line to break the brk a long lin", not True) """) # noqa - expected_code = textwrap.dedent("""\ + expected_code = textwrap.dedent( + """\ list("a long line to break the line. 
a long line to break the brk a long lin", not True) """) # noqa @@ -2479,14 +2689,16 @@ def testNotInParams(self): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testNamedAssignNotAtEndOfLine(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): if True: with py3compat.open_with_encoding(filename, mode='w', encoding=encoding) as fd: pass """) - expected_code = textwrap.dedent("""\ + expected_code = textwrap.dedent( + """\ def _(): if True: with py3compat.open_with_encoding( @@ -2497,7 +2709,8 @@ def _(): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testBlankLineBeforeClassDocstring(self): - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ class A: """Does something. @@ -2508,7 +2721,8 @@ class A: def __init__(self): pass ''') - expected_code = textwrap.dedent('''\ + expected_code = textwrap.dedent( + '''\ class A: """Does something. @@ -2521,7 +2735,8 @@ def __init__(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ class A: """Does something. @@ -2532,7 +2747,8 @@ class A: def __init__(self): pass ''') - expected_formatted_code = textwrap.dedent('''\ + expected_formatted_code = textwrap.dedent( + '''\ class A: """Does something. 
@@ -2551,13 +2767,14 @@ def __init__(self): 'blank_line_before_class_docstring: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testBlankLineBeforeModuleDocstring(self): - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ #!/usr/bin/env python # -*- coding: utf-8 name> -*- @@ -2567,7 +2784,8 @@ def testBlankLineBeforeModuleDocstring(self): def foobar(): pass ''') - expected_code = textwrap.dedent('''\ + expected_code = textwrap.dedent( + '''\ #!/usr/bin/env python # -*- coding: utf-8 name> -*- """Some module docstring.""" @@ -2579,7 +2797,8 @@ def foobar(): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ #!/usr/bin/env python # -*- coding: utf-8 name> -*- """Some module docstring.""" @@ -2588,7 +2807,8 @@ def foobar(): def foobar(): pass ''') - expected_formatted_code = textwrap.dedent('''\ + expected_formatted_code = textwrap.dedent( + '''\ #!/usr/bin/env python # -*- coding: utf-8 name> -*- @@ -2606,18 +2826,20 @@ def foobar(): 'blank_line_before_module_docstring: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testTupleCohesion(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def f(): this_is_a_very_long_function_name(an_extremely_long_variable_name, ( 'a string that may be too long %s' % 'M15')) """) - expected_code = textwrap.dedent("""\ + 
expected_code = textwrap.dedent( + """\ def f(): this_is_a_very_long_function_name( an_extremely_long_variable_name, @@ -2627,14 +2849,15 @@ def f(): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testSubscriptExpression(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ foo = d[not a] """) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testListWithFunctionCalls(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): return [ Bar( @@ -2646,7 +2869,8 @@ def foo(): zzz='a third long string') ] """) - expected_code = textwrap.dedent("""\ + expected_code = textwrap.dedent( + """\ def foo(): return [ Bar(xxx='some string', @@ -2661,11 +2885,13 @@ def foo(): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testEllipses(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ X=... Y = X if ... else X """) - expected_code = textwrap.dedent("""\ + expected_code = textwrap.dedent( + """\ X = ... Y = X if ... 
else X """) @@ -2679,7 +2905,7 @@ def testPseudoParens(self): {'nested_key': 1, }, } """ - expected_code = """\ + expected_code = """\ my_dict = { 'key': # Some comment about the key { @@ -2687,16 +2913,18 @@ def testPseudoParens(self): }, } """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testSplittingBeforeFirstArgumentOnFunctionCall(self): """Tests split_before_first_argument on a function call.""" - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a_very_long_function_name("long string with formatting {0:s}".format( "mystring")) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a_very_long_function_name( "long string with formatting {0:s}".format("mystring")) """) @@ -2707,19 +2935,21 @@ def testSplittingBeforeFirstArgumentOnFunctionCall(self): '{based_on_style: yapf, split_before_first_argument: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testSplittingBeforeFirstArgumentOnFunctionDefinition(self): """Tests split_before_first_argument on a function definition.""" - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _GetNumberOfSecondsFromElements(year, month, day, hours, minutes, seconds, microseconds): return """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def _GetNumberOfSecondsFromElements( year, month, day, hours, minutes, seconds, microseconds): return @@ -2731,21 +2961,23 @@ def _GetNumberOfSecondsFromElements( '{based_on_style: yapf, split_before_first_argument: True}')) llines = 
yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testSplittingBeforeFirstArgumentOnCompoundStatement(self): """Tests split_before_first_argument on a compound statement.""" - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if (long_argument_name_1 == 1 or long_argument_name_2 == 2 or long_argument_name_3 == 3 or long_argument_name_4 == 4): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if (long_argument_name_1 == 1 or long_argument_name_2 == 2 or long_argument_name_3 == 3 or long_argument_name_4 == 4): pass @@ -2757,14 +2989,15 @@ def testSplittingBeforeFirstArgumentOnCompoundStatement(self): '{based_on_style: yapf, split_before_first_argument: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testCoalesceBracketsOnDict(self): """Tests coalesce_brackets on a dictionary.""" - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ date_time_values = ( { u'year': year, @@ -2776,7 +3009,8 @@ def testCoalesceBracketsOnDict(self): } ) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ date_time_values = ({ u'year': year, u'month': month, @@ -2793,13 +3027,14 @@ def testCoalesceBracketsOnDict(self): '{based_on_style: yapf, coalesce_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, 
reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testSplitAfterComment(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if __name__ == "__main__": with another_resource: account = { @@ -2824,7 +3059,8 @@ def testAsyncAsNonKeyword(self): style.SetGlobalStyle(style.CreatePEP8Style()) # In Python 2, async may be used as a non-keyword identifier. - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ from util import async @@ -2846,8 +3082,9 @@ def testDisableEndingCommaHeuristic(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: yapf,' - ' disable_ending_comma_heuristic: True}')) + style.CreateStyleFromConfig( + '{based_on_style: yapf,' + ' disable_ending_comma_heuristic: True}')) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) @@ -2855,7 +3092,8 @@ def testDisableEndingCommaHeuristic(self): style.SetGlobalStyle(style.CreateYapfStyle()) def testDedentClosingBracketsWithTypeAnnotationExceedingLineLength(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass @@ -2863,7 +3101,8 @@ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None def function(first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def function( first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None ) -> None: @@ -2878,17 +3117,19 @@ def function( try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: yapf,' - ' dedent_closing_brackets: True}')) + style.CreateStyleFromConfig( + '{based_on_style: yapf,' + ' dedent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - 
self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsWithTypeAnnotationExceedingLineLength(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass @@ -2896,7 +3137,8 @@ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None def function(first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def function( first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None ) -> None: @@ -2911,17 +3153,19 @@ def function( try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig( + '{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsInFunctionCall(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None, third_and_final_argument=True): pass @@ -2929,7 +3173,8 @@ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None, third_a def function(first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_and_last_argument=None): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def function( first_argument_xxxxxxxxxxxxxxxx=(0,), 
second_argument=None, @@ -2946,17 +3191,19 @@ def function( try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig( + '{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsInTuple(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def function(): some_var = ('a long element', 'another long element', 'short element', 'really really long element') return True @@ -2965,7 +3212,8 @@ def function(): some_var = ('a couple', 'small', 'elemens') return False """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def function(): some_var = ( 'a long element', 'another long element', 'short element', @@ -2981,17 +3229,19 @@ def function(): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig( + '{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsInList(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def function(): some_var = ['a long element', 'another long element', 'short element', 'really really long element'] return True @@ -3000,7 +3250,8 @@ def function(): some_var = ['a couple', 'small', 'elemens'] return False """) # 
noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def function(): some_var = [ 'a long element', 'another long element', 'short element', @@ -3016,17 +3267,19 @@ def function(): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig( + '{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsInDict(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def function(): some_var = {1: ('a long element', 'and another really really long element that is really really amazingly long'), 2: 'another long element', 3: 'short element', 4: 'really really long element'} return True @@ -3035,7 +3288,8 @@ def function(): some_var = {1: 'a couple', 2: 'small', 3: 'elemens'} return False """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def function(): some_var = { 1: @@ -3057,17 +3311,19 @@ def function(): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig( + '{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testMultipleDictionariesInList(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class A: def 
b(): d = { @@ -3093,7 +3349,8 @@ def b(): ] } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class A: def b(): @@ -3125,9 +3382,10 @@ def testForceMultilineDict_True(self): style.CreateStyleFromConfig('{force_multiline_dict: true}')) unformatted_code = textwrap.dedent( "responseDict = {'childDict': {'spam': 'eggs'}}\n") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - actual = reformatter.Reformat(llines) - expected = textwrap.dedent("""\ + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + actual = reformatter.Reformat(llines) + expected = textwrap.dedent( + """\ responseDict = { 'childDict': { 'spam': 'eggs' @@ -3142,23 +3400,26 @@ def testForceMultilineDict_False(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{force_multiline_dict: false}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ responseDict = {'childDict': {'spam': 'eggs'}} """) expected_formatted_code = unformatted_code - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) @unittest.skipUnless(py3compat.PY38, 'Requires Python 3.8') def testWalrus(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if (x := len([1]*1000)>100): print(f'{x} is pretty big' ) """) - expected = textwrap.dedent("""\ + expected = textwrap.dedent( + """\ if (x := len([1] * 1000) > 100): print(f'{x} is pretty big') """) @@ -3170,21 +3431,23 @@ def testAlignAssignBlankLineInbetween(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_assignment: true}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ val_first = 1 
val_second += 2 val_third = 3 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ val_first = 1 val_second += 2 val_third = 3 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) @@ -3194,21 +3457,23 @@ def testAlignAssignCommentLineInbetween(self): style.CreateStyleFromConfig( '{align_assignment: true,' 'new_alignment_after_commentline = true}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ val_first = 1 val_second += 2 # comment val_third = 3 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ val_first = 1 val_second += 2 # comment val_third = 3 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) @@ -3216,7 +3481,8 @@ def testAlignAssignDefLineInbetween(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_assignment: true}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ val_first = 1 val_second += 2 def fun(): @@ -3224,7 +3490,8 @@ def fun(): abc = '' val_third = 3 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ val_first = 1 val_second += 2 @@ -3237,8 +3504,8 @@ def fun(): val_third = 3 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: 
style.SetGlobalStyle(style.CreateYapfStyle()) @@ -3246,7 +3513,8 @@ def testAlignAssignObjectWithNewLineInbetween(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_assignment: true}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ val_first = 1 val_second += 2 object = { @@ -3256,7 +3524,8 @@ def testAlignAssignObjectWithNewLineInbetween(self): } val_third = 3 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ val_first = 1 val_second += 2 object = { @@ -3267,8 +3536,8 @@ def testAlignAssignObjectWithNewLineInbetween(self): val_third = 3 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) @@ -3276,13 +3545,13 @@ def testAlignAssignWithOnlyOneAssignmentLine(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_assignment: true}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ val_first = 1 """) expected_formatted_code = unformatted_code - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) diff --git a/yapftests/reformatter_buganizer_test.py b/yapftests/reformatter_buganizer_test.py index 54a62b588..d8beb04cb 100644 --- a/yapftests/reformatter_buganizer_test.py +++ b/yapftests/reformatter_buganizer_test.py @@ -29,7 +29,7 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreateYapfStyle()) def testB137580392(self): - code = """\ + code = """\ def _create_testing_simulator_and_sink( ) -> 
Tuple[_batch_simulator:_batch_simulator.BatchSimulator, _batch_simulator.SimulationSink]: @@ -39,7 +39,7 @@ def _create_testing_simulator_and_sink( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB73279849(self): - unformatted_code = """\ + unformatted_code = """\ class A: def _(a): return 'hello' [ a ] @@ -49,11 +49,11 @@ class A: def _(a): return 'hello'[a] """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB122455211(self): - unformatted_code = """\ + unformatted_code = """\ _zzzzzzzzzzzzzzzzzzzz = Union[sssssssssssssssssssss.pppppppppppppppp, sssssssssssssssssssss.pppppppppppppppppppppppppppp] """ @@ -62,11 +62,11 @@ def testB122455211(self): sssssssssssssssssssss.pppppppppppppppp, sssssssssssssssssssss.pppppppppppppppppppppppppppp] """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB119300344(self): - code = """\ + code = """\ def _GenerateStatsEntries( process_id: Text, timestamp: Optional[rdfvalue.RDFDatetime] = None @@ -77,7 +77,7 @@ def _GenerateStatsEntries( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB132886019(self): - code = """\ + code = """\ X = { 'some_dict_key': frozenset([ @@ -90,7 +90,7 @@ def testB132886019(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB26521719(self): - code = """\ + code = """\ class _(): def _(self): @@ -101,7 +101,7 @@ def _(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB122541552(self): - code = """\ + code = """\ # pylint: disable=g-explicit-bool-comparison,singleton-comparison _QUERY = account.Account.query(account.Account.enabled == True) # pylint: enable=g-explicit-bool-comparison,singleton-comparison @@ -114,7 +114,7 
@@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB124415889(self): - code = """\ + code = """\ class _(): def run_queue_scanners(): @@ -137,7 +137,7 @@ def modules_to_install(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB73166511(self): - code = """\ + code = """\ def _(): if min_std is not None: groundtruth_age_variances = tf.maximum(groundtruth_age_variances, @@ -147,7 +147,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB118624921(self): - code = """\ + code = """\ def _(): function_call( alert_name='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', @@ -160,7 +160,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB35417079(self): - code = """\ + code = """\ class _(): def _(): @@ -175,7 +175,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB120047670(self): - unformatted_code = """\ + unformatted_code = """\ X = { 'NO_PING_COMPONENTS': [ 79775, # Releases / FOO API @@ -195,11 +195,11 @@ def testB120047670(self): 'PING_BLOCKED_BUGS': False, } """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB120245013(self): - unformatted_code = """\ + unformatted_code = """\ class Foo(object): def testNoAlertForShortPeriod(self, rutabaga): self.targets[:][streamz_path,self._fillInOtherFields(streamz_path, {streamz_field_of_interest:True})] = series.Counter('1s', '+ 500x10000') @@ -213,11 +213,11 @@ def testNoAlertForShortPeriod(self, rutabaga): self._fillInOtherFields(streamz_path, {streamz_field_of_interest: True} )] = series.Counter('1s', '+ 500x10000') """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB117841880(self): - code = """\ + 
code = """\ def xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx( aaaaaaaaaaaaaaaaaaa: AnyStr, bbbbbbbbbbbb: Optional[Sequence[AnyStr]] = None, @@ -234,7 +234,7 @@ def xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB111764402(self): - unformatted_code = """\ + unformatted_code = """\ x = self.stubs.stub(video_classification_map, 'read_video_classifications', (lambda external_ids, **unused_kwargs: {external_id: self._get_serving_classification('video') for external_id in external_ids})) """ # noqa expected_formatted_code = """\ @@ -244,11 +244,11 @@ def testB111764402(self): for external_id in external_ids })) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB116825060(self): - code = """\ + code = """\ result_df = pd.DataFrame({LEARNED_CTR_COLUMN: learned_ctr}, index=df_metrics.index) """ @@ -256,7 +256,7 @@ def testB116825060(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB112711217(self): - code = """\ + code = """\ def _(): stats['moderated'] = ~stats.moderation_reason.isin( approved_moderation_reasons) @@ -265,7 +265,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB112867548(self): - unformatted_code = """\ + unformatted_code = """\ def _(): return flask.make_response( 'Records: {}, Problems: {}, More: {}'.format( @@ -283,11 +283,11 @@ def _(): httplib.ACCEPTED if process_result.has_more else httplib.OK, {'content-type': _TEXT_CONTEXT_TYPE}) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB112651423(self): - unformatted_code = """\ + unformatted_code = """\ def potato(feeditems, browse_use_case=None): for item in turnip: if kumquat: 
@@ -302,11 +302,11 @@ def potato(feeditems, browse_use_case=None): 'FEEDS_LOAD_PLAYLIST_VIDEOS_FOR_ALL_ITEMS'] and item.video: continue """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB80484938(self): - code = """\ + code = """\ for sssssss, aaaaaaaaaa in [ ('ssssssssssssssssssss', 'sssssssssssssssssssssssss'), ('nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn', @@ -349,7 +349,7 @@ def testB80484938(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB120771563(self): - code = """\ + code = """\ class A: def b(): @@ -376,7 +376,7 @@ def b(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB79462249(self): - code = """\ + code = """\ foo.bar(baz, [ quux(thud=42), norf, @@ -398,7 +398,7 @@ def testB79462249(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB113210278(self): - unformatted_code = """\ + unformatted_code = """\ def _(): aaaaaaaaaaa = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc(\ eeeeeeeeeeeeeeeeeeeeeeeeee.fffffffffffffffffffffffffffffffffffffff.\ @@ -410,11 +410,11 @@ def _(): eeeeeeeeeeeeeeeeeeeeeeeeee.fffffffffffffffffffffffffffffffffffffff .ggggggggggggggggggggggggggggggggg.hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh()) """ # noqa - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB77923341(self): - code = """\ + code = """\ def f(): if (aaaaaaaaaaaaaa.bbbbbbbbbbbb.ccccc <= 0 and # pytype: disable=attribute-error ddddddddddd.eeeeeeeee == constants.FFFFFFFFFFFFFF): @@ -424,7 +424,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB77329955(self): - code = """\ + code = """\ class _(): @parameterized.named_parameters( @@ -442,7 +442,7 @@ def _(): 
self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB65197969(self): - unformatted_code = """\ + unformatted_code = """\ class _(): def _(): @@ -457,11 +457,11 @@ def _(): seconds=max(float(time_scale), small_interval) * 1.41**min(num_attempts, 9)) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB65546221(self): - unformatted_code = """\ + unformatted_code = """\ SUPPORTED_PLATFORMS = ( "centos-6", "centos-7", @@ -484,11 +484,11 @@ def testB65546221(self): "debian-9-stretch", ) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30500455(self): - unformatted_code = """\ + unformatted_code = """\ INITIAL_SYMTAB = dict([(name, 'exception#' + name) for name in INITIAL_EXCEPTIONS ] * [(name, 'type#' + name) for name in INITIAL_TYPES] + [ (name, 'function#' + name) for name in INITIAL_FUNCTIONS @@ -501,11 +501,11 @@ def testB30500455(self): [(name, 'function#' + name) for name in INITIAL_FUNCTIONS] + [(name, 'const#' + name) for name in INITIAL_CONSTS]) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB38343525(self): - code = """\ + code = """\ # This does foo. @arg.String('some_path_to_a_file', required=True) # This does bar. 
@@ -517,7 +517,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB37099651(self): - unformatted_code = """\ + unformatted_code = """\ _MEMCACHE = lazy.MakeLazy( # pylint: disable=g-long-lambda lambda: function.call.mem.clients(FLAGS.some_flag_thingy, default_namespace=_LAZY_MEM_NAMESPACE, allow_pickle=True) @@ -534,11 +534,11 @@ def testB37099651(self): # pylint: enable=g-long-lambda ) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB33228502(self): - unformatted_code = """\ + unformatted_code = """\ def _(): success_rate_stream_table = module.Precompute( query_function=module.DefineQueryFunction( @@ -572,11 +572,11 @@ def _(): | m.Join('successes', 'total') | m.Point(m.VAL['successes'] / m.VAL['total'])))) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30394228(self): - code = """\ + code = """\ class _(): def _(self): @@ -589,7 +589,7 @@ def _(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB65246454(self): - unformatted_code = """\ + unformatted_code = """\ class _(): def _(self): @@ -605,11 +605,11 @@ def _(self): self.assertEqual({i.id for i in successful_instances}, {i.id for i in self._statuses.successful_instances}) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB67935450(self): - unformatted_code = """\ + unformatted_code = """\ def _(): return ( (Gauge( @@ -646,11 +646,11 @@ def _(): m.Cond(m.VAL['start'] != 0, m.VAL['start'], m.TimestampMicros() / 1000000L))) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) 
+ llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB66011084(self): - unformatted_code = """\ + unformatted_code = """\ X = { "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": # Comment 1. ([] if True else [ # Comment 2. @@ -678,22 +678,22 @@ def testB66011084(self): ]), } """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB67455376(self): - unformatted_code = """\ + unformatted_code = """\ sponge_ids.extend(invocation.id() for invocation in self._client.GetInvocationsByLabels(labels)) """ # noqa expected_formatted_code = """\ sponge_ids.extend(invocation.id() for invocation in self._client.GetInvocationsByLabels(labels)) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB35210351(self): - unformatted_code = """\ + unformatted_code = """\ def _(): config.AnotherRuleThing( 'the_title_to_the_thing_here', @@ -719,11 +719,11 @@ def _(): GetTheAlertToIt('the_title_to_the_thing_here'), GetNotificationTemplate('your_email_here'))) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB34774905(self): - unformatted_code = """\ + unformatted_code = """\ x=[VarExprType(ir_name=IrName( value='x', expr_type=UnresolvedAttrExprType( atom=UnknownExprType(), attr_name=IrName( value='x', expr_type=UnknownExprType(), usage='UNKNOWN', fqn=None, @@ -748,18 +748,18 @@ def testB34774905(self): astn=None)) ] """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) 
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB65176185(self): - code = """\ + code = """\ xx = zip(*[(a, b) for (a, b, c) in yy]) """ llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB35210166(self): - unformatted_code = """\ + unformatted_code = """\ def _(): query = ( m.Fetch(n.Raw('monarch.BorgTask', '/proc/container/memory/usage'), { 'borg_user': borguser, 'borg_job': jobname }) @@ -776,11 +776,11 @@ def _(): | o.Window(m.Align('5m')) | p.GroupBy(['borg_user', 'borg_job', 'borg_cell'], q.Mean())) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB32167774(self): - unformatted_code = """\ + unformatted_code = """\ X = ( 'is_official', 'is_cover', @@ -803,11 +803,11 @@ def testB32167774(self): 'is_compilation', ) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB66912275(self): - unformatted_code = """\ + unformatted_code = """\ def _(): with self.assertRaisesRegexp(errors.HttpError, 'Invalid'): patch_op = api_client.forwardingRules().patch( @@ -827,11 +827,11 @@ def _(): 'fingerprint': base64.urlsafe_b64encode('invalid_fingerprint') }).execute() """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB67312284(self): - code = """\ + code = """\ def _(): self.assertEqual( [u'to be published 2', u'to be published 1', u'to be published 0'], @@ -841,7 +841,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB65241516(self): - unformatted_code = """\ + unformatted_code = 
"""\ checkpoint_files = gfile.Glob(os.path.join(TrainTraceDir(unit_key, "*", "*"), embedding_model.CHECKPOINT_FILENAME + "-*")) """ # noqa expected_formatted_code = """\ @@ -850,11 +850,12 @@ def testB65241516(self): TrainTraceDir(unit_key, "*", "*"), embedding_model.CHECKPOINT_FILENAME + "-*")) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB37460004(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ assert all(s not in (_SENTINEL, None) for s in nested_schemas ), 'Nested schemas should never contain None/_SENTINEL' """) @@ -862,7 +863,7 @@ def testB37460004(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB36806207(self): - code = """\ + code = """\ def _(): linearity_data = [[row] for row in [ "%.1f mm" % (np.mean(linearity_values["pos_error"]) * 1000.0), @@ -881,7 +882,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB36215507(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class X(): def _(): @@ -895,7 +897,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB35212469(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): X = { 'retain': { @@ -904,7 +907,8 @@ def _(): } } """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def _(): X = { 'retain': { @@ -917,12 +921,14 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB31063453(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): while ((not mpede_proc) or ((time_time() - last_modified) < FLAGS_boot_idle_timeout)): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ 
def _(): while ((not mpede_proc) or ((time_time() - last_modified) < FLAGS_boot_idle_timeout)): @@ -932,7 +938,8 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB35021894(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): labelacl = Env(qa={ 'read': 'name/some-type-of-very-long-name-for-reading-perms', @@ -943,7 +950,8 @@ def _(): 'modify': 'name/some-other-type-of-very-long-name-for-modifying' }) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def _(): labelacl = Env( qa={ @@ -959,10 +967,12 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB34682902(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ logging.info("Mean angular velocity norm: %.3f", np.linalg.norm(np.mean(ang_vel_arr, axis=0))) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ logging.info("Mean angular velocity norm: %.3f", np.linalg.norm(np.mean(ang_vel_arr, axis=0))) """) @@ -970,13 +980,15 @@ def testB34682902(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB33842726(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class _(): def _(): hints.append(('hg tag -f -l -r %s %s # %s' % (short(ctx.node( )), candidatetag, firstline))[:78]) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class _(): def _(): hints.append(('hg tag -f -l -r %s %s # %s' % @@ -986,7 +998,8 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB32931780(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ environments = { 'prod': { # this is a comment before the first entry. 
@@ -1017,7 +1030,8 @@ def testB32931780(self): } } """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ environments = { 'prod': { # this is a comment before the first entry. @@ -1048,7 +1062,8 @@ def testB32931780(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB33047408(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def _(): for sort in (sorts or []): request['sorts'].append({ @@ -1062,7 +1077,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB32714745(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): def _BlankDefinition(): @@ -1092,14 +1108,16 @@ def _BlankDefinition(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB32737279(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ here_is_a_dict = { 'key': # Comment. 'value' } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ here_is_a_dict = { 'key': # Comment. 
'value' @@ -1109,7 +1127,8 @@ def testB32737279(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB32570937(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def _(): if (job_message.ball not in ('*', ball) or job_message.call not in ('*', call) or @@ -1120,7 +1139,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB31937033(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): def __init__(self, metric, fields_cb=None): @@ -1130,7 +1150,7 @@ def __init__(self, metric, fields_cb=None): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB31911533(self): - code = """\ + code = """\ class _(): @parameterized.NamedParameters( @@ -1146,7 +1166,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB31847238(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class _(): def aaaaa(self, bbbbb, cccccccccccccc=None): # TODO(who): pylint: disable=unused-argument @@ -1155,7 +1176,8 @@ def aaaaa(self, bbbbb, cccccccccccccc=None): # TODO(who): pylint: disable=unuse def xxxxx(self, yyyyy, zzzzzzzzzzzzzz=None): # A normal comment that runs over the column limit. 
return 1 """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class _(): def aaaaa(self, bbbbb, cccccccccccccc=None): # TODO(who): pylint: disable=unused-argument @@ -1171,11 +1193,13 @@ def xxxxx( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30760569(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ {'1234567890123456789012345678901234567890123456789012345678901234567890': '1234567890123456789012345678901234567890'} """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ { '1234567890123456789012345678901234567890123456789012345678901234567890': '1234567890123456789012345678901234567890' @@ -1185,13 +1209,15 @@ def testB30760569(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB26034238(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class Thing: def Function(self): thing.Scrape('/aaaaaaaaa/bbbbbbbbbb/ccccc/dddd/eeeeeeeeeeeeee/ffffffffffffff').AndReturn(42) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class Thing: def Function(self): @@ -1203,7 +1229,8 @@ def Function(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30536435(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def main(unused_argv): if True: if True: @@ -1212,7 +1239,8 @@ def main(unused_argv): ccccccccc.within, imports.ddddddddddddddddddd(name_item.ffffffffffffffff))) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def main(unused_argv): if True: if True: @@ -1224,12 +1252,14 @@ def main(unused_argv): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30442148(self): - 
unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def lulz(): return (some_long_module_name.SomeLongClassName. some_long_attribute_name.some_long_method_name()) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def lulz(): return (some_long_module_name.SomeLongClassName.some_long_attribute_name .some_long_method_name()) @@ -1238,7 +1268,8 @@ def lulz(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB26868213(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): xxxxxxxxxxxxxxxxxxx = { 'ssssss': {'ddddd': 'qqqqq', @@ -1253,7 +1284,8 @@ def _(): } } """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def _(): xxxxxxxxxxxxxxxxxxx = { 'ssssss': { @@ -1274,7 +1306,8 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30173198(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): def _(): @@ -1285,7 +1318,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB29908765(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): def __repr__(self): @@ -1296,7 +1330,8 @@ def __repr__(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB30087362(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def _(): for s in sorted(env['foo']): bar() @@ -1309,7 +1344,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB30087363(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if False: bar() # This is a comment @@ -1321,12 +1357,14 @@ def testB30087363(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB29093579(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): 
_xxxxxxxxxxxxxxx(aaaaaaaa, bbbbbbbbbbbbbb.cccccccccc[ dddddddddddddddddddddddddddd.eeeeeeeeeeeeeeeeeeeeee.fffffffffffffffffffff]) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def _(): _xxxxxxxxxxxxxxx( aaaaaaaa, @@ -1337,7 +1375,8 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB26382315(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ @hello_world # This is a first comment @@ -1349,7 +1388,8 @@ def foo(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB27616132(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: query.fetch_page.assert_has_calls([ mock.call(100, @@ -1360,7 +1400,8 @@ def testB27616132(self): start_cursor=cursor_2), ]) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: query.fetch_page.assert_has_calls([ mock.call(100, start_cursor=None), @@ -1372,7 +1413,8 @@ def testB27616132(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB27590179(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: if True: self.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = ( @@ -1382,7 +1424,8 @@ def testB27590179(self): self.bbb.cccccccccc(ddddddddddddddddddddddd.eeeeeeeeeeeeeeeeeeeeee) }) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: if True: self.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = ({ @@ -1396,11 +1439,13 @@ def testB27590179(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB27266946(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = 
(self.bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccccccccccc) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def _(): aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = ( self.bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb @@ -1410,7 +1455,8 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB25505359(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ _EXAMPLE = { 'aaaaaaaaaaaaaa': [{ 'bbbb': 'cccccccccccccccccccccc', @@ -1425,7 +1471,8 @@ def testB25505359(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25324261(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ aaaaaaaaa = set(bbbb.cccc for ddd in eeeeee.fffffffffff.gggggggggggggggg for cccc in ddd.specification) @@ -1434,7 +1481,8 @@ def testB25324261(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25136704(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class f: def test(self): @@ -1446,7 +1494,8 @@ def test(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25165602(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): ids = {u: i for u, i in zip(self.aaaaa, xrange(42, 42 + len(self.aaaaaa)))} """) # noqa @@ -1454,7 +1503,8 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25157123(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def ListArgs(): FairlyLongMethodName([relatively_long_identifier_for_a_list], another_argument_with_a_long_identifier) @@ -1463,7 +1513,8 @@ def ListArgs(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25136820(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): return collections.OrderedDict({ # Preceding comment. 
@@ -1471,7 +1522,8 @@ def foo(): '$bbbbbbbbbbbbbbbbbbbbbbbb', }) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): return collections.OrderedDict({ # Preceding comment. @@ -1483,13 +1535,15 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB25131481(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ APPARENT_ACTIONS = ('command_type', { 'materialize': lambda x: some_type_of_function('materialize ' + x.command_def), '#': lambda x: x # do nothing }) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ APPARENT_ACTIONS = ( 'command_type', { @@ -1503,7 +1557,8 @@ def testB25131481(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB23445244(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): if True: return xxxxxxxxxxxxxxxx( @@ -1514,7 +1569,8 @@ def foo(): FLAGS.aaaaaaaaaaaaaa + FLAGS.bbbbbbbbbbbbbbbbbbb, }) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): if True: return xxxxxxxxxxxxxxxx( @@ -1530,7 +1586,8 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB20559654(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class A(object): def foo(self): @@ -1538,7 +1595,8 @@ def foo(self): ['AA BBBB CCC DDD EEEEEEEE X YY ZZZZ FFF EEE AAAAAAAA'], aaaaaaaaaaa=True, bbbbbbbb=None) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class A(object): def foo(self): @@ -1551,7 +1609,8 @@ def foo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB23943842(self): - unformatted_code = textwrap.dedent("""\ + 
unformatted_code = textwrap.dedent( + """\ class F(): def f(): self.assertDictEqual( @@ -1565,7 +1624,8 @@ def f(): 'lines': 'l8'} }) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class F(): def f(): @@ -1589,12 +1649,14 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB20551180(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): if True: return (struct.pack('aaaa', bbbbbbbbbb, ccccccccccccccc, dddddddd) + eeeeeee) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): if True: return (struct.pack('aaaa', bbbbbbbbbb, ccccccccccccccc, dddddddd) + @@ -1604,12 +1666,14 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB23944849(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class A(object): def xxxxxxxxx(self, aaaaaaa, bbbbbbb=ccccccccccc, dddddd=300, eeeeeeeeeeeeee=None, fffffffffffffff=0): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class A(object): def xxxxxxxxx(self, @@ -1624,12 +1688,14 @@ def xxxxxxxxx(self, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB23935890(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class F(): def functioni(self, aaaaaaa, bbbbbbb, cccccc, dddddddddddddd, eeeeeeeeeeeeeee): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class F(): def functioni(self, aaaaaaa, bbbbbbb, cccccc, dddddddddddddd, @@ -1640,7 +1706,8 @@ def functioni(self, aaaaaaa, bbbbbbb, cccccc, dddddddddddddd, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB28414371(self): - code = 
textwrap.dedent("""\ + code = textwrap.dedent( + """\ def _(): return ((m.fffff( m.rrr('mmmmmmmmmmmmmmmm', 'ssssssssssssssssssssssssss'), ffffffffffffffff) @@ -1665,7 +1732,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20127686(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): if True: return ((m.fffff( @@ -1683,11 +1751,13 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20016122(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ from a_very_long_or_indented_module_name_yada_yada import (long_argument_1, long_argument_2) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ from a_very_long_or_indented_module_name_yada_yada import ( long_argument_1, long_argument_2) """) @@ -1698,12 +1768,13 @@ def testB20016122(self): '{based_on_style: pep8, split_penalty_import_names: 350}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class foo(): def __eq__(self, other): @@ -1723,8 +1794,9 @@ def __eq__(self, other): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: yapf, ' - 'split_before_logical_operator: True}')) + style.CreateStyleFromConfig( + '{based_on_style: yapf, ' + 'split_before_logical_operator: True}')) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) @@ -1732,12 +1804,14 @@ def __eq__(self, other): style.SetGlobalStyle(style.CreateYapfStyle()) def testB22527411(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def f(): if True: 
aaaaaa.bbbbbbbbbbbbbbbbbbbb[-1].cccccccccccccc.ddd().eeeeeeee(ffffffffffffff) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def f(): if True: aaaaaa.bbbbbbbbbbbbbbbbbbbb[-1].cccccccccccccc.ddd().eeeeeeee( @@ -1747,7 +1821,8 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB20849933(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def main(unused_argv): if True: aaaaaaaa = { @@ -1755,7 +1830,8 @@ def main(unused_argv): (eeeeee.FFFFFFFFFFFFFFFFFF), } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def main(unused_argv): if True: aaaaaaaa = { @@ -1767,7 +1843,8 @@ def main(unused_argv): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB20813997(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def myfunc_1(): myarray = numpy.zeros((2, 2, 2)) print(myarray[:, 1, :]) @@ -1776,7 +1853,8 @@ def myfunc_1(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20605036(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ foo = { 'aaaa': { # A comment for no particular reason. 
@@ -1790,7 +1868,8 @@ def testB20605036(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20562732(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ foo = [ # Comment about first list item 'First item', @@ -1802,7 +1881,8 @@ def testB20562732(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20128830(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ a = { 'xxxxxxxxxxxxxxxxxxxx': { 'aaaa': @@ -1822,7 +1902,8 @@ def testB20128830(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20073838(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class DummyModel(object): def do_nothing(self, class_1_count): @@ -1839,7 +1920,8 @@ def do_nothing(self, class_1_count): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19626808(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if True: aaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbb( 'ccccccccccc', ddddddddd='eeeee').fffffffff([ggggggggggggggggggggg]) @@ -1848,7 +1930,8 @@ def testB19626808(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19547210(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ while True: if True: if True: @@ -1862,7 +1945,8 @@ def testB19547210(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19377034(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): if (aaaaaaaaaaaaaaa.start >= aaaaaaaaaaaaaaa.end or bbbbbbbbbbbbbbb.start >= bbbbbbbbbbbbbbb.end): @@ -1872,7 +1956,8 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19372573(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def f(): if a: return 42 while True: @@ -1890,7 +1975,8 @@ def f(): style.SetGlobalStyle(style.CreateYapfStyle()) def testB19353268(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ a = {1, 2, 3}[x] b = {'foo': 42, 'bar': 
37}['foo'] """) @@ -1898,7 +1984,8 @@ def testB19353268(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19287512(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class Foo(object): def bar(self): @@ -1908,7 +1995,8 @@ def bar(self): .Mmmmmmmmmmmmmmmmmm(-1, 'permission error'))): self.assertRaises(nnnnnnnnnnnnnnnn.ooooo, ppppp.qqqqqqqqqqqqqqqqq) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class Foo(object): def bar(self): @@ -1923,7 +2011,8 @@ def bar(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB19194420(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ method.Set( 'long argument goes here that causes the line to break', lambda arg2=0.5: arg2) @@ -1932,7 +2021,7 @@ def testB19194420(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19073499(self): - code = """\ + code = """\ instance = ( aaaaaaa.bbbbbbb().ccccccccccccccccc().ddddddddddd({ 'aa': 'context!' @@ -1944,7 +2033,8 @@ def testB19073499(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB18257115(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if True: if True: self._Test(aaaa, bbbbbbb.cccccccccc, dddddddd, eeeeeeeeeee, @@ -1954,7 +2044,8 @@ def testB18257115(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB18256666(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class Foo(object): def Bar(self): @@ -1972,7 +2063,8 @@ def Bar(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB18256826(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if True: pass # A multiline comment. 
@@ -1991,7 +2083,8 @@ def testB18256826(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB18255697(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ AAAAAAAAAAAAAAA = { 'XXXXXXXXXXXXXX': 4242, # Inline comment # Next comment @@ -2002,12 +2095,14 @@ def testB18255697(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB17534869(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: self.assertLess(abs(time.time()-aaaa.bbbbbbbbbbb( datetime.datetime.now())), 1) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: self.assertLess( abs(time.time() - aaaa.bbbbbbbbbbb(datetime.datetime.now())), 1) @@ -2016,14 +2111,16 @@ def testB17534869(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB17489866(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def f(): if True: if True: return aaaa.bbbbbbbbb(ccccccc=dddddddddddddd({('eeee', \ 'ffffffff'): str(j)})) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def f(): if True: if True: @@ -2034,7 +2131,8 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB17133019(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class aaaaaaaaaaaaaa(object): def bbbbbbbbbb(self): @@ -2045,7 +2143,8 @@ def bbbbbbbbbb(self): ), "rb") as gggggggggggggggggggg: print(gggggggggggggggggggg) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class aaaaaaaaaaaaaa(object): def bbbbbbbbbb(self): @@ -2059,7 +2158,8 @@ def bbbbbbbbbb(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB17011869(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = 
textwrap.dedent( + """\ '''blah......''' class SomeClass(object): @@ -2070,7 +2170,8 @@ class SomeClass(object): 'DDDDDDDD': 0.4811 } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ '''blah......''' @@ -2086,14 +2187,16 @@ class SomeClass(object): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB16783631(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: with aaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccc(ddddddddddddd, eeeeeeeee=self.fffffffffffff )as gggg: pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: with aaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccc( ddddddddddddd, eeeeeeeee=self.fffffffffffff) as gggg: @@ -2103,12 +2206,14 @@ def testB16783631(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB16572361(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(self): def bar(my_dict_name): self.my_dict_name['foo-bar-baz-biz-boo-baa-baa'].IncrementBy.assert_called_once_with('foo_bar_baz_boo') """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(self): def bar(my_dict_name): @@ -2120,13 +2225,15 @@ def bar(my_dict_name): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15884241(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if 1: if 1: for row in AAAA: self.create(aaaaaaaa="/aaa/bbbb/cccc/dddddd/eeeeeeeeeeeeeeeeeeeeeeeeee/%s" % row [0].replace(".foo", ".bar"), aaaaa=bbb[1], ccccc=bbb[2], dddd=bbb[3], eeeeeeeeeee=[s.strip() for s in bbb[4].split(",")], ffffffff=[s.strip() for s in bbb[5].split(",")], gggggg=bbb[6]) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if 
1: if 1: for row in AAAA: @@ -2144,7 +2251,8 @@ def testB15884241(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15697268(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def main(unused_argv): ARBITRARY_CONSTANT_A = 10 an_array_with_an_exceedingly_long_name = range(ARBITRARY_CONSTANT_A + 1) @@ -2153,7 +2261,8 @@ def main(unused_argv): a_long_name_slicing = an_array_with_an_exceedingly_long_name[:ARBITRARY_CONSTANT_A] bad_slice = ("I am a crazy, no good, string what's too long, etc." + " no really ")[:ARBITRARY_CONSTANT_A] """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def main(unused_argv): ARBITRARY_CONSTANT_A = 10 an_array_with_an_exceedingly_long_name = range(ARBITRARY_CONSTANT_A + 1) @@ -2169,7 +2278,7 @@ def main(unused_argv): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15597568(self): - unformatted_code = """\ + unformatted_code = """\ if True: if True: if True: @@ -2183,14 +2292,16 @@ def testB15597568(self): (", and the process timed out." 
if did_time_out else ".")) % errorcode) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15542157(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ aaaaaaaaaaaa = bbbb.ccccccccccccccc(dddddd.eeeeeeeeeeeeee, ffffffffffffffffff, gggggg.hhhhhhhhhhhhhhhhh) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ aaaaaaaaaaaa = bbbb.ccccccccccccccc(dddddd.eeeeeeeeeeeeee, ffffffffffffffffff, gggggg.hhhhhhhhhhhhhhhhh) """) # noqa @@ -2198,7 +2309,8 @@ def testB15542157(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15438132(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if aaaaaaa.bbbbbbbbbb: cccccc.dddddddddd(eeeeeeeeeee=fffffffffffff.gggggggggggggggggg) if hhhhhh.iiiii.jjjjjjjjjjjjj: @@ -2214,7 +2326,8 @@ def testB15438132(self): lllll.mm), nnnnnnnnnn=ooooooo.pppppppppp) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if aaaaaaa.bbbbbbbbbb: cccccc.dddddddddd(eeeeeeeeeee=fffffffffffff.gggggggggggggggggg) if hhhhhh.iiiii.jjjjjjjjjjjjj: @@ -2233,7 +2346,7 @@ def testB15438132(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB14468247(self): - unformatted_code = """\ + unformatted_code = """\ call(a=1, b=2, ) @@ -2244,15 +2357,17 @@ def testB14468247(self): b=2, ) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB14406499(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo1(parameter_1, parameter_2, parameter_3, 
parameter_4, \ parameter_5, parameter_6): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo1(parameter_1, parameter_2, parameter_3, parameter_4, parameter_5, parameter_6): pass @@ -2261,18 +2376,21 @@ def foo1(parameter_1, parameter_2, parameter_3, parameter_4, parameter_5, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB13900309(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ self.aaaaaaaaaaa( # A comment in the middle of it all. 948.0/3600, self.bbb.ccccccccccccccccccccc(dddddddddddddddd.eeee, True)) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ self.aaaaaaaaaaa( # A comment in the middle of it all. 948.0 / 3600, self.bbb.ccccccccccccccccccccc(dddddddddddddddd.eeee, True)) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ aaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccccc( DC_1, (CL - 50, CL), AAAAAAAA, BBBBBBBBBBBBBBBB, 98.0, CCCCCCC).ddddddddd( # Look! A comment is here. 
@@ -2281,41 +2399,49 @@ def testB13900309(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc().dddddddddddddddddddddddddd(1, 2, 3, 4) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc( ).dddddddddddddddddddddddddd(1, 2, 3, 4) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc(x).dddddddddddddddddddddddddd(1, 2, 3, 4) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc( x).dddddddddddddddddddddddddd(1, 2, 3, 4) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ aaaaaaaaaaaaaaaaaaaaaaaa(xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx).dddddddddddddddddddddddddd(1, 2, 3, 4) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ aaaaaaaaaaaaaaaaaaaaaaaa( xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx).dddddddddddddddddddddddddd(1, 2, 3, 4) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ aaaaaaaaaaaaaaaaaaaaaaaa().bbbbbbbbbbbbbbbbbbbbbbbb().ccccccccccccccccccc().\ 
dddddddddddddddddd().eeeeeeeeeeeeeeeeeeeee().fffffffffffffffff().gggggggggggggggggg() """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ aaaaaaaaaaaaaaaaaaaaaaaa().bbbbbbbbbbbbbbbbbbbbbbbb().ccccccccccccccccccc( ).dddddddddddddddddd().eeeeeeeeeeeeeeeeeeeee().fffffffffffffffff( ).gggggggggggggggggg() @@ -2324,7 +2450,8 @@ def testB13900309(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB67935687(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ Fetch( Raw('monarch.BorgTask', '/union/row_operator_action_delay'), {'borg_user': self.borg_user}) @@ -2332,13 +2459,15 @@ def testB67935687(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ shelf_renderer.expand_text = text.translate_to_unicode( expand_text % { 'creator': creator }) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ shelf_renderer.expand_text = text.translate_to_unicode(expand_text % {'creator': creator}) """) # noqa diff --git a/yapftests/reformatter_facebook_test.py b/yapftests/reformatter_facebook_test.py index c61f32bf5..14b07d06b 100644 --- a/yapftests/reformatter_facebook_test.py +++ b/yapftests/reformatter_facebook_test.py @@ -29,12 +29,14 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreateFacebookStyle()) def testNoNeedForLineBreaks(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def overly_long_function_name( just_one_arg, **kwargs): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def overly_long_function_name(just_one_arg, **kwargs): pass """) @@ -42,13 +44,15 @@ def overly_long_function_name(just_one_arg, **kwargs): self.assertCodeEqual(expected_formatted_code, 
reformatter.Reformat(llines)) def testDedentClosingBracket(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def overly_long_function_name( first_argument_on_the_same_line, second_argument_makes_the_line_too_long): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def overly_long_function_name( first_argument_on_the_same_line, second_argument_makes_the_line_too_long ): @@ -58,12 +62,14 @@ def overly_long_function_name( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBreakAfterOpeningBracketIfContentsTooBig(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def overly_long_function_name(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def overly_long_function_name( a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, \ v, w, x, y, z @@ -74,7 +80,8 @@ def overly_long_function_name( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDedentClosingBracketWithComments(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def overly_long_function_name( # comment about the first argument first_argument_with_a_very_long_name_or_so, @@ -82,7 +89,8 @@ def overly_long_function_name( second_argument_makes_the_line_too_long): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def overly_long_function_name( # comment about the first argument first_argument_with_a_very_long_name_or_so, @@ -95,7 +103,8 @@ def overly_long_function_name( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDedentImportAsNames(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ from module 
import ( internal_function as function, SOME_CONSTANT_NUMBER1, @@ -107,7 +116,8 @@ def testDedentImportAsNames(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDedentTestListGexp(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ try: pass except ( @@ -122,7 +132,8 @@ def testDedentTestListGexp(self): ) as exception: pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ try: pass except ( @@ -146,13 +157,15 @@ def testDedentTestListGexp(self): def testBrokenIdempotency(self): # TODO(ambv): The following behaviour should be fixed. - pass0_code = textwrap.dedent("""\ + pass0_code = textwrap.dedent( + """\ try: pass except (IOError, OSError, LookupError, RuntimeError, OverflowError) as exception: pass """) # noqa - pass1_code = textwrap.dedent("""\ + pass1_code = textwrap.dedent( + """\ try: pass except ( @@ -163,7 +176,8 @@ def testBrokenIdempotency(self): llines = yapf_test_helper.ParseAndUnwrap(pass0_code) self.assertCodeEqual(pass1_code, reformatter.Reformat(llines)) - pass2_code = textwrap.dedent("""\ + pass2_code = textwrap.dedent( + """\ try: pass except ( @@ -175,7 +189,8 @@ def testBrokenIdempotency(self): self.assertCodeEqual(pass2_code, reformatter.Reformat(llines)) def testIfExprHangingIndent(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: if True: if True: @@ -184,7 +199,8 @@ def testIfExprHangingIndent(self): self.foobars.counters['db.marshmellow_skins'] != 1): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: if True: if True: @@ -198,11 +214,13 @@ def testIfExprHangingIndent(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSimpleDedenting(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: 
self.assertEqual(result.reason_not_added, "current preflight is still running") """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: self.assertEqual( result.reason_not_added, "current preflight is still running" @@ -212,7 +230,8 @@ def testSimpleDedenting(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDedentingWithSubscripts(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class Foo: class Bar: @classmethod @@ -221,7 +240,8 @@ def baz(cls, clues_list, effect, constraints, constraint_manager): return cls.single_constraint_not(clues_lists, effect, constraints[0], constraint_manager) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class Foo: class Bar: @classmethod @@ -235,7 +255,8 @@ def baz(cls, clues_list, effect, constraints, constraint_manager): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDedentingCallsWithInnerLists(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): def _(): cls.effect_clues = { @@ -246,7 +267,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDedentingListComprehension(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class Foo(): def _pack_results_for_constraint_or(): self.param_groups = dict( @@ -284,7 +306,8 @@ def _pack_results_for_constraint_or(): ('localhost', os.path.join(path, 'node_2.log'), super_parser) ] """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class Foo(): def _pack_results_for_constraint_or(): self.param_groups = dict( @@ -324,7 +347,8 @@ def _pack_results_for_constraint_or(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMustSplitDedenting(self): - code = 
textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): def _(): effect_line = FrontInput( @@ -336,7 +360,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDedentIfConditional(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): def _(): if True: @@ -350,7 +375,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDedentSet(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class _(): def _(): assert set(self.constraint_links.get_links()) == set( @@ -366,7 +392,8 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDedentingInnerScope(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class Foo(): @classmethod def _pack_results_for_constraint_or(cls, combination, constraints): @@ -375,16 +402,17 @@ def _pack_results_for_constraint_or(cls, combination, constraints): constraints, InvestigationResult.OR ) """) # noqa - llines = yapf_test_helper.ParseAndUnwrap(code) + llines = yapf_test_helper.ParseAndUnwrap(code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) def testCommentWithNewlinesInPrefix(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): if 0: return False @@ -397,7 +425,8 @@ def foo(): print(foo()) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): if 0: return False @@ -413,7 +442,7 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testIfStmtClosingBracket(self): - unformatted_code = """\ + unformatted_code = """\ if (isinstance(value , (StopIteration , StopAsyncIteration )) and exc.__cause__ 
is value_asdfasdfasdfasdfsafsafsafdasfasdfs): return False """ # noqa @@ -424,7 +453,7 @@ def testIfStmtClosingBracket(self): ): return False """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) diff --git a/yapftests/reformatter_pep8_test.py b/yapftests/reformatter_pep8_test.py index acc218d24..19c294d18 100644 --- a/yapftests/reformatter_pep8_test.py +++ b/yapftests/reformatter_pep8_test.py @@ -30,11 +30,13 @@ def setUpClass(cls): # pylint: disable=g-missing-super-call style.SetGlobalStyle(style.CreatePEP8Style()) def testIndent4(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if a+b: pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if a + b: pass """) @@ -42,7 +44,8 @@ def testIndent4(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSingleLineIfStatements(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ if True: a = 42 elif False: b = 42 else: c = 42 @@ -51,12 +54,14 @@ def testSingleLineIfStatements(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testBlankBetweenClassAndDef(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class Foo: def joe(): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class Foo: def joe(): @@ -66,7 +71,8 @@ def joe(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankBetweenDefsInClass(self): - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ class TestClass: def __init__(self): self.running = False @@ -75,7 +81,8 @@ def run(self): def is_running(self): return self.running ''') - expected_formatted_code = textwrap.dedent('''\ 
+ expected_formatted_code = textwrap.dedent( + '''\ class TestClass: def __init__(self): @@ -91,11 +98,13 @@ def is_running(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSingleWhiteBeforeTrailingComment(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if a+b: # comment pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if a + b: # comment pass """) @@ -103,19 +112,22 @@ def testSingleWhiteBeforeTrailingComment(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSpaceBetweenEndingCommandAndClosingBracket(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a = ( 1, ) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a = (1, ) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testContinuedNonOutdentedLine(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class eld(d): if str(geom.geom_type).upper( ) != self.geom_type and not self.geom_type == 'GEOMETRY': @@ -125,7 +137,8 @@ class eld(d): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testWrappingPercentExpressions(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def f(): if True: zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxx.yyy + 1) @@ -133,7 +146,8 @@ def f(): zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxxxxxx + 1) zzzzz = '%s-%s'.ww(xxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxxxxxx + 1) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def f(): if True: zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxxxxx + 1, @@ -149,12 +163,14 @@ def f(): 
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testAlignClosingBracketWithVisualIndentation(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ TEST_LIST = ('foo', 'bar', # first comment 'baz' # second comment ) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ TEST_LIST = ( 'foo', 'bar', # first comment @@ -164,7 +180,8 @@ def testAlignClosingBracketWithVisualIndentation(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def f(): def g(): @@ -173,7 +190,8 @@ def g(): ): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def f(): def g(): @@ -186,11 +204,13 @@ def g(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testIndentSizeChanging(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: runtime_mins = (program_end_time - program_start_time).total_seconds() / 60.0 """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: runtime_mins = (program_end_time - program_start_time).total_seconds() / 60.0 @@ -199,7 +219,8 @@ def testIndentSizeChanging(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testHangingIndentCollision(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if (aaaaaaaaaaaaaa + bbbbbbbbbbbbbbbb == ccccccccccccccccc and xxxxxxxxxxxxx or yyyyyyyyyyyyyyyyy): pass elif (xxxxxxxxxxxxxxx(aaaaaaaaaaa, bbbbbbbbbbbbbb, cccccccccccc, dddddddddd=None)): @@ -213,7 +234,8 @@ def h(): for connection in itertools.chain(branch.contact, branch.address, 
morestuff.andmore.andmore.andmore.andmore.andmore.andmore.andmore): dosomething(connection) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if (aaaaaaaaaaaaaa + bbbbbbbbbbbbbbbb == ccccccccccccccccc and xxxxxxxxxxxxx or yyyyyyyyyyyyyyyyy): pass @@ -242,7 +264,8 @@ def testSplittingBeforeLogicalOperator(self): style.SetGlobalStyle( style.CreateStyleFromConfig( '{based_on_style: pep8, split_before_logical_operator: True}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): return bool(update.message.new_chat_member or update.message.left_chat_member or update.message.new_chat_title or update.message.new_chat_photo or @@ -251,7 +274,8 @@ def foo(): or update.message.migrate_to_chat_id or update.message.migrate_from_chat_id or update.message.pinned_message) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): return bool( update.message.new_chat_member or update.message.left_chat_member @@ -265,18 +289,20 @@ def foo(): or update.message.pinned_message) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testContiguousListEndingWithComment(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: if True: keys.append(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) # may be unassigned. 
""") # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: if True: keys.append( @@ -290,11 +316,13 @@ def testSplittingBeforeFirstArgument(self): style.SetGlobalStyle( style.CreateStyleFromConfig( '{based_on_style: pep8, split_before_first_argument: True}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a_very_long_function_name(long_argument_name_1=1, long_argument_name_2=2, long_argument_name_3=3, long_argument_name_4=4) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a_very_long_function_name( long_argument_name_1=1, long_argument_name_2=2, @@ -302,17 +330,19 @@ def testSplittingBeforeFirstArgument(self): long_argument_name_4=4) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testSplittingExpressionsInsideSubscripts(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): df = df[(df['campaign_status'] == 'LIVE') & (df['action_status'] == 'LIVE')] """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): df = df[(df['campaign_status'] == 'LIVE') & (df['action_status'] == 'LIVE')] @@ -321,13 +351,15 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplitListsAndDictSetMakersIfCommaTerminated(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ DJANGO_TEMPLATES_OPTIONS = {"context_processors": []} DJANGO_TEMPLATES_OPTIONS = {"context_processors": [],} x = ["context_processors"] x = ["context_processors",] """) - expected_formatted_code = textwrap.dedent("""\ + 
expected_formatted_code = textwrap.dedent( + """\ DJANGO_TEMPLATES_OPTIONS = {"context_processors": []} DJANGO_TEMPLATES_OPTIONS = { "context_processors": [], @@ -341,13 +373,15 @@ def testSplitListsAndDictSetMakersIfCommaTerminated(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplitAroundNamedAssigns(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class a(): def a(): return a( aaaaaaaaaa=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class a(): def a(): @@ -359,13 +393,15 @@ def a(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testUnaryOperator(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if not -3 < x < 3: pass if -3 < x < 3: pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if not -3 < x < 3: pass if -3 < x < 3: @@ -377,21 +413,24 @@ def testUnaryOperator(self): def testNoSplitBeforeDictValue(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig('{based_on_style: pep8, ' - 'allow_split_before_dict_value: false, ' - 'coalesce_brackets: true, ' - 'dedent_closing_brackets: true, ' - 'each_dict_entry_on_separate_line: true, ' - 'split_before_logical_operator: true}')) - - unformatted_code = textwrap.dedent("""\ + style.CreateStyleFromConfig( + '{based_on_style: pep8, ' + 'allow_split_before_dict_value: false, ' + 'coalesce_brackets: true, ' + 'dedent_closing_brackets: true, ' + 'each_dict_entry_on_separate_line: true, ' + 'split_before_logical_operator: true}')) + + unformatted_code = textwrap.dedent( + """\ some_dict = { 'title': _("I am example data"), 'description': _("Lorem ipsum dolor met sit amet elit, si vis pacem para bellum " "elites nihi very long string."), } """) # noqa - expected_formatted_code 
= textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ some_dict = { 'title': _("I am example data"), 'description': _( @@ -401,13 +440,15 @@ def testNoSplitBeforeDictValue(self): } """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ X = {'a': 1, 'b': 2, 'key': this_is_a_function_call_that_goes_over_the_column_limit_im_pretty_sure()} """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ X = { 'a': 1, 'b': 2, @@ -415,16 +456,18 @@ def testNoSplitBeforeDictValue(self): } """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ attrs = { 'category': category, 'role': forms.ModelChoiceField(label=_("Role"), required=False, queryset=category_roles, initial=selected_role, empty_label=_("No access"),), } """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ attrs = { 'category': category, 'role': forms.ModelChoiceField( @@ -437,17 +480,19 @@ def testNoSplitBeforeDictValue(self): } """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ css_class = forms.CharField( label=_("CSS class"), required=False, help_text=_("Optional CSS class used to customize this category 
appearance from templates."), ) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ css_class = forms.CharField( label=_("CSS class"), required=False, @@ -457,8 +502,8 @@ def testNoSplitBeforeDictValue(self): ) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) @@ -473,7 +518,7 @@ def _(): cdffile['Latitude'][:] >= select_lat - radius) & ( cdffile['Latitude'][:] <= select_lat + radius)) """ - expected_code = """\ + expected_code = """\ def _(): include_values = np.where( (cdffile['Quality_Flag'][:] >= 5) & (cdffile['Day_Night_Flag'][:] == 1) @@ -482,7 +527,7 @@ def _(): & (cdffile['Latitude'][:] >= select_lat - radius) & (cdffile['Latitude'][:] <= select_lat + radius)) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertEqual(expected_code, reformatter.Reformat(llines)) def testNoBlankLinesOnlyForFirstNestedObject(self): @@ -500,7 +545,7 @@ def bar(self): bar docs """ ''' - expected_code = '''\ + expected_code = '''\ class Demo: """ Demo docs @@ -516,7 +561,7 @@ def bar(self): bar docs """ ''' - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertEqual(expected_code, reformatter.Reformat(llines)) def testSplitBeforeArithmeticOperators(self): @@ -525,7 +570,7 @@ def testSplitBeforeArithmeticOperators(self): style.CreateStyleFromConfig( '{based_on_style: pep8, split_before_arithmetic_operator: true}')) - unformatted_code = """\ + unformatted_code = """\ def _(): raise ValueError('This is a long message that ends with an argument: ' + str(42)) """ # noqa @@ -534,9 +579,9 @@ def _(): raise ValueError('This is a long 
message that ends with an argument: ' + str(42)) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) @@ -546,12 +591,12 @@ def testListSplitting(self): (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,10), (1,11), (1, 10), (1,11), (10,11)]) """ - expected_code = """\ + expected_code = """\ foo([(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 10), (1, 11), (1, 10), (1, 11), (10, 11)]) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testNoBlankLineBeforeNestedFuncOrClass(self): @@ -561,7 +606,7 @@ def testNoBlankLineBeforeNestedFuncOrClass(self): '{based_on_style: pep8, ' 'blank_line_before_nested_class_or_def: false}')) - unformatted_code = '''\ + unformatted_code = '''\ def normal_function(): """Return the nested function.""" @@ -589,14 +634,15 @@ class nested_class(): return nested_function ''' - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testParamListIndentationCollision1(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class _(): def __init__(self, title: Optional[str], diffs: Collection[BinaryDiff] = (), charset: Union[Type[AsciiCharset], Type[LineCharset]] = AsciiCharset, preprocess: Callable[[str], str] = identity, @@ -605,7 
+651,8 @@ def __init__(self, title: Optional[str], diffs: Collection[BinaryDiff] = (), cha self._cs = charset self._preprocess = preprocess """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class _(): def __init__( @@ -624,7 +671,8 @@ def __init__( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testParamListIndentationCollision2(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def simple_pass_function_with_an_extremely_long_name_and_some_arguments( argument0, argument1): pass @@ -633,7 +681,8 @@ def simple_pass_function_with_an_extremely_long_name_and_some_arguments( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testParamListIndentationCollision3(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def func1( arg1, arg2, @@ -651,11 +700,13 @@ def func2( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testTwoWordComparisonOperators(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ _ = (klsdfjdklsfjksdlfjdklsfjdslkfjsdkl is not ksldfjsdklfjdklsfjdklsfjdklsfjdsklfjdklsfj) _ = (klsdfjdklsfjksdlfjdklsfjdslkfjsdkl not in {ksldfjsdklfjdklsfjdklsfjdklsfjdsklfjdklsfj}) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ _ = (klsdfjdklsfjksdlfjdklsfjdslkfjsdkl is not ksldfjsdklfjdklsfjdklsfjdklsfjdsklfjdklsfj) _ = (klsdfjdklsfjksdlfjdklsfjdslkfjsdkl @@ -667,7 +718,8 @@ def testTwoWordComparisonOperators(self): @unittest.skipUnless(not py3compat.PY3, 'Requires Python 2.7') def testAsyncAsNonKeyword(self): # In Python 2, async may be used as a non-keyword identifier. 
- code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ from util import async @@ -683,12 +735,14 @@ def bar(self): self.assertCodeEqual(code, reformatter.Reformat(llines, verify=False)) def testStableInlinedDictionaryFormatting(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _(): url = "http://{0}/axis-cgi/admin/param.cgi?{1}".format( value, urllib.urlencode({'action': 'update', 'parameter': value})) """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def _(): url = "http://{0}/axis-cgi/admin/param.cgi?{1}".format( value, urllib.urlencode({ @@ -697,18 +751,19 @@ def _(): })) """) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testSpaceBetweenColonAndElipses(self): style.SetGlobalStyle(style.CreatePEP8Style()) - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class MyClass(ABC): place: ... 
@@ -719,10 +774,11 @@ class MyClass(ABC): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testSpaceBetweenDictColonAndElipses(self): style.SetGlobalStyle(style.CreatePEP8Style()) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ {0:"...", 1:...} """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ {0: "...", 1: ...} """) @@ -732,7 +788,8 @@ def testSpaceBetweenDictColonAndElipses(self): class TestsForSpacesInsideBrackets(yapf_test_helper.YAPFTest): """Test the SPACE_INSIDE_BRACKETS style option.""" - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ foo() foo(1) foo(1,2) @@ -765,7 +822,8 @@ def testEnabled(self): style.SetGlobalStyle( style.CreateStyleFromConfig('{space_inside_brackets: True}')) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ foo() foo( 1 ) foo( 1, 2 ) @@ -803,7 +861,8 @@ def testEnabled(self): def testDefault(self): style.SetGlobalStyle(style.CreatePEP8Style()) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ foo() foo(1) foo(1, 2) @@ -842,7 +901,8 @@ def testDefault(self): def testAwait(self): style.SetGlobalStyle( style.CreateStyleFromConfig('{space_inside_brackets: True}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import asyncio import time @@ -855,7 +915,8 @@ async def main(): if (await get_html()): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import asyncio import time @@ -876,7 +937,8 @@ async def main(): class TestsForSpacesAroundSubscriptColon(yapf_test_helper.YAPFTest): """Test the SPACES_AROUND_SUBSCRIPT_COLON style option.""" - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a = list1[ : ] b = list2[ slice_start: ] c = list3[ slice_start:slice_end ] @@ 
-892,7 +954,8 @@ class TestsForSpacesAroundSubscriptColon(yapf_test_helper.YAPFTest): def testEnabled(self): style.SetGlobalStyle( style.CreateStyleFromConfig('{spaces_around_subscript_colon: True}')) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a = list1[:] b = list2[slice_start :] c = list3[slice_start : slice_end] @@ -909,11 +972,13 @@ def testEnabled(self): def testWithSpaceInsideBrackets(self): style.SetGlobalStyle( - style.CreateStyleFromConfig('{' - 'spaces_around_subscript_colon: true, ' - 'space_inside_brackets: true,' - '}')) - expected_formatted_code = textwrap.dedent("""\ + style.CreateStyleFromConfig( + '{' + 'spaces_around_subscript_colon: true, ' + 'space_inside_brackets: true,' + '}')) + expected_formatted_code = textwrap.dedent( + """\ a = list1[ : ] b = list2[ slice_start : ] c = list3[ slice_start : slice_end ] @@ -930,7 +995,8 @@ def testWithSpaceInsideBrackets(self): def testDefault(self): style.SetGlobalStyle(style.CreatePEP8Style()) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a = list1[:] b = list2[slice_start:] c = list3[slice_start:slice_end] diff --git a/yapftests/reformatter_python3_test.py b/yapftests/reformatter_python3_test.py index b5d68e86f..88dd9d7bd 100644 --- a/yapftests/reformatter_python3_test.py +++ b/yapftests/reformatter_python3_test.py @@ -33,11 +33,13 @@ def setUpClass(cls): # pylint: disable=g-missing-super-call style.SetGlobalStyle(style.CreatePEP8Style()) def testTypedNames(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def x(aaaaaaaaaaaaaaa:int,bbbbbbbbbbbbbbbb:str,ccccccccccccccc:dict,eeeeeeeeeeeeee:set={1, 2, 3})->bool: pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def x(aaaaaaaaaaaaaaa: int, bbbbbbbbbbbbbbbb: str, ccccccccccccccc: dict, @@ -48,11 +50,13 @@ def x(aaaaaaaaaaaaaaa: int, 
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testTypedNameWithLongNamedArg(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def func(arg=long_function_call_that_pushes_the_line_over_eighty_characters()) -> ReturnType: pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def func(arg=long_function_call_that_pushes_the_line_over_eighty_characters() ) -> ReturnType: pass @@ -61,11 +65,13 @@ def func(arg=long_function_call_that_pushes_the_line_over_eighty_characters() self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testKeywordOnlyArgSpecifier(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(a, *, kw): return a+kw """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(a, *, kw): return a + kw """) @@ -74,13 +80,15 @@ def foo(a, *, kw): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testPEP448ParameterExpansion(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ { ** x } { **{} } { **{ **x }, **x } {'a': 1, **kw , 'b':3, **kw2 } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ {**x} {**{}} {**{**x}, **x} @@ -90,11 +98,13 @@ def testPEP448ParameterExpansion(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testAnnotations(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(a: list, b: "bar") -> dict: return a+b """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(a: list, b: "bar") -> dict: return a + b """) @@ -102,15 +112,16 @@ def foo(a: list, b: "bar") -> dict: self.assertCodeEqual(expected_formatted_code, 
reformatter.Reformat(llines)) def testExecAsNonKeyword(self): - unformatted_code = 'methods.exec( sys.modules[name])\n' + unformatted_code = 'methods.exec( sys.modules[name])\n' expected_formatted_code = 'methods.exec(sys.modules[name])\n' - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testAsyncFunctions(self): if sys.version_info[1] < 5: return - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ import asyncio import time @@ -130,7 +141,7 @@ async def main(): self.assertCodeEqual(code, reformatter.Reformat(llines, verify=False)) def testNoSpacesAroundPowerOperator(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ a**b """) expected_formatted_code = textwrap.dedent("""\ @@ -143,13 +154,13 @@ def testNoSpacesAroundPowerOperator(self): '{based_on_style: pep8, SPACES_AROUND_POWER_OPERATOR: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testSpacesAroundDefaultOrNamedAssign(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ f(a=5) """) expected_formatted_code = textwrap.dedent("""\ @@ -163,13 +174,14 @@ def testSpacesAroundDefaultOrNamedAssign(self): 'SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testTypeHint(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(x: 
int=42): pass @@ -177,7 +189,8 @@ def foo(x: int=42): def foo2(x: 'int' =42): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(x: int = 42): pass @@ -189,17 +202,18 @@ def foo2(x: 'int' = 42): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMatrixMultiplication(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ a=b@c """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a = b @ c """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoneKeyword(self): - code = """\ + code = """\ None.__ne__() """ llines = yapf_test_helper.ParseAndUnwrap(code) @@ -208,7 +222,8 @@ def testNoneKeyword(self): def testAsyncWithPrecedingComment(self): if sys.version_info[1] < 5: return - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import asyncio # Comment @@ -218,7 +233,8 @@ async def bar(): async def foo(): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import asyncio @@ -236,7 +252,8 @@ async def foo(): def testAsyncFunctionsNested(self): if sys.version_info[1] < 5: return - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ async def outer(): async def inner(): @@ -248,13 +265,15 @@ async def inner(): def testKeepTypesIntact(self): if sys.version_info[1] < 5: return - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def _ReduceAbstractContainers( self, *args: Optional[automation_converter.PyiCollectionAbc]) -> List[ automation_converter.PyiCollectionAbc]: pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def _ReduceAbstractContainers( self, *args: 
Optional[automation_converter.PyiCollectionAbc] ) -> List[automation_converter.PyiCollectionAbc]: @@ -266,13 +285,15 @@ def _ReduceAbstractContainers( def testContinuationIndentWithAsync(self): if sys.version_info[1] < 5: return - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ async def start_websocket(): async with session.ws_connect( r"ws://a_really_long_long_long_long_long_long_url") as ws: pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ async def start_websocket(): async with session.ws_connect( r"ws://a_really_long_long_long_long_long_long_url") as ws: @@ -285,7 +306,7 @@ def testSplittingArguments(self): if sys.version_info[1] < 5: return - unformatted_code = """\ + unformatted_code = """\ async def open_file(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None): pass @@ -346,15 +367,15 @@ def run_sync_in_worker_thread(sync_fn, *args, cancellable=False, limiter=None): 'split_before_first_argument: true}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testDictUnpacking(self): if sys.version_info[1] < 5: return - unformatted_code = """\ + unformatted_code = """\ class Foo: def foo(self): foofoofoofoofoofoofoofoo('foofoofoofoofoo', { @@ -373,7 +394,7 @@ def foo(self): **foofoofoo }) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultilineFormatString(self): @@ -401,7 +422,7 @@ def dirichlet(x12345678901234567890123456789012345678901234567890=...) 
-> None: self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionTypedReturnNextLine(self): - code = """\ + code = """\ def _GenerateStatsEntries( process_id: Text, timestamp: Optional[ffffffff.FFFFFFFFFFF] = None @@ -412,7 +433,7 @@ def _GenerateStatsEntries( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionTypedReturnSameLine(self): - code = """\ + code = """\ def rrrrrrrrrrrrrrrrrrrrrr( ccccccccccccccccccccccc: Tuple[Text, Text]) -> List[Tuple[Text, Text]]: pass @@ -423,7 +444,8 @@ def rrrrrrrrrrrrrrrrrrrrrr( def testAsyncForElseNotIndentedInsideBody(self): if sys.version_info[1] < 5: return - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ async def fn(): async for message in websocket: for i in range(10): @@ -439,7 +461,8 @@ async def fn(): def testForElseInAsyncNotMixedWithAsyncFor(self): if sys.version_info[1] < 5: return - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ async def fn(): for i in range(10): pass @@ -450,12 +473,14 @@ async def fn(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testParameterListIndentationConflicts(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def raw_message( # pylint: disable=too-many-arguments self, text, user_id=1000, chat_type='private', forward_date=None, forward_from=None): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def raw_message( # pylint: disable=too-many-arguments self, text, diff --git a/yapftests/reformatter_style_config_test.py b/yapftests/reformatter_style_config_test.py index c5726cb30..6746ba0ed 100644 --- a/yapftests/reformatter_style_config_test.py +++ b/yapftests/reformatter_style_config_test.py @@ -30,26 +30,30 @@ def setUp(self): def testSetGlobalStyle(self): try: style.SetGlobalStyle(style.CreateYapfStyle()) - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ for i 
in range(5): print('bar') """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ for i in range(5): print('bar') """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) style.DEFAULT_STYLE = self.current_style - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ for i in range(5): print('bar') """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ for i in range(5): print('bar') """) @@ -58,32 +62,35 @@ def testSetGlobalStyle(self): def testOperatorNoSpaceStyle(self): try: - sympy_style = style.CreatePEP8Style() + sympy_style = style.CreatePEP8Style() sympy_style['NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS'] = \ style._StringSetConverter('*,/') style.SetGlobalStyle(sympy_style) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a = 1+2 * 3 - 4 / 5 b = '0' * 1 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a = 1 + 2*3 - 4/5 b = '0'*1 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) style.DEFAULT_STYLE = self.current_style def testOperatorPrecedenceStyle(self): try: - pep8_with_precedence = style.CreatePEP8Style() + pep8_with_precedence = style.CreatePEP8Style() pep8_with_precedence['ARITHMETIC_PRECEDENCE_INDICATION'] = True style.SetGlobalStyle(pep8_with_precedence) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ 1+2 (1 + 2) * (3 - (4 / 5)) a = 1 * 
2 + 3 / 4 @@ -98,7 +105,8 @@ def testOperatorPrecedenceStyle(self): j = (1 * 2 - 3) + 4 k = (1 * 2 * 3) + (4 * 5 * 6 * 7 * 8) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ 1 + 2 (1+2) * (3 - (4/5)) a = 1*2 + 3/4 @@ -115,19 +123,20 @@ def testOperatorPrecedenceStyle(self): """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) style.DEFAULT_STYLE = self.current_style def testNoSplitBeforeFirstArgumentStyle1(self): try: - pep8_no_split_before_first = style.CreatePEP8Style() + pep8_no_split_before_first = style.CreatePEP8Style() pep8_no_split_before_first['SPLIT_BEFORE_FIRST_ARGUMENT'] = False - pep8_no_split_before_first['SPLIT_BEFORE_NAMED_ASSIGNS'] = False + pep8_no_split_before_first['SPLIT_BEFORE_NAMED_ASSIGNS'] = False style.SetGlobalStyle(pep8_no_split_before_first) - formatted_code = textwrap.dedent("""\ + formatted_code = textwrap.dedent( + """\ # Example from in-code MustSplit comments foo = outer_function_call(fitting_inner_function_call(inner_arg1, inner_arg2), outer_arg1, outer_arg2) @@ -164,11 +173,12 @@ def testNoSplitBeforeFirstArgumentStyle1(self): def testNoSplitBeforeFirstArgumentStyle2(self): try: - pep8_no_split_before_first = style.CreatePEP8Style() + pep8_no_split_before_first = style.CreatePEP8Style() pep8_no_split_before_first['SPLIT_BEFORE_FIRST_ARGUMENT'] = False - pep8_no_split_before_first['SPLIT_BEFORE_NAMED_ASSIGNS'] = True + pep8_no_split_before_first['SPLIT_BEFORE_NAMED_ASSIGNS'] = True style.SetGlobalStyle(pep8_no_split_before_first) - formatted_code = textwrap.dedent("""\ + formatted_code = textwrap.dedent( + """\ # Examples Issue#556 i_take_a_lot_of_params(arg1, param1=very_long_expression1(), diff --git a/yapftests/reformatter_verify_test.py 
b/yapftests/reformatter_verify_test.py index 33ba3a614..2abbd19ff 100644 --- a/yapftests/reformatter_verify_test.py +++ b/yapftests/reformatter_verify_test.py @@ -32,7 +32,8 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreatePEP8Style()) def testVerifyException(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class ABC(metaclass=type): pass """) @@ -42,20 +43,23 @@ class ABC(metaclass=type): reformatter.Reformat(llines) # verify should be False by default. def testNoVerify(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class ABC(metaclass=type): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class ABC(metaclass=type): pass """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines, verify=False)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines, verify=False)) def testVerifyFutureImport(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ from __future__ import print_function def call_my_function(the_function): @@ -68,7 +72,8 @@ def call_my_function(the_function): with self.assertRaises(verifier.InternalError): reformatter.Reformat(llines, verify=True) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ from __future__ import print_function @@ -80,11 +85,12 @@ def call_my_function(the_function): call_my_function(print) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual(expected_formatted_code, - reformatter.Reformat(llines, verify=False)) + self.assertCodeEqual( + expected_formatted_code, reformatter.Reformat(llines, verify=False)) def testContinuationLineShouldBeDistinguished(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ class Foo(object): def 
bar(self): diff --git a/yapftests/split_penalty_test.py b/yapftests/split_penalty_test.py index f7474a398..24226cbac 100644 --- a/yapftests/split_penalty_test.py +++ b/yapftests/split_penalty_test.py @@ -26,10 +26,10 @@ from yapftests import yapf_test_helper -UNBREAKABLE = split_penalty.UNBREAKABLE +UNBREAKABLE = split_penalty.UNBREAKABLE VERY_STRONGLY_CONNECTED = split_penalty.VERY_STRONGLY_CONNECTED -DOTTED_NAME = split_penalty.DOTTED_NAME -STRONGLY_CONNECTED = split_penalty.STRONGLY_CONNECTED +DOTTED_NAME = split_penalty.DOTTED_NAME +STRONGLY_CONNECTED = split_penalty.STRONGLY_CONNECTED class SplitPenaltyTest(yapf_test_helper.YAPFTest): @@ -68,9 +68,12 @@ def FlattenRec(tree): if pytree_utils.NodeName(tree) in pytree_utils.NONSEMANTIC_TOKENS: return [] if isinstance(tree, pytree.Leaf): - return [(tree.value, - pytree_utils.GetNodeAnnotation( - tree, pytree_utils.Annotation.SPLIT_PENALTY))] + return [ + ( + tree.value, + pytree_utils.GetNodeAnnotation( + tree, pytree_utils.Annotation.SPLIT_PENALTY)) + ] nodes = [] for node in tree.children: nodes += FlattenRec(node) @@ -85,181 +88,194 @@ def foo(x): pass """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('def', None), - ('foo', UNBREAKABLE), - ('(', UNBREAKABLE), - ('x', None), - (')', STRONGLY_CONNECTED), - (':', UNBREAKABLE), - ('pass', None), - ]) + self._CheckPenalties( + tree, [ + ('def', None), + ('foo', UNBREAKABLE), + ('(', UNBREAKABLE), + ('x', None), + (')', STRONGLY_CONNECTED), + (':', UNBREAKABLE), + ('pass', None), + ]) # Test function definition with trailing comment. 
- code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" def foo(x): # trailing comment pass """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('def', None), - ('foo', UNBREAKABLE), - ('(', UNBREAKABLE), - ('x', None), - (')', STRONGLY_CONNECTED), - (':', UNBREAKABLE), - ('pass', None), - ]) + self._CheckPenalties( + tree, [ + ('def', None), + ('foo', UNBREAKABLE), + ('(', UNBREAKABLE), + ('x', None), + (')', STRONGLY_CONNECTED), + (':', UNBREAKABLE), + ('pass', None), + ]) # Test class definitions. - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" class A: pass class B(A): pass """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('class', None), - ('A', UNBREAKABLE), - (':', UNBREAKABLE), - ('pass', None), - ('class', None), - ('B', UNBREAKABLE), - ('(', UNBREAKABLE), - ('A', None), - (')', None), - (':', UNBREAKABLE), - ('pass', None), - ]) + self._CheckPenalties( + tree, [ + ('class', None), + ('A', UNBREAKABLE), + (':', UNBREAKABLE), + ('pass', None), + ('class', None), + ('B', UNBREAKABLE), + ('(', UNBREAKABLE), + ('A', None), + (')', None), + (':', UNBREAKABLE), + ('pass', None), + ]) # Test lambda definitions. code = textwrap.dedent(r""" lambda a, b: None """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('lambda', None), - ('a', VERY_STRONGLY_CONNECTED), - (',', VERY_STRONGLY_CONNECTED), - ('b', VERY_STRONGLY_CONNECTED), - (':', VERY_STRONGLY_CONNECTED), - ('None', VERY_STRONGLY_CONNECTED), - ]) + self._CheckPenalties( + tree, [ + ('lambda', None), + ('a', VERY_STRONGLY_CONNECTED), + (',', VERY_STRONGLY_CONNECTED), + ('b', VERY_STRONGLY_CONNECTED), + (':', VERY_STRONGLY_CONNECTED), + ('None', VERY_STRONGLY_CONNECTED), + ]) # Test dotted names. 
code = textwrap.dedent(r""" import a.b.c """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('import', None), - ('a', None), - ('.', UNBREAKABLE), - ('b', UNBREAKABLE), - ('.', UNBREAKABLE), - ('c', UNBREAKABLE), - ]) + self._CheckPenalties( + tree, [ + ('import', None), + ('a', None), + ('.', UNBREAKABLE), + ('b', UNBREAKABLE), + ('.', UNBREAKABLE), + ('c', UNBREAKABLE), + ]) def testStronglyConnected(self): # Test dictionary keys. - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" a = { 'x': 42, y(lambda a: 23): 37, } """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('a', None), - ('=', None), - ('{', None), - ("'x'", None), - (':', STRONGLY_CONNECTED), - ('42', None), - (',', None), - ('y', None), - ('(', UNBREAKABLE), - ('lambda', STRONGLY_CONNECTED), - ('a', VERY_STRONGLY_CONNECTED), - (':', VERY_STRONGLY_CONNECTED), - ('23', VERY_STRONGLY_CONNECTED), - (')', VERY_STRONGLY_CONNECTED), - (':', STRONGLY_CONNECTED), - ('37', None), - (',', None), - ('}', None), - ]) + self._CheckPenalties( + tree, [ + ('a', None), + ('=', None), + ('{', None), + ("'x'", None), + (':', STRONGLY_CONNECTED), + ('42', None), + (',', None), + ('y', None), + ('(', UNBREAKABLE), + ('lambda', STRONGLY_CONNECTED), + ('a', VERY_STRONGLY_CONNECTED), + (':', VERY_STRONGLY_CONNECTED), + ('23', VERY_STRONGLY_CONNECTED), + (')', VERY_STRONGLY_CONNECTED), + (':', STRONGLY_CONNECTED), + ('37', None), + (',', None), + ('}', None), + ]) # Test list comprehension. 
code = textwrap.dedent(r""" [a for a in foo if a.x == 37] """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('[', None), - ('a', None), - ('for', 0), - ('a', STRONGLY_CONNECTED), - ('in', STRONGLY_CONNECTED), - ('foo', STRONGLY_CONNECTED), - ('if', 0), - ('a', STRONGLY_CONNECTED), - ('.', VERY_STRONGLY_CONNECTED), - ('x', DOTTED_NAME), - ('==', STRONGLY_CONNECTED), - ('37', STRONGLY_CONNECTED), - (']', None), - ]) + self._CheckPenalties( + tree, [ + ('[', None), + ('a', None), + ('for', 0), + ('a', STRONGLY_CONNECTED), + ('in', STRONGLY_CONNECTED), + ('foo', STRONGLY_CONNECTED), + ('if', 0), + ('a', STRONGLY_CONNECTED), + ('.', VERY_STRONGLY_CONNECTED), + ('x', DOTTED_NAME), + ('==', STRONGLY_CONNECTED), + ('37', STRONGLY_CONNECTED), + (']', None), + ]) def testFuncCalls(self): code = 'foo(1, 2, 3)\n' tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('foo', None), - ('(', UNBREAKABLE), - ('1', None), - (',', UNBREAKABLE), - ('2', None), - (',', UNBREAKABLE), - ('3', None), - (')', VERY_STRONGLY_CONNECTED), - ]) + self._CheckPenalties( + tree, [ + ('foo', None), + ('(', UNBREAKABLE), + ('1', None), + (',', UNBREAKABLE), + ('2', None), + (',', UNBREAKABLE), + ('3', None), + (')', VERY_STRONGLY_CONNECTED), + ]) # Now a method call, which has more than one trailer code = 'foo.bar.baz(1, 2, 3)\n' tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('foo', None), - ('.', VERY_STRONGLY_CONNECTED), - ('bar', DOTTED_NAME), - ('.', VERY_STRONGLY_CONNECTED), - ('baz', DOTTED_NAME), - ('(', STRONGLY_CONNECTED), - ('1', None), - (',', UNBREAKABLE), - ('2', None), - (',', UNBREAKABLE), - ('3', None), - (')', VERY_STRONGLY_CONNECTED), - ]) + self._CheckPenalties( + tree, [ + ('foo', None), + ('.', VERY_STRONGLY_CONNECTED), + ('bar', DOTTED_NAME), + ('.', VERY_STRONGLY_CONNECTED), + ('baz', DOTTED_NAME), + ('(', STRONGLY_CONNECTED), + ('1', None), + (',', UNBREAKABLE), + ('2', None), + (',', 
UNBREAKABLE), + ('3', None), + (')', VERY_STRONGLY_CONNECTED), + ]) # Test single generator argument. code = 'max(i for i in xrange(10))\n' tree = self._ParseAndComputePenalties(code) - self._CheckPenalties(tree, [ - ('max', None), - ('(', UNBREAKABLE), - ('i', 0), - ('for', 0), - ('i', STRONGLY_CONNECTED), - ('in', STRONGLY_CONNECTED), - ('xrange', STRONGLY_CONNECTED), - ('(', UNBREAKABLE), - ('10', STRONGLY_CONNECTED), - (')', VERY_STRONGLY_CONNECTED), - (')', VERY_STRONGLY_CONNECTED), - ]) + self._CheckPenalties( + tree, [ + ('max', None), + ('(', UNBREAKABLE), + ('i', 0), + ('for', 0), + ('i', STRONGLY_CONNECTED), + ('in', STRONGLY_CONNECTED), + ('xrange', STRONGLY_CONNECTED), + ('(', UNBREAKABLE), + ('10', STRONGLY_CONNECTED), + (')', VERY_STRONGLY_CONNECTED), + (')', VERY_STRONGLY_CONNECTED), + ]) if __name__ == '__main__': diff --git a/yapftests/style_test.py b/yapftests/style_test.py index 8a37f9535..4aceba3d0 100644 --- a/yapftests/style_test.py +++ b/yapftests/style_test.py @@ -50,8 +50,8 @@ def testContinuationAlignStyleStringConverter(self): 'VALIGN-RIGHT') with self.assertRaises(ValueError) as ctx: style._ContinuationAlignStyleStringConverter('blahblah') - self.assertIn("unknown continuation align style: 'blahblah'", - str(ctx.exception)) + self.assertIn( + "unknown continuation align style: 'blahblah'", str(ctx.exception)) def testStringListConverter(self): self.assertEqual(style._StringListConverter('foo, bar'), ['foo', 'bar']) @@ -136,7 +136,8 @@ def tearDownClass(cls): # pylint: disable=g-missing-super-call shutil.rmtree(cls.test_tmpdir) def testDefaultBasedOnStyle(self): - cfg = textwrap.dedent(u'''\ + cfg = textwrap.dedent( + u'''\ [style] continuation_indent_width = 20 ''') @@ -146,7 +147,8 @@ def testDefaultBasedOnStyle(self): self.assertEqual(cfg['CONTINUATION_INDENT_WIDTH'], 20) def testDefaultBasedOnPEP8Style(self): - cfg = textwrap.dedent(u'''\ + cfg = textwrap.dedent( + u'''\ [style] based_on_style = pep8 continuation_indent_width = 40 @@ 
-157,7 +159,8 @@ def testDefaultBasedOnPEP8Style(self): self.assertEqual(cfg['CONTINUATION_INDENT_WIDTH'], 40) def testDefaultBasedOnGoogleStyle(self): - cfg = textwrap.dedent(u'''\ + cfg = textwrap.dedent( + u'''\ [style] based_on_style = google continuation_indent_width = 20 @@ -168,7 +171,8 @@ def testDefaultBasedOnGoogleStyle(self): self.assertEqual(cfg['CONTINUATION_INDENT_WIDTH'], 20) def testDefaultBasedOnFacebookStyle(self): - cfg = textwrap.dedent(u'''\ + cfg = textwrap.dedent( + u'''\ [style] based_on_style = facebook continuation_indent_width = 20 @@ -179,7 +183,8 @@ def testDefaultBasedOnFacebookStyle(self): self.assertEqual(cfg['CONTINUATION_INDENT_WIDTH'], 20) def testBoolOptionValue(self): - cfg = textwrap.dedent(u'''\ + cfg = textwrap.dedent( + u'''\ [style] based_on_style = pep8 SPLIT_BEFORE_NAMED_ASSIGNS=False @@ -192,7 +197,8 @@ def testBoolOptionValue(self): self.assertEqual(cfg['SPLIT_BEFORE_LOGICAL_OPERATOR'], True) def testStringListOptionValue(self): - cfg = textwrap.dedent(u'''\ + cfg = textwrap.dedent( + u'''\ [style] based_on_style = pep8 I18N_FUNCTION_CALL = N_, V_, T_ @@ -218,7 +224,8 @@ def testErrorNoStyleSection(self): style.CreateStyleFromConfig(filepath) def testErrorUnknownStyleOption(self): - cfg = textwrap.dedent(u'''\ + cfg = textwrap.dedent( + u'''\ [style] indent_width=2 hummus=2 @@ -235,7 +242,7 @@ def testPyprojectTomlNoYapfSection(self): return filepath = os.path.join(self.test_tmpdir, 'pyproject.toml') - _ = open(filepath, 'w') + _ = open(filepath, 'w') with self.assertRaisesRegex(style.StyleConfigError, 'Unable to find section'): style.CreateStyleFromConfig(filepath) @@ -246,7 +253,8 @@ def testPyprojectTomlParseYapfSection(self): except ImportError: return - cfg = textwrap.dedent(u'''\ + cfg = textwrap.dedent( + u'''\ [tool.yapf] based_on_style = "pep8" continuation_indent_width = 40 @@ -276,12 +284,12 @@ def testDefaultBasedOnStyle(self): self.assertEqual(cfg['INDENT_WIDTH'], 2) def 
testDefaultBasedOnStyleBadDict(self): - self.assertRaisesRegex(style.StyleConfigError, 'Unknown style option', - style.CreateStyleFromConfig, - {'based_on_styl': 'pep8'}) - self.assertRaisesRegex(style.StyleConfigError, 'not a valid', - style.CreateStyleFromConfig, - {'INDENT_WIDTH': 'FOUR'}) + self.assertRaisesRegex( + style.StyleConfigError, 'Unknown style option', + style.CreateStyleFromConfig, {'based_on_styl': 'pep8'}) + self.assertRaisesRegex( + style.StyleConfigError, 'not a valid', style.CreateStyleFromConfig, + {'INDENT_WIDTH': 'FOUR'}) class StyleFromCommandLine(yapf_test_helper.YAPFTest): @@ -315,12 +323,15 @@ def testDefaultBasedOnDetaultTypeString(self): self.assertIsInstance(cfg, dict) def testDefaultBasedOnStyleBadString(self): - self.assertRaisesRegex(style.StyleConfigError, 'Unknown style option', - style.CreateStyleFromConfig, '{based_on_styl: pep8}') - self.assertRaisesRegex(style.StyleConfigError, 'not a valid', - style.CreateStyleFromConfig, '{INDENT_WIDTH: FOUR}') - self.assertRaisesRegex(style.StyleConfigError, 'Invalid style dict', - style.CreateStyleFromConfig, '{based_on_style: pep8') + self.assertRaisesRegex( + style.StyleConfigError, 'Unknown style option', + style.CreateStyleFromConfig, '{based_on_styl: pep8}') + self.assertRaisesRegex( + style.StyleConfigError, 'not a valid', style.CreateStyleFromConfig, + '{INDENT_WIDTH: FOUR}') + self.assertRaisesRegex( + style.StyleConfigError, 'Invalid style dict', + style.CreateStyleFromConfig, '{based_on_style: pep8') class StyleHelp(yapf_test_helper.YAPFTest): diff --git a/yapftests/subtype_assigner_test.py b/yapftests/subtype_assigner_test.py index 8616169c9..222153db4 100644 --- a/yapftests/subtype_assigner_test.py +++ b/yapftests/subtype_assigner_test.py @@ -35,9 +35,11 @@ def _CheckFormatTokenSubtypes(self, llines, list_of_expected): """ actual = [] for lline in llines: - filtered_values = [(ft.value, ft.subtypes) - for ft in lline.tokens - if ft.name not in pytree_utils.NONSEMANTIC_TOKENS] 
+ filtered_values = [ + (ft.value, ft.subtypes) + for ft in lline.tokens + if ft.name not in pytree_utils.NONSEMANTIC_TOKENS + ] if filtered_values: actual.append(filtered_values) @@ -45,242 +47,263 @@ def _CheckFormatTokenSubtypes(self, llines, list_of_expected): def testFuncDefDefaultAssign(self): self.maxDiff = None # pylint: disable=invalid-name - code = textwrap.dedent(r""" + code = textwrap.dedent( + r""" def foo(a=37, *b, **c): return -x[:42] """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [ - [ - ('def', {subtypes.NONE}), - ('foo', {subtypes.FUNC_DEF}), - ('(', {subtypes.NONE}), - ('a', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - subtypes.PARAMETER_START, - }), - ('=', { - subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - ('37', { - subtypes.NONE, - subtypes.PARAMETER_STOP, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - (',', {subtypes.NONE}), - ('*', { - subtypes.PARAMETER_START, - subtypes.VARARGS_STAR, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - ('b', { - subtypes.NONE, - subtypes.PARAMETER_STOP, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - (',', {subtypes.NONE}), - ('**', { - subtypes.PARAMETER_START, - subtypes.KWARGS_STAR_STAR, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - ('c', { - subtypes.NONE, - subtypes.PARAMETER_STOP, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - (')', {subtypes.NONE}), - (':', {subtypes.NONE}), - ], - [ - ('return', {subtypes.NONE}), - ('-', {subtypes.UNARY_OPERATOR}), - ('x', {subtypes.NONE}), - ('[', {subtypes.SUBSCRIPT_BRACKET}), - (':', {subtypes.SUBSCRIPT_COLON}), - ('42', {subtypes.NONE}), - (']', {subtypes.SUBSCRIPT_BRACKET}), - ], - ]) + self._CheckFormatTokenSubtypes( + llines, [ + [ + ('def', {subtypes.NONE}), + ('foo', {subtypes.FUNC_DEF}), + ('(', {subtypes.NONE}), + ( + 'a', { + subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + subtypes.PARAMETER_START, + }), + ( + 
'=', { + subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + ( + '37', { + subtypes.NONE, + subtypes.PARAMETER_STOP, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + (',', {subtypes.NONE}), + ( + '*', { + subtypes.PARAMETER_START, + subtypes.VARARGS_STAR, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + ( + 'b', { + subtypes.NONE, + subtypes.PARAMETER_STOP, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + (',', {subtypes.NONE}), + ( + '**', { + subtypes.PARAMETER_START, + subtypes.KWARGS_STAR_STAR, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + ( + 'c', { + subtypes.NONE, + subtypes.PARAMETER_STOP, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + (')', {subtypes.NONE}), + (':', {subtypes.NONE}), + ], + [ + ('return', {subtypes.NONE}), + ('-', {subtypes.UNARY_OPERATOR}), + ('x', {subtypes.NONE}), + ('[', {subtypes.SUBSCRIPT_BRACKET}), + (':', {subtypes.SUBSCRIPT_COLON}), + ('42', {subtypes.NONE}), + (']', {subtypes.SUBSCRIPT_BRACKET}), + ], + ]) def testFuncCallWithDefaultAssign(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" foo(x, a='hello world') """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [ - [ - ('foo', {subtypes.NONE}), - ('(', {subtypes.NONE}), - ('x', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - (',', {subtypes.NONE}), - ('a', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - ('=', {subtypes.DEFAULT_OR_NAMED_ASSIGN}), - ("'hello world'", {subtypes.NONE}), - (')', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes( + llines, [ + [ + ('foo', {subtypes.NONE}), + ('(', {subtypes.NONE}), + ( + 'x', { + subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + (',', {subtypes.NONE}), + ( + 'a', { + subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + ('=', {subtypes.DEFAULT_OR_NAMED_ASSIGN}), + ("'hello world'", {subtypes.NONE}), + (')', {subtypes.NONE}), + 
], + ]) def testSetComprehension(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ def foo(strs): return {s.lower() for s in strs} """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [ - [ - ('def', {subtypes.NONE}), - ('foo', {subtypes.FUNC_DEF}), - ('(', {subtypes.NONE}), - ('strs', { - subtypes.NONE, - subtypes.PARAMETER_START, - subtypes.PARAMETER_STOP, - }), - (')', {subtypes.NONE}), - (':', {subtypes.NONE}), - ], - [ - ('return', {subtypes.NONE}), - ('{', {subtypes.NONE}), - ('s', {subtypes.COMP_EXPR}), - ('.', {subtypes.COMP_EXPR}), - ('lower', {subtypes.COMP_EXPR}), - ('(', {subtypes.COMP_EXPR}), - (')', {subtypes.COMP_EXPR}), - ('for', { - subtypes.DICT_SET_GENERATOR, - subtypes.COMP_FOR, - }), - ('s', {subtypes.COMP_FOR}), - ('in', {subtypes.COMP_FOR}), - ('strs', {subtypes.COMP_FOR}), - ('}', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes( + llines, [ + [ + ('def', {subtypes.NONE}), + ('foo', {subtypes.FUNC_DEF}), + ('(', {subtypes.NONE}), + ( + 'strs', { + subtypes.NONE, + subtypes.PARAMETER_START, + subtypes.PARAMETER_STOP, + }), + (')', {subtypes.NONE}), + (':', {subtypes.NONE}), + ], + [ + ('return', {subtypes.NONE}), + ('{', {subtypes.NONE}), + ('s', {subtypes.COMP_EXPR}), + ('.', {subtypes.COMP_EXPR}), + ('lower', {subtypes.COMP_EXPR}), + ('(', {subtypes.COMP_EXPR}), + (')', {subtypes.COMP_EXPR}), + ('for', { + subtypes.DICT_SET_GENERATOR, + subtypes.COMP_FOR, + }), + ('s', {subtypes.COMP_FOR}), + ('in', {subtypes.COMP_FOR}), + ('strs', {subtypes.COMP_FOR}), + ('}', {subtypes.NONE}), + ], + ]) def testUnaryNotOperator(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ not a """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [[('not', {subtypes.UNARY_OPERATOR}), - ('a', {subtypes.NONE})]]) + self._CheckFormatTokenSubtypes( + llines, [[('not', {subtypes.UNARY_OPERATOR}), ('a', {subtypes.NONE})]]) def 
testBitwiseOperators(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ x = ((a | (b ^ 3) & c) << 3) >> 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [ - [ - ('x', {subtypes.NONE}), - ('=', {subtypes.ASSIGN_OPERATOR}), - ('(', {subtypes.NONE}), - ('(', {subtypes.NONE}), - ('a', {subtypes.NONE}), - ('|', {subtypes.BINARY_OPERATOR}), - ('(', {subtypes.NONE}), - ('b', {subtypes.NONE}), - ('^', {subtypes.BINARY_OPERATOR}), - ('3', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('&', {subtypes.BINARY_OPERATOR}), - ('c', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('<<', {subtypes.BINARY_OPERATOR}), - ('3', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('>>', {subtypes.BINARY_OPERATOR}), - ('1', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes( + llines, [ + [ + ('x', {subtypes.NONE}), + ('=', {subtypes.ASSIGN_OPERATOR}), + ('(', {subtypes.NONE}), + ('(', {subtypes.NONE}), + ('a', {subtypes.NONE}), + ('|', {subtypes.BINARY_OPERATOR}), + ('(', {subtypes.NONE}), + ('b', {subtypes.NONE}), + ('^', {subtypes.BINARY_OPERATOR}), + ('3', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('&', {subtypes.BINARY_OPERATOR}), + ('c', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('<<', {subtypes.BINARY_OPERATOR}), + ('3', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('>>', {subtypes.BINARY_OPERATOR}), + ('1', {subtypes.NONE}), + ], + ]) def testArithmeticOperators(self): - code = textwrap.dedent("""\ + code = textwrap.dedent( + """\ x = ((a + (b - 3) * (1 % c) @ d) / 3) // 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [ - [ - ('x', {subtypes.NONE}), - ('=', {subtypes.ASSIGN_OPERATOR}), - ('(', {subtypes.NONE}), - ('(', {subtypes.NONE}), - ('a', {subtypes.NONE}), - ('+', {subtypes.BINARY_OPERATOR}), - ('(', {subtypes.NONE}), - ('b', {subtypes.NONE}), - ('-', { - subtypes.BINARY_OPERATOR, - subtypes.SIMPLE_EXPRESSION, - }), - ('3', {subtypes.NONE}), - (')', 
{subtypes.NONE}), - ('*', {subtypes.BINARY_OPERATOR}), - ('(', {subtypes.NONE}), - ('1', {subtypes.NONE}), - ('%', { - subtypes.BINARY_OPERATOR, - subtypes.SIMPLE_EXPRESSION, - }), - ('c', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('@', {subtypes.BINARY_OPERATOR}), - ('d', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('/', {subtypes.BINARY_OPERATOR}), - ('3', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('//', {subtypes.BINARY_OPERATOR}), - ('1', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes( + llines, [ + [ + ('x', {subtypes.NONE}), + ('=', {subtypes.ASSIGN_OPERATOR}), + ('(', {subtypes.NONE}), + ('(', {subtypes.NONE}), + ('a', {subtypes.NONE}), + ('+', {subtypes.BINARY_OPERATOR}), + ('(', {subtypes.NONE}), + ('b', {subtypes.NONE}), + ('-', { + subtypes.BINARY_OPERATOR, + subtypes.SIMPLE_EXPRESSION, + }), + ('3', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('*', {subtypes.BINARY_OPERATOR}), + ('(', {subtypes.NONE}), + ('1', {subtypes.NONE}), + ('%', { + subtypes.BINARY_OPERATOR, + subtypes.SIMPLE_EXPRESSION, + }), + ('c', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('@', {subtypes.BINARY_OPERATOR}), + ('d', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('/', {subtypes.BINARY_OPERATOR}), + ('3', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('//', {subtypes.BINARY_OPERATOR}), + ('1', {subtypes.NONE}), + ], + ]) def testSubscriptColon(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ x[0:42:1] """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [ - [ - ('x', {subtypes.NONE}), - ('[', {subtypes.SUBSCRIPT_BRACKET}), - ('0', {subtypes.NONE}), - (':', {subtypes.SUBSCRIPT_COLON}), - ('42', {subtypes.NONE}), - (':', {subtypes.SUBSCRIPT_COLON}), - ('1', {subtypes.NONE}), - (']', {subtypes.SUBSCRIPT_BRACKET}), - ], - ]) + self._CheckFormatTokenSubtypes( + llines, [ + [ + ('x', {subtypes.NONE}), + ('[', {subtypes.SUBSCRIPT_BRACKET}), + ('0', {subtypes.NONE}), + (':', 
{subtypes.SUBSCRIPT_COLON}), + ('42', {subtypes.NONE}), + (':', {subtypes.SUBSCRIPT_COLON}), + ('1', {subtypes.NONE}), + (']', {subtypes.SUBSCRIPT_BRACKET}), + ], + ]) def testFunctionCallWithStarExpression(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ [a, *b] """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes(llines, [ - [ - ('[', {subtypes.NONE}), - ('a', {subtypes.NONE}), - (',', {subtypes.NONE}), - ('*', { - subtypes.UNARY_OPERATOR, - subtypes.VARARGS_STAR, - }), - ('b', {subtypes.NONE}), - (']', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes( + llines, [ + [ + ('[', {subtypes.NONE}), + ('a', {subtypes.NONE}), + (',', {subtypes.NONE}), + ('*', { + subtypes.UNARY_OPERATOR, + subtypes.VARARGS_STAR, + }), + ('b', {subtypes.NONE}), + (']', {subtypes.NONE}), + ], + ]) if __name__ == '__main__': diff --git a/yapftests/utils.py b/yapftests/utils.py index 268b8c43a..d10a0982c 100644 --- a/yapftests/utils.py +++ b/yapftests/utils.py @@ -42,15 +42,16 @@ def stdout_redirector(stream): # pylint: disable=invalid-name # Note: `buffering` is set to -1 despite documentation of NamedTemporaryFile # says None. This is probably a problem with the python documentation. 
@contextlib.contextmanager -def NamedTempFile(mode='w+b', - buffering=-1, - encoding=None, - errors=None, - newline=None, - suffix=None, - prefix=None, - dirname=None, - text=False): +def NamedTempFile( + mode='w+b', + buffering=-1, + encoding=None, + errors=None, + newline=None, + suffix=None, + prefix=None, + dirname=None, + text=False): """Context manager creating a new temporary file in text mode.""" if sys.version_info < (3, 5): # covers also python 2 if suffix is None: @@ -72,18 +73,11 @@ def NamedTempFile(mode='w+b', @contextlib.contextmanager -def TempFileContents(dirname, - contents, - encoding='utf-8', - newline='', - suffix=None): +def TempFileContents( + dirname, contents, encoding='utf-8', newline='', suffix=None): # Note: NamedTempFile properly handles unicode encoding when using mode='w' - with NamedTempFile( - dirname=dirname, - mode='w', - encoding=encoding, - newline=newline, - suffix=suffix) as (f, fname): + with NamedTempFile(dirname=dirname, mode='w', encoding=encoding, + newline=newline, suffix=suffix) as (f, fname): f.write(contents) f.flush() yield fname diff --git a/yapftests/yapf_test.py b/yapftests/yapf_test.py index 2330f4e18..865a67e3b 100644 --- a/yapftests/yapf_test.py +++ b/yapftests/yapf_test.py @@ -54,10 +54,11 @@ def testSimple(self): self._Check(unformatted_code, unformatted_code) def testNoEndingNewline(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ if True: pass""") - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: pass """) @@ -65,7 +66,7 @@ def testNoEndingNewline(self): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testPrintAfterPeriod(self): - unformatted_code = textwrap.dedent("""a.print\n""") + unformatted_code = textwrap.dedent("""a.print\n""") expected_formatted_code = textwrap.dedent("""a.print\n""") self._Check(unformatted_code, expected_formatted_code) @@ -80,7 +81,7 @@ def tearDown(self): # 
pylint: disable=g-missing-super-call def assertCodeEqual(self, expected_code, code): if code != expected_code: - msg = 'Code format mismatch:\n' + msg = 'Code format mismatch:\n' msg += 'Expected:\n >' msg += '\n > '.join(expected_code.splitlines()) msg += '\nActual:\n >' @@ -89,15 +90,18 @@ def assertCodeEqual(self, expected_code, code): self.fail(msg) def testFormatFile(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ if True: pass """) - expected_formatted_code_pep8 = textwrap.dedent(u"""\ + expected_formatted_code_pep8 = textwrap.dedent( + u"""\ if True: pass """) - expected_formatted_code_yapf = textwrap.dedent(u"""\ + expected_formatted_code_yapf = textwrap.dedent( + u"""\ if True: pass """) @@ -109,7 +113,8 @@ def testFormatFile(self): self.assertCodeEqual(expected_formatted_code_yapf, formatted_code) def testDisableLinesPattern(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ if a: b # yapf: disable @@ -117,7 +122,8 @@ def testDisableLinesPattern(self): if h: i """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ if a: b # yapf: disable @@ -130,7 +136,8 @@ def testDisableLinesPattern(self): self.assertCodeEqual(expected_formatted_code, formatted_code) def testDisableAndReenableLinesPattern(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ if a: b # yapf: disable @@ -139,7 +146,8 @@ def testDisableAndReenableLinesPattern(self): if h: i """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ if a: b # yapf: disable @@ -153,7 +161,8 @@ def testDisableAndReenableLinesPattern(self): self.assertCodeEqual(expected_formatted_code, formatted_code) def testDisablePartOfMultilineComment(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ if a: b # This is a multiline comment that 
disables YAPF. @@ -165,7 +174,8 @@ def testDisablePartOfMultilineComment(self): if h: i """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ if a: b # This is a multiline comment that disables YAPF. @@ -180,7 +190,8 @@ def testDisablePartOfMultilineComment(self): formatted_code, _, _ = yapf_api.FormatFile(filepath, style_config='pep8') self.assertCodeEqual(expected_formatted_code, formatted_code) - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ def foo_function(): # some comment # yapf: disable @@ -197,7 +208,8 @@ def foo_function(): self.assertCodeEqual(code, formatted_code) def testEnabledDisabledSameComment(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ # yapf: disable a(bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb, ccccccccccccccccccccccccccccccc, ddddddddddddddddddddddd, eeeeeeeeeeeeeeeeeeeeeeeeeee) # yapf: enable @@ -210,21 +222,24 @@ def testEnabledDisabledSameComment(self): self.assertCodeEqual(code, formatted_code) def testFormatFileLinesSelection(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ if a: b if f: g if h: i """) - expected_formatted_code_lines1and2 = textwrap.dedent(u"""\ + expected_formatted_code_lines1and2 = textwrap.dedent( + u"""\ if a: b if f: g if h: i """) - expected_formatted_code_lines3 = textwrap.dedent(u"""\ + expected_formatted_code_lines3 = textwrap.dedent( + u"""\ if a: b if f: g @@ -240,7 +255,8 @@ def testFormatFileLinesSelection(self): self.assertCodeEqual(expected_formatted_code_lines3, formatted_code) def testFormatFileDiff(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ if True: pass """) @@ -250,7 +266,7 @@ def testFormatFileDiff(self): def testFormatFileInPlace(self): unformatted_code = u'True==False\n' - formatted_code = u'True == False\n' + formatted_code = u'True == False\n' with utils.TempFileContents(self.test_tmpdir, unformatted_code) 
as filepath: result, _, _ = yapf_api.FormatFile(filepath, in_place=True) self.assertEqual(result, None) @@ -268,17 +284,19 @@ def testFormatFileInPlace(self): print_diff=True) def testNoFile(self): - stream = py3compat.StringIO() + stream = py3compat.StringIO() handler = logging.StreamHandler(stream) - logger = logging.getLogger('mylogger') + logger = logging.getLogger('mylogger') logger.addHandler(handler) self.assertRaises( IOError, yapf_api.FormatFile, 'not_a_file.py', logger=logger.error) - self.assertEqual(stream.getvalue(), - "[Errno 2] No such file or directory: 'not_a_file.py'\n") + self.assertEqual( + stream.getvalue(), + "[Errno 2] No such file or directory: 'not_a_file.py'\n") def testCommentsUnformatted(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ foo = [# A list of things # bork 'one', @@ -290,7 +308,8 @@ def testCommentsUnformatted(self): self.assertCodeEqual(code, formatted_code) def testDisabledHorizontalFormattingOnNewLine(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ # yapf: disable a = [ 1] @@ -301,12 +320,14 @@ def testDisabledHorizontalFormattingOnNewLine(self): self.assertCodeEqual(code, formatted_code) def testSplittingSemicolonStatements(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ def f(): x = y + 42 ; z = n * 42 if True: a += 1 ; b += 1; c += 1 """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ def f(): x = y + 42 z = n * 42 @@ -320,12 +341,14 @@ def f(): self.assertCodeEqual(expected_formatted_code, formatted_code) def testSemicolonStatementsDisabled(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ def f(): x = y + 42 ; z = n * 42 # yapf: disable if True: a += 1 ; b += 1; c += 1 """) - expected_formatted_code = textwrap.dedent(u"""\ + expected_formatted_code = textwrap.dedent( + u"""\ def f(): x = y + 42 ; z = n * 42 # yapf: disable 
if True: @@ -338,7 +361,8 @@ def f(): self.assertCodeEqual(expected_formatted_code, formatted_code) def testDisabledSemiColonSeparatedStatements(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ # yapf: disable if True: a ; b """) @@ -347,7 +371,8 @@ def testDisabledSemiColonSeparatedStatements(self): self.assertCodeEqual(code, formatted_code) def testDisabledMultilineStringInDictionary(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ # yapf: disable A = [ @@ -366,7 +391,8 @@ def testDisabledMultilineStringInDictionary(self): self.assertCodeEqual(code, formatted_code) def testDisabledWithPrecedingText(self): - code = textwrap.dedent(u"""\ + code = textwrap.dedent( + u"""\ # TODO(fix formatting): yapf: disable A = [ @@ -402,11 +428,8 @@ def setUpClass(cls): # pylint: disable=g-missing-super-call def tearDownClass(cls): # pylint: disable=g-missing-super-call shutil.rmtree(cls.test_tmpdir) - def assertYapfReformats(self, - unformatted, - expected, - extra_options=None, - env=None): + def assertYapfReformats( + self, unformatted, expected, extra_options=None, env=None): """Check that yapf reformats the given code as expected. Invokes yapf in a subprocess, piping the unformatted code into its stdin. @@ -419,7 +442,7 @@ def assertYapfReformats(self, env: dict of environment variables. 
""" cmdline = YAPF_BINARY + (extra_options or []) - p = subprocess.Popen( + p = subprocess.Popen( cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, @@ -432,27 +455,30 @@ def assertYapfReformats(self, @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testUnicodeEncodingPipedToFile(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ def foo(): print('⇒') """) - with utils.NamedTempFile( - dirname=self.test_tmpdir, suffix='.py') as (out, _): - with utils.TempFileContents( - self.test_tmpdir, unformatted_code, suffix='.py') as filepath: + with utils.NamedTempFile(dirname=self.test_tmpdir, + suffix='.py') as (out, _): + with utils.TempFileContents(self.test_tmpdir, unformatted_code, + suffix='.py') as filepath: subprocess.check_call(YAPF_BINARY + ['--diff', filepath], stdout=out) def testInPlaceReformatting(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ def foo(): x = 37 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): x = 37 """) - with utils.TempFileContents( - self.test_tmpdir, unformatted_code, suffix='.py') as filepath: + with utils.TempFileContents(self.test_tmpdir, unformatted_code, + suffix='.py') as filepath: p = subprocess.Popen(YAPF_BINARY + ['--in-place', filepath]) p.wait() with io.open(filepath, mode='r', newline='') as fd: @@ -460,10 +486,10 @@ def foo(): self.assertEqual(reformatted_code, expected_formatted_code) def testInPlaceReformattingBlank(self): - unformatted_code = u'\n\n' + unformatted_code = u'\n\n' expected_formatted_code = u'\n' - with utils.TempFileContents( - self.test_tmpdir, unformatted_code, suffix='.py') as filepath: + with utils.TempFileContents(self.test_tmpdir, unformatted_code, + suffix='.py') as filepath: p = subprocess.Popen(YAPF_BINARY + ['--in-place', filepath]) p.wait() with io.open(filepath, mode='r', encoding='utf-8', newline='') as fd: @@ -471,10 
+497,10 @@ def testInPlaceReformattingBlank(self): self.assertEqual(reformatted_code, expected_formatted_code) def testInPlaceReformattingEmpty(self): - unformatted_code = u'' + unformatted_code = u'' expected_formatted_code = u'' - with utils.TempFileContents( - self.test_tmpdir, unformatted_code, suffix='.py') as filepath: + with utils.TempFileContents(self.test_tmpdir, unformatted_code, + suffix='.py') as filepath: p = subprocess.Popen(YAPF_BINARY + ['--in-place', filepath]) p.wait() with io.open(filepath, mode='r', encoding='utf-8', newline='') as fd: @@ -482,31 +508,37 @@ def testInPlaceReformattingEmpty(self): self.assertEqual(reformatted_code, expected_formatted_code) def testReadFromStdin(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): x = 37 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): x = 37 """) self.assertYapfReformats(unformatted_code, expected_formatted_code) def testReadFromStdinWithEscapedStrings(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ s = "foo\\nbar" """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ s = "foo\\nbar" """) self.assertYapfReformats(unformatted_code, expected_formatted_code) def testSetYapfStyle(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): # trail x = 37 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): # trail x = 37 """) @@ -516,15 +548,18 @@ def foo(): # trail extra_options=['--style=yapf']) def testSetCustomStyleBasedOnYapf(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): # trail x = 37 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): # trail x = 37 
""") - style_file = textwrap.dedent(u'''\ + style_file = textwrap.dedent( + u'''\ [style] based_on_style = yapf spaces_before_comment = 4 @@ -536,15 +571,18 @@ def foo(): # trail extra_options=['--style={0}'.format(stylepath)]) def testSetCustomStyleSpacesBeforeComment(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a_very_long_statement_that_extends_way_beyond # Comment short # This is a shorter statement """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a_very_long_statement_that_extends_way_beyond # Comment short # This is a shorter statement """) # noqa - style_file = textwrap.dedent(u'''\ + style_file = textwrap.dedent( + u'''\ [style] spaces_before_comment = 15, 20 ''') @@ -555,26 +593,28 @@ def testSetCustomStyleSpacesBeforeComment(self): extra_options=['--style={0}'.format(stylepath)]) def testReadSingleLineCodeFromStdin(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ if True: pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: pass """) self.assertYapfReformats(unformatted_code, expected_formatted_code) def testEncodingVerification(self): - unformatted_code = textwrap.dedent(u"""\ + unformatted_code = textwrap.dedent( + u"""\ '''The module docstring.''' # -*- coding: utf-8 -*- def f(): x = 37 """) - with utils.NamedTempFile( - suffix='.py', dirname=self.test_tmpdir) as (out, _): - with utils.TempFileContents( - self.test_tmpdir, unformatted_code, suffix='.py') as filepath: + with utils.NamedTempFile(suffix='.py', + dirname=self.test_tmpdir) as (out, _): + with utils.TempFileContents(self.test_tmpdir, unformatted_code, + suffix='.py') as filepath: try: subprocess.check_call(YAPF_BINARY + ['--diff', filepath], stdout=out) except subprocess.CalledProcessError as e: @@ -582,7 +622,8 @@ def f(): self.assertEqual(e.returncode, 1) # pylint: 
disable=g-assert-in-except # noqa def testReformattingSpecificLines(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -592,7 +633,8 @@ def g(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -612,14 +654,16 @@ def g(): extra_options=['--lines', '1-2']) def testOmitFormattingLinesBeforeDisabledFunctionComment(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import sys # Comment def some_func(x): x = ["badly" , "formatted","line" ] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import sys # Comment @@ -632,7 +676,8 @@ def some_func(x): extra_options=['--lines', '5-5']) def testReformattingSkippingLines(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -643,7 +688,8 @@ def g(): pass # yapf: enable """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -659,7 +705,8 @@ def g(): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testReformattingSkippingToEndOfFile(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code 
= textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -676,7 +723,8 @@ def e(): 'bbbbbbb'): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -698,7 +746,8 @@ def e(): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testReformattingSkippingSingleLine(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -707,7 +756,8 @@ def g(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): # yapf: disable pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -721,13 +771,15 @@ def g(): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testDisableWholeDataStructure(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ A = set([ 'hello', 'world', ]) # yapf: disable """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ A = set([ 'hello', 'world', @@ -736,14 +788,16 @@ def testDisableWholeDataStructure(self): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testDisableButAdjustIndentations(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class SplitPenaltyTest(unittest.TestCase): 
def testUnbreakable(self): self._CheckPenalties(tree, [ ]) # yapf: disable """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class SplitPenaltyTest(unittest.TestCase): def testUnbreakable(self): @@ -753,7 +807,8 @@ def testUnbreakable(self): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testRetainingHorizontalWhitespace(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -762,7 +817,8 @@ def g(): if (xxxxxxxxxxxx.yyyyyyyy (zzzzzzzzzzzzz [0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): # yapf: disable pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -776,7 +832,8 @@ def g(): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testRetainingVerticalWhitespace(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -788,7 +845,8 @@ def g(): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -806,7 +864,8 @@ def g(): expected_formatted_code, extra_options=['--lines', '1-2']) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if a: b @@ -823,7 +882,8 @@ def g(): # trailing whitespace """) - 
expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if a: b @@ -843,7 +903,8 @@ def g(): expected_formatted_code, extra_options=['--lines', '3-3', '--lines', '13-13']) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ ''' docstring @@ -856,7 +917,7 @@ def g(): unformatted_code, unformatted_code, extra_options=['--lines', '2-2']) def testVerticalSpacingWithCommentWithContinuationMarkers(self): - unformatted_code = """\ + unformatted_code = """\ # \\ # \\ # \\ @@ -878,13 +939,15 @@ def testVerticalSpacingWithCommentWithContinuationMarkers(self): extra_options=['--lines', '1-1']) def testRetainingSemicolonsWhenSpecifyingLines(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a = line_to_format def f(): x = y + 42; z = n * 42 if True: a += 1 ; b += 1 ; c += 1 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a = line_to_format def f(): x = y + 42; z = n * 42 @@ -896,7 +959,8 @@ def f(): extra_options=['--lines', '1-1']) def testDisabledMultilineStrings(self): - unformatted_code = textwrap.dedent('''\ + unformatted_code = textwrap.dedent( + '''\ foo=42 def f(): email_text += """This is a really long docstring that goes over the column limit and is multi-line.

@@ -906,7 +970,8 @@ def f(): """ ''') # noqa - expected_formatted_code = textwrap.dedent('''\ + expected_formatted_code = textwrap.dedent( + '''\ foo = 42 def f(): email_text += """This is a really long docstring that goes over the column limit and is multi-line.

@@ -922,7 +987,8 @@ def f(): extra_options=['--lines', '1-1']) def testDisableWhenSpecifyingLines(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ # yapf: disable A = set([ 'hello', @@ -934,7 +1000,8 @@ def testDisableWhenSpecifyingLines(self): 'world', ]) # yapf: disable """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ # yapf: disable A = set([ 'hello', @@ -952,7 +1019,8 @@ def testDisableWhenSpecifyingLines(self): extra_options=['--lines', '1-10']) def testDisableFormattingInDataLiteral(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def horrible(): oh_god() why_would_you() @@ -971,7 +1039,8 @@ def still_horrible(): 'that' ] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def horrible(): oh_god() why_would_you() @@ -992,7 +1061,8 @@ def still_horrible(): extra_options=['--lines', '14-15']) def testRetainVerticalFormattingBetweenDisabledAndEnabledLines(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class A(object): def aaaaaaaaaaaaa(self): c = bbbbbbbbb.ccccccccc('challenge', 0, 1, 10) @@ -1003,7 +1073,8 @@ def aaaaaaaaaaaaa(self): gggggggggggg.hhhhhhhhh(c, c.ffffffffffff)) iiiii = jjjjjjjjjjjjjj.iiiii """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class A(object): def aaaaaaaaaaaaa(self): c = bbbbbbbbb.ccccccccc('challenge', 0, 1, 10) @@ -1018,7 +1089,8 @@ def aaaaaaaaaaaaa(self): extra_options=['--lines', '4-7']) def testRetainVerticalFormattingBetweenDisabledLines(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class A(object): def aaaaaaaaaaaaa(self): pass @@ -1027,7 +1099,8 @@ def aaaaaaaaaaaaa(self): def bbbbbbbbbbbbb(self): # 5 pass """) - expected_formatted_code = textwrap.dedent("""\ + 
expected_formatted_code = textwrap.dedent( + """\ class A(object): def aaaaaaaaaaaaa(self): pass @@ -1042,7 +1115,8 @@ def bbbbbbbbbbbbb(self): # 5 extra_options=['--lines', '4-4']) def testFormatLinesSpecifiedInMiddleOfExpression(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ class A(object): def aaaaaaaaaaaaa(self): c = bbbbbbbbb.ccccccccc('challenge', 0, 1, 10) @@ -1053,7 +1127,8 @@ def aaaaaaaaaaaaa(self): gggggggggggg.hhhhhhhhh(c, c.ffffffffffff)) iiiii = jjjjjjjjjjjjjj.iiiii """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ class A(object): def aaaaaaaaaaaaa(self): c = bbbbbbbbb.ccccccccc('challenge', 0, 1, 10) @@ -1068,7 +1143,8 @@ def aaaaaaaaaaaaa(self): extra_options=['--lines', '5-6']) def testCommentFollowingMultilineString(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): '''First line. Second line. @@ -1076,7 +1152,8 @@ def foo(): x = '''hello world''' # second comment return 42 # another comment """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): '''First line. Second line. 
@@ -1091,12 +1168,14 @@ def foo(): def testDedentClosingBracket(self): # no line-break on the first argument, not dedenting closing brackets - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def overly_long_function_name(first_argument_on_the_same_line, second_argument_makes_the_line_too_long): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def overly_long_function_name(first_argument_on_the_same_line, second_argument_makes_the_line_too_long): pass @@ -1114,7 +1193,8 @@ def overly_long_function_name(first_argument_on_the_same_line, # extra_options=['--style=facebook']) # line-break before the first argument, dedenting closing brackets if set - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def overly_long_function_name( first_argument_on_the_same_line, second_argument_makes_the_line_too_long): @@ -1126,7 +1206,8 @@ def overly_long_function_name( # second_argument_makes_the_line_too_long): # pass # """) - expected_formatted_fb_code = textwrap.dedent("""\ + expected_formatted_fb_code = textwrap.dedent( + """\ def overly_long_function_name( first_argument_on_the_same_line, second_argument_makes_the_line_too_long ): @@ -1144,14 +1225,16 @@ def overly_long_function_name( # extra_options=['--style=pep8']) def testCoalesceBrackets(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ some_long_function_name_foo( { 'first_argument_of_the_thing': id, 'second_argument_of_the_thing': "some thing" } )""") - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ some_long_function_name_foo({ 'first_argument_of_the_thing': id, 'second_argument_of_the_thing': "some thing" @@ -1159,7 +1242,8 @@ def testCoalesceBrackets(self): """) with utils.NamedTempFile(dirname=self.test_tmpdir, mode='w') as (f, name): f.write( - textwrap.dedent(u'''\ + textwrap.dedent( + 
u'''\ [style] column_limit=82 coalesce_brackets = True @@ -1171,12 +1255,14 @@ def testCoalesceBrackets(self): extra_options=['--style={0}'.format(name)]) def testPseudoParenSpaces(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo(): def bar(): return {msg_id: author for author, msg_id in reader} """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo(): def bar(): return {msg_id: author for author, msg_id in reader} @@ -1187,7 +1273,8 @@ def bar(): extra_options=['--lines', '1-1', '--style', 'yapf']) def testMultilineCommentFormattingDisabled(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ # This is a comment FOO = { aaaaaaaa.ZZZ: [ @@ -1201,7 +1288,8 @@ def testMultilineCommentFormattingDisabled(self): '#': lambda x: x # do nothing } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ # This is a comment FOO = { aaaaaaaa.ZZZ: [ @@ -1221,14 +1309,16 @@ def testMultilineCommentFormattingDisabled(self): extra_options=['--lines', '1-1', '--style', 'yapf']) def testTrailingCommentsWithDisabledFormatting(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ import os SCOPES = [ 'hello world' # This is a comment. 
] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ import os SCOPES = [ @@ -1241,7 +1331,7 @@ def testTrailingCommentsWithDisabledFormatting(self): extra_options=['--lines', '1-1', '--style', 'yapf']) def testUseTabs(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(): if True: pass @@ -1251,7 +1341,7 @@ def foo_function(): if True: pass """ # noqa: W191,E101 - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf USE_TABS = true @@ -1264,7 +1354,7 @@ def foo_function(): extra_options=['--style={0}'.format(stylepath)]) def testUseTabsWith(self): - unformatted_code = """\ + unformatted_code = """\ def f(): return ['hello', 'world',] """ @@ -1275,7 +1365,7 @@ def f(): 'world', ] """ # noqa: W191,E101 - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf USE_TABS = true @@ -1288,7 +1378,7 @@ def f(): extra_options=['--style={0}'.format(stylepath)]) def testUseTabsContinuationAlignStyleFixed(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(arg1, arg2, arg3): return ['hello', 'world',] """ @@ -1300,7 +1390,7 @@ def foo_function( 'world', ] """ # noqa: W191,E101 - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf USE_TABS = true @@ -1316,7 +1406,7 @@ def foo_function( extra_options=['--style={0}'.format(stylepath)]) def testUseTabsContinuationAlignStyleVAlignRight(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(arg1, arg2, arg3): return ['hello', 'world',] """ @@ -1328,7 +1418,7 @@ def foo_function(arg1, arg2, 'world', ] """ # noqa: W191,E101 - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf USE_TABS = true @@ -1344,7 +1434,7 @@ def foo_function(arg1, arg2, extra_options=['--style={0}'.format(stylepath)]) def testUseSpacesContinuationAlignStyleFixed(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(arg1, 
arg2, arg3): return ['hello', 'world',] """ @@ -1356,7 +1446,7 @@ def foo_function( 'world', ] """ - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf COLUMN_LIMIT=32 @@ -1371,7 +1461,7 @@ def foo_function( extra_options=['--style={0}'.format(stylepath)]) def testUseSpacesContinuationAlignStyleVAlignRight(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(arg1, arg2, arg3): return ['hello', 'world',] """ @@ -1383,7 +1473,7 @@ def foo_function(arg1, arg2, 'world', ] """ - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf COLUMN_LIMIT=32 @@ -1398,11 +1488,13 @@ def foo_function(arg1, arg2, extra_options=['--style={0}'.format(stylepath)]) def testStyleOutputRoundTrip(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def foo_function(): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def foo_function(): pass """) @@ -1422,7 +1514,8 @@ def foo_function(): extra_options=['--style={0}'.format(stylepath)]) def testSpacingBeforeComments(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ A = 42 @@ -1432,7 +1525,8 @@ def x(): def _(): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ A = 42 @@ -1448,7 +1542,8 @@ def _(): extra_options=['--lines', '1-2']) def testSpacingBeforeCommentsInDicts(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ A=42 X = { @@ -1463,7 +1558,8 @@ def testSpacingBeforeCommentsInDicts(self): 'BROKEN' } """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ A = 42 X = { @@ -1484,7 +1580,8 @@ def testSpacingBeforeCommentsInDicts(self): extra_options=['--style', 'yapf', '--lines', '1-1']) def testDisableWithLinesOption(self): - unformatted_code = textwrap.dedent("""\ + 
unformatted_code = textwrap.dedent( + """\ # yapf_lines_bug.py # yapf: disable def outer_func(): @@ -1493,7 +1590,8 @@ def inner_func(): return # yapf: enable """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ # yapf_lines_bug.py # yapf: disable def outer_func(): @@ -1509,7 +1607,7 @@ def inner_func(): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testNoSpacesAroundBinaryOperators(self): - unformatted_code = """\ + unformatted_code = """\ a = 4-b/c@d**37 """ expected_formatted_code = """\ @@ -1526,7 +1624,7 @@ def testNoSpacesAroundBinaryOperators(self): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testCP936Encoding(self): - unformatted_code = 'print("中文")\n' + unformatted_code = 'print("中文")\n' expected_formatted_code = 'print("中文")\n' self.assertYapfReformats( unformatted_code, @@ -1534,7 +1632,7 @@ def testCP936Encoding(self): env={'PYTHONIOENCODING': 'cp936'}) def testDisableWithLineRanges(self): - unformatted_code = """\ + unformatted_code = """\ # yapf: disable a = [ 1, @@ -1574,8 +1672,8 @@ class DiffIndentTest(unittest.TestCase): @staticmethod def _OwnStyle(): - my_style = style.CreatePEP8Style() - my_style['INDENT_WIDTH'] = 3 + my_style = style.CreatePEP8Style() + my_style['INDENT_WIDTH'] = 3 my_style['CONTINUATION_INDENT_WIDTH'] = 3 return my_style @@ -1585,11 +1683,13 @@ def _Check(self, unformatted_code, expected_formatted_code): self.assertEqual(expected_formatted_code, formatted_code) def testSimple(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ for i in range(5): print('bar') """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ for i in range(5): print('bar') """) @@ -1600,7 +1700,7 @@ class HorizontallyAlignedTrailingCommentsTest(yapf_test_helper.YAPFTest): @staticmethod def _OwnStyle(): - my_style = style.CreatePEP8Style() + my_style = style.CreatePEP8Style() 
my_style['SPACES_BEFORE_COMMENT'] = [ 15, 25, @@ -1614,7 +1714,8 @@ def _Check(self, unformatted_code, expected_formatted_code): self.assertCodeEqual(expected_formatted_code, formatted_code) def testSimple(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ foo = '1' # Aligned at first list value foo = '2__<15>' # Aligned at second list value @@ -1623,7 +1724,8 @@ def testSimple(self): foo = '4______________________<35>' # Aligned beyond list values """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ foo = '1' # Aligned at first list value foo = '2__<15>' # Aligned at second list value @@ -1635,7 +1737,8 @@ def testSimple(self): self._Check(unformatted_code, expected_formatted_code) def testBlock(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1643,7 +1746,8 @@ def testBlock(self): # Line 5 # Line 6 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1654,7 +1758,8 @@ def testBlock(self): self._Check(unformatted_code, expected_formatted_code) def testBlockWithLongLine(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ func(1) # Line 1 func___________________(2) # Line 2 # Line 3 @@ -1662,7 +1767,8 @@ def testBlockWithLongLine(self): # Line 5 # Line 6 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ func(1) # Line 1 func___________________(2) # Line 2 # Line 3 @@ -1673,7 +1779,8 @@ def testBlockWithLongLine(self): self._Check(unformatted_code, expected_formatted_code) def testBlockFuncSuffix(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1684,7 +1791,8 @@ def testBlockFuncSuffix(self): def 
Func(): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1699,7 +1807,8 @@ def Func(): self._Check(unformatted_code, expected_formatted_code) def testBlockCommentSuffix(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1709,7 +1818,8 @@ def testBlockCommentSuffix(self): # Aligned with prev comment block """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1722,7 +1832,8 @@ def testBlockCommentSuffix(self): self._Check(unformatted_code, expected_formatted_code) def testBlockIndentedFuncSuffix(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: func(1) # Line 1 func(2) # Line 2 @@ -1736,7 +1847,8 @@ def testBlockIndentedFuncSuffix(self): def Func(): pass """) # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: func(1) # Line 1 func(2) # Line 2 @@ -1755,7 +1867,8 @@ def Func(): self._Check(unformatted_code, expected_formatted_code) def testBlockIndentedCommentSuffix(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: func(1) # Line 1 func(2) # Line 2 @@ -1766,7 +1879,8 @@ def testBlockIndentedCommentSuffix(self): # Not aligned """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: func(1) # Line 1 func(2) # Line 2 @@ -1780,7 +1894,8 @@ def testBlockIndentedCommentSuffix(self): self._Check(unformatted_code, expected_formatted_code) def testBlockMultiIndented(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ if True: if True: if True: @@ -1793,7 +1908,8 @@ def testBlockMultiIndented(self): # Not aligned 
""") # noqa - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ if True: if True: if True: @@ -1809,7 +1925,8 @@ def testBlockMultiIndented(self): self._Check(unformatted_code, expected_formatted_code) def testArgs(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ def MyFunc( arg1, # Desc 1 arg2, # Desc 2 @@ -1820,7 +1937,8 @@ def MyFunc( ): pass """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ def MyFunc( arg1, # Desc 1 arg2, # Desc 2 @@ -1834,7 +1952,8 @@ def MyFunc( self._Check(unformatted_code, expected_formatted_code) def testDisableBlock(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ a() # comment 1 b() # comment 2 @@ -1846,7 +1965,8 @@ def testDisableBlock(self): e() # comment 5 f() # comment 6 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ a() # comment 1 b() # comment 2 @@ -1861,13 +1981,15 @@ def testDisableBlock(self): self._Check(unformatted_code, expected_formatted_code) def testDisabledLine(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ short # comment 1 do_not_touch1 # yapf: disable do_not_touch2 # yapf: disable a_longer_statement # comment 2 """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ short # comment 1 do_not_touch1 # yapf: disable do_not_touch2 # yapf: disable @@ -1880,9 +2002,9 @@ class _SpacesAroundDictListTupleTestImpl(unittest.TestCase): @staticmethod def _OwnStyle(): - my_style = style.CreatePEP8Style() - my_style['DISABLE_ENDING_COMMA_HEURISTIC'] = True - my_style['SPLIT_ALL_COMMA_SEPARATED_VALUES'] = False + my_style = style.CreatePEP8Style() + my_style['DISABLE_ENDING_COMMA_HEURISTIC'] = True + my_style['SPLIT_ALL_COMMA_SEPARATED_VALUES'] = False 
my_style['SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED'] = False return my_style @@ -1899,13 +2021,14 @@ class SpacesAroundDictTest(_SpacesAroundDictListTupleTestImpl): @classmethod def _OwnStyle(cls): - style = super(SpacesAroundDictTest, cls)._OwnStyle() + style = super(SpacesAroundDictTest, cls)._OwnStyle() style['SPACES_AROUND_DICT_DELIMITERS'] = True return style def testStandard(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ {1 : 2} {k:v for k, v in other.items()} {k for k in [1, 2, 3]} @@ -1922,7 +2045,8 @@ def testStandard(self): [1, 2] (3, 4) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ { 1: 2 } { k: v for k, v in other.items() } { k for k in [1, 2, 3] } @@ -1947,13 +2071,14 @@ class SpacesAroundListTest(_SpacesAroundDictListTupleTestImpl): @classmethod def _OwnStyle(cls): - style = super(SpacesAroundListTest, cls)._OwnStyle() + style = super(SpacesAroundListTest, cls)._OwnStyle() style['SPACES_AROUND_LIST_DELIMITERS'] = True return style def testStandard(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ [a,b,c] [4,5,] [6, [7, 8], 9] @@ -1974,7 +2099,8 @@ def testStandard(self): {a: b} (1, 2) """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( + """\ [ a, b, c ] [ 4, 5, ] [ 6, [ 7, 8 ], 9 ] @@ -2003,13 +2129,14 @@ class SpacesAroundTupleTest(_SpacesAroundDictListTupleTestImpl): @classmethod def _OwnStyle(cls): - style = super(SpacesAroundTupleTest, cls)._OwnStyle() + style = super(SpacesAroundTupleTest, cls)._OwnStyle() style['SPACES_AROUND_TUPLE_DELIMITERS'] = True return style def testStandard(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent( + """\ (0, 1) (2, 3) (4, 5, 6,) @@ -2032,7 +2159,8 @@ def testStandard(self): {a: b} [3, 4] """) - expected_formatted_code = textwrap.dedent("""\ + expected_formatted_code = textwrap.dedent( 
+ """\ ( 0, 1 ) ( 2, 3 ) ( 4, 5, 6, ) diff --git a/yapftests/yapf_test_helper.py b/yapftests/yapf_test_helper.py index cb56ec865..b95212a8b 100644 --- a/yapftests/yapf_test_helper.py +++ b/yapftests/yapf_test_helper.py @@ -39,7 +39,7 @@ def __init__(self, *args): def assertCodeEqual(self, expected_code, code): if code != expected_code: - msg = ['Code format mismatch:', 'Expected:'] + msg = ['Code format mismatch:', 'Expected:'] linelen = style.Get('COLUMN_LIMIT') for line in expected_code.splitlines(): if len(line) > linelen: From aa3f8a7554ecac2a67ca5d3a15c0ce6747b86f60 Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Tue, 3 Jan 2023 10:30:10 +0100 Subject: [PATCH 10/11] change the format back to yapf-based --- yapf/__init__.py | 98 +- yapf/pyparser/pyparser.py | 22 +- yapf/pyparser/pyparser_utils.py | 8 +- yapf/pyparser/split_penalty_visitor.py | 49 +- yapf/pytree/blank_line_calculator.py | 60 +- yapf/pytree/comment_splicer.py | 44 +- yapf/pytree/pytree_unwrapper.py | 42 +- yapf/pytree/pytree_utils.py | 36 +- yapf/pytree/pytree_visitor.py | 2 +- yapf/pytree/split_penalty.py | 87 +- yapf/pytree/subtype_assigner.py | 41 +- yapf/third_party/yapf_diff/yapf_diff.py | 7 +- yapf/yapflib/errors.py | 4 +- yapf/yapflib/file_resources.py | 26 +- yapf/yapflib/format_decision_state.py | 183 ++-- yapf/yapflib/format_token.py | 106 ++- yapf/yapflib/logical_line.py | 34 +- yapf/yapflib/object_state.py | 69 +- yapf/yapflib/py3compat.py | 14 +- yapf/yapflib/reformatter.py | 115 ++- yapf/yapflib/split_penalty.py | 16 +- yapf/yapflib/style.py | 256 ++---- yapf/yapflib/subtypes.py | 48 +- yapf/yapflib/verifier.py | 2 +- yapf/yapflib/yapf_api.py | 73 +- yapftests/blank_line_calculator_test.py | 69 +- yapftests/comment_splicer_test.py | 62 +- yapftests/file_resources_test.py | 137 +-- yapftests/format_decision_state_test.py | 8 +- yapftests/line_joiner_test.py | 18 +- yapftests/logical_line_test.py | 22 +- yapftests/main_test.py | 11 +- yapftests/pytree_unwrapper_test.py | 273 +++--- 
yapftests/pytree_utils_test.py | 62 +- yapftests/pytree_visitor_test.py | 10 +- yapftests/reformatter_basic_test.py | 981 ++++++++------------- yapftests/reformatter_buganizer_test.py | 513 ++++------- yapftests/reformatter_facebook_test.py | 93 +- yapftests/reformatter_pep8_test.py | 272 +++--- yapftests/reformatter_python3_test.py | 107 +-- yapftests/reformatter_style_config_test.py | 54 +- yapftests/reformatter_verify_test.py | 26 +- yapftests/split_penalty_test.py | 272 +++--- yapftests/style_test.py | 57 +- yapftests/subtype_assigner_test.py | 421 +++++---- yapftests/utils.py | 34 +- yapftests/yapf_test.py | 500 ++++------- yapftests/yapf_test_helper.py | 2 +- 48 files changed, 2276 insertions(+), 3170 deletions(-) diff --git a/yapf/__init__.py b/yapf/__init__.py index 2b69c1ddc..94e445b59 100644 --- a/yapf/__init__.py +++ b/yapf/__init__.py @@ -55,8 +55,8 @@ def main(argv): Raises: YapfError: if none of the supplied files were Python files. """ - parser = _BuildParser() - args = parser.parse_args(argv[1:]) + parser = _BuildParser() + args = parser.parse_args(argv[1:]) style_config = args.style if args.style_help: @@ -70,9 +70,8 @@ def main(argv): if not args.files: # No arguments specified. Read code from stdin. 
if args.in_place or args.diff: - parser.error( - 'cannot use --in-place or --diff flags when reading ' - 'from stdin') + parser.error('cannot use --in-place or --diff flags when reading ' + 'from stdin') original_source = [] while True: @@ -94,7 +93,7 @@ def main(argv): if style_config is None and not args.no_local_style: style_config = file_resources.GetDefaultStyleForDir(os.getcwd()) - source = [line.rstrip() for line in original_source] + source = [line.rstrip() for line in original_source] source[0] = py3compat.removeBOM(source[0]) try: @@ -116,9 +115,9 @@ def main(argv): exclude_patterns_from_ignore_file = file_resources.GetExcludePatternsForDir( os.getcwd()) - files = file_resources.GetCommandLineFiles( - args.files, args.recursive, - (args.exclude or []) + exclude_patterns_from_ignore_file) + files = file_resources.GetCommandLineFiles(args.files, args.recursive, + (args.exclude or []) + + exclude_patterns_from_ignore_file) if not files: raise errors.YapfError('input filenames did not match any python files') @@ -153,17 +152,16 @@ def _PrintHelp(args): print() -def FormatFiles( - filenames, - lines, - style_config=None, - no_local_style=False, - in_place=False, - print_diff=False, - verify=False, - parallel=False, - quiet=False, - verbose=False): +def FormatFiles(filenames, + lines, + style_config=None, + no_local_style=False, + in_place=False, + print_diff=False, + verify=False, + parallel=False, + quiet=False, + verbose=False): """Format a list of files. 
Arguments: @@ -193,31 +191,28 @@ def FormatFiles( workers = min(multiprocessing.cpu_count(), len(filenames)) with concurrent.futures.ProcessPoolExecutor(workers) as executor: future_formats = [ - executor.submit( - _FormatFile, filename, lines, style_config, no_local_style, - in_place, print_diff, verify, quiet, verbose) - for filename in filenames + executor.submit(_FormatFile, filename, lines, style_config, + no_local_style, in_place, print_diff, verify, quiet, + verbose) for filename in filenames ] for future in concurrent.futures.as_completed(future_formats): changed |= future.result() else: for filename in filenames: - changed |= _FormatFile( - filename, lines, style_config, no_local_style, in_place, print_diff, - verify, quiet, verbose) + changed |= _FormatFile(filename, lines, style_config, no_local_style, + in_place, print_diff, verify, quiet, verbose) return changed -def _FormatFile( - filename, - lines, - style_config=None, - no_local_style=False, - in_place=False, - print_diff=False, - verify=False, - quiet=False, - verbose=False): +def _FormatFile(filename, + lines, + style_config=None, + no_local_style=False, + in_place=False, + print_diff=False, + verify=False, + quiet=False, + verbose=False): """Format an individual file.""" if verbose and not quiet: print('Reformatting %s' % filename) @@ -241,8 +236,8 @@ def _FormatFile( raise errors.YapfError(errors.FormatErrorMsg(e)) if not in_place and not quiet and reformatted_code: - file_resources.WriteReformattedCode( - filename, reformatted_code, encoding, in_place) + file_resources.WriteReformattedCode(filename, reformatted_code, encoding, + in_place) return has_change @@ -326,20 +321,18 @@ def _BuildParser(): parser.add_argument( '--style', action='store', - help=( - 'specify formatting style: either a style name (for example "pep8" ' - 'or "google"), or the name of a file with style settings. 
The ' - 'default is pep8 unless a %s or %s or %s file located in the same ' - 'directory as the source or one of its parent directories ' - '(for stdin, the current directory is used).' % - (style.LOCAL_STYLE, style.SETUP_CONFIG, style.PYPROJECT_TOML))) + help=('specify formatting style: either a style name (for example "pep8" ' + 'or "google"), or the name of a file with style settings. The ' + 'default is pep8 unless a %s or %s or %s file located in the same ' + 'directory as the source or one of its parent directories ' + '(for stdin, the current directory is used).' % + (style.LOCAL_STYLE, style.SETUP_CONFIG, style.PYPROJECT_TOML))) parser.add_argument( '--style-help', action='store_true', - help=( - 'show style settings and exit; this output can be ' - 'saved to .style.yapf to make your settings ' - 'permanent')) + help=('show style settings and exit; this output can be ' + 'saved to .style.yapf to make your settings ' + 'permanent')) parser.add_argument( '--no-local-style', action='store_true', @@ -349,9 +342,8 @@ def _BuildParser(): '-p', '--parallel', action='store_true', - help=( - 'run YAPF in parallel when formatting multiple files. Requires ' - 'concurrent.futures in Python 2.X')) + help=('run YAPF in parallel when formatting multiple files. Requires ' + 'concurrent.futures in Python 2.X')) parser.add_argument( '-vv', '--verbose', diff --git a/yapf/pyparser/pyparser.py b/yapf/pyparser/pyparser.py index b2bffa283..a8a28ebc8 100644 --- a/yapf/pyparser/pyparser.py +++ b/yapf/pyparser/pyparser.py @@ -68,7 +68,7 @@ def ParseCode(unformatted_source, filename=''): ast_tree = ast.parse(unformatted_source, filename) ast.fix_missing_locations(ast_tree) readline = py3compat.StringIO(unformatted_source).readline - tokens = tokenize.generate_tokens(readline) + tokens = tokenize.generate_tokens(readline) except Exception: raise @@ -89,10 +89,10 @@ def _CreateLogicalLines(tokens): Returns: A list of LogicalLines. 
""" - logical_lines = [] + logical_lines = [] cur_logical_line = [] - prev_tok = None - depth = 0 + prev_tok = None + depth = 0 for tok in tokens: tok = py3compat.TokenInfo(*tok) @@ -100,7 +100,7 @@ def _CreateLogicalLines(tokens): # End of a logical line. logical_lines.append(logical_line.LogicalLine(depth, cur_logical_line)) cur_logical_line = [] - prev_tok = None + prev_tok = None elif tok.type == tokenize.INDENT: depth += 1 elif tok.type == tokenize.DEDENT: @@ -117,29 +117,29 @@ def _CreateLogicalLines(tokens): line=prev_tok.line) ctok.lineno = ctok.start[0] ctok.column = ctok.start[1] - ctok.value = '\\' + ctok.value = '\\' cur_logical_line.append(format_token.FormatToken(ctok, 'CONTINUATION')) tok.lineno = tok.start[0] tok.column = tok.start[1] - tok.value = tok.string + tok.value = tok.string cur_logical_line.append( format_token.FormatToken(tok, token.tok_name[tok.type])) prev_tok = tok # Link the FormatTokens in each line together to for a doubly linked list. for line in logical_lines: - previous = line.first + previous = line.first bracket_stack = [previous] if previous.OpensScope() else [] for tok in line.tokens[1:]: - tok.previous_token = previous + tok.previous_token = previous previous.next_token = tok - previous = tok + previous = tok # Set up the "matching_bracket" attribute. 
if tok.OpensScope(): bracket_stack.append(tok) elif tok.ClosesScope(): bracket_stack[-1].matching_bracket = tok - tok.matching_bracket = bracket_stack.pop() + tok.matching_bracket = bracket_stack.pop() return logical_lines diff --git a/yapf/pyparser/pyparser_utils.py b/yapf/pyparser/pyparser_utils.py index 149e0a280..3f17b15a4 100644 --- a/yapf/pyparser/pyparser_utils.py +++ b/yapf/pyparser/pyparser_utils.py @@ -31,8 +31,8 @@ def GetTokens(logical_lines, node): """Get a list of tokens within the node's range from the logical lines.""" - start = TokenStart(node) - end = TokenEnd(node) + start = TokenStart(node) + end = TokenEnd(node) tokens = [] for line in logical_lines: @@ -46,8 +46,8 @@ def GetTokens(logical_lines, node): def GetTokensInSubRange(tokens, node): """Get a subset of tokens representing the node.""" - start = TokenStart(node) - end = TokenEnd(node) + start = TokenStart(node) + end = TokenEnd(node) tokens_in_range = [] for tok in tokens: diff --git a/yapf/pyparser/split_penalty_visitor.py b/yapf/pyparser/split_penalty_visitor.py index 946bd949f..047b48a3d 100644 --- a/yapf/pyparser/split_penalty_visitor.py +++ b/yapf/pyparser/split_penalty_visitor.py @@ -67,14 +67,13 @@ def visit_FunctionDef(self, node): _SetPenalty(token, split_penalty.UNBREAKABLE) if node.returns: - start_index = pyutils.GetTokenIndex( - tokens, pyutils.TokenStart(node.returns)) - _IncreasePenalty( - tokens[start_index - 1:start_index + 1], - split_penalty.VERY_STRONGLY_CONNECTED) + start_index = pyutils.GetTokenIndex(tokens, + pyutils.TokenStart(node.returns)) + _IncreasePenalty(tokens[start_index - 1:start_index + 1], + split_penalty.VERY_STRONGLY_CONNECTED) end_index = pyutils.GetTokenIndex(tokens, pyutils.TokenEnd(node.returns)) - _IncreasePenalty( - tokens[start_index + 1:end_index], split_penalty.STRONGLY_CONNECTED) + _IncreasePenalty(tokens[start_index + 1:end_index], + split_penalty.STRONGLY_CONNECTED) return self.generic_visit(node) @@ -104,7 +103,7 @@ def 
visit_ClassDef(self, node): for decorator in node.decorator_list: # Don't split after the '@'. - decorator_range = self._GetTokens(decorator) + decorator_range = self._GetTokens(decorator) decorator_range[0].split_penalty = split_penalty.UNBREAKABLE return self.generic_visit(node) @@ -264,7 +263,7 @@ def visit_BoolOp(self, node): # Lower the split penalty to allow splitting before or after the logical # operator. split_before_operator = style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR') - operator_indices = [ + operator_indices = [ pyutils.GetNextTokenIndex(tokens, pyutils.TokenEnd(value)) for value in node.values[:-1] ] @@ -293,8 +292,8 @@ def visit_BinOp(self, node): # Lower the split penalty to allow splitting before or after the arithmetic # operator. - operator_index = pyutils.GetNextTokenIndex( - tokens, pyutils.TokenEnd(node.left)) + operator_index = pyutils.GetNextTokenIndex(tokens, + pyutils.TokenEnd(node.left)) if not style.Get('SPLIT_BEFORE_ARITHMETIC_OPERATOR'): operator_index += 1 @@ -371,7 +370,7 @@ def visit_ListComp(self, node): # is_async=0), # ... # ]) - tokens = self._GetTokens(node) + tokens = self._GetTokens(node) element = pyutils.GetTokensInSubRange(tokens, node.elt) _IncreasePenalty(element[1:], split_penalty.EXPR) @@ -395,7 +394,7 @@ def visit_SetComp(self, node): # is_async=0), # ... # ]) - tokens = self._GetTokens(node) + tokens = self._GetTokens(node) element = pyutils.GetTokensInSubRange(tokens, node.elt) _IncreasePenalty(element[1:], split_penalty.EXPR) @@ -421,7 +420,7 @@ def visit_DictComp(self, node): # ... # ]) tokens = self._GetTokens(node) - key = pyutils.GetTokensInSubRange(tokens, node.key) + key = pyutils.GetTokensInSubRange(tokens, node.key) _IncreasePenalty(key[1:], split_penalty.EXPR) value = pyutils.GetTokensInSubRange(tokens, node.value) @@ -447,7 +446,7 @@ def visit_GeneratorExp(self, node): # is_async=0), # ... 
# ]) - tokens = self._GetTokens(node) + tokens = self._GetTokens(node) element = pyutils.GetTokensInSubRange(tokens, node.elt) _IncreasePenalty(element[1:], split_penalty.EXPR) @@ -542,10 +541,10 @@ def visit_Constant(self, node): def visit_Attribute(self, node): # Attribute(value=Expr, # attr=Identifier) - tokens = self._GetTokens(node) + tokens = self._GetTokens(node) split_before = style.Get('SPLIT_BEFORE_DOT') - dot_indices = pyutils.GetNextTokenIndex( - tokens, pyutils.TokenEnd(node.value)) + dot_indices = pyutils.GetNextTokenIndex(tokens, + pyutils.TokenEnd(node.value)) if not split_before: dot_indices += 1 @@ -559,8 +558,8 @@ def visit_Subscript(self, node): tokens = self._GetTokens(node) # Don't split before the opening bracket of a subscript. - bracket_index = pyutils.GetNextTokenIndex( - tokens, pyutils.TokenEnd(node.value)) + bracket_index = pyutils.GetNextTokenIndex(tokens, + pyutils.TokenEnd(node.value)) _IncreasePenalty(tokens[bracket_index], split_penalty.UNBREAKABLE) return self.generic_visit(node) @@ -610,16 +609,16 @@ def visit_Slice(self, node): _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) if hasattr(node, 'upper') and node.upper: - colon_index = pyutils.GetPrevTokenIndex( - tokens, pyutils.TokenStart(node.upper)) + colon_index = pyutils.GetPrevTokenIndex(tokens, + pyutils.TokenStart(node.upper)) _IncreasePenalty(tokens[colon_index], split_penalty.UNBREAKABLE) subrange = pyutils.GetTokensInSubRange(tokens, node.upper) _IncreasePenalty(subrange, split_penalty.EXPR) _DecreasePenalty(subrange[0], split_penalty.EXPR // 2) if hasattr(node, 'step') and node.step: - colon_index = pyutils.GetPrevTokenIndex( - tokens, pyutils.TokenStart(node.step)) + colon_index = pyutils.GetPrevTokenIndex(tokens, + pyutils.TokenStart(node.step)) _IncreasePenalty(tokens[colon_index], split_penalty.UNBREAKABLE) subrange = pyutils.GetTokensInSubRange(tokens, node.step) _IncreasePenalty(subrange, split_penalty.EXPR) @@ -865,7 +864,7 @@ def visit_arg(self, node): # 
Process any annotations. if hasattr(node, 'annotation') and node.annotation: annotation = node.annotation - subrange = pyutils.GetTokensInSubRange(tokens, annotation) + subrange = pyutils.GetTokensInSubRange(tokens, annotation) _IncreasePenalty(subrange, split_penalty.ANNOTATION) return self.generic_visit(node) diff --git a/yapf/pytree/blank_line_calculator.py b/yapf/pytree/blank_line_calculator.py index 8aa20ec0a..9d218bf97 100644 --- a/yapf/pytree/blank_line_calculator.py +++ b/yapf/pytree/blank_line_calculator.py @@ -29,18 +29,17 @@ from yapf.yapflib import py3compat from yapf.yapflib import style -_NO_BLANK_LINES = 1 -_ONE_BLANK_LINE = 2 +_NO_BLANK_LINES = 1 +_ONE_BLANK_LINE = 2 _TWO_BLANK_LINES = 3 -_PYTHON_STATEMENTS = frozenset( - { - 'small_stmt', 'expr_stmt', 'print_stmt', 'del_stmt', 'pass_stmt', - 'break_stmt', 'continue_stmt', 'return_stmt', 'raise_stmt', - 'yield_stmt', 'import_stmt', 'global_stmt', 'exec_stmt', 'assert_stmt', - 'if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', - 'nonlocal_stmt', 'async_stmt', 'simple_stmt' - }) +_PYTHON_STATEMENTS = frozenset({ + 'small_stmt', 'expr_stmt', 'print_stmt', 'del_stmt', 'pass_stmt', + 'break_stmt', 'continue_stmt', 'return_stmt', 'raise_stmt', 'yield_stmt', + 'import_stmt', 'global_stmt', 'exec_stmt', 'assert_stmt', 'if_stmt', + 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', 'nonlocal_stmt', + 'async_stmt', 'simple_stmt' +}) def CalculateBlankLines(tree): @@ -59,10 +58,10 @@ class _BlankLineCalculator(pytree_visitor.PyTreeVisitor): """_BlankLineCalculator - see file-level docstring for a description.""" def __init__(self): - self.class_level = 0 - self.function_level = 0 - self.last_comment_lineno = 0 - self.last_was_decorator = False + self.class_level = 0 + self.function_level = 0 + self.last_comment_lineno = 0 + self.last_was_decorator = False self.last_was_class_or_function = False def Visit_simple_stmt(self, node): # pylint: disable=invalid-name @@ -82,17 +81,17 @@ def 
Visit_decorator(self, node): # pylint: disable=invalid-name def Visit_classdef(self, node): # pylint: disable=invalid-name self.last_was_class_or_function = False - index = self._SetBlankLinesBetweenCommentAndClassFunc(node) - self.last_was_decorator = False - self.class_level += 1 + index = self._SetBlankLinesBetweenCommentAndClassFunc(node) + self.last_was_decorator = False + self.class_level += 1 for child in node.children[index:]: self.Visit(child) - self.class_level -= 1 + self.class_level -= 1 self.last_was_class_or_function = True def Visit_funcdef(self, node): # pylint: disable=invalid-name self.last_was_class_or_function = False - index = self._SetBlankLinesBetweenCommentAndClassFunc(node) + index = self._SetBlankLinesBetweenCommentAndClassFunc(node) if _AsyncFunction(node): index = self._SetBlankLinesBetweenCommentAndClassFunc( node.prev_sibling.parent) @@ -100,10 +99,10 @@ def Visit_funcdef(self, node): # pylint: disable=invalid-name else: index = self._SetBlankLinesBetweenCommentAndClassFunc(node) self.last_was_decorator = False - self.function_level += 1 + self.function_level += 1 for child in node.children[index:]: self.Visit(child) - self.function_level -= 1 + self.function_level -= 1 self.last_was_class_or_function = True def DefaultNodeVisit(self, node): @@ -161,23 +160,20 @@ def _GetNumNewlines(self, node): return _ONE_BLANK_LINE def _IsTopLevel(self, node): - return ( - not (self.class_level or self.function_level) and - _StartsInZerothColumn(node)) + return (not (self.class_level or self.function_level) and + _StartsInZerothColumn(node)) def _SetNumNewlines(node, num_newlines): - pytree_utils.SetNodeAnnotation( - node, pytree_utils.Annotation.NEWLINES, num_newlines) + pytree_utils.SetNodeAnnotation(node, pytree_utils.Annotation.NEWLINES, + num_newlines) def _StartsInZerothColumn(node): - return ( - pytree_utils.FirstLeafNode(node).column == 0 or - (_AsyncFunction(node) and node.prev_sibling.column == 0)) + return 
(pytree_utils.FirstLeafNode(node).column == 0 or + (_AsyncFunction(node) and node.prev_sibling.column == 0)) def _AsyncFunction(node): - return ( - py3compat.PY3 and node.prev_sibling and - node.prev_sibling.type == grammar_token.ASYNC) + return (py3compat.PY3 and node.prev_sibling and + node.prev_sibling.type == grammar_token.ASYNC) diff --git a/yapf/pytree/comment_splicer.py b/yapf/pytree/comment_splicer.py index 01911c896..ae5ffe66f 100644 --- a/yapf/pytree/comment_splicer.py +++ b/yapf/pytree/comment_splicer.py @@ -60,7 +60,7 @@ def _VisitNodeRec(node): # Remember the leading indentation of this prefix and clear it. # Mopping up the prefix is important because we may go over this same # child in the next iteration... - child_prefix = child.prefix.lstrip('\n') + child_prefix = child.prefix.lstrip('\n') prefix_indent = child_prefix[:child_prefix.find('#')] if '\n' in prefix_indent: prefix_indent = prefix_indent[prefix_indent.rfind('\n') + 1:] @@ -171,23 +171,22 @@ def _VisitNodeRec(node): else: if comment_lineno == prev_leaf[0].lineno: comment_lines = comment_prefix.splitlines() - value = comment_lines[0].lstrip() + value = comment_lines[0].lstrip() if value.rstrip('\n'): - comment_column = prev_leaf[0].column + comment_column = prev_leaf[0].column comment_column += len(prev_leaf[0].value) - comment_column += ( + comment_column += ( len(comment_lines[0]) - len(comment_lines[0].lstrip())) comment_leaf = pytree.Leaf( type=token.COMMENT, value=value.rstrip('\n'), context=('', (comment_lineno, comment_column))) pytree_utils.InsertNodesAfter([comment_leaf], prev_leaf[0]) - comment_prefix = '\n'.join(comment_lines[1:]) + comment_prefix = '\n'.join(comment_lines[1:]) comment_lineno += 1 - rindex = ( - 0 if '\n' not in comment_prefix.rstrip() else - comment_prefix.rstrip().rindex('\n') + 1) + rindex = (0 if '\n' not in comment_prefix.rstrip() else + comment_prefix.rstrip().rindex('\n') + 1) comment_column = ( len(comment_prefix[rindex:]) - 
len(comment_prefix[rindex:].lstrip())) @@ -204,8 +203,10 @@ def _VisitNodeRec(node): _VisitNodeRec(tree) -def _CreateCommentsFromPrefix( - comment_prefix, comment_lineno, comment_column, standalone=False): +def _CreateCommentsFromPrefix(comment_prefix, + comment_lineno, + comment_column, + standalone=False): """Create pytree nodes to represent the given comment prefix. Args: @@ -233,10 +234,10 @@ def _CreateCommentsFromPrefix( index += 1 if comment_block: - new_lineno = comment_lineno + index - 1 - comment_block[0] = comment_block[0].strip() + new_lineno = comment_lineno + index - 1 + comment_block[0] = comment_block[0].strip() comment_block[-1] = comment_block[-1].strip() - comment_leaf = pytree.Leaf( + comment_leaf = pytree.Leaf( type=token.COMMENT, value='\n'.join(comment_block), context=('', (new_lineno, comment_column))) @@ -261,11 +262,10 @@ def _CreateCommentsFromPrefix( # line, not on the same line with other code), it's important to insert it into # an appropriate parent of the node it's attached to. An appropriate parent # is the first "standalone line node" in the parent chain of a node. -_STANDALONE_LINE_NODES = frozenset( - [ - 'suite', 'if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', - 'funcdef', 'classdef', 'decorated', 'file_input' - ]) +_STANDALONE_LINE_NODES = frozenset([ + 'suite', 'if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt', + 'funcdef', 'classdef', 'decorated', 'file_input' +]) def _FindNodeWithStandaloneLineParent(node): @@ -352,14 +352,14 @@ def _AnnotateIndents(tree): """ # Annotate the root of the tree with zero indent. 
if tree.parent is None: - pytree_utils.SetNodeAnnotation( - tree, pytree_utils.Annotation.CHILD_INDENT, '') + pytree_utils.SetNodeAnnotation(tree, pytree_utils.Annotation.CHILD_INDENT, + '') for child in tree.children: if child.type == token.INDENT: child_indent = pytree_utils.GetNodeAnnotation( tree, pytree_utils.Annotation.CHILD_INDENT) if child_indent is not None and child_indent != child.value: raise RuntimeError('inconsistent indentation for child', (tree, child)) - pytree_utils.SetNodeAnnotation( - tree, pytree_utils.Annotation.CHILD_INDENT, child.value) + pytree_utils.SetNodeAnnotation(tree, pytree_utils.Annotation.CHILD_INDENT, + child.value) _AnnotateIndents(child) diff --git a/yapf/pytree/pytree_unwrapper.py b/yapf/pytree/pytree_unwrapper.py index 835ca60a1..3fe4ade08 100644 --- a/yapf/pytree/pytree_unwrapper.py +++ b/yapf/pytree/pytree_unwrapper.py @@ -61,11 +61,10 @@ def UnwrapPyTree(tree): # Grammar tokens considered as whitespace for the purpose of unwrapping. -_WHITESPACE_TOKENS = frozenset( - [ - grammar_token.NEWLINE, grammar_token.DEDENT, grammar_token.INDENT, - grammar_token.ENDMARKER - ]) +_WHITESPACE_TOKENS = frozenset([ + grammar_token.NEWLINE, grammar_token.DEDENT, grammar_token.INDENT, + grammar_token.ENDMARKER +]) class PyTreeUnwrapper(pytree_visitor.PyTreeVisitor): @@ -119,17 +118,16 @@ def _StartNewLine(self): _AdjustSplitPenalty(self._cur_logical_line) self._cur_logical_line = logical_line.LogicalLine(self._cur_depth) - _STMT_TYPES = frozenset( - { - 'if_stmt', - 'while_stmt', - 'for_stmt', - 'try_stmt', - 'expect_clause', - 'with_stmt', - 'funcdef', - 'classdef', - }) + _STMT_TYPES = frozenset({ + 'if_stmt', + 'while_stmt', + 'for_stmt', + 'try_stmt', + 'expect_clause', + 'with_stmt', + 'funcdef', + 'classdef', + }) # pylint: disable=invalid-name,missing-docstring def Visit_simple_stmt(self, node): @@ -322,7 +320,7 @@ def _MatchBrackets(line): bracket_stack.append(token) elif token.value in _CLOSING_BRACKETS: 
bracket_stack[-1].matching_bracket = token - token.matching_bracket = bracket_stack[-1] + token.matching_bracket = bracket_stack[-1] bracket_stack.pop() for bracket in bracket_stack: @@ -340,7 +338,7 @@ def _IdentifyParameterLists(line): Arguments: line: (LogicalLine) A logical line. """ - func_stack = [] + func_stack = [] param_stack = [] for tok in line.tokens: # Identify parameter list objects. @@ -376,9 +374,9 @@ def _AdjustSplitPenalty(line): bracket_level = 0 for index, token in enumerate(line.tokens): if index and not bracket_level: - pytree_utils.SetNodeAnnotation( - token.node, pytree_utils.Annotation.SPLIT_PENALTY, - split_penalty.UNBREAKABLE) + pytree_utils.SetNodeAnnotation(token.node, + pytree_utils.Annotation.SPLIT_PENALTY, + split_penalty.UNBREAKABLE) if token.value in _OPENING_BRACKETS: bracket_level += 1 elif token.value in _CLOSING_BRACKETS: @@ -398,7 +396,7 @@ def _DetermineMustSplitAnnotation(node): node.children[-1].value != ','): return num_children = len(node.children) - index = 0 + index = 0 _SetMustSplitOnFirstLeaf(node.children[0]) while index < num_children - 1: child = node.children[index] diff --git a/yapf/pytree/pytree_utils.py b/yapf/pytree/pytree_utils.py index 415011806..66a54e617 100644 --- a/yapf/pytree/pytree_utils.py +++ b/yapf/pytree/pytree_utils.py @@ -42,11 +42,11 @@ class Annotation(object): """Annotation names associated with pytrees.""" - CHILD_INDENT = 'child_indent' - NEWLINES = 'newlines' - MUST_SPLIT = 'must_split' + CHILD_INDENT = 'child_indent' + NEWLINES = 'newlines' + MUST_SPLIT = 'must_split' SPLIT_PENALTY = 'split_penalty' - SUBTYPE = 'subtype' + SUBTYPE = 'subtype' def NodeName(node): @@ -113,13 +113,13 @@ def ParseCodeToTree(code): # Try to parse using a Python 3 grammar, which is more permissive (print and # exec are not keywords). 
parser_driver = driver.Driver(_GRAMMAR_FOR_PY3, convert=pytree.convert) - tree = parser_driver.parse_string(code, debug=False) + tree = parser_driver.parse_string(code, debug=False) except parse.ParseError: # Now try to parse using a Python 2 grammar; If this fails, then # there's something else wrong with the code. try: parser_driver = driver.Driver(_GRAMMAR_FOR_PY2, convert=pytree.convert) - tree = parser_driver.parse_string(code, debug=False) + tree = parser_driver.parse_string(code, debug=False) except parse.ParseError: # Raise a syntax error if the code is invalid python syntax. try: @@ -195,9 +195,8 @@ def _InsertNodeAt(new_node, target, after=False): # Protect against attempts to insert nodes which already belong to some tree. if new_node.parent is not None: - raise RuntimeError( - 'inserting node which already has a parent', - (new_node, new_node.parent)) + raise RuntimeError('inserting node which already has a parent', + (new_node, new_node.parent)) # The code here is based on pytree.Base.next_sibling parent_of_target = target.parent @@ -210,8 +209,8 @@ def _InsertNodeAt(new_node, target, after=False): parent_of_target.insert_child(insertion_index, new_node) return - raise RuntimeError( - 'unable to find insertion point for target node', (target,)) + raise RuntimeError('unable to find insertion point for target node', + (target,)) # The following constant and functions implement a simple custom annotation @@ -317,9 +316,8 @@ def DumpNodeToString(node): The string representation. 
""" if isinstance(node, pytree.Leaf): - fmt = ( - '{name}({value}) [lineno={lineno}, column={column}, ' - 'prefix={prefix}, penalty={penalty}]') + fmt = ('{name}({value}) [lineno={lineno}, column={column}, ' + 'prefix={prefix}, penalty={penalty}]') return fmt.format( name=NodeName(node), value=_PytreeNodeRepr(node), @@ -338,14 +336,12 @@ def DumpNodeToString(node): def _PytreeNodeRepr(node): """Like pytree.Node.__repr__, but names instead of numbers for tokens.""" if isinstance(node, pytree.Node): - return '%s(%s, %r)' % ( - node.__class__.__name__, NodeName(node), - [_PytreeNodeRepr(c) for c in node.children]) + return '%s(%s, %r)' % (node.__class__.__name__, NodeName(node), + [_PytreeNodeRepr(c) for c in node.children]) if isinstance(node, pytree.Leaf): return '%s(%s, %r)' % (node.__class__.__name__, NodeName(node), node.value) def IsCommentStatement(node): - return ( - NodeName(node) == 'simple_stmt' and - node.children[0].type == token.COMMENT) + return (NodeName(node) == 'simple_stmt' and + node.children[0].type == token.COMMENT) diff --git a/yapf/pytree/pytree_visitor.py b/yapf/pytree/pytree_visitor.py index 1cc2819f6..314431e84 100644 --- a/yapf/pytree/pytree_visitor.py +++ b/yapf/pytree/pytree_visitor.py @@ -117,7 +117,7 @@ def __init__(self, target_stream=sys.stdout): target_stream: the stream to dump the tree to. A file-like object. By default will dump into stdout. """ - self._target_stream = target_stream + self._target_stream = target_stream self._current_indent = 0 def _DumpString(self, s): diff --git a/yapf/pytree/split_penalty.py b/yapf/pytree/split_penalty.py index 8dc8056d3..f51fe1b73 100644 --- a/yapf/pytree/split_penalty.py +++ b/yapf/pytree/split_penalty.py @@ -26,30 +26,30 @@ # TODO(morbo): Document the annotations in a centralized place. E.g., the # README file. 
-UNBREAKABLE = 1000 * 1000 -NAMED_ASSIGN = 15000 -DOTTED_NAME = 4000 +UNBREAKABLE = 1000 * 1000 +NAMED_ASSIGN = 15000 +DOTTED_NAME = 4000 VERY_STRONGLY_CONNECTED = 3500 -STRONGLY_CONNECTED = 3000 -CONNECTED = 500 -TOGETHER = 100 - -OR_TEST = 1000 -AND_TEST = 1100 -NOT_TEST = 1200 -COMPARISON = 1300 -STAR_EXPR = 1300 -EXPR = 1400 -XOR_EXPR = 1500 -AND_EXPR = 1700 -SHIFT_EXPR = 1800 -ARITH_EXPR = 1900 -TERM = 2000 -FACTOR = 2100 -POWER = 2200 -ATOM = 2300 +STRONGLY_CONNECTED = 3000 +CONNECTED = 500 +TOGETHER = 100 + +OR_TEST = 1000 +AND_TEST = 1100 +NOT_TEST = 1200 +COMPARISON = 1300 +STAR_EXPR = 1300 +EXPR = 1400 +XOR_EXPR = 1500 +AND_EXPR = 1700 +SHIFT_EXPR = 1800 +ARITH_EXPR = 1900 +TERM = 2000 +FACTOR = 2100 +POWER = 2200 +ATOM = 2300 ONE_ELEMENT_ARGUMENT = 500 -SUBSCRIPT = 6000 +SUBSCRIPT = 6000 def ComputeSplitPenalties(tree): @@ -210,10 +210,10 @@ def Visit_trailer(self, node): # pylint: disable=invalid-name # trailer ::= '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME if node.children[0].value == '.': before = style.Get('SPLIT_BEFORE_DOT') - _SetSplitPenalty( - node.children[0], VERY_STRONGLY_CONNECTED if before else DOTTED_NAME) - _SetSplitPenalty( - node.children[1], DOTTED_NAME if before else VERY_STRONGLY_CONNECTED) + _SetSplitPenalty(node.children[0], + VERY_STRONGLY_CONNECTED if before else DOTTED_NAME) + _SetSplitPenalty(node.children[1], + DOTTED_NAME if before else VERY_STRONGLY_CONNECTED) elif len(node.children) == 2: # Don't split an empty argument list if at all possible. _SetSplitPenalty(node.children[1], VERY_STRONGLY_CONNECTED) @@ -237,12 +237,10 @@ def Visit_trailer(self, node): # pylint: disable=invalid-name _SetStronglyConnected(node.children[1].children[2]) # Still allow splitting around the operator. 
- split_before = ( - ( - name.endswith('_test') and - style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR')) or ( - name.endswith('_expr') and - style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'))) + split_before = ((name.endswith('_test') and + style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR')) or + (name.endswith('_expr') and + style.Get('SPLIT_BEFORE_BITWISE_OPERATOR'))) if split_before: _SetSplitPenalty( pytree_utils.LastLeafNode(node.children[1].children[1]), 0) @@ -251,11 +249,12 @@ def Visit_trailer(self, node): # pylint: disable=invalid-name pytree_utils.FirstLeafNode(node.children[1].children[2]), 0) # Don't split the ending bracket of a subscript list. - _RecAnnotate( - node.children[-1], pytree_utils.Annotation.SPLIT_PENALTY, - VERY_STRONGLY_CONNECTED) - elif name not in {'arglist', 'argument', 'term', 'or_test', 'and_test', - 'comparison', 'atom', 'power'}: + _RecAnnotate(node.children[-1], pytree_utils.Annotation.SPLIT_PENALTY, + VERY_STRONGLY_CONNECTED) + elif name not in { + 'arglist', 'argument', 'term', 'or_test', 'and_test', 'comparison', + 'atom', 'power' + }: # Don't split an argument list with one element if at all possible. 
stypes = pytree_utils.GetNodeAnnotation( pytree_utils.FirstLeafNode(node), pytree_utils.Annotation.SUBTYPE) @@ -296,7 +295,7 @@ def Visit_power(self, node): # pylint: disable=invalid-name,missing-docstring prev_trailer_idx = 1 while prev_trailer_idx < len(node.children) - 1: cur_trailer_idx = prev_trailer_idx + 1 - cur_trailer = node.children[cur_trailer_idx] + cur_trailer = node.children[cur_trailer_idx] if pytree_utils.NodeName(cur_trailer) != 'trailer': break @@ -370,8 +369,8 @@ def Visit_old_comp_for(self, node): # pylint: disable=invalid-name def Visit_comp_if(self, node): # pylint: disable=invalid-name # comp_if ::= 'if' old_test [comp_iter] - _SetSplitPenalty( - node.children[0], style.Get('SPLIT_PENALTY_BEFORE_IF_EXPR')) + _SetSplitPenalty(node.children[0], + style.Get('SPLIT_PENALTY_BEFORE_IF_EXPR')) _SetStronglyConnected(*node.children[1:]) self.DefaultNodeVisit(node) @@ -515,8 +514,8 @@ def _SetUnbreakable(node): def _SetStronglyConnected(*nodes): """Set a STRONGLY_CONNECTED penalty annotation for the given nodes.""" for node in nodes: - _RecAnnotate( - node, pytree_utils.Annotation.SPLIT_PENALTY, STRONGLY_CONNECTED) + _RecAnnotate(node, pytree_utils.Annotation.SPLIT_PENALTY, + STRONGLY_CONNECTED) def _SetExpressionPenalty(node, penalty): @@ -630,5 +629,5 @@ def _DecrementSplitPenalty(node, amt): def _SetSplitPenalty(node, penalty): - pytree_utils.SetNodeAnnotation( - node, pytree_utils.Annotation.SPLIT_PENALTY, penalty) + pytree_utils.SetNodeAnnotation(node, pytree_utils.Annotation.SPLIT_PENALTY, + penalty) diff --git a/yapf/pytree/subtype_assigner.py b/yapf/pytree/subtype_assigner.py index 06d1411f8..5cd0aea37 100644 --- a/yapf/pytree/subtype_assigner.py +++ b/yapf/pytree/subtype_assigner.py @@ -66,7 +66,7 @@ def Visit_dictsetmaker(self, node): # pylint: disable=invalid-name for child in node.children: self.Visit(child) - comp_for = False + comp_for = False dict_maker = False for child in node.children: @@ -78,7 +78,7 @@ def Visit_dictsetmaker(self, 
node): # pylint: disable=invalid-name if not comp_for and dict_maker: last_was_colon = False - unpacking = False + unpacking = False for child in node.children: if child.type == grammar_token.DOUBLESTAR: _AppendFirstLeafTokenSubtype(child, subtypes.KWARGS_STAR_STAR) @@ -248,15 +248,13 @@ def Visit_arglist(self, node): # pylint: disable=invalid-name # | '*' test (',' argument)* [',' '**' test] # | '**' test) self._ProcessArgLists(node) - _SetArgListSubtype( - node, subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) + _SetArgListSubtype(node, subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) def Visit_tname(self, node): # pylint: disable=invalid-name self._ProcessArgLists(node) - _SetArgListSubtype( - node, subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) + _SetArgListSubtype(node, subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) def Visit_decorator(self, node): # pylint: disable=invalid-name # decorator ::= @@ -290,9 +288,8 @@ def Visit_typedargslist(self, node): # pylint: disable=invalid-name # | '**' tname) # | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) self._ProcessArgLists(node) - _SetArgListSubtype( - node, subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) + _SetArgListSubtype(node, subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST) tname = False if not node.children: return @@ -303,7 +300,7 @@ def Visit_typedargslist(self, node): # pylint: disable=invalid-name tname = pytree_utils.NodeName(node.children[0]) == 'tname' for i in range(1, len(node.children)): prev_child = node.children[i - 1] - child = node.children[i] + child = node.children[i] if prev_child.type == grammar_token.COMMA: _AppendFirstLeafTokenSubtype(child, subtypes.PARAMETER_START) elif child.type == grammar_token.COMMA: @@ -311,8 +308,8 @@ def Visit_typedargslist(self, node): # pylint: disable=invalid-name if 
pytree_utils.NodeName(child) == 'tname': tname = True - _SetArgListSubtype( - child, subtypes.TYPED_NAME, subtypes.TYPED_NAME_ARG_LIST) + _SetArgListSubtype(child, subtypes.TYPED_NAME, + subtypes.TYPED_NAME_ARG_LIST) elif child.type == grammar_token.COMMA: tname = False elif child.type == grammar_token.EQUAL and tname: @@ -336,8 +333,8 @@ def Visit_comp_for(self, node): # pylint: disable=invalid-name _AppendSubtypeRec(node, subtypes.COMP_FOR) # Mark the previous node as COMP_EXPR unless this is a nested comprehension # as these will have the outer comprehension as their previous node. - attr = pytree_utils.GetNodeAnnotation( - node.parent, pytree_utils.Annotation.SUBTYPE) + attr = pytree_utils.GetNodeAnnotation(node.parent, + pytree_utils.Annotation.SUBTYPE) if not attr or subtypes.COMP_FOR not in attr: _AppendSubtypeRec(node.parent.children[0], subtypes.COMP_EXPR) self.DefaultNodeVisit(node) @@ -393,8 +390,8 @@ def HasSubtype(node): def _AppendTokenSubtype(node, subtype): """Append the token's subtype only if it's not already set.""" - pytree_utils.AppendNodeAnnotation( - node, pytree_utils.Annotation.SUBTYPE, subtype) + pytree_utils.AppendNodeAnnotation(node, pytree_utils.Annotation.SUBTYPE, + subtype) def _AppendFirstLeafTokenSubtype(node, subtype): @@ -431,14 +428,14 @@ def _InsertPseudoParentheses(node): node.children[-1].remove() first = pytree_utils.FirstLeafNode(node) - last = pytree_utils.LastLeafNode(node) + last = pytree_utils.LastLeafNode(node) if first == last and first.type == grammar_token.COMMENT: # A comment was inserted before the value, which is a pytree.Leaf. # Encompass the dictionary's value into an ATOM node. 
- last = first.next_sibling + last = first.next_sibling last_clone = last.clone() - new_node = pytree.Node(syms.atom, [first.clone(), last_clone]) + new_node = pytree.Node(syms.atom, [first.clone(), last_clone]) for orig_leaf, clone_leaf in zip(last.leaves(), last_clone.leaves()): pytree_utils.CopyYapfAnnotations(orig_leaf, clone_leaf) if hasattr(orig_leaf, 'is_pseudo'): @@ -449,7 +446,7 @@ def _InsertPseudoParentheses(node): last.remove() first = pytree_utils.FirstLeafNode(node) - last = pytree_utils.LastLeafNode(node) + last = pytree_utils.LastLeafNode(node) lparen = pytree.Leaf( grammar_token.LPAR, diff --git a/yapf/third_party/yapf_diff/yapf_diff.py b/yapf/third_party/yapf_diff/yapf_diff.py index f069aedcb..810a6a2d4 100644 --- a/yapf/third_party/yapf_diff/yapf_diff.py +++ b/yapf/third_party/yapf_diff/yapf_diff.py @@ -83,7 +83,7 @@ def main(): args = parser.parse_args() # Extract changed lines for each file. - filename = None + filename = None lines_by_file = {} for line in sys.stdin: match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.prefix, line) @@ -134,9 +134,8 @@ def main(): with open(filename) as f: code = f.readlines() formatted_code = StringIO(stdout).readlines() - diff = difflib.unified_diff( - code, formatted_code, filename, filename, '(before formatting)', - '(after formatting)') + diff = difflib.unified_diff(code, formatted_code, filename, filename, + '(before formatting)', '(after formatting)') diff_string = ''.join(diff) if len(diff_string) > 0: sys.stdout.write(diff_string) diff --git a/yapf/yapflib/errors.py b/yapf/yapflib/errors.py index cb8694d2c..99e88d9c0 100644 --- a/yapf/yapflib/errors.py +++ b/yapf/yapflib/errors.py @@ -32,8 +32,8 @@ def FormatErrorMsg(e): if isinstance(e, SyntaxError): return '{}:{}:{}: {}'.format(e.filename, e.lineno, e.offset, e.msg) if isinstance(e, tokenize.TokenError): - return '{}:{}:{}: {}'.format( - e.filename, e.args[1][0], e.args[1][1], e.args[0]) + return '{}:{}:{}: {}'.format(e.filename, e.args[1][0], 
e.args[1][1], + e.args[0]) return '{}:{}:{}: {}'.format(e.args[1][0], e.args[1][1], e.args[1][2], e.msg) diff --git a/yapf/yapflib/file_resources.py b/yapf/yapflib/file_resources.py index 9c071db3d..b5e2612bd 100644 --- a/yapf/yapflib/file_resources.py +++ b/yapf/yapflib/file_resources.py @@ -25,8 +25,8 @@ from yapf.yapflib import py3compat from yapf.yapflib import style -CR = '\r' -LF = '\n' +CR = '\r' +LF = '\n' CRLF = '\r\n' @@ -56,7 +56,7 @@ def _GetExcludePatternsFromPyprojectToml(filename): "configuration file") if os.path.isfile(filename) and os.access(filename, os.R_OK): - pyproject_toml = toml.load(filename) + pyproject_toml = toml.load(filename) ignore_patterns = pyproject_toml.get('tool', {}).get('yapfignore', {}).get('ignore_patterns', []) @@ -140,7 +140,7 @@ def GetDefaultStyleForDir(dirname, default_style=style.DEFAULT_STYLE): "configuration file") pyproject_toml = toml.load(config_file) - style_dict = pyproject_toml.get('tool', {}).get('yapf', None) + style_dict = pyproject_toml.get('tool', {}).get('yapf', None) if style_dict is not None: return config_file @@ -161,8 +161,10 @@ def GetCommandLineFiles(command_line_file_list, recursive, exclude): return _FindPythonFiles(command_line_file_list, recursive, exclude) -def WriteReformattedCode( - filename, reformatted_code, encoding='', in_place=False): +def WriteReformattedCode(filename, + reformatted_code, + encoding='', + in_place=False): """Emit the reformatted code. Write the reformatted code into the file, if in_place is True. Otherwise, @@ -175,8 +177,8 @@ def WriteReformattedCode( in_place: (bool) If True, then write the reformatted code to the file. 
""" if in_place: - with py3compat.open_with_encoding(filename, mode='w', encoding=encoding, - newline='') as fd: + with py3compat.open_with_encoding( + filename, mode='w', encoding=encoding, newline='') as fd: fd.write(reformatted_code) else: py3compat.EncodeAndWriteToStdout(reformatted_code) @@ -263,8 +265,8 @@ def IsPythonFile(filename): encoding = py3compat.detect_encoding(fd.readline)[0] # Check for correctness of encoding. - with py3compat.open_with_encoding(filename, mode='r', - encoding=encoding) as fd: + with py3compat.open_with_encoding( + filename, mode='r', encoding=encoding) as fd: fd.read() except UnicodeDecodeError: encoding = 'latin-1' @@ -275,8 +277,8 @@ def IsPythonFile(filename): return False try: - with py3compat.open_with_encoding(filename, mode='r', - encoding=encoding) as fd: + with py3compat.open_with_encoding( + filename, mode='r', encoding=encoding) as fd: first_line = fd.readline(256) except IOError: return False diff --git a/yapf/yapflib/format_decision_state.py b/yapf/yapflib/format_decision_state.py index 40bf5e25b..efcef0ba4 100644 --- a/yapf/yapflib/format_decision_state.py +++ b/yapf/yapflib/format_decision_state.py @@ -66,62 +66,59 @@ def __init__(self, line, first_indent): line: (LogicalLine) The logical line we're currently processing. first_indent: (int) The indent of the first token. 
""" - self.next_token = line.first - self.column = first_indent - self.line = line - self.paren_level = 0 - self.lowest_level_on_line = 0 + self.next_token = line.first + self.column = first_indent + self.line = line + self.paren_level = 0 + self.lowest_level_on_line = 0 self.ignore_stack_for_comparison = False - self.stack = [_ParenState(first_indent, first_indent)] - self.comp_stack = [] - self.param_list_stack = [] - self.first_indent = first_indent - self.column_limit = style.Get('COLUMN_LIMIT') + self.stack = [_ParenState(first_indent, first_indent)] + self.comp_stack = [] + self.param_list_stack = [] + self.first_indent = first_indent + self.column_limit = style.Get('COLUMN_LIMIT') def Clone(self): """Clones a FormatDecisionState object.""" - new = FormatDecisionState(self.line, self.first_indent) - new.next_token = self.next_token - new.column = self.column - new.line = self.line - new.paren_level = self.paren_level - new.line.depth = self.line.depth - new.lowest_level_on_line = self.lowest_level_on_line + new = FormatDecisionState(self.line, self.first_indent) + new.next_token = self.next_token + new.column = self.column + new.line = self.line + new.paren_level = self.paren_level + new.line.depth = self.line.depth + new.lowest_level_on_line = self.lowest_level_on_line new.ignore_stack_for_comparison = self.ignore_stack_for_comparison - new.first_indent = self.first_indent - new.stack = [state.Clone() for state in self.stack] - new.comp_stack = [state.Clone() for state in self.comp_stack] - new.param_list_stack = [state.Clone() for state in self.param_list_stack] + new.first_indent = self.first_indent + new.stack = [state.Clone() for state in self.stack] + new.comp_stack = [state.Clone() for state in self.comp_stack] + new.param_list_stack = [state.Clone() for state in self.param_list_stack] return new def __eq__(self, other): # Note: 'first_indent' is implicit in the stack. 
Also, we ignore 'previous', # because it shouldn't have a bearing on this comparison. (I.e., it will # report equal if 'next_token' does.) - return ( - self.next_token == other.next_token and self.column == other.column and - self.paren_level == other.paren_level and - self.line.depth == other.line.depth and - self.lowest_level_on_line == other.lowest_level_on_line and ( - self.ignore_stack_for_comparison or - other.ignore_stack_for_comparison or self.stack == other.stack and - self.comp_stack == other.comp_stack and - self.param_list_stack == other.param_list_stack)) + return (self.next_token == other.next_token and + self.column == other.column and + self.paren_level == other.paren_level and + self.line.depth == other.line.depth and + self.lowest_level_on_line == other.lowest_level_on_line and + (self.ignore_stack_for_comparison or + other.ignore_stack_for_comparison or self.stack == other.stack and + self.comp_stack == other.comp_stack and + self.param_list_stack == other.param_list_stack)) def __ne__(self, other): return not self == other def __hash__(self): - return hash( - ( - self.next_token, self.column, self.paren_level, self.line.depth, - self.lowest_level_on_line)) + return hash((self.next_token, self.column, self.paren_level, + self.line.depth, self.lowest_level_on_line)) def __repr__(self): - return ( - 'column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' % ( - self.column, repr(self.next_token), self.paren_level, - '\n\t'.join(repr(s) for s in self.stack) + ']')) + return ('column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' % + (self.column, repr(self.next_token), self.paren_level, + '\n\t'.join(repr(s) for s in self.stack) + ']')) def CanSplit(self, must_split): """Determine if we can split before the next token. @@ -132,7 +129,7 @@ def CanSplit(self, must_split): Returns: True if the line can be split before the next token. 
""" - current = self.next_token + current = self.next_token previous = current.previous_token if current.is_pseudo: @@ -169,7 +166,7 @@ def CanSplit(self, must_split): def MustSplit(self): """Returns True if the line must split before the next token.""" - current = self.next_token + current = self.next_token previous = current.previous_token if current.is_pseudo: @@ -292,7 +289,7 @@ def SurroundedByParens(token): # # or when a string formatting syntax. func_call_or_string_format = False - tok = current.next_token + tok = current.next_token if current.is_name: while tok and (tok.is_name or tok.value == '.'): tok = tok.next_token @@ -424,7 +421,7 @@ def SurroundedByParens(token): (opening.previous_token.is_name or opening.previous_token.value in {'*', '**'})): is_func_call = False - opening = current + opening = current while opening: if opening.value == '(': is_func_call = True @@ -455,7 +452,7 @@ def SurroundedByParens(token): # default=False) if (current.value == '{' and previous.value == '(' and pprevious and pprevious.is_name): - dict_end = current.matching_bracket + dict_end = current.matching_bracket next_token = dict_end.next_token if next_token.value == ',' and not self._FitsOnLine(current, dict_end): return True @@ -486,7 +483,7 @@ def SurroundedByParens(token): (opening.previous_token.is_name or opening.previous_token.value in {'*', '**'})): is_func_call = False - opening = current + opening = current while opening: if opening.value == '(': is_func_call = True @@ -525,7 +522,7 @@ def SurroundedByParens(token): return False elements = previous.container_elements + [previous.matching_bracket] - i = 1 + i = 1 while i < len(elements): if (not elements[i - 1].OpensScope() and not self._FitsOnLine(elements[i - 1], elements[i])): @@ -597,7 +594,7 @@ def _AddTokenOnCurrentLine(self, dry_run): Arguments: dry_run: (bool) Commit whitespace changes to the FormatToken if True. 
""" - current = self.next_token + current = self.next_token previous = current.previous_token spaces = current.spaces_required_before @@ -640,14 +637,14 @@ def _AddTokenOnNewline(self, dry_run, must_split): Returns: The split penalty for splitting after the current state. """ - current = self.next_token + current = self.next_token previous = current.previous_token self.column = self._GetNewlineColumn() if not dry_run: indent_level = self.line.depth - spaces = self.column + spaces = self.column if spaces: spaces -= indent_level * style.Get('INDENT_WIDTH') current.AddWhitespacePrefix( @@ -660,7 +657,7 @@ def _AddTokenOnNewline(self, dry_run, must_split): if (previous.OpensScope() or (previous.is_comment and previous.previous_token is not None and previous.previous_token.OpensScope())): - dedent = (style.Get('CONTINUATION_INDENT_WIDTH'), + dedent = (style.Get('CONTINUATION_INDENT_WIDTH'), 0)[style.Get('INDENT_CLOSING_BRACKETS')] self.stack[-1].closing_scope_indent = ( max(0, self.stack[-1].indent - dedent)) @@ -680,9 +677,9 @@ def _AddTokenOnNewline(self, dry_run, must_split): # Add a penalty for each increasing newline we add, but don't penalize for # splitting before an if-expression or list comprehension. if current.value not in {'if', 'for'}: - last = self.stack[-1] + last = self.stack[-1] last.num_line_splits += 1 - penalty += ( + penalty += ( style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') * last.num_line_splits) @@ -707,13 +704,13 @@ def MoveStateToNextToken(self): """ current = self.next_token if not current.OpensScope() and not current.ClosesScope(): - self.lowest_level_on_line = min( - self.lowest_level_on_line, self.paren_level) + self.lowest_level_on_line = min(self.lowest_level_on_line, + self.paren_level) # If we encounter an opening bracket, we add a level to our stack to prepare # for the subsequent tokens. 
if current.OpensScope(): - last = self.stack[-1] + last = self.stack[-1] new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space self.stack.append(_ParenState(new_indent, self.stack[-1].last_space)) @@ -743,7 +740,7 @@ def MoveStateToNextToken(self): if (not current.is_pylint_comment and not current.is_pytype_comment and not current.is_copybara_comment and self.column > self.column_limit): excess_characters = self.column - self.column_limit - penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters + penalty += style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters if is_multiline_string: # If this is a multiline string, the column is actually the @@ -762,10 +759,10 @@ def _CalculateComprehensionState(self, newline): The penalty for the token-newline combination given the current comprehension state. """ - current = self.next_token - previous = current.previous_token + current = self.next_token + previous = current.previous_token top_of_stack = self.comp_stack[-1] if self.comp_stack else None - penalty = 0 + penalty = 0 if top_of_stack is not None: # Check if the token terminates the current comprehension. @@ -801,7 +798,7 @@ def _CalculateComprehensionState(self, newline): not top_of_stack.HasTrivialExpr())): penalty += split_penalty.UNBREAKABLE else: - top_of_stack.for_token = current + top_of_stack.for_token = current top_of_stack.has_split_at_for = newline # Try to keep trivial expressions on the same line as the comp_for. @@ -826,14 +823,14 @@ def _PushParameterListState(self, newline): Args: newline: Whether the current token is to be added on a newline. 
""" - current = self.next_token + current = self.next_token previous = current.previous_token if _IsFunctionDefinition(previous): first_param_column = previous.total_length + self.stack[-2].indent self.param_list_stack.append( - object_state.ParameterListState( - previous, newline, first_param_column)) + object_state.ParameterListState(previous, newline, + first_param_column)) def _CalculateParameterListState(self, newline): """Makes required changes to parameter list state. @@ -845,18 +842,18 @@ def _CalculateParameterListState(self, newline): The penalty for the token-newline combination given the current parameter state. """ - current = self.next_token + current = self.next_token previous = current.previous_token - penalty = 0 + penalty = 0 if _IsFunctionDefinition(previous): first_param_column = previous.total_length + self.stack[-2].indent if not newline: param_list = self.param_list_stack[-1] if param_list.parameters and param_list.has_typed_return: - last_param = param_list.parameters[-1].first_token - last_token = _LastTokenInLine(previous.matching_bracket) - total_length = last_token.total_length + last_param = param_list.parameters[-1].first_token + last_token = _LastTokenInLine(previous.matching_bracket) + total_length = last_token.total_length total_length -= last_param.total_length - len(last_param.value) if total_length + self.column > self.column_limit: # If we need to split before the trailing code of a function @@ -924,9 +921,8 @@ def _IndentWithContinuationAlignStyle(self, column): return column align_style = style.Get('CONTINUATION_ALIGN_STYLE') if align_style == 'FIXED': - return ( - (self.line.depth * style.Get('INDENT_WIDTH')) + - style.Get('CONTINUATION_INDENT_WIDTH')) + return ((self.line.depth * style.Get('INDENT_WIDTH')) + + style.Get('CONTINUATION_INDENT_WIDTH')) if align_style == 'VALIGN-RIGHT': indent_width = style.Get('INDENT_WIDTH') return indent_width * int((column + indent_width - 1) / indent_width) @@ -934,8 +930,8 @@ def 
_IndentWithContinuationAlignStyle(self, column): def _GetNewlineColumn(self): """Return the new column on the newline.""" - current = self.next_token - previous = current.previous_token + current = self.next_token + previous = current.previous_token top_of_stack = self.stack[-1] if isinstance(current.spaces_required_before, list): @@ -955,8 +951,8 @@ def _GetNewlineColumn(self): if (previous.OpensScope() or (previous.is_comment and previous.previous_token is not None and previous.previous_token.OpensScope())): - return max( - 0, top_of_stack.indent - style.Get('CONTINUATION_INDENT_WIDTH')) + return max(0, + top_of_stack.indent - style.Get('CONTINUATION_INDENT_WIDTH')) return top_of_stack.closing_scope_indent if (previous and previous.is_string and current.is_string and @@ -1012,7 +1008,7 @@ def ImplicitStringConcatenation(tok): tok = tok.next_token while tok.is_string: num_strings += 1 - tok = tok.next_token + tok = tok.next_token return num_strings > 1 def DictValueIsContainer(opening, closing): @@ -1031,9 +1027,9 @@ def DictValueIsContainer(opening, closing): return False return subtypes.DICTIONARY_KEY_PART in key.subtypes - closing = opening.matching_bracket + closing = opening.matching_bracket entry_start = opening.next_token - current = opening.next_token.next_token + current = opening.next_token.next_token while current and current != closing: if subtypes.DICTIONARY_KEY in current.subtypes: @@ -1041,7 +1037,7 @@ def DictValueIsContainer(opening, closing): if prev.value == ',': prev = PreviousNonCommentToken(prev.previous_token) if not DictValueIsContainer(prev.matching_bracket, prev): - length = prev.total_length - entry_start.total_length + length = prev.total_length - entry_start.total_length length += len(entry_start.value) if length + self.stack[-2].indent >= self.column_limit: return False @@ -1073,7 +1069,7 @@ def DictValueIsContainer(opening, closing): # At this point, current is the closing bracket. 
Go back one to get the end # of the dictionary entry. current = PreviousNonCommentToken(current) - length = current.total_length - entry_start.total_length + length = current.total_length - entry_start.total_length length += len(entry_start.value) return length + self.stack[-2].indent <= self.column_limit @@ -1093,9 +1089,8 @@ def _ArgumentListHasDictionaryEntry(self, token): def _ContainerFitsOnStartLine(self, opening): """Check if the container can fit on its starting line.""" - return ( - opening.matching_bracket.total_length - opening.total_length + - self.stack[-1].indent) <= self.column_limit + return (opening.matching_bracket.total_length - opening.total_length + + self.stack[-1].indent) <= self.column_limit _COMPOUND_STMTS = frozenset( @@ -1171,8 +1166,8 @@ def _IsLastScopeInLine(current): def _IsSingleElementTuple(token): """Check if it's a single-element tuple.""" - close = token.matching_bracket - token = token.next_token + close = token.matching_bracket + token = token.next_token num_commas = 0 while token != close: if token.value == ',': @@ -1213,17 +1208,17 @@ class _ParenState(object): # TODO(morbo): This doesn't track "bin packing." 
def __init__(self, indent, last_space): - self.indent = indent - self.last_space = last_space - self.closing_scope_indent = 0 + self.indent = indent + self.last_space = last_space + self.closing_scope_indent = 0 self.split_before_closing_bracket = False - self.num_line_splits = 0 + self.num_line_splits = 0 def Clone(self): - state = _ParenState(self.indent, self.last_space) - state.closing_scope_indent = self.closing_scope_indent + state = _ParenState(self.indent, self.last_space) + state.closing_scope_indent = self.closing_scope_indent state.split_before_closing_bracket = self.split_before_closing_bracket - state.num_line_splits = self.num_line_splits + state.num_line_splits = self.num_line_splits return state def __repr__(self): @@ -1237,7 +1232,5 @@ def __ne__(self, other): return not self == other def __hash__(self, *args, **kwargs): - return hash( - ( - self.indent, self.last_space, self.closing_scope_indent, - self.split_before_closing_bracket, self.num_line_splits)) + return hash((self.indent, self.last_space, self.closing_scope_indent, + self.split_before_closing_bracket, self.num_line_splits)) diff --git a/yapf/yapflib/format_token.py b/yapf/yapflib/format_token.py index 382f5f938..549271705 100644 --- a/yapf/yapflib/format_token.py +++ b/yapf/yapflib/format_token.py @@ -90,27 +90,27 @@ def __init__(self, node, name): node: (pytree.Leaf) The node that's being wrapped. name: (string) The name of the node. 
""" - self.node = node - self.name = name - self.type = node.type + self.node = node + self.name = name + self.type = node.type self.column = node.column self.lineno = node.lineno - self.value = node.value + self.value = node.value if self.is_continuation: self.value = node.value.rstrip() - self.next_token = None - self.previous_token = None - self.matching_bracket = None - self.parameters = [] - self.container_opening = None + self.next_token = None + self.previous_token = None + self.matching_bracket = None + self.parameters = [] + self.container_opening = None self.container_elements = [] - self.whitespace_prefix = '' - self.total_length = 0 - self.split_penalty = 0 - self.can_break_before = False - self.must_break_before = pytree_utils.GetNodeAnnotation( + self.whitespace_prefix = '' + self.total_length = 0 + self.split_penalty = 0 + self.can_break_before = False + self.must_break_before = pytree_utils.GetNodeAnnotation( node, pytree_utils.Annotation.MUST_SPLIT, default=False) self.newlines = pytree_utils.GetNodeAnnotation( node, pytree_utils.Annotation.NEWLINES) @@ -119,16 +119,16 @@ def __init__(self, node, name): if self.is_comment: self.spaces_required_before = style.Get('SPACES_BEFORE_COMMENT') - stypes = pytree_utils.GetNodeAnnotation( - node, pytree_utils.Annotation.SUBTYPE) - self.subtypes = {subtypes.NONE} if not stypes else stypes + stypes = pytree_utils.GetNodeAnnotation(node, + pytree_utils.Annotation.SUBTYPE) + self.subtypes = {subtypes.NONE} if not stypes else stypes self.is_pseudo = hasattr(node, 'is_pseudo') and node.is_pseudo @property def formatted_whitespace_prefix(self): if style.Get('INDENT_BLANK_LINES'): without_newlines = self.whitespace_prefix.lstrip('\n') - height = len(self.whitespace_prefix) - len(without_newlines) + height = len(self.whitespace_prefix) - len(without_newlines) if height: return ('\n' + without_newlines) * height return self.whitespace_prefix @@ -151,26 +151,26 @@ def AddWhitespacePrefix(self, newlines_before, 
spaces=0, indent_level=0): else: indent_before = '\t' * indent_level + ' ' * spaces else: - indent_before = ( - ' ' * indent_level * style.Get('INDENT_WIDTH') + ' ' * spaces) + indent_before = (' ' * indent_level * style.Get('INDENT_WIDTH') + + ' ' * spaces) if self.is_comment: comment_lines = [s.lstrip() for s in self.value.splitlines()] - self.value = ('\n' + indent_before).join(comment_lines) + self.value = ('\n' + indent_before).join(comment_lines) # Update our own value since we are changing node value self.value = self.value if not self.whitespace_prefix: - self.whitespace_prefix = ( - '\n' * (self.newlines or newlines_before) + indent_before) + self.whitespace_prefix = ('\n' * (self.newlines or newlines_before) + + indent_before) else: self.whitespace_prefix += indent_before def AdjustNewlinesBefore(self, newlines_before): """Change the number of newlines before this token.""" - self.whitespace_prefix = ( - '\n' * newlines_before + self.whitespace_prefix.lstrip('\n')) + self.whitespace_prefix = ('\n' * newlines_before + + self.whitespace_prefix.lstrip('\n')) def RetainHorizontalSpacing(self, first_column, depth): """Retains a token's horizontal spacing.""" @@ -183,7 +183,7 @@ def RetainHorizontalSpacing(self, first_column, depth): if not previous: return - cur_lineno = self.lineno + cur_lineno = self.lineno prev_lineno = previous.lineno if previous.is_multiline_string: prev_lineno += previous.value.count('\n') @@ -195,13 +195,13 @@ def RetainHorizontalSpacing(self, first_column, depth): self.column - first_column + depth * style.Get('INDENT_WIDTH')) return - cur_column = self.column + cur_column = self.column prev_column = previous.column - prev_len = len(previous.value) + prev_len = len(previous.value) if previous.is_pseudo and previous.value == ')': prev_column -= 1 - prev_len = 0 + prev_len = 0 if previous.is_multiline_string: prev_len = len(previous.value.split('\n')[-1]) @@ -220,11 +220,10 @@ def AddSubtype(self, subtype): self.subtypes.add(subtype) def 
__repr__(self): - msg = ( - 'FormatToken(name={0}, value={1}, column={2}, lineno={3}, ' - 'splitpenalty={4}'.format( - 'DOCSTRING' if self.is_docstring else self.name, self.value, - self.column, self.lineno, self.split_penalty)) + msg = ('FormatToken(name={0}, value={1}, column={2}, lineno={3}, ' + 'splitpenalty={4}'.format( + 'DOCSTRING' if self.is_docstring else self.name, self.value, + self.column, self.lineno, self.split_penalty)) msg += ', pseudo)' if self.is_pseudo else ')' return msg @@ -243,22 +242,21 @@ def is_binary_op(self): @py3compat.lru_cache() def is_arithmetic_op(self): """Token is an arithmetic operator.""" - return self.value in frozenset( - { - '+', # Add - '-', # Subtract - '*', # Multiply - '@', # Matrix Multiply - '/', # Divide - '//', # Floor Divide - '%', # Modulo - '<<', # Left Shift - '>>', # Right Shift - '|', # Bitwise Or - '&', # Bitwise Add - '^', # Bitwise Xor - '**', # Power - }) + return self.value in frozenset({ + '+', # Add + '-', # Subtract + '*', # Multiply + '@', # Matrix Multiply + '/', # Divide + '//', # Floor Divide + '%', # Modulo + '<<', # Left Shift + '>>', # Right Shift + '|', # Bitwise Or + '&', # Bitwise Add + '^', # Bitwise Xor + '**', # Power + }) @property def is_simple_expr(self): @@ -312,13 +310,13 @@ def is_docstring(self): @property def is_pylint_comment(self): - return self.is_comment and re.match( - r'#.*\bpylint:\s*(disable|enable)=', self.value) + return self.is_comment and re.match(r'#.*\bpylint:\s*(disable|enable)=', + self.value) @property def is_pytype_comment(self): - return self.is_comment and re.match( - r'#.*\bpytype:\s*(disable|enable)=', self.value) + return self.is_comment and re.match(r'#.*\bpytype:\s*(disable|enable)=', + self.value) @property def is_copybara_comment(self): diff --git a/yapf/yapflib/logical_line.py b/yapf/yapflib/logical_line.py index 477d4d625..8c84b7ba8 100644 --- a/yapf/yapflib/logical_line.py +++ b/yapf/yapflib/logical_line.py @@ -49,7 +49,7 @@ def __init__(self, depth, 
tokens=None): depth: indentation depth of this line tokens: initial list of tokens """ - self.depth = depth + self.depth = depth self._tokens = tokens or [] self.disable = False @@ -57,7 +57,7 @@ def __init__(self, depth, tokens=None): # Set up a doubly linked list. for index, tok in enumerate(self._tokens[1:]): # Note, 'index' is the index to the previous token. - tok.previous_token = self._tokens[index] + tok.previous_token = self._tokens[index] self._tokens[index].next_token = tok def CalculateFormattingInformation(self): @@ -66,9 +66,9 @@ def CalculateFormattingInformation(self): # means only that if this logical line is joined with a predecessor line, # then there will be a space between them. self.first.spaces_required_before = 1 - self.first.total_length = len(self.first.value) + self.first.total_length = len(self.first.value) - prev_token = self.first + prev_token = self.first prev_length = self.first.total_length for token in self._tokens[1:]: if (token.spaces_required_before == 0 and @@ -93,13 +93,13 @@ def CalculateFormattingInformation(self): # The split penalty has to be computed before {must|can}_break_before, # because these may use it for their decision. 
- token.split_penalty += _SplitPenalty(prev_token, token) + token.split_penalty += _SplitPenalty(prev_token, token) token.must_break_before = _MustBreakBefore(prev_token, token) - token.can_break_before = ( + token.can_break_before = ( token.must_break_before or _CanBreakBefore(prev_token, token)) prev_length = token.total_length - prev_token = token + prev_token = token def Split(self): """Split the line at semicolons.""" @@ -107,7 +107,7 @@ def Split(self): return [self] llines = [] - lline = LogicalLine(self.depth) + lline = LogicalLine(self.depth) for tok in self._tokens: if tok.value == ';': llines.append(lline) @@ -120,7 +120,7 @@ def Split(self): for lline in llines: lline.first.previous_token = None - lline.last.next_token = None + lline.last.next_token = None return llines @@ -164,7 +164,7 @@ def AsCode(self, indent_per_depth=2): Returns: A string representing the line as code. """ - indent = ' ' * indent_per_depth * self.depth + indent = ' ' * indent_per_depth * self.depth tokens_str = ' '.join(tok.value for tok in self._tokens) return indent + tokens_str @@ -544,10 +544,10 @@ def _CanBreakBefore(prev_token, cur_token): def IsSurroundedByBrackets(tok): """Return True if the token is surrounded by brackets.""" - paren_count = 0 - brace_count = 0 + paren_count = 0 + brace_count = 0 sq_bracket_count = 0 - previous_token = tok.previous_token + previous_token = tok.previous_token while previous_token: if previous_token.value == ')': paren_count -= 1 @@ -580,10 +580,10 @@ def _IsDictListTupleDelimiterTok(tok, is_opening): return False if is_opening: - open_tok = tok + open_tok = tok close_tok = tok.matching_bracket else: - open_tok = tok.matching_bracket + open_tok = tok.matching_bracket close_tok = tok # There must be something in between the tokens @@ -600,8 +600,8 @@ def _IsDictListTupleDelimiterTok(tok, is_opening): ] -_LOGICAL_OPERATORS = frozenset({'and', 'or'}) -_BITWISE_OPERATORS = frozenset({'&', '|', '^'}) +_LOGICAL_OPERATORS = frozenset({'and', 
'or'}) +_BITWISE_OPERATORS = frozenset({'&', '|', '^'}) _ARITHMETIC_OPERATORS = frozenset({'+', '-', '*', '/', '%', '//', '@'}) diff --git a/yapf/yapflib/object_state.py b/yapf/yapflib/object_state.py index 0afdb6041..ec259e682 100644 --- a/yapf/yapflib/object_state.py +++ b/yapf/yapflib/object_state.py @@ -45,9 +45,9 @@ class ComprehensionState(object): """ def __init__(self, expr_token): - self.expr_token = expr_token - self.for_token = None - self.has_split_at_for = False + self.expr_token = expr_token + self.for_token = None + self.has_split_at_for = False self.has_interior_split = False def HasTrivialExpr(self): @@ -63,18 +63,17 @@ def closing_bracket(self): return self.opening_bracket.matching_bracket def Clone(self): - clone = ComprehensionState(self.expr_token) - clone.for_token = self.for_token - clone.has_split_at_for = self.has_split_at_for + clone = ComprehensionState(self.expr_token) + clone.for_token = self.for_token + clone.has_split_at_for = self.has_split_at_for clone.has_interior_split = self.has_interior_split return clone def __repr__(self): - return ( - '[opening_bracket::%s, for_token::%s, has_split_at_for::%s,' - ' has_interior_split::%s, has_trivial_expr::%s]' % ( - self.opening_bracket, self.for_token, self.has_split_at_for, - self.has_interior_split, self.HasTrivialExpr())) + return ('[opening_bracket::%s, for_token::%s, has_split_at_for::%s,' + ' has_interior_split::%s, has_trivial_expr::%s]' % + (self.opening_bracket, self.for_token, self.has_split_at_for, + self.has_interior_split, self.HasTrivialExpr())) def __eq__(self, other): return hash(self) == hash(other) @@ -83,10 +82,8 @@ def __ne__(self, other): return not self == other def __hash__(self, *args, **kwargs): - return hash( - ( - self.expr_token, self.for_token, self.has_split_at_for, - self.has_interior_split)) + return hash((self.expr_token, self.for_token, self.has_split_at_for, + self.has_interior_split)) class ParameterListState(object): @@ -108,10 +105,10 @@ class 
ParameterListState(object): """ def __init__(self, opening_bracket, newline, opening_column): - self.opening_bracket = opening_bracket + self.opening_bracket = opening_bracket self.has_split_before_first_param = newline - self.opening_column = opening_column - self.parameters = opening_bracket.parameters + self.opening_column = opening_column + self.parameters = opening_bracket.parameters self.split_before_closing_bracket = False @property @@ -149,8 +146,8 @@ def LastParamFitsOnLine(self, indent): return False if not self.parameters: return True - total_length = self.last_token.total_length - last_param = self.parameters[-1].first_token + total_length = self.last_token.total_length + last_param = self.parameters[-1].first_token total_length -= last_param.total_length - len(last_param.value) return total_length + indent <= style.Get('COLUMN_LIMIT') @@ -163,25 +160,24 @@ def SplitBeforeClosingBracket(self, indent): return True if not self.parameters: return False - total_length = self.last_token.total_length - last_param = self.parameters[-1].first_token + total_length = self.last_token.total_length + last_param = self.parameters[-1].first_token total_length -= last_param.total_length - len(last_param.value) return total_length + indent > style.Get('COLUMN_LIMIT') def Clone(self): - clone = ParameterListState( - self.opening_bracket, self.has_split_before_first_param, - self.opening_column) + clone = ParameterListState(self.opening_bracket, + self.has_split_before_first_param, + self.opening_column) clone.split_before_closing_bracket = self.split_before_closing_bracket - clone.parameters = [param.Clone() for param in self.parameters] + clone.parameters = [param.Clone() for param in self.parameters] return clone def __repr__(self): - return ( - '[opening_bracket::%s, has_split_before_first_param::%s, ' - 'opening_column::%d]' % ( - self.opening_bracket, self.has_split_before_first_param, - self.opening_column)) + return ('[opening_bracket::%s, 
has_split_before_first_param::%s, ' + 'opening_column::%d]' % + (self.opening_bracket, self.has_split_before_first_param, + self.opening_column)) def __eq__(self, other): return hash(self) == hash(other) @@ -191,9 +187,8 @@ def __ne__(self, other): def __hash__(self, *args, **kwargs): return hash( - ( - self.opening_bracket, self.has_split_before_first_param, - self.opening_column, (hash(param) for param in self.parameters))) + (self.opening_bracket, self.has_split_before_first_param, + self.opening_column, (hash(param) for param in self.parameters))) class Parameter(object): @@ -207,7 +202,7 @@ class Parameter(object): def __init__(self, first_token, last_token): self.first_token = first_token - self.last_token = last_token + self.last_token = last_token @property @py3compat.lru_cache() @@ -224,8 +219,8 @@ def Clone(self): return Parameter(self.first_token, self.last_token) def __repr__(self): - return '[first_token::%s, last_token:%s]' % ( - self.first_token, self.last_token) + return '[first_token::%s, last_token:%s]' % (self.first_token, + self.last_token) def __eq__(self, other): return hash(self) == hash(other) diff --git a/yapf/yapflib/py3compat.py b/yapf/yapflib/py3compat.py index 2ea5910d1..e4cb9788f 100644 --- a/yapf/yapflib/py3compat.py +++ b/yapf/yapflib/py3compat.py @@ -18,14 +18,14 @@ import os import sys -PY3 = sys.version_info[0] >= 3 +PY3 = sys.version_info[0] >= 3 PY36 = sys.version_info[0] >= 3 and sys.version_info[1] >= 6 PY37 = sys.version_info[0] >= 3 and sys.version_info[1] >= 7 PY38 = sys.version_info[0] >= 3 and sys.version_info[1] >= 8 if PY3: StringIO = io.StringIO - BytesIO = io.BytesIO + BytesIO = io.BytesIO import codecs # noqa: F811 @@ -35,7 +35,7 @@ def open_with_encoding(filename, mode, encoding, newline=''): # pylint: disable import functools lru_cache = functools.lru_cache - range = range + range = range ifilter = filter def raw_input(): @@ -50,7 +50,7 @@ def raw_input(): import tokenize detect_encoding = tokenize.detect_encoding 
- TokenInfo = tokenize.TokenInfo + TokenInfo = tokenize.TokenInfo else: import __builtin__ import cStringIO @@ -80,8 +80,8 @@ def fake_wrapper(user_function): import collections - class TokenInfo(collections.namedtuple('TokenInfo', - 'type string start end line')): + class TokenInfo( + collections.namedtuple('TokenInfo', 'type string start end line')): pass @@ -116,7 +116,7 @@ def EncodeAndWriteToStdout(s, encoding='utf-8'): if PY3: basestring = str - unicode = str # pylint: disable=redefined-builtin,invalid-name + unicode = str # pylint: disable=redefined-builtin,invalid-name else: basestring = basestring diff --git a/yapf/yapflib/reformatter.py b/yapf/yapflib/reformatter.py index ec196d8b3..90823aed7 100644 --- a/yapf/yapflib/reformatter.py +++ b/yapf/yapflib/reformatter.py @@ -49,8 +49,8 @@ def Reformat(llines, verify=False, lines=None): Returns: A string representing the reformatted code. """ - final_lines = [] - prev_line = None # The previous line. + final_lines = [] + prev_line = None # The previous line. indent_width = style.Get('INDENT_WIDTH') for lline in _SingleOrMergedLines(llines): @@ -58,7 +58,7 @@ def Reformat(llines, verify=False, lines=None): _FormatFirstToken(first_token, lline.depth, prev_line, final_lines) indent_amt = indent_width * lline.depth - state = format_decision_state.FormatDecisionState(lline, indent_amt) + state = format_decision_state.FormatDecisionState(lline, indent_amt) state.MoveStateToNextToken() if not lline.disable: @@ -69,8 +69,8 @@ def Reformat(llines, verify=False, lines=None): if prev_line and prev_line.disable: # Keep the vertical spacing between a disabled and enabled formatting # region. 
- _RetainRequiredVerticalSpacingBetweenTokens( - lline.first, prev_line.last, lines) + _RetainRequiredVerticalSpacingBetweenTokens(lline.first, prev_line.last, + lines) if any(tok.is_comment for tok in lline.tokens): _RetainVerticalSpacingBeforeComments(lline) @@ -160,11 +160,11 @@ def _RetainRequiredVerticalSpacingBetweenTokens(cur_tok, prev_tok, lines): # Don't adjust between a comment and non-comment. pass elif lines and lines.intersection(range(prev_lineno, cur_lineno + 1)): - desired_newlines = cur_tok.whitespace_prefix.count('\n') - whitespace_lines = range(prev_lineno + 1, cur_lineno) - deletable_lines = len(lines.intersection(whitespace_lines)) - required_newlines = max( - required_newlines - deletable_lines, desired_newlines) + desired_newlines = cur_tok.whitespace_prefix.count('\n') + whitespace_lines = range(prev_lineno + 1, cur_lineno) + deletable_lines = len(lines.intersection(whitespace_lines)) + required_newlines = max(required_newlines - deletable_lines, + desired_newlines) cur_tok.AdjustNewlinesBefore(required_newlines) @@ -193,7 +193,7 @@ def _EmitLineUnformatted(state): state. 
""" while state.next_token: - previous_token = state.next_token.previous_token + previous_token = state.next_token.previous_token previous_lineno = previous_token.lineno if previous_token.is_multiline_string or previous_token.is_string: @@ -257,17 +257,16 @@ def _CanPlaceOnSingleLine(line): if (style.Get('FORCE_MULTILINE_DICT') and 'LBRACE' in token_names): return False indent_amt = style.Get('INDENT_WIDTH') * line.depth - last = line.last + last = line.last last_index = -1 if (last.is_pylint_comment or last.is_pytype_comment or last.is_copybara_comment): - last = last.previous_token + last = last.previous_token last_index = -2 if last is None: return True - return ( - last.total_length + indent_amt <= style.Get('COLUMN_LIMIT') and - not any(tok.is_comment for tok in line.tokens[:last_index])) + return (last.total_length + indent_amt <= style.Get('COLUMN_LIMIT') and + not any(tok.is_comment for tok in line.tokens[:last_index])) def _AlignTrailingComments(final_lines): @@ -289,7 +288,7 @@ def _AlignTrailingComments(final_lines): # first col value greater than that value and create the necessary for # each line accordingly. all_pc_line_lengths = [] # All pre-comment line lengths - max_line_length = 0 + max_line_length = 0 while True: # EOF @@ -311,7 +310,7 @@ def _AlignTrailingComments(final_lines): continue # Calculate the length of each line in this logical line. 
- line_content = '' + line_content = '' pc_line_lengths = [] for line_tok in this_line.tokens: @@ -320,7 +319,7 @@ def _AlignTrailingComments(final_lines): newline_index = whitespace_prefix.rfind('\n') if newline_index != -1: max_line_length = max(max_line_length, len(line_content)) - line_content = '' + line_content = '' whitespace_prefix = whitespace_prefix[newline_index + 1:] @@ -370,8 +369,8 @@ def _AlignTrailingComments(final_lines): for comment_line_index, comment_line in enumerate( line_tok.value.split('\n')): - line_content.append( - '{}{}'.format(whitespace, comment_line.strip())) + line_content.append('{}{}'.format(whitespace, + comment_line.strip())) if comment_line_index == 0: whitespace = ' ' * (aligned_col - 1) @@ -413,7 +412,7 @@ def _AlignAssignment(final_lines): if tok.is_assign or tok.is_augassign: # all pre assignment variable lengths in one block of lines all_pa_variables_lengths = [] - max_variables_length = 0 + max_variables_length = 0 while True: # EOF @@ -422,7 +421,7 @@ def _AlignAssignment(final_lines): break this_line_index = final_lines_index + len(all_pa_variables_lengths) - this_line = final_lines[this_line_index] + this_line = final_lines[this_line_index] next_line = None if this_line_index < len(final_lines) - 1: @@ -439,9 +438,9 @@ def _AlignAssignment(final_lines): # if there is a standalone comment or keyword statement line # or other lines without assignment in between, break - elif (all_pa_variables_lengths and - True not in [tok.is_assign or tok.is_augassign - for tok in this_line.tokens]): + elif (all_pa_variables_lengths and True not in [ + tok.is_assign or tok.is_augassign for tok in this_line.tokens + ]): if this_line.tokens[0].is_comment: if style.Get('NEW_ALIGNMENT_AFTER_COMMENTLINE'): break @@ -452,19 +451,19 @@ def _AlignAssignment(final_lines): all_pa_variables_lengths.append([]) continue - variables_content = '' + variables_content = '' pa_variables_lengths = [] - contain_object = False - line_tokens = 
this_line.tokens + contain_object = False + line_tokens = this_line.tokens # only one assignment expression is on each line for index in range(len(line_tokens)): line_tok = line_tokens[index] - prefix = line_tok.formatted_whitespace_prefix + prefix = line_tok.formatted_whitespace_prefix newline_index = prefix.rfind('\n') if newline_index != -1: variables_content = '' - prefix = prefix[newline_index + 1:] + prefix = prefix[newline_index + 1:] if line_tok.is_assign or line_tok.is_augassign: next_toks = [ @@ -495,8 +494,8 @@ def _AlignAssignment(final_lines): variables_content += '{}{}'.format(prefix, line_tok.value) if pa_variables_lengths: - max_variables_length = max( - max_variables_length, max(pa_variables_lengths)) + max_variables_length = max(max_variables_length, + max(pa_variables_lengths)) all_pa_variables_lengths.append(pa_variables_lengths) @@ -537,8 +536,8 @@ def _AlignAssignment(final_lines): whitespace = ' ' * ( max_variables_length - pa_variables_lengths[0] - 1) - assign_content = '{}{}'.format( - whitespace, line_tok.value.strip()) + assign_content = '{}{}'.format(whitespace, + line_tok.value.strip()) existing_whitespace_prefix = \ line_tok.formatted_whitespace_prefix.lstrip('\n') @@ -548,8 +547,8 @@ def _AlignAssignment(final_lines): len(existing_whitespace_prefix) > len(whitespace)): line_tok.whitespace_prefix = '' elif assign_content.startswith(existing_whitespace_prefix): - assign_content = assign_content[ - len(existing_whitespace_prefix):] + assign_content = assign_content[len(existing_whitespace_prefix + ):] # update the assignment operator value line_tok.value = assign_content @@ -601,8 +600,8 @@ class _StateNode(object): # TODO(morbo): Add a '__cmp__' method. 
def __init__(self, state, newline, previous): - self.state = state.Clone() - self.newline = newline + self.state = state.Clone() + self.newline = newline self.previous = previous def __repr__(self): # pragma: no cover @@ -618,8 +617,8 @@ def __repr__(self): # pragma: no cover # An item in the prioritized BFS search queue. The 'StateNode's 'state' has # the given '_OrderedPenalty'. -_QueueItem = collections.namedtuple( - 'QueueItem', ['ordered_penalty', 'state_node']) +_QueueItem = collections.namedtuple('QueueItem', + ['ordered_penalty', 'state_node']) def _AnalyzeSolutionSpace(initial_state): @@ -637,8 +636,8 @@ def _AnalyzeSolutionSpace(initial_state): Returns: True if a formatting solution was found. False otherwise. """ - count = 0 - seen = set() + count = 0 + seen = set() p_queue = [] # Insert start element. @@ -647,9 +646,9 @@ def _AnalyzeSolutionSpace(initial_state): count += 1 while p_queue: - item = p_queue[0] + item = p_queue[0] penalty = item.ordered_penalty.penalty - node = item.state_node + node = item.state_node if not node.state.next_token: break heapq.heappop(p_queue) @@ -703,7 +702,7 @@ def _AddNextStateToQueue(penalty, previous_node, newline, count, p_queue): # Don't add a token we must split but where we aren't splitting. 
return count - node = _StateNode(previous_node.state, newline, previous_node) + node = _StateNode(previous_node.state, newline, previous_node) penalty += node.state.AddTokenToState( newline=newline, dry_run=True, must_split=must_split) heapq.heappush(p_queue, _QueueItem(_OrderedPenalty(penalty, count), node)) @@ -759,25 +758,25 @@ def _FormatFirstToken(first_token, indent_depth, prev_line, final_lines): NESTED_DEPTH.append(indent_depth) first_token.AddWhitespacePrefix( - _CalculateNumberOfNewlines( - first_token, indent_depth, prev_line, final_lines, first_nested), + _CalculateNumberOfNewlines(first_token, indent_depth, prev_line, + final_lines, first_nested), indent_level=indent_depth) -NO_BLANK_LINES = 1 -ONE_BLANK_LINE = 2 +NO_BLANK_LINES = 1 +ONE_BLANK_LINE = 2 TWO_BLANK_LINES = 3 def _IsClassOrDef(tok): if tok.value in {'class', 'def', '@'}: return True - return ( - tok.next_token and tok.value == 'async' and tok.next_token.value == 'def') + return (tok.next_token and tok.value == 'async' and + tok.next_token.value == 'def') -def _CalculateNumberOfNewlines( - first_token, indent_depth, prev_line, final_lines, first_nested): +def _CalculateNumberOfNewlines(first_token, indent_depth, prev_line, + final_lines, first_nested): """Calculate the number of newlines we need to add. Arguments: @@ -897,11 +896,11 @@ def _SingleOrMergedLines(lines): Either a single line, if the current line cannot be merged with the succeeding line, or the next two lines merged into one line. """ - index = 0 + index = 0 last_was_merged = False while index < len(lines): if lines[index].disable: - line = lines[index] + line = lines[index] index += 1 while index < len(lines): column = line.last.column + 2 @@ -927,11 +926,11 @@ def _SingleOrMergedLines(lines): # formatting. Otherwise, it could mess up the shell script's syntax. 
lines[index].disable = True yield lines[index] - index += 2 + index += 2 last_was_merged = True else: yield lines[index] - index += 1 + index += 1 last_was_merged = False diff --git a/yapf/yapflib/split_penalty.py b/yapf/yapflib/split_penalty.py index 8f93d3ade..79b68edcd 100644 --- a/yapf/yapflib/split_penalty.py +++ b/yapf/yapflib/split_penalty.py @@ -15,9 +15,9 @@ from yapf.yapflib import style # Generic split penalties -UNBREAKABLE = 1000**5 +UNBREAKABLE = 1000**5 VERY_STRONGLY_CONNECTED = 5000 -STRONGLY_CONNECTED = 2500 +STRONGLY_CONNECTED = 2500 ############################################################################# # Grammar-specific penalties - should be <= 1000 # @@ -25,15 +25,15 @@ # Lambdas shouldn't be split unless absolutely necessary or if # ALLOW_MULTILINE_LAMBDAS is True. -LAMBDA = 1000 +LAMBDA = 1000 MULTILINE_LAMBDA = 500 ANNOTATION = 100 -ARGUMENT = 25 +ARGUMENT = 25 # TODO: Assign real values. -RETURN_TYPE = 1 -DOTTED_NAME = 40 -EXPR = 10 -DICT_KEY_EXPR = 20 +RETURN_TYPE = 1 +DOTTED_NAME = 40 +EXPR = 10 +DICT_KEY_EXPR = 20 DICT_VALUE_EXPR = 11 diff --git a/yapf/yapflib/style.py b/yapf/yapflib/style.py index 820952492..f2912f1ee 100644 --- a/yapf/yapflib/style.py +++ b/yapf/yapflib/style.py @@ -52,23 +52,18 @@ def SetGlobalStyle(style): _STYLE_HELP = dict( - ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=textwrap.dedent( - """\ + ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT=textwrap.dedent("""\ Align closing bracket with visual indentation."""), - ALIGN_ASSIGNMENT=textwrap.dedent( - """\ + ALIGN_ASSIGNMENT=textwrap.dedent("""\ Align assignment or augmented assignment operators. 
If there is a blank line or newline comment or objects with newline entries in between, it will start new block alignment."""), - NEW_ALIGNMENT_AFTER_COMMENTLINE=textwrap.dedent( - """\ + NEW_ALIGNMENT_AFTER_COMMENTLINE=textwrap.dedent("""\ Start new assignment or colon alignment when there is a newline comment in between.""" - ), - ALLOW_MULTILINE_LAMBDAS=textwrap.dedent( - """\ + ), + ALLOW_MULTILINE_LAMBDAS=textwrap.dedent("""\ Allow lambdas to be formatted on more than one line."""), - ALLOW_MULTILINE_DICTIONARY_KEYS=textwrap.dedent( - """\ + ALLOW_MULTILINE_DICTIONARY_KEYS=textwrap.dedent("""\ Allow dictionary keys to exist on multiple lines. For example: x = { @@ -76,15 +71,12 @@ def SetGlobalStyle(style): 'this is the second element of a tuple'): value, }"""), - ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=textwrap.dedent( - """\ + ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS=textwrap.dedent("""\ Allow splitting before a default / named assignment in an argument list. """), - ALLOW_SPLIT_BEFORE_DICT_VALUE=textwrap.dedent( - """\ + ALLOW_SPLIT_BEFORE_DICT_VALUE=textwrap.dedent("""\ Allow splits before the dictionary value."""), - ARITHMETIC_PRECEDENCE_INDICATION=textwrap.dedent( - """\ + ARITHMETIC_PRECEDENCE_INDICATION=textwrap.dedent("""\ Let spacing indicate operator precedence. For example: a = 1 * 2 + 3 / 4 @@ -104,8 +96,7 @@ def SetGlobalStyle(style): f = 1 + 2 + 3 + 4 """), - BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=textwrap.dedent( - """\ + BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF=textwrap.dedent("""\ Insert a blank line before a 'def' or 'class' immediately nested within another 'def' or 'class'. 
For example: @@ -113,22 +104,17 @@ class Foo: # <------ this blank line def method(): ..."""), - BLANK_LINE_BEFORE_CLASS_DOCSTRING=textwrap.dedent( - """\ + BLANK_LINE_BEFORE_CLASS_DOCSTRING=textwrap.dedent("""\ Insert a blank line before a class-level docstring."""), - BLANK_LINE_BEFORE_MODULE_DOCSTRING=textwrap.dedent( - """\ + BLANK_LINE_BEFORE_MODULE_DOCSTRING=textwrap.dedent("""\ Insert a blank line before a module docstring."""), - BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=textwrap.dedent( - """\ + BLANK_LINES_AROUND_TOP_LEVEL_DEFINITION=textwrap.dedent("""\ Number of blank lines surrounding top-level function and class definitions."""), - BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES=textwrap.dedent( - """\ + BLANK_LINES_BETWEEN_TOP_LEVEL_IMPORTS_AND_VARIABLES=textwrap.dedent("""\ Number of blank lines between top-level imports and variable definitions."""), - COALESCE_BRACKETS=textwrap.dedent( - """\ + COALESCE_BRACKETS=textwrap.dedent("""\ Do not split consecutive brackets. Only relevant when dedent_closing_brackets is set. For example: @@ -147,8 +133,7 @@ def method(): })"""), COLUMN_LIMIT=textwrap.dedent("""\ The column limit."""), - CONTINUATION_ALIGN_STYLE=textwrap.dedent( - """\ + CONTINUATION_ALIGN_STYLE=textwrap.dedent("""\ The style for continuation alignment. Possible values are: - SPACE: Use spaces for continuation alignment. This is default behavior. @@ -158,11 +143,9 @@ def method(): - VALIGN-RIGHT: Vertically align continuation lines to multiple of INDENT_WIDTH columns. Slightly right (one tab or a few spaces) if cannot vertically align continuation lines with indent characters."""), - CONTINUATION_INDENT_WIDTH=textwrap.dedent( - """\ + CONTINUATION_INDENT_WIDTH=textwrap.dedent("""\ Indent width used for line continuations."""), - DEDENT_CLOSING_BRACKETS=textwrap.dedent( - """\ + DEDENT_CLOSING_BRACKETS=textwrap.dedent("""\ Put closing brackets on a separate line, dedented, if the bracketed expression can't fit in a single line. 
Applies to all kinds of brackets, including function definitions and calls. For example: @@ -180,33 +163,27 @@ def method(): end_ts=now(), ) # <--- this bracket is dedented and on a separate line """), - DISABLE_ENDING_COMMA_HEURISTIC=textwrap.dedent( - """\ + DISABLE_ENDING_COMMA_HEURISTIC=textwrap.dedent("""\ Disable the heuristic which places each list element on a separate line if the list is comma-terminated."""), - EACH_DICT_ENTRY_ON_SEPARATE_LINE=textwrap.dedent( - """\ + EACH_DICT_ENTRY_ON_SEPARATE_LINE=textwrap.dedent("""\ Place each dictionary entry onto its own line."""), - FORCE_MULTILINE_DICT=textwrap.dedent( - """\ + FORCE_MULTILINE_DICT=textwrap.dedent("""\ Require multiline dictionary even if it would normally fit on one line. For example: config = { 'key1': 'value1' }"""), - I18N_COMMENT=textwrap.dedent( - """\ + I18N_COMMENT=textwrap.dedent("""\ The regex for an i18n comment. The presence of this comment stops reformatting of that line, because the comments are required to be next to the string they translate."""), - I18N_FUNCTION_CALL=textwrap.dedent( - """\ + I18N_FUNCTION_CALL=textwrap.dedent("""\ The i18n function call names. The presence of this function stops reformattting on that line, because the string it has cannot be moved away from the i18n comment."""), - INDENT_CLOSING_BRACKETS=textwrap.dedent( - """\ + INDENT_CLOSING_BRACKETS=textwrap.dedent("""\ Put closing brackets on a separate line, indented, if the bracketed expression can't fit in a single line. Applies to all kinds of brackets, including function definitions and calls. For example: @@ -224,8 +201,7 @@ def method(): end_ts=now(), ) # <--- this bracket is indented and on a separate line """), - INDENT_DICTIONARY_VALUE=textwrap.dedent( - """\ + INDENT_DICTIONARY_VALUE=textwrap.dedent("""\ Indent the dictionary value if it cannot fit on the same line as the dictionary key. 
For example: @@ -236,16 +212,13 @@ def method(): value2, } """), - INDENT_WIDTH=textwrap.dedent( - """\ + INDENT_WIDTH=textwrap.dedent("""\ The number of columns to use for indentation."""), INDENT_BLANK_LINES=textwrap.dedent("""\ Indent blank lines."""), - JOIN_MULTIPLE_LINES=textwrap.dedent( - """\ + JOIN_MULTIPLE_LINES=textwrap.dedent("""\ Join short lines into one line. E.g., single line 'if' statements."""), - NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=textwrap.dedent( - """\ + NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS=textwrap.dedent("""\ Do not include spaces around selected binary operators. For example: 1 + 2 * 3 - 4 / 5 @@ -254,26 +227,21 @@ def method(): 1 + 2*3 - 4/5 """), - SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=textwrap.dedent( - """\ + SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET=textwrap.dedent("""\ Insert a space between the ending comma and closing bracket of a list, etc."""), - SPACE_INSIDE_BRACKETS=textwrap.dedent( - """\ + SPACE_INSIDE_BRACKETS=textwrap.dedent("""\ Use spaces inside brackets, braces, and parentheses. For example: method_call( 1 ) my_dict[ 3 ][ 1 ][ get_index( *args, **kwargs ) ] my_set = { 1, 2, 3 } """), - SPACES_AROUND_POWER_OPERATOR=textwrap.dedent( - """\ + SPACES_AROUND_POWER_OPERATOR=textwrap.dedent("""\ Use spaces around the power operator."""), - SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=textwrap.dedent( - """\ + SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN=textwrap.dedent("""\ Use spaces around default or named assigns."""), - SPACES_AROUND_DICT_DELIMITERS=textwrap.dedent( - """\ + SPACES_AROUND_DICT_DELIMITERS=textwrap.dedent("""\ Adds a space after the opening '{' and before the ending '}' dict delimiters. @@ -283,8 +251,7 @@ def method(): { 1: 2 } """), - SPACES_AROUND_LIST_DELIMITERS=textwrap.dedent( - """\ + SPACES_AROUND_LIST_DELIMITERS=textwrap.dedent("""\ Adds a space after the opening '[' and before the ending ']' list delimiters. 
@@ -294,14 +261,12 @@ def method(): [ 1, 2 ] """), - SPACES_AROUND_SUBSCRIPT_COLON=textwrap.dedent( - """\ + SPACES_AROUND_SUBSCRIPT_COLON=textwrap.dedent("""\ Use spaces around the subscript / slice operator. For example: my_list[1 : 10 : 2] """), - SPACES_AROUND_TUPLE_DELIMITERS=textwrap.dedent( - """\ + SPACES_AROUND_TUPLE_DELIMITERS=textwrap.dedent("""\ Adds a space after the opening '(' and before the ending ')' tuple delimiters. @@ -311,8 +276,7 @@ def method(): ( 1, 2, 3 ) """), - SPACES_BEFORE_COMMENT=textwrap.dedent( - """\ + SPACES_BEFORE_COMMENT=textwrap.dedent("""\ The number of spaces required before a trailing comment. This can be a single value (representing the number of spaces before each trailing comment) or list of values (representing @@ -354,31 +318,24 @@ def method(): short # This is a shorter statement """), # noqa - SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=textwrap.dedent( - """\ + SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED=textwrap.dedent("""\ Split before arguments if the argument list is terminated by a comma."""), - SPLIT_ALL_COMMA_SEPARATED_VALUES=textwrap.dedent( - """\ + SPLIT_ALL_COMMA_SEPARATED_VALUES=textwrap.dedent("""\ Split before arguments"""), - SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES=textwrap.dedent( - """\ + SPLIT_ALL_TOP_LEVEL_COMMA_SEPARATED_VALUES=textwrap.dedent("""\ Split before arguments, but do not split all subexpressions recursively (unless needed)."""), - SPLIT_BEFORE_ARITHMETIC_OPERATOR=textwrap.dedent( - """\ + SPLIT_BEFORE_ARITHMETIC_OPERATOR=textwrap.dedent("""\ Set to True to prefer splitting before '+', '-', '*', '/', '//', or '@' rather than after."""), - SPLIT_BEFORE_BITWISE_OPERATOR=textwrap.dedent( - """\ + SPLIT_BEFORE_BITWISE_OPERATOR=textwrap.dedent("""\ Set to True to prefer splitting before '&', '|' or '^' rather than after."""), - SPLIT_BEFORE_CLOSING_BRACKET=textwrap.dedent( - """\ + SPLIT_BEFORE_CLOSING_BRACKET=textwrap.dedent("""\ Split before the closing bracket if a list or dict literal doesn't fit 
on a single line."""), - SPLIT_BEFORE_DICT_SET_GENERATOR=textwrap.dedent( - """\ + SPLIT_BEFORE_DICT_SET_GENERATOR=textwrap.dedent("""\ Split before a dictionary or set generator (comp_for). For example, note the split before the 'for': @@ -386,8 +343,7 @@ def method(): variable: 'Hello world, have a nice day!' for variable in bar if variable != 42 }"""), - SPLIT_BEFORE_DOT=textwrap.dedent( - """\ + SPLIT_BEFORE_DOT=textwrap.dedent("""\ Split before the '.' if we need to split a longer expression: foo = ('This is a really long string: {}, {}, {}, {}'.format(a, b, c, d)) @@ -397,24 +353,19 @@ def method(): foo = ('This is a really long string: {}, {}, {}, {}' .format(a, b, c, d)) """), # noqa - SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=textwrap.dedent( - """\ + SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN=textwrap.dedent("""\ Split after the opening paren which surrounds an expression if it doesn't fit on a single line. """), - SPLIT_BEFORE_FIRST_ARGUMENT=textwrap.dedent( - """\ + SPLIT_BEFORE_FIRST_ARGUMENT=textwrap.dedent("""\ If an argument / parameter list is going to be split, then split before the first argument."""), - SPLIT_BEFORE_LOGICAL_OPERATOR=textwrap.dedent( - """\ + SPLIT_BEFORE_LOGICAL_OPERATOR=textwrap.dedent("""\ Set to True to prefer splitting before 'and' or 'or' rather than after."""), - SPLIT_BEFORE_NAMED_ASSIGNS=textwrap.dedent( - """\ + SPLIT_BEFORE_NAMED_ASSIGNS=textwrap.dedent("""\ Split named assignments onto individual lines."""), - SPLIT_COMPLEX_COMPREHENSION=textwrap.dedent( - """\ + SPLIT_COMPLEX_COMPREHENSION=textwrap.dedent("""\ Set to True to split list comprehensions and generators that have non-trivial expressions and multiple clauses before each of these clauses. 
For example: @@ -430,36 +381,27 @@ def method(): for a_long_var in xrange(1000) if a_long_var % 10] """), - SPLIT_PENALTY_AFTER_OPENING_BRACKET=textwrap.dedent( - """\ + SPLIT_PENALTY_AFTER_OPENING_BRACKET=textwrap.dedent("""\ The penalty for splitting right after the opening bracket."""), - SPLIT_PENALTY_AFTER_UNARY_OPERATOR=textwrap.dedent( - """\ + SPLIT_PENALTY_AFTER_UNARY_OPERATOR=textwrap.dedent("""\ The penalty for splitting the line after a unary operator."""), - SPLIT_PENALTY_ARITHMETIC_OPERATOR=textwrap.dedent( - """\ + SPLIT_PENALTY_ARITHMETIC_OPERATOR=textwrap.dedent("""\ The penalty of splitting the line around the '+', '-', '*', '/', '//', ``%``, and '@' operators."""), - SPLIT_PENALTY_BEFORE_IF_EXPR=textwrap.dedent( - """\ + SPLIT_PENALTY_BEFORE_IF_EXPR=textwrap.dedent("""\ The penalty for splitting right before an if expression."""), - SPLIT_PENALTY_BITWISE_OPERATOR=textwrap.dedent( - """\ + SPLIT_PENALTY_BITWISE_OPERATOR=textwrap.dedent("""\ The penalty of splitting the line around the '&', '|', and '^' operators."""), - SPLIT_PENALTY_COMPREHENSION=textwrap.dedent( - """\ + SPLIT_PENALTY_COMPREHENSION=textwrap.dedent("""\ The penalty for splitting a list comprehension or generator expression."""), - SPLIT_PENALTY_EXCESS_CHARACTER=textwrap.dedent( - """\ + SPLIT_PENALTY_EXCESS_CHARACTER=textwrap.dedent("""\ The penalty for characters over the column limit."""), - SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=textwrap.dedent( - """\ + SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT=textwrap.dedent("""\ The penalty incurred by adding a line split to the logical line. The more line splits added the higher the penalty."""), - SPLIT_PENALTY_IMPORT_NAMES=textwrap.dedent( - """\ + SPLIT_PENALTY_IMPORT_NAMES=textwrap.dedent("""\ The penalty of splitting a list of "import as" names. 
For example: from a_very_long_or_indented_module_name_yada_yad import (long_argument_1, @@ -471,12 +413,10 @@ def method(): from a_very_long_or_indented_module_name_yada_yad import ( long_argument_1, long_argument_2, long_argument_3) """), # noqa - SPLIT_PENALTY_LOGICAL_OPERATOR=textwrap.dedent( - """\ + SPLIT_PENALTY_LOGICAL_OPERATOR=textwrap.dedent("""\ The penalty of splitting the line around the 'and' and 'or' operators."""), - USE_TABS=textwrap.dedent( - """\ + USE_TABS=textwrap.dedent("""\ Use the Tab character for indentation."""), # BASED_ON_STYLE='Which predefined style this style is based on', ) @@ -552,51 +492,51 @@ def CreatePEP8Style(): def CreateGoogleStyle(): """Create the Google formatting style.""" - style = CreatePEP8Style() - style['ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'] = False - style['COLUMN_LIMIT'] = 80 - style['INDENT_DICTIONARY_VALUE'] = True - style['INDENT_WIDTH'] = 4 - style['I18N_COMMENT'] = r'#\..*' - style['I18N_FUNCTION_CALL'] = ['N_', '_'] - style['JOIN_MULTIPLE_LINES'] = False + style = CreatePEP8Style() + style['ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'] = False + style['COLUMN_LIMIT'] = 80 + style['INDENT_DICTIONARY_VALUE'] = True + style['INDENT_WIDTH'] = 4 + style['I18N_COMMENT'] = r'#\..*' + style['I18N_FUNCTION_CALL'] = ['N_', '_'] + style['JOIN_MULTIPLE_LINES'] = False style['SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET'] = False - style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False - style['SPLIT_BEFORE_DICT_SET_GENERATOR'] = False - style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False - style['SPLIT_COMPLEX_COMPREHENSION'] = True - style['SPLIT_PENALTY_COMPREHENSION'] = 2100 + style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False + style['SPLIT_BEFORE_DICT_SET_GENERATOR'] = False + style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False + style['SPLIT_COMPLEX_COMPREHENSION'] = True + style['SPLIT_PENALTY_COMPREHENSION'] = 2100 return style def CreateYapfStyle(): """Create the YAPF formatting style.""" - style = CreateGoogleStyle() - 
style['ALLOW_MULTILINE_DICTIONARY_KEYS'] = True + style = CreateGoogleStyle() + style['ALLOW_MULTILINE_DICTIONARY_KEYS'] = True style['ALLOW_SPLIT_BEFORE_DEFAULT_OR_NAMED_ASSIGNS'] = False - style['INDENT_WIDTH'] = 2 - style['SPLIT_BEFORE_BITWISE_OPERATOR'] = True - style['SPLIT_BEFORE_DOT'] = True + style['INDENT_WIDTH'] = 2 + style['SPLIT_BEFORE_BITWISE_OPERATOR'] = True + style['SPLIT_BEFORE_DOT'] = True style['SPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN'] = True return style def CreateFacebookStyle(): """Create the Facebook formatting style.""" - style = CreatePEP8Style() + style = CreatePEP8Style() style['ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'] = False - style['BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF'] = False - style['COLUMN_LIMIT'] = 80 - style['DEDENT_CLOSING_BRACKETS'] = True - style['INDENT_CLOSING_BRACKETS'] = False - style['INDENT_DICTIONARY_VALUE'] = True - style['JOIN_MULTIPLE_LINES'] = False - style['SPACES_BEFORE_COMMENT'] = 2 - style['SPLIT_PENALTY_AFTER_OPENING_BRACKET'] = 0 - style['SPLIT_PENALTY_BEFORE_IF_EXPR'] = 30 - style['SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT'] = 30 - style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False - style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False + style['BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF'] = False + style['COLUMN_LIMIT'] = 80 + style['DEDENT_CLOSING_BRACKETS'] = True + style['INDENT_CLOSING_BRACKETS'] = False + style['INDENT_DICTIONARY_VALUE'] = True + style['JOIN_MULTIPLE_LINES'] = False + style['SPACES_BEFORE_COMMENT'] = 2 + style['SPLIT_PENALTY_AFTER_OPENING_BRACKET'] = 0 + style['SPLIT_PENALTY_BEFORE_IF_EXPR'] = 30 + style['SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT'] = 30 + style['SPLIT_BEFORE_LOGICAL_OPERATOR'] = False + style['SPLIT_BEFORE_BITWISE_OPERATOR'] = False return style @@ -828,7 +768,7 @@ def _CreateConfigParserFromConfigFile(config_filename): "configuration file") pyproject_toml = toml.load(style_file) - style_dict = pyproject_toml.get("tool", {}).get("yapf", None) + style_dict = pyproject_toml.get("tool", 
{}).get("yapf", None) if style_dict is None: raise StyleConfigError( 'Unable to find section [tool.yapf] in {0}'.format(config_filename)) @@ -871,10 +811,10 @@ def _CreateStyleFromConfigParser(config): # Initialize the base style. section = 'yapf' if config.has_section('yapf') else 'style' if config.has_option('style', 'based_on_style'): - based_on = config.get('style', 'based_on_style').lower() + based_on = config.get('style', 'based_on_style').lower() base_style = _STYLE_NAME_TO_FACTORY[based_on]() elif config.has_option('yapf', 'based_on_style'): - based_on = config.get('yapf', 'based_on_style').lower() + based_on = config.get('yapf', 'based_on_style').lower() base_style = _STYLE_NAME_TO_FACTORY[based_on]() else: base_style = _GLOBAL_STYLE_FACTORY() @@ -891,14 +831,14 @@ def _CreateStyleFromConfigParser(config): try: base_style[option] = _STYLE_OPTION_VALUE_CONVERTER[option](value) except ValueError: - raise StyleConfigError( - "'{}' is not a valid setting for {}.".format(value, option)) + raise StyleConfigError("'{}' is not a valid setting for {}.".format( + value, option)) return base_style # The default style - used if yapf is not invoked without specifically # requesting a formatting style. -DEFAULT_STYLE = 'pep8' +DEFAULT_STYLE = 'pep8' DEFAULT_STYLE_FACTORY = CreatePEP8Style _GLOBAL_STYLE_FACTORY = CreatePEP8Style diff --git a/yapf/yapflib/subtypes.py b/yapf/yapflib/subtypes.py index e675c41c1..b4b7efe75 100644 --- a/yapf/yapflib/subtypes.py +++ b/yapf/yapflib/subtypes.py @@ -13,28 +13,28 @@ # limitations under the License. 
"""Token subtypes used to improve formatting.""" -NONE = 0 -UNARY_OPERATOR = 1 -BINARY_OPERATOR = 2 -SUBSCRIPT_COLON = 3 -SUBSCRIPT_BRACKET = 4 -DEFAULT_OR_NAMED_ASSIGN = 5 +NONE = 0 +UNARY_OPERATOR = 1 +BINARY_OPERATOR = 2 +SUBSCRIPT_COLON = 3 +SUBSCRIPT_BRACKET = 4 +DEFAULT_OR_NAMED_ASSIGN = 5 DEFAULT_OR_NAMED_ASSIGN_ARG_LIST = 6 -VARARGS_LIST = 7 -VARARGS_STAR = 8 -KWARGS_STAR_STAR = 9 -ASSIGN_OPERATOR = 10 -DICTIONARY_KEY = 11 -DICTIONARY_KEY_PART = 12 -DICTIONARY_VALUE = 13 -DICT_SET_GENERATOR = 14 -COMP_EXPR = 15 -COMP_FOR = 16 -COMP_IF = 17 -FUNC_DEF = 18 -DECORATOR = 19 -TYPED_NAME = 20 -TYPED_NAME_ARG_LIST = 21 -SIMPLE_EXPRESSION = 22 -PARAMETER_START = 23 -PARAMETER_STOP = 24 +VARARGS_LIST = 7 +VARARGS_STAR = 8 +KWARGS_STAR_STAR = 9 +ASSIGN_OPERATOR = 10 +DICTIONARY_KEY = 11 +DICTIONARY_KEY_PART = 12 +DICTIONARY_VALUE = 13 +DICT_SET_GENERATOR = 14 +COMP_EXPR = 15 +COMP_FOR = 16 +COMP_IF = 17 +FUNC_DEF = 18 +DECORATOR = 19 +TYPED_NAME = 20 +TYPED_NAME_ARG_LIST = 21 +SIMPLE_EXPRESSION = 22 +PARAMETER_START = 23 +PARAMETER_STOP = 24 diff --git a/yapf/yapflib/verifier.py b/yapf/yapflib/verifier.py index 01dccc0b0..bcbe6fb6b 100644 --- a/yapf/yapflib/verifier.py +++ b/yapf/yapflib/verifier.py @@ -59,7 +59,7 @@ def _NormalizeCode(code): # Split the code to lines and get rid of all leading full-comment lines as # they can mess up the normalization attempt. 
lines = code.split('\n') - i = 0 + i = 0 for i, line in enumerate(lines): line = line.strip() if line and not line.startswith('#'): diff --git a/yapf/yapflib/yapf_api.py b/yapf/yapflib/yapf_api.py index e0098ddd2..c17451434 100644 --- a/yapf/yapflib/yapf_api.py +++ b/yapf/yapflib/yapf_api.py @@ -51,14 +51,13 @@ from yapf.yapflib import style -def FormatFile( - filename, - style_config=None, - lines=None, - print_diff=False, - verify=False, - in_place=False, - logger=None): +def FormatFile(filename, + style_config=None, + lines=None, + print_diff=False, + verify=False, + in_place=False, + logger=None): """Format a single Python file and return the formatted code. Arguments: @@ -91,7 +90,7 @@ def FormatFile( raise ValueError('Cannot pass both in_place and print_diff.') original_source, newline, encoding = ReadFile(filename, logger) - reformatted_source, changed = FormatCode( + reformatted_source, changed = FormatCode( original_source, style_config=style_config, filename=filename, @@ -99,12 +98,12 @@ def FormatFile( print_diff=print_diff, verify=verify) if reformatted_source.rstrip('\n'): - lines = reformatted_source.rstrip('\n').split('\n') + lines = reformatted_source.rstrip('\n').split('\n') reformatted_source = newline.join(iter(lines)) + newline if in_place: if original_source and original_source != reformatted_source: - file_resources.WriteReformattedCode( - filename, reformatted_source, encoding, in_place) + file_resources.WriteReformattedCode(filename, reformatted_source, + encoding, in_place) return None, encoding, changed return reformatted_source, encoding, changed @@ -149,13 +148,12 @@ def FormatTree(tree, style_config=None, lines=None, verify=False): return reformatter.Reformat(_SplitSemicolons(llines), verify, lines) -def FormatCode( - unformatted_source, - filename='', - style_config=None, - lines=None, - print_diff=False, - verify=False): +def FormatCode(unformatted_source, + filename='', + style_config=None, + lines=None, + print_diff=False, + 
verify=False): """Format a string of Python code. This provides an alternative entry point to YAPF. @@ -230,12 +228,12 @@ def ReadFile(filename, logger=None): encoding = file_resources.FileEncoding(filename) # Preserves line endings. - with py3compat.open_with_encoding(filename, mode='r', encoding=encoding, - newline='') as fd: + with py3compat.open_with_encoding( + filename, mode='r', encoding=encoding, newline='') as fd: lines = fd.readlines() line_ending = file_resources.LineEnding(lines) - source = '\n'.join(line.rstrip('\r\n') for line in lines) + '\n' + source = '\n'.join(line.rstrip('\r\n') for line in lines) + '\n' return source, line_ending, encoding except IOError as e: # pragma: no cover if logger: @@ -244,9 +242,8 @@ def ReadFile(filename, logger=None): raise except UnicodeDecodeError as e: # pragma: no cover if logger: - logger( - 'Could not parse %s! Consider excluding this file with --exclude.', - filename) + logger('Could not parse %s! Consider excluding this file with --exclude.', + filename) logger(e) e.args = (e.args[0], (filename, e.args[1][1], e.args[1][2], e.args[1][3])) raise @@ -260,7 +257,7 @@ def _SplitSemicolons(lines): DISABLE_PATTERN = r'^#.*\byapf:\s*disable\b' -ENABLE_PATTERN = r'^#.*\byapf:\s*enable\b' +ENABLE_PATTERN = r'^#.*\byapf:\s*enable\b' def _LineRangesToSet(line_ranges): @@ -293,31 +290,29 @@ def _MarkLinesToFormat(llines, lines): index += 1 while index < len(llines): uwline = llines[index] - line = uwline.first.value.strip() + line = uwline.first.value.strip() if uwline.is_comment and _EnableYAPF(line): if not _DisableYAPF(line): break uwline.disable = True - index += 1 + index += 1 elif re.search(DISABLE_PATTERN, uwline.last.value.strip(), re.IGNORECASE): uwline.disable = True index += 1 def _DisableYAPF(line): - return ( - re.search(DISABLE_PATTERN, - line.split('\n')[0].strip(), re.IGNORECASE) or - re.search(DISABLE_PATTERN, - line.split('\n')[-1].strip(), re.IGNORECASE)) + return (re.search(DISABLE_PATTERN, + 
line.split('\n')[0].strip(), re.IGNORECASE) or + re.search(DISABLE_PATTERN, + line.split('\n')[-1].strip(), re.IGNORECASE)) def _EnableYAPF(line): - return ( - re.search(ENABLE_PATTERN, - line.split('\n')[0].strip(), re.IGNORECASE) or - re.search(ENABLE_PATTERN, - line.split('\n')[-1].strip(), re.IGNORECASE)) + return (re.search(ENABLE_PATTERN, + line.split('\n')[0].strip(), re.IGNORECASE) or + re.search(ENABLE_PATTERN, + line.split('\n')[-1].strip(), re.IGNORECASE)) def _GetUnifiedDiff(before, after, filename='code'): @@ -332,7 +327,7 @@ def _GetUnifiedDiff(before, after, filename='code'): The unified diff text. """ before = before.splitlines() - after = after.splitlines() + after = after.splitlines() return '\n'.join( difflib.unified_diff( before, diff --git a/yapftests/blank_line_calculator_test.py b/yapftests/blank_line_calculator_test.py index d5d97d794..18fa83e0b 100644 --- a/yapftests/blank_line_calculator_test.py +++ b/yapftests/blank_line_calculator_test.py @@ -30,15 +30,13 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreateYapfStyle()) def testDecorators(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ @bork() def foo(): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ @bork() def foo(): pass @@ -47,8 +45,7 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testComplexDecorators(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import sys @bork() @@ -63,8 +60,7 @@ class moo(object): def method(self): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import sys @@ -85,8 +81,7 @@ def method(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testCodeAfterFunctionsAndClasses(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ 
def foo(): pass top_level_code = True @@ -102,8 +97,7 @@ def method_2(self): except Error as error: pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): pass @@ -132,8 +126,7 @@ def method_2(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testCommentSpacing(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ # This is the first comment # And it's multiline @@ -162,8 +155,7 @@ def foo(self): # comment pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ # This is the first comment # And it's multiline @@ -200,8 +192,7 @@ def foo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testCommentBeforeMethod(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class foo(object): # pylint: disable=invalid-name @@ -212,8 +203,7 @@ def f(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsBeforeClassDefs(self): - code = textwrap.dedent( - '''\ + code = textwrap.dedent('''\ """Test.""" # Comment @@ -226,8 +216,7 @@ class Foo(object): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsBeforeDecorator(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ # The @foo operator adds bork to a(). 
@foo() def a(): @@ -236,8 +225,7 @@ def a(): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ # Hello world @@ -249,8 +237,7 @@ def a(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsAfterDecorator(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): def _(): @@ -267,8 +254,7 @@ def test_unicode_filename_in_sdist(self, sdist_unicode, tmpdir, monkeypatch): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testInnerClasses(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class DeployAPIClient(object): class Error(Exception): pass @@ -276,8 +262,7 @@ class TaskValidationError(Error): pass class DeployAPIHTTPError(Error): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class DeployAPIClient(object): class Error(Exception): @@ -293,8 +278,7 @@ class DeployAPIHTTPError(Error): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testLinesOnRangeBoundary(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ def A(): pass @@ -308,8 +292,7 @@ def D(): # 9 def E(): pass """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ def A(): pass @@ -332,8 +315,7 @@ def E(): self.assertTrue(changed) def testLinesRangeBoundaryNotOutside(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ def A(): pass @@ -347,8 +329,7 @@ def B(): # 6 def C(): pass """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ def A(): pass @@ -367,8 +348,7 @@ def C(): self.assertFalse(changed) def testLinesRangeRemove(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = 
textwrap.dedent(u"""\ def A(): pass @@ -383,8 +363,7 @@ def B(): # 6 def C(): pass """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ def A(): pass @@ -403,8 +382,7 @@ def C(): self.assertTrue(changed) def testLinesRangeRemoveSome(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ def A(): pass @@ -420,8 +398,7 @@ def B(): # 7 def C(): pass """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ def A(): pass diff --git a/yapftests/comment_splicer_test.py b/yapftests/comment_splicer_test.py index 985ea88b7..2e0141bd4 100644 --- a/yapftests/comment_splicer_test.py +++ b/yapftests/comment_splicer_test.py @@ -38,8 +38,9 @@ def _AssertNodeIsComment(self, node, text_in_comment=None): self.assertIn(text_in_comment, node_value) def _FindNthChildNamed(self, node, name, n=1): - for i, child in enumerate(py3compat.ifilter( - lambda c: pytree_utils.NodeName(c) == name, node.pre_order())): + for i, child in enumerate( + py3compat.ifilter(lambda c: pytree_utils.NodeName(c) == name, + node.pre_order())): if i == n - 1: return child raise RuntimeError('No Nth child for n={0}'.format(n)) @@ -58,8 +59,7 @@ def testSimpleInline(self): self._AssertNodeIsComment(comment_node, '# and a comment') def testSimpleSeparateLine(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' foo = 1 # first comment bar = 2 @@ -74,8 +74,7 @@ def testSimpleSeparateLine(self): self._AssertNodeIsComment(comment_node) def testTwoLineComment(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' foo = 1 # first comment # second comment @@ -89,8 +88,7 @@ def testTwoLineComment(self): self._AssertNodeIsComment(tree.children[1]) def testCommentIsFirstChildInCompound(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' if x: # a comment foo = 1 @@ -106,8 +104,7 @@ def testCommentIsFirstChildInCompound(self): 
self._AssertNodeIsComment(if_suite.children[1]) def testCommentIsLastChildInCompound(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' if x: foo = 1 # a comment @@ -123,8 +120,7 @@ def testCommentIsLastChildInCompound(self): self._AssertNodeIsComment(if_suite.children[-2]) def testInlineAfterSeparateLine(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' bar = 1 # line comment foo = 1 # inline comment @@ -137,13 +133,12 @@ def testInlineAfterSeparateLine(self): sep_comment_node = tree.children[1] self._AssertNodeIsComment(sep_comment_node, '# line comment') - expr = tree.children[2].children[0] + expr = tree.children[2].children[0] inline_comment_node = expr.children[-1] self._AssertNodeIsComment(inline_comment_node, '# inline comment') def testSeparateLineAfterInline(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' bar = 1 foo = 1 # inline comment # line comment @@ -156,13 +151,12 @@ def testSeparateLineAfterInline(self): sep_comment_node = tree.children[-2] self._AssertNodeIsComment(sep_comment_node, '# line comment') - expr = tree.children[1].children[0] + expr = tree.children[1].children[0] inline_comment_node = expr.children[-1] self._AssertNodeIsComment(inline_comment_node, '# inline comment') def testCommentBeforeDedent(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' if bar: z = 1 # a comment @@ -177,8 +171,7 @@ def testCommentBeforeDedent(self): self._AssertNodeType('DEDENT', if_suite.children[-1]) def testCommentBeforeDedentTwoLevel(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' if foo: if bar: z = 1 @@ -195,8 +188,7 @@ def testCommentBeforeDedentTwoLevel(self): self._AssertNodeType('DEDENT', if_suite.children[-1]) def testCommentBeforeDedentTwoLevelImproperlyIndented(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' if foo: if bar: z = 1 @@ -216,8 +208,7 @@ def testCommentBeforeDedentTwoLevelImproperlyIndented(self): 
self._AssertNodeType('DEDENT', if_suite.children[-1]) def testCommentBeforeDedentThreeLevel(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' if foo: if bar: z = 1 @@ -244,8 +235,7 @@ def testCommentBeforeDedentThreeLevel(self): self._AssertNodeType('DEDENT', if_suite_2.children[-1]) def testCommentsInClass(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' class Foo: """docstring abc...""" # top-level comment @@ -256,19 +246,18 @@ def foo(): pass tree = pytree_utils.ParseCodeToTree(code) comment_splicer.SpliceComments(tree) - class_suite = tree.children[0].children[3] + class_suite = tree.children[0].children[3] another_comment = class_suite.children[-2] self._AssertNodeIsComment(another_comment, '# another') # It's OK for the comment to be a child of funcdef, as long as it's # the first child and thus comes before the 'def'. - funcdef = class_suite.children[3] + funcdef = class_suite.children[3] toplevel_comment = funcdef.children[0] self._AssertNodeIsComment(toplevel_comment, '# top-level') def testMultipleBlockComments(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' # Block comment number 1 # Block comment number 2 @@ -279,7 +268,7 @@ def f(): tree = pytree_utils.ParseCodeToTree(code) comment_splicer.SpliceComments(tree) - funcdef = tree.children[0] + funcdef = tree.children[0] block_comment_1 = funcdef.children[0] self._AssertNodeIsComment(block_comment_1, '# Block comment number 1') @@ -287,8 +276,7 @@ def f(): self._AssertNodeIsComment(block_comment_2, '# Block comment number 2') def testCommentsOnDedents(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' class Foo(object): # A comment for qux. 
def qux(self): @@ -303,7 +291,7 @@ def mux(self): tree = pytree_utils.ParseCodeToTree(code) comment_splicer.SpliceComments(tree) - classdef = tree.children[0] + classdef = tree.children[0] class_suite = classdef.children[6] qux_comment = class_suite.children[1] self._AssertNodeIsComment(qux_comment, '# A comment for qux.') @@ -312,8 +300,7 @@ def mux(self): self._AssertNodeIsComment(interim_comment, '# Interim comment.') def testExprComments(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' foo( # Request fractions of an hour. 948.0/3600, 20) ''') @@ -325,8 +312,7 @@ def testExprComments(self): self._AssertNodeIsComment(comment, '# Request fractions of an hour.') def testMultipleCommentsInOneExpr(self): - code = textwrap.dedent( - r''' + code = textwrap.dedent(r''' foo( # com 1 948.0/3600, # com 2 20 + 12 # com 3 diff --git a/yapftests/file_resources_test.py b/yapftests/file_resources_test.py index 9e8c568ea..31184c4a3 100644 --- a/yapftests/file_resources_test.py +++ b/yapftests/file_resources_test.py @@ -56,7 +56,7 @@ def tearDown(self): # pylint: disable=g-missing-super-call def test_get_exclude_file_patterns_from_yapfignore(self): local_ignore_file = os.path.join(self.test_tmpdir, '.yapfignore') - ignore_patterns = ['temp/**/*.py', 'temp2/*.py'] + ignore_patterns = ['temp/**/*.py', 'temp2/*.py'] with open(local_ignore_file, 'w') as f: f.writelines('\n'.join(ignore_patterns)) @@ -66,7 +66,7 @@ def test_get_exclude_file_patterns_from_yapfignore(self): def test_get_exclude_file_patterns_from_yapfignore_with_wrong_syntax(self): local_ignore_file = os.path.join(self.test_tmpdir, '.yapfignore') - ignore_patterns = ['temp/**/*.py', './wrong/syntax/*.py'] + ignore_patterns = ['temp/**/*.py', './wrong/syntax/*.py'] with open(local_ignore_file, 'w') as f: f.writelines('\n'.join(ignore_patterns)) @@ -79,7 +79,7 @@ def test_get_exclude_file_patterns_from_pyproject(self): except ImportError: return local_ignore_file = os.path.join(self.test_tmpdir, 
'pyproject.toml') - ignore_patterns = ['temp/**/*.py', 'temp2/*.py'] + ignore_patterns = ['temp/**/*.py', 'temp2/*.py'] with open(local_ignore_file, 'w') as f: f.write('[tool.yapfignore]\n') f.write('ignore_patterns=[') @@ -97,7 +97,7 @@ def test_get_exclude_file_patterns_from_pyproject_with_wrong_syntax(self): except ImportError: return local_ignore_file = os.path.join(self.test_tmpdir, 'pyproject.toml') - ignore_patterns = ['temp/**/*.py', './wrong/syntax/*.py'] + ignore_patterns = ['temp/**/*.py', './wrong/syntax/*.py'] with open(local_ignore_file, 'w') as f: f.write('[tool.yapfignore]\n') f.write('ignore_patterns=[') @@ -113,7 +113,7 @@ def test_get_exclude_file_patterns_from_pyproject_no_ignore_section(self): except ImportError: return local_ignore_file = os.path.join(self.test_tmpdir, 'pyproject.toml') - ignore_patterns = [] + ignore_patterns = [] open(local_ignore_file, 'w').close() self.assertEqual( @@ -126,7 +126,7 @@ def test_get_exclude_file_patterns_from_pyproject_ignore_section_empty(self): except ImportError: return local_ignore_file = os.path.join(self.test_tmpdir, 'pyproject.toml') - ignore_patterns = [] + ignore_patterns = [] with open(local_ignore_file, 'w') as f: f.write('[tool.yapfignore]\n') @@ -151,12 +151,12 @@ def tearDown(self): # pylint: disable=g-missing-super-call shutil.rmtree(self.test_tmpdir) def test_no_local_style(self): - test_file = os.path.join(self.test_tmpdir, 'file.py') + test_file = os.path.join(self.test_tmpdir, 'file.py') style_name = file_resources.GetDefaultStyleForDir(test_file) self.assertEqual(style_name, 'pep8') def test_no_local_style_custom_default(self): - test_file = os.path.join(self.test_tmpdir, 'file.py') + test_file = os.path.join(self.test_tmpdir, 'file.py') style_name = file_resources.GetDefaultStyleForDir( test_file, default_style='custom-default') self.assertEqual(style_name, 'custom-default') @@ -167,27 +167,27 @@ def test_with_local_style(self): open(style_file, 'w').close() test_filename = 
os.path.join(self.test_tmpdir, 'file.py') - self.assertEqual( - style_file, file_resources.GetDefaultStyleForDir(test_filename)) + self.assertEqual(style_file, + file_resources.GetDefaultStyleForDir(test_filename)) test_filename = os.path.join(self.test_tmpdir, 'dir1', 'file.py') - self.assertEqual( - style_file, file_resources.GetDefaultStyleForDir(test_filename)) + self.assertEqual(style_file, + file_resources.GetDefaultStyleForDir(test_filename)) def test_setup_config(self): # An empty setup.cfg file should not be used setup_config = os.path.join(self.test_tmpdir, 'setup.cfg') open(setup_config, 'w').close() - test_dir = os.path.join(self.test_tmpdir, 'dir1') + test_dir = os.path.join(self.test_tmpdir, 'dir1') style_name = file_resources.GetDefaultStyleForDir(test_dir) self.assertEqual(style_name, 'pep8') # One with a '[yapf]' section should be used with open(setup_config, 'w') as f: f.write('[yapf]\n') - self.assertEqual( - setup_config, file_resources.GetDefaultStyleForDir(test_dir)) + self.assertEqual(setup_config, + file_resources.GetDefaultStyleForDir(test_dir)) def test_pyproject_toml(self): # An empty pyproject.toml file should not be used @@ -199,20 +199,20 @@ def test_pyproject_toml(self): pyproject_toml = os.path.join(self.test_tmpdir, 'pyproject.toml') open(pyproject_toml, 'w').close() - test_dir = os.path.join(self.test_tmpdir, 'dir1') + test_dir = os.path.join(self.test_tmpdir, 'dir1') style_name = file_resources.GetDefaultStyleForDir(test_dir) self.assertEqual(style_name, 'pep8') # One with a '[tool.yapf]' section should be used with open(pyproject_toml, 'w') as f: f.write('[tool.yapf]\n') - self.assertEqual( - pyproject_toml, file_resources.GetDefaultStyleForDir(test_dir)) + self.assertEqual(pyproject_toml, + file_resources.GetDefaultStyleForDir(test_dir)) def test_local_style_at_root(self): # Test behavior of files located on the root, and under root. 
- rootdir = os.path.abspath(os.path.sep) - test_dir_at_root = os.path.join(rootdir, 'dir1') + rootdir = os.path.abspath(os.path.sep) + test_dir_at_root = os.path.join(rootdir, 'dir1') test_dir_under_root = os.path.join(rootdir, 'dir1', 'dir2') # Fake placing only a style file at the root by mocking `os.path.exists`. @@ -241,7 +241,7 @@ class GetCommandLineFilesTest(unittest.TestCase): def setUp(self): # pylint: disable=g-missing-super-call self.test_tmpdir = tempfile.mkdtemp() - self.old_dir = os.getcwd() + self.old_dir = os.getcwd() def tearDown(self): # pylint: disable=g-missing-super-call os.chdir(self.old_dir) @@ -260,11 +260,13 @@ def test_find_files_not_dirs(self): _touch_files([file1, file2]) self.assertEqual( - file_resources.GetCommandLineFiles( - [file1, file2], recursive=False, exclude=None), [file1, file2]) + file_resources.GetCommandLineFiles([file1, file2], + recursive=False, + exclude=None), [file1, file2]) self.assertEqual( - file_resources.GetCommandLineFiles( - [file1, file2], recursive=True, exclude=None), [file1, file2]) + file_resources.GetCommandLineFiles([file1, file2], + recursive=True, + exclude=None), [file1, file2]) def test_nonrecursive_find_in_dir(self): tdir1 = self._make_test_dir('test1') @@ -293,9 +295,9 @@ def test_recursive_find_in_dir(self): self.assertEqual( sorted( - file_resources.GetCommandLineFiles( - [self.test_tmpdir], recursive=True, exclude=None)), - sorted(files)) + file_resources.GetCommandLineFiles([self.test_tmpdir], + recursive=True, + exclude=None)), sorted(files)) def test_recursive_find_in_dir_with_exclude(self): tdir1 = self._make_test_dir('test1') @@ -310,13 +312,13 @@ def test_recursive_find_in_dir_with_exclude(self): self.assertEqual( sorted( - file_resources.GetCommandLineFiles( - [self.test_tmpdir], recursive=True, exclude=['*test*3.py'])), - sorted( - [ - os.path.join(tdir1, 'testfile1.py'), - os.path.join(tdir2, 'testfile2.py'), - ])) + file_resources.GetCommandLineFiles([self.test_tmpdir], + 
recursive=True, + exclude=['*test*3.py'])), + sorted([ + os.path.join(tdir1, 'testfile1.py'), + os.path.join(tdir2, 'testfile2.py'), + ])) def test_find_with_excluded_hidden_dirs(self): tdir1 = self._make_test_dir('.test1') @@ -329,16 +331,16 @@ def test_find_with_excluded_hidden_dirs(self): ] _touch_files(files) - actual = file_resources.GetCommandLineFiles( - [self.test_tmpdir], recursive=True, exclude=['*.test1*']) + actual = file_resources.GetCommandLineFiles([self.test_tmpdir], + recursive=True, + exclude=['*.test1*']) self.assertEqual( sorted(actual), - sorted( - [ - os.path.join(tdir2, 'testfile2.py'), - os.path.join(tdir3, 'testfile3.py'), - ])) + sorted([ + os.path.join(tdir2, 'testfile2.py'), + os.path.join(tdir3, 'testfile3.py'), + ])) def test_find_with_excluded_hidden_dirs_relative(self): """Test find with excluded hidden dirs. @@ -373,15 +375,14 @@ def test_find_with_excluded_hidden_dirs_relative(self): self.assertEqual( sorted(actual), - sorted( - [ - os.path.join( - os.path.relpath(self.test_tmpdir), - os.path.basename(tdir2), 'testfile2.py'), - os.path.join( - os.path.relpath(self.test_tmpdir), - os.path.basename(tdir3), 'testfile3.py'), - ])) + sorted([ + os.path.join( + os.path.relpath(self.test_tmpdir), os.path.basename(tdir2), + 'testfile2.py'), + os.path.join( + os.path.relpath(self.test_tmpdir), os.path.basename(tdir3), + 'testfile3.py'), + ])) def test_find_with_excluded_dirs(self): tdir1 = self._make_test_dir('test1') @@ -397,23 +398,23 @@ def test_find_with_excluded_dirs(self): os.chdir(self.test_tmpdir) found = sorted( - file_resources.GetCommandLineFiles( - ['test1', 'test2', 'test3'], - recursive=True, - exclude=[ - 'test1', - 'test2/testinner/', - ])) + file_resources.GetCommandLineFiles(['test1', 'test2', 'test3'], + recursive=True, + exclude=[ + 'test1', + 'test2/testinner/', + ])) self.assertEqual( found, ['test3/foo/bar/bas/xxx/testfile3.py'.replace("/", os.path.sep)]) found = sorted( - file_resources.GetCommandLineFiles( - ['.'], 
recursive=True, exclude=[ - 'test1', - 'test3', - ])) + file_resources.GetCommandLineFiles(['.'], + recursive=True, + exclude=[ + 'test1', + 'test3', + ])) self.assertEqual( found, ['./test2/testinner/testfile2.py'.replace("/", os.path.sep)]) @@ -516,7 +517,7 @@ def test_write_to_file(self): self.assertEqual(f2.read(), s) def test_write_to_stdout(self): - s = u'foobar' + s = u'foobar' stream = BufferedByteStream() if py3compat.PY3 else py3compat.StringIO() with utils.stdout_redirector(stream): file_resources.WriteReformattedCode( @@ -524,7 +525,7 @@ def test_write_to_stdout(self): self.assertEqual(stream.getvalue(), s) def test_write_encoded_to_stdout(self): - s = '\ufeff# -*- coding: utf-8 -*-\nresult = "passed"\n' # pylint: disable=anomalous-unicode-escape-in-string # noqa + s = '\ufeff# -*- coding: utf-8 -*-\nresult = "passed"\n' # pylint: disable=anomalous-unicode-escape-in-string # noqa stream = BufferedByteStream() if py3compat.PY3 else py3compat.StringIO() with utils.stdout_redirector(stream): file_resources.WriteReformattedCode( @@ -535,17 +536,17 @@ def test_write_encoded_to_stdout(self): class LineEndingTest(unittest.TestCase): def test_line_ending_linefeed(self): - lines = ['spam\n', 'spam\n'] + lines = ['spam\n', 'spam\n'] actual = file_resources.LineEnding(lines) self.assertEqual(actual, '\n') def test_line_ending_carriage_return(self): - lines = ['spam\r', 'spam\r'] + lines = ['spam\r', 'spam\r'] actual = file_resources.LineEnding(lines) self.assertEqual(actual, '\r') def test_line_ending_combo(self): - lines = ['spam\r\n', 'spam\r\n'] + lines = ['spam\r\n', 'spam\r\n'] actual = file_resources.LineEnding(lines) self.assertEqual(actual, '\r\n') diff --git a/yapftests/format_decision_state_test.py b/yapftests/format_decision_state_test.py index d9cdefe8c..63961f332 100644 --- a/yapftests/format_decision_state_test.py +++ b/yapftests/format_decision_state_test.py @@ -32,12 +32,12 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreateYapfStyle()) def 
testSimpleFunctionDefWithNoSplitting(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" def f(a, b): pass """) llines = yapf_test_helper.ParseAndUnwrap(code) - lline = logical_line.LogicalLine(0, _FilterLine(llines[0])) + lline = logical_line.LogicalLine(0, _FilterLine(llines[0])) lline.CalculateFormattingInformation() # Add: 'f' @@ -86,12 +86,12 @@ def f(a, b): self.assertEqual(repr(state), repr(clone)) def testSimpleFunctionDefWithSplitting(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" def f(a, b): pass """) llines = yapf_test_helper.ParseAndUnwrap(code) - lline = logical_line.LogicalLine(0, _FilterLine(llines[0])) + lline = logical_line.LogicalLine(0, _FilterLine(llines[0])) lline.CalculateFormattingInformation() # Add: 'f' diff --git a/yapftests/line_joiner_test.py b/yapftests/line_joiner_test.py index ea6186693..2eaf16478 100644 --- a/yapftests/line_joiner_test.py +++ b/yapftests/line_joiner_test.py @@ -39,23 +39,20 @@ def _CheckLineJoining(self, code, join_lines): self.assertCodeEqual(line_joiner.CanMergeMultipleLines(llines), join_lines) def testSimpleSingleLineStatement(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ if isinstance(a, int): continue """) self._CheckLineJoining(code, join_lines=True) def testSimpleMultipleLineStatement(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ if isinstance(b, int): continue """) self._CheckLineJoining(code, join_lines=False) def testSimpleMultipleLineComplexStatement(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ if isinstance(c, int): while True: continue @@ -63,22 +60,19 @@ def testSimpleMultipleLineComplexStatement(self): self._CheckLineJoining(code, join_lines=False) def testSimpleMultipleLineStatementWithComment(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ if isinstance(d, int): continue # We're pleased that d's an int. 
""") self._CheckLineJoining(code, join_lines=True) def testSimpleMultipleLineStatementWithLargeIndent(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ if isinstance(e, int): continue """) self._CheckLineJoining(code, join_lines=True) def testOverColumnLimit(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ if instance(bbbbbbbbbbbbbbbbbbbbbbbbb, int): cccccccccccccccccccccccccc = ddddddddddddddddddddd """) # noqa self._CheckLineJoining(code, join_lines=False) diff --git a/yapftests/logical_line_test.py b/yapftests/logical_line_test.py index 695f88bd5..d18262a7c 100644 --- a/yapftests/logical_line_test.py +++ b/yapftests/logical_line_test.py @@ -29,29 +29,25 @@ class LogicalLineBasicTest(unittest.TestCase): def testConstruction(self): - toks = _MakeFormatTokenList( - [(token.DOT, '.', 'DOT'), (token.VBAR, '|', 'VBAR')]) + toks = _MakeFormatTokenList([(token.DOT, '.', 'DOT'), + (token.VBAR, '|', 'VBAR')]) lline = logical_line.LogicalLine(20, toks) self.assertEqual(20, lline.depth) self.assertEqual(['DOT', 'VBAR'], [tok.name for tok in lline.tokens]) def testFirstLast(self): - toks = _MakeFormatTokenList( - [ - (token.DOT, '.', 'DOT'), (token.LPAR, '(', 'LPAR'), - (token.VBAR, '|', 'VBAR') - ]) + toks = _MakeFormatTokenList([(token.DOT, '.', 'DOT'), + (token.LPAR, '(', 'LPAR'), + (token.VBAR, '|', 'VBAR')]) lline = logical_line.LogicalLine(20, toks) self.assertEqual(20, lline.depth) self.assertEqual('DOT', lline.first.name) self.assertEqual('VBAR', lline.last.name) def testAsCode(self): - toks = _MakeFormatTokenList( - [ - (token.DOT, '.', 'DOT'), (token.LPAR, '(', 'LPAR'), - (token.VBAR, '|', 'VBAR') - ]) + toks = _MakeFormatTokenList([(token.DOT, '.', 'DOT'), + (token.LPAR, '(', 'LPAR'), + (token.VBAR, '|', 'VBAR')]) lline = logical_line.LogicalLine(2, toks) self.assertEqual(' . 
( |', lline.AsCode()) @@ -65,7 +61,7 @@ def testAppendToken(self): class LogicalLineFormattingInformationTest(yapf_test_helper.YAPFTest): def testFuncDef(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" def f(a, b): pass """) diff --git a/yapftests/main_test.py b/yapftests/main_test.py index ea6892f5a..c83b8b66a 100644 --- a/yapftests/main_test.py +++ b/yapftests/main_test.py @@ -78,7 +78,7 @@ def patch_raw_input(lines=lines()): return next(lines) try: - orig_raw_import = yapf.py3compat.raw_input + orig_raw_import = yapf.py3compat.raw_input yapf.py3compat.raw_input = patch_raw_input yield finally: @@ -90,7 +90,7 @@ class RunMainTest(yapf_test_helper.YAPFTest): def testShouldHandleYapfError(self): """run_main should handle YapfError and sys.exit(1).""" expected_message = 'yapf: input filenames did not match any python files\n' - sys.argv = ['yapf', 'foo.c'] + sys.argv = ['yapf', 'foo.c'] with captured_output() as (out, err): with self.assertRaises(SystemExit): yapf.run_main() @@ -114,7 +114,7 @@ def testEchoInput(self): self.assertEqual(out.getvalue(), code) def testEchoInputWithStyle(self): - code = 'def f(a = 1\n\n):\n return 2*a\n' + code = 'def f(a = 1\n\n):\n return 2*a\n' yapf_code = 'def f(a=1):\n return 2 * a\n' with patched_input(code): with captured_output() as (out, _): @@ -135,6 +135,5 @@ def testHelp(self): self.assertEqual(ret, 0) help_message = out.getvalue() self.assertIn('indent_width=4', help_message) - self.assertIn( - 'The number of spaces required before a trailing comment.', - help_message) + self.assertIn('The number of spaces required before a trailing comment.', + help_message) diff --git a/yapftests/pytree_unwrapper_test.py b/yapftests/pytree_unwrapper_test.py index cd67e0de1..525278def 100644 --- a/yapftests/pytree_unwrapper_test.py +++ b/yapftests/pytree_unwrapper_test.py @@ -43,79 +43,69 @@ def _CheckLogicalLines(self, llines, list_of_expected): self.assertEqual(list_of_expected, actual) def testSimpleFileScope(self): - 
code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" x = 1 # a comment y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['x', '=', '1']), - (0, ['# a comment']), - (0, ['y', '=', '2']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['x', '=', '1']), + (0, ['# a comment']), + (0, ['y', '=', '2']), + ]) def testSimpleMultilineStatement(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" y = (1 + x) """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['y', '=', '(', '1', '+', 'x', ')']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['y', '=', '(', '1', '+', 'x', ')']), + ]) def testFileScopeWithInlineComment(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" x = 1 # a comment y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['x', '=', '1', '# a comment']), - (0, ['y', '=', '2']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['x', '=', '1', '# a comment']), + (0, ['y', '=', '2']), + ]) def testSimpleIf(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" if foo: x = 1 y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['if', 'foo', ':']), - (1, ['x', '=', '1']), - (1, ['y', '=', '2']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['if', 'foo', ':']), + (1, ['x', '=', '1']), + (1, ['y', '=', '2']), + ]) def testSimpleIfWithComments(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" # c1 if foo: # c2 x = 1 y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['# c1']), - (0, ['if', 'foo', ':', '# c2']), - (1, ['x', '=', '1']), - (1, ['y', '=', '2']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['# c1']), + (0, ['if', 'foo', ':', '# c2']), + (1, ['x', '=', '1']), + (1, ['y', '=', '2']), + ]) def testIfWithCommentsInside(self): - code = 
textwrap.dedent( - r""" + code = textwrap.dedent(r""" if foo: # c1 x = 1 # c2 @@ -123,18 +113,16 @@ def testIfWithCommentsInside(self): y = 2 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['if', 'foo', ':']), - (1, ['# c1']), - (1, ['x', '=', '1', '# c2']), - (1, ['# c3']), - (1, ['y', '=', '2']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['if', 'foo', ':']), + (1, ['# c1']), + (1, ['x', '=', '1', '# c2']), + (1, ['# c3']), + (1, ['y', '=', '2']), + ]) def testIfElifElse(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" if x: x = 1 # c1 elif y: # c2 @@ -144,20 +132,18 @@ def testIfElifElse(self): z = 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['if', 'x', ':']), - (1, ['x', '=', '1', '# c1']), - (0, ['elif', 'y', ':', '# c2']), - (1, ['y', '=', '1']), - (0, ['else', ':']), - (1, ['# c3']), - (1, ['z', '=', '1']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['if', 'x', ':']), + (1, ['x', '=', '1', '# c1']), + (0, ['elif', 'y', ':', '# c2']), + (1, ['y', '=', '1']), + (0, ['else', ':']), + (1, ['# c3']), + (1, ['z', '=', '1']), + ]) def testNestedCompoundTwoLevel(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" if x: x = 1 # c1 while t: @@ -166,34 +152,30 @@ def testNestedCompoundTwoLevel(self): k = 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['if', 'x', ':']), - (1, ['x', '=', '1', '# c1']), - (1, ['while', 't', ':']), - (2, ['# c2']), - (2, ['j', '=', '1']), - (1, ['k', '=', '1']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['if', 'x', ':']), + (1, ['x', '=', '1', '# c1']), + (1, ['while', 't', ':']), + (2, ['# c2']), + (2, ['j', '=', '1']), + (1, ['k', '=', '1']), + ]) def testSimpleWhile(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" while x > 1: # c1 # c2 x = 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - 
self._CheckLogicalLines( - llines, [ - (0, ['while', 'x', '>', '1', ':', '# c1']), - (1, ['# c2']), - (1, ['x', '=', '1']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['while', 'x', '>', '1', ':', '# c1']), + (1, ['# c2']), + (1, ['x', '=', '1']), + ]) def testSimpleTry(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" try: pass except: @@ -206,38 +188,34 @@ def testSimpleTry(self): pass """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['try', ':']), - (1, ['pass']), - (0, ['except', ':']), - (1, ['pass']), - (0, ['except', ':']), - (1, ['pass']), - (0, ['else', ':']), - (1, ['pass']), - (0, ['finally', ':']), - (1, ['pass']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['try', ':']), + (1, ['pass']), + (0, ['except', ':']), + (1, ['pass']), + (0, ['except', ':']), + (1, ['pass']), + (0, ['else', ':']), + (1, ['pass']), + (0, ['finally', ':']), + (1, ['pass']), + ]) def testSimpleFuncdef(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" def foo(x): # c1 # c2 return x """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['def', 'foo', '(', 'x', ')', ':', '# c1']), - (1, ['# c2']), - (1, ['return', 'x']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['def', 'foo', '(', 'x', ')', ':', '# c1']), + (1, ['# c2']), + (1, ['return', 'x']), + ]) def testTwoFuncDefs(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" def foo(x): # c1 # c2 return x @@ -247,45 +225,40 @@ def bar(): # c3 return x """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['def', 'foo', '(', 'x', ')', ':', '# c1']), - (1, ['# c2']), - (1, ['return', 'x']), - (0, ['def', 'bar', '(', ')', ':', '# c3']), - (1, ['# c4']), - (1, ['return', 'x']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['def', 'foo', '(', 'x', ')', ':', '# c1']), + (1, ['# c2']), + (1, ['return', 'x']), + (0, ['def', 'bar', '(', ')', 
':', '# c3']), + (1, ['# c4']), + (1, ['return', 'x']), + ]) def testSimpleClassDef(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" class Klass: # c1 # c2 p = 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['class', 'Klass', ':', '# c1']), - (1, ['# c2']), - (1, ['p', '=', '1']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['class', 'Klass', ':', '# c1']), + (1, ['# c2']), + (1, ['p', '=', '1']), + ]) def testSingleLineStmtInFunc(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" def f(): return 37 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['def', 'f', '(', ')', ':']), - (1, ['return', '37']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['def', 'f', '(', ')', ':']), + (1, ['return', '37']), + ]) def testMultipleComments(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" # Comment #1 # Comment #2 @@ -293,17 +266,15 @@ def f(): pass """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - (0, ['# Comment #1']), - (0, ['# Comment #2']), - (0, ['def', 'f', '(', ')', ':']), - (1, ['pass']), - ]) + self._CheckLogicalLines(llines, [ + (0, ['# Comment #1']), + (0, ['# Comment #2']), + (0, ['def', 'f', '(', ')', ':']), + (1, ['pass']), + ]) def testSplitListWithComment(self): - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" a = [ 'a', 'b', @@ -311,14 +282,9 @@ def testSplitListWithComment(self): ] """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckLogicalLines( - llines, [ - ( - 0, [ - 'a', '=', '[', "'a'", ',', "'b'", ',', "'c'", ',', - '# hello world', ']' - ]) - ]) + self._CheckLogicalLines(llines, [(0, [ + 'a', '=', '[', "'a'", ',', "'b'", ',', "'c'", ',', '# hello world', ']' + ])]) class MatchBracketsTest(yapf_test_helper.YAPFTest): @@ -334,11 +300,9 @@ def _CheckMatchingBrackets(self, llines, list_of_expected): """ actual = [] for lline 
in llines: - filtered_values = [ - (ft, ft.matching_bracket) - for ft in lline.tokens - if ft.name not in pytree_utils.NONSEMANTIC_TOKENS - ] + filtered_values = [(ft, ft.matching_bracket) + for ft in lline.tokens + if ft.name not in pytree_utils.NONSEMANTIC_TOKENS] if filtered_values: actual.append(filtered_values) @@ -353,8 +317,7 @@ def _CheckMatchingBrackets(self, llines, list_of_expected): self.assertEqual(lline[close_bracket][0], lline[open_bracket][1]) def testFunctionDef(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def foo(a, b=['w','d'], c=[42, 37]): pass """) @@ -365,8 +328,7 @@ def foo(a, b=['w','d'], c=[42, 37]): ]) def testDecorator(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ @bar() def foo(a, b, c): pass @@ -379,8 +341,7 @@ def foo(a, b, c): ]) def testClassDef(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class A(B, C, D): pass """) diff --git a/yapftests/pytree_utils_test.py b/yapftests/pytree_utils_test.py index ec61f75d2..c175f833e 100644 --- a/yapftests/pytree_utils_test.py +++ b/yapftests/pytree_utils_test.py @@ -25,7 +25,7 @@ # module. 
_GRAMMAR_SYMBOL2NUMBER = pygram.python_grammar.symbol2number -_FOO = 'foo' +_FOO = 'foo' _FOO1 = 'foo1' _FOO2 = 'foo2' _FOO3 = 'foo3' @@ -87,12 +87,12 @@ def _BuildSimpleTree(self): # simple_stmt: # NAME('foo') # - lpar1 = pytree.Leaf(token.LPAR, '(') - lpar2 = pytree.Leaf(token.LPAR, '(') - simple_stmt = pytree.Node( - _GRAMMAR_SYMBOL2NUMBER['simple_stmt'], [pytree.Leaf(token.NAME, 'foo')]) - return pytree.Node( - _GRAMMAR_SYMBOL2NUMBER['suite'], [lpar1, lpar2, simple_stmt]) + lpar1 = pytree.Leaf(token.LPAR, '(') + lpar2 = pytree.Leaf(token.LPAR, '(') + simple_stmt = pytree.Node(_GRAMMAR_SYMBOL2NUMBER['simple_stmt'], + [pytree.Leaf(token.NAME, 'foo')]) + return pytree.Node(_GRAMMAR_SYMBOL2NUMBER['suite'], + [lpar1, lpar2, simple_stmt]) def _MakeNewNodeRPAR(self): return pytree.Leaf(token.RPAR, ')') @@ -102,18 +102,18 @@ def setUp(self): def testInsertNodesBefore(self): # Insert before simple_stmt and make sure it went to the right place - pytree_utils.InsertNodesBefore( - [self._MakeNewNodeRPAR()], self._simple_tree.children[2]) + pytree_utils.InsertNodesBefore([self._MakeNewNodeRPAR()], + self._simple_tree.children[2]) self.assertEqual(4, len(self._simple_tree.children)) - self.assertEqual( - 'RPAR', pytree_utils.NodeName(self._simple_tree.children[2])) - self.assertEqual( - 'simple_stmt', pytree_utils.NodeName(self._simple_tree.children[3])) + self.assertEqual('RPAR', + pytree_utils.NodeName(self._simple_tree.children[2])) + self.assertEqual('simple_stmt', + pytree_utils.NodeName(self._simple_tree.children[3])) def testInsertNodesBeforeFirstChild(self): # Insert before the first child of its parent simple_stmt = self._simple_tree.children[2] - foo_child = simple_stmt.children[0] + foo_child = simple_stmt.children[0] pytree_utils.InsertNodesBefore([self._MakeNewNodeRPAR()], foo_child) self.assertEqual(3, len(self._simple_tree.children)) self.assertEqual(2, len(simple_stmt.children)) @@ -122,18 +122,18 @@ def testInsertNodesBeforeFirstChild(self): def 
testInsertNodesAfter(self): # Insert after and make sure it went to the right place - pytree_utils.InsertNodesAfter( - [self._MakeNewNodeRPAR()], self._simple_tree.children[2]) + pytree_utils.InsertNodesAfter([self._MakeNewNodeRPAR()], + self._simple_tree.children[2]) self.assertEqual(4, len(self._simple_tree.children)) - self.assertEqual( - 'simple_stmt', pytree_utils.NodeName(self._simple_tree.children[2])) - self.assertEqual( - 'RPAR', pytree_utils.NodeName(self._simple_tree.children[3])) + self.assertEqual('simple_stmt', + pytree_utils.NodeName(self._simple_tree.children[2])) + self.assertEqual('RPAR', + pytree_utils.NodeName(self._simple_tree.children[3])) def testInsertNodesAfterLastChild(self): # Insert after the last child of its parent simple_stmt = self._simple_tree.children[2] - foo_child = simple_stmt.children[0] + foo_child = simple_stmt.children[0] pytree_utils.InsertNodesAfter([self._MakeNewNodeRPAR()], foo_child) self.assertEqual(3, len(self._simple_tree.children)) self.assertEqual(2, len(simple_stmt.children)) @@ -143,16 +143,16 @@ def testInsertNodesAfterLastChild(self): def testInsertNodesWhichHasParent(self): # Try to insert an existing tree node into another place and fail. 
with self.assertRaises(RuntimeError): - pytree_utils.InsertNodesAfter( - [self._simple_tree.children[1]], self._simple_tree.children[0]) + pytree_utils.InsertNodesAfter([self._simple_tree.children[1]], + self._simple_tree.children[0]) class AnnotationsTest(unittest.TestCase): def setUp(self): self._leaf = pytree.Leaf(token.LPAR, '(') - self._node = pytree.Node( - _GRAMMAR_SYMBOL2NUMBER['simple_stmt'], [pytree.Leaf(token.NAME, 'foo')]) + self._node = pytree.Node(_GRAMMAR_SYMBOL2NUMBER['simple_stmt'], + [pytree.Leaf(token.NAME, 'foo')]) def testGetWhenNone(self): self.assertIsNone(pytree_utils.GetNodeAnnotation(self._leaf, _FOO)) @@ -183,18 +183,18 @@ def testMultiple(self): self.assertEqual(pytree_utils.GetNodeAnnotation(self._leaf, _FOO5), 5) def testSubtype(self): - pytree_utils.AppendNodeAnnotation( - self._leaf, pytree_utils.Annotation.SUBTYPE, _FOO) + pytree_utils.AppendNodeAnnotation(self._leaf, + pytree_utils.Annotation.SUBTYPE, _FOO) self.assertSetEqual( - pytree_utils.GetNodeAnnotation( - self._leaf, pytree_utils.Annotation.SUBTYPE), {_FOO}) + pytree_utils.GetNodeAnnotation(self._leaf, + pytree_utils.Annotation.SUBTYPE), {_FOO}) pytree_utils.RemoveSubtypeAnnotation(self._leaf, _FOO) self.assertSetEqual( - pytree_utils.GetNodeAnnotation( - self._leaf, pytree_utils.Annotation.SUBTYPE), set()) + pytree_utils.GetNodeAnnotation(self._leaf, + pytree_utils.Annotation.SUBTYPE), set()) def testSetOnNode(self): pytree_utils.SetNodeAnnotation(self._node, _FOO, 20) diff --git a/yapftests/pytree_visitor_test.py b/yapftests/pytree_visitor_test.py index 231183030..45a83b113 100644 --- a/yapftests/pytree_visitor_test.py +++ b/yapftests/pytree_visitor_test.py @@ -31,7 +31,7 @@ class _NodeNameCollector(pytree_visitor.PyTreeVisitor): """ def __init__(self): - self.all_node_names = [] + self.all_node_names = [] self.name_node_values = [] def DefaultNodeVisit(self, node): @@ -61,7 +61,7 @@ def Visit_NAME(self, leaf): class PytreeVisitorTest(unittest.TestCase): def 
testCollectAllNodeNamesSimpleCode(self): - tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) + tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) collector = _NodeNameCollector() collector.Visit(tree) expected_names = [ @@ -76,7 +76,7 @@ def testCollectAllNodeNamesSimpleCode(self): self.assertEqual(expected_name_node_values, collector.name_node_values) def testCollectAllNodeNamesNestedCode(self): - tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_NESTED_CODE) + tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_NESTED_CODE) collector = _NodeNameCollector() collector.Visit(tree) expected_names = [ @@ -95,7 +95,7 @@ def testCollectAllNodeNamesNestedCode(self): def testDumper(self): # PyTreeDumper is mainly a debugging utility, so only do basic sanity # checking. - tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) + tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) stream = py3compat.StringIO() pytree_visitor.PyTreeDumper(target_stream=stream).Visit(tree) @@ -106,7 +106,7 @@ def testDumper(self): def testDumpPyTree(self): # Similar sanity checking for the convenience wrapper DumpPyTree - tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) + tree = pytree_utils.ParseCodeToTree(_VISITOR_TEST_SIMPLE_CODE) stream = py3compat.StringIO() pytree_visitor.DumpPyTree(tree, target_stream=stream) diff --git a/yapftests/reformatter_basic_test.py b/yapftests/reformatter_basic_test.py index 7f1e1a43e..f7bb4c5f5 100644 --- a/yapftests/reformatter_basic_test.py +++ b/yapftests/reformatter_basic_test.py @@ -33,12 +33,10 @@ def testSplittingAllArgs(self): style.SetGlobalStyle( style.CreateStyleFromConfig( '{split_all_comma_separated_values: true, column_limit: 40}')) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ responseDict = {"timestamp": timestamp, "someValue": value, "whatever": 120} """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = 
textwrap.dedent("""\ responseDict = { "timestamp": timestamp, "someValue": value, @@ -48,12 +46,10 @@ def testSplittingAllArgs(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ yes = { 'yes': 'no', 'no': 'yes', } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ yes = { 'yes': 'no', 'no': 'yes', @@ -61,13 +57,11 @@ def testSplittingAllArgs(self): """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(long_arg, really_long_arg, really_really_long_arg, cant_keep_all_these_args): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(long_arg, really_long_arg, really_really_long_arg, @@ -76,12 +70,10 @@ def foo(long_arg, """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ foo_tuple = [long_arg, really_long_arg, really_really_long_arg, cant_keep_all_these_args] """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ foo_tuple = [ long_arg, really_long_arg, @@ -91,25 +83,21 @@ def foo(long_arg, """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ foo_tuple = [short, arg] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ foo_tuple = [short, 
arg] """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # There is a test for split_all_top_level_comma_separated_values, with # different expected value - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ someLongFunction(this_is_a_very_long_parameter, abc=(a, this_will_just_fit_xxxxxxx)) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ someLongFunction( this_is_a_very_long_parameter, abc=(a, @@ -124,12 +112,10 @@ def testSplittingTopLevelAllArgs(self): '{split_all_top_level_comma_separated_values: true, ' 'column_limit: 40}')) # Works the same way as split_all_comma_separated_values - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ responseDict = {"timestamp": timestamp, "someValue": value, "whatever": 120} """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ responseDict = { "timestamp": timestamp, "someValue": value, @@ -139,13 +125,11 @@ def testSplittingTopLevelAllArgs(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Works the same way as split_all_comma_separated_values - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(long_arg, really_long_arg, really_really_long_arg, cant_keep_all_these_args): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(long_arg, really_long_arg, really_really_long_arg, @@ -155,12 +139,10 @@ def foo(long_arg, llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Works the same way as split_all_comma_separated_values - unformatted_code = textwrap.dedent( - """\ + 
unformatted_code = textwrap.dedent("""\ foo_tuple = [long_arg, really_long_arg, really_really_long_arg, cant_keep_all_these_args] """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ foo_tuple = [ long_arg, really_long_arg, @@ -171,41 +153,35 @@ def foo(long_arg, llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Works the same way as split_all_comma_separated_values - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ foo_tuple = [short, arg] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ foo_tuple = [short, arg] """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # There is a test for split_all_comma_separated_values, with different # expected value - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ someLongFunction(this_is_a_very_long_parameter, abc=(a, this_will_just_fit_xxxxxxx)) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ someLongFunction( this_is_a_very_long_parameter, abc=(a, this_will_just_fit_xxxxxxx)) """) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) actual_formatted_code = reformatter.Reformat(llines) self.assertEqual(40, len(actual_formatted_code.splitlines()[-1])) self.assertCodeEqual(expected_formatted_code, actual_formatted_code) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ someLongFunction(this_is_a_very_long_parameter, abc=(a, this_will_not_fit_xxxxxxxxx)) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ someLongFunction( this_is_a_very_long_parameter, 
abc=(a, @@ -215,13 +191,11 @@ def foo(long_arg, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Exercise the case where there's no opening bracket (for a, b) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a, b = f( a_very_long_parameter, yet_another_one, and_another) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a, b = f( a_very_long_parameter, yet_another_one, and_another) """) @@ -229,8 +203,7 @@ def foo(long_arg, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Don't require splitting before comments. - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ KO = { 'ABC': Abc, # abc 'DEF': Def, # def @@ -239,8 +212,7 @@ def foo(long_arg, 'JKL': Jkl, } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ KO = { 'ABC': Abc, # abc 'DEF': Def, # def @@ -253,8 +225,7 @@ def foo(long_arg, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSimpleFunctionsWithTrailingComments(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def g(): # Trailing comment if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -266,8 +237,7 @@ def f( # Intermediate comment xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def g(): # Trailing comment if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -284,13 +254,11 @@ def f( # Intermediate comment self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankLinesBetweenTopLevelImportsAndVariables(self): - 
unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import foo as bar VAR = 'baz' """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import foo as bar VAR = 'baz' @@ -298,14 +266,12 @@ def testBlankLinesBetweenTopLevelImportsAndVariables(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import foo as bar VAR = 'baz' """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import foo as bar @@ -317,32 +283,28 @@ def testBlankLinesBetweenTopLevelImportsAndVariables(self): '{based_on_style: yapf, ' 'blank_lines_between_top_level_imports_and_variables: 2}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import foo as bar # Some comment """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import foo as bar # Some comment """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import foo as bar class Baz(): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import foo as bar @@ -352,14 +314,12 @@ class Baz(): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = 
textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import foo as bar def foobar(): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import foo as bar @@ -369,14 +329,12 @@ def foobar(): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foobar(): from foo import Bar Bar.baz() """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foobar(): from foo import Bar Bar.baz() @@ -385,39 +343,34 @@ def foobar(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankLinesAtEndOfFile(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foobar(): # foo pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foobar(): # foo pass """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ x = { 'a':37,'b':42, 'c':927} """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ x = {'a': 37, 'b': 42, 'c': 927} """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testIndentBlankLines(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class foo(object): def foobar(self): @@ -445,19 +398,18 @@ class foo(object):\n \n def foobar(self):\n \n pass\n \n def barfoo(se '{based_on_style: yapf, indent_blank_lines: true}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - 
expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) - unformatted_code, expected_formatted_code = ( - expected_formatted_code, unformatted_code) + unformatted_code, expected_formatted_code = (expected_formatted_code, + unformatted_code) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultipleUgliness(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ x = { 'a':37,'b':42, 'c':927} @@ -473,8 +425,7 @@ def g(self, x,y=42): def f ( a ) : return 37+-+a[42-x : y**3] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ x = {'a': 37, 'b': 42, 'c': 927} y = 'hello ' 'world' @@ -498,8 +449,7 @@ def f(a): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testComments(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class Foo(object): pass @@ -521,8 +471,7 @@ class Baz(object): class Qux(object): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class Foo(object): pass @@ -553,18 +502,16 @@ class Qux(object): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSingleComment(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ # Thing 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsWithTrailingSpaces(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ # Thing 1 \n# Thing 2 \n""") - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ # Thing 1 # Thing 2 """) @@ -572,8 +519,7 @@ def 
testCommentsWithTrailingSpaces(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testCommentsInDataLiteral(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): return collections.OrderedDict({ # First comment. @@ -590,8 +536,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testEndingWhitespaceAfterSimpleStatement(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ import foo as bar # Thing 1 # Thing 2 @@ -600,8 +545,7 @@ def testEndingWhitespaceAfterSimpleStatement(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDocstrings(self): - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ u"""Module-level docstring.""" import os class Foo(object): @@ -618,8 +562,7 @@ def qux(self): print('hello {}'.format('world')) return 42 ''') - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ u"""Module-level docstring.""" import os @@ -640,8 +583,7 @@ def qux(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDocstringAndMultilineComment(self): - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ """Hello world""" # A multiline # comment @@ -655,8 +597,7 @@ def foo(self): # comment pass ''') - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ """Hello world""" @@ -677,8 +618,7 @@ def foo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultilineDocstringAndMultilineComment(self): - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ """Hello world RIP Dennis Richie. @@ -701,8 +641,7 @@ def foo(self): # comment pass ''') - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ """Hello world RIP Dennis Richie. 
@@ -732,26 +671,24 @@ def foo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testTupleCommaBeforeLastParen(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ a = ( 1, ) """) expected_formatted_code = textwrap.dedent("""\ a = (1,) """) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoBreakOutsideOfBracket(self): # FIXME(morbo): How this is formatted is not correct. But it's syntactically # correct. - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def f(): assert port >= minimum, \ 'Unexpected port %d when minimum was %d.' % (port, minimum) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def f(): assert port >= minimum, 'Unexpected port %d when minimum was %d.' 
% (port, minimum) @@ -760,8 +697,7 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankLinesBeforeDecorators(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ @foo() class A(object): @bar() @@ -769,8 +705,7 @@ class A(object): def x(self): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ @foo() class A(object): @@ -783,16 +718,14 @@ def x(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testCommentBetweenDecorators(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ @foo() # frob @bar def x (self): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ @foo() # frob @bar @@ -803,14 +736,12 @@ def x(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testListComprehension(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def given(y): [k for k in () if k in y] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def given(y): [k for k in () if k in y] """) @@ -818,16 +749,14 @@ def given(y): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testListComprehensionPreferOneLine(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def given(y): long_variable_name = [ long_var_name + 1 for long_var_name in () if long_var_name == 2] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def given(y): long_variable_name = [ long_var_name + 1 for long_var_name in () if long_var_name == 2 @@ -837,14 +766,12 @@ def given(y): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def 
testListComprehensionPreferOneLineOverArithmeticSplit(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def given(used_identifiers): return (sum(len(identifier) for identifier in used_identifiers) / len(used_identifiers)) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def given(used_identifiers): return (sum(len(identifier) for identifier in used_identifiers) / len(used_identifiers)) @@ -853,16 +780,14 @@ def given(used_identifiers): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testListComprehensionPreferThreeLinesForLineWrap(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def given(y): long_variable_name = [ long_var_name + 1 for long_var_name, number_two in () if long_var_name == 2 and number_two == 3] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def given(y): long_variable_name = [ long_var_name + 1 @@ -874,16 +799,14 @@ def given(y): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testListComprehensionPreferNoBreakForTrivialExpression(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def given(y): long_variable_name = [ long_var_name for long_var_name, number_two in () if long_var_name == 2 and number_two == 3] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def given(y): long_variable_name = [ long_var_name for long_var_name, number_two in () @@ -894,7 +817,7 @@ def given(y): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testOpeningAndClosingBrackets(self): - unformatted_code = """\ + unformatted_code = """\ foo( (1, ) ) foo( ( 1, 2, 3 ) ) foo( ( 1, 2, 3, ) ) @@ -908,16 +831,14 @@ def testOpeningAndClosingBrackets(self): 3, )) """ - llines = 
yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSingleLineFunctions(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): return 42 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): return 42 """) @@ -935,16 +856,14 @@ def testNoQueueSeletionInMiddleOfLine(self): find_symbol(node.type) + "< " + " ".join( find_pattern(n) for n in node.child) + " >" """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoSpacesBetweenSubscriptsAndCalls(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ aaaaaaaaaa = bbbbbbbb.ccccccccc() [42] (a, 2) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ aaaaaaaaaa = bbbbbbbb.ccccccccc()[42](a, 2) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) @@ -952,25 +871,21 @@ def testNoSpacesBetweenSubscriptsAndCalls(self): def testNoSpacesBetweenOpeningBracketAndStartingOperator(self): # Unary operator. - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ aaaaaaaaaa = bbbbbbbb.ccccccccc[ -1 ]( -42 ) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ aaaaaaaaaa = bbbbbbbb.ccccccccc[-1](-42) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) # Varargs and kwargs. 
- unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ aaaaaaaaaa = bbbbbbbb.ccccccccc( *varargs ) aaaaaaaaaa = bbbbbbbb.ccccccccc( **kwargs ) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ aaaaaaaaaa = bbbbbbbb.ccccccccc(*varargs) aaaaaaaaaa = bbbbbbbb.ccccccccc(**kwargs) """) @@ -978,15 +893,13 @@ def testNoSpacesBetweenOpeningBracketAndStartingOperator(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultilineCommentReformatted(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: # This is a multiline # comment. pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: # This is a multiline # comment. @@ -996,8 +909,7 @@ def testMultilineCommentReformatted(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDictionaryMakerFormatting(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ _PYTHON_STATEMENTS = frozenset({ lambda x, y: 'simple_stmt': 'small_stmt', 'expr_stmt': 'print_stmt', 'del_stmt': 'pass_stmt', lambda: 'break_stmt': 'continue_stmt', 'return_stmt': 'raise_stmt', @@ -1005,8 +917,7 @@ def testDictionaryMakerFormatting(self): 'if_stmt', 'while_stmt': 'for_stmt', }) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ _PYTHON_STATEMENTS = frozenset({ lambda x, y: 'simple_stmt': 'small_stmt', 'expr_stmt': 'print_stmt', @@ -1023,16 +934,14 @@ def testDictionaryMakerFormatting(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSimpleMultilineCode(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: aaaaaaaaaaaaaa.bbbbbbbbbbbbbb.ccccccc(zzzzzzzzzzzz, \ xxxxxxxxxxx, yyyyyyyyyyyy, vvvvvvvvv) 
aaaaaaaaaaaaaa.bbbbbbbbbbbbbb.ccccccc(zzzzzzzzzzzz, \ xxxxxxxxxxx, yyyyyyyyyyyy, vvvvvvvvv) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: aaaaaaaaaaaaaa.bbbbbbbbbbbbbb.ccccccc(zzzzzzzzzzzz, xxxxxxxxxxx, yyyyyyyyyyyy, vvvvvvvvv) @@ -1043,8 +952,7 @@ def testSimpleMultilineCode(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultilineComment(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if Foo: # Hello world # Yo man. @@ -1057,15 +965,14 @@ def testMultilineComment(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testSpaceBetweenStringAndParentheses(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ b = '0' ('hello') """) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMultilineString(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ code = textwrap.dedent('''\ if Foo: # Hello world @@ -1079,8 +986,7 @@ def testMultilineString(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ def f(): email_text += """This is a really long docstring that goes over the column limit and is multi-line.

Czar: """+despot["Nicholas"]+"""
@@ -1089,8 +995,7 @@ def f(): """ ''') # noqa - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ def f(): email_text += """This is a really long docstring that goes over the column limit and is multi-line.

Czar: """ + despot["Nicholas"] + """
@@ -1103,8 +1008,7 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSimpleMultilineWithComments(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if ( # This is the first comment a and # This is the second comment # This is the third comment @@ -1116,14 +1020,12 @@ def testSimpleMultilineWithComments(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMatchingParenSplittingMatching(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def f(): raise RuntimeError('unable to find insertion point for target node', (target,)) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def f(): raise RuntimeError('unable to find insertion point for target node', (target,)) @@ -1132,8 +1034,7 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testContinuationIndent(self): - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ class F: def _ProcessArgLists(self, node): """Common method for processing argument lists.""" @@ -1143,8 +1044,7 @@ def _ProcessArgLists(self, node): child, subtype=_ARGLIST_TOKEN_TO_SUBTYPE.get( child.value, format_token.Subtype.NONE)) ''') - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ class F: def _ProcessArgLists(self, node): @@ -1160,14 +1060,12 @@ def _ProcessArgLists(self, node): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testTrailingCommaAndBracket(self): - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ a = { 42, } b = ( 42, ) c = [ 42, ] ''') - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ a = { 42, } @@ -1180,23 +1078,20 @@ def testTrailingCommaAndBracket(self): self.assertCodeEqual(expected_formatted_code, 
reformatter.Reformat(llines)) def testI18n(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ N_('Some years ago - never mind how long precisely - having little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world.') # A comment is here. """) # noqa llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ foo('Fake function call') #. Some years ago - never mind how long precisely - having little or no money in my purse, and nothing particular to interest me on shore, I thought I would sail about a little and see the watery part of the world. """) # noqa llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testI18nCommentsInDataLiteral(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): return collections.OrderedDict({ #. First i18n comment. 
@@ -1210,8 +1105,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testClosingBracketIndent(self): - code = textwrap.dedent( - '''\ + code = textwrap.dedent('''\ def f(): def g(): @@ -1224,8 +1118,7 @@ def g(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testClosingBracketsInlinedInCall(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class Foo(object): def bar(self): @@ -1239,8 +1132,7 @@ def bar(self): "porkporkpork": 5, }) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class Foo(object): def bar(self): @@ -1258,8 +1150,7 @@ def bar(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testLineWrapInForExpression(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class A: def x(self, node, name, n=1): @@ -1272,7 +1163,7 @@ def x(self, node, name, n=1): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionCallContinuationLine(self): - code = """\ + code = """\ class foo: def bar(self, node, name, n=1): @@ -1286,8 +1177,7 @@ def bar(self, node, name, n=1): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testI18nNonFormatting(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class F(object): def __init__(self, fieldname, @@ -1299,8 +1189,7 @@ def __init__(self, fieldname, self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSpaceBetweenUnaryOpAndOpeningParen(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if ~(a or b): pass """) @@ -1308,8 +1197,7 @@ def testNoSpaceBetweenUnaryOpAndOpeningParen(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentBeforeFuncDef(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class Foo(object): a = 42 @@ -1327,8 +1215,7 @@ def __init__(self, self.assertCodeEqual(code, reformatter.Reformat(llines)) def 
testExcessLineCountWithDefaultKeywords(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class Fnord(object): def Moo(self): aaaaaaaaaaaaaaaa = self._bbbbbbbbbbbbbbbbbbbbbbb( @@ -1336,8 +1223,7 @@ def Moo(self): fffff=fffff, ggggggg=ggggggg, hhhhhhhhhhhhh=hhhhhhhhhhhhh, iiiiiii=iiiiiiiiiiiiii) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class Fnord(object): def Moo(self): @@ -1354,8 +1240,7 @@ def Moo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSpaceAfterNotOperator(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if not (this and that): pass """) @@ -1363,8 +1248,7 @@ def testSpaceAfterNotOperator(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoPenaltySplitting(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): if True: if True: @@ -1377,8 +1261,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testExpressionPenalties(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): if ((left.value == '(' and right.value == ')') or (left.value == '[' and right.value == ']') or @@ -1389,16 +1272,14 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testLineDepthOfSingleLineStatement(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ while True: continue for x in range(3): continue try: a = 42 except: b = 42 with open(a) as fd: a = fd.read() """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ while True: continue for x in range(3): @@ -1414,13 +1295,11 @@ def testLineDepthOfSingleLineStatement(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplitListWithTerminatingComma(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = 
textwrap.dedent("""\ FOO = ['bar', 'baz', 'mux', 'qux', 'quux', 'quuux', 'quuuux', 'quuuuux', 'quuuuuux', 'quuuuuuux', lambda a, b: 37,] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ FOO = [ 'bar', 'baz', @@ -1439,8 +1318,7 @@ def testSplitListWithTerminatingComma(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplitListWithInterspersedComments(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ FOO = [ 'bar', # bar 'baz', # baz @@ -1459,7 +1337,7 @@ def testSplitListWithInterspersedComments(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testRelativeImportStatements(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ from ... import bork """) llines = yapf_test_helper.ParseAndUnwrap(code) @@ -1467,15 +1345,13 @@ def testRelativeImportStatements(self): def testSingleLineList(self): # A list on a single line should prefer to remain contiguous. - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb = aaaaaaaaaaa( ("...", "."), "..", ".............................................." 
) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb = aaaaaaaaaaa( ("...", "."), "..", "..............................................") """) # noqa @@ -1483,8 +1359,7 @@ def testSingleLineList(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankLinesBeforeFunctionsNotInColumnZero(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import signal @@ -1499,8 +1374,7 @@ def timeout(seconds=1): except: pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import signal try: @@ -1519,8 +1393,7 @@ def timeout(seconds=1): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoKeywordArgumentBreakage(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class A(object): def b(self): @@ -1532,7 +1405,7 @@ def b(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testTrailerOnSingleLine(self): - code = """\ + code = """\ urlpatterns = patterns('', url(r'^$', 'homepage_view'), url(r'^/login/$', 'login_view'), url(r'^/login/$', 'logout_view'), @@ -1542,8 +1415,7 @@ def testTrailerOnSingleLine(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testIfConditionalParens(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class Foo: def bar(): @@ -1556,8 +1428,7 @@ def bar(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testContinuationMarkers(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec a diam lectus. "\\ "Sed sit amet ipsum mauris. Maecenas congue ligula ac quam viverra nec consectetur "\\ "ante hendrerit. Donec et mollis dolor. 
Praesent et diam eget libero egestas mattis "\\ @@ -1567,16 +1438,14 @@ def testContinuationMarkers(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ from __future__ import nested_scopes, generators, division, absolute_import, with_statement, \\ print_function, unicode_literals """) # noqa llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if aaaaaaaaa == 42 and bbbbbbbbbbbbbb == 42 and \\ cccccccc == 42: pass @@ -1585,8 +1454,7 @@ def testContinuationMarkers(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentsWithContinuationMarkers(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def fn(arg): v = fn2(key1=True, #c1 @@ -1597,8 +1465,7 @@ def fn(arg): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMultipleContinuationMarkers(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ xyz = \\ \\ some_thing() @@ -1607,7 +1474,7 @@ def testMultipleContinuationMarkers(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testContinuationMarkerAfterStringWithContinuation(self): - code = """\ + code = """\ s = 'foo \\ bar' \\ .format() @@ -1616,8 +1483,7 @@ def testContinuationMarkerAfterStringWithContinuation(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testEmptyContainers(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ flags.DEFINE_list( 'output_dirs', [], 'Lorem ipsum dolor sit amet, consetetur adipiscing elit. Donec a diam lectus. 
' @@ -1627,12 +1493,10 @@ def testEmptyContainers(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testSplitStringsIfSurroundedByParens(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a = foo.bar({'xxxxxxxxxxxxxxxxxxxxxxx' 'yyyyyyyyyyyyyyyyyyyyyyyyyy': baz[42]} + 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' 'bbbbbbbbbbbbbbbbbbbbbbbbbb' 'cccccccccccccccccccccccccccccccc' 'ddddddddddddddddddddddddddddd') """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a = foo.bar({'xxxxxxxxxxxxxxxxxxxxxxx' 'yyyyyyyyyyyyyyyyyyyyyyyyyy': baz[42]} + 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' @@ -1643,8 +1507,7 @@ def testSplitStringsIfSurroundedByParens(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ a = 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' \ 'bbbbbbbbbbbbbbbbbbbbbbbbbb' 'cccccccccccccccccccccccccccccccc' \ 'ddddddddddddddddddddddddddddd' @@ -1653,8 +1516,7 @@ def testSplitStringsIfSurroundedByParens(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMultilineShebang(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ #!/bin/sh if "true" : '''\' then @@ -1674,8 +1536,7 @@ def testMultilineShebang(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSplittingAroundTermOperators(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ a_very_long_function_call_yada_yada_etc_etc_etc(long_arg1, long_arg2 / long_arg3) """) @@ -1683,8 +1544,7 @@ def testNoSplittingAroundTermOperators(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSplittingAroundCompOperators(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa is not 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa in bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa not in bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) @@ -1692,8 +1552,7 @@ def testNoSplittingAroundCompOperators(self): c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa is bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) c = (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa <= bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) """) # noqa - expected_code = textwrap.dedent( - """\ + expected_code = textwrap.dedent("""\ c = ( aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa is not bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb) @@ -1715,8 +1574,7 @@ def testNoSplittingAroundCompOperators(self): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testNoSplittingWithinSubscriptList(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ somequitelongvariablename.somemember[(a, b)] = { 'somelongkey': 1, 'someotherlongkey': 2 @@ -1726,8 +1584,7 @@ def testNoSplittingWithinSubscriptList(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testExcessCharacters(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class foo: def bar(self): @@ -1738,16 +1595,14 @@ def bar(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): if True: if True: if contract == allow_contract and attr_dict.get(if_attribute) == has_value: return True """) # noqa - expected_code = textwrap.dedent( - """\ + expected_code = textwrap.dedent("""\ def _(): if True: if True: @@ -1759,8 +1614,7 @@ def _(): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testDictSetGenerator(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ foo = { variable: 'hello world. How are you today?' 
for variable in fnord @@ -1771,8 +1625,7 @@ def testDictSetGenerator(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testUnaryOpInDictionaryValue(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ beta = "123" test = {'alpha': beta[-1]} @@ -1783,8 +1636,7 @@ def testUnaryOpInDictionaryValue(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testUnaryNotOperator(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if True: if True: if True: @@ -1796,7 +1648,7 @@ def testUnaryNotOperator(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testRelaxArraySubscriptAffinity(self): - code = """\ + code = """\ class A(object): def f(self, aaaaaaaaa, bbbbbbbbbbbbb, row): @@ -1812,18 +1664,17 @@ def f(self, aaaaaaaaa, bbbbbbbbbbbbb, row): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionCallInDict(self): - code = "a = {'a': b(c=d, **e)}\n" + code = "a = {'a': b(c=d, **e)}\n" llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionCallInNestedDict(self): - code = "a = {'a': {'a': {'a': b(c=d, **e)}}}\n" + code = "a = {'a': {'a': {'a': b(c=d, **e)}}}\n" llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testUnbreakableNot(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def test(): if not "Foooooooooooooooooooooooooooooo" or "Foooooooooooooooooooooooooooooo" == "Foooooooooooooooooooooooooooooo": pass @@ -1832,8 +1683,7 @@ def test(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testSplitListWithComment(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ a = [ 'a', 'b', @@ -1844,8 +1694,7 @@ def testSplitListWithComment(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testOverColumnLimit(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = 
textwrap.dedent("""\ class Test: def testSomething(self): @@ -1855,8 +1704,7 @@ def testSomething(self): ('aaaaaaaaaaaaa', 'bbbb'): 'ccccccccccccccccccccccccccccccccccccccccccc', } """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class Test: def testSomething(self): @@ -1873,8 +1721,7 @@ def testSomething(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testEndingComment(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ a = f( a="something", b="something requiring comment which is quite long", # comment about b (pushes line over 79) @@ -1884,8 +1731,7 @@ def testEndingComment(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testContinuationSpaceRetention(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def fn(): return module \\ .method(Object(data, @@ -1896,8 +1742,7 @@ def fn(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testIfExpressionWithFunctionCall(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if x or z.y( a, c, @@ -1909,8 +1754,7 @@ def testIfExpressionWithFunctionCall(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testUnformattedAfterMultilineString(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def foo(): com_text = \\ ''' @@ -1921,8 +1765,7 @@ def foo(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSpacesAroundKeywordDefaultValues(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ sources = { 'json': request.get_json(silent=True) or {}, 'json2': request.get_json(silent=True), @@ -1933,14 +1776,12 @@ def testNoSpacesAroundKeywordDefaultValues(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSplittingBeforeEndingSubscriptBracket(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: if True: status = 
cf.describe_stacks(StackName=stackname)[u'Stacks'][0][u'StackStatus'] """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: if True: status = cf.describe_stacks( @@ -1950,8 +1791,7 @@ def testNoSplittingBeforeEndingSubscriptBracket(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoSplittingOnSingleArgument(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ xxxxxxxxxxxxxx = (re.search(r'(\\d+\\.\\d+\\.\\d+\\.)\\d+', aaaaaaa.bbbbbbbbbbbb).group(1) + re.search(r'\\d+\\.\\d+\\.\\d+\\.(\\d+)', @@ -1961,8 +1801,7 @@ def testNoSplittingOnSingleArgument(self): re.search(r'\\d+\\.\\d+\\.\\d+\\.(\\d+)', ccccccc).group(c.d)) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ xxxxxxxxxxxxxx = ( re.search(r'(\\d+\\.\\d+\\.\\d+\\.)\\d+', aaaaaaa.bbbbbbbbbbbb).group(1) + re.search(r'\\d+\\.\\d+\\.\\d+\\.(\\d+)', ccccccc).group(1)) @@ -1974,15 +1813,13 @@ def testNoSplittingOnSingleArgument(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplittingArraysSensibly(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ while True: while True: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = list['bbbbbbbbbbbbbbbbbbbbbbbbb'].split(',') aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = list('bbbbbbbbbbbbbbbbbbbbbbbbb').split(',') """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ while True: while True: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = list[ @@ -1994,15 +1831,13 @@ def testSplittingArraysSensibly(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testComprehensionForAndIf(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class f: def __repr__(self): tokens_repr = 
','.join(['{0}({1!r})'.format(tok.name, tok.value) for tok in self._tokens]) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class f: def __repr__(self): @@ -2013,8 +1848,7 @@ def __repr__(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testFunctionCallArguments(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def f(): if True: pytree_utils.InsertNodesBefore(_CreateCommentsFromPrefix( @@ -2024,8 +1858,7 @@ def f(): comment_prefix, comment_lineno, comment_column, standalone=True)) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def f(): if True: pytree_utils.InsertNodesBefore( @@ -2040,21 +1873,18 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBinaryOperators(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a = b ** 37 c = (20 ** -3) / (_GRID_ROWS ** (code_length - 10)) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a = b**37 c = (20**-3) / (_GRID_ROWS**(code_length - 10)) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): if True: if (self.stack[-1].split_before_closing_bracket and @@ -2067,8 +1897,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testContiguousList(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ [retval1, retval2] = a_very_long_function(argument_1, argument2, argument_3, argument_4) """) # noqa @@ -2076,8 +1905,7 @@ def testContiguousList(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testArgsAndKwargsFormatting(self): - code = textwrap.dedent( - """\ + code = 
textwrap.dedent("""\ a(a=aaaaaaaaaaaaaaaaaaaaa, b=aaaaaaaaaaaaaaaaaaaaaaaa, c=aaaaaaaaaaaaaaaaaa, @@ -2087,8 +1915,7 @@ def testArgsAndKwargsFormatting(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def foo(): return [ Bar(xxx='some string', @@ -2100,8 +1927,7 @@ def foo(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testCommentColumnLimitOverflow(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): if True: TaskManager.get_tags = MagicMock( @@ -2114,8 +1940,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testMultilineLambdas(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class SomeClass(object): do_something = True @@ -2126,8 +1951,7 @@ def succeeded(self, dddddddddddddd): d.addCallback(lambda _: self.aaaaaa.bbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccccccc(dddddddddddddd)) return d """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class SomeClass(object): do_something = True @@ -2145,14 +1969,13 @@ def succeeded(self, dddddddddddddd): style.CreateStyleFromConfig( '{based_on_style: yapf, allow_multiline_lambdas: true}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testMultilineDictionaryKeys(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ MAP_WITH_LONG_KEYS = { ('lorem ipsum', 'dolor sit amet'): 1, @@ -2162,8 +1985,7 @@ def testMultilineDictionaryKeys(self): 3 } """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ 
MAP_WITH_LONG_KEYS = { ('lorem ipsum', 'dolor sit amet'): 1, @@ -2177,18 +1999,16 @@ def testMultilineDictionaryKeys(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: yapf, ' - 'allow_multiline_dictionary_keys: true}')) + style.CreateStyleFromConfig('{based_on_style: yapf, ' + 'allow_multiline_dictionary_keys: true}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testStableDictionaryFormatting(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class A(object): def method(self): @@ -2205,16 +2025,15 @@ def method(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: pep8, indent_width: 2, ' - 'continuation_indent_width: 4, ' - 'indent_dictionary_value: True}')) + style.CreateStyleFromConfig('{based_on_style: pep8, indent_width: 2, ' + 'continuation_indent_width: 4, ' + 'indent_dictionary_value: True}')) - llines = yapf_test_helper.ParseAndUnwrap(code) + llines = yapf_test_helper.ParseAndUnwrap(code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) finally: @@ -2223,14 +2042,12 @@ def method(self): def testStableInlinedDictionaryFormatting(self): try: style.SetGlobalStyle(style.CreatePEP8Style()) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): url = "http://{0}/axis-cgi/admin/param.cgi?{1}".format( value, urllib.urlencode({'action': 'update', 'parameter': value})) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code 
= textwrap.dedent("""\ def _(): url = "http://{0}/axis-cgi/admin/param.cgi?{1}".format( value, urllib.urlencode({ @@ -2239,25 +2056,23 @@ def _(): })) """) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testDontSplitKeywordValueArguments(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def mark_game_scored(gid): _connect.execute(_games.update().where(_games.c.gid == gid).values( scored=True)) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def mark_game_scored(gid): _connect.execute( _games.update().where(_games.c.gid == gid).values(scored=True)) @@ -2266,8 +2081,7 @@ def mark_game_scored(gid): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDontAddBlankLineAfterMultilineString(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ query = '''SELECT id FROM table WHERE day in {}''' @@ -2277,8 +2091,7 @@ def testDontAddBlankLineAfterMultilineString(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFormattingListComprehensions(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def a(): if True: if True: @@ -2292,8 +2105,7 @@ def a(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNoSplittingWhenBinPacking(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ a_very_long_function_name( long_argument_name_1=1, long_argument_name_2=2, @@ -2315,25 +2127,23 @@ def 
testNoSplittingWhenBinPacking(self): 'dedent_closing_brackets: True, ' 'split_before_named_assigns: False}')) - llines = yapf_test_helper.ParseAndUnwrap(code) + llines = yapf_test_helper.ParseAndUnwrap(code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testNotSplittingAfterSubscript(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if not aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.b(c == d[ 'eeeeee']).ffffff(): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if not aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.b( c == d['eeeeee']).ffffff(): pass @@ -2342,8 +2152,7 @@ def testNotSplittingAfterSubscript(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplittingOneArgumentList(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): if True: if True: @@ -2352,8 +2161,7 @@ def _(): if True: boxes[id_] = np.concatenate((points.min(axis=0), qoints.max(axis=0))) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _(): if True: if True: @@ -2367,8 +2175,7 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplittingBeforeFirstElementListArgument(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class _(): @classmethod def _pack_results_for_constraint_or(cls, combination, constraints): @@ -2381,8 +2188,7 @@ def _pack_results_for_constraint_or(cls, combination, constraints): ), constraints, 
InvestigationResult.OR ) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class _(): @classmethod @@ -2398,8 +2204,7 @@ def _pack_results_for_constraint_or(cls, combination, constraints): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplittingArgumentsTerminatedByComma(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ function_name(argument_name_1=1, argument_name_2=2, argument_name_3=3) function_name(argument_name_1=1, argument_name_2=2, argument_name_3=3,) @@ -2410,8 +2215,7 @@ def testSplittingArgumentsTerminatedByComma(self): r =f0 (1, 2,3,) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ function_name(argument_name_1=1, argument_name_2=2, argument_name_3=3) function_name( @@ -2446,19 +2250,18 @@ def testSplittingArgumentsTerminatedByComma(self): '{based_on_style: yapf, ' 'split_arguments_when_comma_terminated: True}')) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testImportAsList(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ from toto import titi, tata, tutu # noqa from toto import titi, tata, tutu from toto import (titi, tata, tutu) @@ -2467,8 +2270,7 @@ def testImportAsList(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDictionaryValuesOnOwnLines(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = 
textwrap.dedent("""\ a = { 'aaaaaaaaaaaaaaaaaaaaaaaa': Check('ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ', '=', True), @@ -2492,8 +2294,7 @@ def testDictionaryValuesOnOwnLines(self): Check('QQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQQ', '=', False), } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a = { 'aaaaaaaaaaaaaaaaaaaaaaaa': Check('ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ', '=', True), @@ -2521,31 +2322,27 @@ def testDictionaryValuesOnOwnLines(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDictionaryOnOwnLine(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ doc = test_utils.CreateTestDocumentViaController( content={ 'a': 'b' }, branch_key=branch.key, collection_key=collection.key) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ doc = test_utils.CreateTestDocumentViaController( content={'a': 'b'}, branch_key=branch.key, collection_key=collection.key) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ doc = test_utils.CreateTestDocumentViaController( content={ 'a': 'b' }, branch_key=branch.key, collection_key=collection.key, collection_key2=collection.key2) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ doc = test_utils.CreateTestDocumentViaController( content={'a': 'b'}, branch_key=branch.key, @@ -2556,8 +2353,7 @@ def testDictionaryOnOwnLine(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNestedListsInDictionary(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ _A = { 'cccccccccc': ('^^1',), 'rrrrrrrrrrrrrrrrrrrrrrrrr': ('^7913', # 
AAAAAAAAAAAAAA. @@ -2586,8 +2382,7 @@ def testNestedListsInDictionary(self): ), } """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ _A = { 'cccccccccc': ('^^1',), 'rrrrrrrrrrrrrrrrrrrrrrrrr': ( @@ -2625,8 +2420,7 @@ def testNestedListsInDictionary(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNestedDictionary(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class _(): def _(): breadcrumbs = [{'name': 'Admin', @@ -2636,8 +2430,7 @@ def _(): 'url': url_for(".home")}, {'title': title}] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class _(): def _(): breadcrumbs = [ @@ -2655,8 +2448,7 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDictionaryElementsOnOneLine(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): @mock.patch.dict( @@ -2676,12 +2468,10 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testNotInParams(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ list("a long line to break the line. a long line to break the brk a long lin", not True) """) # noqa - expected_code = textwrap.dedent( - """\ + expected_code = textwrap.dedent("""\ list("a long line to break the line. 
a long line to break the brk a long lin", not True) """) # noqa @@ -2689,16 +2479,14 @@ def testNotInParams(self): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testNamedAssignNotAtEndOfLine(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): if True: with py3compat.open_with_encoding(filename, mode='w', encoding=encoding) as fd: pass """) - expected_code = textwrap.dedent( - """\ + expected_code = textwrap.dedent("""\ def _(): if True: with py3compat.open_with_encoding( @@ -2709,8 +2497,7 @@ def _(): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testBlankLineBeforeClassDocstring(self): - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ class A: """Does something. @@ -2721,8 +2508,7 @@ class A: def __init__(self): pass ''') - expected_code = textwrap.dedent( - '''\ + expected_code = textwrap.dedent('''\ class A: """Does something. @@ -2735,8 +2521,7 @@ def __init__(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ class A: """Does something. @@ -2747,8 +2532,7 @@ class A: def __init__(self): pass ''') - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ class A: """Does something. 
@@ -2767,14 +2551,13 @@ def __init__(self): 'blank_line_before_class_docstring: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testBlankLineBeforeModuleDocstring(self): - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ #!/usr/bin/env python # -*- coding: utf-8 name> -*- @@ -2784,8 +2567,7 @@ def testBlankLineBeforeModuleDocstring(self): def foobar(): pass ''') - expected_code = textwrap.dedent( - '''\ + expected_code = textwrap.dedent('''\ #!/usr/bin/env python # -*- coding: utf-8 name> -*- """Some module docstring.""" @@ -2797,8 +2579,7 @@ def foobar(): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ #!/usr/bin/env python # -*- coding: utf-8 name> -*- """Some module docstring.""" @@ -2807,8 +2588,7 @@ def foobar(): def foobar(): pass ''') - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ #!/usr/bin/env python # -*- coding: utf-8 name> -*- @@ -2826,20 +2606,18 @@ def foobar(): 'blank_line_before_module_docstring: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testTupleCohesion(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def f(): this_is_a_very_long_function_name(an_extremely_long_variable_name, ( 'a string that may be too long %s' % 'M15')) """) - expected_code = textwrap.dedent( - """\ + 
expected_code = textwrap.dedent("""\ def f(): this_is_a_very_long_function_name( an_extremely_long_variable_name, @@ -2849,15 +2627,14 @@ def f(): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testSubscriptExpression(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ foo = d[not a] """) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) def testListWithFunctionCalls(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): return [ Bar( @@ -2869,8 +2646,7 @@ def foo(): zzz='a third long string') ] """) - expected_code = textwrap.dedent( - """\ + expected_code = textwrap.dedent("""\ def foo(): return [ Bar(xxx='some string', @@ -2885,13 +2661,11 @@ def foo(): self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testEllipses(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ X=... Y = X if ... else X """) - expected_code = textwrap.dedent( - """\ + expected_code = textwrap.dedent("""\ X = ... Y = X if ... 
else X """) @@ -2905,7 +2679,7 @@ def testPseudoParens(self): {'nested_key': 1, }, } """ - expected_code = """\ + expected_code = """\ my_dict = { 'key': # Some comment about the key { @@ -2913,18 +2687,16 @@ def testPseudoParens(self): }, } """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testSplittingBeforeFirstArgumentOnFunctionCall(self): """Tests split_before_first_argument on a function call.""" - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a_very_long_function_name("long string with formatting {0:s}".format( "mystring")) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a_very_long_function_name( "long string with formatting {0:s}".format("mystring")) """) @@ -2935,21 +2707,19 @@ def testSplittingBeforeFirstArgumentOnFunctionCall(self): '{based_on_style: yapf, split_before_first_argument: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testSplittingBeforeFirstArgumentOnFunctionDefinition(self): """Tests split_before_first_argument on a function definition.""" - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _GetNumberOfSecondsFromElements(year, month, day, hours, minutes, seconds, microseconds): return """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _GetNumberOfSecondsFromElements( year, month, day, hours, minutes, seconds, microseconds): return @@ -2961,23 +2731,21 @@ def _GetNumberOfSecondsFromElements( '{based_on_style: yapf, split_before_first_argument: True}')) llines = 
yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testSplittingBeforeFirstArgumentOnCompoundStatement(self): """Tests split_before_first_argument on a compound statement.""" - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if (long_argument_name_1 == 1 or long_argument_name_2 == 2 or long_argument_name_3 == 3 or long_argument_name_4 == 4): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if (long_argument_name_1 == 1 or long_argument_name_2 == 2 or long_argument_name_3 == 3 or long_argument_name_4 == 4): pass @@ -2989,15 +2757,14 @@ def testSplittingBeforeFirstArgumentOnCompoundStatement(self): '{based_on_style: yapf, split_before_first_argument: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testCoalesceBracketsOnDict(self): """Tests coalesce_brackets on a dictionary.""" - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ date_time_values = ( { u'year': year, @@ -3009,8 +2776,7 @@ def testCoalesceBracketsOnDict(self): } ) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ date_time_values = ({ u'year': year, u'month': month, @@ -3027,14 +2793,13 @@ def testCoalesceBracketsOnDict(self): '{based_on_style: yapf, coalesce_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + 
reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testSplitAfterComment(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if __name__ == "__main__": with another_resource: account = { @@ -3059,8 +2824,7 @@ def testAsyncAsNonKeyword(self): style.SetGlobalStyle(style.CreatePEP8Style()) # In Python 2, async may be used as a non-keyword identifier. - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ from util import async @@ -3082,9 +2846,8 @@ def testDisableEndingCommaHeuristic(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: yapf,' - ' disable_ending_comma_heuristic: True}')) + style.CreateStyleFromConfig('{based_on_style: yapf,' + ' disable_ending_comma_heuristic: True}')) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) @@ -3092,8 +2855,7 @@ def testDisableEndingCommaHeuristic(self): style.SetGlobalStyle(style.CreateYapfStyle()) def testDedentClosingBracketsWithTypeAnnotationExceedingLineLength(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass @@ -3101,8 +2863,7 @@ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None def function(first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def function( first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None ) -> None: @@ -3117,19 +2878,17 @@ def function( try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: yapf,' - ' dedent_closing_brackets: True}')) + style.CreateStyleFromConfig('{based_on_style: yapf,' + ' dedent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - 
expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsWithTypeAnnotationExceedingLineLength(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass @@ -3137,8 +2896,7 @@ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None def function(first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_argument=None) -> None: pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def function( first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None ) -> None: @@ -3153,19 +2911,17 @@ def function( try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig('{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsInFunctionCall(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None, third_and_final_argument=True): pass @@ -3173,8 +2929,7 @@ def function(first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None, third_a def function(first_argument_xxxxxxxxxxxxxxxxxxxxxxx=(0,), second_and_last_argument=None): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def function( first_argument_xxxxxxxxxxxxxxxx=(0,), second_argument=None, @@ 
-3191,19 +2946,17 @@ def function( try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig('{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsInTuple(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def function(): some_var = ('a long element', 'another long element', 'short element', 'really really long element') return True @@ -3212,8 +2965,7 @@ def function(): some_var = ('a couple', 'small', 'elemens') return False """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def function(): some_var = ( 'a long element', 'another long element', 'short element', @@ -3229,19 +2981,17 @@ def function(): try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig('{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsInList(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def function(): some_var = ['a long element', 'another long element', 'short element', 'really really long element'] return True @@ -3250,8 +3000,7 @@ def function(): some_var = ['a couple', 'small', 'elemens'] return False """) # noqa - 
expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def function(): some_var = [ 'a long element', 'another long element', 'short element', @@ -3267,19 +3016,17 @@ def function(): try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig('{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testIndentClosingBracketsInDict(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def function(): some_var = {1: ('a long element', 'and another really really long element that is really really amazingly long'), 2: 'another long element', 3: 'short element', 4: 'really really long element'} return True @@ -3288,8 +3035,7 @@ def function(): some_var = {1: 'a couple', 2: 'small', 3: 'elemens'} return False """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def function(): some_var = { 1: @@ -3311,19 +3057,17 @@ def function(): try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: yapf,' - ' indent_closing_brackets: True}')) + style.CreateStyleFromConfig('{based_on_style: yapf,' + ' indent_closing_brackets: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) def testMultipleDictionariesInList(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class A: def b(): d 
= { @@ -3349,8 +3093,7 @@ def b(): ] } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class A: def b(): @@ -3382,10 +3125,9 @@ def testForceMultilineDict_True(self): style.CreateStyleFromConfig('{force_multiline_dict: true}')) unformatted_code = textwrap.dedent( "responseDict = {'childDict': {'spam': 'eggs'}}\n") - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - actual = reformatter.Reformat(llines) - expected = textwrap.dedent( - """\ + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + actual = reformatter.Reformat(llines) + expected = textwrap.dedent("""\ responseDict = { 'childDict': { 'spam': 'eggs' @@ -3400,26 +3142,23 @@ def testForceMultilineDict_False(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{force_multiline_dict: false}')) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ responseDict = {'childDict': {'spam': 'eggs'}} """) expected_formatted_code = unformatted_code - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) @unittest.skipUnless(py3compat.PY38, 'Requires Python 3.8') def testWalrus(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if (x := len([1]*1000)>100): print(f'{x} is pretty big' ) """) - expected = textwrap.dedent( - """\ + expected = textwrap.dedent("""\ if (x := len([1] * 1000) > 100): print(f'{x} is pretty big') """) @@ -3431,23 +3170,21 @@ def testAlignAssignBlankLineInbetween(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_assignment: true}')) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ val_first = 1 
val_second += 2 val_third = 3 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ val_first = 1 val_second += 2 val_third = 3 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) @@ -3457,23 +3194,21 @@ def testAlignAssignCommentLineInbetween(self): style.CreateStyleFromConfig( '{align_assignment: true,' 'new_alignment_after_commentline = true}')) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ val_first = 1 val_second += 2 # comment val_third = 3 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ val_first = 1 val_second += 2 # comment val_third = 3 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) @@ -3481,8 +3216,7 @@ def testAlignAssignDefLineInbetween(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_assignment: true}')) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ val_first = 1 val_second += 2 def fun(): @@ -3490,8 +3224,7 @@ def fun(): abc = '' val_third = 3 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ val_first = 1 val_second += 2 @@ -3504,8 +3237,8 @@ def fun(): val_third = 3 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: 
style.SetGlobalStyle(style.CreateYapfStyle()) @@ -3513,8 +3246,7 @@ def testAlignAssignObjectWithNewLineInbetween(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_assignment: true}')) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ val_first = 1 val_second += 2 object = { @@ -3524,8 +3256,7 @@ def testAlignAssignObjectWithNewLineInbetween(self): } val_third = 3 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ val_first = 1 val_second += 2 object = { @@ -3536,8 +3267,8 @@ def testAlignAssignObjectWithNewLineInbetween(self): val_third = 3 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) @@ -3545,13 +3276,13 @@ def testAlignAssignWithOnlyOneAssignmentLine(self): try: style.SetGlobalStyle( style.CreateStyleFromConfig('{align_assignment: true}')) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ val_first = 1 """) expected_formatted_code = unformatted_code - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreateYapfStyle()) diff --git a/yapftests/reformatter_buganizer_test.py b/yapftests/reformatter_buganizer_test.py index d8beb04cb..a4089ad03 100644 --- a/yapftests/reformatter_buganizer_test.py +++ b/yapftests/reformatter_buganizer_test.py @@ -29,7 +29,7 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreateYapfStyle()) def testB137580392(self): - code = """\ + code = """\ def _create_testing_simulator_and_sink( ) -> 
Tuple[_batch_simulator:_batch_simulator.BatchSimulator, _batch_simulator.SimulationSink]: @@ -39,7 +39,7 @@ def _create_testing_simulator_and_sink( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB73279849(self): - unformatted_code = """\ + unformatted_code = """\ class A: def _(a): return 'hello' [ a ] @@ -49,11 +49,11 @@ class A: def _(a): return 'hello'[a] """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB122455211(self): - unformatted_code = """\ + unformatted_code = """\ _zzzzzzzzzzzzzzzzzzzz = Union[sssssssssssssssssssss.pppppppppppppppp, sssssssssssssssssssss.pppppppppppppppppppppppppppp] """ @@ -62,11 +62,11 @@ def testB122455211(self): sssssssssssssssssssss.pppppppppppppppp, sssssssssssssssssssss.pppppppppppppppppppppppppppp] """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB119300344(self): - code = """\ + code = """\ def _GenerateStatsEntries( process_id: Text, timestamp: Optional[rdfvalue.RDFDatetime] = None @@ -77,7 +77,7 @@ def _GenerateStatsEntries( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB132886019(self): - code = """\ + code = """\ X = { 'some_dict_key': frozenset([ @@ -90,7 +90,7 @@ def testB132886019(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB26521719(self): - code = """\ + code = """\ class _(): def _(self): @@ -101,7 +101,7 @@ def _(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB122541552(self): - code = """\ + code = """\ # pylint: disable=g-explicit-bool-comparison,singleton-comparison _QUERY = account.Account.query(account.Account.enabled == True) # pylint: enable=g-explicit-bool-comparison,singleton-comparison @@ -114,7 +114,7 
@@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB124415889(self): - code = """\ + code = """\ class _(): def run_queue_scanners(): @@ -137,7 +137,7 @@ def modules_to_install(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB73166511(self): - code = """\ + code = """\ def _(): if min_std is not None: groundtruth_age_variances = tf.maximum(groundtruth_age_variances, @@ -147,7 +147,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB118624921(self): - code = """\ + code = """\ def _(): function_call( alert_name='xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', @@ -175,7 +175,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB120047670(self): - unformatted_code = """\ + unformatted_code = """\ X = { 'NO_PING_COMPONENTS': [ 79775, # Releases / FOO API @@ -195,7 +195,7 @@ def testB120047670(self): 'PING_BLOCKED_BUGS': False, } """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB120245013(self): @@ -213,11 +213,11 @@ def testNoAlertForShortPeriod(self, rutabaga): self._fillInOtherFields(streamz_path, {streamz_field_of_interest: True} )] = series.Counter('1s', '+ 500x10000') """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB117841880(self): - code = """\ + code = """\ def xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx( aaaaaaaaaaaaaaaaaaa: AnyStr, bbbbbbbbbbbb: Optional[Sequence[AnyStr]] = None, @@ -244,11 +244,11 @@ def testB111764402(self): for external_id in external_ids })) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, 
reformatter.Reformat(llines)) def testB116825060(self): - code = """\ + code = """\ result_df = pd.DataFrame({LEARNED_CTR_COLUMN: learned_ctr}, index=df_metrics.index) """ @@ -256,7 +256,7 @@ def testB116825060(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB112711217(self): - code = """\ + code = """\ def _(): stats['moderated'] = ~stats.moderation_reason.isin( approved_moderation_reasons) @@ -265,7 +265,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB112867548(self): - unformatted_code = """\ + unformatted_code = """\ def _(): return flask.make_response( 'Records: {}, Problems: {}, More: {}'.format( @@ -283,7 +283,7 @@ def _(): httplib.ACCEPTED if process_result.has_more else httplib.OK, {'content-type': _TEXT_CONTEXT_TYPE}) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB112651423(self): @@ -302,11 +302,11 @@ def potato(feeditems, browse_use_case=None): 'FEEDS_LOAD_PLAYLIST_VIDEOS_FOR_ALL_ITEMS'] and item.video: continue """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB80484938(self): - code = """\ + code = """\ for sssssss, aaaaaaaaaa in [ ('ssssssssssssssssssss', 'sssssssssssssssssssssssss'), ('nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn', @@ -349,7 +349,7 @@ def testB80484938(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB120771563(self): - code = """\ + code = """\ class A: def b(): @@ -376,7 +376,7 @@ def b(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB79462249(self): - code = """\ + code = """\ foo.bar(baz, [ quux(thud=42), norf, @@ -410,7 +410,7 @@ def _(): eeeeeeeeeeeeeeeeeeeeeeeeee.fffffffffffffffffffffffffffffffffffffff 
.ggggggggggggggggggggggggggggggggg.hhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhhh()) """ # noqa - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB77923341(self): @@ -424,7 +424,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB77329955(self): - code = """\ + code = """\ class _(): @parameterized.named_parameters( @@ -442,7 +442,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB65197969(self): - unformatted_code = """\ + unformatted_code = """\ class _(): def _(): @@ -457,11 +457,11 @@ def _(): seconds=max(float(time_scale), small_interval) * 1.41**min(num_attempts, 9)) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB65546221(self): - unformatted_code = """\ + unformatted_code = """\ SUPPORTED_PLATFORMS = ( "centos-6", "centos-7", @@ -484,7 +484,7 @@ def testB65546221(self): "debian-9-stretch", ) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30500455(self): @@ -501,11 +501,11 @@ def testB30500455(self): [(name, 'function#' + name) for name in INITIAL_FUNCTIONS] + [(name, 'const#' + name) for name in INITIAL_CONSTS]) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB38343525(self): - code = """\ + code = """\ # This does foo. @arg.String('some_path_to_a_file', required=True) # This does bar. 
@@ -534,7 +534,7 @@ def testB37099651(self): # pylint: enable=g-long-lambda ) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB33228502(self): @@ -572,11 +572,11 @@ def _(): | m.Join('successes', 'total') | m.Point(m.VAL['successes'] / m.VAL['total'])))) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30394228(self): - code = """\ + code = """\ class _(): def _(self): @@ -589,7 +589,7 @@ def _(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB65246454(self): - unformatted_code = """\ + unformatted_code = """\ class _(): def _(self): @@ -605,11 +605,11 @@ def _(self): self.assertEqual({i.id for i in successful_instances}, {i.id for i in self._statuses.successful_instances}) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB67935450(self): - unformatted_code = """\ + unformatted_code = """\ def _(): return ( (Gauge( @@ -646,11 +646,11 @@ def _(): m.Cond(m.VAL['start'] != 0, m.VAL['start'], m.TimestampMicros() / 1000000L))) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB66011084(self): - unformatted_code = """\ + unformatted_code = """\ X = { "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": # Comment 1. ([] if True else [ # Comment 2. 
@@ -678,7 +678,7 @@ def testB66011084(self): ]), } """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB67455376(self): @@ -689,11 +689,11 @@ def testB67455376(self): sponge_ids.extend(invocation.id() for invocation in self._client.GetInvocationsByLabels(labels)) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB35210351(self): - unformatted_code = """\ + unformatted_code = """\ def _(): config.AnotherRuleThing( 'the_title_to_the_thing_here', @@ -719,11 +719,11 @@ def _(): GetTheAlertToIt('the_title_to_the_thing_here'), GetNotificationTemplate('your_email_here'))) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB34774905(self): - unformatted_code = """\ + unformatted_code = """\ x=[VarExprType(ir_name=IrName( value='x', expr_type=UnresolvedAttrExprType( atom=UnknownExprType(), attr_name=IrName( value='x', expr_type=UnknownExprType(), usage='UNKNOWN', fqn=None, @@ -748,11 +748,11 @@ def testB34774905(self): astn=None)) ] """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB65176185(self): - code = """\ + code = """\ xx = zip(*[(a, b) for (a, b, c) in yy]) """ llines = yapf_test_helper.ParseAndUnwrap(code) @@ -776,11 +776,11 @@ def _(): | o.Window(m.Align('5m')) | p.GroupBy(['borg_user', 'borg_job', 'borg_cell'], q.Mean())) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = 
yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB32167774(self): - unformatted_code = """\ + unformatted_code = """\ X = ( 'is_official', 'is_cover', @@ -803,7 +803,7 @@ def testB32167774(self): 'is_compilation', ) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB66912275(self): @@ -827,11 +827,11 @@ def _(): 'fingerprint': base64.urlsafe_b64encode('invalid_fingerprint') }).execute() """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB67312284(self): - code = """\ + code = """\ def _(): self.assertEqual( [u'to be published 2', u'to be published 1', u'to be published 0'], @@ -850,12 +850,11 @@ def testB65241516(self): TrainTraceDir(unit_key, "*", "*"), embedding_model.CHECKPOINT_FILENAME + "-*")) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB37460004(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ assert all(s not in (_SENTINEL, None) for s in nested_schemas ), 'Nested schemas should never contain None/_SENTINEL' """) @@ -863,7 +862,7 @@ def testB37460004(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB36806207(self): - code = """\ + code = """\ def _(): linearity_data = [[row] for row in [ "%.1f mm" % (np.mean(linearity_values["pos_error"]) * 1000.0), @@ -882,8 +881,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB36215507(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class X(): def _(): @@ -897,8 +895,7 
@@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB35212469(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): X = { 'retain': { @@ -907,8 +904,7 @@ def _(): } } """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _(): X = { 'retain': { @@ -921,14 +917,12 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB31063453(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): while ((not mpede_proc) or ((time_time() - last_modified) < FLAGS_boot_idle_timeout)): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _(): while ((not mpede_proc) or ((time_time() - last_modified) < FLAGS_boot_idle_timeout)): @@ -938,8 +932,7 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB35021894(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): labelacl = Env(qa={ 'read': 'name/some-type-of-very-long-name-for-reading-perms', @@ -950,8 +943,7 @@ def _(): 'modify': 'name/some-other-type-of-very-long-name-for-modifying' }) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _(): labelacl = Env( qa={ @@ -967,12 +959,10 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB34682902(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ logging.info("Mean angular velocity norm: %.3f", np.linalg.norm(np.mean(ang_vel_arr, axis=0))) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ logging.info("Mean angular velocity norm: %.3f", np.linalg.norm(np.mean(ang_vel_arr, axis=0))) """) @@ -980,15 
+970,13 @@ def testB34682902(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB33842726(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class _(): def _(): hints.append(('hg tag -f -l -r %s %s # %s' % (short(ctx.node( )), candidatetag, firstline))[:78]) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class _(): def _(): hints.append(('hg tag -f -l -r %s %s # %s' % @@ -998,8 +986,7 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB32931780(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ environments = { 'prod': { # this is a comment before the first entry. @@ -1030,8 +1017,7 @@ def testB32931780(self): } } """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ environments = { 'prod': { # this is a comment before the first entry. @@ -1062,8 +1048,7 @@ def testB32931780(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB33047408(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def _(): for sort in (sorts or []): request['sorts'].append({ @@ -1077,8 +1062,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB32714745(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): def _BlankDefinition(): @@ -1108,16 +1092,14 @@ def _BlankDefinition(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB32737279(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ here_is_a_dict = { 'key': # Comment. 'value' } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ here_is_a_dict = { 'key': # Comment. 
'value' @@ -1127,8 +1109,7 @@ def testB32737279(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB32570937(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def _(): if (job_message.ball not in ('*', ball) or job_message.call not in ('*', call) or @@ -1139,8 +1120,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB31937033(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): def __init__(self, metric, fields_cb=None): @@ -1150,7 +1130,7 @@ def __init__(self, metric, fields_cb=None): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB31911533(self): - code = """\ + code = """\ class _(): @parameterized.NamedParameters( @@ -1166,8 +1146,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB31847238(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class _(): def aaaaa(self, bbbbb, cccccccccccccc=None): # TODO(who): pylint: disable=unused-argument @@ -1176,8 +1155,7 @@ def aaaaa(self, bbbbb, cccccccccccccc=None): # TODO(who): pylint: disable=unuse def xxxxx(self, yyyyy, zzzzzzzzzzzzzz=None): # A normal comment that runs over the column limit. 
return 1 """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class _(): def aaaaa(self, bbbbb, cccccccccccccc=None): # TODO(who): pylint: disable=unused-argument @@ -1193,13 +1171,11 @@ def xxxxx( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30760569(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ {'1234567890123456789012345678901234567890123456789012345678901234567890': '1234567890123456789012345678901234567890'} """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ { '1234567890123456789012345678901234567890123456789012345678901234567890': '1234567890123456789012345678901234567890' @@ -1209,15 +1185,13 @@ def testB30760569(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB26034238(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class Thing: def Function(self): thing.Scrape('/aaaaaaaaa/bbbbbbbbbb/ccccc/dddd/eeeeeeeeeeeeee/ffffffffffffff').AndReturn(42) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class Thing: def Function(self): @@ -1229,8 +1203,7 @@ def Function(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30536435(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def main(unused_argv): if True: if True: @@ -1239,8 +1212,7 @@ def main(unused_argv): ccccccccc.within, imports.ddddddddddddddddddd(name_item.ffffffffffffffff))) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def main(unused_argv): if True: if True: @@ -1252,14 +1224,12 @@ def main(unused_argv): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30442148(self): - 
unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def lulz(): return (some_long_module_name.SomeLongClassName. some_long_attribute_name.some_long_method_name()) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def lulz(): return (some_long_module_name.SomeLongClassName.some_long_attribute_name .some_long_method_name()) @@ -1268,8 +1238,7 @@ def lulz(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB26868213(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): xxxxxxxxxxxxxxxxxxx = { 'ssssss': {'ddddd': 'qqqqq', @@ -1284,8 +1253,7 @@ def _(): } } """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _(): xxxxxxxxxxxxxxxxxxx = { 'ssssss': { @@ -1306,8 +1274,7 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30173198(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): def _(): @@ -1318,8 +1285,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB29908765(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): def __repr__(self): @@ -1330,8 +1296,7 @@ def __repr__(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB30087362(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def _(): for s in sorted(env['foo']): bar() @@ -1344,8 +1309,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB30087363(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if False: bar() # This is a comment @@ -1357,14 +1321,12 @@ def testB30087363(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB29093579(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): 
_xxxxxxxxxxxxxxx(aaaaaaaa, bbbbbbbbbbbbbb.cccccccccc[ dddddddddddddddddddddddddddd.eeeeeeeeeeeeeeeeeeeeee.fffffffffffffffffffff]) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _(): _xxxxxxxxxxxxxxx( aaaaaaaa, @@ -1375,8 +1337,7 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB26382315(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ @hello_world # This is a first comment @@ -1388,8 +1349,7 @@ def foo(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB27616132(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: query.fetch_page.assert_has_calls([ mock.call(100, @@ -1400,8 +1360,7 @@ def testB27616132(self): start_cursor=cursor_2), ]) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: query.fetch_page.assert_has_calls([ mock.call(100, start_cursor=None), @@ -1413,8 +1372,7 @@ def testB27616132(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB27590179(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: if True: self.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = ( @@ -1424,8 +1382,7 @@ def testB27590179(self): self.bbb.cccccccccc(ddddddddddddddddddddddd.eeeeeeeeeeeeeeeeeeeeee) }) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: if True: self.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = ({ @@ -1439,13 +1396,11 @@ def testB27590179(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB27266946(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = 
(self.bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccccccccccc) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _(): aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa = ( self.bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb @@ -1455,8 +1410,7 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB25505359(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ _EXAMPLE = { 'aaaaaaaaaaaaaa': [{ 'bbbb': 'cccccccccccccccccccccc', @@ -1471,8 +1425,7 @@ def testB25505359(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25324261(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ aaaaaaaaa = set(bbbb.cccc for ddd in eeeeee.fffffffffff.gggggggggggggggg for cccc in ddd.specification) @@ -1481,8 +1434,7 @@ def testB25324261(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25136704(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class f: def test(self): @@ -1494,8 +1446,7 @@ def test(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25165602(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): ids = {u: i for u, i in zip(self.aaaaa, xrange(42, 42 + len(self.aaaaaa)))} """) # noqa @@ -1503,8 +1454,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25157123(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def ListArgs(): FairlyLongMethodName([relatively_long_identifier_for_a_list], another_argument_with_a_long_identifier) @@ -1513,8 +1463,7 @@ def ListArgs(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB25136820(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): return collections.OrderedDict({ # Preceding comment. 
@@ -1522,8 +1471,7 @@ def foo(): '$bbbbbbbbbbbbbbbbbbbbbbbb', }) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): return collections.OrderedDict({ # Preceding comment. @@ -1535,15 +1483,13 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB25131481(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ APPARENT_ACTIONS = ('command_type', { 'materialize': lambda x: some_type_of_function('materialize ' + x.command_def), '#': lambda x: x # do nothing }) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ APPARENT_ACTIONS = ( 'command_type', { @@ -1557,8 +1503,7 @@ def testB25131481(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB23445244(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): if True: return xxxxxxxxxxxxxxxx( @@ -1569,8 +1514,7 @@ def foo(): FLAGS.aaaaaaaaaaaaaa + FLAGS.bbbbbbbbbbbbbbbbbbb, }) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): if True: return xxxxxxxxxxxxxxxx( @@ -1586,8 +1530,7 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB20559654(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class A(object): def foo(self): @@ -1595,8 +1538,7 @@ def foo(self): ['AA BBBB CCC DDD EEEEEEEE X YY ZZZZ FFF EEE AAAAAAAA'], aaaaaaaaaaa=True, bbbbbbbb=None) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class A(object): def foo(self): @@ -1609,8 +1551,7 @@ def foo(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB23943842(self): - unformatted_code = textwrap.dedent( - """\ + 
unformatted_code = textwrap.dedent("""\ class F(): def f(): self.assertDictEqual( @@ -1624,8 +1565,7 @@ def f(): 'lines': 'l8'} }) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class F(): def f(): @@ -1649,14 +1589,12 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB20551180(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): if True: return (struct.pack('aaaa', bbbbbbbbbb, ccccccccccccccc, dddddddd) + eeeeeee) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): if True: return (struct.pack('aaaa', bbbbbbbbbb, ccccccccccccccc, dddddddd) + @@ -1666,14 +1604,12 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB23944849(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class A(object): def xxxxxxxxx(self, aaaaaaa, bbbbbbb=ccccccccccc, dddddd=300, eeeeeeeeeeeeee=None, fffffffffffffff=0): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class A(object): def xxxxxxxxx(self, @@ -1688,14 +1624,12 @@ def xxxxxxxxx(self, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB23935890(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class F(): def functioni(self, aaaaaaa, bbbbbbb, cccccc, dddddddddddddd, eeeeeeeeeeeeeee): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class F(): def functioni(self, aaaaaaa, bbbbbbb, cccccc, dddddddddddddd, @@ -1706,8 +1640,7 @@ def functioni(self, aaaaaaa, bbbbbbb, cccccc, dddddddddddddd, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB28414371(self): - code = 
textwrap.dedent( - """\ + code = textwrap.dedent("""\ def _(): return ((m.fffff( m.rrr('mmmmmmmmmmmmmmmm', 'ssssssssssssssssssssssssss'), ffffffffffffffff) @@ -1732,8 +1665,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20127686(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): if True: return ((m.fffff( @@ -1751,13 +1683,11 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20016122(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ from a_very_long_or_indented_module_name_yada_yada import (long_argument_1, long_argument_2) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ from a_very_long_or_indented_module_name_yada_yada import ( long_argument_1, long_argument_2) """) @@ -1768,13 +1698,12 @@ def testB20016122(self): '{based_on_style: pep8, split_penalty_import_names: 350}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class foo(): def __eq__(self, other): @@ -1794,9 +1723,8 @@ def __eq__(self, other): try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: yapf, ' - 'split_before_logical_operator: True}')) + style.CreateStyleFromConfig('{based_on_style: yapf, ' + 'split_before_logical_operator: True}')) llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) @@ -1804,14 +1732,12 @@ def __eq__(self, other): style.SetGlobalStyle(style.CreateYapfStyle()) def testB22527411(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def f(): if True: 
aaaaaa.bbbbbbbbbbbbbbbbbbbb[-1].cccccccccccccc.ddd().eeeeeeee(ffffffffffffff) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def f(): if True: aaaaaa.bbbbbbbbbbbbbbbbbbbb[-1].cccccccccccccc.ddd().eeeeeeee( @@ -1821,8 +1747,7 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB20849933(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def main(unused_argv): if True: aaaaaaaa = { @@ -1830,8 +1755,7 @@ def main(unused_argv): (eeeeee.FFFFFFFFFFFFFFFFFF), } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def main(unused_argv): if True: aaaaaaaa = { @@ -1843,8 +1767,7 @@ def main(unused_argv): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB20813997(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def myfunc_1(): myarray = numpy.zeros((2, 2, 2)) print(myarray[:, 1, :]) @@ -1853,8 +1776,7 @@ def myfunc_1(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20605036(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ foo = { 'aaaa': { # A comment for no particular reason. 
@@ -1868,8 +1790,7 @@ def testB20605036(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20562732(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ foo = [ # Comment about first list item 'First item', @@ -1881,8 +1802,7 @@ def testB20562732(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20128830(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ a = { 'xxxxxxxxxxxxxxxxxxxx': { 'aaaa': @@ -1902,8 +1822,7 @@ def testB20128830(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB20073838(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class DummyModel(object): def do_nothing(self, class_1_count): @@ -1920,8 +1839,7 @@ def do_nothing(self, class_1_count): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19626808(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if True: aaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbb( 'ccccccccccc', ddddddddd='eeeee').fffffffff([ggggggggggggggggggggg]) @@ -1930,8 +1848,7 @@ def testB19626808(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19547210(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ while True: if True: if True: @@ -1945,8 +1862,7 @@ def testB19547210(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19377034(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): if (aaaaaaaaaaaaaaa.start >= aaaaaaaaaaaaaaa.end or bbbbbbbbbbbbbbb.start >= bbbbbbbbbbbbbbb.end): @@ -1956,8 +1872,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19372573(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def f(): if a: return 42 while True: @@ -1975,8 +1890,7 @@ def f(): style.SetGlobalStyle(style.CreateYapfStyle()) def testB19353268(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ a = {1, 2, 3}[x] b = {'foo': 42, 'bar': 
37}['foo'] """) @@ -1984,8 +1898,7 @@ def testB19353268(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19287512(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class Foo(object): def bar(self): @@ -1995,8 +1908,7 @@ def bar(self): .Mmmmmmmmmmmmmmmmmm(-1, 'permission error'))): self.assertRaises(nnnnnnnnnnnnnnnn.ooooo, ppppp.qqqqqqqqqqqqqqqqq) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class Foo(object): def bar(self): @@ -2011,8 +1923,7 @@ def bar(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB19194420(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ method.Set( 'long argument goes here that causes the line to break', lambda arg2=0.5: arg2) @@ -2021,7 +1932,7 @@ def testB19194420(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB19073499(self): - code = """\ + code = """\ instance = ( aaaaaaa.bbbbbbb().ccccccccccccccccc().ddddddddddd({ 'aa': 'context!' @@ -2033,8 +1944,7 @@ def testB19073499(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB18257115(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if True: if True: self._Test(aaaa, bbbbbbb.cccccccccc, dddddddd, eeeeeeeeeee, @@ -2044,8 +1954,7 @@ def testB18257115(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB18256666(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class Foo(object): def Bar(self): @@ -2063,8 +1972,7 @@ def Bar(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB18256826(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if True: pass # A multiline comment. 
@@ -2083,8 +1991,7 @@ def testB18256826(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB18255697(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ AAAAAAAAAAAAAAA = { 'XXXXXXXXXXXXXX': 4242, # Inline comment # Next comment @@ -2095,14 +2002,12 @@ def testB18255697(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB17534869(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: self.assertLess(abs(time.time()-aaaa.bbbbbbbbbbb( datetime.datetime.now())), 1) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: self.assertLess( abs(time.time() - aaaa.bbbbbbbbbbb(datetime.datetime.now())), 1) @@ -2111,16 +2016,14 @@ def testB17534869(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB17489866(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def f(): if True: if True: return aaaa.bbbbbbbbb(ccccccc=dddddddddddddd({('eeee', \ 'ffffffff'): str(j)})) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def f(): if True: if True: @@ -2131,8 +2034,7 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB17133019(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class aaaaaaaaaaaaaa(object): def bbbbbbbbbb(self): @@ -2143,8 +2045,7 @@ def bbbbbbbbbb(self): ), "rb") as gggggggggggggggggggg: print(gggggggggggggggggggg) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class aaaaaaaaaaaaaa(object): def bbbbbbbbbb(self): @@ -2158,8 +2059,7 @@ def bbbbbbbbbb(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB17011869(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = 
textwrap.dedent("""\ '''blah......''' class SomeClass(object): @@ -2170,8 +2070,7 @@ class SomeClass(object): 'DDDDDDDD': 0.4811 } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ '''blah......''' @@ -2187,16 +2086,14 @@ class SomeClass(object): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB16783631(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: with aaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccc(ddddddddddddd, eeeeeeeee=self.fffffffffffff )as gggg: pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: with aaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccc( ddddddddddddd, eeeeeeeee=self.fffffffffffff) as gggg: @@ -2206,14 +2103,12 @@ def testB16783631(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB16572361(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(self): def bar(my_dict_name): self.my_dict_name['foo-bar-baz-biz-boo-baa-baa'].IncrementBy.assert_called_once_with('foo_bar_baz_boo') """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(self): def bar(my_dict_name): @@ -2225,15 +2120,13 @@ def bar(my_dict_name): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15884241(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if 1: if 1: for row in AAAA: self.create(aaaaaaaa="/aaa/bbbb/cccc/dddddd/eeeeeeeeeeeeeeeeeeeeeeeeee/%s" % row [0].replace(".foo", ".bar"), aaaaa=bbb[1], ccccc=bbb[2], dddd=bbb[3], eeeeeeeeeee=[s.strip() for s in bbb[4].split(",")], ffffffff=[s.strip() for s in bbb[5].split(",")], gggggg=bbb[6]) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if 1: 
if 1: for row in AAAA: @@ -2251,8 +2144,7 @@ def testB15884241(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15697268(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def main(unused_argv): ARBITRARY_CONSTANT_A = 10 an_array_with_an_exceedingly_long_name = range(ARBITRARY_CONSTANT_A + 1) @@ -2261,8 +2153,7 @@ def main(unused_argv): a_long_name_slicing = an_array_with_an_exceedingly_long_name[:ARBITRARY_CONSTANT_A] bad_slice = ("I am a crazy, no good, string what's too long, etc." + " no really ")[:ARBITRARY_CONSTANT_A] """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def main(unused_argv): ARBITRARY_CONSTANT_A = 10 an_array_with_an_exceedingly_long_name = range(ARBITRARY_CONSTANT_A + 1) @@ -2292,16 +2183,14 @@ def testB15597568(self): (", and the process timed out." if did_time_out else ".")) % errorcode) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15542157(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ aaaaaaaaaaaa = bbbb.ccccccccccccccc(dddddd.eeeeeeeeeeeeee, ffffffffffffffffff, gggggg.hhhhhhhhhhhhhhhhh) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ aaaaaaaaaaaa = bbbb.ccccccccccccccc(dddddd.eeeeeeeeeeeeee, ffffffffffffffffff, gggggg.hhhhhhhhhhhhhhhhh) """) # noqa @@ -2309,8 +2198,7 @@ def testB15542157(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15438132(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if aaaaaaa.bbbbbbbbbb: cccccc.dddddddddd(eeeeeeeeeee=fffffffffffff.gggggggggggggggggg) if hhhhhh.iiiii.jjjjjjjjjjjjj: @@ -2326,8 +2214,7 @@ def 
testB15438132(self): lllll.mm), nnnnnnnnnn=ooooooo.pppppppppp) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if aaaaaaa.bbbbbbbbbb: cccccc.dddddddddd(eeeeeeeeeee=fffffffffffff.gggggggggggggggggg) if hhhhhh.iiiii.jjjjjjjjjjjjj: @@ -2346,7 +2233,7 @@ def testB15438132(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB14468247(self): - unformatted_code = """\ + unformatted_code = """\ call(a=1, b=2, ) @@ -2357,17 +2244,15 @@ def testB14468247(self): b=2, ) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB14406499(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo1(parameter_1, parameter_2, parameter_3, parameter_4, \ parameter_5, parameter_6): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo1(parameter_1, parameter_2, parameter_3, parameter_4, parameter_5, parameter_6): pass @@ -2376,21 +2261,18 @@ def foo1(parameter_1, parameter_2, parameter_3, parameter_4, parameter_5, self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB13900309(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ self.aaaaaaaaaaa( # A comment in the middle of it all. 948.0/3600, self.bbb.ccccccccccccccccccccc(dddddddddddddddd.eeee, True)) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ self.aaaaaaaaaaa( # A comment in the middle of it all. 
948.0 / 3600, self.bbb.ccccccccccccccccccccc(dddddddddddddddd.eeee, True)) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ aaaaaaaaaa.bbbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccccc( DC_1, (CL - 50, CL), AAAAAAAA, BBBBBBBBBBBBBBBB, 98.0, CCCCCCC).ddddddddd( # Look! A comment is here. @@ -2399,49 +2281,41 @@ def testB13900309(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc().dddddddddddddddddddddddddd(1, 2, 3, 4) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc( ).dddddddddddddddddddddddddd(1, 2, 3, 4) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc(x).dddddddddddddddddddddddddd(1, 2, 3, 4) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ aaaaaaaaaaaaaaaaaaaaaaaa.bbbbbbbbbbbbb.ccccccccccccccccccccccccc( x).dddddddddddddddddddddddddd(1, 2, 3, 4) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ aaaaaaaaaaaaaaaaaaaaaaaa(xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx).dddddddddddddddddddddddddd(1, 2, 3, 4) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = 
textwrap.dedent("""\ aaaaaaaaaaaaaaaaaaaaaaaa( xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx).dddddddddddddddddddddddddd(1, 2, 3, 4) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ aaaaaaaaaaaaaaaaaaaaaaaa().bbbbbbbbbbbbbbbbbbbbbbbb().ccccccccccccccccccc().\ dddddddddddddddddd().eeeeeeeeeeeeeeeeeeeee().fffffffffffffffff().gggggggggggggggggg() """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ aaaaaaaaaaaaaaaaaaaaaaaa().bbbbbbbbbbbbbbbbbbbbbbbb().ccccccccccccccccccc( ).dddddddddddddddddd().eeeeeeeeeeeeeeeeeeeee().fffffffffffffffff( ).gggggggggggggggggg() @@ -2450,8 +2324,7 @@ def testB13900309(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB67935687(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ Fetch( Raw('monarch.BorgTask', '/union/row_operator_action_delay'), {'borg_user': self.borg_user}) @@ -2459,15 +2332,13 @@ def testB67935687(self): llines = yapf_test_helper.ParseAndUnwrap(code) self.assertCodeEqual(code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ shelf_renderer.expand_text = text.translate_to_unicode( expand_text % { 'creator': creator }) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ shelf_renderer.expand_text = text.translate_to_unicode(expand_text % {'creator': creator}) """) # noqa diff --git a/yapftests/reformatter_facebook_test.py b/yapftests/reformatter_facebook_test.py index 14b07d06b..780b42440 100644 --- a/yapftests/reformatter_facebook_test.py +++ b/yapftests/reformatter_facebook_test.py @@ -29,14 +29,12 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreateFacebookStyle()) def testNoNeedForLineBreaks(self): - 
unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def overly_long_function_name( just_one_arg, **kwargs): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def overly_long_function_name(just_one_arg, **kwargs): pass """) @@ -44,15 +42,13 @@ def overly_long_function_name(just_one_arg, **kwargs): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDedentClosingBracket(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def overly_long_function_name( first_argument_on_the_same_line, second_argument_makes_the_line_too_long): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def overly_long_function_name( first_argument_on_the_same_line, second_argument_makes_the_line_too_long ): @@ -62,14 +58,12 @@ def overly_long_function_name( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBreakAfterOpeningBracketIfContentsTooBig(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def overly_long_function_name(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def overly_long_function_name( a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, \ v, w, x, y, z @@ -80,8 +74,7 @@ def overly_long_function_name( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDedentClosingBracketWithComments(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def overly_long_function_name( # comment about the first argument first_argument_with_a_very_long_name_or_so, @@ -89,8 +82,7 @@ def overly_long_function_name( second_argument_makes_the_line_too_long): pass """) - 
expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def overly_long_function_name( # comment about the first argument first_argument_with_a_very_long_name_or_so, @@ -103,8 +95,7 @@ def overly_long_function_name( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDedentImportAsNames(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ from module import ( internal_function as function, SOME_CONSTANT_NUMBER1, @@ -116,8 +107,7 @@ def testDedentImportAsNames(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDedentTestListGexp(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ try: pass except ( @@ -132,8 +122,7 @@ def testDedentTestListGexp(self): ) as exception: pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ try: pass except ( @@ -157,15 +146,13 @@ def testDedentTestListGexp(self): def testBrokenIdempotency(self): # TODO(ambv): The following behaviour should be fixed. 
- pass0_code = textwrap.dedent( - """\ + pass0_code = textwrap.dedent("""\ try: pass except (IOError, OSError, LookupError, RuntimeError, OverflowError) as exception: pass """) # noqa - pass1_code = textwrap.dedent( - """\ + pass1_code = textwrap.dedent("""\ try: pass except ( @@ -176,8 +163,7 @@ def testBrokenIdempotency(self): llines = yapf_test_helper.ParseAndUnwrap(pass0_code) self.assertCodeEqual(pass1_code, reformatter.Reformat(llines)) - pass2_code = textwrap.dedent( - """\ + pass2_code = textwrap.dedent("""\ try: pass except ( @@ -189,8 +175,7 @@ def testBrokenIdempotency(self): self.assertCodeEqual(pass2_code, reformatter.Reformat(llines)) def testIfExprHangingIndent(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: if True: if True: @@ -199,8 +184,7 @@ def testIfExprHangingIndent(self): self.foobars.counters['db.marshmellow_skins'] != 1): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: if True: if True: @@ -214,13 +198,11 @@ def testIfExprHangingIndent(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSimpleDedenting(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: self.assertEqual(result.reason_not_added, "current preflight is still running") """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: self.assertEqual( result.reason_not_added, "current preflight is still running" @@ -230,8 +212,7 @@ def testSimpleDedenting(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDedentingWithSubscripts(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class Foo: class Bar: @classmethod @@ -240,8 +221,7 @@ def baz(cls, clues_list, effect, constraints, constraint_manager): return 
cls.single_constraint_not(clues_lists, effect, constraints[0], constraint_manager) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class Foo: class Bar: @classmethod @@ -255,8 +235,7 @@ def baz(cls, clues_list, effect, constraints, constraint_manager): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testDedentingCallsWithInnerLists(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): def _(): cls.effect_clues = { @@ -267,8 +246,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDedentingListComprehension(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class Foo(): def _pack_results_for_constraint_or(): self.param_groups = dict( @@ -306,8 +284,7 @@ def _pack_results_for_constraint_or(): ('localhost', os.path.join(path, 'node_2.log'), super_parser) ] """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class Foo(): def _pack_results_for_constraint_or(): self.param_groups = dict( @@ -347,8 +324,7 @@ def _pack_results_for_constraint_or(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMustSplitDedenting(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): def _(): effect_line = FrontInput( @@ -360,8 +336,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDedentIfConditional(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): def _(): if True: @@ -375,8 +350,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testDedentSet(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class _(): def _(): assert set(self.constraint_links.get_links()) == set( @@ -392,8 +366,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def 
testDedentingInnerScope(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class Foo(): @classmethod def _pack_results_for_constraint_or(cls, combination, constraints): @@ -402,17 +375,16 @@ def _pack_results_for_constraint_or(cls, combination, constraints): constraints, InvestigationResult.OR ) """) # noqa - llines = yapf_test_helper.ParseAndUnwrap(code) + llines = yapf_test_helper.ParseAndUnwrap(code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(code, reformatted_code) def testCommentWithNewlinesInPrefix(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): if 0: return False @@ -425,8 +397,7 @@ def foo(): print(foo()) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): if 0: return False @@ -453,7 +424,7 @@ def testIfStmtClosingBracket(self): ): return False """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) diff --git a/yapftests/reformatter_pep8_test.py b/yapftests/reformatter_pep8_test.py index 19c294d18..67ddadc23 100644 --- a/yapftests/reformatter_pep8_test.py +++ b/yapftests/reformatter_pep8_test.py @@ -30,13 +30,11 @@ def setUpClass(cls): # pylint: disable=g-missing-super-call style.SetGlobalStyle(style.CreatePEP8Style()) def testIndent4(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if a+b: pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if a + b: pass """) @@ -44,8 +42,7 @@ def testIndent4(self): 
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSingleLineIfStatements(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ if True: a = 42 elif False: b = 42 else: c = 42 @@ -54,14 +51,12 @@ def testSingleLineIfStatements(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testBlankBetweenClassAndDef(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class Foo: def joe(): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class Foo: def joe(): @@ -71,8 +66,7 @@ def joe(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testBlankBetweenDefsInClass(self): - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ class TestClass: def __init__(self): self.running = False @@ -81,8 +75,7 @@ def run(self): def is_running(self): return self.running ''') - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ class TestClass: def __init__(self): @@ -98,13 +91,11 @@ def is_running(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSingleWhiteBeforeTrailingComment(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if a+b: # comment pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if a + b: # comment pass """) @@ -112,22 +103,19 @@ def testSingleWhiteBeforeTrailingComment(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSpaceBetweenEndingCommandAndClosingBracket(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a = ( 1, ) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a = (1, ) """) llines = 
yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testContinuedNonOutdentedLine(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class eld(d): if str(geom.geom_type).upper( ) != self.geom_type and not self.geom_type == 'GEOMETRY': @@ -137,8 +125,7 @@ class eld(d): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testWrappingPercentExpressions(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def f(): if True: zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxx.yyy + 1) @@ -146,8 +133,7 @@ def f(): zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxxxxxx + 1) zzzzz = '%s-%s'.ww(xxxxxxxxxxxxxxxxxxxxxxx + 1, xxxxxxxxxxxxxxxxxxxxx + 1) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def f(): if True: zzzzz = '%s-%s' % (xxxxxxxxxxxxxxxxxxxxxxxxxx + 1, @@ -163,14 +149,12 @@ def f(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testAlignClosingBracketWithVisualIndentation(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ TEST_LIST = ('foo', 'bar', # first comment 'baz' # second comment ) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ TEST_LIST = ( 'foo', 'bar', # first comment @@ -180,8 +164,7 @@ def testAlignClosingBracketWithVisualIndentation(self): llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def f(): def g(): @@ -190,8 +173,7 @@ def g(): ): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def f(): def g(): @@ -204,13 +186,11 @@ def g(): 
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testIndentSizeChanging(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: runtime_mins = (program_end_time - program_start_time).total_seconds() / 60.0 """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: runtime_mins = (program_end_time - program_start_time).total_seconds() / 60.0 @@ -219,8 +199,7 @@ def testIndentSizeChanging(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testHangingIndentCollision(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if (aaaaaaaaaaaaaa + bbbbbbbbbbbbbbbb == ccccccccccccccccc and xxxxxxxxxxxxx or yyyyyyyyyyyyyyyyy): pass elif (xxxxxxxxxxxxxxx(aaaaaaaaaaa, bbbbbbbbbbbbbb, cccccccccccc, dddddddddd=None)): @@ -234,8 +213,7 @@ def h(): for connection in itertools.chain(branch.contact, branch.address, morestuff.andmore.andmore.andmore.andmore.andmore.andmore.andmore): dosomething(connection) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if (aaaaaaaaaaaaaa + bbbbbbbbbbbbbbbb == ccccccccccccccccc and xxxxxxxxxxxxx or yyyyyyyyyyyyyyyyy): pass @@ -264,8 +242,7 @@ def testSplittingBeforeLogicalOperator(self): style.SetGlobalStyle( style.CreateStyleFromConfig( '{based_on_style: pep8, split_before_logical_operator: True}')) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): return bool(update.message.new_chat_member or update.message.left_chat_member or update.message.new_chat_title or update.message.new_chat_photo or @@ -274,8 +251,7 @@ def foo(): or update.message.migrate_to_chat_id or update.message.migrate_from_chat_id or update.message.pinned_message) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = 
textwrap.dedent("""\ def foo(): return bool( update.message.new_chat_member or update.message.left_chat_member @@ -289,20 +265,18 @@ def foo(): or update.message.pinned_message) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testContiguousListEndingWithComment(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: if True: keys.append(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) # may be unassigned. """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: if True: keys.append( @@ -316,13 +290,11 @@ def testSplittingBeforeFirstArgument(self): style.SetGlobalStyle( style.CreateStyleFromConfig( '{based_on_style: pep8, split_before_first_argument: True}')) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a_very_long_function_name(long_argument_name_1=1, long_argument_name_2=2, long_argument_name_3=3, long_argument_name_4=4) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a_very_long_function_name( long_argument_name_1=1, long_argument_name_2=2, @@ -330,19 +302,17 @@ def testSplittingBeforeFirstArgument(self): long_argument_name_4=4) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testSplittingExpressionsInsideSubscripts(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): df = df[(df['campaign_status'] == 'LIVE') & (df['action_status'] == 
'LIVE')] """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): df = df[(df['campaign_status'] == 'LIVE') & (df['action_status'] == 'LIVE')] @@ -351,15 +321,13 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplitListsAndDictSetMakersIfCommaTerminated(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ DJANGO_TEMPLATES_OPTIONS = {"context_processors": []} DJANGO_TEMPLATES_OPTIONS = {"context_processors": [],} x = ["context_processors"] x = ["context_processors",] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ DJANGO_TEMPLATES_OPTIONS = {"context_processors": []} DJANGO_TEMPLATES_OPTIONS = { "context_processors": [], @@ -373,15 +341,13 @@ def testSplitListsAndDictSetMakersIfCommaTerminated(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testSplitAroundNamedAssigns(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class a(): def a(): return a( aaaaaaaaaa=aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class a(): def a(): @@ -393,15 +359,13 @@ def a(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testUnaryOperator(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if not -3 < x < 3: pass if -3 < x < 3: pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if not -3 < x < 3: pass if -3 < x < 3: @@ -413,24 +377,21 @@ def testUnaryOperator(self): def testNoSplitBeforeDictValue(self): try: style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{based_on_style: pep8, ' - 'allow_split_before_dict_value: false, ' - 
'coalesce_brackets: true, ' - 'dedent_closing_brackets: true, ' - 'each_dict_entry_on_separate_line: true, ' - 'split_before_logical_operator: true}')) - - unformatted_code = textwrap.dedent( - """\ + style.CreateStyleFromConfig('{based_on_style: pep8, ' + 'allow_split_before_dict_value: false, ' + 'coalesce_brackets: true, ' + 'dedent_closing_brackets: true, ' + 'each_dict_entry_on_separate_line: true, ' + 'split_before_logical_operator: true}')) + + unformatted_code = textwrap.dedent("""\ some_dict = { 'title': _("I am example data"), 'description': _("Lorem ipsum dolor met sit amet elit, si vis pacem para bellum " "elites nihi very long string."), } """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ some_dict = { 'title': _("I am example data"), 'description': _( @@ -440,15 +401,13 @@ def testNoSplitBeforeDictValue(self): } """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ X = {'a': 1, 'b': 2, 'key': this_is_a_function_call_that_goes_over_the_column_limit_im_pretty_sure()} """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ X = { 'a': 1, 'b': 2, @@ -456,18 +415,16 @@ def testNoSplitBeforeDictValue(self): } """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ attrs = { 'category': category, 'role': forms.ModelChoiceField(label=_("Role"), required=False, queryset=category_roles, initial=selected_role, empty_label=_("No 
access"),), } """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ attrs = { 'category': category, 'role': forms.ModelChoiceField( @@ -480,19 +437,17 @@ def testNoSplitBeforeDictValue(self): } """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ css_class = forms.CharField( label=_("CSS class"), required=False, help_text=_("Optional CSS class used to customize this category appearance from templates."), ) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ css_class = forms.CharField( label=_("CSS class"), required=False, @@ -502,8 +457,8 @@ def testNoSplitBeforeDictValue(self): ) """) # noqa llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) @@ -518,7 +473,7 @@ def _(): cdffile['Latitude'][:] >= select_lat - radius) & ( cdffile['Latitude'][:] <= select_lat + radius)) """ - expected_code = """\ + expected_code = """\ def _(): include_values = np.where( (cdffile['Quality_Flag'][:] >= 5) & (cdffile['Day_Night_Flag'][:] == 1) @@ -527,7 +482,7 @@ def _(): & (cdffile['Latitude'][:] >= select_lat - radius) & (cdffile['Latitude'][:] <= select_lat + radius)) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertEqual(expected_code, reformatter.Reformat(llines)) def testNoBlankLinesOnlyForFirstNestedObject(self): @@ -545,7 +500,7 @@ def bar(self): bar docs """ ''' - expected_code = '''\ + 
expected_code = '''\ class Demo: """ Demo docs @@ -561,7 +516,7 @@ def bar(self): bar docs """ ''' - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertEqual(expected_code, reformatter.Reformat(llines)) def testSplitBeforeArithmeticOperators(self): @@ -579,9 +534,9 @@ def _(): raise ValueError('This is a long message that ends with an argument: ' + str(42)) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) @@ -591,12 +546,12 @@ def testListSplitting(self): (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,1), (1,10), (1,11), (1, 10), (1,11), (10,11)]) """ - expected_code = """\ + expected_code = """\ foo([(1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 1), (1, 10), (1, 11), (1, 10), (1, 11), (10, 11)]) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_code, reformatter.Reformat(llines)) def testNoBlankLineBeforeNestedFuncOrClass(self): @@ -606,7 +561,7 @@ def testNoBlankLineBeforeNestedFuncOrClass(self): '{based_on_style: pep8, ' 'blank_line_before_nested_class_or_def: false}')) - unformatted_code = '''\ + unformatted_code = '''\ def normal_function(): """Return the nested function.""" @@ -634,15 +589,14 @@ class nested_class(): return nested_function ''' - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: 
style.SetGlobalStyle(style.CreatePEP8Style()) def testParamListIndentationCollision1(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class _(): def __init__(self, title: Optional[str], diffs: Collection[BinaryDiff] = (), charset: Union[Type[AsciiCharset], Type[LineCharset]] = AsciiCharset, preprocess: Callable[[str], str] = identity, @@ -651,8 +605,7 @@ def __init__(self, title: Optional[str], diffs: Collection[BinaryDiff] = (), cha self._cs = charset self._preprocess = preprocess """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class _(): def __init__( @@ -671,8 +624,7 @@ def __init__( self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testParamListIndentationCollision2(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def simple_pass_function_with_an_extremely_long_name_and_some_arguments( argument0, argument1): pass @@ -681,8 +633,7 @@ def simple_pass_function_with_an_extremely_long_name_and_some_arguments( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testParamListIndentationCollision3(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def func1( arg1, arg2, @@ -700,13 +651,11 @@ def func2( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testTwoWordComparisonOperators(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ _ = (klsdfjdklsfjksdlfjdklsfjdslkfjsdkl is not ksldfjsdklfjdklsfjdklsfjdklsfjdsklfjdklsfj) _ = (klsdfjdklsfjksdlfjdklsfjdslkfjsdkl not in {ksldfjsdklfjdklsfjdklsfjdklsfjdsklfjdklsfj}) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ _ = (klsdfjdklsfjksdlfjdklsfjdslkfjsdkl is not ksldfjsdklfjdklsfjdklsfjdklsfjdsklfjdklsfj) _ = (klsdfjdklsfjksdlfjdklsfjdslkfjsdkl @@ -718,8 +667,7 @@ def testTwoWordComparisonOperators(self): 
@unittest.skipUnless(not py3compat.PY3, 'Requires Python 2.7') def testAsyncAsNonKeyword(self): # In Python 2, async may be used as a non-keyword identifier. - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ from util import async @@ -735,14 +683,12 @@ def bar(self): self.assertCodeEqual(code, reformatter.Reformat(llines, verify=False)) def testStableInlinedDictionaryFormatting(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _(): url = "http://{0}/axis-cgi/admin/param.cgi?{1}".format( value, urllib.urlencode({'action': 'update', 'parameter': value})) """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _(): url = "http://{0}/axis-cgi/admin/param.cgi?{1}".format( value, urllib.urlencode({ @@ -751,19 +697,18 @@ def _(): })) """) - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) - llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(reformatted_code) reformatted_code = reformatter.Reformat(llines) self.assertCodeEqual(expected_formatted_code, reformatted_code) @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testSpaceBetweenColonAndElipses(self): style.SetGlobalStyle(style.CreatePEP8Style()) - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class MyClass(ABC): place: ... 
@@ -774,11 +719,10 @@ class MyClass(ABC): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testSpaceBetweenDictColonAndElipses(self): style.SetGlobalStyle(style.CreatePEP8Style()) - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ {0:"...", 1:...} """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ {0: "...", 1: ...} """) @@ -788,8 +732,7 @@ def testSpaceBetweenDictColonAndElipses(self): class TestsForSpacesInsideBrackets(yapf_test_helper.YAPFTest): """Test the SPACE_INSIDE_BRACKETS style option.""" - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ foo() foo(1) foo(1,2) @@ -822,8 +765,7 @@ def testEnabled(self): style.SetGlobalStyle( style.CreateStyleFromConfig('{space_inside_brackets: True}')) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ foo() foo( 1 ) foo( 1, 2 ) @@ -861,8 +803,7 @@ def testEnabled(self): def testDefault(self): style.SetGlobalStyle(style.CreatePEP8Style()) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ foo() foo(1) foo(1, 2) @@ -901,8 +842,7 @@ def testDefault(self): def testAwait(self): style.SetGlobalStyle( style.CreateStyleFromConfig('{space_inside_brackets: True}')) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import asyncio import time @@ -915,8 +855,7 @@ async def main(): if (await get_html()): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import asyncio import time @@ -937,8 +876,7 @@ async def main(): class TestsForSpacesAroundSubscriptColon(yapf_test_helper.YAPFTest): """Test the SPACES_AROUND_SUBSCRIPT_COLON style option.""" - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a = list1[ : ] b = list2[ slice_start: ] c = list3[ slice_start:slice_end ] @@ 
-954,8 +892,7 @@ class TestsForSpacesAroundSubscriptColon(yapf_test_helper.YAPFTest): def testEnabled(self): style.SetGlobalStyle( style.CreateStyleFromConfig('{spaces_around_subscript_colon: True}')) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a = list1[:] b = list2[slice_start :] c = list3[slice_start : slice_end] @@ -972,13 +909,11 @@ def testEnabled(self): def testWithSpaceInsideBrackets(self): style.SetGlobalStyle( - style.CreateStyleFromConfig( - '{' - 'spaces_around_subscript_colon: true, ' - 'space_inside_brackets: true,' - '}')) - expected_formatted_code = textwrap.dedent( - """\ + style.CreateStyleFromConfig('{' + 'spaces_around_subscript_colon: true, ' + 'space_inside_brackets: true,' + '}')) + expected_formatted_code = textwrap.dedent("""\ a = list1[ : ] b = list2[ slice_start : ] c = list3[ slice_start : slice_end ] @@ -995,8 +930,7 @@ def testWithSpaceInsideBrackets(self): def testDefault(self): style.SetGlobalStyle(style.CreatePEP8Style()) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a = list1[:] b = list2[slice_start:] c = list3[slice_start:slice_end] diff --git a/yapftests/reformatter_python3_test.py b/yapftests/reformatter_python3_test.py index 88dd9d7bd..81e565326 100644 --- a/yapftests/reformatter_python3_test.py +++ b/yapftests/reformatter_python3_test.py @@ -33,13 +33,11 @@ def setUpClass(cls): # pylint: disable=g-missing-super-call style.SetGlobalStyle(style.CreatePEP8Style()) def testTypedNames(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def x(aaaaaaaaaaaaaaa:int,bbbbbbbbbbbbbbbb:str,ccccccccccccccc:dict,eeeeeeeeeeeeee:set={1, 2, 3})->bool: pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def x(aaaaaaaaaaaaaaa: int, bbbbbbbbbbbbbbbb: str, ccccccccccccccc: dict, @@ -50,13 +48,11 @@ def x(aaaaaaaaaaaaaaa: int, 
self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testTypedNameWithLongNamedArg(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def func(arg=long_function_call_that_pushes_the_line_over_eighty_characters()) -> ReturnType: pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def func(arg=long_function_call_that_pushes_the_line_over_eighty_characters() ) -> ReturnType: pass @@ -65,13 +61,11 @@ def func(arg=long_function_call_that_pushes_the_line_over_eighty_characters() self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testKeywordOnlyArgSpecifier(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(a, *, kw): return a+kw """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(a, *, kw): return a + kw """) @@ -80,15 +74,13 @@ def foo(a, *, kw): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testPEP448ParameterExpansion(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ { ** x } { **{} } { **{ **x }, **x } {'a': 1, **kw , 'b':3, **kw2 } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ {**x} {**{}} {**{**x}, **x} @@ -98,13 +90,11 @@ def testPEP448ParameterExpansion(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testAnnotations(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(a: list, b: "bar") -> dict: return a+b """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(a: list, b: "bar") -> dict: return a + b """) @@ -112,16 +102,15 @@ def foo(a: list, b: "bar") -> dict: self.assertCodeEqual(expected_formatted_code, 
reformatter.Reformat(llines)) def testExecAsNonKeyword(self): - unformatted_code = 'methods.exec( sys.modules[name])\n' + unformatted_code = 'methods.exec( sys.modules[name])\n' expected_formatted_code = 'methods.exec(sys.modules[name])\n' - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testAsyncFunctions(self): if sys.version_info[1] < 5: return - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ import asyncio import time @@ -141,7 +130,7 @@ async def main(): self.assertCodeEqual(code, reformatter.Reformat(llines, verify=False)) def testNoSpacesAroundPowerOperator(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ a**b """) expected_formatted_code = textwrap.dedent("""\ @@ -154,13 +143,13 @@ def testNoSpacesAroundPowerOperator(self): '{based_on_style: pep8, SPACES_AROUND_POWER_OPERATOR: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testSpacesAroundDefaultOrNamedAssign(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ f(a=5) """) expected_formatted_code = textwrap.dedent("""\ @@ -174,14 +163,13 @@ def testSpacesAroundDefaultOrNamedAssign(self): 'SPACES_AROUND_DEFAULT_OR_NAMED_ASSIGN: True}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testTypeHint(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(x: 
int=42): pass @@ -189,8 +177,7 @@ def foo(x: int=42): def foo2(x: 'int' =42): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(x: int = 42): pass @@ -202,18 +189,17 @@ def foo2(x: 'int' = 42): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMatrixMultiplication(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ a=b@c """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a = b @ c """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testNoneKeyword(self): - code = """\ + code = """\ None.__ne__() """ llines = yapf_test_helper.ParseAndUnwrap(code) @@ -222,8 +208,7 @@ def testNoneKeyword(self): def testAsyncWithPrecedingComment(self): if sys.version_info[1] < 5: return - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import asyncio # Comment @@ -233,8 +218,7 @@ async def bar(): async def foo(): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import asyncio @@ -252,8 +236,7 @@ async def foo(): def testAsyncFunctionsNested(self): if sys.version_info[1] < 5: return - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ async def outer(): async def inner(): @@ -265,15 +248,13 @@ async def inner(): def testKeepTypesIntact(self): if sys.version_info[1] < 5: return - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def _ReduceAbstractContainers( self, *args: Optional[automation_converter.PyiCollectionAbc]) -> List[ automation_converter.PyiCollectionAbc]: pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def _ReduceAbstractContainers( self, *args: 
Optional[automation_converter.PyiCollectionAbc] ) -> List[automation_converter.PyiCollectionAbc]: @@ -285,15 +266,13 @@ def _ReduceAbstractContainers( def testContinuationIndentWithAsync(self): if sys.version_info[1] < 5: return - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ async def start_websocket(): async with session.ws_connect( r"ws://a_really_long_long_long_long_long_long_url") as ws: pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ async def start_websocket(): async with session.ws_connect( r"ws://a_really_long_long_long_long_long_long_url") as ws: @@ -367,15 +346,15 @@ def run_sync_in_worker_thread(sync_fn, *args, cancellable=False, limiter=None): 'split_before_first_argument: true}')) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) def testDictUnpacking(self): if sys.version_info[1] < 5: return - unformatted_code = """\ + unformatted_code = """\ class Foo: def foo(self): foofoofoofoofoofoofoofoo('foofoofoofoofoo', { @@ -394,7 +373,7 @@ def foo(self): **foofoofoo }) """ - llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) + llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testMultilineFormatString(self): @@ -422,7 +401,7 @@ def dirichlet(x12345678901234567890123456789012345678901234567890=...) 
-> None: self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionTypedReturnNextLine(self): - code = """\ + code = """\ def _GenerateStatsEntries( process_id: Text, timestamp: Optional[ffffffff.FFFFFFFFFFF] = None @@ -433,7 +412,7 @@ def _GenerateStatsEntries( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testFunctionTypedReturnSameLine(self): - code = """\ + code = """\ def rrrrrrrrrrrrrrrrrrrrrr( ccccccccccccccccccccccc: Tuple[Text, Text]) -> List[Tuple[Text, Text]]: pass @@ -444,8 +423,7 @@ def rrrrrrrrrrrrrrrrrrrrrr( def testAsyncForElseNotIndentedInsideBody(self): if sys.version_info[1] < 5: return - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ async def fn(): async for message in websocket: for i in range(10): @@ -461,8 +439,7 @@ async def fn(): def testForElseInAsyncNotMixedWithAsyncFor(self): if sys.version_info[1] < 5: return - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ async def fn(): for i in range(10): pass @@ -473,14 +450,12 @@ async def fn(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testParameterListIndentationConflicts(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def raw_message( # pylint: disable=too-many-arguments self, text, user_id=1000, chat_type='private', forward_date=None, forward_from=None): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def raw_message( # pylint: disable=too-many-arguments self, text, diff --git a/yapftests/reformatter_style_config_test.py b/yapftests/reformatter_style_config_test.py index 6746ba0ed..c5726cb30 100644 --- a/yapftests/reformatter_style_config_test.py +++ b/yapftests/reformatter_style_config_test.py @@ -30,30 +30,26 @@ def setUp(self): def testSetGlobalStyle(self): try: style.SetGlobalStyle(style.CreateYapfStyle()) - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ for i 
in range(5): print('bar') """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ for i in range(5): print('bar') """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) style.DEFAULT_STYLE = self.current_style - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ for i in range(5): print('bar') """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ for i in range(5): print('bar') """) @@ -62,35 +58,32 @@ def testSetGlobalStyle(self): def testOperatorNoSpaceStyle(self): try: - sympy_style = style.CreatePEP8Style() + sympy_style = style.CreatePEP8Style() sympy_style['NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS'] = \ style._StringSetConverter('*,/') style.SetGlobalStyle(sympy_style) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a = 1+2 * 3 - 4 / 5 b = '0' * 1 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a = 1 + 2*3 - 4/5 b = '0'*1 """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) style.DEFAULT_STYLE = self.current_style def testOperatorPrecedenceStyle(self): try: - pep8_with_precedence = style.CreatePEP8Style() + pep8_with_precedence = style.CreatePEP8Style() pep8_with_precedence['ARITHMETIC_PRECEDENCE_INDICATION'] = True style.SetGlobalStyle(pep8_with_precedence) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ 1+2 (1 + 2) * (3 - (4 / 5)) a = 1 * 
2 + 3 / 4 @@ -105,8 +98,7 @@ def testOperatorPrecedenceStyle(self): j = (1 * 2 - 3) + 4 k = (1 * 2 * 3) + (4 * 5 * 6 * 7 * 8) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ 1 + 2 (1+2) * (3 - (4/5)) a = 1*2 + 3/4 @@ -123,20 +115,19 @@ def testOperatorPrecedenceStyle(self): """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines)) finally: style.SetGlobalStyle(style.CreatePEP8Style()) style.DEFAULT_STYLE = self.current_style def testNoSplitBeforeFirstArgumentStyle1(self): try: - pep8_no_split_before_first = style.CreatePEP8Style() + pep8_no_split_before_first = style.CreatePEP8Style() pep8_no_split_before_first['SPLIT_BEFORE_FIRST_ARGUMENT'] = False - pep8_no_split_before_first['SPLIT_BEFORE_NAMED_ASSIGNS'] = False + pep8_no_split_before_first['SPLIT_BEFORE_NAMED_ASSIGNS'] = False style.SetGlobalStyle(pep8_no_split_before_first) - formatted_code = textwrap.dedent( - """\ + formatted_code = textwrap.dedent("""\ # Example from in-code MustSplit comments foo = outer_function_call(fitting_inner_function_call(inner_arg1, inner_arg2), outer_arg1, outer_arg2) @@ -173,12 +164,11 @@ def testNoSplitBeforeFirstArgumentStyle1(self): def testNoSplitBeforeFirstArgumentStyle2(self): try: - pep8_no_split_before_first = style.CreatePEP8Style() + pep8_no_split_before_first = style.CreatePEP8Style() pep8_no_split_before_first['SPLIT_BEFORE_FIRST_ARGUMENT'] = False - pep8_no_split_before_first['SPLIT_BEFORE_NAMED_ASSIGNS'] = True + pep8_no_split_before_first['SPLIT_BEFORE_NAMED_ASSIGNS'] = True style.SetGlobalStyle(pep8_no_split_before_first) - formatted_code = textwrap.dedent( - """\ + formatted_code = textwrap.dedent("""\ # Examples Issue#556 i_take_a_lot_of_params(arg1, param1=very_long_expression1(), diff --git a/yapftests/reformatter_verify_test.py 
b/yapftests/reformatter_verify_test.py index 2abbd19ff..33ba3a614 100644 --- a/yapftests/reformatter_verify_test.py +++ b/yapftests/reformatter_verify_test.py @@ -32,8 +32,7 @@ def setUpClass(cls): style.SetGlobalStyle(style.CreatePEP8Style()) def testVerifyException(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class ABC(metaclass=type): pass """) @@ -43,23 +42,20 @@ class ABC(metaclass=type): reformatter.Reformat(llines) # verify should be False by default. def testNoVerify(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class ABC(metaclass=type): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class ABC(metaclass=type): pass """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines, verify=False)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines, verify=False)) def testVerifyFutureImport(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ from __future__ import print_function def call_my_function(the_function): @@ -72,8 +68,7 @@ def call_my_function(the_function): with self.assertRaises(verifier.InternalError): reformatter.Reformat(llines, verify=True) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ from __future__ import print_function @@ -85,12 +80,11 @@ def call_my_function(the_function): call_my_function(print) """) llines = yapf_test_helper.ParseAndUnwrap(unformatted_code) - self.assertCodeEqual( - expected_formatted_code, reformatter.Reformat(llines, verify=False)) + self.assertCodeEqual(expected_formatted_code, + reformatter.Reformat(llines, verify=False)) def testContinuationLineShouldBeDistinguished(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ class Foo(object): def 
bar(self): diff --git a/yapftests/split_penalty_test.py b/yapftests/split_penalty_test.py index 24226cbac..f7474a398 100644 --- a/yapftests/split_penalty_test.py +++ b/yapftests/split_penalty_test.py @@ -26,10 +26,10 @@ from yapftests import yapf_test_helper -UNBREAKABLE = split_penalty.UNBREAKABLE +UNBREAKABLE = split_penalty.UNBREAKABLE VERY_STRONGLY_CONNECTED = split_penalty.VERY_STRONGLY_CONNECTED -DOTTED_NAME = split_penalty.DOTTED_NAME -STRONGLY_CONNECTED = split_penalty.STRONGLY_CONNECTED +DOTTED_NAME = split_penalty.DOTTED_NAME +STRONGLY_CONNECTED = split_penalty.STRONGLY_CONNECTED class SplitPenaltyTest(yapf_test_helper.YAPFTest): @@ -68,12 +68,9 @@ def FlattenRec(tree): if pytree_utils.NodeName(tree) in pytree_utils.NONSEMANTIC_TOKENS: return [] if isinstance(tree, pytree.Leaf): - return [ - ( - tree.value, - pytree_utils.GetNodeAnnotation( - tree, pytree_utils.Annotation.SPLIT_PENALTY)) - ] + return [(tree.value, + pytree_utils.GetNodeAnnotation( + tree, pytree_utils.Annotation.SPLIT_PENALTY))] nodes = [] for node in tree.children: nodes += FlattenRec(node) @@ -88,194 +85,181 @@ def foo(x): pass """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('def', None), - ('foo', UNBREAKABLE), - ('(', UNBREAKABLE), - ('x', None), - (')', STRONGLY_CONNECTED), - (':', UNBREAKABLE), - ('pass', None), - ]) + self._CheckPenalties(tree, [ + ('def', None), + ('foo', UNBREAKABLE), + ('(', UNBREAKABLE), + ('x', None), + (')', STRONGLY_CONNECTED), + (':', UNBREAKABLE), + ('pass', None), + ]) # Test function definition with trailing comment. 
- code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" def foo(x): # trailing comment pass """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('def', None), - ('foo', UNBREAKABLE), - ('(', UNBREAKABLE), - ('x', None), - (')', STRONGLY_CONNECTED), - (':', UNBREAKABLE), - ('pass', None), - ]) + self._CheckPenalties(tree, [ + ('def', None), + ('foo', UNBREAKABLE), + ('(', UNBREAKABLE), + ('x', None), + (')', STRONGLY_CONNECTED), + (':', UNBREAKABLE), + ('pass', None), + ]) # Test class definitions. - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" class A: pass class B(A): pass """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('class', None), - ('A', UNBREAKABLE), - (':', UNBREAKABLE), - ('pass', None), - ('class', None), - ('B', UNBREAKABLE), - ('(', UNBREAKABLE), - ('A', None), - (')', None), - (':', UNBREAKABLE), - ('pass', None), - ]) + self._CheckPenalties(tree, [ + ('class', None), + ('A', UNBREAKABLE), + (':', UNBREAKABLE), + ('pass', None), + ('class', None), + ('B', UNBREAKABLE), + ('(', UNBREAKABLE), + ('A', None), + (')', None), + (':', UNBREAKABLE), + ('pass', None), + ]) # Test lambda definitions. code = textwrap.dedent(r""" lambda a, b: None """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('lambda', None), - ('a', VERY_STRONGLY_CONNECTED), - (',', VERY_STRONGLY_CONNECTED), - ('b', VERY_STRONGLY_CONNECTED), - (':', VERY_STRONGLY_CONNECTED), - ('None', VERY_STRONGLY_CONNECTED), - ]) + self._CheckPenalties(tree, [ + ('lambda', None), + ('a', VERY_STRONGLY_CONNECTED), + (',', VERY_STRONGLY_CONNECTED), + ('b', VERY_STRONGLY_CONNECTED), + (':', VERY_STRONGLY_CONNECTED), + ('None', VERY_STRONGLY_CONNECTED), + ]) # Test dotted names. 
code = textwrap.dedent(r""" import a.b.c """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('import', None), - ('a', None), - ('.', UNBREAKABLE), - ('b', UNBREAKABLE), - ('.', UNBREAKABLE), - ('c', UNBREAKABLE), - ]) + self._CheckPenalties(tree, [ + ('import', None), + ('a', None), + ('.', UNBREAKABLE), + ('b', UNBREAKABLE), + ('.', UNBREAKABLE), + ('c', UNBREAKABLE), + ]) def testStronglyConnected(self): # Test dictionary keys. - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" a = { 'x': 42, y(lambda a: 23): 37, } """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('a', None), - ('=', None), - ('{', None), - ("'x'", None), - (':', STRONGLY_CONNECTED), - ('42', None), - (',', None), - ('y', None), - ('(', UNBREAKABLE), - ('lambda', STRONGLY_CONNECTED), - ('a', VERY_STRONGLY_CONNECTED), - (':', VERY_STRONGLY_CONNECTED), - ('23', VERY_STRONGLY_CONNECTED), - (')', VERY_STRONGLY_CONNECTED), - (':', STRONGLY_CONNECTED), - ('37', None), - (',', None), - ('}', None), - ]) + self._CheckPenalties(tree, [ + ('a', None), + ('=', None), + ('{', None), + ("'x'", None), + (':', STRONGLY_CONNECTED), + ('42', None), + (',', None), + ('y', None), + ('(', UNBREAKABLE), + ('lambda', STRONGLY_CONNECTED), + ('a', VERY_STRONGLY_CONNECTED), + (':', VERY_STRONGLY_CONNECTED), + ('23', VERY_STRONGLY_CONNECTED), + (')', VERY_STRONGLY_CONNECTED), + (':', STRONGLY_CONNECTED), + ('37', None), + (',', None), + ('}', None), + ]) # Test list comprehension. 
code = textwrap.dedent(r""" [a for a in foo if a.x == 37] """) tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('[', None), - ('a', None), - ('for', 0), - ('a', STRONGLY_CONNECTED), - ('in', STRONGLY_CONNECTED), - ('foo', STRONGLY_CONNECTED), - ('if', 0), - ('a', STRONGLY_CONNECTED), - ('.', VERY_STRONGLY_CONNECTED), - ('x', DOTTED_NAME), - ('==', STRONGLY_CONNECTED), - ('37', STRONGLY_CONNECTED), - (']', None), - ]) + self._CheckPenalties(tree, [ + ('[', None), + ('a', None), + ('for', 0), + ('a', STRONGLY_CONNECTED), + ('in', STRONGLY_CONNECTED), + ('foo', STRONGLY_CONNECTED), + ('if', 0), + ('a', STRONGLY_CONNECTED), + ('.', VERY_STRONGLY_CONNECTED), + ('x', DOTTED_NAME), + ('==', STRONGLY_CONNECTED), + ('37', STRONGLY_CONNECTED), + (']', None), + ]) def testFuncCalls(self): code = 'foo(1, 2, 3)\n' tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('foo', None), - ('(', UNBREAKABLE), - ('1', None), - (',', UNBREAKABLE), - ('2', None), - (',', UNBREAKABLE), - ('3', None), - (')', VERY_STRONGLY_CONNECTED), - ]) + self._CheckPenalties(tree, [ + ('foo', None), + ('(', UNBREAKABLE), + ('1', None), + (',', UNBREAKABLE), + ('2', None), + (',', UNBREAKABLE), + ('3', None), + (')', VERY_STRONGLY_CONNECTED), + ]) # Now a method call, which has more than one trailer code = 'foo.bar.baz(1, 2, 3)\n' tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('foo', None), - ('.', VERY_STRONGLY_CONNECTED), - ('bar', DOTTED_NAME), - ('.', VERY_STRONGLY_CONNECTED), - ('baz', DOTTED_NAME), - ('(', STRONGLY_CONNECTED), - ('1', None), - (',', UNBREAKABLE), - ('2', None), - (',', UNBREAKABLE), - ('3', None), - (')', VERY_STRONGLY_CONNECTED), - ]) + self._CheckPenalties(tree, [ + ('foo', None), + ('.', VERY_STRONGLY_CONNECTED), + ('bar', DOTTED_NAME), + ('.', VERY_STRONGLY_CONNECTED), + ('baz', DOTTED_NAME), + ('(', STRONGLY_CONNECTED), + ('1', None), + (',', UNBREAKABLE), + ('2', None), + (',', 
UNBREAKABLE), + ('3', None), + (')', VERY_STRONGLY_CONNECTED), + ]) # Test single generator argument. code = 'max(i for i in xrange(10))\n' tree = self._ParseAndComputePenalties(code) - self._CheckPenalties( - tree, [ - ('max', None), - ('(', UNBREAKABLE), - ('i', 0), - ('for', 0), - ('i', STRONGLY_CONNECTED), - ('in', STRONGLY_CONNECTED), - ('xrange', STRONGLY_CONNECTED), - ('(', UNBREAKABLE), - ('10', STRONGLY_CONNECTED), - (')', VERY_STRONGLY_CONNECTED), - (')', VERY_STRONGLY_CONNECTED), - ]) + self._CheckPenalties(tree, [ + ('max', None), + ('(', UNBREAKABLE), + ('i', 0), + ('for', 0), + ('i', STRONGLY_CONNECTED), + ('in', STRONGLY_CONNECTED), + ('xrange', STRONGLY_CONNECTED), + ('(', UNBREAKABLE), + ('10', STRONGLY_CONNECTED), + (')', VERY_STRONGLY_CONNECTED), + (')', VERY_STRONGLY_CONNECTED), + ]) if __name__ == '__main__': diff --git a/yapftests/style_test.py b/yapftests/style_test.py index 4aceba3d0..8a37f9535 100644 --- a/yapftests/style_test.py +++ b/yapftests/style_test.py @@ -50,8 +50,8 @@ def testContinuationAlignStyleStringConverter(self): 'VALIGN-RIGHT') with self.assertRaises(ValueError) as ctx: style._ContinuationAlignStyleStringConverter('blahblah') - self.assertIn( - "unknown continuation align style: 'blahblah'", str(ctx.exception)) + self.assertIn("unknown continuation align style: 'blahblah'", + str(ctx.exception)) def testStringListConverter(self): self.assertEqual(style._StringListConverter('foo, bar'), ['foo', 'bar']) @@ -136,8 +136,7 @@ def tearDownClass(cls): # pylint: disable=g-missing-super-call shutil.rmtree(cls.test_tmpdir) def testDefaultBasedOnStyle(self): - cfg = textwrap.dedent( - u'''\ + cfg = textwrap.dedent(u'''\ [style] continuation_indent_width = 20 ''') @@ -147,8 +146,7 @@ def testDefaultBasedOnStyle(self): self.assertEqual(cfg['CONTINUATION_INDENT_WIDTH'], 20) def testDefaultBasedOnPEP8Style(self): - cfg = textwrap.dedent( - u'''\ + cfg = textwrap.dedent(u'''\ [style] based_on_style = pep8 continuation_indent_width = 40 @@ 
-159,8 +157,7 @@ def testDefaultBasedOnPEP8Style(self): self.assertEqual(cfg['CONTINUATION_INDENT_WIDTH'], 40) def testDefaultBasedOnGoogleStyle(self): - cfg = textwrap.dedent( - u'''\ + cfg = textwrap.dedent(u'''\ [style] based_on_style = google continuation_indent_width = 20 @@ -171,8 +168,7 @@ def testDefaultBasedOnGoogleStyle(self): self.assertEqual(cfg['CONTINUATION_INDENT_WIDTH'], 20) def testDefaultBasedOnFacebookStyle(self): - cfg = textwrap.dedent( - u'''\ + cfg = textwrap.dedent(u'''\ [style] based_on_style = facebook continuation_indent_width = 20 @@ -183,8 +179,7 @@ def testDefaultBasedOnFacebookStyle(self): self.assertEqual(cfg['CONTINUATION_INDENT_WIDTH'], 20) def testBoolOptionValue(self): - cfg = textwrap.dedent( - u'''\ + cfg = textwrap.dedent(u'''\ [style] based_on_style = pep8 SPLIT_BEFORE_NAMED_ASSIGNS=False @@ -197,8 +192,7 @@ def testBoolOptionValue(self): self.assertEqual(cfg['SPLIT_BEFORE_LOGICAL_OPERATOR'], True) def testStringListOptionValue(self): - cfg = textwrap.dedent( - u'''\ + cfg = textwrap.dedent(u'''\ [style] based_on_style = pep8 I18N_FUNCTION_CALL = N_, V_, T_ @@ -224,8 +218,7 @@ def testErrorNoStyleSection(self): style.CreateStyleFromConfig(filepath) def testErrorUnknownStyleOption(self): - cfg = textwrap.dedent( - u'''\ + cfg = textwrap.dedent(u'''\ [style] indent_width=2 hummus=2 @@ -242,7 +235,7 @@ def testPyprojectTomlNoYapfSection(self): return filepath = os.path.join(self.test_tmpdir, 'pyproject.toml') - _ = open(filepath, 'w') + _ = open(filepath, 'w') with self.assertRaisesRegex(style.StyleConfigError, 'Unable to find section'): style.CreateStyleFromConfig(filepath) @@ -253,8 +246,7 @@ def testPyprojectTomlParseYapfSection(self): except ImportError: return - cfg = textwrap.dedent( - u'''\ + cfg = textwrap.dedent(u'''\ [tool.yapf] based_on_style = "pep8" continuation_indent_width = 40 @@ -284,12 +276,12 @@ def testDefaultBasedOnStyle(self): self.assertEqual(cfg['INDENT_WIDTH'], 2) def 
testDefaultBasedOnStyleBadDict(self): - self.assertRaisesRegex( - style.StyleConfigError, 'Unknown style option', - style.CreateStyleFromConfig, {'based_on_styl': 'pep8'}) - self.assertRaisesRegex( - style.StyleConfigError, 'not a valid', style.CreateStyleFromConfig, - {'INDENT_WIDTH': 'FOUR'}) + self.assertRaisesRegex(style.StyleConfigError, 'Unknown style option', + style.CreateStyleFromConfig, + {'based_on_styl': 'pep8'}) + self.assertRaisesRegex(style.StyleConfigError, 'not a valid', + style.CreateStyleFromConfig, + {'INDENT_WIDTH': 'FOUR'}) class StyleFromCommandLine(yapf_test_helper.YAPFTest): @@ -323,15 +315,12 @@ def testDefaultBasedOnDetaultTypeString(self): self.assertIsInstance(cfg, dict) def testDefaultBasedOnStyleBadString(self): - self.assertRaisesRegex( - style.StyleConfigError, 'Unknown style option', - style.CreateStyleFromConfig, '{based_on_styl: pep8}') - self.assertRaisesRegex( - style.StyleConfigError, 'not a valid', style.CreateStyleFromConfig, - '{INDENT_WIDTH: FOUR}') - self.assertRaisesRegex( - style.StyleConfigError, 'Invalid style dict', - style.CreateStyleFromConfig, '{based_on_style: pep8') + self.assertRaisesRegex(style.StyleConfigError, 'Unknown style option', + style.CreateStyleFromConfig, '{based_on_styl: pep8}') + self.assertRaisesRegex(style.StyleConfigError, 'not a valid', + style.CreateStyleFromConfig, '{INDENT_WIDTH: FOUR}') + self.assertRaisesRegex(style.StyleConfigError, 'Invalid style dict', + style.CreateStyleFromConfig, '{based_on_style: pep8') class StyleHelp(yapf_test_helper.YAPFTest): diff --git a/yapftests/subtype_assigner_test.py b/yapftests/subtype_assigner_test.py index 222153db4..8616169c9 100644 --- a/yapftests/subtype_assigner_test.py +++ b/yapftests/subtype_assigner_test.py @@ -35,11 +35,9 @@ def _CheckFormatTokenSubtypes(self, llines, list_of_expected): """ actual = [] for lline in llines: - filtered_values = [ - (ft.value, ft.subtypes) - for ft in lline.tokens - if ft.name not in 
pytree_utils.NONSEMANTIC_TOKENS - ] + filtered_values = [(ft.value, ft.subtypes) + for ft in lline.tokens + if ft.name not in pytree_utils.NONSEMANTIC_TOKENS] if filtered_values: actual.append(filtered_values) @@ -47,263 +45,242 @@ def _CheckFormatTokenSubtypes(self, llines, list_of_expected): def testFuncDefDefaultAssign(self): self.maxDiff = None # pylint: disable=invalid-name - code = textwrap.dedent( - r""" + code = textwrap.dedent(r""" def foo(a=37, *b, **c): return -x[:42] """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes( - llines, [ - [ - ('def', {subtypes.NONE}), - ('foo', {subtypes.FUNC_DEF}), - ('(', {subtypes.NONE}), - ( - 'a', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - subtypes.PARAMETER_START, - }), - ( - '=', { - subtypes.DEFAULT_OR_NAMED_ASSIGN, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - ( - '37', { - subtypes.NONE, - subtypes.PARAMETER_STOP, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - (',', {subtypes.NONE}), - ( - '*', { - subtypes.PARAMETER_START, - subtypes.VARARGS_STAR, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - ( - 'b', { - subtypes.NONE, - subtypes.PARAMETER_STOP, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - (',', {subtypes.NONE}), - ( - '**', { - subtypes.PARAMETER_START, - subtypes.KWARGS_STAR_STAR, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - ( - 'c', { - subtypes.NONE, - subtypes.PARAMETER_STOP, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - (')', {subtypes.NONE}), - (':', {subtypes.NONE}), - ], - [ - ('return', {subtypes.NONE}), - ('-', {subtypes.UNARY_OPERATOR}), - ('x', {subtypes.NONE}), - ('[', {subtypes.SUBSCRIPT_BRACKET}), - (':', {subtypes.SUBSCRIPT_COLON}), - ('42', {subtypes.NONE}), - (']', {subtypes.SUBSCRIPT_BRACKET}), - ], - ]) + self._CheckFormatTokenSubtypes(llines, [ + [ + ('def', {subtypes.NONE}), + ('foo', {subtypes.FUNC_DEF}), + ('(', {subtypes.NONE}), + ('a', { + subtypes.NONE, + 
subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + subtypes.PARAMETER_START, + }), + ('=', { + subtypes.DEFAULT_OR_NAMED_ASSIGN, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + ('37', { + subtypes.NONE, + subtypes.PARAMETER_STOP, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + (',', {subtypes.NONE}), + ('*', { + subtypes.PARAMETER_START, + subtypes.VARARGS_STAR, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + ('b', { + subtypes.NONE, + subtypes.PARAMETER_STOP, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + (',', {subtypes.NONE}), + ('**', { + subtypes.PARAMETER_START, + subtypes.KWARGS_STAR_STAR, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + ('c', { + subtypes.NONE, + subtypes.PARAMETER_STOP, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + (')', {subtypes.NONE}), + (':', {subtypes.NONE}), + ], + [ + ('return', {subtypes.NONE}), + ('-', {subtypes.UNARY_OPERATOR}), + ('x', {subtypes.NONE}), + ('[', {subtypes.SUBSCRIPT_BRACKET}), + (':', {subtypes.SUBSCRIPT_COLON}), + ('42', {subtypes.NONE}), + (']', {subtypes.SUBSCRIPT_BRACKET}), + ], + ]) def testFuncCallWithDefaultAssign(self): - code = textwrap.dedent(r""" + code = textwrap.dedent(r""" foo(x, a='hello world') """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes( - llines, [ - [ - ('foo', {subtypes.NONE}), - ('(', {subtypes.NONE}), - ( - 'x', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - (',', {subtypes.NONE}), - ( - 'a', { - subtypes.NONE, - subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, - }), - ('=', {subtypes.DEFAULT_OR_NAMED_ASSIGN}), - ("'hello world'", {subtypes.NONE}), - (')', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes(llines, [ + [ + ('foo', {subtypes.NONE}), + ('(', {subtypes.NONE}), + ('x', { + subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + (',', {subtypes.NONE}), + ('a', { + subtypes.NONE, + subtypes.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST, + }), + ('=', {subtypes.DEFAULT_OR_NAMED_ASSIGN}), + 
("'hello world'", {subtypes.NONE}), + (')', {subtypes.NONE}), + ], + ]) def testSetComprehension(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ def foo(strs): return {s.lower() for s in strs} """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes( - llines, [ - [ - ('def', {subtypes.NONE}), - ('foo', {subtypes.FUNC_DEF}), - ('(', {subtypes.NONE}), - ( - 'strs', { - subtypes.NONE, - subtypes.PARAMETER_START, - subtypes.PARAMETER_STOP, - }), - (')', {subtypes.NONE}), - (':', {subtypes.NONE}), - ], - [ - ('return', {subtypes.NONE}), - ('{', {subtypes.NONE}), - ('s', {subtypes.COMP_EXPR}), - ('.', {subtypes.COMP_EXPR}), - ('lower', {subtypes.COMP_EXPR}), - ('(', {subtypes.COMP_EXPR}), - (')', {subtypes.COMP_EXPR}), - ('for', { - subtypes.DICT_SET_GENERATOR, - subtypes.COMP_FOR, - }), - ('s', {subtypes.COMP_FOR}), - ('in', {subtypes.COMP_FOR}), - ('strs', {subtypes.COMP_FOR}), - ('}', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes(llines, [ + [ + ('def', {subtypes.NONE}), + ('foo', {subtypes.FUNC_DEF}), + ('(', {subtypes.NONE}), + ('strs', { + subtypes.NONE, + subtypes.PARAMETER_START, + subtypes.PARAMETER_STOP, + }), + (')', {subtypes.NONE}), + (':', {subtypes.NONE}), + ], + [ + ('return', {subtypes.NONE}), + ('{', {subtypes.NONE}), + ('s', {subtypes.COMP_EXPR}), + ('.', {subtypes.COMP_EXPR}), + ('lower', {subtypes.COMP_EXPR}), + ('(', {subtypes.COMP_EXPR}), + (')', {subtypes.COMP_EXPR}), + ('for', { + subtypes.DICT_SET_GENERATOR, + subtypes.COMP_FOR, + }), + ('s', {subtypes.COMP_FOR}), + ('in', {subtypes.COMP_FOR}), + ('strs', {subtypes.COMP_FOR}), + ('}', {subtypes.NONE}), + ], + ]) def testUnaryNotOperator(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ not a """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes( - llines, [[('not', {subtypes.UNARY_OPERATOR}), ('a', {subtypes.NONE})]]) + self._CheckFormatTokenSubtypes(llines, [[('not', 
{subtypes.UNARY_OPERATOR}), + ('a', {subtypes.NONE})]]) def testBitwiseOperators(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ x = ((a | (b ^ 3) & c) << 3) >> 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes( - llines, [ - [ - ('x', {subtypes.NONE}), - ('=', {subtypes.ASSIGN_OPERATOR}), - ('(', {subtypes.NONE}), - ('(', {subtypes.NONE}), - ('a', {subtypes.NONE}), - ('|', {subtypes.BINARY_OPERATOR}), - ('(', {subtypes.NONE}), - ('b', {subtypes.NONE}), - ('^', {subtypes.BINARY_OPERATOR}), - ('3', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('&', {subtypes.BINARY_OPERATOR}), - ('c', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('<<', {subtypes.BINARY_OPERATOR}), - ('3', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('>>', {subtypes.BINARY_OPERATOR}), - ('1', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes(llines, [ + [ + ('x', {subtypes.NONE}), + ('=', {subtypes.ASSIGN_OPERATOR}), + ('(', {subtypes.NONE}), + ('(', {subtypes.NONE}), + ('a', {subtypes.NONE}), + ('|', {subtypes.BINARY_OPERATOR}), + ('(', {subtypes.NONE}), + ('b', {subtypes.NONE}), + ('^', {subtypes.BINARY_OPERATOR}), + ('3', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('&', {subtypes.BINARY_OPERATOR}), + ('c', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('<<', {subtypes.BINARY_OPERATOR}), + ('3', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('>>', {subtypes.BINARY_OPERATOR}), + ('1', {subtypes.NONE}), + ], + ]) def testArithmeticOperators(self): - code = textwrap.dedent( - """\ + code = textwrap.dedent("""\ x = ((a + (b - 3) * (1 % c) @ d) / 3) // 1 """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes( - llines, [ - [ - ('x', {subtypes.NONE}), - ('=', {subtypes.ASSIGN_OPERATOR}), - ('(', {subtypes.NONE}), - ('(', {subtypes.NONE}), - ('a', {subtypes.NONE}), - ('+', {subtypes.BINARY_OPERATOR}), - ('(', {subtypes.NONE}), - ('b', {subtypes.NONE}), - ('-', { - subtypes.BINARY_OPERATOR, - 
subtypes.SIMPLE_EXPRESSION, - }), - ('3', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('*', {subtypes.BINARY_OPERATOR}), - ('(', {subtypes.NONE}), - ('1', {subtypes.NONE}), - ('%', { - subtypes.BINARY_OPERATOR, - subtypes.SIMPLE_EXPRESSION, - }), - ('c', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('@', {subtypes.BINARY_OPERATOR}), - ('d', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('/', {subtypes.BINARY_OPERATOR}), - ('3', {subtypes.NONE}), - (')', {subtypes.NONE}), - ('//', {subtypes.BINARY_OPERATOR}), - ('1', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes(llines, [ + [ + ('x', {subtypes.NONE}), + ('=', {subtypes.ASSIGN_OPERATOR}), + ('(', {subtypes.NONE}), + ('(', {subtypes.NONE}), + ('a', {subtypes.NONE}), + ('+', {subtypes.BINARY_OPERATOR}), + ('(', {subtypes.NONE}), + ('b', {subtypes.NONE}), + ('-', { + subtypes.BINARY_OPERATOR, + subtypes.SIMPLE_EXPRESSION, + }), + ('3', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('*', {subtypes.BINARY_OPERATOR}), + ('(', {subtypes.NONE}), + ('1', {subtypes.NONE}), + ('%', { + subtypes.BINARY_OPERATOR, + subtypes.SIMPLE_EXPRESSION, + }), + ('c', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('@', {subtypes.BINARY_OPERATOR}), + ('d', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('/', {subtypes.BINARY_OPERATOR}), + ('3', {subtypes.NONE}), + (')', {subtypes.NONE}), + ('//', {subtypes.BINARY_OPERATOR}), + ('1', {subtypes.NONE}), + ], + ]) def testSubscriptColon(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ x[0:42:1] """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes( - llines, [ - [ - ('x', {subtypes.NONE}), - ('[', {subtypes.SUBSCRIPT_BRACKET}), - ('0', {subtypes.NONE}), - (':', {subtypes.SUBSCRIPT_COLON}), - ('42', {subtypes.NONE}), - (':', {subtypes.SUBSCRIPT_COLON}), - ('1', {subtypes.NONE}), - (']', {subtypes.SUBSCRIPT_BRACKET}), - ], - ]) + self._CheckFormatTokenSubtypes(llines, [ + [ + ('x', {subtypes.NONE}), + ('[', 
{subtypes.SUBSCRIPT_BRACKET}), + ('0', {subtypes.NONE}), + (':', {subtypes.SUBSCRIPT_COLON}), + ('42', {subtypes.NONE}), + (':', {subtypes.SUBSCRIPT_COLON}), + ('1', {subtypes.NONE}), + (']', {subtypes.SUBSCRIPT_BRACKET}), + ], + ]) def testFunctionCallWithStarExpression(self): - code = textwrap.dedent("""\ + code = textwrap.dedent("""\ [a, *b] """) llines = yapf_test_helper.ParseAndUnwrap(code) - self._CheckFormatTokenSubtypes( - llines, [ - [ - ('[', {subtypes.NONE}), - ('a', {subtypes.NONE}), - (',', {subtypes.NONE}), - ('*', { - subtypes.UNARY_OPERATOR, - subtypes.VARARGS_STAR, - }), - ('b', {subtypes.NONE}), - (']', {subtypes.NONE}), - ], - ]) + self._CheckFormatTokenSubtypes(llines, [ + [ + ('[', {subtypes.NONE}), + ('a', {subtypes.NONE}), + (',', {subtypes.NONE}), + ('*', { + subtypes.UNARY_OPERATOR, + subtypes.VARARGS_STAR, + }), + ('b', {subtypes.NONE}), + (']', {subtypes.NONE}), + ], + ]) if __name__ == '__main__': diff --git a/yapftests/utils.py b/yapftests/utils.py index d10a0982c..268b8c43a 100644 --- a/yapftests/utils.py +++ b/yapftests/utils.py @@ -42,16 +42,15 @@ def stdout_redirector(stream): # pylint: disable=invalid-name # Note: `buffering` is set to -1 despite documentation of NamedTemporaryFile # says None. This is probably a problem with the python documentation. 
@contextlib.contextmanager -def NamedTempFile( - mode='w+b', - buffering=-1, - encoding=None, - errors=None, - newline=None, - suffix=None, - prefix=None, - dirname=None, - text=False): +def NamedTempFile(mode='w+b', + buffering=-1, + encoding=None, + errors=None, + newline=None, + suffix=None, + prefix=None, + dirname=None, + text=False): """Context manager creating a new temporary file in text mode.""" if sys.version_info < (3, 5): # covers also python 2 if suffix is None: @@ -73,11 +72,18 @@ def NamedTempFile( @contextlib.contextmanager -def TempFileContents( - dirname, contents, encoding='utf-8', newline='', suffix=None): +def TempFileContents(dirname, + contents, + encoding='utf-8', + newline='', + suffix=None): # Note: NamedTempFile properly handles unicode encoding when using mode='w' - with NamedTempFile(dirname=dirname, mode='w', encoding=encoding, - newline=newline, suffix=suffix) as (f, fname): + with NamedTempFile( + dirname=dirname, + mode='w', + encoding=encoding, + newline=newline, + suffix=suffix) as (f, fname): f.write(contents) f.flush() yield fname diff --git a/yapftests/yapf_test.py b/yapftests/yapf_test.py index 865a67e3b..2330f4e18 100644 --- a/yapftests/yapf_test.py +++ b/yapftests/yapf_test.py @@ -54,11 +54,10 @@ def testSimple(self): self._Check(unformatted_code, unformatted_code) def testNoEndingNewline(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ if True: pass""") - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: pass """) @@ -66,7 +65,7 @@ def testNoEndingNewline(self): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testPrintAfterPeriod(self): - unformatted_code = textwrap.dedent("""a.print\n""") + unformatted_code = textwrap.dedent("""a.print\n""") expected_formatted_code = textwrap.dedent("""a.print\n""") self._Check(unformatted_code, expected_formatted_code) @@ -81,7 +80,7 @@ def tearDown(self): # pylint: 
disable=g-missing-super-call def assertCodeEqual(self, expected_code, code): if code != expected_code: - msg = 'Code format mismatch:\n' + msg = 'Code format mismatch:\n' msg += 'Expected:\n >' msg += '\n > '.join(expected_code.splitlines()) msg += '\nActual:\n >' @@ -90,18 +89,15 @@ def assertCodeEqual(self, expected_code, code): self.fail(msg) def testFormatFile(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ if True: pass """) - expected_formatted_code_pep8 = textwrap.dedent( - u"""\ + expected_formatted_code_pep8 = textwrap.dedent(u"""\ if True: pass """) - expected_formatted_code_yapf = textwrap.dedent( - u"""\ + expected_formatted_code_yapf = textwrap.dedent(u"""\ if True: pass """) @@ -113,8 +109,7 @@ def testFormatFile(self): self.assertCodeEqual(expected_formatted_code_yapf, formatted_code) def testDisableLinesPattern(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ if a: b # yapf: disable @@ -122,8 +117,7 @@ def testDisableLinesPattern(self): if h: i """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ if a: b # yapf: disable @@ -136,8 +130,7 @@ def testDisableLinesPattern(self): self.assertCodeEqual(expected_formatted_code, formatted_code) def testDisableAndReenableLinesPattern(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ if a: b # yapf: disable @@ -146,8 +139,7 @@ def testDisableAndReenableLinesPattern(self): if h: i """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ if a: b # yapf: disable @@ -161,8 +153,7 @@ def testDisableAndReenableLinesPattern(self): self.assertCodeEqual(expected_formatted_code, formatted_code) def testDisablePartOfMultilineComment(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ if a: b # This is a multiline comment that disables 
YAPF. @@ -174,8 +165,7 @@ def testDisablePartOfMultilineComment(self): if h: i """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ if a: b # This is a multiline comment that disables YAPF. @@ -190,8 +180,7 @@ def testDisablePartOfMultilineComment(self): formatted_code, _, _ = yapf_api.FormatFile(filepath, style_config='pep8') self.assertCodeEqual(expected_formatted_code, formatted_code) - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ def foo_function(): # some comment # yapf: disable @@ -208,8 +197,7 @@ def foo_function(): self.assertCodeEqual(code, formatted_code) def testEnabledDisabledSameComment(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ # yapf: disable a(bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb, ccccccccccccccccccccccccccccccc, ddddddddddddddddddddddd, eeeeeeeeeeeeeeeeeeeeeeeeeee) # yapf: enable @@ -222,24 +210,21 @@ def testEnabledDisabledSameComment(self): self.assertCodeEqual(code, formatted_code) def testFormatFileLinesSelection(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ if a: b if f: g if h: i """) - expected_formatted_code_lines1and2 = textwrap.dedent( - u"""\ + expected_formatted_code_lines1and2 = textwrap.dedent(u"""\ if a: b if f: g if h: i """) - expected_formatted_code_lines3 = textwrap.dedent( - u"""\ + expected_formatted_code_lines3 = textwrap.dedent(u"""\ if a: b if f: g @@ -255,8 +240,7 @@ def testFormatFileLinesSelection(self): self.assertCodeEqual(expected_formatted_code_lines3, formatted_code) def testFormatFileDiff(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ if True: pass """) @@ -266,7 +250,7 @@ def testFormatFileDiff(self): def testFormatFileInPlace(self): unformatted_code = u'True==False\n' - formatted_code = u'True == False\n' + formatted_code = u'True == False\n' with utils.TempFileContents(self.test_tmpdir, unformatted_code) as 
filepath: result, _, _ = yapf_api.FormatFile(filepath, in_place=True) self.assertEqual(result, None) @@ -284,19 +268,17 @@ def testFormatFileInPlace(self): print_diff=True) def testNoFile(self): - stream = py3compat.StringIO() + stream = py3compat.StringIO() handler = logging.StreamHandler(stream) - logger = logging.getLogger('mylogger') + logger = logging.getLogger('mylogger') logger.addHandler(handler) self.assertRaises( IOError, yapf_api.FormatFile, 'not_a_file.py', logger=logger.error) - self.assertEqual( - stream.getvalue(), - "[Errno 2] No such file or directory: 'not_a_file.py'\n") + self.assertEqual(stream.getvalue(), + "[Errno 2] No such file or directory: 'not_a_file.py'\n") def testCommentsUnformatted(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ foo = [# A list of things # bork 'one', @@ -308,8 +290,7 @@ def testCommentsUnformatted(self): self.assertCodeEqual(code, formatted_code) def testDisabledHorizontalFormattingOnNewLine(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ # yapf: disable a = [ 1] @@ -320,14 +301,12 @@ def testDisabledHorizontalFormattingOnNewLine(self): self.assertCodeEqual(code, formatted_code) def testSplittingSemicolonStatements(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ def f(): x = y + 42 ; z = n * 42 if True: a += 1 ; b += 1; c += 1 """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ def f(): x = y + 42 z = n * 42 @@ -341,14 +320,12 @@ def f(): self.assertCodeEqual(expected_formatted_code, formatted_code) def testSemicolonStatementsDisabled(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ def f(): x = y + 42 ; z = n * 42 # yapf: disable if True: a += 1 ; b += 1; c += 1 """) - expected_formatted_code = textwrap.dedent( - u"""\ + expected_formatted_code = textwrap.dedent(u"""\ def f(): x = y + 42 ; z = n * 42 # yapf: disable if 
True: @@ -361,8 +338,7 @@ def f(): self.assertCodeEqual(expected_formatted_code, formatted_code) def testDisabledSemiColonSeparatedStatements(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ # yapf: disable if True: a ; b """) @@ -371,8 +347,7 @@ def testDisabledSemiColonSeparatedStatements(self): self.assertCodeEqual(code, formatted_code) def testDisabledMultilineStringInDictionary(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ # yapf: disable A = [ @@ -391,8 +366,7 @@ def testDisabledMultilineStringInDictionary(self): self.assertCodeEqual(code, formatted_code) def testDisabledWithPrecedingText(self): - code = textwrap.dedent( - u"""\ + code = textwrap.dedent(u"""\ # TODO(fix formatting): yapf: disable A = [ @@ -428,8 +402,11 @@ def setUpClass(cls): # pylint: disable=g-missing-super-call def tearDownClass(cls): # pylint: disable=g-missing-super-call shutil.rmtree(cls.test_tmpdir) - def assertYapfReformats( - self, unformatted, expected, extra_options=None, env=None): + def assertYapfReformats(self, + unformatted, + expected, + extra_options=None, + env=None): """Check that yapf reformats the given code as expected. Invokes yapf in a subprocess, piping the unformatted code into its stdin. @@ -442,7 +419,7 @@ def assertYapfReformats( env: dict of environment variables. 
""" cmdline = YAPF_BINARY + (extra_options or []) - p = subprocess.Popen( + p = subprocess.Popen( cmdline, stdout=subprocess.PIPE, stdin=subprocess.PIPE, @@ -455,30 +432,27 @@ def assertYapfReformats( @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testUnicodeEncodingPipedToFile(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ def foo(): print('⇒') """) - with utils.NamedTempFile(dirname=self.test_tmpdir, - suffix='.py') as (out, _): - with utils.TempFileContents(self.test_tmpdir, unformatted_code, - suffix='.py') as filepath: + with utils.NamedTempFile( + dirname=self.test_tmpdir, suffix='.py') as (out, _): + with utils.TempFileContents( + self.test_tmpdir, unformatted_code, suffix='.py') as filepath: subprocess.check_call(YAPF_BINARY + ['--diff', filepath], stdout=out) def testInPlaceReformatting(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ def foo(): x = 37 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): x = 37 """) - with utils.TempFileContents(self.test_tmpdir, unformatted_code, - suffix='.py') as filepath: + with utils.TempFileContents( + self.test_tmpdir, unformatted_code, suffix='.py') as filepath: p = subprocess.Popen(YAPF_BINARY + ['--in-place', filepath]) p.wait() with io.open(filepath, mode='r', newline='') as fd: @@ -486,10 +460,10 @@ def foo(): self.assertEqual(reformatted_code, expected_formatted_code) def testInPlaceReformattingBlank(self): - unformatted_code = u'\n\n' + unformatted_code = u'\n\n' expected_formatted_code = u'\n' - with utils.TempFileContents(self.test_tmpdir, unformatted_code, - suffix='.py') as filepath: + with utils.TempFileContents( + self.test_tmpdir, unformatted_code, suffix='.py') as filepath: p = subprocess.Popen(YAPF_BINARY + ['--in-place', filepath]) p.wait() with io.open(filepath, mode='r', encoding='utf-8', newline='') as fd: @@ -497,10 
+471,10 @@ def testInPlaceReformattingBlank(self): self.assertEqual(reformatted_code, expected_formatted_code) def testInPlaceReformattingEmpty(self): - unformatted_code = u'' + unformatted_code = u'' expected_formatted_code = u'' - with utils.TempFileContents(self.test_tmpdir, unformatted_code, - suffix='.py') as filepath: + with utils.TempFileContents( + self.test_tmpdir, unformatted_code, suffix='.py') as filepath: p = subprocess.Popen(YAPF_BINARY + ['--in-place', filepath]) p.wait() with io.open(filepath, mode='r', encoding='utf-8', newline='') as fd: @@ -508,37 +482,31 @@ def testInPlaceReformattingEmpty(self): self.assertEqual(reformatted_code, expected_formatted_code) def testReadFromStdin(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): x = 37 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): x = 37 """) self.assertYapfReformats(unformatted_code, expected_formatted_code) def testReadFromStdinWithEscapedStrings(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ s = "foo\\nbar" """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ s = "foo\\nbar" """) self.assertYapfReformats(unformatted_code, expected_formatted_code) def testSetYapfStyle(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): # trail x = 37 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): # trail x = 37 """) @@ -548,18 +516,15 @@ def foo(): # trail extra_options=['--style=yapf']) def testSetCustomStyleBasedOnYapf(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): # trail x = 37 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): # trail x = 37 
""") - style_file = textwrap.dedent( - u'''\ + style_file = textwrap.dedent(u'''\ [style] based_on_style = yapf spaces_before_comment = 4 @@ -571,18 +536,15 @@ def foo(): # trail extra_options=['--style={0}'.format(stylepath)]) def testSetCustomStyleSpacesBeforeComment(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a_very_long_statement_that_extends_way_beyond # Comment short # This is a shorter statement """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a_very_long_statement_that_extends_way_beyond # Comment short # This is a shorter statement """) # noqa - style_file = textwrap.dedent( - u'''\ + style_file = textwrap.dedent(u'''\ [style] spaces_before_comment = 15, 20 ''') @@ -593,28 +555,26 @@ def testSetCustomStyleSpacesBeforeComment(self): extra_options=['--style={0}'.format(stylepath)]) def testReadSingleLineCodeFromStdin(self): - unformatted_code = textwrap.dedent("""\ + unformatted_code = textwrap.dedent("""\ if True: pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: pass """) self.assertYapfReformats(unformatted_code, expected_formatted_code) def testEncodingVerification(self): - unformatted_code = textwrap.dedent( - u"""\ + unformatted_code = textwrap.dedent(u"""\ '''The module docstring.''' # -*- coding: utf-8 -*- def f(): x = 37 """) - with utils.NamedTempFile(suffix='.py', - dirname=self.test_tmpdir) as (out, _): - with utils.TempFileContents(self.test_tmpdir, unformatted_code, - suffix='.py') as filepath: + with utils.NamedTempFile( + suffix='.py', dirname=self.test_tmpdir) as (out, _): + with utils.TempFileContents( + self.test_tmpdir, unformatted_code, suffix='.py') as filepath: try: subprocess.check_call(YAPF_BINARY + ['--diff', filepath], stdout=out) except subprocess.CalledProcessError as e: @@ -622,8 +582,7 @@ def f(): self.assertEqual(e.returncode, 1) # pylint: 
disable=g-assert-in-except # noqa def testReformattingSpecificLines(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -633,8 +592,7 @@ def g(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -654,16 +612,14 @@ def g(): extra_options=['--lines', '1-2']) def testOmitFormattingLinesBeforeDisabledFunctionComment(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import sys # Comment def some_func(x): x = ["badly" , "formatted","line" ] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import sys # Comment @@ -676,8 +632,7 @@ def some_func(x): extra_options=['--lines', '5-5']) def testReformattingSkippingLines(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -688,8 +643,7 @@ def g(): pass # yapf: enable """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -705,8 +659,7 @@ def g(): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testReformattingSkippingToEndOfFile(self): - unformatted_code = textwrap.dedent( - """\ + 
unformatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -723,8 +676,7 @@ def e(): 'bbbbbbb'): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -746,8 +698,7 @@ def e(): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testReformattingSkippingSingleLine(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -756,8 +707,7 @@ def g(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): # yapf: disable pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -771,15 +721,13 @@ def g(): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testDisableWholeDataStructure(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ A = set([ 'hello', 'world', ]) # yapf: disable """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ A = set([ 'hello', 'world', @@ -788,16 +736,14 @@ def testDisableWholeDataStructure(self): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testDisableButAdjustIndentations(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class 
SplitPenaltyTest(unittest.TestCase): def testUnbreakable(self): self._CheckPenalties(tree, [ ]) # yapf: disable """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class SplitPenaltyTest(unittest.TestCase): def testUnbreakable(self): @@ -807,8 +753,7 @@ def testUnbreakable(self): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testRetainingHorizontalWhitespace(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -817,8 +762,7 @@ def g(): if (xxxxxxxxxxxx.yyyyyyyy (zzzzzzzzzzzzz [0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): # yapf: disable pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -832,8 +776,7 @@ def g(): self.assertYapfReformats(unformatted_code, expected_formatted_code) def testRetainingVerticalWhitespace(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): pass @@ -845,8 +788,7 @@ def g(): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def h(): if (xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0]) == 'aaaaaaaaaaa' and xxxxxxxxxxxx.yyyyyyyy(zzzzzzzzzzzzz[0].mmmmmmmm[0]) == 'bbbbbbb'): @@ -864,8 +806,7 @@ def g(): expected_formatted_code, extra_options=['--lines', '1-2']) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if a: b @@ -882,8 +823,7 @@ def g(): # 
trailing whitespace """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if a: b @@ -903,8 +843,7 @@ def g(): expected_formatted_code, extra_options=['--lines', '3-3', '--lines', '13-13']) - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ ''' docstring @@ -917,7 +856,7 @@ def g(): unformatted_code, unformatted_code, extra_options=['--lines', '2-2']) def testVerticalSpacingWithCommentWithContinuationMarkers(self): - unformatted_code = """\ + unformatted_code = """\ # \\ # \\ # \\ @@ -939,15 +878,13 @@ def testVerticalSpacingWithCommentWithContinuationMarkers(self): extra_options=['--lines', '1-1']) def testRetainingSemicolonsWhenSpecifyingLines(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a = line_to_format def f(): x = y + 42; z = n * 42 if True: a += 1 ; b += 1 ; c += 1 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a = line_to_format def f(): x = y + 42; z = n * 42 @@ -959,8 +896,7 @@ def f(): extra_options=['--lines', '1-1']) def testDisabledMultilineStrings(self): - unformatted_code = textwrap.dedent( - '''\ + unformatted_code = textwrap.dedent('''\ foo=42 def f(): email_text += """This is a really long docstring that goes over the column limit and is multi-line.

@@ -970,8 +906,7 @@ def f(): """ ''') # noqa - expected_formatted_code = textwrap.dedent( - '''\ + expected_formatted_code = textwrap.dedent('''\ foo = 42 def f(): email_text += """This is a really long docstring that goes over the column limit and is multi-line.

@@ -987,8 +922,7 @@ def f(): extra_options=['--lines', '1-1']) def testDisableWhenSpecifyingLines(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ # yapf: disable A = set([ 'hello', @@ -1000,8 +934,7 @@ def testDisableWhenSpecifyingLines(self): 'world', ]) # yapf: disable """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ # yapf: disable A = set([ 'hello', @@ -1019,8 +952,7 @@ def testDisableWhenSpecifyingLines(self): extra_options=['--lines', '1-10']) def testDisableFormattingInDataLiteral(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def horrible(): oh_god() why_would_you() @@ -1039,8 +971,7 @@ def still_horrible(): 'that' ] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def horrible(): oh_god() why_would_you() @@ -1061,8 +992,7 @@ def still_horrible(): extra_options=['--lines', '14-15']) def testRetainVerticalFormattingBetweenDisabledAndEnabledLines(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class A(object): def aaaaaaaaaaaaa(self): c = bbbbbbbbb.ccccccccc('challenge', 0, 1, 10) @@ -1073,8 +1003,7 @@ def aaaaaaaaaaaaa(self): gggggggggggg.hhhhhhhhh(c, c.ffffffffffff)) iiiii = jjjjjjjjjjjjjj.iiiii """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class A(object): def aaaaaaaaaaaaa(self): c = bbbbbbbbb.ccccccccc('challenge', 0, 1, 10) @@ -1089,8 +1018,7 @@ def aaaaaaaaaaaaa(self): extra_options=['--lines', '4-7']) def testRetainVerticalFormattingBetweenDisabledLines(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class A(object): def aaaaaaaaaaaaa(self): pass @@ -1099,8 +1027,7 @@ def aaaaaaaaaaaaa(self): def bbbbbbbbbbbbb(self): # 5 pass """) - expected_formatted_code = textwrap.dedent( - """\ + 
expected_formatted_code = textwrap.dedent("""\ class A(object): def aaaaaaaaaaaaa(self): pass @@ -1115,8 +1042,7 @@ def bbbbbbbbbbbbb(self): # 5 extra_options=['--lines', '4-4']) def testFormatLinesSpecifiedInMiddleOfExpression(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ class A(object): def aaaaaaaaaaaaa(self): c = bbbbbbbbb.ccccccccc('challenge', 0, 1, 10) @@ -1127,8 +1053,7 @@ def aaaaaaaaaaaaa(self): gggggggggggg.hhhhhhhhh(c, c.ffffffffffff)) iiiii = jjjjjjjjjjjjjj.iiiii """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ class A(object): def aaaaaaaaaaaaa(self): c = bbbbbbbbb.ccccccccc('challenge', 0, 1, 10) @@ -1143,8 +1068,7 @@ def aaaaaaaaaaaaa(self): extra_options=['--lines', '5-6']) def testCommentFollowingMultilineString(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): '''First line. Second line. @@ -1152,8 +1076,7 @@ def foo(): x = '''hello world''' # second comment return 42 # another comment """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): '''First line. Second line. 
@@ -1168,14 +1091,12 @@ def foo(): def testDedentClosingBracket(self): # no line-break on the first argument, not dedenting closing brackets - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def overly_long_function_name(first_argument_on_the_same_line, second_argument_makes_the_line_too_long): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def overly_long_function_name(first_argument_on_the_same_line, second_argument_makes_the_line_too_long): pass @@ -1193,8 +1114,7 @@ def overly_long_function_name(first_argument_on_the_same_line, # extra_options=['--style=facebook']) # line-break before the first argument, dedenting closing brackets if set - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def overly_long_function_name( first_argument_on_the_same_line, second_argument_makes_the_line_too_long): @@ -1206,8 +1126,7 @@ def overly_long_function_name( # second_argument_makes_the_line_too_long): # pass # """) - expected_formatted_fb_code = textwrap.dedent( - """\ + expected_formatted_fb_code = textwrap.dedent("""\ def overly_long_function_name( first_argument_on_the_same_line, second_argument_makes_the_line_too_long ): @@ -1225,16 +1144,14 @@ def overly_long_function_name( # extra_options=['--style=pep8']) def testCoalesceBrackets(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ some_long_function_name_foo( { 'first_argument_of_the_thing': id, 'second_argument_of_the_thing': "some thing" } )""") - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ some_long_function_name_foo({ 'first_argument_of_the_thing': id, 'second_argument_of_the_thing': "some thing" @@ -1242,8 +1159,7 @@ def testCoalesceBrackets(self): """) with utils.NamedTempFile(dirname=self.test_tmpdir, mode='w') as (f, name): f.write( - textwrap.dedent( - u'''\ + 
textwrap.dedent(u'''\ [style] column_limit=82 coalesce_brackets = True @@ -1255,14 +1171,12 @@ def testCoalesceBrackets(self): extra_options=['--style={0}'.format(name)]) def testPseudoParenSpaces(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo(): def bar(): return {msg_id: author for author, msg_id in reader} """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo(): def bar(): return {msg_id: author for author, msg_id in reader} @@ -1273,8 +1187,7 @@ def bar(): extra_options=['--lines', '1-1', '--style', 'yapf']) def testMultilineCommentFormattingDisabled(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ # This is a comment FOO = { aaaaaaaa.ZZZ: [ @@ -1288,8 +1201,7 @@ def testMultilineCommentFormattingDisabled(self): '#': lambda x: x # do nothing } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ # This is a comment FOO = { aaaaaaaa.ZZZ: [ @@ -1309,16 +1221,14 @@ def testMultilineCommentFormattingDisabled(self): extra_options=['--lines', '1-1', '--style', 'yapf']) def testTrailingCommentsWithDisabledFormatting(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ import os SCOPES = [ 'hello world' # This is a comment. 
] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ import os SCOPES = [ @@ -1331,7 +1241,7 @@ def testTrailingCommentsWithDisabledFormatting(self): extra_options=['--lines', '1-1', '--style', 'yapf']) def testUseTabs(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(): if True: pass @@ -1341,7 +1251,7 @@ def foo_function(): if True: pass """ # noqa: W191,E101 - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf USE_TABS = true @@ -1354,7 +1264,7 @@ def foo_function(): extra_options=['--style={0}'.format(stylepath)]) def testUseTabsWith(self): - unformatted_code = """\ + unformatted_code = """\ def f(): return ['hello', 'world',] """ @@ -1365,7 +1275,7 @@ def f(): 'world', ] """ # noqa: W191,E101 - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf USE_TABS = true @@ -1378,7 +1288,7 @@ def f(): extra_options=['--style={0}'.format(stylepath)]) def testUseTabsContinuationAlignStyleFixed(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(arg1, arg2, arg3): return ['hello', 'world',] """ @@ -1390,7 +1300,7 @@ def foo_function( 'world', ] """ # noqa: W191,E101 - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf USE_TABS = true @@ -1406,7 +1316,7 @@ def foo_function( extra_options=['--style={0}'.format(stylepath)]) def testUseTabsContinuationAlignStyleVAlignRight(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(arg1, arg2, arg3): return ['hello', 'world',] """ @@ -1418,7 +1328,7 @@ def foo_function(arg1, arg2, 'world', ] """ # noqa: W191,E101 - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf USE_TABS = true @@ -1434,7 +1344,7 @@ def foo_function(arg1, arg2, extra_options=['--style={0}'.format(stylepath)]) def testUseSpacesContinuationAlignStyleFixed(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(arg1, 
arg2, arg3): return ['hello', 'world',] """ @@ -1446,7 +1356,7 @@ def foo_function( 'world', ] """ - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf COLUMN_LIMIT=32 @@ -1461,7 +1371,7 @@ def foo_function( extra_options=['--style={0}'.format(stylepath)]) def testUseSpacesContinuationAlignStyleVAlignRight(self): - unformatted_code = """\ + unformatted_code = """\ def foo_function(arg1, arg2, arg3): return ['hello', 'world',] """ @@ -1473,7 +1383,7 @@ def foo_function(arg1, arg2, 'world', ] """ - style_contents = u"""\ + style_contents = u"""\ [style] based_on_style = yapf COLUMN_LIMIT=32 @@ -1488,13 +1398,11 @@ def foo_function(arg1, arg2, extra_options=['--style={0}'.format(stylepath)]) def testStyleOutputRoundTrip(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def foo_function(): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def foo_function(): pass """) @@ -1514,8 +1422,7 @@ def foo_function(): extra_options=['--style={0}'.format(stylepath)]) def testSpacingBeforeComments(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ A = 42 @@ -1525,8 +1432,7 @@ def x(): def _(): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ A = 42 @@ -1542,8 +1448,7 @@ def _(): extra_options=['--lines', '1-2']) def testSpacingBeforeCommentsInDicts(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ A=42 X = { @@ -1558,8 +1463,7 @@ def testSpacingBeforeCommentsInDicts(self): 'BROKEN' } """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ A = 42 X = { @@ -1580,8 +1484,7 @@ def testSpacingBeforeCommentsInDicts(self): extra_options=['--style', 'yapf', '--lines', '1-1']) def testDisableWithLinesOption(self): - unformatted_code = textwrap.dedent( - """\ + 
unformatted_code = textwrap.dedent("""\ # yapf_lines_bug.py # yapf: disable def outer_func(): @@ -1590,8 +1493,7 @@ def inner_func(): return # yapf: enable """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ # yapf_lines_bug.py # yapf: disable def outer_func(): @@ -1607,7 +1509,7 @@ def inner_func(): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testNoSpacesAroundBinaryOperators(self): - unformatted_code = """\ + unformatted_code = """\ a = 4-b/c@d**37 """ expected_formatted_code = """\ @@ -1624,7 +1526,7 @@ def testNoSpacesAroundBinaryOperators(self): @unittest.skipUnless(py3compat.PY36, 'Requires Python 3.6') def testCP936Encoding(self): - unformatted_code = 'print("中文")\n' + unformatted_code = 'print("中文")\n' expected_formatted_code = 'print("中文")\n' self.assertYapfReformats( unformatted_code, @@ -1632,7 +1534,7 @@ def testCP936Encoding(self): env={'PYTHONIOENCODING': 'cp936'}) def testDisableWithLineRanges(self): - unformatted_code = """\ + unformatted_code = """\ # yapf: disable a = [ 1, @@ -1672,8 +1574,8 @@ class DiffIndentTest(unittest.TestCase): @staticmethod def _OwnStyle(): - my_style = style.CreatePEP8Style() - my_style['INDENT_WIDTH'] = 3 + my_style = style.CreatePEP8Style() + my_style['INDENT_WIDTH'] = 3 my_style['CONTINUATION_INDENT_WIDTH'] = 3 return my_style @@ -1683,13 +1585,11 @@ def _Check(self, unformatted_code, expected_formatted_code): self.assertEqual(expected_formatted_code, formatted_code) def testSimple(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ for i in range(5): print('bar') """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ for i in range(5): print('bar') """) @@ -1700,7 +1600,7 @@ class HorizontallyAlignedTrailingCommentsTest(yapf_test_helper.YAPFTest): @staticmethod def _OwnStyle(): - my_style = style.CreatePEP8Style() + my_style = style.CreatePEP8Style() 
my_style['SPACES_BEFORE_COMMENT'] = [ 15, 25, @@ -1714,8 +1614,7 @@ def _Check(self, unformatted_code, expected_formatted_code): self.assertCodeEqual(expected_formatted_code, formatted_code) def testSimple(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ foo = '1' # Aligned at first list value foo = '2__<15>' # Aligned at second list value @@ -1724,8 +1623,7 @@ def testSimple(self): foo = '4______________________<35>' # Aligned beyond list values """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ foo = '1' # Aligned at first list value foo = '2__<15>' # Aligned at second list value @@ -1737,8 +1635,7 @@ def testSimple(self): self._Check(unformatted_code, expected_formatted_code) def testBlock(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1746,8 +1643,7 @@ def testBlock(self): # Line 5 # Line 6 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1758,8 +1654,7 @@ def testBlock(self): self._Check(unformatted_code, expected_formatted_code) def testBlockWithLongLine(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ func(1) # Line 1 func___________________(2) # Line 2 # Line 3 @@ -1767,8 +1662,7 @@ def testBlockWithLongLine(self): # Line 5 # Line 6 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ func(1) # Line 1 func___________________(2) # Line 2 # Line 3 @@ -1779,8 +1673,7 @@ def testBlockWithLongLine(self): self._Check(unformatted_code, expected_formatted_code) def testBlockFuncSuffix(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1791,8 +1684,7 @@ def testBlockFuncSuffix(self): def 
Func(): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1807,8 +1699,7 @@ def Func(): self._Check(unformatted_code, expected_formatted_code) def testBlockCommentSuffix(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1818,8 +1709,7 @@ def testBlockCommentSuffix(self): # Aligned with prev comment block """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ func(1) # Line 1 func(2) # Line 2 # Line 3 @@ -1832,8 +1722,7 @@ def testBlockCommentSuffix(self): self._Check(unformatted_code, expected_formatted_code) def testBlockIndentedFuncSuffix(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: func(1) # Line 1 func(2) # Line 2 @@ -1847,8 +1736,7 @@ def testBlockIndentedFuncSuffix(self): def Func(): pass """) # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: func(1) # Line 1 func(2) # Line 2 @@ -1867,8 +1755,7 @@ def Func(): self._Check(unformatted_code, expected_formatted_code) def testBlockIndentedCommentSuffix(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: func(1) # Line 1 func(2) # Line 2 @@ -1879,8 +1766,7 @@ def testBlockIndentedCommentSuffix(self): # Not aligned """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: func(1) # Line 1 func(2) # Line 2 @@ -1894,8 +1780,7 @@ def testBlockIndentedCommentSuffix(self): self._Check(unformatted_code, expected_formatted_code) def testBlockMultiIndented(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ if True: if True: if True: @@ -1908,8 +1793,7 @@ def testBlockMultiIndented(self): # Not aligned 
""") # noqa - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ if True: if True: if True: @@ -1925,8 +1809,7 @@ def testBlockMultiIndented(self): self._Check(unformatted_code, expected_formatted_code) def testArgs(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ def MyFunc( arg1, # Desc 1 arg2, # Desc 2 @@ -1937,8 +1820,7 @@ def MyFunc( ): pass """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ def MyFunc( arg1, # Desc 1 arg2, # Desc 2 @@ -1952,8 +1834,7 @@ def MyFunc( self._Check(unformatted_code, expected_formatted_code) def testDisableBlock(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ a() # comment 1 b() # comment 2 @@ -1965,8 +1846,7 @@ def testDisableBlock(self): e() # comment 5 f() # comment 6 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ a() # comment 1 b() # comment 2 @@ -1981,15 +1861,13 @@ def testDisableBlock(self): self._Check(unformatted_code, expected_formatted_code) def testDisabledLine(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ short # comment 1 do_not_touch1 # yapf: disable do_not_touch2 # yapf: disable a_longer_statement # comment 2 """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ short # comment 1 do_not_touch1 # yapf: disable do_not_touch2 # yapf: disable @@ -2002,9 +1880,9 @@ class _SpacesAroundDictListTupleTestImpl(unittest.TestCase): @staticmethod def _OwnStyle(): - my_style = style.CreatePEP8Style() - my_style['DISABLE_ENDING_COMMA_HEURISTIC'] = True - my_style['SPLIT_ALL_COMMA_SEPARATED_VALUES'] = False + my_style = style.CreatePEP8Style() + my_style['DISABLE_ENDING_COMMA_HEURISTIC'] = True + my_style['SPLIT_ALL_COMMA_SEPARATED_VALUES'] = False 
my_style['SPLIT_ARGUMENTS_WHEN_COMMA_TERMINATED'] = False return my_style @@ -2021,14 +1899,13 @@ class SpacesAroundDictTest(_SpacesAroundDictListTupleTestImpl): @classmethod def _OwnStyle(cls): - style = super(SpacesAroundDictTest, cls)._OwnStyle() + style = super(SpacesAroundDictTest, cls)._OwnStyle() style['SPACES_AROUND_DICT_DELIMITERS'] = True return style def testStandard(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ {1 : 2} {k:v for k, v in other.items()} {k for k in [1, 2, 3]} @@ -2045,8 +1922,7 @@ def testStandard(self): [1, 2] (3, 4) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ { 1: 2 } { k: v for k, v in other.items() } { k for k in [1, 2, 3] } @@ -2071,14 +1947,13 @@ class SpacesAroundListTest(_SpacesAroundDictListTupleTestImpl): @classmethod def _OwnStyle(cls): - style = super(SpacesAroundListTest, cls)._OwnStyle() + style = super(SpacesAroundListTest, cls)._OwnStyle() style['SPACES_AROUND_LIST_DELIMITERS'] = True return style def testStandard(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ [a,b,c] [4,5,] [6, [7, 8], 9] @@ -2099,8 +1974,7 @@ def testStandard(self): {a: b} (1, 2) """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = textwrap.dedent("""\ [ a, b, c ] [ 4, 5, ] [ 6, [ 7, 8 ], 9 ] @@ -2129,14 +2003,13 @@ class SpacesAroundTupleTest(_SpacesAroundDictListTupleTestImpl): @classmethod def _OwnStyle(cls): - style = super(SpacesAroundTupleTest, cls)._OwnStyle() + style = super(SpacesAroundTupleTest, cls)._OwnStyle() style['SPACES_AROUND_TUPLE_DELIMITERS'] = True return style def testStandard(self): - unformatted_code = textwrap.dedent( - """\ + unformatted_code = textwrap.dedent("""\ (0, 1) (2, 3) (4, 5, 6,) @@ -2159,8 +2032,7 @@ def testStandard(self): {a: b} [3, 4] """) - expected_formatted_code = textwrap.dedent( - """\ + expected_formatted_code = 
textwrap.dedent("""\ ( 0, 1 ) ( 2, 3 ) ( 4, 5, 6, ) diff --git a/yapftests/yapf_test_helper.py b/yapftests/yapf_test_helper.py index b95212a8b..cb56ec865 100644 --- a/yapftests/yapf_test_helper.py +++ b/yapftests/yapf_test_helper.py @@ -39,7 +39,7 @@ def __init__(self, *args): def assertCodeEqual(self, expected_code, code): if code != expected_code: - msg = ['Code format mismatch:', 'Expected:'] + msg = ['Code format mismatch:', 'Expected:'] linelen = style.Get('COLUMN_LIMIT') for line in expected_code.splitlines(): if len(line) > linelen: From ad906167503976938b19641b3f72d254754b05dc Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Tue, 3 Jan 2023 11:01:27 +0100 Subject: [PATCH 11/11] change the format back to yapf-based 2 --- yapftests/reformatter_basic_test.py | 2 +- yapftests/reformatter_buganizer_test.py | 28 ++++++++++++------------- yapftests/reformatter_facebook_test.py | 2 +- yapftests/reformatter_pep8_test.py | 2 +- yapftests/reformatter_python3_test.py | 2 +- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/yapftests/reformatter_basic_test.py b/yapftests/reformatter_basic_test.py index f7bb4c5f5..798dbab9a 100644 --- a/yapftests/reformatter_basic_test.py +++ b/yapftests/reformatter_basic_test.py @@ -849,7 +849,7 @@ def testNoQueueSeletionInMiddleOfLine(self): # If the queue isn't properly constructed, then a token in the middle of the # line may be selected as the one with least penalty. The tokens after that # one are then splatted at the end of the line with no formatting. 
- unformatted_code = """\ + unformatted_code = """\ find_symbol(node.type) + "< " + " ".join(find_pattern(n) for n in node.child) + " >" """ # noqa expected_formatted_code = """\ diff --git a/yapftests/reformatter_buganizer_test.py b/yapftests/reformatter_buganizer_test.py index a4089ad03..54a62b588 100644 --- a/yapftests/reformatter_buganizer_test.py +++ b/yapftests/reformatter_buganizer_test.py @@ -160,7 +160,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB35417079(self): - code = """\ + code = """\ class _(): def _(): @@ -199,7 +199,7 @@ def testB120047670(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB120245013(self): - unformatted_code = """\ + unformatted_code = """\ class Foo(object): def testNoAlertForShortPeriod(self, rutabaga): self.targets[:][streamz_path,self._fillInOtherFields(streamz_path, {streamz_field_of_interest:True})] = series.Counter('1s', '+ 500x10000') @@ -234,7 +234,7 @@ def xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx( self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB111764402(self): - unformatted_code = """\ + unformatted_code = """\ x = self.stubs.stub(video_classification_map, 'read_video_classifications', (lambda external_ids, **unused_kwargs: {external_id: self._get_serving_classification('video') for external_id in external_ids})) """ # noqa expected_formatted_code = """\ @@ -287,7 +287,7 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB112651423(self): - unformatted_code = """\ + unformatted_code = """\ def potato(feeditems, browse_use_case=None): for item in turnip: if kumquat: @@ -398,7 +398,7 @@ def testB79462249(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB113210278(self): - unformatted_code = """\ + unformatted_code = """\ def _(): aaaaaaaaaaa = bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb.cccccccccccccccccccccccccccc(\ 
eeeeeeeeeeeeeeeeeeeeeeeeee.fffffffffffffffffffffffffffffffffffffff.\ @@ -414,7 +414,7 @@ def _(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB77923341(self): - code = """\ + code = """\ def f(): if (aaaaaaaaaaaaaa.bbbbbbbbbbbb.ccccc <= 0 and # pytype: disable=attribute-error ddddddddddd.eeeeeeeee == constants.FFFFFFFFFFFFFF): @@ -488,7 +488,7 @@ def testB65546221(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB30500455(self): - unformatted_code = """\ + unformatted_code = """\ INITIAL_SYMTAB = dict([(name, 'exception#' + name) for name in INITIAL_EXCEPTIONS ] * [(name, 'type#' + name) for name in INITIAL_TYPES] + [ (name, 'function#' + name) for name in INITIAL_FUNCTIONS @@ -517,7 +517,7 @@ def f(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB37099651(self): - unformatted_code = """\ + unformatted_code = """\ _MEMCACHE = lazy.MakeLazy( # pylint: disable=g-long-lambda lambda: function.call.mem.clients(FLAGS.some_flag_thingy, default_namespace=_LAZY_MEM_NAMESPACE, allow_pickle=True) @@ -538,7 +538,7 @@ def testB37099651(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB33228502(self): - unformatted_code = """\ + unformatted_code = """\ def _(): success_rate_stream_table = module.Precompute( query_function=module.DefineQueryFunction( @@ -682,7 +682,7 @@ def testB66011084(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB67455376(self): - unformatted_code = """\ + unformatted_code = """\ sponge_ids.extend(invocation.id() for invocation in self._client.GetInvocationsByLabels(labels)) """ # noqa expected_formatted_code = """\ @@ -759,7 +759,7 @@ def testB65176185(self): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB35210166(self): - unformatted_code = """\ + unformatted_code = """\ def _(): query = ( m.Fetch(n.Raw('monarch.BorgTask', 
'/proc/container/memory/usage'), { 'borg_user': borguser, 'borg_job': jobname }) @@ -807,7 +807,7 @@ def testB32167774(self): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB66912275(self): - unformatted_code = """\ + unformatted_code = """\ def _(): with self.assertRaisesRegexp(errors.HttpError, 'Invalid'): patch_op = api_client.forwardingRules().patch( @@ -841,7 +841,7 @@ def _(): self.assertCodeEqual(code, reformatter.Reformat(llines)) def testB65241516(self): - unformatted_code = """\ + unformatted_code = """\ checkpoint_files = gfile.Glob(os.path.join(TrainTraceDir(unit_key, "*", "*"), embedding_model.CHECKPOINT_FILENAME + "-*")) """ # noqa expected_formatted_code = """\ @@ -2169,7 +2169,7 @@ def main(unused_argv): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testB15597568(self): - unformatted_code = """\ + unformatted_code = """\ if True: if True: if True: diff --git a/yapftests/reformatter_facebook_test.py b/yapftests/reformatter_facebook_test.py index 780b42440..c61f32bf5 100644 --- a/yapftests/reformatter_facebook_test.py +++ b/yapftests/reformatter_facebook_test.py @@ -413,7 +413,7 @@ def foo(): self.assertCodeEqual(expected_formatted_code, reformatter.Reformat(llines)) def testIfStmtClosingBracket(self): - unformatted_code = """\ + unformatted_code = """\ if (isinstance(value , (StopIteration , StopAsyncIteration )) and exc.__cause__ is value_asdfasdfasdfasdfsafsafsafdasfasdfs): return False """ # noqa diff --git a/yapftests/reformatter_pep8_test.py b/yapftests/reformatter_pep8_test.py index 67ddadc23..acc218d24 100644 --- a/yapftests/reformatter_pep8_test.py +++ b/yapftests/reformatter_pep8_test.py @@ -525,7 +525,7 @@ def testSplitBeforeArithmeticOperators(self): style.CreateStyleFromConfig( '{based_on_style: pep8, split_before_arithmetic_operator: true}')) - unformatted_code = """\ + unformatted_code = """\ def _(): raise ValueError('This is a long message that ends with an 
argument: ' + str(42)) """ # noqa diff --git a/yapftests/reformatter_python3_test.py b/yapftests/reformatter_python3_test.py index 81e565326..b5d68e86f 100644 --- a/yapftests/reformatter_python3_test.py +++ b/yapftests/reformatter_python3_test.py @@ -285,7 +285,7 @@ def testSplittingArguments(self): if sys.version_info[1] < 5: return - unformatted_code = """\ + unformatted_code = """\ async def open_file(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None): pass