contrib/byteify-strings.py
changeset 42687 26a31c88e1a5
parent 42683 bbb002b378f3
child 42700 f9b64ff9d26b
--- a/contrib/byteify-strings.py
+++ b/contrib/byteify-strings.py
@@ -122,23 +122,27 @@
         except IndexError:
             return False
 
     coldelta = 0  # column increment for new opening parens
     coloffset = -1  # column offset for the current line (-1: TBD)
-    parens = [(0, 0, 0)]  # stack of (line, end-column, column-offset)
+    parens = [(0, 0, 0, -1)]  # stack of (line, end-column, column-offset, type)
     ignorenextline = False  # don't transform the next line
     insideignoreblock = False # don't transform until turned off
     for i, t in enumerate(tokens):
         # Compute the column offset for the current line, such that
         # the current line will be aligned to the last opening paren
         # as before.
         if coloffset < 0:
-            if t.start[1] == parens[-1][1]:
-                coloffset = parens[-1][2]
-            elif t.start[1] + 1 == parens[-1][1]:
+            lastparen = parens[-1]
+            if t.start[1] == lastparen[1]:
+                coloffset = lastparen[2]
+            elif (
+                t.start[1] + 1 == lastparen[1]
+                and lastparen[3] not in (token.NEWLINE, tokenize.NL)
+            ):
                 # fix misaligned indent of s/util.Abort/error.Abort/
-                coloffset = parens[-1][2] + (parens[-1][1] - t.start[1])
+                coloffset = lastparen[2] + (lastparen[1] - t.start[1])
             else:
                 coloffset = 0
 
         # Reset per-line attributes at EOL.
         if t.type in (token.NEWLINE, tokenize.NL):
@@ -162,11 +166,11 @@
             yield adjusttokenpos(t, coloffset)
             continue
 
         # Remember the last paren position.
         if _isop(i, '(', '[', '{'):
-            parens.append(t.end + (coloffset + coldelta,))
+            parens.append(t.end + (coloffset + coldelta, tokens[i + 1].type))
         elif _isop(i, ')', ']', '}'):
             parens.pop()
 
         # Convert most string literals to byte literals. String literals
         # in Python 2 are bytes. String literals in Python 3 are unicode.
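
The extra slot added to the parens stack above records the type of the token that immediately follows an opening paren. The standalone sketch below (the sample source and the helper name opening_paren_styles are made up for illustration; they are not part of byteify-strings.py) shows how that follower token distinguishes a paren that ends its line from one with arguments on the same line, which is the only case where the s/util.Abort/error.Abort/ re-indent fix should apply.

import io
import token
import tokenize

SOURCE = b"""
raise error.Abort(
    'multi-line call')
raise error.Abort('single-line call')
"""

def opening_paren_styles(source):
    # Tokenize the byte string with the stdlib tokenizer.
    tokens = list(tokenize.tokenize(io.BytesIO(source).readline))
    for i, t in enumerate(tokens):
        if t.type == token.OP and t.string in ('(', '[', '{'):
            follower = tokens[i + 1]
            # A NEWLINE/NL token right after the paren means the arguments
            # start on the following line, so there is no same-line
            # alignment that a later rewrite could break.
            at_eol = follower.type in (token.NEWLINE, tokenize.NL)
            yield t.start[0], 'end-of-line' if at_eol else 'inline'

for lineno, style in opening_paren_styles(SOURCE):
    print(lineno, style)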