diff mercurial/__init__.py @ 29800:178c89e8519a
py3: import builtin wrappers automagically by code transformer
This should be less invasive than mucking with builtins.

Since tokenize.untokenize() looks at the start/end positions of tokens, we
calculate them from the NEWLINE token of the future import.
| author | Yuya Nishihara <yuya@tcha.org> |
|---|---|
| date | Tue, 16 Aug 2016 12:35:15 +0900 |
| parents | 1c22400db72d |
| children | 3139ec39b505 |
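
The point about token positions in the message above is worth seeing in isolation. Below is a minimal, standalone sketch (not Mercurial code; the source string and variable names are illustrative) showing that tokenize.untokenize() honors each token's (row, col) coordinates and rejects a token that claims to start before the previous one ended, which is why injected tokens must be offset from the NEWLINE token's start position:

```python
import io
import token
import tokenize

src = b'from __future__ import absolute_import\n'
tokens = list(tokenize.tokenize(io.BytesIO(src).readline))

# The NEWLINE token ends the import statement; its start position is
# where any text spliced onto this line must begin.
newline = next(t for t in tokens if t.type == token.NEWLINE)
row, col = newline.start  # (1, 38)

# A token positioned before (row, col) makes untokenize() choke, since
# it would have to move backwards to honor the claimed coordinates.
bogus = tokenize.TokenInfo(token.NAME, 'oops', (row, 0), (row, 4), '')
try:
    tokenize.untokenize(tokens[:-2] + [bogus] + tokens[-2:])
except ValueError as err:
    print(err)  # start (1,0) precedes previous end (1,38)
```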
```diff
--- a/mercurial/__init__.py	Sun Aug 14 12:51:21 2016 +0900
+++ b/mercurial/__init__.py	Tue Aug 16 12:35:15 2016 +0900
@@ -170,7 +170,7 @@
             spec.loader = hgloader(spec.name, spec.origin)
             return spec
 
-    def replacetokens(tokens):
+    def replacetokens(tokens, fullname):
         """Transform a stream of tokens from raw to Python 3.
 
         It is called by the custom module loading machinery to rewrite
@@ -184,6 +184,7 @@
         REMEMBER TO CHANGE ``BYTECODEHEADER`` WHEN CHANGING THIS FUNCTION OR
         CACHED FILES WON'T GET INVALIDATED PROPERLY.
         """
+        futureimpline = False
         for i, t in enumerate(tokens):
             # Convert most string literals to byte literals. String literals
             # in Python 2 are bytes. String literals in Python 3 are unicode.
@@ -217,6 +218,29 @@
                                          t.line)
                 continue
 
+            # Insert compatibility imports at "from __future__ import" line.
+            # No '\n' should be added to preserve line numbers.
+            if (t.type == token.NAME and t.string == 'import' and
+                all(u.type == token.NAME for u in tokens[i - 2:i]) and
+                [u.string for u in tokens[i - 2:i]] == ['from', '__future__']):
+                futureimpline = True
+            if t.type == token.NEWLINE and futureimpline:
+                futureimpline = False
+                if fullname == 'mercurial.pycompat':
+                    yield t
+                    continue
+                r, c = t.start
+                l = (b'; from mercurial.pycompat import '
+                     b'delattr, getattr, hasattr, setattr, xrange\n')
+                for u in tokenize.tokenize(io.BytesIO(l).readline):
+                    if u.type in (tokenize.ENCODING, token.ENDMARKER):
+                        continue
+                    yield tokenize.TokenInfo(u.type, u.string,
+                                             (r, c + u.start[1]),
+                                             (r, c + u.end[1]),
+                                             '')
+                continue
+
             try:
                 nexttoken = tokens[i + 1]
             except IndexError:
@@ -279,7 +303,7 @@
     # ``replacetoken`` or any mechanism that changes semantics of module
     # loading is changed. Otherwise cached bytecode may get loaded without
     # the new transformation mechanisms applied.
-    BYTECODEHEADER = b'HG\x00\x01'
+    BYTECODEHEADER = b'HG\x00\x02'
 
     class hgloader(importlib.machinery.SourceFileLoader):
         """Custom module loader that transforms source code.
@@ -338,7 +362,7 @@
         """Perform token transformation before compilation."""
         buf = io.BytesIO(data)
         tokens = tokenize.tokenize(buf.readline)
-        data = tokenize.untokenize(replacetokens(list(tokens)))
+        data = tokenize.untokenize(replacetokens(list(tokens), self.name))
         # Python's built-in importer strips frames from exceptions raised
         # for this code. Unfortunately, that mechanism isn't extensible
         # and our frame will be blamed for the import failure. There
```
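
Reduced to its essentials, the transformation added above works as follows. This is a simplified, standalone sketch of the same technique (the helper name inject_compat and the shortened import list are mine; the real replacetokens() above also rewrites string literals and other constructs):

```python
import io
import token
import tokenize

def inject_compat(tokens):
    """Splice a pycompat import onto the 'from __future__ import' line.

    No newline is emitted, so the line numbers of everything after the
    future import are unchanged.
    """
    tokens = list(tokens)
    futureimpline = False
    for i, t in enumerate(tokens):
        # Detect "from __future__ import" by looking two tokens back.
        if (t.type == token.NAME and t.string == 'import' and
                [u.string for u in tokens[i - 2:i]] == ['from', '__future__']):
            futureimpline = True
        if t.type == token.NEWLINE and futureimpline:
            futureimpline = False
            r, c = t.start  # where the import statement ends
            l = b'; from mercurial.pycompat import xrange\n'
            for u in tokenize.tokenize(io.BytesIO(l).readline):
                if u.type in (tokenize.ENCODING, token.ENDMARKER):
                    continue
                # Shift each injected token onto row r, offset by c columns,
                # so untokenize() lays it out after the original statement.
                yield tokenize.TokenInfo(u.type, u.string,
                                         (r, c + u.start[1]),
                                         (r, c + u.end[1]), '')
            continue  # the injected stream supplies its own NEWLINE
        yield t

src = b'from __future__ import absolute_import\nx = 1\n'
buf = io.BytesIO(src)
out = tokenize.untokenize(inject_compat(tokenize.tokenize(buf.readline)))
print(out.decode())
# from __future__ import absolute_import; from mercurial.pycompat import xrange
# x = 1
```

Because the injected tokens sit on the same physical row as the future import and no extra newline is emitted, every subsequent line keeps its original number, so tracebacks still point at the right source lines.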
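The BYTECODEHEADER bump is the other half of the change: bytecode cached by the old transformer must not be loaded once the transformation changes. Here is a sketch of that guard pattern with assumed helper names (the real hgloader wires equivalent checks into importlib's SourceFileLoader rather than using free functions like these):

```python
# Bumping the version byte makes every existing cache file fail the
# prefix test below, forcing a recompile with the current transform.
BYTECODEHEADER = b'HG\x00\x02'

def load_cached(raw):
    """Return bytecode from a raw cache blob, or None if the blob was
    written by an older (or absent) transformer."""
    if not raw.startswith(BYTECODEHEADER):
        return None  # header mismatch: caller recompiles from source
    return raw[len(BYTECODEHEADER):]

def store_cached(bytecode):
    """Prefix freshly compiled bytecode with the current header."""
    return BYTECODEHEADER + bytecode

# A cache written under header version 1 is rejected; a fresh one is not.
stale = b'HG\x00\x01' + b'<bytecode>'
assert load_cached(stale) is None
assert load_cached(store_cached(b'<bytecode>')) == b'<bytecode>'
```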