--- a/Makefile Sun Aug 07 14:58:49 2016 +0900
+++ b/Makefile Mon Aug 15 12:26:02 2016 -0400
@@ -164,10 +164,12 @@
--install-lib=/Library/Python/2.7/site-packages/
make -C doc all install DESTDIR="$(PWD)/build/mercurial/"
mkdir -p $${OUTPUTDIR:-dist}
- pkgbuild --root build/mercurial/ --identifier org.mercurial-scm.mercurial \
- build/mercurial.pkg
HGVER=$$((cat build/mercurial/Library/Python/2.7/site-packages/mercurial/__version__.py; echo 'print(version)') | python) && \
OSXVER=$$(sw_vers -productVersion | cut -d. -f1,2) && \
+ pkgbuild --root build/mercurial/ \
+ --identifier org.mercurial-scm.mercurial \
+ --version "$${HGVER}" \
+ build/mercurial.pkg && \
productbuild --distribution contrib/macosx/distribution.xml \
--package-path build/ \
--version "$${HGVER}" \
--- a/contrib/check-commit Sun Aug 07 14:58:49 2016 +0900
+++ b/contrib/check-commit Mon Aug 15 12:26:02 2016 -0400
@@ -41,7 +41,12 @@
(afterheader + r".{79,}", "summary line too long (limit is 78)"),
(r"\n\+\n( |\+)\n", "adds double empty line"),
(r"\n \n\+\n", "adds double empty line"),
- (r"\n\+[ \t]+def [a-z]+_[a-z]", "adds a function with foo_bar naming"),
+ # Forbid "_" in function name.
+ #
+ # We skip the check for cffi related functions. Their names mirror the
+ # names of the C functions they wrap, and C function names may contain "_".
+ (r"\n\+[ \t]+def (?!cffi)[a-z]+_[a-z]",
+ "adds a function with foo_bar naming"),
]
word = re.compile('\S')
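
A minimal, runnable sketch of the tightened check-commit rule; the sample diff lines below are illustrative::

  import re

  # same shape as the new rule: flag snake_case function definitions added
  # by a patch, but skip cffi_* wrappers whose names mirror C functions
  pattern = re.compile(r"\n\+[ \t]+def (?!cffi)[a-z]+_[a-z]")

  assert pattern.search("\n+    def foo_bar(self):")       # still flagged
  assert not pattern.search("\n+    def cffi_bar(self):")   # now skipped
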
--- a/contrib/check-py3-compat.py Sun Aug 07 14:58:49 2016 +0900
+++ b/contrib/check-py3-compat.py Mon Aug 15 12:26:02 2016 -0400
@@ -55,7 +55,7 @@
# out module paths for things not in a package can be confusing.
if f.startswith(('hgext/', 'mercurial/')) and not f.endswith('__init__.py'):
assert f.endswith('.py')
- name = f.replace('/', '.')[:-3]
+ name = f.replace('/', '.')[:-3].replace('.pure.', '.')
with open(f, 'r') as fh:
try:
imp.load_module(name, fh, '', ('py', 'r', imp.PY_SOURCE))
--- a/contrib/chg/hgclient.c Sun Aug 07 14:58:49 2016 +0900
+++ b/contrib/chg/hgclient.c Mon Aug 15 12:26:02 2016 -0400
@@ -126,15 +126,10 @@
return; /* assumes input request */
size_t cursize = 0;
- int emptycount = 0;
while (cursize < hgc->ctx.datasize) {
rsize = recv(hgc->sockfd, hgc->ctx.data + cursize,
hgc->ctx.datasize - cursize, 0);
- /* rsize == 0 normally indicates EOF, while it's also a valid
- * packet size for unix socket. treat it as EOF and abort if
- * we get many empty responses in a row. */
- emptycount = (rsize == 0 ? emptycount + 1 : 0);
- if (rsize < 0 || emptycount > 20)
+ if (rsize < 1)
abortmsg("failed to read data block");
cursize += rsize;
}
--- a/doc/hgmanpage.py Sun Aug 07 14:58:49 2016 +0900
+++ b/doc/hgmanpage.py Mon Aug 15 12:26:02 2016 -0400
@@ -57,7 +57,6 @@
import roman
except ImportError:
from docutils.utils import roman
-import inspect
FIELD_LIST_INDENT = 7
DEFINITION_LIST_INDENT = 7
--- a/hgext/extdiff.py Sun Aug 07 14:58:49 2016 +0900
+++ b/hgext/extdiff.py Mon Aug 15 12:26:02 2016 -0400
@@ -324,6 +324,34 @@
cmdline = ' '.join(map(util.shellquote, [program] + option))
return dodiff(ui, repo, cmdline, pats, opts)
+class savedcmd(object):
+ """use external program to diff repository (or selected files)
+
+ Show differences between revisions for the specified files, using
+ the following program::
+
+ %(path)s
+
+ When two revision arguments are given, then changes are shown
+ between those revisions. If only one revision is specified then
+ that revision is compared to the working directory, and, when no
+ revisions are specified, the working directory files are compared
+ to its parent.
+ """
+
+ def __init__(self, path, cmdline):
+ # We can't pass non-ASCII through docstrings (and path is
+ # in an unknown encoding anyway)
+ docpath = path.encode("string-escape")
+ self.__doc__ = self.__doc__ % {'path': util.uirepr(docpath)}
+ self._cmdline = cmdline
+
+ def __call__(self, ui, repo, *pats, **opts):
+ options = ' '.join(map(util.shellquote, opts['option']))
+ if options:
+ options = ' ' + options
+ return dodiff(ui, repo, self._cmdline + options, pats, opts)
+
def uisetup(ui):
for cmd, path in ui.configitems('extdiff'):
path = util.expandpath(path)
@@ -357,28 +385,8 @@
ui.config('merge-tools', cmd+'.diffargs')
if args:
cmdline += ' ' + args
- def save(cmdline):
- '''use closure to save diff command to use'''
- def mydiff(ui, repo, *pats, **opts):
- options = ' '.join(map(util.shellquote, opts['option']))
- if options:
- options = ' ' + options
- return dodiff(ui, repo, cmdline + options, pats, opts)
- # We can't pass non-ASCII through docstrings (and path is
- # in an unknown encoding anyway)
- docpath = path.encode("string-escape")
- mydiff.__doc__ = '''\
-use %(path)s to diff repository (or selected files)
+ command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
+ inferrepo=True)(savedcmd(path, cmdline))
- Show differences between revisions for the specified files, using
- the %(path)s program.
-
- When two revision arguments are given, then changes are shown
- between those revisions. If only one revision is specified then
- that revision is compared to the working directory, and, when no
- revisions are specified, the working directory files are compared
- to its parent.\
-''' % {'path': util.uirepr(docpath)}
- return mydiff
- command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
- inferrepo=True)(save(cmdline))
+# tell hggettext to extract docstrings from these functions:
+i18nfunctions = [savedcmd]
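
The closure-to-class move matters for translation: hggettext only sees docstrings attached to module-level objects, so the per-tool text now lives on savedcmd instances. A reduced stand-alone sketch of that pattern (tool path and command are made up)::

  class externaltool(object):              # stand-in, not extdiff.savedcmd
      """use external program to diff repository (or selected files)

      Show differences using::

          %(path)s
      """

      def __init__(self, path, cmdline):
          # per-instance docstring keeps the class docstring translatable
          self.__doc__ = self.__doc__ % {'path': path}
          self._cmdline = cmdline

      def __call__(self, *pats, **opts):
          return '%s %s' % (self._cmdline, ' '.join(pats))

  kdiff = externaltool('/usr/bin/kdiff3', 'kdiff3')
  assert '/usr/bin/kdiff3' in kdiff.__doc__   # path substituted per instance
  print(kdiff('file-a', 'file-b'))            # 'kdiff3 file-a file-b'
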
--- a/hgext/journal.py Sun Aug 07 14:58:49 2016 +0900
+++ b/hgext/journal.py Mon Aug 15 12:26:02 2016 -0400
@@ -24,7 +24,6 @@
bookmarks,
cmdutil,
commands,
- dirstate,
dispatch,
error,
extensions,
@@ -63,8 +62,6 @@
extensions.wrapfunction(dispatch, 'runcommand', runcommand)
extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
extensions.wrapfunction(
- dirstate.dirstate, '_writedirstate', recorddirstateparents)
- extensions.wrapfunction(
localrepo.localrepository.dirstate, 'func', wrapdirstate)
extensions.wrapfunction(hg, 'postshare', wrappostshare)
extensions.wrapfunction(hg, 'copystore', unsharejournal)
@@ -84,34 +81,19 @@
dirstate = orig(repo)
if util.safehasattr(repo, 'journal'):
dirstate.journalstorage = repo.journal
+ dirstate.addparentchangecallback('journal', recorddirstateparents)
return dirstate
-def recorddirstateparents(orig, dirstate, dirstatefp):
+def recorddirstateparents(dirstate, old, new):
"""Records all dirstate parent changes in the journal."""
+ old = list(old)
+ new = list(new)
if util.safehasattr(dirstate, 'journalstorage'):
- old = [node.nullid, node.nullid]
- nodesize = len(node.nullid)
- try:
- # The only source for the old state is in the dirstate file still
- # on disk; the in-memory dirstate object only contains the new
- # state. dirstate._opendirstatefile() switches beteen .hg/dirstate
- # and .hg/dirstate.pending depending on the transaction state.
- with dirstate._opendirstatefile() as fp:
- state = fp.read(2 * nodesize)
- if len(state) == 2 * nodesize:
- old = [state[:nodesize], state[nodesize:]]
- except IOError:
- pass
-
- new = dirstate.parents()
- if old != new:
- # only record two hashes if there was a merge
- oldhashes = old[:1] if old[1] == node.nullid else old
- newhashes = new[:1] if new[1] == node.nullid else new
- dirstate.journalstorage.record(
- wdirparenttype, '.', oldhashes, newhashes)
-
- return orig(dirstate, dirstatefp)
+ # only record two hashes if there was a merge
+ oldhashes = old[:1] if old[1] == node.nullid else old
+ newhashes = new[:1] if new[1] == node.nullid else new
+ dirstate.journalstorage.record(
+ wdirparenttype, '.', oldhashes, newhashes)
# hooks to record bookmark changes (both local and remote)
def recordbookmarks(orig, store, fp):
@@ -165,9 +147,10 @@
def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
"""Mark this shared working copy as sharing journal information"""
- orig(sourcerepo, destrepo, **kwargs)
- with destrepo.vfs('shared', 'a') as fp:
- fp.write('journal\n')
+ with destrepo.wlock():
+ orig(sourcerepo, destrepo, **kwargs)
+ with destrepo.vfs('shared', 'a') as fp:
+ fp.write('journal\n')
def unsharejournal(orig, ui, repo, repopath):
"""Copy shared journal entries into this repo when unsharing"""
@@ -475,8 +458,10 @@
for count, entry in enumerate(repo.journal.filtered(name=name)):
if count == limit:
break
- newhashesstr = ','.join([node.short(hash) for hash in entry.newhashes])
- oldhashesstr = ','.join([node.short(hash) for hash in entry.oldhashes])
+ newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
+ name='node', sep=',')
+ oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
+ name='node', sep=',')
fm.startitem()
fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
@@ -486,7 +471,7 @@
opts.get('all') or name.startswith('re:'),
'name', ' %-8s', entry.name)
- timestring = util.datestr(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
+ timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
fm.condwrite(ui.verbose, 'date', ' %s', timestring)
fm.write('command', ' %s\n', entry.command)
--- a/hgext/mq.py Sun Aug 07 14:58:49 2016 +0900
+++ b/hgext/mq.py Mon Aug 15 12:26:02 2016 -0400
@@ -3354,53 +3354,54 @@
raise error.Abort(
_('invalid queue name, may not contain the characters ":\\/."'))
- existing = _getqueues()
-
- if opts.get('create'):
- if name in existing:
- raise error.Abort(_('queue "%s" already exists') % name)
- if _noqueues():
- _addqueue(_defaultqueue)
- _addqueue(name)
- _setactive(name)
- elif opts.get('rename'):
- current = _getcurrent()
- if name == current:
- raise error.Abort(_('can\'t rename "%s" to its current name')
- % name)
- if name in existing:
- raise error.Abort(_('queue "%s" already exists') % name)
-
- olddir = _queuedir(current)
- newdir = _queuedir(name)
-
- if os.path.exists(newdir):
- raise error.Abort(_('non-queue directory "%s" already exists') %
- newdir)
-
- fh = repo.vfs('patches.queues.new', 'w')
- for queue in existing:
- if queue == current:
- fh.write('%s\n' % (name,))
- if os.path.exists(olddir):
- util.rename(olddir, newdir)
- else:
- fh.write('%s\n' % (queue,))
- fh.close()
- util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
- _setactivenocheck(name)
- elif opts.get('delete'):
- _delete(name)
- elif opts.get('purge'):
- if name in existing:
+ with repo.wlock():
+ existing = _getqueues()
+
+ if opts.get('create'):
+ if name in existing:
+ raise error.Abort(_('queue "%s" already exists') % name)
+ if _noqueues():
+ _addqueue(_defaultqueue)
+ _addqueue(name)
+ _setactive(name)
+ elif opts.get('rename'):
+ current = _getcurrent()
+ if name == current:
+ raise error.Abort(_('can\'t rename "%s" to its current name')
+ % name)
+ if name in existing:
+ raise error.Abort(_('queue "%s" already exists') % name)
+
+ olddir = _queuedir(current)
+ newdir = _queuedir(name)
+
+ if os.path.exists(newdir):
+ raise error.Abort(_('non-queue directory "%s" already exists') %
+ newdir)
+
+ fh = repo.vfs('patches.queues.new', 'w')
+ for queue in existing:
+ if queue == current:
+ fh.write('%s\n' % (name,))
+ if os.path.exists(olddir):
+ util.rename(olddir, newdir)
+ else:
+ fh.write('%s\n' % (queue,))
+ fh.close()
+ util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
+ _setactivenocheck(name)
+ elif opts.get('delete'):
_delete(name)
- qdir = _queuedir(name)
- if os.path.exists(qdir):
- shutil.rmtree(qdir)
- else:
- if name not in existing:
- raise error.Abort(_('use --create to create a new queue'))
- _setactive(name)
+ elif opts.get('purge'):
+ if name in existing:
+ _delete(name)
+ qdir = _queuedir(name)
+ if os.path.exists(qdir):
+ shutil.rmtree(qdir)
+ else:
+ if name not in existing:
+ raise error.Abort(_('use --create to create a new queue'))
+ _setactive(name)
def mqphasedefaults(repo, roots):
"""callback used to set mq changeset as secret when no phase data exists"""
--- a/i18n/hggettext Sun Aug 07 14:58:49 2016 +0900
+++ b/i18n/hggettext Mon Aug 15 12:26:02 2016 -0400
@@ -114,7 +114,7 @@
if func.__doc__:
src = inspect.getsource(func)
name = "%s.%s" % (path, func.__name__)
- lineno = func.func_code.co_firstlineno
+ lineno = inspect.getsourcelines(func)[1]
doc = func.__doc__
if rstrip:
doc = doc.rstrip()
--- a/mercurial/branchmap.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/branchmap.py Mon Aug 15 12:26:02 2016 -0400
@@ -470,8 +470,12 @@
def write(self, tr=None):
"""Save branch cache if it is dirty."""
repo = self._repo
- if self._rbcnamescount < len(self._names):
- try:
+ wlock = None
+ step = ''
+ try:
+ if self._rbcnamescount < len(self._names):
+ step = ' names'
+ wlock = repo.wlock(wait=False)
if self._rbcnamescount != 0:
f = repo.vfs.open(_rbcnames, 'ab')
if f.tell() == self._rbcsnameslen:
@@ -489,16 +493,15 @@
for b in self._names[self._rbcnamescount:]))
self._rbcsnameslen = f.tell()
f.close()
- except (IOError, OSError, error.Abort) as inst:
- repo.ui.debug("couldn't write revision branch cache names: "
- "%s\n" % inst)
- return
- self._rbcnamescount = len(self._names)
+ self._rbcnamescount = len(self._names)
- start = self._rbcrevslen * _rbcrecsize
- if start != len(self._rbcrevs):
- revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
- try:
+ start = self._rbcrevslen * _rbcrecsize
+ if start != len(self._rbcrevs):
+ step = ''
+ if wlock is None:
+ wlock = repo.wlock(wait=False)
+ revs = min(len(repo.changelog),
+ len(self._rbcrevs) // _rbcrecsize)
f = repo.vfs.open(_rbcrevs, 'ab')
if f.tell() != start:
repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
@@ -510,8 +513,10 @@
end = revs * _rbcrecsize
f.write(self._rbcrevs[start:end])
f.close()
- except (IOError, OSError, error.Abort) as inst:
- repo.ui.debug("couldn't write revision branch cache: %s\n" %
- inst)
- return
- self._rbcrevslen = revs
+ self._rbcrevslen = revs
+ except (IOError, OSError, error.Abort, error.LockError) as inst:
+ repo.ui.debug("couldn't write revision branch cache%s: %s\n"
+ % (step, inst))
+ finally:
+ if wlock is not None:
+ wlock.release()
--- a/mercurial/bundle2.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/bundle2.py Mon Aug 15 12:26:02 2016 -0400
@@ -989,7 +989,10 @@
outdebug(ui, 'closing payload chunk')
# abort current part payload
yield _pack(_fpayloadsize, 0)
- raise exc_info[0], exc_info[1], exc_info[2]
+ if sys.version_info[0] >= 3:
+ raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
+ else:
+ exec("""raise exc_info[0], exc_info[1], exc_info[2]""")
# end of payload
outdebug(ui, 'closing payload chunk')
yield _pack(_fpayloadsize, 0)
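
The three-argument raise is Python 2 only syntax, so it has to hide inside exec() for the module to stay parseable on Python 3, while with_traceback() is the Python 3 spelling. A stand-alone sketch of the same dance (the helper name is made up)::

  import sys

  def reraise(exc_info):
      # re-raise a previously captured exception, keeping its traceback
      if sys.version_info[0] >= 3:
          raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
      else:
          exec("raise exc_info[0], exc_info[1], exc_info[2]")

  try:
      try:
          1 // 0
      except Exception:
          reraise(sys.exc_info())
  except ZeroDivisionError:
      print('re-raised with the original traceback intact')
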
--- a/mercurial/bundlerepo.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/bundlerepo.py Mon Aug 15 12:26:02 2016 -0400
@@ -56,10 +56,8 @@
self.repotiprev = n - 1
chain = None
self.bundlerevs = set() # used by 'bundle()' revset expression
- while True:
- chunkdata = bundle.deltachunk(chain)
- if not chunkdata:
- break
+ getchunk = lambda: bundle.deltachunk(chain)
+ for chunkdata in iter(getchunk, {}):
node = chunkdata['node']
p1 = chunkdata['p1']
p2 = chunkdata['p2']
@@ -190,10 +188,16 @@
self.filteredrevs = oldfilter
class bundlemanifest(bundlerevlog, manifest.manifest):
- def __init__(self, opener, bundle, linkmapper):
- manifest.manifest.__init__(self, opener)
+ def __init__(self, opener, bundle, linkmapper, dirlogstarts=None, dir=''):
+ manifest.manifest.__init__(self, opener, dir=dir)
bundlerevlog.__init__(self, opener, self.indexfile, bundle,
linkmapper)
+ if dirlogstarts is None:
+ dirlogstarts = {}
+ if self.bundle.version == "03":
+ dirlogstarts = _getfilestarts(self.bundle)
+ self._dirlogstarts = dirlogstarts
+ self._linkmapper = linkmapper
def baserevision(self, nodeorrev):
node = nodeorrev
@@ -206,6 +210,14 @@
result = manifest.manifest.revision(self, nodeorrev)
return result
+ def dirlog(self, d):
+ if d in self._dirlogstarts:
+ self.bundle.seek(self._dirlogstarts[d])
+ return bundlemanifest(
+ self.opener, self.bundle, self._linkmapper,
+ self._dirlogstarts, dir=d)
+ return super(bundlemanifest, self).dirlog(d)
+
class bundlefilelog(bundlerevlog, filelog.filelog):
def __init__(self, opener, path, bundle, linkmapper):
filelog.filelog.__init__(self, opener, path)
@@ -236,6 +248,15 @@
self.invalidate()
self.dirty = True
+def _getfilestarts(bundle):
+ bundlefilespos = {}
+ for chunkdata in iter(bundle.filelogheader, {}):
+ fname = chunkdata['filename']
+ bundlefilespos[fname] = bundle.tell()
+ for chunk in iter(lambda: bundle.deltachunk(None), {}):
+ pass
+ return bundlefilespos
+
class bundlerepository(localrepo.localrepository):
def __init__(self, ui, path, bundlename):
def _writetempbundle(read, suffix, header=''):
@@ -283,7 +304,8 @@
"multiple changegroups")
cgstream = part
version = part.params.get('version', '01')
- if version not in changegroup.allsupportedversions(ui):
+ legalcgvers = changegroup.supportedincomingversions(self)
+ if version not in legalcgvers:
msg = _('Unsupported changegroup version: %s')
raise error.Abort(msg % version)
if self.bundle.compressed():
@@ -328,10 +350,6 @@
self.bundle.manifestheader()
linkmapper = self.unfiltered().changelog.rev
m = bundlemanifest(self.svfs, self.bundle, linkmapper)
- # XXX: hack to work with changegroup3, but we still don't handle
- # tree manifests correctly
- if self.bundle.version == "03":
- self.bundle.filelogheader()
self.filestart = self.bundle.tell()
return m
@@ -351,16 +369,7 @@
def file(self, f):
if not self.bundlefilespos:
self.bundle.seek(self.filestart)
- while True:
- chunkdata = self.bundle.filelogheader()
- if not chunkdata:
- break
- fname = chunkdata['filename']
- self.bundlefilespos[fname] = self.bundle.tell()
- while True:
- c = self.bundle.deltachunk(None)
- if not c:
- break
+ self.bundlefilespos = _getfilestarts(self.bundle)
if f in self.bundlefilespos:
self.bundle.seek(self.bundlefilespos[f])
@@ -480,7 +489,10 @@
if bundlename or not localrepo:
# create a bundle (uncompressed if other repo is not local)
- canbundle2 = (ui.configbool('experimental', 'bundle2-exp', True)
+ # developer config: devel.legacy.exchange
+ legexc = ui.configlist('devel', 'legacy.exchange')
+ forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
+ canbundle2 = (not forcebundle1
and other.capable('getbundle')
and other.capable('bundle2'))
if canbundle2:
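
Several hunks in this series replace while-True read loops with the two-argument form of iter(), which keeps calling a function until it returns the sentinel (an empty dict for these chunk streams). A toy version of the pattern::

  chunks = iter([{'node': 'n1'}, {'node': 'n2'}, {}])   # fake chunk stream

  def getchunk():
      # stands in for bundle.deltachunk()/filelogheader(), which return {}
      # once the stream is exhausted
      return next(chunks)

  for chunkdata in iter(getchunk, {}):
      print(chunkdata['node'])
  # prints n1 and n2, then stops when the empty-dict sentinel comes back
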
--- a/mercurial/changegroup.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/changegroup.py Mon Aug 15 12:26:02 2016 -0400
@@ -404,6 +404,7 @@
# coming call to `destroyed` will repair it.
# In other case we can safely update cache on
# disk.
+ repo.ui.debug('updating the branch cache\n')
branchmap.updatecache(repo.filtered('served'))
def runhooks():
@@ -413,8 +414,6 @@
if clstart >= len(repo):
return
- # forcefully update the on-disk branch cache
- repo.ui.debug("updating the branch cache\n")
repo.hook("changegroup", **hookargs)
for n in added:
@@ -475,10 +474,7 @@
def _unpackmanifests(self, repo, revmap, trp, prog, numchanges):
super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog,
numchanges)
- while True:
- chunkdata = self.filelogheader()
- if not chunkdata:
- break
+ for chunkdata in iter(self.filelogheader, {}):
# If we get here, there are directory manifests in the changegroup
d = chunkdata["filename"]
repo.ui.debug("adding %s revisions\n" % d)
@@ -946,17 +942,7 @@
Another wrinkle is doing the reverse, figuring out which changeset in
the changegroup a particular filenode or manifestnode belongs to.
"""
- cl = repo.changelog
- if not roots:
- roots = [nullid]
- discbases = []
- for n in roots:
- discbases.extend([p for p in cl.parents(n) if p != nullid])
- # TODO: remove call to nodesbetween.
- csets, roots, heads = cl.nodesbetween(roots, heads)
- included = set(csets)
- discbases = [n for n in discbases if n not in included]
- outgoing = discovery.outgoing(cl, discbases, heads)
+ outgoing = discovery.outgoingbetween(repo, roots, heads)
bundler = getbundler(version, repo)
return getsubset(repo, outgoing, bundler, source)
@@ -1022,10 +1008,7 @@
def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
revisions = 0
files = 0
- while True:
- chunkdata = source.filelogheader()
- if not chunkdata:
- break
+ for chunkdata in iter(source.filelogheader, {}):
files += 1
f = chunkdata["filename"]
repo.ui.debug("adding %s revisions\n" % f)
--- a/mercurial/changelog.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/changelog.py Mon Aug 15 12:26:02 2016 -0400
@@ -138,9 +138,10 @@
return appender(opener, name, mode, buf)
return _delay
-_changelogrevision = collections.namedtuple('changelogrevision',
- ('manifest', 'user', 'date',
- 'files', 'description', 'extra'))
+_changelogrevision = collections.namedtuple(u'changelogrevision',
+ (u'manifest', u'user', u'date',
+ u'files', u'description',
+ u'extra'))
class changelogrevision(object):
"""Holds results of a parsed changelog revision.
@@ -151,8 +152,8 @@
"""
__slots__ = (
- '_offsets',
- '_text',
+ u'_offsets',
+ u'_text',
)
def __new__(cls, text):
--- a/mercurial/cmdutil.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/cmdutil.py Mon Aug 15 12:26:02 2016 -0400
@@ -549,7 +549,7 @@
if 'treemanifest' not in repo.requirements:
raise error.Abort(_("--dir can only be used on repos with "
"treemanifest enabled"))
- dirlog = repo.dirlog(dir)
+ dirlog = repo.manifest.dirlog(dir)
if len(dirlog):
r = dirlog
elif mf:
@@ -2415,11 +2415,7 @@
ret = 0
for subpath in sorted(ctx.substate):
- def matchessubrepo(subpath):
- return (m.exact(subpath)
- or any(f.startswith(subpath + '/') for f in m.files()))
-
- if subrepos or matchessubrepo(subpath):
+ if subrepos or m.matchessubrepo(subpath):
sub = ctx.sub(subpath)
try:
submatch = matchmod.subdirmatcher(subpath, m)
@@ -2450,16 +2446,8 @@
total = len(subs)
count = 0
for subpath in subs:
- def matchessubrepo(matcher, subpath):
- if matcher.exact(subpath):
- return True
- for f in matcher.files():
- if f.startswith(subpath):
- return True
- return False
-
count += 1
- if subrepos or matchessubrepo(m, subpath):
+ if subrepos or m.matchessubrepo(subpath):
ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
sub = wctx.sub(subpath)
@@ -2581,14 +2569,7 @@
write(file)
return 0
- # Don't warn about "missing" files that are really in subrepos
- def badfn(path, msg):
- for subpath in ctx.substate:
- if path.startswith(subpath + '/'):
- return
- matcher.bad(path, msg)
-
- for abs in ctx.walk(matchmod.badmatch(matcher, badfn)):
+ for abs in ctx.walk(matcher):
write(abs)
err = 0
--- a/mercurial/commands.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/commands.py Mon Aug 15 12:26:02 2016 -0400
@@ -1987,8 +1987,9 @@
tags = []
- lock = tr = None
+ wlock = lock = tr = None
try:
+ wlock = repo.wlock()
lock = repo.lock()
tr = repo.transaction("builddag")
@@ -2073,7 +2074,7 @@
repo.vfs.write("localtags", "".join(tags))
finally:
ui.progress(_('building'), None)
- release(tr, lock)
+ release(tr, lock, wlock)
@command('debugbundle',
[('a', 'all', None, _('show all details')),
@@ -2102,10 +2103,7 @@
def showchunks(named):
ui.write("\n%s%s\n" % (indent_string, named))
chain = None
- while True:
- chunkdata = gen.deltachunk(chain)
- if not chunkdata:
- break
+ for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
node = chunkdata['node']
p1 = chunkdata['p1']
p2 = chunkdata['p2']
@@ -2121,10 +2119,7 @@
showchunks("changelog")
chunkdata = gen.manifestheader()
showchunks("manifest")
- while True:
- chunkdata = gen.filelogheader()
- if not chunkdata:
- break
+ for chunkdata in iter(gen.filelogheader, {}):
fname = chunkdata['filename']
showchunks(fname)
else:
@@ -2132,10 +2127,7 @@
raise error.Abort(_('use debugbundle2 for this file'))
chunkdata = gen.changelogheader()
chain = None
- while True:
- chunkdata = gen.deltachunk(chain)
- if not chunkdata:
- break
+ for chunkdata in iter(lambda: gen.deltachunk(chain), {}):
node = chunkdata['node']
ui.write("%s%s\n" % (indent_string, hex(node)))
chain = node
@@ -2398,12 +2390,11 @@
def debugextensions(ui, **opts):
'''show information about active extensions'''
exts = extensions.extensions(ui)
+ hgver = util.version()
fm = ui.formatter('debugextensions', opts)
for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
extsource = extmod.__file__
- exttestedwith = getattr(extmod, 'testedwith', None)
- if exttestedwith is not None:
- exttestedwith = exttestedwith.split()
+ exttestedwith = getattr(extmod, 'testedwith', '').split()
extbuglink = getattr(extmod, 'buglink', None)
fm.startitem()
@@ -2414,19 +2405,18 @@
fm.write('name', '%s', extname)
if not exttestedwith:
fm.plain(_(' (untested!)\n'))
+ elif exttestedwith == ['internal'] or hgver in exttestedwith:
+ fm.plain('\n')
else:
- if exttestedwith == ['internal'] or \
- util.version() in exttestedwith:
- fm.plain('\n')
- else:
- lasttestedversion = exttestedwith[-1]
- fm.plain(' (%s!)\n' % lasttestedversion)
+ lasttestedversion = exttestedwith[-1]
+ fm.plain(' (%s!)\n' % lasttestedversion)
fm.condwrite(ui.verbose and extsource, 'source',
_(' location: %s\n'), extsource or "")
fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
- _(' tested with: %s\n'), ' '.join(exttestedwith or []))
+ _(' tested with: %s\n'),
+ fm.formatlist(exttestedwith, name='ver'))
fm.condwrite(ui.verbose and extbuglink, 'buglink',
_(' bug reporting: %s\n'), extbuglink or "")
@@ -4604,12 +4594,15 @@
section = None
subtopic = None
if name and '.' in name:
- name, section = name.split('.', 1)
- section = encoding.lower(section)
- if '.' in section:
- subtopic, section = section.split('.', 1)
+ name, remaining = name.split('.', 1)
+ remaining = encoding.lower(remaining)
+ if '.' in remaining:
+ subtopic, section = remaining.split('.', 1)
else:
- subtopic = section
+ if name in help.subtopics:
+ subtopic = remaining
+ else:
+ section = remaining
text = help.help_(ui, name, subtopic=subtopic, **opts)
--- a/mercurial/crecord.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/crecord.py Mon Aug 15 12:26:02 2016 -0400
@@ -28,7 +28,7 @@
# This is required for ncurses to display non-ASCII characters in default user
# locale encoding correctly. --immerrr
-locale.setlocale(locale.LC_ALL, '')
+locale.setlocale(locale.LC_ALL, u'')
# patch comments based on the git one
diffhelptext = _("""# To remove '-' lines, make them ' ' lines (context).
--- a/mercurial/demandimport.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/demandimport.py Mon Aug 15 12:26:02 2016 -0400
@@ -64,8 +64,12 @@
return importfunc(hgextname, globals, *args, **kwargs)
class _demandmod(object):
- """module demand-loader and proxy"""
- def __init__(self, name, globals, locals, level=level):
+ """module demand-loader and proxy
+
+ Specify 1 as the 'level' argument at construction to import the
+ module relatively.
+ """
+ def __init__(self, name, globals, locals, level):
if '.' in name:
head, rest = name.split('.', 1)
after = [rest]
@@ -117,7 +121,8 @@
if '.' in p:
h, t = p.split('.', 1)
if getattr(mod, h, nothing) is nothing:
- setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__))
+ setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__,
+ level=1))
elif t:
subload(getattr(mod, h), t)
@@ -210,8 +215,8 @@
mod = rootmod
for comp in modname.split('.')[1:]:
if getattr(mod, comp, nothing) is nothing:
- setattr(mod, comp,
- _demandmod(comp, mod.__dict__, mod.__dict__))
+ setattr(mod, comp, _demandmod(comp, mod.__dict__,
+ mod.__dict__, level=1))
mod = getattr(mod, comp)
return mod
--- a/mercurial/dirstate.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/dirstate.py Mon Aug 15 12:26:02 2016 -0400
@@ -74,8 +74,6 @@
raise
return (vfs(filename), False)
-_token = object()
-
class dirstate(object):
def __init__(self, opener, ui, root, validate):
@@ -103,6 +101,8 @@
self._parentwriters = 0
self._filename = 'dirstate'
self._pendingfilename = '%s.pending' % self._filename
+ self._plchangecallbacks = {}
+ self._origpl = None
# for consistent view between _pl() and _read() invocations
self._pendingmode = None
@@ -349,6 +349,8 @@
self._dirty = self._dirtypl = True
oldp2 = self._pl[1]
+ if self._origpl is None:
+ self._origpl = self._pl
self._pl = p1, p2
copies = {}
if oldp2 != nullid and p2 == nullid:
@@ -444,6 +446,7 @@
self._lastnormaltime = 0
self._dirty = False
self._parentwriters = 0
+ self._origpl = None
def copy(self, source, dest):
"""Mark dest as a copy of source. Unmark dest if source is None."""
@@ -689,25 +692,17 @@
if f in self._nonnormalset:
self._nonnormalset.remove(f)
+ if self._origpl is None:
+ self._origpl = self._pl
self._pl = (parent, nullid)
self._dirty = True
- def write(self, tr=_token):
+ def write(self, tr):
if not self._dirty:
return
filename = self._filename
- if tr is _token: # not explicitly specified
- self._ui.deprecwarn('use dirstate.write with '
- 'repo.currenttransaction()',
- '3.9')
-
- if self._opener.lexists(self._pendingfilename):
- # if pending file already exists, in-memory changes
- # should be written into it, because it has priority
- # to '.hg/dirstate' at reading under HG_PENDING mode
- filename = self._pendingfilename
- elif tr:
+ if tr:
# 'dirstate.write()' is not only for writing in-memory
# changes out, but also for dropping ambiguous timestamp.
# delayed writing re-raise "ambiguous timestamp issue".
@@ -733,7 +728,23 @@
st = self._opener(filename, "w", atomictemp=True, checkambig=True)
self._writedirstate(st)
+ def addparentchangecallback(self, category, callback):
+ """add a callback to be called when the wd parents are changed
+
+ Callback will be called with the following arguments:
+ dirstate, (oldp1, oldp2), (newp1, newp2)
+
+ Category is a unique identifier to allow overwriting an old callback
+ with a newer callback.
+ """
+ self._plchangecallbacks[category] = callback
+
def _writedirstate(self, st):
+ # notify callbacks about parents change
+ if self._origpl is not None and self._origpl != self._pl:
+ for c, callback in sorted(self._plchangecallbacks.iteritems()):
+ callback(self, self._origpl, self._pl)
+ self._origpl = None
# use the modification time of the newly created temporary file as the
# filesystem's notion of 'now'
now = util.fstat(st).st_mtime & _rangemask
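
The journal extension above stops wrapping _writedirstate and instead registers through this new hook; callbacks are keyed by category and fired from _writedirstate() only when the parents actually changed. A self-contained sketch of that contract using a fake dirstate (names are illustrative)::

  class fakedirstate(object):                  # illustrative stand-in only
      def __init__(self):
          self._plchangecallbacks = {}
          self._origpl = None
          self._pl = ('p1-old', 'p2-old')

      def addparentchangecallback(self, category, callback):
          self._plchangecallbacks[category] = callback

      def setparents(self, p1, p2):
          if self._origpl is None:
              self._origpl = self._pl          # remember the pre-change pair
          self._pl = (p1, p2)

      def write(self):
          if self._origpl is not None and self._origpl != self._pl:
              for _cat, cb in sorted(self._plchangecallbacks.items()):
                  cb(self, self._origpl, self._pl)
              self._origpl = None

  def record(dirstate, old, new):
      print('journal: %r -> %r' % (old, new))

  ds = fakedirstate()
  ds.addparentchangecallback('journal', record)
  ds.setparents('p1-new', 'p2-new')
  ds.write()   # journal: ('p1-old', 'p2-old') -> ('p1-new', 'p2-new')
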
--- a/mercurial/discovery.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/discovery.py Mon Aug 15 12:26:02 2016 -0400
@@ -101,6 +101,27 @@
self._computecommonmissing()
return self._missing
+def outgoingbetween(repo, roots, heads):
+ """create an ``outgoing`` consisting of nodes between roots and heads
+
+ The ``missing`` nodes will be descendants of any of the ``roots`` and
+ ancestors of any of the ``heads``, both of which are defined as lists
+ of binary nodes.
+ """
+ cl = repo.changelog
+ if not roots:
+ roots = [nullid]
+ discbases = []
+ for n in roots:
+ discbases.extend([p for p in cl.parents(n) if p != nullid])
+ # TODO remove call to nodesbetween.
+ # TODO populate attributes on outgoing instance instead of setting
+ # discbases.
+ csets, roots, heads = cl.nodesbetween(roots, heads)
+ included = set(csets)
+ discbases = [n for n in discbases if n not in included]
+ return outgoing(cl, discbases, heads)
+
def findcommonoutgoing(repo, other, onlyheads=None, force=False,
commoninc=None, portable=False):
'''Return an outgoing instance to identify the nodes present in repo but
--- a/mercurial/dispatch.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/dispatch.py Mon Aug 15 12:26:02 2016 -0400
@@ -150,7 +150,7 @@
except ValueError:
pass # happens if called in a thread
- try:
+ def _runcatchfunc():
try:
debugger = 'pdb'
debugtrace = {
@@ -212,6 +212,16 @@
ui.traceback()
raise
+ return callcatch(ui, _runcatchfunc)
+
+def callcatch(ui, func):
+ """call func() with global exception handling
+
+ return func() if no exception happens. otherwise do some error handling
+ and return an exit code accordingly.
+ """
+ try:
+ return func()
# Global exception handling, alphabetically
# Mercurial-specific first, followed by built-in and library exceptions
except error.AmbiguousCommand as inst:
--- a/mercurial/exchange.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/exchange.py Mon Aug 15 12:26:02 2016 -0400
@@ -257,13 +257,21 @@
return bundler.newpart('obsmarkers', data=stream)
return None
-def _canusebundle2(op):
- """return true if a pull/push can use bundle2
+def _forcebundle1(op):
+ """return true if a pull/push must use bundle1
- Feel free to nuke this function when we drop the experimental option"""
- return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
- and op.remote.capable('bundle2'))
-
+ This function is used to allow testing of the older bundle version"""
+ ui = op.repo.ui
+ forcebundle1 = False
+ # The goal of this config option is to let developers choose the bundle
+ # version used during exchange. This is especially handy for testing.
+ # The value is a list of bundle versions to pick from; the highest
+ # version available should be used.
+ #
+ # developer config: devel.legacy.exchange
+ exchange = ui.configlist('devel', 'legacy.exchange')
+ forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
+ return forcebundle1 or not op.remote.capable('bundle2')
class pushoperation(object):
"""A object that represent a single push operation
@@ -417,7 +425,7 @@
# bundle2 push may receive a reply bundle touching bookmarks or other
# things requiring the wlock. Take it now to ensure proper ordering.
maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
- if _canusebundle2(pushop) and maypushback:
+ if (not _forcebundle1(pushop)) and maypushback:
localwlock = pushop.repo.wlock()
locallock = pushop.repo.lock()
pushop.locallocked = True
@@ -442,7 +450,7 @@
lock = pushop.remote.lock()
try:
_pushdiscovery(pushop)
- if _canusebundle2(pushop):
+ if not _forcebundle1(pushop):
_pushbundle2(pushop)
_pushchangeset(pushop)
_pushsyncphase(pushop)
@@ -1100,7 +1108,7 @@
@util.propertycache
def canusebundle2(self):
- return _canusebundle2(self)
+ return not _forcebundle1(self)
@util.propertycache
def remotebundle2caps(self):
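
For reference, the new knob is read with ui.configlist('devel', 'legacy.exchange'), so a test hgrc forcing the old wire format could look like this (value shown is illustrative)::

  [devel]
  # pick bundle1 even when the peer advertises bundle2
  legacy.exchange = bundle1
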
--- a/mercurial/extensions.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/extensions.py Mon Aug 15 12:26:02 2016 -0400
@@ -210,11 +210,13 @@
return func(*(args + a), **kw)
return closure
-def _updatewrapper(wrap, origfn):
- '''Copy attributes to wrapper function'''
+def _updatewrapper(wrap, origfn, unboundwrapper):
+ '''Copy and add some useful attributes to wrapper'''
wrap.__module__ = getattr(origfn, '__module__')
wrap.__doc__ = getattr(origfn, '__doc__')
wrap.__dict__.update(getattr(origfn, '__dict__', {}))
+ wrap._origfunc = origfn
+ wrap._unboundwrapper = unboundwrapper
def wrapcommand(table, command, wrapper, synopsis=None, docstring=None):
'''Wrap the command named `command' in table
@@ -254,7 +256,7 @@
origfn = entry[0]
wrap = bind(util.checksignature(wrapper), util.checksignature(origfn))
- _updatewrapper(wrap, origfn)
+ _updatewrapper(wrap, origfn, wrapper)
if docstring is not None:
wrap.__doc__ += docstring
@@ -303,10 +305,46 @@
origfn = getattr(container, funcname)
assert callable(origfn)
wrap = bind(wrapper, origfn)
- _updatewrapper(wrap, origfn)
+ _updatewrapper(wrap, origfn, wrapper)
setattr(container, funcname, wrap)
return origfn
+def unwrapfunction(container, funcname, wrapper=None):
+ '''undo wrapfunction
+
+ If wrapper is None, undo the last wrap. Otherwise remove the wrapper
+ from the chain of wrappers.
+
+ Return the removed wrapper.
+ Raise IndexError if wrapper is None and nothing to unwrap; ValueError if
+ wrapper is not None but is not found in the wrapper chain.
+ '''
+ chain = getwrapperchain(container, funcname)
+ origfn = chain.pop()
+ if wrapper is None:
+ wrapper = chain[0]
+ chain.remove(wrapper)
+ setattr(container, funcname, origfn)
+ for w in reversed(chain):
+ wrapfunction(container, funcname, w)
+ return wrapper
+
+def getwrapperchain(container, funcname):
+ '''get a chain of wrappers of a function
+
+ Return a list of functions: [newest wrapper, ..., oldest wrapper, origfunc]
+
+ The wrapper functions are the ones passed to wrapfunction, whose first
+ argument is origfunc.
+ '''
+ result = []
+ fn = getattr(container, funcname)
+ while fn:
+ assert callable(fn)
+ result.append(getattr(fn, '_unboundwrapper', fn))
+ fn = getattr(fn, '_origfunc', None)
+ return result
+
def _disabledpaths(strip_init=False):
'''find paths of disabled extensions. returns a dict of {name: path}
removes /__init__.py from packages if strip_init is True'''
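
A short usage sketch of the new wrapper bookkeeping, assuming a Mercurial with this patch applied; the demo module and functions are made up::

  import types
  from mercurial import extensions

  mod = types.ModuleType('demo')               # stand-in for a real module
  mod.greet = lambda name: 'hello %s' % name

  def shoutwrapper(orig, name):
      return orig(name).upper()

  extensions.wrapfunction(mod, 'greet', shoutwrapper)
  print(mod.greet('world'))                          # HELLO WORLD
  print(extensions.getwrapperchain(mod, 'greet'))    # [shoutwrapper, <lambda>]
  extensions.unwrapfunction(mod, 'greet', shoutwrapper)
  print(mod.greet('world'))                          # hello world
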
--- a/mercurial/filemerge.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/filemerge.py Mon Aug 15 12:26:02 2016 -0400
@@ -230,50 +230,56 @@
util.writefile(file, newdata)
@internaltool('prompt', nomerge)
-def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""Asks the user which of the local `p1()` or the other `p2()` version to
keep as the merged version."""
ui = repo.ui
fd = fcd.path()
+ prompts = partextras(labels)
+ prompts['fd'] = fd
try:
if fco.isabsent():
index = ui.promptchoice(
- _("local changed %s which remote deleted\n"
+ _("local%(l)s changed %(fd)s which other%(o)s deleted\n"
"use (c)hanged version, (d)elete, or leave (u)nresolved?"
- "$$ &Changed $$ &Delete $$ &Unresolved") % fd, 2)
+ "$$ &Changed $$ &Delete $$ &Unresolved") % prompts, 2)
choice = ['local', 'other', 'unresolved'][index]
elif fcd.isabsent():
index = ui.promptchoice(
- _("remote changed %s which local deleted\n"
+ _("other%(o)s changed %(fd)s which local%(l)s deleted\n"
"use (c)hanged version, leave (d)eleted, or "
"leave (u)nresolved?"
- "$$ &Changed $$ &Deleted $$ &Unresolved") % fd, 2)
+ "$$ &Changed $$ &Deleted $$ &Unresolved") % prompts, 2)
choice = ['other', 'local', 'unresolved'][index]
else:
index = ui.promptchoice(
- _("no tool found to merge %s\n"
- "keep (l)ocal, take (o)ther, or leave (u)nresolved?"
- "$$ &Local $$ &Other $$ &Unresolved") % fd, 2)
+ _("no tool found to merge %(fd)s\n"
+ "keep (l)ocal%(l)s, take (o)ther%(o)s, or leave (u)nresolved?"
+ "$$ &Local $$ &Other $$ &Unresolved") % prompts, 2)
choice = ['local', 'other', 'unresolved'][index]
if choice == 'other':
- return _iother(repo, mynode, orig, fcd, fco, fca, toolconf)
+ return _iother(repo, mynode, orig, fcd, fco, fca, toolconf,
+ labels)
elif choice == 'local':
- return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf)
+ return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf,
+ labels)
elif choice == 'unresolved':
- return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf)
+ return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
+ labels)
except error.ResponseExpected:
ui.write("\n")
- return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf)
+ return _ifail(repo, mynode, orig, fcd, fco, fca, toolconf,
+ labels)
@internaltool('local', nomerge)
-def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""Uses the local `p1()` version of files as the merged version."""
return 0, fcd.isabsent()
@internaltool('other', nomerge)
-def _iother(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _iother(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""Uses the other `p2()` version of files as the merged version."""
if fco.isabsent():
# local changed, remote deleted -- 'deleted' picked
@@ -285,7 +291,7 @@
return 0, deleted
@internaltool('fail', nomerge)
-def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
+def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf, labels=None):
"""
Rather than attempting to merge files that were modified on both
branches, it marks them as unresolved. The resolve command must be
@@ -508,11 +514,11 @@
# 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
return util.ellipsis(mark, 80 - 8)
-_defaultconflictmarker = ('{node|short} ' +
- '{ifeq(tags, "tip", "", "{tags} ")}' +
- '{if(bookmarks, "{bookmarks} ")}' +
- '{ifeq(branch, "default", "", "{branch} ")}' +
- '- {author|user}: {desc|firstline}')
+_defaultconflictmarker = ('{node|short} '
+ '{ifeq(tags, "tip", "", "{tags} ")}'
+ '{if(bookmarks, "{bookmarks} ")}'
+ '{ifeq(branch, "default", "", "{branch} ")}'
+ '- {author|user}: {desc|firstline}')
_defaultconflictlabels = ['local', 'other']
@@ -537,6 +543,22 @@
newlabels.append(_formatconflictmarker(repo, ca, tmpl, labels[2], pad))
return newlabels
+def partextras(labels):
+ """Return a dictionary of extra labels for use in prompts to the user
+
+ Intended use is in strings of the form "(l)ocal%(l)s".
+ """
+ if labels is None:
+ return {
+ "l": "",
+ "o": "",
+ }
+
+ return {
+ "l": " [%s]" % labels[0],
+ "o": " [%s]" % labels[1],
+ }
+
def _filemerge(premerge, repo, mynode, orig, fcd, fco, fca, labels=None):
"""perform a 3-way merge in the working directory
@@ -588,7 +610,7 @@
toolconf = tool, toolpath, binary, symlink
if mergetype == nomerge:
- r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf)
+ r, deleted = func(repo, mynode, orig, fcd, fco, fca, toolconf, labels)
return True, r, deleted
if premerge:
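
How the new %(l)s/%(o)s placeholders expand, shown with a copy of partextras() and an illustrative prompt; the labels and filename are made up::

  def partextras(labels):
      # empty suffixes without labels, bracketed suffixes with them
      if labels is None:
          return {"l": "", "o": ""}
      return {"l": " [%s]" % labels[0], "o": " [%s]" % labels[1]}

  prompts = partextras(['working copy', 'merge rev'])
  prompts['fd'] = 'foo.txt'
  print("no tool found to merge %(fd)s\n"
        "keep (l)ocal%(l)s, take (o)ther%(o)s, or leave (u)nresolved?" % prompts)
  # -> keep (l)ocal [working copy], take (o)ther [merge rev], ...
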
--- a/mercurial/formatter.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/formatter.py Mon Aug 15 12:26:02 2016 -0400
@@ -18,6 +18,7 @@
from . import (
encoding,
error,
+ templatekw,
templater,
util,
)
@@ -45,6 +46,14 @@
if self._item is not None:
self._showitem()
self._item = {}
+ @staticmethod
+ def formatdate(date, fmt='%a %b %d %H:%M:%S %Y %1%2'):
+ '''convert date tuple to appropriate format'''
+ return date
+ @staticmethod
+ def formatlist(data, name, fmt='%s', sep=' '):
+ '''convert iterable to appropriate list format'''
+ return list(data)
def data(self, **data):
'''insert data into item that's not shown in default output'''
self._item.update(data)
@@ -78,6 +87,14 @@
return False
def startitem(self):
pass
+ @staticmethod
+ def formatdate(date, fmt='%a %b %d %H:%M:%S %Y %1%2'):
+ '''stringify date tuple in the given format'''
+ return util.datestr(date, fmt)
+ @staticmethod
+ def formatlist(data, name, fmt='%s', sep=' '):
+ '''stringify iterable separated by sep'''
+ return sep.join(fmt % e for e in data)
def data(self, **data):
pass
def write(self, fields, deftext, *fielddata, **opts):
@@ -112,7 +129,7 @@
self._ui.write(pickle.dumps(self._data))
def _jsonifyobj(v):
- if isinstance(v, tuple):
+ if isinstance(v, (list, tuple)):
return '[' + ', '.join(_jsonifyobj(e) for e in v) + ']'
elif v is None:
return 'null'
@@ -157,6 +174,16 @@
def _showitem(self):
g = self._t(self._topic, ui=self._ui, **self._item)
self._ui.write(templater.stringify(g))
+ @staticmethod
+ def formatlist(data, name, fmt='%s', sep=' '):
+ '''build object that can be evaluated as either plain string or list'''
+ # name is a mandatory argument for now, but it could be optional if
+ # we have a default template keyword, e.g. {item}
+ data = list(data)
+ def f():
+ yield plainformatter.formatlist(data, name, fmt, sep)
+ return templatekw._hybrid(f(), data, lambda x: {name: x},
+ lambda d: fmt % d[name])
def lookuptemplate(ui, topic, tmpl):
# looks like a literal template?
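
The journal hunk earlier switches to fm.formatlist() so plain output stays a joined string while structured formatters keep the list. A plain-Python approximation of the two behaviours (function names are illustrative)::

  def plainlist(data, name, fmt='%s', sep=' '):
      # what the plain formatter does: stringify immediately
      return sep.join(fmt % e for e in data)

  def structuredlist(data, name, fmt='%s', sep=' '):
      # what JSON/pickle-style formatters do: keep the values as a list
      # (name/fmt are kept only for signature parity)
      return list(data)

  hashes = ['1de81a2f051a', 'e9f2f4a0d41e']            # fake short hashes
  print(plainlist(hashes, name='node', sep=','))        # joined string
  print(structuredlist(hashes, name='node'))            # list of values
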
--- a/mercurial/help.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/help.py Mon Aug 15 12:26:02 2016 -0400
@@ -184,13 +184,13 @@
return loader
internalstable = sorted([
- (['bundles'], _('container for exchange of repository data'),
+ (['bundles'], _('Bundles'),
loaddoc('bundles', subdir='internals')),
- (['changegroups'], _('representation of revlog data'),
+ (['changegroups'], _('Changegroups'),
loaddoc('changegroups', subdir='internals')),
- (['requirements'], _('repository requirements'),
+ (['requirements'], _('Repository Requirements'),
loaddoc('requirements', subdir='internals')),
- (['revlogs'], _('revision storage mechanism'),
+ (['revlogs'], _('Revision Logs'),
loaddoc('revlogs', subdir='internals')),
])
--- a/mercurial/help/config.txt Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/help/config.txt Mon Aug 15 12:26:02 2016 -0400
@@ -1557,6 +1557,21 @@
repositories to the exchange format required by the bundle1 data
format can consume a lot of CPU.
+``zliblevel``
+ Integer between ``-1`` and ``9`` that controls the zlib compression level
+ for wire protocol commands that send zlib compressed output (notably the
+ commands that send repository history data).
+
+ The default (``-1``) uses the default zlib compression level, which is
+ likely equivalent to ``6``. ``0`` means no compression. ``9`` means
+ maximum compression.
+
+ Setting this option allows server operators to make trade-offs between
+ bandwidth and CPU used. Lowering the compression lowers CPU utilization
+ but sends more bytes to clients.
+
+ This option only impacts the HTTP server.
+
``smtp``
--------
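
An example server section using the new ``zliblevel`` option (the value shown is illustrative)::

  [server]
  # favour lower CPU usage over minimal transfer size on this hgweb host
  zliblevel = 3
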
--- a/mercurial/help/internals/bundles.txt Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/help/internals/bundles.txt Mon Aug 15 12:26:02 2016 -0400
@@ -1,6 +1,3 @@
-Bundles
-=======
-
A bundle is a container for repository data.
Bundles are used as standalone files as well as the interchange format
@@ -8,7 +5,7 @@
each other.
Headers
--------
+=======
Bundles produced since Mercurial 0.7 (September 2005) have a 4 byte
header identifying the major bundle type. The header always begins with
--- a/mercurial/help/internals/changegroups.txt Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/help/internals/changegroups.txt Mon Aug 15 12:26:02 2016 -0400
@@ -1,6 +1,3 @@
-Changegroups
-============
-
Changegroups are representations of repository revlog data, specifically
the changelog, manifest, and filelogs.
@@ -35,7 +32,7 @@
call this an *empty chunk*.
Delta Groups
-------------
+============
A *delta group* expresses the content of a revlog as a series of deltas,
or patches against previous revisions.
@@ -111,21 +108,21 @@
which can result in smaller deltas and more efficient encoding of data.
Changeset Segment
------------------
+=================
The *changeset segment* consists of a single *delta group* holding
changelog data. It is followed by an *empty chunk* to denote the
boundary to the *manifests segment*.
Manifest Segment
-----------------
+================
The *manifest segment* consists of a single *delta group* holding
manifest data. It is followed by an *empty chunk* to denote the boundary
to the *filelogs segment*.
Filelogs Segment
-----------------
+================
The *filelogs* segment consists of multiple sub-segments, each
corresponding to an individual file whose data is being described::
@@ -154,4 +151,3 @@
That is, a *chunk* consisting of the filename (not terminated or padded)
followed by N chunks constituting the *delta group* for this file.
-
--- a/mercurial/help/internals/requirements.txt Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/help/internals/requirements.txt Mon Aug 15 12:26:02 2016 -0400
@@ -1,5 +1,3 @@
-Requirements
-============
Repositories contain a file (``.hg/requires``) containing a list of
features/capabilities that are *required* for clients to interface
@@ -19,7 +17,7 @@
Mercurial core distribution.
revlogv1
---------
+========
When present, revlogs are version 1 (RevlogNG). RevlogNG was introduced
in 2006. The ``revlogv1`` requirement has been enabled by default
@@ -28,7 +26,7 @@
If this requirement is not present, version 0 revlogs are assumed.
store
------
+=====
The *store* repository layout should be used.
@@ -36,7 +34,7 @@
was introduced in Mercurial 0.9.2.
fncache
--------
+=======
The *fncache* repository layout should be used.
@@ -48,7 +46,7 @@
1.1 (released December 2008).
shared
-------
+======
Denotes that the store for a repository is shared from another location
(defined by the ``.hg/sharedpath`` file).
@@ -58,7 +56,7 @@
The requirement was added in Mercurial 1.3 (released July 2009).
dotencode
----------
+=========
The *dotencode* repository layout should be used.
@@ -70,7 +68,7 @@
Mercurial 1.7 (released November 2010).
parentdelta
------------
+===========
Denotes a revlog delta encoding format that was experimental and
replaced by *generaldelta*. It should not be seen in the wild because
@@ -80,7 +78,7 @@
1.9.
generaldelta
-------------
+============
Revlogs should be created with the *generaldelta* flag enabled. The
generaldelta flag will cause deltas to be encoded against a parent
@@ -91,7 +89,7 @@
default until Mercurial 3.7 (released February 2016).
manifestv2
-----------
+==========
Denotes that version 2 of manifests are being used.
@@ -100,7 +98,7 @@
by default.
treemanifest
-------------
+============
Denotes that tree manifests are being used. Tree manifests are
one manifest per directory (as opposed to a single flat manifest).
--- a/mercurial/help/internals/revlogs.txt Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/help/internals/revlogs.txt Mon Aug 15 12:26:02 2016 -0400
@@ -1,6 +1,3 @@
-Revlogs
-=======
-
Revision logs - or *revlogs* - are an append only data structure for
storing discrete entries, or *revisions*. They are the primary storage
mechanism of repository data.
@@ -28,7 +25,7 @@
used to mean *does not exist* or *not defined*.
File Format
------------
+===========
A revlog begins with a 32-bit big endian integer holding version info
and feature flags. This integer is shared with the first revision
@@ -77,7 +74,7 @@
below.
RevlogNG Format
----------------
+===============
RevlogNG (version 1) begins with an index describing the revisions in
the revlog. If the ``inline`` flag is set, revision data is stored inline,
@@ -129,7 +126,7 @@
and the 6 byte absolute offset field from the first revlog entry.
Delta Chains
-------------
+============
Revision data is encoded as a chain of *chunks*. Each chain begins with
the compressed original full text for that revision. Each subsequent
@@ -153,7 +150,7 @@
computed against an arbitrary revision (almost certainly a parent revision).
File Storage
-------------
+============
Revlogs logically consist of an index (metadata of entries) and
revision data. This data may be stored together in a single file or in
@@ -172,7 +169,7 @@
(possibly containing inline data) and a ``.d`` file holds the revision data.
Revision Entries
-----------------
+================
Revision entries consist of an optional 1 byte header followed by an
encoding of the revision data. The headers are as follows:
@@ -187,7 +184,7 @@
The 0x78 value is actually the first byte of the zlib header (CMF byte).
Hash Computation
-----------------
+================
The hash of the revision is stored in the index and is used both as a primary
key and for data integrity verification.
--- a/mercurial/hg.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/hg.py Mon Aug 15 12:26:02 2016 -0400
@@ -259,10 +259,11 @@
fp.write("default = %s\n" % default)
fp.close()
- if bookmarks:
- fp = destrepo.vfs('shared', 'w')
- fp.write(sharedbookmarks + '\n')
- fp.close()
+ with destrepo.wlock():
+ if bookmarks:
+ fp = destrepo.vfs('shared', 'w')
+ fp.write(sharedbookmarks + '\n')
+ fp.close()
def _postshareupdate(repo, update, checkout=None):
"""Maybe perform a working directory update after a shared repo is created.
--- a/mercurial/hgweb/protocol.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/hgweb/protocol.py Mon Aug 15 12:26:02 2016 -0400
@@ -74,7 +74,7 @@
self.ui.ferr, self.ui.fout = self.oldio
return val
def groupchunks(self, cg):
- z = zlib.compressobj()
+ z = zlib.compressobj(self.ui.configint('server', 'zliblevel', -1))
while True:
chunk = cg.read(4096)
if not chunk:
--- a/mercurial/localrepo.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/localrepo.py Mon Aug 15 12:26:02 2016 -0400
@@ -504,9 +504,6 @@
def manifest(self):
return manifest.manifest(self.svfs)
- def dirlog(self, dir):
- return self.manifest.dirlog(dir)
-
@repofilecache('dirstate')
def dirstate(self):
return dirstate.dirstate(self.vfs, self.ui, self.root,
@@ -1007,8 +1004,7 @@
def transaction(self, desc, report=None):
if (self.ui.configbool('devel', 'all-warnings')
or self.ui.configbool('devel', 'check-locks')):
- l = self._lockref and self._lockref()
- if l is None or not l.held:
+ if self._currentlock(self._lockref) is None:
raise RuntimeError('programming error: transaction requires '
'locking')
tr = self.currenttransaction()
@@ -1320,8 +1316,8 @@
If both 'lock' and 'wlock' must be acquired, ensure you always acquires
'wlock' first to avoid a dead-lock hazard.'''
- l = self._lockref and self._lockref()
- if l is not None and l.held:
+ l = self._currentlock(self._lockref)
+ if l is not None:
l.lock()
return l
@@ -1352,8 +1348,7 @@
# acquisition would not cause dead-lock as they would just fail.
if wait and (self.ui.configbool('devel', 'all-warnings')
or self.ui.configbool('devel', 'check-locks')):
- l = self._lockref and self._lockref()
- if l is not None and l.held:
+ if self._currentlock(self._lockref) is not None:
self.ui.develwarn('"wlock" acquired after "lock"')
def unlock():
--- a/mercurial/manifest.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/manifest.py Mon Aug 15 12:26:02 2016 -0400
@@ -913,7 +913,7 @@
self._usemanifestv2 = usemanifestv2
indexfile = "00manifest.i"
if dir:
- assert self._treeondisk
+ assert self._treeondisk, 'opts is %r' % opts
if not dir.endswith('/'):
dir = dir + '/'
indexfile = "meta/" + dir + "00manifest.i"
--- a/mercurial/match.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/match.py Mon Aug 15 12:26:02 2016 -0400
@@ -320,6 +320,10 @@
kindpats.append((kind, pat, ''))
return kindpats
+ def matchessubrepo(self, subpath):
+ return (self.exact(subpath)
+ or any(f.startswith(subpath + '/') for f in self.files()))
+
def exact(root, cwd, files, badfn=None):
return match(root, cwd, files, exact=True, badfn=badfn)
--- a/mercurial/merge.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/merge.py Mon Aug 15 12:26:02 2016 -0400
@@ -1535,11 +1535,13 @@
if '.hgsubstate' in actionbyfile:
f = '.hgsubstate'
m, args, msg = actionbyfile[f]
+ prompts = filemerge.partextras(labels)
+ prompts['f'] = f
if m == 'cd':
if repo.ui.promptchoice(
- _("local changed %s which remote deleted\n"
+ _("local%(l)s changed %(f)s which other%(o)s deleted\n"
"use (c)hanged version or (d)elete?"
- "$$ &Changed $$ &Delete") % f, 0):
+ "$$ &Changed $$ &Delete") % prompts, 0):
actionbyfile[f] = ('r', None, "prompt delete")
elif f in p1:
actionbyfile[f] = ('am', None, "prompt keep")
@@ -1549,9 +1551,9 @@
f1, f2, fa, move, anc = args
flags = p2[f2].flags()
if repo.ui.promptchoice(
- _("remote changed %s which local deleted\n"
+ _("other%(o)s changed %(f)s which local%(l)s deleted\n"
"use (c)hanged version or leave (d)eleted?"
- "$$ &Changed $$ &Deleted") % f, 0) == 0:
+ "$$ &Changed $$ &Deleted") % prompts, 0) == 0:
actionbyfile[f] = ('g', (flags, False), "prompt recreating")
else:
del actionbyfile[f]
--- a/mercurial/mpatch.c Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/mpatch.c Mon Aug 15 12:26:02 2016 -0400
@@ -20,49 +20,33 @@
of the GNU General Public License, incorporated herein by reference.
*/
-#define PY_SSIZE_T_CLEAN
-#include <Python.h>
#include <stdlib.h>
#include <string.h>
-#include "util.h"
#include "bitmanipulation.h"
-
-static char mpatch_doc[] = "Efficient binary patching.";
-static PyObject *mpatch_Error;
+#include "compat.h"
+#include "mpatch.h"
-struct frag {
- int start, end, len;
- const char *data;
-};
-
-struct flist {
- struct frag *base, *head, *tail;
-};
-
-static struct flist *lalloc(Py_ssize_t size)
+static struct mpatch_flist *lalloc(ssize_t size)
{
- struct flist *a = NULL;
+ struct mpatch_flist *a = NULL;
if (size < 1)
size = 1;
- a = (struct flist *)malloc(sizeof(struct flist));
+ a = (struct mpatch_flist *)malloc(sizeof(struct mpatch_flist));
if (a) {
- a->base = (struct frag *)malloc(sizeof(struct frag) * size);
+ a->base = (struct mpatch_frag *)malloc(sizeof(struct mpatch_frag) * size);
if (a->base) {
a->head = a->tail = a->base;
return a;
}
free(a);
- a = NULL;
}
- if (!PyErr_Occurred())
- PyErr_NoMemory();
return NULL;
}
-static void lfree(struct flist *a)
+void mpatch_lfree(struct mpatch_flist *a)
{
if (a) {
free(a->base);
@@ -70,7 +54,7 @@
}
}
-static Py_ssize_t lsize(struct flist *a)
+static ssize_t lsize(struct mpatch_flist *a)
{
return a->tail - a->head;
}
@@ -78,9 +62,10 @@
/* move hunks in source that are less cut to dest, compensating
for changes in offset. the last hunk may be split if necessary.
*/
-static int gather(struct flist *dest, struct flist *src, int cut, int offset)
+static int gather(struct mpatch_flist *dest, struct mpatch_flist *src, int cut,
+ int offset)
{
- struct frag *d = dest->tail, *s = src->head;
+ struct mpatch_frag *d = dest->tail, *s = src->head;
int postend, c, l;
while (s != src->tail) {
@@ -123,9 +108,9 @@
}
/* like gather, but with no output list */
-static int discard(struct flist *src, int cut, int offset)
+static int discard(struct mpatch_flist *src, int cut, int offset)
{
- struct frag *s = src->head;
+ struct mpatch_frag *s = src->head;
int postend, c, l;
while (s != src->tail) {
@@ -160,10 +145,11 @@
/* combine hunk lists a and b, while adjusting b for offset changes in a/
this deletes a and b and returns the resultant list. */
-static struct flist *combine(struct flist *a, struct flist *b)
+static struct mpatch_flist *combine(struct mpatch_flist *a,
+ struct mpatch_flist *b)
{
- struct flist *c = NULL;
- struct frag *bh, *ct;
+ struct mpatch_flist *c = NULL;
+ struct mpatch_frag *bh, *ct;
int offset = 0, post;
if (a && b)
@@ -189,26 +175,26 @@
}
/* hold on to tail from a */
- memcpy(c->tail, a->head, sizeof(struct frag) * lsize(a));
+ memcpy(c->tail, a->head, sizeof(struct mpatch_frag) * lsize(a));
c->tail += lsize(a);
}
- lfree(a);
- lfree(b);
+ mpatch_lfree(a);
+ mpatch_lfree(b);
return c;
}
/* decode a binary patch into a hunk list */
-static struct flist *decode(const char *bin, Py_ssize_t len)
+int mpatch_decode(const char *bin, ssize_t len, struct mpatch_flist **res)
{
- struct flist *l;
- struct frag *lt;
+ struct mpatch_flist *l;
+ struct mpatch_frag *lt;
int pos = 0;
/* assume worst case size, we won't have many of these lists */
l = lalloc(len / 12 + 1);
if (!l)
- return NULL;
+ return MPATCH_ERR_NO_MEM;
lt = l->tail;
@@ -224,28 +210,24 @@
}
if (pos != len) {
- if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error, "patch cannot be decoded");
- lfree(l);
- return NULL;
+ mpatch_lfree(l);
+ return MPATCH_ERR_CANNOT_BE_DECODED;
}
l->tail = lt;
- return l;
+ *res = l;
+ return 0;
}
/* calculate the size of resultant text */
-static Py_ssize_t calcsize(Py_ssize_t len, struct flist *l)
+ssize_t mpatch_calcsize(ssize_t len, struct mpatch_flist *l)
{
- Py_ssize_t outlen = 0, last = 0;
- struct frag *f = l->head;
+ ssize_t outlen = 0, last = 0;
+ struct mpatch_frag *f = l->head;
while (f != l->tail) {
if (f->start < last || f->end > len) {
- if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error,
- "invalid patch");
- return -1;
+ return MPATCH_ERR_INVALID_PATCH;
}
outlen += f->start - last;
last = f->end;
@@ -257,18 +239,16 @@
return outlen;
}
-static int apply(char *buf, const char *orig, Py_ssize_t len, struct flist *l)
+int mpatch_apply(char *buf, const char *orig, ssize_t len,
+ struct mpatch_flist *l)
{
- struct frag *f = l->head;
+ struct mpatch_frag *f = l->head;
int last = 0;
char *p = buf;
while (f != l->tail) {
if (f->start < last || f->end > len) {
- if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error,
- "invalid patch");
- return 0;
+ return MPATCH_ERR_INVALID_PATCH;
}
memcpy(p, orig + last, f->start - last);
p += f->start - last;
@@ -278,146 +258,23 @@
f++;
}
memcpy(p, orig + last, len - last);
- return 1;
+ return 0;
}
/* recursively generate a patch of all bins between start and end */
-static struct flist *fold(PyObject *bins, Py_ssize_t start, Py_ssize_t end)
+struct mpatch_flist *mpatch_fold(void *bins,
+ struct mpatch_flist* (*get_next_item)(void*, ssize_t),
+ ssize_t start, ssize_t end)
{
- Py_ssize_t len, blen;
- const char *buffer;
+ ssize_t len;
if (start + 1 == end) {
/* trivial case, output a decoded list */
- PyObject *tmp = PyList_GetItem(bins, start);
- if (!tmp)
- return NULL;
- if (PyObject_AsCharBuffer(tmp, &buffer, &blen))
- return NULL;
- return decode(buffer, blen);
+ return get_next_item(bins, start);
}
/* divide and conquer, memory management is elsewhere */
len = (end - start) / 2;
- return combine(fold(bins, start, start + len),
- fold(bins, start + len, end));
-}
-
-static PyObject *
-patches(PyObject *self, PyObject *args)
-{
- PyObject *text, *bins, *result;
- struct flist *patch;
- const char *in;
- char *out;
- Py_ssize_t len, outlen, inlen;
-
- if (!PyArg_ParseTuple(args, "OO:mpatch", &text, &bins))
- return NULL;
-
- len = PyList_Size(bins);
- if (!len) {
- /* nothing to do */
- Py_INCREF(text);
- return text;
- }
-
- if (PyObject_AsCharBuffer(text, &in, &inlen))
- return NULL;
-
- patch = fold(bins, 0, len);
- if (!patch)
- return NULL;
-
- outlen = calcsize(inlen, patch);
- if (outlen < 0) {
- result = NULL;
- goto cleanup;
- }
- result = PyBytes_FromStringAndSize(NULL, outlen);
- if (!result) {
- result = NULL;
- goto cleanup;
- }
- out = PyBytes_AsString(result);
- if (!apply(out, in, inlen, patch)) {
- Py_DECREF(result);
- result = NULL;
- }
-cleanup:
- lfree(patch);
- return result;
+ return combine(mpatch_fold(bins, get_next_item, start, start + len),
+ mpatch_fold(bins, get_next_item, start + len, end));
}
-
-/* calculate size of a patched file directly */
-static PyObject *
-patchedsize(PyObject *self, PyObject *args)
-{
- long orig, start, end, len, outlen = 0, last = 0, pos = 0;
- Py_ssize_t patchlen;
- char *bin;
-
- if (!PyArg_ParseTuple(args, "ls#", &orig, &bin, &patchlen))
- return NULL;
-
- while (pos >= 0 && pos < patchlen) {
- start = getbe32(bin + pos);
- end = getbe32(bin + pos + 4);
- len = getbe32(bin + pos + 8);
- if (start > end)
- break; /* sanity check */
- pos += 12 + len;
- outlen += start - last;
- last = end;
- outlen += len;
- }
-
- if (pos != patchlen) {
- if (!PyErr_Occurred())
- PyErr_SetString(mpatch_Error, "patch cannot be decoded");
- return NULL;
- }
-
- outlen += orig - last;
- return Py_BuildValue("l", outlen);
-}
-
-static PyMethodDef methods[] = {
- {"patches", patches, METH_VARARGS, "apply a series of patches\n"},
- {"patchedsize", patchedsize, METH_VARARGS, "calculed patched size\n"},
- {NULL, NULL}
-};
-
-#ifdef IS_PY3K
-static struct PyModuleDef mpatch_module = {
- PyModuleDef_HEAD_INIT,
- "mpatch",
- mpatch_doc,
- -1,
- methods
-};
-
-PyMODINIT_FUNC PyInit_mpatch(void)
-{
- PyObject *m;
-
- m = PyModule_Create(&mpatch_module);
- if (m == NULL)
- return NULL;
-
- mpatch_Error = PyErr_NewException("mercurial.mpatch.mpatchError",
- NULL, NULL);
- Py_INCREF(mpatch_Error);
- PyModule_AddObject(m, "mpatchError", mpatch_Error);
-
- return m;
-}
-#else
-PyMODINIT_FUNC
-initmpatch(void)
-{
- Py_InitModule3("mpatch", methods, mpatch_doc);
- mpatch_Error = PyErr_NewException("mercurial.mpatch.mpatchError",
- NULL, NULL);
-}
-#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/mpatch.h Mon Aug 15 12:26:02 2016 -0400
@@ -0,0 +1,26 @@
+#ifndef _HG_MPATCH_H_
+#define _HG_MPATCH_H_
+
+#define MPATCH_ERR_NO_MEM -3
+#define MPATCH_ERR_CANNOT_BE_DECODED -2
+#define MPATCH_ERR_INVALID_PATCH -1
+
+struct mpatch_frag {
+ int start, end, len;
+ const char *data;
+};
+
+struct mpatch_flist {
+ struct mpatch_frag *base, *head, *tail;
+};
+
+int mpatch_decode(const char *bin, ssize_t len, struct mpatch_flist** res);
+ssize_t mpatch_calcsize(ssize_t len, struct mpatch_flist *l);
+void mpatch_lfree(struct mpatch_flist *a);
+int mpatch_apply(char *buf, const char *orig, ssize_t len,
+ struct mpatch_flist *l);
+struct mpatch_flist *mpatch_fold(void *bins,
+ struct mpatch_flist* (*get_next_item)(void*, ssize_t),
+ ssize_t start, ssize_t end);
+
+#endif
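The header above is the whole C-level contract: a patch decodes into a list of fragments, each replacing orig[start:end] with len bytes of data, and applying the list walks the fragments in order. A rough pure-Python sketch of that application step, for illustration only (not the code Mercurial's pure module uses):

def apply_frags(orig, frags):
    # frags: (start, end, data) tuples, sorted and non-overlapping, mirroring
    # struct mpatch_frag above (data carries 'len' bytes)
    out, last = [], 0
    for start, end, data in frags:
        assert last <= start <= end <= len(orig)
        out.append(orig[last:start])  # unchanged text before the hunk
        out.append(data)              # replacement bytes
        last = end
    out.append(orig[last:])           # unchanged tail
    return ''.join(out)

assert apply_frags('hello world', [(0, 5, 'HELLO')]) == 'HELLO world'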
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/mpatch_module.c Mon Aug 15 12:26:02 2016 -0400
@@ -0,0 +1,195 @@
+/*
+ mpatch.c - efficient binary patching for Mercurial
+
+ This implements a patch algorithm that's O(m + nlog n) where m is the
+ size of the output and n is the number of patches.
+
+ Given a list of binary patches, it unpacks each into a hunk list,
+ then combines the hunk lists with a treewise recursion to form a
+ single hunk list. This hunk list is then applied to the original
+ text.
+
+ The text (or binary) fragments are copied directly from their source
+ Python objects into a preallocated output string to avoid the
+ allocation of intermediate Python objects. Working memory is about 2x
+ the total number of hunks.
+
+ Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
+
+ This software may be used and distributed according to the terms
+ of the GNU General Public License, incorporated herein by reference.
+*/
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "util.h"
+#include "bitmanipulation.h"
+#include "compat.h"
+#include "mpatch.h"
+
+static char mpatch_doc[] = "Efficient binary patching.";
+static PyObject *mpatch_Error;
+
+static void setpyerr(int r)
+{
+ switch (r) {
+ case MPATCH_ERR_NO_MEM:
+ PyErr_NoMemory();
+ break;
+ case MPATCH_ERR_CANNOT_BE_DECODED:
+ PyErr_SetString(mpatch_Error, "patch cannot be decoded");
+ break;
+ case MPATCH_ERR_INVALID_PATCH:
+ PyErr_SetString(mpatch_Error, "invalid patch");
+ break;
+ }
+}
+
+struct mpatch_flist *cpygetitem(void *bins, ssize_t pos)
+{
+ const char *buffer;
+ struct mpatch_flist *res;
+ ssize_t blen;
+ int r;
+
+ PyObject *tmp = PyList_GetItem((PyObject*)bins, pos);
+ if (!tmp)
+ return NULL;
+ if (PyObject_AsCharBuffer(tmp, &buffer, (Py_ssize_t*)&blen))
+ return NULL;
+ if ((r = mpatch_decode(buffer, blen, &res)) < 0) {
+ if (!PyErr_Occurred())
+ setpyerr(r);
+ return NULL;
+ }
+ return res;
+}
+
+static PyObject *
+patches(PyObject *self, PyObject *args)
+{
+ PyObject *text, *bins, *result;
+ struct mpatch_flist *patch;
+ const char *in;
+ int r = 0;
+ char *out;
+ Py_ssize_t len, outlen, inlen;
+
+ if (!PyArg_ParseTuple(args, "OO:mpatch", &text, &bins))
+ return NULL;
+
+ len = PyList_Size(bins);
+ if (!len) {
+ /* nothing to do */
+ Py_INCREF(text);
+ return text;
+ }
+
+ if (PyObject_AsCharBuffer(text, &in, &inlen))
+ return NULL;
+
+ patch = mpatch_fold(bins, cpygetitem, 0, len);
+ if (!patch) { /* error already set or memory error */
+ if (!PyErr_Occurred())
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ outlen = mpatch_calcsize(inlen, patch);
+ if (outlen < 0) {
+ r = (int)outlen;
+ result = NULL;
+ goto cleanup;
+ }
+ result = PyBytes_FromStringAndSize(NULL, outlen);
+ if (!result) {
+ result = NULL;
+ goto cleanup;
+ }
+ out = PyBytes_AsString(result);
+ if ((r = mpatch_apply(out, in, inlen, patch)) < 0) {
+ Py_DECREF(result);
+ result = NULL;
+ }
+cleanup:
+ mpatch_lfree(patch);
+ if (!result && !PyErr_Occurred())
+ setpyerr(r);
+ return result;
+}
+
+/* calculate size of a patched file directly */
+static PyObject *
+patchedsize(PyObject *self, PyObject *args)
+{
+ long orig, start, end, len, outlen = 0, last = 0, pos = 0;
+ Py_ssize_t patchlen;
+ char *bin;
+
+ if (!PyArg_ParseTuple(args, "ls#", &orig, &bin, &patchlen))
+ return NULL;
+
+ while (pos >= 0 && pos < patchlen) {
+ start = getbe32(bin + pos);
+ end = getbe32(bin + pos + 4);
+ len = getbe32(bin + pos + 8);
+ if (start > end)
+ break; /* sanity check */
+ pos += 12 + len;
+ outlen += start - last;
+ last = end;
+ outlen += len;
+ }
+
+ if (pos != patchlen) {
+ if (!PyErr_Occurred())
+ PyErr_SetString(mpatch_Error, "patch cannot be decoded");
+ return NULL;
+ }
+
+ outlen += orig - last;
+ return Py_BuildValue("l", outlen);
+}
+
+static PyMethodDef methods[] = {
+ {"patches", patches, METH_VARARGS, "apply a series of patches\n"},
+ {"patchedsize", patchedsize, METH_VARARGS, "calculed patched size\n"},
+ {NULL, NULL}
+};
+
+#ifdef IS_PY3K
+static struct PyModuleDef mpatch_module = {
+ PyModuleDef_HEAD_INIT,
+ "mpatch",
+ mpatch_doc,
+ -1,
+ methods
+};
+
+PyMODINIT_FUNC PyInit_mpatch(void)
+{
+ PyObject *m;
+
+ m = PyModule_Create(&mpatch_module);
+ if (m == NULL)
+ return NULL;
+
+ mpatch_Error = PyErr_NewException("mercurial.mpatch.mpatchError",
+ NULL, NULL);
+ Py_INCREF(mpatch_Error);
+ PyModule_AddObject(m, "mpatchError", mpatch_Error);
+
+ return m;
+}
+#else
+PyMODINIT_FUNC
+initmpatch(void)
+{
+ Py_InitModule3("mpatch", methods, mpatch_doc);
+ mpatch_Error = PyErr_NewException("mercurial.mpatch.mpatchError",
+ NULL, NULL);
+}
+#endif
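The Python-visible interface of the rebuilt extension is unchanged: patches(text, bins) applies a list of binary deltas and patchedsize(origlen, bin) sizes the result without building it. As mpatch_decode() above shows, a delta is a series of records, each a big-endian (start, end, length) triple followed by `length` bytes of replacement data. A hedged usage sketch, assuming the C extension is built and importable as mercurial.mpatch:

import struct
from mercurial import mpatch

text = b'hello world'
# one record: replace text[0:5] with the 5-byte string 'HELLO'
delta = struct.pack('>lll', 0, 5, 5) + b'HELLO'

assert mpatch.patches(text, [delta]) == b'HELLO world'
assert mpatch.patchedsize(len(text), delta) == len(b'HELLO world')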
--- a/mercurial/parser.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/parser.py Mon Aug 15 12:26:02 2016 -0400
@@ -65,7 +65,7 @@
# handle infix rules, take as suffix if unambiguous
infix, suffix = self._elements[token][3:]
if suffix and not (infix and self._hasnewterm()):
- expr = (suffix[0], expr)
+ expr = (suffix, expr)
elif infix:
expr = (infix[0], expr, self._parseoperand(*infix[1:]))
else:
--- a/mercurial/patch.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/patch.py Mon Aug 15 12:26:02 2016 -0400
@@ -410,11 +410,7 @@
return self.fp.readline()
def __iter__(self):
- while True:
- l = self.readline()
- if not l:
- break
- yield l
+ return iter(self.readline, '')
class abstractbackend(object):
def __init__(self, ui):
@@ -1688,10 +1684,7 @@
def scanwhile(first, p):
"""scan lr while predicate holds"""
lines = [first]
- while True:
- line = lr.readline()
- if not line:
- break
+ for line in iter(lr.readline, ''):
if p(line):
lines.append(line)
else:
@@ -1699,10 +1692,7 @@
break
return lines
- while True:
- line = lr.readline()
- if not line:
- break
+ for line in iter(lr.readline, ''):
if line.startswith('diff --git a/') or line.startswith('diff -r '):
def notheader(line):
s = line.split(None, 1)
@@ -1772,10 +1762,7 @@
context = None
lr = linereader(fp)
- while True:
- x = lr.readline()
- if not x:
- break
+ for x in iter(lr.readline, ''):
if state == BFILE and (
(not context and x[0] == '@')
or (context is not False and x.startswith('***************'))
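The readline loops removed in this file, and in several later files of this series, all follow the same shape, so they collapse onto the two-argument form of iter(), which keeps calling the given callable until it returns the sentinel. A minimal illustration of the equivalence:

from io import BytesIO

fp = BytesIO(b'one\ntwo\n')

lines = []
while True:            # old style
    line = fp.readline()
    if not line:
        break
    lines.append(line)

fp.seek(0)
# new style: iterate until readline() returns the sentinel b''
assert lines == list(iter(fp.readline, b''))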
--- a/mercurial/pure/mpatch.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/pure/mpatch.py Mon Aug 15 12:26:02 2016 -0400
@@ -9,8 +9,10 @@
import struct
-from . import pycompat
+from . import policy, pycompat
stringio = pycompat.stringio
+modulepolicy = policy.policy
+policynocffi = policy.policynocffi
class mpatchError(Exception):
"""error raised when a delta cannot be decoded
@@ -125,3 +127,44 @@
outlen += orig - last
return outlen
+
+if modulepolicy not in policynocffi:
+ try:
+ from _mpatch_cffi import ffi, lib
+ except ImportError:
+ if modulepolicy == 'cffi': # strict cffi import
+ raise
+ else:
+ @ffi.def_extern()
+ def cffi_get_next_item(arg, pos):
+ all, bins = ffi.from_handle(arg)
+ container = ffi.new("struct mpatch_flist*[1]")
+ to_pass = ffi.new("char[]", str(bins[pos]))
+ all.append(to_pass)
+ r = lib.mpatch_decode(to_pass, len(to_pass) - 1, container)
+ if r < 0:
+ return ffi.NULL
+ return container[0]
+
+ def patches(text, bins):
+ lgt = len(bins)
+ all = []
+ if not lgt:
+ return text
+ arg = (all, bins)
+ patch = lib.mpatch_fold(ffi.new_handle(arg),
+ lib.cffi_get_next_item, 0, lgt)
+ if not patch:
+ raise mpatchError("cannot decode chunk")
+ outlen = lib.mpatch_calcsize(len(text), patch)
+ if outlen < 0:
+ lib.mpatch_lfree(patch)
+ raise mpatchError("inconsistency detected")
+ buf = ffi.new("char[]", outlen)
+ if lib.mpatch_apply(buf, text, len(text), patch) < 0:
+ lib.mpatch_lfree(patch)
+ raise mpatchError("error applying patches")
+ res = ffi.buffer(buf, outlen)[:]
+ lib.mpatch_lfree(patch)
+ return res
+
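The cffi branch above expects a generated _mpatch_cffi module; its builder is wired into setup.py later in this series as setup_mpatch_cffi. That builder is not part of this excerpt, but it would roughly declare the mpatch.h interface plus an extern "Python" slot for the cffi_get_next_item callback registered with @ffi.def_extern() above. A hypothetical sketch, where the file name, source path and include directory are assumptions rather than the actual file added by the series:

# setup_mpatch_cffi.py (hypothetical)
import cffi

ffi = cffi.FFI()
ffi.cdef("""
struct mpatch_frag {
    int start, end, len;
    const char *data;
};
struct mpatch_flist {
    struct mpatch_frag *base, *head, *tail;
};
extern "Python" struct mpatch_flist *cffi_get_next_item(void *, ssize_t);
int mpatch_decode(const char *bin, ssize_t len, struct mpatch_flist **res);
ssize_t mpatch_calcsize(ssize_t len, struct mpatch_flist *l);
void mpatch_lfree(struct mpatch_flist *a);
int mpatch_apply(char *buf, const char *orig, ssize_t len,
                 struct mpatch_flist *l);
struct mpatch_flist *mpatch_fold(void *bins,
    struct mpatch_flist *(*get_next_item)(void *, ssize_t),
    ssize_t start, ssize_t end);
""")
ffi.set_source("_mpatch_cffi", open("mercurial/mpatch.c").read(),
               include_dirs=["mercurial"])

if __name__ == '__main__':
    ffi.compile()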
--- a/mercurial/pure/osutil.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/pure/osutil.py Mon Aug 15 12:26:02 2016 -0400
@@ -173,30 +173,30 @@
class _iovec(ctypes.Structure):
_fields_ = [
- ('iov_base', ctypes.c_void_p),
- ('iov_len', ctypes.c_size_t),
+ (u'iov_base', ctypes.c_void_p),
+ (u'iov_len', ctypes.c_size_t),
]
class _msghdr(ctypes.Structure):
_fields_ = [
- ('msg_name', ctypes.c_void_p),
- ('msg_namelen', _socklen_t),
- ('msg_iov', ctypes.POINTER(_iovec)),
- ('msg_iovlen', _msg_iovlen_t),
- ('msg_control', ctypes.c_void_p),
- ('msg_controllen', _msg_controllen_t),
- ('msg_flags', ctypes.c_int),
+ (u'msg_name', ctypes.c_void_p),
+ (u'msg_namelen', _socklen_t),
+ (u'msg_iov', ctypes.POINTER(_iovec)),
+ (u'msg_iovlen', _msg_iovlen_t),
+ (u'msg_control', ctypes.c_void_p),
+ (u'msg_controllen', _msg_controllen_t),
+ (u'msg_flags', ctypes.c_int),
]
class _cmsghdr(ctypes.Structure):
_fields_ = [
- ('cmsg_len', _cmsg_len_t),
- ('cmsg_level', ctypes.c_int),
- ('cmsg_type', ctypes.c_int),
- ('cmsg_data', ctypes.c_ubyte * 0),
+ (u'cmsg_len', _cmsg_len_t),
+ (u'cmsg_level', ctypes.c_int),
+ (u'cmsg_type', ctypes.c_int),
+ (u'cmsg_data', ctypes.c_ubyte * 0),
]
- _libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
+ _libc = ctypes.CDLL(ctypes.util.find_library(u'c'), use_errno=True)
_recvmsg = getattr(_libc, 'recvmsg', None)
if _recvmsg:
_recvmsg.restype = getattr(ctypes, 'c_ssize_t', ctypes.c_long)
--- a/mercurial/pycompat.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/pycompat.py Mon Aug 15 12:26:02 2016 -0400
@@ -41,11 +41,10 @@
copies items from origin to alias
"""
- def hgcase(item):
- return item.replace('_', '').lower()
for item in items:
try:
- setattr(alias, hgcase(item), getattr(origin, item))
+ lcase = item.replace('_', '').lower()
+ setattr(alias, lcase, getattr(origin, item))
except AttributeError:
pass
@@ -71,7 +70,6 @@
"unquote",
"url2pathname",
"urlencode",
- "urlencode",
))
_alias(urlreq, urllib2, (
"AbstractHTTPHandler",
--- a/mercurial/repair.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/repair.py Mon Aug 15 12:26:02 2016 -0400
@@ -173,7 +173,7 @@
if (unencoded.startswith('meta/') and
unencoded.endswith('00manifest.i')):
dir = unencoded[5:-12]
- repo.dirlog(dir).strip(striprev, tr)
+ repo.manifest.dirlog(dir).strip(striprev, tr)
for fn in files:
repo.file(fn).strip(striprev, tr)
tr.endgroup()
--- a/mercurial/revlog.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/revlog.py Mon Aug 15 12:26:02 2016 -0400
@@ -1585,10 +1585,7 @@
try:
# loop through our set of deltas
chain = None
- while True:
- chunkdata = cg.deltachunk(chain)
- if not chunkdata:
- break
+ for chunkdata in iter(lambda: cg.deltachunk(chain), {}):
node = chunkdata['node']
p1 = chunkdata['p1']
p2 = chunkdata['p2']
--- a/mercurial/revset.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/revset.py Mon Aug 15 12:26:02 2016 -0400
@@ -149,18 +149,16 @@
"(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
"##": (20, None, None, ("_concat", 20), None),
"~": (18, None, None, ("ancestor", 18), None),
- "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
+ "^": (18, None, None, ("parent", 18), "parentpost"),
"-": (5, None, ("negate", 19), ("minus", 5), None),
- "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
- ("dagrangepost", 17)),
- "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
- ("dagrangepost", 17)),
- ":": (15, "rangeall", ("rangepre", 15), ("range", 15), ("rangepost", 15)),
+ "::": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
+ "..": (17, None, ("dagrangepre", 17), ("dagrange", 17), "dagrangepost"),
+ ":": (15, "rangeall", ("rangepre", 15), ("range", 15), "rangepost"),
"not": (10, None, ("not", 10), None, None),
"!": (10, None, ("not", 10), None, None),
"and": (5, None, None, ("and", 5), None),
"&": (5, None, None, ("and", 5), None),
- "%": (5, None, None, ("only", 5), ("onlypost", 5)),
+ "%": (5, None, None, ("only", 5), "onlypost"),
"or": (4, None, None, ("or", 4), None),
"|": (4, None, None, ("or", 4), None),
"+": (4, None, None, ("or", 4), None),
@@ -2316,6 +2314,26 @@
and getsymbol(bases[1][1]) == 'ancestors'):
return ('list', revs[2], bases[1][2])
+def _fixops(x):
+ """Rewrite raw parsed tree to resolve ambiguous syntax which cannot be
+ handled well by our simple top-down parser"""
+ if not isinstance(x, tuple):
+ return x
+
+ op = x[0]
+ if op == 'parent':
+ # x^:y means (x^) : y, not x ^ (:y)
+ # x^: means (x^) :, not x ^ (:)
+ post = ('parentpost', x[1])
+ if x[2][0] == 'dagrangepre':
+ return _fixops(('dagrange', post, x[2][1]))
+ elif x[2][0] == 'rangepre':
+ return _fixops(('range', post, x[2][1]))
+ elif x[2][0] == 'rangeall':
+ return _fixops(('rangepost', post))
+
+ return (op,) + tuple(_fixops(y) for y in x[1:])
+
def _optimize(x, small):
if x is None:
return 0, x
@@ -2409,14 +2427,6 @@
elif op == 'group':
return _optimize(x[1], small)
elif op in 'dagrange range parent ancestorspec':
- if op == 'parent':
- # x^:y means (x^) : y, not x ^ (:y)
- post = ('parentpost', x[1])
- if x[2][0] == 'dagrangepre':
- return _optimize(('dagrange', post, x[2][1]), small)
- elif x[2][0] == 'rangepre':
- return _optimize(('range', post, x[2][1]), small)
-
wa, ta = _optimize(x[1], small)
wb, tb = _optimize(x[2], small)
return wa + wb, (op, ta, tb)
@@ -2475,7 +2485,7 @@
syminitletters=syminitletters))
if pos != len(spec):
raise error.ParseError(_('invalid token'), pos)
- return parser.simplifyinfixops(tree, ('list', 'or'))
+ return _fixops(parser.simplifyinfixops(tree, ('list', 'or')))
class _aliasrules(parser.basealiasrules):
"""Parsing and expansion rule set of revset aliases"""
--- a/mercurial/scmutil.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/scmutil.py Mon Aug 15 12:26:02 2016 -0400
@@ -256,17 +256,15 @@
raise
return []
- def open(self, path, mode="r", text=False, atomictemp=False,
- notindexed=False, backgroundclose=False):
+ @util.propertycache
+ def open(self):
'''Open ``path`` file, which is relative to vfs root.
Newly created directories are marked as "not to be indexed by
the content indexing service", if ``notindexed`` is specified
for "write" mode access.
'''
- self.open = self.__call__
- return self.__call__(path, mode, text, atomictemp, notindexed,
- backgroundclose=backgroundclose)
+ return self.__call__
def read(self, path):
with self(path, 'rb') as fp:
@@ -638,6 +636,14 @@
def mustaudit(self, onoff):
self.vfs.mustaudit = onoff
+ @property
+ def options(self):
+ return self.vfs.options
+
+ @options.setter
+ def options(self, value):
+ self.vfs.options = value
+
class filtervfs(abstractvfs, auditvfs):
'''Wrapper vfs for filtering filenames with a function.'''
@@ -775,7 +781,6 @@
def _pairspec(revspec):
tree = revset.parse(revspec)
- tree = revset.optimize(tree) # fix up "x^:y" -> "(x^):y"
return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
def revpair(repo, revs):
@@ -942,17 +947,9 @@
ret = 0
join = lambda f: os.path.join(prefix, f)
- def matchessubrepo(matcher, subpath):
- if matcher.exact(subpath):
- return True
- for f in matcher.files():
- if f.startswith(subpath):
- return True
- return False
-
wctx = repo[None]
for subpath in sorted(wctx.substate):
- if opts.get('subrepos') or matchessubrepo(m, subpath):
+ if opts.get('subrepos') or m.matchessubrepo(subpath):
sub = wctx.sub(subpath)
try:
submatch = matchmod.subdirmatcher(subpath, m)
--- a/mercurial/scmwindows.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/scmwindows.py Mon Aug 15 12:26:02 2016 -0400
@@ -1,6 +1,5 @@
from __future__ import absolute_import
-import _winreg
import os
from . import (
@@ -8,6 +7,12 @@
util,
)
+try:
+ import _winreg as winreg
+ winreg.CloseKey
+except ImportError:
+ import winreg
+
def systemrcpath():
'''return default os-specific hgrc search path'''
rcpath = []
@@ -23,7 +28,7 @@
rcpath.append(os.path.join(progrcd, f))
# else look for a system rcpath in the registry
value = util.lookupreg('SOFTWARE\\Mercurial', None,
- _winreg.HKEY_LOCAL_MACHINE)
+ winreg.HKEY_LOCAL_MACHINE)
if not isinstance(value, str) or not value:
return rcpath
value = util.localpath(value)
--- a/mercurial/sshpeer.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/sshpeer.py Mon Aug 15 12:26:02 2016 -0400
@@ -232,13 +232,7 @@
__del__ = cleanup
def _submitbatch(self, req):
- cmds = []
- for op, argsdict in req:
- args = ','.join('%s=%s' % (wireproto.escapearg(k),
- wireproto.escapearg(v))
- for k, v in argsdict.iteritems())
- cmds.append('%s %s' % (op, args))
- rsp = self._callstream("batch", cmds=';'.join(cmds))
+ rsp = self._callstream("batch", cmds=wireproto.encodebatchcmds(req))
available = self._getamount()
# TODO this response parsing is probably suboptimal for large
# batches with large responses.
@@ -292,10 +286,7 @@
r = self._call(cmd, **args)
if r:
return '', r
- while True:
- d = fp.read(4096)
- if not d:
- break
+ for d in iter(lambda: fp.read(4096), ''):
self._send(d)
self._send("", flush=True)
r = self._recv()
@@ -308,10 +299,7 @@
if r:
# XXX needs to be made better
raise error.Abort(_('unexpected remote reply: %s') % r)
- while True:
- d = fp.read(4096)
- if not d:
- break
+ for d in iter(lambda: fp.read(4096), ''):
self._send(d)
self._send("", flush=True)
return self.pipei
@@ -353,10 +341,7 @@
d = self._call("addchangegroup")
if d:
self._abort(error.RepoError(_("push refused: %s") % d))
- while True:
- d = cg.read(4096)
- if not d:
- break
+ for d in iter(lambda: cg.read(4096), ''):
self.pipeo.write(d)
self.readerr()
--- a/mercurial/sshserver.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/sshserver.py Mon Aug 15 12:26:02 2016 -0400
@@ -69,11 +69,7 @@
pass
def groupchunks(self, changegroup):
- while True:
- d = changegroup.read(4096)
- if not d:
- break
- yield d
+ return iter(lambda: changegroup.read(4096), '')
def sendresponse(self, v):
self.fout.write("%d\n" % len(v))
--- a/mercurial/statichttprepo.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/statichttprepo.py Mon Aug 15 12:26:02 2016 -0400
@@ -181,6 +181,9 @@
def lock(self, wait=True):
raise error.Abort(_('cannot lock static-http repository'))
+ def _writecaches(self):
+ pass # statichttprepository is read-only

+
def instance(ui, path, create):
if create:
raise error.Abort(_('cannot create new static-http repository'))
--- a/mercurial/templatekw.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/templatekw.py Mon Aug 15 12:26:02 2016 -0400
@@ -26,14 +26,11 @@
# "{get(extras, key)}"
class _hybrid(object):
- def __init__(self, gen, values, makemap, joinfmt=None):
+ def __init__(self, gen, values, makemap, joinfmt):
self.gen = gen
self.values = values
self._makemap = makemap
- if joinfmt:
- self.joinfmt = joinfmt
- else:
- self.joinfmt = lambda x: x.values()[0]
+ self.joinfmt = joinfmt
def __iter__(self):
return self.gen
def itermaps(self):
@@ -53,7 +50,7 @@
if not element:
element = name
f = _showlist(name, values, plural, separator, **args)
- return _hybrid(f, values, lambda x: {element: x})
+ return _hybrid(f, values, lambda x: {element: x}, lambda d: d[element])
def _showlist(name, values, plural=None, separator=' ', **args):
'''expand set of values.
--- a/mercurial/ui.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/ui.py Mon Aug 15 12:26:02 2016 -0400
@@ -1175,6 +1175,7 @@
% ((msg,) + calframe[stacklevel][1:4]))
self.log('develwarn', '%s at: %s:%s (%s)\n',
msg, *calframe[stacklevel][1:4])
+ curframe = calframe = None # avoid cycles
def deprecwarn(self, msg, version):
"""issue a deprecation warning
--- a/mercurial/url.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/url.py Mon Aug 15 12:26:02 2016 -0400
@@ -151,35 +151,6 @@
return _sendfile
has_https = util.safehasattr(urlreq, 'httpshandler')
-if has_https:
- try:
- _create_connection = socket.create_connection
- except AttributeError:
- _GLOBAL_DEFAULT_TIMEOUT = object()
-
- def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
- source_address=None):
- # lifted from Python 2.6
-
- msg = "getaddrinfo returns an empty list"
- host, port = address
- for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- sock = None
- try:
- sock = socket.socket(af, socktype, proto)
- if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
- sock.settimeout(timeout)
- if source_address:
- sock.bind(source_address)
- sock.connect(sa)
- return sock
-
- except socket.error as msg:
- if sock is not None:
- sock.close()
-
- raise socket.error(msg)
class httpconnection(keepalive.HTTPConnection):
# must be able to send big bundle as stream.
@@ -237,18 +208,14 @@
version, status, reason = res._read_status()
if status != httplib.CONTINUE:
break
- while True:
- skip = res.fp.readline().strip()
- if not skip:
- break
+ # skip lines that are all whitespace
+ list(iter(lambda: res.fp.readline().strip(), ''))
res.status = status
res.reason = reason.strip()
if res.status == 200:
- while True:
- line = res.fp.readline()
- if line == '\r\n':
- break
+ # skip lines until we find a blank line
+ list(iter(res.fp.readline, '\r\n'))
return True
if version == 'HTTP/1.0':
@@ -337,7 +304,7 @@
self.cert_file = cert_file
def connect(self):
- self.sock = _create_connection((self.host, self.port))
+ self.sock = socket.create_connection((self.host, self.port))
host = self.host
if self.realhostport: # use CONNECT proxy
--- a/mercurial/util.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/util.py Mon Aug 15 12:26:02 2016 -0400
@@ -881,6 +881,8 @@
This garbage collector issue have been fixed in 2.7.
"""
+ if sys.version_info >= (2, 7):
+ return func
def wrapper(*args, **kwargs):
gcenabled = gc.isenabled()
gc.disable()
@@ -1012,10 +1014,7 @@
proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
env=env, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
- while True:
- line = proc.stdout.readline()
- if not line:
- break
+ for line in iter(proc.stdout.readline, ''):
out.write(line)
proc.wait()
rc = proc.returncode
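With the Python 2.7 check added above, util.nogc degrades to a pass-through on interpreters where the collector issue it works around has been fixed, and only older interpreters pay for the disable/re-enable dance. A minimal sketch of that wrapper pattern (not the actual util.py code):

import functools
import gc

def nogc(func):
    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()
    return functools.wraps(func)(wrapper)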
--- a/mercurial/windows.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/windows.py Mon Aug 15 12:26:02 2016 -0400
@@ -7,7 +7,6 @@
from __future__ import absolute_import
-import _winreg
import errno
import msvcrt
import os
@@ -22,6 +21,12 @@
win32,
)
+try:
+ import _winreg as winreg
+ winreg.CloseKey
+except ImportError:
+ import winreg
+
executablepath = win32.executablepath
getuser = win32.getuser
hidewindow = win32.hidewindow
@@ -432,12 +437,12 @@
LOCAL_MACHINE).
'''
if scope is None:
- scope = (_winreg.HKEY_CURRENT_USER, _winreg.HKEY_LOCAL_MACHINE)
+ scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE)
elif not isinstance(scope, (list, tuple)):
scope = (scope,)
for s in scope:
try:
- val = _winreg.QueryValueEx(_winreg.OpenKey(s, key), valname)[0]
+ val = winreg.QueryValueEx(winreg.OpenKey(s, key), valname)[0]
# never let a Unicode string escape into the wild
return encoding.tolocal(val.encode('UTF-8'))
except EnvironmentError:
--- a/mercurial/wireproto.py Sun Aug 07 14:58:49 2016 +0900
+++ b/mercurial/wireproto.py Mon Aug 15 12:26:02 2016 -0400
@@ -187,6 +187,21 @@
.replace(':o', ',')
.replace(':c', ':'))
+def encodebatchcmds(req):
+ """Return a ``cmds`` argument value for the ``batch`` command."""
+ cmds = []
+ for op, argsdict in req:
+ # Old servers didn't properly unescape argument names. So prevent
+ # the sending of argument names that may not be decoded properly by
+ # servers.
+ assert all(escapearg(k) == k for k in argsdict)
+
+ args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
+ for k, v in argsdict.iteritems())
+ cmds.append('%s %s' % (op, args))
+
+ return ';'.join(cmds)
+
# mapping of options accepted by getbundle and their types
#
# Meant to be extended by extensions. It is extensions responsibility to ensure
@@ -226,12 +241,7 @@
Returns an iterator of the raw responses from the server.
"""
- cmds = []
- for op, argsdict in req:
- args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
- for k, v in argsdict.iteritems())
- cmds.append('%s %s' % (op, args))
- rsp = self._callstream("batch", cmds=';'.join(cmds))
+ rsp = self._callstream("batch", cmds=encodebatchcmds(req))
chunk = rsp.read(1024)
work = [chunk]
while chunk:
@@ -399,7 +409,7 @@
else:
return changegroupmod.cg1unpacker(f, 'UN')
- def unbundle(self, cg, heads, source):
+ def unbundle(self, cg, heads, url):
'''Send cg (a readable file-like object representing the
changegroup to push, typically a chunkbuffer object) to the
remote server as a bundle.
@@ -407,7 +417,11 @@
When pushing a bundle10 stream, return an integer indicating the
result of the push (see localrepository.addchangegroup()).
- When pushing a bundle20 stream, return a bundle20 stream.'''
+ When pushing a bundle20 stream, return a bundle20 stream.
+
+ `url` is the url the client thinks it's pushing to, which is
+ visible to hooks.
+ '''
if heads != ['force'] and self.capable('unbundlehash'):
heads = encodelist(['hashed',
@@ -611,7 +625,7 @@
for a in args.split(','):
if a:
n, v = a.split('=')
- vals[n] = unescapearg(v)
+ vals[unescapearg(n)] = unescapearg(v)
func, spec = commands[op]
if spec:
keys = spec.split()
@@ -731,12 +745,6 @@
opts = options('debugwireargs', ['three', 'four'], others)
return repo.debugwireargs(one, two, **opts)
-# List of options accepted by getbundle.
-#
-# Meant to be extended by extensions. It is the extension's responsibility to
-# ensure such options are properly processed in exchange.getbundle.
-gboptslist = ['heads', 'common', 'bundlecaps']
-
@wireprotocommand('getbundle', '*')
def getbundle(repo, proto, others):
opts = options('getbundle', gboptsmap.keys(), others)
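With encodebatchcmds() factored out, the ssh peer and the generic wirepeer._submitbatch build the batch command string the same way: one "op name=value,..." entry per request, joined with ";", with every name and value run through escapearg(). A rough illustration (the printed result is indicative; the escaping follows the replace chain paired with unescapearg() above):

from mercurial import wireproto

req = [('heads', {}), ('known', {'nodes': 'a1b2 c3d4'})]
print(wireproto.encodebatchcmds(req))
# something like: heads ;known nodes=a1b2 c3d4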
--- a/setup.py Sun Aug 07 14:58:49 2016 +0900
+++ b/setup.py Mon Aug 15 12:26:02 2016 -0400
@@ -318,7 +318,8 @@
if self.distribution.pure:
self.distribution.ext_modules = []
elif self.distribution.cffi:
- exts = []
+ import setup_mpatch_cffi
+ exts = [setup_mpatch_cffi.ffi.distutils_extension()]
# cffi modules go here
if sys.platform == 'darwin':
import setup_osutil_cffi
@@ -561,7 +562,8 @@
depends=common_depends + ['mercurial/bdiff.h']),
Extension('mercurial.diffhelpers', ['mercurial/diffhelpers.c'],
depends=common_depends),
- Extension('mercurial.mpatch', ['mercurial/mpatch.c'],
+ Extension('mercurial.mpatch', ['mercurial/mpatch.c',
+ 'mercurial/mpatch_module.c'],
depends=common_depends),
Extension('mercurial.parsers', ['mercurial/dirs.c',
'mercurial/manifest.c',
--- a/tests/failfilemerge.py Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/failfilemerge.py Mon Aug 15 12:26:02 2016 -0400
@@ -9,7 +9,7 @@
)
def failfilemerge(filemergefn,
- premerge, repo, mynode, orig, fcd, fco, fca, labels=None):
+ premerge, repo, mynode, orig, fcd, fco, fca, labels=None):
raise error.Abort("^C")
return filemergefn(premerge, repo, mynode, orig, fcd, fco, fca, labels)
--- a/tests/fakemergerecord.py Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/fakemergerecord.py Mon Aug 15 12:26:02 2016 -0400
@@ -16,10 +16,11 @@
[('X', 'mandatory', None, 'add a fake mandatory record'),
('x', 'advisory', None, 'add a fake advisory record')], '')
def fakemergerecord(ui, repo, *pats, **opts):
- ms = merge.mergestate.read(repo)
- records = ms._makerecords()
- if opts.get('mandatory'):
- records.append(('X', 'mandatory record'))
- if opts.get('advisory'):
- records.append(('x', 'advisory record'))
- ms._writerecords(records)
+ with repo.wlock():
+ ms = merge.mergestate.read(repo)
+ records = ms._makerecords()
+ if opts.get('mandatory'):
+ records.append(('X', 'mandatory record'))
+ if opts.get('advisory'):
+ records.append(('x', 'advisory record'))
+ ms._writerecords(records)
--- a/tests/md5sum.py Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/md5sum.py Mon Aug 15 12:26:02 2016 -0400
@@ -34,10 +34,7 @@
m = md5()
try:
- while True:
- data = fp.read(8192)
- if not data:
- break
+ for data in iter(lambda: fp.read(8192), ''):
m.update(data)
except IOError as msg:
sys.stderr.write('%s: I/O error: %s\n' % (filename, msg))
--- a/tests/test-acl.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-acl.t Mon Aug 15 12:26:02 2016 -0400
@@ -44,13 +44,6 @@
> EOF
> }
- $ cat << EOF >> $HGRCPATH
- > [experimental]
- > # drop me once bundle2 is the default,
- > # added to get test change early.
- > bundle2-exp = True
- > EOF
-
$ hg init a
$ cd a
$ mkdir foo foo/Bar quux
@@ -119,11 +112,11 @@
adding foo/file.txt revisions
adding quux/file.py revisions
added 3 changesets with 3 changes to 3 files
+ updating the branch cache
bundle2-input-part: total payload size 1606
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-bundle: 3 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 2 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -184,11 +177,11 @@
added 3 changesets with 3 changes to 3 files
calling hook pretxnchangegroup.acl: hgext.acl.hook
acl: changes have source "push" - skipping
+ updating the branch cache
bundle2-input-part: total payload size 1606
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-bundle: 3 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 2 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -260,11 +253,11 @@
acl: path access granted: "f9cafe1212c8"
acl: branch access granted: "911600dab2ae" on branch "default"
acl: path access granted: "911600dab2ae"
+ updating the branch cache
bundle2-input-part: total payload size 1606
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-bundle: 3 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 2 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -750,11 +743,11 @@
acl: path access granted: "f9cafe1212c8"
acl: branch access granted: "911600dab2ae" on branch "default"
acl: path access granted: "911600dab2ae"
+ updating the branch cache
bundle2-input-part: total payload size 1606
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-bundle: 3 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 2 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -1068,11 +1061,11 @@
acl: path access granted: "f9cafe1212c8"
acl: branch access granted: "911600dab2ae" on branch "default"
acl: path access granted: "911600dab2ae"
+ updating the branch cache
bundle2-input-part: total payload size 1606
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-bundle: 3 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 2 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -1154,11 +1147,11 @@
acl: path access granted: "f9cafe1212c8"
acl: branch access granted: "911600dab2ae" on branch "default"
acl: path access granted: "911600dab2ae"
+ updating the branch cache
bundle2-input-part: total payload size 1606
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-bundle: 3 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 2 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -1313,11 +1306,11 @@
acl: path access granted: "f9cafe1212c8"
acl: branch access granted: "911600dab2ae" on branch "default"
acl: path access granted: "911600dab2ae"
+ updating the branch cache
bundle2-input-part: total payload size 1606
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-bundle: 3 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 2 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -1515,13 +1508,13 @@
acl: path access granted: "911600dab2ae"
acl: branch access granted: "e8fc755d4d82" on branch "foobar"
acl: path access granted: "e8fc755d4d82"
+ updating the branch cache
bundle2-input-part: total payload size 2101
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:e8fc755d4d8217ee5b0c2bb41558c40d43b92c01"
bundle2-input-bundle: 4 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 3 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -1811,13 +1804,13 @@
acl: path access granted: "911600dab2ae"
acl: branch access granted: "e8fc755d4d82" on branch "foobar"
acl: path access granted: "e8fc755d4d82"
+ updating the branch cache
bundle2-input-part: total payload size 2101
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:e8fc755d4d8217ee5b0c2bb41558c40d43b92c01"
bundle2-input-bundle: 4 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 3 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -1904,13 +1897,13 @@
acl: path access granted: "911600dab2ae"
acl: branch access granted: "e8fc755d4d82" on branch "foobar"
acl: path access granted: "e8fc755d4d82"
+ updating the branch cache
bundle2-input-part: total payload size 2101
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:e8fc755d4d8217ee5b0c2bb41558c40d43b92c01"
bundle2-input-bundle: 4 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 3 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
@@ -2065,13 +2058,13 @@
acl: path access granted: "911600dab2ae"
acl: branch access granted: "e8fc755d4d82" on branch "foobar"
acl: path access granted: "e8fc755d4d82"
+ updating the branch cache
bundle2-input-part: total payload size 2101
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:911600dab2ae7a9baff75958b84fe606851ce955"
bundle2-input-part: "pushkey" (params: 4 mandatory) supported
pushing key for "phases:e8fc755d4d8217ee5b0c2bb41558c40d43b92c01"
bundle2-input-bundle: 4 parts total
- updating the branch cache
bundle2-output-bundle: "HG20", 3 parts total
bundle2-output-part: "reply:changegroup" (advisory) (params: 0 advisory) empty payload
bundle2-output-part: "reply:pushkey" (params: 0 advisory) empty payload
--- a/tests/test-bookmarks-pushpull.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-bookmarks-pushpull.t Mon Aug 15 12:26:02 2016 -0400
@@ -7,9 +7,6 @@
> publish=False
> [experimental]
> evolution=createmarkers,exchange
- > # drop me once bundle2 is the default,
- > # added to get test change early.
- > bundle2-exp = True
> EOF
initialize
@@ -825,7 +822,7 @@
Using ssh
---------
- $ hg push -B @ ssh --config experimental.bundle2-exp=True
+ $ hg push -B @ ssh # bundle2+
pushing to ssh://user@dummy/issue4455-dest
searching for changes
no changes found
@@ -835,7 +832,7 @@
$ hg -R ../issue4455-dest/ bookmarks
no bookmarks set
- $ hg push -B @ ssh --config experimental.bundle2-exp=False
+ $ hg push -B @ ssh --config devel.legacy.exchange=bundle1
pushing to ssh://user@dummy/issue4455-dest
searching for changes
no changes found
@@ -848,7 +845,7 @@
Using http
----------
- $ hg push -B @ http --config experimental.bundle2-exp=True
+ $ hg push -B @ http # bundle2+
pushing to http://localhost:$HGPORT/
searching for changes
no changes found
@@ -858,7 +855,7 @@
$ hg -R ../issue4455-dest/ bookmarks
no bookmarks set
- $ hg push -B @ http --config experimental.bundle2-exp=False
+ $ hg push -B @ http --config devel.legacy.exchange=bundle1
pushing to http://localhost:$HGPORT/
searching for changes
no changes found
--- a/tests/test-branches.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-branches.t Mon Aug 15 12:26:02 2016 -0400
@@ -554,6 +554,18 @@
$ rmdir .hg/cache/rbc-revs-v1
$ mv .hg/cache/rbc-revs-v1_ .hg/cache/rbc-revs-v1
+no errors when wlock cannot be acquired
+
+#if unix-permissions
+ $ mv .hg/cache/rbc-revs-v1 .hg/cache/rbc-revs-v1_
+ $ rm -f .hg/cache/branch*
+ $ chmod 555 .hg
+ $ hg head a -T '{rev}\n'
+ 5
+ $ chmod 755 .hg
+ $ mv .hg/cache/rbc-revs-v1_ .hg/cache/rbc-revs-v1
+#endif
+
recovery from invalid cache revs file with trailing data
$ echo >> .hg/cache/rbc-revs-v1
$ rm -f .hg/cache/branch* && hg head a -T '{rev}\n' --debug
--- a/tests/test-bundle2-exchange.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-bundle2-exchange.t Mon Aug 15 12:26:02 2016 -0400
@@ -16,7 +16,6 @@
$ cat >> $HGRCPATH << EOF
> [experimental]
> evolution=createmarkers,exchange
- > bundle2-exp=True
> bundle2-output-capture=True
> [ui]
> ssh=python "$TESTDIR/dummyssh"
@@ -970,7 +969,7 @@
$ hg serve -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid >> $DAEMON_PIDS
- $ hg --config experimental.bundle2-exp=false clone http://localhost:$HGPORT/ not-bundle2
+ $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
requesting all changes
abort: remote error:
incompatible Mercurial client; bundle2 required
@@ -993,7 +992,7 @@
$ hg serve -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid >> $DAEMON_PIDS
- $ hg --config experimental.bundle2-exp=false clone http://localhost:$HGPORT/ not-bundle2-1
+ $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2-1
requesting all changes
adding changesets
adding manifests
@@ -1014,7 +1013,7 @@
$ hg serve -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid >> $DAEMON_PIDS
- $ hg --config experimental.bundle2-exp=false clone http://localhost:$HGPORT/ not-bundle2
+ $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
requesting all changes
abort: remote error:
incompatible Mercurial client; bundle2 required
@@ -1031,7 +1030,7 @@
> EOF
$ hg serve -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid >> $DAEMON_PIDS
- $ hg --config experimental.bundle2-exp=false clone http://localhost:$HGPORT not-bundle2
+ $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT not-bundle2
requesting all changes
abort: remote error:
incompatible Mercurial client; bundle2 required
@@ -1046,7 +1045,7 @@
$ hg serve -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid >> $DAEMON_PIDS
- $ hg --config experimental.bundle2-exp=false clone http://localhost:$HGPORT/ not-bundle2
+ $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2
requesting all changes
abort: remote error:
incompatible Mercurial client; bundle2 required
@@ -1063,7 +1062,7 @@
$ hg serve -p $HGPORT -d --pid-file=hg.pid
$ cat hg.pid >> $DAEMON_PIDS
- $ hg --config experimental.bundle2-exp=false clone http://localhost:$HGPORT/ not-bundle2-2
+ $ hg --config devel.legacy.exchange=bundle1 clone http://localhost:$HGPORT/ not-bundle2-2
requesting all changes
adding changesets
adding manifests
@@ -1100,7 +1099,7 @@
$ cd bundle2-only
$ echo commit > foo
$ hg commit -m commit
- $ hg --config experimental.bundle2-exp=false push
+ $ hg --config devel.legacy.exchange=bundle1 push
pushing to http://localhost:$HGPORT/
searching for changes
abort: remote error:
--- a/tests/test-bundle2-format.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-bundle2-format.t Mon Aug 15 12:26:02 2016 -0400
@@ -227,7 +227,6 @@
> [extensions]
> bundle2=$TESTTMP/bundle2.py
> [experimental]
- > bundle2-exp=True
> evolution=createmarkers
> [ui]
> ssh=python "$TESTDIR/dummyssh"
--- a/tests/test-bundle2-multiple-changegroups.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-bundle2-multiple-changegroups.t Mon Aug 15 12:26:02 2016 -0400
@@ -33,8 +33,6 @@
> EOF
$ cat >> $HGRCPATH << EOF
- > [experimental]
- > bundle2-exp=True
> [ui]
> logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
> EOF
--- a/tests/test-bundle2-pushback.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-bundle2-pushback.t Mon Aug 15 12:26:02 2016 -0400
@@ -50,8 +50,6 @@
$ cat >> $HGRCPATH <<EOF
> [extensions]
> bundle2=$TESTTMP/bundle2.py
- > [experimental]
- > bundle2-exp = True
> EOF
Without config
--- a/tests/test-bundle2-remote-changegroup.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-bundle2-remote-changegroup.t Mon Aug 15 12:26:02 2016 -0400
@@ -78,8 +78,6 @@
$ cat dumb.pid >> $DAEMON_PIDS
$ cat >> $HGRCPATH << EOF
- > [experimental]
- > bundle2-exp=True
> [ui]
> ssh=python "$TESTDIR/dummyssh"
> logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
--- a/tests/test-check-py3-compat.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-check-py3-compat.t Mon Aug 15 12:26:02 2016 -0400
@@ -81,7 +81,7 @@
mercurial/archival.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
mercurial/bookmarks.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
mercurial/branchmap.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
- mercurial/bundle2.py: invalid syntax: invalid syntax (<unknown>, line *) (glob)
+ mercurial/bundle2.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
mercurial/bundlerepo.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
mercurial/byterange.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
mercurial/changegroup.py: error importing: <TypeError> str expected, not bytes (error at encoding.py:*) (glob)
@@ -122,54 +122,52 @@
mercurial/hook.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
mercurial/httpconnection.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
mercurial/httppeer.py: error importing: <TypeError> str expected, not bytes (error at i18n.py:*) (glob)
- mercurial/keepalive.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/localrepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/lock.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/mail.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
+ mercurial/keepalive.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/localrepo.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/lock.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/mail.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
mercurial/manifest.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
- mercurial/match.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
+ mercurial/match.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
mercurial/mdiff.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
- mercurial/merge.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/minirst.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/namespaces.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
+ mercurial/merge.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/minirst.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/namespaces.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
mercurial/obsolete.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
- mercurial/patch.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/pathutil.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/peer.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/pure/mpatch.py: error importing module: <AttributeError> 'VendorImporter' object has no attribute 'find_spec' (line *) (glob)
- mercurial/pure/parsers.py: error importing module: <AttributeError> 'VendorImporter' object has no attribute 'find_spec' (line *) (glob)
- mercurial/pushkey.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/pvec.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/registrar.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/repair.py: error importing module: <SyntaxError> invalid syntax (bundle2.py, line *) (line *) (glob)
- mercurial/repoview.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/revlog.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/revset.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/scmposix.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/scmutil.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/scmwindows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
- mercurial/similar.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/simplemerge.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/sshpeer.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/sshserver.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/sslutil.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/statichttprepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/store.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/streamclone.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/subrepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/tagmerge.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/tags.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/templatefilters.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/templatekw.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/templater.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/transaction.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/ui.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/unionrepo.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/url.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/util.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
- mercurial/verify.py: error importing: <TypeError> '_fields_' must be a sequence of (name, C type) pairs (error at osutil.py:*) (glob)
+ mercurial/patch.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/pathutil.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/peer.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/pure/mpatch.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/pure/parsers.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/pushkey.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/pvec.py: error importing: <TypeError> getattr(): attribute name must be string (error at pycompat.py:*) (glob)
+ mercurial/registrar.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/repair.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/repoview.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/revlog.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/revset.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/scmutil.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/scmwindows.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/similar.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/simplemerge.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/sshpeer.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/sshserver.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/sslutil.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/statichttprepo.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/store.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/streamclone.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/subrepo.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/tagmerge.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/tags.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/templatefilters.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/templatekw.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/templater.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/transaction.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/ui.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/unionrepo.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/url.py: error importing: <TypeError> getattr(): attribute name must be string (error at util.py:*) (glob)
+ mercurial/verify.py: error importing: <TypeError> attribute name must be string, not 'bytes' (error at mdiff.py:*) (glob)
mercurial/win32.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
- mercurial/windows.py: error importing module: <ImportError> No module named '_winreg' (line *) (glob)
- mercurial/wireproto.py: error importing module: <SyntaxError> invalid syntax (bundle2.py, line *) (line *) (glob)
+ mercurial/windows.py: error importing module: <ImportError> No module named 'msvcrt' (line *) (glob)
+ mercurial/wireproto.py: error importing module: <TypeError> a bytes-like object is required, not 'str' (line *) (glob)
#endif
--- a/tests/test-commit-amend.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-commit-amend.t Mon Aug 15 12:26:02 2016 -0400
@@ -813,7 +813,7 @@
$ hg merge -q bar --config ui.interactive=True << EOF
> c
> EOF
- local changed aa which remote deleted
+ local changed aa which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? c
$ hg ci -m 'merge bar (with conflicts)'
$ hg log --config diff.git=1 -pr .
--- a/tests/test-copy-move-merge.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-copy-move-merge.t Mon Aug 15 12:26:02 2016 -0400
@@ -85,7 +85,7 @@
> c
> EOF
rebasing 2:add3f11052fa "other" (tip)
- remote changed a which local deleted
+ other [source] changed a which local [dest] deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
$ cat b
--- a/tests/test-debugextensions.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-debugextensions.t Mon Aug 15 12:26:02 2016 -0400
@@ -48,36 +48,39 @@
"buglink": "",
"name": "color",
"source": "*/hgext/color.py*", (glob)
- "testedwith": "internal"
+ "testedwith": ["internal"]
},
{
"buglink": "",
"name": "ext1",
"source": "*/extwithoutinfos.py*", (glob)
- "testedwith": ""
+ "testedwith": []
},
{
"buglink": "",
"name": "histedit",
"source": "*/hgext/histedit.py*", (glob)
- "testedwith": "internal"
+ "testedwith": ["internal"]
},
{
"buglink": "",
"name": "mq",
"source": "*/hgext/mq.py*", (glob)
- "testedwith": "internal"
+ "testedwith": ["internal"]
},
{
"buglink": "",
"name": "patchbomb",
"source": "*/hgext/patchbomb.py*", (glob)
- "testedwith": "internal"
+ "testedwith": ["internal"]
},
{
"buglink": "",
"name": "rebase",
"source": "*/hgext/rebase.py*", (glob)
- "testedwith": "internal"
+ "testedwith": ["internal"]
}
]
+
+ $ hg debugextensions -T '{ifcontains("internal", testedwith, "", "{name}\n")}'
+ ext1
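
The list-valued testedwith field is what makes the new template assertion above work: ifcontains() tests membership in the list, and because the then-branch in the template is empty, only extensions whose testedwith does not include "internal" get printed, which is why the expected output lists just ext1. A rough stand-in for that logic (a hypothetical ifcontains helper, not the real templater):

    # Hypothetical stand-in for ifcontains(needle, list, then, else); only the
    # membership-on-a-list behaviour is being illustrated here.
    def ifcontains(needle, haystack, then, otherwise):
        return then if needle in haystack else otherwise

    exts = {'color': ['internal'], 'ext1': [], 'histedit': ['internal']}
    lines = [ifcontains('internal', exts[n], '', n + '\n') for n in sorted(exts)]
    print(''.join(lines).strip())    # prints only ext1, as in the expected output
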
--- a/tests/test-devel-warnings.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-devel-warnings.t Mon Aug 15 12:26:02 2016 -0400
@@ -91,6 +91,8 @@
*/mercurial/dispatch.py:* in run (glob)
*/mercurial/dispatch.py:* in dispatch (glob)
*/mercurial/dispatch.py:* in _runcatch (glob)
+ */mercurial/dispatch.py:* in callcatch (glob)
+ */mercurial/dispatch.py:* in _runcatchfunc (glob)
*/mercurial/dispatch.py:* in _dispatch (glob)
*/mercurial/dispatch.py:* in runcommand (glob)
*/mercurial/dispatch.py:* in _runcommand (glob)
@@ -125,6 +127,8 @@
*/mercurial/dispatch.py:* in run (glob)
*/mercurial/dispatch.py:* in dispatch (glob)
*/mercurial/dispatch.py:* in _runcatch (glob)
+ */mercurial/dispatch.py:* in callcatch (glob)
+ */mercurial/dispatch.py:* in _runcatchfunc (glob)
*/mercurial/dispatch.py:* in _dispatch (glob)
*/mercurial/dispatch.py:* in runcommand (glob)
*/mercurial/dispatch.py:* in _runcommand (glob)
@@ -147,6 +151,8 @@
*/mercurial/dispatch.py:* in run (glob)
*/mercurial/dispatch.py:* in dispatch (glob)
*/mercurial/dispatch.py:* in _runcatch (glob)
+ */mercurial/dispatch.py:* in callcatch (glob)
+ */mercurial/dispatch.py:* in _runcatchfunc (glob)
*/mercurial/dispatch.py:* in _dispatch (glob)
*/mercurial/dispatch.py:* in runcommand (glob)
*/mercurial/dispatch.py:* in _runcommand (glob)
--- a/tests/test-diff-change.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-diff-change.t Mon Aug 15 12:26:02 2016 -0400
@@ -50,6 +50,13 @@
@@ -1,1 +1,1 @@
-third
+wdir
+ $ hg diff -r '(2:2)' --nodates
+ diff -r bf5ff72eb7e0 file.txt
+ --- a/file.txt
+ +++ b/file.txt
+ @@ -1,1 +1,1 @@
+ -third
+ +wdir
$ hg diff -r 2::2 --nodates
diff -r bf5ff72eb7e0 file.txt
--- a/file.txt
--- a/tests/test-extdiff.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-extdiff.t Mon Aug 15 12:26:02 2016 -0400
@@ -31,10 +31,12 @@
$ hg help falabala
hg falabala [OPTION]... [FILE]...
- use 'echo' to diff repository (or selected files)
+ use external program to diff repository (or selected files)
Show differences between revisions for the specified files, using the
- 'echo' program.
+ following program:
+
+ 'echo'
When two revision arguments are given, then changes are shown between
those revisions. If only one revision is specified then that revision is
@@ -407,5 +409,6 @@
(try "hg help" for a list of topics)
[255]
- $ LC_MESSAGES=ja_JP.UTF-8 hg --config hgext.extdiff= --config extdiff.cmd.td=$U help td | grep "^use"
- use '\xa5\xa5' to diff repository (or selected files)
+ $ LC_MESSAGES=ja_JP.UTF-8 hg --config hgext.extdiff= --config extdiff.cmd.td=$U help td \
+ > | grep "^ '"
+ '\xa5\xa5'
--- a/tests/test-extension.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-extension.t Mon Aug 15 12:26:02 2016 -0400
@@ -432,6 +432,36 @@
REL: this is absextroot.xsub1.xsub2.called.func()
REL: this relimporter imports 'this is absextroot.relimportee'
+Examine whether sub-module is imported relatively as expected.
+
+See also issue5208 for detail about example case on Python 3.x.
+
+ $ f -q $TESTTMP/extlibroot/lsub1/lsub2/notexist.py
+ $TESTTMP/extlibroot/lsub1/lsub2/notexist.py: file not found
+
+ $ cat > $TESTTMP/notexist.py <<EOF
+ > text = 'notexist.py at root is loaded unintentionally\n'
+ > EOF
+
+ $ cat > $TESTTMP/checkrelativity.py <<EOF
+ > from mercurial import cmdutil
+ > cmdtable = {}
+ > command = cmdutil.command(cmdtable)
+ >
+ > # demand import avoids failure of importing notexist here
+ > import extlibroot.lsub1.lsub2.notexist
+ >
+ > @command('checkrelativity', [], norepo=True)
+ > def checkrelativity(ui, *args, **opts):
+ > try:
+ > ui.write(extlibroot.lsub1.lsub2.notexist.text)
+ > return 1 # unintentional success
+ > except ImportError:
+ > pass # intentional failure
+ > EOF
+
+ $ (PYTHONPATH=${PYTHONPATH}${PATHSEP}${TESTTMP}; hg --config extensions.checkrelativity=$TESTTMP/checkrelativity.py checkrelativity)
+
#endif
$ cd ..
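
The checkrelativity extension added above leans on demand loading: the import of extlibroot.lsub1.lsub2.notexist at the top of the extension succeeds only because the real import is deferred, and the ImportError is expected to surface when the command body finally touches .text. A stripped-down illustration of that deferral (a hypothetical lazymodule proxy, not Mercurial's demandimport):

    # Hypothetical proxy that defers the real import until first attribute use.
    class lazymodule(object):
        def __init__(self, name):
            self._name = name
        def __getattr__(self, attr):
            mod = __import__(self._name, fromlist=[attr])  # import happens here
            return getattr(mod, attr)

    notexist = lazymodule('extlibroot.lsub1.lsub2.notexist')
    try:
        notexist.text                  # only now does the import run and fail
    except ImportError:
        print('deferred import failed on first use, as the test expects')
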
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-extensions-wrapfunction.py Mon Aug 15 12:26:02 2016 -0400
@@ -0,0 +1,39 @@
+from __future__ import absolute_import, print_function
+
+from mercurial import extensions
+
+def genwrapper(x):
+ def f(orig, *args, **kwds):
+ return [x] + orig(*args, **kwds)
+ f.x = x
+ return f
+
+def getid(wrapper):
+ return getattr(wrapper, 'x', '-')
+
+wrappers = [genwrapper(i) for i in range(5)]
+
+class dummyclass(object):
+ def getstack(self):
+ return ['orig']
+
+dummy = dummyclass()
+
+def batchwrap(wrappers):
+ for w in wrappers:
+ extensions.wrapfunction(dummy, 'getstack', w)
+ print('wrap %d: %s' % (getid(w), dummy.getstack()))
+
+def batchunwrap(wrappers):
+ for w in wrappers:
+ result = None
+ try:
+ result = extensions.unwrapfunction(dummy, 'getstack', w)
+ msg = str(dummy.getstack())
+ except (ValueError, IndexError) as e:
+ msg = e.__class__.__name__
+ print('unwrap %s: %s: %s' % (getid(w), getid(result), msg))
+
+batchwrap(wrappers + [wrappers[0]])
+batchunwrap([(wrappers[i] if i >= 0 else None)
+ for i in [3, None, 0, 4, 0, 2, 1, None]])
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tests/test-extensions-wrapfunction.py.out Mon Aug 15 12:26:02 2016 -0400
@@ -0,0 +1,14 @@
+wrap 0: [0, 'orig']
+wrap 1: [1, 0, 'orig']
+wrap 2: [2, 1, 0, 'orig']
+wrap 3: [3, 2, 1, 0, 'orig']
+wrap 4: [4, 3, 2, 1, 0, 'orig']
+wrap 0: [0, 4, 3, 2, 1, 0, 'orig']
+unwrap 3: 3: [0, 4, 2, 1, 0, 'orig']
+unwrap -: 0: [4, 2, 1, 0, 'orig']
+unwrap 0: 0: [4, 2, 1, 'orig']
+unwrap 4: 4: [2, 1, 'orig']
+unwrap 0: -: ValueError
+unwrap 2: 2: [1, 'orig']
+unwrap 1: 1: ['orig']
+unwrap -: -: IndexError
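
The new test and its expected output document that wrappers form a stack: each wrapfunction call pushes another layer in front of the original, unwrapfunction can pop a specific wrapper even from the middle of the stack (unwrapping 3 from [0, 4, 3, 2, 1, 0, 'orig'] removes just that layer), a ValueError is reported when the named wrapper is no longer present, and the final None case ends in IndexError once nothing is left to unwrap. The sketch below reproduces only the basic push-a-layer pattern in plain Python; the wrap/holder names are illustrative and this is not the mercurial.extensions API.

    # Minimal sketch of the push-a-layer pattern: each wrap captures the
    # current attribute as "orig" and installs a closure in front of it, so
    # the last wrapper added runs first.
    class holder(object):
        def getstack(self):
            return ['orig']

    def wrap(container, name, wrapper):
        origfn = getattr(container, name)
        def wrapped(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)
        setattr(container, name, wrapped)

    obj = holder()
    wrap(obj, 'getstack', lambda orig: [0] + orig())
    wrap(obj, 'getstack', lambda orig: [1] + orig())
    print(obj.getstack())        # [1, 0, 'orig'], mirroring "wrap 1" above
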
--- a/tests/test-help.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-help.t Mon Aug 15 12:26:02 2016 -0400
@@ -929,16 +929,16 @@
Technical implementation topics
"""""""""""""""""""""""""""""""
- bundles container for exchange of repository data
- changegroups representation of revlog data
- requirements repository requirements
- revlogs revision storage mechanism
+ bundles Bundles
+ changegroups Changegroups
+ requirements Repository Requirements
+ revlogs Revision Logs
sub-topics can be accessed
$ hg help internals.changegroups
- Changegroups
- ============
+ Changegroups
+ """"""""""""
Changegroups are representations of repository revlog data, specifically
the changelog, manifest, and filelogs.
@@ -974,7 +974,7 @@
this an *empty chunk*.
Delta Groups
- ------------
+ ============
A *delta group* expresses the content of a revlog as a series of deltas,
or patches against previous revisions.
@@ -1050,21 +1050,21 @@
which can result in smaller deltas and more efficient encoding of data.
Changeset Segment
- -----------------
+ =================
The *changeset segment* consists of a single *delta group* holding
changelog data. It is followed by an *empty chunk* to denote the boundary
to the *manifests segment*.
Manifest Segment
- ----------------
+ ================
The *manifest segment* consists of a single *delta group* holding manifest
data. It is followed by an *empty chunk* to denote the boundary to the
*filelogs segment*.
Filelogs Segment
- ----------------
+ ================
The *filelogs* segment consists of multiple sub-segments, each
corresponding to an individual file whose data is being described:
@@ -2872,28 +2872,28 @@
bundles
</a>
</td><td>
- container for exchange of repository data
+ Bundles
</td></tr>
<tr><td>
<a href="/help/internals.changegroups">
changegroups
</a>
</td><td>
- representation of revlog data
+ Changegroups
</td></tr>
<tr><td>
<a href="/help/internals.requirements">
requirements
</a>
</td><td>
- repository requirements
+ Repository Requirements
</td></tr>
<tr><td>
<a href="/help/internals.revlogs">
revlogs
</a>
</td><td>
- revision storage mechanism
+ Revision Logs
</td></tr>
@@ -2957,8 +2957,7 @@
number or hash, or <a href="/help/revsets">revset expression</a>.</div>
</form>
<div id="doc">
- <h1>representation of revlog data</h1>
- <h2>Changegroups</h2>
+ <h1>Changegroups</h1>
<p>
Changegroups are representations of repository revlog data, specifically
the changelog, manifest, and filelogs.
@@ -3000,7 +2999,7 @@
There is a special case chunk that has 0 length ("0x00000000"). We
call this an *empty chunk*.
</p>
- <h3>Delta Groups</h3>
+ <h2>Delta Groups</h2>
<p>
A *delta group* expresses the content of a revlog as a series of deltas,
or patches against previous revisions.
@@ -3091,19 +3090,19 @@
changegroup. This allows the delta to be expressed against any parent,
which can result in smaller deltas and more efficient encoding of data.
</p>
- <h3>Changeset Segment</h3>
+ <h2>Changeset Segment</h2>
<p>
The *changeset segment* consists of a single *delta group* holding
changelog data. It is followed by an *empty chunk* to denote the
boundary to the *manifests segment*.
</p>
- <h3>Manifest Segment</h3>
+ <h2>Manifest Segment</h2>
<p>
The *manifest segment* consists of a single *delta group* holding
manifest data. It is followed by an *empty chunk* to denote the boundary
to the *filelogs segment*.
</p>
- <h3>Filelogs Segment</h3>
+ <h2>Filelogs Segment</h2>
<p>
The *filelogs* segment consists of multiple sub-segments, each
corresponding to an individual file whose data is being described:
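
The retitled changegroups topic keeps its substance: a changegroup is a sequence of segments (changesets, then manifests, then filelogs), each built from delta groups, and a zero-length "empty chunk" marks the end of a group. A loose sketch of just that framing follows; it assumes a 4-byte big-endian length prefix that counts the length field itself, and it is not Mercurial's actual changegroup reader.

    # Loose sketch of reading one delta group's chunks until the empty chunk.
    # The inclusive length prefix is an assumption made for this illustration.
    import io
    import struct

    def readgroup(stream):
        chunks = []
        while True:
            length = struct.unpack('>I', stream.read(4))[0]
            if length == 0:              # the "empty chunk" ends the group
                return chunks
            chunks.append(stream.read(length - 4))

    data = struct.pack('>I', 9) + b'delta' + struct.pack('>I', 0)
    print(readgroup(io.BytesIO(data)))   # one chunk: 'delta'
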
--- a/tests/test-hook.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-hook.t Mon Aug 15 12:26:02 2016 -0400
@@ -1,12 +1,6 @@
commit hooks can see env vars
(and post-transaction one are run unlocked)
- $ cat << EOF >> $HGRCPATH
- > [experimental]
- > # drop me once bundle2 is the default,
- > # added to get test change early.
- > bundle2-exp = True
- > EOF
$ cat > $TESTTMP/txnabort.checkargs.py <<EOF
> def showargs(ui, repo, hooktype, **kwargs):
--- a/tests/test-http-bundle1.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-http-bundle1.t Mon Aug 15 12:26:02 2016 -0400
@@ -4,9 +4,9 @@
parts that are not bundle1/bundle2 specific.
$ cat << EOF >> $HGRCPATH
- > [experimental]
+ > [devel]
> # This test is dedicated to interaction through old bundle
- > bundle2-exp = False
+ > legacy.exchange = bundle1
> EOF
$ hg init test
--- a/tests/test-http-proxy.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-http-proxy.t Mon Aug 15 12:26:02 2016 -0400
@@ -1,10 +1,4 @@
#require serve
- $ cat << EOF >> $HGRCPATH
- > [experimental]
- > # drop me once bundle2 is the default,
- > # added to get test change early.
- > bundle2-exp = True
- > EOF
$ hg init a
$ cd a
--- a/tests/test-issue1175.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-issue1175.t Mon Aug 15 12:26:02 2016 -0400
@@ -54,7 +54,7 @@
diff --git a/b b/b
new file mode 100644
-http://bz.selenic.com/show_bug.cgi?id=4476
+https://bz.mercurial-scm.org/show_bug.cgi?id=4476
$ hg init foo
$ cd foo
--- a/tests/test-journal.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-journal.t Mon Aug 15 12:26:02 2016 -0400
@@ -130,7 +130,7 @@
cb9a9f314b8b bar book -f bar
1e6c11564562 bar book -r tip bar
-Test that verbose, JSON and commit output work
+Test that verbose, JSON, template and commit output work
$ hg journal --verbose --all
previous locations of the working copy and bookmarks:
@@ -146,37 +146,57 @@
[
{
"command": "up",
- "date": "1970-01-01 00:00 +0000",
+ "date": [5.0, 0],
"name": ".",
- "newhashes": "1e6c11564562",
- "oldhashes": "cb9a9f314b8b",
+ "newhashes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
+ "oldhashes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
"user": "foobar"
},
{
"command": "up 0",
- "date": "1970-01-01 00:00 +0000",
+ "date": [2.0, 0],
"name": ".",
- "newhashes": "cb9a9f314b8b",
- "oldhashes": "1e6c11564562",
+ "newhashes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+ "oldhashes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
"user": "foobar"
},
{
"command": "commit -Aqm b",
- "date": "1970-01-01 00:00 +0000",
+ "date": [1.0, 0],
"name": ".",
- "newhashes": "1e6c11564562",
- "oldhashes": "cb9a9f314b8b",
+ "newhashes": ["1e6c11564562b4ed919baca798bc4338bd299d6a"],
+ "oldhashes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
"user": "foobar"
},
{
"command": "commit -Aqm a",
- "date": "1970-01-01 00:00 +0000",
+ "date": [0.0, 0],
"name": ".",
- "newhashes": "cb9a9f314b8b",
- "oldhashes": "000000000000",
+ "newhashes": ["cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b"],
+ "oldhashes": ["0000000000000000000000000000000000000000"],
"user": "foobar"
}
]
+
+ $ cat <<EOF >> $HGRCPATH
+ > [templates]
+ > j = "{oldhashes % '{node|upper}'} -> {newhashes % '{node|upper}'}
+ > - user: {user}
+ > - command: {command}
+ > - date: {date|rfc3339date}
+ > - newhashes: {newhashes}
+ > - oldhashes: {oldhashes}
+ > "
+ > EOF
+ $ hg journal -Tj -l1
+ previous locations of '.':
+ CB9A9F314B8B07BA71012FCDBC544B5A4D82FF5B -> 1E6C11564562B4ED919BACA798BC4338BD299D6A
+ - user: foobar
+ - command: up
+ - date: 1970-01-01T00:00:05+00:00
+ - newhashes: 1e6c11564562b4ed919baca798bc4338bd299d6a
+ - oldhashes: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
+
$ hg journal --commit
previous locations of '.':
1e6c11564562 up
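
The machine-readable journal output now carries dates as (unixtime, offset) pairs and hashes as lists of full nodes, and the template run above renders the same entries with |rfc3339date. Purely as an illustration, the snippet below turns the [5.0, 0] pair from the first JSON entry into the 1970-01-01T00:00:05+00:00 string seen in the template output; the offset in this entry is 0, so the sign convention of the second element does not matter here.

    # Illustration only: render a (unixtime, offset) pair the way the first
    # entry above appears after the rfc3339date filter.  A zero offset is
    # treated as UTC, as hedged in the text.
    import datetime

    def rfc3339(pair):
        unixtime, offset = pair
        tz = datetime.timezone(datetime.timedelta(seconds=-offset))
        return datetime.datetime.fromtimestamp(unixtime, tz).isoformat()

    print(rfc3339([5.0, 0]))     # 1970-01-01T00:00:05+00:00
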
--- a/tests/test-largefiles-update.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-largefiles-update.t Mon Aug 15 12:26:02 2016 -0400
@@ -611,7 +611,7 @@
> EOF
rebasing 1:72518492caa6 "#1"
rebasing 4:07d6153b5c04 "#4"
- local changed .hglf/large1 which remote deleted
+ local [dest] changed .hglf/large1 which other [source] deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? c
$ hg diff -c "tip~1" --nodates .hglf/large1 | grep '^[+-][0-9a-z]'
--- a/tests/test-largefiles.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-largefiles.t Mon Aug 15 12:26:02 2016 -0400
@@ -19,10 +19,6 @@
> usercache=${USERCACHE}
> [hooks]
> precommit=sh -c "echo \\"Invoking status precommit hook\\"; hg status"
- > [experimental]
- > # drop me once bundle2 is the default,
- > # added to get test change early.
- > bundle2-exp = True
> EOF
Create the repo with a couple of revisions of both large and normal
--- a/tests/test-merge-changedelete.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-merge-changedelete.t Mon Aug 15 12:26:02 2016 -0400
@@ -54,9 +54,9 @@
Non-interactive merge:
$ hg merge -y
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging file3
warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -117,9 +117,9 @@
> c
> d
> EOF
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? c
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? d
merging file3
warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -181,18 +181,18 @@
> baz
> c
> EOF
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? foo
unrecognized response
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? bar
unrecognized response
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? d
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? baz
unrecognized response
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
merging file3
warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -250,9 +250,9 @@
$ hg merge --config ui.interactive=true <<EOF
> d
> EOF
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? d
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved?
merging file3
warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -445,9 +445,9 @@
1 other heads for branch "default"
$ hg merge --config ui.interactive=True --tool :prompt
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved?
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved?
no tool found to merge file3
keep (l)ocal, take (o)ther, or leave (u)nresolved?
@@ -501,9 +501,9 @@
1 other heads for branch "default"
$ hg merge --tool :prompt
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
no tool found to merge file3
keep (l)ocal, take (o)ther, or leave (u)nresolved? u
@@ -555,9 +555,9 @@
1 other heads for branch "default"
$ hg merge --tool :merge3
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging file3
warning: conflicts while merging file3! (edit, then use 'hg resolve --mark')
@@ -642,9 +642,9 @@
(status identical)
=== :other -> :prompt ===
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved?
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved?
no tool found to merge file3
keep (l)ocal, take (o)ther, or leave (u)nresolved?
@@ -671,9 +671,9 @@
(status identical)
=== :local -> :prompt ===
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved?
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved?
no tool found to merge file3
keep (l)ocal, take (o)ther, or leave (u)nresolved?
@@ -690,9 +690,9 @@
(status identical)
=== :fail -> :prompt ===
- local changed file1 which remote deleted
+ local changed file1 which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved?
- remote changed file2 which local deleted
+ other changed file2 which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved?
no tool found to merge file3
keep (l)ocal, take (o)ther, or leave (u)nresolved?
@@ -717,9 +717,9 @@
$ echo changed >> file1
$ hg rm file2
$ hg update 1 -y
- local changed file1 which remote deleted
+ local [working copy] changed file1 which other [destination] deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- remote changed file2 which local deleted
+ other [destination] changed file2 which local [working copy] deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
1 files updated, 0 files merged, 0 files removed, 2 files unresolved
use 'hg resolve' to retry unresolved file merges
@@ -893,9 +893,9 @@
$ echo changed >> file1
$ hg rm file2
$ hg update 1 --config ui.interactive=True --tool :prompt
- local changed file1 which remote deleted
+ local [working copy] changed file1 which other [destination] deleted
use (c)hanged version, (d)elete, or leave (u)nresolved?
- remote changed file2 which local deleted
+ other [destination] changed file2 which local [working copy] deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved?
1 files updated, 0 files merged, 0 files removed, 2 files unresolved
use 'hg resolve' to retry unresolved file merges
@@ -943,9 +943,9 @@
$ echo changed >> file1
$ hg rm file2
$ hg update 1 --tool :merge3
- local changed file1 which remote deleted
+ local [working copy] changed file1 which other [destination] deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- remote changed file2 which local deleted
+ other [destination] changed file2 which local [working copy] deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
1 files updated, 0 files merged, 0 files removed, 2 files unresolved
use 'hg resolve' to retry unresolved file merges
@@ -999,9 +999,9 @@
(status identical)
=== :other -> :prompt ===
- local changed file1 which remote deleted
+ local [working copy] changed file1 which other [destination] deleted
use (c)hanged version, (d)elete, or leave (u)nresolved?
- remote changed file2 which local deleted
+ other [destination] changed file2 which local [working copy] deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved?
--- diff of status ---
(status identical)
@@ -1026,9 +1026,9 @@
(status identical)
=== :local -> :prompt ===
- local changed file1 which remote deleted
+ local [working copy] changed file1 which other [destination] deleted
use (c)hanged version, (d)elete, or leave (u)nresolved?
- remote changed file2 which local deleted
+ other [destination] changed file2 which local [working copy] deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved?
--- diff of status ---
(status identical)
@@ -1043,9 +1043,9 @@
(status identical)
=== :fail -> :prompt ===
- local changed file1 which remote deleted
+ local [working copy] changed file1 which other [destination] deleted
use (c)hanged version, (d)elete, or leave (u)nresolved?
- remote changed file2 which local deleted
+ other [destination] changed file2 which local [working copy] deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved?
--- diff of status ---
(status identical)
--- a/tests/test-merge-force.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-merge-force.t Mon Aug 15 12:26:02 2016 -0400
@@ -142,55 +142,55 @@
# in the same way, so it could potentially be left alone
$ hg merge -f --tool internal:merge3 'desc("remote")' 2>&1 | tee $TESTTMP/merge-output-1
- local changed content1_missing_content1_content4-tracked which remote deleted
+ local changed content1_missing_content1_content4-tracked which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- local changed content1_missing_content3_content3-tracked which remote deleted
+ local changed content1_missing_content3_content3-tracked which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- local changed content1_missing_content3_content4-tracked which remote deleted
+ local changed content1_missing_content3_content4-tracked which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- local changed content1_missing_missing_content4-tracked which remote deleted
+ local changed content1_missing_missing_content4-tracked which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- remote changed content1_content2_content1_content1-untracked which local deleted
+ other changed content1_content2_content1_content1-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content1_content2-untracked which local deleted
+ other changed content1_content2_content1_content2-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content1_content4-untracked which local deleted
+ other changed content1_content2_content1_content4-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content1_missing-tracked which local deleted
+ other changed content1_content2_content1_missing-tracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content1_missing-untracked which local deleted
+ other changed content1_content2_content1_missing-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content2_content1-untracked which local deleted
+ other changed content1_content2_content2_content1-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content2_content2-untracked which local deleted
+ other changed content1_content2_content2_content2-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content2_content4-untracked which local deleted
+ other changed content1_content2_content2_content4-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content2_missing-tracked which local deleted
+ other changed content1_content2_content2_missing-tracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content2_missing-untracked which local deleted
+ other changed content1_content2_content2_missing-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content3_content1-untracked which local deleted
+ other changed content1_content2_content3_content1-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content3_content2-untracked which local deleted
+ other changed content1_content2_content3_content2-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content3_content3-untracked which local deleted
+ other changed content1_content2_content3_content3-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content3_content4-untracked which local deleted
+ other changed content1_content2_content3_content4-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content3_missing-tracked which local deleted
+ other changed content1_content2_content3_missing-tracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content3_missing-untracked which local deleted
+ other changed content1_content2_content3_missing-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_missing_content1-untracked which local deleted
+ other changed content1_content2_missing_content1-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_missing_content2-untracked which local deleted
+ other changed content1_content2_missing_content2-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_missing_content4-untracked which local deleted
+ other changed content1_content2_missing_content4-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_missing_missing-tracked which local deleted
+ other changed content1_content2_missing_missing-tracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_missing_missing-untracked which local deleted
+ other changed content1_content2_missing_missing-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging content1_content2_content1_content4-tracked
merging content1_content2_content2_content1-tracked
@@ -703,63 +703,63 @@
(no more unresolved files)
$ hg resolve --unmark --all
$ hg resolve --all --tool internal:merge3
- remote changed content1_content2_content1_content1-untracked which local deleted
+ other changed content1_content2_content1_content1-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content1_content2-untracked which local deleted
+ other changed content1_content2_content1_content2-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging content1_content2_content1_content4-tracked
- remote changed content1_content2_content1_content4-untracked which local deleted
+ other changed content1_content2_content1_content4-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content1_missing-tracked which local deleted
+ other changed content1_content2_content1_missing-tracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content1_missing-untracked which local deleted
+ other changed content1_content2_content1_missing-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging content1_content2_content2_content1-tracked
- remote changed content1_content2_content2_content1-untracked which local deleted
+ other changed content1_content2_content2_content1-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content2_content2-untracked which local deleted
+ other changed content1_content2_content2_content2-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging content1_content2_content2_content4-tracked
- remote changed content1_content2_content2_content4-untracked which local deleted
+ other changed content1_content2_content2_content4-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content2_missing-tracked which local deleted
+ other changed content1_content2_content2_missing-tracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content2_missing-untracked which local deleted
+ other changed content1_content2_content2_missing-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging content1_content2_content3_content1-tracked
- remote changed content1_content2_content3_content1-untracked which local deleted
+ other changed content1_content2_content3_content1-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content3_content2-untracked which local deleted
+ other changed content1_content2_content3_content2-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging content1_content2_content3_content3-tracked
- remote changed content1_content2_content3_content3-untracked which local deleted
+ other changed content1_content2_content3_content3-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging content1_content2_content3_content4-tracked
- remote changed content1_content2_content3_content4-untracked which local deleted
+ other changed content1_content2_content3_content4-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content3_missing-tracked which local deleted
+ other changed content1_content2_content3_missing-tracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_content3_missing-untracked which local deleted
+ other changed content1_content2_content3_missing-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging content1_content2_missing_content1-tracked
- remote changed content1_content2_missing_content1-untracked which local deleted
+ other changed content1_content2_missing_content1-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_missing_content2-untracked which local deleted
+ other changed content1_content2_missing_content2-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging content1_content2_missing_content4-tracked
- remote changed content1_content2_missing_content4-untracked which local deleted
+ other changed content1_content2_missing_content4-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_missing_missing-tracked which local deleted
+ other changed content1_content2_missing_missing-tracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- remote changed content1_content2_missing_missing-untracked which local deleted
+ other changed content1_content2_missing_missing-untracked which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
- local changed content1_missing_content1_content4-tracked which remote deleted
+ local changed content1_missing_content1_content4-tracked which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- local changed content1_missing_content3_content3-tracked which remote deleted
+ local changed content1_missing_content3_content3-tracked which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- local changed content1_missing_content3_content4-tracked which remote deleted
+ local changed content1_missing_content3_content4-tracked which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
- local changed content1_missing_missing_content4-tracked which remote deleted
+ local changed content1_missing_missing_content4-tracked which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
merging missing_content2_content2_content4-tracked
merging missing_content2_content3_content3-tracked
--- a/tests/test-merge-remove.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-merge-remove.t Mon Aug 15 12:26:02 2016 -0400
@@ -102,7 +102,7 @@
Those who use force will lose
$ hg merge -f
- remote changed bar which local deleted
+ other changed bar which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
merging foo1 and foo to foo1
0 files updated, 1 files merged, 0 files removed, 1 files unresolved
--- a/tests/test-merge-types.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-merge-types.t Mon Aug 15 12:26:02 2016 -0400
@@ -173,7 +173,7 @@
(couldn't find merge tool hgmerge|tool hgmerge can't handle symlinks) (re)
picked tool ':prompt' for a (binary False symlink True changedelete False)
no tool found to merge a
- keep (l)ocal, take (o)ther, or leave (u)nresolved? u
+ keep (l)ocal [working copy], take (o)ther [destination], or leave (u)nresolved? u
0 files updated, 0 files merged, 0 files removed, 1 files unresolved
use 'hg resolve' to retry unresolved file merges
1 other heads for branch "default"
--- a/tests/test-obsolete-changeset-exchange.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-obsolete-changeset-exchange.t Mon Aug 15 12:26:02 2016 -0400
@@ -144,11 +144,11 @@
adding file changes
adding foo revisions
added 1 changesets with 1 changes to 1 files (+1 heads)
+ updating the branch cache
bundle2-input-part: total payload size 474
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-part: total payload size 58
bundle2-input-part: "listkeys" (params: 1 mandatory) supported
bundle2-input-bundle: 2 parts total
checking for updated bookmarks
- updating the branch cache
(run 'hg heads' to see heads, 'hg merge' to merge)
--- a/tests/test-obsolete.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-obsolete.t Mon Aug 15 12:26:02 2016 -0400
@@ -4,10 +4,6 @@
> publish=false
> [ui]
> logtemplate="{rev}:{node|short} ({phase}) [{tags} {bookmarks}] {desc|firstline}\n"
- > [experimental]
- > # drop me once bundle2 is the default,
- > # added to get test change early.
- > bundle2-exp = True
> EOF
$ mkcommit() {
> echo "$1" > "$1"
@@ -654,7 +650,7 @@
Test the debug output for exchange
----------------------------------
- $ hg pull ../tmpb --config 'experimental.obsmarkers-exchange-debug=True' --config 'experimental.bundle2-exp=True'
+ $ hg pull ../tmpb --config 'experimental.obsmarkers-exchange-debug=True' # bundle2
pulling from ../tmpb
searching for changes
no changes found
--- a/tests/test-phases-exchange.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-phases-exchange.t Mon Aug 15 12:26:02 2016 -0400
@@ -1,12 +1,5 @@
#require killdaemons
- $ cat << EOF >> $HGRCPATH
- > [experimental]
- > # drop me once bundle2 is the default,
- > # added to get test change early.
- > bundle2-exp = True
- > EOF
-
$ hgph() { hg log -G --template "{rev} {phase} {desc} - {node|short}\n" $*; }
$ mkcommit() {
@@ -1048,7 +1041,7 @@
$ cat ../beta.pid >> $DAEMON_PIDS
$ cd ../gamma
- $ hg pull http://localhost:$HGPORT/ --config experimental.bundle2-exp=True
+ $ hg pull http://localhost:$HGPORT/ # bundle2+
pulling from http://localhost:$HGPORT/
searching for changes
no changes found
@@ -1057,7 +1050,7 @@
enforce bundle1
- $ hg pull http://localhost:$HGPORT/ --config experimental.bundle2-exp=False
+ $ hg pull http://localhost:$HGPORT/ --config devel.legacy.exchange=bundle1
pulling from http://localhost:$HGPORT/
searching for changes
no changes found
--- a/tests/test-pull-http.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-pull-http.t Mon Aug 15 12:26:02 2016 -0400
@@ -48,11 +48,11 @@
$ echo 'allowpull = false' >> .hg/hgrc
$ hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
$ cat hg.pid >> $DAEMON_PIDS
- $ hg clone http://localhost:$HGPORT/ test4 --config experimental.bundle2-exp=True
+ $ hg clone http://localhost:$HGPORT/ test4 # bundle2+
requesting all changes
abort: authorization failed
[255]
- $ hg clone http://localhost:$HGPORT/ test4 --config experimental.bundle2-exp=False
+ $ hg clone http://localhost:$HGPORT/ test4 --config devel.legacy.exchange=bundle1
abort: authorization failed
[255]
$ killdaemons.py
--- a/tests/test-push-hook-lock.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-push-hook-lock.t Mon Aug 15 12:26:02 2016 -0400
@@ -26,7 +26,7 @@
$ echo bar >> 3/foo
$ hg --cwd 3 ci -m bar
- $ hg --cwd 3 push ../2 --config experimental.bundle2-exp=False
+ $ hg --cwd 3 push ../2 --config devel.legacy.exchange=bundle1
pushing to ../2
searching for changes
adding changesets
@@ -38,7 +38,7 @@
$ hg --cwd 1 --config extensions.strip= strip tip -q
$ hg --cwd 2 --config extensions.strip= strip tip -q
- $ hg --cwd 3 push ../2 --config experimental.bundle2-exp=True
+ $ hg --cwd 3 push ../2 # bundle2+
pushing to ../2
searching for changes
adding changesets
--- a/tests/test-push-http-bundle1.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-push-http-bundle1.t Mon Aug 15 12:26:02 2016 -0400
@@ -5,9 +5,9 @@
which does not need to exist to keep bundle1 working.
$ cat << EOF >> $HGRCPATH
- > [experimental]
+ > [devel]
> # This test is dedicated to interaction through old bundle
- > bundle2-exp = False
+ > legacy.exchange = bundle1
> EOF
$ hg init test
--- a/tests/test-push-warn.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-push-warn.t Mon Aug 15 12:26:02 2016 -0400
@@ -1,9 +1,3 @@
- $ cat << EOF >> $HGRCPATH
- > [experimental]
- > # drop me once bundle2 is the default,
- > # added to get test change early.
- > bundle2-exp = True
- > EOF
$ hg init a
$ cd a
$ echo foo > t1
--- a/tests/test-rebase-conflicts.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-rebase-conflicts.t Mon Aug 15 12:26:02 2016 -0400
@@ -304,7 +304,6 @@
invalid branchheads cache (served): tip differs
history modification detected - truncating revision branch cache to revision 9
rebase completed
- updating the branch cache
truncating cache/rbc-revs-v1 to 72
Test minimization of merge conflicts
--- a/tests/test-rebase-newancestor.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-rebase-newancestor.t Mon Aug 15 12:26:02 2016 -0400
@@ -135,7 +135,7 @@
note: rebase of 1:1d1a643d390e created no changes to commit
rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
rebasing 4:4b019212aaf6 "dev: merge default"
- remote changed f-default which local deleted
+ other [source] changed f-default which local [dest] deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
rebasing 6:9455ee510502 "dev: merge default"
saved backup bundle to $TESTTMP/ancestor-merge/.hg/strip-backup/1d1a643d390e-43e9e04b-backup.hg (glob)
@@ -164,7 +164,7 @@
> EOF
rebasing 2:ec2c14fb2984 "dev: f-dev stuff"
rebasing 4:4b019212aaf6 "dev: merge default"
- remote changed f-default which local deleted
+ other [source] changed f-default which local [dest] deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? c
rebasing 6:9455ee510502 "dev: merge default"
saved backup bundle to $TESTTMP/ancestor-merge-2/.hg/strip-backup/ec2c14fb2984-62d0b222-backup.hg (glob)
--- a/tests/test-rename-merge2.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-rename-merge2.t Mon Aug 15 12:26:02 2016 -0400
@@ -694,7 +694,7 @@
starting 4 threads for background file closing (?)
a: prompt deleted/changed -> m (premerge)
picked tool ':prompt' for a (binary False symlink False changedelete True)
- remote changed a which local deleted
+ other changed a which local deleted
use (c)hanged version, leave (d)eleted, or leave (u)nresolved? u
b: both created -> m (premerge)
picked tool 'python ../merge' for b (binary False symlink False changedelete False)
@@ -739,7 +739,7 @@
starting 4 threads for background file closing (?)
a: prompt changed/deleted -> m (premerge)
picked tool ':prompt' for a (binary False symlink False changedelete True)
- local changed a which remote deleted
+ local changed a which other deleted
use (c)hanged version, (d)elete, or leave (u)nresolved? u
b: both created -> m (premerge)
picked tool 'python ../merge' for b (binary False symlink False changedelete False)
--- a/tests/test-revset.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-revset.t Mon Aug 15 12:26:02 2016 -0400
@@ -497,6 +497,158 @@
8
9
+infix/suffix resolution of ^ operator (issue2884):
+
+ x^:y means (x^):y
+
+ $ try '1^:2'
+ (range
+ (parentpost
+ ('symbol', '1'))
+ ('symbol', '2'))
+ * set:
+ <spanset+ 0:2>
+ 0
+ 1
+ 2
+
+ $ try '1^::2'
+ (dagrange
+ (parentpost
+ ('symbol', '1'))
+ ('symbol', '2'))
+ * set:
+ <baseset+ [0, 1, 2]>
+ 0
+ 1
+ 2
+
+ $ try '9^:'
+ (rangepost
+ (parentpost
+ ('symbol', '9')))
+ * set:
+ <spanset+ 8:9>
+ 8
+ 9
+
+ x^:y should be resolved before omitting group operators
+
+ $ try '1^(:2)'
+ (parent
+ ('symbol', '1')
+ (group
+ (rangepre
+ ('symbol', '2'))))
+ hg: parse error: ^ expects a number 0, 1, or 2
+ [255]
+
+ x^:y should be resolved recursively
+
+ $ try 'sort(1^:2)'
+ (func
+ ('symbol', 'sort')
+ (range
+ (parentpost
+ ('symbol', '1'))
+ ('symbol', '2')))
+ * set:
+ <spanset+ 0:2>
+ 0
+ 1
+ 2
+
+ $ try '(3^:4)^:2'
+ (range
+ (parentpost
+ (group
+ (range
+ (parentpost
+ ('symbol', '3'))
+ ('symbol', '4'))))
+ ('symbol', '2'))
+ * set:
+ <spanset+ 0:2>
+ 0
+ 1
+ 2
+
+ $ try '(3^::4)^::2'
+ (dagrange
+ (parentpost
+ (group
+ (dagrange
+ (parentpost
+ ('symbol', '3'))
+ ('symbol', '4'))))
+ ('symbol', '2'))
+ * set:
+ <baseset+ [0, 1, 2]>
+ 0
+ 1
+ 2
+
+ $ try '(9^:)^:'
+ (rangepost
+ (parentpost
+ (group
+ (rangepost
+ (parentpost
+ ('symbol', '9'))))))
+ * set:
+ <spanset+ 4:9>
+ 4
+ 5
+ 6
+ 7
+ 8
+ 9
+
+ x^ in alias should also be resolved
+
+ $ try 'A' --config 'revsetalias.A=1^:2'
+ ('symbol', 'A')
+ * expanded:
+ (range
+ (parentpost
+ ('symbol', '1'))
+ ('symbol', '2'))
+ * set:
+ <spanset+ 0:2>
+ 0
+ 1
+ 2
+
+ $ try 'A:2' --config 'revsetalias.A=1^'
+ (range
+ ('symbol', 'A')
+ ('symbol', '2'))
+ * expanded:
+ (range
+ (parentpost
+ ('symbol', '1'))
+ ('symbol', '2'))
+ * set:
+ <spanset+ 0:2>
+ 0
+ 1
+ 2
+
+ but not beyond the boundary of alias expansion, because the resolution should
+ be made at the parsing stage
+
+ $ try '1^A' --config 'revsetalias.A=:2'
+ (parent
+ ('symbol', '1')
+ ('symbol', 'A'))
+ * expanded:
+ (parent
+ ('symbol', '1')
+ (rangepre
+ ('symbol', '2')))
+ hg: parse error: ^ expects a number 0, 1, or 2
+ [255]
+
ancestor can accept 0 or more arguments
$ log 'ancestor()'
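
The new revset cases pin down the precedence of the parent suffix: in 1^:2 the ^ binds to 1 first, so the expression means (1^):2 and evaluates to revisions 0 through 2, while the explicitly grouped 1^(:2) becomes the infix parent operator and is rejected with "^ expects a number 0, 1, or 2", as the parse error above shows. The toy below is not Mercurial's parser, just the set arithmetic that the first assertion's <spanset+ 0:2> result corresponds to on a linear history.

    # Toy model of "1^:2" == "(1^):2" on a linear history 0 <- 1 <- 2.
    parents = {0: None, 1: 0, 2: 1}

    def parent(rev):
        return parents[rev]

    def revrange(lo, hi):
        return list(range(lo, hi + 1))

    print(revrange(parent(1), 2))    # [0, 1, 2], matching the spanset above
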
--- a/tests/test-ssh-bundle1.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-ssh-bundle1.t Mon Aug 15 12:26:02 2016 -0400
@@ -2,9 +2,9 @@
parts that are not bundle1/bundle2 specific.
$ cat << EOF >> $HGRCPATH
- > [experimental]
+ > [devel]
> # This test is dedicated to interaction through old bundle
- > bundle2-exp = False
+ > legacy.exchange = bundle1
> [format] # temporary settings
> usegeneraldelta=yes
> EOF
--- a/tests/test-subrepo-missing.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-subrepo-missing.t Mon Aug 15 12:26:02 2016 -0400
@@ -62,7 +62,7 @@
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ rm .hgsubstate
$ hg up 0
- remote changed .hgsubstate which local deleted
+ other [destination] changed .hgsubstate which local [working copy] deleted
use (c)hanged version or leave (d)eleted? c
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg st
--- a/tests/test-subrepo.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-subrepo.t Mon Aug 15 12:26:02 2016 -0400
@@ -1529,7 +1529,7 @@
> [paths]
> default=../issue3781-dest/
> EOF
- $ hg push --config experimental.bundle2-exp=False
+ $ hg push --config devel.legacy.exchange=bundle1
pushing to $TESTTMP/issue3781-dest (glob)
pushing subrepo s to $TESTTMP/issue3781-dest/s
searching for changes
@@ -1539,7 +1539,7 @@
[1]
# clean the push cache
$ rm s/.hg/cache/storehash/*
- $ hg push --config experimental.bundle2-exp=True
+ $ hg push # bundle2+
pushing to $TESTTMP/issue3781-dest (glob)
pushing subrepo s to $TESTTMP/issue3781-dest/s
searching for changes
--- a/tests/test-tags.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-tags.t Mon Aug 15 12:26:02 2016 -0400
@@ -645,10 +645,6 @@
$ hg init tagsserver
$ cd tagsserver
- $ cat > .hg/hgrc << EOF
- > [experimental]
- > bundle2-exp=True
- > EOF
$ touch foo
$ hg -q commit -A -m initial
$ hg tag -m 'tag 0.1' 0.1
@@ -663,7 +659,7 @@
Cloning should pull down hgtags fnodes mappings and write the cache file
- $ hg --config experimental.bundle2-exp=True clone --pull tagsserver tagsclient
+ $ hg clone --pull tagsserver tagsclient
requesting all changes
adding changesets
adding manifests
--- a/tests/test-treemanifest.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-treemanifest.t Mon Aug 15 12:26:02 2016 -0400
@@ -62,6 +62,19 @@
dir1/dir2/b (glob)
e
+The manifest command works
+
+ $ hg manifest
+ a
+ b
+ dir1/a
+ dir1/b
+ dir1/dir1/a
+ dir1/dir1/b
+ dir1/dir2/a
+ dir1/dir2/b
+ e
+
Revision is not created for unchanged directory
$ mkdir dir2
@@ -313,6 +326,25 @@
rev offset length delta linkrev nodeid p1 p2
0 0 127 -1 4 064927a0648a 000000000000 000000000000
1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
+ $ hg incoming .hg/strip-backup/*
+ comparing with .hg/strip-backup/*-backup.hg (glob)
+ searching for changes
+ changeset: 6:51cfd7b1e13b
+ tag: tip
+ user: test
+ date: Thu Jan 01 00:00:00 1970 +0000
+ summary: modify dir1/a
+
+ $ hg pull .hg/strip-backup/*
+ pulling from .hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg
+ searching for changes
+ adding changesets
+ adding manifests
+ adding file changes
+ added 1 changesets with 1 changes to 1 files
+ (run 'hg update' to get a working copy)
+ $ hg --config extensions.strip= strip tip
+ saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/*-backup.hg (glob)
$ hg unbundle -q .hg/strip-backup/*
$ hg debugindex --dir dir1
rev offset length delta linkrev nodeid p1 p2
--- a/tests/test-unbundlehash.t Sun Aug 07 14:58:49 2016 +0900
+++ b/tests/test-unbundlehash.t Mon Aug 15 12:26:02 2016 -0400
@@ -3,11 +3,11 @@
Test wire protocol unbundle with hashed heads (capability: unbundlehash)
$ cat << EOF >> $HGRCPATH
- > [experimental]
+ > [devel]
> # This tests is intended for bundle1 only.
> # bundle2 carries the head information inside the bundle itself and
> # always uses 'force' as the heads value.
- > bundle2-exp = False
+ > legacy.exchange = bundle1
> EOF
Create a remote repository.