Mercurial > public > mercurial-scm > hg
changeset 36690:b529e640015d
merge with stable
author | Augie Fackler <augie@google.com> |
---|---|
date | Sun, 04 Mar 2018 10:42:51 -0500 |
parents | 2a258985ffeb (diff) b394778b1a50 (current diff) |
children | 1b179d151578 |
files | mercurial/context.py mercurial/subrepo.py mercurial/subrepoutil.py tests/test-annotate.t |
diffstat | 420 files changed, 25082 insertions(+), 7527 deletions(-) [+] |
line wrap: on
line diff
--- a/.clang-format Sat Mar 03 22:29:24 2018 -0500 +++ b/.clang-format Sun Mar 04 10:42:51 2018 -0500 @@ -6,3 +6,8 @@ IndentCaseLabels: false AllowShortBlocksOnASingleLine: false AllowShortFunctionsOnASingleLine: false +IncludeCategories: + - Regex: '^<' + Priority: 1 + - Regex: '^"' + Priority: 2
--- a/Makefile Sat Mar 03 22:29:24 2018 -0500 +++ b/Makefile Sun Mar 04 10:42:51 2018 -0500 @@ -234,18 +234,6 @@ docker-ubuntu-xenial-ppa: contrib/docker/ubuntu-xenial contrib/dockerdeb ubuntu xenial --source-only -docker-ubuntu-yakkety: contrib/docker/ubuntu-yakkety - contrib/dockerdeb ubuntu yakkety - -docker-ubuntu-yakkety-ppa: contrib/docker/ubuntu-yakkety - contrib/dockerdeb ubuntu yakkety --source-only - -docker-ubuntu-zesty: contrib/docker/ubuntu-zesty - contrib/dockerdeb ubuntu zesty - -docker-ubuntu-zesty-ppa: contrib/docker/ubuntu-zesty - contrib/dockerdeb ubuntu zesty --source-only - docker-ubuntu-artful: contrib/docker/ubuntu-artful contrib/dockerdeb ubuntu artful @@ -318,8 +306,6 @@ osx deb ppa docker-debian-jessie docker-debian-stretch \ docker-ubuntu-trusty docker-ubuntu-trusty-ppa \ docker-ubuntu-xenial docker-ubuntu-xenial-ppa \ - docker-ubuntu-yakkety docker-ubuntu-yakkety-ppa \ - docker-ubuntu-zesty docker-ubuntu-zesty-ppa \ docker-ubuntu-artful docker-ubuntu-artful-ppa \ fedora20 docker-fedora20 fedora21 docker-fedora21 \ centos5 docker-centos5 centos6 docker-centos6 centos7 docker-centos7 \
--- a/contrib/Makefile.python Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/Makefile.python Sun Mar 04 10:42:51 2018 -0500 @@ -1,4 +1,4 @@ -PYTHONVER=2.7.10 +PYTHONVER=2.7.14 PYTHONNAME=python- PREFIX=$(HOME)/bin/prefix-$(PYTHONNAME)$(PYTHONVER) SYMLINKDIR=$(HOME)/bin
--- a/contrib/buildrpm Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/buildrpm Sun Mar 04 10:42:51 2018 -0500 @@ -20,8 +20,8 @@ ;; --withpython | --with-python) shift - PYTHONVER=2.7.10 - PYTHONMD5=d7547558fd673bd9d38e2108c6b42521 + PYTHONVER=2.7.14 + PYTHONMD5=cee2e4b33ad3750da77b2e85f2f8b724 ;; --rpmbuilddir ) shift
--- a/contrib/check-code.py Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/check-code.py Sun Mar 04 10:42:51 2018 -0500 @@ -150,6 +150,7 @@ (r'grep.* -[ABC]', "don't use grep's context flags"), (r'find.*-printf', "don't use 'find -printf', it doesn't exist on BSD find(1)"), + (r'\$RANDOM ', "don't use bash-only $RANDOM to generate random values"), ], # warnings [
--- a/contrib/check-config.py Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/check-config.py Sun Mar 04 10:42:51 2018 -0500 @@ -15,7 +15,7 @@ documented = {} allowinconsistent = set() -configre = re.compile(r''' +configre = re.compile(br''' # Function call ui\.config(?P<ctype>|int|bool|list)\( # First argument. @@ -25,7 +25,7 @@ (?:default=)?(?P<default>\S+?))? \)''', re.VERBOSE | re.MULTILINE) -configwithre = re.compile(''' +configwithre = re.compile(b''' ui\.config(?P<ctype>with)\( # First argument is callback function. This doesn't parse robustly # if it is e.g. a function call. @@ -35,57 +35,57 @@ (?:default=)?(?P<default>\S+?))? \)''', re.VERBOSE | re.MULTILINE) -configpartialre = (r"""ui\.config""") +configpartialre = (br"""ui\.config""") -ignorere = re.compile(r''' +ignorere = re.compile(br''' \#\s(?P<reason>internal|experimental|deprecated|developer|inconsistent)\s config:\s(?P<config>\S+\.\S+)$ ''', re.VERBOSE | re.MULTILINE) def main(args): for f in args: - sect = '' - prevname = '' - confsect = '' - carryover = '' + sect = b'' + prevname = b'' + confsect = b'' + carryover = b'' linenum = 0 - for l in open(f): + for l in open(f, 'rb'): linenum += 1 # check topic-like bits - m = re.match('\s*``(\S+)``', l) + m = re.match(b'\s*``(\S+)``', l) if m: prevname = m.group(1) - if re.match('^\s*-+$', l): + if re.match(b'^\s*-+$', l): sect = prevname - prevname = '' + prevname = b'' if sect and prevname: - name = sect + '.' + prevname + name = sect + b'.' + prevname documented[name] = 1 # check docstring bits - m = re.match(r'^\s+\[(\S+)\]', l) + m = re.match(br'^\s+\[(\S+)\]', l) if m: confsect = m.group(1) continue - m = re.match(r'^\s+(?:#\s*)?(\S+) = ', l) + m = re.match(br'^\s+(?:#\s*)?(\S+) = ', l) if m: - name = confsect + '.' + m.group(1) + name = confsect + b'.' 
+ m.group(1) documented[name] = 1 # like the bugzilla extension - m = re.match(r'^\s*(\S+\.\S+)$', l) + m = re.match(br'^\s*(\S+\.\S+)$', l) if m: documented[m.group(1)] = 1 # like convert - m = re.match(r'^\s*:(\S+\.\S+):\s+', l) + m = re.match(br'^\s*:(\S+\.\S+):\s+', l) if m: documented[m.group(1)] = 1 # quoted in help or docstrings - m = re.match(r'.*?``(\S+\.\S+)``', l) + m = re.match(br'.*?``(\S+\.\S+)``', l) if m: documented[m.group(1)] = 1 @@ -108,7 +108,7 @@ default = m.group('default') if default in (None, 'False', 'None', '0', '[]', '""', "''"): default = '' - if re.match('[a-z.]+$', default): + if re.match(b'[a-z.]+$', default): default = '<variable>' if (name in foundopts and (ctype, default) != foundopts[name] and name not in allowinconsistent):
--- a/contrib/chg/chg.c Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/chg/chg.c Sun Mar 04 10:42:51 2018 -0500 @@ -38,11 +38,13 @@ const char **args; }; -static void initcmdserveropts(struct cmdserveropts *opts) { +static void initcmdserveropts(struct cmdserveropts *opts) +{ memset(opts, 0, sizeof(struct cmdserveropts)); } -static void freecmdserveropts(struct cmdserveropts *opts) { +static void freecmdserveropts(struct cmdserveropts *opts) +{ free(opts->args); opts->args = NULL; opts->argsize = 0; @@ -59,12 +61,8 @@ const char *name; size_t narg; } flags[] = { - {"--config", 1}, - {"--cwd", 1}, - {"--repo", 1}, - {"--repository", 1}, - {"--traceback", 0}, - {"-R", 1}, + {"--config", 1}, {"--cwd", 1}, {"--repo", 1}, + {"--repository", 1}, {"--traceback", 0}, {"-R", 1}, }; size_t i; for (i = 0; i < sizeof(flags) / sizeof(flags[0]); ++i) { @@ -89,21 +87,21 @@ /* * Parse argv[] and put sensitive flags to opts->args */ -static void setcmdserverargs(struct cmdserveropts *opts, - int argc, const char *argv[]) +static void setcmdserverargs(struct cmdserveropts *opts, int argc, + const char *argv[]) { size_t i, step; opts->argsize = 0; for (i = 0, step = 1; i < (size_t)argc; i += step, step = 1) { if (!argv[i]) - continue; /* pass clang-analyse */ + continue; /* pass clang-analyse */ if (strcmp(argv[i], "--") == 0) break; size_t n = testsensitiveflag(argv[i]); if (n == 0 || i + n > (size_t)argc) continue; - opts->args = reallocx(opts->args, - (n + opts->argsize) * sizeof(char *)); + opts->args = + reallocx(opts->args, (n + opts->argsize) * sizeof(char *)); memcpy(opts->args + opts->argsize, argv + i, sizeof(char *) * n); opts->argsize += n; @@ -180,8 +178,8 @@ r = snprintf(opts->sockname, sizeof(opts->sockname), sockfmt, basename); if (r < 0 || (size_t)r >= sizeof(opts->sockname)) abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r); - r = snprintf(opts->initsockname, sizeof(opts->initsockname), - "%s.%u", opts->sockname, (unsigned)getpid()); + r = 
snprintf(opts->initsockname, sizeof(opts->initsockname), "%s.%u", + opts->sockname, (unsigned)getpid()); if (r < 0 || (size_t)r >= sizeof(opts->initsockname)) abortmsg("too long TMPDIR or CHGSOCKNAME (r = %d)", r); } @@ -208,11 +206,14 @@ const char *hgcmd = gethgcmd(); const char *baseargv[] = { - hgcmd, - "serve", - "--cmdserver", "chgunix", - "--address", opts->initsockname, - "--daemon-postexec", "chdir:/", + hgcmd, + "serve", + "--cmdserver", + "chgunix", + "--address", + opts->initsockname, + "--daemon-postexec", + "chdir:/", }; size_t baseargvsize = sizeof(baseargv) / sizeof(baseargv[0]); size_t argsize = baseargvsize + opts->argsize + 1; @@ -237,7 +238,7 @@ debugmsg("try connect to %s repeatedly", opts->initsockname); - unsigned int timeoutsec = 60; /* default: 60 seconds */ + unsigned int timeoutsec = 60; /* default: 60 seconds */ const char *timeoutenv = getenv("CHGTIMEOUT"); if (timeoutenv) sscanf(timeoutenv, "%u", &timeoutsec); @@ -246,7 +247,7 @@ hgclient_t *hgc = hgc_open(opts->initsockname); if (hgc) { debugmsg("rename %s to %s", opts->initsockname, - opts->sockname); + opts->sockname); int r = rename(opts->initsockname, opts->sockname); if (r != 0) abortmsgerrno("cannot rename"); @@ -270,7 +271,7 @@ if (WIFEXITED(pst)) { if (WEXITSTATUS(pst) == 0) abortmsg("could not connect to cmdserver " - "(exited with status 0)"); + "(exited with status 0)"); debugmsg("cmdserver exited with status %d", WEXITSTATUS(pst)); exit(WEXITSTATUS(pst)); } else if (WIFSIGNALED(pst)) { @@ -284,8 +285,8 @@ /* Connect to a cmdserver. Will start a new server on demand. */ static hgclient_t *connectcmdserver(struct cmdserveropts *opts) { - const char *sockname = opts->redirectsockname[0] ? - opts->redirectsockname : opts->sockname; + const char *sockname = + opts->redirectsockname[0] ? 
opts->redirectsockname : opts->sockname; debugmsg("try connect to %s", sockname); hgclient_t *hgc = hgc_open(sockname); if (hgc) @@ -339,8 +340,8 @@ unlink(*pinst + 7); } else if (strncmp(*pinst, "redirect ", 9) == 0) { int r = snprintf(opts->redirectsockname, - sizeof(opts->redirectsockname), - "%s", *pinst + 9); + sizeof(opts->redirectsockname), "%s", + *pinst + 9); if (r < 0 || r >= (int)sizeof(opts->redirectsockname)) abortmsg("redirect path is too long (%d)", r); needreconnect = 1; @@ -365,10 +366,9 @@ */ static int isunsupported(int argc, const char *argv[]) { - enum { - SERVE = 1, - DAEMON = 2, - SERVEDAEMON = SERVE | DAEMON, + enum { SERVE = 1, + DAEMON = 2, + SERVEDAEMON = SERVE | DAEMON, }; unsigned int state = 0; int i; @@ -378,7 +378,7 @@ if (i == 0 && strcmp("serve", argv[i]) == 0) state |= SERVE; else if (strcmp("-d", argv[i]) == 0 || - strcmp("--daemon", argv[i]) == 0) + strcmp("--daemon", argv[i]) == 0) state |= DAEMON; } return (state & SERVEDAEMON) == SERVEDAEMON; @@ -401,9 +401,9 @@ if (getenv("CHGINTERNALMARK")) abortmsg("chg started by chg detected.\n" - "Please make sure ${HG:-hg} is not a symlink or " - "wrapper to chg. Alternatively, set $CHGHG to the " - "path of real hg."); + "Please make sure ${HG:-hg} is not a symlink or " + "wrapper to chg. Alternatively, set $CHGHG to the " + "path of real hg."); if (isunsupported(argc - 1, argv + 1)) execoriginalhg(argv); @@ -435,11 +435,11 @@ hgc_close(hgc); if (++retry > 10) abortmsg("too many redirections.\n" - "Please make sure %s is not a wrapper which " - "changes sensitive environment variables " - "before executing hg. If you have to use a " - "wrapper, wrap chg instead of hg.", - gethgcmd()); + "Please make sure %s is not a wrapper which " + "changes sensitive environment variables " + "before executing hg. If you have to use a " + "wrapper, wrap chg instead of hg.", + gethgcmd()); } setupsignalhandler(hgc_peerpid(hgc), hgc_peerpgid(hgc));
--- a/contrib/chg/hgclient.c Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/chg/hgclient.c Sun Mar 04 10:42:51 2018 -0500 @@ -7,7 +7,7 @@ * GNU General Public License version 2 or any later version. */ -#include <arpa/inet.h> /* for ntohl(), htonl() */ +#include <arpa/inet.h> /* for ntohl(), htonl() */ #include <assert.h> #include <ctype.h> #include <errno.h> @@ -26,16 +26,15 @@ #include "procutil.h" #include "util.h" -enum { - CAP_GETENCODING = 0x0001, - CAP_RUNCOMMAND = 0x0002, - /* cHg extension: */ - CAP_ATTACHIO = 0x0100, - CAP_CHDIR = 0x0200, - CAP_SETENV = 0x0800, - CAP_SETUMASK = 0x1000, - CAP_VALIDATE = 0x2000, - CAP_SETPROCNAME = 0x4000, +enum { CAP_GETENCODING = 0x0001, + CAP_RUNCOMMAND = 0x0002, + /* cHg extension: */ + CAP_ATTACHIO = 0x0100, + CAP_CHDIR = 0x0200, + CAP_SETENV = 0x0800, + CAP_SETUMASK = 0x1000, + CAP_VALIDATE = 0x2000, + CAP_SETPROCNAME = 0x4000, }; typedef struct { @@ -44,15 +43,15 @@ } cappair_t; static const cappair_t captable[] = { - {"getencoding", CAP_GETENCODING}, - {"runcommand", CAP_RUNCOMMAND}, - {"attachio", CAP_ATTACHIO}, - {"chdir", CAP_CHDIR}, - {"setenv", CAP_SETENV}, - {"setumask", CAP_SETUMASK}, - {"validate", CAP_VALIDATE}, - {"setprocname", CAP_SETPROCNAME}, - {NULL, 0}, /* terminator */ + {"getencoding", CAP_GETENCODING}, + {"runcommand", CAP_RUNCOMMAND}, + {"attachio", CAP_ATTACHIO}, + {"chdir", CAP_CHDIR}, + {"setenv", CAP_SETENV}, + {"setumask", CAP_SETUMASK}, + {"validate", CAP_VALIDATE}, + {"setprocname", CAP_SETPROCNAME}, + {NULL, 0}, /* terminator */ }; typedef struct { @@ -88,8 +87,8 @@ if (newsize <= ctx->maxdatasize) return; - newsize = defaultdatasize - * ((newsize + defaultdatasize - 1) / defaultdatasize); + newsize = defaultdatasize * + ((newsize + defaultdatasize - 1) / defaultdatasize); ctx->data = reallocx(ctx->data, newsize); ctx->maxdatasize = newsize; debugmsg("enlarge context buffer to %zu", ctx->maxdatasize); @@ -126,12 +125,12 @@ enlargecontext(&hgc->ctx, hgc->ctx.datasize); if 
(isupper(hgc->ctx.ch) && hgc->ctx.ch != 'S') - return; /* assumes input request */ + return; /* assumes input request */ size_t cursize = 0; while (cursize < hgc->ctx.datasize) { rsize = recv(hgc->sockfd, hgc->ctx.data + cursize, - hgc->ctx.datasize - cursize, 0); + hgc->ctx.datasize - cursize, 0); if (rsize < 1) abortmsg("failed to read data block"); cursize += rsize; @@ -176,19 +175,19 @@ /* Build '\0'-separated list of args. argsize < 0 denotes that args are * terminated by NULL. */ static void packcmdargs(context_t *ctx, const char *const args[], - ssize_t argsize) + ssize_t argsize) { ctx->datasize = 0; const char *const *const end = (argsize >= 0) ? args + argsize : NULL; for (const char *const *it = args; it != end && *it; ++it) { - const size_t n = strlen(*it) + 1; /* include '\0' */ + const size_t n = strlen(*it) + 1; /* include '\0' */ enlargecontext(ctx, ctx->datasize + n); memcpy(ctx->data + ctx->datasize, *it, n); ctx->datasize += n; } if (ctx->datasize > 0) - --ctx->datasize; /* strip last '\0' */ + --ctx->datasize; /* strip last '\0' */ } /* Extract '\0'-separated list of args to new buffer, terminated by NULL */ @@ -199,7 +198,7 @@ const char *s = ctx->data; const char *e = ctx->data + ctx->datasize; for (;;) { - if (nargs + 1 >= maxnargs) { /* including last NULL */ + if (nargs + 1 >= maxnargs) { /* including last NULL */ maxnargs += 256; args = reallocx(args, maxnargs * sizeof(args[0])); } @@ -237,7 +236,7 @@ { context_t *ctx = &hgc->ctx; enlargecontext(ctx, ctx->datasize + 1); - ctx->data[ctx->datasize] = '\0'; /* terminate last string */ + ctx->data[ctx->datasize] = '\0'; /* terminate last string */ const char **args = unpackcmdargsnul(ctx); if (!args[0] || !args[1] || !args[2]) @@ -269,8 +268,8 @@ for (;;) { readchannel(hgc); context_t *ctx = &hgc->ctx; - debugmsg("response read from channel %c, size %zu", - ctx->ch, ctx->datasize); + debugmsg("response read from channel %c, size %zu", ctx->ch, + ctx->datasize); switch (ctx->ch) { case 'o': 
fwrite(ctx->data, sizeof(ctx->data[0]), ctx->datasize, @@ -299,7 +298,7 @@ default: if (isupper(ctx->ch)) abortmsg("cannot handle response (ch = %c)", - ctx->ch); + ctx->ch); } } } @@ -366,8 +365,8 @@ static void updateprocname(hgclient_t *hgc) { - int r = snprintf(hgc->ctx.data, hgc->ctx.maxdatasize, - "chg[worker/%d]", (int)getpid()); + int r = snprintf(hgc->ctx.data, hgc->ctx.maxdatasize, "chg[worker/%d]", + (int)getpid()); if (r < 0 || (size_t)r >= hgc->ctx.maxdatasize) abortmsg("insufficient buffer to write procname (r = %d)", r); hgc->ctx.datasize = (size_t)r; @@ -387,7 +386,7 @@ static const int fds[3] = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO}; struct msghdr msgh; memset(&msgh, 0, sizeof(msgh)); - struct iovec iov = {ctx->data, ctx->datasize}; /* dummy payload */ + struct iovec iov = {ctx->data, ctx->datasize}; /* dummy payload */ msgh.msg_iov = &iov; msgh.msg_iovlen = 1; char fdbuf[CMSG_SPACE(sizeof(fds))]; @@ -552,7 +551,7 @@ * the last string is guaranteed to be NULL. */ const char **hgc_validate(hgclient_t *hgc, const char *const args[], - size_t argsize) + size_t argsize) { assert(hgc); if (!(hgc->capflags & CAP_VALIDATE))
--- a/contrib/chg/hgclient.h Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/chg/hgclient.h Sun Mar 04 10:42:51 2018 -0500 @@ -22,9 +22,9 @@ pid_t hgc_peerpid(const hgclient_t *hgc); const char **hgc_validate(hgclient_t *hgc, const char *const args[], - size_t argsize); + size_t argsize); int hgc_runcommand(hgclient_t *hgc, const char *const args[], size_t argsize); void hgc_attachio(hgclient_t *hgc); void hgc_setenv(hgclient_t *hgc, const char *const envp[]); -#endif /* HGCLIENT_H_ */ +#endif /* HGCLIENT_H_ */
--- a/contrib/chg/procutil.c Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/chg/procutil.c Sun Mar 04 10:42:51 2018 -0500 @@ -54,7 +54,7 @@ goto error; forwardsignal(sig); - if (raise(sig) < 0) /* resend to self */ + if (raise(sig) < 0) /* resend to self */ goto error; if (sigaction(sig, &sa, &oldsa) < 0) goto error; @@ -205,8 +205,8 @@ close(pipefds[0]); close(pipefds[1]); - int r = execle("/bin/sh", "/bin/sh", "-c", pagercmd, NULL, - envp); + int r = + execle("/bin/sh", "/bin/sh", "-c", pagercmd, NULL, envp); if (r < 0) { abortmsgerrno("cannot start pager '%s'", pagercmd); }
--- a/contrib/chg/util.c Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/chg/util.c Sun Mar 04 10:42:51 2018 -0500 @@ -62,7 +62,8 @@ static int debugmsgenabled = 0; static double debugstart = 0; -static double now() { +static double now() +{ struct timeval t; gettimeofday(&t, NULL); return t.tv_usec / 1e6 + t.tv_sec;
--- a/contrib/chg/util.h Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/chg/util.h Sun Mar 04 10:42:51 2018 -0500 @@ -32,4 +32,4 @@ int runshellcmd(const char *cmd, const char *envp[], const char *cwd); -#endif /* UTIL_H_ */ +#endif /* UTIL_H_ */
--- a/contrib/clang-format-blacklist Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/clang-format-blacklist Sun Mar 04 10:42:51 2018 -0500 @@ -1,23 +1,8 @@ # Files that just need to be migrated to the formatter. # Do not add new files here! -contrib/chg/chg.c -contrib/chg/hgclient.c -contrib/chg/hgclient.h -contrib/chg/procutil.c -contrib/chg/procutil.h -contrib/chg/util.c -contrib/chg/util.h -contrib/hgsh/hgsh.c -mercurial/cext/base85.c -mercurial/cext/bdiff.c -mercurial/cext/charencode.c -mercurial/cext/charencode.h -mercurial/cext/diffhelpers.c mercurial/cext/dirs.c mercurial/cext/manifest.c -mercurial/cext/mpatch.c mercurial/cext/osutil.c -mercurial/cext/pathencode.c mercurial/cext/revlog.c # Vendored code that we should never format: contrib/python-zstandard/c-ext/bufferutil.c @@ -67,3 +52,18 @@ contrib/python-zstandard/zstd/dictBuilder/zdict.h contrib/python-zstandard/zstd/zstd.h hgext/fsmonitor/pywatchman/bser.c +mercurial/thirdparty/xdiff/xdiff.h +mercurial/thirdparty/xdiff/xdiffi.c +mercurial/thirdparty/xdiff/xdiffi.h +mercurial/thirdparty/xdiff/xemit.c +mercurial/thirdparty/xdiff/xemit.h +mercurial/thirdparty/xdiff/xhistogram.c +mercurial/thirdparty/xdiff/xinclude.h +mercurial/thirdparty/xdiff/xmacros.h +mercurial/thirdparty/xdiff/xmerge.c +mercurial/thirdparty/xdiff/xpatience.c +mercurial/thirdparty/xdiff/xprepare.c +mercurial/thirdparty/xdiff/xprepare.h +mercurial/thirdparty/xdiff/xtypes.h +mercurial/thirdparty/xdiff/xutils.c +mercurial/thirdparty/xdiff/xutils.h
--- a/contrib/dirstatenonnormalcheck.py Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/dirstatenonnormalcheck.py Sun Mar 04 10:42:51 2018 -0500 @@ -17,7 +17,7 @@ """Compute nonnormal entries from dirstate's dmap""" res = set() for f, e in dmap.iteritems(): - if e[0] != 'n' or e[3] == -1: + if e[0] != b'n' or e[3] == -1: res.add(f) return res @@ -25,24 +25,25 @@ """Compute nonnormalset from dmap, check that it matches _nonnormalset""" nonnormalcomputedmap = nonnormalentries(dmap) if _nonnormalset != nonnormalcomputedmap: - ui.develwarn("%s call to %s\n" % (label, orig), config='dirstate') - ui.develwarn("inconsistency in nonnormalset\n", config='dirstate') - ui.develwarn("[nonnormalset] %s\n" % _nonnormalset, config='dirstate') - ui.develwarn("[map] %s\n" % nonnormalcomputedmap, config='dirstate') + ui.develwarn(b"%s call to %s\n" % (label, orig), config=b'dirstate') + ui.develwarn(b"inconsistency in nonnormalset\n", config=b'dirstate') + ui.develwarn(b"[nonnormalset] %s\n" % _nonnormalset, config=b'dirstate') + ui.develwarn(b"[map] %s\n" % nonnormalcomputedmap, config=b'dirstate') def _checkdirstate(orig, self, arg): """Check nonnormal set consistency before and after the call to orig""" checkconsistency(self._ui, orig, self._map, self._map.nonnormalset, - "before") + b"before") r = orig(self, arg) - checkconsistency(self._ui, orig, self._map, self._map.nonnormalset, "after") + checkconsistency(self._ui, orig, self._map, self._map.nonnormalset, + b"after") return r def extsetup(ui): """Wrap functions modifying dirstate to check nonnormalset consistency""" dirstatecl = dirstate.dirstate - devel = ui.configbool('devel', 'all-warnings') - paranoid = ui.configbool('experimental', 'nonnormalparanoidcheck') + devel = ui.configbool(b'devel', b'all-warnings') + paranoid = ui.configbool(b'experimental', b'nonnormalparanoidcheck') if devel: extensions.wrapfunction(dirstatecl, '_writedirstate', _checkdirstate) if paranoid:
--- a/contrib/dumprevlog Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/dumprevlog Sun Mar 04 10:42:51 2018 -0500 @@ -14,8 +14,12 @@ for fp in (sys.stdin, sys.stdout, sys.stderr): util.setbinary(fp) +def binopen(path, mode='rb'): + if 'b' not in mode: + mode = mode + 'b' + return open(path, mode) + for f in sys.argv[1:]: - binopen = lambda fn: open(fn, 'rb') r = revlog.revlog(binopen, f) print("file:", f) for i in r:
--- a/contrib/fuzz/Makefile Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/fuzz/Makefile Sun Mar 04 10:42:51 2018 -0500 @@ -13,8 +13,28 @@ $$CXX $$CXXFLAGS -std=c++11 -I../../mercurial bdiff.cc \ bdiff-oss-fuzz.o -lFuzzingEngine -o $$OUT/bdiff_fuzzer -all: bdiff +x%.o: ../../mercurial/thirdparty/xdiff/x%.c ../../mercurial/thirdparty/xdiff/*.h + clang -g -O1 -fsanitize=fuzzer-no-link,address -c \ + -o $@ \ + $< + +xdiff: xdiff.cc xdiffi.o xemit.o xmerge.o xprepare.o xutils.o + clang -DHG_FUZZER_INCLUDE_MAIN=1 -g -O1 -fsanitize=fuzzer-no-link,address \ + -I../../mercurial xdiff.cc \ + xdiffi.o xemit.o xmerge.o xprepare.o xutils.o -o xdiff -oss-fuzz: bdiff_fuzzer +fuzz-x%.o: ../../mercurial/thirdparty/xdiff/x%.c ../../mercurial/thirdparty/xdiff/*.h + $$CC $$CFLAGS -c \ + -o $@ \ + $< + +xdiff_fuzzer: xdiff.cc fuzz-xdiffi.o fuzz-xemit.o fuzz-xmerge.o fuzz-xprepare.o fuzz-xutils.o + $$CXX $$CXXFLAGS -std=c++11 -I../../mercurial xdiff.cc \ + fuzz-xdiffi.o fuzz-xemit.o fuzz-xmerge.o fuzz-xprepare.o fuzz-xutils.o \ + -lFuzzingEngine -o $$OUT/xdiff_fuzzer + +all: bdiff xdiff + +oss-fuzz: bdiff_fuzzer xdiff_fuzzer .PHONY: all oss-fuzz
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/fuzz/README.rst Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,14 @@ +How to add fuzzers (partially cribbed from oss-fuzz[0]): + + 1) git clone https://github.com/google/oss-fuzz + 2) cd oss-fuzz + 3) python infra/helper.py build_image mercurial + 4) docker run --cap-add=SYS_PTRACE -it -v $HG_REPO_PATH:/hg-new \ + gcr.io/oss-fuzz/mercurial bash + 5) cd /src + 6) rm -r mercurial + 7) ln -s /hg-new mercurial + 8) cd mercurial + 9) compile + +0: https://github.com/google/oss-fuzz/blob/master/docs/new_project_guide.md
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/contrib/fuzz/xdiff.cc Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,67 @@ +/* + * xdiff.cc - fuzzer harness for thirdparty/xdiff + * + * Copyright 2018, Google Inc. + * + * This software may be used and distributed according to the terms of + * the GNU General Public License, incorporated herein by reference. + */ +#include "thirdparty/xdiff/xdiff.h" +#include <inttypes.h> +#include <stdlib.h> + +extern "C" { + +int hunk_consumer(long a1, long a2, long b1, long b2, void *priv) +{ + // TODO: probably also test returning -1 from this when things break? + return 0; +} + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) +{ + if (!Size) { + return 0; + } + // figure out a random point in [0, Size] to split our input. + size_t split = Data[0] / 255.0 * Size; + + mmfile_t a, b; + + // `a` input to diff is data[1:split] + a.ptr = (char *)Data + 1; + // which has len split-1 + a.size = split - 1; + // `b` starts at the next byte after `a` ends + b.ptr = a.ptr + a.size; + b.size = Size - split; + xpparam_t xpp = { + XDF_INDENT_HEURISTIC, /* flags */ + NULL, /* anchors */ + 0, /* anchors_nr */ + }; + xdemitconf_t xecfg = { + 0, /* ctxlen */ + 0, /* interhunkctxlen */ + XDL_EMIT_BDIFFHUNK, /* flags */ + NULL, /* find_func */ + NULL, /* find_func_priv */ + hunk_consumer, /* hunk_consume_func */ + }; + xdemitcb_t ecb = { + NULL, /* priv */ + NULL, /* outf */ + }; + xdl_diff(&a, &b, &xpp, &xecfg, &ecb); + return 0; // Non-zero return values are reserved for future use. +} + +#ifdef HG_FUZZER_INCLUDE_MAIN +int main(int argc, char **argv) +{ + const char data[] = "asdf"; + return LLVMFuzzerTestOneInput((const uint8_t *)data, 4); +} +#endif + +} // extern "C"
--- a/contrib/hgsh/hgsh.c Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/hgsh/hgsh.c Sun Mar 04 10:42:51 2018 -0500 @@ -48,7 +48,7 @@ * have such machine, set to NULL. */ #ifndef HG_GATEWAY -#define HG_GATEWAY "gateway" +#define HG_GATEWAY "gateway" #endif /* @@ -56,7 +56,7 @@ * NULL. */ #ifndef HG_HOST -#define HG_HOST "mercurial" +#define HG_HOST "mercurial" #endif /* @@ -64,7 +64,7 @@ * host username are same, set to NULL. */ #ifndef HG_USER -#define HG_USER "hg" +#define HG_USER "hg" #endif /* @@ -72,14 +72,14 @@ * validate location of repo when someone is try to access, set to NULL. */ #ifndef HG_ROOT -#define HG_ROOT "/home/hg/repos" +#define HG_ROOT "/home/hg/repos" #endif /* * HG: path to the mercurial executable to run. */ #ifndef HG -#define HG "/home/hg/bin/hg" +#define HG "/home/hg/bin/hg" #endif /* @@ -88,7 +88,7 @@ * impossible, set to NULL. */ #ifndef HG_SHELL -#define HG_SHELL NULL +#define HG_SHELL NULL /* #define HG_SHELL "/bin/bash" */ #endif @@ -97,7 +97,7 @@ * should not get helpful message, set to NULL. */ #ifndef HG_HELP -#define HG_HELP "please contact support@example.com for help." +#define HG_HELP "please contact support@example.com for help." #endif /* @@ -106,7 +106,7 @@ * arguments it is called with. see forward_through_gateway. */ #ifndef SSH -#define SSH "/usr/bin/ssh" +#define SSH "/usr/bin/ssh" #endif /* @@ -249,7 +249,6 @@ hg_serve, }; - /* * attempt to verify that a directory is really a hg repo, by testing * for the existence of a subdirectory. @@ -310,8 +309,7 @@ if (sscanf(argv[2], "hg init %as", &repo) == 1) { cmd = hg_init; - } - else if (sscanf(argv[2], "hg -R %as serve --stdio", &repo) == 1) { + } else if (sscanf(argv[2], "hg -R %as serve --stdio", &repo) == 1) { cmd = hg_serve; } else { goto badargs;
--- a/contrib/mercurial.spec Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/mercurial.spec Sun Mar 04 10:42:51 2018 -0500 @@ -6,8 +6,8 @@ %global pythonver %{withpython} %global pythonname Python-%{withpython} -%global docutilsname docutils-0.12 -%global docutilsmd5 4622263b62c5c771c03502afa3157768 +%global docutilsname docutils-0.14 +%global docutilsmd5 c53768d63db3873b7d452833553469de %global pythonhg python-hg %global hgpyprefix /opt/%{pythonhg} # byte compilation will fail on some some Python /test/ files
--- a/contrib/perf.py Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/perf.py Sun Mar 04 10:42:51 2018 -0500 @@ -64,6 +64,12 @@ from mercurial import scmutil # since 1.9 (or 8b252e826c68) except ImportError: pass +try: + from mercurial import pycompat + getargspec = pycompat.getargspec # added to module after 4.5 +except (ImportError, AttributeError): + import inspect + getargspec = inspect.getargspec # for "historical portability": # define util.safehasattr forcibly, because util.safehasattr has been @@ -114,9 +120,8 @@ if safehasattr(registrar, 'command'): command = registrar.command(cmdtable) elif safehasattr(cmdutil, 'command'): - import inspect command = cmdutil.command(cmdtable) - if 'norepo' not in inspect.getargspec(command)[0]: + if 'norepo' not in getargspec(command).args: # for "historical portability": # wrap original cmdutil.command, because "norepo" option has # been available since 3.1 (or 75a96326cecb) @@ -1031,6 +1036,71 @@ with ready: ready.notify_all() +@command('perfunidiff', revlogopts + formatteropts + [ + ('', 'count', 1, 'number of revisions to test (when using --startrev)'), + ('', 'alldata', False, 'test unidiffs for all associated revisions'), + ], '-c|-m|FILE REV') +def perfunidiff(ui, repo, file_, rev=None, count=None, **opts): + """benchmark a unified diff between revisions + + This doesn't include any copy tracing - it's just a unified diff + of the texts. + + By default, benchmark a diff between its delta parent and itself. + + With ``--count``, benchmark diffs between delta parents and self for N + revisions starting at the specified revision. + + With ``--alldata``, assume the requested revision is a changeset and + measure diffs for all changes related to that changeset (manifest + and filelogs). 
+ """ + if opts['alldata']: + opts['changelog'] = True + + if opts.get('changelog') or opts.get('manifest'): + file_, rev = None, file_ + elif rev is None: + raise error.CommandError('perfunidiff', 'invalid arguments') + + textpairs = [] + + r = cmdutil.openrevlog(repo, 'perfunidiff', file_, opts) + + startrev = r.rev(r.lookup(rev)) + for rev in range(startrev, min(startrev + count, len(r) - 1)): + if opts['alldata']: + # Load revisions associated with changeset. + ctx = repo[rev] + mtext = repo.manifestlog._revlog.revision(ctx.manifestnode()) + for pctx in ctx.parents(): + pman = repo.manifestlog._revlog.revision(pctx.manifestnode()) + textpairs.append((pman, mtext)) + + # Load filelog revisions by iterating manifest delta. + man = ctx.manifest() + pman = ctx.p1().manifest() + for filename, change in pman.diff(man).items(): + fctx = repo.file(filename) + f1 = fctx.revision(change[0][0] or -1) + f2 = fctx.revision(change[1][0] or -1) + textpairs.append((f1, f2)) + else: + dp = r.deltaparent(rev) + textpairs.append((r.revision(dp), r.revision(rev))) + + def d(): + for left, right in textpairs: + # The date strings don't matter, so we pass empty strings. 
+ headerlines, hunks = mdiff.unidiff( + left, '', right, '', 'left', 'right', binary=False) + # consume iterators in roughly the way patch.py does + b'\n'.join(headerlines) + b''.join(sum((list(hlines) for hrange, hlines in hunks), [])) + timer, fm = gettimer(ui, opts) + timer(d) + fm.end() + @command('perfdiffwd', formatteropts) def perfdiffwd(ui, repo, **opts): """Profile diff of working directory changes""" @@ -1498,11 +1568,13 @@ ('', 'clear-revbranch', False, 'purge the revbranch cache between computation'), ] + formatteropts) -def perfbranchmap(ui, repo, full=False, clear_revbranch=False, **opts): +def perfbranchmap(ui, repo, *filternames, **opts): """benchmark the update of a branchmap This benchmarks the full repo.branchmap() call with read and write disabled """ + full = opts.get("full", False) + clear_revbranch = opts.get("clear_revbranch", False) timer, fm = gettimer(ui, opts) def getbranchmap(filtername): """generate a benchmark function for the filtername""" @@ -1521,6 +1593,8 @@ return d # add filter in smaller subset to bigger subset possiblefilters = set(repoview.filtertable) + if filternames: + possiblefilters &= set(filternames) subsettable = getbranchmapsubsettable() allfilters = [] while possiblefilters: @@ -1537,8 +1611,9 @@ if not full: for name in allfilters: repo.filtered(name).branchmap() - # add unfiltered - allfilters.append(None) + if not filternames or 'unfiltered' in filternames: + # add unfiltered + allfilters.append(None) branchcacheread = safeattrsetter(branchmap, 'read') branchcachewrite = safeattrsetter(branchmap.branchcache, 'write') @@ -1546,7 +1621,10 @@ branchcachewrite.set(lambda bc, repo: None) try: for name in allfilters: - timer(getbranchmap(name), title=str(name)) + printname = name + if name is None: + printname = 'unfiltered' + timer(getbranchmap(name), title=str(printname)) finally: branchcacheread.restore() branchcachewrite.restore()
--- a/contrib/phabricator.py Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/phabricator.py Sun Mar 04 10:42:51 2018 -0500 @@ -868,11 +868,12 @@ templatekeyword = registrar.templatekeyword() -@templatekeyword('phabreview') -def template_review(repo, ctx, revcache, **args): +@templatekeyword('phabreview', requires={'ctx'}) +def template_review(context, mapping): """:phabreview: Object describing the review for this changeset. Has attributes `url` and `id`. """ + ctx = context.resource(mapping, 'ctx') m = _differentialrevisiondescre.search(ctx.description()) if m: return {
--- a/contrib/python3-ratchet.py Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/python3-ratchet.py Sun Mar 04 10:42:51 2018 -0500 @@ -80,8 +80,7 @@ print('warning: Python 3.6.0 and 3.6.1 have ' 'a bug which breaks Mercurial') print('(see https://bugs.python.org/issue29714 for details)') - # TODO(augie): uncomment exit when Python 3.6.2 is available - # sys.exit(1) + sys.exit(1) rt = subprocess.Popen([opts.python3, 'run-tests.py', '-j', str(opts.j), '--blacklist', opts.working_tests, '--json'])
--- a/contrib/python3-whitelist Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/python3-whitelist Sun Mar 04 10:42:51 2018 -0500 @@ -1,16 +1,35 @@ +test-abort-checkin.t test-add.t test-addremove-similar.t test-addremove.t +test-amend-subrepo.t test-ancestor.py +test-annotate.py +test-annotate.t +test-atomictempfile.py +test-audit-path.t +test-audit-subrepo.t test-automv.t +test-backout.t test-backwards-remove.t +test-basic.t test-bheads.t test-bisect2.t +test-bookmarks-current.t test-bookmarks-merge.t +test-bookmarks-rebase.t test-bookmarks-strip.t +test-bookmarks.t +test-branch-option.t test-branch-tag-confict.t +test-branches.t +test-bundle-phases.t +test-bundle-vs-outgoing.t +test-bundle2-multiple-changegroups.t +test-cappedreader.py test-casecollision.t test-cat.t +test-censor.t test-changelog-exec.t test-check-commit.t test-check-execute.t @@ -19,11 +38,34 @@ test-check-pylint.t test-check-shbang.t test-children.t +test-clone-pull-corruption.t +test-clone-r.t +test-clone-update-order.t +test-command-template.t +test-commit-amend.t test-commit-unresolved.t +test-commit.t test-completion.t +test-conflict.t +test-confused-revert.t test-contrib-check-code.t test-contrib-check-commit.t +test-convert-authormap.t +test-convert-clonebranches.t +test-convert-datesort.t +test-convert-filemap.t +test-convert-hg-sink.t +test-convert-hg-source.t +test-convert-hg-startrev.t +test-copy-move-merge.t +test-copytrace-heuristics.t +test-debugbuilddag.t +test-debugbundle.t +test-debugextensions.t +test-debugindexdot.t test-debugrename.t +test-diff-binary-file.t +test-diff-change.t test-diff-copy-depth.t test-diff-hashes.t test-diff-issue2761.t @@ -32,42 +74,125 @@ test-diff-subdir.t test-diffdir.t test-directaccess.t +test-dirstate-backup.t test-dirstate-nonnormalset.t test-doctest.py test-double-merge.t +test-drawdag.t test-duplicateoptions.py test-empty-dir.t test-empty-file.t +test-empty-group.t test-empty.t test-encoding-func.py +test-encoding.t +test-eol-add.t +test-eol-clone.t 
+test-eol-tag.t +test-eol-update.t test-excessive-merge.t +test-exchange-obsmarkers-case-A1.t +test-exchange-obsmarkers-case-A2.t +test-exchange-obsmarkers-case-A3.t +test-exchange-obsmarkers-case-A4.t +test-exchange-obsmarkers-case-A5.t +test-exchange-obsmarkers-case-A6.t +test-exchange-obsmarkers-case-A7.t +test-exchange-obsmarkers-case-B1.t +test-exchange-obsmarkers-case-B2.t +test-exchange-obsmarkers-case-B3.t +test-exchange-obsmarkers-case-B4.t +test-exchange-obsmarkers-case-B5.t +test-exchange-obsmarkers-case-B6.t +test-exchange-obsmarkers-case-B7.t +test-exchange-obsmarkers-case-C1.t +test-exchange-obsmarkers-case-C2.t +test-exchange-obsmarkers-case-C3.t +test-exchange-obsmarkers-case-C4.t +test-exchange-obsmarkers-case-D1.t +test-exchange-obsmarkers-case-D2.t +test-exchange-obsmarkers-case-D3.t +test-exchange-obsmarkers-case-D4.t test-execute-bit.t +test-extdiff.t +test-extra-filelog-entry.t +test-filebranch.t +test-fileset-generated.t +test-flags.t +test-generaldelta.t +test-getbundle.t +test-git-export.t +test-glog-topological.t test-gpg.t +test-graft.t test-hghave.t +test-hgignore.t +test-hgk.t +test-hgweb-removed.t +test-histedit-arguments.t +test-histedit-base.t +test-histedit-bookmark-motion.t +test-histedit-commute.t +test-histedit-drop.t +test-histedit-edit.t +test-histedit-fold-non-commute.t +test-histedit-fold.t +test-histedit-no-change.t +test-histedit-non-commute-abort.t +test-histedit-non-commute.t +test-histedit-obsolete.t +test-histedit-outgoing.t +test-histedit-templates.t +test-http-branchmap.t +test-http-bundle1.t +test-http-clone-r.t +test-identify.t test-imports-checker.t +test-inherit-mode.t test-issue1089.t +test-issue1102.t test-issue1175.t +test-issue1306.t +test-issue1438.t test-issue1502.t test-issue1802.t test-issue1877.t test-issue1993.t +test-issue2137.t +test-issue3084.t +test-issue4074.t test-issue522.t +test-issue586.t test-issue612.t test-issue619.t test-issue672.t test-issue842.t test-journal-exists.t 
+test-largefiles-cache.t +test-largefiles-small-disk.t test-locate.t +test-lock-badness.t +test-logexchange.t test-lrucachedict.py +test-mactext.t +test-manifest-merging.t test-manifest.py -test-manifest-merging.t +test-manifest.t test-match.py +test-mdiff.py +test-merge-closedheads.t +test-merge-commit.t +test-merge-criss-cross.t test-merge-default.t test-merge-internal-tools-pattern.t +test-merge-local.t test-merge-remove.t test-merge-revert.t test-merge-revert2.t test-merge-subrepos.t +test-merge-symlinks.t +test-merge-types.t +test-merge1.t test-merge10.t test-merge2.t test-merge4.t @@ -75,9 +200,63 @@ test-merge6.t test-merge7.t test-merge8.t +test-merge9.t +test-mq-git.t +test-mq-header-date.t +test-mq-header-from.t +test-mq-pull-from-bundle.t +test-mq-qdiff.t +test-mq-qfold.t +test-mq-qgoto.t test-mq-qimport-fail-cleanup.t +test-mq-qpush-exact.t +test-mq-qqueue.t +test-mq-qrefresh-replace-log-message.t +test-mq-qrefresh.t +test-mq-qrename.t +test-mq-qsave.t +test-mq-safety.t +test-mq-symlinks.t +test-mv-cp-st-diff.t +test-narrow-clone-no-ellipsis.t +test-narrow-clone-nonlinear.t +test-narrow-clone.t +test-narrow-commit.t +test-narrow-copies.t +test-narrow-debugrebuilddirstate.t +test-narrow-exchange-merges.t +test-narrow-exchange.t +test-narrow-merge.t +test-narrow-patch.t +test-narrow-patterns.t +test-narrow-pull.t +test-narrow-rebase.t +test-narrow-shallow-merges.t +test-narrow-shallow.t +test-narrow-update.t +test-nested-repo.t +test-newbranch.t test-obshistory.t +test-obsmarker-template.t +test-obsmarkers-effectflag.t +test-obsolete-bundle-strip.t +test-obsolete-changeset-exchange.t +test-obsolete-checkheads.t +test-obsolete-distributed.t +test-obsolete-tag-cache.t +test-parents.t +test-pathconflicts-merge.t +test-pathconflicts-update.t +test-pending.t test-permissions.t +test-phases.t +test-pull-branch.t +test-pull-http.t +test-pull-permission.t +test-pull-pull-corruption.t +test-pull-r.t +test-pull-update.t +test-purge.t 
test-push-checkheads-partial-C1.t test-push-checkheads-partial-C2.t test-push-checkheads-partial-C3.t @@ -105,27 +284,85 @@ test-push-checkheads-unpushed-D5.t test-push-checkheads-unpushed-D6.t test-push-checkheads-unpushed-D7.t +test-push-http.t +test-push-warn.t +test-pushvars.t +test-rebase-abort.t +test-rebase-base-flag.t +test-rebase-bookmarks.t +test-rebase-brute-force.t +test-rebase-cache.t +test-rebase-check-restore.t +test-rebase-collapse.t +test-rebase-dest.t +test-rebase-detach.t +test-rebase-emptycommit.t +test-rebase-inmemory.t +test-rebase-interruptions.t +test-rebase-issue-noparam-single-rev.t +test-rebase-legacy.t +test-rebase-mq-skip.t +test-rebase-named-branches.t +test-rebase-newancestor.t +test-rebase-obsolete.t +test-rebase-parameters.t +test-rebase-partial.t +test-rebase-pull.t +test-rebase-rename.t +test-rebase-scenario-global.t +test-rebase-templates.t +test-rebase-transaction.t test-record.t +test-relink.t +test-remove.t +test-rename-after-merge.t test-rename-dir-merge.t test-rename-merge1.t test-rename.t +test-repair-strip.t +test-repo-compengines.t +test-resolve.t test-revert-flags.t test-revert-unknown.t +test-revlog-ancestry.py test-revlog-group-emptyiter.t test-revlog-mmapindex.t test-revlog-packentry.t +test-revset-dirstate-parents.t +test-revset-outgoing.t test-run-tests.py +test-serve.t +test-share.t test-show-stack.t +test-show-work.t +test-show.t test-simple-update.t +test-single-head.t test-sparse-clear.t test-sparse-merges.t test-sparse-requirement.t test-sparse-verbose-json.t +test-ssh-clone-r.t +test-ssh-proto.t +test-sshserver.py +test-status-rev.t test-status-terse.t +test-strip-cross.t +test-strip.t +test-symlinks.t +test-unamend.t test-uncommit.t test-unified-test.t test-unrelated-pull.t +test-up-local-change.t +test-update-branches.t +test-update-dest.t test-update-issue1456.t test-update-names.t test-update-reverse.t +test-url-rev.t +test-username-newline.t +test-verify.t +test-websub.t +test-win32text.t test-xdg.t
--- a/contrib/synthrepo.py Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/synthrepo.py Sun Mar 04 10:42:51 2018 -0500 @@ -59,8 +59,8 @@ patch, registrar, scmutil, - util, ) +from mercurial.utils import dateutil # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should @@ -381,8 +381,8 @@ ui.progress(_synthesizing, None) message = 'synthesized wide repo with %d files' % (len(files),) mc = context.memctx(repo, [pctx.node(), nullid], message, - files.iterkeys(), filectxfn, ui.username(), - '%d %d' % util.makedate()) + files, filectxfn, ui.username(), + '%d %d' % dateutil.makedate()) initnode = mc.commit() if ui.debugflag: hexfn = hex
--- a/contrib/wix/help.wxs Sat Mar 03 22:29:24 2018 -0500 +++ b/contrib/wix/help.wxs Sun Mar 04 10:42:51 2018 -0500 @@ -40,6 +40,7 @@ <Directory Id="help.internaldir" Name="internals"> <Component Id="help.internals" Guid="$(var.help.internals.guid)" Win64='$(var.IsX64)'> + <File Id="internals.bundle2.txt" Name="bundle2.txt" /> <File Id="internals.bundles.txt" Name="bundles.txt" KeyPath="yes" /> <File Id="internals.censor.txt" Name="censor.txt" /> <File Id="internals.changegroups.txt" Name="changegroups.txt" />
--- a/hgext/acl.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/acl.py Sun Mar 04 10:42:51 2018 -0500 @@ -193,8 +193,6 @@ from __future__ import absolute_import -import getpass - from mercurial.i18n import _ from mercurial import ( error, @@ -334,13 +332,13 @@ return user = None - if source == 'serve' and 'url' in kwargs: - url = kwargs['url'].split(':') + if source == 'serve' and r'url' in kwargs: + url = kwargs[r'url'].split(':') if url[0] == 'remote' and url[1].startswith('http'): user = urlreq.unquote(url[3]) if user is None: - user = getpass.getuser() + user = util.getuser() ui.debug('acl: checking access for user "%s"\n' % user) @@ -355,7 +353,7 @@ allow = buildmatch(ui, repo, user, 'acl.allow') deny = buildmatch(ui, repo, user, 'acl.deny') - for rev in xrange(repo[node], len(repo)): + for rev in xrange(repo[node].rev(), len(repo)): ctx = repo[rev] branch = ctx.branch() if denybranches and denybranches(branch):
--- a/hgext/blackbox.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/blackbox.py Sun Mar 04 10:42:51 2018 -0500 @@ -49,6 +49,7 @@ ui as uimod, util, ) +from mercurial.utils import dateutil # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should @@ -164,7 +165,7 @@ return ui._bbinlog = True default = self.configdate('devel', 'default-date') - date = util.datestr(default, '%Y/%m/%d %H:%M:%S') + date = dateutil.datestr(default, '%Y/%m/%d %H:%M:%S') user = util.getuser() pid = '%d' % util.getpid() formattedmsg = msg[0] % msg[1:]
--- a/hgext/bugzilla.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/bugzilla.py Sun Mar 04 10:42:51 2018 -0500 @@ -300,8 +300,8 @@ from mercurial.i18n import _ from mercurial.node import short from mercurial import ( - cmdutil, error, + logcmdutil, mail, registrar, url, @@ -1090,9 +1090,8 @@ if not mapfile and not tmpl: tmpl = _('changeset {node|short} in repo {root} refers ' 'to bug {bug}.\ndetails:\n\t{desc|tabindent}') - spec = cmdutil.logtemplatespec(tmpl, mapfile) - t = cmdutil.changeset_templater(self.ui, self.repo, spec, - False, None, False) + spec = logcmdutil.templatespec(tmpl, mapfile) + t = logcmdutil.changesettemplater(self.ui, self.repo, spec) self.ui.pushbuffer() t.show(ctx, changes=ctx.changeset(), bug=str(bugid),
--- a/hgext/children.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/children.py Sun Mar 04 10:42:51 2018 -0500 @@ -19,6 +19,7 @@ from mercurial.i18n import _ from mercurial import ( cmdutil, + logcmdutil, pycompat, registrar, ) @@ -65,7 +66,7 @@ ctx = repo[rev] childctxs = ctx.children() - displayer = cmdutil.show_changeset(ui, repo, opts) + displayer = logcmdutil.changesetdisplayer(ui, repo, opts) for cctx in childctxs: displayer.show(cctx) displayer.close()
--- a/hgext/churn.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/churn.py Sun Mar 04 10:42:51 2018 -0500 @@ -18,12 +18,13 @@ from mercurial import ( cmdutil, encoding, + logcmdutil, patch, pycompat, registrar, scmutil, - util, ) +from mercurial.utils import dateutil cmdtable = {} command = registrar.command(cmdtable) @@ -54,7 +55,7 @@ return date.strftime(opts['dateformat']) else: tmpl = opts.get('oldtemplate') or opts.get('template') - tmpl = cmdutil.makelogtemplater(ui, repo, tmpl) + tmpl = logcmdutil.maketemplater(ui, repo, tmpl) def getkey(ctx): ui.pushbuffer() tmpl.show(ctx) @@ -64,7 +65,7 @@ rate = {} df = False if opts.get('date'): - df = util.matchdate(opts['date']) + df = dateutil.matchdate(opts['date']) m = scmutil.match(repo[None], pats, opts) def prep(ctx, fns): @@ -170,7 +171,7 @@ ui.warn(_("skipping malformed alias: %s\n") % l) continue - rate = countrate(ui, repo, amap, *pats, **opts).items() + rate = list(countrate(ui, repo, amap, *pats, **opts).items()) if not rate: return
--- a/hgext/commitextras.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/commitextras.py Sun Mar 04 10:42:51 2018 -0500 @@ -70,7 +70,7 @@ # This __dict__ logic is needed because the normal # extension.wrapfunction doesn't seem to work. - repo.__dict__['commit'] = _wrappedcommit + repo.__dict__[r'commit'] = _wrappedcommit return orig(ui, repo, *pats, **opts) finally: - del repo.__dict__['commit'] + del repo.__dict__[r'commit']
--- a/hgext/convert/__init__.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/__init__.py Sun Mar 04 10:42:51 2018 -0500 @@ -477,7 +477,8 @@ dates.''' return cvsps.debugcvsps(ui, *args, **opts) -def kwconverted(ctx, name): +def kwconverted(context, mapping, name): + ctx = context.resource(mapping, 'ctx') rev = ctx.extra().get('convert_revision', '') if rev.startswith('svn:'): if name == 'svnrev': @@ -490,20 +491,20 @@ templatekeyword = registrar.templatekeyword() -@templatekeyword('svnrev') -def kwsvnrev(repo, ctx, **args): +@templatekeyword('svnrev', requires={'ctx'}) +def kwsvnrev(context, mapping): """String. Converted subversion revision number.""" - return kwconverted(ctx, 'svnrev') + return kwconverted(context, mapping, 'svnrev') -@templatekeyword('svnpath') -def kwsvnpath(repo, ctx, **args): +@templatekeyword('svnpath', requires={'ctx'}) +def kwsvnpath(context, mapping): """String. Converted subversion revision project path.""" - return kwconverted(ctx, 'svnpath') + return kwconverted(context, mapping, 'svnpath') -@templatekeyword('svnuuid') -def kwsvnuuid(repo, ctx, **args): +@templatekeyword('svnuuid', requires={'ctx'}) +def kwsvnuuid(context, mapping): """String. Converted subversion revision repository identifier.""" - return kwconverted(ctx, 'svnuuid') + return kwconverted(context, mapping, 'svnuuid') # tell hggettext to extract docstrings from these functions: i18nfunctions = [kwsvnrev, kwsvnpath, kwsvnuuid]
--- a/hgext/convert/common.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/common.py Sun Mar 04 10:42:51 2018 -0500 @@ -11,6 +11,7 @@ import errno import os import re +import shlex import subprocess from mercurial.i18n import _ @@ -18,12 +19,65 @@ encoding, error, phases, + pycompat, util, ) pickle = util.pickle propertycache = util.propertycache +def _encodeornone(d): + if d is None: + return + return d.encode('latin1') + +class _shlexpy3proxy(object): + + def __init__(self, l): + self._l = l + + def __iter__(self): + return (_encodeornone(v) for v in self._l) + + def get_token(self): + return _encodeornone(self._l.get_token()) + + @property + def infile(self): + return self._l.infile or '<unknown>' + + @property + def lineno(self): + return self._l.lineno + +def shlexer(data=None, filepath=None, wordchars=None, whitespace=None): + if data is None: + if pycompat.ispy3: + data = open(filepath, 'r', encoding=r'latin1') + else: + data = open(filepath, 'r') + else: + if filepath is not None: + raise error.ProgrammingError( + 'shlexer only accepts data or filepath, not both') + if pycompat.ispy3: + data = data.decode('latin1') + l = shlex.shlex(data, infile=filepath, posix=True) + if whitespace is not None: + l.whitespace_split = True + if pycompat.ispy3: + l.whitespace += whitespace.decode('latin1') + else: + l.whitespace += whitespace + if wordchars is not None: + if pycompat.ispy3: + l.wordchars += wordchars.decode('latin1') + else: + l.wordchars += wordchars + if pycompat.ispy3: + return _shlexpy3proxy(l) + return l + def encodeargs(args): def encodearg(s): lines = base64.encodestring(s) @@ -322,6 +376,7 @@ pass def _cmdline(self, cmd, *args, **kwargs): + kwargs = pycompat.byteskwargs(kwargs) cmdline = [self.command, cmd] + list(args) for k, v in kwargs.iteritems(): if len(k) == 1: @@ -337,7 +392,7 @@ pass cmdline = [util.shellquote(arg) for arg in cmdline] if not self.ui.debugflag: - cmdline += ['2>', os.devnull] + cmdline += ['2>', 
pycompat.bytestr(os.devnull)] cmdline = ' '.join(cmdline) return cmdline @@ -416,17 +471,17 @@ def _limit_arglist(self, arglist, cmd, *args, **kwargs): cmdlen = len(self._cmdline(cmd, *args, **kwargs)) limit = self.argmax - cmdlen - bytes = 0 + numbytes = 0 fl = [] for fn in arglist: b = len(fn) + 3 - if bytes + b < limit or len(fl) == 0: + if numbytes + b < limit or len(fl) == 0: fl.append(fn) - bytes += b + numbytes += b else: yield fl fl = [fn] - bytes = b + numbytes = b if fl: yield fl @@ -447,7 +502,7 @@ if not self.path: return try: - fp = open(self.path, 'r') + fp = open(self.path, 'rb') except IOError as err: if err.errno != errno.ENOENT: raise @@ -471,12 +526,12 @@ def __setitem__(self, key, value): if self.fp is None: try: - self.fp = open(self.path, 'a') + self.fp = open(self.path, 'ab') except IOError as err: raise error.Abort( _('could not open map file %r: %s') % (self.path, encoding.strtolocal(err.strerror))) - self.fp.write('%s %s\n' % (key, value)) + self.fp.write(util.tonativeeol('%s %s\n' % (key, value))) self.fp.flush() super(mapfile, self).__setitem__(key, value) @@ -486,7 +541,7 @@ self.fp = None def makedatetimestamp(t): - """Like util.makedate() but for time t instead of current time""" + """Like dateutil.makedate() but for time t instead of current time""" delta = (datetime.datetime.utcfromtimestamp(t) - datetime.datetime.fromtimestamp(t)) tz = delta.days * 86400 + delta.seconds
--- a/hgext/convert/convcmd.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/convcmd.py Sun Mar 04 10:42:51 2018 -0500 @@ -8,7 +8,6 @@ import collections import os -import shlex import shutil from mercurial.i18n import _ @@ -16,9 +15,11 @@ encoding, error, hg, + pycompat, scmutil, util, ) +from mercurial.utils import dateutil from . import ( bzr, @@ -55,9 +56,10 @@ def recode(s): if isinstance(s, unicode): - return s.encode(orig_encoding, 'replace') + return s.encode(pycompat.sysstr(orig_encoding), 'replace') else: - return s.decode('utf-8').encode(orig_encoding, 'replace') + return s.decode('utf-8').encode( + pycompat.sysstr(orig_encoding), 'replace') def mapbranch(branch, branchmap): ''' @@ -202,16 +204,14 @@ return {} m = {} try: - fp = open(path, 'r') + fp = open(path, 'rb') for i, line in enumerate(util.iterfile(fp)): line = line.splitlines()[0].rstrip() if not line: # Ignore blank lines continue # split line - lex = shlex.shlex(line, posix=True) - lex.whitespace_split = True - lex.whitespace += ',' + lex = common.shlexer(data=line, whitespace=',') line = list(lex) # check number of parents if not (2 <= len(line) <= 3): @@ -356,7 +356,7 @@ dates = {} def getdate(n): if n not in dates: - dates[n] = util.parsedate(self.commitcache[n].date) + dates[n] = dateutil.parsedate(self.commitcache[n].date) return dates[n] def picknext(nodes): @@ -407,13 +407,14 @@ authorfile = self.authorfile if authorfile: self.ui.status(_('writing author map file %s\n') % authorfile) - ofile = open(authorfile, 'w+') + ofile = open(authorfile, 'wb+') for author in self.authors: - ofile.write("%s=%s\n" % (author, self.authors[author])) + ofile.write(util.tonativeeol("%s=%s\n" + % (author, self.authors[author]))) ofile.close() def readauthormap(self, authorfile): - afile = open(authorfile, 'r') + afile = open(authorfile, 'rb') for line in afile: line = line.strip() @@ -564,6 +565,7 @@ self.map.close() def convert(ui, src, dest=None, revmapfile=None, **opts): + opts = 
pycompat.byteskwargs(opts) global orig_encoding orig_encoding = encoding.encoding encoding.encoding = 'UTF-8'
--- a/hgext/convert/cvs.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/cvs.py Sun Mar 04 10:42:51 2018 -0500 @@ -18,6 +18,7 @@ pycompat, util, ) +from mercurial.utils import dateutil from . import ( common, @@ -46,8 +47,8 @@ self.tags = {} self.lastbranch = {} self.socket = None - self.cvsroot = open(os.path.join(cvs, "Root")).read()[:-1] - self.cvsrepo = open(os.path.join(cvs, "Repository")).read()[:-1] + self.cvsroot = open(os.path.join(cvs, "Root"), 'rb').read()[:-1] + self.cvsrepo = open(os.path.join(cvs, "Repository"), 'rb').read()[:-1] self.encoding = encoding.encoding self._connect() @@ -93,7 +94,7 @@ cs.comment = self.recode(cs.comment) if self.ui.configbool('convert', 'localtimezone'): cs.date = makedatetimestamp(cs.date[0]) - date = util.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2') + date = dateutil.datestr(cs.date, '%Y-%m-%d %H:%M:%S %1%2') self.tags.update(dict.fromkeys(cs.tags, id)) files = {} @@ -141,7 +142,7 @@ passw = "A" cvspass = os.path.expanduser("~/.cvspass") try: - pf = open(cvspass) + pf = open(cvspass, 'rb') for line in pf.read().splitlines(): part1, part2 = line.split(' ', 1) # /1 :pserver:user@example.com:2401/cvsroot/foo @@ -179,7 +180,7 @@ # :ext:user@host/home/user/path/to/cvsroot if root.startswith(":ext:"): root = root[5:] - m = re.match(r'(?:([^@:/]+)@)?([^:/]+):?(.*)', root) + m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root) # Do not take Windows path "c:\foo\bar" for a connection strings if os.path.isdir(root) or not m: conntype = "local"
--- a/hgext/convert/cvsps.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/cvsps.py Sun Mar 04 10:42:51 2018 -0500 @@ -17,6 +17,7 @@ pycompat, util, ) +from mercurial.utils import dateutil pickle = util.pickle @@ -132,7 +133,7 @@ # Get the real directory in the repository try: - prefix = open(os.path.join('CVS','Repository')).read().strip() + prefix = open(os.path.join('CVS','Repository'), 'rb').read().strip() directory = prefix if prefix == ".": prefix = "" @@ -144,7 +145,7 @@ # Use the Root file in the sandbox, if it exists try: - root = open(os.path.join('CVS','Root')).read().strip() + root = open(os.path.join('CVS','Root'), 'rb').read().strip() except IOError: pass @@ -170,14 +171,14 @@ # /pserver/user/server/path # are mapped to different cache file names. cachefile = root.split(":") + [directory, "cache"] - cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s] + cachefile = ['-'.join(re.findall(br'\w+', s)) for s in cachefile if s] cachefile = os.path.join(cachedir, '.'.join([s for s in cachefile if s])) if cache == 'update': try: ui.note(_('reading cvs log cache %s\n') % cachefile) - oldlog = pickle.load(open(cachefile)) + oldlog = pickle.load(open(cachefile, 'rb')) for e in oldlog: if not (util.safehasattr(e, 'branchpoints') and util.safehasattr(e, 'commitid') and @@ -192,7 +193,7 @@ if oldlog: date = oldlog[-1].date # last commit date as a (time,tz) tuple - date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2') + date = dateutil.datestr(date, '%Y/%m/%d %H:%M:%S %1%2') # build the CVS commandline cmd = ['cvs', '-q'] @@ -336,7 +337,7 @@ if len(d.split()) != 3: # cvs log dates always in GMT d = d + ' UTC' - e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S', + e.date = dateutil.parsedate(d, ['%y/%m/%d %H:%M:%S', '%Y/%m/%d %H:%M:%S', '%Y-%m-%d %H:%M:%S']) e.author = scache(match.group(2)) @@ -486,7 +487,7 @@ # write the new cachefile ui.note(_('writing cvs log cache %s\n') % cachefile) - pickle.dump(log, open(cachefile, 'w')) + pickle.dump(log, 
open(cachefile, 'wb')) else: log = oldlog @@ -855,6 +856,7 @@ repository, and convert the log to changesets based on matching commit log entries and dates. ''' + opts = pycompat.byteskwargs(opts) if opts["new_cache"]: cache = "write" elif opts["update_cache"]: @@ -900,7 +902,7 @@ # bug-for-bug compatibility with cvsps. ui.write('---------------------\n') ui.write(('PatchSet %d \n' % cs.id)) - ui.write(('Date: %s\n' % util.datestr(cs.date, + ui.write(('Date: %s\n' % dateutil.datestr(cs.date, '%Y/%m/%d %H:%M:%S %1%2'))) ui.write(('Author: %s\n' % cs.author)) ui.write(('Branch: %s\n' % (cs.branch or 'HEAD')))
--- a/hgext/convert/darcs.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/darcs.py Sun Mar 04 10:42:51 2018 -0500 @@ -16,6 +16,7 @@ error, util, ) +from mercurial.utils import dateutil from . import common NoRepo = common.NoRepo @@ -148,12 +149,14 @@ def getcommit(self, rev): elt = self.changes[rev] - date = util.strdate(elt.get('local_date'), '%a %b %d %H:%M:%S %Z %Y') + dateformat = '%a %b %d %H:%M:%S %Z %Y' + date = dateutil.strdate(elt.get('local_date'), dateformat) desc = elt.findtext('name') + '\n' + elt.findtext('comment', '') # etree can return unicode objects for name, comment, and author, # so recode() is used to ensure str objects are emitted. + newdateformat = '%Y-%m-%d %H:%M:%S %1%2' return common.commit(author=self.recode(elt.get('author')), - date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), + date=dateutil.datestr(date, newdateformat), desc=self.recode(desc).strip(), parents=self.parents[rev])
--- a/hgext/convert/filemap.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/filemap.py Sun Mar 04 10:42:51 2018 -0500 @@ -7,11 +7,11 @@ from __future__ import absolute_import, print_function import posixpath -import shlex from mercurial.i18n import _ from mercurial import ( error, + pycompat, ) from . import common SKIPREV = common.SKIPREV @@ -68,11 +68,12 @@ name.endswith('/') or '//' in name): self.ui.warn(_('%s:%d: superfluous / in %s %r\n') % - (lex.infile, lex.lineno, listname, name)) + (lex.infile, lex.lineno, listname, + pycompat.bytestr(name))) return 1 return 0 - lex = shlex.shlex(open(path), path, True) - lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?' + lex = common.shlexer( + filepath=path, wordchars='!@#$%^&*()-=+[]{}|;:,./<>?') cmd = lex.get_token() while cmd: if cmd == 'include': @@ -93,7 +94,7 @@ errs += self.parse(normalize(lex.get_token())) else: self.ui.warn(_('%s:%d: unknown directive %r\n') % - (lex.infile, lex.lineno, cmd)) + (lex.infile, lex.lineno, pycompat.bytestr(cmd))) errs += 1 cmd = lex.get_token() return errs
--- a/hgext/convert/git.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/git.py Sun Mar 04 10:42:51 2018 -0500 @@ -168,19 +168,19 @@ raise error.Abort(_('cannot retrieve git head "%s"') % rev) return heads - def catfile(self, rev, type): + def catfile(self, rev, ftype): if rev == nodemod.nullhex: raise IOError self.catfilepipe[0].write(rev+'\n') self.catfilepipe[0].flush() info = self.catfilepipe[1].readline().split() - if info[1] != type: - raise error.Abort(_('cannot read %r object at %s') % (type, rev)) + if info[1] != ftype: + raise error.Abort(_('cannot read %r object at %s') % (ftype, rev)) size = int(info[2]) data = self.catfilepipe[1].read(size) if len(data) < size: raise error.Abort(_('cannot read %r object at %s: unexpected size') - % (type, rev)) + % (ftype, rev)) # read the trailing newline self.catfilepipe[1].read(1) return data
--- a/hgext/convert/gnuarch.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/gnuarch.py Sun Mar 04 10:42:51 2018 -0500 @@ -19,6 +19,7 @@ error, util, ) +from mercurial.utils import dateutil from . import common class gnuarch_source(common.converter_source, common.commandline): @@ -280,8 +281,8 @@ catlog = self.catlogparser.parsestr(data) # Commit date - self.changes[rev].date = util.datestr( - util.strdate(catlog['Standard-date'], + self.changes[rev].date = dateutil.datestr( + dateutil.strdate(catlog['Standard-date'], '%Y-%m-%d %H:%M:%S')) # Commit author
--- a/hgext/convert/hg.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/hg.py Sun Mar 04 10:42:51 2018 -0500 @@ -36,13 +36,14 @@ scmutil, util, ) +from mercurial.utils import dateutil stringio = util.stringio from . import common mapfile = common.mapfile NoRepo = common.NoRepo -sha1re = re.compile(r'\b[0-9a-f]{12,40}\b') +sha1re = re.compile(br'\b[0-9a-f]{12,40}\b') class mercurial_sink(common.converter_sink): def __init__(self, ui, repotype, path): @@ -563,12 +564,7 @@ if copysource in self.ignored: continue # Ignore copy sources not in parent revisions - found = False - for p in parents: - if copysource in p: - found = True - break - if not found: + if not any(copysource in p for p in parents): continue copies[name] = copysource except TypeError: @@ -588,7 +584,7 @@ crev = rev return common.commit(author=ctx.user(), - date=util.datestr(ctx.date(), + date=dateutil.datestr(ctx.date(), '%Y-%m-%d %H:%M:%S %1%2'), desc=ctx.description(), rev=crev, @@ -625,8 +621,8 @@ def converted(self, rev, destrev): if self.convertfp is None: - self.convertfp = open(self.repo.vfs.join('shamap'), 'a') - self.convertfp.write('%s %s\n' % (destrev, rev)) + self.convertfp = open(self.repo.vfs.join('shamap'), 'ab') + self.convertfp.write(util.tonativeeol('%s %s\n' % (destrev, rev))) self.convertfp.flush() def before(self):
--- a/hgext/convert/monotone.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/monotone.py Sun Mar 04 10:42:51 2018 -0500 @@ -13,8 +13,9 @@ from mercurial.i18n import _ from mercurial import ( error, - util, + pycompat, ) +from mercurial.utils import dateutil from . import common @@ -36,7 +37,7 @@ if not os.path.exists(os.path.join(path, '_MTN')): # Could be a monotone repository (SQLite db file) try: - f = file(path, 'rb') + f = open(path, 'rb') header = f.read(16) f.close() except IOError: @@ -45,11 +46,11 @@ raise norepo # regular expressions for parsing monotone output - space = r'\s*' - name = r'\s+"((?:\\"|[^"])*)"\s*' + space = br'\s*' + name = br'\s+"((?:\\"|[^"])*)"\s*' value = name - revision = r'\s+\[(\w+)\]\s*' - lines = r'(?:.|\n)+' + revision = br'\s+\[(\w+)\]\s*' + lines = br'(?:.|\n)+' self.dir_re = re.compile(space + "dir" + name) self.file_re = re.compile(space + "file" + name + @@ -84,11 +85,12 @@ return self.mtnrunsingle(*args, **kwargs) def mtnrunsingle(self, *args, **kwargs): - kwargs['d'] = self.path + kwargs[r'd'] = self.path return self.run0('automate', *args, **kwargs) def mtnrunstdio(self, *args, **kwargs): # Prepare the command in automate stdio format + kwargs = pycompat.byteskwargs(kwargs) command = [] for k, v in kwargs.iteritems(): command.append("%s:%s" % (len(k), k)) @@ -308,9 +310,10 @@ certs = self.mtngetcerts(rev) if certs.get('suspend') == certs["branch"]: extra['close'] = 1 + dateformat = "%Y-%m-%dT%H:%M:%S" return common.commit( author=certs["author"], - date=util.datestr(util.strdate(certs["date"], "%Y-%m-%dT%H:%M:%S")), + date=dateutil.datestr(dateutil.strdate(certs["date"], dateformat)), desc=certs["changelog"], rev=rev, parents=self.mtnrun("parents", rev).splitlines(),
--- a/hgext/convert/p4.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/p4.py Sun Mar 04 10:42:51 2018 -0500 @@ -14,6 +14,7 @@ error, util, ) +from mercurial.utils import dateutil from . import common @@ -346,7 +347,7 @@ parents = [] return common.commit(author=self.recode(obj["user"]), - date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), + date=dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), parents=parents, desc=desc, branch=None, rev=obj['change'], extra={"p4": obj['change'], "convert_revision": obj['change']})
--- a/hgext/convert/subversion.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/convert/subversion.py Sun Mar 04 10:42:51 2018 -0500 @@ -16,6 +16,7 @@ util, vfs as vfsmod, ) +from mercurial.utils import dateutil from . import common @@ -146,10 +147,10 @@ # Caller may interrupt the iteration pickle.dump(None, fp, protocol) except Exception as inst: - pickle.dump(str(inst), fp, protocol) + pickle.dump(util.forcebytestr(inst), fp, protocol) else: pickle.dump(None, fp, protocol) - fp.close() + fp.flush() # With large history, cleanup process goes crazy and suddenly # consumes *huge* amount of memory. The output file being closed, # there is no need for clean termination. @@ -231,7 +232,7 @@ def httpcheck(ui, path, proto): try: opener = urlreq.buildopener() - rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path)) + rsp = opener.open('%s://%s/!svn/ver/0/.svn' % (proto, path), 'rb') data = rsp.read() except urlerr.httperror as inst: if inst.code != 404: @@ -384,7 +385,7 @@ def setrevmap(self, revmap): lastrevs = {} - for revid in revmap.iterkeys(): + for revid in revmap: uuid, module, revnum = revsplit(revid) lastrevnum = lastrevs.setdefault(module, revnum) if revnum > lastrevnum: @@ -639,8 +640,9 @@ return if self.convertfp is None: self.convertfp = open(os.path.join(self.wc, '.svn', 'hg-shamap'), - 'a') - self.convertfp.write('%s %d\n' % (destrev, self.revnum(rev))) + 'ab') + self.convertfp.write(util.tonativeeol('%s %d\n' + % (destrev, self.revnum(rev)))) self.convertfp.flush() def revid(self, revnum, module=None): @@ -890,7 +892,7 @@ # Example SVN datetime. Includes microseconds. 
# ISO-8601 conformant # '2007-01-04T17:35:00.902377Z' - date = util.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"]) + date = dateutil.parsedate(date[:19] + " UTC", ["%Y-%m-%dT%H:%M:%S"]) if self.ui.configbool('convert', 'localtimezone'): date = makedatetimestamp(date[0]) @@ -912,7 +914,7 @@ branch = None cset = commit(author=author, - date=util.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), + date=dateutil.datestr(date, '%Y-%m-%d %H:%M:%S %1%2'), desc=log, parents=parents, branch=branch, @@ -1128,7 +1130,7 @@ self.wc = os.path.realpath(path) self.run0('update') else: - if not re.search(r'^(file|http|https|svn|svn\+ssh)\://', path): + if not re.search(br'^(file|http|https|svn|svn\+ssh)\://', path): path = os.path.realpath(path) if os.path.isdir(os.path.dirname(path)): if not os.path.exists(os.path.join(path, 'db', 'fs-type')): @@ -1158,7 +1160,7 @@ if created: hook = os.path.join(created, 'hooks', 'pre-revprop-change') - fp = open(hook, 'w') + fp = open(hook, 'wb') fp.write(pre_revprop_change) fp.close() util.setflags(hook, False, True) @@ -1308,8 +1310,8 @@ self.setexec = [] fd, messagefile = tempfile.mkstemp(prefix='hg-convert-') - fp = os.fdopen(fd, pycompat.sysstr('w')) - fp.write(commit.desc) + fp = os.fdopen(fd, pycompat.sysstr('wb')) + fp.write(util.tonativeeol(commit.desc)) fp.close() try: output = self.run0('commit',
--- a/hgext/eol.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/eol.py Sun Mar 04 10:42:51 2018 -0500 @@ -222,7 +222,7 @@ data = ctx[f].data() if (target == "to-lf" and "\r\n" in data or target == "to-crlf" and singlelf.search(data)): - failed.append((f, target, str(ctx))) + failed.append((f, target, bytes(ctx))) break return failed
--- a/hgext/extdiff.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/extdiff.py Sun Mar 04 10:42:51 2018 -0500 @@ -88,12 +88,12 @@ configtable = {} configitem = registrar.configitem(configtable) -configitem('extdiff', r'opts\..*', +configitem('extdiff', br'opts\..*', default='', generic=True, ) -configitem('diff-tools', r'.*\.diffargs$', +configitem('diff-tools', br'.*\.diffargs$', default=None, generic=True, ) @@ -256,8 +256,8 @@ cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()], fntemplate=repo.vfs.reljoin(tmproot, template), match=matcher) - label1a = cmdutil.makefilename(repo, template, node1a) - label2 = cmdutil.makefilename(repo, template, node2) + label1a = cmdutil.makefilename(repo[node1a], template) + label2 = cmdutil.makefilename(repo[node2], template) dir1a = repo.vfs.reljoin(tmproot, label1a) dir2 = repo.vfs.reljoin(tmproot, label2) dir1b = None @@ -279,13 +279,13 @@ return pre + util.shellquote(replace[key]) # Match parent2 first, so 'parent1?' will match both parent1 and parent - regex = (r'''(['"]?)([^\s'"$]*)''' - r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1') + regex = (br'''(['"]?)([^\s'"$]*)''' + br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1') if not do3way and not re.search(regex, cmdline): cmdline += ' $parent1 $child' cmdline = re.sub(regex, quote, cmdline) - ui.debug('running %r in %s\n' % (cmdline, tmproot)) + ui.debug('running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot)) ui.system(cmdline, cwd=tmproot, blockedtag='extdiff') for copy_fn, working_fn, st in fnsandstat: @@ -366,7 +366,7 @@ # We can't pass non-ASCII through docstrings (and path is # in an unknown encoding anyway) docpath = util.escapestr(path) - self.__doc__ = self.__doc__ % {'path': util.uirepr(docpath)} + self.__doc__ %= {r'path': pycompat.sysstr(util.uirepr(docpath))} self._cmdline = cmdline def __call__(self, ui, repo, *pats, **opts):
--- a/hgext/fetch.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/fetch.py Sun Mar 04 10:42:51 2018 -0500 @@ -23,6 +23,7 @@ registrar, util, ) +from mercurial.utils import dateutil release = lock.release cmdtable = {} @@ -64,7 +65,7 @@ opts = pycompat.byteskwargs(opts) date = opts.get('date') if date: - opts['date'] = util.parsedate(date) + opts['date'] = dateutil.parsedate(date) parent, _p2 = repo.dirstate.parents() branch = repo.dirstate.branch()
--- a/hgext/githelp.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/githelp.py Sun Mar 04 10:42:51 2018 -0500 @@ -22,6 +22,7 @@ from mercurial.i18n import _ from mercurial import ( + encoding, error, fancyopts, registrar, @@ -109,7 +110,7 @@ self.args = [] self.opts = {} - def __str__(self): + def __bytes__(self): cmd = "hg " + self.name if self.opts: for k, values in sorted(self.opts.iteritems()): @@ -123,6 +124,8 @@ cmd += " ".join(self.args) return cmd + __str__ = encoding.strmethod(__bytes__) + def append(self, value): self.args.append(value) @@ -167,14 +170,14 @@ ui.status(_("note: use hg addremove to remove files that have " "been deleted.\n\n")) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def am(ui, repo, *args, **kwargs): cmdoptions=[ ] args, opts = parseoptions(ui, cmdoptions, args) cmd = Command('import') - ui.status(str(cmd), "\n") + ui.status(bytes(cmd), "\n") def apply(ui, repo, *args, **kwargs): cmdoptions = [ @@ -187,7 +190,7 @@ cmd['-p'] = opts.get('p') cmd.extend(args) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def bisect(ui, repo, *args, **kwargs): ui.status(_("See 'hg help bisect' for how to use bisect.\n\n")) @@ -198,7 +201,7 @@ args, opts = parseoptions(ui, cmdoptions, args) cmd = Command('annotate -udl') cmd.extend([convert(v) for v in args]) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def branch(ui, repo, *args, **kwargs): cmdoptions = [ @@ -239,7 +242,7 @@ cmd.append(args[0]) elif len(args) == 1: cmd.append(args[0]) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def ispath(repo, string): """ @@ -330,7 +333,7 @@ else: raise error.Abort("a commit must be specified") - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def cherrypick(ui, repo, *args, **kwargs): cmdoptions = [ @@ -352,7 +355,7 @@ else: cmd.extend(args) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def clean(ui, repo, *args, **kwargs): cmdoptions = [ @@ -367,7 +370,7 @@ cmd['--all'] = 
None cmd.extend(args) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def clone(ui, repo, *args, **kwargs): cmdoptions = [ @@ -397,7 +400,7 @@ cocmd.append(opts.get('branch')) cmd = cmd & cocmd - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def commit(ui, repo, *args, **kwargs): cmdoptions = [ @@ -445,7 +448,7 @@ cmd.extend(args) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def deprecated(ui, repo, *args, **kwargs): ui.warn(_('This command has been deprecated in the git project, ' + @@ -476,7 +479,7 @@ except Exception: cmd.append(a) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def difftool(ui, repo, *args, **kwargs): ui.status(_('Mercurial does not enable external difftool by default. You ' @@ -509,7 +512,7 @@ else: cmd['-r'] = v - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def grep(ui, repo, *args, **kwargs): cmdoptions = [ @@ -522,7 +525,7 @@ # pattern first, followed by paths. cmd.extend(args) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def init(ui, repo, *args, **kwargs): cmdoptions = [ @@ -534,7 +537,7 @@ if len(args) > 0: cmd.append(args[0]) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def log(ui, repo, *args, **kwargs): cmdoptions = [ @@ -588,7 +591,7 @@ del args[0] cmd.extend(args) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def lsfiles(ui, repo, *args, **kwargs): cmdoptions = [ @@ -624,7 +627,7 @@ for include in args: cmd['-I'] = util.shellquote(include) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def merge(ui, repo, *args, **kwargs): cmdoptions = [ @@ -636,7 +639,7 @@ if len(args) > 0: cmd.append(args[len(args) - 1]) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def mergebase(ui, repo, *args, **kwargs): cmdoptions = [] @@ -650,7 +653,7 @@ ui.status(_('NOTE: ancestors() is part of the revset language.\n'), _("Learn more about revsets with 'hg help revsets'\n\n")) - ui.status((str(cmd)), 
"\n") + ui.status((bytes(cmd)), "\n") def mergetool(ui, repo, *args, **kwargs): cmdoptions = [] @@ -661,7 +664,7 @@ if len(args) == 0: cmd['--all'] = None cmd.extend(args) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def mv(ui, repo, *args, **kwargs): cmdoptions = [ @@ -675,7 +678,7 @@ if opts.get('force'): cmd['-f'] = None - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def pull(ui, repo, *args, **kwargs): cmdoptions = [ @@ -701,7 +704,7 @@ else: cmd['-r'] = v - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def push(ui, repo, *args, **kwargs): cmdoptions = [ @@ -728,7 +731,7 @@ if opts.get('force'): cmd['-f'] = None - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def rebase(ui, repo, *args, **kwargs): cmdoptions = [ @@ -748,12 +751,12 @@ if len(args) > 0: ui.status(_("also note: 'hg histedit' will automatically detect" " your stack, so no second argument is necessary.\n\n")) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") return if opts.get('skip'): cmd = Command('revert --all -r .') - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") cmd = Command('rebase') @@ -777,7 +780,7 @@ cmd['-d'] = convert(args[0]) cmd['-b'] = convert(args[1]) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def reflog(ui, repo, *args, **kwargs): cmdoptions = [ @@ -791,7 +794,7 @@ if len(args) > 0: cmd.append(args[0]) - ui.status(str(cmd), "\n\n") + ui.status(bytes(cmd), "\n\n") ui.status(_("note: in hg commits can be deleted from repo but we always" " have backups.\n")) @@ -819,7 +822,7 @@ cmd.append(commit) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def revert(ui, repo, *args, **kwargs): cmdoptions = [ @@ -834,7 +837,7 @@ if args: cmd.append(args[0]) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def revparse(ui, repo, *args, **kwargs): cmdoptions = [ @@ -847,7 +850,7 @@ cmd = Command('root') if opts.get('show_cdup'): ui.status(_("note: hg root prints the root 
of the repository\n\n")) - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") else: ui.status(_("note: see hg help revset for how to refer to commits\n")) @@ -866,7 +869,7 @@ if opts.get('dry_run'): cmd['-n'] = None - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def show(ui, repo, *args, **kwargs): cmdoptions = [ @@ -898,7 +901,7 @@ else: cmd = Command('export') - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def stash(ui, repo, *args, **kwargs): cmdoptions = [ @@ -934,7 +937,7 @@ elif len(args) > 1: cmd['--name'] = args[1] - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def status(ui, repo, *args, **kwargs): cmdoptions = [ @@ -948,7 +951,7 @@ if opts.get('ignored'): cmd['-i'] = None - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def svn(ui, repo, *args, **kwargs): svncmd = args[0] @@ -965,7 +968,7 @@ cmd = Command('push') - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def svnfetch(ui, repo, *args, **kwargs): cmdoptions = [ @@ -975,7 +978,7 @@ cmd = Command('pull') cmd.append('default-push') - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def svnfindrev(ui, repo, *args, **kwargs): cmdoptions = [ @@ -985,7 +988,7 @@ cmd = Command('log') cmd['-r'] = args[0] - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def svnrebase(ui, repo, *args, **kwargs): cmdoptions = [ @@ -1000,7 +1003,7 @@ cmd = pullcmd & rebasecmd - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") def tag(ui, repo, *args, **kwargs): cmdoptions = [ @@ -1024,7 +1027,7 @@ if opts.get('force'): cmd['-f'] = None - ui.status((str(cmd)), "\n") + ui.status((bytes(cmd)), "\n") gitcommands = { 'add': add,
--- a/hgext/gpg.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/gpg.py Sun Mar 04 10:42:51 2018 -0500 @@ -21,6 +21,7 @@ registrar, util, ) +from mercurial.utils import dateutil cmdtable = {} command = registrar.command(cmdtable) @@ -153,8 +154,7 @@ # warn for expired key and/or sigs for key in keys: if key[0] == "ERRSIG": - ui.write(_("%s Unknown key ID \"%s\"\n") - % (prefix, shortkey(ui, key[1][:15]))) + ui.write(_("%s Unknown key ID \"%s\"\n") % (prefix, key[1])) continue if key[0] == "BADSIG": ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2])) @@ -259,7 +259,7 @@ date = opts.get('date') if date: - opts['date'] = util.parsedate(date) + opts['date'] = dateutil.parsedate(date) if revs: nodes = [repo.lookup(n) for n in revs] @@ -318,14 +318,7 @@ repo.commit(message, opts['user'], opts['date'], match=msigs, editor=editor) except ValueError as inst: - raise error.Abort(str(inst)) - -def shortkey(ui, key): - if len(key) != 16: - ui.debug("key ID \"%s\" format error\n" % key) - return key - - return key[-8:] + raise error.Abort(pycompat.bytestr(inst)) def node2txt(repo, node, ver): """map a manifest into some text"""
--- a/hgext/hgk.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/hgk.py Sun Mar 04 10:42:51 2018 -0500 @@ -146,7 +146,7 @@ date = ctx.date() description = ctx.description().replace("\0", "") - ui.write(("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1]))) + ui.write(("author %s %d %d\n" % (ctx.user(), int(date[0]), date[1]))) if 'committer' in ctx.extra(): ui.write(("committer %s\n" % ctx.extra()['committer']))
--- a/hgext/histedit.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/histedit.py Sun Mar 04 10:42:51 2018 -0500 @@ -344,7 +344,7 @@ fp.write('v1\n') fp.write('%s\n' % node.hex(self.parentctxnode)) fp.write('%s\n' % node.hex(self.topmost)) - fp.write('%s\n' % self.keep) + fp.write('%s\n' % ('True' if self.keep else 'False')) fp.write('%d\n' % len(self.actions)) for action in self.actions: fp.write('%s\n' % action.tostate()) @@ -491,7 +491,7 @@ repo.dirstate.setbranch(rulectx.branch()) if stats and stats[3] > 0: buf = repo.ui.popbuffer() - repo.ui.write(*buf) + repo.ui.write(buf) raise error.InterventionRequired( _('Fix up the change (%s %s)') % (self.verb, node.short(self.node)), @@ -567,7 +567,7 @@ repo.ui.setconfig('ui', 'forcemerge', '', 'histedit') return stats -def collapse(repo, first, last, commitopts, skipprompt=False): +def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False): """collapse the set of revisions from first to last as new one. Expected commit options are: @@ -577,14 +577,14 @@ Commit message is edited in all cases. 
This function works in memory.""" - ctxs = list(repo.set('%d::%d', first, last)) + ctxs = list(repo.set('%d::%d', firstctx.rev(), lastctx.rev())) if not ctxs: return None for c in ctxs: if not c.mutable(): raise error.ParseError( _("cannot fold into public change %s") % node.short(c.node())) - base = first.parents()[0] + base = firstctx.parents()[0] # commit a new version of the old changeset, including the update # collect all files which might be affected @@ -593,15 +593,15 @@ files.update(ctx.files()) # Recompute copies (avoid recording a -> b -> a) - copied = copies.pathcopies(base, last) + copied = copies.pathcopies(base, lastctx) # prune files which were reverted by the updates - files = [f for f in files if not cmdutil.samefile(f, last, base)] + files = [f for f in files if not cmdutil.samefile(f, lastctx, base)] # commit version of these files as defined by head - headmf = last.manifest() + headmf = lastctx.manifest() def filectxfn(repo, ctx, path): if path in headmf: - fctx = last[path] + fctx = lastctx[path] flags = fctx.flags() mctx = context.memfilectx(repo, ctx, fctx.path(), fctx.data(), @@ -614,12 +614,12 @@ if commitopts.get('message'): message = commitopts['message'] else: - message = first.description() + message = firstctx.description() user = commitopts.get('user') date = commitopts.get('date') extra = commitopts.get('extra') - parents = (first.p1().node(), first.p2().node()) + parents = (firstctx.p1().node(), firstctx.p2().node()) editor = None if not skipprompt: editor = cmdutil.getcommiteditor(edit=True, editform='histedit.fold') @@ -730,8 +730,9 @@ return ctx, [(self.node, (parentctxnode,))] parentctx = repo[parentctxnode] - newcommits = set(c.node() for c in repo.set('(%d::. - %d)', parentctx, - parentctx)) + newcommits = set(c.node() for c in repo.set('(%d::. 
- %d)', + parentctx.rev(), + parentctx.rev())) if not newcommits: repo.ui.warn(_('%s: cannot fold - working copy is not a ' 'descendant of previous commit %s\n') % @@ -1353,19 +1354,19 @@ """select and validate the set of revision to edit When keep is false, the specified set can't have children.""" - ctxs = list(repo.set('%n::%n', old, new)) - if ctxs and not keep: + revs = repo.revs('%n::%n', old, new) + if revs and not keep: if (not obsolete.isenabled(repo, obsolete.allowunstableopt) and - repo.revs('(%ld::) - (%ld)', ctxs, ctxs)): + repo.revs('(%ld::) - (%ld)', revs, revs)): raise error.Abort(_('can only histedit a changeset together ' 'with all its descendants')) - if repo.revs('(%ld) and merge()', ctxs): + if repo.revs('(%ld) and merge()', revs): raise error.Abort(_('cannot edit history that contains merges')) - root = ctxs[0] # list is already sorted by repo.set + root = repo[revs.first()] # list is already sorted by repo.revs() if not root.mutable(): raise error.Abort(_('cannot edit public changeset: %s') % root, hint=_("see 'hg help phases' for details")) - return [c.node() for c in ctxs] + return pycompat.maplist(repo.changelog.node, revs) def ruleeditor(repo, ui, actions, editcomment=""): """open an editor to edit rules @@ -1415,9 +1416,8 @@ # Save edit rules in .hg/histedit-last-edit.txt in case # the user needs to ask for help after something # surprising happens. - f = open(repo.vfs.join('histedit-last-edit.txt'), 'w') - f.write(rules) - f.close() + with repo.vfs('histedit-last-edit.txt', 'wb') as f: + f.write(rules) return rules
--- a/hgext/journal.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/journal.py Sun Mar 04 10:42:51 2018 -0500 @@ -24,18 +24,19 @@ bookmarks, cmdutil, dispatch, + encoding, error, extensions, hg, localrepo, lock, + logcmdutil, node, pycompat, registrar, util, ) - -from . import share +from mercurial.utils import dateutil cmdtable = {} command = registrar.command(cmdtable) @@ -168,7 +169,7 @@ """Copy shared journal entries into this repo when unsharing""" if (repo.path == repopath and repo.shared() and util.safehasattr(repo, 'journal')): - sharedrepo = share._getsrcrepo(repo) + sharedrepo = hg.sharedreposource(repo) sharedfeatures = _readsharedfeatures(repo) if sharedrepo and sharedfeatures > {'journal'}: # there is a shared repository and there are shared journal entries @@ -219,8 +220,8 @@ (timestamp, tz), user, command, namespace, name, oldhashes, newhashes) - def __str__(self): - """String representation for storage""" + def __bytes__(self): + """bytes representation for storage""" time = ' '.join(map(str, self.timestamp)) oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes]) newhashes = ','.join([node.hex(hash) for hash in self.newhashes]) @@ -228,6 +229,8 @@ time, self.user, self.command, self.namespace, self.name, oldhashes, newhashes)) + __str__ = encoding.strmethod(__bytes__) + class journalstorage(object): """Storage for journal entries @@ -257,7 +260,7 @@ self.sharedfeatures = self.sharedvfs = None if repo.shared(): features = _readsharedfeatures(repo) - sharedrepo = share._getsrcrepo(repo) + sharedrepo = hg.sharedreposource(repo) if sharedrepo is not None and 'journal' in features: self.sharedvfs = sharedrepo.vfs self.sharedfeatures = features @@ -327,7 +330,7 @@ newhashes = [newhashes] entry = journalentry( - util.makedate(), self.user, self.command, namespace, name, + dateutil.makedate(), self.user, self.command, namespace, name, oldhashes, newhashes) vfs = self.vfs @@ -348,7 +351,7 @@ # Read just enough bytes to get a version number (up to 2 
# digits plus separator) version = f.read(3).partition('\0')[0] - if version and version != str(storageversion): + if version and version != "%d" % storageversion: # different version of the storage. Exit early (and not # write anything) if this is not a version we can handle or # the file is corrupt. In future, perhaps rotate the file @@ -358,9 +361,9 @@ return if not version: # empty file, write version first - f.write(str(storageversion) + '\0') + f.write(("%d" % storageversion) + '\0') f.seek(0, os.SEEK_END) - f.write(str(entry) + '\0') + f.write(bytes(entry) + '\0') def filtered(self, namespace=None, name=None): """Yield all journal entries with the given namespace or name @@ -410,7 +413,7 @@ lines = raw.split('\0') version = lines and lines[0] - if version != str(storageversion): + if version != "%d" % storageversion: version = version or _('not available') raise error.Abort(_("unknown journal file version '%s'") % version) @@ -478,7 +481,7 @@ displayname = "'%s'" % name ui.status(_("previous locations of %s:\n") % displayname) - limit = cmdutil.loglimit(opts) + limit = logcmdutil.getlimit(opts) entry = None ui.pager('journal') for count, entry in enumerate(repo.journal.filtered(name=name)): @@ -502,13 +505,13 @@ fm.write('command', ' %s\n', entry.command) if opts.get("commits"): - displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False) + displayer = logcmdutil.changesetdisplayer(ui, repo, opts) for hash in entry.newhashes: try: ctx = repo[hash] displayer.show(ctx) except error.RepoLookupError as e: - fm.write('repolookuperror', "%s\n\n", str(e)) + fm.write('repolookuperror', "%s\n\n", pycompat.bytestr(e)) displayer.close() fm.end()
--- a/hgext/keyword.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/keyword.py Sun Mar 04 10:42:51 2018 -0500 @@ -101,6 +101,7 @@ extensions, filelog, localrepo, + logcmdutil, match, patch, pathutil, @@ -110,6 +111,7 @@ templatefilters, util, ) +from mercurial.utils import dateutil cmdtable = {} command = registrar.command(cmdtable) @@ -155,21 +157,23 @@ def utcdate(text): '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13". ''' - return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S') + dateformat = '%Y/%m/%d %H:%M:%S' + return dateutil.datestr((dateutil.parsedate(text)[0], 0), dateformat) # date like in svn's $Date @templatefilter('svnisodate') def svnisodate(text): '''Date. Returns a date in this format: "2009-08-18 13:00:13 +0200 (Tue, 18 Aug 2009)". ''' - return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)') + return dateutil.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)') # date like in svn's $Id @templatefilter('svnutcdate') def svnutcdate(text): '''Date. Returns a UTC-date in this format: "2009-08-18 11:00:13Z". ''' - return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ') + dateformat = '%Y-%m-%d %H:%M:%SZ' + return dateutil.datestr((dateutil.parsedate(text)[0], 0), dateformat) # make keyword tools accessible kwtools = {'hgcmd': ''} @@ -254,7 +258,7 @@ '''Replaces keywords in data with expanded template.''' def kwsub(mobj): kw = mobj.group(1) - ct = cmdutil.makelogtemplater(self.ui, self.repo, + ct = logcmdutil.maketemplater(self.ui, self.repo, self.templates[kw]) self.ui.pushbuffer() ct.show(ctx, root=self.repo.root, file=path)
--- a/hgext/largefiles/lfcommands.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/largefiles/lfcommands.py Sun Mar 04 10:42:51 2018 -0500 @@ -365,7 +365,7 @@ at = 0 ui.debug("sending statlfile command for %d largefiles\n" % len(files)) retval = store.exists(files) - files = filter(lambda h: not retval[h], files) + files = [h for h in files if not retval[h]] ui.debug("%d largefiles need to be uploaded\n" % len(files)) for hash in files:
--- a/hgext/largefiles/lfutil.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/largefiles/lfutil.py Sun Mar 04 10:42:51 2018 -0500 @@ -15,6 +15,7 @@ import stat from mercurial.i18n import _ +from mercurial.node import hex from mercurial import ( dirstate, @@ -371,7 +372,7 @@ for data in instream: hasher.update(data) outfile.write(data) - return hasher.hexdigest() + return hex(hasher.digest()) def hashfile(file): if not os.path.exists(file): @@ -404,7 +405,7 @@ h = hashlib.sha1() for chunk in util.filechunkiter(fileobj): h.update(chunk) - return h.hexdigest() + return hex(h.digest()) def httpsendfile(ui, filename): return httpconnection.httpsendfile(ui, filename, 'rb')
--- a/hgext/largefiles/overrides.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/largefiles/overrides.py Sun Mar 04 10:42:51 2018 -0500 @@ -19,6 +19,7 @@ cmdutil, error, hg, + logcmdutil, match as matchmod, pathutil, pycompat, @@ -41,7 +42,7 @@ matcher''' m = copy.copy(match) lfile = lambda f: lfutil.standin(f) in manifest - m._files = filter(lfile, m._files) + m._files = [lf for lf in m._files if lfile(lf)] m._fileset = set(m._files) m.always = lambda: False origmatchfn = m.matchfn @@ -56,7 +57,7 @@ m = copy.copy(match) notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded) - m._files = filter(notlfile, m._files) + m._files = [lf for lf in m._files if notlfile(lf)] m._fileset = set(m._files) m.always = lambda: False origmatchfn = m.matchfn @@ -388,20 +389,20 @@ # (2) to determine what files to print out diffs for. # The magic matchandpats override should be used for case (1) but not for # case (2). - def overridemakelogfilematcher(repo, pats, opts, badfn=None): + def overridemakefilematcher(repo, pats, opts, badfn=None): wctx = repo[None] match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn) - return lambda rev: match + return lambda ctx: match oldmatchandpats = installmatchandpatsfn(overridematchandpats) - oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher - setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher) + oldmakefilematcher = logcmdutil._makenofollowfilematcher + setattr(logcmdutil, '_makenofollowfilematcher', overridemakefilematcher) try: return orig(ui, repo, *pats, **opts) finally: restorematchandpatsfn() - setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher) + setattr(logcmdutil, '_makenofollowfilematcher', oldmakefilematcher) def overrideverify(orig, ui, repo, *pats, **opts): large = opts.pop(r'large', False) @@ -597,7 +598,7 @@ try: result = orig(ui, repo, pats, opts, rename) except error.Abort as e: - if str(e) != _('no files to copy'): + if 
pycompat.bytestr(e) != _('no files to copy'): raise e else: nonormalfiles = True @@ -704,7 +705,7 @@ lfdirstate.add(destlfile) lfdirstate.write() except error.Abort as e: - if str(e) != _('no files to copy'): + if pycompat.bytestr(e) != _('no files to copy'): raise e else: nolfiles = True @@ -823,7 +824,7 @@ """Override push command and store --lfrev parameters in opargs""" lfrevs = kwargs.pop(r'lfrev', None) if lfrevs: - opargs = kwargs.setdefault('opargs', {}) + opargs = kwargs.setdefault(r'opargs', {}) opargs['lfrevs'] = scmutil.revrange(repo, lfrevs) return orig(ui, repo, *args, **kwargs) @@ -1237,10 +1238,11 @@ matchfn = m.matchfn m.matchfn = lambda f: f in s.deleted and matchfn(f) - removelargefiles(repo.ui, repo, True, m, **opts) + removelargefiles(repo.ui, repo, True, m, **pycompat.strkwargs(opts)) # Call into the normal add code, and any files that *should* be added as # largefiles will be - added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts) + added, bad = addlargefiles(repo.ui, repo, True, matcher, + **pycompat.strkwargs(opts)) # Now that we've handled largefiles, hand off to the original addremove # function to take care of the rest. Make sure it doesn't do anything with # largefiles by passing a matcher that will ignore them. @@ -1358,8 +1360,7 @@ m.visitdir = lfvisitdirfn for f in ctx.walk(m): - with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(), - pathname=f) as fp: + with cmdutil.makefileobj(ctx, opts.get('output'), pathname=f) as fp: lf = lfutil.splitstandin(f) if lf is None or origmatchfn(f): # duplicating unreachable code from commands.cat
--- a/hgext/largefiles/proto.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/largefiles/proto.py Sun Mar 04 10:42:51 2018 -0500 @@ -14,6 +14,7 @@ httppeer, util, wireproto, + wireprototypes, ) from . import ( @@ -34,27 +35,26 @@ def putlfile(repo, proto, sha): '''Server command for putting a largefile into a repository's local store and into the user cache.''' - proto.redirect() - - path = lfutil.storepath(repo, sha) - util.makedirs(os.path.dirname(path)) - tmpfp = util.atomictempfile(path, createmode=repo.store.createmode) + with proto.mayberedirectstdio() as output: + path = lfutil.storepath(repo, sha) + util.makedirs(os.path.dirname(path)) + tmpfp = util.atomictempfile(path, createmode=repo.store.createmode) - try: - proto.getfile(tmpfp) - tmpfp._fp.seek(0) - if sha != lfutil.hexsha1(tmpfp._fp): - raise IOError(0, _('largefile contents do not match hash')) - tmpfp.close() - lfutil.linktousercache(repo, sha) - except IOError as e: - repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') % - (sha, e.strerror)) - return wireproto.pushres(1) - finally: - tmpfp.discard() + try: + proto.forwardpayload(tmpfp) + tmpfp._fp.seek(0) + if sha != lfutil.hexsha1(tmpfp._fp): + raise IOError(0, _('largefile contents do not match hash')) + tmpfp.close() + lfutil.linktousercache(repo, sha) + except IOError as e: + repo.ui.warn(_('largefiles: failed to put %s into store: %s\n') % + (sha, e.strerror)) + return wireproto.pushres(1, output.getvalue() if output else '') + finally: + tmpfp.discard() - return wireproto.pushres(0) + return wireproto.pushres(0, output.getvalue() if output else '') def getlfile(repo, proto, sha): '''Server command for retrieving a largefile from the repository-local @@ -86,8 +86,8 @@ server side.''' filename = lfutil.findfile(repo, sha) if not filename: - return '2\n' - return '0\n' + return wireprototypes.bytesresponse('2\n') + return wireprototypes.bytesresponse('0\n') def wirereposetup(ui, repo): class lfileswirerepository(repo.__class__): @@ 
-97,7 +97,7 @@ # it ... if issubclass(self.__class__, httppeer.httppeer): res = self._call('putlfile', data=fd, sha=sha, - headers={'content-type':'application/mercurial-0.1'}) + headers={r'content-type': r'application/mercurial-0.1'}) try: d, output = res.split('\n', 1) for l in output.splitlines(True): @@ -180,7 +180,7 @@ args[r'cmds'] = args[r'cmds'].replace('heads ', 'lheads ') return ssholdcallstream(self, cmd, **args) -headsre = re.compile(r'(^|;)heads\b') +headsre = re.compile(br'(^|;)heads\b') def httprepocallstream(self, cmd, **args): if cmd == 'heads' and self.capable('largefiles'):
--- a/hgext/largefiles/remotestore.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/largefiles/remotestore.py Sun Mar 04 10:42:51 2018 -0500 @@ -52,7 +52,7 @@ except IOError as e: raise error.Abort( _('remotestore: could not open file %s: %s') - % (filename, str(e))) + % (filename, util.forcebytestr(e))) def _getfile(self, tmpfile, filename, hash): try: @@ -60,7 +60,8 @@ except urlerr.httperror as e: # 401s get converted to error.Aborts; everything else is fine being # turned into a StoreError - raise basestore.StoreError(filename, hash, self.url, str(e)) + raise basestore.StoreError(filename, hash, self.url, + util.forcebytestr(e)) except urlerr.urlerror as e: # This usually indicates a connection problem, so don't # keep trying with the other files... they will probably @@ -68,7 +69,8 @@ raise error.Abort('%s: %s' % (util.hidepassword(self.url), e.reason)) except IOError as e: - raise basestore.StoreError(filename, hash, self.url, str(e)) + raise basestore.StoreError(filename, hash, self.url, + util.forcebytestr(e)) return lfutil.copyandhash(chunks, tmpfile)
--- a/hgext/largefiles/storefactory.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/largefiles/storefactory.py Sun Mar 04 10:42:51 2018 -0500 @@ -80,7 +80,7 @@ 'ssh': [wirestore.wirestore], } -_scheme_re = re.compile(r'^([a-zA-Z0-9+-.]+)://') +_scheme_re = re.compile(br'^([a-zA-Z0-9+-.]+)://') def getlfile(ui, hash): return util.chunkbuffer(openstore(ui=ui)._get(hash))
--- a/hgext/largefiles/uisetup.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/largefiles/uisetup.py Sun Mar 04 10:42:51 2018 -0500 @@ -165,13 +165,13 @@ overrides.openlargefile) # create the new wireproto commands ... - wireproto.commands['putlfile'] = (proto.putlfile, 'sha') - wireproto.commands['getlfile'] = (proto.getlfile, 'sha') - wireproto.commands['statlfile'] = (proto.statlfile, 'sha') + wireproto.wireprotocommand('putlfile', 'sha')(proto.putlfile) + wireproto.wireprotocommand('getlfile', 'sha')(proto.getlfile) + wireproto.wireprotocommand('statlfile', 'sha')(proto.statlfile) + wireproto.wireprotocommand('lheads', '')(wireproto.heads) # ... and wrap some existing ones - wireproto.commands['heads'] = (proto.heads, '') - wireproto.commands['lheads'] = (wireproto.heads, '') + wireproto.commands['heads'].func = proto.heads # make putlfile behave the same as push and {get,stat}lfile behave # the same as pull w.r.t. permissions checks @@ -185,9 +185,9 @@ # can't do this in reposetup because it needs to have happened before # wirerepo.__init__ is called - proto.ssholdcallstream = sshpeer.sshpeer._callstream + proto.ssholdcallstream = sshpeer.sshv1peer._callstream proto.httpoldcallstream = httppeer.httppeer._callstream - sshpeer.sshpeer._callstream = proto.sshrepocallstream + sshpeer.sshv1peer._callstream = proto.sshrepocallstream httppeer.httppeer._callstream = proto.httprepocallstream # override some extensions' stuff as well
--- a/hgext/lfs/__init__.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/lfs/__init__.py Sun Mar 04 10:42:51 2018 -0500 @@ -192,6 +192,7 @@ command = registrar.command(cmdtable) templatekeyword = registrar.templatekeyword() +filesetpredicate = registrar.filesetpredicate() def featuresetup(ui, supported): # don't die on seeing a repo with the lfs requirement @@ -211,7 +212,7 @@ class lfsrepo(repo.__class__): @localrepo.unfilteredmethod def commitctx(self, ctx, error=False): - repo.svfs.options['lfstrack'] = _trackedmatcher(self, ctx) + repo.svfs.options['lfstrack'] = _trackedmatcher(self) return super(lfsrepo, self).commitctx(ctx, error) repo.__class__ = lfsrepo @@ -219,12 +220,12 @@ if 'lfs' not in repo.requirements: def checkrequireslfs(ui, repo, **kwargs): if 'lfs' not in repo.requirements: - last = kwargs.get('node_last') + last = kwargs.get(r'node_last') _bin = node.bin if last: - s = repo.set('%n:%n', _bin(kwargs['node']), _bin(last)) + s = repo.set('%n:%n', _bin(kwargs[r'node']), _bin(last)) else: - s = repo.set('%n', _bin(kwargs['node'])) + s = repo.set('%n', _bin(kwargs[r'node'])) for ctx in s: # TODO: is there a way to just walk the files in the commit? if any(ctx[f].islfs() for f in ctx.files() if f in ctx): @@ -238,7 +239,7 @@ else: repo.prepushoutgoinghooks.add('lfs', wrapper.prepush) -def _trackedmatcher(repo, ctx): +def _trackedmatcher(repo): """Return a function (path, size) -> bool indicating whether or not to track a given file with lfs.""" if not repo.wvfs.exists('.hglfs'): @@ -331,6 +332,8 @@ wrapfunction(hg, 'clone', wrapper.hgclone) wrapfunction(hg, 'postshare', wrapper.hgpostshare) + scmutil.fileprefetchhooks.add('lfs', wrapper._prefetchfiles) + # Make bundle choose changegroup3 instead of changegroup2. This affects # "hg bundle" command. Note: it does not cover all bundle formats like # "packed1". Using "packed1" with lfs will likely cause trouble. 
@@ -345,12 +348,22 @@ # when writing a bundle via "hg bundle" command, upload related LFS blobs wrapfunction(bundle2, 'writenewbundle', wrapper.writenewbundle) -@templatekeyword('lfs_files') -def lfsfiles(repo, ctx, **args): - """List of strings. LFS files added or modified by the changeset.""" - args = pycompat.byteskwargs(args) +@filesetpredicate('lfs()', callstatus=True) +def lfsfileset(mctx, x): + """File that uses LFS storage.""" + # i18n: "lfs" is a keyword + fileset.getargs(x, 0, 0, _("lfs takes no arguments")) + return [f for f in mctx.subset + if wrapper.pointerfromctx(mctx.ctx, f, removed=True) is not None] - pointers = wrapper.pointersfromctx(ctx) # {path: pointer} +@templatekeyword('lfs_files', requires={'ctx', 'templ'}) +def lfsfiles(context, mapping): + """List of strings. All files modified, added, or removed by this + changeset.""" + ctx = context.resource(mapping, 'ctx') + templ = context.resource(mapping, 'templ') + + pointers = wrapper.pointersfromctx(ctx, removed=True) # {path: pointer} files = sorted(pointers.keys()) def pointer(v): @@ -361,18 +374,18 @@ makemap = lambda v: { 'file': v, - 'lfsoid': pointers[v].oid(), + 'lfsoid': pointers[v].oid() if pointers[v] else None, 'lfspointer': templatekw.hybriddict(pointer(v)), } # TODO: make the separator ', '? - f = templatekw._showlist('lfs_file', files, args) + f = templatekw._showlist('lfs_file', files, templ, mapping) return templatekw._hybrid(f, files, makemap, pycompat.identity) @command('debuglfsupload', [('r', 'rev', [], _('upload large files introduced by REV'))]) def debuglfsupload(ui, repo, **opts): """upload lfs blobs added by the working copy parent or given revisions""" - revs = opts.get('rev', []) + revs = opts.get(r'rev', []) pointers = wrapper.extractpointers(repo, scmutil.revrange(repo, revs)) wrapper.uploadblobs(repo, pointers)
--- a/hgext/lfs/blobstore.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/lfs/blobstore.py Sun Mar 04 10:42:51 2018 -0500 @@ -18,6 +18,7 @@ from mercurial import ( error, pathutil, + pycompat, url as urlmod, util, vfs as vfsmod, @@ -27,7 +28,7 @@ from ..largefiles import lfutil # 64 bytes for SHA256 -_lfsre = re.compile(r'\A[a-f0-9]{64}\Z') +_lfsre = re.compile(br'\A[a-f0-9]{64}\Z') class lfsvfs(vfsmod.vfs): def join(self, path): @@ -194,11 +195,11 @@ def writebatch(self, pointers, fromstore): """Batch upload from local to remote blobstore.""" - self._batch(pointers, fromstore, 'upload') + self._batch(_deduplicate(pointers), fromstore, 'upload') def readbatch(self, pointers, tostore): """Batch download from remote to local blostore.""" - self._batch(pointers, tostore, 'download') + self._batch(_deduplicate(pointers), tostore, 'download') def _batchrequest(self, pointers, action): """Get metadata about objects pointed by pointers for given action @@ -281,9 +282,9 @@ See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\ basic-transfers.md """ - oid = str(obj['oid']) + oid = pycompat.bytestr(obj['oid']) - href = str(obj['actions'][action].get('href')) + href = pycompat.bytestr(obj['actions'][action].get('href')) headers = obj['actions'][action].get('header', {}).items() request = util.urlreq.request(href) @@ -366,12 +367,23 @@ oids = transfer(sorted(objects, key=lambda o: o.get('oid'))) processed = 0 + blobs = 0 for _one, oid in oids: processed += sizes[oid] + blobs += 1 self.ui.progress(topic, processed, total=total) self.ui.note(_('lfs: processed: %s\n') % oid) self.ui.progress(topic, pos=None, total=total) + if blobs > 0: + if action == 'upload': + self.ui.status(_('lfs: uploaded %d files (%s)\n') + % (blobs, util.bytecount(processed))) + # TODO: coalesce the download requests, and comment this in + #elif action == 'download': + # self.ui.status(_('lfs: downloaded %d files (%s)\n') + # % (blobs, util.bytecount(processed))) + def __del__(self): # copied from 
mercurial/httppeer.py urlopener = getattr(self, 'urlopener', None) @@ -388,13 +400,13 @@ self.vfs = lfsvfs(fullpath) def writebatch(self, pointers, fromstore): - for p in pointers: + for p in _deduplicate(pointers): content = fromstore.read(p.oid(), verify=True) with self.vfs(p.oid(), 'wb', atomictemp=True) as fp: fp.write(content) def readbatch(self, pointers, tostore): - for p in pointers: + for p in _deduplicate(pointers): with self.vfs(p.oid(), 'rb') as fp: tostore.download(p.oid(), fp) @@ -433,6 +445,13 @@ None: _promptremote, } +def _deduplicate(pointers): + """Remove any duplicate oids that exist in the list""" + reduced = util.sortdict() + for p in pointers: + reduced[p.oid()] = p + return reduced.values() + def _verify(oid, content): realoid = hashlib.sha256(content).hexdigest() if realoid != oid:
--- a/hgext/lfs/pointer.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/lfs/pointer.py Sun Mar 04 10:42:51 2018 -0500 @@ -13,6 +13,7 @@ from mercurial import ( error, + pycompat, ) class InvalidPointer(error.RevlogError): @@ -23,7 +24,8 @@ def __init__(self, *args, **kwargs): self['version'] = self.VERSION - super(gitlfspointer, self).__init__(*args, **kwargs) + super(gitlfspointer, self).__init__(*args) + self.update(pycompat.byteskwargs(kwargs)) @classmethod def deserialize(cls, text): @@ -45,12 +47,12 @@ # regular expressions used by _validate # see https://github.com/git-lfs/git-lfs/blob/master/docs/spec.md - _keyre = re.compile(r'\A[a-z0-9.-]+\Z') - _valuere = re.compile(r'\A[^\n]*\Z') + _keyre = re.compile(br'\A[a-z0-9.-]+\Z') + _valuere = re.compile(br'\A[^\n]*\Z') _requiredre = { - 'size': re.compile(r'\A[0-9]+\Z'), - 'oid': re.compile(r'\Asha256:[0-9a-f]{64}\Z'), - 'version': re.compile(r'\A%s\Z' % re.escape(VERSION)), + 'size': re.compile(br'\A[0-9]+\Z'), + 'oid': re.compile(br'\Asha256:[0-9a-f]{64}\Z'), + 'version': re.compile(br'\A%s\Z' % re.escape(VERSION)), } def validate(self):
--- a/hgext/lfs/wrapper.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/lfs/wrapper.py Sun Mar 04 10:42:51 2018 -0500 @@ -10,7 +10,7 @@ import hashlib from mercurial.i18n import _ -from mercurial.node import bin, nullid, short +from mercurial.node import bin, hex, nullid, short from mercurial import ( error, @@ -85,12 +85,12 @@ text = text[offset:] # git-lfs only supports sha256 - oid = hashlib.sha256(text).hexdigest() + oid = hex(hashlib.sha256(text).digest()) self.opener.lfslocalblobstore.write(oid, text) # replace contents with metadata longoid = 'sha256:%s' % oid - metadata = pointer.gitlfspointer(oid=longoid, size=str(len(text))) + metadata = pointer.gitlfspointer(oid=longoid, size='%d' % len(text)) # by default, we expect the content to be binary. however, LFS could also # be used for non-binary content. add a special entry for non-binary data. @@ -249,6 +249,21 @@ if 'lfs' in destrepo.requirements: destrepo.vfs.append('hgrc', util.tonativeeol('\n[extensions]\nlfs=\n')) +def _prefetchfiles(repo, ctx, files): + """Ensure that required LFS blobs are present, fetching them as a group if + needed.""" + pointers = [] + localstore = repo.svfs.lfslocalblobstore + + for f in files: + p = pointerfromctx(ctx, f) + if p and not localstore.has(p.oid()): + p.filename = f + pointers.append(p) + + if pointers: + repo.svfs.lfsremoteblobstore.readbatch(pointers, localstore) + def _canskipupload(repo): # if remotestore is a null store, upload is a no-op and can be skipped return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote) @@ -307,20 +322,47 @@ pointers[p.oid()] = p return sorted(pointers.values()) -def pointersfromctx(ctx): - """return a dict {path: pointer} for given single changectx""" +def pointerfromctx(ctx, f, removed=False): + """return a pointer for the named file from the given changectx, or None if + the file isn't LFS. + + Optionally, the pointer for a file deleted from the context can be returned. 
+ Since no such pointer is actually stored, and to distinguish from a non LFS + file, this pointer is represented by an empty dict. + """ + _ctx = ctx + if f not in ctx: + if not removed: + return None + if f in ctx.p1(): + _ctx = ctx.p1() + elif f in ctx.p2(): + _ctx = ctx.p2() + else: + return None + fctx = _ctx[f] + if not _islfs(fctx.filelog(), fctx.filenode()): + return None + try: + p = pointer.deserialize(fctx.rawdata()) + if ctx == _ctx: + return p + return {} + except pointer.InvalidPointer as ex: + raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n') + % (f, short(_ctx.node()), ex)) + +def pointersfromctx(ctx, removed=False): + """return a dict {path: pointer} for given single changectx. + + If ``removed`` == True and the LFS file was removed from ``ctx``, the value + stored for the path is an empty dict. + """ result = {} for f in ctx.files(): - if f not in ctx: - continue - fctx = ctx[f] - if not _islfs(fctx.filelog(), fctx.filenode()): - continue - try: - result[f] = pointer.deserialize(fctx.rawdata()) - except pointer.InvalidPointer as ex: - raise error.Abort(_('lfs: corrupted pointer (%s@%s): %s\n') - % (f, short(ctx.node()), ex)) + p = pointerfromctx(ctx, f, removed=removed) + if p is not None: + result[f] = p return result def uploadblobs(repo, pointers):
--- a/hgext/mq.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/mq.py Sun Mar 04 10:42:51 2018 -0500 @@ -86,6 +86,7 @@ hg, localrepo, lock as lockmod, + logcmdutil, patch as patchmod, phases, pycompat, @@ -93,10 +94,11 @@ revsetlang, scmutil, smartset, - subrepo, + subrepoutil, util, vfs as vfsmod, ) +from mercurial.utils import dateutil release = lockmod.release seriesopts = [('s', 'summary', None, _('print first line of patch header'))] @@ -148,9 +150,13 @@ class statusentry(object): def __init__(self, node, name): self.node, self.name = node, name - def __repr__(self): + + def __bytes__(self): return hex(self.node) + ':' + self.name + __str__ = encoding.strmethod(__bytes__) + __repr__ = encoding.strmethod(__bytes__) + # The order of the headers in 'hg export' HG patches: HGHEADERS = [ # '# HG changeset patch', @@ -276,7 +282,7 @@ nodeid = None diffstart = 0 - for line in file(pf): + for line in open(pf, 'rb'): line = line.rstrip() if (line.startswith('diff --git') or (diffstart and line.startswith('+++ '))): @@ -391,12 +397,14 @@ self.comments.append('') self.comments.append(message) - def __str__(self): + def __bytes__(self): s = '\n'.join(self.comments).rstrip() if not s: return '' return s + '\n\n' + __str__ = encoding.strmethod(__bytes__) + def _delmsg(self): '''Remove existing message, keeping the rest of the comments fields. 
If comments contains 'subject: ', message will prepend @@ -438,9 +446,9 @@ def __init__(self, ui, baseui, path, patchdir=None): self.basepath = path try: - fh = open(os.path.join(path, 'patches.queue')) - cur = fh.read().rstrip() - fh.close() + with open(os.path.join(path, 'patches.queue'), r'rb') as fh: + cur = fh.read().rstrip() + if not cur: curpath = os.path.join(path, 'patches') else: @@ -546,10 +554,8 @@ for patchfn in patches: patchf = self.opener(patchfn, 'r') # if the patch was a git patch, refresh it as a git patch - for line in patchf: - if line.startswith('diff --git'): - diffopts.git = True - break + diffopts.git = any(line.startswith('diff --git') + for line in patchf) patchf.close() return diffopts @@ -643,7 +649,7 @@ self.seriesdirty = True def pushable(self, idx): - if isinstance(idx, str): + if isinstance(idx, bytes): idx = self.series.index(idx) patchguards = self.seriesguards[idx] if not patchguards: @@ -691,12 +697,12 @@ def savedirty(self): def writelist(items, path): - fp = self.opener(path, 'w') + fp = self.opener(path, 'wb') for i in items: fp.write("%s\n" % i) fp.close() if self.applieddirty: - writelist(map(str, self.applied), self.statuspath) + writelist(map(bytes, self.applied), self.statuspath) self.applieddirty = False if self.seriesdirty: writelist(self.fullseries, self.seriespath) @@ -717,7 +723,8 @@ try: os.unlink(undo) except OSError as inst: - self.ui.warn(_('error removing undo: %s\n') % str(inst)) + self.ui.warn(_('error removing undo: %s\n') % + util.forcebytestr(inst)) def backup(self, repo, files, copy=False): # backup local changes in --force case @@ -739,8 +746,8 @@ opts = {} stat = opts.get('stat') m = scmutil.match(repo[node1], files, opts) - cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m, - changes, stat, fp) + logcmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m, + changes, stat, fp) def mergeone(self, repo, mergeq, head, patch, rev, diffopts): # first try just applying the patch @@ 
-773,7 +780,7 @@ diffopts = self.patchopts(diffopts, patch) patchf = self.opener(patch, "w") - comments = str(ph) + comments = bytes(ph) if comments: patchf.write(comments) self.printdiff(repo, diffopts, head, n, fp=patchf) @@ -850,7 +857,7 @@ files=files, eolmode=None) return (True, list(files), fuzz) except Exception as inst: - self.ui.note(str(inst) + '\n') + self.ui.note(util.forcebytestr(inst) + '\n') if not self.ui.verbose: self.ui.warn(_("patch failed, unable to continue (try -v)\n")) self.ui.traceback() @@ -963,8 +970,8 @@ wctx = repo[None] pctx = repo['.'] overwrite = False - mergedsubstate = subrepo.submerge(repo, pctx, wctx, wctx, - overwrite) + mergedsubstate = subrepoutil.submerge(repo, pctx, wctx, wctx, + overwrite) files += mergedsubstate.keys() match = scmutil.matchfiles(repo, files or []) @@ -1178,7 +1185,7 @@ except error.Abort: pass i += 1 - name = '%s__%s' % (namebase, i) + name = '%s__%d' % (namebase, i) return name def checkkeepchanges(self, keepchanges, force): @@ -1189,13 +1196,14 @@ """options: msg: a string or a no-argument function returning a string """ + opts = pycompat.byteskwargs(opts) msg = opts.get('msg') edit = opts.get('edit') editform = opts.get('editform', 'mq.qnew') user = opts.get('user') date = opts.get('date') if date: - date = util.parsedate(date) + date = dateutil.parsedate(date) diffopts = self.diffopts({'git': opts.get('git')}, plain=True) if opts.get('checkname', True): self.checkpatchname(patchfn) @@ -1259,13 +1267,13 @@ if user: ph.setuser(user) if date: - ph.setdate('%s %s' % date) + ph.setdate('%d %d' % date) ph.setparent(hex(nctx.p1().node())) msg = nctx.description().strip() if msg == defaultmsg.strip(): msg = '' ph.setmessage(msg) - p.write(str(ph)) + p.write(bytes(ph)) if commitfiles: parent = self.qparents(repo, n) if inclsubs: @@ -1550,12 +1558,8 @@ update = True else: parents = [p.node() for p in repo[None].parents()] - needupdate = False - for entry in self.applied[start:]: - if entry.node in parents: - 
needupdate = True - break - update = needupdate + update = any(entry.node in parents + for entry in self.applied[start:]) tobackup = set() if update: @@ -1632,6 +1636,7 @@ self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts) def refresh(self, repo, pats=None, **opts): + opts = pycompat.byteskwargs(opts) if not self.applied: self.ui.write(_("no patches applied\n")) return 1 @@ -1641,7 +1646,7 @@ newuser = opts.get('user') newdate = opts.get('date') if newdate: - newdate = '%d %d' % util.parsedate(newdate) + newdate = '%d %d' % dateutil.parsedate(newdate) wlock = repo.wlock() try: @@ -1846,7 +1851,7 @@ self.putsubstate2changes(substatestate, c) chunks = patchmod.diff(repo, patchparent, changes=c, opts=diffopts) - comments = str(ph) + comments = bytes(ph) if comments: patchf.write(comments) for chunk in chunks: @@ -1927,7 +1932,7 @@ length = len(self.series) - start if not missing: if self.ui.verbose: - idxwidth = len(str(start + length - 1)) + idxwidth = len("%d" % (start + length - 1)) for i in xrange(start, start + length): patch = self.series[i] if patch in applied: @@ -2093,7 +2098,7 @@ if not self.ui.verbose: p = pname else: - p = str(self.series.index(pname)) + " " + pname + p = ("%d" % self.series.index(pname)) + " " + pname return p def qimport(self, repo, files, patchname=None, rev=None, existing=None, @@ -2260,7 +2265,7 @@ To stop managing a patch and move it into permanent history, use the :hg:`qfinish` command.""" q = repo.mq - q.delete(repo, patches, opts) + q.delete(repo, patches, pycompat.byteskwargs(opts)) q.savedirty() return 0 @@ -2593,7 +2598,7 @@ if not opts.get('user') and opts.get('currentuser'): opts['user'] = ui.username() if not opts.get('date') and opts.get('currentdate'): - opts['date'] = "%d %d" % util.makedate() + opts['date'] = "%d %d" % dateutil.makedate() @command("^qnew", [('e', 'edit', None, _('invoke editor on commit messages')), @@ -3189,7 +3194,7 @@ guards[g] += 1 if ui.verbose: guards['NONE'] = noguards - guards = 
guards.items() + guards = list(guards.items()) guards.sort(key=lambda x: x[0][1:]) if guards: ui.note(_('guards in series file:\n'))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/narrow/TODO.rst Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,37 @@ +Integration with the share extension needs improvement. Right now +we've seen some odd bugs, and the way we modify the contents of the +.hg/shared file is unfortunate. See wrappostshare() and unsharenarrowspec(). + +Resolve commentary on narrowrepo.wraprepo.narrowrepository.status +about the filtering of status being done at an awkward layer. This +came up the import to hgext, but nobody's got concrete improvement +ideas as of then. + +Fold most (or preferably all) of narrowrevlog.py into core. + +Address commentary in narrowrevlog.excludedmanifestrevlog.add - +specifically we should improve the collaboration with core so that +add() never gets called on an excluded directory and we can improve +the stand-in to raise a ProgrammingError. + +Figure out how to correctly produce narrowmanifestrevlog and +narrowfilelog instances instead of monkeypatching regular revlogs at +runtime to our subclass. Even better, merge the narrowing logic +directly into core. + +Reason more completely about rename-filtering logic in +narrowfilelog. There could be some surprises lurking there. + +Formally document the narrowspec format. Unify with sparse, if at all +possible. For bonus points, unify with the server-specified narrowspec +format. + +narrowrepo.setnarrowpats() or narrowspec.save() need to make sure +they're holding the wlock. + +Implement a simple version of the expandnarrow wireproto command for +core. Having configurable shorthands for narrowspecs has been useful +at Google (and sparse has a similar feature from Facebook), so it +probably makes sense to implement the feature in core. (Google's +handler is entirely custom to Google, with a custom format related to +bazel's build language, so it's not in the narrowhg distribution.)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/narrow/__init__.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,95 @@ +# __init__.py - narrowhg extension +# +# Copyright 2017 Google, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. +'''create clones which fetch history data for subset of files (EXPERIMENTAL)''' + +from __future__ import absolute_import + +# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for +# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should +# be specifying the version(s) of Mercurial they are tested with, or +# leave the attribute unspecified. +testedwith = 'ships-with-hg-core' + +from mercurial import ( + changegroup, + extensions, + hg, + localrepo, + registrar, + verify as verifymod, +) + +from . import ( + narrowbundle2, + narrowchangegroup, + narrowcommands, + narrowcopies, + narrowdirstate, + narrowmerge, + narrowpatch, + narrowrepo, + narrowrevlog, + narrowtemplates, + narrowwirepeer, +) + +configtable = {} +configitem = registrar.configitem(configtable) +# Narrowhg *has* support for serving ellipsis nodes (which are used at +# least by Google's internal server), but that support is pretty +# fragile and has a lot of problems on real-world repositories that +# have complex graph topologies. This could probably be corrected, but +# absent someone needing the full support for ellipsis nodes in +# repositories with merges, it's unlikely this work will get done. As +# of this writining in late 2017, all repositories large enough for +# ellipsis nodes to be a hard requirement also enforce strictly linear +# history for other scaling reasons. +configitem('experimental', 'narrowservebrokenellipses', + default=False, + alias=[('narrow', 'serveellipses')], +) + +# Export the commands table for Mercurial to see. 
+cmdtable = narrowcommands.table + +localrepo.localrepository._basesupported.add(changegroup.NARROW_REQUIREMENT) + +def uisetup(ui): + """Wraps user-facing mercurial commands with narrow-aware versions.""" + narrowrevlog.setup() + narrowbundle2.setup() + narrowmerge.setup() + narrowcommands.setup() + narrowchangegroup.setup() + narrowwirepeer.uisetup() + +def reposetup(ui, repo): + """Wraps local repositories with narrow repo support.""" + if not isinstance(repo, localrepo.localrepository): + return + + narrowrepo.wraprepo(repo) + if changegroup.NARROW_REQUIREMENT in repo.requirements: + narrowcopies.setup(repo) + narrowdirstate.setup(repo) + narrowpatch.setup(repo) + narrowwirepeer.reposetup(repo) + +def _verifierinit(orig, self, repo, matcher=None): + # The verifier's matcher argument was desgined for narrowhg, so it should + # be None from core. If another extension passes a matcher (unlikely), + # we'll have to fail until matchers can be composed more easily. + assert matcher is None + orig(self, repo, repo.narrowmatch()) + +def extsetup(ui): + extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit) + extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare) + extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec) + +templatekeyword = narrowtemplates.templatekeyword +revsetpredicate = narrowtemplates.revsetpredicate
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/narrow/narrowbundle2.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,489 @@ +# narrowbundle2.py - bundle2 extensions for narrow repository support +# +# Copyright 2017 Google, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import collections +import errno +import struct + +from mercurial.i18n import _ +from mercurial.node import ( + bin, + nullid, + nullrev, +) +from mercurial import ( + bundle2, + changegroup, + dagutil, + error, + exchange, + extensions, + narrowspec, + repair, + util, + wireproto, +) + +NARROWCAP = 'narrow' +_NARROWACL_SECTION = 'narrowhgacl' +_CHANGESPECPART = NARROWCAP + ':changespec' +_SPECPART = NARROWCAP + ':spec' +_SPECPART_INCLUDE = 'include' +_SPECPART_EXCLUDE = 'exclude' +_KILLNODESIGNAL = 'KILL' +_DONESIGNAL = 'DONE' +_ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text) +_ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text) +_CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER) +_MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER) + +# When advertising capabilities, always include narrow clone support. +def getrepocaps_narrow(orig, repo, **kwargs): + caps = orig(repo, **kwargs) + caps[NARROWCAP] = ['v0'] + return caps + +def _computeellipsis(repo, common, heads, known, match, depth=None): + """Compute the shape of a narrowed DAG. + + Args: + repo: The repository we're transferring. + common: The roots of the DAG range we're transferring. + May be just [nullid], which means all ancestors of heads. + heads: The heads of the DAG range we're transferring. + match: The narrowmatcher that allows us to identify relevant changes. + depth: If not None, only consider nodes to be full nodes if they are at + most depth changesets away from one of heads. 
+ + Returns: + A tuple of (visitnodes, relevant_nodes, ellipsisroots) where: + + visitnodes: The list of nodes (either full or ellipsis) which + need to be sent to the client. + relevant_nodes: The set of changelog nodes which change a file inside + the narrowspec. The client needs these as non-ellipsis nodes. + ellipsisroots: A dict of {rev: parents} that is used in + narrowchangegroup to produce ellipsis nodes with the + correct parents. + """ + cl = repo.changelog + mfl = repo.manifestlog + + cldag = dagutil.revlogdag(cl) + # dagutil does not like nullid/nullrev + commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev]) + headsrevs = cldag.internalizeall(heads) + if depth: + revdepth = {h: 0 for h in headsrevs} + + ellipsisheads = collections.defaultdict(set) + ellipsisroots = collections.defaultdict(set) + + def addroot(head, curchange): + """Add a root to an ellipsis head, splitting heads with 3 roots.""" + ellipsisroots[head].add(curchange) + # Recursively split ellipsis heads with 3 roots by finding the + # roots' youngest common descendant which is an elided merge commit. + # That descendant takes 2 of the 3 roots as its own, and becomes a + # root of the head. + while len(ellipsisroots[head]) > 2: + child, roots = splithead(head) + splitroots(head, child, roots) + head = child # Recurse in case we just added a 3rd root + + def splitroots(head, child, roots): + ellipsisroots[head].difference_update(roots) + ellipsisroots[head].add(child) + ellipsisroots[child].update(roots) + ellipsisroots[child].discard(child) + + def splithead(head): + r1, r2, r3 = sorted(ellipsisroots[head]) + for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)): + mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)', + nr1, head, nr2, head) + for j in mid: + if j == nr2: + return nr2, (nr1, nr2) + if j not in ellipsisroots or len(ellipsisroots[j]) < 2: + return j, (nr1, nr2) + raise error.Abort('Failed to split up ellipsis node! 
head: %d, ' + 'roots: %d %d %d' % (head, r1, r2, r3)) + + missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs)) + visit = reversed(missing) + relevant_nodes = set() + visitnodes = [cl.node(m) for m in missing] + required = set(headsrevs) | known + for rev in visit: + clrev = cl.changelogrevision(rev) + ps = cldag.parents(rev) + if depth is not None: + curdepth = revdepth[rev] + for p in ps: + revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1)) + needed = False + shallow_enough = depth is None or revdepth[rev] <= depth + if shallow_enough: + curmf = mfl[clrev.manifest].read() + if ps: + # We choose to not trust the changed files list in + # changesets because it's not always correct. TODO: could + # we trust it for the non-merge case? + p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read() + needed = bool(curmf.diff(p1mf, match)) + if not needed and len(ps) > 1: + # For merge changes, the list of changed files is not + # helpful, since we need to emit the merge if a file + # in the narrow spec has changed on either side of the + # merge. As a result, we do a manifest diff to check. + p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read() + needed = bool(curmf.diff(p2mf, match)) + else: + # For a root node, we need to include the node if any + # files in the node match the narrowspec. 
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots

def _packellipsischangegroup(repo, common, match, relevant_nodes,
                             ellipsisroots, visitnodes, depth, source, version):
    """Build a changegroup stream containing ellipsis nodes.

    Configures a changegroup packer with the narrow-specific side-channel
    attributes (full_nodes, precomputed_ellipsis, ...) that the wrapped
    packer methods in narrowchangegroup.py consume, then returns the
    chunk generator produced by packer.generate().
    """
    if version in ('01', '02'):
        raise error.Abort(
            'ellipsis nodes require at least cg3 on client and server, '
            'but negotiated version %s' % version)
    # We wrap cg1packer.revchunk, using a side channel to pass
    # relevant_nodes into that area. Then if linknode isn't in the
    # set, we know we have an ellipsis node and we should defer
    # sending that node's data. We override close() to detect
    # pending ellipsis nodes and flush them.
    packer = changegroup.getbundler(version, repo)
    # Let the packer have access to the narrow matcher so it can
    # omit filelogs and dirlogs as needed
    packer._narrow_matcher = lambda : match
    # Give the packer the list of nodes which should not be
    # ellipsis nodes. We store this rather than the set of nodes
    # that should be an ellipsis because for very large histories
    # we expect this to be significantly smaller.
    packer.full_nodes = relevant_nodes
    # Maps ellipsis revs to their roots at the changelog level.
    packer.precomputed_ellipsis = ellipsisroots
    # Maps CL revs to per-revlog revisions. Cleared in close() at
    # the end of each group.
    packer.clrev_to_localrev = {}
    packer.next_clrev_to_localrev = {}
    # Maps changelog nodes to changelog revs. Filled in once
    # during changelog stage and then left unmodified.
    packer.clnode_to_rev = {}
    packer.changelog_done = False
    # If true, informs the packer that it is serving shallow content and might
    # need to pack file contents not introduced by the changes being packed.
    packer.is_shallow = depth is not None

    return packer.generate(common, visitnodes, False, source)

# Serve a changegroup for a client with a narrow clone.
def getbundlechangegrouppart_narrow(bundler, repo, source,
                                    bundlecaps=None, b2caps=None, heads=None,
                                    common=None, **kwargs):
    """Add a narrowed 'changegroup' part (plus narrowspec parts) to bundler.

    Without the experimental.narrowservebrokenellipses config this simply
    serves a matcher-filtered changegroup; with it, ellipsis nodes are
    computed and packed via _computeellipsis/_packellipsischangegroup.
    """
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    else:
        raise ValueError(_("server does not advertise changegroup version,"
                           " can't negotiate support for ellipsis nodes"))

    include = sorted(filter(bool, kwargs.get(r'includepats', [])))
    exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
    newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
    if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
        outgoing = exchange._computeoutgoing(repo, heads, common)
        if not outgoing.missing:
            return
        def wrappedgetbundler(orig, *args, **kwargs):
            bundler = orig(*args, **kwargs)
            bundler._narrow_matcher = lambda : newmatch
            return bundler
        with extensions.wrappedfunction(changegroup, 'getbundler',
                                        wrappedgetbundler):
            cg = changegroup.makestream(repo, outgoing, version, source)
        part = bundler.newpart('changegroup', data=cg)
        part.addparam('version', version)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

        if include or exclude:
            narrowspecpart = bundler.newpart(_SPECPART)
            if include:
                narrowspecpart.addparam(
                    _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
            if exclude:
                narrowspecpart.addparam(
                    _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)

        return

    depth = kwargs.get(r'depth', None)
    if depth is not None:
        depth = int(depth)
        if depth < 1:
            raise error.Abort(_('depth must be positive, got %d') % depth)

    heads = set(heads or repo.heads())
    common = set(common or [nullid])
    oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
    oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
    known = {bin(n) for n in kwargs.get(r'known', [])}
    if known and (oldinclude != include or oldexclude != exclude):
        # Steps:
        # 1. Send kill for "$known & ::common"
        #
        # 2. Send changegroup for ::common
        #
        # 3. Proceed.
        #
        # In the future, we can send kills for only the specific
        # nodes we know should go away or change shape, and then
        # send a data stream that tells the client something like this:
        #
        # a) apply this changegroup
        # b) apply nodes XXX, YYY, ZZZ that you already have
        # c) goto a
        #
        # until they've built up the full new state.
        # Convert to revnums and intersect with "common". The client should
        # have made it a subset of "common" already, but let's be safe.
        known = set(repo.revs("%ln & ::%ln", known, common))
        # TODO: we could send only roots() of this set, and the
        # list of nodes in common, and the client could work out
        # what to strip, instead of us explicitly sending every
        # single node.
        deadrevs = known
        def genkills():
            for r in deadrevs:
                yield _KILLNODESIGNAL
                yield repo.changelog.node(r)
            yield _DONESIGNAL
        bundler.newpart(_CHANGESPECPART, data=genkills())
        newvisit, newfull, newellipsis = _computeellipsis(
            repo, set(), common, known, newmatch)
        if newvisit:
            cg = _packellipsischangegroup(
                repo, common, newmatch, newfull, newellipsis,
                newvisit, depth, source, version)
            part = bundler.newpart('changegroup', data=cg)
            part.addparam('version', version)
            if 'treemanifest' in repo.requirements:
                part.addparam('treemanifest', '1')

    visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
        repo, common, heads, set(), newmatch, depth=depth)

    repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
    if visitnodes:
        cg = _packellipsischangegroup(
            repo, common, newmatch, relevant_nodes, ellipsisroots,
            visitnodes, depth, source, version)
        part = bundler.newpart('changegroup', data=cg)
        part.addparam('version', version)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

def applyacl_narrow(repo, kwargs):
    """Restrict a client's requested narrow patterns to what the
    [narrowacl] config allows for the requesting user.

    Returns a copy of kwargs with includepats/excludepats replaced by the
    restricted sets; aborts if the user has no configured includes or
    requested includes outside the allowed set.
    """
    ui = repo.ui
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args['includepats'] = req_includes
    if req_excludes:
        new_args['excludepats'] = req_excludes
    return new_args

@bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
def _handlechangespec_2(op, inpart):
    """Apply a narrowspec bundle2 part: record the narrow requirement and
    store the include/exclude patterns sent by the server."""
    includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
    excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
    if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
        op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
        op.repo._writerequirements()
    op.repo.setnarrowpats(includepats, excludepats)

@bundle2.parthandler(_CHANGESPECPART)
def _handlechangespec(op, inpart):
    """Apply a changespec bundle2 part: strip changesets the server says
    must be replaced (killed ellipsis nodes) before the new changegroup
    arrives."""
    repo = op.repo
    cl = repo.changelog

    # changesets which need to be stripped entirely. either they're no longer
    # needed in the new narrow spec, or the server is sending a replacement
    # in the changegroup part.
    clkills = set()

    # A changespec part contains all the updates to ellipsis nodes
    # that will happen as a result of widening or narrowing a
    # repo. All the changes that this block encounters are ellipsis
    # nodes or flags to kill an existing ellipsis.
    chunksignal = changegroup.readexactly(inpart, 4)
    while chunksignal != _DONESIGNAL:
        if chunksignal == _KILLNODESIGNAL:
            # a node used to be an ellipsis but isn't anymore
            ck = changegroup.readexactly(inpart, 20)
            if cl.hasnode(ck):
                clkills.add(ck)
        else:
            raise error.Abort(
                _('unexpected changespec node chunk type: %s') % chunksignal)
        chunksignal = changegroup.readexactly(inpart, 4)

    if clkills:
        # preserve bookmarks that repair.strip() would otherwise strip
        bmstore = repo._bookmarks
        class dummybmstore(dict):
            def applychanges(self, repo, tr, changes):
                pass
            def recordchange(self, tr): # legacy version
                pass
        repo._bookmarks = dummybmstore()
        chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
                                 topic='widen')
        repo._bookmarks = bmstore
        if chgrpfile:
            # presence of _widen_bundle attribute activates widen handler later
            op._widen_bundle = chgrpfile
    # Set the new narrowspec if we're widening. The setnewnarrowpats() method
    # will currently always be there when using the core+narrowhg server, but
    # other servers may include a changespec part even when not widening (e.g.
    # because we're deepening a shallow repo).
    if util.safehasattr(repo, 'setnewnarrowpats'):
        repo.setnewnarrowpats()

def handlechangegroup_widen(op, inpart):
    """Changegroup exchange handler which restores temporarily-stripped nodes"""
    # We saved a bundle with stripped node data we must now restore.
    # This approach is based on mercurial/repair.py@6ee26a53c111.
    repo = op.repo
    ui = op.ui

    chgrpfile = op._widen_bundle
    del op._widen_bundle
    vfs = repo.vfs

    ui.note(_("adding branch\n"))
    f = vfs.open(chgrpfile, "rb")
    try:
        gen = exchange.readbundle(ui, f, chgrpfile, vfs)
        if not ui.verbose:
            # silence internal shuffling chatter
            ui.pushbuffer()
        if isinstance(gen, bundle2.unbundle20):
            with repo.transaction('strip') as tr:
                bundle2.processbundle(repo, gen, lambda: tr)
        else:
            gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
        if not ui.verbose:
            ui.popbuffer()
    finally:
        f.close()

    # remove undo files
    for undovfs, undofile in repo.undofiles():
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                ui.warn(_('error removing %s: %s\n') %
                        (undovfs.join(undofile), util.forcebytestr(e)))

    # Remove partial backup only if there were no exceptions
    vfs.unlink(chgrpfile)

def setup():
    """Enable narrow repo support in bundle2-related extension points."""
    extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)

    # Advertise/accept the narrow-specific getbundle arguments.
    wireproto.gboptsmap['narrow'] = 'boolean'
    wireproto.gboptsmap['depth'] = 'plain'
    wireproto.gboptsmap['oldincludepats'] = 'csv'
    wireproto.gboptsmap['oldexcludepats'] = 'csv'
    wireproto.gboptsmap['includepats'] = 'csv'
    wireproto.gboptsmap['excludepats'] = 'csv'
    wireproto.gboptsmap['known'] = 'csv'

    # Extend changegroup serving to handle requests from narrow clients.
    origcgfn = exchange.getbundle2partsmapping['changegroup']
    def wrappedcgfn(*args, **kwargs):
        repo = args[1]
        if repo.ui.has_section(_NARROWACL_SECTION):
            getbundlechangegrouppart_narrow(
                *args, **applyacl_narrow(repo, kwargs))
        elif kwargs.get(r'narrow', False):
            getbundlechangegrouppart_narrow(*args, **kwargs)
        else:
            origcgfn(*args, **kwargs)
    exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn

    # Extend changegroup receiver so client can fixup after widen requests.
    origcghandler = bundle2.parthandlermapping['changegroup']
    def wrappedcghandler(op, inpart):
        origcghandler(op, inpart)
        if util.safehasattr(op, '_widen_bundle'):
            handlechangegroup_widen(op, inpart)
    wrappedcghandler.params = origcghandler.params
    bundle2.parthandlermapping['changegroup'] = wrappedcghandler
# narrowchangegroup.py - narrow clone changegroup creation and consumption
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial import (
    changegroup,
    error,
    extensions,
    manifest,
    match as matchmod,
    mdiff,
    node,
    revlog,
    util,
)

def setup():
    """Monkey-patch the changegroup packers (cg1packer/cg2packer) so that
    changegroup generation honors the narrow matcher and can emit
    ellipsis nodes.

    The narrow-specific packer attributes (full_nodes,
    precomputed_ellipsis, clrev_to_localrev, ...) consulted below are
    installed on the packer by _packellipsischangegroup in
    narrowbundle2.py; their absence means a normal, non-narrow bundle.
    """

    def _cgmatcher(cgpacker):
        # Intersect the local narrow matcher with the matcher supplied by
        # the remote request (if any) via the _narrow_matcher side channel.
        localmatcher = cgpacker._repo.narrowmatch()
        remotematcher = getattr(cgpacker, '_narrow_matcher', lambda: None)()
        if remotematcher:
            return matchmod.intersectmatchers(localmatcher, remotematcher)
        else:
            return localmatcher

    def prune(orig, self, revlog, missing, commonrevs):
        # Skip whole manifest dirlogs whose directory the narrow matcher
        # will never visit.
        if isinstance(revlog, manifest.manifestrevlog):
            matcher = _cgmatcher(self)
            if (matcher and
                not matcher.visitdir(revlog._dir[:-1] or '.')):
                return []
        return orig(self, revlog, missing, commonrevs)

    extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)

    def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
                      source):
        matcher = _cgmatcher(self)
        if matcher:
            changedfiles = list(filter(matcher, changedfiles))
        if getattr(self, 'is_shallow', False):
            # See comment in generate() for why this sadness is a thing.
            mfdicts = self._mfdicts
            del self._mfdicts
            # In a shallow clone, the linknodes callback needs to also include
            # those file nodes that are in the manifests we sent but weren't
            # introduced by those manifests.
            commonctxs = [self._repo[c] for c in commonrevs]
            oldlinknodes = linknodes
            clrev = self._repo.changelog.rev
            def linknodes(flog, fname):
                for c in commonctxs:
                    try:
                        fnode = c.filenode(fname)
                        self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
                    except error.ManifestLookupError:
                        pass
                links = oldlinknodes(flog, fname)
                if len(links) != len(mfdicts):
                    for mf, lr in mfdicts:
                        fnode = mf.get(fname, None)
                        if fnode in links:
                            links[fnode] = min(links[fnode], lr, key=clrev)
                        elif fnode:
                            links[fnode] = lr
                return links
        return orig(self, changedfiles, linknodes, commonrevs, source)
    extensions.wrapfunction(
        changegroup.cg1packer, 'generatefiles', generatefiles)

    def ellipsisdata(packer, rev, revlog_, p1, p2, data, linknode):
        """Build a single changegroup chunk for rev, flagged with
        REVIDX_ELLIPSIS and carrying a full (trivial-diff) payload."""
        n = revlog_.node(rev)
        p1n, p2n = revlog_.node(p1), revlog_.node(p2)
        flags = revlog_.flags(rev)
        flags |= revlog.REVIDX_ELLIPSIS
        meta = packer.builddeltaheader(
            n, p1n, p2n, node.nullid, linknode, flags)
        # TODO: try and actually send deltas for ellipsis data blocks
        diffheader = mdiff.trivialdiffheader(len(data))
        l = len(meta) + len(diffheader) + len(data)
        return ''.join((changegroup.chunkheader(l),
                        meta,
                        diffheader,
                        data))

    def close(orig, self):
        # Reset per-group bookkeeping and promote the mappings gathered
        # during the changelog stage for use by the following groups.
        getattr(self, 'clrev_to_localrev', {}).clear()
        if getattr(self, 'next_clrev_to_localrev', {}):
            self.clrev_to_localrev = self.next_clrev_to_localrev
            del self.next_clrev_to_localrev
        self.changelog_done = True
        return orig(self)
    extensions.wrapfunction(changegroup.cg1packer, 'close', close)

    # In a perfect world, we'd generate better ellipsis-ified graphs
    # for non-changelog revlogs. In practice, we haven't started doing
    # that yet, so the resulting DAGs for the manifestlog and filelogs
    # are actually full of bogus parentage on all the ellipsis
    # nodes. This has the side effect that, while the contents are
    # correct, the individual DAGs might be completely out of whack in
    # a case like 882681bc3166 and its ancestors (back about 10
    # revisions or so) in the main hg repo.
    #
    # The one invariant we *know* holds is that the new (potentially
    # bogus) DAG shape will be valid if we order the nodes in the
    # order that they're introduced in dramatis personae by the
    # changelog, so what we do is we sort the non-changelog histories
    # by the order in which they are used by the changelog.
    def _sortgroup(orig, self, revlog, nodelist, lookup):
        if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
            return orig(self, revlog, nodelist, lookup)
        key = lambda n: self.clnode_to_rev[lookup(n)]
        return [revlog.rev(n) for n in sorted(nodelist, key=key)]

    extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)

    def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        # Note: other than delegating to orig, the only deviation in
        # logic from normal hg's generate is marked with BEGIN/END
        # NARROW HACK.
        if not util.safehasattr(self, 'full_nodes'):
            # not sending a narrow bundle
            for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
                yield x
            return

        repo = self._repo
        cl = repo.changelog
        mfl = repo.manifestlog
        mfrevlog = mfl._revlog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            # BEGIN NARROW HACK
            #
            # Only update mfs if x is going to be sent. Otherwise we
            # end up with bogus linkrevs specified for manifests and
            # we skip some manifest nodes that we should otherwise
            # have sent.
            if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
                n = c[0]
                # record the first changeset introducing this manifest version
                mfs.setdefault(n, x)
                # Set this narrow-specific dict so we have the lowest manifest
                # revnum to look up for this cl revnum. (Part of mapping
                # changelog ellipsis parents to manifest ellipsis parents)
                self.next_clrev_to_localrev.setdefault(cl.rev(x),
                                                       mfrevlog.rev(n))
                # We can't trust the changed files list in the changeset if the
                # client requested a shallow clone.
                if self.is_shallow:
                    changedfiles.update(mfl[c[0]].read().keys())
                else:
                    changedfiles.update(c[3])
            # END NARROW HACK
            # Record a complete list of potentially-changed files in
            # this manifest.
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)
        # Shallow clones also don't work correctly with fastpathlinkrev
        # because file nodes may need to be sent for a manifest even if they
        # weren't introduced by that manifest.
        fastpathlinkrev = fastpathlinkrev and not self.is_shallow

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                                            fastpathlinkrev, mfs, fnodes,
                                            source):
            yield chunk
        # BEGIN NARROW HACK
        mfdicts = None
        if self.is_shallow:
            mfdicts = [(self._repo.manifestlog[n].read(), lr)
                       for (n, lr) in mfs.iteritems()]
        # END NARROW HACK
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        # BEGIN NARROW HACK
        #
        # We need to pass the mfdicts variable down into
        # generatefiles(), but more than one command might have
        # wrapped generatefiles so we can't modify the function
        # signature. Instead, we pass the data to ourselves using an
        # instance attribute. I'm sorry.
        self._mfdicts = mfdicts
        # END NARROW HACK
        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
    extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)

    def revchunk(orig, self, revlog, rev, prev, linknode):
        if not util.safehasattr(self, 'full_nodes'):
            # not sending a narrow changegroup
            for x in orig(self, revlog, rev, prev, linknode):
                yield x
            return
        # build up some mapping information that's useful later. See
        # the local() nested function below.
        if not self.changelog_done:
            self.clnode_to_rev[linknode] = rev
            linkrev = rev
            self.clrev_to_localrev[linkrev] = rev
        else:
            linkrev = self.clnode_to_rev[linknode]
            self.clrev_to_localrev[linkrev] = rev
        # This is a node to send in full, because the changeset it
        # corresponds to was a full changeset.
        if linknode in self.full_nodes:
            for x in orig(self, revlog, rev, prev, linknode):
                yield x
            return
        # At this point, a node can either be one we should skip or an
        # ellipsis. If it's not an ellipsis, bail immediately.
        if linkrev not in self.precomputed_ellipsis:
            return
        linkparents = self.precomputed_ellipsis[linkrev]
        def local(clrev):
            """Turn a changelog revnum into a local revnum.

            The ellipsis dag is stored as revnums on the changelog,
            but when we're producing ellipsis entries for
            non-changelog revlogs, we need to turn those numbers into
            something local. This does that for us, and during the
            changelog sending phase will also expand the stored
            mappings as needed.
            """
            if clrev == node.nullrev:
                return node.nullrev
            if not self.changelog_done:
                # If we're doing the changelog, it's possible that we
                # have a parent that is already on the client, and we
                # need to store some extra mapping information so that
                # our contained ellipsis nodes will be able to resolve
                # their parents.
                if clrev not in self.clrev_to_localrev:
                    clnode = revlog.node(clrev)
                    self.clnode_to_rev[clnode] = clrev
                return clrev
            # Walk the ellipsis-ized changelog breadth-first looking for a
            # change that has been linked from the current revlog.
            #
            # For a flat manifest revlog only a single step should be necessary
            # as all relevant changelog entries are relevant to the flat
            # manifest.
            #
            # For a filelog or tree manifest dirlog however not every changelog
            # entry will have been relevant, so we need to skip some changelog
            # nodes even after ellipsis-izing.
            walk = [clrev]
            while walk:
                p = walk[0]
                walk = walk[1:]
                if p in self.clrev_to_localrev:
                    return self.clrev_to_localrev[p]
                elif p in self.full_nodes:
                    walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
                                 if pp != node.nullrev])
                elif p in self.precomputed_ellipsis:
                    walk.extend([pp for pp in self.precomputed_ellipsis[p]
                                 if pp != node.nullrev])
                else:
                    # In this case, we've got an ellipsis with parents
                    # outside the current bundle (likely an
                    # incremental pull). We "know" that we can use the
                    # value of this same revlog at whatever revision
                    # is pointed to by linknode. "Know" is in scare
                    # quotes because I haven't done enough examination
                    # of edge cases to convince myself this is really
                    # a fact - it works for all the (admittedly
                    # thorough) cases in our testsuite, but I would be
                    # somewhat unsurprised to find a case in the wild
                    # where this breaks down a bit. That said, I don't
                    # know if it would hurt anything.
                    for i in xrange(rev, 0, -1):
                        if revlog.linkrev(i) == clrev:
                            return i
                    # We failed to resolve a parent for this node, so
                    # we crash the changegroup construction.
                    raise error.Abort(
                        'unable to resolve parent while packing %r %r'
                        ' for changeset %r' % (revlog.indexfile, rev, clrev))
            return node.nullrev

        if not linkparents or (
            revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
            p1, p2 = node.nullrev, node.nullrev
        elif len(linkparents) == 1:
            p1, = sorted(local(p) for p in linkparents)
            p2 = node.nullrev
        else:
            p1, p2 = sorted(local(p) for p in linkparents)
        yield ellipsisdata(
            self, rev, revlog, p1, p2, revlog.revision(rev), linknode)
    extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)

    def deltaparent(orig, self, revlog, rev, p1, p2, prev):
        if util.safehasattr(self, 'full_nodes'):
            # TODO: send better deltas when in narrow mode.
            #
            # changegroup.group() loops over revisions to send,
            # including revisions we'll skip. What this means is that
            # `prev` will be a potentially useless delta base for all
            # ellipsis nodes, as the client likely won't have it. In
            # the future we should do bookkeeping about which nodes
            # have been sent to the client, and try to be
            # significantly smarter about delta bases. This is
            # slightly tricky because this same code has to work for
            # all revlogs, and we don't have the linkrev/linknode here.
            return p1
        return orig(self, revlog, rev, p1, p2, prev)
    extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
# narrowcommands.py - command modifications for narrowhg extension
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import itertools

from mercurial.i18n import _
from mercurial import (
    changegroup,
    cmdutil,
    commands,
    discovery,
    error,
    exchange,
    extensions,
    hg,
    merge,
    narrowspec,
    node,
    pycompat,
    registrar,
    repair,
    repoview,
    util,
)

from . import (
    narrowbundle2,
)

table = {}
command = registrar.command(table)

def setup():
    """Wraps user-facing mercurial commands with narrow-aware versions."""

    entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
    entry[1].append(('', 'narrow', None,
                     _("create a narrow clone of select files")))
    entry[1].append(('', 'depth', '',
                     _("limit the history fetched by distance from heads")))
    # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
    if 'sparse' not in extensions.enabled():
        entry[1].append(('', 'include', [],
                         _("specifically fetch this file/directory")))
        entry[1].append(
            ('', 'exclude', [],
             _("do not fetch this file/directory, even if included")))

    entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
    entry[1].append(('', 'depth', '',
                     _("limit the history fetched by distance from heads")))

    extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)

def expandpull(pullop, includepats, excludepats):
    """Ask the remote to expand an abbreviated narrowspec, if needed.

    Returns the (possibly expanded) include/exclude patterns as sets.
    """
    if not narrowspec.needsexpansion(includepats):
        return includepats, excludepats

    heads = pullop.heads or pullop.rheads
    includepats, excludepats = pullop.remote.expandnarrow(
        includepats, excludepats, heads)
    pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % (
        includepats, excludepats))
    return set(includepats), set(excludepats)

def clonenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
    opts = pycompat.byteskwargs(opts)
    wrappedextraprepare = util.nullcontextmanager()
    opts_narrow = opts['narrow']
    if opts_narrow:
        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
            # Create narrow spec patterns from clone flags
            includepats = narrowspec.parsepatterns(opts['include'])
            excludepats = narrowspec.parsepatterns(opts['exclude'])

            # If necessary, ask the server to expand the narrowspec.
            includepats, excludepats = expandpull(
                pullop, includepats, excludepats)

            if not includepats and excludepats:
                # If nothing was included, we assume the user meant to include
                # everything, except what they asked to exclude.
                includepats = {'path:.'}

            pullop.repo.setnarrowpats(includepats, excludepats)

            # This will populate 'includepats' etc with the values from the
            # narrowspec we just saved.
            orig(pullop, kwargs)

            if opts.get('depth'):
                kwargs['depth'] = opts['depth']
        wrappedextraprepare = extensions.wrappedfunction(exchange,
            '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

    def pullnarrow(orig, repo, *args, **kwargs):
        # Record the narrow requirement before the initial pull so the
        # repo is treated as narrow from the start.
        if opts_narrow:
            repo.requirements.add(changegroup.NARROW_REQUIREMENT)
            repo._writerequirements()

        return orig(repo, *args, **kwargs)

    wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)

    with wrappedextraprepare, wrappedpull:
        return orig(ui, repo, *args, **pycompat.strkwargs(opts))

def pullnarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps pull command to allow modifying narrow spec."""
    wrappedextraprepare = util.nullcontextmanager()
    if changegroup.NARROW_REQUIREMENT in repo.requirements:

        def pullbundle2extraprepare_widen(orig, pullop, kwargs):
            orig(pullop, kwargs)
            if opts.get(r'depth'):
                kwargs['depth'] = opts[r'depth']
        wrappedextraprepare = extensions.wrappedfunction(exchange,
            '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

    with wrappedextraprepare:
        return orig(ui, repo, *args, **opts)

def archivenarrowcmd(orig, ui, repo, *args, **opts):
    """Wraps archive command to narrow the default includes."""
    if changegroup.NARROW_REQUIREMENT in repo.requirements:
        repo_includes, repo_excludes = repo.narrowpats
        includes = set(opts.get(r'include', []))
        excludes = set(opts.get(r'exclude', []))
        includes, excludes, unused_invalid = narrowspec.restrictpatterns(
            includes, excludes, repo_includes, repo_excludes)
        if includes:
            opts[r'include'] = includes
        if excludes:
            opts[r'exclude'] = excludes
    return orig(ui, repo, *args, **opts)

def pullbundle2extraprepare(orig, pullop, kwargs):
    """Add the narrow-clone arguments to a bundle2 pull request."""
    repo = pullop.repo
    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
        return orig(pullop, kwargs)

    if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
        raise error.Abort(_("server doesn't support narrow clones"))
    orig(pullop, kwargs)
    kwargs['narrow'] = True
    include, exclude = repo.narrowpats
    kwargs['oldincludepats'] = include
    kwargs['oldexcludepats'] = exclude
    kwargs['includepats'] = include
    kwargs['excludepats'] = exclude
    kwargs['known'] = [node.hex(ctx.node()) for ctx in
                       repo.set('::%ln', pullop.common)
                       if ctx.node() != node.nullid]
    if not kwargs['known']:
        # Mercurial serialized an empty list as '' and deserializes it as
        # [''], so delete it instead to avoid handling the empty string on the
        # server.
        del kwargs['known']

extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
                        pullbundle2extraprepare)

def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
            newincludes, newexcludes, force):
    """Shrink the narrowspec: strip local-only history touching newly
    excluded paths, delete the corresponding store files, and drop the
    files from the dirstate and working copy."""
    oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    # This is essentially doing "hg outgoing" to find all local-only
    # commits. We will then check that the local-only commits don't
    # have any changes to files that will be untracked.
    unfi = repo.unfiltered()
    outgoing = discovery.findcommonoutgoing(unfi, remote,
                                            commoninc=commoninc)
    ui.status(_('looking for local changes to affected paths\n'))
    localnodes = []
    for n in itertools.chain(outgoing.missing, outgoing.excluded):
        if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
            localnodes.append(n)
    revstostrip = unfi.revs('descendants(%ln)', localnodes)
    hiddenrevs = repoview.filterrevs(repo, 'visible')
    visibletostrip = list(repo.changelog.node(r)
                          for r in (revstostrip - hiddenrevs))
    if visibletostrip:
        ui.status(_('The following changeset(s) or their ancestors have '
                    'local changes not on the remote:\n'))
        maxnodes = 10
        if ui.verbose or len(visibletostrip) <= maxnodes:
            for n in visibletostrip:
                ui.status('%s\n' % node.short(n))
        else:
            for n in visibletostrip[:maxnodes]:
                ui.status('%s\n' % node.short(n))
            ui.status(_('...and %d more, use --verbose to list all\n') %
                      (len(visibletostrip) - maxnodes))
        if not force:
            raise error.Abort(_('local changes found'),
                              hint=_('use --force-delete-local-changes to '
                                     'ignore'))

    if revstostrip:
        tostrip = [unfi.changelog.node(r) for r in revstostrip]
        if repo['.'].node() in tostrip:
            # stripping working copy, so move to a different commit first
            urev = max(repo.revs('(::%n) - %ln + null',
                                 repo['.'].node(), visibletostrip))
            hg.clean(repo, urev)
        repair.strip(ui, unfi, tostrip, topic='narrow')

    todelete = []
    for f, f2, size in repo.store.datafiles():
        if f.startswith('data/'):
            file = f[5:-2]
            if not newmatch(file):
                todelete.append(f)
        elif f.startswith('meta/'):
            # tree manifest dirlog; keep it only if the new matcher may
            # visit some directory at or below it
            dir = f[5:-13]
            dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
            include = True
            for d in dirs:
                visit = newmatch.visitdir(d)
                if not visit:
                    include = False
                    break
                if visit == 'all':
                    break
            if not include:
                todelete.append(f)

    repo.destroying()

    with repo.transaction("narrowing"):
        for f in todelete:
            ui.status(_('deleting %s\n') % f)
            util.unlinkpath(repo.svfs.join(f))
            repo.store.markremoved(f)

        for f in repo.dirstate:
            if not newmatch(f):
                repo.dirstate.drop(f)
                repo.wvfs.unlinkpath(f)
        repo.setnarrowpats(newincludes, newexcludes)

    repo.destroyed()

def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
    """Grow the narrowspec: pull the newly included data from the remote
    and materialize the newly tracked files in the working copy."""
    newmatch = narrowspec.match(repo.root, newincludes, newexcludes)

    # TODO(martinvonz): Get expansion working with widening/narrowing.
    if narrowspec.needsexpansion(newincludes):
        raise error.Abort('Expansion not yet supported on pull')

    def pullbundle2extraprepare_widen(orig, pullop, kwargs):
        orig(pullop, kwargs)
        # The old{in,ex}cludepats have already been set by orig()
        kwargs['includepats'] = newincludes
        kwargs['excludepats'] = newexcludes
    wrappedextraprepare = extensions.wrappedfunction(exchange,
        '_pullbundle2extraprepare', pullbundle2extraprepare_widen)

    # define a function that narrowbundle2 can call after creating the
    # backup bundle, but before applying the bundle from the server
    def setnewnarrowpats():
        repo.setnarrowpats(newincludes, newexcludes)
    repo.setnewnarrowpats = setnewnarrowpats

    ds = repo.dirstate
    p1, p2 = ds.p1(), ds.p2()
    # Temporarily detach the dirstate parents around the pull; restored
    # just below.
    with ds.parentchange():
        ds.setparents(node.nullid, node.nullid)
    common = commoninc[0]
    with wrappedextraprepare:
        exchange.pull(repo, remote, heads=common)
    with ds.parentchange():
        ds.setparents(p1, p2)

    actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
    addgaction = actions['g'].append

    mf = repo['.'].manifest().matches(newmatch)
    for f, fn in mf.iteritems():
        if f not in repo.dirstate:
            addgaction((f, (mf.flags(f), False),
                        "add from widened narrow clone"))

    merge.applyupdates(repo, actions, wctx=repo[None],
                       mctx=repo['.'], overwrite=False)
    merge.recordupdates(repo, actions, branchmerge=False)

# TODO(rdamazio): Make new matcher format and update description
@command('tracked',
    [('', 'addinclude', [], _('new paths to include')),
     ('', 'removeinclude', [], _('old paths to no longer include')),
     ('', 'addexclude', [], _('new paths to exclude')),
     ('', 'removeexclude', [], _('old paths to no longer exclude')),
     ('', 'clear', False, _('whether to replace the existing narrowspec')),
     ('', 'force-delete-local-changes', False,
      _('forces deletion of local changes when narrowing')),
    ] + commands.remoteopts,
    _('[OPTIONS]... [REMOTE]'),
    inferrepo=True)
def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
    """show or change the current narrowspec

    With no argument, shows the current narrowspec entries, one per line. Each
    line will be prefixed with 'I' or 'X' for included or excluded patterns,
    respectively.

    The narrowspec is comprised of expressions to match remote files and/or
    directories that should be pulled into your client.
    The narrowspec has *include* and *exclude* expressions, with excludes always
    trumping includes: that is, if a file matches an exclude expression, it will
    be excluded even if it also matches an include expression.
    Excluding files that were never included has no effect.

    Each included or excluded entry is in the format described by
    'hg help patterns'.

    The options allow you to add or remove included and excluded expressions.

    If --clear is specified, then all previous includes and excludes are DROPPED
    and replaced by the new ones specified to --addinclude and --addexclude.
    If --clear is specified without any further options, the narrowspec will be
    empty and will not match any files.
    """
    opts = pycompat.byteskwargs(opts)
    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
        ui.warn(_('The narrow command is only supported on respositories cloned'
                  ' with --narrow.\n'))
        return 1

    # Before supporting, decide whether it "hg tracked --clear" should mean
+ if opts['clear']: + ui.warn(_('The --clear option is not yet supported.\n')) + return 1 + + if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']): + raise error.Abort('Expansion not yet supported on widen/narrow') + + addedincludes = narrowspec.parsepatterns(opts['addinclude']) + removedincludes = narrowspec.parsepatterns(opts['removeinclude']) + addedexcludes = narrowspec.parsepatterns(opts['addexclude']) + removedexcludes = narrowspec.parsepatterns(opts['removeexclude']) + widening = addedincludes or removedexcludes + narrowing = removedincludes or addedexcludes + only_show = not widening and not narrowing + + # Only print the current narrowspec. + if only_show: + include, exclude = repo.narrowpats + + ui.pager('tracked') + fm = ui.formatter('narrow', opts) + for i in sorted(include): + fm.startitem() + fm.write('status', '%s ', 'I', label='narrow.included') + fm.write('pat', '%s\n', i, label='narrow.included') + for i in sorted(exclude): + fm.startitem() + fm.write('status', '%s ', 'X', label='narrow.excluded') + fm.write('pat', '%s\n', i, label='narrow.excluded') + fm.end() + return 0 + + with repo.wlock(), repo.lock(): + cmdutil.bailifchanged(repo) + + # Find the revisions we have in common with the remote. These will + # be used for finding local-only changes for narrowing. They will + # also define the set of revisions to update for widening. 
+ remotepath = ui.expandpath(remotepath or 'default') + url, branches = hg.parseurl(remotepath) + ui.status(_('comparing with %s\n') % util.hidepassword(url)) + remote = hg.peer(repo, opts, url) + commoninc = discovery.findcommonincoming(repo, remote) + + oldincludes, oldexcludes = repo.narrowpats + if narrowing: + newincludes = oldincludes - removedincludes + newexcludes = oldexcludes | addedexcludes + _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes, + newincludes, newexcludes, + opts['force_delete_local_changes']) + # _narrow() updated the narrowspec and _widen() below needs to + # use the updated values as its base (otherwise removed includes + # and addedexcludes will be lost in the resulting narrowspec) + oldincludes = newincludes + oldexcludes = newexcludes + + if widening: + newincludes = oldincludes | addedincludes + newexcludes = oldexcludes - removedexcludes + _widen(ui, repo, remote, commoninc, newincludes, newexcludes) + + return 0
# narrowcopies.py - extensions to mercurial copies module to support narrow
# clones
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    copies,
    extensions,
)

def setup(repo):
    """Wrap copy-tracing helpers so they skip files outside the narrowspec.

    Installs wrappers on ``copies._computeforwardmissing`` and
    ``copies._checkcopies`` that filter out (or no-op on) any file not
    matched by ``repo.narrowmatch()``.
    """
    def _computeforwardmissing(orig, a, b, match=None):
        # Start from the stock result; if the narrow matcher matches
        # everything there is nothing to trim.
        missing = orig(a, b, match)
        matcher = repo.narrowmatch()
        if matcher.always():
            return missing
        return [f for f in missing if matcher(f)]

    def _checkcopies(orig, srcctx, dstctx, f, base, tca, remotebase, limit,
                     data):
        # Copy checking for a file outside the narrow clone is a no-op.
        matcher = repo.narrowmatch()
        if not matcher(f):
            return
        orig(srcctx, dstctx, f, base, tca, remotebase, limit, data)

    extensions.wrapfunction(copies, '_computeforwardmissing',
                            _computeforwardmissing)
    extensions.wrapfunction(copies, '_checkcopies', _checkcopies)
# narrowdirstate.py - extensions to mercurial dirstate to support narrow clones
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial import (
    dirstate,
    error,
    extensions,
    match as matchmod,
    narrowspec,
    util as hgutil,
)

def setup(repo):
    """Add narrow spec dirstate ignore, block changes outside narrow spec."""

    def walk(orig, self, match, subrepos, unknown, ignored, full=True,
             narrowonly=True):
        if narrowonly:
            # hack to not exclude explicitly-specified paths so that they can
            # be warned later on e.g. dirstate.add()
            em = matchmod.exact(match._root, match._cwd, match.files())
            nm = matchmod.unionmatcher([repo.narrowmatch(), em])
            match = matchmod.intersectmatchers(match, nm)
        return orig(self, match, subrepos, unknown, ignored, full)

    extensions.wrapfunction(dirstate.dirstate, 'walk', walk)

    # Prevent adding files that are outside the sparse checkout
    editfuncs = ['normal', 'add', 'normallookup', 'copy', 'remove', 'merge']
    for func in editfuncs:
        def _wrapper(orig, self, *args):
            # Refuse to start tracking a path the narrowspec excludes;
            # paths already in the dirstate are left alone.
            ds = repo.dirstate
            matcher = repo.narrowmatch()
            for f in args:
                if f is not None and not matcher(f) and f not in ds:
                    raise error.Abort(_("cannot track '%s' - it is outside "
                                        "the narrow clone") % f)
            return orig(self, *args)
        extensions.wrapfunction(dirstate.dirstate, func, _wrapper)

    def filterrebuild(orig, self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuilding entire dirstate, let's filter allfiles to match the
            # narrowspec.
            allfiles = [f for f in allfiles if repo.narrowmatch()(f)]
        orig(self, parent, allfiles, changedfiles)

    extensions.wrapfunction(dirstate.dirstate, 'rebuild', filterrebuild)

    def _narrowbackupname(backupname):
        # Derive the narrowspec backup path from the dirstate backup path.
        assert 'dirstate' in backupname
        return backupname.replace('dirstate', narrowspec.FILENAME)

    def restorebackup(orig, self, tr, backupname):
        # Restore the narrowspec alongside the dirstate.
        self._opener.rename(_narrowbackupname(backupname), narrowspec.FILENAME,
                            checkambig=True)
        orig(self, tr, backupname)

    extensions.wrapfunction(dirstate.dirstate, 'restorebackup', restorebackup)

    def savebackup(orig, self, tr, backupname):
        orig(self, tr, backupname)

        # Snapshot the narrowspec next to the dirstate backup.
        narrowbackupname = _narrowbackupname(backupname)
        self._opener.tryunlink(narrowbackupname)
        hgutil.copyfile(self._opener.join(narrowspec.FILENAME),
                        self._opener.join(narrowbackupname), hardlink=True)

    extensions.wrapfunction(dirstate.dirstate, 'savebackup', savebackup)

    def clearbackup(orig, self, tr, backupname):
        orig(self, tr, backupname)
        self._opener.unlink(_narrowbackupname(backupname))

    extensions.wrapfunction(dirstate.dirstate, 'clearbackup', clearbackup)
# narrowmerge.py - extensions to mercurial merge module to support narrow clones
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial import (
    copies,
    error,
    extensions,
    merge,
)

def setup():
    """Wrap merge internals so a narrow clone never lays out or merges
    files outside the narrowspec, aborting if a real merge would be
    required for such a file."""

    def _manifestmerge(orig, repo, wctx, p2, pa, branchmerge, *args, **kwargs):
        """Filter updates to only lay out files that match the narrow spec."""
        actions, diverge, renamedelete = orig(
            repo, wctx, p2, pa, branchmerge, *args, **kwargs)

        matcher = repo.narrowmatch()
        if matcher.always():
            return actions, diverge, renamedelete

        nooptypes = set(['k'])  # TODO: handle with nonconflicttypes
        nonconflicttypes = set('a am c cm f g r e'.split())
        # We mutate the items in the dict during iteration, so iterate
        # over a copy.
        for f, action in list(actions.items()):
            if matcher(f):
                pass
            elif not branchmerge:
                del actions[f]  # just updating, ignore changes outside clone
            elif action[0] in nooptypes:
                del actions[f]  # merge does not affect file
            elif action[0] in nonconflicttypes:
                raise error.Abort(_('merge affects file \'%s\' outside narrow, '
                                    'which is not yet supported') % f,
                                  hint=_('merging in the other direction '
                                         'may work'))
            else:
                raise error.Abort(_('conflict in file \'%s\' is outside '
                                    'narrow clone') % f)

        return actions, diverge, renamedelete

    extensions.wrapfunction(merge, 'manifestmerge', _manifestmerge)

    def _checkcollision(orig, repo, wmf, actions):
        # Restrict the working manifest and the pending actions to the
        # narrowspec before running the stock collision check.
        matcher = repo.narrowmatch()
        if not matcher.always():
            wmf = wmf.matches(matcher)
            if actions:
                narrowactions = {}
                for m, actionsfortype in actions.iteritems():
                    narrowactions[m] = [(f, args, msg)
                                        for f, args, msg in actionsfortype
                                        if matcher(f)]
                actions = narrowactions
        return orig(repo, wmf, actions)

    extensions.wrapfunction(merge, '_checkcollision', _checkcollision)

    def _computenonoverlap(orig, repo, *args, **kwargs):
        # Trim both "unknown file" lists down to the narrowspec.
        u1, u2 = orig(repo, *args, **kwargs)
        matcher = repo.narrowmatch()
        if matcher.always():
            return u1, u2
        return ([f for f in u1 if matcher(f)],
                [f for f in u2 if matcher(f)])
    extensions.wrapfunction(copies, '_computenonoverlap', _computenonoverlap)
# narrowpatch.py - extensions to mercurial patch module to support narrow clones
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    extensions,
    patch,
)

def setup(repo):
    """Restrict diff generation to files inside the narrowspec."""

    def _filepairs(orig, *args):
        """Only includes files within the narrow spec in the diff."""
        matcher = repo.narrowmatch()
        if matcher.always():
            # Nothing is excluded; pass pairs through untouched.
            for pair in orig(*args):
                yield pair
            return
        for pair in orig(*args):
            f1, f2, copyop = pair
            # Keep the pair only when each present side is inside the spec.
            if (not f1 or matcher(f1)) and (not f2 or matcher(f2)):
                yield pair

    def trydiff(orig, repo, revs, ctx1, ctx2, modified, added, removed,
                copy, getfilectx, *args, **kwargs):
        # Trim every file list down to the narrowspec before diffing.
        matcher = repo.narrowmatch()
        if not matcher.always():
            modified = [f for f in modified if matcher(f)]
            added = [f for f in added if matcher(f)]
            removed = [f for f in removed if matcher(f)]
            copy = {k: v for k, v in copy.iteritems() if matcher(k)}
        return orig(repo, revs, ctx1, ctx2, modified, added, removed, copy,
                    getfilectx, *args, **kwargs)

    extensions.wrapfunction(patch, '_filepairs', _filepairs)
    extensions.wrapfunction(patch, 'trydiff', trydiff)
# narrowrepo.py - repository which supports narrow revlogs, lazy loading
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    bundlerepo,
    changegroup,
    hg,
    localrepo,
    narrowspec,
    scmutil,
)

from . import (
    narrowrevlog,
)

def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    """After sharing, record the narrowspec file in the destination's
    'shared' list so shared clones pick up the same narrowspec."""
    orig(sourcerepo, destrepo, **kwargs)
    if changegroup.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            with destrepo.vfs('shared', 'a') as fp:
                fp.write(narrowspec.FILENAME + '\n')

def unsharenarrowspec(orig, ui, repo, repopath):
    """Copy the narrowspec from the share source into the repo itself
    before the stock unshare runs."""
    if (changegroup.NARROW_REQUIREMENT in repo.requirements
        and repo.path == repopath and repo.shared()):
        srcrepo = hg.sharedreposource(repo)
        with srcrepo.vfs(narrowspec.FILENAME) as f:
            spec = f.read()
        with repo.vfs(narrowspec.FILENAME, 'w') as f:
            f.write(spec)
    return orig(ui, repo, repopath)

def wraprepo(repo):
    """Enables narrow clone functionality on a single local repository."""

    cacheprop = localrepo.storecache
    if isinstance(repo, bundlerepo.bundlerepository):
        # We have to use a different caching property decorator for
        # bundlerepo because storecache blows up in strange ways on a
        # bundlerepo. Fortunately, there's no risk of data changing in
        # a bundlerepo.
        cacheprop = lambda name: localrepo.unfilteredpropertycache

    class narrowrepository(repo.__class__):

        def _constructmanifest(self):
            # Narrow the manifest revlog as it is constructed.
            manifest = super(narrowrepository, self)._constructmanifest()
            narrowrevlog.makenarrowmanifestrevlog(manifest, repo)
            return manifest

        @cacheprop('00manifest.i')
        def manifestlog(self):
            mfl = super(narrowrepository, self).manifestlog
            narrowrevlog.makenarrowmanifestlog(mfl, self)
            return mfl

        def file(self, f):
            fl = super(narrowrepository, self).file(f)
            narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
            return fl

        # I'm not sure this is the right place to do this filter.
        # context._manifestmatches() would probably be better, or perhaps
        # move it to a later place, in case some of the callers do want to know
        # which directories changed. This seems to work for now, though.
        def status(self, *args, **kwargs):
            s = super(narrowrepository, self).status(*args, **kwargs)
            matcher = self.narrowmatch()
            # Drop everything outside the narrowspec from each status bucket.
            narrowed = [[f for f in bucket if matcher(f)]
                        for bucket in (s.modified, s.added, s.removed,
                                       s.deleted, s.unknown, s.ignored,
                                       s.clean)]
            return scmutil.status(*narrowed)

    repo.__class__ = narrowrepository
# narrowrevlog.py - revlog storing irrelevant nodes as "ellipsis" nodes
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    error,
    manifest,
    revlog,
    util,
)

def readtransform(self, text):
    # Ellipsis revisions require no transformation on read...
    return text, False

def writetransform(self, text):
    # ...nor on write.
    return text, False

def rawtransform(self, text):
    return False

# Register the (no-op) ellipsis flag processor at module load time.
revlog.addflagprocessor(revlog.REVIDX_ELLIPSIS,
                        (readtransform, writetransform, rawtransform))

def setup():
    # We just wanted to add the flag processor, which is done at module
    # load time.
    pass

class excludeddir(manifest.treemanifest):
    """Stand-in for a directory that is excluded from the repository.

    With narrowing active on a repository that uses treemanifests,
    some of the directory revlogs will be excluded from the resulting
    clone. This is a huge storage win for clients, but means we need
    some sort of pseudo-manifest to surface to internals so we can
    detect a merge conflict outside the narrowspec. That's what this
    class is: it stands in for a directory whose node is known, but
    whose contents are unknown.
    """
    def __init__(self, dir, node):
        super(excludeddir, self).__init__(dir)
        self._node = node
        # Add an empty file, which will be included by iterators and such,
        # appearing as the directory itself (i.e. something like "dir/")
        self._files[''] = node
        self._flags[''] = 't'

    # Manifests outside the narrowspec should never be modified, so avoid
    # copying. This makes a noticeable difference when there are very many
    # directories outside the narrowspec. Also, it makes sense for the copy to
    # be of the same type as the original, which would not happen with the
    # super type's copy().
    def copy(self):
        return self

class excludeddirmanifestctx(manifest.treemanifestctx):
    """context wrapper for excludeddir - see that docstring for rationale"""
    def __init__(self, dir, node):
        self._dir = dir
        self._node = node

    def read(self):
        return excludeddir(self._dir, self._node)

    def write(self, *args):
        raise error.ProgrammingError(
            'attempt to write manifest from excluded dir %s' % self._dir)

class excludedmanifestrevlog(manifest.manifestrevlog):
    """Stand-in for excluded treemanifest revlogs.

    When narrowing is active on a treemanifest repository, we'll have
    references to directories we can't see due to the revlog being
    skipped. This class exists to conform to the manifestrevlog
    interface for those directories and proactively prevent writes to
    outside the narrowspec.
    """

    def __init__(self, dir):
        self._dir = dir

    def __len__(self):
        raise error.ProgrammingError(
            'attempt to get length of excluded dir %s' % self._dir)

    def rev(self, node):
        raise error.ProgrammingError(
            'attempt to get rev from excluded dir %s' % self._dir)

    def linkrev(self, node):
        raise error.ProgrammingError(
            'attempt to get linkrev from excluded dir %s' % self._dir)

    def node(self, rev):
        raise error.ProgrammingError(
            'attempt to get node from excluded dir %s' % self._dir)

    def add(self, *args, **kwargs):
        # We should never write entries in dirlogs outside the narrow clone.
        # However, the method still gets called from writesubtree() in
        # _addtree(), so we need to handle it. We should possibly make that
        # avoid calling add() with a clean manifest (_dirty is always False
        # in excludeddir instances).
        pass

def makenarrowmanifestrevlog(mfrevlog, repo):
    # Idempotent: a revlog only needs to be narrowed once.
    if util.safehasattr(mfrevlog, '_narrowed'):
        return

    class narrowmanifestrevlog(mfrevlog.__class__):
        # This function is called via debug{revlog,index,data}, but also during
        # at least some push operations. This will be used to wrap/exclude the
        # child directories when using treemanifests.
        def dirlog(self, d):
            if d and not d.endswith('/'):
                d = d + '/'
            if not repo.narrowmatch().visitdir(d[:-1] or '.'):
                return excludedmanifestrevlog(d)
            result = super(narrowmanifestrevlog, self).dirlog(d)
            makenarrowmanifestrevlog(result, repo)
            return result

    mfrevlog.__class__ = narrowmanifestrevlog
    mfrevlog._narrowed = True

def makenarrowmanifestlog(mfl, repo):
    class narrowmanifestlog(mfl.__class__):
        def get(self, dir, node, verify=True):
            # Serve a stand-in context for directories outside the spec.
            if not repo.narrowmatch().visitdir(dir[:-1] or '.'):
                return excludeddirmanifestctx(dir, node)
            return super(narrowmanifestlog, self).get(dir, node, verify=verify)
    mfl.__class__ = narrowmanifestlog

def makenarrowfilelog(fl, narrowmatch):
    class narrowfilelog(fl.__class__):
        def renamed(self, node):
            # Renames that come from outside the narrowspec are
            # problematic at least for git-diffs, because we lack the
            # base text for the rename. This logic was introduced in
            # 3cd72b1 of narrowhg (authored by martinvonz, reviewed by
            # adgar), but that revision doesn't have any additional
            # commentary on what problems we can encounter.
            meta = super(narrowfilelog, self).renamed(node)
            if meta and not narrowmatch(meta[0]):
                return None
            return meta

        def size(self, rev):
            # We take advantage of the fact that remotefilelog
            # lacks a node() method to just skip the
            # rename-checking logic when on remotefilelog. This
            # might be incorrect on other non-revlog-based storage
            # engines, but for now this seems to be fine.
            #
            # TODO: when remotefilelog is in core, improve this to
            # explicitly look for remotefilelog instead of cheating
            # with a hasattr check.
            if util.safehasattr(self, 'node'):
                node = self.node(rev)
                # Because renamed() is overridden above to
                # sometimes return None even if there is metadata
                # in the revlog, size can be incorrect for
                # copies/renames, so we need to make sure we call
                # the super class's implementation of renamed()
                # for the purpose of size calculation.
                if super(narrowfilelog, self).renamed(node):
                    return len(self.read(node))
            return super(narrowfilelog, self).size(rev)

        def cmp(self, node, text):
            different = super(narrowfilelog, self).cmp(node, text)
            if different:
                # Similar to size() above, if the file was copied from
                # a file outside the narrowspec, the super class's
                # would have returned True because we tricked it into
                # thinking that the file was not renamed.
                if super(narrowfilelog, self).renamed(node):
                    t2 = self.read(node)
                    return t2 != text
            return different

    fl.__class__ = narrowfilelog
# narrowtemplates.py - added template keywords for narrow clones
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
    registrar,
    revlog,
)

keywords = {}
templatekeyword = registrar.templatekeyword(keywords)
revsetpredicate = registrar.revsetpredicate()

def _isellipsis(repo, rev):
    # Ellipsis commits carry a dedicated flag bit on their changelog entry.
    return bool(repo.changelog.flags(rev) & revlog.REVIDX_ELLIPSIS)

@templatekeyword('ellipsis', requires={'repo', 'ctx'})
def ellipsis(context, mapping):
    """String. 'ellipsis' if the change is an ellipsis node, else ''."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    return 'ellipsis' if _isellipsis(repo, ctx.rev()) else ''

@templatekeyword('outsidenarrow', requires={'repo', 'ctx'})
def outsidenarrow(context, mapping):
    """String. 'outsidenarrow' if the change affects no tracked files,
    else ''."""
    repo = context.resource(mapping, 'repo')
    ctx = context.resource(mapping, 'ctx')
    m = repo.narrowmatch()
    if not m.always() and not any(m(f) for f in ctx.files()):
        return 'outsidenarrow'
    return ''

@revsetpredicate('ellipsis')
def ellipsisrevset(repo, subset, x):
    """Changesets that are ellipsis nodes."""
    return subset.filter(lambda r: _isellipsis(repo, r))
# narrowwirepeer.py - passes narrow spec with unbundle command
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial import (
    error,
    extensions,
    hg,
    narrowspec,
    node,
)

def uisetup():
    """Teach wire peers to ask the server to expand a narrowspec."""
    def peersetup(ui, peer):
        # We must set up the expansion before reposetup below, since it's used
        # at clone time before we have a repo.
        class expandingpeer(peer.__class__):
            def expandnarrow(self, narrow_include, narrow_exclude, nodes):
                ui.status(_("expanding narrowspec\n"))
                if not self.capable('exp-expandnarrow'):
                    raise error.Abort(
                        'peer does not support expanding narrowspecs')

                hex_nodes = (node.hex(n) for n in nodes)
                new_narrowspec = self._call(
                    'expandnarrow',
                    includepats=','.join(narrow_include),
                    excludepats=','.join(narrow_exclude),
                    nodes=','.join(hex_nodes))

                return narrowspec.parseserverpatterns(new_narrowspec)
        peer.__class__ = expandingpeer
    hg.wirepeersetupfuncs.append(peersetup)

def reposetup(repo):
    """Attach the repo's narrowspec to outgoing 'unbundle' calls."""
    def wirereposetup(ui, peer):
        def wrapped(orig, cmd, *args, **kwargs):
            if cmd == 'unbundle':
                # TODO: don't blindly add include/exclude wireproto
                # arguments to unbundle.
                include, exclude = repo.narrowpats
                kwargs[r"includepats"] = ','.join(include)
                kwargs[r"excludepats"] = ','.join(exclude)
            return orig(cmd, *args, **kwargs)
        extensions.wrapfunction(peer, '_calltwowaystream', wrapped)
    hg.wirepeersetupfuncs.append(wirereposetup)
--- a/hgext/notify.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/notify.py Sun Mar 04 10:42:51 2018 -0500 @@ -142,13 +142,14 @@ from mercurial.i18n import _ from mercurial import ( - cmdutil, error, + logcmdutil, mail, patch, registrar, util, ) +from mercurial.utils import dateutil # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should @@ -257,9 +258,8 @@ mapfile = self.ui.config('notify', 'style') if not mapfile and not template: template = deftemplates.get(hooktype) or single_template - spec = cmdutil.logtemplatespec(template, mapfile) - self.t = cmdutil.changeset_templater(self.ui, self.repo, spec, - False, None, False) + spec = logcmdutil.templatespec(template, mapfile) + self.t = logcmdutil.changesettemplater(self.ui, self.repo, spec) def strip(self, path): '''strip leading slashes from local path, turn into web-safe path.''' @@ -361,7 +361,7 @@ for k, v in headers: msg[k] = v - msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2") + msg['Date'] = dateutil.datestr(format="%a, %d %b %Y %H:%M:%S %1%2") # try to make subject line exist and be useful if not subject:
--- a/hgext/patchbomb.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/patchbomb.py Sun Mar 04 10:42:51 2018 -0500 @@ -74,6 +74,8 @@ from __future__ import absolute_import import email as emailmod +import email.generator as emailgen +import email.utils as eutil import errno import os import socket @@ -83,6 +85,7 @@ from mercurial import ( cmdutil, commands, + encoding, error, formatter, hg, @@ -96,6 +99,7 @@ templater, util, ) +from mercurial.utils import dateutil stringio = util.stringio cmdtable = {} @@ -208,7 +212,7 @@ if not numbered: return '[PATCH%s]' % flag else: - tlen = len(str(total)) + tlen = len("%d" % total) return '[PATCH %0*d of %d%s]' % (tlen, idx, total, flag) def makepatch(ui, repo, rev, patchlines, opts, _charsets, idx, total, numbered, @@ -265,11 +269,10 @@ if patchtags: patchname = patchtags[0] elif total > 1: - patchname = cmdutil.makefilename(repo, '%b-%n.patch', - binnode, seqno=idx, - total=total) + patchname = cmdutil.makefilename(repo[node], '%b-%n.patch', + seqno=idx, total=total) else: - patchname = cmdutil.makefilename(repo, '%b.patch', binnode) + patchname = cmdutil.makefilename(repo[node], '%b.patch') disposition = 'inline' if opts.get('attach'): disposition = 'attachment' @@ -627,7 +630,7 @@ if outgoing: revs = _getoutgoing(repo, dest, revs) if bundle: - opts['revs'] = [str(r) for r in revs] + opts['revs'] = ["%d" % r for r in revs] # check if revision exist on the public destination publicurl = repo.ui.config('patchbomb', 'publicurl') @@ -655,19 +658,21 @@ else: msg = _('public url %s is missing %s') msg %= (publicurl, missing[0]) + missingrevs = [ctx.rev() for ctx in missing] revhint = ' '.join('-r %s' % h - for h in repo.set('heads(%ld)', missing)) + for h in repo.set('heads(%ld)', missingrevs)) hint = _("use 'hg push %s %s'") % (publicurl, revhint) raise error.Abort(msg, hint=hint) # start if date: - start_time = util.parsedate(date) + start_time = dateutil.parsedate(date) else: - start_time = util.makedate() + start_time = 
dateutil.makedate() def genmsgid(id): - return '<%s.%s@%s>' % (id[:20], int(start_time[0]), socket.getfqdn()) + return '<%s.%d@%s>' % (id[:20], int(start_time[0]), + encoding.strtolocal(socket.getfqdn())) # deprecated config: patchbomb.from sender = (opts.get('from') or ui.config('email', 'from') or @@ -744,7 +749,7 @@ if not parent.endswith('>'): parent += '>' - sender_addr = emailmod.Utils.parseaddr(sender)[1] + sender_addr = eutil.parseaddr(encoding.strfromlocal(sender))[1] sender = mail.addressencode(ui, sender, _charsets, opts.get('test')) sendmail = None firstpatch = None @@ -763,7 +768,7 @@ parent = m['Message-Id'] m['User-Agent'] = 'Mercurial-patchbomb/%s' % util.version() - m['Date'] = emailmod.Utils.formatdate(start_time[0], localtime=True) + m['Date'] = eutil.formatdate(start_time[0], localtime=True) start_time = (start_time[0] + 1, start_time[1]) m['From'] = sender @@ -777,7 +782,7 @@ if opts.get('test'): ui.status(_('displaying '), subj, ' ...\n') ui.pager('email') - generator = emailmod.Generator.Generator(ui, mangle_from_=False) + generator = emailgen.Generator(ui, mangle_from_=False) try: generator.flatten(m, 0) ui.write('\n') @@ -794,7 +799,7 @@ # Exim does not remove the Bcc field del m['Bcc'] fp = stringio() - generator = emailmod.Generator.Generator(fp, mangle_from_=False) + generator = emailgen.Generator(fp, mangle_from_=False) generator.flatten(m, 0) sendmail(sender_addr, to + bcc + cc, fp.getvalue())
--- a/hgext/purge.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/purge.py Sun Mar 04 10:42:51 2018 -0500 @@ -31,6 +31,7 @@ from mercurial import ( cmdutil, error, + pycompat, registrar, scmutil, util, @@ -84,6 +85,7 @@ list of files that this program would delete, use the --print option. ''' + opts = pycompat.byteskwargs(opts) act = not opts.get('print') eol = '\n' if opts.get('print0'):
--- a/hgext/rebase.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/rebase.py Sun Mar 04 10:42:51 2018 -0500 @@ -214,7 +214,7 @@ if v >= 0: newrev = repo[v].hex() else: - newrev = v + newrev = "%d" % v destnode = repo[destmap[d]].hex() f.write("%s:%s:%s\n" % (oldrev, newrev, destnode)) repo.ui.debug('rebase status stored\n') @@ -289,7 +289,7 @@ skipped.add(old) seen.add(new) repo.ui.debug('computed skipped revs: %s\n' % - (' '.join(str(r) for r in sorted(skipped)) or None)) + (' '.join('%d' % r for r in sorted(skipped)) or '')) repo.ui.debug('rebase status resumed\n') self.originalwd = originalwd @@ -312,10 +312,13 @@ if not self.ui.configbool('experimental', 'rebaseskipobsolete'): return obsoleteset = set(obsoleterevs) - self.obsoletenotrebased, self.obsoletewithoutsuccessorindestination = \ - _computeobsoletenotrebased(self.repo, obsoleteset, destmap) + (self.obsoletenotrebased, + self.obsoletewithoutsuccessorindestination, + obsoleteextinctsuccessors) = _computeobsoletenotrebased( + self.repo, obsoleteset, destmap) skippedset = set(self.obsoletenotrebased) skippedset.update(self.obsoletewithoutsuccessorindestination) + skippedset.update(obsoleteextinctsuccessors) _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset) def _prepareabortorcontinue(self, isabort): @@ -499,7 +502,8 @@ if not self.collapsef: merging = p2 != nullrev editform = cmdutil.mergeeditform(merging, 'rebase') - editor = cmdutil.getcommiteditor(editform=editform, **opts) + editor = cmdutil.getcommiteditor(editform=editform, + **pycompat.strkwargs(opts)) if self.wctx.isinmemory(): newnode = concludememorynode(repo, rev, p1, p2, wctx=self.wctx, @@ -537,7 +541,7 @@ 'to commit\n') % (rev, ctx)) self.skipped.add(rev) self.state[rev] = p1 - ui.debug('next revision set to %s\n' % p1) + ui.debug('next revision set to %d\n' % p1) else: ui.status(_('already rebased %s as %s\n') % (desc, repo[self.state[rev]])) @@ -585,11 +589,12 @@ date=self.date) if newnode is not None: newrev = repo[newnode].rev() - 
for oldrev in self.state.iterkeys(): + for oldrev in self.state: self.state[oldrev] = newrev if 'qtip' in repo.tags(): - updatemq(repo, self.state, self.skipped, **opts) + updatemq(repo, self.state, self.skipped, + **pycompat.strkwargs(opts)) # restore original working directory # (we do this before stripping) @@ -914,12 +919,12 @@ dest = scmutil.revsingle(repo, destf) else: dest = repo[_destrebase(repo, base, destspace=destspace)] - destf = str(dest) + destf = bytes(dest) roots = [] # selected children of branching points bpbase = {} # {branchingpoint: [origbase]} for b in base: # group bases by branching points - bp = repo.revs('ancestor(%d, %d)', b, dest).first() + bp = repo.revs('ancestor(%d, %d)', b, dest.rev()).first() bpbase[bp] = bpbase.get(bp, []) + [b] if None in bpbase: # emulate the old behavior, showing "nothing to rebase" (a better @@ -941,12 +946,12 @@ else: ui.status(_('nothing to rebase - working directory ' 'parent is also destination\n')) - elif not repo.revs('%ld - ::%d', base, dest): + elif not repo.revs('%ld - ::%d', base, dest.rev()): if basef: ui.status(_('nothing to rebase - "base" %s is ' 'already an ancestor of destination ' '%s\n') % - ('+'.join(str(repo[r]) for r in base), + ('+'.join(bytes(repo[r]) for r in base), dest)) else: ui.status(_('nothing to rebase - working ' @@ -954,7 +959,7 @@ 'ancestor of destination %s\n') % dest) else: # can it happen? ui.status(_('nothing to rebase from %s to %s\n') % - ('+'.join(str(repo[r]) for r in base), dest)) + ('+'.join(bytes(repo[r]) for r in base), dest)) return None # If rebasing the working copy parent, force in-memory merge to be off. 
# @@ -976,7 +981,7 @@ if not destf: dest = repo[_destrebase(repo, rebaseset, destspace=destspace)] - destf = str(dest) + destf = bytes(dest) allsrc = revsetlang.formatspec('%ld', rebaseset) alias = {'ALLSRC': allsrc} @@ -1031,10 +1036,10 @@ return nullrev if len(parents) == 1: return parents.pop() - raise error.Abort(_('unable to collapse on top of %s, there is more ' + raise error.Abort(_('unable to collapse on top of %d, there is more ' 'than one external parent: %s') % (max(destancestors), - ', '.join(str(p) for p in sorted(parents)))) + ', '.join("%d" % p for p in sorted(parents)))) def concludememorynode(repo, rev, p1, p2, wctx=None, commitmsg=None, editor=None, extrafn=None, @@ -1220,7 +1225,7 @@ `rebaseobsrevs`: set of obsolete revision in source `rebaseobsskipped`: set of revisions from source skipped because they have - successors in destination + successors in destination or no non-obsolete successor. """ # Obsolete node with successors not in dest leads to divergence divergenceok = ui.configbool('experimental', @@ -1228,7 +1233,7 @@ divergencebasecandidates = rebaseobsrevs - rebaseobsskipped if divergencebasecandidates and not divergenceok: - divhashes = (str(repo[r]) + divhashes = (bytes(repo[r]) for r in divergencebasecandidates) msg = _("this rebase will cause " "divergences from: %s") @@ -1436,7 +1441,7 @@ def isagitpatch(repo, patchname): 'Return true if the given patch is in git format' mqpatch = os.path.join(repo.mq.path, patchname) - for line in patch.linereader(file(mqpatch, 'rb')): + for line in patch.linereader(open(mqpatch, 'rb')): if line.startswith('diff --git'): return True return False @@ -1465,10 +1470,10 @@ for rev in sorted(mqrebase, reverse=True): if rev not in skipped: name, isgit = mqrebase[rev] - repo.ui.note(_('updating mq patch %s to %s:%s\n') % + repo.ui.note(_('updating mq patch %s to %d:%s\n') % (name, state[rev], repo[state[rev]])) mq.qimport(repo, (), patchname=name, git=isgit, - rev=[str(state[rev])]) + rev=["%d" % 
state[rev]]) else: # Rebased and skipped skippedpatches.add(mqrebase[rev][0]) @@ -1549,7 +1554,7 @@ cleanup = True if immutable: repo.ui.warn(_("warning: can't clean up public changesets %s\n") - % ', '.join(str(repo[r]) for r in immutable), + % ', '.join(bytes(repo[r]) for r in immutable), hint=_("see 'hg help phases' for details")) cleanup = False @@ -1646,7 +1651,9 @@ roots = list(repo.set('roots(%ld)', sortedsrc[0])) if not roots: raise error.Abort(_('no matching revisions')) - roots.sort() + def revof(r): + return r.rev() + roots = sorted(roots, key=revof) state = dict.fromkeys(rebaseset, revtodo) emptyrebase = (len(sortedsrc) == 1) for root in roots: @@ -1785,25 +1792,34 @@ `obsoletewithoutsuccessorindestination` is a set with obsolete revisions without a successor in destination. + + `obsoleteextinctsuccessors` is a set of obsolete revisions with only + obsolete successors. """ obsoletenotrebased = {} obsoletewithoutsuccessorindestination = set([]) + obsoleteextinctsuccessors = set([]) assert repo.filtername is None cl = repo.changelog nodemap = cl.nodemap + extinctnodes = set(cl.node(r) for r in repo.revs('extinct()')) for srcrev in rebaseobsrevs: srcnode = cl.node(srcrev) destnode = cl.node(destmap[srcrev]) # XXX: more advanced APIs are required to handle split correctly - successors = list(obsutil.allsuccessors(repo.obsstore, [srcnode])) - if len(successors) == 1: - # obsutil.allsuccessors includes node itself. When the list only - # contains one element, it means there are no successors. 
+ successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode])) + # obsutil.allsuccessors includes node itself + successors.remove(srcnode) + if successors.issubset(extinctnodes): + # all successors are extinct + obsoleteextinctsuccessors.add(srcrev) + if not successors: + # no successor obsoletenotrebased[srcrev] = None else: for succnode in successors: - if succnode == srcnode or succnode not in nodemap: + if succnode not in nodemap: continue if cl.isancestor(succnode, destnode): obsoletenotrebased[srcrev] = nodemap[succnode] @@ -1812,11 +1828,14 @@ # If 'srcrev' has a successor in rebase set but none in # destination (which would be catched above), we shall skip it # and its descendants to avoid divergence. - if any(nodemap[s] in destmap - for s in successors if s != srcnode): + if any(nodemap[s] in destmap for s in successors): obsoletewithoutsuccessorindestination.add(srcrev) - return obsoletenotrebased, obsoletewithoutsuccessorindestination + return ( + obsoletenotrebased, + obsoletewithoutsuccessorindestination, + obsoleteextinctsuccessors, + ) def summaryhook(ui, repo): if not repo.vfs.exists('rebasestate'):
--- a/hgext/relink.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/relink.py Sun Mar 04 10:42:51 2018 -0500 @@ -168,8 +168,8 @@ source = os.path.join(src, f) tgt = os.path.join(dst, f) # Binary mode, so that read() works correctly, especially on Windows - sfp = file(source, 'rb') - dfp = file(tgt, 'rb') + sfp = open(source, 'rb') + dfp = open(tgt, 'rb') sin = sfp.read(CHUNKLEN) while sin: din = dfp.read(CHUNKLEN) @@ -187,7 +187,7 @@ relinked += 1 savedbytes += sz except OSError as inst: - ui.warn('%s: %s\n' % (tgt, str(inst))) + ui.warn('%s: %s\n' % (tgt, util.forcebytestr(inst))) ui.progress(_('relinking'), None)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/hgext/remotenames.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,299 @@ +# remotenames.py - extension to display remotenames +# +# Copyright 2017 Augie Fackler <raf@durin42.com> +# Copyright 2017 Sean Farley <sean@farley.io> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +""" showing remotebookmarks and remotebranches in UI + +By default both remotebookmarks and remotebranches are turned on. Config knob to +control the individually are as follows. + +Config options to tweak the default behaviour: + +remotenames.bookmarks + Boolean value to enable or disable showing of remotebookmarks + +remotenames.branches + Boolean value to enable or disable showing of remotebranches +""" + +from __future__ import absolute_import + +import collections + +from mercurial.i18n import _ + +from mercurial.node import ( + bin, +) +from mercurial import ( + logexchange, + namespaces, + registrar, + revsetlang, + smartset, + templatekw, +) + +# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for +# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should +# be specifying the version(s) of Mercurial they are tested with, or +# leave the attribute unspecified. +testedwith = 'ships-with-hg-core' + +configtable = {} +configitem = registrar.configitem(configtable) +templatekeyword = registrar.templatekeyword() +revsetpredicate = registrar.revsetpredicate() + +configitem('remotenames', 'bookmarks', + default=True, +) +configitem('remotenames', 'branches', + default=True, +) + +class lazyremotenamedict(collections.MutableMapping): + """ + Read-only dict-like Class to lazily resolve remotename entries + + We are doing that because remotenames startup was slow. + We lazily read the remotenames file once to figure out the potential entries + and store them in self.potentialentries. 
Then when asked to resolve an + entry, if it is not in self.potentialentries, then it isn't there, if it + is in self.potentialentries we resolve it and store the result in + self.cache. We cannot be lazy is when asked all the entries (keys). + """ + def __init__(self, kind, repo): + self.cache = {} + self.potentialentries = {} + self._kind = kind # bookmarks or branches + self._repo = repo + self.loaded = False + + def _load(self): + """ Read the remotenames file, store entries matching selected kind """ + self.loaded = True + repo = self._repo + for node, rpath, rname in logexchange.readremotenamefile(repo, + self._kind): + name = rpath + '/' + rname + self.potentialentries[name] = (node, rpath, name) + + def _resolvedata(self, potentialentry): + """ Check that the node for potentialentry exists and return it """ + if not potentialentry in self.potentialentries: + return None + node, remote, name = self.potentialentries[potentialentry] + repo = self._repo + binnode = bin(node) + # if the node doesn't exist, skip it + try: + repo.changelog.rev(binnode) + except LookupError: + return None + # Skip closed branches + if (self._kind == 'branches' and repo[binnode].closesbranch()): + return None + return [binnode] + + def __getitem__(self, key): + if not self.loaded: + self._load() + val = self._fetchandcache(key) + if val is not None: + return val + else: + raise KeyError() + + def __iter__(self): + return iter(self.potentialentries) + + def __len__(self): + return len(self.potentialentries) + + def __setitem__(self): + raise NotImplementedError + + def __delitem__(self): + raise NotImplementedError + + def _fetchandcache(self, key): + if key in self.cache: + return self.cache[key] + val = self._resolvedata(key) + if val is not None: + self.cache[key] = val + return val + else: + return None + + def keys(self): + """ Get a list of bookmark or branch names """ + if not self.loaded: + self._load() + return self.potentialentries.keys() + + def iteritems(self): + """ 
Iterate over (name, node) tuples """ + + if not self.loaded: + self._load() + + for k, vtup in self.potentialentries.iteritems(): + yield (k, [bin(vtup[0])]) + +class remotenames(object): + """ + This class encapsulates all the remotenames state. It also contains + methods to access that state in convenient ways. Remotenames are lazy + loaded. Whenever client code needs to ensure the freshest copy of + remotenames, use the `clearnames` method to force an eventual load. + """ + + def __init__(self, repo, *args): + self._repo = repo + self.clearnames() + + def clearnames(self): + """ Clear all remote names state """ + self.bookmarks = lazyremotenamedict("bookmarks", self._repo) + self.branches = lazyremotenamedict("branches", self._repo) + self._invalidatecache() + + def _invalidatecache(self): + self._nodetobmarks = None + self._nodetobranch = None + + def bmarktonodes(self): + return self.bookmarks + + def nodetobmarks(self): + if not self._nodetobmarks: + bmarktonodes = self.bmarktonodes() + self._nodetobmarks = {} + for name, node in bmarktonodes.iteritems(): + self._nodetobmarks.setdefault(node[0], []).append(name) + return self._nodetobmarks + + def branchtonodes(self): + return self.branches + + def nodetobranch(self): + if not self._nodetobranch: + branchtonodes = self.branchtonodes() + self._nodetobranch = {} + for name, nodes in branchtonodes.iteritems(): + for node in nodes: + self._nodetobranch.setdefault(node, []).append(name) + return self._nodetobranch + +def reposetup(ui, repo): + if not repo.local(): + return + + repo._remotenames = remotenames(repo) + ns = namespaces.namespace + + if ui.configbool('remotenames', 'bookmarks'): + remotebookmarkns = ns( + 'remotebookmarks', + templatename='remotebookmarks', + colorname='remotebookmark', + logfmt='remote bookmark: %s\n', + listnames=lambda repo: repo._remotenames.bmarktonodes().keys(), + namemap=lambda repo, name: + repo._remotenames.bmarktonodes().get(name, []), + nodemap=lambda repo, node: + 
repo._remotenames.nodetobmarks().get(node, [])) + repo.names.addnamespace(remotebookmarkns) + + if ui.configbool('remotenames', 'branches'): + remotebranchns = ns( + 'remotebranches', + templatename='remotebranches', + colorname='remotebranch', + logfmt='remote branch: %s\n', + listnames = lambda repo: repo._remotenames.branchtonodes().keys(), + namemap = lambda repo, name: + repo._remotenames.branchtonodes().get(name, []), + nodemap = lambda repo, node: + repo._remotenames.nodetobranch().get(node, [])) + repo.names.addnamespace(remotebranchns) + +@templatekeyword('remotenames', requires={'repo', 'ctx', 'templ'}) +def remotenameskw(context, mapping): + """List of strings. Remote names associated with the changeset.""" + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + + remotenames = [] + if 'remotebookmarks' in repo.names: + remotenames = repo.names['remotebookmarks'].names(repo, ctx.node()) + + if 'remotebranches' in repo.names: + remotenames += repo.names['remotebranches'].names(repo, ctx.node()) + + return templatekw.compatlist(context, mapping, 'remotename', remotenames, + plural='remotenames') + +@templatekeyword('remotebookmarks', requires={'repo', 'ctx', 'templ'}) +def remotebookmarkskw(context, mapping): + """List of strings. Remote bookmarks associated with the changeset.""" + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + + remotebmarks = [] + if 'remotebookmarks' in repo.names: + remotebmarks = repo.names['remotebookmarks'].names(repo, ctx.node()) + + return templatekw.compatlist(context, mapping, 'remotebookmark', + remotebmarks, plural='remotebookmarks') + +@templatekeyword('remotebranches', requires={'repo', 'ctx', 'templ'}) +def remotebrancheskw(context, mapping): + """List of strings. 
Remote branches associated with the changeset.""" + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + + remotebranches = [] + if 'remotebranches' in repo.names: + remotebranches = repo.names['remotebranches'].names(repo, ctx.node()) + + return templatekw.compatlist(context, mapping, 'remotebranch', + remotebranches, plural='remotebranches') + +def _revsetutil(repo, subset, x, rtypes): + """utility function to return a set of revs based on the rtypes""" + + revs = set() + cl = repo.changelog + for rtype in rtypes: + if rtype in repo.names: + ns = repo.names[rtype] + for name in ns.listnames(repo): + revs.update(ns.nodes(repo, name)) + + results = (cl.rev(n) for n in revs if cl.hasnode(n)) + return subset & smartset.baseset(sorted(results)) + +@revsetpredicate('remotenames()') +def remotenamesrevset(repo, subset, x): + """All changesets which have a remotename on them.""" + revsetlang.getargs(x, 0, 0, _("remotenames takes no arguments")) + return _revsetutil(repo, subset, x, ('remotebookmarks', 'remotebranches')) + +@revsetpredicate('remotebranches()') +def remotebranchesrevset(repo, subset, x): + """All changesets which are branch heads on remotes.""" + revsetlang.getargs(x, 0, 0, _("remotebranches takes no arguments")) + return _revsetutil(repo, subset, x, ('remotebranches',)) + +@revsetpredicate('remotebookmarks()') +def remotebmarksrevset(repo, subset, x): + """All changesets which have bookmarks on remotes.""" + revsetlang.getargs(x, 0, 0, _("remotebookmarks takes no arguments")) + return _revsetutil(repo, subset, x, ('remotebookmarks',))
--- a/hgext/schemes.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/schemes.py Sun Mar 04 10:42:51 2018 -0500 @@ -94,7 +94,7 @@ parts = parts[:-1] else: tail = '' - context = dict((str(i + 1), v) for i, v in enumerate(parts)) + context = dict(('%d' % (i + 1), v) for i, v in enumerate(parts)) return ''.join(self.templater.process(self.url, context)) + tail def hasdriveletter(orig, path):
--- a/hgext/share.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/share.py Sun Mar 04 10:42:51 2018 -0500 @@ -52,9 +52,6 @@ util, ) -repository = hg.repository -parseurl = hg.parseurl - cmdtable = {} command = registrar.command(cmdtable) # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for @@ -135,27 +132,9 @@ return False return hg.sharedbookmarks in shared -def _getsrcrepo(repo): - """ - Returns the source repository object for a given shared repository. - If repo is not a shared repository, return None. - """ - if repo.sharedpath == repo.path: - return None - - if util.safehasattr(repo, 'srcrepo') and repo.srcrepo: - return repo.srcrepo - - # the sharedpath always ends in the .hg; we want the path to the repo - source = repo.vfs.split(repo.sharedpath)[0] - srcurl, branches = parseurl(source) - srcrepo = repository(repo.ui, srcurl) - repo.srcrepo = srcrepo - return srcrepo - def getbkfile(orig, repo): if _hassharedbookmarks(repo): - srcrepo = _getsrcrepo(repo) + srcrepo = hg.sharedreposource(repo) if srcrepo is not None: # just orig(srcrepo) doesn't work as expected, because # HG_PENDING refers repo.root. @@ -186,7 +165,7 @@ orig(self, tr) if _hassharedbookmarks(self._repo): - srcrepo = _getsrcrepo(self._repo) + srcrepo = hg.sharedreposource(self._repo) if srcrepo is not None: category = 'share-bookmarks' tr.addpostclose(category, lambda tr: self._writerepo(srcrepo)) @@ -196,6 +175,6 @@ orig(self, repo) if _hassharedbookmarks(self._repo): - srcrepo = _getsrcrepo(self._repo) + srcrepo = hg.sharedreposource(self._repo) if srcrepo is not None: orig(self, srcrepo)
--- a/hgext/shelve.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/shelve.py Sun Mar 04 10:42:51 2018 -0500 @@ -55,6 +55,7 @@ from . import ( rebase, ) +from mercurial.utils import dateutil cmdtable = {} command = registrar.command(cmdtable) @@ -192,7 +193,7 @@ d['nodestoremove'] = [nodemod.bin(h) for h in d['nodestoremove'].split(' ')] except (ValueError, TypeError, KeyError) as err: - raise error.CorruptedState(str(err)) + raise error.CorruptedState(pycompat.bytestr(err)) @classmethod def _getversion(cls, repo): @@ -201,7 +202,7 @@ try: version = int(fp.readline().strip()) except ValueError as err: - raise error.CorruptedState(str(err)) + raise error.CorruptedState(pycompat.bytestr(err)) finally: fp.close() return version @@ -251,7 +252,7 @@ if d.get('activebook', '') != cls._noactivebook: obj.activebookmark = d.get('activebook', '') except (error.RepoLookupError, KeyError) as err: - raise error.CorruptedState(str(err)) + raise error.CorruptedState(pycompat.bytestr(err)) return obj @@ -271,7 +272,7 @@ "activebook": activebook or cls._noactivebook } scmutil.simplekeyvaluefile(repo.vfs, cls._filename)\ - .write(info, firstline=str(cls._version)) + .write(info, firstline=("%d" % cls._version)) @classmethod def clear(cls, repo): @@ -563,7 +564,8 @@ continue ui.write(' ' * (16 - len(sname))) used = 16 - age = '(%s)' % templatefilters.age(util.makedate(mtime), abbrev=True) + date = dateutil.makedate(mtime) + age = '(%s)' % templatefilters.age(date, abbrev=True) ui.write(age, label='shelve.age') ui.write(' ' * (12 - len(age))) used += 12 @@ -619,7 +621,7 @@ repo.vfs.rename('unshelverebasestate', 'rebasestate') try: rebase.rebase(ui, repo, **{ - 'abort' : True + r'abort' : True }) except Exception: repo.vfs.rename('rebasestate', 'unshelverebasestate') @@ -648,7 +650,7 @@ ui.pushbuffer(True) cmdutil.revert(ui, repo, shelvectx, repo.dirstate.parents(), *pathtofiles(repo, files), - **{'no_backup': True}) + **{r'no_backup': True}) ui.popbuffer() def restorebranch(ui, repo, 
branchtorestore): @@ -681,7 +683,7 @@ repo.vfs.rename('unshelverebasestate', 'rebasestate') try: rebase.rebase(ui, repo, **{ - 'continue' : True + r'continue' : True }) except Exception: repo.vfs.rename('rebasestate', 'unshelverebasestate') @@ -744,10 +746,10 @@ ui.status(_('rebasing shelved changes\n')) try: rebase.rebase(ui, repo, **{ - 'rev': [shelvectx.rev()], - 'dest': str(tmpwctx.rev()), - 'keep': True, - 'tool': opts.get('tool', ''), + r'rev': [shelvectx.rev()], + r'dest': "%d" % tmpwctx.rev(), + r'keep': True, + r'tool': opts.get('tool', ''), }) except error.InterventionRequired: tr.close() @@ -881,7 +883,7 @@ raise cmdutil.wrongtooltocontinue(repo, _('unshelve')) except error.CorruptedState as err: - ui.debug(str(err) + '\n') + ui.debug(pycompat.bytestr(err) + '\n') if continuef: msg = _('corrupted shelved state file') hint = _('please run hg unshelve --abort to abort unshelve '
--- a/hgext/show.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/show.py Sun Mar 04 10:42:51 2018 -0500 @@ -39,6 +39,7 @@ error, formatter, graphmod, + logcmdutil, phases, pycompat, registrar, @@ -125,7 +126,7 @@ ui.write('\n') for name, func in sorted(views.items()): - ui.write(('%s\n') % func.__doc__) + ui.write(('%s\n') % pycompat.sysbytes(func.__doc__)) ui.write('\n') raise error.Abort(_('no view requested'), @@ -148,7 +149,7 @@ elif fn._csettopic: ref = 'show%s' % fn._csettopic spec = formatter.lookuptemplate(ui, ref, template) - displayer = cmdutil.changeset_templater(ui, repo, spec, buffered=True) + displayer = logcmdutil.changesettemplater(ui, repo, spec, buffered=True) return fn(ui, repo, displayer) else: return fn(ui, repo) @@ -409,8 +410,8 @@ revdag = graphmod.dagwalker(repo, revs) ui.setconfig('experimental', 'graphshorten', True) - cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, - props={'nodelen': nodelen}) + logcmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, + props={'nodelen': nodelen}) def extsetup(ui): # Alias `hg <prefix><view>` to `hg show <view>`.
--- a/hgext/sparse.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/sparse.py Sun Mar 04 10:42:51 2018 -0500 @@ -75,12 +75,12 @@ from mercurial.i18n import _ from mercurial import ( - cmdutil, commands, dirstate, error, extensions, hg, + logcmdutil, match as matchmod, pycompat, registrar, @@ -126,7 +126,7 @@ entry[1].append(('', 'sparse', None, "limit to changesets affecting the sparse checkout")) - def _logrevs(orig, repo, opts): + def _initialrevs(orig, repo, opts): revs = orig(repo, opts) if opts.get('sparse'): sparsematch = sparse.matcher(repo) @@ -135,7 +135,7 @@ return any(f for f in ctx.files() if sparsematch(f)) revs = revs.filter(ctxmatch) return revs - extensions.wrapfunction(cmdutil, '_logrevs', _logrevs) + extensions.wrapfunction(logcmdutil, '_initialrevs', _initialrevs) def _clonesparsecmd(orig, ui, repo, *args, **opts): include_pat = opts.get('include') @@ -194,7 +194,11 @@ """ def walk(orig, self, match, subrepos, unknown, ignored, full=True): - match = matchmod.intersectmatchers(match, self._sparsematcher) + # hack to not exclude explicitly-specified paths so that they can + # be warned later on e.g. dirstate.add() + em = matchmod.exact(match._root, match._cwd, match.files()) + sm = matchmod.unionmatcher([self._sparsematcher, em]) + match = matchmod.intersectmatchers(match, sm) return orig(self, match, subrepos, unknown, ignored, full) extensions.wrapfunction(dirstate.dirstate, 'walk', walk)
--- a/hgext/split.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/split.py Sun Mar 04 10:42:51 2018 -0500 @@ -24,6 +24,7 @@ hg, obsolete, phases, + pycompat, registrar, revsetlang, scmutil, @@ -160,7 +161,7 @@ 'interactive': True, 'message': header + ctx.description(), }) - commands.commit(ui, repo, **opts) + commands.commit(ui, repo, **pycompat.strkwargs(opts)) newctx = repo['.'] committed.append(newctx) @@ -172,6 +173,6 @@ return committed[-1] -def dorebase(ui, repo, src, dest): +def dorebase(ui, repo, src, destctx): rebase.rebase(ui, repo, rev=[revsetlang.formatspec('%ld', src)], - dest=revsetlang.formatspec('%d', dest)) + dest=revsetlang.formatspec('%d', destctx.rev()))
--- a/hgext/strip.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/strip.py Sun Mar 04 10:42:51 2018 -0500 @@ -181,13 +181,10 @@ strippedrevs = revs.union(descendants) roots = revs.difference(descendants) - update = False # if one of the wdir parent is stripped we'll need # to update away to an earlier revision - for p in repo.dirstate.parents(): - if p != nullid and cl.rev(p) in strippedrevs: - update = True - break + update = any(p != nullid and cl.rev(p) in strippedrevs + for p in repo.dirstate.parents()) rootnodes = set(cl.node(r) for r in roots) @@ -215,7 +212,7 @@ # only reset the dirstate for files that would actually change # between the working context and uctx - descendantrevs = repo.revs("%s::." % uctx.rev()) + descendantrevs = repo.revs(b"%d::.", uctx.rev()) changedfiles = [] for rev in descendantrevs: # blindly reset the files, regardless of what actually changed
--- a/hgext/transplant.py Sat Mar 03 22:29:24 2018 -0500 +++ b/hgext/transplant.py Sun Mar 04 10:42:51 2018 -0500 @@ -24,6 +24,7 @@ error, exchange, hg, + logcmdutil, match, merge, node as nodemod, @@ -119,7 +120,8 @@ opener=self.opener) def getcommiteditor(): editform = cmdutil.mergeeditform(repo[None], 'transplant') - return cmdutil.getcommiteditor(editform=editform, **opts) + return cmdutil.getcommiteditor(editform=editform, + **pycompat.strkwargs(opts)) self.getcommiteditor = getcommiteditor def applied(self, repo, node, parent): @@ -160,7 +162,7 @@ tr = repo.transaction('transplant') for rev in revs: node = revmap[rev] - revstr = '%s:%s' % (rev, nodemod.short(node)) + revstr = '%d:%s' % (rev, nodemod.short(node)) if self.applied(repo, node, p1): self.ui.warn(_('skipping already applied revision %s\n') % @@ -194,7 +196,7 @@ skipmerge = False if parents[1] != revlog.nullid: if not opts.get('parent'): - self.ui.note(_('skipping merge changeset %s:%s\n') + self.ui.note(_('skipping merge changeset %d:%s\n') % (rev, nodemod.short(node))) skipmerge = True else: @@ -210,7 +212,7 @@ patchfile = None else: fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-') - fp = os.fdopen(fd, pycompat.sysstr('w')) + fp = os.fdopen(fd, pycompat.sysstr('wb')) gen = patch.diff(source, parent, node, opts=diffopts) for chunk in gen: fp.write(chunk) @@ -258,7 +260,7 @@ self.ui.status(_('filtering %s\n') % patchfile) user, date, msg = (changelog[1], changelog[2], changelog[4]) fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-') - fp = os.fdopen(fd, pycompat.sysstr('w')) + fp = os.fdopen(fd, pycompat.sysstr('wb')) fp.write("# HG changeset patch\n") fp.write("# User %s\n" % user) fp.write("# Date %d %d\n" % date) @@ -273,7 +275,7 @@ }, onerr=error.Abort, errprefix=_('filter failed'), blockedtag='transplant_filter') - user, date, msg = self.parselog(file(headerfile))[1:4] + user, date, msg = self.parselog(open(headerfile, 'rb'))[1:4] finally: os.unlink(headerfile) @@ -309,7 +311,7 
@@ p1 = repo.dirstate.p1() p2 = node self.log(user, date, message, p1, p2, merge=merge) - self.ui.write(str(inst) + '\n') + self.ui.write(util.forcebytestr(inst) + '\n') raise TransplantError(_('fix up the working directory and run ' 'hg transplant --continue')) else: @@ -501,7 +503,7 @@ def browserevs(ui, repo, nodes, opts): '''interactively transplant changesets''' - displayer = cmdutil.show_changeset(ui, repo, opts) + displayer = logcmdutil.changesetdisplayer(ui, repo, opts) transplants = [] merges = [] prompt = _('apply changeset? [ynmpcq?]:' @@ -646,6 +648,7 @@ raise error.Abort(_('--all is incompatible with a ' 'revision list')) + opts = pycompat.byteskwargs(opts) checkopts(opts, revs) if not opts.get('log'): @@ -741,10 +744,11 @@ templatekeyword = registrar.templatekeyword() -@templatekeyword('transplanted') -def kwtransplanted(repo, ctx, **args): +@templatekeyword('transplanted', requires={'ctx'}) +def kwtransplanted(context, mapping): """String. The node identifier of the transplanted changeset if any.""" + ctx = context.resource(mapping, 'ctx') n = ctx.extra().get('transplant_source') return n and nodemod.hex(n) or ''
--- a/mercurial/__init__.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/__init__.py Sun Mar 04 10:42:51 2018 -0500 @@ -31,6 +31,9 @@ # Only handle Mercurial-related modules. if not fullname.startswith(('mercurial.', 'hgext.', 'hgext3rd.')): return None + # don't try to parse binary + if fullname.startswith('mercurial.cext.'): + return None # third-party packages are expected to be dual-version clean if fullname.startswith('mercurial.thirdparty'): return None
--- a/mercurial/archival.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/archival.py Sun Mar 04 10:42:51 2018 -0500 @@ -21,6 +21,7 @@ error, formatter, match as matchmod, + scmutil, util, vfs as vfsmod, ) @@ -37,7 +38,7 @@ if prefix: prefix = util.normpath(prefix) else: - if not isinstance(dest, str): + if not isinstance(dest, bytes): raise ValueError('dest must be string if no prefix') prefix = os.path.basename(dest) lower = prefix.lower() @@ -76,29 +77,27 @@ return repo[rev] return repo['null'] +# {tags} on ctx includes local tags and 'tip', with no current way to limit +# that to global tags. Therefore, use {latesttag} as a substitute when +# the distance is 0, since that will be the list of global tags on ctx. +_defaultmetatemplate = br''' +repo: {root} +node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")} +branch: {branch|utf8} +{ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"), + separate("\n", + join(latesttag % "latesttag: {tag}", "\n"), + "latesttagdistance: {latesttagdistance}", + "changessincelatesttag: {changessincelatesttag}"))} +'''[1:] # drop leading '\n' + def buildmetadata(ctx): '''build content of .hg_archival.txt''' repo = ctx.repo() - default = ( - r'repo: {root}\n' - r'node: {ifcontains(rev, revset("wdir()"),' - r'"{p1node}{dirty}", "{node}")}\n' - r'branch: {branch|utf8}\n' - - # {tags} on ctx includes local tags and 'tip', with no current way to - # limit that to global tags. Therefore, use {latesttag} as a substitute - # when the distance is 0, since that will be the list of global tags on - # ctx. 
- r'{ifeq(latesttagdistance, 0, latesttag % "tag: {tag}\n",' - r'"{latesttag % "latesttag: {tag}\n"}' - r'latesttagdistance: {latesttagdistance}\n' - r'changessincelatesttag: {changessincelatesttag}\n")}' - ) - opts = { 'template': repo.ui.config('experimental', 'archivemetatemplate', - default) + _defaultmetatemplate) } out = util.stringio() @@ -155,7 +154,7 @@ def taropen(mode, name='', fileobj=None): if kind == 'gz': - mode = mode[0] + mode = mode[0:1] if not fileobj: fileobj = open(name, mode + 'b') gzfileobj = self.GzipFileWithTime(name, mode + 'b', @@ -219,7 +218,7 @@ dest.tell() except (AttributeError, IOError): dest = tellable(dest) - self.z = zipfile.ZipFile(dest, 'w', + self.z = zipfile.ZipFile(dest, r'w', compress and zipfile.ZIP_DEFLATED or zipfile.ZIP_STORED) @@ -339,6 +338,7 @@ total = len(files) if total: files.sort() + scmutil.fileprefetchhooks(repo, ctx, files) repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total) for i, f in enumerate(files): ff = ctx.flags(f)
--- a/mercurial/bookmarks.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/bookmarks.py Sun Mar 04 10:42:51 2018 -0500 @@ -84,7 +84,7 @@ # - node in nm, for non-20-bytes entry # - split(...), for string without ' ' repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') - % line) + % pycompat.bytestr(line)) except IOError as inst: if inst.errno != errno.ENOENT: raise @@ -103,30 +103,21 @@ self._aclean = False def __setitem__(self, *args, **kwargs): - msg = ("'bookmarks[name] = node' is deprecated, " - "use 'bookmarks.applychanges'") - self._repo.ui.deprecwarn(msg, '4.3') - self._set(*args, **kwargs) + raise error.ProgrammingError("use 'bookmarks.applychanges' instead") def _set(self, key, value): self._clean = False return dict.__setitem__(self, key, value) def __delitem__(self, key): - msg = ("'del bookmarks[name]' is deprecated, " - "use 'bookmarks.applychanges'") - self._repo.ui.deprecwarn(msg, '4.3') - self._del(key) + raise error.ProgrammingError("use 'bookmarks.applychanges' instead") def _del(self, key): self._clean = False return dict.__delitem__(self, key) def update(self, *others): - msg = ("bookmarks.update(...)' is deprecated, " - "use 'bookmarks.applychanges'") - self._repo.ui.deprecwarn(msg, '4.5') - return dict.update(self, *others) + raise error.ProgrammingError("use 'bookmarks.applychanges' instead") def applychanges(self, repo, tr, changes): """Apply a list of changes to bookmarks @@ -146,12 +137,6 @@ bmchanges[name] = (old, node) self._recordchange(tr) - def recordchange(self, tr): - msg = ("'bookmarks.recorchange' is deprecated, " - "use 'bookmarks.applychanges'") - self._repo.ui.deprecwarn(msg, '4.3') - return self._recordchange(tr) - def _recordchange(self, tr): """record that bookmarks have been changed in a transaction @@ -194,7 +179,7 @@ self._aclean = True def _write(self, fp): - for name, node in self.iteritems(): + for name, node in sorted(self.iteritems()): fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name))) self._clean = True 
self._repo.invalidatevolatilesets()
--- a/mercurial/branchmap.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/branchmap.py Sun Mar 04 10:42:51 2018 -0500 @@ -18,6 +18,7 @@ from . import ( encoding, error, + pycompat, scmutil, util, ) @@ -52,18 +53,19 @@ filteredhash=filteredhash) if not partial.validfor(repo): # invalidate the cache - raise ValueError('tip differs') + raise ValueError(r'tip differs') cl = repo.changelog for l in lines: if not l: continue node, state, label = l.split(" ", 2) if state not in 'oc': - raise ValueError('invalid branch state') + raise ValueError(r'invalid branch state') label = encoding.tolocal(label.strip()) node = bin(node) if not cl.hasnode(node): - raise ValueError('node %s does not exist' % hex(node)) + raise ValueError( + r'node %s does not exist' % pycompat.sysstr(hex(node))) partial.setdefault(label, []).append(node) if state == 'c': partial._closednodes.add(node) @@ -73,7 +75,7 @@ if repo.filtername is not None: msg += ' (%s)' % repo.filtername msg += ': %s\n' - repo.ui.debug(msg % inst) + repo.ui.debug(msg % pycompat.bytestr(inst)) partial = None return partial @@ -253,7 +255,8 @@ repo.filtername, len(self), nodecount) except (IOError, OSError, error.Abort) as inst: # Abort may be raised by read only opener, so log and continue - repo.ui.debug("couldn't write branch cache: %s\n" % inst) + repo.ui.debug("couldn't write branch cache: %s\n" % + util.forcebytestr(inst)) def update(self, repo, revgen): """Given a branchhead cache, self, that may have extra nodes or be @@ -375,7 +378,7 @@ self._rbcrevs[:] = data except (IOError, OSError) as inst: repo.ui.debug("couldn't read revision branch cache: %s\n" % - inst) + util.forcebytestr(inst)) # remember number of good records on disk self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize, len(repo.changelog)) @@ -517,7 +520,7 @@ self._rbcrevslen = revs except (IOError, OSError, error.Abort, error.LockError) as inst: repo.ui.debug("couldn't write revision branch cache%s: %s\n" - % (step, inst)) + % (step, 
util.forcebytestr(inst))) finally: if wlock is not None: wlock.release()
--- a/mercurial/bundle2.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/bundle2.py Sun Mar 04 10:42:51 2018 -0500 @@ -1729,7 +1729,7 @@ extrakwargs = {} targetphase = inpart.params.get('targetphase') if targetphase is not None: - extrakwargs['targetphase'] = int(targetphase) + extrakwargs[r'targetphase'] = int(targetphase) ret = _processchangegroup(op, cg, tr, 'bundle2', 'bundle2', expectedtotal=nbchangesets, **extrakwargs) if op.reply is not None: @@ -1946,7 +1946,8 @@ value = inpart.params.get(name) if value is not None: kwargs[name] = value - raise error.PushkeyFailed(inpart.params['in-reply-to'], **kwargs) + raise error.PushkeyFailed(inpart.params['in-reply-to'], + **pycompat.strkwargs(kwargs)) @parthandler('error:unsupportedcontent', ('parttype', 'params')) def handleerrorunsupportedcontent(op, inpart): @@ -1959,7 +1960,7 @@ if params is not None: kwargs['params'] = params.split('\0') - raise error.BundleUnknownFeatureError(**kwargs) + raise error.BundleUnknownFeatureError(**pycompat.strkwargs(kwargs)) @parthandler('error:pushraced', ('message',)) def handleerrorpushraced(op, inpart): @@ -2001,7 +2002,8 @@ for key in ('namespace', 'key', 'new', 'old', 'ret'): if key in inpart.params: kwargs[key] = inpart.params[key] - raise error.PushkeyFailed(partid=str(inpart.id), **kwargs) + raise error.PushkeyFailed(partid='%d' % inpart.id, + **pycompat.strkwargs(kwargs)) @parthandler('bookmarks') def handlebookmark(op, inpart): @@ -2040,14 +2042,15 @@ allhooks.append(hookargs) for hookargs in allhooks: - op.repo.hook('prepushkey', throw=True, **hookargs) + op.repo.hook('prepushkey', throw=True, + **pycompat.strkwargs(hookargs)) bookstore.applychanges(op.repo, op.gettransaction(), changes) if pushkeycompat: def runhook(): for hookargs in allhooks: - op.repo.hook('pushkey', **hookargs) + op.repo.hook('pushkey', **pycompat.strkwargs(hookargs)) op.repo._afterlock(runhook) elif bookmarksmode == 'records':
--- a/mercurial/bundlerepo.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/bundlerepo.py Sun Mar 04 10:42:51 2018 -0500 @@ -402,7 +402,7 @@ # manifestlog implementation did not consume the manifests from the # changegroup (ex: it might be consuming trees from a separate bundle2 # part instead). So we need to manually consume it. - if 'filestart' not in self.__dict__: + if r'filestart' not in self.__dict__: self._consumemanifest() return self.filestart
--- a/mercurial/byterange.py Sat Mar 03 22:29:24 2018 -0500 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,472 +0,0 @@ -# This library is free software; you can redistribute it and/or -# modify it under the terms of the GNU Lesser General Public -# License as published by the Free Software Foundation; either -# version 2.1 of the License, or (at your option) any later version. -# -# This library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with this library; if not, see -# <http://www.gnu.org/licenses/>. - -# This file is part of urlgrabber, a high-level cross-protocol url-grabber -# Copyright 2002-2004 Michael D. Stenner, Ryan Tomayko - -# $Id: byterange.py,v 1.9 2005/02/14 21:55:07 mstenner Exp $ - -from __future__ import absolute_import - -import email -import ftplib -import mimetypes -import os -import re -import socket -import stat - -from . import ( - urllibcompat, - util, -) - -urlerr = util.urlerr -urlreq = util.urlreq - -addclosehook = urlreq.addclosehook -addinfourl = urlreq.addinfourl -splitattr = urlreq.splitattr -splitpasswd = urlreq.splitpasswd -splitport = urlreq.splitport -splituser = urlreq.splituser -unquote = urlreq.unquote - -class RangeError(IOError): - """Error raised when an unsatisfiable range is requested.""" - -class HTTPRangeHandler(urlreq.basehandler): - """Handler that enables HTTP Range headers. - - This was extremely simple. The Range header is a HTTP feature to - begin with so all this class does is tell urllib2 that the - "206 Partial Content" response from the HTTP server is what we - expected. 
- - Example: - import urllib2 - import byterange - - range_handler = range.HTTPRangeHandler() - opener = urlreq.buildopener(range_handler) - - # install it - urlreq.installopener(opener) - - # create Request and set Range header - req = urlreq.request('http://www.python.org/') - req.header['Range'] = 'bytes=30-50' - f = urlreq.urlopen(req) - """ - - def http_error_206(self, req, fp, code, msg, hdrs): - # 206 Partial Content Response - r = urlreq.addinfourl(fp, hdrs, req.get_full_url()) - r.code = code - r.msg = msg - return r - - def http_error_416(self, req, fp, code, msg, hdrs): - # HTTP's Range Not Satisfiable error - raise RangeError('Requested Range Not Satisfiable') - -class RangeableFileObject(object): - """File object wrapper to enable raw range handling. - This was implemented primarily for handling range - specifications for file:// urls. This object effectively makes - a file object look like it consists only of a range of bytes in - the stream. - - Examples: - # expose 10 bytes, starting at byte position 20, from - # /etc/aliases. - >>> fo = RangeableFileObject(file(b'/etc/passwd', b'r'), (20,30)) - # seek seeks within the range (to position 23 in this case) - >>> fo.seek(3) - # tell tells where your at _within the range_ (position 3 in - # this case) - >>> fo.tell() - # read EOFs if an attempt is made to read past the last - # byte in the range. the following will return only 7 bytes. - >>> fo.read(30) - """ - - def __init__(self, fo, rangetup): - """Create a RangeableFileObject. - fo -- a file like object. only the read() method need be - supported but supporting an optimized seek() is - preferable. - rangetup -- a (firstbyte,lastbyte) tuple specifying the range - to work over. - The file object provided is assumed to be at byte offset 0. 
- """ - self.fo = fo - (self.firstbyte, self.lastbyte) = range_tuple_normalize(rangetup) - self.realpos = 0 - self._do_seek(self.firstbyte) - - def __getattr__(self, name): - """This effectively allows us to wrap at the instance level. - Any attribute not found in _this_ object will be searched for - in self.fo. This includes methods.""" - return getattr(self.fo, name) - - def tell(self): - """Return the position within the range. - This is different from fo.seek in that position 0 is the - first byte position of the range tuple. For example, if - this object was created with a range tuple of (500,899), - tell() will return 0 when at byte position 500 of the file. - """ - return (self.realpos - self.firstbyte) - - def seek(self, offset, whence=0): - """Seek within the byte range. - Positioning is identical to that described under tell(). - """ - assert whence in (0, 1, 2) - if whence == 0: # absolute seek - realoffset = self.firstbyte + offset - elif whence == 1: # relative seek - realoffset = self.realpos + offset - elif whence == 2: # absolute from end of file - # XXX: are we raising the right Error here? - raise IOError('seek from end of file not supported.') - - # do not allow seek past lastbyte in range - if self.lastbyte and (realoffset >= self.lastbyte): - realoffset = self.lastbyte - - self._do_seek(realoffset - self.realpos) - - def read(self, size=-1): - """Read within the range. - This method will limit the size read based on the range. - """ - size = self._calc_read_size(size) - rslt = self.fo.read(size) - self.realpos += len(rslt) - return rslt - - def readline(self, size=-1): - """Read lines within the range. - This method will limit the size read based on the range. - """ - size = self._calc_read_size(size) - rslt = self.fo.readline(size) - self.realpos += len(rslt) - return rslt - - def _calc_read_size(self, size): - """Handles calculating the amount of data to read based on - the range. 
- """ - if self.lastbyte: - if size > -1: - if ((self.realpos + size) >= self.lastbyte): - size = (self.lastbyte - self.realpos) - else: - size = (self.lastbyte - self.realpos) - return size - - def _do_seek(self, offset): - """Seek based on whether wrapped object supports seek(). - offset is relative to the current position (self.realpos). - """ - assert offset >= 0 - seek = getattr(self.fo, 'seek', self._poor_mans_seek) - seek(self.realpos + offset) - self.realpos += offset - - def _poor_mans_seek(self, offset): - """Seek by calling the wrapped file objects read() method. - This is used for file like objects that do not have native - seek support. The wrapped objects read() method is called - to manually seek to the desired position. - offset -- read this number of bytes from the wrapped - file object. - raise RangeError if we encounter EOF before reaching the - specified offset. - """ - pos = 0 - bufsize = 1024 - while pos < offset: - if (pos + bufsize) > offset: - bufsize = offset - pos - buf = self.fo.read(bufsize) - if len(buf) != bufsize: - raise RangeError('Requested Range Not Satisfiable') - pos += bufsize - -class FileRangeHandler(urlreq.filehandler): - """FileHandler subclass that adds Range support. - This class handles Range headers exactly like an HTTP - server would. 
- """ - def open_local_file(self, req): - host = urllibcompat.gethost(req) - file = urllibcompat.getselector(req) - localfile = urlreq.url2pathname(file) - stats = os.stat(localfile) - size = stats[stat.ST_SIZE] - modified = email.Utils.formatdate(stats[stat.ST_MTIME]) - mtype = mimetypes.guess_type(file)[0] - if host: - host, port = urlreq.splitport(host) - if port or socket.gethostbyname(host) not in self.get_names(): - raise urlerr.urlerror('file not on local host') - fo = open(localfile,'rb') - brange = req.headers.get('Range', None) - brange = range_header_to_tuple(brange) - assert brange != () - if brange: - (fb, lb) = brange - if lb == '': - lb = size - if fb < 0 or fb > size or lb > size: - raise RangeError('Requested Range Not Satisfiable') - size = (lb - fb) - fo = RangeableFileObject(fo, (fb, lb)) - headers = email.message_from_string( - 'Content-Type: %s\nContent-Length: %d\nLast-Modified: %s\n' % - (mtype or 'text/plain', size, modified)) - return urlreq.addinfourl(fo, headers, 'file:'+file) - - -# FTP Range Support -# Unfortunately, a large amount of base FTP code had to be copied -# from urllib and urllib2 in order to insert the FTP REST command. 
-# Code modifications for range support have been commented as -# follows: -# -- range support modifications start/end here - -class FTPRangeHandler(urlreq.ftphandler): - def ftp_open(self, req): - host = urllibcompat.gethost(req) - if not host: - raise IOError('ftp error', 'no host given') - host, port = splitport(host) - if port is None: - port = ftplib.FTP_PORT - else: - port = int(port) - - # username/password handling - user, host = splituser(host) - if user: - user, passwd = splitpasswd(user) - else: - passwd = None - host = unquote(host) - user = unquote(user or '') - passwd = unquote(passwd or '') - - try: - host = socket.gethostbyname(host) - except socket.error as msg: - raise urlerr.urlerror(msg) - path, attrs = splitattr(req.get_selector()) - dirs = path.split('/') - dirs = map(unquote, dirs) - dirs, file = dirs[:-1], dirs[-1] - if dirs and not dirs[0]: - dirs = dirs[1:] - try: - fw = self.connect_ftp(user, passwd, host, port, dirs) - if file: - type = 'I' - else: - type = 'D' - - for attr in attrs: - attr, value = splitattr(attr) - if attr.lower() == 'type' and \ - value in ('a', 'A', 'i', 'I', 'd', 'D'): - type = value.upper() - - # -- range support modifications start here - rest = None - range_tup = range_header_to_tuple(req.headers.get('Range', None)) - assert range_tup != () - if range_tup: - (fb, lb) = range_tup - if fb > 0: - rest = fb - # -- range support modifications end here - - fp, retrlen = fw.retrfile(file, type, rest) - - # -- range support modifications start here - if range_tup: - (fb, lb) = range_tup - if lb == '': - if retrlen is None or retrlen == 0: - raise RangeError('Requested Range Not Satisfiable due' - ' to unobtainable file length.') - lb = retrlen - retrlen = lb - fb - if retrlen < 0: - # beginning of range is larger than file - raise RangeError('Requested Range Not Satisfiable') - else: - retrlen = lb - fb - fp = RangeableFileObject(fp, (0, retrlen)) - # -- range support modifications end here - - headers = "" - mtype = 
mimetypes.guess_type(req.get_full_url())[0] - if mtype: - headers += "Content-Type: %s\n" % mtype - if retrlen is not None and retrlen >= 0: - headers += "Content-Length: %d\n" % retrlen - headers = email.message_from_string(headers) - return addinfourl(fp, headers, req.get_full_url()) - except ftplib.all_errors as msg: - raise IOError('ftp error', msg) - - def connect_ftp(self, user, passwd, host, port, dirs): - fw = ftpwrapper(user, passwd, host, port, dirs) - return fw - -class ftpwrapper(urlreq.ftpwrapper): - # range support note: - # this ftpwrapper code is copied directly from - # urllib. The only enhancement is to add the rest - # argument and pass it on to ftp.ntransfercmd - def retrfile(self, file, type, rest=None): - self.endtransfer() - if type in ('d', 'D'): - cmd = 'TYPE A' - isdir = 1 - else: - cmd = 'TYPE ' + type - isdir = 0 - try: - self.ftp.voidcmd(cmd) - except ftplib.all_errors: - self.init() - self.ftp.voidcmd(cmd) - conn = None - if file and not isdir: - # Use nlst to see if the file exists at all - try: - self.ftp.nlst(file) - except ftplib.error_perm as reason: - raise IOError('ftp error', reason) - # Restore the transfer mode! - self.ftp.voidcmd(cmd) - # Try to retrieve as a file - try: - cmd = 'RETR ' + file - conn = self.ftp.ntransfercmd(cmd, rest) - except ftplib.error_perm as reason: - if str(reason).startswith('501'): - # workaround for REST not supported error - fp, retrlen = self.retrfile(file, type) - fp = RangeableFileObject(fp, (rest,'')) - return (fp, retrlen) - elif not str(reason).startswith('550'): - raise IOError('ftp error', reason) - if not conn: - # Set transfer mode to ASCII! 
- self.ftp.voidcmd('TYPE A') - # Try a directory listing - if file: - cmd = 'LIST ' + file - else: - cmd = 'LIST' - conn = self.ftp.ntransfercmd(cmd) - self.busy = 1 - # Pass back both a suitably decorated object and a retrieval length - return (addclosehook(conn[0].makefile('rb'), - self.endtransfer), conn[1]) - - -#################################################################### -# Range Tuple Functions -# XXX: These range tuple functions might go better in a class. - -_rangere = None -def range_header_to_tuple(range_header): - """Get a (firstbyte,lastbyte) tuple from a Range header value. - - Range headers have the form "bytes=<firstbyte>-<lastbyte>". This - function pulls the firstbyte and lastbyte values and returns - a (firstbyte,lastbyte) tuple. If lastbyte is not specified in - the header value, it is returned as an empty string in the - tuple. - - Return None if range_header is None - Return () if range_header does not conform to the range spec - pattern. - - """ - global _rangere - if range_header is None: - return None - if _rangere is None: - _rangere = re.compile(br'^bytes=(\d{1,})-(\d*)') - match = _rangere.match(range_header) - if match: - tup = range_tuple_normalize(match.group(1, 2)) - if tup and tup[1]: - tup = (tup[0], tup[1]+1) - return tup - return () - -def range_tuple_to_header(range_tup): - """Convert a range tuple to a Range header value. - Return a string of the form "bytes=<firstbyte>-<lastbyte>" or None - if no range is needed. - """ - if range_tup is None: - return None - range_tup = range_tuple_normalize(range_tup) - if range_tup: - if range_tup[1]: - range_tup = (range_tup[0], range_tup[1] - 1) - return 'bytes=%s-%s' % range_tup - -def range_tuple_normalize(range_tup): - """Normalize a (first_byte,last_byte) range tuple. - Return a tuple whose first element is guaranteed to be an int - and whose second element will be '' (meaning: the last byte) or - an int. 
Finally, return None if the normalized tuple == (0,'') - as that is equivalent to retrieving the entire file. - """ - if range_tup is None: - return None - # handle first byte - fb = range_tup[0] - if fb in (None, ''): - fb = 0 - else: - fb = int(fb) - # handle last byte - try: - lb = range_tup[1] - except IndexError: - lb = '' - else: - if lb is None: - lb = '' - elif lb != '': - lb = int(lb) - # check if range is over the entire file - if (fb, lb) == (0, ''): - return None - # check that the range is valid - if lb < fb: - raise RangeError('Invalid byte range: %s-%s' % (fb, lb)) - return (fb, lb)
--- a/mercurial/cext/base85.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/base85.c Sun Mar 04 10:42:51 2018 -0500 @@ -14,8 +14,9 @@ #include "util.h" -static const char b85chars[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"; +static const char b85chars[] = + "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~"; static char b85dec[256]; static void b85prep(void) @@ -36,7 +37,7 @@ unsigned int acc, val, ch; int pad = 0; - if (!PyArg_ParseTuple(args, "s#|i", &text, &len, &pad)) + if (!PyArg_ParseTuple(args, PY23("s#|i", "y#|i"), &text, &len, &pad)) return NULL; if (pad) @@ -83,7 +84,7 @@ int c; unsigned int acc; - if (!PyArg_ParseTuple(args, "s#", &text, &len)) + if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &text, &len)) return NULL; olen = len / 5 * 4; @@ -105,25 +106,25 @@ c = b85dec[(int)*text++] - 1; if (c < 0) return PyErr_Format( - PyExc_ValueError, - "bad base85 character at position %d", - (int)i); + PyExc_ValueError, + "bad base85 character at position %d", + (int)i); acc = acc * 85 + c; } if (i++ < len) { c = b85dec[(int)*text++] - 1; if (c < 0) return PyErr_Format( - PyExc_ValueError, - "bad base85 character at position %d", - (int)i); + PyExc_ValueError, + "bad base85 character at position %d", + (int)i); /* overflow detection: 0xffffffff == "|NsC0", * "|NsC" == 0x03030303 */ if (acc > 0x03030303 || (acc *= 85) > 0xffffffff - c) return PyErr_Format( - PyExc_ValueError, - "bad base85 sequence at position %d", - (int)i); + PyExc_ValueError, + "bad base85 sequence at position %d", + (int)i); acc += c; } @@ -145,23 +146,19 @@ static char base85_doc[] = "Base85 Data Encoding"; static PyMethodDef methods[] = { - {"b85encode", b85encode, METH_VARARGS, - "Encode text in base85.\n\n" - "If the second parameter is true, pad the result to a multiple of " - "five characters.\n"}, - {"b85decode", b85decode, METH_VARARGS, "Decode base85 text.\n"}, - {NULL, NULL} + 
{"b85encode", b85encode, METH_VARARGS, + "Encode text in base85.\n\n" + "If the second parameter is true, pad the result to a multiple of " + "five characters.\n"}, + {"b85decode", b85decode, METH_VARARGS, "Decode base85 text.\n"}, + {NULL, NULL}, }; static const int version = 1; #ifdef IS_PY3K static struct PyModuleDef base85_module = { - PyModuleDef_HEAD_INIT, - "base85", - base85_doc, - -1, - methods + PyModuleDef_HEAD_INIT, "base85", base85_doc, -1, methods, }; PyMODINIT_FUNC PyInit_base85(void)
--- a/mercurial/cext/bdiff.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/bdiff.c Sun Mar 04 10:42:51 2018 -0500 @@ -17,9 +17,9 @@ #include "bdiff.h" #include "bitmanipulation.h" +#include "thirdparty/xdiff/xdiff.h" #include "util.h" - static PyObject *blocks(PyObject *self, PyObject *args) { PyObject *sa, *sb, *rl = NULL, *m; @@ -61,42 +61,60 @@ static PyObject *bdiff(PyObject *self, PyObject *args) { - char *sa, *sb, *rb, *ia, *ib; + Py_buffer ba, bb; + char *rb, *ia, *ib; PyObject *result = NULL; - struct bdiff_line *al, *bl; + struct bdiff_line *al = NULL, *bl = NULL; struct bdiff_hunk l, *h; int an, bn, count; Py_ssize_t len = 0, la, lb, li = 0, lcommon = 0, lmax; - PyThreadState *_save; + PyThreadState *_save = NULL; l.next = NULL; - if (!PyArg_ParseTuple(args, "s#s#:bdiff", &sa, &la, &sb, &lb)) + if (!PyArg_ParseTuple(args, PY23("s*s*:bdiff", "y*y*:bdiff"), &ba, &bb)) return NULL; + if (!PyBuffer_IsContiguous(&ba, 'C') || ba.ndim > 1) { + PyErr_SetString(PyExc_ValueError, "bdiff input not contiguous"); + goto cleanup; + } + + if (!PyBuffer_IsContiguous(&bb, 'C') || bb.ndim > 1) { + PyErr_SetString(PyExc_ValueError, "bdiff input not contiguous"); + goto cleanup; + } + + la = ba.len; + lb = bb.len; + if (la > UINT_MAX || lb > UINT_MAX) { PyErr_SetString(PyExc_ValueError, "bdiff inputs too large"); - return NULL; + goto cleanup; } _save = PyEval_SaveThread(); lmax = la > lb ? 
lb : la; - for (ia = sa, ib = sb; - li < lmax && *ia == *ib; - ++li, ++ia, ++ib) + for (ia = ba.buf, ib = bb.buf; li < lmax && *ia == *ib; + ++li, ++ia, ++ib) { if (*ia == '\n') lcommon = li + 1; + } /* we can almost add: if (li == lmax) lcommon = li; */ - an = bdiff_splitlines(sa + lcommon, la - lcommon, &al); - bn = bdiff_splitlines(sb + lcommon, lb - lcommon, &bl); - if (!al || !bl) - goto nomem; + an = bdiff_splitlines((char *)ba.buf + lcommon, la - lcommon, &al); + bn = bdiff_splitlines((char *)bb.buf + lcommon, lb - lcommon, &bl); + if (!al || !bl) { + PyErr_NoMemory(); + goto cleanup; + } count = bdiff_diff(al, an, bl, bn, &l); - if (count < 0) - goto nomem; + if (count < 0) { + PyErr_NoMemory(); + goto cleanup; + } /* calculate length of output */ la = lb = 0; @@ -112,7 +130,7 @@ result = PyBytes_FromStringAndSize(NULL, len); if (!result) - goto nomem; + goto cleanup; /* build binary patch */ rb = PyBytes_AsString(result); @@ -122,7 +140,8 @@ if (h->a1 != la || h->b1 != lb) { len = bl[h->b1].l - bl[lb].l; putbe32((uint32_t)(al[la].l + lcommon - al->l), rb); - putbe32((uint32_t)(al[h->a1].l + lcommon - al->l), rb + 4); + putbe32((uint32_t)(al[h->a1].l + lcommon - al->l), + rb + 4); putbe32((uint32_t)len, rb + 8); memcpy(rb + 12, bl[lb].l, len); rb += 12 + len; @@ -131,13 +150,21 @@ lb = h->b2; } -nomem: +cleanup: if (_save) PyEval_RestoreThread(_save); - free(al); - free(bl); - bdiff_freehunks(l.next); - return result ? 
result : PyErr_NoMemory(); + PyBuffer_Release(&ba); + PyBuffer_Release(&bb); + if (al) { + free(al); + } + if (bl) { + free(bl); + } + if (l.next) { + bdiff_freehunks(l.next); + } + return result; } /* @@ -167,8 +194,8 @@ if (c == ' ' || c == '\t' || c == '\r') { if (!allws && (wlen == 0 || w[wlen - 1] != ' ')) w[wlen++] = ' '; - } else if (c == '\n' && !allws - && wlen > 0 && w[wlen - 1] == ' ') { + } else if (c == '\n' && !allws && wlen > 0 && + w[wlen - 1] == ' ') { w[wlen - 1] = '\n'; } else { w[wlen++] = c; @@ -182,25 +209,130 @@ return result ? result : PyErr_NoMemory(); } +static bool sliceintolist(PyObject *list, Py_ssize_t destidx, + const char *source, Py_ssize_t len) +{ + PyObject *sliced = PyBytes_FromStringAndSize(source, len); + if (sliced == NULL) + return false; + PyList_SET_ITEM(list, destidx, sliced); + return true; +} + +static PyObject *splitnewlines(PyObject *self, PyObject *args) +{ + const char *text; + Py_ssize_t nelts = 0, size, i, start = 0; + PyObject *result = NULL; + + if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &text, &size)) { + goto abort; + } + if (!size) { + return PyList_New(0); + } + /* This loops to size-1 because if the last byte is a newline, + * we don't want to perform a split there. 
*/ + for (i = 0; i < size - 1; ++i) { + if (text[i] == '\n') { + ++nelts; + } + } + if ((result = PyList_New(nelts + 1)) == NULL) + goto abort; + nelts = 0; + for (i = 0; i < size - 1; ++i) { + if (text[i] == '\n') { + if (!sliceintolist(result, nelts++, text + start, + i - start + 1)) + goto abort; + start = i + 1; + } + } + if (!sliceintolist(result, nelts++, text + start, size - start)) + goto abort; + return result; +abort: + Py_XDECREF(result); + return NULL; +} + +static int hunk_consumer(long a1, long a2, long b1, long b2, void *priv) +{ + PyObject *rl = (PyObject *)priv; + PyObject *m = Py_BuildValue("llll", a1, a2, b1, b2); + if (!m) + return -1; + if (PyList_Append(rl, m) != 0) { + Py_DECREF(m); + return -1; + } + return 0; +} + +static PyObject *xdiffblocks(PyObject *self, PyObject *args) +{ + Py_ssize_t la, lb; + mmfile_t a, b; + PyObject *rl; + + xpparam_t xpp = { + XDF_INDENT_HEURISTIC, /* flags */ + NULL, /* anchors */ + 0, /* anchors_nr */ + }; + xdemitconf_t xecfg = { + 0, /* ctxlen */ + 0, /* interhunkctxlen */ + XDL_EMIT_BDIFFHUNK, /* flags */ + NULL, /* find_func */ + NULL, /* find_func_priv */ + hunk_consumer, /* hunk_consume_func */ + }; + xdemitcb_t ecb = { + NULL, /* priv */ + NULL, /* outf */ + }; + + if (!PyArg_ParseTuple(args, PY23("s#s#", "y#y#"), &a.ptr, &la, &b.ptr, + &lb)) + return NULL; + + a.size = la; + b.size = lb; + + rl = PyList_New(0); + if (!rl) + return PyErr_NoMemory(); + + ecb.priv = rl; + + if (xdl_diff(&a, &b, &xpp, &xecfg, &ecb) != 0) { + Py_DECREF(rl); + return PyErr_NoMemory(); + } + + return rl; +} static char mdiff_doc[] = "Efficient binary diff."; static PyMethodDef methods[] = { - {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"}, - {"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"}, - {"fixws", fixws, METH_VARARGS, "normalize diff whitespaces\n"}, - {NULL, NULL} + {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"}, + {"blocks", blocks, METH_VARARGS, "find a list of matching 
lines\n"}, + {"fixws", fixws, METH_VARARGS, "normalize diff whitespaces\n"}, + {"splitnewlines", splitnewlines, METH_VARARGS, + "like str.splitlines, but only split on newlines\n"}, + {"xdiffblocks", xdiffblocks, METH_VARARGS, + "find a list of matching lines using xdiff algorithm\n"}, + {NULL, NULL}, }; -static const int version = 1; +static const int version = 3; #ifdef IS_PY3K static struct PyModuleDef bdiff_module = { - PyModuleDef_HEAD_INIT, - "bdiff", - mdiff_doc, - -1, - methods + PyModuleDef_HEAD_INIT, "bdiff", mdiff_doc, -1, methods, }; PyMODINIT_FUNC PyInit_bdiff(void)
--- a/mercurial/cext/charencode.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/charencode.c Sun Mar 04 10:42:51 2018 -0500 @@ -65,7 +65,6 @@ '\x58', '\x59', '\x5a', /* x-z */ '\x7b', '\x7c', '\x7d', '\x7e', '\x7f' }; -/* clang-format on */ /* 1: no escape, 2: \<c>, 6: \u<x> */ static const uint8_t jsonlentable[256] = { @@ -102,6 +101,7 @@ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', }; +/* clang-format on */ /* * Turn a hex-encoded string into binary. @@ -132,7 +132,8 @@ { const char *buf; Py_ssize_t i, len; - if (!PyArg_ParseTuple(args, "s#:isasciistr", &buf, &len)) + if (!PyArg_ParseTuple(args, PY23("s#:isasciistr", "y#:isasciistr"), + &buf, &len)) return NULL; i = 0; /* char array in PyStringObject should be at least 4-byte aligned */ @@ -151,9 +152,8 @@ Py_RETURN_TRUE; } -static inline PyObject *_asciitransform(PyObject *str_obj, - const char table[128], - PyObject *fallback_fn) +static inline PyObject * +_asciitransform(PyObject *str_obj, const char table[128], PyObject *fallback_fn) { char *str, *newstr; Py_ssize_t i, len; @@ -173,12 +173,12 @@ char c = str[i]; if (c & 0x80) { if (fallback_fn != NULL) { - ret = PyObject_CallFunctionObjArgs(fallback_fn, - str_obj, NULL); + ret = PyObject_CallFunctionObjArgs( + fallback_fn, str_obj, NULL); } else { PyObject *err = PyUnicodeDecodeError_Create( - "ascii", str, len, i, (i + 1), - "unexpected code byte"); + "ascii", str, len, i, (i + 1), + "unexpected code byte"); PyErr_SetObject(PyExc_UnicodeDecodeError, err); Py_XDECREF(err); } @@ -220,10 +220,9 @@ Py_ssize_t pos = 0; const char *table; - if (!PyArg_ParseTuple(args, "O!O!O!:make_file_foldmap", - &PyDict_Type, &dmap, - &PyInt_Type, &spec_obj, - &PyFunction_Type, &normcase_fallback)) + if (!PyArg_ParseTuple(args, "O!O!O!:make_file_foldmap", &PyDict_Type, + &dmap, &PyInt_Type, &spec_obj, &PyFunction_Type, + &normcase_fallback)) goto quit; spec = (int)PyInt_AS_LONG(spec_obj); @@ -251,7 +250,7 @@ while (PyDict_Next(dmap, &pos, 
&k, &v)) { if (!dirstate_tuple_check(v)) { PyErr_SetString(PyExc_TypeError, - "expected a dirstate tuple"); + "expected a dirstate tuple"); goto quit; } @@ -260,10 +259,10 @@ PyObject *normed; if (table != NULL) { normed = _asciitransform(k, table, - normcase_fallback); + normcase_fallback); } else { normed = PyObject_CallFunctionObjArgs( - normcase_fallback, k, NULL); + normcase_fallback, k, NULL); } if (normed == NULL) @@ -292,13 +291,13 @@ char c = buf[i]; if (c & 0x80) { PyErr_SetString(PyExc_ValueError, - "cannot process non-ascii str"); + "cannot process non-ascii str"); return -1; } esclen += jsonparanoidlentable[(unsigned char)c]; if (esclen < 0) { PyErr_SetString(PyExc_MemoryError, - "overflow in jsonescapelen"); + "overflow in jsonescapelen"); return -1; } } @@ -308,7 +307,7 @@ esclen += jsonlentable[(unsigned char)c]; if (esclen < 0) { PyErr_SetString(PyExc_MemoryError, - "overflow in jsonescapelen"); + "overflow in jsonescapelen"); return -1; } } @@ -336,17 +335,17 @@ case '\\': return '\\'; } - return '\0'; /* should not happen */ + return '\0'; /* should not happen */ } /* convert 'origbuf' to JSON-escaped form 'escbuf'; 'origbuf' should only include characters mappable by json(paranoid)lentable */ static void encodejsonescape(char *escbuf, Py_ssize_t esclen, - const char *origbuf, Py_ssize_t origlen, - bool paranoid) + const char *origbuf, Py_ssize_t origlen, + bool paranoid) { const uint8_t *lentable = - (paranoid) ? jsonparanoidlentable : jsonlentable; + (paranoid) ? 
jsonparanoidlentable : jsonlentable; Py_ssize_t i, j; for (i = 0, j = 0; i < origlen; i++) { @@ -377,15 +376,15 @@ const char *origbuf; Py_ssize_t origlen, esclen; int paranoid; - if (!PyArg_ParseTuple(args, "O!i:jsonescapeu8fast", - &PyBytes_Type, &origstr, ¶noid)) + if (!PyArg_ParseTuple(args, "O!i:jsonescapeu8fast", &PyBytes_Type, + &origstr, ¶noid)) return NULL; origbuf = PyBytes_AS_STRING(origstr); origlen = PyBytes_GET_SIZE(origstr); esclen = jsonescapelen(origbuf, origlen, paranoid); if (esclen < 0) - return NULL; /* unsupported char found or overflow */ + return NULL; /* unsupported char found or overflow */ if (origlen == esclen) { Py_INCREF(origstr); return origstr; @@ -395,7 +394,7 @@ if (!escstr) return NULL; encodejsonescape(PyBytes_AS_STRING(escstr), esclen, origbuf, origlen, - paranoid); + paranoid); return escstr; }
--- a/mercurial/cext/charencode.h Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/charencode.h Sun Mar 04 10:42:51 2018 -0500 @@ -25,6 +25,7 @@ PyObject *make_file_foldmap(PyObject *self, PyObject *args); PyObject *jsonescapeu8fast(PyObject *self, PyObject *args); +/* clang-format off */ static const int8_t hextable[256] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, @@ -43,6 +44,7 @@ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 }; +/* clang-format on */ static inline int hexdigit(const char *p, Py_ssize_t off) {
--- a/mercurial/cext/diffhelpers.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/diffhelpers.c Sun Mar 04 10:42:51 2018 -0500 @@ -16,12 +16,11 @@ static char diffhelpers_doc[] = "Efficient diff parsing"; static PyObject *diffhelpers_Error; - /* fixup the last lines of a and b when the patch has no newline at eof */ static void _fix_newline(PyObject *hunk, PyObject *a, PyObject *b) { Py_ssize_t hunksz = PyList_Size(hunk); - PyObject *s = PyList_GET_ITEM(hunk, hunksz-1); + PyObject *s = PyList_GET_ITEM(hunk, hunksz - 1); char *l = PyBytes_AsString(s); Py_ssize_t alen = PyList_Size(a); Py_ssize_t blen = PyList_Size(b); @@ -29,29 +28,28 @@ PyObject *hline; Py_ssize_t sz = PyBytes_GET_SIZE(s); - if (sz > 1 && l[sz-2] == '\r') + if (sz > 1 && l[sz - 2] == '\r') /* tolerate CRLF in last line */ sz -= 1; - hline = PyBytes_FromStringAndSize(l, sz-1); + hline = PyBytes_FromStringAndSize(l, sz - 1); if (!hline) { return; } if (c == ' ' || c == '+') { PyObject *rline = PyBytes_FromStringAndSize(l + 1, sz - 2); - PyList_SetItem(b, blen-1, rline); + PyList_SetItem(b, blen - 1, rline); } if (c == ' ' || c == '-') { Py_INCREF(hline); - PyList_SetItem(a, alen-1, hline); + PyList_SetItem(a, alen - 1, hline); } - PyList_SetItem(hunk, hunksz-1, hline); + PyList_SetItem(hunk, hunksz - 1, hline); } /* python callable form of _fix_newline */ -static PyObject * -fix_newline(PyObject *self, PyObject *args) +static PyObject *fix_newline(PyObject *self, PyObject *args) { PyObject *hunk, *a, *b; if (!PyArg_ParseTuple(args, "OOO", &hunk, &a, &b)) @@ -72,8 +70,7 @@ * The control char from the hunk is saved when inserting into a, but not b * (for performance while deleting files) */ -static PyObject * -addlines(PyObject *self, PyObject *args) +static PyObject *addlines(PyObject *self, PyObject *args) { PyObject *fp, *hunk, *a, *b, *x; @@ -83,8 +80,8 @@ Py_ssize_t todoa, todob; char *s, c; PyObject *l; - if (!PyArg_ParseTuple(args, addlines_format, - &fp, &hunk, &lena, &lenb, &a, &b)) + if 
(!PyArg_ParseTuple(args, addlines_format, &fp, &hunk, &lena, &lenb, + &a, &b)) return NULL; while (1) { @@ -92,7 +89,7 @@ todob = lenb - PyList_Size(b); num = todoa > todob ? todoa : todob; if (num == 0) - break; + break; for (i = 0; i < num; i++) { x = PyFile_GetLine(fp, 0); s = PyBytes_AsString(x); @@ -131,8 +128,7 @@ * a control char at the start of each line, this char is ignored in the * compare */ -static PyObject * -testhunk(PyObject *self, PyObject *args) +static PyObject *testhunk(PyObject *self, PyObject *args) { PyObject *a, *b; @@ -158,21 +154,16 @@ } static PyMethodDef methods[] = { - {"addlines", addlines, METH_VARARGS, "add lines to a hunk\n"}, - {"fix_newline", fix_newline, METH_VARARGS, "fixup newline counters\n"}, - {"testhunk", testhunk, METH_VARARGS, "test lines in a hunk\n"}, - {NULL, NULL} -}; + {"addlines", addlines, METH_VARARGS, "add lines to a hunk\n"}, + {"fix_newline", fix_newline, METH_VARARGS, "fixup newline counters\n"}, + {"testhunk", testhunk, METH_VARARGS, "test lines in a hunk\n"}, + {NULL, NULL}}; static const int version = 1; #ifdef IS_PY3K static struct PyModuleDef diffhelpers_module = { - PyModuleDef_HEAD_INIT, - "diffhelpers", - diffhelpers_doc, - -1, - methods + PyModuleDef_HEAD_INIT, "diffhelpers", diffhelpers_doc, -1, methods, }; PyMODINIT_FUNC PyInit_diffhelpers(void) @@ -183,8 +174,8 @@ if (m == NULL) return NULL; - diffhelpers_Error = PyErr_NewException("diffhelpers.diffhelpersError", - NULL, NULL); + diffhelpers_Error = + PyErr_NewException("diffhelpers.diffhelpersError", NULL, NULL); Py_INCREF(diffhelpers_Error); PyModule_AddObject(m, "diffhelpersError", diffhelpers_Error); PyModule_AddIntConstant(m, "version", version); @@ -192,13 +183,12 @@ return m; } #else -PyMODINIT_FUNC -initdiffhelpers(void) +PyMODINIT_FUNC initdiffhelpers(void) { PyObject *m; m = Py_InitModule3("diffhelpers", methods, diffhelpers_doc); - diffhelpers_Error = PyErr_NewException("diffhelpers.diffhelpersError", - NULL, NULL); + diffhelpers_Error = 
+ PyErr_NewException("diffhelpers.diffhelpersError", NULL, NULL); PyModule_AddIntConstant(m, "version", version); } #endif
--- a/mercurial/cext/manifest.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/manifest.c Sun Mar 04 10:42:51 2018 -0500 @@ -718,7 +718,8 @@ Py_INCREF(self->pydata); for (i = 0; i < self->numlines; i++) { PyObject *arglist = NULL, *result = NULL; - arglist = Py_BuildValue("(s)", self->lines[i].start); + arglist = Py_BuildValue(PY23("(s)", "(y)"), + self->lines[i].start); if (!arglist) { return NULL; }
--- a/mercurial/cext/mpatch.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/mpatch.c Sun Mar 04 10:42:51 2018 -0500 @@ -55,10 +55,10 @@ ssize_t blen; int r; - PyObject *tmp = PyList_GetItem((PyObject*)bins, pos); + PyObject *tmp = PyList_GetItem((PyObject *)bins, pos); if (!tmp) return NULL; - if (PyObject_AsCharBuffer(tmp, &buffer, (Py_ssize_t*)&blen)) + if (PyObject_AsCharBuffer(tmp, &buffer, (Py_ssize_t *)&blen)) return NULL; if ((r = mpatch_decode(buffer, blen, &res)) < 0) { if (!PyErr_Occurred()) @@ -68,8 +68,7 @@ return res; } -static PyObject * -patches(PyObject *self, PyObject *args) +static PyObject *patches(PyObject *self, PyObject *args) { PyObject *text, *bins, *result; struct mpatch_flist *patch; @@ -110,7 +109,14 @@ goto cleanup; } out = PyBytes_AsString(result); - if ((r = mpatch_apply(out, in, inlen, patch)) < 0) { + /* clang-format off */ + { + Py_BEGIN_ALLOW_THREADS + r = mpatch_apply(out, in, inlen, patch); + Py_END_ALLOW_THREADS + } + /* clang-format on */ + if (r < 0) { Py_DECREF(result); result = NULL; } @@ -122,14 +128,13 @@ } /* calculate size of a patched file directly */ -static PyObject * -patchedsize(PyObject *self, PyObject *args) +static PyObject *patchedsize(PyObject *self, PyObject *args) { long orig, start, end, len, outlen = 0, last = 0, pos = 0; Py_ssize_t patchlen; char *bin; - if (!PyArg_ParseTuple(args, "ls#", &orig, &bin, &patchlen)) + if (!PyArg_ParseTuple(args, PY23("ls#", "ly#"), &orig, &bin, &patchlen)) return NULL; while (pos >= 0 && pos < patchlen) { @@ -146,7 +151,8 @@ if (pos != patchlen) { if (!PyErr_Occurred()) - PyErr_SetString(mpatch_Error, "patch cannot be decoded"); + PyErr_SetString(mpatch_Error, + "patch cannot be decoded"); return NULL; } @@ -155,20 +161,16 @@ } static PyMethodDef methods[] = { - {"patches", patches, METH_VARARGS, "apply a series of patches\n"}, - {"patchedsize", patchedsize, METH_VARARGS, "calculed patched size\n"}, - {NULL, NULL} + {"patches", patches, METH_VARARGS, "apply a series of 
patches\n"}, + {"patchedsize", patchedsize, METH_VARARGS, "calculed patched size\n"}, + {NULL, NULL}, }; static const int version = 1; #ifdef IS_PY3K static struct PyModuleDef mpatch_module = { - PyModuleDef_HEAD_INIT, - "mpatch", - mpatch_doc, - -1, - methods + PyModuleDef_HEAD_INIT, "mpatch", mpatch_doc, -1, methods, }; PyMODINIT_FUNC PyInit_mpatch(void) @@ -179,8 +181,8 @@ if (m == NULL) return NULL; - mpatch_Error = PyErr_NewException("mercurial.cext.mpatch.mpatchError", - NULL, NULL); + mpatch_Error = + PyErr_NewException("mercurial.cext.mpatch.mpatchError", NULL, NULL); Py_INCREF(mpatch_Error); PyModule_AddObject(m, "mpatchError", mpatch_Error); PyModule_AddIntConstant(m, "version", version); @@ -188,13 +190,12 @@ return m; } #else -PyMODINIT_FUNC -initmpatch(void) +PyMODINIT_FUNC initmpatch(void) { PyObject *m; m = Py_InitModule3("mpatch", methods, mpatch_doc); - mpatch_Error = PyErr_NewException("mercurial.cext.mpatch.mpatchError", - NULL, NULL); + mpatch_Error = + PyErr_NewException("mercurial.cext.mpatch.mpatchError", NULL, NULL); PyModule_AddIntConstant(m, "version", version); } #endif
--- a/mercurial/cext/osutil.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/osutil.c Sun Mar 04 10:42:51 2018 -0500 @@ -184,7 +184,7 @@ ? _S_IFDIR : _S_IFREG; if (!wantstat) - return Py_BuildValue("si", fd->cFileName, kind); + return Py_BuildValue(PY23("si", "yi"), fd->cFileName, kind); py_st = PyObject_CallObject((PyObject *)&listdir_stat_type, NULL); if (!py_st) @@ -202,7 +202,7 @@ if (kind == _S_IFREG) stp->st_size = ((__int64)fd->nFileSizeHigh << 32) + fd->nFileSizeLow; - return Py_BuildValue("siN", fd->cFileName, + return Py_BuildValue(PY23("siN", "yiN"), fd->cFileName, kind, py_st); } @@ -390,9 +390,11 @@ stat = makestat(&st); if (!stat) goto error; - elem = Py_BuildValue("siN", ent->d_name, kind, stat); + elem = Py_BuildValue(PY23("siN", "yiN"), ent->d_name, + kind, stat); } else - elem = Py_BuildValue("si", ent->d_name, kind); + elem = Py_BuildValue(PY23("si", "yi"), ent->d_name, + kind); if (!elem) goto error; stat = NULL; @@ -570,9 +572,11 @@ stat = makestat(&st); if (!stat) goto error; - elem = Py_BuildValue("siN", filename, kind, stat); + elem = Py_BuildValue(PY23("siN", "yiN"), + filename, kind, stat); } else - elem = Py_BuildValue("si", filename, kind); + elem = Py_BuildValue(PY23("si", "yi"), + filename, kind); if (!elem) goto error; stat = NULL; @@ -754,7 +758,7 @@ static PyObject *setprocname(PyObject *self, PyObject *args) { const char *name = NULL; - if (!PyArg_ParseTuple(args, "s", &name)) + if (!PyArg_ParseTuple(args, PY23("s", "y"), &name)) return NULL; #if defined(SETPROCNAME_USE_SETPROCTITLE) @@ -1101,14 +1105,14 @@ const char *path = NULL; struct statfs buf; int r; - if (!PyArg_ParseTuple(args, "s", &path)) + if (!PyArg_ParseTuple(args, PY23("s", "y"), &path)) return NULL; memset(&buf, 0, sizeof(buf)); r = statfs(path, &buf); if (r != 0) return PyErr_SetFromErrno(PyExc_OSError); - return Py_BuildValue("s", describefstype(&buf)); + return Py_BuildValue(PY23("s", "y"), describefstype(&buf)); } #endif /* defined(HAVE_LINUX_STATFS) || 
defined(HAVE_BSD_STATFS) */ @@ -1119,14 +1123,14 @@ const char *path = NULL; struct statfs buf; int r; - if (!PyArg_ParseTuple(args, "s", &path)) + if (!PyArg_ParseTuple(args, PY23("s", "y"), &path)) return NULL; memset(&buf, 0, sizeof(buf)); r = statfs(path, &buf); if (r != 0) return PyErr_SetFromErrno(PyExc_OSError); - return Py_BuildValue("s", buf.f_mntonname); + return Py_BuildValue(PY23("s", "y"), buf.f_mntonname); } #endif /* defined(HAVE_BSD_STATFS) */ @@ -1160,7 +1164,8 @@ static char *kwlist[] = {"path", "stat", "skip", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s#|OO:listdir", + if (!PyArg_ParseTupleAndKeywords(args, kwargs, PY23("s#|OO:listdir", + "y#|OO:listdir"), kwlist, &path, &plen, &statobj, &skipobj)) return NULL; @@ -1193,7 +1198,9 @@ int plus; FILE *fp; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "et|si:posixfile", kwlist, + if (!PyArg_ParseTupleAndKeywords(args, kwds, PY23("et|si:posixfile", + "et|yi:posixfile"), + kwlist, Py_FileSystemDefaultEncoding, &name, &mode, &bufsize)) return NULL;
--- a/mercurial/cext/parsers.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/parsers.c Sun Mar 04 10:42:51 2018 -0500 @@ -48,8 +48,9 @@ char *str, *start, *end; int len; - if (!PyArg_ParseTuple(args, "O!O!s#:parse_manifest", &PyDict_Type, - &mfdict, &PyDict_Type, &fdict, &str, &len)) + if (!PyArg_ParseTuple( + args, PY23("O!O!s#:parse_manifest", "O!O!y#:parse_manifest"), + &PyDict_Type, &mfdict, &PyDict_Type, &fdict, &str, &len)) goto quit; start = str; @@ -241,8 +242,9 @@ unsigned int flen, len, pos = 40; int readlen; - if (!PyArg_ParseTuple(args, "O!O!s#:parse_dirstate", &PyDict_Type, - &dmap, &PyDict_Type, &cmap, &str, &readlen)) + if (!PyArg_ParseTuple( + args, PY23("O!O!s#:parse_dirstate", "O!O!y#:parse_dirstate"), + &PyDict_Type, &dmap, &PyDict_Type, &cmap, &str, &readlen)) goto quit; len = readlen; @@ -254,7 +256,7 @@ goto quit; } - parents = Py_BuildValue("s#s#", str, 20, str + 20, 20); + parents = Py_BuildValue(PY23("s#s#", "y#y#"), str, 20, str + 20, 20); if (!parents) goto quit; @@ -645,7 +647,8 @@ Py_ssize_t offset, stop; PyObject *markers = NULL; - if (!PyArg_ParseTuple(args, "s#nn", &data, &datalen, &offset, &stop)) { + if (!PyArg_ParseTuple(args, PY23("s#nn", "y#nn"), &data, &datalen, + &offset, &stop)) { return NULL; } dataend = data + datalen;
--- a/mercurial/cext/pathencode.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/pathencode.c Sun Mar 04 10:42:51 2018 -0500 @@ -26,26 +26,26 @@ /* state machine for the fast path */ enum path_state { - START, /* first byte of a path component */ - A, /* "AUX" */ + START, /* first byte of a path component */ + A, /* "AUX" */ AU, - THIRD, /* third of a 3-byte sequence, e.g. "AUX", "NUL" */ - C, /* "CON" or "COMn" */ + THIRD, /* third of a 3-byte sequence, e.g. "AUX", "NUL" */ + C, /* "CON" or "COMn" */ CO, - COMLPT, /* "COM" or "LPT" */ + COMLPT, /* "COM" or "LPT" */ COMLPTn, L, LP, N, NU, - P, /* "PRN" */ + P, /* "PRN" */ PR, - LDOT, /* leading '.' */ - DOT, /* '.' in a non-leading position */ - H, /* ".h" */ - HGDI, /* ".hg", ".d", or ".i" */ + LDOT, /* leading '.' */ + DOT, /* '.' in a non-leading position */ + H, /* ".h" */ + HGDI, /* ".hg", ".d", or ".i" */ SPACE, - DEFAULT /* byte of a path component after the first */ + DEFAULT, /* byte of a path component after the first */ }; /* state machine for dir-encoding */ @@ -53,7 +53,7 @@ DDOT, DH, DHGDI, - DDEFAULT + DDEFAULT, }; static inline int inset(const uint32_t bitset[], char c) @@ -82,7 +82,7 @@ } static inline void hexencode(char *dest, Py_ssize_t *destlen, size_t destsize, - uint8_t c) + uint8_t c) { static const char hexdigit[] = "0123456789abcdef"; @@ -92,14 +92,14 @@ /* 3-byte escape: tilde followed by two hex digits */ static inline void escape3(char *dest, Py_ssize_t *destlen, size_t destsize, - char c) + char c) { charcopy(dest, destlen, destsize, '~'); hexencode(dest, destlen, destsize, c); } -static Py_ssize_t _encodedir(char *dest, size_t destsize, - const char *src, Py_ssize_t len) +static Py_ssize_t _encodedir(char *dest, size_t destsize, const char *src, + Py_ssize_t len) { enum dir_state state = DDEFAULT; Py_ssize_t i = 0, destlen = 0; @@ -126,8 +126,8 @@ if (src[i] == 'g') { state = DHGDI; charcopy(dest, &destlen, destsize, src[i++]); - } - else state = DDEFAULT; + } else + state = 
DDEFAULT; break; case DHGDI: if (src[i] == '/') { @@ -173,17 +173,15 @@ if (newobj) { assert(PyBytes_Check(newobj)); Py_SIZE(newobj)--; - _encodedir(PyBytes_AS_STRING(newobj), newlen, path, - len + 1); + _encodedir(PyBytes_AS_STRING(newobj), newlen, path, len + 1); } return newobj; } static Py_ssize_t _encode(const uint32_t twobytes[8], const uint32_t onebyte[8], - char *dest, Py_ssize_t destlen, size_t destsize, - const char *src, Py_ssize_t len, - int encodedir) + char *dest, Py_ssize_t destlen, size_t destsize, + const char *src, Py_ssize_t len, int encodedir) { enum path_state state = START; Py_ssize_t i = 0; @@ -237,15 +235,15 @@ if (src[i] == 'u') { state = AU; charcopy(dest, &destlen, destsize, src[i++]); - } - else state = DEFAULT; + } else + state = DEFAULT; break; case AU: if (src[i] == 'x') { state = THIRD; i++; - } - else state = DEFAULT; + } else + state = DEFAULT; break; case THIRD: state = DEFAULT; @@ -264,24 +262,30 @@ if (src[i] == 'o') { state = CO; charcopy(dest, &destlen, destsize, src[i++]); - } - else state = DEFAULT; + } else + state = DEFAULT; break; case CO: if (src[i] == 'm') { state = COMLPT; i++; - } - else if (src[i] == 'n') { + } else if (src[i] == 'n') { state = THIRD; i++; - } - else state = DEFAULT; + } else + state = DEFAULT; break; case COMLPT: switch (src[i]) { - case '1': case '2': case '3': case '4': case '5': - case '6': case '7': case '8': case '9': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': state = COMLPTn; i++; break; @@ -301,8 +305,8 @@ charcopy(dest, &destlen, destsize, src[i - 1]); break; default: - memcopy(dest, &destlen, destsize, - &src[i - 2], 2); + memcopy(dest, &destlen, destsize, &src[i - 2], + 2); break; } break; @@ -310,43 +314,43 @@ if (src[i] == 'p') { state = LP; charcopy(dest, &destlen, destsize, src[i++]); - } - else state = DEFAULT; + } else + state = DEFAULT; break; case LP: if (src[i] == 't') { state = COMLPT; i++; - } - else state = 
DEFAULT; + } else + state = DEFAULT; break; case N: if (src[i] == 'u') { state = NU; charcopy(dest, &destlen, destsize, src[i++]); - } - else state = DEFAULT; + } else + state = DEFAULT; break; case NU: if (src[i] == 'l') { state = THIRD; i++; - } - else state = DEFAULT; + } else + state = DEFAULT; break; case P: if (src[i] == 'r') { state = PR; charcopy(dest, &destlen, destsize, src[i++]); - } - else state = DEFAULT; + } else + state = DEFAULT; break; case PR: if (src[i] == 'n') { state = THIRD; i++; - } - else state = DEFAULT; + } else + state = DEFAULT; break; case LDOT: switch (src[i]) { @@ -393,18 +397,18 @@ if (src[i] == 'g') { state = HGDI; charcopy(dest, &destlen, destsize, src[i++]); - } - else state = DEFAULT; + } else + state = DEFAULT; break; case HGDI: if (src[i] == '/') { state = START; if (encodedir) memcopy(dest, &destlen, destsize, ".hg", - 3); + 3); charcopy(dest, &destlen, destsize, src[i++]); - } - else state = DEFAULT; + } else + state = DEFAULT; break; case SPACE: switch (src[i]) { @@ -444,19 +448,17 @@ if (inset(onebyte, src[i])) { do { charcopy(dest, &destlen, - destsize, src[i++]); + destsize, src[i++]); } while (i < len && - inset(onebyte, src[i])); - } - else if (inset(twobytes, src[i])) { + inset(onebyte, src[i])); + } else if (inset(twobytes, src[i])) { char c = src[i++]; charcopy(dest, &destlen, destsize, '_'); charcopy(dest, &destlen, destsize, - c == '_' ? '_' : c + 32); - } - else + c == '_' ? 
'_' : c + 32); + } else escape3(dest, &destlen, destsize, - src[i++]); + src[i++]); break; } break; @@ -466,31 +468,29 @@ return destlen; } -static Py_ssize_t basicencode(char *dest, size_t destsize, - const char *src, Py_ssize_t len) +static Py_ssize_t basicencode(char *dest, size_t destsize, const char *src, + Py_ssize_t len) { - static const uint32_t twobytes[8] = { 0, 0, 0x87fffffe }; + static const uint32_t twobytes[8] = {0, 0, 0x87fffffe}; static const uint32_t onebyte[8] = { - 1, 0x2bff3bfa, 0x68000001, 0x2fffffff, + 1, 0x2bff3bfa, 0x68000001, 0x2fffffff, }; Py_ssize_t destlen = 0; - return _encode(twobytes, onebyte, dest, destlen, destsize, - src, len, 1); + return _encode(twobytes, onebyte, dest, destlen, destsize, src, len, 1); } static const Py_ssize_t maxstorepathlen = 120; -static Py_ssize_t _lowerencode(char *dest, size_t destsize, - const char *src, Py_ssize_t len) +static Py_ssize_t _lowerencode(char *dest, size_t destsize, const char *src, + Py_ssize_t len) { - static const uint32_t onebyte[8] = { - 1, 0x2bfffbfb, 0xe8000001, 0x2fffffff - }; + static const uint32_t onebyte[8] = {1, 0x2bfffbfb, 0xe8000001, + 0x2fffffff}; - static const uint32_t lower[8] = { 0, 0, 0x7fffffe }; + static const uint32_t lower[8] = {0, 0, 0x7fffffe}; Py_ssize_t i, destlen = 0; @@ -512,7 +512,8 @@ Py_ssize_t len, newlen; PyObject *ret; - if (!PyArg_ParseTuple(args, "s#:lowerencode", &path, &len)) + if (!PyArg_ParseTuple(args, PY23("s#:lowerencode", "y#:lowerencode"), + &path, &len)) return NULL; newlen = _lowerencode(NULL, 0, path, len); @@ -524,13 +525,13 @@ } /* See store.py:_auxencode for a description. 
*/ -static Py_ssize_t auxencode(char *dest, size_t destsize, - const char *src, Py_ssize_t len) +static Py_ssize_t auxencode(char *dest, size_t destsize, const char *src, + Py_ssize_t len) { static const uint32_t twobytes[8]; static const uint32_t onebyte[8] = { - ~0U, 0xffff3ffe, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, + ~0U, 0xffff3ffe, ~0U, ~0U, ~0U, ~0U, ~0U, ~0U, }; return _encode(twobytes, onebyte, dest, 0, destsize, src, len, 0); @@ -590,8 +591,7 @@ break; charcopy(dest, &destlen, destsize, src[i]); p = -1; - } - else if (p < dirprefixlen) + } else if (p < dirprefixlen) charcopy(dest, &destlen, destsize, src[i]); } @@ -622,13 +622,13 @@ slop = maxstorepathlen - used; if (slop > 0) { Py_ssize_t basenamelen = - lastslash >= 0 ? len - lastslash - 2 : len - 1; + lastslash >= 0 ? len - lastslash - 2 : len - 1; if (basenamelen > slop) basenamelen = slop; if (basenamelen > 0) memcopy(dest, &destlen, destsize, &src[lastslash + 1], - basenamelen); + basenamelen); } /* Add hash and suffix. */ @@ -637,7 +637,7 @@ if (lastdot >= 0) memcopy(dest, &destlen, destsize, &src[lastdot], - len - lastdot - 1); + len - lastdot - 1); assert(PyBytes_Check(ret)); Py_SIZE(ret) = destlen; @@ -672,8 +672,8 @@ if (shafunc == NULL) { PyErr_SetString(PyExc_AttributeError, - "module 'hashlib' has no " - "attribute 'sha1'"); + "module 'hashlib' has no " + "attribute 'sha1'"); return -1; } } @@ -690,7 +690,7 @@ if (!PyBytes_Check(hashobj) || PyBytes_GET_SIZE(hashobj) != 20) { PyErr_SetString(PyExc_TypeError, - "result of digest is not a 20-byte hash"); + "result of digest is not a 20-byte hash"); Py_DECREF(hashobj); return -1; } @@ -755,10 +755,9 @@ assert(PyBytes_Check(newobj)); Py_SIZE(newobj)--; basicencode(PyBytes_AS_STRING(newobj), newlen, path, - len + 1); + len + 1); } - } - else + } else newobj = hashencode(path, len + 1); return newobj;
--- a/mercurial/cext/revlog.c Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/revlog.c Sun Mar 04 10:42:51 2018 -0500 @@ -87,9 +87,9 @@ static Py_ssize_t inline_scan(indexObject *self, const char **offsets); #if LONG_MAX == 0x7fffffffL -static char *tuple_format = "Kiiiiiis#"; +static const char *const tuple_format = PY23("Kiiiiiis#", "Kiiiiiiy#"); #else -static char *tuple_format = "kiiiiiis#"; +static const char *const tuple_format = PY23("kiiiiiis#", "kiiiiiiy#"); #endif /* A RevlogNG v1 index entry is 64 bytes long. */ @@ -643,8 +643,10 @@ if (!PyArg_ParseTuple(args, "O", &roots)) goto done; - if (roots == NULL || !PyList_Check(roots)) + if (roots == NULL || !PyList_Check(roots)) { + PyErr_SetString(PyExc_TypeError, "roots must be a list"); goto done; + } phases = calloc(len, 1); /* phase per rev: {0: public, 1: draft, 2: secret} */ if (phases == NULL) { @@ -667,8 +669,11 @@ if (phaseset == NULL) goto release; PyList_SET_ITEM(phasessetlist, i+1, phaseset); - if (!PyList_Check(phaseroots)) + if (!PyList_Check(phaseroots)) { + PyErr_SetString(PyExc_TypeError, + "roots item must be a list"); goto release; + } minrevphase = add_roots_get_min(self, phaseroots, i+1, phases); if (minrevphase == -2) /* Error from add_roots_get_min */ goto release; @@ -1243,7 +1248,7 @@ char *node; int rev, i; - if (!PyArg_ParseTuple(args, "s#", &node, &nodelen)) + if (!PyArg_ParseTuple(args, PY23("s#", "y#"), &node, &nodelen)) return NULL; if (nodelen < 4) { @@ -2077,7 +2082,7 @@ Py_INCREF(&indexType); PyModule_AddObject(mod, "index", (PyObject *)&indexType); - nullentry = Py_BuildValue("iiiiiiis#", 0, 0, 0, + nullentry = Py_BuildValue(PY23("iiiiiiis#", "iiiiiiiy#"), 0, 0, 0, -1, -1, -1, -1, nullid, 20); if (nullentry) PyObject_GC_UnTrack(nullentry);
--- a/mercurial/cext/util.h Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cext/util.h Sun Mar 04 10:42:51 2018 -0500 @@ -14,6 +14,13 @@ #define IS_PY3K #endif +/* helper to switch things like string literal depending on Python version */ +#ifdef IS_PY3K +#define PY23(py2, py3) py3 +#else +#define PY23(py2, py3) py2 +#endif + /* clang-format off */ typedef struct { PyObject_HEAD
--- a/mercurial/changegroup.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/changegroup.py Sun Mar 04 10:42:51 2018 -0500 @@ -32,6 +32,10 @@ _CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s" _CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH" +# When narrowing is finalized and no longer subject to format changes, +# we should move this to just "narrow" or similar. +NARROW_REQUIREMENT = 'narrowhg-experimental' + readexactly = util.readexactly def getchunk(stream): @@ -894,6 +898,11 @@ # support versions 01 and 02. versions.discard('01') versions.discard('02') + if NARROW_REQUIREMENT in repo.requirements: + # Versions 01 and 02 don't support revlog flags, and we need to + # support that for stripping and unbundling to work. + versions.discard('01') + versions.discard('02') return versions def localversion(repo):
--- a/mercurial/changelog.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/changelog.py Sun Mar 04 10:42:51 2018 -0500 @@ -20,9 +20,11 @@ from . import ( encoding, error, + pycompat, revlog, util, ) +from .utils import dateutil _defaultextra = {'branch': 'default'} @@ -90,6 +92,11 @@ return self.offset def flush(self): pass + + @property + def closed(self): + return self.fp.closed + def close(self): self.fp.close() @@ -127,6 +134,13 @@ self.offset += len(s) self._end += len(s) + def __enter__(self): + self.fp.__enter__() + return self + + def __exit__(self, *args): + return self.fp.__exit__(*args) + def _divertopener(opener, target): """build an opener that writes in 'target.a' instead of 'target'""" def _divert(name, mode='r', checkambig=False): @@ -420,7 +434,7 @@ self._delaybuf = None self._divert = False # split when we're done - self.checkinlinesize(tr) + self._enforceinlinesize(tr) def _writepending(self, tr): "create a file containing the unfinalized state for pretxnchangegroup" @@ -446,9 +460,9 @@ return False - def checkinlinesize(self, tr, fp=None): + def _enforceinlinesize(self, tr, fp=None): if not self._delayed: - revlog.revlog.checkinlinesize(self, tr, fp) + revlog.revlog._enforceinlinesize(self, tr, fp) def read(self, node): """Obtain data from a parsed changelog revision. @@ -505,15 +519,15 @@ if not user: raise error.RevlogError(_("empty username")) if "\n" in user: - raise error.RevlogError(_("username %s contains a newline") - % repr(user)) + raise error.RevlogError(_("username %r contains a newline") + % pycompat.bytestr(user)) desc = stripdesc(desc) if date: - parseddate = "%d %d" % util.parsedate(date) + parseddate = "%d %d" % dateutil.parsedate(date) else: - parseddate = "%d %d" % util.makedate() + parseddate = "%d %d" % dateutil.makedate() if extra: branch = extra.get("branch") if branch in ("default", ""):
--- a/mercurial/cmdutil.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/cmdutil.py Sun Mar 04 10:42:51 2018 -0500 @@ -8,7 +8,6 @@ from __future__ import absolute_import import errno -import itertools import os import re import tempfile @@ -26,30 +25,29 @@ changelog, copies, crecord as crecordmod, - dagop, dirstateguard, encoding, error, formatter, - graphmod, + logcmdutil, match as matchmod, - mdiff, + merge as mergemod, obsolete, patch, pathutil, pycompat, registrar, revlog, - revset, - revsetlang, rewriteutil, scmutil, smartset, + subrepoutil, templatekw, templater, util, vfs as vfsmod, ) +from .utils import dateutil stringio = util.stringio # templates of common command options @@ -225,7 +223,6 @@ def dorecord(ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts): - from . import merge as mergemod opts = pycompat.byteskwargs(opts) if not ui.interactive(): if cmdsuggest: @@ -562,8 +559,6 @@ return '\n'.join(commentedlines) + '\n' def _conflictsmsg(repo): - # avoid merge cycle - from . import merge as mergemod mergestate = mergemod.mergestate.read(repo) if not mergestate.active(): return @@ -898,65 +893,97 @@ else: return commiteditor -def loglimit(opts): - """get the log limit according to option -l/--limit""" - limit = opts.get('limit') - if limit: - try: - limit = int(limit) - except ValueError: - raise error.Abort(_('limit must be a positive integer')) - if limit <= 0: - raise error.Abort(_('limit must be positive')) - else: - limit = None - return limit - -def makefilename(repo, pat, node, desc=None, - total=None, seqno=None, revwidth=None, pathname=None): - node_expander = { - 'H': lambda: hex(node), - 'R': lambda: '%d' % repo.changelog.rev(node), - 'h': lambda: short(node), - 'm': lambda: re.sub('[^\w]', '_', desc or '') - } +def rendertemplate(ctx, tmpl, props=None): + """Expand a literal template 'tmpl' byte-string against one changeset + + Each props item must be a stringify-able value or a callable returning + such value, i.e. 
no bare list nor dict should be passed. + """ + repo = ctx.repo() + tres = formatter.templateresources(repo.ui, repo) + t = formatter.maketemplater(repo.ui, tmpl, defaults=templatekw.keywords, + resources=tres) + mapping = {'ctx': ctx, 'revcache': {}} + if props: + mapping.update(props) + return t.render(mapping) + +def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None): + r"""Convert old-style filename format string to template string + + >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0) + 'foo-{reporoot|basename}-{seqno}.patch' + >>> _buildfntemplate(b'%R{tags % "{tag}"}%H') + '{rev}{tags % "{tag}"}{node}' + + '\' in outermost strings has to be escaped because it is a directory + separator on Windows: + + >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0) + 'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch' + >>> _buildfntemplate(b'\\\\foo\\bar.patch') + '\\\\\\\\foo\\\\bar.patch' + >>> _buildfntemplate(b'\\{tags % "{tag}"}') + '\\\\{tags % "{tag}"}' + + but inner strings follow the template rules (i.e. '\' is taken as an + escape character): + + >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0) + '{"c:\\tmp"}' + """ expander = { - '%': lambda: '%', - 'b': lambda: os.path.basename(repo.root), - } - - try: - if node: - expander.update(node_expander) - if node: - expander['r'] = (lambda: - ('%d' % repo.changelog.rev(node)).zfill(revwidth or 0)) - if total is not None: - expander['N'] = lambda: '%d' % total - if seqno is not None: - expander['n'] = lambda: '%d' % seqno - if total is not None and seqno is not None: - expander['n'] = (lambda: ('%d' % seqno).zfill(len('%d' % total))) - if pathname is not None: - expander['s'] = lambda: os.path.basename(pathname) - expander['d'] = lambda: os.path.dirname(pathname) or '.' 
- expander['p'] = lambda: pathname - - newname = [] - patlen = len(pat) - i = 0 - while i < patlen: - c = pat[i:i + 1] - if c == '%': - i += 1 - c = pat[i:i + 1] - c = expander[c]() - newname.append(c) - i += 1 - return ''.join(newname) - except KeyError as inst: - raise error.Abort(_("invalid format spec '%%%s' in output filename") % - inst.args[0]) + b'H': b'{node}', + b'R': b'{rev}', + b'h': b'{node|short}', + b'm': br'{sub(r"[^\w]", "_", desc|firstline)}', + b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}', + b'%': b'%', + b'b': b'{reporoot|basename}', + } + if total is not None: + expander[b'N'] = b'{total}' + if seqno is not None: + expander[b'n'] = b'{seqno}' + if total is not None and seqno is not None: + expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}' + if pathname is not None: + expander[b's'] = b'{pathname|basename}' + expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}' + expander[b'p'] = b'{pathname}' + + newname = [] + for typ, start, end in templater.scantemplate(pat, raw=True): + if typ != b'string': + newname.append(pat[start:end]) + continue + i = start + while i < end: + n = pat.find(b'%', i, end) + if n < 0: + newname.append(util.escapestr(pat[i:end])) + break + newname.append(util.escapestr(pat[i:n])) + if n + 2 > end: + raise error.Abort(_("incomplete format spec in output " + "filename")) + c = pat[n + 1:n + 2] + i = n + 2 + try: + newname.append(expander[c]) + except KeyError: + raise error.Abort(_("invalid format spec '%%%s' in output " + "filename") % c) + return ''.join(newname) + +def makefilename(ctx, pat, **props): + if not pat: + return pat + tmpl = _buildfntemplate(pat, **props) + # BUG: alias expansion shouldn't be made against template fragments + # rewritten from %-format strings, but we have no easy way to partially + # disable the expansion. 
+ return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props)) def isstdiofilename(pat): """True if the given pat looks like a filename denoting stdin/stdout""" @@ -981,19 +1008,17 @@ def __exit__(self, exc_type, exc_value, exc_tb): pass -def makefileobj(repo, pat, node=None, desc=None, total=None, - seqno=None, revwidth=None, mode='wb', modemap=None, - pathname=None): - +def makefileobj(ctx, pat, mode='wb', modemap=None, **props): writable = mode not in ('r', 'rb') if isstdiofilename(pat): + repo = ctx.repo() if writable: fp = repo.ui.fout else: fp = repo.ui.fin return _unclosablefile(fp) - fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname) + fn = makefilename(ctx, pat, **props) if modemap is not None: mode = modemap.get(fn, mode) if mode == 'wb': @@ -1453,7 +1478,7 @@ patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix, files, eolmode=None) except error.PatchError as e: - raise error.Abort(str(e)) + raise error.Abort(util.forcebytestr(e)) if opts.get('exact'): editor = None else: @@ -1506,7 +1531,7 @@ write("# HG changeset patch\n") write("# User %s\n" % ctx.user()) write("# Date %d %d\n" % ctx.date()) - write("# %s\n" % util.datestr(ctx.date())) + write("# %s\n" % dateutil.datestr(ctx.date())) if branch and branch != 'default': write("# Branch %s\n" % branch) write("# Node ID %s\n" % hex(node)) @@ -1568,11 +1593,8 @@ ctx = repo[rev] fo = None if not fp and fntemplate: - desc_lines = ctx.description().rstrip().split('\n') - desc = desc_lines[0] #Commit always has a first line. 
- fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc, - total=total, seqno=seqno, revwidth=revwidth, - mode='wb', modemap=filemode) + fo = makefileobj(ctx, fntemplate, mode='wb', modemap=filemode, + total=total, seqno=seqno, revwidth=revwidth) dest = fo.name def write(s, **kw): fo.write(s) @@ -1583,500 +1605,6 @@ if fo is not None: fo.close() -def diffordiffstat(ui, repo, diffopts, node1, node2, match, - changes=None, stat=False, fp=None, prefix='', - root='', listsubrepos=False, hunksfilterfn=None): - '''show diff or diffstat.''' - if fp is None: - write = ui.write - else: - def write(s, **kw): - fp.write(s) - - if root: - relroot = pathutil.canonpath(repo.root, repo.getcwd(), root) - else: - relroot = '' - if relroot != '': - # XXX relative roots currently don't work if the root is within a - # subrepo - uirelroot = match.uipath(relroot) - relroot += '/' - for matchroot in match.files(): - if not matchroot.startswith(relroot): - ui.warn(_('warning: %s not inside relative root %s\n') % ( - match.uipath(matchroot), uirelroot)) - - if stat: - diffopts = diffopts.copy(context=0, noprefix=False) - width = 80 - if not ui.plain(): - width = ui.termwidth() - chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts, - prefix=prefix, relroot=relroot, - hunksfilterfn=hunksfilterfn) - for chunk, label in patch.diffstatui(util.iterlines(chunks), - width=width): - write(chunk, label=label) - else: - for chunk, label in patch.diffui(repo, node1, node2, match, - changes, opts=diffopts, prefix=prefix, - relroot=relroot, - hunksfilterfn=hunksfilterfn): - write(chunk, label=label) - - if listsubrepos: - ctx1 = repo[node1] - ctx2 = repo[node2] - for subpath, sub in scmutil.itersubrepos(ctx1, ctx2): - tempnode2 = node2 - try: - if node2 is not None: - tempnode2 = ctx2.substate[subpath][1] - except KeyError: - # A subrepo that existed in node1 was deleted between node1 and - # node2 (inclusive). Thus, ctx2's substate won't contain that - # subpath. 
The best we can do is to ignore it. - tempnode2 = None - submatch = matchmod.subdirmatcher(subpath, match) - sub.diff(ui, diffopts, tempnode2, submatch, changes=changes, - stat=stat, fp=fp, prefix=prefix) - -def _changesetlabels(ctx): - labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()] - if ctx.obsolete(): - labels.append('changeset.obsolete') - if ctx.isunstable(): - labels.append('changeset.unstable') - for instability in ctx.instabilities(): - labels.append('instability.%s' % instability) - return ' '.join(labels) - -class changeset_printer(object): - '''show changeset information when templating not requested.''' - - def __init__(self, ui, repo, matchfn, diffopts, buffered): - self.ui = ui - self.repo = repo - self.buffered = buffered - self.matchfn = matchfn - self.diffopts = diffopts - self.header = {} - self.hunk = {} - self.lastheader = None - self.footer = None - self._columns = templatekw.getlogcolumns() - - def flush(self, ctx): - rev = ctx.rev() - if rev in self.header: - h = self.header[rev] - if h != self.lastheader: - self.lastheader = h - self.ui.write(h) - del self.header[rev] - if rev in self.hunk: - self.ui.write(self.hunk[rev]) - del self.hunk[rev] - - def close(self): - if self.footer: - self.ui.write(self.footer) - - def show(self, ctx, copies=None, matchfn=None, hunksfilterfn=None, - **props): - props = pycompat.byteskwargs(props) - if self.buffered: - self.ui.pushbuffer(labeled=True) - self._show(ctx, copies, matchfn, hunksfilterfn, props) - self.hunk[ctx.rev()] = self.ui.popbuffer() - else: - self._show(ctx, copies, matchfn, hunksfilterfn, props) - - def _show(self, ctx, copies, matchfn, hunksfilterfn, props): - '''show a single changeset or file revision''' - changenode = ctx.node() - rev = ctx.rev() - - if self.ui.quiet: - self.ui.write("%s\n" % scmutil.formatchangeid(ctx), - label='log.node') - return - - columns = self._columns - self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx), - label=_changesetlabels(ctx)) - 
- # branches are shown first before any other names due to backwards - # compatibility - branch = ctx.branch() - # don't show the default branch name - if branch != 'default': - self.ui.write(columns['branch'] % branch, label='log.branch') - - for nsname, ns in self.repo.names.iteritems(): - # branches has special logic already handled above, so here we just - # skip it - if nsname == 'branches': - continue - # we will use the templatename as the color name since those two - # should be the same - for name in ns.names(self.repo, changenode): - self.ui.write(ns.logfmt % name, - label='log.%s' % ns.colorname) - if self.ui.debugflag: - self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase') - for pctx in scmutil.meaningfulparents(self.repo, ctx): - label = 'log.parent changeset.%s' % pctx.phasestr() - self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx), - label=label) - - if self.ui.debugflag and rev is not None: - mnode = ctx.manifestnode() - mrev = self.repo.manifestlog._revlog.rev(mnode) - self.ui.write(columns['manifest'] - % scmutil.formatrevnode(self.ui, mrev, mnode), - label='ui.debug log.manifest') - self.ui.write(columns['user'] % ctx.user(), label='log.user') - self.ui.write(columns['date'] % util.datestr(ctx.date()), - label='log.date') - - if ctx.isunstable(): - instabilities = ctx.instabilities() - self.ui.write(columns['instability'] % ', '.join(instabilities), - label='log.instability') - - elif ctx.obsolete(): - self._showobsfate(ctx) - - self._exthook(ctx) - - if self.ui.debugflag: - files = ctx.p1().status(ctx)[:3] - for key, value in zip(['files', 'files+', 'files-'], files): - if value: - self.ui.write(columns[key] % " ".join(value), - label='ui.debug log.files') - elif ctx.files() and self.ui.verbose: - self.ui.write(columns['files'] % " ".join(ctx.files()), - label='ui.note log.files') - if copies and self.ui.verbose: - copies = ['%s (%s)' % c for c in copies] - self.ui.write(columns['copies'] % ' '.join(copies), - 
label='ui.note log.copies') - - extra = ctx.extra() - if extra and self.ui.debugflag: - for key, value in sorted(extra.items()): - self.ui.write(columns['extra'] % (key, util.escapestr(value)), - label='ui.debug log.extra') - - description = ctx.description().strip() - if description: - if self.ui.verbose: - self.ui.write(_("description:\n"), - label='ui.note log.description') - self.ui.write(description, - label='ui.note log.description') - self.ui.write("\n\n") - else: - self.ui.write(columns['summary'] % description.splitlines()[0], - label='log.summary') - self.ui.write("\n") - - self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn) - - def _showobsfate(self, ctx): - obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui) - - if obsfate: - for obsfateline in obsfate: - self.ui.write(self._columns['obsolete'] % obsfateline, - label='log.obsfate') - - def _exthook(self, ctx): - '''empty method used by extension as a hook point - ''' - - def showpatch(self, ctx, matchfn, hunksfilterfn=None): - if not matchfn: - matchfn = self.matchfn - if matchfn: - stat = self.diffopts.get('stat') - diff = self.diffopts.get('patch') - diffopts = patch.diffallopts(self.ui, self.diffopts) - node = ctx.node() - prev = ctx.p1().node() - if stat: - diffordiffstat(self.ui, self.repo, diffopts, prev, node, - match=matchfn, stat=True, - hunksfilterfn=hunksfilterfn) - if diff: - if stat: - self.ui.write("\n") - diffordiffstat(self.ui, self.repo, diffopts, prev, node, - match=matchfn, stat=False, - hunksfilterfn=hunksfilterfn) - if stat or diff: - self.ui.write("\n") - -class jsonchangeset(changeset_printer): - '''format changeset information.''' - - def __init__(self, ui, repo, matchfn, diffopts, buffered): - changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered) - self.cache = {} - self._first = True - - def close(self): - if not self._first: - self.ui.write("\n]\n") - else: - self.ui.write("[]\n") - - def _show(self, ctx, copies, matchfn, 
hunksfilterfn, props): - '''show a single changeset or file revision''' - rev = ctx.rev() - if rev is None: - jrev = jnode = 'null' - else: - jrev = '%d' % rev - jnode = '"%s"' % hex(ctx.node()) - j = encoding.jsonescape - - if self._first: - self.ui.write("[\n {") - self._first = False - else: - self.ui.write(",\n {") - - if self.ui.quiet: - self.ui.write(('\n "rev": %s') % jrev) - self.ui.write((',\n "node": %s') % jnode) - self.ui.write('\n }') - return - - self.ui.write(('\n "rev": %s') % jrev) - self.ui.write((',\n "node": %s') % jnode) - self.ui.write((',\n "branch": "%s"') % j(ctx.branch())) - self.ui.write((',\n "phase": "%s"') % ctx.phasestr()) - self.ui.write((',\n "user": "%s"') % j(ctx.user())) - self.ui.write((',\n "date": [%d, %d]') % ctx.date()) - self.ui.write((',\n "desc": "%s"') % j(ctx.description())) - - self.ui.write((',\n "bookmarks": [%s]') % - ", ".join('"%s"' % j(b) for b in ctx.bookmarks())) - self.ui.write((',\n "tags": [%s]') % - ", ".join('"%s"' % j(t) for t in ctx.tags())) - self.ui.write((',\n "parents": [%s]') % - ", ".join('"%s"' % c.hex() for c in ctx.parents())) - - if self.ui.debugflag: - if rev is None: - jmanifestnode = 'null' - else: - jmanifestnode = '"%s"' % hex(ctx.manifestnode()) - self.ui.write((',\n "manifest": %s') % jmanifestnode) - - self.ui.write((',\n "extra": {%s}') % - ", ".join('"%s": "%s"' % (j(k), j(v)) - for k, v in ctx.extra().items())) - - files = ctx.p1().status(ctx) - self.ui.write((',\n "modified": [%s]') % - ", ".join('"%s"' % j(f) for f in files[0])) - self.ui.write((',\n "added": [%s]') % - ", ".join('"%s"' % j(f) for f in files[1])) - self.ui.write((',\n "removed": [%s]') % - ", ".join('"%s"' % j(f) for f in files[2])) - - elif self.ui.verbose: - self.ui.write((',\n "files": [%s]') % - ", ".join('"%s"' % j(f) for f in ctx.files())) - - if copies: - self.ui.write((',\n "copies": {%s}') % - ", ".join('"%s": "%s"' % (j(k), j(v)) - for k, v in copies)) - - matchfn = self.matchfn - if matchfn: - stat = 
self.diffopts.get('stat') - diff = self.diffopts.get('patch') - diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True) - node, prev = ctx.node(), ctx.p1().node() - if stat: - self.ui.pushbuffer() - diffordiffstat(self.ui, self.repo, diffopts, prev, node, - match=matchfn, stat=True) - self.ui.write((',\n "diffstat": "%s"') - % j(self.ui.popbuffer())) - if diff: - self.ui.pushbuffer() - diffordiffstat(self.ui, self.repo, diffopts, prev, node, - match=matchfn, stat=False) - self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer())) - - self.ui.write("\n }") - -class changeset_templater(changeset_printer): - '''format changeset information. - - Note: there are a variety of convenience functions to build a - changeset_templater for common cases. See functions such as: - makelogtemplater, show_changeset, buildcommittemplate, or other - functions that use changesest_templater. - ''' - - # Arguments before "buffered" used to be positional. Consider not - # adding/removing arguments before "buffered" to not break callers. 
- def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None, - buffered=False): - diffopts = diffopts or {} - - changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered) - tres = formatter.templateresources(ui, repo) - self.t = formatter.loadtemplater(ui, tmplspec, - defaults=templatekw.keywords, - resources=tres, - cache=templatekw.defaulttempl) - self._counter = itertools.count() - self.cache = tres['cache'] # shared with _graphnodeformatter() - - self._tref = tmplspec.ref - self._parts = {'header': '', 'footer': '', - tmplspec.ref: tmplspec.ref, - 'docheader': '', 'docfooter': '', - 'separator': ''} - if tmplspec.mapfile: - # find correct templates for current mode, for backward - # compatibility with 'log -v/-q/--debug' using a mapfile - tmplmodes = [ - (True, ''), - (self.ui.verbose, '_verbose'), - (self.ui.quiet, '_quiet'), - (self.ui.debugflag, '_debug'), - ] - for mode, postfix in tmplmodes: - for t in self._parts: - cur = t + postfix - if mode and cur in self.t: - self._parts[t] = cur - else: - partnames = [p for p in self._parts.keys() if p != tmplspec.ref] - m = formatter.templatepartsmap(tmplspec, self.t, partnames) - self._parts.update(m) - - if self._parts['docheader']: - self.ui.write(templater.stringify(self.t(self._parts['docheader']))) - - def close(self): - if self._parts['docfooter']: - if not self.footer: - self.footer = "" - self.footer += templater.stringify(self.t(self._parts['docfooter'])) - return super(changeset_templater, self).close() - - def _show(self, ctx, copies, matchfn, hunksfilterfn, props): - '''show a single changeset or file revision''' - props = props.copy() - props['ctx'] = ctx - props['index'] = index = next(self._counter) - props['revcache'] = {'copies': copies} - props = pycompat.strkwargs(props) - - # write separator, which wouldn't work well with the header part below - # since there's inherently a conflict between header (across items) and - # separator (per item) - if self._parts['separator'] 
and index > 0: - self.ui.write(templater.stringify(self.t(self._parts['separator']))) - - # write header - if self._parts['header']: - h = templater.stringify(self.t(self._parts['header'], **props)) - if self.buffered: - self.header[ctx.rev()] = h - else: - if self.lastheader != h: - self.lastheader = h - self.ui.write(h) - - # write changeset metadata, then patch if requested - key = self._parts[self._tref] - self.ui.write(templater.stringify(self.t(key, **props))) - self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn) - - if self._parts['footer']: - if not self.footer: - self.footer = templater.stringify( - self.t(self._parts['footer'], **props)) - -def logtemplatespec(tmpl, mapfile): - if mapfile: - return formatter.templatespec('changeset', tmpl, mapfile) - else: - return formatter.templatespec('', tmpl, None) - -def _lookuplogtemplate(ui, tmpl, style): - """Find the template matching the given template spec or style - - See formatter.lookuptemplate() for details. - """ - - # ui settings - if not tmpl and not style: # template are stronger than style - tmpl = ui.config('ui', 'logtemplate') - if tmpl: - return logtemplatespec(templater.unquotestring(tmpl), None) - else: - style = util.expandpath(ui.config('ui', 'style')) - - if not tmpl and style: - mapfile = style - if not os.path.split(mapfile)[0]: - mapname = (templater.templatepath('map-cmdline.' + mapfile) - or templater.templatepath(mapfile)) - if mapname: - mapfile = mapname - return logtemplatespec(None, mapfile) - - if not tmpl: - return logtemplatespec(None, None) - - return formatter.lookuptemplate(ui, 'changeset', tmpl) - -def makelogtemplater(ui, repo, tmpl, buffered=False): - """Create a changeset_templater from a literal template 'tmpl' - byte-string.""" - spec = logtemplatespec(tmpl, None) - return changeset_templater(ui, repo, spec, buffered=buffered) - -def show_changeset(ui, repo, opts, buffered=False): - """show one changeset using template or regular display. 
- - Display format will be the first non-empty hit of: - 1. option 'template' - 2. option 'style' - 3. [ui] setting 'logtemplate' - 4. [ui] setting 'style' - If all of these values are either the unset or the empty string, - regular display via changeset_printer() is done. - """ - # options - match = None - if opts.get('patch') or opts.get('stat'): - match = scmutil.matchall(repo) - - if opts.get('template') == 'json': - return jsonchangeset(ui, repo, match, opts, buffered) - - spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style')) - - if not spec.ref and not spec.tmpl and not spec.mapfile: - return changeset_printer(ui, repo, match, opts, buffered) - - return changeset_templater(ui, repo, spec, match, opts, buffered) - def showmarker(fm, marker, index=None): """utility function to display obsolescence marker in a readable way @@ -2095,13 +1623,14 @@ fm.write('date', '(%s) ', fm.formatdate(marker.date())) meta = marker.metadata().copy() meta.pop('date', None) - fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', ')) + smeta = util.rapply(pycompat.maybebytestr, meta) + fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', ')) fm.plain('\n') def finddate(ui, repo, date): """Find the tipmost changeset that matches the given date spec""" - df = util.matchdate(date) + df = dateutil.matchdate(date) m = scmutil.matchall(repo) results = {} @@ -2114,7 +1643,7 @@ rev = ctx.rev() if rev in results: ui.status(_("found revision %s from %s\n") % - (rev, util.datestr(results[rev]))) + (rev, dateutil.datestr(results[rev]))) return '%d' % rev raise error.Abort(_("revision matching date not found")) @@ -2352,7 +1881,7 @@ else: self.revs.discard(value) ctx = change(value) - matches = filter(match, ctx.files()) + matches = [f for f in ctx.files() if match(f)] if matches: fncache[value] = matches self.set.add(value) @@ -2415,394 +1944,6 @@ return iterate() -def _makelogmatcher(repo, revs, pats, opts): - """Build matcher and expanded 
patterns from log options - - If --follow, revs are the revisions to follow from. - - Returns (match, pats, slowpath) where - - match: a matcher built from the given pats and -I/-X opts - - pats: patterns used (globs are expanded on Windows) - - slowpath: True if patterns aren't as simple as scanning filelogs - """ - # pats/include/exclude are passed to match.match() directly in - # _matchfiles() revset but walkchangerevs() builds its matcher with - # scmutil.match(). The difference is input pats are globbed on - # platforms without shell expansion (windows). - wctx = repo[None] - match, pats = scmutil.matchandpats(wctx, pats, opts) - slowpath = match.anypats() or (not match.always() and opts.get('removed')) - if not slowpath: - follow = opts.get('follow') or opts.get('follow_first') - startctxs = [] - if follow and opts.get('rev'): - startctxs = [repo[r] for r in revs] - for f in match.files(): - if follow and startctxs: - # No idea if the path was a directory at that revision, so - # take the slow path. - if any(f not in c for c in startctxs): - slowpath = True - continue - elif follow and f not in wctx: - # If the file exists, it may be a directory, so let it - # take the slow path. - if os.path.exists(repo.wjoin(f)): - slowpath = True - continue - else: - raise error.Abort(_('cannot follow file not in parent ' - 'revision: "%s"') % f) - filelog = repo.file(f) - if not filelog: - # A zero count may be a directory or deleted file, so - # try to find matching entries on the slow path. - if follow: - raise error.Abort( - _('cannot follow nonexistent file: "%s"') % f) - slowpath = True - - # We decided to fall back to the slowpath because at least one - # of the paths was not a file. Check to see if at least one of them - # existed in history - in that case, we'll continue down the - # slowpath; otherwise, we can turn off the slowpath - if slowpath: - for path in match.files(): - if path == '.' 
or path in repo.store: - break - else: - slowpath = False - - return match, pats, slowpath - -def _fileancestors(repo, revs, match, followfirst): - fctxs = [] - for r in revs: - ctx = repo[r] - fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match)) - - # When displaying a revision with --patch --follow FILE, we have - # to know which file of the revision must be diffed. With - # --follow, we want the names of the ancestors of FILE in the - # revision, stored in "fcache". "fcache" is populated as a side effect - # of the graph traversal. - fcache = {} - def filematcher(rev): - return scmutil.matchfiles(repo, fcache.get(rev, [])) - - def revgen(): - for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst): - fcache[rev] = [c.path() for c in cs] - yield rev - return smartset.generatorset(revgen(), iterasc=False), filematcher - -def _makenofollowlogfilematcher(repo, pats, opts): - '''hook for extensions to override the filematcher for non-follow cases''' - return None - -_opt2logrevset = { - 'no_merges': ('not merge()', None), - 'only_merges': ('merge()', None), - '_matchfiles': (None, '_matchfiles(%ps)'), - 'date': ('date(%s)', None), - 'branch': ('branch(%s)', '%lr'), - '_patslog': ('filelog(%s)', '%lr'), - 'keyword': ('keyword(%s)', '%lr'), - 'prune': ('ancestors(%s)', 'not %lr'), - 'user': ('user(%s)', '%lr'), -} - -def _makelogrevset(repo, match, pats, slowpath, opts): - """Return a revset string built from log options and file patterns""" - opts = dict(opts) - # follow or not follow? - follow = opts.get('follow') or opts.get('follow_first') - - # branch and only_branch are really aliases and must be handled at - # the same time - opts['branch'] = opts.get('branch', []) + opts.get('only_branch', []) - opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']] - - if slowpath: - # See walkchangerevs() slow path. 
- # - # pats/include/exclude cannot be represented as separate - # revset expressions as their filtering logic applies at file - # level. For instance "-I a -X b" matches a revision touching - # "a" and "b" while "file(a) and not file(b)" does - # not. Besides, filesets are evaluated against the working - # directory. - matchargs = ['r:', 'd:relpath'] - for p in pats: - matchargs.append('p:' + p) - for p in opts.get('include', []): - matchargs.append('i:' + p) - for p in opts.get('exclude', []): - matchargs.append('x:' + p) - opts['_matchfiles'] = matchargs - elif not follow: - opts['_patslog'] = list(pats) - - expr = [] - for op, val in sorted(opts.iteritems()): - if not val: - continue - if op not in _opt2logrevset: - continue - revop, listop = _opt2logrevset[op] - if revop and '%' not in revop: - expr.append(revop) - elif not listop: - expr.append(revsetlang.formatspec(revop, val)) - else: - if revop: - val = [revsetlang.formatspec(revop, v) for v in val] - expr.append(revsetlang.formatspec(listop, val)) - - if expr: - expr = '(' + ' and '.join(expr) + ')' - else: - expr = None - return expr - -def _logrevs(repo, opts): - """Return the initial set of revisions to be filtered or followed""" - follow = opts.get('follow') or opts.get('follow_first') - if opts.get('rev'): - revs = scmutil.revrange(repo, opts['rev']) - elif follow and repo.dirstate.p1() == nullid: - revs = smartset.baseset() - elif follow: - revs = repo.revs('.') - else: - revs = smartset.spanset(repo) - revs.reverse() - return revs - -def getlogrevs(repo, pats, opts): - """Return (revs, filematcher) where revs is a smartset - - filematcher is a callable taking a revision number and returning a match - objects filtering the files to be detailed when displaying the revision. 
- """ - follow = opts.get('follow') or opts.get('follow_first') - followfirst = opts.get('follow_first') - limit = loglimit(opts) - revs = _logrevs(repo, opts) - if not revs: - return smartset.baseset(), None - match, pats, slowpath = _makelogmatcher(repo, revs, pats, opts) - filematcher = None - if follow: - if slowpath or match.always(): - revs = dagop.revancestors(repo, revs, followfirst=followfirst) - else: - revs, filematcher = _fileancestors(repo, revs, match, followfirst) - revs.reverse() - if filematcher is None: - filematcher = _makenofollowlogfilematcher(repo, pats, opts) - if filematcher is None: - def filematcher(rev): - return match - - expr = _makelogrevset(repo, match, pats, slowpath, opts) - if opts.get('graph') and opts.get('rev'): - # User-specified revs might be unsorted, but don't sort before - # _makelogrevset because it might depend on the order of revs - if not (revs.isdescending() or revs.istopo()): - revs.sort(reverse=True) - if expr: - matcher = revset.match(None, expr) - revs = matcher(repo, revs) - if limit is not None: - revs = revs.slice(0, limit) - return revs, filematcher - -def _parselinerangelogopt(repo, opts): - """Parse --line-range log option and return a list of tuples (filename, - (fromline, toline)). - """ - linerangebyfname = [] - for pat in opts.get('line_range', []): - try: - pat, linerange = pat.rsplit(',', 1) - except ValueError: - raise error.Abort(_('malformatted line-range pattern %s') % pat) - try: - fromline, toline = map(int, linerange.split(':')) - except ValueError: - raise error.Abort(_("invalid line range for %s") % pat) - msg = _("line range pattern '%s' must match exactly one file") % pat - fname = scmutil.parsefollowlinespattern(repo, None, pat, msg) - linerangebyfname.append( - (fname, util.processlinerange(fromline, toline))) - return linerangebyfname - -def getloglinerangerevs(repo, userrevs, opts): - """Return (revs, filematcher, hunksfilter). 
- - "revs" are revisions obtained by processing "line-range" log options and - walking block ancestors of each specified file/line-range. - - "filematcher(rev) -> match" is a factory function returning a match object - for a given revision for file patterns specified in --line-range option. - If neither --stat nor --patch options are passed, "filematcher" is None. - - "hunksfilter(rev) -> filterfn(fctx, hunks)" is a factory function - returning a hunks filtering function. - If neither --stat nor --patch options are passed, "filterhunks" is None. - """ - wctx = repo[None] - - # Two-levels map of "rev -> file ctx -> [line range]". - linerangesbyrev = {} - for fname, (fromline, toline) in _parselinerangelogopt(repo, opts): - if fname not in wctx: - raise error.Abort(_('cannot follow file not in parent ' - 'revision: "%s"') % fname) - fctx = wctx.filectx(fname) - for fctx, linerange in dagop.blockancestors(fctx, fromline, toline): - rev = fctx.introrev() - if rev not in userrevs: - continue - linerangesbyrev.setdefault( - rev, {}).setdefault( - fctx.path(), []).append(linerange) - - filematcher = None - hunksfilter = None - if opts.get('patch') or opts.get('stat'): - - def nofilterhunksfn(fctx, hunks): - return hunks - - def hunksfilter(rev): - fctxlineranges = linerangesbyrev.get(rev) - if fctxlineranges is None: - return nofilterhunksfn - - def filterfn(fctx, hunks): - lineranges = fctxlineranges.get(fctx.path()) - if lineranges is not None: - for hr, lines in hunks: - if hr is None: # binary - yield hr, lines - continue - if any(mdiff.hunkinrange(hr[2:], lr) - for lr in lineranges): - yield hr, lines - else: - for hunk in hunks: - yield hunk - - return filterfn - - def filematcher(rev): - files = list(linerangesbyrev.get(rev, [])) - return scmutil.matchfiles(repo, files) - - revs = sorted(linerangesbyrev, reverse=True) - - return revs, filematcher, hunksfilter - -def _graphnodeformatter(ui, displayer): - spec = ui.config('ui', 'graphnodetemplate') - if not spec: - 
return templatekw.showgraphnode # fast path for "{graphnode}" - - spec = templater.unquotestring(spec) - tres = formatter.templateresources(ui) - if isinstance(displayer, changeset_templater): - tres['cache'] = displayer.cache # reuse cache of slow templates - templ = formatter.maketemplater(ui, spec, defaults=templatekw.keywords, - resources=tres) - def formatnode(repo, ctx): - props = {'ctx': ctx, 'repo': repo, 'revcache': {}} - return templ.render(props) - return formatnode - -def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, - filematcher=None, props=None): - props = props or {} - formatnode = _graphnodeformatter(ui, displayer) - state = graphmod.asciistate() - styles = state['styles'] - - # only set graph styling if HGPLAIN is not set. - if ui.plain('graph'): - # set all edge styles to |, the default pre-3.8 behaviour - styles.update(dict.fromkeys(styles, '|')) - else: - edgetypes = { - 'parent': graphmod.PARENT, - 'grandparent': graphmod.GRANDPARENT, - 'missing': graphmod.MISSINGPARENT - } - for name, key in edgetypes.items(): - # experimental config: experimental.graphstyle.* - styles[key] = ui.config('experimental', 'graphstyle.%s' % name, - styles[key]) - if not styles[key]: - styles[key] = None - - # experimental config: experimental.graphshorten - state['graphshorten'] = ui.configbool('experimental', 'graphshorten') - - for rev, type, ctx, parents in dag: - char = formatnode(repo, ctx) - copies = None - if getrenamed and ctx.rev(): - copies = [] - for fn in ctx.files(): - rename = getrenamed(fn, ctx.rev()) - if rename: - copies.append((fn, rename[0])) - revmatchfn = None - if filematcher is not None: - revmatchfn = filematcher(ctx.rev()) - edges = edgefn(type, char, state, rev, parents) - firstedge = next(edges) - width = firstedge[2] - displayer.show(ctx, copies=copies, matchfn=revmatchfn, - _graphwidth=width, **pycompat.strkwargs(props)) - lines = displayer.hunk.pop(rev).split('\n') - if not lines[-1]: - del lines[-1] - 
displayer.flush(ctx) - for type, char, width, coldata in itertools.chain([firstedge], edges): - graphmod.ascii(ui, state, type, char, lines, coldata) - lines = [] - displayer.close() - -def graphlog(ui, repo, revs, filematcher, opts): - # Parameters are identical to log command ones - revdag = graphmod.dagwalker(repo, revs) - - getrenamed = None - if opts.get('copies'): - endrev = None - if opts.get('rev'): - endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1 - getrenamed = templatekw.getrenamedfn(repo, endrev=endrev) - - ui.pager('log') - displayer = show_changeset(ui, repo, opts, buffered=True) - displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed, - filematcher) - -def checkunsupportedgraphflags(pats, opts): - for op in ["newest_first"]: - if op in opts and opts[op]: - raise error.Abort(_("-G/--graph option is incompatible with --%s") - % op.replace("_", "-")) - -def graphrevs(repo, nodes, opts): - limit = loglimit(opts) - nodes.reverse() - if limit is not None: - nodes = nodes[:limit] - return graphmod.nodes(repo, nodes) - def add(ui, repo, match, prefix, explicitonly, **opts): join = lambda f: os.path.join(prefix, f) bad = [] @@ -3071,7 +2212,7 @@ def write(path): filename = None if fntemplate: - filename = makefilename(repo, fntemplate, ctx.node(), + filename = makefilename(ctx, fntemplate, pathname=os.path.join(prefix, path)) # attempt to create the directory if it does not already exist try: @@ -3089,12 +2230,16 @@ mfnode = ctx.manifestnode() try: if mfnode and mfl[mfnode].find(file)[0]: + scmutil.fileprefetchhooks(repo, ctx, [file]) write(file) return 0 except KeyError: pass - for abs in ctx.walk(matcher): + files = [f for f in ctx.walk(matcher)] + scmutil.fileprefetchhooks(repo, ctx, files) + + for abs in files: write(abs) err = 0 @@ -3117,7 +2262,7 @@ '''commit the specified files or all outstanding changes''' date = opts.get('date') if date: - opts['date'] = util.parsedate(date) + opts['date'] = dateutil.parsedate(date) 
message = logmessage(ui, opts) matcher = scmutil.match(repo[None], pats, opts) @@ -3182,7 +2327,7 @@ date = opts.get('date') or old.date() # Parse the date to allow comparison between date and old.date() - date = util.parsedate(date) + date = dateutil.parsedate(date) if len(old.parents()) > 1: # ctx.files() isn't reliable for merges, so fall back to the @@ -3204,13 +2349,12 @@ # subrepo.precommit(). To minimize the risk of this hack, we do # nothing if .hgsub does not exist. if '.hgsub' in wctx or '.hgsub' in old: - from . import subrepo # avoid cycle: cmdutil -> subrepo -> cmdutil - subs, commitsubs, newsubstate = subrepo.precommit( + subs, commitsubs, newsubstate = subrepoutil.precommit( ui, wctx, wctx._status, matcher) # amend should abort if commitsubrepos is enabled assert not commitsubs if subs: - subrepo.writestate(repo, newsubstate) + subrepoutil.writestate(repo, newsubstate) filestoamend = set(f for f in wctx.files() if matcher(f)) @@ -3398,7 +2542,7 @@ def buildcommittemplate(repo, ctx, subs, extramsg, ref): ui = repo.ui spec = formatter.templatespec(ref, None, None) - t = changeset_templater(ui, repo, spec, None, {}, False) + t = logcmdutil.changesettemplater(ui, repo, spec) t.t.cache.update((k, templater.unquotestring(v)) for k, v in repo.ui.configitems('committemplate')) @@ -3481,12 +2625,12 @@ if not opts.get('close_branch'): for r in parents: if r.closesbranch() and r.branch() == branch: - repo.ui.status(_('reopening closed branch head %d\n') % r) + repo.ui.status(_('reopening closed branch head %d\n') % r.rev()) if repo.ui.debugflag: - repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex())) + repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())) elif repo.ui.verbose: - repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx)) + repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx)) def postcommitstatus(repo, pats, opts): return repo.status(match=scmutil.match(repo[None], pats, opts)) @@ -3763,7 
+2907,15 @@ if not opts.get('dry_run'): needdata = ('revert', 'add', 'undelete') - _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata]) + if _revertprefetch is not _revertprefetchstub: + ui.deprecwarn("'cmdutil._revertprefetch' is deprecated, " + "add a callback to 'scmutil.fileprefetchhooks'", + '4.6', stacklevel=1) + _revertprefetch(repo, ctx, + *[actions[name][0] for name in needdata]) + oplist = [actions[name][0] for name in needdata] + prefetch = scmutil.fileprefetchhooks + prefetch(repo, ctx, [f for sublist in oplist for f in sublist]) _performrevert(repo, parents, ctx, actions, interactive, tobackup) if targetsubs: @@ -3776,8 +2928,11 @@ raise error.Abort("subrepository '%s' does not exist in %s!" % (sub, short(ctx.node()))) -def _revertprefetch(repo, ctx, *files): - """Let extension changing the storage layer prefetch content""" +def _revertprefetchstub(repo, ctx, *files): + """Stub method for detecting extension wrapping of _revertprefetch(), to + issue a deprecation warning.""" + +_revertprefetch = _revertprefetchstub def _performrevert(repo, parents, ctx, actions, interactive=False, tobackup=None): @@ -3791,7 +2946,6 @@ parent, p2 = parents node = ctx.node() excluded_files = [] - matcher_opts = {"exclude": excluded_files} def checkout(f): fc = ctx[f] @@ -3812,7 +2966,7 @@ if choice == 0: repo.dirstate.drop(f) else: - excluded_files.append(repo.wjoin(f)) + excluded_files.append(f) else: repo.dirstate.drop(f) for f in actions['remove'][0]: @@ -3823,7 +2977,7 @@ if choice == 0: doremove(f) else: - excluded_files.append(repo.wjoin(f)) + excluded_files.append(f) else: doremove(f) for f in actions['drop'][0]: @@ -3843,8 +2997,8 @@ newlyaddedandmodifiedfiles = set() if interactive: # Prompt the user for changes to revert - torevert = [repo.wjoin(f) for f in actions['revert'][0]] - m = scmutil.match(ctx, torevert, matcher_opts) + torevert = [f for f in actions['revert'][0] if f not in excluded_files] + m = scmutil.matchfiles(repo, torevert) 
diffopts = patch.difffeatureopts(repo.ui, whitespace=True) diffopts.nodates = True diffopts.git = True @@ -4025,3 +3179,23 @@ if after[1]: hint = after[0] raise error.Abort(_('no %s in progress') % task, hint=hint) + +class changeset_printer(logcmdutil.changesetprinter): + + def __init__(self, ui, *args, **kwargs): + msg = ("'cmdutil.changeset_printer' is deprecated, " + "use 'logcmdutil.logcmdutil'") + ui.deprecwarn(msg, "4.6") + super(changeset_printer, self).__init__(ui, *args, **kwargs) + +def displaygraph(ui, *args, **kwargs): + msg = ("'cmdutil.displaygraph' is deprecated, " + "use 'logcmdutil.displaygraph'") + ui.deprecwarn(msg, "4.6") + return logcmdutil.displaygraph(ui, *args, **kwargs) + +def show_changeset(ui, *args, **kwargs): + msg = ("'cmdutil.show_changeset' is deprecated, " + "use 'logcmdutil.changesetdisplayer'") + ui.deprecwarn(msg, "4.6") + return logcmdutil.changesetdisplayer(ui, *args, **kwargs)
--- a/mercurial/color.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/color.py Sun Mar 04 10:42:51 2018 -0500 @@ -371,7 +371,7 @@ """add color control code according to the mode""" if ui._colormode == 'debug': if label and msg: - if msg[-1] == '\n': + if msg.endswith('\n'): msg = "[%s|%s]\n" % (label, msg[:-1]) else: msg = "[%s|%s]" % (label, msg)
--- a/mercurial/commands.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/commands.py Sun Mar 04 10:42:51 2018 -0500 @@ -41,6 +41,7 @@ help, hg, lock as lockmod, + logcmdutil, merge as mergemod, obsolete, obsutil, @@ -53,13 +54,14 @@ rewriteutil, scmutil, server, - sshserver, streamclone, tags as tagsmod, templatekw, ui as uimod, util, + wireprotoserver, ) +from .utils import dateutil release = lockmod.release @@ -301,9 +303,9 @@ rootfm = ui.formatter('annotate', opts) if ui.quiet: - datefunc = util.shortdate + datefunc = dateutil.shortdate else: - datefunc = util.datestr + datefunc = dateutil.datestr if ctx.rev() is None: def hexfn(node): if node is None: @@ -336,8 +338,8 @@ ('number', ' ', lambda x: x.fctx.rev(), formatrev), ('changeset', ' ', lambda x: hexfn(x.fctx.node()), formathex), ('date', ' ', lambda x: x.fctx.date(), util.cachefunc(datefunc)), - ('file', ' ', lambda x: x.fctx.path(), str), - ('line_number', ':', lambda x: x.lineno, str), + ('file', ' ', lambda x: x.fctx.path(), pycompat.bytestr), + ('line_number', ':', lambda x: x.lineno, pycompat.bytestr), ] fieldnamemap = {'number': 'rev', 'changeset': 'node'} @@ -475,7 +477,7 @@ if not ctx: raise error.Abort(_('no working directory: please specify a revision')) node = ctx.node() - dest = cmdutil.makefilename(repo, dest, node) + dest = cmdutil.makefilename(ctx, dest) if os.path.realpath(dest) == repo.root: raise error.Abort(_('repository root cannot be destination')) @@ -485,11 +487,11 @@ if dest == '-': if kind == 'files': raise error.Abort(_('cannot archive plain files to stdout')) - dest = cmdutil.makefileobj(repo, dest) + dest = cmdutil.makefileobj(ctx, dest) if not prefix: prefix = os.path.basename(repo.root) + '-%h' - prefix = cmdutil.makefilename(repo, prefix, node) + prefix = cmdutil.makefilename(ctx, prefix) match = scmutil.match(ctx, [], opts) archival.archive(repo, dest, node, kind, not opts.get('no_decode'), match, prefix, subrepos=opts.get('subrepos')) @@ -583,7 +585,7 @@ date = 
opts.get('date') if date: - opts['date'] = util.parsedate(date) + opts['date'] = dateutil.parsedate(date) cmdutil.checkunfinished(repo) cmdutil.bailifchanged(repo) @@ -823,7 +825,7 @@ cmdutil.bailifchanged(repo) return hg.clean(repo, node, show_stats=show_stats) - displayer = cmdutil.show_changeset(ui, repo, {}) + displayer = logcmdutil.changesetdisplayer(ui, repo, {}) if command: changesets = 1 @@ -859,7 +861,8 @@ transition = "bad" state[transition].append(node) ctx = repo[node] - ui.status(_('changeset %d:%s: %s\n') % (ctx, ctx, transition)) + ui.status(_('changeset %d:%s: %s\n') % (ctx.rev(), ctx, + transition)) hbisect.checkstate(state) # bisect nodes, changesets, bgood = hbisect.bisect(repo, state) @@ -1156,13 +1159,15 @@ def bundle(ui, repo, fname, dest=None, **opts): """create a bundle file - Generate a bundle file containing data to be added to a repository. + Generate a bundle file containing data to be transferred to another + repository. To create a bundle containing all changesets, use -a/--all (or --base null). Otherwise, hg assumes the destination will have all the nodes you specify with --base parameters. Otherwise, hg will assume the repository has all the nodes in destination, or - default-push/default if no destination is specified. + default-push/default if no destination is specified, where destination + is the repository you provide through DEST option. You can change bundle format with the -t/--type option. See :hg:`help bundlespec` for documentation on this format. By default, @@ -1219,7 +1224,7 @@ raise error.Abort(_("--base is incompatible with specifying " "a destination")) common = [repo.lookup(rev) for rev in base] - heads = revs and map(repo.lookup, revs) or None + heads = [repo.lookup(r) for r in revs] if revs else None outgoing = discovery.outgoing(repo, common, heads) else: dest = ui.expandpath(dest or 'default-push', dest or 'default') @@ -1281,7 +1286,9 @@ no revision is given, the parent of the working directory is used. 
Output may be to a file, in which case the name of the file is - given using a format string. The formatting rules as follows: + given using a template string. See :hg:`help templates`. In addition + to the common template keywords, the following formatting rules are + supported: :``%%``: literal "%" character :``%s``: basename of file being printed @@ -1292,6 +1299,7 @@ :``%h``: short-form changeset hash (12 hexadecimal digits) :``%r``: zero-padded changeset revision number :``%b``: basename of the exporting repository + :``\\``: literal "\\" character Returns 0 on success. """ @@ -1319,8 +1327,10 @@ 'directory (only a repository)')), ('u', 'updaterev', '', _('revision, tag, or branch to check out'), _('REV')), - ('r', 'rev', [], _('include the specified changeset'), _('REV')), - ('b', 'branch', [], _('clone only the specified branch'), _('BRANCH')), + ('r', 'rev', [], _('do not clone everything, but include this changeset' + ' and its ancestors'), _('REV')), + ('b', 'branch', [], _('do not clone everything, but include this branch\'s' + ' changesets and their ancestors'), _('BRANCH')), ('', 'pull', None, _('use pull protocol to copy metadata')), ('', 'uncompressed', None, _('an alias to --stream (DEPRECATED)')), @@ -1550,7 +1560,7 @@ extra = {} if opts.get('close_branch'): - extra['close'] = 1 + extra['close'] = '1' if not bheads: raise error.Abort(_('can only close branch heads')) @@ -1628,7 +1638,7 @@ of that config item. With multiple arguments, print names and values of all config - items with matching section names. + items with matching section names or section.names. With --edit, start an editor on the user-level config file. With --global, edit the system-wide config file. With --local, edit the @@ -1689,11 +1699,15 @@ else: raise error.ProgrammingError('unknown rctype: %s' % t) untrusted = bool(opts.get('untrusted')) + + selsections = selentries = [] if values: - sections = [v for v in values if '.' not in v] - items = [v for v in values if '.' 
in v] - if len(items) > 1 or items and sections: - raise error.Abort(_('only one config item permitted')) + selsections = [v for v in values if '.' not in v] + selentries = [v for v in values if '.' in v] + uniquesel = (len(selentries) == 1 and not selsections) + selsections = set(selsections) + selentries = set(selentries) + matched = False for section, name, value in ui.walkconfig(untrusted=untrusted): source = ui.configsource(section, name, untrusted) @@ -1702,24 +1716,16 @@ source = source or 'none' value = value.replace('\n', '\\n') entryname = section + '.' + name - if values: - for v in values: - if v == section: - fm.startitem() - fm.condwrite(ui.debugflag, 'source', '%s: ', source) - fm.write('name value', '%s=%s\n', entryname, value) - matched = True - elif v == entryname: - fm.startitem() - fm.condwrite(ui.debugflag, 'source', '%s: ', source) - fm.write('value', '%s\n', value) - fm.data(name=entryname) - matched = True + if values and not (section in selsections or entryname in selentries): + continue + fm.startitem() + fm.condwrite(ui.debugflag, 'source', '%s: ', source) + if uniquesel: + fm.data(name=entryname) + fm.write('value', '%s\n', value) else: - fm.startitem() - fm.condwrite(ui.debugflag, 'source', '%s: ', source) fm.write('name value', '%s=%s\n', entryname, value) - matched = True + matched = True fm.end() if matched: return 0 @@ -1873,9 +1879,9 @@ diffopts = patch.diffallopts(ui, opts) m = scmutil.match(repo[node2], pats, opts) ui.pager('diff') - cmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat, - listsubrepos=opts.get('subrepos'), - root=opts.get('root')) + logcmdutil.diffordiffstat(ui, repo, diffopts, node1, node2, m, stat=stat, + listsubrepos=opts.get('subrepos'), + root=opts.get('root')) @command('^export', [('o', 'output', '', @@ -1901,7 +1907,9 @@ first parent only. Output may be to a file, in which case the name of the file is - given using a format string. 
The formatting rules are as follows: + given using a template string. See :hg:`help templates`. In addition + to the common template keywords, the following formatting rules are + supported: :``%%``: literal "%" character :``%H``: changeset hash (40 hexadecimal digits) @@ -1912,6 +1920,7 @@ :``%m``: first line of the commit message (only alphanumeric characters) :``%n``: zero-padded sequence number, starting at 1 :``%r``: zero-padded changeset revision number + :``\\``: literal "\\" character Without the -a/--text option, export will avoid generating diffs of files it detects as binary. With -a, export will generate a @@ -2153,7 +2162,7 @@ if not opts.get('user') and opts.get('currentuser'): opts['user'] = ui.username() if not opts.get('date') and opts.get('currentdate'): - opts['date'] = "%d %d" % util.makedate() + opts['date'] = "%d %d" % dateutil.makedate() editor = cmdutil.getcommiteditor(editform='graft', **pycompat.strkwargs(opts)) @@ -2370,7 +2379,7 @@ try: regexp = util.re.compile(pattern, reflags) except re.error as inst: - ui.warn(_("grep: invalid match pattern: %s\n") % inst) + ui.warn(_("grep: invalid match pattern: %s\n") % pycompat.bytestr(inst)) return 1 sep, eol = ':', '\n' if opts.get('print0'): @@ -2647,7 +2656,7 @@ ui.pager('heads') heads = sorted(heads, key=lambda x: -x.rev()) - displayer = cmdutil.show_changeset(ui, repo, opts) + displayer = logcmdutil.changesetdisplayer(ui, repo, opts) for ctx in heads: displayer.show(ctx) displayer.close() @@ -3003,7 +3012,7 @@ date = opts.get('date') if date: - opts['date'] = util.parsedate(date) + opts['date'] = dateutil.parsedate(date) exact = opts.get('exact') update = not opts.get('bypass') @@ -3155,11 +3164,11 @@ """ opts = pycompat.byteskwargs(opts) if opts.get('graph'): - cmdutil.checkunsupportedgraphflags([], opts) + logcmdutil.checkunsupportedgraphflags([], opts) def display(other, chlist, displayer): - revdag = cmdutil.graphrevs(other, chlist, opts) - cmdutil.displaygraph(ui, repo, revdag, 
displayer, - graphmod.asciiedges) + revdag = logcmdutil.graphrevs(other, chlist, opts) + logcmdutil.displaygraph(ui, repo, revdag, displayer, + graphmod.asciiedges) hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True) return 0 @@ -3414,33 +3423,17 @@ raise error.Abort(_('--line-range requires --follow')) if linerange and pats: + # TODO: take pats as patterns with no line-range filter raise error.Abort( _('FILE arguments are not compatible with --line-range option') ) repo = scmutil.unhidehashlikerevs(repo, opts.get('rev'), 'nowarn') - revs, filematcher = cmdutil.getlogrevs(repo, pats, opts) - hunksfilter = None - - if opts.get('graph'): - if linerange: - raise error.Abort(_('graph not supported with line range patterns')) - return cmdutil.graphlog(ui, repo, revs, filematcher, opts) - + revs, differ = logcmdutil.getrevs(repo, pats, opts) if linerange: - revs, lrfilematcher, hunksfilter = cmdutil.getloglinerangerevs( - repo, revs, opts) - - if filematcher is not None and lrfilematcher is not None: - basefilematcher = filematcher - - def filematcher(rev): - files = (basefilematcher(rev).files() - + lrfilematcher(rev).files()) - return scmutil.matchfiles(repo, files) - - elif filematcher is None: - filematcher = lrfilematcher + # TODO: should follow file history from logcmdutil._initialrevs(), + # then filter the result by logcmdutil._makerevset() and --limit + revs, differ = logcmdutil.getlinerangerevs(repo, revs, opts) getrenamed = None if opts.get('copies'): @@ -3450,29 +3443,13 @@ getrenamed = templatekw.getrenamedfn(repo, endrev=endrev) ui.pager('log') - displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True) - for rev in revs: - ctx = repo[rev] - copies = None - if getrenamed is not None and rev: - copies = [] - for fn in ctx.files(): - rename = getrenamed(fn, rev) - if rename: - copies.append((fn, rename[0])) - if filematcher: - revmatchfn = filematcher(ctx.rev()) - else: - revmatchfn = None - if hunksfilter: - revhunksfilter = 
hunksfilter(rev) - else: - revhunksfilter = None - displayer.show(ctx, copies=copies, matchfn=revmatchfn, - hunksfilterfn=revhunksfilter) - displayer.flush(ctx) - - displayer.close() + displayer = logcmdutil.changesetdisplayer(ui, repo, opts, differ, + buffered=True) + if opts.get('graph'): + displayfn = logcmdutil.displaygraphrevs + else: + displayfn = logcmdutil.displayrevs + displayfn(ui, repo, revs, displayer, getrenamed) @command('manifest', [('r', 'rev', '', _('revision to display'), _('REV')), @@ -3523,8 +3500,8 @@ if not node: node = rev - char = {'l': '@', 'x': '*', '': ''} - mode = {'l': '644', 'x': '755', '': '644'} + char = {'l': '@', 'x': '*', '': '', 't': 'd'} + mode = {'l': '644', 'x': '755', '': '644', 't': '755'} if node: repo = scmutil.unhidehashlikerevs(repo, [node], 'nowarn') ctx = scmutil.revsingle(repo, node) @@ -3604,7 +3581,7 @@ p2 = repo.lookup(node) nodes = repo.changelog.findmissing(common=[p1], heads=[p2]) - displayer = cmdutil.show_changeset(ui, repo, opts) + displayer = logcmdutil.changesetdisplayer(ui, repo, opts) for node in nodes: displayer.show(repo[node]) displayer.close() @@ -3668,16 +3645,17 @@ """ opts = pycompat.byteskwargs(opts) if opts.get('graph'): - cmdutil.checkunsupportedgraphflags([], opts) + logcmdutil.checkunsupportedgraphflags([], opts) o, other = hg._outgoing(ui, repo, dest, opts) if not o: cmdutil.outgoinghooks(ui, repo, other, opts, o) return - revdag = cmdutil.graphrevs(repo, o, opts) + revdag = logcmdutil.graphrevs(repo, o, opts) ui.pager('outgoing') - displayer = cmdutil.show_changeset(ui, repo, opts, buffered=True) - cmdutil.displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges) + displayer = logcmdutil.changesetdisplayer(ui, repo, opts, buffered=True) + logcmdutil.displaygraph(ui, repo, revdag, displayer, + graphmod.asciiedges) cmdutil.outgoinghooks(ui, repo, other, opts, o) return 0 @@ -3752,7 +3730,7 @@ else: p = [cp.node() for cp in ctx.parents()] - displayer = cmdutil.show_changeset(ui, repo, 
opts) + displayer = logcmdutil.changesetdisplayer(ui, repo, opts) for n in p: if n != nullid: displayer.show(repo[n]) @@ -3930,7 +3908,7 @@ try: return hg.updatetotally(ui, repo, checkout, brev) except error.UpdateAbort as inst: - msg = _("not updating: %s") % str(inst) + msg = _("not updating: %s") % util.forcebytestr(inst) hint = inst.hint raise error.UpdateAbort(msg, hint=hint) if modheads > 1: @@ -4043,7 +4021,7 @@ brev = None if checkout: - checkout = str(repo.changelog.rev(checkout)) + checkout = "%d" % repo.changelog.rev(checkout) # order below depends on implementation of # hg.addbranchrevs(). opts['bookmark'] is ignored, @@ -4513,7 +4491,7 @@ for f in ms: if not m(f): continue - flags = ''.join(['-%s ' % o[0] for o in flaglist + flags = ''.join(['-%s ' % o[0:1] for o in flaglist if opts.get(o)]) hint = _("(try: hg resolve %s%s)\n") % ( flags, @@ -4757,7 +4735,7 @@ if repo is None: raise error.RepoError(_("there is no Mercurial repository here" " (.hg not found)")) - s = sshserver.sshserver(ui, repo) + s = wireprotoserver.sshserver(ui, repo) s.serve_forever() service = server.createservice(ui, repo, opts) @@ -4984,7 +4962,7 @@ # shows a working directory parent *changeset*: # i18n: column positioning for "hg summary" ui.write(_('parent: %d:%s ') % (p.rev(), p), - label=cmdutil._changesetlabels(p)) + label=logcmdutil.changesetlabels(p)) ui.write(' '.join(p.tags()), label='log.tag') if p.bookmarks(): marks.extend(p.bookmarks()) @@ -5330,7 +5308,7 @@ date = opts.get('date') if date: - date = util.parsedate(date) + date = dateutil.parsedate(date) if opts.get('remove'): editform = 'tag.remove' @@ -5406,7 +5384,7 @@ Returns 0 on success. """ opts = pycompat.byteskwargs(opts) - displayer = cmdutil.show_changeset(ui, repo, opts) + displayer = logcmdutil.changesetdisplayer(ui, repo, opts) displayer.show(repo['tip']) displayer.close()
--- a/mercurial/config.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/config.py Sun Mar 04 10:42:51 2018 -0500 @@ -154,7 +154,7 @@ if inst.errno != errno.ENOENT: raise error.ParseError(_("cannot include %s (%s)") % (inc, inst.strerror), - "%s:%s" % (src, line)) + "%s:%d" % (src, line)) continue if emptyre.match(l): continue @@ -185,7 +185,7 @@ self._unset.append((section, name)) continue - raise error.ParseError(l.rstrip(), ("%s:%s" % (src, line))) + raise error.ParseError(l.rstrip(), ("%s:%d" % (src, line))) def read(self, path, fp=None, sections=None, remap=None): if not fp:
--- a/mercurial/configitems.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/configitems.py Sun Mar 04 10:42:51 2018 -0500 @@ -538,9 +538,6 @@ coreconfigitem('experimental', 'httppostargs', default=False, ) -coreconfigitem('experimental', 'manifestv2', - default=False, -) coreconfigitem('experimental', 'mergedriver', default=None, ) @@ -556,6 +553,9 @@ coreconfigitem('experimental', 'single-head-per-branch', default=False, ) +coreconfigitem('experimental', 'sshserver.support-v2', + default=False, +) coreconfigitem('experimental', 'spacemovesdown', default=False, ) @@ -574,6 +574,12 @@ coreconfigitem('experimental', 'update.atomic-file', default=False, ) +coreconfigitem('experimental', 'sshpeer.advertise-v2', + default=False, +) +coreconfigitem('experimental', 'xdiff', + default=False, +) coreconfigitem('extensions', '.*', default=None, generic=True, @@ -743,6 +749,16 @@ generic=True, priority=-1, ) +coreconfigitem('merge-tools', br'.*\.mergemarkers$', + default='basic', + generic=True, + priority=-1, +) +coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$', + default=dynamicdefault, # take from ui.mergemarkertemplate + generic=True, + priority=-1, +) coreconfigitem('merge-tools', br'.*\.priority$', default=0, generic=True, @@ -1013,9 +1029,6 @@ coreconfigitem('ui', 'graphnodetemplate', default=None, ) -coreconfigitem('ui', 'http2debuglevel', - default=None, -) coreconfigitem('ui', 'interactive', default=None, ) @@ -1114,9 +1127,6 @@ coreconfigitem('ui', 'tweakdefaults', default=False, ) -coreconfigitem('ui', 'usehttp2', - default=False, -) coreconfigitem('ui', 'username', alias=[('ui', 'user')] )
--- a/mercurial/context.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/context.py Sun Mar 04 10:42:51 2018 -0500 @@ -46,8 +46,10 @@ scmutil, sparse, subrepo, + subrepoutil, util, ) +from .utils import dateutil propertycache = util.propertycache @@ -77,9 +79,6 @@ __str__ = encoding.strmethod(__bytes__) - def __int__(self): - return self.rev() - def __repr__(self): return r"<%s %s>" % (type(self).__name__, str(self)) @@ -173,7 +172,7 @@ @propertycache def substate(self): - return subrepo.state(self, self._repo.ui) + return subrepoutil.state(self, self._repo.ui) def subrev(self, subpath): return self.substate[subpath][1] @@ -206,22 +205,10 @@ """True if the changeset is extinct""" return self.rev() in obsmod.getrevs(self._repo, 'extinct') - def unstable(self): - msg = ("'context.unstable' is deprecated, " - "use 'context.orphan'") - self._repo.ui.deprecwarn(msg, '4.4') - return self.orphan() - def orphan(self): """True if the changeset is not obsolete but it's ancestor are""" return self.rev() in obsmod.getrevs(self._repo, 'orphan') - def bumped(self): - msg = ("'context.bumped' is deprecated, " - "use 'context.phasedivergent'") - self._repo.ui.deprecwarn(msg, '4.4') - return self.phasedivergent() - def phasedivergent(self): """True if the changeset try to be a successor of a public changeset @@ -229,12 +216,6 @@ """ return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent') - def divergent(self): - msg = ("'context.divergent' is deprecated, " - "use 'context.contentdivergent'") - self._repo.ui.deprecwarn(msg, '4.4') - return self.contentdivergent() - def contentdivergent(self): """Is a successors of a changeset with multiple possible successors set @@ -242,33 +223,10 @@ """ return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent') - def troubled(self): - msg = ("'context.troubled' is deprecated, " - "use 'context.isunstable'") - self._repo.ui.deprecwarn(msg, '4.4') - return self.isunstable() - def isunstable(self): """True if the changeset is 
either unstable, bumped or divergent""" return self.orphan() or self.phasedivergent() or self.contentdivergent() - def troubles(self): - """Keep the old version around in order to avoid breaking extensions - about different return values. - """ - msg = ("'context.troubles' is deprecated, " - "use 'context.instabilities'") - self._repo.ui.deprecwarn(msg, '4.4') - - troubles = [] - if self.orphan(): - troubles.append('orphan') - if self.phasedivergent(): - troubles.append('bumped') - if self.contentdivergent(): - troubles.append('divergent') - return troubles - def instabilities(self): """return the list of instabilities affecting this changeset. @@ -954,7 +912,7 @@ """ lkr = self.linkrev() attrs = vars(self) - noctx = not ('_changeid' in attrs or '_changectx' in attrs) + noctx = not (r'_changeid' in attrs or r'_changectx' in attrs) if noctx or self.rev() == lkr: return self.linkrev() return self._adjustlinkrev(self.rev(), inclusive=True) @@ -970,14 +928,14 @@ def _parentfilectx(self, path, fileid, filelog): """create parent filectx keeping ancestry info for _adjustlinkrev()""" fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog) - if '_changeid' in vars(self) or '_changectx' in vars(self): + if r'_changeid' in vars(self) or r'_changectx' in vars(self): # If self is associated with a changeset (probably explicitly # fed), ensure the created filectx is associated with a # changeset that is an ancestor of self.changectx. # This lets us later use _adjustlinkrev to get a correct link. fctx._descendantrev = self.rev() fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) - elif '_descendantrev' in vars(self): + elif r'_descendantrev' in vars(self): # Otherwise propagate _descendantrev if we have one associated. 
fctx._descendantrev = self._descendantrev fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) @@ -1051,7 +1009,7 @@ # renamed filectx won't have a filelog yet, so set it # from the cache to save time for p in pl: - if not '_filelog' in p.__dict__: + if not r'_filelog' in p.__dict__: p._filelog = getlog(p.path()) return pl @@ -1331,7 +1289,7 @@ self._node = None self._text = text if date: - self._date = util.parsedate(date) + self._date = dateutil.parsedate(date) if user: self._user = user if changes: @@ -1408,7 +1366,7 @@ ui = self._repo.ui date = ui.configdate('devel', 'default-date') if date is None: - date = util.makedate() + date = dateutil.makedate() return date def subrev(self, subpath): @@ -2155,11 +2113,11 @@ if data is None: raise error.ProgrammingError("data must be non-None") self._auditconflicts(path) - self._markdirty(path, exists=True, data=data, date=util.makedate(), + self._markdirty(path, exists=True, data=data, date=dateutil.makedate(), flags=flags) def setflags(self, path, l, x): - self._markdirty(path, exists=True, date=util.makedate(), + self._markdirty(path, exists=True, date=dateutil.makedate(), flags=(l and 'l' or '') + (x and 'x' or '')) def remove(self, path): @@ -2448,7 +2406,7 @@ user receives the committer name and defaults to current repository username, date is the commit date in any format - supported by util.parsedate() and defaults to current date, extra + supported by dateutil.parsedate() and defaults to current date, extra is a dictionary of metadata or is left empty. """ @@ -2663,7 +2621,7 @@ user receives the committer name and defaults to current repository username, date is the commit date in any format supported by - util.parsedate() and defaults to current date, extra is a dictionary of + dateutil.parsedate() and defaults to current date, extra is a dictionary of metadata or is left empty. """ def __new__(cls, repo, originalctx, *args, **kwargs):
--- a/mercurial/copies.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/copies.py Sun Mar 04 10:42:51 2018 -0500 @@ -123,7 +123,7 @@ t[k] = v # remove criss-crossed copies - for k, v in t.items(): + for k, v in list(t.items()): if k in src and v in dst: del t[k] @@ -685,8 +685,8 @@ # the base and present in the source. # Presence in the base is important to exclude added files, presence in the # source is important to exclude removed files. - missingfiles = filter(lambda f: f not in m1 and f in base and f in c2, - changedfiles) + filt = lambda f: f not in m1 and f in base and f in c2 + missingfiles = [f for f in changedfiles if filt(f)] if missingfiles: basenametofilename = collections.defaultdict(list)
--- a/mercurial/crecord.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/crecord.py Sun Mar 04 10:42:51 2018 -0500 @@ -547,7 +547,7 @@ chunkselector = curseschunkselector(headerlist, ui, operation) if testfn and os.path.exists(testfn): testf = open(testfn) - testcommands = map(lambda x: x.rstrip('\n'), testf.readlines()) + testcommands = [x.rstrip('\n') for x in testf.readlines()] testf.close() while True: if chunkselector.handlekeypressed(testcommands.pop(0), test=True): @@ -950,7 +950,7 @@ # preprocess the text, converting tabs to spaces text = text.expandtabs(4) # strip \n, and convert control characters to ^[char] representation - text = re.sub(r'[\x00-\x08\x0a-\x1f]', + text = re.sub(br'[\x00-\x08\x0a-\x1f]', lambda m:'^' + chr(ord(m.group()) + 64), text.strip('\n')) if pair is not None: @@ -1335,7 +1335,7 @@ # temporarily disable printing to windows by printstring patchdisplaystring = self.printitem(item, ignorefolding, recursechildren, towin=False) - numlines = len(patchdisplaystring) / self.xscreensize + numlines = len(patchdisplaystring) // self.xscreensize return numlines def sigwinchhandler(self, n, frame):
--- a/mercurial/debugcommands.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/debugcommands.py Sun Mar 04 10:42:51 2018 -0500 @@ -17,6 +17,7 @@ import socket import ssl import string +import subprocess import sys import tempfile import time @@ -48,6 +49,7 @@ hg, localrepo, lock as lockmod, + logcmdutil, merge as mergemod, obsolete, obsutil, @@ -64,6 +66,7 @@ setdiscovery, simplemerge, smartset, + sshpeer, sslutil, streamclone, templater, @@ -72,7 +75,9 @@ url as urlmod, util, vfs as vfsmod, + wireprotoserver, ) +from .utils import dateutil release = lockmod.release @@ -162,7 +167,7 @@ if mergeable_file: linesperrev = 2 # make a file with k lines per rev - initialmergedlines = [str(i) for i in xrange(0, total * linesperrev)] + initialmergedlines = ['%d' % i for i in xrange(0, total * linesperrev)] initialmergedlines.append("") tags = [] @@ -269,7 +274,7 @@ ui.write("\n%s%s\n" % (indent_string, named)) for deltadata in gen.deltaiter(): node, p1, p2, cs, deltabase, delta, flags = deltadata - ui.write("%s%s %s %s %s %s %s\n" % + ui.write("%s%s %s %s %s %s %d\n" % (indent_string, hex(node), hex(p1), hex(p2), hex(cs), hex(deltabase), len(delta))) @@ -556,13 +561,13 @@ def debugdate(ui, date, range=None, **opts): """parse and display a date""" if opts[r"extended"]: - d = util.parsedate(date, util.extendeddateformats) + d = dateutil.parsedate(date, util.extendeddateformats) else: - d = util.parsedate(date) - ui.write(("internal: %s %s\n") % d) - ui.write(("standard: %s\n") % util.datestr(d)) + d = dateutil.parsedate(date) + ui.write(("internal: %d %d\n") % d) + ui.write(("standard: %s\n") % dateutil.datestr(d)) if range: - m = util.matchdate(range) + m = dateutil.matchdate(range) ui.write(("match: %s\n") % m(d[0])) @command('debugdeltachain', @@ -1001,7 +1006,7 @@ ignore = repo.dirstate._ignore if not files: # Show all the patterns - ui.write("%s\n" % repr(ignore)) + ui.write("%s\n" % pycompat.byterepr(ignore)) else: m = scmutil.match(repo[None], pats=files) for f in 
m.files(): @@ -1239,16 +1244,17 @@ # editor editor = ui.geteditor() editor = util.expandpath(editor) - fm.write('editor', _("checking commit editor... (%s)\n"), editor) - cmdpath = util.findexe(pycompat.shlexsplit(editor)[0]) + editorbin = util.shellsplit(editor)[0] + fm.write('editor', _("checking commit editor... (%s)\n"), editorbin) + cmdpath = util.findexe(editorbin) fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound', _(" No commit editor set and can't find %s in PATH\n" " (specify a commit editor in your configuration" - " file)\n"), not cmdpath and editor == 'vi' and editor) + " file)\n"), not cmdpath and editor == 'vi' and editorbin) fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound', _(" Can't find editor '%s' in PATH\n" " (specify a commit editor in your configuration" - " file)\n"), not cmdpath and editor) + " file)\n"), not cmdpath and editorbin) if not cmdpath and editor != 'vi': problems += 1 @@ -1405,7 +1411,7 @@ return h def printrecords(version): - ui.write(('* version %s records\n') % version) + ui.write(('* version %d records\n') % version) if version == 1: records = v1records else: @@ -1573,7 +1579,7 @@ try: date = opts.get('date') if date: - date = util.parsedate(date) + date = dateutil.parsedate(date) else: date = None prec = parsenodeid(precursor) @@ -1589,7 +1595,8 @@ metadata=metadata, ui=ui) tr.close() except ValueError as exc: - raise error.Abort(_('bad obsmarker input: %s') % exc) + raise error.Abort(_('bad obsmarker input: %s') % + pycompat.bytestr(exc)) finally: tr.release() finally: @@ -1692,6 +1699,25 @@ ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files))) ui.write('\n') +@command('debugpeer', [], _('PATH'), norepo=True) +def debugpeer(ui, path): + """establish a connection to a peer repository""" + # Always enable peer request logging. Requires --debug to display + # though. 
+ overrides = { + ('devel', 'debug.peer-request'): True, + } + + with ui.configoverride(overrides): + peer = hg.peer(ui, {}, path) + + local = peer.local() is not None + canpush = peer.canpush() + + ui.write(_('url: %s\n') % peer.url()) + ui.write(_('local: %s\n') % (_('yes') if local else _('no'))) + ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no'))) + @command('debugpickmergetool', [('r', 'rev', '', _('check for files in this revision'), _('REV')), ('', 'changedelete', None, _('emulate merging change and delete')), @@ -2206,7 +2232,38 @@ if not opts['show_revs']: return for c in revs: - ui.write("%s\n" % c) + ui.write("%d\n" % c) + +@command('debugserve', [ + ('', 'sshstdio', False, _('run an SSH server bound to process handles')), + ('', 'logiofd', '', _('file descriptor to log server I/O to')), + ('', 'logiofile', '', _('file to log server I/O to')), +], '') +def debugserve(ui, repo, **opts): + """run a server with advanced settings + + This command is similar to :hg:`serve`. It exists partially as a + workaround to the fact that ``hg serve --stdio`` must have specific + arguments for security reasons. + """ + opts = pycompat.byteskwargs(opts) + + if not opts['sshstdio']: + raise error.Abort(_('only --sshstdio is currently supported')) + + logfh = None + + if opts['logiofd'] and opts['logiofile']: + raise error.Abort(_('cannot use both --logiofd and --logiofile')) + + if opts['logiofd']: + # Line buffered because output is line based. 
+ logfh = os.fdopen(int(opts['logiofd']), r'ab', 1) + elif opts['logiofile']: + logfh = open(opts['logiofile'], 'ab', 1) + + s = wireprotoserver.sshserver(ui, repo, logfh=logfh) + s.serve_forever() @command('debugsetparents', [], _('REV1 [REV2]')) def debugsetparents(ui, repo, rev1, rev2=None): @@ -2336,7 +2393,7 @@ """ # passed to successorssets caching computation from one call to another cache = {} - ctx2str = str + ctx2str = bytes node2str = short for rev in scmutil.revrange(repo, revs): ctx = repo[rev] @@ -2396,7 +2453,7 @@ t = formatter.maketemplater(ui, tmpl, resources=tres) ui.write(t.render(props)) else: - displayer = cmdutil.makelogtemplater(ui, repo, tmpl) + displayer = logcmdutil.maketemplater(ui, repo, tmpl) for r in revs: displayer.show(repo[r], **pycompat.strkwargs(props)) displayer.close() @@ -2475,3 +2532,308 @@ ui.write("%s\n" % res1) if res1 != res2: ui.warn("%s\n" % res2) + +def _parsewirelangblocks(fh): + activeaction = None + blocklines = [] + + for line in fh: + line = line.rstrip() + if not line: + continue + + if line.startswith(b'#'): + continue + + if not line.startswith(' '): + # New block. Flush previous one. + if activeaction: + yield activeaction, blocklines + + activeaction = line + blocklines = [] + continue + + # Else we start with an indent. + + if not activeaction: + raise error.Abort(_('indented line outside of block')) + + blocklines.append(line) + + # Flush last block. + if activeaction: + yield activeaction, blocklines + +@command('debugwireproto', + [ + ('', 'localssh', False, _('start an SSH server for this repo')), + ('', 'peer', '', _('construct a specific version of the peer')), + ('', 'noreadstderr', False, _('do not read from stderr of the remote')), + ] + cmdutil.remoteopts, + _('[REPO]'), + optionalrepo=True) +def debugwireproto(ui, repo, **opts): + """send wire protocol commands to a server + + This command can be used to issue wire protocol commands to remote + peers and to debug the raw data being exchanged. 
+ + ``--localssh`` will start an SSH server against the current repository + and connect to that. By default, the connection will perform a handshake + and establish an appropriate peer instance. + + ``--peer`` can be used to bypass the handshake protocol and construct a + peer instance using the specified class type. Valid values are ``raw``, + ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending raw data + payloads and don't support higher-level command actions. + + ``--noreadstderr`` can be used to disable automatic reading from stderr + of the peer (for SSH connections only). Disabling automatic reading of + stderr is useful for making output more deterministic. + + Commands are issued via a mini language which is specified via stdin. + The language consists of individual actions to perform. An action is + defined by a block. A block is defined as a line with no leading + space followed by 0 or more lines with leading space. Blocks are + effectively a high-level command with additional metadata. + + Lines beginning with ``#`` are ignored. + + The following sections denote available actions. + + raw + --- + + Send raw data to the server. + + The block payload contains the raw data to send as one atomic send + operation. The data may not actually be delivered in a single system + call: it depends on the abilities of the transport being used. + + Each line in the block is de-indented and concatenated. Then, that + value is evaluated as a Python b'' literal. This allows the use of + backslash escaping, etc. + + raw+ + ---- + + Behaves like ``raw`` except flushes output afterwards. + + command <X> + ----------- + + Send a request to run a named command, whose name follows the ``command`` + string. + + Arguments to the command are defined as lines in this block. The format of + each line is ``<key> <value>``. e.g.:: + + command listkeys + namespace bookmarks + + Values are interpreted as Python b'' literals. 
This allows encoding + special byte sequences via backslash escaping. + + The following arguments have special meaning: + + ``PUSHFILE`` + When defined, the *push* mechanism of the peer will be used instead + of the static request-response mechanism and the content of the + file specified in the value of this argument will be sent as the + command payload. + + This can be used to submit a local bundle file to the remote. + + batchbegin + ---------- + + Instruct the peer to begin a batched send. + + All ``command`` blocks are queued for execution until the next + ``batchsubmit`` block. + + batchsubmit + ----------- + + Submit previously queued ``command`` blocks as a batch request. + + This action MUST be paired with a ``batchbegin`` action. + + close + ----- + + Close the connection to the server. + + flush + ----- + + Flush data written to the server. + + readavailable + ------------- + + Read all available data from the server. + + If the connection to the server encompasses multiple pipes, we poll both + pipes and read available data. + + readline + -------- + + Read a line of output from the server. If there are multiple output + pipes, reads only the main pipe. + """ + opts = pycompat.byteskwargs(opts) + + if opts['localssh'] and not repo: + raise error.Abort(_('--localssh requires a repository')) + + if opts['peer'] and opts['peer'] not in ('raw', 'ssh1', 'ssh2'): + raise error.Abort(_('invalid value for --peer'), + hint=_('valid values are "raw", "ssh1", and "ssh2"')) + + if ui.interactive(): + ui.write(_('(waiting for commands on stdin)\n')) + + blocks = list(_parsewirelangblocks(ui.fin)) + + proc = None + + if opts['localssh']: + # We start the SSH server in its own process so there is process + # separation. This prevents a whole class of potential bugs around + # shared state from interfering with server operation. 
+ args = util.hgcmd() + [ + '-R', repo.root, + 'debugserve', '--sshstdio', + ] + proc = subprocess.Popen(args, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + bufsize=0) + + stdin = proc.stdin + stdout = proc.stdout + stderr = proc.stderr + + # We turn the pipes into observers so we can log I/O. + if ui.verbose or opts['peer'] == 'raw': + stdin = util.makeloggingfileobject(ui, proc.stdin, b'i', + logdata=True) + stdout = util.makeloggingfileobject(ui, proc.stdout, b'o', + logdata=True) + stderr = util.makeloggingfileobject(ui, proc.stderr, b'e', + logdata=True) + + # --localssh also implies the peer connection settings. + + url = 'ssh://localserver' + autoreadstderr = not opts['noreadstderr'] + + if opts['peer'] == 'ssh1': + ui.write(_('creating ssh peer for wire protocol version 1\n')) + peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr, + None, autoreadstderr=autoreadstderr) + elif opts['peer'] == 'ssh2': + ui.write(_('creating ssh peer for wire protocol version 2\n')) + peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr, + None, autoreadstderr=autoreadstderr) + elif opts['peer'] == 'raw': + ui.write(_('using raw connection to peer\n')) + peer = None + else: + ui.write(_('creating ssh peer from handshake results\n')) + peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr, + autoreadstderr=autoreadstderr) + + else: + raise error.Abort(_('only --localssh is currently supported')) + + batchedcommands = None + + # Now perform actions based on the parsed wire language instructions. + for action, lines in blocks: + if action in ('raw', 'raw+'): + # Concatenate the data together. 
+ data = ''.join(l.lstrip() for l in lines) + data = util.unescapestr(data) + stdin.write(data) + + if action == 'raw+': + stdin.flush() + elif action == 'flush': + stdin.flush() + elif action.startswith('command'): + if not peer: + raise error.Abort(_('cannot send commands unless peer instance ' + 'is available')) + + command = action.split(' ', 1)[1] + + args = {} + for line in lines: + # We need to allow empty values. + fields = line.lstrip().split(' ', 1) + if len(fields) == 1: + key = fields[0] + value = '' + else: + key, value = fields + + args[key] = util.unescapestr(value) + + if batchedcommands is not None: + batchedcommands.append((command, args)) + continue + + ui.status(_('sending %s command\n') % command) + + if 'PUSHFILE' in args: + with open(args['PUSHFILE'], r'rb') as fh: + del args['PUSHFILE'] + res, output = peer._callpush(command, fh, + **pycompat.strkwargs(args)) + ui.status(_('result: %s\n') % util.escapedata(res)) + ui.status(_('remote output: %s\n') % + util.escapedata(output)) + else: + res = peer._call(command, **pycompat.strkwargs(args)) + ui.status(_('response: %s\n') % util.escapedata(res)) + + elif action == 'batchbegin': + if batchedcommands is not None: + raise error.Abort(_('nested batchbegin not allowed')) + + batchedcommands = [] + elif action == 'batchsubmit': + # There is a batching API we could go through. But it would be + # difficult to normalize requests into function calls. It is easier + # to bypass this layer and normalize to commands + args. 
+ ui.status(_('sending batch with %d sub-commands\n') % + len(batchedcommands)) + for i, chunk in enumerate(peer._submitbatch(batchedcommands)): + ui.status(_('response #%d: %s\n') % (i, util.escapedata(chunk))) + + batchedcommands = None + elif action == 'close': + peer.close() + elif action == 'readavailable': + fds = util.poll([stdout.fileno(), stderr.fileno()]) + + if stdout.fileno() in fds: + util.readpipe(stdout) + if stderr.fileno() in fds: + util.readpipe(stderr) + elif action == 'readline': + stdout.readline() + else: + raise error.Abort(_('unknown action: %s') % action) + + if batchedcommands is not None: + raise error.Abort(_('unclosed "batchbegin" request')) + + if peer: + peer.close() + + if proc: + proc.kill()
--- a/mercurial/default.d/mergetools.rc Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/default.d/mergetools.rc Sun Mar 04 10:42:51 2018 -0500 @@ -1,7 +1,7 @@ # Some default global settings for common merge tools [merge-tools] -kdiff3.args=--auto --L1 base --L2 local --L3 other $base $local $other -o $output +kdiff3.args=--auto --L1 $labelbase --L2 $labellocal --L3 $labelother $base $local $other -o $output kdiff3.regkey=Software\KDiff3 kdiff3.regkeyalt=Software\Wow6432Node\KDiff3 kdiff3.regappend=\kdiff3.exe @@ -26,7 +26,7 @@ gpyfm.gui=True meld.gui=True -meld.args=--label='local' $local --label='merged' $base --label='other' $other -o $output +meld.args=--label=$labellocal $local --label='merged' $base --label=$labelother $other -o $output meld.check=changed meld.diffargs=-a --label=$plabel1 $parent --label=$clabel $child @@ -35,7 +35,7 @@ tkdiff.priority=-8 tkdiff.diffargs=-L $plabel1 $parent -L $clabel $child -xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 local --title2 base --title3 other --merged-filename $output --merge $local $base $other +xxdiff.args=--show-merged-pane --exit-with-merge-status --title1 $labellocal --title2 $labelbase --title3 $labelother --merged-filename $output --merge $local $base $other xxdiff.gui=True xxdiff.priority=-8 xxdiff.diffargs=--title1 $plabel1 $parent --title2 $clabel $child @@ -44,7 +44,7 @@ diffmerge.regkeyalt=Software\Wow6432Node\SourceGear\SourceGear DiffMerge\ diffmerge.regname=Location diffmerge.priority=-7 -diffmerge.args=-nosplash -merge -title1=local -title2=merged -title3=other $local $base $other -result=$output +diffmerge.args=-nosplash -merge -title1=$labellocal -title2=merged -title3=$labelother $local $base $other -result=$output diffmerge.check=changed diffmerge.gui=True diffmerge.diffargs=--nosplash --title1=$plabel1 --title2=$clabel $parent $child @@ -72,7 +72,7 @@ tortoisemerge.priority=-8 tortoisemerge.diffargs=/base:$parent /mine:$child /basename:$plabel1 /minename:$clabel 
-ecmerge.args=$base $local $other --mode=merge3 --title0=base --title1=local --title2=other --to=$output +ecmerge.args=$base $local $other --mode=merge3 --title0=$labelbase --title1=$labellocal --title2=$labelother --to=$output ecmerge.regkey=Software\Elli\xc3\xa9 Computing\Merge ecmerge.regkeyalt=Software\Wow6432Node\Elli\xc3\xa9 Computing\Merge ecmerge.gui=True @@ -93,7 +93,7 @@ filemergexcode.gui=True ; Windows version of Beyond Compare -beyondcompare3.args=$local $other $base $output /ro /lefttitle=local /centertitle=base /righttitle=other /automerge /reviewconflicts /solo +beyondcompare3.args=$local $other $base $output /ro /lefttitle=$labellocal /centertitle=$labelbase /righttitle=$labelother /automerge /reviewconflicts /solo beyondcompare3.regkey=Software\Scooter Software\Beyond Compare 3 beyondcompare3.regname=ExePath beyondcompare3.gui=True @@ -113,7 +113,7 @@ bcomposx.priority=-1 bcomposx.diffargs=-lro -lefttitle=$plabel1 -righttitle=$clabel -solo -expandall $parent $child -winmerge.args=/e /x /wl /ub /dl other /dr local $other $local $output +winmerge.args=/e /x /wl /ub /dl $labelother /dr $labellocal $other $local $output winmerge.regkey=Software\Thingamahoochie\WinMerge winmerge.regkeyalt=Software\Wow6432Node\Thingamahoochie\WinMerge\ winmerge.regname=Executable
--- a/mercurial/dirstate.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/dirstate.py Sun Mar 04 10:42:51 2018 -0500 @@ -99,27 +99,6 @@ # normally, so we don't have a try/finally here on purpose. self._parentwriters -= 1 - def beginparentchange(self): - '''Marks the beginning of a set of changes that involve changing - the dirstate parents. If there is an exception during this time, - the dirstate will not be written when the wlock is released. This - prevents writing an incoherent dirstate where the parent doesn't - match the contents. - ''' - self._ui.deprecwarn('beginparentchange is obsoleted by the ' - 'parentchange context manager.', '4.3') - self._parentwriters += 1 - - def endparentchange(self): - '''Marks the end of a set of changes that involve changing the - dirstate parents. Once all parent changes have been marked done, - the wlock will be free to write the dirstate on release. - ''' - self._ui.deprecwarn('endparentchange is obsoleted by the ' - 'parentchange context manager.', '4.3') - if self._parentwriters > 0: - self._parentwriters -= 1 - def pendingparentchange(self): '''Returns true if the dirstate is in the middle of a set of changes that modify the dirstate parent. @@ -360,7 +339,7 @@ rereads the dirstate. Use localrepo.invalidatedirstate() if you want to check whether the dirstate has changed before rereading it.''' - for a in ("_map", "_branch", "_ignore"): + for a in (r"_map", r"_branch", r"_ignore"): if a in self.__dict__: delattr(self, a) self._lastnormaltime = 0 @@ -808,6 +787,17 @@ else: badfn(ff, encoding.strtolocal(inst.strerror)) + # match.files() may contain explicitly-specified paths that shouldn't + # be taken; drop them from the list of files found. dirsfound/notfound + # aren't filtered here because they will be tested later. 
+ if match.anypats(): + for f in list(results): + if f == '.hg' or f in subrepos: + # keep sentinel to disable further out-of-repo walks + continue + if not match(f): + del results[f] + # Case insensitive filesystems cannot rely on lstat() failing to detect # a case-only rename. Prune the stat object for any file that does not # match the case in the filesystem, if there are multiple files that @@ -1237,9 +1227,12 @@ util.clearcachedproperty(self, "nonnormalset") util.clearcachedproperty(self, "otherparentset") - def iteritems(self): + def items(self): return self._map.iteritems() + # forward for python2,3 compat + iteritems = items + def __len__(self): return len(self._map) @@ -1264,9 +1257,9 @@ def addfile(self, f, oldstate, state, mode, size, mtime): """Add a tracked file to the dirstate.""" - if oldstate in "?r" and "_dirs" in self.__dict__: + if oldstate in "?r" and r"_dirs" in self.__dict__: self._dirs.addpath(f) - if oldstate == "?" and "_alldirs" in self.__dict__: + if oldstate == "?" and r"_alldirs" in self.__dict__: self._alldirs.addpath(f) self._map[f] = dirstatetuple(state, mode, size, mtime) if state != 'n' or mtime == -1: @@ -1282,11 +1275,11 @@ the file's previous state. In the future, we should refactor this to be more explicit about what that state is. """ - if oldstate not in "?r" and "_dirs" in self.__dict__: + if oldstate not in "?r" and r"_dirs" in self.__dict__: self._dirs.delpath(f) - if oldstate == "?" and "_alldirs" in self.__dict__: + if oldstate == "?" 
and r"_alldirs" in self.__dict__: self._alldirs.addpath(f) - if "filefoldmap" in self.__dict__: + if r"filefoldmap" in self.__dict__: normed = util.normcase(f) self.filefoldmap.pop(normed, None) self._map[f] = dirstatetuple('r', 0, size, 0) @@ -1299,11 +1292,11 @@ """ exists = self._map.pop(f, None) is not None if exists: - if oldstate != "r" and "_dirs" in self.__dict__: + if oldstate != "r" and r"_dirs" in self.__dict__: self._dirs.delpath(f) - if "_alldirs" in self.__dict__: + if r"_alldirs" in self.__dict__: self._alldirs.delpath(f) - if "filefoldmap" in self.__dict__: + if r"filefoldmap" in self.__dict__: normed = util.normcase(f) self.filefoldmap.pop(normed, None) self.nonnormalset.discard(f) @@ -1438,7 +1431,7 @@ # This heuristic is imperfect in many ways, so in a future dirstate # format update it makes sense to just record the number of entries # on write. - self._map = parsers.dict_new_presized(len(st) / 71) + self._map = parsers.dict_new_presized(len(st) // 71) # Python's garbage collector triggers a GC each time a certain number # of container objects (the number being defined by
--- a/mercurial/discovery.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/discovery.py Sun Mar 04 10:42:51 2018 -0500 @@ -53,16 +53,11 @@ return treediscovery.findcommonincoming(repo, remote, heads, force) if heads: - allknown = True knownnode = repo.changelog.hasnode # no nodemap until it is filtered - for h in heads: - if not knownnode(h): - allknown = False - break - if allknown: + if all(knownnode(h) for h in heads): return (heads, False, heads) - res = setdiscovery.findcommonheads(repo.ui, repo, remote, + res = setdiscovery.findcommonheads(repo.ui, repo, remote, heads, abortwhenunrelated=not force, ancestorsof=ancestorsof) common, anyinc, srvheads = res
--- a/mercurial/dispatch.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/dispatch.py Sun Mar 04 10:42:51 2018 -0500 @@ -85,7 +85,7 @@ req = request(pycompat.sysargv[1:]) err = None try: - status = (dispatch(req) or 0) & 255 + status = (dispatch(req) or 0) except error.StdioError as e: err = e status = -1 @@ -106,11 +106,36 @@ except IOError: status = -1 + _silencestdio() sys.exit(status & 255) -def _initstdio(): - for fp in (sys.stdin, sys.stdout, sys.stderr): - util.setbinary(fp) +if pycompat.ispy3: + def _initstdio(): + pass + + def _silencestdio(): + for fp in (sys.stdout, sys.stderr): + # Check if the file is okay + try: + fp.flush() + continue + except IOError: + pass + # Otherwise mark it as closed to silence "Exception ignored in" + # message emitted by the interpreter finalizer. Be careful to + # not close util.stdout, which may be a fdopen-ed file object and + # its close() actually closes the underlying file descriptor. + try: + fp.close() + except IOError: + pass +else: + def _initstdio(): + for fp in (sys.stdin, sys.stdout, sys.stderr): + util.setbinary(fp) + + def _silencestdio(): + pass def _getsimilar(symbols, value): sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio() @@ -132,7 +157,7 @@ similar = _getsimilar(inst.symbols, inst.function) if len(inst.args) > 1: write(_("hg: parse error at %s: %s\n") % - (inst.args[1], inst.args[0])) + (pycompat.bytestr(inst.args[1]), inst.args[0])) if (inst.args[0][0] == ' '): write(_("unexpected leading whitespace\n")) else: @@ -471,13 +496,14 @@ args = pycompat.shlexsplit(self.definition) except ValueError as inst: self.badalias = (_("error in definition for alias '%s': %s") - % (self.name, inst)) + % (self.name, util.forcebytestr(inst))) return earlyopts, args = _earlysplitopts(args) if earlyopts: self.badalias = (_("error in definition for alias '%s': %s may " "only be given on the command line") - % (self.name, '/'.join(zip(*earlyopts)[0]))) + % (self.name, '/'.join(pycompat.ziplist(*earlyopts) + 
[0]))) return self.cmdname = cmd = args.pop(0) self.givenargs = args @@ -597,7 +623,7 @@ try: args = fancyopts.fancyopts(args, commands.globalopts, options) except getopt.GetoptError as inst: - raise error.CommandError(None, inst) + raise error.CommandError(None, util.forcebytestr(inst)) if args: cmd, args = args[0], args[1:] @@ -621,7 +647,7 @@ try: args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True) except getopt.GetoptError as inst: - raise error.CommandError(cmd, inst) + raise error.CommandError(cmd, util.forcebytestr(inst)) # separate global options back out for o in commands.globalopts: @@ -646,7 +672,8 @@ configs.append((section, name, value)) except (IndexError, ValueError): raise error.Abort(_('malformed --config option: %r ' - '(use --config section.name=value)') % cfg) + '(use --config section.name=value)') + % pycompat.bytestr(cfg)) return configs @@ -821,9 +848,7 @@ if options['verbose'] or options['debug'] or options['quiet']: for opt in ('verbose', 'debug', 'quiet'): - val = str(bool(options[opt])) - if pycompat.ispy3: - val = val.encode('ascii') + val = pycompat.bytestr(bool(options[opt])) for ui_ in uis: ui_.setconfig('ui', opt, val, '--' + opt) @@ -941,9 +966,9 @@ worst = None, ct, '' if ui.config('ui', 'supportcontact') is None: for name, mod in extensions.extensions(): - testedwith = getattr(mod, 'testedwith', '') - if pycompat.ispy3 and isinstance(testedwith, str): - testedwith = testedwith.encode(u'utf-8') + # 'testedwith' should be bytes, but not all extensions are ported + # to py3 and we don't want UnicodeException because of that. + testedwith = util.forcebytestr(getattr(mod, 'testedwith', '')) report = getattr(mod, 'buglink', _('the extension author.')) if not testedwith.strip(): # We found an untested extension. It's likely the culprit. 
@@ -978,11 +1003,7 @@ bugtracker = _("https://mercurial-scm.org/wiki/BugTracker") warning = (_("** unknown exception encountered, " "please report by visiting\n** ") + bugtracker + '\n') - if pycompat.ispy3: - sysversion = sys.version.encode(u'utf-8') - else: - sysversion = sys.version - sysversion = sysversion.replace('\n', '') + sysversion = pycompat.sysbytes(sys.version).replace('\n', '') warning += ((_("** Python %s\n") % sysversion) + (_("** Mercurial Distributed SCM (version %s)\n") % util.version()) + @@ -997,6 +1018,7 @@ this function returns False, ignored otherwise. """ warning = _exceptionwarning(ui) - ui.log("commandexception", "%s\n%s\n", warning, traceback.format_exc()) + ui.log("commandexception", "%s\n%s\n", warning, + pycompat.sysbytes(traceback.format_exc())) ui.warn(warning) return False # re-raise the exception
--- a/mercurial/encoding.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/encoding.py Sun Mar 04 10:42:51 2018 -0500 @@ -181,7 +181,8 @@ return u.encode("utf-8") except UnicodeDecodeError as inst: sub = s[max(0, inst.start - 10):inst.start + 10] - raise error.Abort("decoding near '%s': %s!" % (sub, inst)) + raise error.Abort("decoding near '%s': %s!" + % (sub, pycompat.bytestr(inst))) except LookupError as k: raise error.Abort(k, hint="please check your locale settings")
--- a/mercurial/error.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/error.py Sun Mar 04 10:42:51 2018 -0500 @@ -47,7 +47,7 @@ # this can't be called 'message' because at least some installs of # Python 2.6+ complain about the 'message' property being deprecated self.lookupmessage = message - if isinstance(name, str) and len(name) == 20: + if isinstance(name, bytes) and len(name) == 20: from .node import short name = short(name) RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
--- a/mercurial/exchange.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/exchange.py Sun Mar 04 10:42:51 2018 -0500 @@ -283,7 +283,6 @@ This function is used to allow testing of the older bundle version""" ui = op.repo.ui - forcebundle1 = False # The goal is this config is to allow developer to choose the bundle # version used during exchanged. This is especially handy during test. # Value is a list of bundle version to be picked from, highest version @@ -1151,8 +1150,8 @@ for newremotehead in outdated: r = pushop.remote.pushkey('phases', newremotehead.hex(), - str(phases.draft), - str(phases.public)) + ('%d' % phases.draft), + ('%d' % phases.public)) if not r: pushop.ui.warn(_('updating %s to public failed!\n') % newremotehead) @@ -2149,7 +2148,8 @@ continue except error.UnsupportedBundleSpecification as e: repo.ui.debug('filtering %s because unsupported bundle ' - 'spec: %s\n' % (entry['URL'], str(e))) + 'spec: %s\n' % ( + entry['URL'], util.forcebytestr(e))) continue # If we don't have a spec and requested a stream clone, we don't know # what the entry is so don't attempt to apply it. @@ -2254,8 +2254,10 @@ bundle2.applybundle(repo, cg, tr, 'clonebundles', url) return True except urlerr.httperror as e: - ui.warn(_('HTTP error fetching bundle: %s\n') % str(e)) + ui.warn(_('HTTP error fetching bundle: %s\n') % + util.forcebytestr(e)) except urlerr.urlerror as e: - ui.warn(_('error fetching bundle: %s\n') % e.reason) + ui.warn(_('error fetching bundle: %s\n') % + util.forcebytestr(e.reason)) return False
--- a/mercurial/extensions.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/extensions.py Sun Mar 04 10:42:51 2018 -0500 @@ -122,6 +122,18 @@ if ui.debugflag: ui.traceback() +def _rejectunicode(name, xs): + if isinstance(xs, (list, set, tuple)): + for x in xs: + _rejectunicode(name, x) + elif isinstance(xs, dict): + for k, v in xs.items(): + _rejectunicode(name, k) + _rejectunicode(b'%s.%s' % (name, util.forcebytestr(k)), v) + elif isinstance(xs, type(u'')): + raise error.ProgrammingError(b"unicode %r found in %s" % (xs, name), + hint="use b'' to make it byte string") + # attributes set by registrar.command _cmdfuncattrs = ('norepo', 'optionalrepo', 'inferrepo') @@ -134,19 +146,22 @@ "registrar.command to register '%s'" % c, '4.6') missing = [a for a in _cmdfuncattrs if not util.safehasattr(f, a)] if not missing: - for option in e[1]: - default = option[2] - if isinstance(default, type(u'')): - raise error.ProgrammingError( - "option '%s.%s' has a unicode default value" - % (c, option[1]), - hint=("change the %s.%s default value to a " - "non-unicode string" % (c, option[1]))) continue raise error.ProgrammingError( 'missing attributes: %s' % ', '.join(missing), hint="use @command decorator to register '%s'" % c) +def _validatetables(ui, mod): + """Sanity check for loadable tables provided by extension module""" + for t in ['cmdtable', 'colortable', 'configtable']: + _rejectunicode(t, getattr(mod, t, {})) + for t in ['filesetpredicate', 'internalmerge', 'revsetpredicate', + 'templatefilter', 'templatefunc', 'templatekeyword']: + o = getattr(mod, t, None) + if o: + _rejectunicode(t, o._table) + _validatecmdtable(ui, getattr(mod, 'cmdtable', {})) + def load(ui, name, path): if name.startswith('hgext.') or name.startswith('hgext/'): shortname = name[6:] @@ -168,7 +183,7 @@ ui.warn(_('(third party extension %s requires version %s or newer ' 'of Mercurial; disabling)\n') % (shortname, minver)) return - _validatecmdtable(ui, getattr(mod, 'cmdtable', {})) + 
_validatetables(ui, mod) _extensions[shortname] = mod _order.append(shortname) @@ -195,11 +210,7 @@ try: extsetup(ui) except TypeError: - # Try to use getfullargspec (Python 3) first, and fall - # back to getargspec only if it doesn't exist so as to - # avoid warnings. - if getattr(inspect, 'getfullargspec', - getattr(inspect, 'getargspec'))(extsetup).args: + if pycompat.getargspec(extsetup).args: raise extsetup() # old extsetup with no ui argument except Exception as inst:
--- a/mercurial/fancyopts.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/fancyopts.py Sun Mar 04 10:42:51 2018 -0500 @@ -7,6 +7,7 @@ from __future__ import absolute_import +import abc import functools from .i18n import _ @@ -201,6 +202,64 @@ parsedargs.extend(args[pos:]) return parsedopts, parsedargs +class customopt(object): + """Manage defaults and mutations for any type of opt.""" + + __metaclass__ = abc.ABCMeta + + def __init__(self, defaultvalue): + self.defaultvalue = defaultvalue + + def _isboolopt(self): + return False + + @abc.abstractmethod + def newstate(self, oldstate, newparam, abort): + """Adds newparam to oldstate and returns the new state. + + On failure, abort can be called with a string error message.""" + +class _simpleopt(customopt): + def _isboolopt(self): + return isinstance(self.defaultvalue, (bool, type(None))) + + def newstate(self, oldstate, newparam, abort): + return newparam + +class _callableopt(customopt): + def __init__(self, callablefn): + self.callablefn = callablefn + super(_callableopt, self).__init__(None) + + def newstate(self, oldstate, newparam, abort): + return self.callablefn(newparam) + +class _listopt(customopt): + def newstate(self, oldstate, newparam, abort): + oldstate.append(newparam) + return oldstate + +class _intopt(customopt): + def newstate(self, oldstate, newparam, abort): + try: + return int(newparam) + except ValueError: + abort(_('expected int')) + +def _defaultopt(default): + """Returns a default opt implementation, given a default value.""" + + if isinstance(default, customopt): + return default + elif callable(default): + return _callableopt(default) + elif isinstance(default, list): + return _listopt(default[:]) + elif type(default) is type(1): + return _intopt(default) + else: + return _simpleopt(default) + def fancyopts(args, options, state, gnu=False, early=False, optaliases=None): """ read args, parse options, and store options in state @@ -220,6 +279,7 @@ list - parameter string is added to a list 
integer - parameter strings is stored as int function - call function with parameter + customopt - subclass of 'customopt' optaliases is a mapping from a canonical option name to a list of additional long options. This exists for preserving backward compatibility @@ -250,18 +310,13 @@ argmap['-' + short] = name for n in onames: argmap['--' + n] = name - defmap[name] = default + defmap[name] = _defaultopt(default) # copy defaults to state - if isinstance(default, list): - state[name] = default[:] - elif callable(default): - state[name] = None - else: - state[name] = default + state[name] = defmap[name].defaultvalue # does it take a parameter? - if not (default is None or default is True or default is False): + if not defmap[name]._isboolopt(): if short: short += ':' onames = [n + '=' for n in onames] @@ -301,21 +356,13 @@ boolval = False name = argmap[opt] obj = defmap[name] - t = type(obj) - if callable(obj): - state[name] = defmap[name](val) - elif t is type(1): - try: - state[name] = int(val) - except ValueError: - raise error.Abort(_('invalid value %r for option %s, ' - 'expected int') % (val, opt)) - elif t is type(''): - state[name] = val - elif t is type([]): - state[name].append(val) - elif t is type(None) or t is type(False): + if obj._isboolopt(): state[name] = boolval + else: + def abort(s): + raise error.Abort( + _('invalid value %r for option %s, %s') % (val, opt, s)) + state[name] = defmap[name].newstate(state[name], val, abort) # return unparsed args return args
--- a/mercurial/filemerge.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/filemerge.py Sun Mar 04 10:42:51 2018 -0500 @@ -513,13 +513,21 @@ b, c = _maketempfiles(repo, fco, fca) try: out = "" + mylabel, otherlabel = labels[:2] + if len(labels) >= 3: + baselabel = labels[2] + else: + baselabel = 'base' env = {'HG_FILE': fcd.path(), 'HG_MY_NODE': short(mynode), - 'HG_OTHER_NODE': str(fco.changectx()), - 'HG_BASE_NODE': str(fca.changectx()), + 'HG_OTHER_NODE': short(fco.changectx().node()), + 'HG_BASE_NODE': short(fca.changectx().node()), 'HG_MY_ISLINK': 'l' in fcd.flags(), 'HG_OTHER_ISLINK': 'l' in fco.flags(), 'HG_BASE_ISLINK': 'l' in fca.flags(), + 'HG_MY_LABEL': mylabel, + 'HG_OTHER_LABEL': otherlabel, + 'HG_BASE_LABEL': baselabel, } ui = repo.ui @@ -528,8 +536,10 @@ # read input from backup, write to original out = a a = repo.wvfs.join(back.path()) - replace = {'local': a, 'base': b, 'other': c, 'output': out} - args = util.interpolate(r'\$', replace, args, + replace = {'local': a, 'base': b, 'other': c, 'output': out, + 'labellocal': mylabel, 'labelother': otherlabel, + 'labelbase': baselabel} + args = util.interpolate(br'\$', replace, args, lambda s: util.shellquote(util.localpath(s))) cmd = toolpath + ' ' + args if _toolbool(ui, tool, "gui"): @@ -566,7 +576,7 @@ _defaultconflictlabels = ['local', 'other'] -def _formatlabels(repo, fcd, fco, fca, labels): +def _formatlabels(repo, fcd, fco, fca, labels, tool=None): """Formats the given labels using the conflict marker template. Returns a list of formatted labels. 
@@ -577,6 +587,8 @@ ui = repo.ui template = ui.config('ui', 'mergemarkertemplate') + if tool is not None: + template = _toolstr(ui, tool, 'mergemarkertemplate', template) template = templater.unquotestring(template) tres = formatter.templateresources(ui, repo) tmpl = formatter.maketemplater(ui, template, defaults=templatekw.keywords, @@ -706,6 +718,7 @@ mergetype = func.mergetype onfailure = func.onfailure precheck = func.precheck + isexternal = False else: if wctx.isinmemory(): func = _xmergeimm @@ -714,6 +727,7 @@ mergetype = fullmerge onfailure = _("merging %s failed!\n") precheck = None + isexternal = True toolconf = tool, toolpath, binary, symlink @@ -743,19 +757,42 @@ files = (None, None, None, back) r = 1 try: - markerstyle = ui.config('ui', 'mergemarkers') + internalmarkerstyle = ui.config('ui', 'mergemarkers') + if isexternal: + markerstyle = _toolstr(ui, tool, 'mergemarkers') + else: + markerstyle = internalmarkerstyle + if not labels: labels = _defaultconflictlabels + formattedlabels = labels if markerstyle != 'basic': - labels = _formatlabels(repo, fcd, fco, fca, labels) + formattedlabels = _formatlabels(repo, fcd, fco, fca, labels, + tool=tool) if premerge and mergetype == fullmerge: - r = _premerge(repo, fcd, fco, fca, toolconf, files, labels=labels) + # conflict markers generated by premerge will use 'detailed' + # settings if either ui.mergemarkers or the tool's mergemarkers + # setting is 'detailed'. This way tools can have basic labels in + # space-constrained areas of the UI, but still get full information + # in conflict markers if premerge is 'keep' or 'keep-merge3'. 
+ premergelabels = labels + labeltool = None + if markerstyle != 'basic': + # respect 'tool's mergemarkertemplate (which defaults to + # ui.mergemarkertemplate) + labeltool = tool + if internalmarkerstyle != 'basic' or markerstyle != 'basic': + premergelabels = _formatlabels(repo, fcd, fco, fca, + premergelabels, tool=labeltool) + + r = _premerge(repo, fcd, fco, fca, toolconf, files, + labels=premergelabels) # complete if premerge successful (r is 0) return not r, r, False needcheck, r, deleted = func(repo, mynode, orig, fcd, fco, fca, - toolconf, files, labels=labels) + toolconf, files, labels=formattedlabels) if needcheck: r = _check(repo, r, ui, tool, fcd, files)
--- a/mercurial/fileset.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/fileset.py Sun Mar 04 10:42:51 2018 -0500 @@ -392,11 +392,10 @@ elif expr.startswith(">"): a = util.sizetoint(expr[1:]) return lambda x: x > a - elif expr[0].isdigit or expr[0] == '.': + else: a = util.sizetoint(expr) b = _sizetomax(expr) return lambda x: x >= a and x <= b - raise error.ParseError(_("couldn't parse size: %s") % expr) @predicate('size(expression)', callexisting=True) def size(mctx, x):
--- a/mercurial/formatter.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/formatter.py Sun Mar 04 10:42:51 2018 -0500 @@ -126,6 +126,7 @@ templater, util, ) +from .utils import dateutil pickle = util.pickle @@ -175,10 +176,10 @@ def formatdate(self, date, fmt='%a %b %d %H:%M:%S %Y %1%2'): '''convert date tuple to appropriate format''' return self._converter.formatdate(date, fmt) - def formatdict(self, data, key='key', value='value', fmt='%s=%s', sep=' '): + def formatdict(self, data, key='key', value='value', fmt=None, sep=' '): '''convert dict or key-value pairs to appropriate dict format''' return self._converter.formatdict(data, key, value, fmt, sep) - def formatlist(self, data, name, fmt='%s', sep=' '): + def formatlist(self, data, name, fmt=None, sep=' '): '''convert iterable to appropriate list format''' # name is mandatory argument for now, but it could be optional if # we have default template keyword, e.g. {item} @@ -243,15 +244,24 @@ @staticmethod def formatdate(date, fmt): '''stringify date tuple in the given format''' - return util.datestr(date, fmt) + return dateutil.datestr(date, fmt) @staticmethod def formatdict(data, key, value, fmt, sep): '''stringify key-value pairs separated by sep''' - return sep.join(fmt % (k, v) for k, v in _iteritems(data)) + prefmt = pycompat.identity + if fmt is None: + fmt = '%s=%s' + prefmt = pycompat.bytestr + return sep.join(fmt % (prefmt(k), prefmt(v)) + for k, v in _iteritems(data)) @staticmethod def formatlist(data, name, fmt, sep): '''stringify iterable separated by sep''' - return sep.join(fmt % e for e in data) + prefmt = pycompat.identity + if fmt is None: + fmt = '%s' + prefmt = pycompat.bytestr + return sep.join(fmt % prefmt(e) for e in data) class plainformatter(baseformatter): '''the default text output scheme''' @@ -291,7 +301,7 @@ self._out = out self._out.write("%s = [\n" % self._topic) def _showitem(self): - self._out.write(" " + repr(self._item) + ",\n") + self._out.write(' %s,\n' % 
pycompat.byterepr(self._item)) def end(self): baseformatter.end(self) self._out.write("]\n") @@ -383,9 +393,7 @@ return ref = self._parts[part] - # TODO: add support for filectx. probably each template keyword or - # function will have to declare dependent resources. e.g. - # @templatekeyword(..., requires=('ctx',)) + # TODO: add support for filectx props = {} # explicitly-defined fields precede templatekw props.update(item)
--- a/mercurial/graphmod.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/graphmod.py Sun Mar 04 10:42:51 2018 -0500 @@ -454,7 +454,7 @@ if any(len(char) > 1 for char in edgemap.values()): # limit drawing an edge to the first or last N lines of the current # section the rest of the edge is drawn like a parent line. - parent = state['styles'][PARENT][-1] + parent = state['styles'][PARENT][-1:] def _drawgp(char, i): # should a grandparent character be drawn for this line? if len(char) < 2: @@ -463,7 +463,7 @@ # either skip first num lines or take last num lines, based on sign return -num <= i if num < 0 else (len(lines) - i) <= num for i, line in enumerate(lines): - line[:] = [c[-1] if _drawgp(c, i) else parent for c in line] + line[:] = [c[-1:] if _drawgp(c, i) else parent for c in line] edgemap.update( (e, (c if len(c) < 2 else parent)) for e, c in edgemap.items())
--- a/mercurial/help.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/help.py Sun Mar 04 10:42:51 2018 -0500 @@ -62,7 +62,8 @@ rst = loaddoc('extensions')(ui).splitlines(True) rst.extend(listexts( _('enabled extensions:'), extensions.enabled(), showdeprecated=True)) - rst.extend(listexts(_('disabled extensions:'), extensions.disabled())) + rst.extend(listexts(_('disabled extensions:'), extensions.disabled(), + showdeprecated=ui.verbose)) doc = ''.join(rst) return doc @@ -149,7 +150,7 @@ doclines = docs.splitlines() if doclines: summary = doclines[0] - cmdname = cmd.partition('|')[0].lstrip('^') + cmdname = cmdutil.parsealiases(cmd)[0] if filtercmd(ui, cmdname, kw, docs): continue results['commands'].append((cmdname, summary)) @@ -169,7 +170,7 @@ continue for cmd, entry in getattr(mod, 'cmdtable', {}).iteritems(): if kw in cmd or (len(entry) > 2 and lowercontains(entry[2])): - cmdname = cmd.partition('|')[0].lstrip('^') + cmdname = cmdutil.parsealiases(cmd)[0] cmddoc = pycompat.getdoc(entry[0]) if cmddoc: cmddoc = gettext(cmddoc).splitlines()[0] @@ -196,6 +197,8 @@ return loader internalstable = sorted([ + (['bundle2'], _('Bundle2'), + loaddoc('bundle2', subdir='internals')), (['bundles'], _('Bundles'), loaddoc('bundles', subdir='internals')), (['censor'], _('Censor'), @@ -327,7 +330,7 @@ # py3k fix: except vars can't be used outside the scope of the # except block, nor can be used inside a lambda. 
python issue4617 prefix = inst.args[0] - select = lambda c: c.lstrip('^').startswith(prefix) + select = lambda c: cmdutil.parsealiases(c)[0].startswith(prefix) rst = helplist(select) return rst @@ -418,15 +421,18 @@ h = {} cmds = {} for c, e in commands.table.iteritems(): - f = c.partition("|")[0] - if select and not select(f): + fs = cmdutil.parsealiases(c) + f = fs[0] + p = '' + if c.startswith("^"): + p = '^' + if select and not select(p + f): continue if (not select and name != 'shortlist' and e[0].__module__ != commands.__name__): continue - if name == "shortlist" and not f.startswith("^"): + if name == "shortlist" and not p: continue - f = f.lstrip("^") doc = pycompat.getdoc(e[0]) if filtercmd(ui, f, name, doc): continue @@ -434,7 +440,7 @@ if not doc: doc = _("(no help text available)") h[f] = doc.splitlines()[0].rstrip() - cmds[f] = c.lstrip("^") + cmds[f] = '|'.join(fs) rst = [] if not h:
--- a/mercurial/help/config.txt Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/help/config.txt Sun Mar 04 10:42:51 2018 -0500 @@ -1363,13 +1363,18 @@ ``args`` The arguments to pass to the tool executable. You can refer to the files being merged as well as the output file through these - variables: ``$base``, ``$local``, ``$other``, ``$output``. The meaning - of ``$local`` and ``$other`` can vary depending on which action is being - performed. During and update or merge, ``$local`` represents the original - state of the file, while ``$other`` represents the commit you are updating - to or the commit you are merging with. During a rebase ``$local`` - represents the destination of the rebase, and ``$other`` represents the - commit being rebased. + variables: ``$base``, ``$local``, ``$other``, ``$output``. + + The meaning of ``$local`` and ``$other`` can vary depending on which action is + being performed. During an update or merge, ``$local`` represents the original + state of the file, while ``$other`` represents the commit you are updating to or + the commit you are merging with. During a rebase, ``$local`` represents the + destination of the rebase, and ``$other`` represents the commit being rebased. + + Some operations define custom labels to assist with identifying the revisions, + accessible via ``$labellocal``, ``$labelother``, and ``$labelbase``. If custom + labels are not available, these will be ``local``, ``other``, and ``base``, + respectively. (default: ``$local $base $other``) ``premerge`` @@ -1405,6 +1410,21 @@ ``gui`` This tool requires a graphical interface to run. (default: False) +``mergemarkers`` + Controls whether the labels passed via ``$labellocal``, ``$labelother``, and + ``$labelbase`` are ``detailed`` (respecting ``mergemarkertemplate``) or + ``basic``. 
If ``premerge`` is ``keep`` or ``keep-merge3``, the conflict + markers generated during premerge will be ``detailed`` if either this option or + the corresponding option in the ``[ui]`` section is ``detailed``. + (default: ``basic``) + +``mergemarkertemplate`` + This setting can be used to override ``mergemarkertemplate`` from the ``[ui]`` + section on a per-tool basis; this applies to the ``$label``-prefixed variables + and to the conflict markers that are generated if ``premerge`` is ``keep`` or + ``keep-merge3``. See the corresponding variable in ``[ui]`` for more + information. + + .. container:: windows ``regkey`` @@ -2120,6 +2140,8 @@ markers is different from the encoding of the merged files, serious problems may occur. + + Can be overridden per-merge-tool, see the ``[merge-tools]`` section. + ``origbackuppath`` The path to a directory used to store generated .orig files. If the path is not a directory, one will be created. If set, files stored in this
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/help/internals/bundle2.txt Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,677 @@ +Bundle2 refers to a data format that is used for both on-disk storage +and over-the-wire transfer of repository data and state. + +The data format allows the capture of multiple components of +repository data. Contrast with the initial bundle format, which +only captured *changegroup* data (and couldn't store bookmarks, +phases, etc). + +Bundle2 is used for: + +* Transferring data from a repository (e.g. as part of an ``hg clone`` + or ``hg pull`` operation). +* Transferring data to a repository (e.g. as part of an ``hg push`` + operation). +* Storing data on disk (e.g. the result of an ``hg bundle`` + operation). +* Transferring the results of a repository operation (e.g. the + reply to an ``hg push`` operation). + +At its highest level, a bundle2 payload is a stream that begins +with some metadata and consists of a series of *parts*, with each +part describing repository data or state or the result of an +operation. New bundle2 parts are introduced over time when there is +a need to capture a new form of data. A *capabilities* mechanism +exists to allow peers to understand which bundle2 parts the other +understands. + +Stream Format +============= + +A bundle2 payload consists of a magic string (``HG20``) followed by +stream level parameters, followed by any number of payload *parts*. + +It may help to think of the stream level parameters as *headers* and the +payload parts as the *body*. + +Stream Level Parameters +----------------------- + +Following the magic string is data that defines parameters applicable to the +entire payload. + +Stream level parameters begin with a 32-bit unsigned big-endian integer. +The value of this integer defines the number of bytes of stream level +parameters that follow. + +The *N* bytes of raw data contains a space separated list of parameters. 
+Each parameter consists of a required name and an optional value. + +Parameters have the form ``<name>`` or ``<name>=<value>``. + +Both the parameter name and value are URL quoted. + +Names MUST start with a letter. If the first letter is lower case, the +parameter is advisory and can safely be ignored. If the first letter +is upper case, the parameter is mandatory and the handler MUST stop if +it is unable to process it. + +Stream level parameters apply to the entire bundle2 payload. Lower-level +options should go into a bundle2 part instead. + +The following stream level parameters are defined: + +compression + Compression format of payload data. ``GZ`` denotes zlib. ``BZ`` + denotes bzip2. ``ZS`` denotes zstandard. + + When defined, all bytes after the stream level parameters are + compressed using the compression format defined by this parameter. + + If this parameter isn't present, data is raw/uncompressed. + + This parameter MUST be mandatory because attempting to consume + streams without knowing how to decode the underlying bytes will + result in errors. + +Payload Part +------------ + +Following the stream level parameters are 0 or more payload parts. Each +payload part consists of a header and a body. + +The payload part header consists of a 32-bit unsigned big-endian integer +defining the number of bytes in the header that follow. The special +value ``0`` indicates the end of the bundle2 stream. + +The binary format of the part header is as follows: + +* 8-bit unsigned size of the part name +* N-bytes alphanumeric part name +* 32-bit unsigned big-endian part ID +* N bytes part parameter data + +The *part name* identifies the type of the part. A part name with an +UPPERCASE letter is mandatory. Otherwise, the part is advisory. A +consumer should abort if it encounters a mandatory part it doesn't know +how to process. See the sections below for each defined part type. 
+ +The *part ID* is a unique identifier within the bundle used to refer to a +specific part. It should be unique within the bundle2 payload. + +Part parameter data consists of: + +* 1 byte number of mandatory parameters +* 1 byte number of advisory parameters +* 2 * N bytes of sizes of parameter key and values +* N * M blobs of values for parameter key and values + +Following the 2 bytes of mandatory and advisory parameter counts are +2-tuples of bytes of the sizes of each parameter. e.g. +(<key size>, <value size>). + +Following that are the raw values, without padding. Mandatory parameters +come first, followed by advisory parameters. + +Each parameter's key MUST be unique within the part. + +Following the part parameter data is the part payload. The part payload +consists of a series of framed chunks. The frame header is a 32-bit +big-endian integer defining the size of the chunk. The N bytes of raw +payload data follows. + +The part payload consists of 0 or more chunks. + +A chunk with size ``0`` denotes the end of the part payload. Therefore, +there will always be at least 1 32-bit integer following the payload +part header. + +A chunk size of ``-1`` is used to signal an *interrupt*. If such a chunk +size is seen, the stream processor should process the next bytes as a new +payload part. After this payload part, processing of the original, +interrupted part should resume. + +Capabilities +============ + +Bundle2 is a dynamic format that can evolve over time. For example, +when a new repository data concept is invented, a new bundle2 part +is typically invented to hold that data. In addition, parts performing +similar functionality may come into existence if there is a better +mechanism for performing certain functionality. + +Because the bundle2 format evolves over time, peers need to understand +what bundle2 features the other can understand. The *capabilities* +mechanism is how those features are expressed. 
 + +Bundle2 capabilities are logically expressed as a dictionary of +string key-value pairs where the keys are strings and the values +are lists of strings. + +Capabilities are encoded for exchange between peers. The encoded +capabilities blob consists of a newline (``\n``) delimited list of +entries. Each entry has the form ``<key>`` or ``<key>=<value>``, +depending if the capability has a value. + +The capability name is URL quoted (``%XX`` encoding of URL unsafe +characters). + +The value, if present, is formed by URL quoting each value in +the capability list and concatenating the result with a comma (``,``). + +For example, the capabilities ``novaluekey`` and ``listvaluekey`` +with values ``value 1`` and ``value 2``. This would be encoded as: + + listvaluekey=value%201,value%202\nnovaluekey + +The sections below detail the defined bundle2 capabilities. + +HG20 +---- + +Denotes that the peer supports the bundle2 data format. + +bookmarks +--------- + +Denotes that the peer supports the ``bookmarks`` part. + +Peers should not issue mandatory ``bookmarks`` parts unless this +capability is present. + +changegroup +----------- + +Denotes which versions of the *changegroup* format the peer can +receive. Values include ``01``, ``02``, and ``03``. + +The peer should not generate changegroup data for a version not +specified by this capability. + +checkheads +---------- + +Denotes which forms of heads checking the peer supports. + +If ``related`` is in the value, then the peer supports the ``check:heads`` +part and the peer is capable of detecting race conditions when applying +changelog data. + +digests +------- + +Denotes which hashing formats the peer supports. + +Values are names of hashing functions. Values include ``md5``, ``sha1``, +and ``sha512``. + +error +----- + +Denotes which ``error:`` parts the peer supports. + +Value is a list of strings of ``error:`` part names. Valid values +include ``abort``, ``unsupportedcontent``, ``pushraced``, and ``pushkey``. 
 + +Peers should not issue an ``error:`` part unless the type of that +part is listed as supported by this capability. + +listkeys +-------- + +Denotes that the peer supports the ``listkeys`` part. + +hgtagsfnodes +------------ + +Denotes that the peer supports the ``hgtagsfnodes`` part. + +obsmarkers +---------- + +Denotes that the peer supports the ``obsmarker`` part and which versions +of the obsolescence data format it can receive. Values are strings like +``V<N>``. e.g. ``V1``. + +phases +------ + +Denotes that the peer supports the ``phases`` part. + +pushback +-------- + +Denotes that the peer supports sending/receiving bundle2 data in response +to a bundle2 request. + +This capability is typically used by servers that employ server-side +rewriting of pushed repository data. For example, a server may wish to +automatically rebase pushed changesets. When this capability is present, +the server can send a bundle2 response containing the rewritten changeset +data and the client will apply it. + +pushkey +------- + +Denotes that the peer supports the ``pushkey`` part. + +remote-changegroup +------------------ + +Denotes that the peer supports the ``remote-changegroup`` part and +which protocols it can use to fetch remote changegroup data. + +Values are protocol names. e.g. ``http`` and ``https``. + +stream +------ + +Denotes that the peer supports ``stream*`` parts in order to support +*stream clone*. + +Values are which ``stream*`` parts the peer supports. ``v2`` denotes +support for the ``stream2`` part. + +Bundle2 Part Types +================== + +The sections below detail the various bundle2 part types. + +bookmarks +--------- + +The ``bookmarks`` part holds bookmarks information. + +This part has no parameters. + +The payload consists of entries defining bookmarks. Each entry consists of: + +* 20 bytes binary changeset node. +* 2 bytes big endian short defining bookmark name length. +* N bytes defining bookmark name. 
+ +Receivers typically update bookmarks to match the state specified in +this part. + +changegroup +----------- + +The ``changegroup`` part contains *changegroup* data (changelog, manifestlog, +and filelog revision data). + +The following part parameters are defined for this part. + +version + Changegroup version string. e.g. ``01``, ``02``, and ``03``. This parameter + determines how to interpret the changegroup data within the part. + +nbchanges + The number of changesets in this changegroup. This parameter can be used + to aid in the display of progress bars, etc during part application. + +treemanifest + Whether the changegroup contains tree manifests. + +targetphase + The target phase of changesets in this part. Value is an integer of + the target phase. + +The payload of this part is raw changegroup data. See +:hg:`help internals.changegroups` for the format of changegroup data. + +check:bookmarks +--------------- + +The ``check:bookmarks`` part is inserted into a bundle as a means for the +receiver to validate that the sender's known state of bookmarks matches +the receiver's. + +This part has no parameters. + +The payload is a binary stream of bookmark data. Each entry in the stream +consists of: + +* 20 bytes binary node that bookmark is associated with +* 2 bytes unsigned short defining length of bookmark name +* N bytes containing the bookmark name + +If all bits in the node value are ``1``, then this signifies a missing +bookmark. + +When the receiver encounters this part, for each bookmark in the part +payload, it should validate that the current bookmark state matches +the specified state. If it doesn't, then the receiver should take +appropriate action. (In the case of pushes, this mismatch signifies +a race condition and the receiver should consider rejecting the push.) + +check:heads +----------- + +The ``check:heads`` part is a means to validate that the sender's state +of DAG heads matches the receiver's. + +This part has no parameters. 
+ +The body of this part is an array of 20 byte binary nodes representing +changeset heads. + +Receivers should compare the set of heads defined in this part to the +current set of repo heads and take action if there is a mismatch in that +set. + +Note that this part applies to *all* heads in the repo. + +check:phases +------------ + +The ``check:phases`` part validates that the sender's state of phase +boundaries matches the receiver's. + +This part has no parameters. + +The payload consists of an array of 24 byte entries. Each entry is +a big endian 32-bit integer defining the phase integer and 20 byte +binary node value. + +For each changeset defined in this part, the receiver should validate +that its current phase matches the phase defined in this part. The +receiver should take appropriate action if a mismatch occurs. + +check:updated-heads +------------------- + +The ``check:updated-heads`` part validates that the sender's state of +DAG heads updated by this bundle matches the receiver's. + +This type is nearly identical to ``check:heads`` except the heads +in the payload are only a subset of heads in the repository. The +receiver should validate that all nodes specified by the sender are +branch heads and take appropriate action if not. + +error:abort +----------- + +The ``error:abort`` part conveys a fatal error. + +The following part parameters are defined: + +message + The string content of the error message. + +hint + Supplemental string giving a hint on how to fix the problem. + +error:pushkey +------------- + +The ``error:pushkey`` part conveys an error in the *pushkey* protocol. + +The following part parameters are defined: + +namespace + The pushkey domain that exhibited the error. + +key + The key whose update failed. + +new + The value we tried to set the key to. + +old + The old value of the key (as supplied by the client). + +ret + The integer result code for the pushkey request. + +in-reply-to + Part ID that triggered this error. 
+ +This part is generated if there was an error applying *pushkey* data. +Pushkey data includes bookmarks, phases, and obsolescence markers. + +error:pushraced +--------------- + +The ``error:pushraced`` part conveys that an error occurred and +the likely cause is losing a race with another pusher. + +The following part parameters are defined: + +message + String error message. + +This part is typically emitted when a receiver examining ``check:*`` +parts encountered inconsistency between incoming state and local state. +The likely cause of that inconsistency is another repository change +operation (often another client performing an ``hg push``). + +error:unsupportedcontent +------------------------ + +The ``error:unsupportedcontent`` part conveys that a bundle2 receiver +encountered a part or content it was not able to handle. + +The following part parameters are defined: + +parttype + The name of the part that triggered this error. + +params + ``\0`` delimited list of parameters. + +hgtagsfnodes +------------ + +The ``hgtagsfnodes`` type defines file nodes for the ``.hgtags`` file +for various changesets. + +This part has no parameters. + +The payload is an array of pairs of 20 byte binary nodes. The first node +is a changeset node. The second node is the ``.hgtags`` file node. + +Resolving tags requires resolving the ``.hgtags`` file node for changesets. +On large repositories, this can be expensive. Repositories cache the +mapping of changeset to ``.hgtags`` file node on disk as a performance +optimization. This part allows that cached data to be transferred alongside +changeset data. + +Receivers should update their ``.hgtags`` cache file node mappings with +the incoming data. + +listkeys +-------- + +The ``listkeys`` part holds content for a *pushkey* namespace. + +The following part parameters are defined: + +namespace + The pushkey domain this data belongs to. 
+
+The part payload contains a newline (``\n``) delimited list of
+tab (``\t``) delimited key-value pairs defining entries in this pushkey
+namespace.
+
+obsmarkers
+----------
+
+The ``obsmarkers`` part defines obsolescence markers.
+
+This part has no parameters.
+
+The payload consists of obsolescence markers using the on-disk markers
+format. The first byte defines the version format.
+
+The receiver should apply the obsolescence markers defined in this
+part. A ``reply:obsmarkers`` part should be sent to the sender, if possible.
+
+output
+------
+
+The ``output`` part is used to display output on the receiver.
+
+This part has no parameters.
+
+The payload consists of raw data to be printed on the receiver.
+
+phase-heads
+-----------
+
+The ``phase-heads`` part defines phase boundaries.
+
+This part has no parameters.
+
+The payload consists of an array of 24 byte entries. Each entry is
+a big endian 32-bit integer defining the phase integer and 20 byte
+binary node value.
+
+pushkey
+-------
+
+The ``pushkey`` part communicates an intent to perform a ``pushkey``
+request.
+
+The following part parameters are defined:
+
+namespace
+  The pushkey domain to operate on.
+
+key
+  The key within the pushkey namespace that is being changed.
+
+old
+  The old value for the key being changed.
+
+new
+  The new value for the key being changed.
+
+This part has no payload.
+
+The receiver should perform a pushkey operation as described by this
+part's parameters.
+
+If the pushkey operation fails, a ``reply:pushkey`` part should be sent
+back to the sender, if possible. The ``in-reply-to`` part parameter
+should reference the source part.
+
+pushvars
+--------
+
+The ``pushvars`` part defines environment variables that should be
+set when processing this bundle2 payload.
+
+The part's advisory parameters define environment variables.
+
+There is no part payload. 
+ +When received, part parameters are prefixed with ``USERVAR_`` and the +resulting variables are defined in the hooks context for the current +bundle2 application. This part provides a mechanism for senders to +inject extra state into the hook execution environment on the receiver. + +remote-changegroup +------------------ + +The ``remote-changegroup`` part defines an external location of a bundle +to apply. This part can be used by servers to serve pre-generated bundles +hosted at arbitrary URLs. + +The following part parameters are defined: + +url + The URL of the remote bundle. + +size + The size in bytes of the remote bundle. + +digests + A space separated list of the digest types provided in additional + part parameters. + +digest:<type> + The hexadecimal representation of the digest (hash) of the remote bundle. + +There is no payload for this part type. + +When encountered, clients should attempt to fetch the URL being advertised +and read and apply it as a bundle. + +The ``size`` and ``digest:<type>`` parameters should be used to validate +that the downloaded bundle matches what was advertised. If a mismatch occurs, +the client should abort. + +reply:changegroup +----------------- + +The ``reply:changegroup`` part conveys the results of application of a +``changegroup`` part. + +The following part parameters are defined: + +return + Integer return code from changegroup application. + +in-reply-to + Part ID of part this reply is in response to. + +reply:obsmarkers +---------------- + +The ``reply:obsmarkers`` part conveys the results of applying an +``obsmarkers`` part. + +The following part parameters are defined: + +new + The integer number of new markers that were applied. + +in-reply-to + The part ID that this part is in reply to. + +reply:pushkey +------------- + +The ``reply:pushkey`` part conveys the result of a *pushkey* operation. + +The following part parameters are defined: + +return + Integer result code from pushkey operation. 
+
+in-reply-to
+  Part ID that triggered this pushkey operation.
+
+This part has no payload.
+
+replycaps
+---------
+
+The ``replycaps`` part notifies the receiver that a reply bundle should
+be created.
+
+This part has no parameters.
+
+The payload consists of a bundle2 capabilities blob.
+
+stream2
+-------
+
+The ``stream2`` part contains *streaming clone* version 2 data.
+
+The following part parameters are defined:
+
+requirements
+  URL quoted repository requirements string. Requirements are delimited by a
+  comma (``,``).
+
+filecount
+  The total number of files being transferred in the payload.
+
+bytecount
+  The total size of file content being transferred in the payload.
+
+The payload consists of raw stream clone version 2 data.
+
+The ``filecount`` and ``bytecount`` parameters can be used for progress and
+reporting purposes. The values may not be exact.
--- a/mercurial/help/internals/bundles.txt Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/help/internals/bundles.txt Sun Mar 04 10:42:51 2018 -0500 @@ -63,8 +63,7 @@ ``HG20`` is currently the only defined bundle2 version. -The ``HG20`` format is not yet documented here. See the inline comments -in ``mercurial/exchange.py`` for now. +The ``HG20`` format is documented at :hg:`help internals.bundle2`. Initial ``HG20`` support was added in Mercurial 3.0 (released May 2014). However, bundle2 bundles were hidden behind an experimental flag
--- a/mercurial/help/internals/requirements.txt Sat Mar 03 22:29:24 2018 -0500
+++ b/mercurial/help/internals/requirements.txt Sun Mar 04 10:42:51 2018 -0500
@@ -1,4 +1,3 @@
-
 Repositories contain a file (``.hg/requires``) containing a list of
 features/capabilities that are *required* for clients to interface
 with the repository. This file has been present in Mercurial since
@@ -105,8 +104,10 @@
 Denotes that version 2 of manifests are being used.
 Support for this requirement was added in Mercurial 3.4 (released
-May 2015). The requirement is currently experimental and is disabled
-by default.
+May 2015). The new format failed to meet expectations and support
+for the format and requirement were removed in Mercurial 4.6
+(released May 2018) since the feature never graduated from experimental
+status.
 treemanifest
 ============
--- a/mercurial/help/internals/wireprotocol.txt Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/help/internals/wireprotocol.txt Sun Mar 04 10:42:51 2018 -0500 @@ -10,11 +10,43 @@ The protocol is synchronous and does not support multiplexing (concurrent commands). -Transport Protocols -=================== +Handshake +========= + +It is required or common for clients to perform a *handshake* when connecting +to a server. The handshake serves the following purposes: + +* Negotiating protocol/transport level options +* Allows the client to learn about server capabilities to influence + future requests +* Ensures the underlying transport channel is in a *clean* state -HTTP Transport --------------- +An important goal of the handshake is to allow clients to use more modern +wire protocol features. By default, clients must assume they are talking +to an old version of Mercurial server (possibly even the very first +implementation). So, clients should not attempt to call or utilize modern +wire protocol features until they have confirmation that the server +supports them. The handshake implementation is designed to allow both +ends to utilize the latest set of features and capabilities with as +few round trips as possible. + +The handshake mechanism varies by transport and protocol and is documented +in the sections below. + +HTTP Protocol +============= + +Handshake +--------- + +The client sends a ``capabilities`` command request (``?cmd=capabilities``) +as soon as HTTP requests may be issued. + +The server responds with a capabilities string, which the client parses to +learn about the server's abilities. + +HTTP Version 1 Transport +------------------------ Commands are issued as HTTP/1.0 or HTTP/1.1 requests. Commands are sent to the base URL of the repository with the command name sent in @@ -112,11 +144,175 @@ ``application/mercurial-0.*`` media type and the HTTP response is typically using *chunked transfer* (``Transfer-Encoding: chunked``). 
-SSH Transport -============= +SSH Protocol +============ + +Handshake +--------- + +For all clients, the handshake consists of the client sending 1 or more +commands to the server using version 1 of the transport. Servers respond +to commands they know how to respond to and send an empty response (``0\n``) +for unknown commands (per standard behavior of version 1 of the transport). +Clients then typically look for a response to the newest sent command to +determine which transport version to use and what the available features for +the connection and server are. + +Preceding any response from client-issued commands, the server may print +non-protocol output. It is common for SSH servers to print banners, message +of the day announcements, etc when clients connect. It is assumed that any +such *banner* output will precede any Mercurial server output. So clients +must be prepared to handle server output on initial connect that isn't +in response to any client-issued command and doesn't conform to Mercurial's +wire protocol. This *banner* output should only be on stdout. However, +some servers may send output on stderr. + +Pre 0.9.1 clients issue a ``between`` command with the ``pairs`` argument +having the value +``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``. + +The ``between`` command has been supported since the original Mercurial +SSH server. Requesting the empty range will return a ``\n`` string response, +which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline +followed by the value, which happens to be a newline). + +For pre 0.9.1 clients and all servers, the exchange looks like:: + + c: between\n + c: pairs 81\n + c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + s: 1\n + s: \n + +0.9.1+ clients send a ``hello`` command (with no arguments) before the +``between`` command. 
The response to this command allows clients to +discover server capabilities and settings. + +An example exchange between 0.9.1+ clients and a ``hello`` aware server looks +like:: + + c: hello\n + c: between\n + c: pairs 81\n + c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + s: 324\n + s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n + s: 1\n + s: \n + +And a similar scenario but with servers sending a banner on connect:: + + c: hello\n + c: between\n + c: pairs 81\n + c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + s: welcome to the server\n + s: if you find any issues, email someone@somewhere.com\n + s: 324\n + s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n + s: 1\n + s: \n + +Note that output from the ``hello`` command is terminated by a ``\n``. This is +part of the response payload and not part of the wire protocol adding a newline +after responses. In other words, the length of the response contains the +trailing ``\n``. + +Clients supporting version 2 of the SSH transport send a line beginning +with ``upgrade`` before the ``hello`` and ``between`` commands. The line +(which isn't a well-formed command line because it doesn't consist of a +single command name) serves to both communicate the client's intent to +switch to transport version 2 (transports are version 1 by default) as +well as to advertise the client's transport-level capabilities so the +server may satisfy that request immediately. + +The upgrade line has the form: -The SSH transport is a custom text-based protocol suitable for use over any -bi-directional stream transport. It is most commonly used with SSH. + upgrade <token> <transport capabilities> + +That is the literal string ``upgrade`` followed by a space, followed by +a randomly generated string, followed by a space, followed by a string +denoting the client's transport capabilities. 
+
+The token can be anything. However, a random UUID is recommended. (Use
+of version 4 UUIDs is recommended because version 1 UUIDs can leak the
+client's MAC address.)
+
+The transport capabilities string is a URL/percent encoded string
+containing key-value pairs defining the client's transport-level
+capabilities. The following capabilities are defined:
+
+proto
+  A comma-delimited list of transport protocol versions the client
+  supports. e.g. ``ssh-v2``.
+
+If the server does not recognize the ``upgrade`` line, it should issue
+an empty response and continue processing the ``hello`` and ``between``
+commands. Here is an example handshake between a version 2 aware client
+and a non version 2 aware server:
+
+  c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2
+  c: hello\n
+  c: between\n
+  c: pairs 81\n
+  c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000
+  s: 0\n
+  s: 324\n
+  s: capabilities: lookup changegroupsubset branchmap pushkey known getbundle ...\n
+  s: 1\n
+  s: \n
+
+(The initial ``0\n`` line from the server indicates an empty response to
+the unknown ``upgrade ..`` command/line.)
+
+If the server recognizes the ``upgrade`` line and is willing to satisfy that
+upgrade request, it replies with a payload of the following form:
+
+  upgraded <token> <transport name>\n
+
+This line is the literal string ``upgraded``, a space, the token that was
+specified by the client in its ``upgrade ...`` request line, a space, and the
+name of the transport protocol that was chosen by the server. The transport
+name MUST match one of the names the client specified in the ``proto`` field
+of its ``upgrade ...`` request line.
+
+If a server issues an ``upgraded`` response, it MUST also read and ignore
+the lines associated with the ``hello`` and ``between`` command requests
+that were issued by the client. 
It is assumed that the negotiated transport +will respond with equivalent requested information following the transport +handshake. + +All data following the ``\n`` terminating the ``upgraded`` line is the +domain of the negotiated transport. It is common for the data immediately +following to contain additional metadata about the state of the transport and +the server. However, this isn't strictly speaking part of the transport +handshake and isn't covered by this section. + +Here is an example handshake between a version 2 aware client and a version +2 aware server: + + c: upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=ssh-v2 + c: hello\n + c: between\n + c: pairs 81\n + c: 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n + s: <additional transport specific data> + +The client-issued token that is echoed in the response provides a more +resilient mechanism for differentiating *banner* output from Mercurial +output. In version 1, properly formatted banner output could get confused +for Mercurial server output. By submitting a randomly generated token +that is then present in the response, the client can look for that token +in response lines and have reasonable certainty that the line did not +originate from a *banner* message. + +SSH Version 1 Transport +----------------------- + +The SSH transport (version 1) is a custom text-based protocol suitable for +use over any bi-directional stream transport. It is most commonly used with +SSH. A SSH transport server can be started with ``hg serve --stdio``. The stdin, stderr, and stdout file descriptors of the started process are used to exchange @@ -174,6 +370,31 @@ The server terminates if it receives an empty command (a ``\n`` character). 
+SSH Version 2 Transport +----------------------- + +**Experimental** + +Version 2 of the SSH transport behaves identically to version 1 of the SSH +transport with the exception of handshake semantics. See above for how +version 2 of the SSH transport is negotiated. + +Immediately following the ``upgraded`` line signaling a switch to version +2 of the SSH protocol, the server automatically sends additional details +about the capabilities of the remote server. This has the form: + + <integer length of value>\n + capabilities: ...\n + +e.g. + + s: upgraded 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a ssh-v2\n + s: 240\n + s: capabilities: known getbundle batch ...\n + +Following capabilities advertisement, the peers communicate using version +1 of the SSH transport. + Capabilities ============ @@ -463,53 +684,6 @@ reflects the priority/preference of that type, where the first value is the most preferred type. -Handshake Protocol -================== - -While not explicitly required, it is common for clients to perform a -*handshake* when connecting to a server. The handshake accomplishes 2 things: - -* Obtaining capabilities and other server features -* Flushing extra server output (e.g. SSH servers may print extra text - when connecting that may confuse the wire protocol) - -This isn't a traditional *handshake* as far as network protocols go because -there is no persistent state as a result of the handshake: the handshake is -simply the issuing of commands and commands are stateless. - -The canonical clients perform a capabilities lookup at connection establishment -time. This is because clients must assume a server only supports the features -of the original Mercurial server implementation until proven otherwise (from -advertised capabilities). Nearly every server running today supports features -that weren't present in the original Mercurial server implementation. 
Rather -than wait for a client to perform functionality that needs to consult -capabilities, it issues the lookup at connection start to avoid any delay later. - -For HTTP servers, the client sends a ``capabilities`` command request as -soon as the connection is established. The server responds with a capabilities -string, which the client parses. - -For SSH servers, the client sends the ``hello`` command (no arguments) -and a ``between`` command with the ``pairs`` argument having the value -``0000000000000000000000000000000000000000-0000000000000000000000000000000000000000``. - -The ``between`` command has been supported since the original Mercurial -server. Requesting the empty range will return a ``\n`` string response, -which will be encoded as ``1\n\n`` (value length of ``1`` followed by a newline -followed by the value, which happens to be a newline). - -The ``hello`` command was later introduced. Servers supporting it will issue -a response to that command before sending the ``1\n\n`` response to the -``between`` command. Servers not supporting ``hello`` will send an empty -response (``0\n``). - -In addition to the expected output from the ``hello`` and ``between`` commands, -servers may also send other output, such as *message of the day (MOTD)* -announcements. Clients assume servers will send this output before the -Mercurial server replies to the client-issued commands. So any server output -not conforming to the expected command responses is assumed to be not related -to Mercurial and can be ignored. - Content Negotiation =================== @@ -519,8 +693,8 @@ well-defined response type and only certain commands needed to support functionality like compression. -Currently, only the HTTP transport supports content negotiation at the protocol -layer. +Currently, only the HTTP version 1 transport supports content negotiation +at the protocol layer. 
HTTP requests advertise supported response formats via the ``X-HgProto-<N>`` request header, where ``<N>`` is an integer starting at 1 allowing the logical @@ -662,6 +836,8 @@ This command does not accept any arguments. Return type is a ``string``. +This command was introduced in Mercurial 0.9.1 (released July 2006). + changegroup ----------- @@ -737,7 +913,7 @@ Boolean indicating whether phases data is requested. The return type on success is a ``stream`` where the value is bundle. -On the HTTP transport, the response is zlib compressed. +On the HTTP version 1 transport, the response is zlib compressed. If an error occurs, a generic error response can be sent. @@ -779,6 +955,8 @@ This command does not accept any arguments. The return type is a ``string``. +This command was introduced in Mercurial 0.9.1 (released July 2006). + listkeys -------- @@ -838,13 +1016,14 @@ The return type is a ``string``. The value depends on the transport protocol. -The SSH transport sends a string encoded integer followed by a newline -(``\n``) which indicates operation result. The server may send additional -output on the ``stderr`` stream that should be displayed to the user. +The SSH version 1 transport sends a string encoded integer followed by a +newline (``\n``) which indicates operation result. The server may send +additional output on the ``stderr`` stream that should be displayed to the +user. -The HTTP transport sends a string encoded integer followed by a newline -followed by additional server output that should be displayed to the user. -This may include output from hooks, etc. +The HTTP version 1 transport sends a string encoded integer followed by a +newline followed by additional server output that should be displayed to +the user. This may include output from hooks, etc. The integer result varies by namespace. ``0`` means an error has occurred and there should be additional output to display to the user. 
@@ -908,18 +1087,18 @@ The encoding of the ``push response`` type varies by transport. -For the SSH transport, this type is composed of 2 ``string`` responses: an -empty response (``0\n``) followed by the integer result value. e.g. -``1\n2``. So the full response might be ``0\n1\n2``. +For the SSH version 1 transport, this type is composed of 2 ``string`` +responses: an empty response (``0\n``) followed by the integer result value. +e.g. ``1\n2``. So the full response might be ``0\n1\n2``. -For the HTTP transport, the response is a ``string`` type composed of an -integer result value followed by a newline (``\n``) followed by string +For the HTTP version 1 transport, the response is a ``string`` type composed +of an integer result value followed by a newline (``\n``) followed by string content holding server output that should be displayed on the client (output hooks, etc). In some cases, the server may respond with a ``bundle2`` bundle. In this -case, the response type is ``stream``. For the HTTP transport, the response -is zlib compressed. +case, the response type is ``stream``. For the HTTP version 1 transport, the +response is zlib compressed. The server may also respond with a generic error type, which contains a string indicating the failure.
--- a/mercurial/hg.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/hg.py Sun Mar 04 10:42:51 2018 -0500 @@ -31,6 +31,7 @@ httppeer, localrepo, lock, + logcmdutil, logexchange, merge as mergemod, node, @@ -201,6 +202,24 @@ return '' return os.path.basename(os.path.normpath(path)) +def sharedreposource(repo): + """Returns repository object for source repository of a shared repo. + + If repo is not a shared repository, returns None. + """ + if repo.sharedpath == repo.path: + return None + + if util.safehasattr(repo, 'srcrepo') and repo.srcrepo: + return repo.srcrepo + + # the sharedpath always ends in the .hg; we want the path to the repo + source = repo.vfs.split(repo.sharedpath)[0] + srcurl, branches = parseurl(source) + srcrepo = repository(repo.ui, srcurl) + repo.srcrepo = srcrepo + return srcrepo + def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None, relative=False): '''create a shared repository''' @@ -213,7 +232,7 @@ else: dest = ui.expandpath(dest) - if isinstance(source, str): + if isinstance(source, bytes): origsource = ui.expandpath(source) source, branches = parseurl(origsource) srcrepo = repository(ui, source) @@ -885,7 +904,8 @@ ui.status(_("no changes found\n")) return subreporecurse() ui.pager('incoming') - displayer = cmdutil.show_changeset(ui, other, opts, buffered) + displayer = logcmdutil.changesetdisplayer(ui, other, opts, + buffered=buffered) displaychlist(other, chlist, displayer) displayer.close() finally: @@ -904,7 +924,7 @@ return ret def display(other, chlist, displayer): - limit = cmdutil.loglimit(opts) + limit = logcmdutil.getlimit(opts) if opts.get('newest_first'): chlist.reverse() count = 0 @@ -949,7 +969,7 @@ ret = min(ret, sub.outgoing(ui, dest, opts)) return ret - limit = cmdutil.loglimit(opts) + limit = logcmdutil.getlimit(opts) o, other = _outgoing(ui, repo, dest, opts) if not o: cmdutil.outgoinghooks(ui, repo, other, opts, o) @@ -958,7 +978,7 @@ if opts.get('newest_first'): o.reverse() 
ui.pager('outgoing') - displayer = cmdutil.show_changeset(ui, repo, opts) + displayer = logcmdutil.changesetdisplayer(ui, repo, opts) count = 0 for n in o: if limit is not None and count >= limit:
--- a/mercurial/hgweb/common.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/hgweb/common.py Sun Mar 04 10:42:51 2018 -0500 @@ -45,7 +45,7 @@ authentication info). Return if op allowed, else raise an ErrorResponse exception.''' - user = req.env.get('REMOTE_USER') + user = req.env.get(r'REMOTE_USER') deny_read = hgweb.configlist('web', 'deny_read') if deny_read and (not user or ismember(hgweb.repo.ui, user, deny_read)): @@ -61,7 +61,7 @@ return # enforce that you can only push using POST requests - if req.env['REQUEST_METHOD'] != 'POST': + if req.env[r'REQUEST_METHOD'] != r'POST': msg = 'push requires POST request' raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg) @@ -93,7 +93,7 @@ def __init__(self, code, message=None, headers=None): if message is None: message = _statusmessage(code) - Exception.__init__(self, message) + Exception.__init__(self, pycompat.sysstr(message)) self.code = code if headers is None: headers = [] @@ -138,7 +138,7 @@ """Determine if a path is safe to use for filesystem access.""" parts = path.split('/') for part in parts: - if (part in ('', os.curdir, os.pardir) or + if (part in ('', pycompat.oscurdir, pycompat.ospardir) or pycompat.ossep in part or pycompat.osaltsep is not None and pycompat.osaltsep in part): return False @@ -185,7 +185,7 @@ if stripecount and offset: # account for offset, e.g. due to building the list in reverse count = (stripecount + offset) % stripecount - parity = (stripecount + offset) / stripecount & 1 + parity = (stripecount + offset) // stripecount & 1 else: count = 0 parity = 0
--- a/mercurial/hgweb/hgweb_mod.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/hgweb/hgweb_mod.py Sun Mar 04 10:42:51 2018 -0500 @@ -27,6 +27,7 @@ from .. import ( encoding, error, + formatter, hg, hook, profiling, @@ -36,10 +37,10 @@ templater, ui as uimod, util, + wireprotoserver, ) from . import ( - protocol, webcommands, webutil, wsgicgi, @@ -63,8 +64,6 @@ def getstyle(req, configfn, templatepath): fromreq = req.form.get('style', [None])[0] - if fromreq is not None: - fromreq = pycompat.sysbytes(fromreq) styles = ( fromreq, configfn('web', 'style'), @@ -199,7 +198,7 @@ return templatefilters.websub(text, self.websubtable) # create the templater - + # TODO: export all keywords: defaults = templatekw.keywords.copy() defaults = { 'url': req.url, 'logourl': logourl, @@ -214,9 +213,11 @@ 'style': style, 'nonce': self.nonce, } + tres = formatter.templateresources(self.repo.ui, self.repo) tmpl = templater.templater.frommapfile(mapfile, filters={'websub': websubfilter}, - defaults=defaults) + defaults=defaults, + resources=tres) return tmpl @@ -357,31 +358,21 @@ query = req.env[r'QUERY_STRING'].partition(r'&')[0] query = query.partition(r';')[0] - # process this if it's a protocol request - # protocol bits don't need to create any URLs - # and the clients always use the old URL structure + # Route it to a wire protocol handler if it looks like a wire protocol + # request. + protohandler = wireprotoserver.parsehttprequest(rctx.repo, req, query) - cmd = pycompat.sysbytes(req.form.get(r'cmd', [r''])[0]) - if protocol.iscmd(cmd): + if protohandler: + cmd = protohandler['cmd'] try: if query: raise ErrorResponse(HTTP_NOT_FOUND) if cmd in perms: self.check_perm(rctx, req, perms[cmd]) - return protocol.call(rctx.repo, req, cmd) except ErrorResponse as inst: - # A client that sends unbundle without 100-continue will - # break if we respond early. 
- if (cmd == 'unbundle' and - (req.env.get('HTTP_EXPECT', - '').lower() != '100-continue') or - req.env.get('X-HgHttp2', '')): - req.drain() - else: - req.headers.append((r'Connection', r'Close')) - req.respond(inst, protocol.HGTYPE, - body='0\n%s\n' % inst) - return '' + return protohandler['handleerror'](inst) + + return protohandler['dispatch']() # translate user-visible url structure to internal structure @@ -417,6 +408,8 @@ if fn.endswith(ext): req.form['node'] = [fn[:-len(ext)]] req.form['type'] = [type_] + else: + cmd = pycompat.sysbytes(req.form.get(r'cmd', [r''])[0]) # process the web interface request @@ -451,20 +444,20 @@ except (error.LookupError, error.RepoLookupError) as err: req.respond(HTTP_NOT_FOUND, ctype) - msg = str(err) + msg = pycompat.bytestr(err) if (util.safehasattr(err, 'name') and not isinstance(err, error.ManifestLookupError)): msg = 'revision not found: %s' % err.name return tmpl('error', error=msg) except (error.RepoError, error.RevlogError) as inst: req.respond(HTTP_SERVER_ERROR, ctype) - return tmpl('error', error=str(inst)) + return tmpl('error', error=pycompat.bytestr(inst)) except ErrorResponse as inst: req.respond(inst, ctype) if inst.code == HTTP_NOT_MODIFIED: # Not allowed to return a body on a 304 return [''] - return tmpl('error', error=str(inst)) + return tmpl('error', error=pycompat.bytestr(inst)) def check_perm(self, rctx, req, op): for permhook in permhooks:
--- a/mercurial/hgweb/hgwebdir_mod.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/hgweb/hgwebdir_mod.py Sun Mar 04 10:42:51 2018 -0500 @@ -46,6 +46,7 @@ webutil, wsgicgi, ) +from ..utils import dateutil def cleannames(items): return [(util.pconvert(name).strip('/'), path) for name, path in items] @@ -376,7 +377,7 @@ if directory: # get the directory's time information try: - d = (get_mtime(path), util.makedate()[1]) + d = (get_mtime(path), dateutil.makedate()[1]) except OSError: continue @@ -425,7 +426,7 @@ u.warn(_('error accessing repository at %s\n') % path) continue try: - d = (get_mtime(r.spath), util.makedate()[1]) + d = (get_mtime(r.spath), dateutil.makedate()[1]) except OSError: continue
--- a/mercurial/hgweb/protocol.py Sat Mar 03 22:29:24 2018 -0500 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,201 +0,0 @@ -# -# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> -# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -import cgi -import struct - -from .common import ( - HTTP_OK, -) - -from .. import ( - error, - pycompat, - util, - wireproto, -) -stringio = util.stringio - -urlerr = util.urlerr -urlreq = util.urlreq - -HGTYPE = 'application/mercurial-0.1' -HGTYPE2 = 'application/mercurial-0.2' -HGERRTYPE = 'application/hg-error' - -def decodevaluefromheaders(req, headerprefix): - """Decode a long value from multiple HTTP request headers. - - Returns the value as a bytes, not a str. - """ - chunks = [] - i = 1 - prefix = headerprefix.upper().replace(r'-', r'_') - while True: - v = req.env.get(r'HTTP_%s_%d' % (prefix, i)) - if v is None: - break - chunks.append(pycompat.bytesurl(v)) - i += 1 - - return ''.join(chunks) - -class webproto(wireproto.abstractserverproto): - def __init__(self, req, ui): - self.req = req - self.response = '' - self.ui = ui - self.name = 'http' - - def getargs(self, args): - knownargs = self._args() - data = {} - keys = args.split() - for k in keys: - if k == '*': - star = {} - for key in knownargs.keys(): - if key != 'cmd' and key not in keys: - star[key] = knownargs[key][0] - data['*'] = star - else: - data[k] = knownargs[k][0] - return [data[k] for k in keys] - def _args(self): - args = self.req.form.copy() - if pycompat.ispy3: - args = {k.encode('ascii'): [v.encode('ascii') for v in vs] - for k, vs in args.items()} - postlen = int(self.req.env.get(r'HTTP_X_HGARGS_POST', 0)) - if postlen: - args.update(cgi.parse_qs( - self.req.read(postlen), keep_blank_values=True)) - return args - - argvalue = decodevaluefromheaders(self.req, 
r'X-HgArg') - args.update(cgi.parse_qs(argvalue, keep_blank_values=True)) - return args - def getfile(self, fp): - length = int(self.req.env[r'CONTENT_LENGTH']) - # If httppostargs is used, we need to read Content-Length - # minus the amount that was consumed by args. - length -= int(self.req.env.get(r'HTTP_X_HGARGS_POST', 0)) - for s in util.filechunkiter(self.req, limit=length): - fp.write(s) - def redirect(self): - self.oldio = self.ui.fout, self.ui.ferr - self.ui.ferr = self.ui.fout = stringio() - def restore(self): - val = self.ui.fout.getvalue() - self.ui.ferr, self.ui.fout = self.oldio - return val - - def _client(self): - return 'remote:%s:%s:%s' % ( - self.req.env.get('wsgi.url_scheme') or 'http', - urlreq.quote(self.req.env.get('REMOTE_HOST', '')), - urlreq.quote(self.req.env.get('REMOTE_USER', ''))) - - def responsetype(self, prefer_uncompressed): - """Determine the appropriate response type and compression settings. - - Returns a tuple of (mediatype, compengine, engineopts). - """ - # Determine the response media type and compression engine based - # on the request parameters. - protocaps = decodevaluefromheaders(self.req, r'X-HgProto').split(' ') - - if '0.2' in protocaps: - # All clients are expected to support uncompressed data. - if prefer_uncompressed: - return HGTYPE2, util._noopengine(), {} - - # Default as defined by wire protocol spec. - compformats = ['zlib', 'none'] - for cap in protocaps: - if cap.startswith('comp='): - compformats = cap[5:].split(',') - break - - # Now find an agreed upon compression format. - for engine in wireproto.supportedcompengines(self.ui, self, - util.SERVERROLE): - if engine.wireprotosupport().name in compformats: - opts = {} - level = self.ui.configint('server', - '%slevel' % engine.name()) - if level is not None: - opts['level'] = level - - return HGTYPE2, engine, opts - - # No mutually supported compression format. Fall back to the - # legacy protocol. 
- - # Don't allow untrusted settings because disabling compression or - # setting a very high compression level could lead to flooding - # the server's network or CPU. - opts = {'level': self.ui.configint('server', 'zliblevel')} - return HGTYPE, util.compengines['zlib'], opts - -def iscmd(cmd): - return cmd in wireproto.commands - -def call(repo, req, cmd): - p = webproto(req, repo.ui) - - def genversion2(gen, engine, engineopts): - # application/mercurial-0.2 always sends a payload header - # identifying the compression engine. - name = engine.wireprotosupport().name - assert 0 < len(name) < 256 - yield struct.pack('B', len(name)) - yield name - - for chunk in gen: - yield chunk - - rsp = wireproto.dispatch(repo, p, cmd) - if isinstance(rsp, bytes): - req.respond(HTTP_OK, HGTYPE, body=rsp) - return [] - elif isinstance(rsp, wireproto.streamres_legacy): - gen = rsp.gen - req.respond(HTTP_OK, HGTYPE) - return gen - elif isinstance(rsp, wireproto.streamres): - gen = rsp.gen - - # This code for compression should not be streamres specific. It - # is here because we only compress streamres at the moment. - mediatype, engine, engineopts = p.responsetype(rsp.prefer_uncompressed) - gen = engine.compressstream(gen, engineopts) - - if mediatype == HGTYPE2: - gen = genversion2(gen, engine, engineopts) - - req.respond(HTTP_OK, mediatype) - return gen - elif isinstance(rsp, wireproto.pushres): - val = p.restore() - rsp = '%d\n%s' % (rsp.res, val) - req.respond(HTTP_OK, HGTYPE, body=rsp) - return [] - elif isinstance(rsp, wireproto.pusherr): - # drain the incoming bundle - req.drain() - p.restore() - rsp = '0\n%s\n' % rsp.res - req.respond(HTTP_OK, HGTYPE, body=rsp) - return [] - elif isinstance(rsp, wireproto.ooberror): - rsp = rsp.message - req.respond(HTTP_OK, HGERRTYPE, body=rsp) - return [] - raise error.ProgrammingError('hgweb.protocol internal failure', rsp)
--- a/mercurial/hgweb/request.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/hgweb/request.py Sun Mar 04 10:42:51 2018 -0500 @@ -115,13 +115,14 @@ self.headers = [(k, v) for (k, v) in self.headers if k in ('Date', 'ETag', 'Expires', 'Cache-Control', 'Vary')] - status = statusmessage(status.code, str(status)) + status = statusmessage(status.code, pycompat.bytestr(status)) elif status == 200: status = '200 Script output follows' elif isinstance(status, int): status = statusmessage(status) - self.server_write = self._start_response(status, self.headers) + self.server_write = self._start_response( + pycompat.sysstr(status), self.headers) self._start_response = None self.headers = [] if body is not None:
--- a/mercurial/hgweb/server.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/hgweb/server.py Sun Mar 04 10:42:51 2018 -0500 @@ -273,7 +273,7 @@ def openlog(opt, default): if opt and opt != '-': - return open(opt, 'a') + return open(opt, 'ab') return default class MercurialHTTPServer(_mixin, httpservermod.httpserver, object):
--- a/mercurial/hgweb/webcommands.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/hgweb/webcommands.py Sun Mar 04 10:42:51 2018 -0500 @@ -151,7 +151,7 @@ rename=webutil.renamelink(fctx), permissions=fctx.manifest().flags(f), ishead=int(ishead), - **webutil.commonentry(web.repo, fctx)) + **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))) @webcommand('file') def file(web, req, tmpl): @@ -284,7 +284,7 @@ parity=next(parity), changelogtag=showtags, files=files, - **webutil.commonentry(web.repo, ctx)) + **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))) if count >= revcount: break @@ -300,7 +300,7 @@ pass lessvars = copy.copy(tmpl.defaults['sessionvars']) - lessvars['revcount'] = max(revcount / 2, 1) + lessvars['revcount'] = max(revcount // 2, 1) lessvars['rev'] = query morevars = copy.copy(tmpl.defaults['sessionvars']) morevars['revcount'] = revcount * 2 @@ -395,7 +395,7 @@ pass lessvars = copy.copy(tmpl.defaults['sessionvars']) - lessvars['revcount'] = max(revcount / 2, 1) + lessvars['revcount'] = max(revcount // 2, 1) morevars = copy.copy(tmpl.defaults['sessionvars']) morevars['revcount'] = revcount * 2 @@ -542,7 +542,7 @@ emptydirs = [] h = dirs[d] while isinstance(h, dict) and len(h) == 1: - k, v = h.items()[0] + k, v = next(iter(h.items())) if v: emptydirs.append(k) h = v @@ -561,7 +561,7 @@ fentries=filelist, dentries=dirlist, archives=web.archivelist(hex(node)), - **webutil.commonentry(web.repo, ctx)) + **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))) @webcommand('tags') def tags(web, req, tmpl): @@ -708,7 +708,7 @@ l.append(tmpl( 'shortlogentry', parity=next(parity), - **webutil.commonentry(web.repo, ctx))) + **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))) for entry in reversed(l): yield entry @@ -777,7 +777,7 @@ symrev=webutil.symrevorshortnode(req, ctx), rename=rename, diff=diffs, - **webutil.commonentry(web.repo, ctx)) + **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))) diff = webcommand('diff')(filediff) @@ 
-852,7 +852,7 @@ rightrev=rightrev, rightnode=hex(rightnode), comparison=comparison, - **webutil.commonentry(web.repo, ctx)) + **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))) @webcommand('annotate') def annotate(web, req, tmpl): @@ -943,7 +943,7 @@ permissions=fctx.manifest().flags(f), ishead=int(ishead), diffopts=diffopts, - **webutil.commonentry(web.repo, fctx)) + **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))) @webcommand('filelog') def filelog(web, req, tmpl): @@ -990,7 +990,7 @@ lrange = webutil.linerange(req) lessvars = copy.copy(tmpl.defaults['sessionvars']) - lessvars['revcount'] = max(revcount / 2, 1) + lessvars['revcount'] = max(revcount // 2, 1) morevars = copy.copy(tmpl.defaults['sessionvars']) morevars['revcount'] = revcount * 2 @@ -1044,7 +1044,7 @@ file=path, diff=diffs, linerange=webutil.formatlinerange(*lr), - **webutil.commonentry(repo, c))) + **pycompat.strkwargs(webutil.commonentry(repo, c)))) if i == revcount: break lessvars['linerange'] = webutil.formatlinerange(*lrange) @@ -1061,7 +1061,7 @@ file=f, diff=diffs, rename=webutil.renamelink(iterfctx), - **webutil.commonentry(repo, iterfctx))) + **pycompat.strkwargs(webutil.commonentry(repo, iterfctx)))) entries.reverse() revnav = webutil.filerevnav(web.repo, fctx.path()) nav = revnav.gen(end - 1, revcount, count) @@ -1080,7 +1080,7 @@ revcount=revcount, morevars=morevars, lessvars=lessvars, - **webutil.commonentry(web.repo, fctx)) + **pycompat.strkwargs(webutil.commonentry(web.repo, fctx))) @webcommand('archive') def archive(web, req, tmpl): @@ -1116,7 +1116,7 @@ msg = 'Archive type not allowed: %s' % type_ raise ErrorResponse(HTTP_FORBIDDEN, msg) - reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame)) + reponame = re.sub(br"\W+", "-", os.path.basename(web.reponame)) cnode = web.repo.lookup(key) arch_version = key if cnode == key or key == 'tip': @@ -1208,7 +1208,7 @@ pass lessvars = copy.copy(tmpl.defaults['sessionvars']) - lessvars['revcount'] = max(revcount / 2, 
1) + lessvars['revcount'] = max(revcount // 2, 1) morevars = copy.copy(tmpl.defaults['sessionvars']) morevars['revcount'] = revcount * 2 @@ -1403,7 +1403,7 @@ try: doc = helpmod.help_(u, commands, topic, subtopic=subtopic) - except error.UnknownCommand: + except error.Abort: raise ErrorResponse(HTTP_NOT_FOUND) return tmpl('help', topic=topicname, doc=doc)
--- a/mercurial/hgweb/webutil.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/hgweb/webutil.py Sun Mar 04 10:42:51 2018 -0500 @@ -347,20 +347,29 @@ try: return util.processlinerange(fromline, toline) except error.ParseError as exc: - raise ErrorResponse(HTTP_BAD_REQUEST, str(exc)) + raise ErrorResponse(HTTP_BAD_REQUEST, pycompat.bytestr(exc)) def formatlinerange(fromline, toline): return '%d:%d' % (fromline + 1, toline) -def succsandmarkers(repo, ctx): - for item in templatekw.showsuccsandmarkers(repo, ctx): +def succsandmarkers(context, mapping): + repo = context.resource(mapping, 'repo') + for item in templatekw.showsuccsandmarkers(context, mapping): item['successors'] = _siblings(repo[successor] for successor in item['successors']) yield item +# teach templater succsandmarkers is switched to (context, mapping) API +succsandmarkers._requires = {'repo', 'ctx', 'templ'} + def commonentry(repo, ctx): node = ctx.node() return { + # TODO: perhaps ctx.changectx() should be assigned if ctx is a + # filectx, but I'm not pretty sure if that would always work because + # fctx.parents() != fctx.changectx.parents() for example. 
+ 'ctx': ctx, + 'revcache': {}, 'rev': ctx.rev(), 'node': hex(node), 'author': ctx.user(), @@ -369,7 +378,7 @@ 'extra': ctx.extra(), 'phase': ctx.phasestr(), 'obsolete': ctx.obsolete(), - 'succsandmarkers': lambda **x: succsandmarkers(repo, ctx), + 'succsandmarkers': succsandmarkers, 'instabilities': [{"instability": i} for i in ctx.instabilities()], 'branch': nodebranchnodefault(ctx), 'inbranch': nodeinbranch(repo, ctx), @@ -449,7 +458,7 @@ diffsummary=lambda **x: diffsummary(diffstatsgen), diffstat=diffstats, archives=web.archivelist(ctx.hex()), - **commonentry(web.repo, ctx)) + **pycompat.strkwargs(commonentry(web.repo, ctx))) def listfilediffs(tmpl, files, node, max): for f in files[:max]: @@ -619,14 +628,14 @@ websubdefs += repo.ui.configitems('interhg') for key, pattern in websubdefs: # grab the delimiter from the character after the "s" - unesc = pattern[1] + unesc = pattern[1:2] delim = re.escape(unesc) # identify portions of the pattern, taking care to avoid escaped # delimiters. the replace format and flags are optional, but # delimiters are required. match = re.match( - r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$' + br'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$' % (delim, delim, delim), pattern) if not match: repo.ui.warn(_("websub: invalid pattern for %s: %s\n") @@ -634,7 +643,7 @@ continue # we need to unescape the delimiter for regexp and format - delim_re = re.compile(r'(?<!\\)\\%s' % delim) + delim_re = re.compile(br'(?<!\\)\\%s' % delim) regexp = delim_re.sub(unesc, match.group(1)) format = delim_re.sub(unesc, match.group(2))
--- a/mercurial/hook.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/hook.py Sun Mar 04 10:42:51 2018 -0500 @@ -49,12 +49,12 @@ modname = modfile with demandimport.deactivated(): try: - obj = __import__(modname) + obj = __import__(pycompat.sysstr(modname)) except (ImportError, SyntaxError): e1 = sys.exc_info() try: # extensions are loaded with hgext_ prefix - obj = __import__("hgext_%s" % modname) + obj = __import__(r"hgext_%s" % pycompat.sysstr(modname)) except (ImportError, SyntaxError): e2 = sys.exc_info() if ui.tracebackflag:
--- a/mercurial/httpclient/__init__.py Sat Mar 03 22:29:24 2018 -0500 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,912 +0,0 @@ -# Copyright 2010, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. - -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-"""Improved HTTP/1.1 client library - -This library contains an HTTPConnection which is similar to the one in -httplib, but has several additional features: - - * supports keepalives natively - * uses select() to block for incoming data - * notices when the server responds early to a request - * implements ssl inline instead of in a different class -""" -from __future__ import absolute_import - -# Many functions in this file have too many arguments. -# pylint: disable=R0913 -import email -import email.message -import errno -import inspect -import logging -import select -import socket -import ssl -import sys - -try: - import cStringIO as io - io.StringIO -except ImportError: - import io - -try: - import httplib - httplib.HTTPException -except ImportError: - import http.client as httplib - -from . import ( - _readers, -) - -logger = logging.getLogger(__name__) - -__all__ = ['HTTPConnection', 'HTTPResponse'] - -HTTP_VER_1_0 = b'HTTP/1.0' -HTTP_VER_1_1 = b'HTTP/1.1' - -OUTGOING_BUFFER_SIZE = 1 << 15 -INCOMING_BUFFER_SIZE = 1 << 20 - -HDR_ACCEPT_ENCODING = 'accept-encoding' -HDR_CONNECTION_CTRL = 'connection' -HDR_CONTENT_LENGTH = 'content-length' -HDR_XFER_ENCODING = 'transfer-encoding' - -XFER_ENCODING_CHUNKED = 'chunked' - -CONNECTION_CLOSE = 'close' - -EOL = b'\r\n' -_END_HEADERS = EOL * 2 - -# Based on some searching around, 1 second seems like a reasonable -# default here. -TIMEOUT_ASSUME_CONTINUE = 1 -TIMEOUT_DEFAULT = None - -if sys.version_info > (3, 0): - _unicode = str -else: - _unicode = unicode - -def _ensurebytes(data): - if not isinstance(data, (_unicode, bytes)): - data = str(data) - if not isinstance(data, bytes): - try: - return data.encode('latin-1') - except UnicodeEncodeError as err: - raise UnicodeEncodeError( - err.encoding, - err.object, - err.start, - err.end, - '%r is not valid Latin-1 Use .encode("utf-8") ' - 'if sending as utf-8 is desired.' 
% ( - data[err.start:err.end],)) - return data - -class _CompatMessage(email.message.Message): - """Workaround for rfc822.Message and email.message.Message API diffs.""" - - @classmethod - def from_string(cls, s): - if sys.version_info > (3, 0): - # Python 3 can't decode headers from bytes, so we have to - # trust RFC 2616 and decode the headers as iso-8859-1 - # bytes. - s = s.decode('iso-8859-1') - headers = email.message_from_string(s, _class=_CompatMessage) - # Fix multi-line headers to match httplib's behavior from - # Python 2.x, since email.message.Message handles them in - # slightly different ways. - if sys.version_info < (3, 0): - new = [] - for h, v in headers._headers: - if '\r\n' in v: - v = '\n'.join([' ' + x.lstrip() for x in v.split('\r\n')])[1:] - new.append((h, v)) - headers._headers = new - return headers - - def getheaders(self, key): - return self.get_all(key) - - def getheader(self, key, default=None): - return self.get(key, failobj=default) - - -class HTTPResponse(object): - """Response from an HTTP server. - - The response will continue to load as available. If you need the - complete response before continuing, check the .complete() method. - """ - def __init__(self, sock, timeout, method): - self.sock = sock - self.method = method - self.raw_response = b'' - self._headers_len = 0 - self.headers = None - self.will_close = False - self.status_line = b'' - self.status = None - self.continued = False - self.http_version = None - self.reason = None - self._reader = None - - self._read_location = 0 - self._eol = EOL - - self._timeout = timeout - - @property - def _end_headers(self): - return self._eol * 2 - - def complete(self): - """Returns true if this response is completely loaded. - - Note that if this is a connection where complete means the - socket is closed, this will nearly always return False, even - in cases where all the data has actually been loaded. 
- """ - if self._reader: - return self._reader.done() - - def _close(self): - if self._reader is not None: - # We're a friend of the reader class here. - # pylint: disable=W0212 - self._reader._close() - - def getheader(self, header, default=None): - return self.headers.getheader(header, default=default) - - def getheaders(self): - if sys.version_info < (3, 0): - return [(k.lower(), v) for k, v in self.headers.items()] - # Starting in Python 3, headers aren't lowercased before being - # returned here. - return self.headers.items() - - def readline(self): - """Read a single line from the response body. - - This may block until either a line ending is found or the - response is complete. - """ - blocks = [] - while True: - self._reader.readto(b'\n', blocks) - - if blocks and blocks[-1][-1:] == b'\n' or self.complete(): - break - - self._select() - - return b''.join(blocks) - - def read(self, length=None): - """Read data from the response body.""" - # if length is None, unbounded read - while (not self.complete() # never select on a finished read - and (not length # unbounded, so we wait for complete() - or length > self._reader.available_data)): - self._select() - if not length: - length = self._reader.available_data - r = self._reader.read(length) - if self.complete() and self.will_close: - self.sock.close() - return r - - def _select(self): - r, unused_write, unused_err = select.select( - [self.sock], [], [], self._timeout) - if not r: - # socket was not readable. If the response is not - # complete, raise a timeout. 
- if not self.complete(): - logger.info('timed out with timeout of %s', self._timeout) - raise HTTPTimeoutException('timeout reading data') - try: - data = self.sock.recv(INCOMING_BUFFER_SIZE) - except ssl.SSLError as e: - if e.args[0] != ssl.SSL_ERROR_WANT_READ: - raise - logger.debug('SSL_ERROR_WANT_READ in _select, should retry later') - return True - logger.debug('response read %d data during _select', len(data)) - # If the socket was readable and no data was read, that means - # the socket was closed. Inform the reader (if any) so it can - # raise an exception if this is an invalid situation. - if not data: - if self._reader: - # We're a friend of the reader class here. - # pylint: disable=W0212 - self._reader._close() - return False - else: - self._load_response(data) - return True - - # This method gets replaced by _load later, which confuses pylint. - def _load_response(self, data): # pylint: disable=E0202 - # Being here implies we're not at the end of the headers yet, - # since at the end of this method if headers were completely - # loaded we replace this method with the load() method of the - # reader we created. - self.raw_response += data - # This is a bogus server with bad line endings - if self._eol not in self.raw_response: - for bad_eol in (b'\n', b'\r'): - if (bad_eol in self.raw_response - # verify that bad_eol is not the end of the incoming data - # as this could be a response line that just got - # split between \r and \n. 
- and (self.raw_response.index(bad_eol) < - (len(self.raw_response) - 1))): - logger.info('bogus line endings detected, ' - 'using %r for EOL', bad_eol) - self._eol = bad_eol - break - # exit early if not at end of headers - if self._end_headers not in self.raw_response or self.headers: - return - - # handle 100-continue response - hdrs, body = self.raw_response.split(self._end_headers, 1) - unused_http_ver, status = hdrs.split(b' ', 1) - if status.startswith(b'100'): - self.raw_response = body - self.continued = True - logger.debug('continue seen, setting body to %r', body) - return - - # arriving here means we should parse response headers - # as all headers have arrived completely - hdrs, body = self.raw_response.split(self._end_headers, 1) - del self.raw_response - if self._eol in hdrs: - self.status_line, hdrs = hdrs.split(self._eol, 1) - else: - self.status_line = hdrs - hdrs = b'' - # TODO HTTP < 1.0 support - (self.http_version, self.status, - self.reason) = self.status_line.split(b' ', 2) - self.status = int(self.status) - if self._eol != EOL: - hdrs = hdrs.replace(self._eol, b'\r\n') - headers = _CompatMessage.from_string(hdrs) - content_len = None - if HDR_CONTENT_LENGTH in headers: - content_len = int(headers[HDR_CONTENT_LENGTH]) - if self.http_version == HTTP_VER_1_0: - self.will_close = True - elif HDR_CONNECTION_CTRL in headers: - self.will_close = ( - headers[HDR_CONNECTION_CTRL].lower() == CONNECTION_CLOSE) - if (HDR_XFER_ENCODING in headers - and headers[HDR_XFER_ENCODING].lower() == XFER_ENCODING_CHUNKED): - self._reader = _readers.ChunkedReader(self._eol) - logger.debug('using a chunked reader') - else: - # HEAD responses are forbidden from returning a body, and - # it's implausible for a CONNECT response to use - # close-is-end logic for an OK response. 
- if (self.method == b'HEAD' or - (self.method == b'CONNECT' and content_len is None)): - content_len = 0 - if content_len is not None: - logger.debug('using a content-length reader with length %d', - content_len) - self._reader = _readers.ContentLengthReader(content_len) - else: - # Response body had no length specified and is not - # chunked, so the end of the body will only be - # identifiable by the termination of the socket by the - # server. My interpretation of the spec means that we - # are correct in hitting this case if - # transfer-encoding, content-length, and - # connection-control were left unspecified. - self._reader = _readers.CloseIsEndReader() - logger.debug('using a close-is-end reader') - self.will_close = True - - if body: - # We're a friend of the reader class here. - # pylint: disable=W0212 - self._reader._load(body) - logger.debug('headers complete') - self.headers = headers - # We're a friend of the reader class here. - # pylint: disable=W0212 - self._load_response = self._reader._load - -def _foldheaders(headers): - """Given some headers, rework them so we can safely overwrite values. - - >>> _foldheaders({'Accept-Encoding': 'wat'}) - {'accept-encoding': ('Accept-Encoding', 'wat')} - """ - return dict((k.lower(), (k, v)) for k, v in headers.items()) - -try: - inspect.signature - def _handlesarg(func, arg): - """ Try to determine if func accepts arg - - If it takes arg, return True - If it happens to take **args, then it could do anything: - * It could throw a different TypeError, just for fun - * It could throw an ArgumentError or anything else - * It could choose not to throw an Exception at all - ... 
return 'unknown' - - Otherwise, return False - """ - params = inspect.signature(func).parameters - if arg in params: - return True - for p in params: - if params[p].kind == inspect._ParameterKind.VAR_KEYWORD: - return 'unknown' - return False -except AttributeError: - def _handlesarg(func, arg): - """ Try to determine if func accepts arg - - If it takes arg, return True - If it happens to take **args, then it could do anything: - * It could throw a different TypeError, just for fun - * It could throw an ArgumentError or anything else - * It could choose not to throw an Exception at all - ... return 'unknown' - - Otherwise, return False - """ - spec = inspect.getargspec(func) - if arg in spec.args: - return True - if spec.keywords: - return 'unknown' - return False - -class HTTPConnection(object): - """Connection to a single http server. - - Supports 100-continue and keepalives natively. Uses select() for - non-blocking socket operations. - """ - http_version = HTTP_VER_1_1 - response_class = HTTPResponse - - def __init__(self, host, port=None, use_ssl=None, ssl_validator=None, - timeout=TIMEOUT_DEFAULT, - continue_timeout=TIMEOUT_ASSUME_CONTINUE, - proxy_hostport=None, proxy_headers=None, - ssl_wrap_socket=None, **ssl_opts): - """Create a new HTTPConnection. - - Args: - host: The host to which we'll connect. - port: Optional. The port over which we'll connect. Default 80 for - non-ssl, 443 for ssl. - use_ssl: Optional. Whether to use ssl. Defaults to False if port is - not 443, true if port is 443. - ssl_validator: a function(socket) to validate the ssl cert - timeout: Optional. Connection timeout, default is TIMEOUT_DEFAULT. - continue_timeout: Optional. Timeout for waiting on an expected - "100 Continue" response. Default is TIMEOUT_ASSUME_CONTINUE. - proxy_hostport: Optional. Tuple of (host, port) to use as an http - proxy for the connection. Default is to not use a proxy. 
- proxy_headers: Optional dict of header keys and values to send to - a proxy when using CONNECT. For compatibility with - httplib, the Proxy-Authorization header may be - specified in headers for request(), which will clobber - any such header specified here if specified. Providing - this option and not proxy_hostport will raise an - ValueError. - ssl_wrap_socket: Optional function to use for wrapping - sockets. If unspecified, the one from the ssl module will - be used if available, or something that's compatible with - it if on a Python older than 2.6. - - Any extra keyword arguments to this function will be provided - to the ssl_wrap_socket method. If no ssl - """ - host = _ensurebytes(host) - if port is None and host.count(b':') == 1 or b']:' in host: - host, port = host.rsplit(b':', 1) - port = int(port) - if b'[' in host: - host = host[1:-1] - if ssl_wrap_socket is not None: - _wrap_socket = ssl_wrap_socket - else: - _wrap_socket = ssl.wrap_socket - call_wrap_socket = None - handlesubar = _handlesarg(_wrap_socket, 'server_hostname') - if handlesubar is True: - # supports server_hostname - call_wrap_socket = _wrap_socket - handlesnobar = _handlesarg(_wrap_socket, 'serverhostname') - if handlesnobar is True and handlesubar is not True: - # supports serverhostname - def call_wrap_socket(sock, server_hostname=None, **ssl_opts): - return _wrap_socket(sock, serverhostname=server_hostname, - **ssl_opts) - if handlesubar is False and handlesnobar is False: - # does not support either - def call_wrap_socket(sock, server_hostname=None, **ssl_opts): - return _wrap_socket(sock, **ssl_opts) - if call_wrap_socket is None: - # we assume it takes **args - def call_wrap_socket(sock, **ssl_opts): - if 'server_hostname' in ssl_opts: - ssl_opts['serverhostname'] = ssl_opts['server_hostname'] - return _wrap_socket(sock, **ssl_opts) - self._ssl_wrap_socket = call_wrap_socket - if use_ssl is None and port is None: - use_ssl = False - port = 80 - elif use_ssl is None: - use_ssl = 
(port == 443) - elif port is None: - port = (use_ssl and 443 or 80) - self.port = port - self.ssl = use_ssl - self.ssl_opts = ssl_opts - self._ssl_validator = ssl_validator - self.host = host - self.sock = None - self._current_response = None - self._current_response_taken = False - if proxy_hostport is None: - self._proxy_host = self._proxy_port = None - if proxy_headers: - raise ValueError( - 'proxy_headers may not be specified unless ' - 'proxy_hostport is also specified.') - else: - self._proxy_headers = {} - else: - self._proxy_host, self._proxy_port = proxy_hostport - self._proxy_headers = _foldheaders(proxy_headers or {}) - - self.timeout = timeout - self.continue_timeout = continue_timeout - - def _connect(self, proxy_headers): - """Connect to the host and port specified in __init__.""" - if self.sock: - return - if self._proxy_host is not None: - logger.info('Connecting to http proxy %s:%s', - self._proxy_host, self._proxy_port) - sock = socket.create_connection((self._proxy_host, - self._proxy_port)) - if self.ssl: - data = self._buildheaders(b'CONNECT', b'%s:%d' % (self.host, - self.port), - proxy_headers, HTTP_VER_1_0) - sock.send(data) - sock.setblocking(0) - r = self.response_class(sock, self.timeout, b'CONNECT') - timeout_exc = HTTPTimeoutException( - 'Timed out waiting for CONNECT response from proxy') - while not r.complete(): - try: - # We're a friend of the response class, so let - # us use the private attribute. - # pylint: disable=W0212 - if not r._select(): - if not r.complete(): - raise timeout_exc - except HTTPTimeoutException: - # This raise/except pattern looks goofy, but - # _select can raise the timeout as well as the - # loop body. I wish it wasn't this convoluted, - # but I don't have a better solution - # immediately handy. 
- raise timeout_exc - if r.status != 200: - raise HTTPProxyConnectFailedException( - 'Proxy connection failed: %d %s' % (r.status, - r.read())) - logger.info('CONNECT (for SSL) to %s:%s via proxy succeeded.', - self.host, self.port) - else: - sock = socket.create_connection((self.host, self.port)) - if self.ssl: - # This is the default, but in the case of proxied SSL - # requests the proxy logic above will have cleared - # blocking mode, so re-enable it just to be safe. - sock.setblocking(1) - logger.debug('wrapping socket for ssl with options %r', - self.ssl_opts) - sock = self._ssl_wrap_socket(sock, server_hostname=self.host, - **self.ssl_opts) - if self._ssl_validator: - self._ssl_validator(sock) - sock.setblocking(0) - self.sock = sock - - def _buildheaders(self, method, path, headers, http_ver): - if self.ssl and self.port == 443 or self.port == 80: - # default port for protocol, so leave it out - hdrhost = self.host - else: - # include nonstandard port in header - if b':' in self.host: # must be IPv6 - hdrhost = b'[%s]:%d' % (self.host, self.port) - else: - hdrhost = b'%s:%d' % (self.host, self.port) - if self._proxy_host and not self.ssl: - # When talking to a regular http proxy we must send the - # full URI, but in all other cases we must not (although - # technically RFC 2616 says servers must accept our - # request if we screw up, experimentally few do that - # correctly.) - assert path[0:1] == b'/', 'path must start with a /' - path = b'http://%s%s' % (hdrhost, path) - outgoing = [b'%s %s %s%s' % (method, path, http_ver, EOL)] - headers[b'host'] = (b'Host', hdrhost) - headers[HDR_ACCEPT_ENCODING] = (HDR_ACCEPT_ENCODING, 'identity') - for hdr, val in sorted((_ensurebytes(h), _ensurebytes(v)) - for h, v in headers.values()): - outgoing.append(b'%s: %s%s' % (hdr, val, EOL)) - outgoing.append(EOL) - return b''.join(outgoing) - - def close(self): - """Close the connection to the server. - - This is a no-op if the connection is already closed. 
The - connection may automatically close if requested by the server - or required by the nature of a response. - """ - if self.sock is None: - return - self.sock.close() - self.sock = None - logger.info('closed connection to %s on %s', self.host, self.port) - - def busy(self): - """Returns True if this connection object is currently in use. - - If a response is still pending, this will return True, even if - the request has finished sending. In the future, - HTTPConnection may transparently juggle multiple connections - to the server, in which case this will be useful to detect if - any of those connections is ready for use. - """ - cr = self._current_response - if cr is not None: - if self._current_response_taken: - if cr.will_close: - self.sock = None - self._current_response = None - return False - elif cr.complete(): - self._current_response = None - return False - return True - return False - - def _reconnect(self, where, pheaders): - logger.info('reconnecting during %s', where) - self.close() - self._connect(pheaders) - - def request(self, method, path, body=None, headers=None, - expect_continue=False): - """Send a request to the server. - - For increased flexibility, this does not return the response - object. Future versions of HTTPConnection that juggle multiple - sockets will be able to send (for example) 5 requests all at - once, and then let the requests arrive as data is - available. Use the `getresponse()` method to retrieve the - response. - """ - if headers is None: - headers = {} - method = _ensurebytes(method) - path = _ensurebytes(path) - if self.busy(): - raise httplib.CannotSendRequest( - 'Can not send another request before ' - 'current response is read!') - self._current_response_taken = False - - logger.info('sending %s request for %s to %s on port %s', - method, path, self.host, self.port) - - hdrs = _foldheaders(headers) - # Figure out headers that have to be computed from the request - # body. 
- chunked = False - if body and HDR_CONTENT_LENGTH not in hdrs: - if getattr(body, '__len__', False): - hdrs[HDR_CONTENT_LENGTH] = (HDR_CONTENT_LENGTH, - b'%d' % len(body)) - elif getattr(body, 'read', False): - hdrs[HDR_XFER_ENCODING] = (HDR_XFER_ENCODING, - XFER_ENCODING_CHUNKED) - chunked = True - else: - raise BadRequestData('body has no __len__() nor read()') - # Figure out expect-continue header - if hdrs.get('expect', ('', ''))[1].lower() == b'100-continue': - expect_continue = True - elif expect_continue: - hdrs['expect'] = (b'Expect', b'100-Continue') - # httplib compatibility: if the user specified a - # proxy-authorization header, that's actually intended for a - # proxy CONNECT action, not the real request, but only if - # we're going to use a proxy. - pheaders = dict(self._proxy_headers) - if self._proxy_host and self.ssl: - pa = hdrs.pop('proxy-authorization', None) - if pa is not None: - pheaders['proxy-authorization'] = pa - # Build header data - outgoing_headers = self._buildheaders( - method, path, hdrs, self.http_version) - - # If we're reusing the underlying socket, there are some - # conditions where we'll want to retry, so make a note of the - # state of self.sock - fresh_socket = self.sock is None - self._connect(pheaders) - response = None - first = True - - while ((outgoing_headers or body) - and not (response and response.complete())): - select_timeout = self.timeout - out = outgoing_headers or body - blocking_on_continue = False - if expect_continue and not outgoing_headers and not ( - response and (response.headers or response.continued)): - logger.info( - 'waiting up to %s seconds for' - ' continue response from server', - self.continue_timeout) - select_timeout = self.continue_timeout - blocking_on_continue = True - out = False - if out: - w = [self.sock] - else: - w = [] - r, w, x = select.select([self.sock], w, [], select_timeout) - # if we were expecting a 100 continue and it's been long - # enough, just go ahead and assume it's ok. 
This is the - # recommended behavior from the RFC. - if r == w == x == []: - if blocking_on_continue: - expect_continue = False - logger.info('no response to continue expectation from ' - 'server, optimistically sending request body') - else: - raise HTTPTimeoutException('timeout sending data') - was_first = first - - # incoming data - if r: - try: - try: - data = r[0].recv(INCOMING_BUFFER_SIZE) - except ssl.SSLError as e: - if e.args[0] != ssl.SSL_ERROR_WANT_READ: - raise - logger.debug('SSL_ERROR_WANT_READ while sending ' - 'data, retrying...') - continue - if not data: - logger.info('socket appears closed in read') - self.sock = None - self._current_response = None - if response is not None: - # We're a friend of the response class, so let - # us use the private attribute. - # pylint: disable=W0212 - response._close() - # This if/elif ladder is a bit subtle, - # comments in each branch should help. - if response is not None and response.complete(): - # Server responded completely and then - # closed the socket. We should just shut - # things down and let the caller get their - # response. - logger.info('Got an early response, ' - 'aborting remaining request.') - break - elif was_first and response is None: - # Most likely a keepalive that got killed - # on the server's end. Commonly happens - # after getting a really large response - # from the server. - logger.info( - 'Connection appeared closed in read on first' - ' request loop iteration, will retry.') - self._reconnect('read', pheaders) - continue - else: - # We didn't just send the first data hunk, - # and either have a partial response or no - # response at all. There's really nothing - # meaningful we can do here. 
- raise HTTPStateError( - 'Connection appears closed after ' - 'some request data was written, but the ' - 'response was missing or incomplete!') - logger.debug('read %d bytes in request()', len(data)) - if response is None: - response = self.response_class( - r[0], self.timeout, method) - # We're a friend of the response class, so let us - # use the private attribute. - # pylint: disable=W0212 - response._load_response(data) - # Jump to the next select() call so we load more - # data if the server is still sending us content. - continue - except socket.error as e: - if e[0] != errno.EPIPE and not was_first: - raise - - # outgoing data - if w and out: - try: - if getattr(out, 'read', False): - # pylint guesses the type of out incorrectly here - # pylint: disable=E1103 - data = out.read(OUTGOING_BUFFER_SIZE) - if not data: - continue - if len(data) < OUTGOING_BUFFER_SIZE: - if chunked: - body = b'0' + EOL + EOL - else: - body = None - if chunked: - # This encode is okay because we know - # hex() is building us only 0-9 and a-f - # digits. - asciilen = hex(len(data))[2:].encode('ascii') - out = asciilen + EOL + data + EOL - else: - out = data - amt = w[0].send(out) - except socket.error as e: - if e[0] == ssl.SSL_ERROR_WANT_WRITE and self.ssl: - # This means that SSL hasn't flushed its buffer into - # the socket yet. - # TODO: find a way to block on ssl flushing its buffer - # similar to selecting on a raw socket. - continue - if e[0] == errno.EWOULDBLOCK or e[0] == errno.EAGAIN: - continue - elif (e[0] not in (errno.ECONNRESET, errno.EPIPE) - and not first): - raise - self._reconnect('write', pheaders) - amt = self.sock.send(out) - logger.debug('sent %d', amt) - first = False - if out is body: - body = out[amt:] - else: - outgoing_headers = out[amt:] - # End of request-sending loop. 
- - # close if the server response said to or responded before eating - # the whole request - if response is None: - response = self.response_class(self.sock, self.timeout, method) - if not fresh_socket: - if not response._select(): - # This means the response failed to get any response - # data at all, and in all probability the socket was - # closed before the server even saw our request. Try - # the request again on a fresh socket. - logger.debug('response._select() failed during request().' - ' Assuming request needs to be retried.') - self.sock = None - # Call this method explicitly to re-try the - # request. We don't use self.request() because - # some tools (notably Mercurial) expect to be able - # to subclass and redefine request(), and they - # don't have the same argspec as we do. - # - # TODO restructure sending of requests to avoid - # this recursion - return HTTPConnection.request( - self, method, path, body=body, headers=headers, - expect_continue=expect_continue) - data_left = bool(outgoing_headers or body) - if data_left: - logger.info('stopped sending request early, ' - 'will close the socket to be safe.') - response.will_close = True - if response.will_close: - # The socket will be closed by the response, so we disown - # the socket - self.sock = None - self._current_response = response - - def getresponse(self): - """Returns the response to the most recent request.""" - if self._current_response is None: - raise httplib.ResponseNotReady() - r = self._current_response - while r.headers is None: - # We're a friend of the response class, so let us use the - # private attribute. 
- # pylint: disable=W0212 - if not r._select() and not r.complete(): - raise _readers.HTTPRemoteClosedError() - if r.will_close: - self.sock = None - self._current_response = None - elif r.complete(): - self._current_response = None - else: - self._current_response_taken = True - return r - - -class HTTPTimeoutException(httplib.HTTPException): - """A timeout occurred while waiting on the server.""" - - -class BadRequestData(httplib.HTTPException): - """Request body object has neither __len__ nor read.""" - - -class HTTPProxyConnectFailedException(httplib.HTTPException): - """Connecting to the HTTP proxy failed.""" - - -class HTTPStateError(httplib.HTTPException): - """Invalid internal state encountered.""" - -# Forward this exception type from _readers since it needs to be part -# of the public API. -HTTPRemoteClosedError = _readers.HTTPRemoteClosedError -# no-check-code
--- a/mercurial/httpclient/_readers.py Sat Mar 03 22:29:24 2018 -0500 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,239 +0,0 @@ -# Copyright 2011, Google Inc. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. - -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""Reader objects to abstract out different body response types. - -This module is package-private. It is not expected that these will -have any clients outside of httpplus. 
-""" -from __future__ import absolute_import - -try: - import httplib - httplib.HTTPException -except ImportError: - import http.client as httplib - -import logging - -logger = logging.getLogger(__name__) - - -class ReadNotReady(Exception): - """Raised when read() is attempted but not enough data is loaded.""" - - -class HTTPRemoteClosedError(httplib.HTTPException): - """The server closed the remote socket in the middle of a response.""" - - -class AbstractReader(object): - """Abstract base class for response readers. - - Subclasses must implement _load, and should implement _close if - it's not an error for the server to close their socket without - some termination condition being detected during _load. - """ - def __init__(self): - self._finished = False - self._done_chunks = [] - self.available_data = 0 - - def _addchunk(self, data): - self._done_chunks.append(data) - self.available_data += len(data) - - def _pushchunk(self, data): - self._done_chunks.insert(0, data) - self.available_data += len(data) - - def _popchunk(self): - b = self._done_chunks.pop(0) - self.available_data -= len(b) - - return b - - def done(self): - """Returns true if the response body is entirely read.""" - return self._finished - - def read(self, amt): - """Read amt bytes from the response body.""" - if self.available_data < amt and not self._finished: - raise ReadNotReady() - blocks = [] - need = amt - while self._done_chunks: - b = self._popchunk() - if len(b) > need: - nb = b[:need] - self._pushchunk(b[need:]) - b = nb - blocks.append(b) - need -= len(b) - if need == 0: - break - result = b''.join(blocks) - assert len(result) == amt or (self._finished and len(result) < amt) - - return result - - def readto(self, delimstr, blocks = None): - """return available data chunks up to the first one in which - delimstr occurs. No data will be returned after delimstr -- - the chunk in which it occurs will be split and the remainder - pushed back onto the available data queue. 
If blocks is - supplied chunks will be added to blocks, otherwise a new list - will be allocated. - """ - if blocks is None: - blocks = [] - - while self._done_chunks: - b = self._popchunk() - i = b.find(delimstr) + len(delimstr) - if i: - if i < len(b): - self._pushchunk(b[i:]) - blocks.append(b[:i]) - break - else: - blocks.append(b) - - return blocks - - def _load(self, data): # pragma: no cover - """Subclasses must implement this. - - As data is available to be read out of this object, it should - be placed into the _done_chunks list. Subclasses should not - rely on data remaining in _done_chunks forever, as it may be - reaped if the client is parsing data as it comes in. - """ - raise NotImplementedError - - def _close(self): - """Default implementation of close. - - The default implementation assumes that the reader will mark - the response as finished on the _finished attribute once the - entire response body has been read. In the event that this is - not true, the subclass should override the implementation of - close (for example, close-is-end responses have to set - self._finished in the close handler.) - """ - if not self._finished: - raise HTTPRemoteClosedError( - 'server appears to have closed the socket mid-response') - - -class AbstractSimpleReader(AbstractReader): - """Abstract base class for simple readers that require no response decoding. - - Examples of such responses are Connection: Close (close-is-end) - and responses that specify a content length. - """ - def _load(self, data): - if data: - assert not self._finished, ( - 'tried to add data (%r) to a closed reader!' 
% data) - logger.debug('%s read an additional %d data', - self.name, len(data)) # pylint: disable=E1101 - self._addchunk(data) - - -class CloseIsEndReader(AbstractSimpleReader): - """Reader for responses that specify Connection: Close for length.""" - name = 'close-is-end' - - def _close(self): - logger.info('Marking close-is-end reader as closed.') - self._finished = True - - -class ContentLengthReader(AbstractSimpleReader): - """Reader for responses that specify an exact content length.""" - name = 'content-length' - - def __init__(self, amount): - AbstractSimpleReader.__init__(self) - self._amount = amount - if amount == 0: - self._finished = True - self._amount_seen = 0 - - def _load(self, data): - AbstractSimpleReader._load(self, data) - self._amount_seen += len(data) - if self._amount_seen >= self._amount: - self._finished = True - logger.debug('content-length read complete') - - -class ChunkedReader(AbstractReader): - """Reader for chunked transfer encoding responses.""" - def __init__(self, eol): - AbstractReader.__init__(self) - self._eol = eol - self._leftover_skip_amt = 0 - self._leftover_data = '' - - def _load(self, data): - assert not self._finished, 'tried to add data to a closed reader!' - logger.debug('chunked read an additional %d data', len(data)) - position = 0 - if self._leftover_data: - logger.debug( - 'chunked reader trying to finish block from leftover data') - # TODO: avoid this string concatenation if possible - data = self._leftover_data + data - position = self._leftover_skip_amt - self._leftover_data = '' - self._leftover_skip_amt = 0 - datalen = len(data) - while position < datalen: - split = data.find(self._eol, position) - if split == -1: - self._leftover_data = data - self._leftover_skip_amt = position - return - amt = int(data[position:split], base=16) - block_start = split + len(self._eol) - # If the whole data chunk plus the eol trailer hasn't - # loaded, we'll wait for the next load. 
- if block_start + amt + len(self._eol) > len(data): - self._leftover_data = data - self._leftover_skip_amt = position - return - if amt == 0: - self._finished = True - logger.debug('closing chunked reader due to chunk of length 0') - return - self._addchunk(data[block_start:block_start + amt]) - position = block_start + amt + len(self._eol) -# no-check-code
--- a/mercurial/httpconnection.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/httpconnection.py Sun Mar 04 10:42:51 2018 -0500 @@ -10,15 +10,11 @@ from __future__ import absolute_import -import logging import os -import socket from .i18n import _ from . import ( - httpclient, - sslutil, - urllibcompat, + pycompat, util, ) @@ -67,6 +63,7 @@ # moved here from url.py to avoid a cycle def readauthforuri(ui, uri, user): + uri = pycompat.bytesurl(uri) # Read configuration groups = {} for key, val in ui.configitems('auth'): @@ -110,190 +107,3 @@ if user and not bestuser: auth['username'] = user return bestauth - -# Mercurial (at least until we can remove the old codepath) requires -# that the http response object be sufficiently file-like, so we -# provide a close() method here. -class HTTPResponse(httpclient.HTTPResponse): - def close(self): - pass - -class HTTPConnection(httpclient.HTTPConnection): - response_class = HTTPResponse - def request(self, method, uri, body=None, headers=None): - if headers is None: - headers = {} - if isinstance(body, httpsendfile): - body.seek(0) - httpclient.HTTPConnection.request(self, method, uri, body=body, - headers=headers) - - -_configuredlogging = False -LOGFMT = '%(levelname)s:%(name)s:%(lineno)d:%(message)s' -# Subclass BOTH of these because otherwise urllib2 "helpfully" -# reinserts them since it notices we don't include any subclasses of -# them. 
-class http2handler(urlreq.httphandler, urlreq.httpshandler): - def __init__(self, ui, pwmgr): - global _configuredlogging - urlreq.abstracthttphandler.__init__(self) - self.ui = ui - self.pwmgr = pwmgr - self._connections = {} - # developer config: ui.http2debuglevel - loglevel = ui.config('ui', 'http2debuglevel') - if loglevel and not _configuredlogging: - _configuredlogging = True - logger = logging.getLogger('mercurial.httpclient') - logger.setLevel(getattr(logging, loglevel.upper())) - handler = logging.StreamHandler() - handler.setFormatter(logging.Formatter(LOGFMT)) - logger.addHandler(handler) - - def close_all(self): - """Close and remove all connection objects being kept for reuse.""" - for openconns in self._connections.values(): - for conn in openconns: - conn.close() - self._connections = {} - - # shamelessly borrowed from urllib2.AbstractHTTPHandler - def do_open(self, http_class, req, use_ssl): - """Return an addinfourl object for the request, using http_class. - - http_class must implement the HTTPConnection API from httplib. - The addinfourl return value is a file-like object. It also - has methods and attributes including: - - info(): return a mimetools.Message object for the headers - - geturl(): return the original request URL - - code: HTTP status code - """ - # If using a proxy, the host returned by get_host() is - # actually the proxy. On Python 2.6.1, the real destination - # hostname is encoded in the URI in the urllib2 request - # object. On Python 2.6.5, it's stored in the _tunnel_host - # attribute which has no accessor. - tunhost = getattr(req, '_tunnel_host', None) - host = urllibcompat.gethost(req) - if tunhost: - proxyhost = host - host = tunhost - elif req.has_proxy(): - proxyhost = urllibcompat.gethost(req) - host = urllibcompat.getselector( - req).split('://', 1)[1].split('/', 1)[0] - else: - proxyhost = None - - if proxyhost: - if ':' in proxyhost: - # Note: this means we'll explode if we try and use an - # IPv6 http proxy. 
This isn't a regression, so we - # won't worry about it for now. - proxyhost, proxyport = proxyhost.rsplit(':', 1) - else: - proxyport = 3128 # squid default - proxy = (proxyhost, proxyport) - else: - proxy = None - - if not host: - raise urlerr.urlerror('no host given') - - connkey = use_ssl, host, proxy - allconns = self._connections.get(connkey, []) - conns = [c for c in allconns if not c.busy()] - if conns: - h = conns[0] - else: - if allconns: - self.ui.debug('all connections for %s busy, making a new ' - 'one\n' % host) - timeout = None - if req.timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: - timeout = req.timeout - h = http_class(host, timeout=timeout, proxy_hostport=proxy) - self._connections.setdefault(connkey, []).append(h) - - headers = dict(req.headers) - headers.update(req.unredirected_hdrs) - headers = dict( - (name.title(), val) for name, val in headers.items()) - try: - path = urllibcompat.getselector(req) - if '://' in path: - path = path.split('://', 1)[1].split('/', 1)[1] - if path[0] != '/': - path = '/' + path - h.request(req.get_method(), path, req.data, headers) - r = h.getresponse() - except socket.error as err: # XXX what error? - raise urlerr.urlerror(err) - - # Pick apart the HTTPResponse object to get the addinfourl - # object initialized properly. - r.recv = r.read - - resp = urlreq.addinfourl(r, r.headers, urllibcompat.getfullurl(req)) - resp.code = r.status - resp.msg = r.reason - return resp - - # httplib always uses the given host/port as the socket connect - # target, and then allows full URIs in the request path, which it - # then observes and treats as a signal to do proxying instead. 
- def http_open(self, req): - if urllibcompat.getfullurl(req).startswith('https'): - return self.https_open(req) - def makehttpcon(*args, **kwargs): - k2 = dict(kwargs) - k2[r'use_ssl'] = False - return HTTPConnection(*args, **k2) - return self.do_open(makehttpcon, req, False) - - def https_open(self, req): - # urllibcompat.getfullurl(req) does not contain credentials and we may - # need them to match the certificates. - url = urllibcompat.getfullurl(req) - user, password = self.pwmgr.find_stored_password(url) - res = readauthforuri(self.ui, url, user) - if res: - group, auth = res - self.auth = auth - self.ui.debug("using auth.%s.* for authentication\n" % group) - else: - self.auth = None - return self.do_open(self._makesslconnection, req, True) - - def _makesslconnection(self, host, port=443, *args, **kwargs): - keyfile = None - certfile = None - - if args: # key_file - keyfile = args.pop(0) - if args: # cert_file - certfile = args.pop(0) - - # if the user has specified different key/cert files in - # hgrc, we prefer these - if self.auth and 'key' in self.auth and 'cert' in self.auth: - keyfile = self.auth['key'] - certfile = self.auth['cert'] - - # let host port take precedence - if ':' in host and '[' not in host or ']:' in host: - host, port = host.rsplit(':', 1) - port = int(port) - if '[' in host: - host = host[1:-1] - - kwargs[r'keyfile'] = keyfile - kwargs[r'certfile'] = certfile - - con = HTTPConnection(host, port, use_ssl=True, - ssl_wrap_socket=sslutil.wrapsocket, - ssl_validator=sslutil.validatesocket, - ui=self.ui, - **kwargs) - return con
--- a/mercurial/httppeer.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/httppeer.py Sun Mar 04 10:42:51 2018 -0500 @@ -16,7 +16,6 @@ import tempfile from .i18n import _ -from .node import nullid from . import ( bundle2, error, @@ -222,13 +221,9 @@ # Begin of _basewirepeer interface. def capabilities(self): - if self._caps is None: - try: - self._fetchcaps() - except error.RepoError: - self._caps = set() - self.ui.debug('capabilities: %s\n' % - (' '.join(self._caps or ['none']))) + # self._fetchcaps() should have been called as part of peer + # handshake. So self._caps should always be set. + assert self._caps is not None return self._caps # End of _basewirepeer interface. @@ -253,6 +248,8 @@ # with infinite recursion when trying to look up capabilities # for the first time. postargsok = self._caps is not None and 'httppostargs' in self._caps + + # Send arguments via POST. if postargsok and args: strargs = urlreq.urlencode(sorted(args.items())) if not data: @@ -266,11 +263,16 @@ argsio.length = len(strargs) data = _multifile(argsio, data) headers[r'X-HgArgs-Post'] = len(strargs) - else: - if len(args) > 0: - httpheader = self.capable('httpheader') - if httpheader: - headersize = int(httpheader.split(',', 1)[0]) + elif args: + # Calling self.capable() can infinite loop if we are calling + # "capabilities". But that command should never accept wire + # protocol arguments. So this should never happen. + assert cmd != 'capabilities' + httpheader = self.capable('httpheader') + if httpheader: + headersize = int(httpheader.split(',', 1)[0]) + + # Send arguments via HTTP headers. if headersize > 0: # The headers can typically carry more data than the URL. encargs = urlreq.urlencode(sorted(args.items())) @@ -278,8 +280,10 @@ headersize): headers[header] = value varyheaders.append(header) + # Send arguments via query string (Mercurial <1.9). 
else: q += sorted(args.items()) + qs = '?%s' % urlreq.urlencode(q) cu = "%s%s" % (self._url, qs) size = 0 @@ -287,9 +291,6 @@ size = data.length elif data is not None: size = len(data) - if size and self.ui.configbool('ui', 'usehttp2'): - headers[r'Expect'] = r'100-Continue' - headers[r'X-HgHttp2'] = r'1' if data is not None and r'Content-Type' not in headers: headers[r'Content-Type'] = r'application/mercurial-0.1' @@ -330,8 +331,8 @@ req = self._requestbuilder(pycompat.strurl(cu), data, headers) if data is not None: - self.ui.debug("sending %s bytes\n" % size) - req.add_unredirected_header('Content-Length', '%d' % size) + self.ui.debug("sending %d bytes\n" % size) + req.add_unredirected_header(r'Content-Length', r'%d' % size) try: resp = self._openurl(req) except urlerr.httperror as inst: @@ -430,7 +431,7 @@ tempname = bundle2.writebundle(self.ui, cg, None, type) fp = httpconnection.httpsendfile(self.ui, tempname, "rb") - headers = {'Content-Type': 'application/mercurial-0.1'} + headers = {r'Content-Type': r'application/mercurial-0.1'} try: r = self._call(cmd, data=fp, headers=headers, **args) @@ -438,6 +439,11 @@ if len(vals) < 2: raise error.ResponseError(_("unexpected response:"), r) return vals + except urlerr.httperror: + # Catch and re-raise these so we don't try and treat them + # like generic socket errors. They lack any values in + # .args on Python 3 which breaks our socket.error block. 
+ raise except socket.error as err: if err.args[0] in (errno.ECONNRESET, errno.EPIPE): raise error.Abort(_('push failed: %s') % err.args[1]) @@ -461,7 +467,7 @@ fh.close() # start http push fp_ = httpconnection.httpsendfile(self.ui, filename, "rb") - headers = {'Content-Type': 'application/mercurial-0.1'} + headers = {r'Content-Type': r'application/mercurial-0.1'} return self._callstream(cmd, data=fp_, headers=headers, **args) finally: if fp_ is not None: @@ -476,28 +482,17 @@ def _abort(self, exception): raise exception -class httpspeer(httppeer): - def __init__(self, ui, path): - if not url.has_https: - raise error.Abort(_('Python support for SSL and HTTPS ' - 'is not installed')) - httppeer.__init__(self, ui, path) - def instance(ui, path, create): if create: raise error.Abort(_('cannot create new http repository')) try: - if path.startswith('https:'): - inst = httpspeer(ui, path) - else: - inst = httppeer(ui, path) - try: - # Try to do useful work when checking compatibility. - # Usually saves a roundtrip since we want the caps anyway. - inst._fetchcaps() - except error.RepoError: - # No luck, try older compatibility check. - inst.between([(nullid, nullid)]) + if path.startswith('https:') and not url.has_https: + raise error.Abort(_('Python support for SSL and HTTPS ' + 'is not installed')) + + inst = httppeer(ui, path) + inst._fetchcaps() + return inst except error.RepoError as httpexception: try:
--- a/mercurial/keepalive.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/keepalive.py Sun Mar 04 10:42:51 2018 -0500 @@ -324,11 +324,11 @@ h.putrequest( req.get_method(), urllibcompat.getselector(req), **pycompat.strkwargs(skipheaders)) - if 'content-type' not in headers: - h.putheader('Content-type', - 'application/x-www-form-urlencoded') - if 'content-length' not in headers: - h.putheader('Content-length', '%d' % len(data)) + if r'content-type' not in headers: + h.putheader(r'Content-type', + r'application/x-www-form-urlencoded') + if r'content-length' not in headers: + h.putheader(r'Content-length', r'%d' % len(data)) else: h.putrequest( req.get_method(), urllibcompat.getselector(req),
--- a/mercurial/localrepo.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/localrepo.py Sun Mar 04 10:42:51 2018 -0500 @@ -9,7 +9,6 @@ import errno import hashlib -import inspect import os import random import time @@ -44,6 +43,7 @@ merge as mergemod, mergeutil, namespaces, + narrowspec, obsolete, pathutil, peer, @@ -57,7 +57,7 @@ scmutil, sparse, store, - subrepo, + subrepoutil, tags as tagsmod, transaction, txnutil, @@ -191,7 +191,9 @@ def debugwireargs(self, one, two, three=None, four=None, five=None): """Used to test argument passing over the wire""" - return "%s %s %s %s %s" % (one, two, three, four, five) + return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three), + pycompat.bytestr(four), + pycompat.bytestr(five)) def getbundle(self, source, heads=None, common=None, bundlecaps=None, **kwargs): @@ -260,7 +262,8 @@ bundle2.processbundle(self._repo, b) raise except error.PushRaced as exc: - raise error.ResponseError(_('push failed:'), str(exc)) + raise error.ResponseError(_('push failed:'), + util.forcebytestr(exc)) # End of _basewirecommands interface. @@ -304,11 +307,15 @@ class localrepository(object): + # obsolete experimental requirements: + # - manifestv2: An experimental new manifest format that allowed + # for stem compression of long paths. Experiment ended up not + # being successful (repository sizes went up due to worse delta + # chains), and the code was deleted in 4.6. supportedformats = { 'revlogv1', 'generaldelta', 'treemanifest', - 'manifestv2', REVLOGV2_REQUIREMENT, } _basesupported = supportedformats | { @@ -323,7 +330,6 @@ 'revlogv1', 'generaldelta', 'treemanifest', - 'manifestv2', } # a list of (ui, featureset) functions. @@ -733,6 +739,37 @@ " working parent %s!\n") % short(node)) return nullid + @repofilecache(narrowspec.FILENAME) + def narrowpats(self): + """matcher patterns for this repository's narrowspec + + A tuple of (includes, excludes). + """ + source = self + if self.shared(): + from . 
import hg + source = hg.sharedreposource(self) + return narrowspec.load(source) + + @repofilecache(narrowspec.FILENAME) + def _narrowmatch(self): + if changegroup.NARROW_REQUIREMENT not in self.requirements: + return matchmod.always(self.root, '') + include, exclude = self.narrowpats + return narrowspec.match(self.root, include=include, exclude=exclude) + + # TODO(martinvonz): make this property-like instead? + def narrowmatch(self): + return self._narrowmatch + + def setnarrowpats(self, newincludes, newexcludes): + target = self + if self.shared(): + from . import hg + target = hg.sharedreposource(self) + narrowspec.save(target, newincludes, newexcludes) + self.invalidate(clearfilecache=True) + def __getitem__(self, changeid): if changeid is None: return context.workingctx(self) @@ -1068,7 +1105,7 @@ if not fn: fn = lambda s, c, **kwargs: util.filter(s, c) # Wrap old filters not supporting keyword arguments - if not inspect.getargspec(fn)[2]: + if not pycompat.getargspec(fn)[2]: oldfn = fn fn = lambda s, c, **kwargs: oldfn(s, c) l.append((mf, fn, params)) @@ -1332,7 +1369,7 @@ """To be run if transaction is aborted """ reporef().hook('txnabort', throw=False, txnname=desc, - **tr2.hookargs) + **pycompat.strkwargs(tr2.hookargs)) tr.addabort('txnabort-hook', txnaborthook) # avoid eager cache invalidation. in-memory data should be identical # to stored data if transaction has no error. 
@@ -1574,7 +1611,8 @@ def _refreshfilecachestats(self, tr): """Reload stats of cached files so that they are flagged as valid""" for k, ce in self._filecache.items(): - if k == 'dirstate' or k not in self.__dict__: + k = pycompat.sysstr(k) + if k == r'dirstate' or k not in self.__dict__: continue ce.refresh() @@ -1832,7 +1870,7 @@ status.modified.extend(status.clean) # mq may commit clean files # check subrepos - subs, commitsubs, newstate = subrepo.precommit( + subs, commitsubs, newstate = subrepoutil.precommit( self.ui, wctx, status, match, force=force) # make sure all explicit patterns are matched @@ -1869,10 +1907,10 @@ for s in sorted(commitsubs): sub = wctx.sub(s) self.ui.status(_('committing subrepository %s\n') % - subrepo.subrelpath(sub)) + subrepoutil.subrelpath(sub)) sr = sub.commit(cctx._text, user, date) newstate[s] = (newstate[s][0], sr) - subrepo.writestate(self, newstate) + subrepoutil.writestate(self, newstate) p1, p2 = self.dirstate.parents() hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '') @@ -1982,7 +2020,7 @@ self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, parent2=xp2) # set the new commit is proper phase - targetphase = subrepo.newcommitphase(self.ui, ctx) + targetphase = subrepoutil.newcommitphase(self.ui, ctx) if targetphase: # retract boundary do not alter parent changeset. # if a parent have higher the resulting phase will @@ -2047,15 +2085,6 @@ # tag cache retrieval" case to work. 
self.invalidate() - def walk(self, match, node=None): - ''' - walk recursively through the directory tree or a given - changeset, finding all files matched by the match - function - ''' - self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3') - return self[node].walk(match) - def status(self, node1='.', node2=None, match=None, ignored=False, clean=False, unknown=False, listsubrepos=False): @@ -2176,10 +2205,11 @@ hookargs = {} if tr is not None: hookargs.update(tr.hookargs) - hookargs['namespace'] = namespace - hookargs['key'] = key - hookargs['old'] = old - hookargs['new'] = new + hookargs = pycompat.strkwargs(hookargs) + hookargs[r'namespace'] = namespace + hookargs[r'key'] = key + hookargs[r'old'] = old + hookargs[r'new'] = new self.hook('prepushkey', throw=True, **hookargs) except error.HookAbort as exc: self.ui.write_err(_("pushkey-abort: %s\n") % exc) @@ -2203,7 +2233,9 @@ def debugwireargs(self, one, two, three=None, four=None, five=None): '''used to test argument passing over the wire''' - return "%s %s %s %s %s" % (one, two, three, four, five) + return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three), + pycompat.bytestr(four), + pycompat.bytestr(five)) def savecommitmessage(self, text): fp = self.vfs('last-message.txt', 'wb') @@ -2270,8 +2302,6 @@ requirements.add('generaldelta') if ui.configbool('experimental', 'treemanifest'): requirements.add('treemanifest') - if ui.configbool('experimental', 'manifestv2'): - requirements.add('manifestv2') revlogv2 = ui.config('experimental', 'revlogv2') if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
--- a/mercurial/lock.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/lock.py Sun Mar 04 10:42:51 2018 -0500 @@ -30,9 +30,7 @@ confidence. Typically it's just hostname. On modern linux, we include an extra Linux-specific pid namespace identifier. """ - result = socket.gethostname() - if pycompat.ispy3: - result = result.encode(pycompat.sysstr(encoding.encoding), 'replace') + result = encoding.strtolocal(socket.gethostname()) if pycompat.sysplatform.startswith('linux'): try: result += '/%x' % os.stat('/proc/self/ns/pid').st_ino @@ -52,10 +50,12 @@ # show more details for new-style locks if ':' in locker: host, pid = locker.split(":", 1) - msg = _("waiting for lock on %s held by process %r " - "on host %r\n") % (l.desc, pid, host) + msg = (_("waiting for lock on %s held by process %r on host %r\n") + % (pycompat.bytestr(l.desc), pycompat.bytestr(pid), + pycompat.bytestr(host))) else: - msg = _("waiting for lock on %s held by %r\n") % (l.desc, locker) + msg = (_("waiting for lock on %s held by %r\n") + % (l.desc, pycompat.bytestr(locker))) printer(msg) l = lock(vfs, lockname, 0, *args, dolock=False, **kwargs) @@ -86,9 +86,9 @@ l.delay = delay if l.delay: if 0 <= warningidx <= l.delay: - ui.warn(_("got lock after %s seconds\n") % l.delay) + ui.warn(_("got lock after %d seconds\n") % l.delay) else: - ui.debug("got lock after %s seconds\n" % l.delay) + ui.debug("got lock after %d seconds\n" % l.delay) if l.acquirefn: l.acquirefn() return l
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/logcmdutil.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,937 @@ +# logcmdutil.py - utility for log-like commands +# +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import itertools +import os + +from .i18n import _ +from .node import ( + hex, + nullid, +) + +from . import ( + dagop, + encoding, + error, + formatter, + graphmod, + match as matchmod, + mdiff, + patch, + pathutil, + pycompat, + revset, + revsetlang, + scmutil, + smartset, + templatekw, + templater, + util, +) +from .utils import dateutil + +def getlimit(opts): + """get the log limit according to option -l/--limit""" + limit = opts.get('limit') + if limit: + try: + limit = int(limit) + except ValueError: + raise error.Abort(_('limit must be a positive integer')) + if limit <= 0: + raise error.Abort(_('limit must be positive')) + else: + limit = None + return limit + +def diffordiffstat(ui, repo, diffopts, node1, node2, match, + changes=None, stat=False, fp=None, prefix='', + root='', listsubrepos=False, hunksfilterfn=None): + '''show diff or diffstat.''' + if root: + relroot = pathutil.canonpath(repo.root, repo.getcwd(), root) + else: + relroot = '' + if relroot != '': + # XXX relative roots currently don't work if the root is within a + # subrepo + uirelroot = match.uipath(relroot) + relroot += '/' + for matchroot in match.files(): + if not matchroot.startswith(relroot): + ui.warn(_('warning: %s not inside relative root %s\n') % ( + match.uipath(matchroot), uirelroot)) + + if stat: + diffopts = diffopts.copy(context=0, noprefix=False) + width = 80 + if not ui.plain(): + width = ui.termwidth() + + chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts, + prefix=prefix, relroot=relroot, + hunksfilterfn=hunksfilterfn) + + if fp is not None or 
ui.canwritewithoutlabels(): + out = fp or ui + if stat: + chunks = [patch.diffstat(util.iterlines(chunks), width=width)] + for chunk in util.filechunkiter(util.chunkbuffer(chunks)): + out.write(chunk) + else: + if stat: + chunks = patch.diffstatui(util.iterlines(chunks), width=width) + else: + chunks = patch.difflabel(lambda chunks, **kwargs: chunks, chunks, + opts=diffopts) + if ui.canbatchlabeledwrites(): + def gen(): + for chunk, label in chunks: + yield ui.label(chunk, label=label) + for chunk in util.filechunkiter(util.chunkbuffer(gen())): + ui.write(chunk) + else: + for chunk, label in chunks: + ui.write(chunk, label=label) + + if listsubrepos: + ctx1 = repo[node1] + ctx2 = repo[node2] + for subpath, sub in scmutil.itersubrepos(ctx1, ctx2): + tempnode2 = node2 + try: + if node2 is not None: + tempnode2 = ctx2.substate[subpath][1] + except KeyError: + # A subrepo that existed in node1 was deleted between node1 and + # node2 (inclusive). Thus, ctx2's substate won't contain that + # subpath. The best we can do is to ignore it. 
+ tempnode2 = None + submatch = matchmod.subdirmatcher(subpath, match) + sub.diff(ui, diffopts, tempnode2, submatch, changes=changes, + stat=stat, fp=fp, prefix=prefix) + +class changesetdiffer(object): + """Generate diff of changeset with pre-configured filtering functions""" + + def _makefilematcher(self, ctx): + return scmutil.matchall(ctx.repo()) + + def _makehunksfilter(self, ctx): + return None + + def showdiff(self, ui, ctx, diffopts, stat=False): + repo = ctx.repo() + node = ctx.node() + prev = ctx.p1().node() + diffordiffstat(ui, repo, diffopts, prev, node, + match=self._makefilematcher(ctx), stat=stat, + hunksfilterfn=self._makehunksfilter(ctx)) + +def changesetlabels(ctx): + labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()] + if ctx.obsolete(): + labels.append('changeset.obsolete') + if ctx.isunstable(): + labels.append('changeset.unstable') + for instability in ctx.instabilities(): + labels.append('instability.%s' % instability) + return ' '.join(labels) + +class changesetprinter(object): + '''show changeset information when templating not requested.''' + + def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False): + self.ui = ui + self.repo = repo + self.buffered = buffered + self._differ = differ or changesetdiffer() + self.diffopts = diffopts or {} + self.header = {} + self.hunk = {} + self.lastheader = None + self.footer = None + self._columns = templatekw.getlogcolumns() + + def flush(self, ctx): + rev = ctx.rev() + if rev in self.header: + h = self.header[rev] + if h != self.lastheader: + self.lastheader = h + self.ui.write(h) + del self.header[rev] + if rev in self.hunk: + self.ui.write(self.hunk[rev]) + del self.hunk[rev] + + def close(self): + if self.footer: + self.ui.write(self.footer) + + def show(self, ctx, copies=None, **props): + props = pycompat.byteskwargs(props) + if self.buffered: + self.ui.pushbuffer(labeled=True) + self._show(ctx, copies, props) + self.hunk[ctx.rev()] = self.ui.popbuffer() + else: + 
self._show(ctx, copies, props) + + def _show(self, ctx, copies, props): + '''show a single changeset or file revision''' + changenode = ctx.node() + rev = ctx.rev() + + if self.ui.quiet: + self.ui.write("%s\n" % scmutil.formatchangeid(ctx), + label='log.node') + return + + columns = self._columns + self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx), + label=changesetlabels(ctx)) + + # branches are shown first before any other names due to backwards + # compatibility + branch = ctx.branch() + # don't show the default branch name + if branch != 'default': + self.ui.write(columns['branch'] % branch, label='log.branch') + + for nsname, ns in self.repo.names.iteritems(): + # branches has special logic already handled above, so here we just + # skip it + if nsname == 'branches': + continue + # we will use the templatename as the color name since those two + # should be the same + for name in ns.names(self.repo, changenode): + self.ui.write(ns.logfmt % name, + label='log.%s' % ns.colorname) + if self.ui.debugflag: + self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase') + for pctx in scmutil.meaningfulparents(self.repo, ctx): + label = 'log.parent changeset.%s' % pctx.phasestr() + self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx), + label=label) + + if self.ui.debugflag and rev is not None: + mnode = ctx.manifestnode() + mrev = self.repo.manifestlog._revlog.rev(mnode) + self.ui.write(columns['manifest'] + % scmutil.formatrevnode(self.ui, mrev, mnode), + label='ui.debug log.manifest') + self.ui.write(columns['user'] % ctx.user(), label='log.user') + self.ui.write(columns['date'] % dateutil.datestr(ctx.date()), + label='log.date') + + if ctx.isunstable(): + instabilities = ctx.instabilities() + self.ui.write(columns['instability'] % ', '.join(instabilities), + label='log.instability') + + elif ctx.obsolete(): + self._showobsfate(ctx) + + self._exthook(ctx) + + if self.ui.debugflag: + files = ctx.p1().status(ctx)[:3] + for key, value 
in zip(['files', 'files+', 'files-'], files): + if value: + self.ui.write(columns[key] % " ".join(value), + label='ui.debug log.files') + elif ctx.files() and self.ui.verbose: + self.ui.write(columns['files'] % " ".join(ctx.files()), + label='ui.note log.files') + if copies and self.ui.verbose: + copies = ['%s (%s)' % c for c in copies] + self.ui.write(columns['copies'] % ' '.join(copies), + label='ui.note log.copies') + + extra = ctx.extra() + if extra and self.ui.debugflag: + for key, value in sorted(extra.items()): + self.ui.write(columns['extra'] % (key, util.escapestr(value)), + label='ui.debug log.extra') + + description = ctx.description().strip() + if description: + if self.ui.verbose: + self.ui.write(_("description:\n"), + label='ui.note log.description') + self.ui.write(description, + label='ui.note log.description') + self.ui.write("\n\n") + else: + self.ui.write(columns['summary'] % description.splitlines()[0], + label='log.summary') + self.ui.write("\n") + + self._showpatch(ctx) + + def _showobsfate(self, ctx): + # TODO: do not depend on templater + tres = formatter.templateresources(self.repo.ui, self.repo) + t = formatter.maketemplater(self.repo.ui, '{join(obsfate, "\n")}', + defaults=templatekw.keywords, + resources=tres) + obsfate = t.render({'ctx': ctx, 'revcache': {}}).splitlines() + + if obsfate: + for obsfateline in obsfate: + self.ui.write(self._columns['obsolete'] % obsfateline, + label='log.obsfate') + + def _exthook(self, ctx): + '''empty method used by extension as a hook point + ''' + + def _showpatch(self, ctx): + stat = self.diffopts.get('stat') + diff = self.diffopts.get('patch') + diffopts = patch.diffallopts(self.ui, self.diffopts) + if stat: + self._differ.showdiff(self.ui, ctx, diffopts, stat=True) + if stat and diff: + self.ui.write("\n") + if diff: + self._differ.showdiff(self.ui, ctx, diffopts, stat=False) + if stat or diff: + self.ui.write("\n") + +class jsonchangeset(changesetprinter): + '''format changeset information.''' + + 
def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False): + changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered) + self.cache = {} + self._first = True + + def close(self): + if not self._first: + self.ui.write("\n]\n") + else: + self.ui.write("[]\n") + + def _show(self, ctx, copies, props): + '''show a single changeset or file revision''' + rev = ctx.rev() + if rev is None: + jrev = jnode = 'null' + else: + jrev = '%d' % rev + jnode = '"%s"' % hex(ctx.node()) + j = encoding.jsonescape + + if self._first: + self.ui.write("[\n {") + self._first = False + else: + self.ui.write(",\n {") + + if self.ui.quiet: + self.ui.write(('\n "rev": %s') % jrev) + self.ui.write((',\n "node": %s') % jnode) + self.ui.write('\n }') + return + + self.ui.write(('\n "rev": %s') % jrev) + self.ui.write((',\n "node": %s') % jnode) + self.ui.write((',\n "branch": "%s"') % j(ctx.branch())) + self.ui.write((',\n "phase": "%s"') % ctx.phasestr()) + self.ui.write((',\n "user": "%s"') % j(ctx.user())) + self.ui.write((',\n "date": [%d, %d]') % ctx.date()) + self.ui.write((',\n "desc": "%s"') % j(ctx.description())) + + self.ui.write((',\n "bookmarks": [%s]') % + ", ".join('"%s"' % j(b) for b in ctx.bookmarks())) + self.ui.write((',\n "tags": [%s]') % + ", ".join('"%s"' % j(t) for t in ctx.tags())) + self.ui.write((',\n "parents": [%s]') % + ", ".join('"%s"' % c.hex() for c in ctx.parents())) + + if self.ui.debugflag: + if rev is None: + jmanifestnode = 'null' + else: + jmanifestnode = '"%s"' % hex(ctx.manifestnode()) + self.ui.write((',\n "manifest": %s') % jmanifestnode) + + self.ui.write((',\n "extra": {%s}') % + ", ".join('"%s": "%s"' % (j(k), j(v)) + for k, v in ctx.extra().items())) + + files = ctx.p1().status(ctx) + self.ui.write((',\n "modified": [%s]') % + ", ".join('"%s"' % j(f) for f in files[0])) + self.ui.write((',\n "added": [%s]') % + ", ".join('"%s"' % j(f) for f in files[1])) + self.ui.write((',\n "removed": [%s]') % + ", ".join('"%s"' % j(f) for 
f in files[2])) + + elif self.ui.verbose: + self.ui.write((',\n "files": [%s]') % + ", ".join('"%s"' % j(f) for f in ctx.files())) + + if copies: + self.ui.write((',\n "copies": {%s}') % + ", ".join('"%s": "%s"' % (j(k), j(v)) + for k, v in copies)) + + stat = self.diffopts.get('stat') + diff = self.diffopts.get('patch') + diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True) + if stat: + self.ui.pushbuffer() + self._differ.showdiff(self.ui, ctx, diffopts, stat=True) + self.ui.write((',\n "diffstat": "%s"') + % j(self.ui.popbuffer())) + if diff: + self.ui.pushbuffer() + self._differ.showdiff(self.ui, ctx, diffopts, stat=False) + self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer())) + + self.ui.write("\n }") + +class changesettemplater(changesetprinter): + '''format changeset information. + + Note: there are a variety of convenience functions to build a + changesettemplater for common cases. See functions such as: + maketemplater, changesetdisplayer, buildcommittemplate, or other + functions that use changesest_templater. + ''' + + # Arguments before "buffered" used to be positional. Consider not + # adding/removing arguments before "buffered" to not break callers. 
+ def __init__(self, ui, repo, tmplspec, differ=None, diffopts=None, + buffered=False): + changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered) + tres = formatter.templateresources(ui, repo) + self.t = formatter.loadtemplater(ui, tmplspec, + defaults=templatekw.keywords, + resources=tres, + cache=templatekw.defaulttempl) + self._counter = itertools.count() + self.cache = tres['cache'] # shared with _graphnodeformatter() + + self._tref = tmplspec.ref + self._parts = {'header': '', 'footer': '', + tmplspec.ref: tmplspec.ref, + 'docheader': '', 'docfooter': '', + 'separator': ''} + if tmplspec.mapfile: + # find correct templates for current mode, for backward + # compatibility with 'log -v/-q/--debug' using a mapfile + tmplmodes = [ + (True, ''), + (self.ui.verbose, '_verbose'), + (self.ui.quiet, '_quiet'), + (self.ui.debugflag, '_debug'), + ] + for mode, postfix in tmplmodes: + for t in self._parts: + cur = t + postfix + if mode and cur in self.t: + self._parts[t] = cur + else: + partnames = [p for p in self._parts.keys() if p != tmplspec.ref] + m = formatter.templatepartsmap(tmplspec, self.t, partnames) + self._parts.update(m) + + if self._parts['docheader']: + self.ui.write(templater.stringify(self.t(self._parts['docheader']))) + + def close(self): + if self._parts['docfooter']: + if not self.footer: + self.footer = "" + self.footer += templater.stringify(self.t(self._parts['docfooter'])) + return super(changesettemplater, self).close() + + def _show(self, ctx, copies, props): + '''show a single changeset or file revision''' + props = props.copy() + props['ctx'] = ctx + props['index'] = index = next(self._counter) + props['revcache'] = {'copies': copies} + props = pycompat.strkwargs(props) + + # write separator, which wouldn't work well with the header part below + # since there's inherently a conflict between header (across items) and + # separator (per item) + if self._parts['separator'] and index > 0: + 
self.ui.write(templater.stringify(self.t(self._parts['separator']))) + + # write header + if self._parts['header']: + h = templater.stringify(self.t(self._parts['header'], **props)) + if self.buffered: + self.header[ctx.rev()] = h + else: + if self.lastheader != h: + self.lastheader = h + self.ui.write(h) + + # write changeset metadata, then patch if requested + key = self._parts[self._tref] + self.ui.write(templater.stringify(self.t(key, **props))) + self._showpatch(ctx) + + if self._parts['footer']: + if not self.footer: + self.footer = templater.stringify( + self.t(self._parts['footer'], **props)) + +def templatespec(tmpl, mapfile): + if mapfile: + return formatter.templatespec('changeset', tmpl, mapfile) + else: + return formatter.templatespec('', tmpl, None) + +def _lookuptemplate(ui, tmpl, style): + """Find the template matching the given template spec or style + + See formatter.lookuptemplate() for details. + """ + + # ui settings + if not tmpl and not style: # template are stronger than style + tmpl = ui.config('ui', 'logtemplate') + if tmpl: + return templatespec(templater.unquotestring(tmpl), None) + else: + style = util.expandpath(ui.config('ui', 'style')) + + if not tmpl and style: + mapfile = style + if not os.path.split(mapfile)[0]: + mapname = (templater.templatepath('map-cmdline.' + mapfile) + or templater.templatepath(mapfile)) + if mapname: + mapfile = mapname + return templatespec(None, mapfile) + + if not tmpl: + return templatespec(None, None) + + return formatter.lookuptemplate(ui, 'changeset', tmpl) + +def maketemplater(ui, repo, tmpl, buffered=False): + """Create a changesettemplater from a literal template 'tmpl' + byte-string.""" + spec = templatespec(tmpl, None) + return changesettemplater(ui, repo, spec, buffered=buffered) + +def changesetdisplayer(ui, repo, opts, differ=None, buffered=False): + """show one changeset using template or regular display. + + Display format will be the first non-empty hit of: + 1. option 'template' + 2. 
option 'style' + 3. [ui] setting 'logtemplate' + 4. [ui] setting 'style' + If all of these values are either the unset or the empty string, + regular display via changesetprinter() is done. + """ + postargs = (differ, opts, buffered) + if opts.get('template') == 'json': + return jsonchangeset(ui, repo, *postargs) + + spec = _lookuptemplate(ui, opts.get('template'), opts.get('style')) + + if not spec.ref and not spec.tmpl and not spec.mapfile: + return changesetprinter(ui, repo, *postargs) + + return changesettemplater(ui, repo, spec, *postargs) + +def _makematcher(repo, revs, pats, opts): + """Build matcher and expanded patterns from log options + + If --follow, revs are the revisions to follow from. + + Returns (match, pats, slowpath) where + - match: a matcher built from the given pats and -I/-X opts + - pats: patterns used (globs are expanded on Windows) + - slowpath: True if patterns aren't as simple as scanning filelogs + """ + # pats/include/exclude are passed to match.match() directly in + # _matchfiles() revset but walkchangerevs() builds its matcher with + # scmutil.match(). The difference is input pats are globbed on + # platforms without shell expansion (windows). + wctx = repo[None] + match, pats = scmutil.matchandpats(wctx, pats, opts) + slowpath = match.anypats() or (not match.always() and opts.get('removed')) + if not slowpath: + follow = opts.get('follow') or opts.get('follow_first') + startctxs = [] + if follow and opts.get('rev'): + startctxs = [repo[r] for r in revs] + for f in match.files(): + if follow and startctxs: + # No idea if the path was a directory at that revision, so + # take the slow path. + if any(f not in c for c in startctxs): + slowpath = True + continue + elif follow and f not in wctx: + # If the file exists, it may be a directory, so let it + # take the slow path. 
+ if os.path.exists(repo.wjoin(f)): + slowpath = True + continue + else: + raise error.Abort(_('cannot follow file not in parent ' + 'revision: "%s"') % f) + filelog = repo.file(f) + if not filelog: + # A zero count may be a directory or deleted file, so + # try to find matching entries on the slow path. + if follow: + raise error.Abort( + _('cannot follow nonexistent file: "%s"') % f) + slowpath = True + + # We decided to fall back to the slowpath because at least one + # of the paths was not a file. Check to see if at least one of them + # existed in history - in that case, we'll continue down the + # slowpath; otherwise, we can turn off the slowpath + if slowpath: + for path in match.files(): + if path == '.' or path in repo.store: + break + else: + slowpath = False + + return match, pats, slowpath + +def _fileancestors(repo, revs, match, followfirst): + fctxs = [] + for r in revs: + ctx = repo[r] + fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match)) + + # When displaying a revision with --patch --follow FILE, we have + # to know which file of the revision must be diffed. With + # --follow, we want the names of the ancestors of FILE in the + # revision, stored in "fcache". "fcache" is populated as a side effect + # of the graph traversal. 
+ fcache = {} + def filematcher(ctx): + return scmutil.matchfiles(repo, fcache.get(ctx.rev(), [])) + + def revgen(): + for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst): + fcache[rev] = [c.path() for c in cs] + yield rev + return smartset.generatorset(revgen(), iterasc=False), filematcher + +def _makenofollowfilematcher(repo, pats, opts): + '''hook for extensions to override the filematcher for non-follow cases''' + return None + +_opt2logrevset = { + 'no_merges': ('not merge()', None), + 'only_merges': ('merge()', None), + '_matchfiles': (None, '_matchfiles(%ps)'), + 'date': ('date(%s)', None), + 'branch': ('branch(%s)', '%lr'), + '_patslog': ('filelog(%s)', '%lr'), + 'keyword': ('keyword(%s)', '%lr'), + 'prune': ('ancestors(%s)', 'not %lr'), + 'user': ('user(%s)', '%lr'), +} + +def _makerevset(repo, match, pats, slowpath, opts): + """Return a revset string built from log options and file patterns""" + opts = dict(opts) + # follow or not follow? + follow = opts.get('follow') or opts.get('follow_first') + + # branch and only_branch are really aliases and must be handled at + # the same time + opts['branch'] = opts.get('branch', []) + opts.get('only_branch', []) + opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']] + + if slowpath: + # See walkchangerevs() slow path. + # + # pats/include/exclude cannot be represented as separate + # revset expressions as their filtering logic applies at file + # level. For instance "-I a -X b" matches a revision touching + # "a" and "b" while "file(a) and not file(b)" does + # not. Besides, filesets are evaluated against the working + # directory. 
+ matchargs = ['r:', 'd:relpath'] + for p in pats: + matchargs.append('p:' + p) + for p in opts.get('include', []): + matchargs.append('i:' + p) + for p in opts.get('exclude', []): + matchargs.append('x:' + p) + opts['_matchfiles'] = matchargs + elif not follow: + opts['_patslog'] = list(pats) + + expr = [] + for op, val in sorted(opts.iteritems()): + if not val: + continue + if op not in _opt2logrevset: + continue + revop, listop = _opt2logrevset[op] + if revop and '%' not in revop: + expr.append(revop) + elif not listop: + expr.append(revsetlang.formatspec(revop, val)) + else: + if revop: + val = [revsetlang.formatspec(revop, v) for v in val] + expr.append(revsetlang.formatspec(listop, val)) + + if expr: + expr = '(' + ' and '.join(expr) + ')' + else: + expr = None + return expr + +def _initialrevs(repo, opts): + """Return the initial set of revisions to be filtered or followed""" + follow = opts.get('follow') or opts.get('follow_first') + if opts.get('rev'): + revs = scmutil.revrange(repo, opts['rev']) + elif follow and repo.dirstate.p1() == nullid: + revs = smartset.baseset() + elif follow: + revs = repo.revs('.') + else: + revs = smartset.spanset(repo) + revs.reverse() + return revs + +def getrevs(repo, pats, opts): + """Return (revs, differ) where revs is a smartset + + differ is a changesetdiffer with pre-configured file matcher. 
+ """ + follow = opts.get('follow') or opts.get('follow_first') + followfirst = opts.get('follow_first') + limit = getlimit(opts) + revs = _initialrevs(repo, opts) + if not revs: + return smartset.baseset(), None + match, pats, slowpath = _makematcher(repo, revs, pats, opts) + filematcher = None + if follow: + if slowpath or match.always(): + revs = dagop.revancestors(repo, revs, followfirst=followfirst) + else: + revs, filematcher = _fileancestors(repo, revs, match, followfirst) + revs.reverse() + if filematcher is None: + filematcher = _makenofollowfilematcher(repo, pats, opts) + if filematcher is None: + def filematcher(ctx): + return match + + expr = _makerevset(repo, match, pats, slowpath, opts) + if opts.get('graph') and opts.get('rev'): + # User-specified revs might be unsorted, but don't sort before + # _makerevset because it might depend on the order of revs + if not (revs.isdescending() or revs.istopo()): + revs.sort(reverse=True) + if expr: + matcher = revset.match(None, expr) + revs = matcher(repo, revs) + if limit is not None: + revs = revs.slice(0, limit) + + differ = changesetdiffer() + differ._makefilematcher = filematcher + return revs, differ + +def _parselinerangeopt(repo, opts): + """Parse --line-range log option and return a list of tuples (filename, + (fromline, toline)). + """ + linerangebyfname = [] + for pat in opts.get('line_range', []): + try: + pat, linerange = pat.rsplit(',', 1) + except ValueError: + raise error.Abort(_('malformatted line-range pattern %s') % pat) + try: + fromline, toline = map(int, linerange.split(':')) + except ValueError: + raise error.Abort(_("invalid line range for %s") % pat) + msg = _("line range pattern '%s' must match exactly one file") % pat + fname = scmutil.parsefollowlinespattern(repo, None, pat, msg) + linerangebyfname.append( + (fname, util.processlinerange(fromline, toline))) + return linerangebyfname + +def getlinerangerevs(repo, userrevs, opts): + """Return (revs, differ). 
+ + "revs" are revisions obtained by processing "line-range" log options and + walking block ancestors of each specified file/line-range. + + "differ" is a changesetdiffer with pre-configured file matcher and hunks + filter. + """ + wctx = repo[None] + + # Two-levels map of "rev -> file ctx -> [line range]". + linerangesbyrev = {} + for fname, (fromline, toline) in _parselinerangeopt(repo, opts): + if fname not in wctx: + raise error.Abort(_('cannot follow file not in parent ' + 'revision: "%s"') % fname) + fctx = wctx.filectx(fname) + for fctx, linerange in dagop.blockancestors(fctx, fromline, toline): + rev = fctx.introrev() + if rev not in userrevs: + continue + linerangesbyrev.setdefault( + rev, {}).setdefault( + fctx.path(), []).append(linerange) + + def nofilterhunksfn(fctx, hunks): + return hunks + + def hunksfilter(ctx): + fctxlineranges = linerangesbyrev.get(ctx.rev()) + if fctxlineranges is None: + return nofilterhunksfn + + def filterfn(fctx, hunks): + lineranges = fctxlineranges.get(fctx.path()) + if lineranges is not None: + for hr, lines in hunks: + if hr is None: # binary + yield hr, lines + continue + if any(mdiff.hunkinrange(hr[2:], lr) + for lr in lineranges): + yield hr, lines + else: + for hunk in hunks: + yield hunk + + return filterfn + + def filematcher(ctx): + files = list(linerangesbyrev.get(ctx.rev(), [])) + return scmutil.matchfiles(repo, files) + + revs = sorted(linerangesbyrev, reverse=True) + + differ = changesetdiffer() + differ._makefilematcher = filematcher + differ._makehunksfilter = hunksfilter + return revs, differ + +def _graphnodeformatter(ui, displayer): + spec = ui.config('ui', 'graphnodetemplate') + if not spec: + return templatekw.getgraphnode # fast path for "{graphnode}" + + spec = templater.unquotestring(spec) + tres = formatter.templateresources(ui) + if isinstance(displayer, changesettemplater): + tres['cache'] = displayer.cache # reuse cache of slow templates + templ = formatter.maketemplater(ui, spec, 
defaults=templatekw.keywords, + resources=tres) + def formatnode(repo, ctx): + props = {'ctx': ctx, 'repo': repo, 'revcache': {}} + return templ.render(props) + return formatnode + +def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None, props=None): + props = props or {} + formatnode = _graphnodeformatter(ui, displayer) + state = graphmod.asciistate() + styles = state['styles'] + + # only set graph styling if HGPLAIN is not set. + if ui.plain('graph'): + # set all edge styles to |, the default pre-3.8 behaviour + styles.update(dict.fromkeys(styles, '|')) + else: + edgetypes = { + 'parent': graphmod.PARENT, + 'grandparent': graphmod.GRANDPARENT, + 'missing': graphmod.MISSINGPARENT + } + for name, key in edgetypes.items(): + # experimental config: experimental.graphstyle.* + styles[key] = ui.config('experimental', 'graphstyle.%s' % name, + styles[key]) + if not styles[key]: + styles[key] = None + + # experimental config: experimental.graphshorten + state['graphshorten'] = ui.configbool('experimental', 'graphshorten') + + for rev, type, ctx, parents in dag: + char = formatnode(repo, ctx) + copies = None + if getrenamed and ctx.rev(): + copies = [] + for fn in ctx.files(): + rename = getrenamed(fn, ctx.rev()) + if rename: + copies.append((fn, rename[0])) + edges = edgefn(type, char, state, rev, parents) + firstedge = next(edges) + width = firstedge[2] + displayer.show(ctx, copies=copies, + graphwidth=width, **pycompat.strkwargs(props)) + lines = displayer.hunk.pop(rev).split('\n') + if not lines[-1]: + del lines[-1] + displayer.flush(ctx) + for type, char, width, coldata in itertools.chain([firstedge], edges): + graphmod.ascii(ui, state, type, char, lines, coldata) + lines = [] + displayer.close() + +def displaygraphrevs(ui, repo, revs, displayer, getrenamed): + revdag = graphmod.dagwalker(repo, revs) + displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed) + +def displayrevs(ui, repo, revs, displayer, getrenamed): + for rev in revs: + 
ctx = repo[rev] + copies = None + if getrenamed is not None and rev: + copies = [] + for fn in ctx.files(): + rename = getrenamed(fn, rev) + if rename: + copies.append((fn, rename[0])) + displayer.show(ctx, copies=copies) + displayer.flush(ctx) + displayer.close() + +def checkunsupportedgraphflags(pats, opts): + for op in ["newest_first"]: + if op in opts and opts[op]: + raise error.Abort(_("-G/--graph option is incompatible with --%s") + % op.replace("_", "-")) + +def graphrevs(repo, nodes, opts): + limit = getlimit(opts) + nodes.reverse() + if limit is not None: + nodes = nodes[:limit] + return graphmod.nodes(repo, nodes)
--- a/mercurial/logexchange.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/logexchange.py Sun Mar 04 10:42:51 2018 -0500 @@ -11,6 +11,7 @@ from .node import hex from . import ( + util, vfs as vfsmod, ) @@ -94,6 +95,30 @@ finally: wlock.release() +def activepath(repo, remote): + """returns remote path""" + local = None + # is the remote a local peer + local = remote.local() + + # determine the remote path from the repo, if possible; else just + # use the string given to us + rpath = remote + if local: + rpath = remote._repo.root + elif not isinstance(remote, str): + rpath = remote._url + + # represent the remotepath with user defined path name if exists + for path, url in repo.ui.configitems('paths'): + # remove auth info from user defined url + url = util.removeauth(url) + if url == rpath: + rpath = path + break + + return rpath + def pullremotenames(localrepo, remoterepo): """ pulls bookmarks and branches information of the remote repo during a @@ -101,7 +126,7 @@ localrepo is our local repository remoterepo is the peer instance """ - remotepath = remoterepo.url() + remotepath = activepath(localrepo, remoterepo) bookmarks = remoterepo.listkeys('bookmarks') # on a push, we don't want to keep obsolete heads since # they won't show up as heads on the next pull, so we
--- a/mercurial/lsprof.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/lsprof.py Sun Mar 04 10:42:51 2018 -0500 @@ -27,7 +27,7 @@ def __init__(self, data): self.data = data - def sort(self, crit="inlinetime"): + def sort(self, crit=r"inlinetime"): """XXX docstring""" # profiler_entries isn't defined when running under PyPy. if profiler_entry:
--- a/mercurial/mail.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/mail.py Sun Mar 04 10:42:51 2018 -0500 @@ -20,6 +20,7 @@ from . import ( encoding, error, + pycompat, sslutil, util, ) @@ -186,7 +187,7 @@ def codec2iana(cs): '''''' - cs = email.charset.Charset(cs).input_charset.lower() + cs = pycompat.sysbytes(email.charset.Charset(cs).input_charset.lower()) # "latin1" normalizes to "iso8859-1", standard calls for "iso-8859-1" if cs.startswith("iso") and not cs.startswith("iso-"): @@ -205,7 +206,7 @@ return mimetextqp(s, subtype, 'us-ascii') for charset in cs: try: - s.decode(charset) + s.decode(pycompat.sysstr(charset)) return mimetextqp(s, subtype, codec2iana(charset)) except UnicodeDecodeError: pass @@ -218,7 +219,7 @@ ''' cs = email.charset.Charset(charset) msg = email.message.Message() - msg.set_type('text/' + subtype) + msg.set_type(pycompat.sysstr('text/' + subtype)) for line in body.splitlines(): if len(line) > 950: @@ -287,13 +288,13 @@ addr = addr.encode('ascii') except UnicodeDecodeError: raise error.Abort(_('invalid local address: %s') % addr) - return email.Utils.formataddr((name, addr)) + return email.utils.formataddr((name, addr)) def addressencode(ui, address, charsets=None, display=False): '''Turns address into RFC-2047 compliant header.''' if display or not address: return address or '' - name, addr = email.Utils.parseaddr(address) + name, addr = email.utils.parseaddr(address) return _addressencode(ui, name, addr, charsets) def addrlistencode(ui, addrs, charsets=None, display=False): @@ -304,7 +305,7 @@ return [a.strip() for a in addrs if a.strip()] result = [] - for name, addr in email.Utils.getaddresses(addrs): + for name, addr in email.utils.getaddresses(addrs): if name or addr: result.append(_addressencode(ui, name, addr, charsets)) return result
--- a/mercurial/manifest.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/manifest.py Sun Mar 04 10:42:51 2018 -0500 @@ -9,7 +9,6 @@ import heapq import itertools -import os import struct from .i18n import _ @@ -28,7 +27,7 @@ parsers = policy.importmod(r'parsers') propertycache = util.propertycache -def _parsev1(data): +def _parse(data): # This method does a little bit of excessive-looking # precondition checking. This is so that the behavior of this # class exactly matches its C counterpart to try and help @@ -47,43 +46,7 @@ else: yield f, bin(n), '' -def _parsev2(data): - metadataend = data.find('\n') - # Just ignore metadata for now - pos = metadataend + 1 - prevf = '' - while pos < len(data): - end = data.find('\n', pos + 1) # +1 to skip stem length byte - if end == -1: - raise ValueError('Manifest ended with incomplete file entry.') - stemlen = ord(data[pos:pos + 1]) - items = data[pos + 1:end].split('\0') - f = prevf[:stemlen] + items[0] - if prevf > f: - raise ValueError('Manifest entries not in sorted order.') - fl = items[1] - # Just ignore metadata (items[2:] for now) - n = data[end + 1:end + 21] - yield f, n, fl - pos = end + 22 - prevf = f - -def _parse(data): - """Generates (path, node, flags) tuples from a manifest text""" - if data.startswith('\0'): - return iter(_parsev2(data)) - else: - return iter(_parsev1(data)) - -def _text(it, usemanifestv2): - """Given an iterator over (path, node, flags) tuples, returns a manifest - text""" - if usemanifestv2: - return _textv2(it) - else: - return _textv1(it) - -def _textv1(it): +def _text(it): files = [] lines = [] _hex = revlog.hex @@ -96,19 +59,6 @@ _checkforbidden(files) return ''.join(lines) -def _textv2(it): - files = [] - lines = ['\0\n'] - prevf = '' - for f, n, fl in it: - files.append(f) - stem = os.path.commonprefix([prevf, f]) - stemlen = min(len(stem), 255) - lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n)) - prevf = f - _checkforbidden(files) - return ''.join(lines) - class 
lazymanifestiter(object): def __init__(self, lm): self.pos = 0 @@ -414,13 +364,7 @@ class manifestdict(object): def __init__(self, data=''): - if data.startswith('\0'): - #_lazymanifest can not parse v2 - self._lm = _lazymanifest('') - for f, n, fl in _parsev2(data): - self._lm[f] = n, fl - else: - self._lm = _lazymanifest(data) + self._lm = _lazymanifest(data) def __getitem__(self, key): return self._lm[key][0] @@ -589,12 +533,9 @@ def iterentries(self): return self._lm.iterentries() - def text(self, usemanifestv2=False): - if usemanifestv2: - return _textv2(self._lm.iterentries()) - else: - # use (probably) native version for v1 - return self._lm.text() + def text(self): + # most likely uses native version + return self._lm.text() def fastdelta(self, base, changes): """Given a base manifest text as a bytearray and a list of changes @@ -755,6 +696,12 @@ size += m.__len__() return size + def __nonzero__(self): + # Faster than "__len() != 0" since it avoids loading sub-manifests + return not self._isempty() + + __bool__ = __nonzero__ + def _isempty(self): self._load() # for consistency; already loaded by all callers return (not self._files and (not self._dirs or @@ -954,7 +901,7 @@ else: files.update(m1.iterkeys()) - for fn in t1._files.iterkeys(): + for fn in t1._files: if fn not in t2._files: files.add(t1._subpath(fn)) @@ -1013,7 +960,7 @@ # yield this dir's files and walk its submanifests self._load() - for p in sorted(self._dirs.keys() + self._files.keys()): + for p in sorted(list(self._dirs) + list(self._files)): if p in self._files: fullp = self._subpath(p) if match(fullp): @@ -1132,12 +1079,12 @@ if fl: self._flags[f] = fl - def text(self, usemanifestv2=False): + def text(self): """Get the full data of this manifest as a bytestring.""" self._load() - return _text(self.iterentries(), usemanifestv2) + return _text(self.iterentries()) - def dirtext(self, usemanifestv2=False): + def dirtext(self): """Get the full data of this directory as a bytestring. 
Make sure that any submanifests have been written first, so their nodeids are correct. """ @@ -1145,7 +1092,7 @@ flags = self.flags dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs] files = [(f, self._files[f], flags(f)) for f in self._files] - return _text(sorted(dirs + files), usemanifestv2) + return _text(sorted(dirs + files)) def read(self, gettext, readsubtree): def _load_for_read(s): @@ -1202,15 +1149,12 @@ # stacks of commits, the number can go up, hence the config knob below. cachesize = 4 optiontreemanifest = False - usemanifestv2 = False opts = getattr(opener, 'options', None) if opts is not None: cachesize = opts.get('manifestcachesize', cachesize) optiontreemanifest = opts.get('treemanifest', False) - usemanifestv2 = opts.get('manifestv2', usemanifestv2) self._treeondisk = optiontreemanifest or treemanifest - self._usemanifestv2 = usemanifestv2 self._fulltextcache = util.lrucachedict(cachesize) @@ -1245,19 +1189,18 @@ self._fulltextcache.clear() self._dirlogcache = {'': self} - def dirlog(self, dir): - if dir: + def dirlog(self, d): + if d: assert self._treeondisk - if dir not in self._dirlogcache: - mfrevlog = manifestrevlog(self.opener, dir, + if d not in self._dirlogcache: + mfrevlog = manifestrevlog(self.opener, d, self._dirlogcache, treemanifest=self._treeondisk) - self._dirlogcache[dir] = mfrevlog - return self._dirlogcache[dir] + self._dirlogcache[d] = mfrevlog + return self._dirlogcache[d] def add(self, m, transaction, link, p1, p2, added, removed, readtree=None): - if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta') - and not self._usemanifestv2): + if p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta'): # If our first parent is in the manifest cache, we can # compute a delta here using properties we know about the # manifest up-front, which may save time later for the @@ -1284,7 +1227,7 @@ n = self._addtree(m, transaction, link, m1, m2, readtree) arraytext = None else: - text = m.text(self._usemanifestv2) 
+ text = m.text() n = self.addrevision(text, transaction, link, p1, p2) arraytext = bytearray(text) @@ -1303,13 +1246,13 @@ sublog.add(subm, transaction, link, subp1, subp2, None, None, readtree=readtree) m.writesubtrees(m1, m2, writesubtree) - text = m.dirtext(self._usemanifestv2) + text = m.dirtext() n = None if self._dir != '': # Double-check whether contents are unchanged to one parent - if text == m1.dirtext(self._usemanifestv2): + if text == m1.dirtext(): n = m1.node() - elif text == m2.dirtext(self._usemanifestv2): + elif text == m2.dirtext(): n = m2.node() if not n: @@ -1487,19 +1430,6 @@ Changing the value of `shallow` has no effect on flat manifests. ''' revlog = self._revlog() - if revlog._usemanifestv2: - # Need to perform a slow delta - r0 = revlog.deltaparent(revlog.rev(self._node)) - m0 = self._manifestlog[revlog.node(r0)].read() - m1 = self.read() - md = manifestdict() - for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems(): - if n1: - md[f] = n1 - if fl1: - md.setflag(f, fl1) - return md - r = revlog.rev(self._node) d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r)) return manifestdict(d) @@ -1602,7 +1532,7 @@ its 't' flag. ''' revlog = self._revlog() - if shallow and not revlog._usemanifestv2: + if shallow: r = revlog.rev(self._node) d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r)) return manifestdict(d)
--- a/mercurial/match.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/match.py Sun Mar 04 10:42:51 2018 -0500 @@ -13,8 +13,10 @@ from .i18n import _ from . import ( + encoding, error, pathutil, + pycompat, util, ) @@ -225,7 +227,7 @@ except IOError as inst: if warn: warn(_("skipping unreadable pattern file '%s': %s\n") % - (pat, inst.strerror)) + (pat, util.forcebytestr(inst.strerror))) continue # else: re or relre - which cannot be normalized kindpats.append((kind, pat, '')) @@ -345,7 +347,7 @@ return 'all' def __repr__(self): - return '<alwaysmatcher>' + return r'<alwaysmatcher>' class nevermatcher(basematcher): '''Matches nothing.''' @@ -368,7 +370,7 @@ return False def __repr__(self): - return '<nevermatcher>' + return r'<nevermatcher>' class patternmatcher(basematcher): @@ -397,6 +399,7 @@ def prefix(self): return self._prefix + @encoding.strmethod def __repr__(self): return ('<patternmatcher patterns=%r>' % self._pats) @@ -424,8 +427,9 @@ any(parentdir in self._roots for parentdir in util.finddirs(dir))) + @encoding.strmethod def __repr__(self): - return ('<includematcher includes=%r>' % self._pats) + return ('<includematcher includes=%r>' % pycompat.bytestr(self._pats)) class exactmatcher(basematcher): '''Matches the input files exactly. 
They are interpreted as paths, not @@ -452,6 +456,7 @@ def isexact(self): return True + @encoding.strmethod def __repr__(self): return ('<exactmatcher files=%r>' % self._files) @@ -492,6 +497,7 @@ def isexact(self): return self._m1.isexact() + @encoding.strmethod def __repr__(self): return ('<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2)) @@ -558,6 +564,7 @@ def isexact(self): return self._m1.isexact() or self._m2.isexact() + @encoding.strmethod def __repr__(self): return ('<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2)) @@ -638,6 +645,7 @@ def prefix(self): return self._matcher.prefix() and not self._always + @encoding.strmethod def __repr__(self): return ('<subdirmatcher path=%r, matcher=%r>' % (self._path, self._matcher)) @@ -671,6 +679,7 @@ r |= v return r + @encoding.strmethod def __repr__(self): return ('<unionmatcher matchers=%r>' % self._matchers)
--- a/mercurial/mdiff.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/mdiff.py Sun Mar 04 10:42:51 2018 -0500 @@ -13,11 +13,15 @@ from .i18n import _ from . import ( + encoding, error, policy, pycompat, util, ) +from .utils import dateutil + +_missing_newline_marker = "\\ No newline at end of file\n" bdiff = policy.importmod(r'bdiff') mpatch = policy.importmod(r'mpatch') @@ -27,16 +31,7 @@ patches = mpatch.patches patchedsize = mpatch.patchedsize textdiff = bdiff.bdiff - -def splitnewlines(text): - '''like str.splitlines, but only split on newlines.''' - lines = [l + '\n' for l in text.split('\n')] - if lines: - if lines[-1] == '\n': - lines.pop() - else: - lines[-1] = lines[-1][:-1] - return lines +splitnewlines = bdiff.splitnewlines class diffopts(object): '''context is the number of context lines @@ -68,6 +63,7 @@ 'upgrade': False, 'showsimilarity': False, 'worddiff': False, + 'xdiff': False, } def __init__(self, **opts): @@ -193,6 +189,13 @@ raise error.Abort(_('line range exceeds file size')) return filteredblocks, (lba, uba) +def chooseblocksfunc(opts=None): + if (opts is None or not opts.xdiff + or not util.safehasattr(bdiff, 'xdiffblocks')): + return bdiff.blocks + else: + return bdiff.xdiffblocks + def allblocks(text1, text2, opts=None, lines1=None, lines2=None): """Return (block, type) tuples, where block is an mdiff.blocks line entry. type is '=' for blocks matching exactly one another @@ -206,7 +209,7 @@ if opts.ignorews or opts.ignorewsamount or opts.ignorewseol: text1 = wsclean(opts, text1, False) text2 = wsclean(opts, text2, False) - diff = bdiff.blocks(text1, text2) + diff = chooseblocksfunc(opts)(text1, text2) for i, s1 in enumerate(diff): # The first match is special. 
# we've either found a match starting at line 0 or a match later @@ -234,13 +237,15 @@ yield s, type yield s1, '=' -def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts): +def unidiff(a, ad, b, bd, fn1, fn2, binary, opts=defaultopts): """Return a unified diff as a (headers, hunks) tuple. If the diff is not null, `headers` is a list with unified diff header lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding (hunkrange, hunklines) coming from _unidiff(). Otherwise, `headers` and `hunks` are empty. + + Set binary=True if either a or b should be taken as a binary file. """ def datetag(date, fn=None): if not opts.git and not opts.nodates: @@ -259,23 +264,18 @@ aprefix = 'a/' bprefix = 'b/' - epoch = util.datestr((0, 0)) + epoch = dateutil.datestr((0, 0)) fn1 = util.pconvert(fn1) fn2 = util.pconvert(fn2) - def checknonewline(lines): - for text in lines: - if text[-1:] != '\n': - text += "\n\ No newline at end of file\n" - yield text - - if not opts.text and (util.binary(a) or util.binary(b)): + if binary: if a and b and len(a) == len(b) and a == b: return sentinel headerlines = [] hunks = (None, ['Binary file %s has changed\n' % fn1]), elif not a: + without_newline = not b.endswith('\n') b = splitnewlines(b) if a is None: l1 = '--- /dev/null%s' % datetag(epoch) @@ -286,8 +286,12 @@ size = len(b) hunkrange = (0, 0, 1, size) hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b] - hunks = (hunkrange, checknonewline(hunklines)), + if without_newline: + hunklines[-1] += '\n' + hunklines.append(_missing_newline_marker) + hunks = (hunkrange, hunklines), elif not b: + without_newline = not a.endswith('\n') a = splitnewlines(a) l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)) if b is None: @@ -298,24 +302,19 @@ size = len(a) hunkrange = (1, size, 0, 0) hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a] - hunks = (hunkrange, checknonewline(hunklines)), + if without_newline: + hunklines[-1] += '\n' + 
hunklines.append(_missing_newline_marker) + hunks = (hunkrange, hunklines), else: - diffhunks = _unidiff(a, b, opts=opts) - try: - hunkrange, hunklines = next(diffhunks) - except StopIteration: + hunks = _unidiff(a, b, opts=opts) + if not next(hunks): return sentinel headerlines = [ "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)), "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)), ] - def rewindhunks(): - yield hunkrange, checknonewline(hunklines) - for hr, hl in diffhunks: - yield hr, checknonewline(hl) - - hunks = rewindhunks() return headerlines, hunks @@ -327,6 +326,8 @@ form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines of the hunk combining said header followed by line additions and deletions. + + The hunks are prefixed with a bool. """ l1 = splitnewlines(t1) l2 = splitnewlines(t2) @@ -357,7 +358,11 @@ # alphanumeric char. for i in xrange(astart - 1, lastpos - 1, -1): if l1[i][0:1].isalnum(): - func = ' ' + l1[i].rstrip()[:40] + func = b' ' + l1[i].rstrip() + # split long function name if ASCII. otherwise we have no + # idea where the multi-byte boundary is, so just leave it. + if encoding.isasciistr(func): + func = func[:41] lastfunc[1] = func break # by recording this hunk's starting point as the next place to @@ -377,6 +382,26 @@ + delta + [' ' + l1[x] for x in xrange(a2, aend)] ) + # If either file ends without a newline and the last line of + # that file is part of a hunk, a marker is printed. If the + # last line of both files is identical and neither ends in + # a newline, print only one marker. That's the only case in + # which the hunk can end in a shared line without a newline. 
+ skip = False + if not t1.endswith('\n') and astart + alen == len(l1) + 1: + for i in xrange(len(hunklines) - 1, -1, -1): + if hunklines[i].startswith(('-', ' ')): + if hunklines[i].startswith(' '): + skip = True + hunklines[i] += '\n' + hunklines.insert(i + 1, _missing_newline_marker) + break + if not skip and not t2.endswith('\n') and bstart + blen == len(l2) + 1: + for i in xrange(len(hunklines) - 1, -1, -1): + if hunklines[i].startswith('+'): + hunklines[i] += '\n' + hunklines.insert(i + 1, _missing_newline_marker) + break yield hunkrange, hunklines # bdiff.blocks gives us the matching sequences in the files. The loop @@ -385,6 +410,7 @@ # hunk = None ignoredlines = 0 + has_hunks = False for s, stype in allblocks(t1, t2, opts, l1, l2): a1, a2, b1, b2 = s if stype != '!': @@ -411,6 +437,9 @@ astart = hunk[1] bstart = hunk[3] else: + if not has_hunks: + has_hunks = True + yield True for x in yieldhunk(hunk): yield x if prev: @@ -427,17 +456,22 @@ delta[len(delta):] = ['+' + x for x in new] if hunk: + if not has_hunks: + has_hunks = True + yield True for x in yieldhunk(hunk): yield x + elif not has_hunks: + yield False def b85diff(to, tn): '''print base85-encoded binary diff''' def fmtline(line): l = len(line) if l <= 26: - l = chr(ord('A') + l - 1) + l = pycompat.bytechr(ord('A') + l - 1) else: - l = chr(l - 26 + ord('a') - 1) + l = pycompat.bytechr(l - 26 + ord('a') - 1) return '%c%s\n' % (l, util.b85encode(line, True)) def chunk(text, csize=52):
--- a/mercurial/merge.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/merge.py Sun Mar 04 10:42:51 2018 -0500 @@ -25,13 +25,12 @@ from . import ( copies, error, - extensions, filemerge, match as matchmod, obsutil, pycompat, scmutil, - subrepo, + subrepoutil, util, worker, ) @@ -288,14 +287,14 @@ off = 0 end = len(data) while off < end: - rtype = data[off] + rtype = data[off:off + 1] off += 1 length = _unpack('>I', data[off:(off + 4)])[0] off += 4 record = data[off:(off + length)] off += length if rtype == 't': - rtype, record = record[0], record[1:] + rtype, record = record[0:1], record[1:] records.append((rtype, record)) f.close() except IOError as err: @@ -400,7 +399,7 @@ def _writerecordsv1(self, records): """Write current state on disk in a version 1 file""" - f = self._repo.vfs(self.statepathv1, 'w') + f = self._repo.vfs(self.statepathv1, 'wb') irecords = iter(records) lrecords = next(irecords) assert lrecords[0] == 'L' @@ -416,7 +415,7 @@ See the docstring for _readrecordsv2 for why we use 't'.""" # these are the records that all version 2 clients can read whitelist = 'LOF' - f = self._repo.vfs(self.statepathv2, 'w') + f = self._repo.vfs(self.statepathv2, 'wb') for key, data in records: assert len(key) == 1 if key not in whitelist: @@ -974,14 +973,14 @@ # Rename all local conflicting files that have not been deleted. for p in localconflicts: if p not in deletedfiles: - ctxname = str(wctx).rstrip('+') + ctxname = bytes(wctx).rstrip('+') pnew = util.safename(p, ctxname, wctx, set(actions.keys())) actions[pnew] = ('pr', (p,), "local path conflict") actions[p] = ('p', (pnew, 'l'), "path conflict") if remoteconflicts: # Check if all files in the conflicting directories have been removed. 
- ctxname = str(mctx).rstrip('+') + ctxname = bytes(mctx).rstrip('+') for f, p in _filesindirs(repo, mf, remoteconflicts): if f not in deletedfiles: m, args, msg = actions[p] @@ -1186,8 +1185,9 @@ def _resolvetrivial(repo, wctx, mctx, ancestor, actions): """Resolves false conflicts where the nodeid changed but the content remained the same.""" - - for f, (m, args, msg) in actions.items(): + # We force a copy of actions.items() because we're going to mutate + # actions as we resolve trivial conflicts. + for f, (m, args, msg) in list(actions.items()): if m == 'cd' and f in ancestor and not wctx[f].cmp(ancestor[f]): # local did change but ended up with same content actions[f] = 'r', None, "prompt same" @@ -1386,6 +1386,16 @@ if i > 0: yield i, f +def _prefetchfiles(repo, ctx, actions): + """Invoke ``scmutil.fileprefetchhooks()`` for the files relevant to the dict + of merge actions. ``ctx`` is the context being merged in.""" + + # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they + # don't touch the context to be merged in. 'cd' is skipped, because + # changed/deleted never resolves to something from the remote side. + oplist = [actions[a] for a in 'g dc dg m'.split()] + prefetch = scmutil.fileprefetchhooks + prefetch(repo, ctx, [f for sublist in oplist for f, args, msg in sublist]) def applyupdates(repo, actions, wctx, mctx, overwrite, labels=None): """apply the merge action list to the working directory @@ -1397,6 +1407,8 @@ describes how many files were affected by the update. 
""" + _prefetchfiles(repo, mctx, actions) + updated, merged, removed = 0, 0, 0 ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels) moves = [] @@ -1445,7 +1457,7 @@ z = 0 if [a for a in actions['r'] if a[0] == '.hgsubstate']: - subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels) + subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) # record path conflicts for f, args, msg in actions['p']: @@ -1495,7 +1507,7 @@ updated = len(actions['g']) if [a for a in actions['g'] if a[0] == '.hgsubstate']: - subrepo.submerge(repo, wctx, mctx, wctx, overwrite, labels) + subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels) # forget (manifest only, just log it) (must come first) for f, args, msg in actions['f']: @@ -1583,8 +1595,8 @@ z += 1 progress(_updating, z, item=f, total=numupdates, unit=_files) if f == '.hgsubstate': # subrepo states need updating - subrepo.submerge(repo, wctx, mctx, wctx.ancestor(mctx), - overwrite, labels) + subrepoutil.submerge(repo, wctx, mctx, wctx.ancestor(mctx), + overwrite, labels) continue wctx[f].audit() complete, r = ms.preresolve(f, wctx) @@ -1835,7 +1847,7 @@ else: pas = [p1.ancestor(p2, warn=branchmerge)] - fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2) + fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2) ### check phase if not overwrite: @@ -1913,7 +1925,7 @@ # Prompt and create actions. Most of this is in the resolve phase # already, but we can't handle .hgsubstate in filemerge or - # subrepo.submerge yet so we have to keep prompting for it. + # subrepoutil.submerge yet so we have to keep prompting for it. if '.hgsubstate' in actionbyfile: f = '.hgsubstate' m, args, msg = actionbyfile[f] @@ -1992,6 +2004,8 @@ fsmonitorthreshold = repo.ui.configint('fsmonitor', 'warn_update_file_count') try: + # avoid cycle: extensions -> cmdutil -> merge + from . 
import extensions extensions.find('fsmonitor') fsmonitorenabled = repo.ui.config('fsmonitor', 'mode') != 'off' # We intentionally don't look at whether fsmonitor has disabled
--- a/mercurial/namespaces.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/namespaces.py Sun Mar 04 10:42:51 2018 -0500 @@ -2,6 +2,7 @@ from .i18n import _ from . import ( + registrar, templatekw, util, ) @@ -87,10 +88,10 @@ # we only generate a template keyword if one does not already exist if namespace.name not in templatekw.keywords: - def generatekw(**args): - return templatekw.shownames(namespace.name, **args) - - templatekw.keywords[namespace.name] = generatekw + templatekeyword = registrar.templatekeyword(templatekw.keywords) + @templatekeyword(namespace.name, requires={'repo', 'ctx', 'templ'}) + def generatekw(context, mapping): + return templatekw.shownames(context, mapping, namespace.name) def singlenode(self, repo, name): """
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/narrowspec.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,199 @@ +# narrowspec.py - methods for working with a narrow view of a repository +# +# Copyright 2017 Google, Inc. +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import errno + +from .i18n import _ +from . import ( + error, + match as matchmod, + util, +) + +FILENAME = 'narrowspec' + +def _parsestoredpatterns(text): + """Parses the narrowspec format that's stored on disk.""" + patlist = None + includepats = [] + excludepats = [] + for l in text.splitlines(): + if l == '[includes]': + if patlist is None: + patlist = includepats + else: + raise error.Abort(_('narrowspec includes section must appear ' + 'at most once, before excludes')) + elif l == '[excludes]': + if patlist is not excludepats: + patlist = excludepats + else: + raise error.Abort(_('narrowspec excludes section must appear ' + 'at most once')) + else: + patlist.append(l) + + return set(includepats), set(excludepats) + +def parseserverpatterns(text): + """Parses the narrowspec format that's returned by the server.""" + includepats = set() + excludepats = set() + + # We get one entry per line, in the format "<key> <value>". + # It's OK for value to contain other spaces. + for kp in (l.split(' ', 1) for l in text.splitlines()): + if len(kp) != 2: + raise error.Abort(_('Invalid narrowspec pattern line: "%s"') % kp) + key = kp[0] + pat = kp[1] + if key == 'include': + includepats.add(pat) + elif key == 'exclude': + excludepats.add(pat) + else: + raise error.Abort(_('Invalid key "%s" in server response') % key) + + return includepats, excludepats + +def normalizesplitpattern(kind, pat): + """Returns the normalized version of a pattern and kind. + + Returns a tuple with the normalized kind and normalized pattern. 
+ """ + pat = pat.rstrip('/') + _validatepattern(pat) + return kind, pat + +def _numlines(s): + """Returns the number of lines in s, including ending empty lines.""" + # We use splitlines because it is Unicode-friendly and thus Python 3 + # compatible. However, it does not count empty lines at the end, so trick + # it by adding a character at the end. + return len((s + 'x').splitlines()) + +def _validatepattern(pat): + """Validates the pattern and aborts if it is invalid. + + Patterns are stored in the narrowspec as newline-separated + POSIX-style bytestring paths. There's no escaping. + """ + + # We use newlines as separators in the narrowspec file, so don't allow them + # in patterns. + if _numlines(pat) > 1: + raise error.Abort(_('newlines are not allowed in narrowspec paths')) + + components = pat.split('/') + if '.' in components or '..' in components: + raise error.Abort(_('"." and ".." are not allowed in narrowspec paths')) + +def normalizepattern(pattern, defaultkind='path'): + """Returns the normalized version of a text-format pattern. + + If the pattern has no kind, the default will be added. + """ + kind, pat = matchmod._patsplit(pattern, defaultkind) + return '%s:%s' % normalizesplitpattern(kind, pat) + +def parsepatterns(pats): + """Parses a list of patterns into a typed pattern set.""" + return set(normalizepattern(p) for p in pats) + +def format(includes, excludes): + output = '[includes]\n' + for i in sorted(includes - excludes): + output += i + '\n' + output += '[excludes]\n' + for e in sorted(excludes): + output += e + '\n' + return output + +def match(root, include=None, exclude=None): + if not include: + # Passing empty include and empty exclude to matchmod.match() + # gives a matcher that matches everything, so explicitly use + # the nevermatcher. 
+ return matchmod.never(root, '') + return matchmod.match(root, '', [], include=include or [], + exclude=exclude or []) + +def needsexpansion(includes): + return [i for i in includes if i.startswith('include:')] + +def load(repo): + try: + spec = repo.vfs.read(FILENAME) + except IOError as e: + # Treat "narrowspec does not exist" the same as "narrowspec file exists + # and is empty". + if e.errno == errno.ENOENT: + # Without this the next call to load will use the cached + # non-existence of the file, which can cause some odd issues. + repo.invalidate(clearfilecache=True) + return set(), set() + raise + return _parsestoredpatterns(spec) + +def save(repo, includepats, excludepats): + spec = format(includepats, excludepats) + repo.vfs.write(FILENAME, spec) + +def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes): + r""" Restricts the patterns according to repo settings, + results in a logical AND operation + + :param req_includes: requested includes + :param req_excludes: requested excludes + :param repo_includes: repo includes + :param repo_excludes: repo excludes + :return: include patterns, exclude patterns, and invalid include patterns. + + >>> restrictpatterns({'f1','f2'}, {}, ['f1'], []) + (set(['f1']), {}, []) + >>> restrictpatterns({'f1'}, {}, ['f1','f2'], []) + (set(['f1']), {}, []) + >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], []) + (set(['f1/fc1']), {}, []) + >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], []) + ([], set(['path:.']), []) + >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], []) + (set(['f2/fc2']), {}, []) + >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], []) + ([], set(['path:.']), []) + >>> restrictpatterns({'f1/$non_exitent_var'}, {}, ['f1','f2'], []) + (set(['f1/$non_exitent_var']), {}, []) + """ + res_excludes = set(req_excludes) + res_excludes.update(repo_excludes) + invalid_includes = [] + if not req_includes: + res_includes = set(repo_includes) + elif 'path:.' 
not in repo_includes: + res_includes = [] + for req_include in req_includes: + req_include = util.expandpath(util.normpath(req_include)) + if req_include in repo_includes: + res_includes.append(req_include) + continue + valid = False + for repo_include in repo_includes: + if req_include.startswith(repo_include + '/'): + valid = True + res_includes.append(req_include) + break + if not valid: + invalid_includes.append(req_include) + if len(res_includes) == 0: + res_excludes = {'path:.'} + else: + res_includes = set(res_includes) + else: + res_includes = set(req_includes) + return res_includes, res_excludes, invalid_includes
--- a/mercurial/node.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/node.py Sun Mar 04 10:42:51 2018 -0500 @@ -11,7 +11,14 @@ # This ugly style has a noticeable effect in manifest parsing hex = binascii.hexlify -bin = binascii.unhexlify +# Adapt to Python 3 API changes. If this ends up showing up in +# profiles, we can use this version only on Python 3, and forward +# binascii.unhexlify like we used to on Python 2. +def bin(s): + try: + return binascii.unhexlify(s) + except binascii.Error as e: + raise TypeError(e) nullrev = -1 nullid = b"\0" * 20
--- a/mercurial/obsolete.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/obsolete.py Sun Mar 04 10:42:51 2018 -0500 @@ -81,6 +81,7 @@ policy, util, ) +from .utils import dateutil parsers = policy.importmod(r'parsers') @@ -506,13 +507,6 @@ for mark in markers: successors.setdefault(mark[0], set()).add(mark) -def _addprecursors(*args, **kwargs): - msg = ("'obsolete._addprecursors' is deprecated, " - "use 'obsolete._addpredecessors'") - util.nouideprecwarn(msg, '4.4') - - return _addpredecessors(*args, **kwargs) - @util.nogc def _addpredecessors(predecessors, markers): for mark in markers: @@ -570,7 +564,7 @@ return len(self._all) def __nonzero__(self): - if not self._cached('_all'): + if not self._cached(r'_all'): try: return self.svfs.stat('obsstore').st_size > 1 except OSError as inst: @@ -608,13 +602,13 @@ if date is None: if 'date' in metadata: # as a courtesy for out-of-tree extensions - date = util.parsedate(metadata.pop('date')) + date = dateutil.parsedate(metadata.pop('date')) elif ui is not None: date = ui.configdate('devel', 'default-date') if date is None: - date = util.makedate() + date = dateutil.makedate() else: - date = util.makedate() + date = dateutil.makedate() if len(prec) != 20: raise ValueError(prec) for succ in succs: @@ -663,7 +657,7 @@ self.caches.clear() # records the number of new markers for the transaction hooks previous = int(transaction.hookargs.get('new_obsmarkers', '0')) - transaction.hookargs['new_obsmarkers'] = str(previous + len(new)) + transaction.hookargs['new_obsmarkers'] = '%d' % (previous + len(new)) return len(new) def mergemarkers(self, transaction, data): @@ -700,14 +694,6 @@ _addsuccessors(successors, self._all) return successors - @property - def precursors(self): - msg = ("'obsstore.precursors' is deprecated, " - "use 'obsstore.predecessors'") - util.nouideprecwarn(msg, '4.4') - - return self.predecessors - @propertycache def predecessors(self): predecessors = {} @@ -727,11 +713,11 @@ markers = list(markers) # to allow 
repeated iteration self._data = self._data + rawdata self._all.extend(markers) - if self._cached('successors'): + if self._cached(r'successors'): _addsuccessors(self.successors, markers) - if self._cached('predecessors'): + if self._cached(r'predecessors'): _addpredecessors(self.predecessors, markers) - if self._cached('children'): + if self._cached(r'children'): _addchildren(self.children, markers) _checkinvalidmarkers(markers) @@ -843,42 +829,6 @@ repo.invalidatevolatilesets() return True -# keep compatibility for the 4.3 cycle -def allprecursors(obsstore, nodes, ignoreflags=0): - movemsg = 'obsolete.allprecursors moved to obsutil.allprecursors' - util.nouideprecwarn(movemsg, '4.3') - return obsutil.allprecursors(obsstore, nodes, ignoreflags) - -def allsuccessors(obsstore, nodes, ignoreflags=0): - movemsg = 'obsolete.allsuccessors moved to obsutil.allsuccessors' - util.nouideprecwarn(movemsg, '4.3') - return obsutil.allsuccessors(obsstore, nodes, ignoreflags) - -def marker(repo, data): - movemsg = 'obsolete.marker moved to obsutil.marker' - repo.ui.deprecwarn(movemsg, '4.3') - return obsutil.marker(repo, data) - -def getmarkers(repo, nodes=None, exclusive=False): - movemsg = 'obsolete.getmarkers moved to obsutil.getmarkers' - repo.ui.deprecwarn(movemsg, '4.3') - return obsutil.getmarkers(repo, nodes=nodes, exclusive=exclusive) - -def exclusivemarkers(repo, nodes): - movemsg = 'obsolete.exclusivemarkers moved to obsutil.exclusivemarkers' - repo.ui.deprecwarn(movemsg, '4.3') - return obsutil.exclusivemarkers(repo, nodes) - -def foreground(repo, nodes): - movemsg = 'obsolete.foreground moved to obsutil.foreground' - repo.ui.deprecwarn(movemsg, '4.3') - return obsutil.foreground(repo, nodes) - -def successorssets(repo, initialnode, cache=None): - movemsg = 'obsolete.successorssets moved to obsutil.successorssets' - repo.ui.deprecwarn(movemsg, '4.3') - return obsutil.successorssets(repo, initialnode, cache=cache) - # mapping of 'set-name' -> <function to compute this 
set> cachefuncs = {} def cachefor(name): @@ -933,14 +883,6 @@ obs = set(r for r in notpublic if isobs(getnode(r))) return obs -@cachefor('unstable') -def _computeunstableset(repo): - msg = ("'unstable' volatile set is deprecated, " - "use 'orphan'") - repo.ui.deprecwarn(msg, '4.4') - - return _computeorphanset(repo) - @cachefor('orphan') def _computeorphanset(repo): """the set of non obsolete revisions with obsolete parents""" @@ -969,14 +911,6 @@ """the set of obsolete parents without non obsolete descendants""" return getrevs(repo, 'obsolete') - getrevs(repo, 'suspended') -@cachefor('bumped') -def _computebumpedset(repo): - msg = ("'bumped' volatile set is deprecated, " - "use 'phasedivergent'") - repo.ui.deprecwarn(msg, '4.4') - - return _computephasedivergentset(repo) - @cachefor('phasedivergent') def _computephasedivergentset(repo): """the set of revs trying to obsolete public revisions""" @@ -1000,14 +934,6 @@ break # Next draft! return bumped -@cachefor('divergent') -def _computedivergentset(repo): - msg = ("'divergent' volatile set is deprecated, " - "use 'contentdivergent'") - repo.ui.deprecwarn(msg, '4.4') - - return _computecontentdivergentset(repo) - @cachefor('contentdivergent') def _computecontentdivergentset(repo): """the set of rev that compete to be the final successors of some revision.
--- a/mercurial/obsutil.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/obsutil.py Sun Mar 04 10:42:51 2018 -0500 @@ -15,6 +15,7 @@ phases, util, ) +from .utils import dateutil class marker(object): """Wrap obsolete marker raw data""" @@ -33,12 +34,6 @@ return False return self._data == other._data - def precnode(self): - msg = ("'marker.precnode' is deprecated, " - "use 'marker.prednode'") - util.nouideprecwarn(msg, '4.4') - return self.prednode() - def prednode(self): """Predecessor changeset node identifier""" return self._data[0] @@ -106,15 +101,6 @@ else: stack.append(precnodeid) -def allprecursors(*args, **kwargs): - """ (DEPRECATED) - """ - msg = ("'obsutil.allprecursors' is deprecated, " - "use 'obsutil.allpredecessors'") - util.nouideprecwarn(msg, '4.4') - - return allpredecessors(*args, **kwargs) - def allpredecessors(obsstore, nodes, ignoreflags=0): """Yield node for every precursors of <nodes>. @@ -421,10 +407,10 @@ # Check if other meta has changed changeextra = changectx.extra().items() - ctxmeta = filter(metanotblacklisted, changeextra) + ctxmeta = list(filter(metanotblacklisted, changeextra)) sourceextra = source.extra().items() - srcmeta = filter(metanotblacklisted, sourceextra) + srcmeta = list(filter(metanotblacklisted, sourceextra)) if ctxmeta != srcmeta: effects |= METACHANGED @@ -856,11 +842,11 @@ max_date = max(dates) if min_date == max_date: - fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2') + fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2') line.append(" (at %s)" % fmtmin_date) else: - fmtmin_date = util.datestr(min_date, '%Y-%m-%d %H:%M %1%2') - fmtmax_date = util.datestr(max_date, '%Y-%m-%d %H:%M %1%2') + fmtmin_date = dateutil.datestr(min_date, '%Y-%m-%d %H:%M %1%2') + fmtmax_date = dateutil.datestr(max_date, '%Y-%m-%d %H:%M %1%2') line.append(" (between %s and %s)" % (fmtmin_date, fmtmax_date)) return "".join(line)
--- a/mercurial/parser.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/parser.py Sun Mar 04 10:42:51 2018 -0500 @@ -22,6 +22,7 @@ from . import ( encoding, error, + pycompat, util, ) @@ -192,7 +193,7 @@ return util.unescapestr(s) except ValueError as e: # mangle Python's exception into our format - raise error.ParseError(str(e).lower()) + raise error.ParseError(pycompat.bytestr(e).lower()) def _brepr(obj): if isinstance(obj, bytes):
--- a/mercurial/patch.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/patch.py Sun Mar 04 10:42:51 2018 -0500 @@ -12,7 +12,6 @@ import copy import difflib import email -import email.parser as emailparser import errno import hashlib import os @@ -41,6 +40,7 @@ util, vfs as vfsmod, ) +from .utils import dateutil diffhelpers = policy.importmod(r'diffhelpers') stringio = util.stringio @@ -109,7 +109,7 @@ cur.append(line) c = chunk(cur) - m = emailparser.Parser().parse(c) + m = pycompat.emailparser().parse(c) if not m.is_multipart(): yield msgfp(m) else: @@ -216,9 +216,9 @@ data = {} fd, tmpname = tempfile.mkstemp(prefix='hg-patch-') - tmpfp = os.fdopen(fd, pycompat.sysstr('w')) + tmpfp = os.fdopen(fd, pycompat.sysstr('wb')) try: - msg = emailparser.Parser().parse(fileobj) + msg = pycompat.emailparser().parse(fileobj) subject = msg['Subject'] and mail.headdecode(msg['Subject']) data['user'] = msg['From'] and mail.headdecode(msg['From']) @@ -242,7 +242,7 @@ ok_types = ('text/plain', 'text/x-diff', 'text/x-patch') message = '' for part in msg.walk(): - content_type = part.get_content_type() + content_type = pycompat.bytestr(part.get_content_type()) ui.debug('Content-Type: %s\n' % content_type) if content_type not in ok_types: continue @@ -567,7 +567,7 @@ root = tempfile.mkdtemp(prefix='hg-patch-') self.opener = vfsmod.vfs(root) # Avoid filename issues with these simple names - fn = str(self.created) + fn = '%d' % self.created self.opener.write(fn, data) self.created += 1 self.files[fname] = (fn, mode, copied) @@ -1451,7 +1451,7 @@ dec = [] line = getline(lr, self.hunk) while len(line) > 1: - l = line[0] + l = line[0:1] if l <= 'Z' and l >= 'A': l = ord(l) - ord('A') + 1 else: @@ -1460,7 +1460,7 @@ dec.append(util.b85decode(line[1:])[:l]) except ValueError as e: raise PatchError(_('could not decode "%s" binary patch: %s') - % (self._fname, str(e))) + % (self._fname, util.forcebytestr(e))) line = getline(lr, self.hunk) text = zlib.decompress(''.join(dec)) if len(text) != 
size: @@ -1852,7 +1852,7 @@ for x in iter(lr.readline, ''): if state == BFILE and ( - (not context and x[0] == '@') + (not context and x.startswith('@')) or (context is not False and x.startswith('***************')) or x.startswith('GIT binary patch')): gp = None @@ -2256,6 +2256,7 @@ 'context': get('unified', getter=ui.config), } buildopts['worddiff'] = ui.configbool('experimental', 'worddiff') + buildopts['xdiff'] = ui.configbool('experimental', 'xdiff') if git: buildopts['git'] = get('git') @@ -2342,7 +2343,7 @@ if hunksfilterfn is not None: # If the file has been removed, fctx2 is None; but this should # not occur here since we catch removed files early in - # cmdutil.getloglinerangerevs() for 'hg log -L'. + # logcmdutil.getlinerangerevs() for 'hg log -L'. assert fctx2 is not None, \ 'fctx2 unexpectly None in diff hunks filtering' hunks = hunksfilterfn(fctx2, hunks) @@ -2519,7 +2520,7 @@ yield (t, l) else: for token in tabsplitter.findall(stripline): - if '\t' == token[0]: + if token.startswith('\t'): yield (token, 'diff.tab') else: yield (token, label) @@ -2670,8 +2671,8 @@ def isempty(fctx): return fctx is None or fctx.size() == 0 - date1 = util.datestr(ctx1.date()) - date2 = util.datestr(ctx2.date()) + date1 = dateutil.datestr(ctx1.date()) + date2 = dateutil.datestr(ctx2.date()) gitmode = {'l': '120000', 'x': '100755', '': '100644'} @@ -2698,8 +2699,10 @@ if opts.git or losedatafn: flag2 = ctx2.flags(f2) # if binary is True, output "summary" or "base85", but not "text diff" - binary = not opts.text and any(f.isbinary() - for f in [fctx1, fctx2] if f is not None) + if opts.text: + binary = False + else: + binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None) if losedatafn and not opts.git: if (binary or @@ -2789,7 +2792,8 @@ uheaders, hunks = mdiff.unidiff(content1, date1, content2, date2, - path1, path2, opts=opts) + path1, path2, + binary=binary, opts=opts) header.extend(uheaders) yield fctx1, fctx2, header, hunks
--- a/mercurial/pathutil.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/pathutil.py Sun Mar 04 10:42:51 2018 -0500 @@ -65,7 +65,7 @@ parts = util.splitpath(path) if (os.path.splitdrive(path)[0] or _lowerclean(parts[0]) in ('.hg', '.hg.', '') - or os.pardir in parts): + or pycompat.ospardir in parts): raise error.Abort(_("path contains illegal component: %s") % path) # Windows shortname aliases for p in parts: @@ -81,7 +81,7 @@ pos = lparts.index(p) base = os.path.join(*parts[:pos]) raise error.Abort(_("path '%s' is inside nested repo %r") - % (path, base)) + % (path, pycompat.bytestr(base))) normparts = util.splitpath(normpath) assert len(parts) == len(normparts) @@ -119,13 +119,14 @@ raise else: if stat.S_ISLNK(st.st_mode): - msg = _('path %r traverses symbolic link %r') % (path, prefix) + msg = (_('path %r traverses symbolic link %r') + % (pycompat.bytestr(path), pycompat.bytestr(prefix))) raise error.Abort(msg) elif (stat.S_ISDIR(st.st_mode) and os.path.isdir(os.path.join(curpath, '.hg'))): if not self.callback or not self.callback(curpath): msg = _("path '%s' is inside nested repo %r") - raise error.Abort(msg % (path, prefix)) + raise error.Abort(msg % (path, pycompat.bytestr(prefix))) def check(self, path): try:
--- a/mercurial/phases.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/phases.py Sun Mar 04 10:42:51 2018 -0500 @@ -262,7 +262,8 @@ repo = repo.unfiltered() nativeroots = [] for phase in trackedphases: - nativeroots.append(map(repo.changelog.rev, self.phaseroots[phase])) + nativeroots.append(pycompat.maplist(repo.changelog.rev, + self.phaseroots[phase])) return repo.changelog.computephases(nativeroots) def _computephaserevspure(self, repo): @@ -326,7 +327,7 @@ def _write(self, fp): for phase, roots in enumerate(self.phaseroots): - for h in roots: + for h in sorted(roots): fp.write('%i %s\n' % (phase, hex(h))) self.dirty = False
--- a/mercurial/policy.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/policy.py Sun Mar 04 10:42:51 2018 -0500 @@ -44,11 +44,6 @@ if r'__pypy__' in sys.builtin_module_names: policy = b'cffi' -# Our C extensions aren't yet compatible with Python 3. So use pure Python -# on Python 3 for now. -if sys.version_info[0] >= 3: - policy = b'py' - # Environment variable can always force settings. if sys.version_info[0] >= 3: if r'HGMODULEPOLICY' in os.environ: @@ -71,7 +66,7 @@ # keep in sync with "version" in C modules _cextversions = { (r'cext', r'base85'): 1, - (r'cext', r'bdiff'): 1, + (r'cext', r'bdiff'): 3, (r'cext', r'diffhelpers'): 1, (r'cext', r'mpatch'): 1, (r'cext', r'osutil'): 3,
--- a/mercurial/posix.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/posix.py Sun Mar 04 10:42:51 2018 -0500 @@ -113,7 +113,7 @@ if l: if not stat.S_ISLNK(s): # switch file to link - fp = open(f) + fp = open(f, 'rb') data = fp.read() fp.close() unlink(f) @@ -121,7 +121,7 @@ os.symlink(data, f) except OSError: # failed to make a link, rewrite file - fp = open(f, "w") + fp = open(f, "wb") fp.write(data) fp.close() # no chmod needed at this point @@ -130,7 +130,7 @@ # switch link to file data = os.readlink(f) unlink(f) - fp = open(f, "w") + fp = open(f, "wb") fp.write(data) fp.close() s = 0o666 & ~umask # avoid restatting for chmod @@ -461,6 +461,10 @@ else: return "'%s'" % s.replace("'", "'\\''") +def shellsplit(s): + """Parse a command string in POSIX shell way (best-effort)""" + return pycompat.shlexsplit(s, posix=True) + def quotecommand(cmd): return cmd
--- a/mercurial/profiling.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/profiling.py Sun Mar 04 10:42:51 2018 -0500 @@ -14,6 +14,7 @@ encoding, error, extensions, + pycompat, util, ) @@ -143,7 +144,7 @@ elif profformat == 'hotpath': # inconsistent config: profiling.showmin limit = ui.configwith(fraction, 'profiling', 'showmin', 0.05) - kwargs['limit'] = limit + kwargs[r'limit'] = limit statprof.display(fp, data=data, format=displayformat, **kwargs) @@ -200,6 +201,17 @@ elif self._output: path = self._ui.expandpath(self._output) self._fp = open(path, 'wb') + elif pycompat.iswindows: + # parse escape sequence by win32print() + class uifp(object): + def __init__(self, ui): + self._ui = ui + def write(self, data): + self._ui.write_err(data) + def flush(self): + self._ui.flush() + self._fpdoclose = False + self._fp = uifp(self._ui) else: self._fpdoclose = False self._fp = self._ui.ferr
--- a/mercurial/progress.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/progress.py Sun Mar 04 10:42:51 2018 -0500 @@ -119,10 +119,9 @@ add = topic elif indicator == 'number': if total: - add = ('% ' + str(len(str(total))) + - 's/%s') % (pos, total) + add = b'%*d/%d' % (len(str(total)), pos, total) else: - add = str(pos) + add = b'%d' % pos elif indicator.startswith('item') and item: slice = 'end' if '-' in indicator:
--- a/mercurial/pure/base85.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/pure/base85.py Sun Mar 04 10:42:51 2018 -0500 @@ -9,8 +9,10 @@ import struct -_b85chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" \ - "abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~" +from .. import pycompat + +_b85chars = pycompat.bytestr("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdef" + "ghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~") _b85chars2 = [(a + b) for a in _b85chars for b in _b85chars] _b85dec = {} @@ -51,6 +53,7 @@ out = [] for i in range(0, len(text), 5): chunk = text[i:i + 5] + chunk = pycompat.bytestr(chunk) acc = 0 for j, c in enumerate(chunk): try:
--- a/mercurial/pure/bdiff.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/pure/bdiff.py Sun Mar 04 10:42:51 2018 -0500 @@ -90,3 +90,13 @@ text = re.sub('[ \t\r]+', ' ', text) text = text.replace(' \n', '\n') return text + +def splitnewlines(text): + '''like str.splitlines, but only split on newlines.''' + lines = [l + '\n' for l in text.split('\n')] + if lines: + if lines[-1] == '\n': + lines.pop() + else: + lines[-1] = lines[-1][:-1] + return lines
--- a/mercurial/pycompat.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/pycompat.py Sun Mar 04 10:42:51 2018 -0500 @@ -11,6 +11,7 @@ from __future__ import absolute_import import getopt +import inspect import os import shlex import sys @@ -47,9 +48,11 @@ fsencode = os.fsencode fsdecode = os.fsdecode + oscurdir = os.curdir.encode('ascii') oslinesep = os.linesep.encode('ascii') osname = os.name.encode('ascii') ospathsep = os.pathsep.encode('ascii') + ospardir = os.pardir.encode('ascii') ossep = os.sep.encode('ascii') osaltsep = os.altsep if osaltsep: @@ -65,6 +68,7 @@ maplist = lambda *args: list(map(*args)) ziplist = lambda *args: list(zip(*args)) rawinput = input + getargspec = inspect.getfullargspec # TODO: .buffer might not exist if std streams were replaced; we'll need # a silly wrapper to make a bytes stream backed by a unicode one. @@ -83,12 +87,13 @@ sysargv = list(map(os.fsencode, sys.argv)) bytechr = struct.Struct('>B').pack + byterepr = b'%r'.__mod__ class bytestr(bytes): """A bytes which mostly acts as a Python 2 str >>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1) - (b'', b'foo', b'ascii', b'1') + ('', 'foo', 'ascii', '1') >>> s = bytestr(b'foo') >>> assert s is bytestr(s) @@ -98,7 +103,7 @@ ... def __bytes__(self): ... return b'bytes' >>> bytestr(bytesable()) - b'bytes' + 'bytes' There's no implicit conversion from non-ascii str as its encoding is unknown: @@ -154,10 +159,19 @@ def __iter__(self): return iterbytestr(bytes.__iter__(self)) + def __repr__(self): + return bytes.__repr__(self)[1:] # drop b'' + def iterbytestr(s): """Iterate bytes as if it were a str object of Python 2""" return map(bytechr, s) + def maybebytestr(s): + """Promote bytes to bytestr""" + if isinstance(s, bytes): + return bytestr(s) + return s + def sysbytes(s): """Convert an internal str (e.g. 
keyword, __doc__) back to bytes @@ -180,11 +194,15 @@ def strurl(url): """Converts a bytes url back to str""" - return url.decode(u'ascii') + if isinstance(url, bytes): + return url.decode(u'ascii') + return url def bytesurl(url): """Converts a str url to bytes by encoding in ascii""" - return url.encode(u'ascii') + if isinstance(url, str): + return url.encode(u'ascii') + return url def raisewithtb(exc, tb): """Raise exception with the given traceback""" @@ -212,8 +230,8 @@ xrange = builtins.range unicode = str - def open(name, mode='r', buffering=-1): - return builtins.open(name, sysstr(mode), buffering) + def open(name, mode='r', buffering=-1, encoding=None): + return builtins.open(name, sysstr(mode), buffering, encoding) def _getoptbwrapper(orig, args, shortlist, namelist): """ @@ -249,21 +267,27 @@ return dic # TODO: handle shlex.shlex(). - def shlexsplit(s): + def shlexsplit(s, comments=False, posix=True): """ Takes bytes argument, convert it to str i.e. unicodes, pass that into shlex.split(), convert the returned value to bytes and return that for Python 3 compatibility as shelx.split() don't accept bytes on Python 3. 
""" - ret = shlex.split(s.decode('latin-1')) + ret = shlex.split(s.decode('latin-1'), comments, posix) return [a.encode('latin-1') for a in ret] + def emailparser(*args, **kwargs): + import email.parser + return email.parser.BytesParser(*args, **kwargs) + else: import cStringIO bytechr = chr + byterepr = repr bytestr = str iterbytestr = iter + maybebytestr = identity sysbytes = identity sysstr = identity strurl = identity @@ -298,9 +322,11 @@ strkwargs = identity byteskwargs = identity + oscurdir = os.curdir oslinesep = os.linesep osname = os.name ospathsep = os.pathsep + ospardir = os.pardir ossep = os.sep osaltsep = os.altsep stdin = sys.stdin @@ -316,6 +342,11 @@ maplist = map ziplist = zip rawinput = raw_input + getargspec = inspect.getargspec + + def emailparser(*args, **kwargs): + import email.parser + return email.parser.Parser(*args, **kwargs) isjython = sysplatform.startswith('java')
--- a/mercurial/registrar.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/registrar.py Sun Mar 04 10:42:51 2018 -0500 @@ -283,6 +283,14 @@ templatekeyword = registrar.templatekeyword() + # new API (since Mercurial 4.6) + @templatekeyword('mykeyword', requires={'repo', 'ctx'}) + def mykeywordfunc(context, mapping): + '''Explanation of this template keyword .... + ''' + pass + + # old API @templatekeyword('mykeyword') def mykeywordfunc(repo, ctx, templ, cache, revcache, **args): '''Explanation of this template keyword .... @@ -291,6 +299,11 @@ The first string argument is used also in online help. + Optional argument 'requires' should be a collection of resource names + which the template keyword depends on. This also serves as a flag to + switch to the new API. If 'requires' is unspecified, all template + keywords and resources are expanded to the function arguments. + 'templatekeyword' instance in example above can be used to decorate multiple functions. @@ -301,6 +314,9 @@ Otherwise, explicit 'templatekw.loadkeyword()' is needed. """ + def _extrasetup(self, name, func, requires=None): + func._requires = requires + class templatefilter(_templateregistrarbase): """Decorator to register template filer
--- a/mercurial/revlog.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/revlog.py Sun Mar 04 10:42:51 2018 -0500 @@ -13,8 +13,8 @@ from __future__ import absolute_import -import binascii import collections +import contextlib import errno import hashlib import heapq @@ -621,13 +621,12 @@ indexdata = '' self._initempty = True try: - f = self.opener(self.indexfile) - if (mmapindexthreshold is not None and - self.opener.fstat(f).st_size >= mmapindexthreshold): - indexdata = util.buffer(util.mmapread(f)) - else: - indexdata = f.read() - f.close() + with self._indexfp() as f: + if (mmapindexthreshold is not None and + self.opener.fstat(f).st_size >= mmapindexthreshold): + indexdata = util.buffer(util.mmapread(f)) + else: + indexdata = f.read() if len(indexdata) > 0: v = versionformat_unpack(indexdata[:4])[0] self._initempty = False @@ -682,6 +681,32 @@ def _compressor(self): return util.compengines[self._compengine].revlogcompressor() + def _indexfp(self, mode='r'): + """file object for the revlog's index file""" + args = {r'mode': mode} + if mode != 'r': + args[r'checkambig'] = self._checkambig + if mode == 'w': + args[r'atomictemp'] = True + return self.opener(self.indexfile, **args) + + def _datafp(self, mode='r'): + """file object for the revlog's data file""" + return self.opener(self.datafile, mode=mode) + + @contextlib.contextmanager + def _datareadfp(self, existingfp=None): + """file object suitable to read data""" + if existingfp is not None: + yield existingfp + else: + if self._inline: + func = self._indexfp + else: + func = self._datafp + with func() as fp: + yield fp + def tip(self): return self.node(len(self.index) - 2) def __contains__(self, rev): @@ -1404,7 +1429,7 @@ if maybewdir: raise error.WdirUnsupported return None - except (TypeError, binascii.Error): + except TypeError: pass def lookup(self, id): @@ -1490,15 +1515,6 @@ Returns a str or buffer of raw byte data. 
""" - if df is not None: - closehandle = False - else: - if self._inline: - df = self.opener(self.indexfile) - else: - df = self.opener(self.datafile) - closehandle = True - # Cache data both forward and backward around the requested # data, in a fixed size window. This helps speed up operations # involving reading the revlog backwards. @@ -1506,10 +1522,9 @@ realoffset = offset & ~(cachesize - 1) reallength = (((offset + length + cachesize) & ~(cachesize - 1)) - realoffset) - df.seek(realoffset) - d = df.read(reallength) - if closehandle: - df.close() + with self._datareadfp(df) as df: + df.seek(realoffset) + d = df.read(reallength) self._cachesegment(realoffset, d) if offset != realoffset or reallength != length: return util.buffer(d, offset - realoffset, length) @@ -1818,7 +1833,7 @@ raise RevlogError(_("integrity check failed on %s:%s") % (self.indexfile, pycompat.bytestr(revornode))) - def checkinlinesize(self, tr, fp=None): + def _enforceinlinesize(self, tr, fp=None): """Check if the revlog is too big for inline and convert if so. This should be called after revisions are added to the revlog. 
If the @@ -1847,24 +1862,20 @@ fp.flush() fp.close() - df = self.opener(self.datafile, 'w') - try: + with self._datafp('w') as df: for r in self: df.write(self._getsegmentforrevs(r, r)[1]) - finally: - df.close() - fp = self.opener(self.indexfile, 'w', atomictemp=True, - checkambig=self._checkambig) - self.version &= ~FLAG_INLINE_DATA - self._inline = False - for i in self: - e = self._io.packentry(self.index[i], self.node, self.version, i) - fp.write(e) + with self._indexfp('w') as fp: + self.version &= ~FLAG_INLINE_DATA + self._inline = False + io = self._io + for i in self: + e = io.packentry(self.index[i], self.node, self.version, i) + fp.write(e) - # if we don't call close, the temp file will never replace the - # real index - fp.close() + # the temp file replace the real index when we exit the context + # manager tr.replace(self.indexfile, trindex * self._io.size) self._chunkclear() @@ -1923,8 +1934,8 @@ """ dfh = None if not self._inline: - dfh = self.opener(self.datafile, "a+") - ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig) + dfh = self._datafp("a+") + ifh = self._indexfp("a+") try: return self._addrevision(node, rawtext, transaction, link, p1, p2, flags, cachedelta, ifh, dfh, @@ -2099,7 +2110,7 @@ if alwayscache and rawtext is None: rawtext = deltacomputer._buildtext(revinfo, fh) - if type(rawtext) == str: # only accept immutable objects + if type(rawtext) == bytes: # only accept immutable objects self._cache = (node, curr, rawtext) self._chainbasecache[curr] = chainbase return node @@ -2133,7 +2144,7 @@ ifh.write(entry) ifh.write(data[0]) ifh.write(data[1]) - self.checkinlinesize(transaction, ifh) + self._enforceinlinesize(transaction, ifh) def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None): """ @@ -2153,7 +2164,7 @@ end = 0 if r: end = self.end(r - 1) - ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig) + ifh = self._indexfp("a+") isize = r * self._io.size if self._inline: 
transaction.add(self.indexfile, end + isize, r) @@ -2161,7 +2172,7 @@ else: transaction.add(self.indexfile, isize, r) transaction.add(self.datafile, end) - dfh = self.opener(self.datafile, "a+") + dfh = self._datafp("a+") def flush(): if dfh: dfh.flush() @@ -2224,9 +2235,8 @@ # addrevision switched from inline to conventional # reopen the index ifh.close() - dfh = self.opener(self.datafile, "a+") - ifh = self.opener(self.indexfile, "a+", - checkambig=self._checkambig) + dfh = self._datafp("a+") + ifh = self._indexfp("a+") finally: if dfh: dfh.close() @@ -2328,10 +2338,9 @@ expected = max(0, self.end(len(self) - 1)) try: - f = self.opener(self.datafile) - f.seek(0, 2) - actual = f.tell() - f.close() + with self._datafp() as f: + f.seek(0, 2) + actual = f.tell() dd = actual - expected except IOError as inst: if inst.errno != errno.ENOENT:
--- a/mercurial/revset.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/revset.py Sun Mar 04 10:42:51 2018 -0500 @@ -30,6 +30,7 @@ smartset, util, ) +from .utils import dateutil # helpers for processing parsed tree getsymbol = revsetlang.getsymbol @@ -105,6 +106,9 @@ pass return None +def _sortedb(xs): + return sorted(util.rapply(pycompat.maybebytestr, xs)) + # operator methods def stringset(repo, subset, x, order): @@ -507,15 +511,7 @@ b.add(getbranch(r)) c = s.__contains__ return subset.filter(lambda r: c(r) or getbranch(r) in b, - condrepr=lambda: '<branch %r>' % sorted(b)) - -@predicate('bumped()', safe=True) -def bumped(repo, subset, x): - msg = ("'bumped()' is deprecated, " - "use 'phasedivergent()'") - repo.ui.deprecwarn(msg, '4.4') - - return phasedivergent(repo, subset, x) + condrepr=lambda: '<branch %r>' % _sortedb(b)) @predicate('phasedivergent()', safe=True) def phasedivergent(repo, subset, x): @@ -663,7 +659,7 @@ """ # i18n: "date" is a keyword ds = getstring(x, _("date requires a string")) - dm = util.matchdate(ds) + dm = dateutil.matchdate(ds) return subset.filter(lambda x: dm(repo[x].date()[0]), condrepr=('<date %r>', ds)) @@ -768,15 +764,7 @@ src = _getrevsource(repo, r) return subset.filter(dests.__contains__, - condrepr=lambda: '<destination %r>' % sorted(dests)) - -@predicate('divergent()', safe=True) -def divergent(repo, subset, x): - msg = ("'divergent()' is deprecated, " - "use 'contentdivergent()'") - repo.ui.deprecwarn(msg, '4.4') - - return contentdivergent(repo, subset, x) + condrepr=lambda: '<destination %r>' % _sortedb(dests)) @predicate('contentdivergent()', safe=True) def contentdivergent(repo, subset, x): @@ -1024,7 +1012,8 @@ # i18n: "grep" is a keyword gr = re.compile(getstring(x, _("grep requires a string"))) except re.error as e: - raise error.ParseError(_('invalid match pattern: %s') % e) + raise error.ParseError( + _('invalid match pattern: %s') % util.forcebytestr(e)) def matches(x): c = repo[x] @@ -1854,11 +1843,12 @@ 
keyflags = [] for k in keys.split(): fk = k - reverse = (k[0] == '-') + reverse = (k.startswith('-')) if reverse: k = k[1:] if k not in _sortkeyfuncs and k != 'topo': - raise error.ParseError(_("unknown sort key %r") % fk) + raise error.ParseError( + _("unknown sort key %r") % pycompat.bytestr(fk)) keyflags.append((k, reverse)) if len(keyflags) > 1 and any(k == 'topo' for k, reverse in keyflags): @@ -2031,14 +2021,6 @@ def tagged(repo, subset, x): return tag(repo, subset, x) -@predicate('unstable()', safe=True) -def unstable(repo, subset, x): - msg = ("'unstable()' is deprecated, " - "use 'orphan()'") - repo.ui.deprecwarn(msg, '4.4') - - return orphan(repo, subset, x) - @predicate('orphan()', safe=True) def orphan(repo, subset, x): """Non-obsolete changesets with obsolete ancestors. (EXPERIMENTAL) @@ -2080,7 +2062,7 @@ try: # fast path for integer revision r = int(t) - if str(r) != t or r not in cl: + if ('%d' % r) != t or r not in cl: raise ValueError revs = [r] except ValueError:
--- a/mercurial/revsetlang.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/revsetlang.py Sun Mar 04 10:42:51 2018 -0500 @@ -539,7 +539,21 @@ return tuple(foldconcat(t) for t in tree) def parse(spec, lookup=None): - return _parsewith(spec, lookup=lookup) + try: + return _parsewith(spec, lookup=lookup) + except error.ParseError as inst: + if len(inst.args) > 1: # has location + # Add 1 to location because unlike templates, revset parse errors + # point to the char where the error happened, not the char after. + loc = inst.args[1] + 1 + # Remove newlines -- spaces are equivalent whitespace. + spec = spec.replace('\n', ' ') + # We want the caret to point to the place in the template that + # failed to parse, but in a hint we get a open paren at the + # start. Therefore, we print "loc + 1" spaces (instead of "loc") + # to line up the caret with the location of the error. + inst.hint = spec + '\n' + ' ' * loc + '^ ' + _('here') + raise def _quote(s): r"""Quote a value in order to make it safe for the revset engine. @@ -635,7 +649,7 @@ "root(_list('a\\\\x00b\\\\x00c\\\\x00d'))" >>> formatspec(b'sort(%r, %ps)', b':', [b'desc', b'user']) "sort((:), 'desc', 'user')" - >>> formatspec('%ls', ['a', "'"]) + >>> formatspec(b'%ls', [b'a', b"'"]) "_list('a\\\\x00\\\\'')" ''' expr = pycompat.bytestr(expr) @@ -717,13 +731,13 @@ def gethashlikesymbols(tree): """returns the list of symbols of the tree that look like hashes - >>> gethashlikesymbols(('dagrange', ('symbol', '3'), ('symbol', 'abe3ff'))) + >>> gethashlikesymbols(parse(b'3::abe3ff')) ['3', 'abe3ff'] - >>> gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '.'))) + >>> gethashlikesymbols(parse(b'precursors(.)')) [] - >>> gethashlikesymbols(('func', ('symbol', 'precursors'), ('symbol', '34'))) + >>> gethashlikesymbols(parse(b'precursors(34)')) ['34'] - >>> gethashlikesymbols(('symbol', 'abe3ffZ')) + >>> gethashlikesymbols(parse(b'abe3ffZ')) [] """ if not tree:
--- a/mercurial/scmutil.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/scmutil.py Sun Mar 04 10:42:51 2018 -0500 @@ -162,13 +162,14 @@ reason = _('timed out waiting for lock held by %r') % inst.locker else: reason = _('lock held by %r') % inst.locker - ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason)) + ui.warn(_("abort: %s: %s\n") + % (inst.desc or util.forcebytestr(inst.filename), reason)) if not inst.locker: ui.warn(_("(lock might be very busy)\n")) except error.LockUnavailable as inst: ui.warn(_("abort: could not lock %s: %s\n") % - (inst.desc or inst.filename, - encoding.strtolocal(inst.strerror))) + (inst.desc or util.forcebytestr(inst.filename), + encoding.strtolocal(inst.strerror))) except error.OutOfBandError as inst: if inst.args: msg = _("abort: remote error:\n") @@ -185,7 +186,10 @@ ui.warn(_("(%s)\n") % inst.hint) except error.ResponseError as inst: ui.warn(_("abort: %s") % inst.args[0]) - if not isinstance(inst.args[1], basestring): + msg = inst.args[1] + if isinstance(msg, type(u'')): + msg = pycompat.sysbytes(msg) + elif not isinstance(inst.args[1], bytes): ui.warn(" %r\n" % (inst.args[1],)) elif not inst.args[1]: ui.warn(_(" empty string\n")) @@ -207,15 +211,15 @@ if inst.hint: ui.warn(_("(%s)\n") % inst.hint) except ImportError as inst: - ui.warn(_("abort: %s!\n") % inst) - m = str(inst).split()[-1] + ui.warn(_("abort: %s!\n") % util.forcebytestr(inst)) + m = util.forcebytestr(inst).split()[-1] if m in "mpatch bdiff".split(): ui.warn(_("(did you forget to compile extensions?)\n")) elif m in "zlib".split(): ui.warn(_("(is your Python install correct?)\n")) except IOError as inst: if util.safehasattr(inst, "code"): - ui.warn(_("abort: %s\n") % inst) + ui.warn(_("abort: %s\n") % util.forcebytestr(inst)) elif util.safehasattr(inst, "reason"): try: # usually it is in the form (errno, strerror) reason = inst.reason.args[1] @@ -232,7 +236,8 @@ elif getattr(inst, "strerror", None): if getattr(inst, "filename", None): 
ui.warn(_("abort: %s: %s\n") % ( - encoding.strtolocal(inst.strerror), inst.filename)) + encoding.strtolocal(inst.strerror), + util.forcebytestr(inst.filename))) else: ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror)) else: @@ -240,7 +245,8 @@ except OSError as inst: if getattr(inst, "filename", None) is not None: ui.warn(_("abort: %s: '%s'\n") % ( - encoding.strtolocal(inst.strerror), inst.filename)) + encoding.strtolocal(inst.strerror), + util.forcebytestr(inst.filename))) else: ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror)) except MemoryError: @@ -250,7 +256,7 @@ # Just in case catch this and and pass exit code to caller. return inst.code except socket.error as inst: - ui.warn(_("abort: %s\n") % inst.args[-1]) + ui.warn(_("abort: %s\n") % util.forcebytestr(inst.args[-1])) return -1 @@ -261,12 +267,15 @@ raise error.Abort(_("the name '%s' is reserved") % lbl) for c in (':', '\0', '\n', '\r'): if c in lbl: - raise error.Abort(_("%r cannot be used in a name") % c) + raise error.Abort( + _("%r cannot be used in a name") % pycompat.bytestr(c)) try: int(lbl) raise error.Abort(_("cannot use an integer as a name")) except ValueError: pass + if lbl.strip() != lbl: + raise error.Abort(_("leading or trailing whitespace in name %r") % lbl) def checkfilename(f): '''Check that the filename f is an acceptable filename for a tracked file''' @@ -355,12 +364,8 @@ samestat = getattr(os.path, 'samestat', None) if followsym and samestat is not None: def adddir(dirlst, dirname): - match = False dirstat = os.stat(dirname) - for lstdirstat in dirlst: - if samestat(dirstat, lstdirstat): - match = True - break + match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst) if not match: dirlst.append(dirstat) return not match @@ -411,7 +416,7 @@ def formatchangeid(ctx): """Format changectx as '{rev}:{node|formatnode}', which is the default - template provided by cmdutil.changeset_templater""" + template provided by logcmdutil.changesettemplater""" repo 
= ctx.repo() return formatrevnode(repo.ui, intrev(ctx), binnode(ctx)) @@ -885,7 +890,7 @@ missings = [] for r in requirements: if r not in supported: - if not r or not r[0].isalnum(): + if not r or not r[0:1].isalnum(): raise error.RequirementError(_(".hg/requires file is corrupt")) missings.append(r) missings.sort() @@ -1196,7 +1201,7 @@ if k == self.firstlinekey: e = "key name '%s' is reserved" % self.firstlinekey raise error.ProgrammingError(e) - if not k[0].isalpha(): + if not k[0:1].isalpha(): e = "keys must start with a letter in a key-value file" raise error.ProgrammingError(e) if not k.isalnum(): @@ -1222,6 +1227,11 @@ 'unbundle', ] +# a list of (repo, ctx, files) functions called by various commands to allow +# extensions to ensure the corresponding files are available locally, before the +# command uses them. +fileprefetchhooks = util.hooks() + # A marker that tells the evolve extension to suppress its own reporting _reportstroubledchangesets = True
--- a/mercurial/setdiscovery.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/setdiscovery.py Sun Mar 04 10:42:51 2018 -0500 @@ -130,7 +130,7 @@ sample = set(random.sample(sample, desiredlen)) return sample -def findcommonheads(ui, local, remote, +def findcommonheads(ui, local, remote, heads=None, initialsamplesize=100, fullsamplesize=200, abortwhenunrelated=True, @@ -155,11 +155,15 @@ sample = _limitsample(ownheads, initialsamplesize) # indices between sample and externalized version must match sample = list(sample) - batch = remote.iterbatch() - batch.heads() - batch.known(dag.externalizeall(sample)) - batch.submit() - srvheadhashes, yesno = batch.results() + if heads: + srvheadhashes = heads + yesno = remote.known(dag.externalizeall(sample)) + else: + batch = remote.iterbatch() + batch.heads() + batch.known(dag.externalizeall(sample)) + batch.submit() + srvheadhashes, yesno = batch.results() if cl.tip() == nullid: if srvheadhashes != [nullid]:
--- a/mercurial/smartset.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/smartset.py Sun Mar 04 10:42:51 2018 -0500 @@ -8,7 +8,9 @@ from __future__ import absolute_import from . import ( + encoding, error, + pycompat, util, ) @@ -19,7 +21,7 @@ type(r) example ======== ================================= tuple ('<not %r>', other) - str '<branch closed>' + bytes '<branch closed>' callable lambda: '<branch %r>' % sorted(b) object other ======== ================================= @@ -27,13 +29,16 @@ if r is None: return '' elif isinstance(r, tuple): - return r[0] % r[1:] - elif isinstance(r, str): + return r[0] % util.rapply(pycompat.maybebytestr, r[1:]) + elif isinstance(r, bytes): return r elif callable(r): return r() else: - return repr(r) + return pycompat.byterepr(r) + +def _typename(o): + return pycompat.sysbytes(type(o).__name__).lstrip('_') class abstractsmartset(object): @@ -306,7 +311,7 @@ self._istopo = False def __len__(self): - if '_list' in self.__dict__: + if r'_list' in self.__dict__: return len(self._list) else: return len(self._set) @@ -384,6 +389,7 @@ s._ascending = self._ascending return s + @encoding.strmethod def __repr__(self): d = {None: '', False: '-', True: '+'}[self._ascending] s = _formatsetrepr(self._datarepr) @@ -394,8 +400,8 @@ # We fallback to the sorted version for a stable output. 
if self._ascending is not None: l = self._asclist - s = repr(l) - return '<%s%s %s>' % (type(self).__name__, d, s) + s = pycompat.byterepr(l) + return '<%s%s %s>' % (_typename(self), d, s) class filteredset(abstractsmartset): """Duck type for baseset class which iterates lazily over the revisions in @@ -505,12 +511,13 @@ pass return x + @encoding.strmethod def __repr__(self): - xs = [repr(self._subset)] + xs = [pycompat.byterepr(self._subset)] s = _formatsetrepr(self._condrepr) if s: xs.append(s) - return '<%s %s>' % (type(self).__name__, ', '.join(xs)) + return '<%s %s>' % (_typename(self), ', '.join(xs)) def _iterordered(ascending, iter1, iter2): """produce an ordered iteration from two iterators with the same order @@ -755,9 +762,10 @@ self.reverse() return val + @encoding.strmethod def __repr__(self): d = {None: '', False: '-', True: '+'}[self._ascending] - return '<%s%s %r, %r>' % (type(self).__name__, d, self._r1, self._r2) + return '<%s%s %r, %r>' % (_typename(self), d, self._r1, self._r2) class generatorset(abstractsmartset): """Wrap a generator for lazy iteration @@ -918,9 +926,10 @@ return self.last() return next(it(), None) + @encoding.strmethod def __repr__(self): d = {False: '-', True: '+'}[self._ascending] - return '<%s%s>' % (type(self).__name__.lstrip('_'), d) + return '<%s%s>' % (_typename(self), d) class _generatorsetasc(generatorset): """Special case of generatorset optimized for ascending generators.""" @@ -1087,10 +1096,10 @@ y = max(self._end - start, self._start) return _spanset(x, y, self._ascending, self._hiddenrevs) + @encoding.strmethod def __repr__(self): d = {False: '-', True: '+'}[self._ascending] - return '<%s%s %d:%d>' % (type(self).__name__.lstrip('_'), d, - self._start, self._end) + return '<%s%s %d:%d>' % (_typename(self), d, self._start, self._end) class fullreposet(_spanset): """a set containing all revisions in the repo @@ -1123,7 +1132,7 @@ def prettyformat(revs): lines = [] - rs = repr(revs) + rs = pycompat.byterepr(revs) p = 
0 while p < len(rs): q = rs.find('<', p + 1)
--- a/mercurial/sshpeer.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/sshpeer.py Sun Mar 04 10:42:51 2018 -0500 @@ -8,6 +8,7 @@ from __future__ import absolute_import import re +import uuid from .i18n import _ from . import ( @@ -15,6 +16,8 @@ pycompat, util, wireproto, + wireprotoserver, + wireprototypes, ) def _serverquote(s): @@ -63,8 +66,11 @@ (This will only wait for data if the setup is supported by `util.poll`) """ - if getattr(self._main, 'hasbuffer', False): # getattr for classic pipe - return (True, True) # main has data, assume side is worth poking at. + if (isinstance(self._main, util.bufferedinputpipe) and + self._main.hasbuffer): + # Main has data. Assume side is worth poking at. + return True, True + fds = [self._main.fileno(), self._side.fileno()] try: act = util.poll(fds) @@ -114,43 +120,258 @@ def flush(self): return self._main.flush() -class sshpeer(wireproto.wirepeer): - def __init__(self, ui, path, create=False): - self._url = path - self._ui = ui - self._pipeo = self._pipei = self._pipee = None +def _cleanuppipes(ui, pipei, pipeo, pipee): + """Clean up pipes used by an SSH connection.""" + if pipeo: + pipeo.close() + if pipei: + pipei.close() + + if pipee: + # Try to read from the err descriptor until EOF. + try: + for l in pipee: + ui.status(_('remote: '), l) + except (IOError, ValueError): + pass + + pipee.close() + +def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None): + """Create an SSH connection to a server. + + Returns a tuple of (process, stdin, stdout, stderr) for the + spawned process. + """ + cmd = '%s %s %s' % ( + sshcmd, + args, + util.shellquote('%s -R %s serve --stdio' % ( + _serverquote(remotecmd), _serverquote(path)))) + + ui.debug('running %s\n' % cmd) + cmd = util.quotecommand(cmd) + + # no buffer allow the use of 'select' + # feel free to remove buffering and select usage when we ultimately + # move to threading. 
+ stdin, stdout, stderr, proc = util.popen4(cmd, bufsize=0, env=sshenv) + + return proc, stdin, stdout, stderr + +def _performhandshake(ui, stdin, stdout, stderr): + def badresponse(): + # Flush any output on stderr. + _forwardoutput(ui, stderr) + + msg = _('no suitable response from remote hg') + hint = ui.config('ui', 'ssherrorhint') + raise error.RepoError(msg, hint=hint) - u = util.url(path, parsequery=False, parsefragment=False) - if u.scheme != 'ssh' or not u.host or u.path is None: - self._abort(error.RepoError(_("couldn't parse location %s") % path)) + # The handshake consists of sending wire protocol commands in reverse + # order of protocol implementation and then sniffing for a response + # to one of them. + # + # Those commands (from oldest to newest) are: + # + # ``between`` + # Asks for the set of revisions between a pair of revisions. Command + # present in all Mercurial server implementations. + # + # ``hello`` + # Instructs the server to advertise its capabilities. Introduced in + # Mercurial 0.9.1. + # + # ``upgrade`` + # Requests upgrade from default transport protocol version 1 to + # a newer version. Introduced in Mercurial 4.6 as an experimental + # feature. + # + # The ``between`` command is issued with a request for the null + # range. If the remote is a Mercurial server, this request will + # generate a specific response: ``1\n\n``. This represents the + # wire protocol encoded value for ``\n``. We look for ``1\n\n`` + # in the output stream and know this is the response to ``between`` + # and we're at the end of our handshake reply. + # + # The response to the ``hello`` command will be a line with the + # length of the value returned by that command followed by that + # value. If the server doesn't support ``hello`` (which should be + # rare), that line will be ``0\n``. Otherwise, the value will contain + # RFC 822 like lines. Of these, the ``capabilities:`` line contains + # the capabilities of the server. 
+ # + # The ``upgrade`` command isn't really a command in the traditional + # sense of version 1 of the transport because it isn't using the + # proper mechanism for formatting insteads: instead, it just encodes + # arguments on the line, delimited by spaces. + # + # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``. + # If the server doesn't support protocol upgrades, it will reply to + # this line with ``0\n``. Otherwise, it emits an + # ``upgraded <token> <protocol>`` line to both stdout and stderr. + # Content immediately following this line describes additional + # protocol and server state. + # + # In addition to the responses to our command requests, the server + # may emit "banner" output on stdout. SSH servers are allowed to + # print messages to stdout on login. Issuing commands on connection + # allows us to flush this banner output from the server by scanning + # for output to our well-known ``between`` command. Of course, if + # the banner contains ``1\n\n``, this will throw off our detection. - util.checksafessh(path) + requestlog = ui.configbool('devel', 'debug.peer-request') + + # Generate a random token to help identify responses to version 2 + # upgrade request. + token = pycompat.sysbytes(str(uuid.uuid4())) + upgradecaps = [ + ('proto', wireprotoserver.SSHV2), + ] + upgradecaps = util.urlreq.urlencode(upgradecaps) - if u.passwd is not None: - self._abort(error.RepoError(_("password in URL not supported"))) + try: + pairsarg = '%s-%s' % ('0' * 40, '0' * 40) + handshake = [ + 'hello\n', + 'between\n', + 'pairs %d\n' % len(pairsarg), + pairsarg, + ] + + # Request upgrade to version 2 if configured. + if ui.configbool('experimental', 'sshpeer.advertise-v2'): + ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps)) + handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps)) - self._user = u.user - self._host = u.host - self._port = u.port - self._path = u.path or '.' 
+ if requestlog: + ui.debug('devel-peer-request: hello\n') + ui.debug('sending hello command\n') + if requestlog: + ui.debug('devel-peer-request: between\n') + ui.debug('devel-peer-request: pairs: %d bytes\n' % len(pairsarg)) + ui.debug('sending between command\n') + + stdin.write(''.join(handshake)) + stdin.flush() + except IOError: + badresponse() + + # Assume version 1 of wire protocol by default. + protoname = wireprototypes.SSHV1 + reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token)) + + lines = ['', 'dummy'] + max_noise = 500 + while lines[-1] and max_noise: + try: + l = stdout.readline() + _forwardoutput(ui, stderr) - sshcmd = self.ui.config("ui", "ssh") - remotecmd = self.ui.config("ui", "remotecmd") - sshaddenv = dict(self.ui.configitems("sshenv")) - sshenv = util.shellenviron(sshaddenv) + # Look for reply to protocol upgrade request. It has a token + # in it, so there should be no false positives. + m = reupgraded.match(l) + if m: + protoname = m.group(1) + ui.debug('protocol upgraded to %s\n' % protoname) + # If an upgrade was handled, the ``hello`` and ``between`` + # requests are ignored. The next output belongs to the + # protocol, so stop scanning lines. + break + + # Otherwise it could be a banner, ``0\n`` response if server + # doesn't support upgrade. + + if lines[-1] == '1\n' and l == '\n': + break + if l: + ui.debug('remote: ', l) + lines.append(l) + max_noise -= 1 + except IOError: + badresponse() + else: + badresponse() + + caps = set() - args = util.sshargs(sshcmd, self._host, self._user, self._port) + # For version 1, we should see a ``capabilities`` line in response to the + # ``hello`` command. + if protoname == wireprototypes.SSHV1: + for l in reversed(lines): + # Look for response to ``hello`` command. Scan from the back so + # we don't misinterpret banner output as the command reply. 
+ if l.startswith('capabilities:'): + caps.update(l[:-1].split(':')[1].split()) + break + elif protoname == wireprotoserver.SSHV2: + # We see a line with number of bytes to follow and then a value + # looking like ``capabilities: *``. + line = stdout.readline() + try: + valuelen = int(line) + except ValueError: + badresponse() + + capsline = stdout.read(valuelen) + if not capsline.startswith('capabilities: '): + badresponse() + + ui.debug('remote: %s\n' % capsline) + + caps.update(capsline.split(':')[1].split()) + # Trailing newline. + stdout.read(1) + + # Error if we couldn't find capabilities, this means: + # + # 1. Remote isn't a Mercurial server + # 2. Remote is a <0.9.1 Mercurial server + # 3. Remote is a future Mercurial server that dropped ``hello`` + # and other attempted handshake mechanisms. + if not caps: + badresponse() - if create: - cmd = '%s %s %s' % (sshcmd, args, - util.shellquote("%s init %s" % - (_serverquote(remotecmd), _serverquote(self._path)))) - ui.debug('running %s\n' % cmd) - res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv) - if res != 0: - self._abort(error.RepoError(_("could not create remote repo"))) + # Flush any output on stderr before proceeding. + _forwardoutput(ui, stderr) + + return protoname, caps + +class sshv1peer(wireproto.wirepeer): + def __init__(self, ui, url, proc, stdin, stdout, stderr, caps, + autoreadstderr=True): + """Create a peer from an existing SSH connection. - self._validaterepo(sshcmd, args, remotecmd, sshenv) + ``proc`` is a handle on the underlying SSH process. + ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio + pipes for that process. + ``caps`` is a set of capabilities supported by the remote. + ``autoreadstderr`` denotes whether to automatically read from + stderr and to forward its output. + """ + self._url = url + self._ui = ui + # self._subprocess is unused. Keeping a handle on the process + # holds a reference and prevents it from being garbage collected. 
+ self._subprocess = proc + + # And we hook up our "doublepipe" wrapper to allow querying + # stderr any time we perform I/O. + if autoreadstderr: + stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr) + stdin = doublepipe(ui, stdin, stderr) + + self._pipeo = stdin + self._pipei = stdout + self._pipee = stderr + self._caps = caps + self._autoreadstderr = autoreadstderr + + # Commands that have a "framed" response where the first line of the + # response contains the length of that response. + _FRAMED_COMMANDS = { + 'batch', + } # Begin of _basepeer interface. @@ -182,64 +403,6 @@ # End of _basewirecommands interface. - def _validaterepo(self, sshcmd, args, remotecmd, sshenv=None): - # cleanup up previous run - self._cleanup() - - cmd = '%s %s %s' % (sshcmd, args, - util.shellquote("%s -R %s serve --stdio" % - (_serverquote(remotecmd), _serverquote(self._path)))) - self.ui.debug('running %s\n' % cmd) - cmd = util.quotecommand(cmd) - - # while self._subprocess isn't used, having it allows the subprocess to - # to clean up correctly later - # - # no buffer allow the use of 'select' - # feel free to remove buffering and select usage when we ultimately - # move to threading. 
- sub = util.popen4(cmd, bufsize=0, env=sshenv) - self._pipeo, self._pipei, self._pipee, self._subprocess = sub - - self._pipei = util.bufferedinputpipe(self._pipei) - self._pipei = doublepipe(self.ui, self._pipei, self._pipee) - self._pipeo = doublepipe(self.ui, self._pipeo, self._pipee) - - def badresponse(): - msg = _("no suitable response from remote hg") - hint = self.ui.config("ui", "ssherrorhint") - self._abort(error.RepoError(msg, hint=hint)) - - try: - # skip any noise generated by remote shell - self._callstream("hello") - r = self._callstream("between", pairs=("%s-%s" % ("0"*40, "0"*40))) - except IOError: - badresponse() - - lines = ["", "dummy"] - max_noise = 500 - while lines[-1] and max_noise: - try: - l = r.readline() - self._readerr() - if lines[-1] == "1\n" and l == "\n": - break - if l: - self.ui.debug("remote: ", l) - lines.append(l) - max_noise -= 1 - except IOError: - badresponse() - else: - badresponse() - - self._caps = set() - for l in reversed(lines): - if l.startswith("capabilities:"): - self._caps.update(l[:-1].split(":")[1].split()) - break - def _readerr(self): _forwardoutput(self.ui, self._pipee) @@ -248,41 +411,11 @@ raise exception def _cleanup(self): - if self._pipeo is None: - return - self._pipeo.close() - self._pipei.close() - try: - # read the error descriptor until EOF - for l in self._pipee: - self.ui.status(_("remote: "), l) - except (IOError, ValueError): - pass - self._pipee.close() + _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee) __del__ = _cleanup - def _submitbatch(self, req): - rsp = self._callstream("batch", cmds=wireproto.encodebatchcmds(req)) - available = self._getamount() - # TODO this response parsing is probably suboptimal for large - # batches with large responses. 
- toread = min(available, 1024) - work = rsp.read(toread) - available -= toread - chunk = work - while chunk: - while ';' in work: - one, work = work.split(';', 1) - yield wireproto.unescapearg(one) - toread = min(available, 1024) - chunk = rsp.read(toread) - available -= toread - work += chunk - yield wireproto.unescapearg(work) - - def _callstream(self, cmd, **args): - args = pycompat.byteskwargs(args) + def _sendrequest(self, cmd, args, framed=False): if (self.ui.debugflag and self.ui.configbool('devel', 'debug.peer-request')): dbg = self.ui.debug @@ -316,58 +449,164 @@ self._pipeo.write(v) self._pipeo.flush() + # We know exactly how many bytes are in the response. So return a proxy + # around the raw output stream that allows reading exactly this many + # bytes. Callers then can read() without fear of overrunning the + # response. + if framed: + amount = self._getamount() + return util.cappedreader(self._pipei, amount) + return self._pipei + def _callstream(self, cmd, **args): + args = pycompat.byteskwargs(args) + return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS) + def _callcompressable(self, cmd, **args): - return self._callstream(cmd, **args) + args = pycompat.byteskwargs(args) + return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS) def _call(self, cmd, **args): - self._callstream(cmd, **args) - return self._recv() + args = pycompat.byteskwargs(args) + return self._sendrequest(cmd, args, framed=True).read() def _callpush(self, cmd, fp, **args): + # The server responds with an empty frame if the client should + # continue submitting the payload. r = self._call(cmd, **args) if r: return '', r + + # The payload consists of frames with content followed by an empty + # frame. 
for d in iter(lambda: fp.read(4096), ''): - self._send(d) - self._send("", flush=True) - r = self._recv() + self._writeframed(d) + self._writeframed("", flush=True) + + # In case of success, there is an empty frame and a frame containing + # the integer result (as a string). + # In case of error, there is a non-empty frame containing the error. + r = self._readframed() if r: return '', r - return self._recv(), '' + return self._readframed(), '' def _calltwowaystream(self, cmd, fp, **args): + # The server responds with an empty frame if the client should + # continue submitting the payload. r = self._call(cmd, **args) if r: # XXX needs to be made better raise error.Abort(_('unexpected remote reply: %s') % r) + + # The payload consists of frames with content followed by an empty + # frame. for d in iter(lambda: fp.read(4096), ''): - self._send(d) - self._send("", flush=True) + self._writeframed(d) + self._writeframed("", flush=True) + return self._pipei def _getamount(self): l = self._pipei.readline() if l == '\n': - self._readerr() + if self._autoreadstderr: + self._readerr() msg = _('check previous remote output') self._abort(error.OutOfBandError(hint=msg)) - self._readerr() + if self._autoreadstderr: + self._readerr() try: return int(l) except ValueError: self._abort(error.ResponseError(_("unexpected response:"), l)) - def _recv(self): - return self._pipei.read(self._getamount()) + def _readframed(self): + size = self._getamount() + if not size: + return b'' - def _send(self, data, flush=False): + return self._pipei.read(size) + + def _writeframed(self, data, flush=False): self._pipeo.write("%d\n" % len(data)) if data: self._pipeo.write(data) if flush: self._pipeo.flush() - self._readerr() + if self._autoreadstderr: + self._readerr() + +class sshv2peer(sshv1peer): + """A peer that speakers version 2 of the transport protocol.""" + # Currently version 2 is identical to version 1 post handshake. + # And handshake is performed before the peer is instantiated. 
So + # we need no custom code. + +def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True): + """Make a peer instance from existing pipes. + + ``path`` and ``proc`` are stored on the eventual peer instance and may + not be used for anything meaningful. + + ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the + SSH server's stdio handles. + + This function is factored out to allow creating peers that don't + actually spawn a new process. It is useful for starting SSH protocol + servers and clients via non-standard means, which can be useful for + testing. + """ + try: + protoname, caps = _performhandshake(ui, stdin, stdout, stderr) + except Exception: + _cleanuppipes(ui, stdout, stdin, stderr) + raise -instance = sshpeer + if protoname == wireprototypes.SSHV1: + return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps, + autoreadstderr=autoreadstderr) + elif protoname == wireprototypes.SSHV2: + return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps, + autoreadstderr=autoreadstderr) + else: + _cleanuppipes(ui, stdout, stdin, stderr) + raise error.RepoError(_('unknown version of SSH protocol: %s') % + protoname) + +def instance(ui, path, create): + """Create an SSH peer. + + The returned object conforms to the ``wireproto.wirepeer`` interface. + """ + u = util.url(path, parsequery=False, parsefragment=False) + if u.scheme != 'ssh' or not u.host or u.path is None: + raise error.RepoError(_("couldn't parse location %s") % path) + + util.checksafessh(path) + + if u.passwd is not None: + raise error.RepoError(_('password in URL not supported')) + + sshcmd = ui.config('ui', 'ssh') + remotecmd = ui.config('ui', 'remotecmd') + sshaddenv = dict(ui.configitems('sshenv')) + sshenv = util.shellenviron(sshaddenv) + remotepath = u.path or '.' 
+ + args = util.sshargs(sshcmd, u.host, u.user, u.port) + + if create: + cmd = '%s %s %s' % (sshcmd, args, + util.shellquote('%s init %s' % + (_serverquote(remotecmd), _serverquote(remotepath)))) + ui.debug('running %s\n' % cmd) + res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv) + if res != 0: + raise error.RepoError(_('could not create remote repo')) + + proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd, + remotepath, sshenv) + + return makepeer(ui, path, proc, stdin, stdout, stderr)
--- a/mercurial/sshserver.py Sat Mar 03 22:29:24 2018 -0500 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,131 +0,0 @@ -# sshserver.py - ssh protocol server support for mercurial -# -# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> -# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> -# -# This software may be used and distributed according to the terms of the -# GNU General Public License version 2 or any later version. - -from __future__ import absolute_import - -import sys - -from .i18n import _ -from . import ( - encoding, - error, - hook, - util, - wireproto, -) - -class sshserver(wireproto.abstractserverproto): - def __init__(self, ui, repo): - self.ui = ui - self.repo = repo - self.lock = None - self.fin = ui.fin - self.fout = ui.fout - self.name = 'ssh' - - hook.redirect(True) - ui.fout = repo.ui.fout = ui.ferr - - # Prevent insertion/deletion of CRs - util.setbinary(self.fin) - util.setbinary(self.fout) - - def getargs(self, args): - data = {} - keys = args.split() - for n in xrange(len(keys)): - argline = self.fin.readline()[:-1] - arg, l = argline.split() - if arg not in keys: - raise error.Abort(_("unexpected parameter %r") % arg) - if arg == '*': - star = {} - for k in xrange(int(l)): - argline = self.fin.readline()[:-1] - arg, l = argline.split() - val = self.fin.read(int(l)) - star[arg] = val - data['*'] = star - else: - val = self.fin.read(int(l)) - data[arg] = val - return [data[k] for k in keys] - - def getarg(self, name): - return self.getargs(name)[0] - - def getfile(self, fpout): - self.sendresponse('') - count = int(self.fin.readline()) - while count: - fpout.write(self.fin.read(count)) - count = int(self.fin.readline()) - - def redirect(self): - pass - - def sendresponse(self, v): - self.fout.write("%d\n" % len(v)) - self.fout.write(v) - self.fout.flush() - - def sendstream(self, source): - write = self.fout.write - for chunk in source.gen: - write(chunk) - self.fout.flush() - - def sendpushresponse(self, rsp): - 
self.sendresponse('') - self.sendresponse(str(rsp.res)) - - def sendpusherror(self, rsp): - self.sendresponse(rsp.res) - - def sendooberror(self, rsp): - self.ui.ferr.write('%s\n-\n' % rsp.message) - self.ui.ferr.flush() - self.fout.write('\n') - self.fout.flush() - - def serve_forever(self): - try: - while self.serve_one(): - pass - finally: - if self.lock is not None: - self.lock.release() - sys.exit(0) - - handlers = { - str: sendresponse, - wireproto.streamres: sendstream, - wireproto.streamres_legacy: sendstream, - wireproto.pushres: sendpushresponse, - wireproto.pusherr: sendpusherror, - wireproto.ooberror: sendooberror, - } - - def serve_one(self): - cmd = self.fin.readline()[:-1] - if cmd and cmd in wireproto.commands: - rsp = wireproto.dispatch(self.repo, self, cmd) - self.handlers[rsp.__class__](self, rsp) - elif cmd: - impl = getattr(self, 'do_' + cmd, None) - if impl: - r = impl() - if r is not None: - self.sendresponse(r) - else: - self.sendresponse("") - return cmd != '' - - def _client(self): - client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0] - return 'remote:ssh:' + client
--- a/mercurial/statichttprepo.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/statichttprepo.py Sun Mar 04 10:42:51 2018 -0500 @@ -13,7 +13,6 @@ from .i18n import _ from . import ( - byterange, changelog, error, localrepo, @@ -82,10 +81,36 @@ def close(self): pass +# _RangeError and _HTTPRangeHandler were originally in byterange.py, +# which was itself extracted from urlgrabber. See the last version of +# byterange.py from history if you need more information. +class _RangeError(IOError): + """Error raised when an unsatisfiable range is requested.""" + +class _HTTPRangeHandler(urlreq.basehandler): + """Handler that enables HTTP Range headers. + + This was extremely simple. The Range header is a HTTP feature to + begin with so all this class does is tell urllib2 that the + "206 Partial Content" response from the HTTP server is what we + expected. + """ + + def http_error_206(self, req, fp, code, msg, hdrs): + # 206 Partial Content Response + r = urlreq.addinfourl(fp, hdrs, req.get_full_url()) + r.code = code + r.msg = msg + return r + + def http_error_416(self, req, fp, code, msg, hdrs): + # HTTP's Range Not Satisfiable error + raise _RangeError('Requested Range Not Satisfiable') + def build_opener(ui, authinfo): # urllib cannot handle URLs with embedded user or passwd urlopener = url.opener(ui, authinfo) - urlopener.add_handler(byterange.HTTPRangeHandler()) + urlopener.add_handler(_HTTPRangeHandler()) class statichttpvfs(vfsmod.abstractvfs): def __init__(self, base):
--- a/mercurial/subrepo.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/subrepo.py Sun Mar 04 10:42:51 2018 -0500 @@ -1,4 +1,4 @@ -# subrepo.py - sub-repository handling for Mercurial +# subrepo.py - sub-repository classes and factory # # Copyright 2009-2010 Matt Mackall <mpm@selenic.com> # @@ -19,30 +19,31 @@ import tarfile import xml.dom.minidom - from .i18n import _ from . import ( cmdutil, - config, encoding, error, exchange, - filemerge, + logcmdutil, match as matchmod, node, pathutil, phases, pycompat, scmutil, + subrepoutil, util, vfs as vfsmod, ) +from .utils import dateutil hg = None +reporelpath = subrepoutil.reporelpath +subrelpath = subrepoutil.subrelpath +_abssource = subrepoutil._abssource propertycache = util.propertycache -nullstate = ('', '', 'empty') - def _expandedabspath(path): ''' get a path or url and if it is a path expand it and return an absolute path @@ -73,291 +74,14 @@ raise ex except error.Abort as ex: subrepo = subrelpath(self) - errormsg = str(ex) + ' ' + _('(in subrepository "%s")') % subrepo + errormsg = (util.forcebytestr(ex) + ' ' + + _('(in subrepository "%s")') % subrepo) # avoid handling this exception by raising a SubrepoAbort exception raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo, cause=sys.exc_info()) return res return decoratedmethod -def state(ctx, ui): - """return a state dict, mapping subrepo paths configured in .hgsub - to tuple: (source from .hgsub, revision from .hgsubstate, kind - (key in types dict)) - """ - p = config.config() - repo = ctx.repo() - def read(f, sections=None, remap=None): - if f in ctx: - try: - data = ctx[f].data() - except IOError as err: - if err.errno != errno.ENOENT: - raise - # handle missing subrepo spec files as removed - ui.warn(_("warning: subrepo spec file \'%s\' not found\n") % - repo.pathto(f)) - return - p.parse(f, data, sections, remap, read) - else: - raise error.Abort(_("subrepo spec file \'%s\' not found") % - repo.pathto(f)) - if '.hgsub' in ctx: - read('.hgsub') - 
- for path, src in ui.configitems('subpaths'): - p.set('subpaths', path, src, ui.configsource('subpaths', path)) - - rev = {} - if '.hgsubstate' in ctx: - try: - for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()): - l = l.lstrip() - if not l: - continue - try: - revision, path = l.split(" ", 1) - except ValueError: - raise error.Abort(_("invalid subrepository revision " - "specifier in \'%s\' line %d") - % (repo.pathto('.hgsubstate'), (i + 1))) - rev[path] = revision - except IOError as err: - if err.errno != errno.ENOENT: - raise - - def remap(src): - for pattern, repl in p.items('subpaths'): - # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub - # does a string decode. - repl = util.escapestr(repl) - # However, we still want to allow back references to go - # through unharmed, so we turn r'\\1' into r'\1'. Again, - # extra escapes are needed because re.sub string decodes. - repl = re.sub(br'\\\\([0-9]+)', br'\\\1', repl) - try: - src = re.sub(pattern, repl, src, 1) - except re.error as e: - raise error.Abort(_("bad subrepository pattern in %s: %s") - % (p.source('subpaths', pattern), e)) - return src - - state = {} - for path, src in p[''].items(): - kind = 'hg' - if src.startswith('['): - if ']' not in src: - raise error.Abort(_('missing ] in subrepository source')) - kind, src = src.split(']', 1) - kind = kind[1:] - src = src.lstrip() # strip any extra whitespace after ']' - - if not util.url(src).isabs(): - parent = _abssource(repo, abort=False) - if parent: - parent = util.url(parent) - parent.path = posixpath.join(parent.path or '', src) - parent.path = posixpath.normpath(parent.path) - joined = str(parent) - # Remap the full joined path and use it if it changes, - # else remap the original source. 
- remapped = remap(joined) - if remapped == joined: - src = remap(src) - else: - src = remapped - - src = remap(src) - state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind) - - return state - -def writestate(repo, state): - """rewrite .hgsubstate in (outer) repo with these subrepo states""" - lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state) - if state[s][1] != nullstate[1]] - repo.wwrite('.hgsubstate', ''.join(lines), '') - -def submerge(repo, wctx, mctx, actx, overwrite, labels=None): - """delegated from merge.applyupdates: merging of .hgsubstate file - in working context, merging context and ancestor context""" - if mctx == actx: # backwards? - actx = wctx.p1() - s1 = wctx.substate - s2 = mctx.substate - sa = actx.substate - sm = {} - - repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx)) - - def debug(s, msg, r=""): - if r: - r = "%s:%s:%s" % r - repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r)) - - promptssrc = filemerge.partextras(labels) - for s, l in sorted(s1.iteritems()): - prompts = None - a = sa.get(s, nullstate) - ld = l # local state with possible dirty flag for compares - if wctx.sub(s).dirty(): - ld = (l[0], l[1] + "+") - if wctx == actx: # overwrite - a = ld - - prompts = promptssrc.copy() - prompts['s'] = s - if s in s2: - r = s2[s] - if ld == r or r == a: # no change or local is newer - sm[s] = l - continue - elif ld == a: # other side changed - debug(s, "other changed, get", r) - wctx.sub(s).get(r, overwrite) - sm[s] = r - elif ld[0] != r[0]: # sources differ - prompts['lo'] = l[0] - prompts['ro'] = r[0] - if repo.ui.promptchoice( - _(' subrepository sources for %(s)s differ\n' - 'use (l)ocal%(l)s source (%(lo)s)' - ' or (r)emote%(o)s source (%(ro)s)?' 
- '$$ &Local $$ &Remote') % prompts, 0): - debug(s, "prompt changed, get", r) - wctx.sub(s).get(r, overwrite) - sm[s] = r - elif ld[1] == a[1]: # local side is unchanged - debug(s, "other side changed, get", r) - wctx.sub(s).get(r, overwrite) - sm[s] = r - else: - debug(s, "both sides changed") - srepo = wctx.sub(s) - prompts['sl'] = srepo.shortid(l[1]) - prompts['sr'] = srepo.shortid(r[1]) - option = repo.ui.promptchoice( - _(' subrepository %(s)s diverged (local revision: %(sl)s, ' - 'remote revision: %(sr)s)\n' - '(M)erge, keep (l)ocal%(l)s or keep (r)emote%(o)s?' - '$$ &Merge $$ &Local $$ &Remote') - % prompts, 0) - if option == 0: - wctx.sub(s).merge(r) - sm[s] = l - debug(s, "merge with", r) - elif option == 1: - sm[s] = l - debug(s, "keep local subrepo revision", l) - else: - wctx.sub(s).get(r, overwrite) - sm[s] = r - debug(s, "get remote subrepo revision", r) - elif ld == a: # remote removed, local unchanged - debug(s, "remote removed, remove") - wctx.sub(s).remove() - elif a == nullstate: # not present in remote or ancestor - debug(s, "local added, keep") - sm[s] = l - continue - else: - if repo.ui.promptchoice( - _(' local%(l)s changed subrepository %(s)s' - ' which remote%(o)s removed\n' - 'use (c)hanged version or (d)elete?' - '$$ &Changed $$ &Delete') % prompts, 0): - debug(s, "prompt remove") - wctx.sub(s).remove() - - for s, r in sorted(s2.items()): - prompts = None - if s in s1: - continue - elif s not in sa: - debug(s, "remote added, get", r) - mctx.sub(s).get(r) - sm[s] = r - elif r != sa[s]: - prompts = promptssrc.copy() - prompts['s'] = s - if repo.ui.promptchoice( - _(' remote%(o)s changed subrepository %(s)s' - ' which local%(l)s removed\n' - 'use (c)hanged version or (d)elete?' 
- '$$ &Changed $$ &Delete') % prompts, 0) == 0: - debug(s, "prompt recreate", r) - mctx.sub(s).get(r) - sm[s] = r - - # record merged .hgsubstate - writestate(repo, sm) - return sm - -def precommit(ui, wctx, status, match, force=False): - """Calculate .hgsubstate changes that should be applied before committing - - Returns (subs, commitsubs, newstate) where - - subs: changed subrepos (including dirty ones) - - commitsubs: dirty subrepos which the caller needs to commit recursively - - newstate: new state dict which the caller must write to .hgsubstate - - This also updates the given status argument. - """ - subs = [] - commitsubs = set() - newstate = wctx.substate.copy() - - # only manage subrepos and .hgsubstate if .hgsub is present - if '.hgsub' in wctx: - # we'll decide whether to track this ourselves, thanks - for c in status.modified, status.added, status.removed: - if '.hgsubstate' in c: - c.remove('.hgsubstate') - - # compare current state to last committed state - # build new substate based on last committed state - oldstate = wctx.p1().substate - for s in sorted(newstate.keys()): - if not match(s): - # ignore working copy, use old state if present - if s in oldstate: - newstate[s] = oldstate[s] - continue - if not force: - raise error.Abort( - _("commit with new subrepo %s excluded") % s) - dirtyreason = wctx.sub(s).dirtyreason(True) - if dirtyreason: - if not ui.configbool('ui', 'commitsubrepos'): - raise error.Abort(dirtyreason, - hint=_("use --subrepos for recursive commit")) - subs.append(s) - commitsubs.add(s) - else: - bs = wctx.sub(s).basestate() - newstate[s] = (newstate[s][0], bs, newstate[s][2]) - if oldstate.get(s, (None, None, None))[1] != bs: - subs.append(s) - - # check for removed subrepos - for p in wctx.parents(): - r = [s for s in p.substate if s not in newstate] - subs += [s for s in r if match(s)] - if subs: - if (not match('.hgsub') and - '.hgsub' in (wctx.modified() + wctx.added())): - raise error.Abort(_("can't commit subrepos 
without .hgsub")) - status.modified.insert(0, '.hgsubstate') - - elif '.hgsub' in status.removed: - # clean up .hgsubstate when .hgsub is removed - if ('.hgsubstate' in wctx and - '.hgsubstate' not in (status.modified + status.added + - status.removed)): - status.removed.insert(0, '.hgsubstate') - - return subs, commitsubs, newstate - def _updateprompt(ui, sub, dirty, local, remote): if dirty: msg = (_(' subrepository sources for %s differ\n' @@ -372,64 +96,6 @@ % (subrelpath(sub), local, remote)) return ui.promptchoice(msg, 0) -def reporelpath(repo): - """return path to this (sub)repo as seen from outermost repo""" - parent = repo - while util.safehasattr(parent, '_subparent'): - parent = parent._subparent - return repo.root[len(pathutil.normasprefix(parent.root)):] - -def subrelpath(sub): - """return path to this subrepo as seen from outermost repo""" - return sub._relpath - -def _abssource(repo, push=False, abort=True): - """return pull/push path of repo - either based on parent repo .hgsub info - or on the top repo config. Abort or return None if no source found.""" - if util.safehasattr(repo, '_subparent'): - source = util.url(repo._subsource) - if source.isabs(): - return bytes(source) - source.path = posixpath.normpath(source.path) - parent = _abssource(repo._subparent, push, abort=False) - if parent: - parent = util.url(util.pconvert(parent)) - parent.path = posixpath.join(parent.path or '', source.path) - parent.path = posixpath.normpath(parent.path) - return bytes(parent) - else: # recursion reached top repo - path = None - if util.safehasattr(repo, '_subtoppath'): - path = repo._subtoppath - elif push and repo.ui.config('paths', 'default-push'): - path = repo.ui.config('paths', 'default-push') - elif repo.ui.config('paths', 'default'): - path = repo.ui.config('paths', 'default') - elif repo.shared(): - # chop off the .hg component to get the default path form. 
This has - # already run through vfsmod.vfs(..., realpath=True), so it doesn't - # have problems with 'C:' - return os.path.dirname(repo.sharedpath) - if path: - # issue5770: 'C:\' and 'C:' are not equivalent paths. The former is - # as expected: an absolute path to the root of the C: drive. The - # latter is a relative path, and works like so: - # - # C:\>cd C:\some\path - # C:\>D: - # D:\>python -c "import os; print os.path.abspath('C:')" - # C:\some\path - # - # D:\>python -c "import os; print os.path.abspath('C:relative')" - # C:\some\path\relative - if util.hasdriveletter(path): - if len(path) == 2 or path[2:3] not in br'\/': - path = os.path.abspath(path) - return path - - if abort: - raise error.Abort(_("default path for subrepository not found")) - def _sanitize(ui, vfs, ignore): for dirname, dirs, names in vfs.walk(): for i, d in enumerate(dirs): @@ -508,37 +174,6 @@ subrev = "0" * 40 return types[state[2]](pctx, path, (state[0], subrev), True) -def newcommitphase(ui, ctx): - commitphase = phases.newcommitphase(ui) - substate = getattr(ctx, "substate", None) - if not substate: - return commitphase - check = ui.config('phases', 'checksubrepos') - if check not in ('ignore', 'follow', 'abort'): - raise error.Abort(_('invalid phases.checksubrepos configuration: %s') - % (check)) - if check == 'ignore': - return commitphase - maxphase = phases.public - maxsub = None - for s in sorted(substate): - sub = ctx.sub(s) - subphase = sub.phase(substate[s][1]) - if maxphase < subphase: - maxphase = subphase - maxsub = s - if commitphase < maxphase: - if check == 'abort': - raise error.Abort(_("can't commit in %s phase" - " conflicting %s from subrepository %s") % - (phases.phasenames[commitphase], - phases.phasenames[maxphase], maxsub)) - ui.warn(_("warning: changes are committed in" - " %s phase from subrepository %s\n") % - (phases.phasenames[maxphase], maxsub)) - return maxphase - return commitphase - # subrepo classes need to implement the following abstract class: 
class abstractsubrepo(object): @@ -907,10 +542,10 @@ # in hex format if node2 is not None: node2 = node.bin(node2) - cmdutil.diffordiffstat(ui, self._repo, diffopts, - node1, node2, match, - prefix=posixpath.join(prefix, self._path), - listsubrepos=True, **opts) + logcmdutil.diffordiffstat(ui, self._repo, diffopts, + node1, node2, match, + prefix=posixpath.join(prefix, self._path), + listsubrepos=True, **opts) except error.RepoLookupError as inst: self.ui.warn(_('warning: error "%s" in subrepository "%s"\n') % (inst, subrelpath(self))) @@ -918,9 +553,13 @@ @annotatesubrepoerror def archive(self, archiver, prefix, match=None, decode=True): self._get(self._state + ('hg',)) - total = abstractsubrepo.archive(self, archiver, prefix, match) + files = self.files() + if match: + files = [f for f in files if match(f)] rev = self._state[1] ctx = self._repo[rev] + scmutil.fileprefetchhooks(self._repo, ctx, files) + total = abstractsubrepo.archive(self, archiver, prefix, match) for subpath in ctx.substate: s = subrepo(ctx, subpath, True) submatch = matchmod.subdirmatcher(subpath, match) @@ -1849,7 +1488,7 @@ if date: # git's date parser silently ignores when seconds < 1e9 # convert to ISO8601 - env['GIT_AUTHOR_DATE'] = util.datestr(date, + env['GIT_AUTHOR_DATE'] = dateutil.datestr(date, '%Y-%m-%dT%H:%M:%S %1%2') self._gitcommand(cmd, env=env) # make sure commit works otherwise HEAD might not exist under certain @@ -2025,8 +1664,7 @@ # TODO: add support for non-plain formatter (see cmdutil.cat()) for f in match.files(): output = self._gitcommand(["show", "%s:%s" % (rev, f)]) - fp = cmdutil.makefileobj(self._subparent, fntemplate, - self._ctx.node(), + fp = cmdutil.makefileobj(self._ctx, fntemplate, pathname=self.wvfs.reljoin(prefix, f)) fp.write(output) fp.close()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/subrepoutil.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,392 @@ +# subrepoutil.py - sub-repository operations and substate handling +# +# Copyright 2009-2010 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import errno +import os +import posixpath +import re + +from .i18n import _ +from . import ( + config, + error, + filemerge, + pathutil, + phases, + util, +) + +nullstate = ('', '', 'empty') + +def state(ctx, ui): + """return a state dict, mapping subrepo paths configured in .hgsub + to tuple: (source from .hgsub, revision from .hgsubstate, kind + (key in types dict)) + """ + p = config.config() + repo = ctx.repo() + def read(f, sections=None, remap=None): + if f in ctx: + try: + data = ctx[f].data() + except IOError as err: + if err.errno != errno.ENOENT: + raise + # handle missing subrepo spec files as removed + ui.warn(_("warning: subrepo spec file \'%s\' not found\n") % + repo.pathto(f)) + return + p.parse(f, data, sections, remap, read) + else: + raise error.Abort(_("subrepo spec file \'%s\' not found") % + repo.pathto(f)) + if '.hgsub' in ctx: + read('.hgsub') + + for path, src in ui.configitems('subpaths'): + p.set('subpaths', path, src, ui.configsource('subpaths', path)) + + rev = {} + if '.hgsubstate' in ctx: + try: + for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()): + l = l.lstrip() + if not l: + continue + try: + revision, path = l.split(" ", 1) + except ValueError: + raise error.Abort(_("invalid subrepository revision " + "specifier in \'%s\' line %d") + % (repo.pathto('.hgsubstate'), (i + 1))) + rev[path] = revision + except IOError as err: + if err.errno != errno.ENOENT: + raise + + def remap(src): + for pattern, repl in p.items('subpaths'): + # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub + # does a 
string decode. + repl = util.escapestr(repl) + # However, we still want to allow back references to go + # through unharmed, so we turn r'\\1' into r'\1'. Again, + # extra escapes are needed because re.sub string decodes. + repl = re.sub(br'\\\\([0-9]+)', br'\\\1', repl) + try: + src = re.sub(pattern, repl, src, 1) + except re.error as e: + raise error.Abort(_("bad subrepository pattern in %s: %s") + % (p.source('subpaths', pattern), e)) + return src + + state = {} + for path, src in p[''].items(): + kind = 'hg' + if src.startswith('['): + if ']' not in src: + raise error.Abort(_('missing ] in subrepository source')) + kind, src = src.split(']', 1) + kind = kind[1:] + src = src.lstrip() # strip any extra whitespace after ']' + + if not util.url(src).isabs(): + parent = _abssource(repo, abort=False) + if parent: + parent = util.url(parent) + parent.path = posixpath.join(parent.path or '', src) + parent.path = posixpath.normpath(parent.path) + joined = str(parent) + # Remap the full joined path and use it if it changes, + # else remap the original source. + remapped = remap(joined) + if remapped == joined: + src = remap(src) + else: + src = remapped + + src = remap(src) + state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind) + + return state + +def writestate(repo, state): + """rewrite .hgsubstate in (outer) repo with these subrepo states""" + lines = ['%s %s\n' % (state[s][1], s) for s in sorted(state) + if state[s][1] != nullstate[1]] + repo.wwrite('.hgsubstate', ''.join(lines), '') + +def submerge(repo, wctx, mctx, actx, overwrite, labels=None): + """delegated from merge.applyupdates: merging of .hgsubstate file + in working context, merging context and ancestor context""" + if mctx == actx: # backwards? 
+ actx = wctx.p1() + s1 = wctx.substate + s2 = mctx.substate + sa = actx.substate + sm = {} + + repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx)) + + def debug(s, msg, r=""): + if r: + r = "%s:%s:%s" % r + repo.ui.debug(" subrepo %s: %s %s\n" % (s, msg, r)) + + promptssrc = filemerge.partextras(labels) + for s, l in sorted(s1.iteritems()): + prompts = None + a = sa.get(s, nullstate) + ld = l # local state with possible dirty flag for compares + if wctx.sub(s).dirty(): + ld = (l[0], l[1] + "+") + if wctx == actx: # overwrite + a = ld + + prompts = promptssrc.copy() + prompts['s'] = s + if s in s2: + r = s2[s] + if ld == r or r == a: # no change or local is newer + sm[s] = l + continue + elif ld == a: # other side changed + debug(s, "other changed, get", r) + wctx.sub(s).get(r, overwrite) + sm[s] = r + elif ld[0] != r[0]: # sources differ + prompts['lo'] = l[0] + prompts['ro'] = r[0] + if repo.ui.promptchoice( + _(' subrepository sources for %(s)s differ\n' + 'use (l)ocal%(l)s source (%(lo)s)' + ' or (r)emote%(o)s source (%(ro)s)?' + '$$ &Local $$ &Remote') % prompts, 0): + debug(s, "prompt changed, get", r) + wctx.sub(s).get(r, overwrite) + sm[s] = r + elif ld[1] == a[1]: # local side is unchanged + debug(s, "other side changed, get", r) + wctx.sub(s).get(r, overwrite) + sm[s] = r + else: + debug(s, "both sides changed") + srepo = wctx.sub(s) + prompts['sl'] = srepo.shortid(l[1]) + prompts['sr'] = srepo.shortid(r[1]) + option = repo.ui.promptchoice( + _(' subrepository %(s)s diverged (local revision: %(sl)s, ' + 'remote revision: %(sr)s)\n' + '(M)erge, keep (l)ocal%(l)s or keep (r)emote%(o)s?' 
+ '$$ &Merge $$ &Local $$ &Remote') + % prompts, 0) + if option == 0: + wctx.sub(s).merge(r) + sm[s] = l + debug(s, "merge with", r) + elif option == 1: + sm[s] = l + debug(s, "keep local subrepo revision", l) + else: + wctx.sub(s).get(r, overwrite) + sm[s] = r + debug(s, "get remote subrepo revision", r) + elif ld == a: # remote removed, local unchanged + debug(s, "remote removed, remove") + wctx.sub(s).remove() + elif a == nullstate: # not present in remote or ancestor + debug(s, "local added, keep") + sm[s] = l + continue + else: + if repo.ui.promptchoice( + _(' local%(l)s changed subrepository %(s)s' + ' which remote%(o)s removed\n' + 'use (c)hanged version or (d)elete?' + '$$ &Changed $$ &Delete') % prompts, 0): + debug(s, "prompt remove") + wctx.sub(s).remove() + + for s, r in sorted(s2.items()): + prompts = None + if s in s1: + continue + elif s not in sa: + debug(s, "remote added, get", r) + mctx.sub(s).get(r) + sm[s] = r + elif r != sa[s]: + prompts = promptssrc.copy() + prompts['s'] = s + if repo.ui.promptchoice( + _(' remote%(o)s changed subrepository %(s)s' + ' which local%(l)s removed\n' + 'use (c)hanged version or (d)elete?' + '$$ &Changed $$ &Delete') % prompts, 0) == 0: + debug(s, "prompt recreate", r) + mctx.sub(s).get(r) + sm[s] = r + + # record merged .hgsubstate + writestate(repo, sm) + return sm + +def precommit(ui, wctx, status, match, force=False): + """Calculate .hgsubstate changes that should be applied before committing + + Returns (subs, commitsubs, newstate) where + - subs: changed subrepos (including dirty ones) + - commitsubs: dirty subrepos which the caller needs to commit recursively + - newstate: new state dict which the caller must write to .hgsubstate + + This also updates the given status argument. 
+ """ + subs = [] + commitsubs = set() + newstate = wctx.substate.copy() + + # only manage subrepos and .hgsubstate if .hgsub is present + if '.hgsub' in wctx: + # we'll decide whether to track this ourselves, thanks + for c in status.modified, status.added, status.removed: + if '.hgsubstate' in c: + c.remove('.hgsubstate') + + # compare current state to last committed state + # build new substate based on last committed state + oldstate = wctx.p1().substate + for s in sorted(newstate.keys()): + if not match(s): + # ignore working copy, use old state if present + if s in oldstate: + newstate[s] = oldstate[s] + continue + if not force: + raise error.Abort( + _("commit with new subrepo %s excluded") % s) + dirtyreason = wctx.sub(s).dirtyreason(True) + if dirtyreason: + if not ui.configbool('ui', 'commitsubrepos'): + raise error.Abort(dirtyreason, + hint=_("use --subrepos for recursive commit")) + subs.append(s) + commitsubs.add(s) + else: + bs = wctx.sub(s).basestate() + newstate[s] = (newstate[s][0], bs, newstate[s][2]) + if oldstate.get(s, (None, None, None))[1] != bs: + subs.append(s) + + # check for removed subrepos + for p in wctx.parents(): + r = [s for s in p.substate if s not in newstate] + subs += [s for s in r if match(s)] + if subs: + if (not match('.hgsub') and + '.hgsub' in (wctx.modified() + wctx.added())): + raise error.Abort(_("can't commit subrepos without .hgsub")) + status.modified.insert(0, '.hgsubstate') + + elif '.hgsub' in status.removed: + # clean up .hgsubstate when .hgsub is removed + if ('.hgsubstate' in wctx and + '.hgsubstate' not in (status.modified + status.added + + status.removed)): + status.removed.insert(0, '.hgsubstate') + + return subs, commitsubs, newstate + +def reporelpath(repo): + """return path to this (sub)repo as seen from outermost repo""" + parent = repo + while util.safehasattr(parent, '_subparent'): + parent = parent._subparent + return repo.root[len(pathutil.normasprefix(parent.root)):] + +def subrelpath(sub): + 
"""return path to this subrepo as seen from outermost repo""" + return sub._relpath + +def _abssource(repo, push=False, abort=True): + """return pull/push path of repo - either based on parent repo .hgsub info + or on the top repo config. Abort or return None if no source found.""" + if util.safehasattr(repo, '_subparent'): + source = util.url(repo._subsource) + if source.isabs(): + return bytes(source) + source.path = posixpath.normpath(source.path) + parent = _abssource(repo._subparent, push, abort=False) + if parent: + parent = util.url(util.pconvert(parent)) + parent.path = posixpath.join(parent.path or '', source.path) + parent.path = posixpath.normpath(parent.path) + return bytes(parent) + else: # recursion reached top repo + path = None + if util.safehasattr(repo, '_subtoppath'): + path = repo._subtoppath + elif push and repo.ui.config('paths', 'default-push'): + path = repo.ui.config('paths', 'default-push') + elif repo.ui.config('paths', 'default'): + path = repo.ui.config('paths', 'default') + elif repo.shared(): + # chop off the .hg component to get the default path form. This has + # already run through vfsmod.vfs(..., realpath=True), so it doesn't + # have problems with 'C:' + return os.path.dirname(repo.sharedpath) + if path: + # issue5770: 'C:\' and 'C:' are not equivalent paths. The former is + # as expected: an absolute path to the root of the C: drive. 
The + # latter is a relative path, and works like so: + # + # C:\>cd C:\some\path + # C:\>D: + # D:\>python -c "import os; print os.path.abspath('C:')" + # C:\some\path + # + # D:\>python -c "import os; print os.path.abspath('C:relative')" + # C:\some\path\relative + if util.hasdriveletter(path): + if len(path) == 2 or path[2:3] not in br'\/': + path = os.path.abspath(path) + return path + + if abort: + raise error.Abort(_("default path for subrepository not found")) + +def newcommitphase(ui, ctx): + commitphase = phases.newcommitphase(ui) + substate = getattr(ctx, "substate", None) + if not substate: + return commitphase + check = ui.config('phases', 'checksubrepos') + if check not in ('ignore', 'follow', 'abort'): + raise error.Abort(_('invalid phases.checksubrepos configuration: %s') + % (check)) + if check == 'ignore': + return commitphase + maxphase = phases.public + maxsub = None + for s in sorted(substate): + sub = ctx.sub(s) + subphase = sub.phase(substate[s][1]) + if maxphase < subphase: + maxphase = subphase + maxsub = s + if commitphase < maxphase: + if check == 'abort': + raise error.Abort(_("can't commit in %s phase" + " conflicting %s from subrepository %s") % + (phases.phasenames[commitphase], + phases.phasenames[maxphase], maxsub)) + ui.warn(_("warning: changes are committed in" + " %s phase from subrepository %s\n") % + (phases.phasenames[maxphase], maxsub)) + return maxphase + return commitphase
--- a/mercurial/tags.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/tags.py Sun Mar 04 10:42:51 2018 -0500 @@ -244,7 +244,7 @@ # remove tags pointing to invalid nodes cl = repo.changelog - for t in filetags.keys(): + for t in list(filetags): try: cl.rev(filetags[t][0]) except (LookupError, ValueError): @@ -276,7 +276,7 @@ count = 0 def dbg(msg): - ui.debug("%s, line %s: %s\n" % (fn, count, msg)) + ui.debug("%s, line %d: %s\n" % (fn, count, msg)) for nline, line in enumerate(lines): count += 1 @@ -559,7 +559,7 @@ def writetags(fp, names, munge, prevtags): fp.seek(0, 2) - if prevtags and prevtags[-1] != '\n': + if prevtags and not prevtags.endswith('\n'): fp.write('\n') for name in names: if munge: @@ -739,7 +739,7 @@ entry = bytearray(prefix + fnode) self._raw[offset:offset + _fnodesrecsize] = entry # self._dirtyoffset could be None. - self._dirtyoffset = min(self._dirtyoffset, offset) or 0 + self._dirtyoffset = min(self._dirtyoffset or 0, offset or 0) def write(self): """Perform all necessary writes to cache file. @@ -783,6 +783,6 @@ except (IOError, OSError) as inst: repo.ui.log('tagscache', "couldn't write cache/%s: %s\n" % ( - _fnodescachefile, inst)) + _fnodescachefile, util.forcebytestr(inst))) finally: lock.release()
--- a/mercurial/templatefilters.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templatefilters.py Sun Mar 04 10:42:51 2018 -0500 @@ -22,6 +22,7 @@ url, util, ) +from .utils import dateutil urlerr = util.urlerr urlreq = util.urlreq @@ -78,7 +79,7 @@ else: delta = max(1, int(now - then)) if delta > agescales[0][1] * 2: - return util.shortdate(date) + return dateutil.shortdate(date) for t, s, a in agescales: n = delta // s @@ -100,6 +101,13 @@ """List or text. Returns the length as an integer.""" return len(i) +@templatefilter('dirname') +def dirname(path): + """Any text. Treats the text as a path, and strips the last + component of the path after splitting by the path separator. + """ + return os.path.dirname(path) + @templatefilter('domain') def domain(author): """Any text. Finds the first string that looks like an email @@ -138,19 +146,19 @@ global para_re, space_re if para_re is None: para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M) - space_re = re.compile(r' +') + space_re = re.compile(br' +') def findparas(): start = 0 while True: m = para_re.search(text, start) if not m: - uctext = unicode(text[start:], encoding.encoding) + uctext = encoding.unifromlocal(text[start:]) w = len(uctext) while 0 < w and uctext[w - 1].isspace(): w -= 1 - yield (uctext[:w].encode(encoding.encoding), - uctext[w:].encode(encoding.encoding)) + yield (encoding.unitolocal(uctext[:w]), + encoding.unitolocal(uctext[w:])) break yield text[start:m.start(0)], m.group(1) start = m.end(1) @@ -196,7 +204,7 @@ """Date. Returns the date in ISO 8601 format: "2009-08-18 13:00 +0200". """ - return util.datestr(text, '%Y-%m-%d %H:%M %1%2') + return dateutil.datestr(text, '%Y-%m-%d %H:%M %1%2') @templatefilter('isodatesec') def isodatesec(text): @@ -204,7 +212,7 @@ seconds: "2009-08-18 13:00:13 +0200". See also the rfc3339date filter. 
""" - return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2') + return dateutil.datestr(text, '%Y-%m-%d %H:%M:%S %1%2') def indent(text, prefix): '''indent each non-empty line of text after first with prefix.''' @@ -257,16 +265,16 @@ return encoding.lower(text) @templatefilter('nonempty') -def nonempty(str): +def nonempty(text): """Any text. Returns '(none)' if the string is empty.""" - return str or "(none)" + return text or "(none)" @templatefilter('obfuscate') def obfuscate(text): """Any text. Returns the input text rendered as a sequence of XML entities. """ - text = unicode(text, encoding.encoding, 'replace') + text = unicode(text, pycompat.sysstr(encoding.encoding), r'replace') return ''.join(['&#%d;' % ord(c) for c in text]) @templatefilter('permissions') @@ -318,14 +326,14 @@ """Date. Returns a date using the Internet date format specified in RFC 3339: "2009-08-18T13:00:13+02:00". """ - return util.datestr(text, "%Y-%m-%dT%H:%M:%S%1:%2") + return dateutil.datestr(text, "%Y-%m-%dT%H:%M:%S%1:%2") @templatefilter('rfc822date') def rfc822date(text): """Date. Returns a date using the same format used in email headers: "Tue, 18 Aug 2009 13:00:13 +0200". """ - return util.datestr(text, "%a, %d %b %Y %H:%M:%S %1%2") + return dateutil.datestr(text, "%a, %d %b %Y %H:%M:%S %1%2") @templatefilter('short') def short(text): @@ -346,7 +354,7 @@ @templatefilter('shortdate') def shortdate(text): """Date. Returns a date like "2006-09-18".""" - return util.shortdate(text) + return dateutil.shortdate(text) @templatefilter('slashpath') def slashpath(path): @@ -369,6 +377,12 @@ """ thing = templatekw.unwraphybrid(thing) if util.safehasattr(thing, '__iter__') and not isinstance(thing, bytes): + if isinstance(thing, str): + # This is only reachable on Python 3 (otherwise + # isinstance(thing, bytes) would have been true), and is + # here to prevent infinite recursion bugs on Python 3. 
+ raise error.ProgrammingError( + 'stringify got unexpected unicode string: %r' % thing) return "".join([stringify(t) for t in thing if t is not None]) if thing is None: return ""
--- a/mercurial/templatekw.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templatekw.py Sun Mar 04 10:42:51 2018 -0500 @@ -64,8 +64,8 @@ def __iter__(self): return iter(self._values) def __getattr__(self, name): - if name not in ('get', 'items', 'iteritems', 'iterkeys', 'itervalues', - 'keys', 'values'): + if name not in (r'get', r'items', r'iteritems', r'iterkeys', + r'itervalues', r'keys', r'values'): raise AttributeError(name) return getattr(self._values, name) @@ -97,14 +97,22 @@ def itermaps(self): yield self.tomap() -def hybriddict(data, key='key', value='value', fmt='%s=%s', gen=None): +def hybriddict(data, key='key', value='value', fmt=None, gen=None): """Wrap data to support both dict-like and string-like operations""" + prefmt = pycompat.identity + if fmt is None: + fmt = '%s=%s' + prefmt = pycompat.bytestr return _hybrid(gen, data, lambda k: {key: k, value: data[k]}, - lambda k: fmt % (k, data[k])) + lambda k: fmt % (prefmt(k), prefmt(data[k]))) -def hybridlist(data, name, fmt='%s', gen=None): +def hybridlist(data, name, fmt=None, gen=None): """Wrap data to support both list-like and string-like operations""" - return _hybrid(gen, data, lambda x: {name: x}, lambda x: fmt % x) + prefmt = pycompat.identity + if fmt is None: + fmt = '%s' + prefmt = pycompat.bytestr + return _hybrid(gen, data, lambda x: {name: x}, lambda x: fmt % prefmt(x)) def unwraphybrid(thing): """Return an object which can be stringified possibly by using a legacy @@ -136,19 +144,50 @@ return value return _mappable(None, key, value, makemap) +def compatdict(context, mapping, name, data, key='key', value='value', + fmt=None, plural=None, separator=' '): + """Wrap data like hybriddict(), but also supports old-style list template + + This exists for backward compatibility with the old-style template. Use + hybriddict() for new template keywords. 
+ """ + c = [{key: k, value: v} for k, v in data.iteritems()] + t = context.resource(mapping, 'templ') + f = _showlist(name, c, t, mapping, plural, separator) + return hybriddict(data, key=key, value=value, fmt=fmt, gen=f) + +def compatlist(context, mapping, name, data, element=None, fmt=None, + plural=None, separator=' '): + """Wrap data like hybridlist(), but also supports old-style list template + + This exists for backward compatibility with the old-style template. Use + hybridlist() for new template keywords. + """ + t = context.resource(mapping, 'templ') + f = _showlist(name, data, t, mapping, plural, separator) + return hybridlist(data, name=element or name, fmt=fmt, gen=f) + def showdict(name, data, mapping, plural=None, key='key', value='value', - fmt='%s=%s', separator=' '): + fmt=None, separator=' '): + ui = mapping.get('ui') + if ui: + ui.deprecwarn("templatekw.showdict() is deprecated, use compatdict()", + '4.6') c = [{key: k, value: v} for k, v in data.iteritems()] - f = _showlist(name, c, mapping, plural, separator) + f = _showlist(name, c, mapping['templ'], mapping, plural, separator) return hybriddict(data, key=key, value=value, fmt=fmt, gen=f) def showlist(name, values, mapping, plural=None, element=None, separator=' '): + ui = mapping.get('ui') + if ui: + ui.deprecwarn("templatekw.showlist() is deprecated, use compatlist()", + '4.6') if not element: element = name - f = _showlist(name, values, mapping, plural, separator) + f = _showlist(name, values, mapping['templ'], mapping, plural, separator) return hybridlist(values, name=element, gen=f) -def _showlist(name, values, mapping, plural=None, separator=' '): +def _showlist(name, values, templ, mapping, plural=None, separator=' '): '''expand set of values. name is name of key in template map. values is list of strings or dicts. @@ -169,7 +208,6 @@ expand 'end_foos'. 
''' - templ = mapping['templ'] strmapping = pycompat.strkwargs(mapping) if not plural: plural = name + 's' @@ -183,7 +221,9 @@ yield separator.join(values) else: for v in values: - yield dict(v, **strmapping) + r = dict(v) + r.update(mapping) + yield r return startname = 'start_' + plural if startname in templ: @@ -192,11 +232,15 @@ def one(v, tag=name): try: vmapping.update(v) - except (AttributeError, ValueError): + # Python 2 raises ValueError if the type of v is wrong. Python + # 3 raises TypeError. + except (AttributeError, TypeError, ValueError): try: + # Python 2 raises ValueError trying to destructure an e.g. + # bytes. Python 3 raises TypeError. for a, b in v: vmapping[a] = b - except ValueError: + except (TypeError, ValueError): vmapping[name] = v return templ(tag, **pycompat.strkwargs(vmapping)) lastname = 'last_' + name @@ -212,13 +256,11 @@ if endname in templ: yield templ(endname, **strmapping) -def getfiles(repo, ctx, revcache): - if 'files' not in revcache: - revcache['files'] = repo.status(ctx.p1(), ctx)[:3] - return revcache['files'] - -def getlatesttags(repo, ctx, cache, pattern=None): +def getlatesttags(context, mapping, pattern=None): '''return date, distance and name for the latest tag of rev''' + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + cache = context.resource(mapping, 'cache') cachename = 'latesttags' if pattern is not None: @@ -337,91 +379,92 @@ # filecopy is preserved for compatibility reasons defaulttempl['filecopy'] = defaulttempl['file_copy'] -# keywords are callables like: -# fn(repo, ctx, templ, cache, revcache, **args) -# with: -# repo - current repository instance -# ctx - the changectx being displayed -# templ - the templater instance -# cache - a cache dictionary for the whole templater run -# revcache - a cache dictionary for the current revision +# keywords are callables (see registrar.templatekeyword for details) keywords = {} - templatekeyword = registrar.templatekeyword(keywords) 
-@templatekeyword('author') -def showauthor(repo, ctx, templ, **args): +@templatekeyword('author', requires={'ctx'}) +def showauthor(context, mapping): """String. The unmodified author of the changeset.""" + ctx = context.resource(mapping, 'ctx') return ctx.user() -@templatekeyword('bisect') -def showbisect(repo, ctx, templ, **args): +@templatekeyword('bisect', requires={'repo', 'ctx'}) +def showbisect(context, mapping): """String. The changeset bisection status.""" + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') return hbisect.label(repo, ctx.node()) -@templatekeyword('branch') -def showbranch(**args): +@templatekeyword('branch', requires={'ctx'}) +def showbranch(context, mapping): """String. The name of the branch on which the changeset was committed. """ - return args[r'ctx'].branch() + ctx = context.resource(mapping, 'ctx') + return ctx.branch() -@templatekeyword('branches') -def showbranches(**args): +@templatekeyword('branches', requires={'ctx', 'templ'}) +def showbranches(context, mapping): """List of strings. The name of the branch on which the changeset was committed. Will be empty if the branch name was default. (DEPRECATED) """ - args = pycompat.byteskwargs(args) - branch = args['ctx'].branch() + ctx = context.resource(mapping, 'ctx') + branch = ctx.branch() if branch != 'default': - return showlist('branch', [branch], args, plural='branches') - return showlist('branch', [], args, plural='branches') + return compatlist(context, mapping, 'branch', [branch], + plural='branches') + return compatlist(context, mapping, 'branch', [], plural='branches') -@templatekeyword('bookmarks') -def showbookmarks(**args): +@templatekeyword('bookmarks', requires={'repo', 'ctx', 'templ'}) +def showbookmarks(context, mapping): """List of strings. Any bookmarks associated with the changeset. Also sets 'active', the name of the active bookmark. 
""" - args = pycompat.byteskwargs(args) - repo = args['ctx']._repo - bookmarks = args['ctx'].bookmarks() + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + templ = context.resource(mapping, 'templ') + bookmarks = ctx.bookmarks() active = repo._activebookmark makemap = lambda v: {'bookmark': v, 'active': active, 'current': active} - f = _showlist('bookmark', bookmarks, args) + f = _showlist('bookmark', bookmarks, templ, mapping) return _hybrid(f, bookmarks, makemap, pycompat.identity) -@templatekeyword('children') -def showchildren(**args): +@templatekeyword('children', requires={'ctx', 'templ'}) +def showchildren(context, mapping): """List of strings. The children of the changeset.""" - args = pycompat.byteskwargs(args) - ctx = args['ctx'] - childrevs = ['%d:%s' % (cctx, cctx) for cctx in ctx.children()] - return showlist('children', childrevs, args, element='child') + ctx = context.resource(mapping, 'ctx') + childrevs = ['%d:%s' % (cctx.rev(), cctx) for cctx in ctx.children()] + return compatlist(context, mapping, 'children', childrevs, element='child') # Deprecated, but kept alive for help generation a purpose. -@templatekeyword('currentbookmark') -def showcurrentbookmark(**args): +@templatekeyword('currentbookmark', requires={'repo', 'ctx'}) +def showcurrentbookmark(context, mapping): """String. The active bookmark, if it is associated with the changeset. (DEPRECATED)""" - return showactivebookmark(**args) + return showactivebookmark(context, mapping) -@templatekeyword('activebookmark') -def showactivebookmark(**args): +@templatekeyword('activebookmark', requires={'repo', 'ctx'}) +def showactivebookmark(context, mapping): """String. 
The active bookmark, if it is associated with the changeset.""" - active = args[r'repo']._activebookmark - if active and active in args[r'ctx'].bookmarks(): + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + active = repo._activebookmark + if active and active in ctx.bookmarks(): return active return '' -@templatekeyword('date') -def showdate(repo, ctx, templ, **args): +@templatekeyword('date', requires={'ctx'}) +def showdate(context, mapping): """Date information. The date when the changeset was committed.""" + ctx = context.resource(mapping, 'ctx') return ctx.date() -@templatekeyword('desc') -def showdescription(repo, ctx, templ, **args): +@templatekeyword('desc', requires={'ctx'}) +def showdescription(context, mapping): """String. The text of the changeset description.""" + ctx = context.resource(mapping, 'ctx') s = ctx.description() if isinstance(s, encoding.localstr): # try hard to preserve utf-8 bytes @@ -429,55 +472,65 @@ else: return s.strip() -@templatekeyword('diffstat') -def showdiffstat(repo, ctx, templ, **args): +@templatekeyword('diffstat', requires={'ctx'}) +def showdiffstat(context, mapping): """String. Statistics of changes with the following format: "modified files: +added/-removed lines" """ + ctx = context.resource(mapping, 'ctx') stats = patch.diffstatdata(util.iterlines(ctx.diff(noprefix=False))) maxname, maxtotal, adds, removes, binary = patch.diffstatsum(stats) - return '%s: +%s/-%s' % (len(stats), adds, removes) + return '%d: +%d/-%d' % (len(stats), adds, removes) -@templatekeyword('envvars') -def showenvvars(repo, **args): +@templatekeyword('envvars', requires={'ui', 'templ'}) +def showenvvars(context, mapping): """A dictionary of environment variables. 
(EXPERIMENTAL)""" - args = pycompat.byteskwargs(args) - env = repo.ui.exportableenviron() + ui = context.resource(mapping, 'ui') + env = ui.exportableenviron() env = util.sortdict((k, env[k]) for k in sorted(env)) - return showdict('envvar', env, args, plural='envvars') + return compatdict(context, mapping, 'envvar', env, plural='envvars') -@templatekeyword('extras') -def showextras(**args): +@templatekeyword('extras', requires={'ctx', 'templ'}) +def showextras(context, mapping): """List of dicts with key, value entries of the 'extras' field of this changeset.""" - args = pycompat.byteskwargs(args) - extras = args['ctx'].extra() + ctx = context.resource(mapping, 'ctx') + templ = context.resource(mapping, 'templ') + extras = ctx.extra() extras = util.sortdict((k, extras[k]) for k in sorted(extras)) makemap = lambda k: {'key': k, 'value': extras[k]} c = [makemap(k) for k in extras] - f = _showlist('extra', c, args, plural='extras') + f = _showlist('extra', c, templ, mapping, plural='extras') return _hybrid(f, extras, makemap, lambda k: '%s=%s' % (k, util.escapestr(extras[k]))) -@templatekeyword('file_adds') -def showfileadds(**args): +def _showfilesbystat(context, mapping, name, index): + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + revcache = context.resource(mapping, 'revcache') + if 'files' not in revcache: + revcache['files'] = repo.status(ctx.p1(), ctx)[:3] + files = revcache['files'][index] + return compatlist(context, mapping, name, files, element='file') + +@templatekeyword('file_adds', requires={'repo', 'ctx', 'revcache', 'templ'}) +def showfileadds(context, mapping): """List of strings. 
Files added by this changeset.""" - args = pycompat.byteskwargs(args) - repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] - return showlist('file_add', getfiles(repo, ctx, revcache)[1], args, - element='file') + return _showfilesbystat(context, mapping, 'file_add', 1) -@templatekeyword('file_copies') -def showfilecopies(**args): +@templatekeyword('file_copies', + requires={'repo', 'ctx', 'cache', 'revcache', 'templ'}) +def showfilecopies(context, mapping): """List of strings. Files copied in this changeset with their sources. """ - args = pycompat.byteskwargs(args) - cache, ctx = args['cache'], args['ctx'] - copies = args['revcache'].get('copies') + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + cache = context.resource(mapping, 'cache') + copies = context.resource(mapping, 'revcache').get('copies') if copies is None: if 'getrenamed' not in cache: - cache['getrenamed'] = getrenamedfn(args['repo']) + cache['getrenamed'] = getrenamedfn(repo) copies = [] getrenamed = cache['getrenamed'] for fn in ctx.files(): @@ -486,51 +539,51 @@ copies.append((fn, rename[0])) copies = util.sortdict(copies) - return showdict('file_copy', copies, args, plural='file_copies', - key='name', value='source', fmt='%s (%s)') + return compatdict(context, mapping, 'file_copy', copies, + key='name', value='source', fmt='%s (%s)', + plural='file_copies') # showfilecopiesswitch() displays file copies only if copy records are # provided before calling the templater, usually with a --copies # command line switch. -@templatekeyword('file_copies_switch') -def showfilecopiesswitch(**args): +@templatekeyword('file_copies_switch', requires={'revcache', 'templ'}) +def showfilecopiesswitch(context, mapping): """List of strings. Like "file_copies" but displayed only if the --copied switch is set. 
""" - args = pycompat.byteskwargs(args) - copies = args['revcache'].get('copies') or [] + copies = context.resource(mapping, 'revcache').get('copies') or [] copies = util.sortdict(copies) - return showdict('file_copy', copies, args, plural='file_copies', - key='name', value='source', fmt='%s (%s)') + return compatdict(context, mapping, 'file_copy', copies, + key='name', value='source', fmt='%s (%s)', + plural='file_copies') -@templatekeyword('file_dels') -def showfiledels(**args): +@templatekeyword('file_dels', requires={'repo', 'ctx', 'revcache', 'templ'}) +def showfiledels(context, mapping): """List of strings. Files removed by this changeset.""" - args = pycompat.byteskwargs(args) - repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] - return showlist('file_del', getfiles(repo, ctx, revcache)[2], args, - element='file') + return _showfilesbystat(context, mapping, 'file_del', 2) -@templatekeyword('file_mods') -def showfilemods(**args): +@templatekeyword('file_mods', requires={'repo', 'ctx', 'revcache', 'templ'}) +def showfilemods(context, mapping): """List of strings. Files modified by this changeset.""" - args = pycompat.byteskwargs(args) - repo, ctx, revcache = args['repo'], args['ctx'], args['revcache'] - return showlist('file_mod', getfiles(repo, ctx, revcache)[0], args, - element='file') + return _showfilesbystat(context, mapping, 'file_mod', 0) -@templatekeyword('files') -def showfiles(**args): +@templatekeyword('files', requires={'ctx', 'templ'}) +def showfiles(context, mapping): """List of strings. All files modified, added, or removed by this changeset. """ - args = pycompat.byteskwargs(args) - return showlist('file', args['ctx'].files(), args) + ctx = context.resource(mapping, 'ctx') + return compatlist(context, mapping, 'file', ctx.files()) -@templatekeyword('graphnode') -def showgraphnode(repo, ctx, **args): +@templatekeyword('graphnode', requires={'repo', 'ctx'}) +def showgraphnode(context, mapping): """String. 
The character representing the changeset node in an ASCII revision graph.""" + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + return getgraphnode(repo, ctx) + +def getgraphnode(repo, ctx): wpnodes = repo.dirstate.parents() if wpnodes[1] == nullid: wpnodes = wpnodes[:1] @@ -545,33 +598,29 @@ else: return 'o' -@templatekeyword('graphwidth') -def showgraphwidth(repo, ctx, templ, **args): +@templatekeyword('graphwidth', requires=()) +def showgraphwidth(context, mapping): """Integer. The width of the graph drawn by 'log --graph' or zero.""" - # The value args['graphwidth'] will be this function, so we use an internal - # name to pass the value through props into this function. - return args.get('_graphwidth', 0) + # just hosts documentation; should be overridden by template mapping + return 0 -@templatekeyword('index') -def showindex(**args): +@templatekeyword('index', requires=()) +def showindex(context, mapping): """Integer. The current iteration of the loop. (0 indexed)""" # just hosts documentation; should be overridden by template mapping raise error.Abort(_("can't use index in this context")) -@templatekeyword('latesttag') -def showlatesttag(**args): +@templatekeyword('latesttag', requires={'repo', 'ctx', 'cache', 'templ'}) +def showlatesttag(context, mapping): """List of strings. The global tags on the most recent globally tagged ancestor of this changeset. If no such tags exist, the list consists of the single string "null". 
""" - return showlatesttags(None, **args) + return showlatesttags(context, mapping, None) -def showlatesttags(pattern, **args): +def showlatesttags(context, mapping, pattern): """helper method for the latesttag keyword and function""" - args = pycompat.byteskwargs(args) - repo, ctx = args['repo'], args['ctx'] - cache = args['cache'] - latesttags = getlatesttags(repo, ctx, cache, pattern) + latesttags = getlatesttags(context, mapping, pattern) # latesttag[0] is an implementation detail for sorting csets on different # branches in a stable manner- it is the date the tagged cset was created, @@ -584,25 +633,28 @@ } tags = latesttags[2] - f = _showlist('latesttag', tags, args, separator=':') + templ = context.resource(mapping, 'templ') + f = _showlist('latesttag', tags, templ, mapping, separator=':') return _hybrid(f, tags, makemap, pycompat.identity) -@templatekeyword('latesttagdistance') -def showlatesttagdistance(repo, ctx, templ, cache, **args): +@templatekeyword('latesttagdistance', requires={'repo', 'ctx', 'cache'}) +def showlatesttagdistance(context, mapping): """Integer. Longest path to the latest tag.""" - return getlatesttags(repo, ctx, cache)[1] + return getlatesttags(context, mapping)[1] -@templatekeyword('changessincelatesttag') -def showchangessincelatesttag(repo, ctx, templ, cache, **args): +@templatekeyword('changessincelatesttag', requires={'repo', 'ctx', 'cache'}) +def showchangessincelatesttag(context, mapping): """Integer. 
All ancestors not in the latest tag.""" - latesttag = getlatesttags(repo, ctx, cache)[2][0] + mapping = mapping.copy() + mapping['tag'] = getlatesttags(context, mapping)[2][0] + return _showchangessincetag(context, mapping) - return _showchangessincetag(repo, ctx, tag=latesttag, **args) - -def _showchangessincetag(repo, ctx, **args): +def _showchangessincetag(context, mapping): + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') offset = 0 revs = [ctx.rev()] - tag = args[r'tag'] + tag = context.symbol(mapping, 'tag') # The only() revset doesn't currently support wdir() if ctx.rev() is None: @@ -611,56 +663,59 @@ return len(repo.revs('only(%ld, %s)', revs, tag)) + offset -@templatekeyword('manifest') -def showmanifest(**args): - repo, ctx, templ = args[r'repo'], args[r'ctx'], args[r'templ'] +# teach templater latesttags.changes is switched to (context, mapping) API +_showchangessincetag._requires = {'repo', 'ctx'} + +@templatekeyword('manifest', requires={'repo', 'ctx', 'templ'}) +def showmanifest(context, mapping): + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + templ = context.resource(mapping, 'templ') mnode = ctx.manifestnode() if mnode is None: # just avoid crash, we might want to use the 'ff...' hash in future return mrev = repo.manifestlog._revlog.rev(mnode) mhex = hex(mnode) - args = args.copy() - args.update({r'rev': mrev, r'node': mhex}) - f = templ('manifest', **args) + mapping = mapping.copy() + mapping.update({'rev': mrev, 'node': mhex}) + f = templ('manifest', **pycompat.strkwargs(mapping)) # TODO: perhaps 'ctx' should be dropped from mapping because manifest # rev and node are completely different from changeset's. 
return _mappable(f, None, f, lambda x: {'rev': mrev, 'node': mhex}) -@templatekeyword('obsfate') -def showobsfate(**args): +@templatekeyword('obsfate', requires={'ui', 'repo', 'ctx', 'templ'}) +def showobsfate(context, mapping): # this function returns a list containing pre-formatted obsfate strings. # # This function will be replaced by templates fragments when we will have # the verbosity templatekw available. - succsandmarkers = showsuccsandmarkers(**args) + succsandmarkers = showsuccsandmarkers(context, mapping) - args = pycompat.byteskwargs(args) - ui = args['ui'] - + ui = context.resource(mapping, 'ui') values = [] for x in succsandmarkers: values.append(obsutil.obsfateprinter(x['successors'], x['markers'], ui)) - return showlist("fate", values, args) + return compatlist(context, mapping, "fate", values) -def shownames(namespace, **args): +def shownames(context, mapping, namespace): """helper method to generate a template keyword for a namespace""" - args = pycompat.byteskwargs(args) - ctx = args['ctx'] - repo = ctx.repo() + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') ns = repo.names[namespace] names = ns.names(repo, ctx.node()) - return showlist(ns.templatename, names, args, plural=namespace) + return compatlist(context, mapping, ns.templatename, names, + plural=namespace) -@templatekeyword('namespaces') -def shownamespaces(**args): +@templatekeyword('namespaces', requires={'repo', 'ctx', 'templ'}) +def shownamespaces(context, mapping): """Dict of lists. 
Names attached to this changeset per namespace.""" - args = pycompat.byteskwargs(args) - ctx = args['ctx'] - repo = ctx.repo() + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + templ = context.resource(mapping, 'templ') namespaces = util.sortdict() def makensmapfn(ns): @@ -669,10 +724,10 @@ for k, ns in repo.names.iteritems(): names = ns.names(repo, ctx.node()) - f = _showlist('name', names, args) + f = _showlist('name', names, templ, mapping) namespaces[k] = _hybrid(f, names, makensmapfn(ns), pycompat.identity) - f = _showlist('namespace', list(namespaces), args) + f = _showlist('namespace', list(namespaces), templ, mapping) def makemap(ns): return { @@ -684,24 +739,27 @@ return _hybrid(f, namespaces, makemap, pycompat.identity) -@templatekeyword('node') -def shownode(repo, ctx, templ, **args): +@templatekeyword('node', requires={'ctx'}) +def shownode(context, mapping): """String. The changeset identification hash, as a 40 hexadecimal digit string. """ + ctx = context.resource(mapping, 'ctx') return ctx.hex() -@templatekeyword('obsolete') -def showobsolete(repo, ctx, templ, **args): +@templatekeyword('obsolete', requires={'ctx'}) +def showobsolete(context, mapping): """String. Whether the changeset is obsolete. 
(EXPERIMENTAL)""" + ctx = context.resource(mapping, 'ctx') if ctx.obsolete(): return 'obsolete' return '' -@templatekeyword('peerurls') -def showpeerurls(repo, **args): +@templatekeyword('peerurls', requires={'repo'}) +def showpeerurls(context, mapping): """A dictionary of repository locations defined in the [paths] section of your configuration file.""" + repo = context.resource(mapping, 'repo') # see commands.paths() for naming of dictionary keys paths = repo.ui.paths urls = util.sortdict((k, p.rawloc) for k, p in sorted(paths.iteritems())) @@ -712,9 +770,11 @@ return d return _hybrid(None, urls, makemap, lambda k: '%s=%s' % (k, urls[k])) -@templatekeyword("predecessors") -def showpredecessors(repo, ctx, **args): +@templatekeyword("predecessors", requires={'repo', 'ctx'}) +def showpredecessors(context, mapping): """Returns the list if the closest visible successors. (EXPERIMENTAL)""" + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') predecessors = sorted(obsutil.closestpredecessors(repo, ctx.node())) predecessors = map(hex, predecessors) @@ -722,14 +782,21 @@ lambda x: {'ctx': repo[x], 'revcache': {}}, lambda x: scmutil.formatchangeid(repo[x])) -@templatekeyword("successorssets") -def showsuccessorssets(repo, ctx, **args): +@templatekeyword('reporoot', requires={'repo'}) +def showreporoot(context, mapping): + """String. The root directory of the current repository.""" + repo = context.resource(mapping, 'repo') + return repo.root + +@templatekeyword("successorssets", requires={'repo', 'ctx'}) +def showsuccessorssets(context, mapping): """Returns a string of sets of successors for a changectx. Format used is: [ctx1, ctx2], [ctx3] if ctx has been splitted into ctx1 and ctx2 while also diverged into ctx3. 
(EXPERIMENTAL)""" + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') if not ctx.obsolete(): return '' - args = pycompat.byteskwargs(args) ssets = obsutil.successorssets(repo, ctx.node(), closest=True) ssets = [[hex(n) for n in ss] for ss in ssets] @@ -753,13 +820,16 @@ return _hybrid(gen(data), data, lambda x: {'successorset': x}, pycompat.identity) -@templatekeyword("succsandmarkers") -def showsuccsandmarkers(repo, ctx, **args): +@templatekeyword("succsandmarkers", requires={'repo', 'ctx', 'templ'}) +def showsuccsandmarkers(context, mapping): """Returns a list of dict for each final successor of ctx. The dict contains successors node id in "successors" keys and the list of obs-markers from ctx to the set of successors in "markers". (EXPERIMENTAL) """ + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + templ = context.resource(mapping, 'templ') values = obsutil.successorsandmarkers(repo, ctx) @@ -790,86 +860,92 @@ data.append({'successors': successors, 'markers': finalmarkers}) - f = _showlist('succsandmarkers', data, args) + f = _showlist('succsandmarkers', data, templ, mapping) return _hybrid(f, data, lambda x: x, pycompat.identity) -@templatekeyword('p1rev') -def showp1rev(repo, ctx, templ, **args): +@templatekeyword('p1rev', requires={'ctx'}) +def showp1rev(context, mapping): """Integer. The repository-local revision number of the changeset's first parent, or -1 if the changeset has no parents.""" + ctx = context.resource(mapping, 'ctx') return ctx.p1().rev() -@templatekeyword('p2rev') -def showp2rev(repo, ctx, templ, **args): +@templatekeyword('p2rev', requires={'ctx'}) +def showp2rev(context, mapping): """Integer. 
The repository-local revision number of the changeset's second parent, or -1 if the changeset has no second parent.""" + ctx = context.resource(mapping, 'ctx') return ctx.p2().rev() -@templatekeyword('p1node') -def showp1node(repo, ctx, templ, **args): +@templatekeyword('p1node', requires={'ctx'}) +def showp1node(context, mapping): """String. The identification hash of the changeset's first parent, as a 40 digit hexadecimal string. If the changeset has no parents, all digits are 0.""" + ctx = context.resource(mapping, 'ctx') return ctx.p1().hex() -@templatekeyword('p2node') -def showp2node(repo, ctx, templ, **args): +@templatekeyword('p2node', requires={'ctx'}) +def showp2node(context, mapping): """String. The identification hash of the changeset's second parent, as a 40 digit hexadecimal string. If the changeset has no second parent, all digits are 0.""" + ctx = context.resource(mapping, 'ctx') return ctx.p2().hex() -@templatekeyword('parents') -def showparents(**args): +@templatekeyword('parents', requires={'repo', 'ctx', 'templ'}) +def showparents(context, mapping): """List of strings. The parents of the changeset in "rev:node" format. 
If the changeset has only one "natural" parent (the predecessor revision) nothing is shown.""" - args = pycompat.byteskwargs(args) - repo = args['repo'] - ctx = args['ctx'] + repo = context.resource(mapping, 'repo') + ctx = context.resource(mapping, 'ctx') + templ = context.resource(mapping, 'templ') pctxs = scmutil.meaningfulparents(repo, ctx) prevs = [p.rev() for p in pctxs] parents = [[('rev', p.rev()), ('node', p.hex()), ('phase', p.phasestr())] for p in pctxs] - f = _showlist('parent', parents, args) + f = _showlist('parent', parents, templ, mapping) return _hybrid(f, prevs, lambda x: {'ctx': repo[x], 'revcache': {}}, lambda x: scmutil.formatchangeid(repo[x]), keytype=int) -@templatekeyword('phase') -def showphase(repo, ctx, templ, **args): +@templatekeyword('phase', requires={'ctx'}) +def showphase(context, mapping): """String. The changeset phase name.""" + ctx = context.resource(mapping, 'ctx') return ctx.phasestr() -@templatekeyword('phaseidx') -def showphaseidx(repo, ctx, templ, **args): +@templatekeyword('phaseidx', requires={'ctx'}) +def showphaseidx(context, mapping): """Integer. The changeset phase index. (ADVANCED)""" + ctx = context.resource(mapping, 'ctx') return ctx.phase() -@templatekeyword('rev') -def showrev(repo, ctx, templ, **args): +@templatekeyword('rev', requires={'ctx'}) +def showrev(context, mapping): """Integer. 
The repository-local changeset revision number.""" + ctx = context.resource(mapping, 'ctx') return scmutil.intrev(ctx) -def showrevslist(name, revs, **args): +def showrevslist(context, mapping, name, revs): """helper to generate a list of revisions in which a mapped template will be evaluated""" - args = pycompat.byteskwargs(args) - repo = args['ctx'].repo() - f = _showlist(name, ['%d' % r for r in revs], args) + repo = context.resource(mapping, 'repo') + templ = context.resource(mapping, 'templ') + f = _showlist(name, ['%d' % r for r in revs], templ, mapping) return _hybrid(f, revs, lambda x: {name: x, 'ctx': repo[x], 'revcache': {}}, pycompat.identity, keytype=int) -@templatekeyword('subrepos') -def showsubrepos(**args): +@templatekeyword('subrepos', requires={'ctx', 'templ'}) +def showsubrepos(context, mapping): """List of strings. Updated subrepositories in the changeset.""" - args = pycompat.byteskwargs(args) - ctx = args['ctx'] + ctx = context.resource(mapping, 'ctx') substate = ctx.substate if not substate: - return showlist('subrepo', [], args) + return compatlist(context, mapping, 'subrepo', []) psubstate = ctx.parents()[0].substate or {} subrepos = [] for sub in substate: @@ -878,46 +954,37 @@ for sub in psubstate: if sub not in substate: subrepos.append(sub) # removed in ctx - return showlist('subrepo', sorted(subrepos), args) + return compatlist(context, mapping, 'subrepo', sorted(subrepos)) # don't remove "showtags" definition, even though namespaces will put # a helper function for "tags" keyword into "keywords" map automatically, # because online help text is built without namespaces initialization -@templatekeyword('tags') -def showtags(**args): +@templatekeyword('tags', requires={'repo', 'ctx', 'templ'}) +def showtags(context, mapping): """List of strings. Any tags associated with the changeset.""" - return shownames('tags', **args) - -@templatekeyword('termwidth') -def showtermwidth(repo, ctx, templ, **args): - """Integer. 
The width of the current terminal.""" - return repo.ui.termwidth() + return shownames(context, mapping, 'tags') -@templatekeyword('troubles') -def showtroubles(repo, **args): - """List of strings. Evolution troubles affecting the changeset. - (DEPRECATED) - """ - msg = ("'troubles' is deprecated, " - "use 'instabilities'") - repo.ui.deprecwarn(msg, '4.4') +@templatekeyword('termwidth', requires={'ui'}) +def showtermwidth(context, mapping): + """Integer. The width of the current terminal.""" + ui = context.resource(mapping, 'ui') + return ui.termwidth() - return showinstabilities(repo=repo, **args) - -@templatekeyword('instabilities') -def showinstabilities(**args): +@templatekeyword('instabilities', requires={'ctx', 'templ'}) +def showinstabilities(context, mapping): """List of strings. Evolution instabilities affecting the changeset. (EXPERIMENTAL) """ - args = pycompat.byteskwargs(args) - return showlist('instability', args['ctx'].instabilities(), args, - plural='instabilities') + ctx = context.resource(mapping, 'ctx') + return compatlist(context, mapping, 'instability', ctx.instabilities(), + plural='instabilities') -@templatekeyword('verbosity') -def showverbosity(ui, **args): +@templatekeyword('verbosity', requires={'ui'}) +def showverbosity(context, mapping): """String. The current output verbosity in 'debug', 'quiet', 'verbose', or ''.""" - # see cmdutil.changeset_templater for priority of these flags + ui = context.resource(mapping, 'ui') + # see logcmdutil.changesettemplater for priority of these flags if ui.debugflag: return 'debug' elif ui.quiet:
--- a/mercurial/templater.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templater.py Sun Mar 04 10:42:51 2018 -0500 @@ -29,6 +29,13 @@ templatekw, util, ) +from .utils import dateutil + +class ResourceUnavailable(error.Abort): + pass + +class TemplateNotFound(error.Abort): + pass # template parsing @@ -92,8 +99,8 @@ pos += 1 yield ('integer', program[s:pos], s) pos -= 1 - elif (c == '\\' and program[pos:pos + 2] in (r"\'", r'\"') - or c == 'r' and program[pos:pos + 3] in (r"r\'", r'r\"')): + elif (c == '\\' and program[pos:pos + 2] in (br"\'", br'\"') + or c == 'r' and program[pos:pos + 3] in (br"r\'", br'r\"')): # handle escaped quoted strings for compatibility with 2.9.2-3.4, # where some of nested templates were preprocessed as strings and # then compiled. therefore, \"...\" was allowed. (issue4733) @@ -161,36 +168,96 @@ ([('string', 'foo\\')], 6) """ parsed = [] + for typ, val, pos in _scantemplate(tmpl, start, stop, quote): + if typ == 'string': + parsed.append((typ, val)) + elif typ == 'template': + parsed.append(val) + elif typ == 'end': + return parsed, pos + else: + raise error.ProgrammingError('unexpected type: %s' % typ) + raise error.ProgrammingError('unterminated scanning of template') + +def scantemplate(tmpl, raw=False): + r"""Scan (type, start, end) positions of outermost elements in template + + If raw=True, a backslash is not taken as an escape character just like + r'' string in Python. Note that this is different from r'' literal in + template in that no template fragment can appear in r'', e.g. r'{foo}' + is a literal '{foo}', but ('{foo}', raw=True) is a template expression + 'foo'. 
+ + >>> list(scantemplate(b'foo{bar}"baz')) + [('string', 0, 3), ('template', 3, 8), ('string', 8, 12)] + >>> list(scantemplate(b'outer{"inner"}outer')) + [('string', 0, 5), ('template', 5, 14), ('string', 14, 19)] + >>> list(scantemplate(b'foo\\{escaped}')) + [('string', 0, 5), ('string', 5, 13)] + >>> list(scantemplate(b'foo\\{escaped}', raw=True)) + [('string', 0, 4), ('template', 4, 13)] + """ + last = None + for typ, val, pos in _scantemplate(tmpl, 0, len(tmpl), raw=raw): + if last: + yield last + (pos,) + if typ == 'end': + return + else: + last = (typ, pos) + raise error.ProgrammingError('unterminated scanning of template') + +def _scantemplate(tmpl, start, stop, quote='', raw=False): + """Parse template string into chunks of strings and template expressions""" sepchars = '{' + quote + unescape = [parser.unescapestr, pycompat.identity][raw] pos = start p = parser.parser(elements) - while pos < stop: - n = min((tmpl.find(c, pos, stop) for c in sepchars), - key=lambda n: (n < 0, n)) - if n < 0: - parsed.append(('string', parser.unescapestr(tmpl[pos:stop]))) - pos = stop - break - c = tmpl[n:n + 1] - bs = (n - pos) - len(tmpl[pos:n].rstrip('\\')) - if bs % 2 == 1: - # escaped (e.g. '\{', '\\\{', but not '\\{') - parsed.append(('string', parser.unescapestr(tmpl[pos:n - 1]) + c)) - pos = n + 1 - continue - if n > pos: - parsed.append(('string', parser.unescapestr(tmpl[pos:n]))) - if c == quote: - return parsed, n + 1 + try: + while pos < stop: + n = min((tmpl.find(c, pos, stop) for c in sepchars), + key=lambda n: (n < 0, n)) + if n < 0: + yield ('string', unescape(tmpl[pos:stop]), pos) + pos = stop + break + c = tmpl[n:n + 1] + bs = 0 # count leading backslashes + if not raw: + bs = (n - pos) - len(tmpl[pos:n].rstrip('\\')) + if bs % 2 == 1: + # escaped (e.g. 
'\{', '\\\{', but not '\\{') + yield ('string', unescape(tmpl[pos:n - 1]) + c, pos) + pos = n + 1 + continue + if n > pos: + yield ('string', unescape(tmpl[pos:n]), pos) + if c == quote: + yield ('end', None, n + 1) + return - parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}')) - if not tmpl.endswith('}', n + 1, pos): - raise error.ParseError(_("invalid token"), pos) - parsed.append(parseres) + parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}')) + if not tmpl.endswith('}', n + 1, pos): + raise error.ParseError(_("invalid token"), pos) + yield ('template', parseres, n) - if quote: - raise error.ParseError(_("unterminated string"), start) - return parsed, pos + if quote: + raise error.ParseError(_("unterminated string"), start) + except error.ParseError as inst: + if len(inst.args) > 1: # has location + loc = inst.args[1] + # Offset the caret location by the number of newlines before the + # location of the error, since we will replace one-char newlines + # with the two-char literal r'\n'. + offset = tmpl[:loc].count('\n') + tmpl = tmpl.replace('\n', br'\n') + # We want the caret to point to the place in the template that + # failed to parse, but in a hint we get a open paren at the + # start. Therefore, we print "loc" spaces (instead of "loc - 1") + # to line up the caret with the location of the error. 
+ inst.hint = tmpl + '\n' + ' ' * (loc + offset) + '^ ' + _('here') + raise + yield ('end', None, pos) def _unnesttemplatelist(tree): """Expand list of templates to node tuple @@ -396,12 +463,18 @@ v = context.process(key, safemapping) except TemplateNotFound: v = default - if callable(v): - # TODO: templatekw functions will be updated to take (context, mapping) - # pair instead of **props + if callable(v) and getattr(v, '_requires', None) is None: + # old templatekw: expand all keywords and resources props = context._resources.copy() props.update(mapping) return v(**pycompat.strkwargs(props)) + if callable(v): + # new templatekw + try: + return v(context, mapping) + except ResourceUnavailable: + # unsupported keyword is mapped to empty just like unknown keyword + return None return v def buildtemplate(exp, context): @@ -491,7 +564,7 @@ if sym: raise error.ParseError(_("keyword '%s' has no member") % sym) else: - raise error.ParseError(_("%r has no member") % d) + raise error.ParseError(_("%r has no member") % pycompat.bytestr(d)) def buildnegate(exp, context): arg = compileexp(exp[1], context, exprmethods) @@ -592,9 +665,9 @@ fmt = evalstring(context, mapping, args[1]) try: if fmt is None: - return util.datestr(date) + return dateutil.datestr(date) else: - return util.datestr(date, fmt) + return dateutil.datestr(date, fmt) except (TypeError, ValueError): # i18n: "date" is a keyword raise error.ParseError(_("date expects a date information")) @@ -665,10 +738,7 @@ ctx = context.resource(mapping, 'ctx') m = ctx.match([raw]) files = list(ctx.matches(m)) - # TODO: pass (context, mapping) pair to keyword function - props = context._resources.copy() - props.update(mapping) - return templatekw.showlist("file", files, props) + return templatekw.compatlist(context, mapping, "file", files) @templatefunc('fill(text[, width[, initialident[, hangindent]]])') def fill(context, mapping, args): @@ -851,7 +921,7 @@ joiner = evalstring(context, mapping, args[1]) first = True - for x 
in joinset: + for x in pycompat.maybebytestr(joinset): if first: first = False else: @@ -888,11 +958,7 @@ pattern = None if len(args) == 1: pattern = evalstring(context, mapping, args[0]) - - # TODO: pass (context, mapping) pair to keyword function - props = context._resources.copy() - props.update(mapping) - return templatekw.showlatesttags(pattern, **pycompat.strkwargs(props)) + return templatekw.showlatesttags(context, mapping, pattern) @templatefunc('localdate(date[, tz])') def localdate(context, mapping, args): @@ -904,15 +970,15 @@ date = evalfuncarg(context, mapping, args[0]) try: - date = util.parsedate(date) + date = dateutil.parsedate(date) except AttributeError: # not str nor date tuple # i18n: "localdate" is a keyword raise error.ParseError(_("localdate expects a date information")) if len(args) >= 2: tzoffset = None tz = evalfuncarg(context, mapping, args[1]) - if isinstance(tz, str): - tzoffset, remainder = util.parsetimezone(tz) + if isinstance(tz, bytes): + tzoffset, remainder = dateutil.parsetimezone(tz) if remainder: tzoffset = None if tzoffset is None: @@ -922,7 +988,7 @@ # i18n: "localdate" is a keyword raise error.ParseError(_("localdate expects a timezone")) else: - tzoffset = util.makedate()[1] + tzoffset = dateutil.makedate()[1] return (date[0], tzoffset) @templatefunc('max(iterable)') @@ -934,7 +1000,7 @@ iterable = evalfuncarg(context, mapping, args[0]) try: - x = max(iterable) + x = max(pycompat.maybebytestr(iterable)) except (TypeError, ValueError): # i18n: "max" is a keyword raise error.ParseError(_("max first argument should be an iterable")) @@ -949,7 +1015,7 @@ iterable = evalfuncarg(context, mapping, args[0]) try: - x = min(iterable) + x = min(pycompat.maybebytestr(iterable)) except (TypeError, ValueError): # i18n: "min" is a keyword raise error.ParseError(_("min first argument should be an iterable")) @@ -1075,12 +1141,7 @@ revs = query(raw) revs = list(revs) revsetcache[raw] = revs - - # TODO: pass (context, mapping) pair to 
keyword function - props = context._resources.copy() - props.update(mapping) - return templatekw.showrevslist("revision", revs, - **pycompat.strkwargs(props)) + return templatekw.showrevslist(context, mapping, "revision", revs) @templatefunc('rstdoc(text, style)') def rstdoc(context, mapping, args): @@ -1340,7 +1401,8 @@ if v is None: v = self._resources.get(key) if v is None: - raise error.Abort(_('template resource not available: %s') % key) + raise ResourceUnavailable(_('template resource not available: %s') + % key) return v def _load(self, t): @@ -1431,9 +1493,6 @@ aliases.extend(conf['templatealias'].items()) return cache, tmap, aliases -class TemplateNotFound(error.Abort): - pass - class templater(object): def __init__(self, filters=None, defaults=None, resources=None, @@ -1493,8 +1552,9 @@ raise TemplateNotFound(_('"%s" not in template map') % inst.args[0]) except IOError as inst: - raise IOError(inst.args[0], _('template file %s: %s') % - (self.map[t][1], inst.args[1])) + reason = (_('template file %s: %s') + % (self.map[t][1], util.forcebytestr(inst.args[1]))) + raise IOError(inst.args[0], encoding.strfromlocal(reason)) return self.cache[t] def render(self, mapping): @@ -1546,16 +1606,16 @@ if paths is None: paths = templatepaths() - elif isinstance(paths, str): + elif isinstance(paths, bytes): paths = [paths] - if isinstance(styles, str): + if isinstance(styles, bytes): styles = [styles] for style in styles: # only plain name is allowed to honor template paths if (not style - or style in (os.curdir, os.pardir) + or style in (pycompat.oscurdir, pycompat.ospardir) or pycompat.ossep in style or pycompat.osaltsep and pycompat.osaltsep in style): continue
--- a/mercurial/templates/gitweb/changeset.tmpl Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/gitweb/changeset.tmpl Sun Mar 04 10:42:51 2018 -0500 @@ -44,7 +44,7 @@ <td>changeset {rev}</td> <td style="font-family:monospace"><a class="list" href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></td> </tr> -{if(obsolete, '<tr><td>obsolete</td><td>{succsandmarkers%obsfateentry}</td></tr>')} +{if(obsolete, succsandmarkers%obsfateentry)} {ifeq(count(parent), '2', parent%changesetparentdiff, parent%changesetparent)} {child%changesetchild} </table></div>
--- a/mercurial/templates/gitweb/map Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/gitweb/map Sun Mar 04 10:42:51 2018 -0500 @@ -275,7 +275,13 @@ obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}' obsfateverb = '{obsfateverb(successors, markers)}' obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}' -obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}' +obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}' +obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}' +obsfateentry = ' + <tr> + <td>obsolete</td> + <td>{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}</td> + </tr>' shortlogentry = ' <tr class="parity{parity}"> <td class="age"><i class="age">{date|rfc822date}</i></td>
--- a/mercurial/templates/monoblue/changeset.tmpl Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/monoblue/changeset.tmpl Sun Mar 04 10:42:51 2018 -0500 @@ -48,7 +48,7 @@ {branch%changesetbranch} <dt>changeset {rev}</dt> <dd><a href="{url|urlescape}rev/{node|short}{sessionvars%urlparameter}">{node|short}</a></dd> - {if(obsolete, '<dt>obsolete</dt><dd>{succsandmarkers%obsfateentry}</dd>')} + {if(obsolete, succsandmarkers%obsfateentry)} {ifeq(count(parent), '2', parent%changesetparentdiff, parent%changesetparent)} {child%changesetchild} </dl>
--- a/mercurial/templates/monoblue/map Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/monoblue/map Sun Mar 04 10:42:51 2018 -0500 @@ -233,7 +233,11 @@ obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}' obsfateverb = '{obsfateverb(successors, markers)}' obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}' -obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}' +obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}' +obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}' +obsfateentry = ' + <dt>obsolete</dt> + <dd>{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}</dd>' shortlogentry = ' <tr class="parity{parity}"> <td class="nowrap age">{date|rfc822date}</td>
--- a/mercurial/templates/paper/changeset.tmpl Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/paper/changeset.tmpl Sun Mar 04 10:42:51 2018 -0500 @@ -51,7 +51,7 @@ </tr> {if(obsolete, '<tr> <th>obsolete</th> - <td>{succsandmarkers%obsfateentry}</td> + <td>{join(succsandmarkers%obsfateentry, '<br>\n')}</td> </tr>')} <tr> <th class="author">parents</th>
--- a/mercurial/templates/paper/map Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/paper/map Sun Mar 04 10:42:51 2018 -0500 @@ -213,7 +213,9 @@ obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}' obsfateverb = '{obsfateverb(successors, markers)}' obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}' -obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}' +obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}' +obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}' +obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}' filediffparent = ' <tr>
--- a/mercurial/templates/spartan/changelogentry.tmpl Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/spartan/changelogentry.tmpl Sun Mar 04 10:42:51 2018 -0500 @@ -22,10 +22,7 @@ <th class="phase">phase:</th> <td class="phase">{phase|escape}</td> </tr>')} - {if(obsolete, '<tr> - <th class="obsolete">obsolete:</th> - <td class="obsolete">{succsandmarkers%obsfateentry}</td> - </tr>')} + {if(obsolete, succsandmarkers%obsfateentry)} {ifeq(count(instabilities), '0', '', '<tr> <th class="instabilities">instabilities:</th> <td class="instabilities">{instabilities%"{instability} "|escape}</td>
--- a/mercurial/templates/spartan/changeset.tmpl Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/spartan/changeset.tmpl Sun Mar 04 10:42:51 2018 -0500 @@ -37,10 +37,7 @@ <th class="phase">phase:</th> <td class="phase">{phase|escape}</td> </tr>')} -{if(obsolete, '<tr> - <th class="obsolete">obsolete:</th> - <td class="obsolete">{succsandmarkers%obsfateentry}</td> -</tr>')} +{if(obsolete, succsandmarkers%obsfateentry)} {ifeq(count(instabilities), '0', '', '<tr> <th class="instabilities">instabilities:</th> <td class="instabilities">{instabilities%"{instability} "|escape}</td>
--- a/mercurial/templates/spartan/map Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/spartan/map Sun Mar 04 10:42:51 2018 -0500 @@ -170,7 +170,13 @@ obsfatesuccessors = '{if(successors, ' as ')}{successors%successorlink}' obsfateverb = '{obsfateverb(successors, markers)}' obsfateoperations = '{if(obsfateoperations(markers), ' using {join(obsfateoperations(markers), ', ')}')}' -obsfateentry = '{obsfateverb}{obsfateoperations}{obsfatesuccessors}' +obsfateusers = '{if(obsfateusers(markers), ' by {join(obsfateusers(markers)%'{user|obfuscate}', ', ')}')}' +obsfatedate = '{if(obsfatedate(markers), ' {ifeq(min(obsfatedate(markers)), max(obsfatedate(markers)), '<span class="age">{min(obsfatedate(markers))|rfc822date}</span>', 'between <span class="age">{min(obsfatedate(markers))|rfc822date}</span> and <span class="age">{max(obsfatedate(markers))|rfc822date}</span>')}')}' +obsfateentry = ' + <tr> + <th class="obsolete">obsolete:</th> + <td class="obsolete">{obsfateverb}{obsfateoperations}{obsfatesuccessors}{obsfateusers}{obsfatedate}</td> + </tr>' filediffparent = ' <tr> <th class="parent">parent {rev}:</th>
--- a/mercurial/templates/static/style-gitweb.css Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/templates/static/style-gitweb.css Sun Mar 04 10:42:51 2018 -0500 @@ -29,7 +29,7 @@ div.title_text { padding:6px 0px; border: solid #d9d8d1; border-width:0px 0px 1px; } div.log_body { padding:8px 8px 8px 150px; } .age { white-space:nowrap; } -span.age { position:relative; float:left; width:142px; font-style:italic; } +a.title span.age { position:relative; float:left; width:142px; font-style:italic; } div.log_link { padding:0px 8px; font-size:10px; font-family:sans-serif; font-style:normal;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/xdiff/xdiff.h Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,150 @@ +/* + * LibXDiff by Davide Libenzi ( File Differential Library ) + * Copyright (C) 2003 Davide Libenzi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#if !defined(XDIFF_H) +#define XDIFF_H + +#ifdef __cplusplus +extern "C" { +#endif /* #ifdef __cplusplus */ + +#include <stddef.h> /* size_t */ + +/* xpparm_t.flags */ +#define XDF_NEED_MINIMAL (1 << 0) + +#define XDF_IGNORE_WHITESPACE (1 << 1) +#define XDF_IGNORE_WHITESPACE_CHANGE (1 << 2) +#define XDF_IGNORE_WHITESPACE_AT_EOL (1 << 3) +#define XDF_IGNORE_CR_AT_EOL (1 << 4) +#define XDF_WHITESPACE_FLAGS (XDF_IGNORE_WHITESPACE | \ + XDF_IGNORE_WHITESPACE_CHANGE | \ + XDF_IGNORE_WHITESPACE_AT_EOL | \ + XDF_IGNORE_CR_AT_EOL) + +#define XDF_IGNORE_BLANK_LINES (1 << 7) + +#define XDF_INDENT_HEURISTIC (1 << 23) + +/* xdemitconf_t.flags */ +#define XDL_EMIT_FUNCNAMES (1 << 0) +#define XDL_EMIT_FUNCCONTEXT (1 << 2) +/* emit bdiff-style "matched" (a1, a2, b1, b2) hunks instead of "different" + * (a1, a2 - a1, b1, b2 - b1) hunks */ +#define XDL_EMIT_BDIFFHUNK (1 << 4) + +#define XDL_MMB_READONLY (1 << 0) + +#define XDL_MMF_ATOMIC (1 << 0) + +#define XDL_BDOP_INS 1 +#define XDL_BDOP_CPY 2 +#define 
XDL_BDOP_INSB 3 + +/* merge simplification levels */ +#define XDL_MERGE_MINIMAL 0 +#define XDL_MERGE_EAGER 1 +#define XDL_MERGE_ZEALOUS 2 +#define XDL_MERGE_ZEALOUS_ALNUM 3 + +/* merge favor modes */ +#define XDL_MERGE_FAVOR_OURS 1 +#define XDL_MERGE_FAVOR_THEIRS 2 +#define XDL_MERGE_FAVOR_UNION 3 + +/* merge output styles */ +#define XDL_MERGE_DIFF3 1 + +typedef struct s_mmfile { + char *ptr; + long size; +} mmfile_t; + +typedef struct s_mmbuffer { + char *ptr; + long size; +} mmbuffer_t; + +typedef struct s_xpparam { + unsigned long flags; + + /* See Documentation/diff-options.txt. */ + char **anchors; + size_t anchors_nr; +} xpparam_t; + +typedef struct s_xdemitcb { + void *priv; + int (*outf)(void *, mmbuffer_t *, int); +} xdemitcb_t; + +typedef long (*find_func_t)(const char *line, long line_len, char *buffer, long buffer_size, void *priv); + +typedef int (*xdl_emit_hunk_consume_func_t)(long start_a, long count_a, + long start_b, long count_b, + void *cb_data); + +typedef struct s_xdemitconf { + long ctxlen; + long interhunkctxlen; + unsigned long flags; + find_func_t find_func; + void *find_func_priv; + xdl_emit_hunk_consume_func_t hunk_func; +} xdemitconf_t; + +typedef struct s_bdiffparam { + long bsize; +} bdiffparam_t; + + +#define xdl_malloc(x) malloc(x) +#define xdl_free(ptr) free(ptr) +#define xdl_realloc(ptr,x) realloc(ptr,x) + +void *xdl_mmfile_first(mmfile_t *mmf, long *size); +long xdl_mmfile_size(mmfile_t *mmf); + +int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, + xdemitconf_t const *xecfg, xdemitcb_t *ecb); + +typedef struct s_xmparam { + xpparam_t xpp; + int marker_size; + int level; + int favor; + int style; + const char *ancestor; /* label for orig */ + const char *file1; /* label for mf1 */ + const char *file2; /* label for mf2 */ +} xmparam_t; + +#define DEFAULT_CONFLICT_MARKER_SIZE 7 + +int xdl_merge(mmfile_t *orig, mmfile_t *mf1, mmfile_t *mf2, + xmparam_t const *xmp, mmbuffer_t *result); + +#ifdef __cplusplus +} +#endif /* 
#ifdef __cplusplus */ + +#endif /* #if !defined(XDIFF_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/xdiff/xdiffi.c Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,1090 @@ +/* + * LibXDiff by Davide Libenzi ( File Differential Library ) + * Copyright (C) 2003 Davide Libenzi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#include "xinclude.h" + + + +#define XDL_MAX_COST_MIN 256 +#define XDL_HEUR_MIN_COST 256 +#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1) +#define XDL_SNAKE_CNT 20 +#define XDL_K_HEUR 4 + + + +typedef struct s_xdpsplit { + long i1, i2; + int min_lo, min_hi; +} xdpsplit_t; + + + + +static long xdl_split(unsigned long const *ha1, long off1, long lim1, + unsigned long const *ha2, long off2, long lim2, + long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl, + xdalgoenv_t *xenv); +static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2); + + + + + +/* + * See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers. + * Basically considers a "box" (off1, off2, lim1, lim2) and scan from both + * the forward diagonal starting from (off1, off2) and the backward diagonal + * starting from (lim1, lim2). If the K values on the same diagonal crosses + * returns the furthest point of reach. 
We might end up having to expensive + * cases using this algorithm is full, so a little bit of heuristic is needed + * to cut the search and to return a suboptimal point. + */ +static long xdl_split(unsigned long const *ha1, long off1, long lim1, + unsigned long const *ha2, long off2, long lim2, + long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl, + xdalgoenv_t *xenv) { + long dmin = off1 - lim2, dmax = lim1 - off2; + long fmid = off1 - off2, bmid = lim1 - lim2; + long odd = (fmid - bmid) & 1; + long fmin = fmid, fmax = fmid; + long bmin = bmid, bmax = bmid; + long ec, d, i1, i2, prev1, best, dd, v, k; + + /* + * Set initial diagonal values for both forward and backward path. + */ + kvdf[fmid] = off1; + kvdb[bmid] = lim1; + + for (ec = 1;; ec++) { + int got_snake = 0; + + /* + * We need to extent the diagonal "domain" by one. If the next + * values exits the box boundaries we need to change it in the + * opposite direction because (max - min) must be a power of two. + * Also we initialize the external K value to -1 so that we can + * avoid extra conditions check inside the core loop. + */ + if (fmin > dmin) + kvdf[--fmin - 1] = -1; + else + ++fmin; + if (fmax < dmax) + kvdf[++fmax + 1] = -1; + else + --fmax; + + for (d = fmax; d >= fmin; d -= 2) { + if (kvdf[d - 1] >= kvdf[d + 1]) + i1 = kvdf[d - 1] + 1; + else + i1 = kvdf[d + 1]; + prev1 = i1; + i2 = i1 - d; + for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++); + if (i1 - prev1 > xenv->snake_cnt) + got_snake = 1; + kvdf[d] = i1; + if (odd && bmin <= d && d <= bmax && kvdb[d] <= i1) { + spl->i1 = i1; + spl->i2 = i2; + spl->min_lo = spl->min_hi = 1; + return ec; + } + } + + /* + * We need to extent the diagonal "domain" by one. If the next + * values exits the box boundaries we need to change it in the + * opposite direction because (max - min) must be a power of two. + * Also we initialize the external K value to -1 so that we can + * avoid extra conditions check inside the core loop. 
+ */ + if (bmin > dmin) + kvdb[--bmin - 1] = XDL_LINE_MAX; + else + ++bmin; + if (bmax < dmax) + kvdb[++bmax + 1] = XDL_LINE_MAX; + else + --bmax; + + for (d = bmax; d >= bmin; d -= 2) { + if (kvdb[d - 1] < kvdb[d + 1]) + i1 = kvdb[d - 1]; + else + i1 = kvdb[d + 1] - 1; + prev1 = i1; + i2 = i1 - d; + for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--); + if (prev1 - i1 > xenv->snake_cnt) + got_snake = 1; + kvdb[d] = i1; + if (!odd && fmin <= d && d <= fmax && i1 <= kvdf[d]) { + spl->i1 = i1; + spl->i2 = i2; + spl->min_lo = spl->min_hi = 1; + return ec; + } + } + + if (need_min) + continue; + + /* + * If the edit cost is above the heuristic trigger and if + * we got a good snake, we sample current diagonals to see + * if some of the, have reached an "interesting" path. Our + * measure is a function of the distance from the diagonal + * corner (i1 + i2) penalized with the distance from the + * mid diagonal itself. If this value is above the current + * edit cost times a magic factor (XDL_K_HEUR) we consider + * it interesting. + */ + if (got_snake && ec > xenv->heur_min) { + for (best = 0, d = fmax; d >= fmin; d -= 2) { + dd = d > fmid ? d - fmid: fmid - d; + i1 = kvdf[d]; + i2 = i1 - d; + v = (i1 - off1) + (i2 - off2) - dd; + + if (v > XDL_K_HEUR * ec && v > best && + off1 + xenv->snake_cnt <= i1 && i1 < lim1 && + off2 + xenv->snake_cnt <= i2 && i2 < lim2) { + for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++) + if (k == xenv->snake_cnt) { + best = v; + spl->i1 = i1; + spl->i2 = i2; + break; + } + } + } + if (best > 0) { + spl->min_lo = 1; + spl->min_hi = 0; + return ec; + } + + for (best = 0, d = bmax; d >= bmin; d -= 2) { + dd = d > bmid ? 
d - bmid: bmid - d;
+ i1 = kvdb[d];
+ i2 = i1 - d;
+ v = (lim1 - i1) + (lim2 - i2) - dd;
+
+ if (v > XDL_K_HEUR * ec && v > best &&
+ off1 < i1 && i1 <= lim1 - xenv->snake_cnt &&
+ off2 < i2 && i2 <= lim2 - xenv->snake_cnt) {
+ for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++)
+ if (k == xenv->snake_cnt - 1) {
+ best = v;
+ spl->i1 = i1;
+ spl->i2 = i2;
+ break;
+ }
+ }
+ }
+ if (best > 0) {
+ spl->min_lo = 0;
+ spl->min_hi = 1;
+ return ec;
+ }
+ }
+
+ /*
+ * Enough is enough. We spent too much time here and now we collect
+ * the furthest reaching path using the (i1 + i2) measure.
+ */
+ if (ec >= xenv->mxcost) {
+ long fbest, fbest1, bbest, bbest1;
+
+ fbest = fbest1 = -1;
+ for (d = fmax; d >= fmin; d -= 2) {
+ i1 = XDL_MIN(kvdf[d], lim1);
+ i2 = i1 - d;
+ if (lim2 < i2)
+ i1 = lim2 + d, i2 = lim2;
+ if (fbest < i1 + i2) {
+ fbest = i1 + i2;
+ fbest1 = i1;
+ }
+ }
+
+ bbest = bbest1 = XDL_LINE_MAX;
+ for (d = bmax; d >= bmin; d -= 2) {
+ i1 = XDL_MAX(off1, kvdb[d]);
+ i2 = i1 - d;
+ if (i2 < off2)
+ i1 = off2 + d, i2 = off2;
+ if (i1 + i2 < bbest) {
+ bbest = i1 + i2;
+ bbest1 = i1;
+ }
+ }
+
+ if ((lim1 + lim2) - bbest < fbest - (off1 + off2)) {
+ spl->i1 = fbest1;
+ spl->i2 = fbest - fbest1;
+ spl->min_lo = 1;
+ spl->min_hi = 0;
+ } else {
+ spl->i1 = bbest1;
+ spl->i2 = bbest - bbest1;
+ spl->min_lo = 0;
+ spl->min_hi = 1;
+ }
+ return ec;
+ }
+ }
+}
+
+
+/*
+ * Rule: "Divide et Impera". Recursively split the box in sub-boxes by calling
+ * the box splitting function. Note that the real job (marking changed lines)
+ * is done in the two boundary reaching checks.
+ */
+int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
+ diffdata_t *dd2, long off2, long lim2,
+ long *kvdf, long *kvdb, int need_min, xdalgoenv_t *xenv) {
+ unsigned long const *ha1 = dd1->ha, *ha2 = dd2->ha;
+
+ /*
+ * Shrink the box by walking through each diagonal snake (SW and NE). 
+ */
+ for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++);
+ for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--);
+
+ /*
+ * If one dimension is empty, then all records on the other one must
+ * be obviously changed.
+ */
+ if (off1 == lim1) {
+ char *rchg2 = dd2->rchg;
+ long *rindex2 = dd2->rindex;
+
+ for (; off2 < lim2; off2++)
+ rchg2[rindex2[off2]] = 1;
+ } else if (off2 == lim2) {
+ char *rchg1 = dd1->rchg;
+ long *rindex1 = dd1->rindex;
+
+ for (; off1 < lim1; off1++)
+ rchg1[rindex1[off1]] = 1;
+ } else {
+ xdpsplit_t spl;
+ spl.i1 = spl.i2 = 0;
+
+ /*
+ * Divide ...
+ */
+ if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb,
+ need_min, &spl, xenv) < 0) {
+
+ return -1;
+ }
+
+ /*
+ * ... et Impera.
+ */
+ if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2,
+ kvdf, kvdb, spl.min_lo, xenv) < 0 ||
+ xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2,
+ kvdf, kvdb, spl.min_hi, xenv) < 0) {
+
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdfenv_t *xe) {
+ long ndiags;
+ long *kvd, *kvdf, *kvdb;
+ xdalgoenv_t xenv;
+ diffdata_t dd1, dd2;
+
+ if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) {
+
+ return -1;
+ }
+
+ /*
+ * Allocate and setup K vectors to be used by the differential algorithm.
+ * One is to store the forward path and one to store the backward path. 
+ */
+ ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
+ if (!(kvd = (long *) xdl_malloc((2 * ndiags + 2) * sizeof(long)))) {
+
+ xdl_free_env(xe);
+ return -1;
+ }
+ kvdf = kvd;
+ kvdb = kvdf + ndiags;
+ kvdf += xe->xdf2.nreff + 1; /* offset so the diagonal index d may go negative */
+ kvdb += xe->xdf2.nreff + 1;
+
+ xenv.mxcost = xdl_bogosqrt(ndiags);
+ if (xenv.mxcost < XDL_MAX_COST_MIN)
+ xenv.mxcost = XDL_MAX_COST_MIN;
+ xenv.snake_cnt = XDL_SNAKE_CNT;
+ xenv.heur_min = XDL_HEUR_MIN_COST;
+
+ dd1.nrec = xe->xdf1.nreff;
+ dd1.ha = xe->xdf1.ha;
+ dd1.rchg = xe->xdf1.rchg;
+ dd1.rindex = xe->xdf1.rindex;
+ dd2.nrec = xe->xdf2.nreff;
+ dd2.ha = xe->xdf2.ha;
+ dd2.rchg = xe->xdf2.rchg;
+ dd2.rindex = xe->xdf2.rindex;
+
+ if (xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
+ kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, &xenv) < 0) {
+
+ xdl_free(kvd);
+ xdl_free_env(xe);
+ return -1;
+ }
+
+ xdl_free(kvd);
+
+ return 0;
+}
+
+
+static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2) {
+ xdchange_t *xch;
+
+ if (!(xch = (xdchange_t *) xdl_malloc(sizeof(xdchange_t))))
+ return NULL;
+
+ xch->next = xscr;
+ xch->i1 = i1;
+ xch->i2 = i2;
+ xch->chg1 = chg1;
+ xch->chg2 = chg2;
+ xch->ignore = 0;
+
+ return xch;
+}
+
+
+static int recs_match(xrecord_t *rec1, xrecord_t *rec2, long flags)
+{
+ return (rec1->ha == rec2->ha &&
+ xdl_recmatch(rec1->ptr, rec1->size,
+ rec2->ptr, rec2->size,
+ flags));
+}
+
+/*
+ * If a line is indented more than this, get_indent() just returns this value.
+ * This avoids having to do absurd amounts of work for data that are not
+ * human-readable text, and also ensures that the output of get_indent fits within
+ * an int.
+ */
+#define MAX_INDENT 200
+
+/*
+ * Return the amount of indentation of the specified line, treating TAB as 8
+ * columns. Return -1 if line is empty or contains only whitespace. Clamp the
+ * output value at MAX_INDENT. 
+ */
+static int get_indent(xrecord_t *rec)
+{
+ long i;
+ int ret = 0;
+
+ for (i = 0; i < rec->size; i++) {
+ char c = rec->ptr[i];
+
+ if (!XDL_ISSPACE(c))
+ return ret;
+ else if (c == ' ')
+ ret += 1;
+ else if (c == '\t')
+ ret += 8 - ret % 8;
+ /* ignore other whitespace characters */
+
+ if (ret >= MAX_INDENT)
+ return MAX_INDENT;
+ }
+
+ /* The line contains only whitespace. */
+ return -1;
+}
+
+/*
+ * If more than this number of consecutive blank rows are found, just return this
+ * value. This avoids requiring O(N^2) work for pathological cases, and also
+ * ensures that the output of score_split fits in an int.
+ */
+#define MAX_BLANKS 20
+
+/* Characteristics measured about a hypothetical split position. */
+struct split_measurement {
+ /*
+ * Is the split at the end of the file (aside from any blank lines)?
+ */
+ int end_of_file;
+
+ /*
+ * How much is the line immediately following the split indented (or -1 if
+ * the line is blank):
+ */
+ int indent;
+
+ /*
+ * How many consecutive lines above the split are blank?
+ */
+ int pre_blank;
+
+ /*
+ * How much is the nearest non-blank line above the split indented (or -1
+ * if there is no such line)?
+ */
+ int pre_indent;
+
+ /*
+ * How many lines after the line following the split are blank?
+ */
+ int post_blank;
+
+ /*
+ * How much is the nearest non-blank line after the line following the
+ * split indented (or -1 if there is no such line)?
+ */
+ int post_indent;
+};
+
+struct split_score {
+ /* The effective indent of this split (smaller is preferred). */
+ int effective_indent;
+
+ /* Penalty for this split (smaller is preferred). */
+ int penalty;
+};
+
+/*
+ * Fill m with information about a hypothetical split of xdf above line split. 
+ */
+static void measure_split(const xdfile_t *xdf, long split,
+ struct split_measurement *m)
+{
+ long i;
+
+ if (split >= xdf->nrec) {
+ m->end_of_file = 1;
+ m->indent = -1;
+ } else {
+ m->end_of_file = 0;
+ m->indent = get_indent(xdf->recs[split]);
+ }
+
+ m->pre_blank = 0;
+ m->pre_indent = -1;
+ for (i = split - 1; i >= 0; i--) {
+ m->pre_indent = get_indent(xdf->recs[i]);
+ if (m->pre_indent != -1)
+ break;
+ m->pre_blank += 1;
+ if (m->pre_blank == MAX_BLANKS) {
+ m->pre_indent = 0;
+ break;
+ }
+ }
+
+ m->post_blank = 0;
+ m->post_indent = -1;
+ for (i = split + 1; i < xdf->nrec; i++) {
+ m->post_indent = get_indent(xdf->recs[i]);
+ if (m->post_indent != -1)
+ break;
+ m->post_blank += 1;
+ if (m->post_blank == MAX_BLANKS) {
+ m->post_indent = 0;
+ break;
+ }
+ }
+}
+
+/*
+ * The empirically-determined weight factors used by score_split() below.
+ * Larger values means that the position is a less favorable place to split.
+ *
+ * Note that scores are only ever compared against each other, so multiplying
+ * all of these weight/penalty values by the same factor wouldn't change the
+ * heuristic's behavior. Still, we need to set that arbitrary scale *somehow*.
+ * In practice, these numbers are chosen to be large enough that they can be
+ * adjusted relative to each other with sufficient precision despite using
+ * integer math. 
+ */
+
+/* Penalty if there are no non-blank lines before the split */
+#define START_OF_FILE_PENALTY 1
+
+/* Penalty if there are no non-blank lines after the split */
+#define END_OF_FILE_PENALTY 21
+
+/* Multiplier for the number of blank lines around the split */
+#define TOTAL_BLANK_WEIGHT (-30)
+
+/* Multiplier for the number of blank lines after the split */
+#define POST_BLANK_WEIGHT 6
+
+/*
+ * Penalties applied if the line is indented more than its predecessor
+ */
+#define RELATIVE_INDENT_PENALTY (-4)
+#define RELATIVE_INDENT_WITH_BLANK_PENALTY 10
+
+/*
+ * Penalties applied if the line is indented less than both its predecessor and
+ * its successor
+ */
+#define RELATIVE_OUTDENT_PENALTY 24
+#define RELATIVE_OUTDENT_WITH_BLANK_PENALTY 17
+
+/*
+ * Penalties applied if the line is indented less than its predecessor but not
+ * less than its successor
+ */
+#define RELATIVE_DEDENT_PENALTY 23
+#define RELATIVE_DEDENT_WITH_BLANK_PENALTY 17
+
+/*
+ * We only consider whether the sum of the effective indents for splits are
+ * less than (-1), equal to (0), or greater than (+1) each other. The resulting
+ * value is multiplied by the following weight and combined with the penalty to
+ * determine the better of two scores.
+ */
+#define INDENT_WEIGHT 60
+
+/*
+ * Compute a badness score for the hypothetical split whose measurements are
+ * stored in m. The weight factors were determined empirically using the tools and
+ * corpus described in
+ *
+ * https://github.com/mhagger/diff-slider-tools
+ *
+ * Also see that project if you want to improve the weights based on, for example,
+ * a larger or more diverse corpus. 
+ */
+static void score_add_split(const struct split_measurement *m, struct split_score *s)
+{
+ /*
+ * A place to accumulate penalty factors (positive makes this index more
+ * favored):
+ */
+ int post_blank, total_blank, indent, any_blanks;
+
+ if (m->pre_indent == -1 && m->pre_blank == 0)
+ s->penalty += START_OF_FILE_PENALTY;
+
+ if (m->end_of_file)
+ s->penalty += END_OF_FILE_PENALTY;
+
+ /*
+ * Set post_blank to the number of blank lines following the split,
+ * including the line immediately after the split:
+ */
+ post_blank = (m->indent == -1) ? 1 + m->post_blank : 0;
+ total_blank = m->pre_blank + post_blank;
+
+ /* Penalties based on nearby blank lines: */
+ s->penalty += TOTAL_BLANK_WEIGHT * total_blank;
+ s->penalty += POST_BLANK_WEIGHT * post_blank;
+
+ if (m->indent != -1)
+ indent = m->indent;
+ else
+ indent = m->post_indent;
+
+ any_blanks = (total_blank != 0);
+
+ /* Note that the effective indent is -1 at the end of the file: */
+ s->effective_indent += indent;
+
+ if (indent == -1) {
+ /* No additional adjustments needed. */
+ } else if (m->pre_indent == -1) {
+ /* No additional adjustments needed. */
+ } else if (indent > m->pre_indent) {
+ /*
+ * The line is indented more than its predecessor.
+ */
+ s->penalty += any_blanks ?
+ RELATIVE_INDENT_WITH_BLANK_PENALTY :
+ RELATIVE_INDENT_PENALTY;
+ } else if (indent == m->pre_indent) {
+ /*
+ * The line has the same indentation level as its predecessor.
+ * No additional adjustments needed.
+ */
+ } else {
+ /*
+ * The line is indented less than its predecessor. It could be
+ * the block terminator of the previous block, but it could
+ * also be the start of a new block (e.g., an "else" block, or
+ * maybe the previous block didn't have a block terminator).
+ * Try to distinguish those cases based on what comes next:
+ */
+ if (m->post_indent != -1 && m->post_indent > indent) {
+ /*
+ * The following line is indented more. So it is likely
+ * that this line is the start of a block. 
+ */
+ s->penalty += any_blanks ?
+ RELATIVE_OUTDENT_WITH_BLANK_PENALTY :
+ RELATIVE_OUTDENT_PENALTY;
+ } else {
+ /*
+ * That was probably the end of a block.
+ */
+ s->penalty += any_blanks ?
+ RELATIVE_DEDENT_WITH_BLANK_PENALTY :
+ RELATIVE_DEDENT_PENALTY;
+ }
+ }
+}
+
+static int score_cmp(struct split_score *s1, struct split_score *s2)
+{
+ /* -1 if s1.effective_indent < s2->effective_indent, etc. */
+ int cmp_indents = ((s1->effective_indent > s2->effective_indent) -
+ (s1->effective_indent < s2->effective_indent));
+
+ return INDENT_WEIGHT * cmp_indents + (s1->penalty - s2->penalty);
+}
+
+/*
+ * Represent a group of changed lines in an xdfile_t (i.e., a contiguous group
+ * of lines that was inserted or deleted from the corresponding version of the
+ * file). We consider there to be such a group at the beginning of the file, at
+ * the end of the file, and between any two unchanged lines, though most such
+ * groups will usually be empty.
+ *
+ * If the first line in a group is equal to the line following the group, then
+ * the group can be slid down. Similarly, if the last line in a group is equal
+ * to the line preceding the group, then the group can be slid up. See
+ * group_slide_down() and group_slide_up().
+ *
+ * Note that loops that are testing for changed lines in xdf->rchg do not need
+ * index bounding since the array is prepared with a zero at position -1 and N.
+ */
+struct xdlgroup {
+ /*
+ * The index of the first changed line in the group, or the index of
+ * the unchanged line above which the (empty) group is located.
+ */
+ long start;
+
+ /*
+ * The index of the first unchanged line after the group. For an empty
+ * group, end is equal to start.
+ */
+ long end;
+};
+
+/*
+ * Initialize g to point at the first group in xdf.
+ */
+static void group_init(xdfile_t *xdf, struct xdlgroup *g)
+{
+ g->start = g->end = 0;
+ while (xdf->rchg[g->end])
+ g->end++;
+}
+
+/*
+ * Move g to describe the next (possibly empty) group in xdf and return 0. 
If g
+ * is already at the end of the file, do nothing and return -1.
+ */
+static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
+{
+ if (g->end == xdf->nrec)
+ return -1;
+
+ g->start = g->end + 1;
+ for (g->end = g->start; xdf->rchg[g->end]; g->end++)
+ ;
+
+ return 0;
+}
+
+/*
+ * Move g to describe the previous (possibly empty) group in xdf and return 0.
+ * If g is already at the beginning of the file, do nothing and return -1.
+ */
+static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
+{
+ if (g->start == 0)
+ return -1;
+
+ g->end = g->start - 1;
+ for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--)
+ ;
+
+ return 0;
+}
+
+/*
+ * If g can be slid toward the end of the file, do so, and if it bumps into a
+ * following group, expand this group to include it. Return 0 on success or -1
+ * if g cannot be slid down.
+ */
+static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g, long flags)
+{
+ if (g->end < xdf->nrec &&
+ recs_match(xdf->recs[g->start], xdf->recs[g->end], flags)) {
+ xdf->rchg[g->start++] = 0;
+ xdf->rchg[g->end++] = 1;
+
+ while (xdf->rchg[g->end])
+ g->end++;
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+/*
+ * If g can be slid toward the beginning of the file, do so, and if it bumps
+ * into a previous group, expand this group to include it. Return 0 on success
+ * or -1 if g cannot be slid up.
+ */
+static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g, long flags)
+{
+ if (g->start > 0 &&
+ recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1], flags)) {
+ xdf->rchg[--g->start] = 1;
+ xdf->rchg[--g->end] = 0;
+
+ while (xdf->rchg[g->start - 1])
+ g->start--;
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+static void xdl_bug(const char *msg)
+{
+ fprintf(stderr, "BUG: %s\n", msg);
+ exit(1);
+}
+
+/*
+ * For indentation heuristic, skip searching for better slide position after
+ * checking MAX_BORING lines without finding an improvement. 
This defends the
+ * indentation heuristic logic against pathological cases. The value is not
+ * picked scientifically but should be good enough.
+ */
+#define MAX_BORING 100
+
+/*
+ * Move back and forward change groups for a consistent and pretty diff output.
+ * This also helps in finding joinable change groups and reducing the diff
+ * size.
+ */
+int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
+ struct xdlgroup g, go;
+ long earliest_end, end_matching_other;
+ long groupsize;
+
+ group_init(xdf, &g);
+ group_init(xdfo, &go);
+
+ while (1) {
+ /* If the group is empty in the to-be-compacted file, skip it: */
+ if (g.end == g.start)
+ goto next;
+
+ /*
+ * Now shift the change up and then down as far as possible in
+ * each direction. If it bumps into any other changes, merge them.
+ */
+ do {
+ groupsize = g.end - g.start;
+
+ /*
+ * Keep track of the last "end" index that causes this
+ * group to align with a group of changed lines in the
+ * other file. -1 indicates that we haven't found such
+ * a match yet:
+ */
+ end_matching_other = -1;
+
+ /* Shift the group backward as much as possible: */
+ while (!group_slide_up(xdf, &g, flags))
+ if (group_previous(xdfo, &go))
+ xdl_bug("group sync broken sliding up");
+
+ /*
+ * This is this highest that this group can be shifted.
+ * Record its end index:
+ */
+ earliest_end = g.end;
+
+ if (go.end > go.start)
+ end_matching_other = g.end;
+
+ /* Now shift the group forward as far as possible: */
+ while (1) {
+ if (group_slide_down(xdf, &g, flags))
+ break;
+ if (group_next(xdfo, &go))
+ xdl_bug("group sync broken sliding down");
+
+ if (go.end > go.start)
+ end_matching_other = g.end;
+ }
+ } while (groupsize != g.end - g.start);
+
+ /*
+ * If the group can be shifted, then we can possibly use this
+ * freedom to produce a more intuitive diff.
+ *
+ * The group is currently shifted as far down as possible, so the
+ * heuristics below only have to handle upwards shifts. 
+ */
+
+ if (g.end == earliest_end) {
+ /* no shifting was possible */
+ } else if (end_matching_other != -1) {
+ /*
+ * Move the possibly merged group of changes back to line
+ * up with the last group of changes from the other file
+ * that it can align with.
+ */
+ while (go.end == go.start) {
+ if (group_slide_up(xdf, &g, flags))
+ xdl_bug("match disappeared");
+ if (group_previous(xdfo, &go))
+ xdl_bug("group sync broken sliding to match");
+ }
+ } else if (flags & XDF_INDENT_HEURISTIC) {
+ /*
+ * Indent heuristic: a group of pure add/delete lines
+ * implies two splits, one between the end of the "before"
+ * context and the start of the group, and another between
+ * the end of the group and the beginning of the "after"
+ * context. Some splits are aesthetically better and some
+ * are worse. We compute a badness "score" for each split,
+ * and add the scores for the two splits to define a
+ * "score" for each position that the group can be shifted
+ * to. Then we pick the shift with the lowest score.
+ */
+ long shift, best_shift = -1;
+ struct split_score best_score;
+
+ /*
+ * This is O(N * MAX_BLANKS) (N = shift-able lines).
+ * Even with MAX_BLANKS bounded to a small value, a
+ * large N could still make this loop take several
+ * times longer than the main diff algorithm. The
+ * "boring" value is to help cut down N to something
+ * like (MAX_BORING + groupsize).
+ *
+ * Scan from bottom to top. So we can exit the loop
+ * without compromising the assumption "for a same best
+ * score, pick the bottommost shift". 
+ */
+ int boring = 0;
+ for (shift = g.end; shift >= earliest_end; shift--) {
+ struct split_measurement m;
+ struct split_score score = {0, 0};
+ int cmp;
+
+ measure_split(xdf, shift, &m);
+ score_add_split(&m, &score);
+ measure_split(xdf, shift - groupsize, &m);
+ score_add_split(&m, &score);
+
+ if (best_shift == -1) {
+ cmp = -1;
+ } else {
+ cmp = score_cmp(&score, &best_score);
+ }
+ if (cmp < 0) {
+ boring = 0;
+ best_score.effective_indent = score.effective_indent;
+ best_score.penalty = score.penalty;
+ best_shift = shift;
+ } else {
+ boring += 1;
+ if (boring >= MAX_BORING)
+ break;
+ }
+ }
+
+ while (g.end > best_shift) {
+ if (group_slide_up(xdf, &g, flags))
+ xdl_bug("best shift unreached");
+ if (group_previous(xdfo, &go))
+ xdl_bug("group sync broken sliding to blank line");
+ }
+ }
+
+ next:
+ /* Move past the just-processed group: */
+ if (group_next(xdf, &g))
+ break;
+ if (group_next(xdfo, &go))
+ xdl_bug("group sync broken moving to next group");
+ }
+
+ if (!group_next(xdfo, &go))
+ xdl_bug("group sync broken at end of file");
+
+ return 0;
+}
+
+
+int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
+ xdchange_t *cscr = NULL, *xch;
+ char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
+ long i1, i2, l1, l2;
+
+ /*
+ * Trivial. Collects "groups" of changes and creates an edit script. 
+ */
+ for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--) /* rchg is padded at -1 and nrec, so no bounds checks needed */
+ if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
+ for (l1 = i1; rchg1[i1 - 1]; i1--);
+ for (l2 = i2; rchg2[i2 - 1]; i2--);
+
+ if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
+ xdl_free_script(cscr);
+ return -1;
+ }
+ cscr = xch;
+ }
+
+ *xscr = cscr;
+
+ return 0;
+}
+
+
+void xdl_free_script(xdchange_t *xscr) {
+ xdchange_t *xch;
+
+ while ((xch = xscr) != NULL) {
+ xscr = xscr->next;
+ xdl_free(xch);
+ }
+}
+
+static int xdl_call_hunk_func(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
+ xdemitconf_t const *xecfg)
+{
+ xdchange_t *xch, *xche;
+ if ((xecfg->flags & XDL_EMIT_BDIFFHUNK) != 0) { /* report (start1,end1,start2,end2) ranges, including a final trailing range */
+ long i1 = 0, i2 = 0, n1 = xe->xdf1.nrec, n2 = xe->xdf2.nrec;
+ for (xch = xscr; xch; xch = xche->next) {
+ xche = xdl_get_hunk(&xch, xecfg);
+ if (!xch)
+ break;
+ if (xch->i1 > i1 || xch->i2 > i2) {
+ if (xecfg->hunk_func(i1, xch->i1, i2, xch->i2, ecb->priv) < 0)
+ return -1;
+ }
+ i1 = xche->i1 + xche->chg1;
+ i2 = xche->i2 + xche->chg2;
+ }
+ if (xecfg->hunk_func(i1, n1, i2, n2, ecb->priv) < 0)
+ return -1;
+ } else {
+ for (xch = xscr; xch; xch = xche->next) {
+ xche = xdl_get_hunk(&xch, xecfg);
+ if (!xch)
+ break;
+ if (xecfg->hunk_func(
+ xch->i1, xche->i1 + xche->chg1 - xch->i1,
+ xch->i2, xche->i2 + xche->chg2 - xch->i2,
+ ecb->priv) < 0)
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void xdl_mark_ignorable(xdchange_t *xscr, xdfenv_t *xe, long flags)
+{
+ xdchange_t *xch;
+
+ for (xch = xscr; xch; xch = xch->next) {
+ int ignore = 1;
+ xrecord_t **rec;
+ long i;
+
+ rec = &xe->xdf1.recs[xch->i1];
+ for (i = 0; i < xch->chg1 && ignore; i++)
+ ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);
+
+ rec = &xe->xdf2.recs[xch->i2];
+ for (i = 0; i < xch->chg2 && ignore; i++)
+ ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags);
+
+ xch->ignore = ignore;
+ }
+}
+
+int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdemitconf_t const *xecfg, 
xdemitcb_t *ecb) {
+ xdchange_t *xscr;
+ xdfenv_t xe;
+ emit_func_t ef = xecfg->hunk_func ? xdl_call_hunk_func : xdl_emit_diff;
+
+ if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {
+
+ return -1;
+ }
+ if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
+ xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
+ xdl_build_script(&xe, &xscr) < 0) {
+
+ xdl_free_env(&xe);
+ return -1;
+ }
+
+ if (xpp->flags & XDF_IGNORE_BLANK_LINES)
+ xdl_mark_ignorable(xscr, &xe, xpp->flags);
+ if (ef(&xe, xscr, ecb, xecfg) < 0) {
+ xdl_free_script(xscr);
+ xdl_free_env(&xe);
+ return -1;
+ }
+ xdl_free_script(xscr);
+ xdl_free_env(&xe);
+
+ return 0;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xdiffi.h Sun Mar 04 10:42:51 2018 -0500
@@ -0,0 +1,64 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XDIFFI_H)
+#define XDIFFI_H
+
+
+typedef struct s_diffdata {
+ long nrec;
+ unsigned long const *ha;
+ long *rindex;
+ char *rchg;
+} diffdata_t;
+
+typedef struct s_xdalgoenv {
+ long mxcost;
+ long snake_cnt;
+ long heur_min;
+} xdalgoenv_t;
+
+typedef struct s_xdchange {
+ struct s_xdchange *next;
+ long i1, i2;
+ long chg1, chg2;
+ int ignore; /* set when the change consists only of blank lines (see xdl_mark_ignorable) */
+} xdchange_t;
+
+
+
+int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
+ diffdata_t *dd2, long off2, long lim2,
+ long *kvdf, long *kvdb, int need_min, xdalgoenv_t *xenv);
+int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdfenv_t *xe);
+int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags);
+int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr);
+void xdl_free_script(xdchange_t *xscr);
+int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
+ xdemitconf_t const *xecfg);
+int xdl_do_patience_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdfenv_t *env);
+int 
xdl_do_histogram_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
+ xdfenv_t *env);
+
+#endif /* #if !defined(XDIFFI_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xemit.c Sun Mar 04 10:42:51 2018 -0500
@@ -0,0 +1,312 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#include "xinclude.h"
+
+static long xdl_get_rec(xdfile_t *xdf, long ri, char const **rec) {
+
+ *rec = xdf->recs[ri]->ptr;
+
+ return xdf->recs[ri]->size;
+}
+
+
+static int xdl_emit_record(xdfile_t *xdf, long ri, char const *pre, xdemitcb_t *ecb) {
+ long size, psize = strlen(pre);
+ char const *rec;
+
+ size = xdl_get_rec(xdf, ri, &rec);
+ if (xdl_emit_diffrec(rec, size, pre, psize, ecb) < 0) {
+
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/*
+ * Starting at the passed change atom, find the latest change atom to be included
+ * inside the differential hunk according to the specified configuration.
+ * Also advance xscr if the first changes must be discarded. 
+ */
+xdchange_t *xdl_get_hunk(xdchange_t **xscr, xdemitconf_t const *xecfg)
+{
+ xdchange_t *xch, *xchp, *lxch;
+ long max_common = 2 * xecfg->ctxlen + xecfg->interhunkctxlen; /* changes closer than this are merged into one hunk */
+ long max_ignorable = xecfg->ctxlen;
+ unsigned long ignored = 0; /* number of ignored blank lines */
+
+ /* remove ignorable changes that are too far before other changes */
+ for (xchp = *xscr; xchp && xchp->ignore; xchp = xchp->next) {
+ xch = xchp->next;
+
+ if (xch == NULL ||
+ xch->i1 - (xchp->i1 + xchp->chg1) >= max_ignorable)
+ *xscr = xch;
+ }
+
+ if (*xscr == NULL)
+ return NULL;
+
+ lxch = *xscr;
+
+ for (xchp = *xscr, xch = xchp->next; xch; xchp = xch, xch = xch->next) {
+ long distance = xch->i1 - (xchp->i1 + xchp->chg1);
+ if (distance > max_common)
+ break;
+
+ if (distance < max_ignorable && (!xch->ignore || lxch == xchp)) {
+ lxch = xch;
+ ignored = 0;
+ } else if (distance < max_ignorable && xch->ignore) {
+ ignored += xch->chg2;
+ } else if (lxch != xchp &&
+ xch->i1 + ignored - (lxch->i1 + lxch->chg1) > max_common) {
+ break;
+ } else if (!xch->ignore) {
+ lxch = xch;
+ ignored = 0;
+ } else {
+ ignored += xch->chg2;
+ }
+ }
+
+ return lxch;
+}
+
+
+static long def_ff(const char *rec, long len, char *buf, long sz, void *priv)
+{
+ if (len > 0 &&
+ (isalpha((unsigned char)*rec) || /* identifier? */
+ *rec == '_' || /* also identifier? 
*/
+ *rec == '$')) { /* identifiers from VMS and other esoterico */
+ if (len > sz)
+ len = sz;
+ while (0 < len && isspace((unsigned char)rec[len - 1]))
+ len--;
+ memcpy(buf, rec, len);
+ return len;
+ }
+ return -1;
+}
+
+static long match_func_rec(xdfile_t *xdf, xdemitconf_t const *xecfg, long ri,
+ char *buf, long sz)
+{
+ const char *rec;
+ long len = xdl_get_rec(xdf, ri, &rec);
+ if (!xecfg->find_func)
+ return def_ff(rec, len, buf, sz, xecfg->find_func_priv);
+ return xecfg->find_func(rec, len, buf, sz, xecfg->find_func_priv);
+}
+
+static int is_func_rec(xdfile_t *xdf, xdemitconf_t const *xecfg, long ri)
+{
+ char dummy[1];
+ return match_func_rec(xdf, xecfg, ri, dummy, sizeof(dummy)) >= 0;
+}
+
+struct func_line {
+ long len;
+ char buf[80];
+};
+
+static long get_func_line(xdfenv_t *xe, xdemitconf_t const *xecfg,
+ struct func_line *func_line, long start, long limit)
+{
+ long l, size, step = (start > limit) ? -1 : 1;
+ char *buf, dummy[1];
+
+ buf = func_line ? func_line->buf : dummy;
+ size = func_line ? 
sizeof(func_line->buf) : sizeof(dummy);
+
+ for (l = start; l != limit && 0 <= l && l < xe->xdf1.nrec; l += step) {
+ long len = match_func_rec(&xe->xdf1, xecfg, l, buf, size);
+ if (len >= 0) {
+ if (func_line)
+ func_line->len = len;
+ return l;
+ }
+ }
+ return -1;
+}
+
+static int is_empty_rec(xdfile_t *xdf, long ri)
+{
+ const char *rec;
+ long len = xdl_get_rec(xdf, ri, &rec);
+
+ while (len > 0 && XDL_ISSPACE(*rec)) {
+ rec++;
+ len--;
+ }
+ return !len;
+}
+
+int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
+ xdemitconf_t const *xecfg) {
+ long s1, s2, e1, e2, lctx;
+ xdchange_t *xch, *xche;
+ long funclineprev = -1;
+ struct func_line func_line = { 0 };
+
+ for (xch = xscr; xch; xch = xche->next) {
+ xche = xdl_get_hunk(&xch, xecfg);
+ if (!xch)
+ break;
+
+ s1 = XDL_MAX(xch->i1 - xecfg->ctxlen, 0);
+ s2 = XDL_MAX(xch->i2 - xecfg->ctxlen, 0);
+
+ if (xecfg->flags & XDL_EMIT_FUNCCONTEXT) {
+ long fs1, i1 = xch->i1;
+
+ /* Appended chunk? */
+ if (i1 >= xe->xdf1.nrec) {
+ long i2 = xch->i2;
+
+ /*
+ * We don't need additional context if
+ * a whole function was added.
+ */
+ while (i2 < xe->xdf2.nrec) {
+ if (is_func_rec(&xe->xdf2, xecfg, i2))
+ goto post_context_calculation;
+ i2++;
+ }
+
+ /*
+ * Otherwise get more context from the
+ * pre-image. 
+ */
+ i1 = xe->xdf1.nrec - 1;
+ }
+
+ fs1 = get_func_line(xe, xecfg, NULL, i1, -1);
+ while (fs1 > 0 && !is_empty_rec(&xe->xdf1, fs1 - 1) &&
+ !is_func_rec(&xe->xdf1, xecfg, fs1 - 1))
+ fs1--;
+ if (fs1 < 0)
+ fs1 = 0;
+ if (fs1 < s1) {
+ s2 -= s1 - fs1;
+ s1 = fs1;
+ }
+ }
+
+ post_context_calculation:
+ lctx = xecfg->ctxlen;
+ lctx = XDL_MIN(lctx, xe->xdf1.nrec - (xche->i1 + xche->chg1));
+ lctx = XDL_MIN(lctx, xe->xdf2.nrec - (xche->i2 + xche->chg2));
+
+ e1 = xche->i1 + xche->chg1 + lctx;
+ e2 = xche->i2 + xche->chg2 + lctx;
+
+ if (xecfg->flags & XDL_EMIT_FUNCCONTEXT) {
+ long fe1 = get_func_line(xe, xecfg, NULL,
+ xche->i1 + xche->chg1,
+ xe->xdf1.nrec);
+ while (fe1 > 0 && is_empty_rec(&xe->xdf1, fe1 - 1))
+ fe1--;
+ if (fe1 < 0)
+ fe1 = xe->xdf1.nrec;
+ if (fe1 > e1) {
+ e2 += fe1 - e1;
+ e1 = fe1;
+ }
+
+ /*
+ * Overlap with next change? Then include it
+ * in the current hunk and start over to find
+ * its new end.
+ */
+ if (xche->next) {
+ long l = XDL_MIN(xche->next->i1,
+ xe->xdf1.nrec - 1);
+ if (l - xecfg->ctxlen <= e1 ||
+ get_func_line(xe, xecfg, NULL, l, e1) < 0) {
+ xche = xche->next;
+ goto post_context_calculation;
+ }
+ }
+ }
+
+ /*
+ * Emit current hunk header.
+ */
+
+ if (xecfg->flags & XDL_EMIT_FUNCNAMES) {
+ get_func_line(xe, xecfg, &func_line,
+ s1 - 1, funclineprev);
+ funclineprev = s1 - 1;
+ }
+ if (xdl_emit_hunk_hdr(s1 + 1, e1 - s1, s2 + 1, e2 - s2,
+ func_line.buf, func_line.len, ecb) < 0)
+ return -1;
+
+ /*
+ * Emit pre-context.
+ */
+ for (; s2 < xch->i2; s2++)
+ if (xdl_emit_record(&xe->xdf2, s2, " ", ecb) < 0)
+ return -1;
+
+ for (s1 = xch->i1, s2 = xch->i2;; xch = xch->next) {
+ /*
+ * Merge previous with current change atom.
+ */
+ for (; s1 < xch->i1 && s2 < xch->i2; s1++, s2++)
+ if (xdl_emit_record(&xe->xdf2, s2, " ", ecb) < 0)
+ return -1;
+
+ /*
+ * Removes lines from the first file. 
+ */
+ for (s1 = xch->i1; s1 < xch->i1 + xch->chg1; s1++)
+ if (xdl_emit_record(&xe->xdf1, s1, "-", ecb) < 0)
+ return -1;
+
+ /*
+ * Adds lines from the second file.
+ */
+ for (s2 = xch->i2; s2 < xch->i2 + xch->chg2; s2++)
+ if (xdl_emit_record(&xe->xdf2, s2, "+", ecb) < 0)
+ return -1;
+
+ if (xch == xche)
+ break;
+ s1 = xch->i1 + xch->chg1;
+ s2 = xch->i2 + xch->chg2;
+ }
+
+ /*
+ * Emit post-context.
+ */
+ for (s2 = xche->i2 + xche->chg2; s2 < e2; s2++)
+ if (xdl_emit_record(&xe->xdf2, s2, " ", ecb) < 0)
+ return -1;
+ }
+
+ return 0;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xemit.h Sun Mar 04 10:42:51 2018 -0500
@@ -0,0 +1,36 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XEMIT_H)
+#define XEMIT_H
+
+
+typedef int (*emit_func_t)(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
+ xdemitconf_t const *xecfg); /* signature shared by xdl_emit_diff and alternate emitters */
+
+xdchange_t *xdl_get_hunk(xdchange_t **xscr, xdemitconf_t const *xecfg);
+int xdl_emit_diff(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
+ xdemitconf_t const *xecfg);
+
+
+
+#endif /* #if !defined(XEMIT_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/mercurial/thirdparty/xdiff/xinclude.h Sun Mar 04 10:42:51 2018 -0500
@@ -0,0 +1,42 @@
+/*
+ * LibXDiff by Davide Libenzi ( File Differential Library )
+ * Copyright (C) 2003 Davide Libenzi
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#if !defined(XINCLUDE_H)
+#define XINCLUDE_H /* convenience header pulling in all internal xdiff headers */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <limits.h>
+
+#include "xmacros.h"
+#include "xdiff.h"
+#include "xtypes.h"
+#include "xutils.h"
+#include "xprepare.h"
+#include "xdiffi.h"
+#include "xemit.h"
+
+
+#endif /* #if !defined(XINCLUDE_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/xdiff/xmacros.h Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,54 @@ +/* + * LibXDiff by Davide Libenzi ( File Differential Library ) + * Copyright (C) 2003 Davide Libenzi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#if !defined(XMACROS_H) +#define XMACROS_H + + + + +#define XDL_MIN(a, b) ((a) < (b) ? (a): (b)) +#define XDL_MAX(a, b) ((a) > (b) ? (a): (b)) +#define XDL_ABS(v) ((v) >= 0 ? 
(v): -(v)) +#define XDL_ISDIGIT(c) ((c) >= '0' && (c) <= '9') +#define XDL_ISSPACE(c) (isspace((unsigned char)(c))) +#define XDL_ADDBITS(v,b) ((v) + ((v) >> (b))) +#define XDL_MASKBITS(b) ((1UL << (b)) - 1) +#define XDL_HASHLONG(v,b) (XDL_ADDBITS((unsigned long)(v), b) & XDL_MASKBITS(b)) +#define XDL_PTRFREE(p) do { if (p) { xdl_free(p); (p) = NULL; } } while (0) +#define XDL_LE32_PUT(p, v) \ +do { \ + unsigned char *__p = (unsigned char *) (p); \ + *__p++ = (unsigned char) (v); \ + *__p++ = (unsigned char) ((v) >> 8); \ + *__p++ = (unsigned char) ((v) >> 16); \ + *__p = (unsigned char) ((v) >> 24); \ +} while (0) +#define XDL_LE32_GET(p, v) \ +do { \ + unsigned char const *__p = (unsigned char const *) (p); \ + (v) = (unsigned long) __p[0] | ((unsigned long) __p[1]) << 8 | \ + ((unsigned long) __p[2]) << 16 | ((unsigned long) __p[3]) << 24; \ +} while (0) + + +#endif /* #if !defined(XMACROS_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/xdiff/xmerge.c Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,686 @@ +/* + * LibXDiff by Davide Libenzi ( File Differential Library ) + * Copyright (C) 2003-2006 Davide Libenzi, Johannes E. Schindelin + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#include "xinclude.h" + +typedef struct s_xdmerge { + struct s_xdmerge *next; + /* + * 0 = conflict, + * 1 = no conflict, take first, + * 2 = no conflict, take second. + * 3 = no conflict, take both. + */ + int mode; + /* + * These point at the respective postimages. E.g. <i1,chg1> is + * how side #1 wants to change the common ancestor; if there is no + * overlap, lines before i1 in the postimage of side #1 appear + * in the merge result as a region touched by neither side. + */ + long i1, i2; + long chg1, chg2; + /* + * These point at the preimage; of course there is just one + * preimage, that is from the shared common ancestor. 
+ */ + long i0; + long chg0; +} xdmerge_t; + +static int xdl_append_merge(xdmerge_t **merge, int mode, + long i0, long chg0, + long i1, long chg1, + long i2, long chg2) +{ + xdmerge_t *m = *merge; + if (m && (i1 <= m->i1 + m->chg1 || i2 <= m->i2 + m->chg2)) { + if (mode != m->mode) + m->mode = 0; + m->chg0 = i0 + chg0 - m->i0; + m->chg1 = i1 + chg1 - m->i1; + m->chg2 = i2 + chg2 - m->i2; + } else { + m = xdl_malloc(sizeof(xdmerge_t)); + if (!m) + return -1; + m->next = NULL; + m->mode = mode; + m->i0 = i0; + m->chg0 = chg0; + m->i1 = i1; + m->chg1 = chg1; + m->i2 = i2; + m->chg2 = chg2; + if (*merge) + (*merge)->next = m; + *merge = m; + } + return 0; +} + +static int xdl_cleanup_merge(xdmerge_t *c) +{ + int count = 0; + xdmerge_t *next_c; + + /* were there conflicts? */ + for (; c; c = next_c) { + if (c->mode == 0) + count++; + next_c = c->next; + free(c); + } + return count; +} + +static int xdl_merge_cmp_lines(xdfenv_t *xe1, int i1, xdfenv_t *xe2, int i2, + int line_count, long flags) +{ + int i; + xrecord_t **rec1 = xe1->xdf2.recs + i1; + xrecord_t **rec2 = xe2->xdf2.recs + i2; + + for (i = 0; i < line_count; i++) { + int result = xdl_recmatch(rec1[i]->ptr, rec1[i]->size, + rec2[i]->ptr, rec2[i]->size, flags); + if (!result) + return -1; + } + return 0; +} + +static int xdl_recs_copy_0(int use_orig, xdfenv_t *xe, int i, int count, int needs_cr, int add_nl, char *dest) +{ + xrecord_t **recs; + int size = 0; + + recs = (use_orig ? 
xe->xdf1.recs : xe->xdf2.recs) + i; + + if (count < 1) + return 0; + + for (i = 0; i < count; size += recs[i++]->size) + if (dest) + memcpy(dest + size, recs[i]->ptr, recs[i]->size); + if (add_nl) { + i = recs[count - 1]->size; + if (i == 0 || recs[count - 1]->ptr[i - 1] != '\n') { + if (needs_cr) { + if (dest) + dest[size] = '\r'; + size++; + } + + if (dest) + dest[size] = '\n'; + size++; + } + } + return size; +} + +static int xdl_recs_copy(xdfenv_t *xe, int i, int count, int needs_cr, int add_nl, char *dest) +{ + return xdl_recs_copy_0(0, xe, i, count, needs_cr, add_nl, dest); +} + +static int xdl_orig_copy(xdfenv_t *xe, int i, int count, int needs_cr, int add_nl, char *dest) +{ + return xdl_recs_copy_0(1, xe, i, count, needs_cr, add_nl, dest); +} + +/* + * Returns 1 if the i'th line ends in CR/LF (if it is the last line and + * has no eol, the preceding line, if any), 0 if it ends in LF-only, and + * -1 if the line ending cannot be determined. + */ +static int is_eol_crlf(xdfile_t *file, int i) +{ + long size; + + if (i < file->nrec - 1) + /* All lines before the last *must* end in LF */ + return (size = file->recs[i]->size) > 1 && + file->recs[i]->ptr[size - 2] == '\r'; + if (!file->nrec) + /* Cannot determine eol style from empty file */ + return -1; + if ((size = file->recs[i]->size) && + file->recs[i]->ptr[size - 1] == '\n') + /* Last line; ends in LF; Is it CR/LF? */ + return size > 1 && + file->recs[i]->ptr[size - 2] == '\r'; + if (!i) + /* The only line has no eol */ + return -1; + /* Determine eol from second-to-last line */ + return (size = file->recs[i - 1]->size) > 1 && + file->recs[i - 1]->ptr[size - 2] == '\r'; +} + +static int is_cr_needed(xdfenv_t *xe1, xdfenv_t *xe2, xdmerge_t *m) +{ + int needs_cr; + + /* Match post-images' preceding, or first, lines' end-of-line style */ + needs_cr = is_eol_crlf(&xe1->xdf2, m->i1 ? m->i1 - 1 : 0); + if (needs_cr) + needs_cr = is_eol_crlf(&xe2->xdf2, m->i2 ? 
m->i2 - 1 : 0); + /* Look at pre-image's first line, unless we already settled on LF */ + if (needs_cr) + needs_cr = is_eol_crlf(&xe1->xdf1, 0); + /* If still undecided, use LF-only */ + return needs_cr < 0 ? 0 : needs_cr; +} + +static int fill_conflict_hunk(xdfenv_t *xe1, const char *name1, + xdfenv_t *xe2, const char *name2, + const char *name3, + int size, int i, int style, + xdmerge_t *m, char *dest, int marker_size) +{ + int marker1_size = (name1 ? strlen(name1) + 1 : 0); + int marker2_size = (name2 ? strlen(name2) + 1 : 0); + int marker3_size = (name3 ? strlen(name3) + 1 : 0); + int needs_cr = is_cr_needed(xe1, xe2, m); + + if (marker_size <= 0) + marker_size = DEFAULT_CONFLICT_MARKER_SIZE; + + /* Before conflicting part */ + size += xdl_recs_copy(xe1, i, m->i1 - i, 0, 0, + dest ? dest + size : NULL); + + if (!dest) { + size += marker_size + 1 + needs_cr + marker1_size; + } else { + memset(dest + size, '<', marker_size); + size += marker_size; + if (marker1_size) { + dest[size] = ' '; + memcpy(dest + size + 1, name1, marker1_size - 1); + size += marker1_size; + } + if (needs_cr) + dest[size++] = '\r'; + dest[size++] = '\n'; + } + + /* Postimage from side #1 */ + size += xdl_recs_copy(xe1, m->i1, m->chg1, needs_cr, 1, + dest ? dest + size : NULL); + + if (style == XDL_MERGE_DIFF3) { + /* Shared preimage */ + if (!dest) { + size += marker_size + 1 + needs_cr + marker3_size; + } else { + memset(dest + size, '|', marker_size); + size += marker_size; + if (marker3_size) { + dest[size] = ' '; + memcpy(dest + size + 1, name3, marker3_size - 1); + size += marker3_size; + } + if (needs_cr) + dest[size++] = '\r'; + dest[size++] = '\n'; + } + size += xdl_orig_copy(xe1, m->i0, m->chg0, needs_cr, 1, + dest ? 
dest + size : NULL); + } + + if (!dest) { + size += marker_size + 1 + needs_cr; + } else { + memset(dest + size, '=', marker_size); + size += marker_size; + if (needs_cr) + dest[size++] = '\r'; + dest[size++] = '\n'; + } + + /* Postimage from side #2 */ + size += xdl_recs_copy(xe2, m->i2, m->chg2, needs_cr, 1, + dest ? dest + size : NULL); + if (!dest) { + size += marker_size + 1 + needs_cr + marker2_size; + } else { + memset(dest + size, '>', marker_size); + size += marker_size; + if (marker2_size) { + dest[size] = ' '; + memcpy(dest + size + 1, name2, marker2_size - 1); + size += marker2_size; + } + if (needs_cr) + dest[size++] = '\r'; + dest[size++] = '\n'; + } + return size; +} + +static int xdl_fill_merge_buffer(xdfenv_t *xe1, const char *name1, + xdfenv_t *xe2, const char *name2, + const char *ancestor_name, + int favor, + xdmerge_t *m, char *dest, int style, + int marker_size) +{ + int size, i; + + for (size = i = 0; m; m = m->next) { + if (favor && !m->mode) + m->mode = favor; + + if (m->mode == 0) + size = fill_conflict_hunk(xe1, name1, xe2, name2, + ancestor_name, + size, i, style, m, dest, + marker_size); + else if (m->mode & 3) { + /* Before conflicting part */ + size += xdl_recs_copy(xe1, i, m->i1 - i, 0, 0, + dest ? dest + size : NULL); + /* Postimage from side #1 */ + if (m->mode & 1) { + int needs_cr = is_cr_needed(xe1, xe2, m); + + size += xdl_recs_copy(xe1, m->i1, m->chg1, needs_cr, (m->mode & 2), + dest ? dest + size : NULL); + } + /* Postimage from side #2 */ + if (m->mode & 2) + size += xdl_recs_copy(xe2, m->i2, m->chg2, 0, 0, + dest ? dest + size : NULL); + } else + continue; + i = m->i1 + m->chg1; + } + size += xdl_recs_copy(xe1, i, xe1->xdf2.nrec - i, 0, 0, + dest ? dest + size : NULL); + return size; +} + +/* + * Sometimes, changes are not quite identical, but differ in only a few + * lines. Try hard to show only these few lines as conflicting. 
+ */ +static int xdl_refine_conflicts(xdfenv_t *xe1, xdfenv_t *xe2, xdmerge_t *m, + xpparam_t const *xpp) +{ + for (; m; m = m->next) { + mmfile_t t1, t2; + xdfenv_t xe; + xdchange_t *xscr, *x; + int i1 = m->i1, i2 = m->i2; + + /* let's handle just the conflicts */ + if (m->mode) + continue; + + /* no sense refining a conflict when one side is empty */ + if (m->chg1 == 0 || m->chg2 == 0) + continue; + + /* + * This probably does not work outside git, since + * we have a very simple mmfile structure. + */ + t1.ptr = (char *)xe1->xdf2.recs[m->i1]->ptr; + t1.size = xe1->xdf2.recs[m->i1 + m->chg1 - 1]->ptr + + xe1->xdf2.recs[m->i1 + m->chg1 - 1]->size - t1.ptr; + t2.ptr = (char *)xe2->xdf2.recs[m->i2]->ptr; + t2.size = xe2->xdf2.recs[m->i2 + m->chg2 - 1]->ptr + + xe2->xdf2.recs[m->i2 + m->chg2 - 1]->size - t2.ptr; + if (xdl_do_diff(&t1, &t2, xpp, &xe) < 0) + return -1; + if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 || + xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 || + xdl_build_script(&xe, &xscr) < 0) { + xdl_free_env(&xe); + return -1; + } + if (!xscr) { + /* If this happens, the changes are identical. 
*/ + xdl_free_env(&xe); + m->mode = 4; + continue; + } + x = xscr; + m->i1 = xscr->i1 + i1; + m->chg1 = xscr->chg1; + m->i2 = xscr->i2 + i2; + m->chg2 = xscr->chg2; + while (xscr->next) { + xdmerge_t *m2 = xdl_malloc(sizeof(xdmerge_t)); + if (!m2) { + xdl_free_env(&xe); + xdl_free_script(x); + return -1; + } + xscr = xscr->next; + m2->next = m->next; + m->next = m2; + m = m2; + m->mode = 0; + m->i1 = xscr->i1 + i1; + m->chg1 = xscr->chg1; + m->i2 = xscr->i2 + i2; + m->chg2 = xscr->chg2; + } + xdl_free_env(&xe); + xdl_free_script(x); + } + return 0; +} + +static int line_contains_alnum(const char *ptr, long size) +{ + while (size--) + if (isalnum((unsigned char)*(ptr++))) + return 1; + return 0; +} + +static int lines_contain_alnum(xdfenv_t *xe, int i, int chg) +{ + for (; chg; chg--, i++) + if (line_contains_alnum(xe->xdf2.recs[i]->ptr, + xe->xdf2.recs[i]->size)) + return 1; + return 0; +} + +/* + * This function merges m and m->next, marking everything between those hunks + * as conflicting, too. + */ +static void xdl_merge_two_conflicts(xdmerge_t *m) +{ + xdmerge_t *next_m = m->next; + m->chg1 = next_m->i1 + next_m->chg1 - m->i1; + m->chg2 = next_m->i2 + next_m->chg2 - m->i2; + m->next = next_m->next; + free(next_m); +} + +/* + * If there are less than 3 non-conflicting lines between conflicts, + * it appears simpler -- because it takes up less (or as many) lines -- + * if the lines are moved into the conflicts. 
+ */ +static int xdl_simplify_non_conflicts(xdfenv_t *xe1, xdmerge_t *m, + int simplify_if_no_alnum) +{ + int result = 0; + + if (!m) + return result; + for (;;) { + xdmerge_t *next_m = m->next; + int begin, end; + + if (!next_m) + return result; + + begin = m->i1 + m->chg1; + end = next_m->i1; + + if (m->mode != 0 || next_m->mode != 0 || + (end - begin > 3 && + (!simplify_if_no_alnum || + lines_contain_alnum(xe1, begin, end - begin)))) { + m = next_m; + } else { + result++; + xdl_merge_two_conflicts(m); + } + } +} + +/* + * level == 0: mark all overlapping changes as conflict + * level == 1: mark overlapping changes as conflict only if not identical + * level == 2: analyze non-identical changes for minimal conflict set + * level == 3: analyze non-identical changes for minimal conflict set, but + * treat hunks not containing any letter or number as conflicting + * + * returns < 0 on error, == 0 for no conflicts, else number of conflicts + */ +static int xdl_do_merge(xdfenv_t *xe1, xdchange_t *xscr1, + xdfenv_t *xe2, xdchange_t *xscr2, + xmparam_t const *xmp, mmbuffer_t *result) +{ + xdmerge_t *changes, *c; + xpparam_t const *xpp = &xmp->xpp; + const char *const ancestor_name = xmp->ancestor; + const char *const name1 = xmp->file1; + const char *const name2 = xmp->file2; + int i0, i1, i2, chg0, chg1, chg2; + int level = xmp->level; + int style = xmp->style; + int favor = xmp->favor; + + if (style == XDL_MERGE_DIFF3) { + /* + * "diff3 -m" output does not make sense for anything + * more aggressive than XDL_MERGE_EAGER. 
+ */ + if (XDL_MERGE_EAGER < level) + level = XDL_MERGE_EAGER; + } + + c = changes = NULL; + + while (xscr1 && xscr2) { + if (!changes) + changes = c; + if (xscr1->i1 + xscr1->chg1 < xscr2->i1) { + i0 = xscr1->i1; + i1 = xscr1->i2; + i2 = xscr2->i2 - xscr2->i1 + xscr1->i1; + chg0 = xscr1->chg1; + chg1 = xscr1->chg2; + chg2 = xscr1->chg1; + if (xdl_append_merge(&c, 1, + i0, chg0, i1, chg1, i2, chg2)) { + xdl_cleanup_merge(changes); + return -1; + } + xscr1 = xscr1->next; + continue; + } + if (xscr2->i1 + xscr2->chg1 < xscr1->i1) { + i0 = xscr2->i1; + i1 = xscr1->i2 - xscr1->i1 + xscr2->i1; + i2 = xscr2->i2; + chg0 = xscr2->chg1; + chg1 = xscr2->chg1; + chg2 = xscr2->chg2; + if (xdl_append_merge(&c, 2, + i0, chg0, i1, chg1, i2, chg2)) { + xdl_cleanup_merge(changes); + return -1; + } + xscr2 = xscr2->next; + continue; + } + if (level == XDL_MERGE_MINIMAL || xscr1->i1 != xscr2->i1 || + xscr1->chg1 != xscr2->chg1 || + xscr1->chg2 != xscr2->chg2 || + xdl_merge_cmp_lines(xe1, xscr1->i2, + xe2, xscr2->i2, + xscr1->chg2, xpp->flags)) { + /* conflict */ + int off = xscr1->i1 - xscr2->i1; + int ffo = off + xscr1->chg1 - xscr2->chg1; + + i0 = xscr1->i1; + i1 = xscr1->i2; + i2 = xscr2->i2; + if (off > 0) { + i0 -= off; + i1 -= off; + } + else + i2 += off; + chg0 = xscr1->i1 + xscr1->chg1 - i0; + chg1 = xscr1->i2 + xscr1->chg2 - i1; + chg2 = xscr2->i2 + xscr2->chg2 - i2; + if (ffo < 0) { + chg0 -= ffo; + chg1 -= ffo; + } else + chg2 += ffo; + if (xdl_append_merge(&c, 0, + i0, chg0, i1, chg1, i2, chg2)) { + xdl_cleanup_merge(changes); + return -1; + } + } + + i1 = xscr1->i1 + xscr1->chg1; + i2 = xscr2->i1 + xscr2->chg1; + + if (i1 >= i2) + xscr2 = xscr2->next; + if (i2 >= i1) + xscr1 = xscr1->next; + } + while (xscr1) { + if (!changes) + changes = c; + i0 = xscr1->i1; + i1 = xscr1->i2; + i2 = xscr1->i1 + xe2->xdf2.nrec - xe2->xdf1.nrec; + chg0 = xscr1->chg1; + chg1 = xscr1->chg2; + chg2 = xscr1->chg1; + if (xdl_append_merge(&c, 1, + i0, chg0, i1, chg1, i2, chg2)) { + 
xdl_cleanup_merge(changes); + return -1; + } + xscr1 = xscr1->next; + } + while (xscr2) { + if (!changes) + changes = c; + i0 = xscr2->i1; + i1 = xscr2->i1 + xe1->xdf2.nrec - xe1->xdf1.nrec; + i2 = xscr2->i2; + chg0 = xscr2->chg1; + chg1 = xscr2->chg1; + chg2 = xscr2->chg2; + if (xdl_append_merge(&c, 2, + i0, chg0, i1, chg1, i2, chg2)) { + xdl_cleanup_merge(changes); + return -1; + } + xscr2 = xscr2->next; + } + if (!changes) + changes = c; + /* refine conflicts */ + if (XDL_MERGE_ZEALOUS <= level && + (xdl_refine_conflicts(xe1, xe2, changes, xpp) < 0 || + xdl_simplify_non_conflicts(xe1, changes, + XDL_MERGE_ZEALOUS < level) < 0)) { + xdl_cleanup_merge(changes); + return -1; + } + /* output */ + if (result) { + int marker_size = xmp->marker_size; + int size = xdl_fill_merge_buffer(xe1, name1, xe2, name2, + ancestor_name, + favor, changes, NULL, style, + marker_size); + result->ptr = xdl_malloc(size); + if (!result->ptr) { + xdl_cleanup_merge(changes); + return -1; + } + result->size = size; + xdl_fill_merge_buffer(xe1, name1, xe2, name2, + ancestor_name, favor, changes, + result->ptr, style, marker_size); + } + return xdl_cleanup_merge(changes); +} + +int xdl_merge(mmfile_t *orig, mmfile_t *mf1, mmfile_t *mf2, + xmparam_t const *xmp, mmbuffer_t *result) +{ + xdchange_t *xscr1, *xscr2; + xdfenv_t xe1, xe2; + int status; + xpparam_t const *xpp = &xmp->xpp; + + result->ptr = NULL; + result->size = 0; + + if (xdl_do_diff(orig, mf1, xpp, &xe1) < 0) { + return -1; + } + if (xdl_do_diff(orig, mf2, xpp, &xe2) < 0) { + xdl_free_env(&xe1); + return -1; + } + if (xdl_change_compact(&xe1.xdf1, &xe1.xdf2, xpp->flags) < 0 || + xdl_change_compact(&xe1.xdf2, &xe1.xdf1, xpp->flags) < 0 || + xdl_build_script(&xe1, &xscr1) < 0) { + xdl_free_env(&xe1); + return -1; + } + if (xdl_change_compact(&xe2.xdf1, &xe2.xdf2, xpp->flags) < 0 || + xdl_change_compact(&xe2.xdf2, &xe2.xdf1, xpp->flags) < 0 || + xdl_build_script(&xe2, &xscr2) < 0) { + xdl_free_script(xscr1); + xdl_free_env(&xe1); + 
xdl_free_env(&xe2); + return -1; + } + status = 0; + if (!xscr1) { + result->ptr = xdl_malloc(mf2->size); + memcpy(result->ptr, mf2->ptr, mf2->size); + result->size = mf2->size; + } else if (!xscr2) { + result->ptr = xdl_malloc(mf1->size); + memcpy(result->ptr, mf1->ptr, mf1->size); + result->size = mf1->size; + } else { + status = xdl_do_merge(&xe1, xscr1, + &xe2, xscr2, + xmp, result); + } + xdl_free_script(xscr1); + xdl_free_script(xscr2); + + xdl_free_env(&xe1); + xdl_free_env(&xe2); + + return status; +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/xdiff/xprepare.c Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,466 @@ +/* + * LibXDiff by Davide Libenzi ( File Differential Library ) + * Copyright (C) 2003 Davide Libenzi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#include "xinclude.h" + + +#define XDL_KPDIS_RUN 4 +#define XDL_MAX_EQLIMIT 1024 +#define XDL_SIMSCAN_WINDOW 100 +#define XDL_GUESS_NLINES1 256 + + +typedef struct s_xdlclass { + struct s_xdlclass *next; + unsigned long ha; + char const *line; + long size; + long idx; + long len1, len2; +} xdlclass_t; + +typedef struct s_xdlclassifier { + unsigned int hbits; + long hsize; + xdlclass_t **rchash; + chastore_t ncha; + xdlclass_t **rcrecs; + long alloc; + long count; + long flags; +} xdlclassifier_t; + + + + +static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags); +static void xdl_free_classifier(xdlclassifier_t *cf); +static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash, + unsigned int hbits, xrecord_t *rec); +static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_t const *xpp, + xdlclassifier_t *cf, xdfile_t *xdf); +static void xdl_free_ctx(xdfile_t *xdf); +static int xdl_clean_mmatch(char const *dis, long i, long s, 
long e); +static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2); +static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2); +static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2); + + + + +static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags) { + cf->flags = flags; + + cf->hbits = xdl_hashbits((unsigned int) size); + cf->hsize = 1 << cf->hbits; + + if (xdl_cha_init(&cf->ncha, sizeof(xdlclass_t), size / 4 + 1) < 0) { + + return -1; + } + if (!(cf->rchash = (xdlclass_t **) xdl_malloc(cf->hsize * sizeof(xdlclass_t *)))) { + + xdl_cha_free(&cf->ncha); + return -1; + } + memset(cf->rchash, 0, cf->hsize * sizeof(xdlclass_t *)); + + cf->alloc = size; + if (!(cf->rcrecs = (xdlclass_t **) xdl_malloc(cf->alloc * sizeof(xdlclass_t *)))) { + + xdl_free(cf->rchash); + xdl_cha_free(&cf->ncha); + return -1; + } + + cf->count = 0; + + return 0; +} + + +static void xdl_free_classifier(xdlclassifier_t *cf) { + + xdl_free(cf->rcrecs); + xdl_free(cf->rchash); + xdl_cha_free(&cf->ncha); +} + + +static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash, + unsigned int hbits, xrecord_t *rec) { + long hi; + char const *line; + xdlclass_t *rcrec; + xdlclass_t **rcrecs; + + line = rec->ptr; + hi = (long) XDL_HASHLONG(rec->ha, cf->hbits); + for (rcrec = cf->rchash[hi]; rcrec; rcrec = rcrec->next) + if (rcrec->ha == rec->ha && + xdl_recmatch(rcrec->line, rcrec->size, + rec->ptr, rec->size, cf->flags)) + break; + + if (!rcrec) { + if (!(rcrec = xdl_cha_alloc(&cf->ncha))) { + + return -1; + } + rcrec->idx = cf->count++; + if (cf->count > cf->alloc) { + cf->alloc *= 2; + if (!(rcrecs = (xdlclass_t **) xdl_realloc(cf->rcrecs, cf->alloc * sizeof(xdlclass_t *)))) { + + return -1; + } + cf->rcrecs = rcrecs; + } + cf->rcrecs[rcrec->idx] = rcrec; + rcrec->line = line; + rcrec->size = rec->size; + rcrec->ha = rec->ha; + rcrec->len1 = rcrec->len2 = 0; + rcrec->next = cf->rchash[hi]; + 
cf->rchash[hi] = rcrec; + } + + (pass == 1) ? rcrec->len1++ : rcrec->len2++; + + rec->ha = (unsigned long) rcrec->idx; + + hi = (long) XDL_HASHLONG(rec->ha, hbits); + rec->next = rhash[hi]; + rhash[hi] = rec; + + return 0; +} + + +static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_t const *xpp, + xdlclassifier_t *cf, xdfile_t *xdf) { + unsigned int hbits; + long nrec, hsize, bsize; + unsigned long hav; + char const *blk, *cur, *top, *prev; + xrecord_t *crec; + xrecord_t **recs, **rrecs; + xrecord_t **rhash; + unsigned long *ha; + char *rchg; + long *rindex; + + ha = NULL; + rindex = NULL; + rchg = NULL; + rhash = NULL; + recs = NULL; + + if (xdl_cha_init(&xdf->rcha, sizeof(xrecord_t), narec / 4 + 1) < 0) + goto abort; + if (!(recs = (xrecord_t **) xdl_malloc(narec * sizeof(xrecord_t *)))) + goto abort; + + { + hbits = xdl_hashbits((unsigned int) narec); + hsize = 1 << hbits; + if (!(rhash = (xrecord_t **) xdl_malloc(hsize * sizeof(xrecord_t *)))) + goto abort; + memset(rhash, 0, hsize * sizeof(xrecord_t *)); + } + + nrec = 0; + if ((cur = blk = xdl_mmfile_first(mf, &bsize)) != NULL) { + for (top = blk + bsize; cur < top; ) { + prev = cur; + hav = xdl_hash_record(&cur, top, xpp->flags); + if (nrec >= narec) { + narec *= 2; + if (!(rrecs = (xrecord_t **) xdl_realloc(recs, narec * sizeof(xrecord_t *)))) + goto abort; + recs = rrecs; + } + if (!(crec = xdl_cha_alloc(&xdf->rcha))) + goto abort; + crec->ptr = prev; + crec->size = (long) (cur - prev); + crec->ha = hav; + recs[nrec++] = crec; + + if (xdl_classify_record(pass, cf, rhash, hbits, crec) < 0) + goto abort; + } + } + + if (!(rchg = (char *) xdl_malloc((nrec + 2) * sizeof(char)))) + goto abort; + memset(rchg, 0, (nrec + 2) * sizeof(char)); + + if (!(rindex = (long *) xdl_malloc((nrec + 1) * sizeof(long)))) + goto abort; + if (!(ha = (unsigned long *) xdl_malloc((nrec + 1) * sizeof(unsigned long)))) + goto abort; + + xdf->nrec = nrec; + xdf->recs = recs; + xdf->hbits = hbits; + 
xdf->rhash = rhash; + xdf->rchg = rchg + 1; + xdf->rindex = rindex; + xdf->nreff = 0; + xdf->ha = ha; + xdf->dstart = 0; + xdf->dend = nrec - 1; + + return 0; + +abort: + xdl_free(ha); + xdl_free(rindex); + xdl_free(rchg); + xdl_free(rhash); + xdl_free(recs); + xdl_cha_free(&xdf->rcha); + return -1; +} + + +static void xdl_free_ctx(xdfile_t *xdf) { + + xdl_free(xdf->rhash); + xdl_free(xdf->rindex); + xdl_free(xdf->rchg - 1); + xdl_free(xdf->ha); + xdl_free(xdf->recs); + xdl_cha_free(&xdf->rcha); +} + + +int xdl_prepare_env(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, + xdfenv_t *xe) { + long enl1, enl2, sample; + xdlclassifier_t cf; + + memset(&cf, 0, sizeof(cf)); + + sample = XDL_GUESS_NLINES1; + + enl1 = xdl_guess_lines(mf1, sample) + 1; + enl2 = xdl_guess_lines(mf2, sample) + 1; + + if (xdl_init_classifier(&cf, enl1 + enl2 + 1, xpp->flags) < 0) + return -1; + + if (xdl_prepare_ctx(1, mf1, enl1, xpp, &cf, &xe->xdf1) < 0) { + + xdl_free_classifier(&cf); + return -1; + } + if (xdl_prepare_ctx(2, mf2, enl2, xpp, &cf, &xe->xdf2) < 0) { + + xdl_free_ctx(&xe->xdf1); + xdl_free_classifier(&cf); + return -1; + } + + if (xdl_optimize_ctxs(&cf, &xe->xdf1, &xe->xdf2) < 0) { + xdl_free_ctx(&xe->xdf2); + xdl_free_ctx(&xe->xdf1); + xdl_free_classifier(&cf); + return -1; + } + + xdl_free_classifier(&cf); + + return 0; +} + + +void xdl_free_env(xdfenv_t *xe) { + + xdl_free_ctx(&xe->xdf2); + xdl_free_ctx(&xe->xdf1); +} + + +static int xdl_clean_mmatch(char const *dis, long i, long s, long e) { + long r, rdis0, rpdis0, rdis1, rpdis1; + + /* + * Limits the window the is examined during the similar-lines + * scan. The loops below stops when dis[i - r] == 1 (line that + * has no match), but there are corner cases where the loop + * proceed all the way to the extremities by causing huge + * performance penalties in case of big files. 
+ */ + if (i - s > XDL_SIMSCAN_WINDOW) + s = i - XDL_SIMSCAN_WINDOW; + if (e - i > XDL_SIMSCAN_WINDOW) + e = i + XDL_SIMSCAN_WINDOW; + + /* + * Scans the lines before 'i' to find a run of lines that either + * have no match (dis[j] == 0) or have multiple matches (dis[j] > 1). + * Note that we always call this function with dis[i] > 1, so the + * current line (i) is already a multimatch line. + */ + for (r = 1, rdis0 = 0, rpdis0 = 1; (i - r) >= s; r++) { + if (!dis[i - r]) + rdis0++; + else if (dis[i - r] == 2) + rpdis0++; + else + break; + } + /* + * If the run before the line 'i' found only multimatch lines, we + * return 0 and hence we don't make the current line (i) discarded. + * We want to discard multimatch lines only when they appear in the + * middle of runs with nomatch lines (dis[j] == 0). + */ + if (rdis0 == 0) + return 0; + for (r = 1, rdis1 = 0, rpdis1 = 1; (i + r) <= e; r++) { + if (!dis[i + r]) + rdis1++; + else if (dis[i + r] == 2) + rpdis1++; + else + break; + } + /* + * If the run after the line 'i' found only multimatch lines, we + * return 0 and hence we don't make the current line (i) discarded. + */ + if (rdis1 == 0) + return 0; + rdis1 += rdis0; + rpdis1 += rpdis0; + + return rpdis1 * XDL_KPDIS_RUN < (rpdis1 + rdis1); +} + + +/* + * Try to reduce the problem complexity, discard records that have no + * matches on the other file. Also, lines that have multiple matches + * might be potentially discarded if they happear in a run of discardable. 
+ */ +static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) { + long i, nm, nreff, mlim; + xrecord_t **recs; + xdlclass_t *rcrec; + char *dis, *dis1, *dis2; + + if (!(dis = (char *) xdl_malloc(xdf1->nrec + xdf2->nrec + 2))) { + + return -1; + } + memset(dis, 0, xdf1->nrec + xdf2->nrec + 2); + dis1 = dis; + dis2 = dis1 + xdf1->nrec + 1; + + if ((mlim = xdl_bogosqrt(xdf1->nrec)) > XDL_MAX_EQLIMIT) + mlim = XDL_MAX_EQLIMIT; + for (i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart]; i <= xdf1->dend; i++, recs++) { + rcrec = cf->rcrecs[(*recs)->ha]; + nm = rcrec ? rcrec->len2 : 0; + dis1[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1; + } + + if ((mlim = xdl_bogosqrt(xdf2->nrec)) > XDL_MAX_EQLIMIT) + mlim = XDL_MAX_EQLIMIT; + for (i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart]; i <= xdf2->dend; i++, recs++) { + rcrec = cf->rcrecs[(*recs)->ha]; + nm = rcrec ? rcrec->len1 : 0; + dis2[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1; + } + + for (nreff = 0, i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart]; + i <= xdf1->dend; i++, recs++) { + if (dis1[i] == 1 || + (dis1[i] == 2 && !xdl_clean_mmatch(dis1, i, xdf1->dstart, xdf1->dend))) { + xdf1->rindex[nreff] = i; + xdf1->ha[nreff] = (*recs)->ha; + nreff++; + } else + xdf1->rchg[i] = 1; + } + xdf1->nreff = nreff; + + for (nreff = 0, i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart]; + i <= xdf2->dend; i++, recs++) { + if (dis2[i] == 1 || + (dis2[i] == 2 && !xdl_clean_mmatch(dis2, i, xdf2->dstart, xdf2->dend))) { + xdf2->rindex[nreff] = i; + xdf2->ha[nreff] = (*recs)->ha; + nreff++; + } else + xdf2->rchg[i] = 1; + } + xdf2->nreff = nreff; + + xdl_free(dis); + + return 0; +} + + +/* + * Early trim initial and terminal matching records. 
+ */ +static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2) { + long i, lim; + xrecord_t **recs1, **recs2; + + recs1 = xdf1->recs; + recs2 = xdf2->recs; + for (i = 0, lim = XDL_MIN(xdf1->nrec, xdf2->nrec); i < lim; + i++, recs1++, recs2++) + if ((*recs1)->ha != (*recs2)->ha) + break; + + xdf1->dstart = xdf2->dstart = i; + + recs1 = xdf1->recs + xdf1->nrec - 1; + recs2 = xdf2->recs + xdf2->nrec - 1; + for (lim -= i, i = 0; i < lim; i++, recs1--, recs2--) + if ((*recs1)->ha != (*recs2)->ha) + break; + + xdf1->dend = xdf1->nrec - i - 1; + xdf2->dend = xdf2->nrec - i - 1; + + return 0; +} + + +static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) { + + if (xdl_trim_ends(xdf1, xdf2) < 0 || + xdl_cleanup_records(cf, xdf1, xdf2) < 0) { + + return -1; + } + + return 0; +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/xdiff/xprepare.h Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,34 @@ +/* + * LibXDiff by Davide Libenzi ( File Differential Library ) + * Copyright (C) 2003 Davide Libenzi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#if !defined(XPREPARE_H) +#define XPREPARE_H + + + +int xdl_prepare_env(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, + xdfenv_t *xe); +void xdl_free_env(xdfenv_t *xe); + + + +#endif /* #if !defined(XPREPARE_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/xdiff/xtypes.h Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,67 @@ +/* + * LibXDiff by Davide Libenzi ( File Differential Library ) + * Copyright (C) 2003 Davide Libenzi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#if !defined(XTYPES_H) +#define XTYPES_H + + + +typedef struct s_chanode { + struct s_chanode *next; + long icurr; +} chanode_t; + +typedef struct s_chastore { + chanode_t *head, *tail; + long isize, nsize; + chanode_t *ancur; + chanode_t *sncur; + long scurr; +} chastore_t; + +typedef struct s_xrecord { + struct s_xrecord *next; + char const *ptr; + long size; + unsigned long ha; +} xrecord_t; + +typedef struct s_xdfile { + chastore_t rcha; + long nrec; + unsigned int hbits; + xrecord_t **rhash; + long dstart, dend; + xrecord_t **recs; + char *rchg; + long *rindex; + long nreff; + unsigned long *ha; +} xdfile_t; + +typedef struct s_xdfenv { + xdfile_t xdf1, xdf2; +} xdfenv_t; + + + +#endif /* #if !defined(XTYPES_H) */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/xdiff/xutils.c Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,425 @@ +/* + * LibXDiff by Davide Libenzi ( File Differential Library ) + * Copyright (C) 2003 Davide Libenzi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#include <limits.h> +#include <assert.h> +#include "xinclude.h" + + + + +long xdl_bogosqrt(long n) { + long i; + + /* + * Classical integer square root approximation using shifts. 
+ */ + for (i = 1; n > 0; n >>= 2) + i <<= 1; + + return i; +} + + +int xdl_emit_diffrec(char const *rec, long size, char const *pre, long psize, + xdemitcb_t *ecb) { + int i = 2; + mmbuffer_t mb[3]; + + mb[0].ptr = (char *) pre; + mb[0].size = psize; + mb[1].ptr = (char *) rec; + mb[1].size = size; + if (size > 0 && rec[size - 1] != '\n') { + mb[2].ptr = (char *) "\n\\ No newline at end of file\n"; + mb[2].size = strlen(mb[2].ptr); + i++; + } + if (ecb->outf(ecb->priv, mb, i) < 0) { + + return -1; + } + + return 0; +} + +void *xdl_mmfile_first(mmfile_t *mmf, long *size) +{ + *size = mmf->size; + return mmf->ptr; +} + + +long xdl_mmfile_size(mmfile_t *mmf) +{ + return mmf->size; +} + + +int xdl_cha_init(chastore_t *cha, long isize, long icount) { + + cha->head = cha->tail = NULL; + cha->isize = isize; + cha->nsize = icount * isize; + cha->ancur = cha->sncur = NULL; + cha->scurr = 0; + + return 0; +} + + +void xdl_cha_free(chastore_t *cha) { + chanode_t *cur, *tmp; + + for (cur = cha->head; (tmp = cur) != NULL;) { + cur = cur->next; + xdl_free(tmp); + } +} + + +void *xdl_cha_alloc(chastore_t *cha) { + chanode_t *ancur; + void *data; + + if (!(ancur = cha->ancur) || ancur->icurr == cha->nsize) { + if (!(ancur = (chanode_t *) xdl_malloc(sizeof(chanode_t) + cha->nsize))) { + + return NULL; + } + ancur->icurr = 0; + ancur->next = NULL; + if (cha->tail) + cha->tail->next = ancur; + if (!cha->head) + cha->head = ancur; + cha->tail = ancur; + cha->ancur = ancur; + } + + data = (char *) ancur + sizeof(chanode_t) + ancur->icurr; + ancur->icurr += cha->isize; + + return data; +} + +long xdl_guess_lines(mmfile_t *mf, long sample) { + long nl = 0, size, tsize = 0; + char const *data, *cur, *top; + + if ((cur = data = xdl_mmfile_first(mf, &size)) != NULL) { + for (top = data + size; nl < sample && cur < top; ) { + nl++; + if (!(cur = memchr(cur, '\n', top - cur))) + cur = top; + else + cur++; + } + tsize += (long) (cur - data); + } + + if (nl && tsize) + nl = xdl_mmfile_size(mf) 
/ (tsize / nl); + + return nl + 1; +} + +int xdl_blankline(const char *line, long size, long flags) +{ + long i; + + if (!(flags & XDF_WHITESPACE_FLAGS)) + return (size <= 1); + + for (i = 0; i < size && XDL_ISSPACE(line[i]); i++) + ; + + return (i == size); +} + +/* + * Have we eaten everything on the line, except for an optional + * CR at the very end? + */ +static int ends_with_optional_cr(const char *l, long s, long i) +{ + int complete = s && l[s-1] == '\n'; + + if (complete) + s--; + if (s == i) + return 1; + /* do not ignore CR at the end of an incomplete line */ + if (complete && s == i + 1 && l[i] == '\r') + return 1; + return 0; +} + +int xdl_recmatch(const char *l1, long s1, const char *l2, long s2, long flags) +{ + int i1, i2; + + if (s1 == s2 && !memcmp(l1, l2, s1)) + return 1; + if (!(flags & XDF_WHITESPACE_FLAGS)) + return 0; + + i1 = 0; + i2 = 0; + + /* + * -w matches everything that matches with -b, and -b in turn + * matches everything that matches with --ignore-space-at-eol, + * which in turn matches everything that matches with --ignore-cr-at-eol. + * + * Each flavor of ignoring needs different logic to skip whitespaces + * while we have both sides to compare. 
+ */ + if (flags & XDF_IGNORE_WHITESPACE) { + goto skip_ws; + while (i1 < s1 && i2 < s2) { + if (l1[i1++] != l2[i2++]) + return 0; + skip_ws: + while (i1 < s1 && XDL_ISSPACE(l1[i1])) + i1++; + while (i2 < s2 && XDL_ISSPACE(l2[i2])) + i2++; + } + } else if (flags & XDF_IGNORE_WHITESPACE_CHANGE) { + while (i1 < s1 && i2 < s2) { + if (XDL_ISSPACE(l1[i1]) && XDL_ISSPACE(l2[i2])) { + /* Skip matching spaces and try again */ + while (i1 < s1 && XDL_ISSPACE(l1[i1])) + i1++; + while (i2 < s2 && XDL_ISSPACE(l2[i2])) + i2++; + continue; + } + if (l1[i1++] != l2[i2++]) + return 0; + } + } else if (flags & XDF_IGNORE_WHITESPACE_AT_EOL) { + while (i1 < s1 && i2 < s2 && l1[i1] == l2[i2]) { + i1++; + i2++; + } + } else if (flags & XDF_IGNORE_CR_AT_EOL) { + /* Find the first difference and see how the line ends */ + while (i1 < s1 && i2 < s2 && l1[i1] == l2[i2]) { + i1++; + i2++; + } + return (ends_with_optional_cr(l1, s1, i1) && + ends_with_optional_cr(l2, s2, i2)); + } + + /* + * After running out of one side, the remaining side must have + * nothing but whitespace for the lines to match. Note that + * ignore-whitespace-at-eol case may break out of the loop + * while there still are characters remaining on both lines. 
+ */ + if (i1 < s1) { + while (i1 < s1 && XDL_ISSPACE(l1[i1])) + i1++; + if (s1 != i1) + return 0; + } + if (i2 < s2) { + while (i2 < s2 && XDL_ISSPACE(l2[i2])) + i2++; + return (s2 == i2); + } + return 1; +} + +static unsigned long xdl_hash_record_with_whitespace(char const **data, + char const *top, long flags) { + unsigned long ha = 5381; + char const *ptr = *data; + int cr_at_eol_only = (flags & XDF_WHITESPACE_FLAGS) == XDF_IGNORE_CR_AT_EOL; + + for (; ptr < top && *ptr != '\n'; ptr++) { + if (cr_at_eol_only) { + /* do not ignore CR at the end of an incomplete line */ + if (*ptr == '\r' && + (ptr + 1 < top && ptr[1] == '\n')) + continue; + } + else if (XDL_ISSPACE(*ptr)) { + const char *ptr2 = ptr; + int at_eol; + while (ptr + 1 < top && XDL_ISSPACE(ptr[1]) + && ptr[1] != '\n') + ptr++; + at_eol = (top <= ptr + 1 || ptr[1] == '\n'); + if (flags & XDF_IGNORE_WHITESPACE) + ; /* already handled */ + else if (flags & XDF_IGNORE_WHITESPACE_CHANGE + && !at_eol) { + ha += (ha << 5); + ha ^= (unsigned long) ' '; + } + else if (flags & XDF_IGNORE_WHITESPACE_AT_EOL + && !at_eol) { + while (ptr2 != ptr + 1) { + ha += (ha << 5); + ha ^= (unsigned long) *ptr2; + ptr2++; + } + } + continue; + } + ha += (ha << 5); + ha ^= (unsigned long) *ptr; + } + *data = ptr < top ? ptr + 1: ptr; + + return ha; +} + +unsigned long xdl_hash_record(char const **data, char const *top, long flags) { + unsigned long ha = 5381; + char const *ptr = *data; + + if (flags & XDF_WHITESPACE_FLAGS) + return xdl_hash_record_with_whitespace(data, top, flags); + + for (; ptr < top && *ptr != '\n'; ptr++) { + ha += (ha << 5); + ha ^= (unsigned long) *ptr; + } + *data = ptr < top ? ptr + 1: ptr; + + return ha; +} + +unsigned int xdl_hashbits(unsigned int size) { + unsigned int val = 1, bits = 0; + + for (; val < size && bits < CHAR_BIT * sizeof(unsigned int); val <<= 1, bits++); + return bits ? 
bits: 1; +} + + +int xdl_num_out(char *out, long val) { + char *ptr, *str = out; + char buf[32]; + + ptr = buf + sizeof(buf) - 1; + *ptr = '\0'; + if (val < 0) { + *--ptr = '-'; + val = -val; + } + for (; val && ptr > buf; val /= 10) + *--ptr = "0123456789"[val % 10]; + if (*ptr) + for (; *ptr; ptr++, str++) + *str = *ptr; + else + *str++ = '0'; + *str = '\0'; + + return str - out; +} + +int xdl_emit_hunk_hdr(long s1, long c1, long s2, long c2, + const char *func, long funclen, xdemitcb_t *ecb) { + int nb = 0; + mmbuffer_t mb; + char buf[128]; + + memcpy(buf, "@@ -", 4); + nb += 4; + + nb += xdl_num_out(buf + nb, c1 ? s1: s1 - 1); + + if (c1 != 1) { + memcpy(buf + nb, ",", 1); + nb += 1; + + nb += xdl_num_out(buf + nb, c1); + } + + memcpy(buf + nb, " +", 2); + nb += 2; + + nb += xdl_num_out(buf + nb, c2 ? s2: s2 - 1); + + if (c2 != 1) { + memcpy(buf + nb, ",", 1); + nb += 1; + + nb += xdl_num_out(buf + nb, c2); + } + + memcpy(buf + nb, " @@", 3); + nb += 3; + if (func && funclen) { + buf[nb++] = ' '; + if (funclen > sizeof(buf) - nb - 1) + funclen = sizeof(buf) - nb - 1; + memcpy(buf + nb, func, funclen); + nb += funclen; + } + buf[nb++] = '\n'; + + mb.ptr = buf; + mb.size = nb; + if (ecb->outf(ecb->priv, &mb, 1) < 0) + return -1; + + return 0; +} + +int xdl_fall_back_diff(xdfenv_t *diff_env, xpparam_t const *xpp, + int line1, int count1, int line2, int count2) +{ + /* + * This probably does not work outside Git, since + * we have a very simple mmfile structure. + * + * Note: ideally, we would reuse the prepared environment, but + * the libxdiff interface does not (yet) allow for diffing only + * ranges of lines instead of the whole files. 
+ */ + mmfile_t subfile1, subfile2; + xdfenv_t env; + + subfile1.ptr = (char *)diff_env->xdf1.recs[line1 - 1]->ptr; + subfile1.size = diff_env->xdf1.recs[line1 + count1 - 2]->ptr + + diff_env->xdf1.recs[line1 + count1 - 2]->size - subfile1.ptr; + subfile2.ptr = (char *)diff_env->xdf2.recs[line2 - 1]->ptr; + subfile2.size = diff_env->xdf2.recs[line2 + count2 - 2]->ptr + + diff_env->xdf2.recs[line2 + count2 - 2]->size - subfile2.ptr; + if (xdl_do_diff(&subfile1, &subfile2, xpp, &env) < 0) + return -1; + + memcpy(diff_env->xdf1.rchg + line1 - 1, env.xdf1.rchg, count1); + memcpy(diff_env->xdf2.rchg + line2 - 1, env.xdf2.rchg, count2); + + xdl_free_env(&env); + + return 0; +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/thirdparty/xdiff/xutils.h Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,47 @@ +/* + * LibXDiff by Davide Libenzi ( File Differential Library ) + * Copyright (C) 2003 Davide Libenzi + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see + * <http://www.gnu.org/licenses/>. + * + * Davide Libenzi <davidel@xmailserver.org> + * + */ + +#if !defined(XUTILS_H) +#define XUTILS_H + + + +long xdl_bogosqrt(long n); +int xdl_emit_diffrec(char const *rec, long size, char const *pre, long psize, + xdemitcb_t *ecb); +int xdl_cha_init(chastore_t *cha, long isize, long icount); +void xdl_cha_free(chastore_t *cha); +void *xdl_cha_alloc(chastore_t *cha); +long xdl_guess_lines(mmfile_t *mf, long sample); +int xdl_blankline(const char *line, long size, long flags); +int xdl_recmatch(const char *l1, long s1, const char *l2, long s2, long flags); +unsigned long xdl_hash_record(char const **data, char const *top, long flags); +unsigned int xdl_hashbits(unsigned int size); +int xdl_num_out(char *out, long val); +int xdl_emit_hunk_hdr(long s1, long c1, long s2, long c2, + const char *func, long funclen, xdemitcb_t *ecb); +int xdl_fall_back_diff(xdfenv_t *diff_env, xpparam_t const *xpp, + int line1, int count1, int line2, int count2); + + + +#endif /* #if !defined(XUTILS_H) */
--- a/mercurial/transaction.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/transaction.py Sun Mar 04 10:42:51 2018 -0500 @@ -612,7 +612,7 @@ lines = fp.readlines() if lines: ver = lines[0][:-1] - if ver == str(version): + if ver == (b'%d' % version): for line in lines[1:]: if line: # Shave off the trailing newline
--- a/mercurial/ui.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/ui.py Sun Mar 04 10:42:51 2018 -0500 @@ -37,6 +37,7 @@ scmutil, util, ) +from .utils import dateutil urlreq = util.urlreq @@ -45,7 +46,7 @@ if not c.isalnum()) # The config knobs that will be altered (if unset) by ui.tweakdefaults. -tweakrc = """ +tweakrc = b""" [ui] # The rollback command is dangerous. As a rule, don't use it. rollback = False @@ -148,14 +149,10 @@ } def _maybestrurl(maybebytes): - if maybebytes is None: - return None - return pycompat.strurl(maybebytes) + return util.rapply(pycompat.strurl, maybebytes) def _maybebytesurl(maybestr): - if maybestr is None: - return None - return pycompat.bytesurl(maybestr) + return util.rapply(pycompat.bytesurl, maybestr) class httppasswordmgrdbproxy(object): """Delays loading urllib2 until it's needed.""" @@ -168,18 +165,14 @@ return self._mgr def add_password(self, realm, uris, user, passwd): - if isinstance(uris, tuple): - uris = tuple(_maybestrurl(u) for u in uris) - else: - uris = _maybestrurl(uris) return self._get_mgr().add_password( - _maybestrurl(realm), uris, + _maybestrurl(realm), _maybestrurl(uris), _maybestrurl(user), _maybestrurl(passwd)) def find_user_password(self, realm, uri): - return tuple(_maybebytesurl(v) for v in - self._get_mgr().find_user_password(_maybestrurl(realm), - _maybestrurl(uri))) + mgr = self._get_mgr() + return _maybebytesurl(mgr.find_user_password(_maybestrurl(realm), + _maybestrurl(uri))) def _catchterm(*args): raise error.SignalInterrupt @@ -374,7 +367,7 @@ except error.ConfigError as inst: if trusted: raise - self.warn(_("ignored: %s\n") % str(inst)) + self.warn(_("ignored: %s\n") % util.forcebytestr(inst)) if self.plain(): for k in ('debug', 'fallbackencoding', 'quiet', 'slash', @@ -506,7 +499,7 @@ and default != itemdefault): msg = ("specifying a mismatched default value for a registered " "config item: '%s.%s' '%s'") - msg %= (section, name, default) + msg %= (section, name, pycompat.bytestr(default)) 
self.develwarn(msg, 2, 'warn-config-default') for s, n in alternates: @@ -722,7 +715,7 @@ (0, 0) """ if self.config(section, name, default, untrusted): - return self.configwith(util.parsedate, section, name, default, + return self.configwith(dateutil.parsedate, section, name, default, 'date', untrusted) if default is _unset: return None @@ -816,8 +809,8 @@ hint=_("use 'hg config --edit' " 'to set your username')) if "\n" in user: - raise error.Abort(_("username %s contains a newline\n") - % repr(user)) + raise error.Abort(_("username %r contains a newline\n") + % pycompat.bytestr(user)) return user def shortuser(self, user): @@ -878,6 +871,17 @@ return "".join(self._buffers.pop()) + def canwritewithoutlabels(self): + '''check if write skips the label''' + if self._buffers and not self._bufferapplylabels: + return True + return self._colormode is None + + def canbatchlabeledwrites(self): + '''check if write calls with labels are batchable''' + # Windows color printing is special, see ``write``. + return self._colormode != 'win32' + def write(self, *args, **opts): '''write args to output @@ -894,13 +898,17 @@ "cmdname.type" is recommended. For example, status issues a label of "status.modified" for modified files. ''' - if self._buffers and not opts.get(r'prompt', False): + if self._buffers: if self._bufferapplylabels: label = opts.get(r'label', '') self._buffers[-1].extend(self.label(a, label) for a in args) else: self._buffers[-1].extend(args) - elif self._colormode == 'win32': + else: + self._writenobuf(*args, **opts) + + def _writenobuf(self, *args, **opts): + if self._colormode == 'win32': # windows color printing is its own can of crab, defer to # the color module and that is it. 
color.win32print(self, self._write, *args, **opts) @@ -916,8 +924,7 @@ # opencode timeblockedsection because this is a critical path starttime = util.timer() try: - for a in msgs: - self.fout.write(a) + self.fout.write(''.join(msgs)) except IOError as err: raise error.StdioError(err) finally: @@ -1255,7 +1262,7 @@ return i - def _readline(self, prompt=''): + def _readline(self): if self._isatty(self.fin): try: # magically add command line editing support, where @@ -1267,11 +1274,6 @@ except Exception: pass - # call write() so output goes through subclassed implementation - # e.g. color extension on Windows - self.write(prompt, prompt=True) - self.flush() - # prompt ' ' must exist; otherwise readline may delete entire line # - http://bugs.python.org/issue12833 with self.timeblockedsection('stdio'): @@ -1290,8 +1292,10 @@ if not self.interactive(): self.write(msg, ' ', default or '', "\n") return default + self._writenobuf(msg, label='ui.prompt') + self.flush() try: - r = self._readline(self.label(msg, 'ui.prompt')) + r = self._readline() if not r: r = default if self.configbool('ui', 'promptecho'): @@ -1509,11 +1513,7 @@ ''.join(exconly)) else: output = traceback.format_exception(exc[0], exc[1], exc[2]) - data = r''.join(output) - if pycompat.ispy3: - enc = pycompat.sysstr(encoding.encoding) - data = data.encode(enc, errors=r'replace') - self.write_err(data) + self.write_err(encoding.strtolocal(r''.join(output))) return self.tracebackflag or force def geteditor(self): @@ -1621,13 +1621,15 @@ else: curframe = inspect.currentframe() calframe = inspect.getouterframes(curframe, 2) - self.write_err('%s at: %s:%s (%s)\n' - % ((msg,) + calframe[stacklevel][1:4])) - self.log('develwarn', '%s at: %s:%s (%s)\n', - msg, *calframe[stacklevel][1:4]) + fname, lineno, fmsg = calframe[stacklevel][1:4] + fname, fmsg = pycompat.sysbytes(fname), pycompat.sysbytes(fmsg) + self.write_err('%s at: %s:%d (%s)\n' + % (msg, fname, lineno, fmsg)) + self.log('develwarn', '%s at: %s:%d (%s)\n', 
+ msg, fname, lineno, fmsg) curframe = calframe = None # avoid cycles - def deprecwarn(self, msg, version): + def deprecwarn(self, msg, version, stacklevel=2): """issue a deprecation warning - msg: message explaining what is deprecated and how to upgrade, @@ -1638,7 +1640,7 @@ return msg += ("\n(compatibility will be dropped after Mercurial-%s," " update your code.)") % version - self.develwarn(msg, stacklevel=2, config='deprec-warn') + self.develwarn(msg, stacklevel=stacklevel, config='deprec-warn') def exportableenviron(self): """The environment variables that are safe to export, e.g. through
--- a/mercurial/upgrade.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/upgrade.py Sun Mar 04 10:42:51 2018 -0500 @@ -46,7 +46,6 @@ return { # The upgrade code does not yet support these experimental features. # This is an artificial limitation. - 'manifestv2', 'treemanifest', # This was a precursor to generaldelta and was never enabled by default. # It should (hopefully) not exist in the wild.
--- a/mercurial/url.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/url.py Sun Mar 04 10:42:51 2018 -0500 @@ -67,15 +67,15 @@ user, passwd = auth.get('username'), auth.get('password') self.ui.debug("using auth.%s.* for authentication\n" % group) if not user or not passwd: - u = util.url(authuri) + u = util.url(pycompat.bytesurl(authuri)) u.query = None if not self.ui.interactive(): raise error.Abort(_('http authorization required for %s') % - util.hidepassword(str(u))) + util.hidepassword(bytes(u))) self.ui.write(_("http authorization required for %s\n") % - util.hidepassword(str(u))) - self.ui.write(_("realm: %s\n") % realm) + util.hidepassword(bytes(u))) + self.ui.write(_("realm: %s\n") % pycompat.bytesurl(realm)) if user: self.ui.write(_("user: %s\n") % user) else: @@ -124,10 +124,9 @@ else: self.no_list = no_list - proxyurl = str(proxy) + proxyurl = bytes(proxy) proxies = {'http': proxyurl, 'https': proxyurl} - ui.debug('proxying through http://%s:%s\n' % - (proxy.host, proxy.port)) + ui.debug('proxying through %s\n' % util.hidepassword(proxyurl)) else: proxies = {} @@ -425,8 +424,8 @@ user, pw = self.passwd.find_user_password( realm, urllibcompat.getfullurl(req)) if pw is not None: - raw = "%s:%s" % (user, pw) - auth = 'Basic %s' % base64.b64encode(raw).strip() + raw = "%s:%s" % (pycompat.bytesurl(user), pycompat.bytesurl(pw)) + auth = r'Basic %s' % pycompat.strurl(base64.b64encode(raw).strip()) if req.get_header(self.auth_header, None) == auth: return None self.auth = auth @@ -450,7 +449,7 @@ self.cookiejar = cookiejar except util.cookielib.LoadError as e: ui.warn(_('(error loading cookie file %s: %s; continuing without ' - 'cookies)\n') % (cookiefile, str(e))) + 'cookies)\n') % (cookiefile, util.forcebytestr(e))) def http_request(self, request): if self.cookiejar: @@ -471,17 +470,9 @@ construct an opener suitable for urllib2 authinfo will be added to the password manager ''' - # experimental config: ui.usehttp2 - if ui.configbool('ui', 'usehttp2'): - 
handlers = [ - httpconnectionmod.http2handler( - ui, - passwordmgr(ui, ui.httppasswordmgrdb)) - ] - else: - handlers = [httphandler()] - if has_https: - handlers.append(httpshandler(ui)) + handlers = [httphandler()] + if has_https: + handlers.append(httpshandler(ui)) handlers.append(proxyhandler(ui)) @@ -537,4 +528,4 @@ path = util.normpath(os.path.abspath(url_)) url_ = 'file://' + urlreq.pathname2url(path) authinfo = None - return opener(ui, authinfo).open(url_, data) + return opener(ui, authinfo).open(pycompat.strurl(url_), data)
--- a/mercurial/urllibcompat.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/urllibcompat.py Sun Mar 04 10:42:51 2018 -0500 @@ -47,6 +47,7 @@ "urlparse", "urlunparse", )) + urlreq._registeralias(urllib.parse, "parse_qs", "parseqs") urlreq._registeralias(urllib.parse, "unquote_to_bytes", "unquote") import urllib.request urlreq._registeraliases(urllib.request, ( @@ -157,6 +158,7 @@ "urlparse", "urlunparse", )) + urlreq._registeralias(urlparse, "parse_qs", "parseqs") urlerr._registeraliases(urllib2, ( "HTTPError", "URLError",
--- a/mercurial/util.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/util.py Sun Mar 04 10:42:51 2018 -0500 @@ -17,15 +17,14 @@ import abc import bz2 -import calendar import codecs import collections import contextlib -import datetime import errno import gc import hashlib import imp +import io import itertools import mmap import os @@ -54,6 +53,7 @@ pycompat, urllibcompat, ) +from .utils import dateutil base85 = policy.importmod(r'base85') osutil = policy.importmod(r'osutil') @@ -147,6 +147,7 @@ setflags = platform.setflags setsignalhandler = platform.setsignalhandler shellquote = platform.shellquote +shellsplit = platform.shellsplit spawndetached = platform.spawndetached split = platform.split sshargs = platform.sshargs @@ -183,6 +184,39 @@ def safehasattr(thing, attr): return getattr(thing, attr, _notset) is not _notset +def _rapply(f, xs): + if xs is None: + # assume None means non-value of optional data + return xs + if isinstance(xs, (list, set, tuple)): + return type(xs)(_rapply(f, x) for x in xs) + if isinstance(xs, dict): + return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items()) + return f(xs) + +def rapply(f, xs): + """Apply function recursively to every item preserving the data structure + + >>> def f(x): + ... 
return 'f(%s)' % x + >>> rapply(f, None) is None + True + >>> rapply(f, 'a') + 'f(a)' + >>> rapply(f, {'a'}) == {'f(a)'} + True + >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []]) + ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []] + + >>> xs = [object()] + >>> rapply(pycompat.identity, xs) is xs + True + """ + if f is pycompat.identity: + # fast path mainly for py2 + return xs + return _rapply(f, xs) + def bytesinput(fin, fout, *args, **kwargs): sin, sout = sys.stdin, sys.stdout try: @@ -211,6 +245,10 @@ warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial') warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext') warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd') +if _dowarn and pycompat.ispy3: + # silence warning emitted by passing user string to re.sub() + warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning, + r'mercurial') def nouideprecwarn(msg, version, stacklevel=1): """Issue an python native deprecation warning @@ -220,7 +258,7 @@ if _dowarn: msg += ("\n(compatibility will be dropped after Mercurial-%s," " update your code.)") % version - warnings.warn(msg, DeprecationWarning, stacklevel + 1) + warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1) DIGESTS = { 'md5': hashlib.md5, @@ -338,6 +376,13 @@ This class lives in the 'util' module because it makes use of the 'os' module from the python stdlib. """ + def __new__(cls, fh): + # If we receive a fileobjectproxy, we need to use a variation of this + # class that notifies observers about activity. 
+ if isinstance(fh, fileobjectproxy): + cls = observedbufferedinputpipe + + return super(bufferedinputpipe, cls).__new__(cls) def __init__(self, input): self._input = input @@ -418,6 +463,8 @@ self._lenbuf += len(data) self._buffer.append(data) + return data + def mmapread(fp): try: fd = getattr(fp, 'fileno', lambda: fp)() @@ -453,6 +500,300 @@ env=env) return p.stdin, p.stdout, p.stderr, p +class fileobjectproxy(object): + """A proxy around file objects that tells a watcher when events occur. + + This type is intended to only be used for testing purposes. Think hard + before using it in important code. + """ + __slots__ = ( + r'_orig', + r'_observer', + ) + + def __init__(self, fh, observer): + object.__setattr__(self, r'_orig', fh) + object.__setattr__(self, r'_observer', observer) + + def __getattribute__(self, name): + ours = { + r'_observer', + + # IOBase + r'close', + # closed if a property + r'fileno', + r'flush', + r'isatty', + r'readable', + r'readline', + r'readlines', + r'seek', + r'seekable', + r'tell', + r'truncate', + r'writable', + r'writelines', + # RawIOBase + r'read', + r'readall', + r'readinto', + r'write', + # BufferedIOBase + # raw is a property + r'detach', + # read defined above + r'read1', + # readinto defined above + # write defined above + } + + # We only observe some methods. + if name in ours: + return object.__getattribute__(self, name) + + return getattr(object.__getattribute__(self, r'_orig'), name) + + def __delattr__(self, name): + return delattr(object.__getattribute__(self, r'_orig'), name) + + def __setattr__(self, name, value): + return setattr(object.__getattribute__(self, r'_orig'), name, value) + + def __iter__(self): + return object.__getattribute__(self, r'_orig').__iter__() + + def _observedcall(self, name, *args, **kwargs): + # Call the original object. 
+ orig = object.__getattribute__(self, r'_orig') + res = getattr(orig, name)(*args, **kwargs) + + # Call a method on the observer of the same name with arguments + # so it can react, log, etc. + observer = object.__getattribute__(self, r'_observer') + fn = getattr(observer, name, None) + if fn: + fn(res, *args, **kwargs) + + return res + + def close(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'close', *args, **kwargs) + + def fileno(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'fileno', *args, **kwargs) + + def flush(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'flush', *args, **kwargs) + + def isatty(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'isatty', *args, **kwargs) + + def readable(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'readable', *args, **kwargs) + + def readline(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'readline', *args, **kwargs) + + def readlines(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'readlines', *args, **kwargs) + + def seek(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'seek', *args, **kwargs) + + def seekable(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'seekable', *args, **kwargs) + + def tell(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'tell', *args, **kwargs) + + def truncate(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'truncate', *args, **kwargs) + + def writable(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'writable', *args, **kwargs) + + def writelines(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'writelines', 
*args, **kwargs) + + def read(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'read', *args, **kwargs) + + def readall(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'readall', *args, **kwargs) + + def readinto(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'readinto', *args, **kwargs) + + def write(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'write', *args, **kwargs) + + def detach(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'detach', *args, **kwargs) + + def read1(self, *args, **kwargs): + return object.__getattribute__(self, r'_observedcall')( + r'read1', *args, **kwargs) + +class observedbufferedinputpipe(bufferedinputpipe): + """A variation of bufferedinputpipe that is aware of fileobjectproxy. + + ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that + bypass ``fileobjectproxy``. Because of this, we need to make + ``bufferedinputpipe`` aware of these operations. + + This variation of ``bufferedinputpipe`` can notify observers about + ``os.read()`` events. It also re-publishes other events, such as + ``read()`` and ``readline()``. + """ + def _fillbuffer(self): + res = super(observedbufferedinputpipe, self)._fillbuffer() + + fn = getattr(self._input._observer, r'osread', None) + if fn: + fn(res, _chunksize) + + return res + + # We use different observer methods because the operation isn't + # performed on the actual file object but on us. 
+ def read(self, size): + res = super(observedbufferedinputpipe, self).read(size) + + fn = getattr(self._input._observer, r'bufferedread', None) + if fn: + fn(res, size) + + return res + + def readline(self, *args, **kwargs): + res = super(observedbufferedinputpipe, self).readline(*args, **kwargs) + + fn = getattr(self._input._observer, r'bufferedreadline', None) + if fn: + fn(res) + + return res + +DATA_ESCAPE_MAP = {pycompat.bytechr(i): br'\x%02x' % i for i in range(256)} +DATA_ESCAPE_MAP.update({ + b'\\': b'\\\\', + b'\r': br'\r', + b'\n': br'\n', +}) +DATA_ESCAPE_RE = remod.compile(br'[\x00-\x08\x0a-\x1f\\\x7f-\xff]') + +def escapedata(s): + if isinstance(s, bytearray): + s = bytes(s) + + return DATA_ESCAPE_RE.sub(lambda m: DATA_ESCAPE_MAP[m.group(0)], s) + +class fileobjectobserver(object): + """Logs file object activity.""" + def __init__(self, fh, name, reads=True, writes=True, logdata=False): + self.fh = fh + self.name = name + self.logdata = logdata + self.reads = reads + self.writes = writes + + def _writedata(self, data): + if not self.logdata: + self.fh.write('\n') + return + + # Simple case writes all data on a single line. + if b'\n' not in data: + self.fh.write(': %s\n' % escapedata(data)) + return + + # Data with newlines is written to multiple lines. + self.fh.write(':\n') + lines = data.splitlines(True) + for line in lines: + self.fh.write('%s> %s\n' % (self.name, escapedata(line))) + + def read(self, res, size=-1): + if not self.reads: + return + # Python 3 can return None from reads at EOF instead of empty strings. 
+ if res is None: + res = '' + + self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res))) + self._writedata(res) + + def readline(self, res, limit=-1): + if not self.reads: + return + + self.fh.write('%s> readline() -> %d' % (self.name, len(res))) + self._writedata(res) + + def readinto(self, res, dest): + if not self.reads: + return + + self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest), + res)) + data = dest[0:res] if res is not None else b'' + self._writedata(data) + + def write(self, res, data): + if not self.writes: + return + + # Python 2 returns None from some write() calls. Python 3 (reasonably) + # returns the integer bytes written. + if res is None and data: + res = len(data) + + self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res)) + self._writedata(data) + + def flush(self, res): + if not self.writes: + return + + self.fh.write('%s> flush() -> %r\n' % (self.name, res)) + + # For observedbufferedinputpipe. + def bufferedread(self, res, size): + self.fh.write('%s> bufferedread(%d) -> %d' % ( + self.name, size, len(res))) + self._writedata(res) + + def bufferedreadline(self, res): + self.fh.write('%s> bufferedreadline() -> %d' % (self.name, len(res))) + self._writedata(res) + +def makeloggingfileobject(logh, fh, name, reads=True, writes=True, + logdata=False): + """Turn a file object into a logging file object.""" + + observer = fileobjectobserver(logh, name, reads=reads, writes=writes, + logdata=logdata) + return fileobjectproxy(fh, observer) + def version(): """Return version information if available.""" try: @@ -530,48 +871,6 @@ if n == 4: return (vints[0], vints[1], vints[2], extra) -# used by parsedate -defaultdateformats = ( - '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601 - '%Y-%m-%dT%H:%M', # without seconds - '%Y-%m-%dT%H%M%S', # another awful but legal variant without : - '%Y-%m-%dT%H%M', # without seconds - '%Y-%m-%d %H:%M:%S', # our common legal variant - '%Y-%m-%d %H:%M', # without seconds - '%Y-%m-%d %H%M%S', # 
without : - '%Y-%m-%d %H%M', # without seconds - '%Y-%m-%d %I:%M:%S%p', - '%Y-%m-%d %H:%M', - '%Y-%m-%d %I:%M%p', - '%Y-%m-%d', - '%m-%d', - '%m/%d', - '%m/%d/%y', - '%m/%d/%Y', - '%a %b %d %H:%M:%S %Y', - '%a %b %d %I:%M:%S%p %Y', - '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822" - '%b %d %H:%M:%S %Y', - '%b %d %I:%M:%S%p %Y', - '%b %d %H:%M:%S', - '%b %d %I:%M:%S%p', - '%b %d %H:%M', - '%b %d %I:%M%p', - '%b %d %Y', - '%b %d', - '%H:%M:%S', - '%I:%M:%S%p', - '%H:%M', - '%I:%M%p', -) - -extendeddateformats = defaultdateformats + ( - "%Y", - "%Y-%m", - "%b", - "%b %Y", - ) - def cachefunc(func): '''cache the result of function calls''' # XXX doesn't handle keywords args @@ -1144,7 +1443,10 @@ def _isstdout(f): fileno = getattr(f, 'fileno', None) - return fileno and fileno() == sys.__stdout__.fileno() + try: + return fileno and fileno() == sys.__stdout__.fileno() + except io.UnsupportedOperation: + return False # fileno() raised UnsupportedOperation def shellenviron(environ=None): """return environ with optional override, useful for shelling out""" @@ -1154,7 +1456,7 @@ return '0' if val is True: return '1' - return str(val) + return pycompat.bytestr(val) env = dict(encoding.environ) if environ: env.update((k, py2shell(v)) for k, v in environ.iteritems()) @@ -1947,274 +2249,34 @@ limit -= len(s) yield s -def makedate(timestamp=None): - '''Return a unix timestamp (or the current time) as a (unixtime, - offset) tuple based off the local timezone.''' - if timestamp is None: - timestamp = time.time() - if timestamp < 0: - hint = _("check your clock") - raise Abort(_("negative timestamp: %d") % timestamp, hint=hint) - delta = (datetime.datetime.utcfromtimestamp(timestamp) - - datetime.datetime.fromtimestamp(timestamp)) - tz = delta.days * 86400 + delta.seconds - return timestamp, tz - -def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'): - """represent a (unixtime, offset) tuple as a localized time. 
- unixtime is seconds since the epoch, and offset is the time zone's - number of seconds away from UTC. - - >>> datestr((0, 0)) - 'Thu Jan 01 00:00:00 1970 +0000' - >>> datestr((42, 0)) - 'Thu Jan 01 00:00:42 1970 +0000' - >>> datestr((-42, 0)) - 'Wed Dec 31 23:59:18 1969 +0000' - >>> datestr((0x7fffffff, 0)) - 'Tue Jan 19 03:14:07 2038 +0000' - >>> datestr((-0x80000000, 0)) - 'Fri Dec 13 20:45:52 1901 +0000' - """ - t, tz = date or makedate() - if "%1" in format or "%2" in format or "%z" in format: - sign = (tz > 0) and "-" or "+" - minutes = abs(tz) // 60 - q, r = divmod(minutes, 60) - format = format.replace("%z", "%1%2") - format = format.replace("%1", "%c%02d" % (sign, q)) - format = format.replace("%2", "%02d" % r) - d = t - tz - if d > 0x7fffffff: - d = 0x7fffffff - elif d < -0x80000000: - d = -0x80000000 - # Never use time.gmtime() and datetime.datetime.fromtimestamp() - # because they use the gmtime() system call which is buggy on Windows - # for negative values. - t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d) - s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format))) - return s - -def shortdate(date=None): - """turn (timestamp, tzoff) tuple into iso 8631 date.""" - return datestr(date, format='%Y-%m-%d') - -def parsetimezone(s): - """find a trailing timezone, if any, in string, and return a - (offset, remainder) pair""" - - if s.endswith("GMT") or s.endswith("UTC"): - return 0, s[:-3].rstrip() - - # Unix-style timezones [+-]hhmm - if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit(): - sign = (s[-5] == "+") and 1 or -1 - hours = int(s[-4:-2]) - minutes = int(s[-2:]) - return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip() - - # ISO8601 trailing Z - if s.endswith("Z") and s[-2:-1].isdigit(): - return 0, s[:-1] - - # ISO8601-style [+-]hh:mm - if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and - s[-5:-3].isdigit() and s[-2:].isdigit()): - sign = (s[-6] == "+") and 1 or -1 - hours = int(s[-5:-3]) - minutes = int(s[-2:]) 
- return -sign * (hours * 60 + minutes) * 60, s[:-6] - - return None, s - -def strdate(string, format, defaults=None): - """parse a localized time string and return a (unixtime, offset) tuple. - if the string cannot be parsed, ValueError is raised.""" - if defaults is None: - defaults = {} - - # NOTE: unixtime = localunixtime + offset - offset, date = parsetimezone(string) - - # add missing elements from defaults - usenow = False # default to using biased defaults - for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity - part = pycompat.bytestr(part) - found = [True for p in part if ("%"+p) in format] - if not found: - date += "@" + defaults[part][usenow] - format += "@%" + part[0] - else: - # We've found a specific time element, less specific time - # elements are relative to today - usenow = True - - timetuple = time.strptime(encoding.strfromlocal(date), - encoding.strfromlocal(format)) - localunixtime = int(calendar.timegm(timetuple)) - if offset is None: - # local timezone - unixtime = int(time.mktime(timetuple)) - offset = unixtime - localunixtime - else: - unixtime = localunixtime + offset - return unixtime, offset - -def parsedate(date, formats=None, bias=None): - """parse a localized date/time and return a (unixtime, offset) tuple. - - The date may be a "unixtime offset" string or in one of the specified - formats. If the date already is a (unixtime, offset) tuple, it is returned. - - >>> parsedate(b' today ') == parsedate( - ... datetime.date.today().strftime('%b %d').encode('ascii')) - True - >>> parsedate(b'yesterday ') == parsedate( - ... (datetime.date.today() - datetime.timedelta(days=1) - ... ).strftime('%b %d').encode('ascii')) - True - >>> now, tz = makedate() - >>> strnow, strtz = parsedate(b'now') - >>> (strnow - now) < 1 - True - >>> tz == strtz - True +class cappedreader(object): + """A file object proxy that allows reading up to N bytes. 
+ + Given a source file object, instances of this type allow reading up to + N bytes from that source file object. Attempts to read past the allowed + limit are treated as EOF. + + It is assumed that I/O is not performed on the original file object + in addition to I/O that is performed by this instance. If there is, + state tracking will get out of sync and unexpected results will ensue. """ - if bias is None: - bias = {} - if not date: - return 0, 0 - if isinstance(date, tuple) and len(date) == 2: - return date - if not formats: - formats = defaultdateformats - date = date.strip() - - if date == 'now' or date == _('now'): - return makedate() - if date == 'today' or date == _('today'): - date = datetime.date.today().strftime(r'%b %d') - date = encoding.strtolocal(date) - elif date == 'yesterday' or date == _('yesterday'): - date = (datetime.date.today() - - datetime.timedelta(days=1)).strftime(r'%b %d') - date = encoding.strtolocal(date) - - try: - when, offset = map(int, date.split(' ')) - except ValueError: - # fill out defaults - now = makedate() - defaults = {} - for part in ("d", "mb", "yY", "HI", "M", "S"): - # this piece is for rounding the specific end of unknowns - b = bias.get(part) - if b is None: - if part[0:1] in "HMS": - b = "00" - else: - b = "0" - - # this piece is for matching the generic end to today's date - n = datestr(now, "%" + part[0:1]) - - defaults[part] = (b, n) - - for format in formats: - try: - when, offset = strdate(date, format, defaults) - except (ValueError, OverflowError): - pass - else: - break - else: - raise error.ParseError(_('invalid date: %r') % date) - # validate explicit (probably user-specified) date and - # time zone offset. values must fit in signed 32 bits for - # current 32-bit linux runtimes. 
timezones go from UTC-12 - # to UTC+14 - if when < -0x80000000 or when > 0x7fffffff: - raise error.ParseError(_('date exceeds 32 bits: %d') % when) - if offset < -50400 or offset > 43200: - raise error.ParseError(_('impossible time zone offset: %d') % offset) - return when, offset - -def matchdate(date): - """Return a function that matches a given date match specifier - - Formats include: - - '{date}' match a given date to the accuracy provided - - '<{date}' on or before a given date - - '>{date}' on or after a given date - - >>> p1 = parsedate(b"10:29:59") - >>> p2 = parsedate(b"10:30:00") - >>> p3 = parsedate(b"10:30:59") - >>> p4 = parsedate(b"10:31:00") - >>> p5 = parsedate(b"Sep 15 10:30:00 1999") - >>> f = matchdate(b"10:30") - >>> f(p1[0]) - False - >>> f(p2[0]) - True - >>> f(p3[0]) - True - >>> f(p4[0]) - False - >>> f(p5[0]) - False - """ - - def lower(date): - d = {'mb': "1", 'd': "1"} - return parsedate(date, extendeddateformats, d)[0] - - def upper(date): - d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"} - for days in ("31", "30", "29"): - try: - d["d"] = days - return parsedate(date, extendeddateformats, d)[0] - except error.ParseError: - pass - d["d"] = "28" - return parsedate(date, extendeddateformats, d)[0] - - date = date.strip() - - if not date: - raise Abort(_("dates cannot consist entirely of whitespace")) - elif date[0] == "<": - if not date[1:]: - raise Abort(_("invalid day spec, use '<DATE'")) - when = upper(date[1:]) - return lambda x: x <= when - elif date[0] == ">": - if not date[1:]: - raise Abort(_("invalid day spec, use '>DATE'")) - when = lower(date[1:]) - return lambda x: x >= when - elif date[0] == "-": - try: - days = int(date[1:]) - except ValueError: - raise Abort(_("invalid day spec: %s") % date[1:]) - if days < 0: - raise Abort(_("%s must be nonnegative (see 'hg help dates')") - % date[1:]) - when = makedate()[0] - days * 3600 * 24 - return lambda x: x >= when - elif " to " in date: - a, b = date.split(" to ") - start, stop 
= lower(a), upper(b) - return lambda x: x >= start and x <= stop - else: - start, stop = lower(date), upper(date) - return lambda x: x >= start and x <= stop + def __init__(self, fh, limit): + """Allow reading up to <limit> bytes from <fh>.""" + self._fh = fh + self._left = limit + + def read(self, n=-1): + if not self._left: + return b'' + + if n < 0: + n = self._left + + data = self._fh.read(min(n, self._left)) + self._left -= len(data) + assert self._left >= 0 + + return data def stringmatcher(pattern, casesensitive=True): """ @@ -2394,7 +2456,7 @@ def uirepr(s): # Avoid double backslash in Windows path repr() - return repr(s).replace('\\\\', '\\') + return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\') # delay import of textwrap def MBTextWrapper(**kwargs): @@ -2684,7 +2746,7 @@ pass try: - return socket.getservbyname(port) + return socket.getservbyname(pycompat.sysstr(port)) except socket.error: raise Abort(_("no port number associated with service '%s'") % port) @@ -3126,7 +3188,7 @@ results.append(hook(*args)) return results -def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0): +def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0): '''Yields lines for a nicely formatted stacktrace. Skips the 'skip' last entries, then return the last 'depth' entries. Each file+linenumber is formatted according to fileline. @@ -3138,7 +3200,7 @@ Not be used in production code but very convenient while developing. 
''' - entries = [(fileline % (fn, ln), func) + entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func)) for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1] ][-depth:] if entries: @@ -3571,7 +3633,7 @@ return zlib.decompress(data) except zlib.error as e: raise error.RevlogError(_('revlog decompress error: %s') % - str(e)) + forcebytestr(e)) def revlogcompressor(self, opts=None): return self.zlibrevlogcompressor() @@ -3797,7 +3859,7 @@ return ''.join(chunks) except Exception as e: raise error.RevlogError(_('revlog decompress error: %s') % - str(e)) + forcebytestr(e)) def revlogcompressor(self, opts=None): opts = opts or {} @@ -3944,3 +4006,54 @@ if not (byte & 0x80): return result shift += 7 + +### +# Deprecation warnings for util.py splitting +### + +defaultdateformats = dateutil.defaultdateformats + +extendeddateformats = dateutil.extendeddateformats + +def makedate(*args, **kwargs): + msg = ("'util.makedate' is deprecated, " + "use 'utils.dateutil.makedate'") + nouideprecwarn(msg, "4.6") + return dateutil.makedate(*args, **kwargs) + +def datestr(*args, **kwargs): + msg = ("'util.datestr' is deprecated, " + "use 'utils.dateutil.datestr'") + nouideprecwarn(msg, "4.6") + debugstacktrace() + return dateutil.datestr(*args, **kwargs) + +def shortdate(*args, **kwargs): + msg = ("'util.shortdate' is deprecated, " + "use 'utils.dateutil.shortdate'") + nouideprecwarn(msg, "4.6") + return dateutil.shortdate(*args, **kwargs) + +def parsetimezone(*args, **kwargs): + msg = ("'util.parsetimezone' is deprecated, " + "use 'utils.dateutil.parsetimezone'") + nouideprecwarn(msg, "4.6") + return dateutil.parsetimezone(*args, **kwargs) + +def strdate(*args, **kwargs): + msg = ("'util.strdate' is deprecated, " + "use 'utils.dateutil.strdate'") + nouideprecwarn(msg, "4.6") + return dateutil.strdate(*args, **kwargs) + +def parsedate(*args, **kwargs): + msg = ("'util.parsedate' is deprecated, " + "use 'utils.dateutil.parsedate'") + nouideprecwarn(msg, 
"4.6") + return dateutil.parsedate(*args, **kwargs) + +def matchdate(*args, **kwargs): + msg = ("'util.matchdate' is deprecated, " + "use 'utils.dateutil.matchdate'") + nouideprecwarn(msg, "4.6") + return dateutil.matchdate(*args, **kwargs)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/utils/dateutil.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,332 @@ +# util.py - Mercurial utility functions relative to dates +# +# Copyright 2018 Boris Feld <boris.feld@octobus.net> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import, print_function + +import calendar +import datetime +import time + +from ..i18n import _ +from .. import ( + encoding, + error, + pycompat, +) + +# used by parsedate +defaultdateformats = ( + '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601 + '%Y-%m-%dT%H:%M', # without seconds + '%Y-%m-%dT%H%M%S', # another awful but legal variant without : + '%Y-%m-%dT%H%M', # without seconds + '%Y-%m-%d %H:%M:%S', # our common legal variant + '%Y-%m-%d %H:%M', # without seconds + '%Y-%m-%d %H%M%S', # without : + '%Y-%m-%d %H%M', # without seconds + '%Y-%m-%d %I:%M:%S%p', + '%Y-%m-%d %H:%M', + '%Y-%m-%d %I:%M%p', + '%Y-%m-%d', + '%m-%d', + '%m/%d', + '%m/%d/%y', + '%m/%d/%Y', + '%a %b %d %H:%M:%S %Y', + '%a %b %d %I:%M:%S%p %Y', + '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822" + '%b %d %H:%M:%S %Y', + '%b %d %I:%M:%S%p %Y', + '%b %d %H:%M:%S', + '%b %d %I:%M:%S%p', + '%b %d %H:%M', + '%b %d %I:%M%p', + '%b %d %Y', + '%b %d', + '%H:%M:%S', + '%I:%M:%S%p', + '%H:%M', + '%I:%M%p', +) + +extendeddateformats = defaultdateformats + ( + "%Y", + "%Y-%m", + "%b", + "%b %Y", +) + +def makedate(timestamp=None): + '''Return a unix timestamp (or the current time) as a (unixtime, + offset) tuple based off the local timezone.''' + if timestamp is None: + timestamp = time.time() + if timestamp < 0: + hint = _("check your clock") + raise error.Abort(_("negative timestamp: %d") % timestamp, hint=hint) + delta = (datetime.datetime.utcfromtimestamp(timestamp) - + datetime.datetime.fromtimestamp(timestamp)) + tz = delta.days * 86400 + delta.seconds + return timestamp, tz + 
+def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'): + """represent a (unixtime, offset) tuple as a localized time. + unixtime is seconds since the epoch, and offset is the time zone's + number of seconds away from UTC. + + >>> datestr((0, 0)) + 'Thu Jan 01 00:00:00 1970 +0000' + >>> datestr((42, 0)) + 'Thu Jan 01 00:00:42 1970 +0000' + >>> datestr((-42, 0)) + 'Wed Dec 31 23:59:18 1969 +0000' + >>> datestr((0x7fffffff, 0)) + 'Tue Jan 19 03:14:07 2038 +0000' + >>> datestr((-0x80000000, 0)) + 'Fri Dec 13 20:45:52 1901 +0000' + """ + t, tz = date or makedate() + if "%1" in format or "%2" in format or "%z" in format: + sign = (tz > 0) and "-" or "+" + minutes = abs(tz) // 60 + q, r = divmod(minutes, 60) + format = format.replace("%z", "%1%2") + format = format.replace("%1", "%c%02d" % (sign, q)) + format = format.replace("%2", "%02d" % r) + d = t - tz + if d > 0x7fffffff: + d = 0x7fffffff + elif d < -0x80000000: + d = -0x80000000 + # Never use time.gmtime() and datetime.datetime.fromtimestamp() + # because they use the gmtime() system call which is buggy on Windows + # for negative values. 
+ t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d) + s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format))) + return s + +def shortdate(date=None): + """turn (timestamp, tzoff) tuple into iso 8631 date.""" + return datestr(date, format='%Y-%m-%d') + +def parsetimezone(s): + """find a trailing timezone, if any, in string, and return a + (offset, remainder) pair""" + s = pycompat.bytestr(s) + + if s.endswith("GMT") or s.endswith("UTC"): + return 0, s[:-3].rstrip() + + # Unix-style timezones [+-]hhmm + if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit(): + sign = (s[-5] == "+") and 1 or -1 + hours = int(s[-4:-2]) + minutes = int(s[-2:]) + return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip() + + # ISO8601 trailing Z + if s.endswith("Z") and s[-2:-1].isdigit(): + return 0, s[:-1] + + # ISO8601-style [+-]hh:mm + if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and + s[-5:-3].isdigit() and s[-2:].isdigit()): + sign = (s[-6] == "+") and 1 or -1 + hours = int(s[-5:-3]) + minutes = int(s[-2:]) + return -sign * (hours * 60 + minutes) * 60, s[:-6] + + return None, s + +def strdate(string, format, defaults=None): + """parse a localized time string and return a (unixtime, offset) tuple. 
+ if the string cannot be parsed, ValueError is raised.""" + if defaults is None: + defaults = {} + + # NOTE: unixtime = localunixtime + offset + offset, date = parsetimezone(string) + + # add missing elements from defaults + usenow = False # default to using biased defaults + for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity + part = pycompat.bytestr(part) + found = [True for p in part if ("%"+p) in format] + if not found: + date += "@" + defaults[part][usenow] + format += "@%" + part[0] + else: + # We've found a specific time element, less specific time + # elements are relative to today + usenow = True + + timetuple = time.strptime(encoding.strfromlocal(date), + encoding.strfromlocal(format)) + localunixtime = int(calendar.timegm(timetuple)) + if offset is None: + # local timezone + unixtime = int(time.mktime(timetuple)) + offset = unixtime - localunixtime + else: + unixtime = localunixtime + offset + return unixtime, offset + +def parsedate(date, formats=None, bias=None): + """parse a localized date/time and return a (unixtime, offset) tuple. + + The date may be a "unixtime offset" string or in one of the specified + formats. If the date already is a (unixtime, offset) tuple, it is returned. + + >>> parsedate(b' today ') == parsedate( + ... datetime.date.today().strftime('%b %d').encode('ascii')) + True + >>> parsedate(b'yesterday ') == parsedate( + ... (datetime.date.today() - datetime.timedelta(days=1) + ... 
).strftime('%b %d').encode('ascii')) + True + >>> now, tz = makedate() + >>> strnow, strtz = parsedate(b'now') + >>> (strnow - now) < 1 + True + >>> tz == strtz + True + """ + if bias is None: + bias = {} + if not date: + return 0, 0 + if isinstance(date, tuple) and len(date) == 2: + return date + if not formats: + formats = defaultdateformats + date = date.strip() + + if date == 'now' or date == _('now'): + return makedate() + if date == 'today' or date == _('today'): + date = datetime.date.today().strftime(r'%b %d') + date = encoding.strtolocal(date) + elif date == 'yesterday' or date == _('yesterday'): + date = (datetime.date.today() - + datetime.timedelta(days=1)).strftime(r'%b %d') + date = encoding.strtolocal(date) + + try: + when, offset = map(int, date.split(' ')) + except ValueError: + # fill out defaults + now = makedate() + defaults = {} + for part in ("d", "mb", "yY", "HI", "M", "S"): + # this piece is for rounding the specific end of unknowns + b = bias.get(part) + if b is None: + if part[0:1] in "HMS": + b = "00" + else: + b = "0" + + # this piece is for matching the generic end to today's date + n = datestr(now, "%" + part[0:1]) + + defaults[part] = (b, n) + + for format in formats: + try: + when, offset = strdate(date, format, defaults) + except (ValueError, OverflowError): + pass + else: + break + else: + raise error.ParseError( + _('invalid date: %r') % pycompat.bytestr(date)) + # validate explicit (probably user-specified) date and + # time zone offset. values must fit in signed 32 bits for + # current 32-bit linux runtimes. 
timezones go from UTC-12 + # to UTC+14 + if when < -0x80000000 or when > 0x7fffffff: + raise error.ParseError(_('date exceeds 32 bits: %d') % when) + if offset < -50400 or offset > 43200: + raise error.ParseError(_('impossible time zone offset: %d') % offset) + return when, offset + +def matchdate(date): + """Return a function that matches a given date match specifier + + Formats include: + + '{date}' match a given date to the accuracy provided + + '<{date}' on or before a given date + + '>{date}' on or after a given date + + >>> p1 = parsedate(b"10:29:59") + >>> p2 = parsedate(b"10:30:00") + >>> p3 = parsedate(b"10:30:59") + >>> p4 = parsedate(b"10:31:00") + >>> p5 = parsedate(b"Sep 15 10:30:00 1999") + >>> f = matchdate(b"10:30") + >>> f(p1[0]) + False + >>> f(p2[0]) + True + >>> f(p3[0]) + True + >>> f(p4[0]) + False + >>> f(p5[0]) + False + """ + + def lower(date): + d = {'mb': "1", 'd': "1"} + return parsedate(date, extendeddateformats, d)[0] + + def upper(date): + d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"} + for days in ("31", "30", "29"): + try: + d["d"] = days + return parsedate(date, extendeddateformats, d)[0] + except error.ParseError: + pass + d["d"] = "28" + return parsedate(date, extendeddateformats, d)[0] + + date = date.strip() + + if not date: + raise error.Abort(_("dates cannot consist entirely of whitespace")) + elif date[0] == "<": + if not date[1:]: + raise error.Abort(_("invalid day spec, use '<DATE'")) + when = upper(date[1:]) + return lambda x: x <= when + elif date[0] == ">": + if not date[1:]: + raise error.Abort(_("invalid day spec, use '>DATE'")) + when = lower(date[1:]) + return lambda x: x >= when + elif date[0] == "-": + try: + days = int(date[1:]) + except ValueError: + raise error.Abort(_("invalid day spec: %s") % date[1:]) + if days < 0: + raise error.Abort(_("%s must be nonnegative (see 'hg help dates')") + % date[1:]) + when = makedate()[0] - days * 3600 * 24 + return lambda x: x >= when + elif " to " in date: + a, b = 
date.split(" to ") + start, stop = lower(a), upper(b) + return lambda x: x >= start and x <= stop + else: + start, stop = lower(date), upper(date) + return lambda x: x >= start and x <= stop
--- a/mercurial/verify.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/verify.py Sun Mar 04 10:42:51 2018 -0500 @@ -60,6 +60,7 @@ def err(self, linkrev, msg, filename=None): if linkrev is not None: self.badrevs.add(linkrev) + linkrev = "%d" % linkrev else: linkrev = '?' msg = "%s: %s" % (linkrev, msg) @@ -69,9 +70,10 @@ self.errors += 1 def exc(self, linkrev, msg, inst, filename=None): - if not str(inst): - inst = repr(inst) - self.err(linkrev, "%s: %s" % (msg, inst), filename) + fmsg = pycompat.bytestr(inst) + if not fmsg: + fmsg = pycompat.byterepr(inst) + self.err(linkrev, "%s: %s" % (msg, fmsg), filename) def checklog(self, obj, name, linkrev): if not len(obj) and (self.havecl or self.havemf): @@ -455,12 +457,7 @@ if rp: if lr is not None and ui.verbose: ctx = lrugetctx(lr) - found = False - for pctx in ctx.parents(): - if rp[0] in pctx: - found = True - break - if not found: + if not any(rp[0] in pctx for pctx in ctx.parents()): self.warn(_("warning: copy source of '%s' not" " in parents of %s") % (f, ctx)) fl2 = repo.file(rp[0])
--- a/mercurial/windows.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/windows.py Sun Mar 04 10:42:51 2018 -0500 @@ -296,6 +296,15 @@ return s return '"%s"' % _quotere.sub(r'\1\1\\\2', s) +def _unquote(s): + if s.startswith(b'"') and s.endswith(b'"'): + return s[1:-1] + return s + +def shellsplit(s): + """Parse a command string in cmd.exe way (best-effort)""" + return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False)) + def quotecommand(cmd): """Build a command string suitable for os.popen* calls.""" if sys.version_info < (2, 7, 1): @@ -307,7 +316,7 @@ # Work around "popen spawned process may not write to stdout # under windows" # http://bugs.python.org/issue1366 - command += " 2> %s" % os.devnull + command += " 2> %s" % pycompat.bytestr(os.devnull) return os.popen(quotecommand(command), mode) def explainexit(code):
--- a/mercurial/wireproto.py Sat Mar 03 22:29:24 2018 -0500 +++ b/mercurial/wireproto.py Sun Mar 04 10:42:51 2018 -0500 @@ -31,56 +31,24 @@ repository, streamclone, util, + wireprototypes, ) urlerr = util.urlerr urlreq = util.urlreq +bytesresponse = wireprototypes.bytesresponse +ooberror = wireprototypes.ooberror +pushres = wireprototypes.pushres +pusherr = wireprototypes.pusherr +streamres = wireprototypes.streamres +streamres_legacy = wireprototypes.streamreslegacy + bundle2requiredmain = _('incompatible Mercurial client; bundle2 required') bundle2requiredhint = _('see https://www.mercurial-scm.org/wiki/' 'IncompatibleClient') bundle2required = '%s\n(%s)\n' % (bundle2requiredmain, bundle2requiredhint) -class abstractserverproto(object): - """abstract class that summarizes the protocol API - - Used as reference and documentation. - """ - - def getargs(self, args): - """return the value for arguments in <args> - - returns a list of values (same order as <args>)""" - raise NotImplementedError() - - def getfile(self, fp): - """write the whole content of a file into a file like object - - The file is in the form:: - - (<chunk-size>\n<chunk>)+0\n - - chunk size is the ascii version of the int. - """ - raise NotImplementedError() - - def redirect(self): - """may setup interception for stdout and stderr - - See also the `restore` method.""" - raise NotImplementedError() - - # If the `redirect` function does install interception, the `restore` - # function MUST be defined. If interception is not used, this function - # MUST NOT be defined. - # - # left commented here on purpose - # - #def restore(self): - # """reinstall previous stdout and stderr and return intercepted stdout - # """ - # raise NotImplementedError() - class remoteiterbatcher(peer.iterbatcher): def __init__(self, remote): super(remoteiterbatcher, self).__init__() @@ -517,58 +485,6 @@ # server side # wire protocol command can either return a string or one of these classes. 
-class streamres(object): - """wireproto reply: binary stream - - The call was successful and the result is a stream. - - Accepts a generator containing chunks of data to be sent to the client. - - ``prefer_uncompressed`` indicates that the data is expected to be - uncompressable and that the stream should therefore use the ``none`` - engine. - """ - def __init__(self, gen=None, prefer_uncompressed=False): - self.gen = gen - self.prefer_uncompressed = prefer_uncompressed - -class streamres_legacy(object): - """wireproto reply: uncompressed binary stream - - The call was successful and the result is a stream. - - Accepts a generator containing chunks of data to be sent to the client. - - Like ``streamres``, but sends an uncompressed data for "version 1" clients - using the application/mercurial-0.1 media type. - """ - def __init__(self, gen=None): - self.gen = gen - -class pushres(object): - """wireproto reply: success with simple integer return - - The call was successful and returned an integer contained in `self.res`. - """ - def __init__(self, res): - self.res = res - -class pusherr(object): - """wireproto reply: failure - - The call failed. The `self.res` attribute contains the error message. - """ - def __init__(self, res): - self.res = res - -class ooberror(object): - """wireproto reply: failure of a batch of operation - - Something failed during a batch call. The error message is stored in - `self.message`. - """ - def __init__(self, message): - self.message = message def getdispatchrepo(repo, proto, command): """Obtain the repo used for processing wire protocol commands. 
@@ -625,7 +541,7 @@ return ui.configbool('server', 'bundle1') -def supportedcompengines(ui, proto, role): +def supportedcompengines(ui, role): """Obtain the list of supported compression engines for a request.""" assert role in (util.CLIENTROLE, util.SERVERROLE) @@ -674,13 +590,114 @@ return compengines -# list of commands -commands = {} +class commandentry(object): + """Represents a declared wire protocol command.""" + def __init__(self, func, args='', transports=None): + self.func = func + self.args = args + self.transports = transports or set() + + def _merge(self, func, args): + """Merge this instance with an incoming 2-tuple. + + This is called when a caller using the old 2-tuple API attempts + to replace an instance. The incoming values are merged with + data not captured by the 2-tuple and a new instance containing + the union of the two objects is returned. + """ + return commandentry(func, args=args, transports=set(self.transports)) + + # Old code treats instances as 2-tuples. So expose that interface. + def __iter__(self): + yield self.func + yield self.args + + def __getitem__(self, i): + if i == 0: + return self.func + elif i == 1: + return self.args + else: + raise IndexError('can only access elements 0 and 1') + +class commanddict(dict): + """Container for registered wire protocol commands. + + It behaves like a dict. But __setitem__ is overwritten to allow silent + coercion of values from 2-tuples for API compatibility. + """ + def __setitem__(self, k, v): + if isinstance(v, commandentry): + pass + # Cast 2-tuples to commandentry instances. + elif isinstance(v, tuple): + if len(v) != 2: + raise ValueError('command tuples must have exactly 2 elements') -def wireprotocommand(name, args=''): - """decorator for wire protocol command""" + # It is common for extensions to wrap wire protocol commands via + # e.g. ``wireproto.commands[x] = (newfn, args)``. 
Because callers + # doing this aren't aware of the new API that uses objects to store + # command entries, we automatically merge old state with new. + if k in self: + v = self[k]._merge(v[0], v[1]) + else: + # Use default values from @wireprotocommand. + v = commandentry(v[0], args=v[1], + transports=set(wireprototypes.TRANSPORTS)) + else: + raise ValueError('command entries must be commandentry instances ' + 'or 2-tuples') + + return super(commanddict, self).__setitem__(k, v) + + def commandavailable(self, command, proto): + """Determine if a command is available for the requested protocol.""" + assert proto.name in wireprototypes.TRANSPORTS + + entry = self.get(command) + + if not entry: + return False + + if proto.name not in entry.transports: + return False + + return True + +# Constants specifying which transports a wire protocol command should be +# available on. For use with @wireprotocommand. +POLICY_ALL = 'all' +POLICY_V1_ONLY = 'v1-only' +POLICY_V2_ONLY = 'v2-only' + +commands = commanddict() + +def wireprotocommand(name, args='', transportpolicy=POLICY_ALL): + """Decorator to declare a wire protocol command. + + ``name`` is the name of the wire protocol command being provided. + + ``args`` is a space-delimited list of named arguments that the command + accepts. ``*`` is a special value that says to accept all arguments. + + ``transportpolicy`` is a POLICY_* constant denoting which transports + this wire protocol command should be exposed to. By default, commands + are exposed to all wire protocol transports. 
+ """ + if transportpolicy == POLICY_ALL: + transports = set(wireprototypes.TRANSPORTS) + elif transportpolicy == POLICY_V1_ONLY: + transports = {k for k, v in wireprototypes.TRANSPORTS.items() + if v['version'] == 1} + elif transportpolicy == POLICY_V2_ONLY: + transports = {k for k, v in wireprototypes.TRANSPORTS.items() + if v['version'] == 2} + else: + raise error.Abort(_('invalid transport policy value: %s') % + transportpolicy) + def register(func): - commands[name] = (func, args) + commands[name] = commandentry(func, args=args, transports=transports) return func return register @@ -713,16 +730,24 @@ result = func(repo, proto) if isinstance(result, ooberror): return result + + # For now, all batchable commands must return bytesresponse or + # raw bytes (for backwards compatibility). + assert isinstance(result, (bytesresponse, bytes)) + if isinstance(result, bytesresponse): + result = result.data res.append(escapearg(result)) - return ';'.join(res) -@wireprotocommand('between', 'pairs') + return bytesresponse(';'.join(res)) + +@wireprotocommand('between', 'pairs', transportpolicy=POLICY_V1_ONLY) def between(repo, proto, pairs): pairs = [decodelist(p, '-') for p in pairs.split(" ")] r = [] for b in repo.between(pairs): r.append(encodelist(b) + "\n") - return "".join(r) + + return bytesresponse(''.join(r)) @wireprotocommand('branchmap') def branchmap(repo, proto): @@ -732,15 +757,17 @@ branchname = urlreq.quote(encoding.fromlocal(branch)) branchnodes = encodelist(nodes) heads.append('%s %s' % (branchname, branchnodes)) - return '\n'.join(heads) -@wireprotocommand('branches', 'nodes') + return bytesresponse('\n'.join(heads)) + +@wireprotocommand('branches', 'nodes', transportpolicy=POLICY_V1_ONLY) def branches(repo, proto, nodes): nodes = decodelist(nodes) r = [] for b in repo.branches(nodes): r.append(encodelist(b) + "\n") - return "".join(r) + + return bytesresponse(''.join(r)) @wireprotocommand('clonebundles', '') def clonebundles(repo, proto): @@ -752,9 +779,9 
@@ depending on the request. e.g. you could advertise URLs for the closest data center given the client's IP address. """ - return repo.vfs.tryread('clonebundles.manifest') + return bytesresponse(repo.vfs.tryread('clonebundles.manifest')) -wireprotocaps = ['lookup', 'changegroupsubset', 'branchmap', 'pushkey', +wireprotocaps = ['lookup', 'branchmap', 'pushkey', 'known', 'getbundle', 'unbundlehash', 'batch'] def _capabilities(repo, proto): @@ -769,6 +796,12 @@ """ # copy to prevent modification of the global list caps = list(wireprotocaps) + + # Command of same name as capability isn't exposed to version 1 of + # transports. So conditionally add it. + if commands.commandavailable('changegroupsubset', proto): + caps.append('changegroupsubset') + if streamclone.allowservergeneration(repo): if repo.ui.configbool('server', 'preferuncompressed'): caps.append('stream-preferred') @@ -784,31 +817,15 @@ caps.append('bundle2=' + urlreq.quote(capsblob)) caps.append('unbundle=%s' % ','.join(bundle2.bundlepriority)) - if proto.name == 'http': - caps.append('httpheader=%d' % - repo.ui.configint('server', 'maxhttpheaderlen')) - if repo.ui.configbool('experimental', 'httppostargs'): - caps.append('httppostargs') - - # FUTURE advertise 0.2rx once support is implemented - # FUTURE advertise minrx and mintx after consulting config option - caps.append('httpmediatype=0.1rx,0.1tx,0.2tx') - - compengines = supportedcompengines(repo.ui, proto, util.SERVERROLE) - if compengines: - comptypes = ','.join(urlreq.quote(e.wireprotosupport().name) - for e in compengines) - caps.append('compression=%s' % comptypes) - - return caps + return proto.addcapabilities(repo, caps) # If you are writing an extension and consider wrapping this function. Wrap # `_capabilities` instead. 
@wireprotocommand('capabilities') def capabilities(repo, proto): - return ' '.join(_capabilities(repo, proto)) + return bytesresponse(' '.join(_capabilities(repo, proto))) -@wireprotocommand('changegroup', 'roots') +@wireprotocommand('changegroup', 'roots', transportpolicy=POLICY_V1_ONLY) def changegroup(repo, proto, roots): nodes = decodelist(roots) outgoing = discovery.outgoing(repo, missingroots=nodes, @@ -817,7 +834,8 @@ gen = iter(lambda: cg.read(32768), '') return streamres(gen=gen) -@wireprotocommand('changegroupsubset', 'bases heads') +@wireprotocommand('changegroupsubset', 'bases heads', + transportpolicy=POLICY_V1_ONLY) def changegroupsubset(repo, proto, bases, heads): bases = decodelist(bases) heads = decodelist(heads) @@ -831,7 +849,8 @@ def debugwireargs(repo, proto, one, two, others): # only accept optional args from the known set opts = options('debugwireargs', ['three', 'four'], others) - return repo.debugwireargs(one, two, **pycompat.strkwargs(opts)) + return bytesresponse(repo.debugwireargs(one, two, + **pycompat.strkwargs(opts))) @wireprotocommand('getbundle', '*') def getbundle(repo, proto, others): @@ -857,7 +876,7 @@ if not bundle1allowed(repo, 'pull'): if not exchange.bundle2requested(opts.get('bundlecaps')): - if proto.name == 'http': + if proto.name == 'http-v1': return ooberror(bundle2required) raise error.Abort(bundle2requiredmain, hint=bundle2requiredhint) @@ -883,12 +902,12 @@ except error.Abort as exc: # cleanly forward Abort error to the client if not exchange.bundle2requested(opts.get('bundlecaps')): - if proto.name == 'http': - return ooberror(str(exc) + '\n') + if proto.name == 'http-v1': + return ooberror(pycompat.bytestr(exc) + '\n') raise # cannot do better for bundle1 + ssh # bundle2 request expect a bundle2 reply bundler = bundle2.bundle20(repo.ui) - manargs = [('message', str(exc))] + manargs = [('message', pycompat.bytestr(exc))] advargs = [] if exc.hint is not None: advargs.append(('hint', exc.hint)) @@ -902,23 +921,27 @@ 
@wireprotocommand('heads') def heads(repo, proto): h = repo.heads() - return encodelist(h) + "\n" + return bytesresponse(encodelist(h) + '\n') @wireprotocommand('hello') def hello(repo, proto): - '''the hello command returns a set of lines describing various - interesting things about the server, in an RFC822-like format. - Currently the only one defined is "capabilities", which - consists of a line in the form: + """Called as part of SSH handshake to obtain server info. + + Returns a list of lines describing interesting things about the + server, in an RFC822-like format. - capabilities: space separated list of tokens - ''' - return "capabilities: %s\n" % (capabilities(repo, proto)) + Currently, the only one defined is ``capabilities``, which consists of a + line of space separated tokens describing server abilities: + + capabilities: <token0> <token1> <token2> + """ + caps = capabilities(repo, proto).data + return bytesresponse('capabilities: %s\n' % caps) @wireprotocommand('listkeys', 'namespace') def listkeys(repo, proto, namespace): - d = repo.listkeys(encoding.tolocal(namespace)).items() - return pushkeymod.encodekeys(d) + d = sorted(repo.listkeys(encoding.tolocal(namespace)).items()) + return bytesresponse(pushkeymod.encodekeys(d)) @wireprotocommand('lookup', 'key') def lookup(repo, proto, key): @@ -928,13 +951,14 @@ r = c.hex() success = 1 except Exception as inst: - r = str(inst) + r = util.forcebytestr(inst) success = 0 - return "%d %s\n" % (success, r) + return bytesresponse('%d %s\n' % (success, r)) @wireprotocommand('known', 'nodes *') def known(repo, proto, nodes, others): - return ''.join(b and "1" or "0" for b in repo.known(decodelist(nodes))) + v = ''.join(b and '1' or '0' for b in repo.known(decodelist(nodes))) + return bytesresponse(v) @wireprotocommand('pushkey', 'namespace key old new') def pushkey(repo, proto, namespace, key, old, new): @@ -950,23 +974,12 @@ else: new = encoding.tolocal(new) # normal path - if util.safehasattr(proto, 
'restore'): - - proto.redirect() + with proto.mayberedirectstdio() as output: + r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key), + encoding.tolocal(old), new) or False - try: - r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key), - encoding.tolocal(old), new) or False - except error.Abort: - r = False - - output = proto.restore() - - return '%s\n%s' % (int(r), output) - - r = repo.pushkey(encoding.tolocal(namespace), encoding.tolocal(key), - encoding.tolocal(old), new) - return '%s\n' % int(r) + output = output.getvalue() if output else '' + return bytesresponse('%d\n%s' % (int(r), output)) @wireprotocommand('stream_out') def stream(repo, proto): @@ -980,97 +993,99 @@ def unbundle(repo, proto, heads): their_heads = decodelist(heads) - try: - proto.redirect() - - exchange.check_heads(repo, their_heads, 'preparing changes') - - # write bundle data to temporary file because it can be big - fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') - fp = os.fdopen(fd, pycompat.sysstr('wb+')) - r = 0 + with proto.mayberedirectstdio() as output: try: - proto.getfile(fp) - fp.seek(0) - gen = exchange.readbundle(repo.ui, fp, None) - if (isinstance(gen, changegroupmod.cg1unpacker) - and not bundle1allowed(repo, 'push')): - if proto.name == 'http': - # need to special case http because stderr do not get to - # the http client on failed push so we need to abuse some - # other error type to make sure the message get to the - # user. - return ooberror(bundle2required) - raise error.Abort(bundle2requiredmain, - hint=bundle2requiredhint) + exchange.check_heads(repo, their_heads, 'preparing changes') - r = exchange.unbundle(repo, gen, their_heads, 'serve', - proto._client()) - if util.safehasattr(r, 'addpart'): - # The return looks streamable, we are in the bundle2 case and - # should return a stream. 
- return streamres_legacy(gen=r.getchunks()) - return pushres(r) - - finally: - fp.close() - os.unlink(tempname) - - except (error.BundleValueError, error.Abort, error.PushRaced) as exc: - # handle non-bundle2 case first - if not getattr(exc, 'duringunbundle2', False): + # write bundle data to temporary file because it can be big + fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') + fp = os.fdopen(fd, pycompat.sysstr('wb+')) + r = 0 try: - raise - except error.Abort: - # The old code we moved used util.stderr directly. - # We did not change it to minimise code change. - # This need to be moved to something proper. - # Feel free to do it. - util.stderr.write("abort: %s\n" % exc) - if exc.hint is not None: - util.stderr.write("(%s)\n" % exc.hint) - return pushres(0) - except error.PushRaced: - return pusherr(str(exc)) + proto.forwardpayload(fp) + fp.seek(0) + gen = exchange.readbundle(repo.ui, fp, None) + if (isinstance(gen, changegroupmod.cg1unpacker) + and not bundle1allowed(repo, 'push')): + if proto.name == 'http-v1': + # need to special case http because stderr do not get to + # the http client on failed push so we need to abuse + # some other error type to make sure the message get to + # the user. + return ooberror(bundle2required) + raise error.Abort(bundle2requiredmain, + hint=bundle2requiredhint) - bundler = bundle2.bundle20(repo.ui) - for out in getattr(exc, '_bundle2salvagedoutput', ()): - bundler.addpart(out) - try: - try: - raise - except error.PushkeyFailed as exc: - # check client caps - remotecaps = getattr(exc, '_replycaps', None) - if (remotecaps is not None - and 'pushkey' not in remotecaps.get('error', ())): - # no support remote side, fallback to Abort handler. + r = exchange.unbundle(repo, gen, their_heads, 'serve', + proto.client()) + if util.safehasattr(r, 'addpart'): + # The return looks streamable, we are in the bundle2 case + # and should return a stream. 
+ return streamres_legacy(gen=r.getchunks()) + return pushres(r, output.getvalue() if output else '') + + finally: + fp.close() + os.unlink(tempname) + + except (error.BundleValueError, error.Abort, error.PushRaced) as exc: + # handle non-bundle2 case first + if not getattr(exc, 'duringunbundle2', False): + try: raise - part = bundler.newpart('error:pushkey') - part.addparam('in-reply-to', exc.partid) - if exc.namespace is not None: - part.addparam('namespace', exc.namespace, mandatory=False) - if exc.key is not None: - part.addparam('key', exc.key, mandatory=False) - if exc.new is not None: - part.addparam('new', exc.new, mandatory=False) - if exc.old is not None: - part.addparam('old', exc.old, mandatory=False) - if exc.ret is not None: - part.addparam('ret', exc.ret, mandatory=False) - except error.BundleValueError as exc: - errpart = bundler.newpart('error:unsupportedcontent') - if exc.parttype is not None: - errpart.addparam('parttype', exc.parttype) - if exc.params: - errpart.addparam('params', '\0'.join(exc.params)) - except error.Abort as exc: - manargs = [('message', str(exc))] - advargs = [] - if exc.hint is not None: - advargs.append(('hint', exc.hint)) - bundler.addpart(bundle2.bundlepart('error:abort', - manargs, advargs)) - except error.PushRaced as exc: - bundler.newpart('error:pushraced', [('message', str(exc))]) - return streamres_legacy(gen=bundler.getchunks()) + except error.Abort: + # The old code we moved used util.stderr directly. + # We did not change it to minimise code change. + # This need to be moved to something proper. + # Feel free to do it. 
+ util.stderr.write("abort: %s\n" % exc) + if exc.hint is not None: + util.stderr.write("(%s)\n" % exc.hint) + return pushres(0, output.getvalue() if output else '') + except error.PushRaced: + return pusherr(str(exc), + output.getvalue() if output else '') + + bundler = bundle2.bundle20(repo.ui) + for out in getattr(exc, '_bundle2salvagedoutput', ()): + bundler.addpart(out) + try: + try: + raise + except error.PushkeyFailed as exc: + # check client caps + remotecaps = getattr(exc, '_replycaps', None) + if (remotecaps is not None + and 'pushkey' not in remotecaps.get('error', ())): + # no support remote side, fallback to Abort handler. + raise + part = bundler.newpart('error:pushkey') + part.addparam('in-reply-to', exc.partid) + if exc.namespace is not None: + part.addparam('namespace', exc.namespace, + mandatory=False) + if exc.key is not None: + part.addparam('key', exc.key, mandatory=False) + if exc.new is not None: + part.addparam('new', exc.new, mandatory=False) + if exc.old is not None: + part.addparam('old', exc.old, mandatory=False) + if exc.ret is not None: + part.addparam('ret', exc.ret, mandatory=False) + except error.BundleValueError as exc: + errpart = bundler.newpart('error:unsupportedcontent') + if exc.parttype is not None: + errpart.addparam('parttype', exc.parttype) + if exc.params: + errpart.addparam('params', '\0'.join(exc.params)) + except error.Abort as exc: + manargs = [('message', util.forcebytestr(exc))] + advargs = [] + if exc.hint is not None: + advargs.append(('hint', exc.hint)) + bundler.addpart(bundle2.bundlepart('error:abort', + manargs, advargs)) + except error.PushRaced as exc: + bundler.newpart('error:pushraced', + [('message', util.forcebytestr(exc))]) + return streamres_legacy(gen=bundler.getchunks())
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/wireprotoserver.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,639 @@ +# Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> +# Copyright 2005-2007 Matt Mackall <mpm@selenic.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import contextlib +import struct +import sys +import threading + +from .i18n import _ +from . import ( + encoding, + error, + hook, + pycompat, + util, + wireproto, + wireprototypes, +) + +stringio = util.stringio + +urlerr = util.urlerr +urlreq = util.urlreq + +HTTP_OK = 200 + +HGTYPE = 'application/mercurial-0.1' +HGTYPE2 = 'application/mercurial-0.2' +HGERRTYPE = 'application/hg-error' + +SSHV1 = wireprototypes.SSHV1 +SSHV2 = wireprototypes.SSHV2 + +def decodevaluefromheaders(req, headerprefix): + """Decode a long value from multiple HTTP request headers. + + Returns the value as a bytes, not a str. 
+ """ + chunks = [] + i = 1 + prefix = headerprefix.upper().replace(r'-', r'_') + while True: + v = req.env.get(r'HTTP_%s_%d' % (prefix, i)) + if v is None: + break + chunks.append(pycompat.bytesurl(v)) + i += 1 + + return ''.join(chunks) + +class httpv1protocolhandler(wireprototypes.baseprotocolhandler): + def __init__(self, req, ui): + self._req = req + self._ui = ui + + @property + def name(self): + return 'http-v1' + + def getargs(self, args): + knownargs = self._args() + data = {} + keys = args.split() + for k in keys: + if k == '*': + star = {} + for key in knownargs.keys(): + if key != 'cmd' and key not in keys: + star[key] = knownargs[key][0] + data['*'] = star + else: + data[k] = knownargs[k][0] + return [data[k] for k in keys] + + def _args(self): + args = util.rapply(pycompat.bytesurl, self._req.form.copy()) + postlen = int(self._req.env.get(r'HTTP_X_HGARGS_POST', 0)) + if postlen: + args.update(urlreq.parseqs( + self._req.read(postlen), keep_blank_values=True)) + return args + + argvalue = decodevaluefromheaders(self._req, r'X-HgArg') + args.update(urlreq.parseqs(argvalue, keep_blank_values=True)) + return args + + def forwardpayload(self, fp): + if r'HTTP_CONTENT_LENGTH' in self._req.env: + length = int(self._req.env[r'HTTP_CONTENT_LENGTH']) + else: + length = int(self._req.env[r'CONTENT_LENGTH']) + # If httppostargs is used, we need to read Content-Length + # minus the amount that was consumed by args. 
+ length -= int(self._req.env.get(r'HTTP_X_HGARGS_POST', 0)) + for s in util.filechunkiter(self._req, limit=length): + fp.write(s) + + @contextlib.contextmanager + def mayberedirectstdio(self): + oldout = self._ui.fout + olderr = self._ui.ferr + + out = util.stringio() + + try: + self._ui.fout = out + self._ui.ferr = out + yield out + finally: + self._ui.fout = oldout + self._ui.ferr = olderr + + def client(self): + return 'remote:%s:%s:%s' % ( + self._req.env.get('wsgi.url_scheme') or 'http', + urlreq.quote(self._req.env.get('REMOTE_HOST', '')), + urlreq.quote(self._req.env.get('REMOTE_USER', ''))) + + def addcapabilities(self, repo, caps): + caps.append('httpheader=%d' % + repo.ui.configint('server', 'maxhttpheaderlen')) + if repo.ui.configbool('experimental', 'httppostargs'): + caps.append('httppostargs') + + # FUTURE advertise 0.2rx once support is implemented + # FUTURE advertise minrx and mintx after consulting config option + caps.append('httpmediatype=0.1rx,0.1tx,0.2tx') + + compengines = wireproto.supportedcompengines(repo.ui, util.SERVERROLE) + if compengines: + comptypes = ','.join(urlreq.quote(e.wireprotosupport().name) + for e in compengines) + caps.append('compression=%s' % comptypes) + + return caps + +# This method exists mostly so that extensions like remotefilelog can +# disable a kludgey legacy method only over http. As of early 2018, +# there are no other known users, so with any luck we can discard this +# hook if remotefilelog becomes a first-party extension. +def iscmd(cmd): + return cmd in wireproto.commands + +def parsehttprequest(repo, req, query): + """Parse the HTTP request for a wire protocol request. + + If the current request appears to be a wire protocol request, this + function returns a dict with details about that request, including + an ``abstractprotocolserver`` instance suitable for handling the + request. Otherwise, ``None`` is returned. + + ``req`` is a ``wsgirequest`` instance. 
+ """ + # HTTP version 1 wire protocol requests are denoted by a "cmd" query + # string parameter. If it isn't present, this isn't a wire protocol + # request. + if r'cmd' not in req.form: + return None + + cmd = pycompat.sysbytes(req.form[r'cmd'][0]) + + # The "cmd" request parameter is used by both the wire protocol and hgweb. + # While not all wire protocol commands are available for all transports, + # if we see a "cmd" value that resembles a known wire protocol command, we + # route it to a protocol handler. This is better than routing possible + # wire protocol requests to hgweb because it prevents hgweb from using + # known wire protocol commands and it is less confusing for machine + # clients. + if not iscmd(cmd): + return None + + proto = httpv1protocolhandler(req, repo.ui) + + return { + 'cmd': cmd, + 'proto': proto, + 'dispatch': lambda: _callhttp(repo, req, proto, cmd), + 'handleerror': lambda ex: _handlehttperror(ex, req, cmd), + } + +def _httpresponsetype(ui, req, prefer_uncompressed): + """Determine the appropriate response type and compression settings. + + Returns a tuple of (mediatype, compengine, engineopts). + """ + # Determine the response media type and compression engine based + # on the request parameters. + protocaps = decodevaluefromheaders(req, r'X-HgProto').split(' ') + + if '0.2' in protocaps: + # All clients are expected to support uncompressed data. + if prefer_uncompressed: + return HGTYPE2, util._noopengine(), {} + + # Default as defined by wire protocol spec. + compformats = ['zlib', 'none'] + for cap in protocaps: + if cap.startswith('comp='): + compformats = cap[5:].split(',') + break + + # Now find an agreed upon compression format. 
+ for engine in wireproto.supportedcompengines(ui, util.SERVERROLE): + if engine.wireprotosupport().name in compformats: + opts = {} + level = ui.configint('server', '%slevel' % engine.name()) + if level is not None: + opts['level'] = level + + return HGTYPE2, engine, opts + + # No mutually supported compression format. Fall back to the + # legacy protocol. + + # Don't allow untrusted settings because disabling compression or + # setting a very high compression level could lead to flooding + # the server's network or CPU. + opts = {'level': ui.configint('server', 'zliblevel')} + return HGTYPE, util.compengines['zlib'], opts + +def _callhttp(repo, req, proto, cmd): + def genversion2(gen, engine, engineopts): + # application/mercurial-0.2 always sends a payload header + # identifying the compression engine. + name = engine.wireprotosupport().name + assert 0 < len(name) < 256 + yield struct.pack('B', len(name)) + yield name + + for chunk in gen: + yield chunk + + rsp = wireproto.dispatch(repo, proto, cmd) + + if not wireproto.commands.commandavailable(cmd, proto): + req.respond(HTTP_OK, HGERRTYPE, + body=_('requested wire protocol command is not available ' + 'over HTTP')) + return [] + + if isinstance(rsp, bytes): + req.respond(HTTP_OK, HGTYPE, body=rsp) + return [] + elif isinstance(rsp, wireprototypes.bytesresponse): + req.respond(HTTP_OK, HGTYPE, body=rsp.data) + return [] + elif isinstance(rsp, wireprototypes.streamreslegacy): + gen = rsp.gen + req.respond(HTTP_OK, HGTYPE) + return gen + elif isinstance(rsp, wireprototypes.streamres): + gen = rsp.gen + + # This code for compression should not be streamres specific. It + # is here because we only compress streamres at the moment. 
+ mediatype, engine, engineopts = _httpresponsetype( + repo.ui, req, rsp.prefer_uncompressed) + gen = engine.compressstream(gen, engineopts) + + if mediatype == HGTYPE2: + gen = genversion2(gen, engine, engineopts) + + req.respond(HTTP_OK, mediatype) + return gen + elif isinstance(rsp, wireprototypes.pushres): + rsp = '%d\n%s' % (rsp.res, rsp.output) + req.respond(HTTP_OK, HGTYPE, body=rsp) + return [] + elif isinstance(rsp, wireprototypes.pusherr): + # This is the httplib workaround documented in _handlehttperror(). + req.drain() + + rsp = '0\n%s\n' % rsp.res + req.respond(HTTP_OK, HGTYPE, body=rsp) + return [] + elif isinstance(rsp, wireprototypes.ooberror): + rsp = rsp.message + req.respond(HTTP_OK, HGERRTYPE, body=rsp) + return [] + raise error.ProgrammingError('hgweb.protocol internal failure', rsp) + +def _handlehttperror(e, req, cmd): + """Called when an ErrorResponse is raised during HTTP request processing.""" + + # Clients using Python's httplib are stateful: the HTTP client + # won't process an HTTP response until all request data is + # sent to the server. The intent of this code is to ensure + # we always read HTTP request data from the client, thus + # ensuring httplib transitions to a state that allows it to read + # the HTTP response. In other words, it helps prevent deadlocks + # on clients using httplib. + + if (req.env[r'REQUEST_METHOD'] == r'POST' and + # But not if Expect: 100-continue is being used. + (req.env.get('HTTP_EXPECT', + '').lower() != '100-continue') or + # Or the non-httplib HTTP library is being advertised by + # the client. + req.env.get('X-HgHttp2', '')): + req.drain() + else: + req.headers.append((r'Connection', r'Close')) + + # TODO This response body assumes the failed command was + # "unbundle." That assumption is not always valid. 
+ req.respond(e, HGTYPE, body='0\n%s\n' % pycompat.bytestr(e)) + + return '' + +def _sshv1respondbytes(fout, value): + """Send a bytes response for protocol version 1.""" + fout.write('%d\n' % len(value)) + fout.write(value) + fout.flush() + +def _sshv1respondstream(fout, source): + write = fout.write + for chunk in source.gen: + write(chunk) + fout.flush() + +def _sshv1respondooberror(fout, ferr, rsp): + ferr.write(b'%s\n-\n' % rsp) + ferr.flush() + fout.write(b'\n') + fout.flush() + +class sshv1protocolhandler(wireprototypes.baseprotocolhandler): + """Handler for requests services via version 1 of SSH protocol.""" + def __init__(self, ui, fin, fout): + self._ui = ui + self._fin = fin + self._fout = fout + + @property + def name(self): + return wireprototypes.SSHV1 + + def getargs(self, args): + data = {} + keys = args.split() + for n in xrange(len(keys)): + argline = self._fin.readline()[:-1] + arg, l = argline.split() + if arg not in keys: + raise error.Abort(_("unexpected parameter %r") % arg) + if arg == '*': + star = {} + for k in xrange(int(l)): + argline = self._fin.readline()[:-1] + arg, l = argline.split() + val = self._fin.read(int(l)) + star[arg] = val + data['*'] = star + else: + val = self._fin.read(int(l)) + data[arg] = val + return [data[k] for k in keys] + + def forwardpayload(self, fpout): + # We initially send an empty response. This tells the client it is + # OK to start sending data. If a client sees any other response, it + # interprets it as an error. + _sshv1respondbytes(self._fout, b'') + + # The file is in the form: + # + # <chunk size>\n<chunk> + # ... 
+ # 0\n + count = int(self._fin.readline()) + while count: + fpout.write(self._fin.read(count)) + count = int(self._fin.readline()) + + @contextlib.contextmanager + def mayberedirectstdio(self): + yield None + + def client(self): + client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0] + return 'remote:ssh:' + client + + def addcapabilities(self, repo, caps): + return caps + +class sshv2protocolhandler(sshv1protocolhandler): + """Protocol handler for version 2 of the SSH protocol.""" + + @property + def name(self): + return wireprototypes.SSHV2 + +def _runsshserver(ui, repo, fin, fout, ev): + # This function operates like a state machine of sorts. The following + # states are defined: + # + # protov1-serving + # Server is in protocol version 1 serving mode. Commands arrive on + # new lines. These commands are processed in this state, one command + # after the other. + # + # protov2-serving + # Server is in protocol version 2 serving mode. + # + # upgrade-initial + # The server is going to process an upgrade request. + # + # upgrade-v2-filter-legacy-handshake + # The protocol is being upgraded to version 2. The server is expecting + # the legacy handshake from version 1. + # + # upgrade-v2-finish + # The upgrade to version 2 of the protocol is imminent. + # + # shutdown + # The server is shutting down, possibly in reaction to a client event. + # + # And here are their transitions: + # + # protov1-serving -> shutdown + # When server receives an empty request or encounters another + # error. + # + # protov1-serving -> upgrade-initial + # An upgrade request line was seen. + # + # upgrade-initial -> upgrade-v2-filter-legacy-handshake + # Upgrade to version 2 in progress. Server is expecting to + # process a legacy handshake. + # + # upgrade-v2-filter-legacy-handshake -> shutdown + # Client did not fulfill upgrade handshake requirements. + # + # upgrade-v2-filter-legacy-handshake -> upgrade-v2-finish + # Client fulfilled version 2 upgrade requirements. 
Finishing that + # upgrade. + # + # upgrade-v2-finish -> protov2-serving + # Protocol upgrade to version 2 complete. Server can now speak protocol + # version 2. + # + # protov2-serving -> protov1-serving + # Ths happens by default since protocol version 2 is the same as + # version 1 except for the handshake. + + state = 'protov1-serving' + proto = sshv1protocolhandler(ui, fin, fout) + protoswitched = False + + while not ev.is_set(): + if state == 'protov1-serving': + # Commands are issued on new lines. + request = fin.readline()[:-1] + + # Empty lines signal to terminate the connection. + if not request: + state = 'shutdown' + continue + + # It looks like a protocol upgrade request. Transition state to + # handle it. + if request.startswith(b'upgrade '): + if protoswitched: + _sshv1respondooberror(fout, ui.ferr, + b'cannot upgrade protocols multiple ' + b'times') + state = 'shutdown' + continue + + state = 'upgrade-initial' + continue + + available = wireproto.commands.commandavailable(request, proto) + + # This command isn't available. Send an empty response and go + # back to waiting for a new command. 
+ if not available: + _sshv1respondbytes(fout, b'') + continue + + rsp = wireproto.dispatch(repo, proto, request) + + if isinstance(rsp, bytes): + _sshv1respondbytes(fout, rsp) + elif isinstance(rsp, wireprototypes.bytesresponse): + _sshv1respondbytes(fout, rsp.data) + elif isinstance(rsp, wireprototypes.streamres): + _sshv1respondstream(fout, rsp) + elif isinstance(rsp, wireprototypes.streamreslegacy): + _sshv1respondstream(fout, rsp) + elif isinstance(rsp, wireprototypes.pushres): + _sshv1respondbytes(fout, b'') + _sshv1respondbytes(fout, b'%d' % rsp.res) + elif isinstance(rsp, wireprototypes.pusherr): + _sshv1respondbytes(fout, rsp.res) + elif isinstance(rsp, wireprototypes.ooberror): + _sshv1respondooberror(fout, ui.ferr, rsp.message) + else: + raise error.ProgrammingError('unhandled response type from ' + 'wire protocol command: %s' % rsp) + + # For now, protocol version 2 serving just goes back to version 1. + elif state == 'protov2-serving': + state = 'protov1-serving' + continue + + elif state == 'upgrade-initial': + # We should never transition into this state if we've switched + # protocols. + assert not protoswitched + assert proto.name == wireprototypes.SSHV1 + + # Expected: upgrade <token> <capabilities> + # If we get something else, the request is malformed. It could be + # from a future client that has altered the upgrade line content. + # We treat this as an unknown command. + try: + token, caps = request.split(b' ')[1:] + except ValueError: + _sshv1respondbytes(fout, b'') + state = 'protov1-serving' + continue + + # Send empty response if we don't support upgrading protocols. + if not ui.configbool('experimental', 'sshserver.support-v2'): + _sshv1respondbytes(fout, b'') + state = 'protov1-serving' + continue + + try: + caps = urlreq.parseqs(caps) + except ValueError: + _sshv1respondbytes(fout, b'') + state = 'protov1-serving' + continue + + # We don't see an upgrade request to protocol version 2. Ignore + # the upgrade request. 
+ wantedprotos = caps.get(b'proto', [b''])[0] + if SSHV2 not in wantedprotos: + _sshv1respondbytes(fout, b'') + state = 'protov1-serving' + continue + + # It looks like we can honor this upgrade request to protocol 2. + # Filter the rest of the handshake protocol request lines. + state = 'upgrade-v2-filter-legacy-handshake' + continue + + elif state == 'upgrade-v2-filter-legacy-handshake': + # Client should have sent legacy handshake after an ``upgrade`` + # request. Expected lines: + # + # hello + # between + # pairs 81 + # 0000...-0000... + + ok = True + for line in (b'hello', b'between', b'pairs 81'): + request = fin.readline()[:-1] + + if request != line: + _sshv1respondooberror(fout, ui.ferr, + b'malformed handshake protocol: ' + b'missing %s' % line) + ok = False + state = 'shutdown' + break + + if not ok: + continue + + request = fin.read(81) + if request != b'%s-%s' % (b'0' * 40, b'0' * 40): + _sshv1respondooberror(fout, ui.ferr, + b'malformed handshake protocol: ' + b'missing between argument value') + state = 'shutdown' + continue + + state = 'upgrade-v2-finish' + continue + + elif state == 'upgrade-v2-finish': + # Send the upgrade response. + fout.write(b'upgraded %s %s\n' % (token, SSHV2)) + servercaps = wireproto.capabilities(repo, proto) + rsp = b'capabilities: %s' % servercaps.data + fout.write(b'%d\n%s\n' % (len(rsp), rsp)) + fout.flush() + + proto = sshv2protocolhandler(ui, fin, fout) + protoswitched = True + + state = 'protov2-serving' + continue + + elif state == 'shutdown': + break + + else: + raise error.ProgrammingError('unhandled ssh server state: %s' % + state) + +class sshserver(object): + def __init__(self, ui, repo, logfh=None): + self._ui = ui + self._repo = repo + self._fin = ui.fin + self._fout = ui.fout + + # Log write I/O to stdout and stderr if configured. 
+ if logfh: + self._fout = util.makeloggingfileobject( + logfh, self._fout, 'o', logdata=True) + ui.ferr = util.makeloggingfileobject( + logfh, ui.ferr, 'e', logdata=True) + + hook.redirect(True) + ui.fout = repo.ui.fout = ui.ferr + + # Prevent insertion/deletion of CRs + util.setbinary(self._fin) + util.setbinary(self._fout) + + def serve_forever(self): + self.serveuntil(threading.Event()) + sys.exit(0) + + def serveuntil(self, ev): + """Serve until a threading.Event is set.""" + _runsshserver(self._ui, self._repo, self._fin, self._fout, ev)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/mercurial/wireprototypes.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,148 @@ +# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +from __future__ import absolute_import + +import abc + +# Names of the SSH protocol implementations. +SSHV1 = 'ssh-v1' +# This is advertised over the wire. Increment the counter at the end +# to reflect BC breakages. +SSHV2 = 'exp-ssh-v2-0001' + +# All available wire protocol transports. +TRANSPORTS = { + SSHV1: { + 'transport': 'ssh', + 'version': 1, + }, + SSHV2: { + 'transport': 'ssh', + 'version': 2, + }, + 'http-v1': { + 'transport': 'http', + 'version': 1, + } +} + +class bytesresponse(object): + """A wire protocol response consisting of raw bytes.""" + def __init__(self, data): + self.data = data + +class ooberror(object): + """wireproto reply: failure of a batch of operations + + Something failed during a batch call. The error message is stored in + `self.message`. + """ + def __init__(self, message): + self.message = message + +class pushres(object): + """wireproto reply: success with simple integer return + + The call was successful and returned an integer contained in `self.res`. + """ + def __init__(self, res, output): + self.res = res + self.output = output + +class pusherr(object): + """wireproto reply: failure + + The call failed. The `self.res` attribute contains the error message. + """ + def __init__(self, res, output): + self.res = res + self.output = output + +class streamres(object): + """wireproto reply: binary stream + + The call was successful and the result is a stream. + + Accepts a generator containing chunks of data to be sent to the client. + + ``prefer_uncompressed`` indicates that the data is expected to be + incompressible and that the stream should therefore use the ``none`` + engine. 
+ """ + def __init__(self, gen=None, prefer_uncompressed=False): + self.gen = gen + self.prefer_uncompressed = prefer_uncompressed + +class streamreslegacy(object): + """wireproto reply: uncompressed binary stream + + The call was successful and the result is a stream. + + Accepts a generator containing chunks of data to be sent to the client. + + Like ``streamres``, but sends uncompressed data for "version 1" clients + using the application/mercurial-0.1 media type. + """ + def __init__(self, gen=None): + self.gen = gen + +class baseprotocolhandler(object): + """Abstract base class for wire protocol handlers. + + A wire protocol handler serves as an interface between protocol command + handlers and the wire protocol transport layer. Protocol handlers provide + methods to read command arguments, redirect stdio for the duration of + the request, handle response types, etc. + """ + + __metaclass__ = abc.ABCMeta + + @abc.abstractproperty + def name(self): + """The name of the protocol implementation. + + Used for uniquely identifying the transport type. + """ + + @abc.abstractmethod + def getargs(self, args): + """return the value for arguments in <args> + + returns a list of values (same order as <args>)""" + + @abc.abstractmethod + def forwardpayload(self, fp): + """Read the raw payload and forward to a file. + + The payload is read in full before the function returns. + """ + + @abc.abstractmethod + def mayberedirectstdio(self): + """Context manager to possibly redirect stdio. + + The context manager yields a file-object like object that receives + stdout and stderr output when the context manager is active. Or it + yields ``None`` if no I/O redirection occurs. + + The intent of this context manager is to capture stdio output + so it may be sent in the response. Some transports support streaming + stdio to the client in real time. For these transports, stdio output + won't be captured. 
+ """ + + @abc.abstractmethod + def client(self): + """Returns a string representation of this client (as bytes).""" + + @abc.abstractmethod + def addcapabilities(self, repo, caps): + """Adds advertised capabilities specific to this protocol. + + Receives the list of capabilities collected so far. + + Returns a list of capabilities. The passed in argument can be returned. + """
--- a/setup.py Sat Mar 03 22:29:24 2018 -0500 +++ b/setup.py Sun Mar 04 10:42:51 2018 -0500 @@ -255,6 +255,7 @@ if (not e.startswith(b'not trusting file') and not e.startswith(b'warning: Not importing') and not e.startswith(b'obsolete feature not enabled') + and not e.startswith(b'*** failed to import extension') and not e.startswith(b'devel-warn:'))] return b'\n'.join(b' ' + e for e in err) @@ -806,13 +807,14 @@ 'mercurial.cext', 'mercurial.cffi', 'mercurial.hgweb', - 'mercurial.httpclient', 'mercurial.pure', 'mercurial.thirdparty', 'mercurial.thirdparty.attr', + 'mercurial.utils', 'hgext', 'hgext.convert', 'hgext.fsmonitor', 'hgext.fsmonitor.pywatchman', 'hgext.highlight', - 'hgext.largefiles', 'hgext.lfs', 'hgext.zeroconf', 'hgext3rd', + 'hgext.largefiles', 'hgext.lfs', 'hgext.narrow', + 'hgext.zeroconf', 'hgext3rd', 'hgdemandimport'] common_depends = ['mercurial/bitmanipulation.h', @@ -846,14 +848,33 @@ if sys.platform == 'darwin': osutil_ldflags += ['-framework', 'ApplicationServices'] +xdiff_srcs = [ + 'mercurial/thirdparty/xdiff/xdiffi.c', + 'mercurial/thirdparty/xdiff/xemit.c', + 'mercurial/thirdparty/xdiff/xmerge.c', + 'mercurial/thirdparty/xdiff/xprepare.c', + 'mercurial/thirdparty/xdiff/xutils.c', +] + +xdiff_headers = [ + 'mercurial/thirdparty/xdiff/xdiff.h', + 'mercurial/thirdparty/xdiff/xdiffi.h', + 'mercurial/thirdparty/xdiff/xemit.h', + 'mercurial/thirdparty/xdiff/xinclude.h', + 'mercurial/thirdparty/xdiff/xmacros.h', + 'mercurial/thirdparty/xdiff/xprepare.h', + 'mercurial/thirdparty/xdiff/xtypes.h', + 'mercurial/thirdparty/xdiff/xutils.h', +] + extmodules = [ Extension('mercurial.cext.base85', ['mercurial/cext/base85.c'], include_dirs=common_include_dirs, depends=common_depends), Extension('mercurial.cext.bdiff', ['mercurial/bdiff.c', - 'mercurial/cext/bdiff.c'], + 'mercurial/cext/bdiff.c'] + xdiff_srcs, include_dirs=common_include_dirs, - depends=common_depends + ['mercurial/bdiff.h']), + depends=common_depends + ['mercurial/bdiff.h'] + 
xdiff_headers), Extension('mercurial.cext.diffhelpers', ['mercurial/cext/diffhelpers.c'], include_dirs=common_include_dirs, depends=common_depends),
--- a/tests/badserverext.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/badserverext.py Sun Mar 04 10:42:51 2018 -0500 @@ -44,16 +44,16 @@ configtable = {} configitem = registrar.configitem(configtable) -configitem('badserver', 'closeafteraccept', +configitem(b'badserver', b'closeafteraccept', default=False, ) -configitem('badserver', 'closeafterrecvbytes', +configitem(b'badserver', b'closeafterrecvbytes', default=0, ) -configitem('badserver', 'closeaftersendbytes', +configitem(b'badserver', b'closeaftersendbytes', default=0, ) -configitem('badserver', 'closebeforeaccept', +configitem(b'badserver', b'closebeforeaccept', default=False, )
--- a/tests/bruterebase.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/bruterebase.py Sun Mar 04 10:42:51 2018 -0500 @@ -65,7 +65,7 @@ desc += getdesc(prev) descs.append(desc) descs.sort() - summary = ' '.join(descs) + summary = b' '.join(descs) ui.popbuffer() repo.vfs.tryunlink(b'rebasestate')
--- a/tests/common-pattern.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/common-pattern.py Sun Mar 04 10:42:51 2018 -0500 @@ -69,8 +69,8 @@ br'$USUAL_BUNDLE2_CAPS_SERVER$' ), # HTTP log dates - (br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "GET', - br' - - [$LOGDATE$] "GET' + (br' - - \[\d\d/.../2\d\d\d \d\d:\d\d:\d\d] "(GET|PUT|POST)', + lambda m: br' - - [$LOGDATE$] "' + m.group(1) ), # Windows has an extra '/' in the following lines that get globbed away: # pushing to file:/*/$TESTTMP/r2 (glob)
--- a/tests/dummysmtpd.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/dummysmtpd.py Sun Mar 04 10:42:51 2018 -0500 @@ -12,6 +12,7 @@ import traceback from mercurial import ( + pycompat, server, sslutil, ui as uimod, @@ -63,6 +64,19 @@ except KeyboardInterrupt: pass +def _encodestrsonly(v): + if isinstance(v, type(u'')): + return v.encode('ascii') + return v + +def bytesvars(obj): + unidict = vars(obj) + bd = {k.encode('ascii'): _encodestrsonly(v) for k, v in unidict.items()} + if bd[b'daemon_postexec'] is not None: + bd[b'daemon_postexec'] = [ + _encodestrsonly(v) for v in bd[b'daemon_postexec']] + return bd + def main(): op = optparse.OptionParser() op.add_option('-d', '--daemon', action='store_true') @@ -85,8 +99,10 @@ dummysmtpsecureserver(addr, opts.certificate) log('listening at %s:%d\n' % addr) - server.runservice(vars(opts), initfn=init, runfn=run, - runargs=[sys.executable, __file__] + sys.argv[1:]) + server.runservice( + bytesvars(opts), initfn=init, runfn=run, + runargs=[pycompat.sysexecutable, + pycompat.fsencode(__file__)] + pycompat.sysargv[1:]) if __name__ == '__main__': main()
--- a/tests/dummyssh Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/dummyssh Sun Mar 04 10:42:51 2018 -0500 @@ -15,8 +15,8 @@ log = open("dummylog", "ab") log.write(b"Got arguments") for i, arg in enumerate(sys.argv[1:]): - log.write(b" %d:%s" % (i + 1, arg)) -log.write("\n") + log.write(b" %d:%s" % (i + 1, arg.encode('latin1'))) +log.write(b"\n") log.close() hgcmd = sys.argv[2] if os.name == 'nt':
--- a/tests/f Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/f Sun Mar 04 10:42:51 2018 -0500 @@ -25,6 +25,7 @@ from __future__ import absolute_import +import binascii import glob import hashlib import optparse @@ -58,46 +59,47 @@ facts = [] if isfile: if opts.type: - facts.append('file') + facts.append(b'file') if any((opts.hexdump, opts.dump, opts.md5, opts.sha1, opts.sha256)): content = open(f, 'rb').read() elif islink: if opts.type: - facts.append('link') + facts.append(b'link') content = os.readlink(f) elif isstdin: content = getattr(sys.stdin, 'buffer', sys.stdin).read() if opts.size: - facts.append('size=%s' % len(content)) + facts.append(b'size=%d' % len(content)) elif isdir: if opts.recurse or opts.type: dirfiles = glob.glob(f + '/*') - facts.append('directory with %s files' % len(dirfiles)) + facts.append(b'directory with %d files' % len(dirfiles)) elif opts.type: - facts.append('type unknown') + facts.append(b'type unknown') if not isstdin: stat = os.lstat(f) if opts.size and not isdir: - facts.append('size=%s' % stat.st_size) + facts.append(b'size=%d' % stat.st_size) if opts.mode and not islink: - facts.append('mode=%o' % (stat.st_mode & 0o777)) + facts.append(b'mode=%o' % (stat.st_mode & 0o777)) if opts.links: - facts.append('links=%s' % stat.st_nlink) + facts.append(b'links=%s' % stat.st_nlink) if opts.newer: # mtime might be in whole seconds so newer file might be same if stat.st_mtime >= os.stat(opts.newer).st_mtime: - facts.append('newer than %s' % opts.newer) + facts.append(b'newer than %s' % opts.newer) else: - facts.append('older than %s' % opts.newer) + facts.append(b'older than %s' % opts.newer) if opts.md5 and content is not None: h = hashlib.md5(content) - facts.append('md5=%s' % h.hexdigest()[:opts.bytes]) + facts.append(b'md5=%s' % binascii.hexlify(h.digest())[:opts.bytes]) if opts.sha1 and content is not None: h = hashlib.sha1(content) - facts.append('sha1=%s' % h.hexdigest()[:opts.bytes]) + facts.append(b'sha1=%s' % 
binascii.hexlify(h.digest())[:opts.bytes]) if opts.sha256 and content is not None: h = hashlib.sha256(content) - facts.append('sha256=%s' % h.hexdigest()[:opts.bytes]) + facts.append(b'sha256=%s' % + binascii.hexlify(h.digest())[:opts.bytes]) if isstdin: outfile.write(b', '.join(facts) + b'\n') elif facts:
--- a/tests/fakedirstatewritetime.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/fakedirstatewritetime.py Sun Mar 04 10:42:51 2018 -0500 @@ -13,13 +13,13 @@ extensions, policy, registrar, - util, ) +from mercurial.utils import dateutil configtable = {} configitem = registrar.configitem(configtable) -configitem('fakedirstatewritetime', 'fakenow', +configitem(b'fakedirstatewritetime', b'fakenow', default=None, ) @@ -29,7 +29,7 @@ # execute what original parsers.pack_dirstate should do actually # for consistency actualnow = int(now) - for f, e in dmap.iteritems(): + for f, e in dmap.items(): if e[0] == 'n' and e[3] == actualnow: e = parsers.dirstatetuple(e[0], e[1], e[2], -1) dmap[f] = e @@ -39,7 +39,7 @@ def fakewrite(ui, func): # fake "now" of 'pack_dirstate' only if it is invoked while 'func' - fakenow = ui.config('fakedirstatewritetime', 'fakenow') + fakenow = ui.config(b'fakedirstatewritetime', b'fakenow') if not fakenow: # Execute original one, if fakenow isn't configured. This is # useful to prevent subrepos from executing replaced one, @@ -49,7 +49,7 @@ # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy - fakenow = util.parsedate(fakenow, ['%Y%m%d%H%M'])[0] + fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0] orig_pack_dirstate = parsers.pack_dirstate orig_dirstate_getfsnow = dirstate._getfsnow
--- a/tests/fakemergerecord.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/fakemergerecord.py Sun Mar 04 10:42:51 2018 -0500 @@ -12,15 +12,15 @@ cmdtable = {} command = registrar.command(cmdtable) -@command('fakemergerecord', - [('X', 'mandatory', None, 'add a fake mandatory record'), - ('x', 'advisory', None, 'add a fake advisory record')], '') +@command(b'fakemergerecord', + [(b'X', b'mandatory', None, b'add a fake mandatory record'), + (b'x', b'advisory', None, b'add a fake advisory record')], '') def fakemergerecord(ui, repo, *pats, **opts): with repo.wlock(): ms = merge.mergestate.read(repo) records = ms._makerecords() if opts.get('mandatory'): - records.append(('X', 'mandatory record')) + records.append((b'X', b'mandatory record')) if opts.get('advisory'): - records.append(('x', 'advisory record')) + records.append((b'x', b'advisory record')) ms._writerecords(records)
--- a/tests/fakepatchtime.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/fakepatchtime.py Sun Mar 04 10:42:51 2018 -0500 @@ -7,30 +7,30 @@ extensions, patch as patchmod, registrar, - util, ) +from mercurial.utils import dateutil configtable = {} configitem = registrar.configitem(configtable) -configitem('fakepatchtime', 'fakenow', +configitem(b'fakepatchtime', b'fakenow', default=None, ) def internalpatch(orig, ui, repo, patchobj, strip, - prefix='', files=None, - eolmode='strict', similarity=0): + prefix=b'', files=None, + eolmode=b'strict', similarity=0): if files is None: files = set() r = orig(ui, repo, patchobj, strip, prefix=prefix, files=files, eolmode=eolmode, similarity=similarity) - fakenow = ui.config('fakepatchtime', 'fakenow') + fakenow = ui.config(b'fakepatchtime', b'fakenow') if fakenow: # parsing 'fakenow' in YYYYmmddHHMM format makes comparison between # 'fakenow' value and 'touch -t YYYYmmddHHMM' argument easy - fakenow = util.parsedate(fakenow, ['%Y%m%d%H%M'])[0] + fakenow = dateutil.parsedate(fakenow, [b'%Y%m%d%H%M'])[0] for f in files: repo.wvfs.utime(f, (fakenow, fakenow))
--- a/tests/flagprocessorext.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/flagprocessorext.py Sun Mar 04 10:42:51 2018 -0500 @@ -45,14 +45,14 @@ def supportedoutgoingversions(orig, repo): versions = orig(repo) - versions.discard('01') - versions.discard('02') - versions.add('03') + versions.discard(b'01') + versions.discard(b'02') + versions.add(b'03') return versions def allsupportedversions(orig, ui): versions = orig(ui) - versions.add('03') + versions.add(b'03') return versions def noopaddrevision(orig, self, text, transaction, link, p1, p2, @@ -106,7 +106,7 @@ # Teach exchange to use changegroup 3 for k in exchange._bundlespeccgversions.keys(): - exchange._bundlespeccgversions[k] = '03' + exchange._bundlespeccgversions[k] = b'03' # Add wrappers for addrevision, responsible to set flags depending on the # revision data contents.
--- a/tests/generate-working-copy-states.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/generate-working-copy-states.py Sun Mar 04 10:42:51 2018 -0500 @@ -42,12 +42,12 @@ def generatestates(maxchangesets, parentcontents): depth = len(parentcontents) if depth == maxchangesets + 1: - for tracked in ('untracked', 'tracked'): - filename = "_".join([(content is None and 'missing' or content) for - content in parentcontents]) + "-" + tracked + for tracked in (b'untracked', b'tracked'): + filename = b"_".join([(content is None and b'missing' or content) + for content in parentcontents]) + b"-" + tracked yield (filename, parentcontents) else: - for content in ({None, 'content' + str(depth + 1)} | + for content in ({None, b'content' + (b"%d" % (depth + 1))} | set(parentcontents)): for combination in generatestates(maxchangesets, parentcontents + [content]): @@ -71,7 +71,7 @@ if depth == 'wc': # Make sure there is content so the file gets written and can be # tracked. It will be deleted outside of this script. - content.append((filename, states[maxchangesets] or 'TOBEDELETED')) + content.append((filename, states[maxchangesets] or b'TOBEDELETED')) else: content.append((filename, states[int(depth) - 1])) else: @@ -82,7 +82,7 @@ for filename, data in content: if data is not None: f = open(filename, 'wb') - f.write(data + '\n') + f.write(data + b'\n') f.close() elif os.path.exists(filename): os.remove(filename)
--- a/tests/get-with-headers.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/get-with-headers.py Sun Mar 04 10:42:51 2018 -0500 @@ -3,7 +3,7 @@ """This does HTTP GET requests given a host:port and path and returns a subset of the headers plus the body of the result.""" -from __future__ import absolute_import, print_function +from __future__ import absolute_import import argparse import json @@ -23,6 +23,8 @@ except ImportError: pass +stdout = getattr(sys.stdout, 'buffer', sys.stdout) + parser = argparse.ArgumentParser() parser.add_argument('--twice', action='store_true') parser.add_argument('--headeronly', action='store_true') @@ -62,21 +64,23 @@ conn = httplib.HTTPConnection(host) conn.request("GET", '/' + path, None, headers) response = conn.getresponse() - print(response.status, response.reason) + stdout.write(b'%d %s\n' % (response.status, + response.reason.encode('ascii'))) if show[:1] == ['-']: show = sorted(h for h, v in response.getheaders() if h.lower() not in show) for h in [h.lower() for h in show]: if response.getheader(h, None) is not None: - print("%s: %s" % (h, response.getheader(h))) + stdout.write(b"%s: %s\n" % (h.encode('ascii'), + response.getheader(h).encode('ascii'))) if not headeronly: - print() + stdout.write(b'\n') data = response.read() if args.bodyfile: bodyfh = open(args.bodyfile, 'wb') else: - bodyfh = sys.stdout + bodyfh = stdout # Pretty print JSON. This also has the beneficial side-effect # of verifying emitted JSON is well-formed.
--- a/tests/hghave.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/hghave.py Sun Mar 04 10:42:51 2018 -0500 @@ -703,8 +703,17 @@ @check("clang-libfuzzer", "clang new enough to include libfuzzer") def has_clang_libfuzzer(): - mat = matchoutput('clang --version', 'clang version (\d)') + mat = matchoutput('clang --version', b'clang version (\d)') if mat: # libfuzzer is new in clang 6 return int(mat.group(1)) > 5 return False + +@check("xdiff", "xdiff algorithm") +def has_xdiff(): + try: + from mercurial import policy + bdiff = policy.importmod('bdiff') + return bdiff.xdiffblocks('', '') == [(0, 0, 0, 0)] + except (ImportError, AttributeError) as ex: + return False
--- a/tests/logexceptions.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/logexceptions.py Sun Mar 04 10:42:51 2018 -0500 @@ -65,6 +65,7 @@ primaryframe, hgframe, hgline, + ui.environ[b'TESTNAME'].decode('utf-8', 'replace'), ] fh.write(b'\0'.join(p.encode('utf-8', 'replace') for p in parts))
--- a/tests/mockblackbox.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/mockblackbox.py Sun Mar 04 10:42:51 2018 -0500 @@ -5,7 +5,7 @@ # XXX: we should probably offer a devel option to do this in blackbox directly def getuser(): - return 'bob' + return b'bob' def getpid(): return 5000
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/narrow-library.sh Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,8 @@ +cat >> $HGRCPATH <<EOF +[extensions] +narrow= +[ui] +ssh=python "$TESTDIR/dummyssh" +[experimental] +changegroup3 = True +EOF
--- a/tests/printenv.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/printenv.py Sun Mar 04 10:42:51 2018 -0500 @@ -35,7 +35,7 @@ # variables with empty values may not exist on all platforms, filter # them now for portability sake. -env = [(k, v) for k, v in os.environ.iteritems() +env = [(k, v) for k, v in os.environ.items() if k.startswith("HG_") and v] env.sort()
--- a/tests/revlog-formatv0.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/revlog-formatv0.py Sun Mar 04 10:42:51 2018 -0500 @@ -18,6 +18,7 @@ """ from __future__ import absolute_import +import binascii import os import sys @@ -56,7 +57,7 @@ for name, data in files: f = open(name, 'wb') - f.write(data.decode('hex')) + f.write(binascii.unhexlify(data)) f.close() sys.exit(0)
--- a/tests/revnamesext.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/revnamesext.py Sun Mar 04 10:42:51 2018 -0500 @@ -7,12 +7,12 @@ ) def reposetup(ui, repo): - names = {'r%d' % rev: repo[rev].node() for rev in repo} + names = {b'r%d' % rev: repo[rev].node() for rev in repo} namemap = lambda r, name: names.get(name) - nodemap = lambda r, node: ['r%d' % repo[node].rev()] + nodemap = lambda r, node: [b'r%d' % repo[node].rev()] - ns = namespaces.namespace('revnames', templatename='revname', - logname='revname', + ns = namespaces.namespace(b'revnames', templatename=b'revname', + logname=b'revname', listnames=lambda r: names.keys(), namemap=namemap, nodemap=nodemap) repo.names.addnamespace(ns)
--- a/tests/run-tests.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/run-tests.py Sun Mar 04 10:42:51 2018 -0500 @@ -120,6 +120,7 @@ } class TestRunnerLexer(lexer.RegexLexer): + testpattern = r'[\w-]+\.(t|py)( \(case [\w-]+\))?' tokens = { 'root': [ (r'^Skipped', token.Generic.Skipped, 'skipped'), @@ -127,11 +128,11 @@ (r'^ERROR: ', token.Generic.Failed, 'failed'), ], 'skipped': [ - (r'[\w-]+\.(t|py)', token.Generic.SName), + (testpattern, token.Generic.SName), (r':.*', token.Generic.Skipped), ], 'failed': [ - (r'[\w-]+\.(t|py)', token.Generic.FName), + (testpattern, token.Generic.FName), (r'(:| ).*', token.Generic.Failed), ] } @@ -344,6 +345,8 @@ help="loop tests repeatedly") harness.add_argument('--random', action="store_true", help='run tests in random order') + harness.add_argument('--order-by-runtime', action="store_true", + help='run slowest tests first, according to .testtimes') harness.add_argument("-p", "--port", type=int, help="port on which servers should listen" " (default: $%s or %d)" % defaults['port']) @@ -989,7 +992,12 @@ # the intermediate 'compile' step help with debugging code = compile(source.read(), replacementfile, 'exec') exec(code, data) - r.extend(data.get('substitutions', ())) + for value in data.get('substitutions', ()): + if len(value) != 2: + msg = 'malformatted substitution in %s: %r' + msg %= (replacementfile, value) + raise ValueError(msg) + r.append(value) return r def _escapepath(self, p): @@ -1046,6 +1054,7 @@ env['PYTHONUSERBASE'] = sysconfig.get_config_var('userbase') or '' env['HGEMITWARNINGS'] = '1' env['TESTTMP'] = self._testtmp + env['TESTNAME'] = self.name env['HOME'] = self._testtmp # This number should match portneeded in _getport for port in xrange(3): @@ -1080,7 +1089,7 @@ del env[k] # unset env related to hooks - for k in env.keys(): + for k in list(env): if k.startswith('HG_'): del env[k] @@ -1229,6 +1238,7 @@ self.name = '%s (case %s)' % (self.name, _strpath(case)) self.errpath = b'%s.%s.err' % (self.errpath[:-4], 
case) self._tmpname += b'-%s' % case + self._have = {} @property def refpath(self): @@ -1268,11 +1278,15 @@ return self._processoutput(exitcode, output, salt, after, expected) def _hghave(self, reqs): + allreqs = b' '.join(reqs) + if allreqs in self._have: + return self._have.get(allreqs) + # TODO do something smarter when all other uses of hghave are gone. runtestdir = os.path.abspath(os.path.dirname(_bytespath(__file__))) tdir = runtestdir.replace(b'\\', b'/') proc = Popen4(b'%s -c "%s/hghave %s"' % - (self._shell, tdir, b' '.join(reqs)), + (self._shell, tdir, allreqs), self._testtmp, 0, self._getenv()) stdout, stderr = proc.communicate() ret = proc.wait() @@ -1283,10 +1297,13 @@ sys.exit(1) if ret != 0: + self._have[allreqs] = (False, stdout) return False, stdout if b'slow' in reqs: self._timeout = self._slowtimeout + + self._have[allreqs] = (True, None) return True, None def _iftest(self, args): @@ -1341,7 +1358,11 @@ if os.getenv('MSYSTEM'): script.append(b'alias pwd="pwd -W"\n') if self._case: - script.append(b'TESTCASE=%s\n' % shellquote(self._case)) + if isinstance(self._case, str): + quoted = shellquote(self._case) + else: + quoted = shellquote(self._case.decode('utf8')).encode('utf8') + script.append(b'TESTCASE=%s\n' % quoted) script.append(b'export TESTCASE\n') n = 0 @@ -1352,10 +1373,11 @@ lsplit = l.split() if len(lsplit) < 2 or lsplit[0] != b'#require': after.setdefault(pos, []).append(' !!! 
invalid #require\n') - haveresult, message = self._hghave(lsplit[1:]) - if not haveresult: - script = [b'echo "%s"\nexit 80\n' % message] - break + if not skipping: + haveresult, message = self._hghave(lsplit[1:]) + if not haveresult: + script = [b'echo "%s"\nexit 80\n' % message] + break after.setdefault(pos, []).append(l) elif l.startswith(b'#if'): lsplit = l.split() @@ -1751,20 +1773,20 @@ else: servefail, lines = getdiff(expected, got, test.refpath, test.errpath) + self.stream.write('\n') + for line in lines: + line = highlightdiff(line, self.color) + if PYTHON3: + self.stream.flush() + self.stream.buffer.write(line) + self.stream.buffer.flush() + else: + self.stream.write(line) + self.stream.flush() + if servefail: raise test.failureException( 'server failed to start (HGPORT=%s)' % test._startport) - else: - self.stream.write('\n') - for line in lines: - line = highlightdiff(line, self.color) - if PYTHON3: - self.stream.flush() - self.stream.buffer.write(line) - self.stream.buffer.flush() - else: - self.stream.write(line) - self.stream.flush() # handle interactive prompt without releasing iolock if self._options.interactive: @@ -2012,10 +2034,11 @@ def loadtimes(outputdir): times = [] try: - with open(os.path.join(outputdir, b'.testtimes-')) as fp: + with open(os.path.join(outputdir, b'.testtimes')) as fp: for line in fp: - ts = line.split() - times.append((ts[0], [float(t) for t in ts[1:]])) + m = re.match('(.*?) ([0-9. 
]+)', line) + times.append((m.group(1), + [float(t) for t in m.group(2).split()])) except IOError as err: if err.errno != errno.ENOENT: raise @@ -2124,13 +2147,21 @@ if self._runner.options.exceptions: exceptions = aggregateexceptions( os.path.join(self._runner._outputdir, b'exceptions')) - total = sum(exceptions.values()) self.stream.writeln('Exceptions Report:') self.stream.writeln('%d total from %d frames' % - (total, len(exceptions))) - for (frame, line, exc), count in exceptions.most_common(): - self.stream.writeln('%d\t%s: %s' % (count, frame, exc)) + (exceptions['total'], + len(exceptions['exceptioncounts']))) + combined = exceptions['combined'] + for key in sorted(combined, key=combined.get, reverse=True): + frame, line, exc = key + totalcount, testcount, leastcount, leasttest = combined[key] + + self.stream.writeln('%d (%d tests)\t%s: %s (%s - %d total)' + % (totalcount, + testcount, + frame, exc, + leasttest, leastcount)) self.stream.flush() @@ -2279,47 +2310,57 @@ separators=(',', ': ')) outf.writelines(("testreport =", jsonout)) -def sorttests(testdescs, shuffle=False): +def sorttests(testdescs, previoustimes, shuffle=False): """Do an in-place sort of tests.""" if shuffle: random.shuffle(testdescs) return - # keywords for slow tests - slow = {b'svn': 10, - b'cvs': 10, - b'hghave': 10, - b'largefiles-update': 10, - b'run-tests': 10, - b'corruption': 10, - b'race': 10, - b'i18n': 10, - b'check': 100, - b'gendoc': 100, - b'contrib-perf': 200, - } - perf = {} - - def sortkey(f): - # run largest tests first, as they tend to take the longest - f = f['path'] - try: - return perf[f] - except KeyError: + if previoustimes: + def sortkey(f): + f = f['path'] + if f in previoustimes: + # Use most recent time as estimate + return -previoustimes[f][-1] + else: + # Default to a rather arbitrary value of 1 second for new tests + return -1.0 + else: + # keywords for slow tests + slow = {b'svn': 10, + b'cvs': 10, + b'hghave': 10, + b'largefiles-update': 10, + 
b'run-tests': 10, + b'corruption': 10, + b'race': 10, + b'i18n': 10, + b'check': 100, + b'gendoc': 100, + b'contrib-perf': 200, + } + perf = {} + + def sortkey(f): + # run largest tests first, as they tend to take the longest + f = f['path'] try: - val = -os.stat(f).st_size - except OSError as e: - if e.errno != errno.ENOENT: - raise - perf[f] = -1e9 # file does not exist, tell early - return -1e9 - for kw, mul in slow.items(): - if kw in f: - val *= mul - if f.endswith(b'.py'): - val /= 10.0 - perf[f] = val / 1000.0 - return perf[f] + return perf[f] + except KeyError: + try: + val = -os.stat(f).st_size + except OSError as e: + if e.errno != errno.ENOENT: + raise + perf[f] = -1e9 # file does not exist, tell early + return -1e9 + for kw, mul in slow.items(): + if kw in f: + val *= mul + if f.endswith(b'.py'): + val /= 10.0 + perf[f] = val / 1000.0 + return perf[f] testdescs.sort(key=sortkey) @@ -2390,8 +2431,6 @@ os.umask(oldmask) def _run(self, testdescs): - sorttests(testdescs, shuffle=self.options.random) - self._testdir = osenvironb[b'TESTDIR'] = getattr( os, 'getcwdb', os.getcwd)() # assume all tests in same folder for now @@ -2406,6 +2445,10 @@ self._outputdir = self._testdir if testdescs and pathname: self._outputdir = os.path.join(self._outputdir, pathname) + previoustimes = {} + if self.options.order_by_runtime: + previoustimes = dict(loadtimes(self._outputdir)) + sorttests(testdescs, previoustimes, shuffle=self.options.random) if 'PYTHONHASHSEED' not in os.environ: # use a random python hash seed all the time @@ -3001,22 +3044,57 @@ p.decode("utf-8")) def aggregateexceptions(path): - exceptions = collections.Counter() + exceptioncounts = collections.Counter() + testsbyfailure = collections.defaultdict(set) + failuresbytest = collections.defaultdict(set) for f in os.listdir(path): with open(os.path.join(path, f), 'rb') as fh: data = fh.read().split(b'\0') - if len(data) != 4: + if len(data) != 5: continue - exc, mainframe, hgframe, hgline = data + exc, 
mainframe, hgframe, hgline, testname = data exc = exc.decode('utf-8') mainframe = mainframe.decode('utf-8') hgframe = hgframe.decode('utf-8') hgline = hgline.decode('utf-8') - exceptions[(hgframe, hgline, exc)] += 1 - - return exceptions + testname = testname.decode('utf-8') + + key = (hgframe, hgline, exc) + exceptioncounts[key] += 1 + testsbyfailure[key].add(testname) + failuresbytest[testname].add(key) + + # Find test having fewest failures for each failure. + leastfailing = {} + for key, tests in testsbyfailure.items(): + fewesttest = None + fewestcount = 99999999 + for test in sorted(tests): + if len(failuresbytest[test]) < fewestcount: + fewesttest = test + fewestcount = len(failuresbytest[test]) + + leastfailing[key] = (fewestcount, fewesttest) + + # Create a combined counter so we can sort by total occurrences and + # impacted tests. + combined = {} + for key in exceptioncounts: + combined[key] = (exceptioncounts[key], + len(testsbyfailure[key]), + leastfailing[key][0], + leastfailing[key][1]) + + return { + 'exceptioncounts': exceptioncounts, + 'total': sum(exceptioncounts.values()), + 'combined': combined, + 'leastfailing': leastfailing, + 'byfailure': testsbyfailure, + 'bytest': failuresbytest, + } if __name__ == '__main__': runner = TestRunner()
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/sshprotoext.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,98 @@ +# sshprotoext.py - Extension to test behavior of SSH protocol +# +# Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com> +# +# This software may be used and distributed according to the terms of the +# GNU General Public License version 2 or any later version. + +# This extension replaces the SSH server started via `hg serve --stdio`. +# The server behaves differently depending on environment variables. + +from __future__ import absolute_import + +from mercurial import ( + error, + extensions, + registrar, + sshpeer, + wireproto, + wireprotoserver, +) + +configtable = {} +configitem = registrar.configitem(configtable) + +configitem(b'sshpeer', b'mode', default=None) +configitem(b'sshpeer', b'handshake-mode', default=None) + +class bannerserver(wireprotoserver.sshserver): + """Server that sends a banner to stdout.""" + def serve_forever(self): + for i in range(10): + self._fout.write(b'banner: line %d\n' % i) + + super(bannerserver, self).serve_forever() + +class prehelloserver(wireprotoserver.sshserver): + """Tests behavior when connecting to <0.9.1 servers. + + The ``hello`` wire protocol command was introduced in Mercurial + 0.9.1. Modern clients send the ``hello`` command when connecting + to SSH servers. This mock server tests behavior of the handshake + when ``hello`` is not supported. + """ + def serve_forever(self): + l = self._fin.readline() + assert l == b'hello\n' + # Respond to unknown commands with an empty reply. 
+ wireprotoserver._sshv1respondbytes(self._fout, b'') + l = self._fin.readline() + assert l == b'between\n' + proto = wireprotoserver.sshv1protocolhandler(self._ui, self._fin, + self._fout) + rsp = wireproto.dispatch(self._repo, proto, b'between') + wireprotoserver._sshv1respondbytes(self._fout, rsp.data) + + super(prehelloserver, self).serve_forever() + +def performhandshake(orig, ui, stdin, stdout, stderr): + """Wrapped version of sshpeer._performhandshake to send extra commands.""" + mode = ui.config(b'sshpeer', b'handshake-mode') + if mode == b'pre-no-args': + ui.debug(b'sending no-args command\n') + stdin.write(b'no-args\n') + stdin.flush() + return orig(ui, stdin, stdout, stderr) + elif mode == b'pre-multiple-no-args': + ui.debug(b'sending unknown1 command\n') + stdin.write(b'unknown1\n') + ui.debug(b'sending unknown2 command\n') + stdin.write(b'unknown2\n') + ui.debug(b'sending unknown3 command\n') + stdin.write(b'unknown3\n') + stdin.flush() + return orig(ui, stdin, stdout, stderr) + else: + raise error.ProgrammingError(b'unknown HANDSHAKECOMMANDMODE: %s' % + mode) + +def extsetup(ui): + # It's easier for tests to define the server behavior via environment + # variables than config options. This is because `hg serve --stdio` + # has to be invoked with a certain form for security reasons and + # `dummyssh` can't just add `--config` flags to the command line. + servermode = ui.environ.get(b'SSHSERVERMODE') + + if servermode == b'banner': + wireprotoserver.sshserver = bannerserver + elif servermode == b'no-hello': + wireprotoserver.sshserver = prehelloserver + elif servermode: + raise error.ProgrammingError(b'unknown server mode: %s' % servermode) + + peermode = ui.config(b'sshpeer', b'mode') + + if peermode == b'extra-handshake-commands': + extensions.wrapfunction(sshpeer, '_performhandshake', performhandshake) + elif peermode: + raise error.ProgrammingError(b'unknown peer mode: %s' % peermode)
--- a/tests/test-abort-checkin.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-abort-checkin.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,9 +1,9 @@ $ cat > abortcommit.py <<EOF > from mercurial import error > def hook(**args): - > raise error.Abort("no commits allowed") + > raise error.Abort(b"no commits allowed") > def reposetup(ui, repo): - > repo.ui.setconfig("hooks", "pretxncommit.nocommits", hook) + > repo.ui.setconfig(b"hooks", b"pretxncommit.nocommits", hook) > EOF $ abspath=`pwd`/abortcommit.py
--- a/tests/test-acl.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-acl.t Sun Mar 04 10:42:51 2018 -0500 @@ -21,6 +21,15 @@ > echo > } + > cat > posixgetuser.py <<'EOF' + > import getpass + > from mercurial import pycompat, util + > def posixgetuser(): + > return pycompat.fsencode(getpass.getuser()) + > if not pycompat.isposix: + > util.getuser = posixgetuser # forcibly trust $LOGNAME + > EOF + > init_config() > { > cat > fakegroups.py <<EOF @@ -41,6 +50,7 @@ > sources = push > [extensions] > f=`pwd`/fakegroups.py + > posixgetuser=$TESTTMP/posixgetuser.py > EOF > } @@ -72,6 +82,10 @@ 3 files updated, 0 files merged, 0 files removed, 0 files unresolved $ config=b/.hg/hgrc + $ cat >> "$config" <<EOF + > [extensions] + > posixgetuser=$TESTTMP/posixgetuser.py + > EOF Extension disabled for lack of a hook @@ -1126,6 +1140,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.allow] ** = fred """ @@ -1206,6 +1221,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.allow] ** = fred [acl.deny] @@ -1287,6 +1303,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.allow] ** = @group1 """ @@ -1368,6 +1385,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.allow] ** = @group1 [acl.deny] @@ -1491,6 +1509,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py """ pushing to ../b query 1; heads @@ -1573,6 +1592,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.deny.branches] foobar = * """ @@ -1651,6 +1671,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.allow.branches] """ pushing to ../b @@ -1723,6 +1744,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.allow.branches] * = george """ @@ -1790,6 +1812,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.allow.branches] * = george """ @@ -1878,6 
+1901,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.allow.branches] foobar = astro * = george @@ -1965,6 +1989,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.deny.branches] foobar = astro default = astro @@ -2039,6 +2064,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.deny.branches] default = !astro """ @@ -2121,6 +2147,7 @@ [acl] sources = push [extensions] + posixgetuser=$TESTTMP/posixgetuser.py [acl.deny.branches] default = !astro """
--- a/tests/test-add.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-add.t Sun Mar 04 10:42:51 2018 -0500 @@ -146,6 +146,13 @@ M a ? a.orig +excluded file shouldn't be added even if it is explicitly specified + + $ hg add a.orig -X '*.orig' + $ hg st + M a + ? a.orig + Forgotten file can be added back (as either clean or modified) $ hg forget b
--- a/tests/test-alias.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-alias.t Sun Mar 04 10:42:51 2018 -0500 @@ -548,12 +548,12 @@ > from mercurial import cmdutil, commands, registrar > cmdtable = {} > command = registrar.command(cmdtable) - > @command('expandalias') + > @command(b'expandalias') > def expandalias(ui, repo, name): > alias = cmdutil.findcmd(name, commands.table)[1][0] - > ui.write('%s args: %s\n' % (name, ' '.join(alias.args))) + > ui.write(b'%s args: %s\n' % (name, b' '.join(alias.args))) > os.environ['COUNT'] = '2' - > ui.write('%s args: %s (with COUNT=2)\n' % (name, ' '.join(alias.args))) + > ui.write(b'%s args: %s (with COUNT=2)\n' % (name, b' '.join(alias.args))) > EOF $ cat >> $HGRCPATH <<'EOF'
--- a/tests/test-ancestor.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-ancestor.py Sun Mar 04 10:42:51 2018 -0500 @@ -220,9 +220,9 @@ # DAGs that have been known to be problematic, and, optionally, known pairs # of revisions and their expected ancestor list. dagtests = [ - ('+2*2*2/*3/2', {}), - ('+3*3/*2*2/*4*4/*4/2*4/2*2', {}), - ('+2*2*/2*4*/4*/3*2/4', {(6, 7): [3, 5]}), + (b'+2*2*2/*3/2', {}), + (b'+3*3/*2*2/*4*4/*4/2*4/2*2', {}), + (b'+2*2*/2*4*/4*/3*2/4', {(6, 7): [3, 5]}), ] def test_gca(): u = uimod.ui.load()
--- a/tests/test-annotate.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-annotate.py Sun Mar 04 10:42:51 2018 -0500 @@ -27,7 +27,7 @@ def decorate(text, rev): return ([annotateline(fctx=rev, lineno=i) - for i in xrange(1, text.count(b'\n') + 1)], + for i in range(1, text.count(b'\n') + 1)], text) # Basic usage @@ -36,17 +36,17 @@ p1ann = decorate(p1data, p1fctx) p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts) self.assertEqual(p1ann[0], [ - annotateline('old', 1), - annotateline('old', 2), - annotateline('p1', 3), + annotateline(b'old', 1), + annotateline(b'old', 2), + annotateline(b'p1', 3), ]) p2ann = decorate(p2data, p2fctx) p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts) self.assertEqual(p2ann[0], [ - annotateline('old', 1), - annotateline('p2', 2), - annotateline('p2', 3), + annotateline(b'old', 1), + annotateline(b'p2', 2), + annotateline(b'p2', 3), ]) # Test with multiple parents (note the difference caused by ordering) @@ -55,22 +55,22 @@ childann = _annotatepair([p1ann, p2ann], childfctx, childann, False, diffopts) self.assertEqual(childann[0], [ - annotateline('old', 1), - annotateline('c', 2), - annotateline('p2', 2), - annotateline('c', 4), - annotateline('p2', 3), + annotateline(b'old', 1), + annotateline(b'c', 2), + annotateline(b'p2', 2), + annotateline(b'c', 4), + annotateline(b'p2', 3), ]) childann = decorate(childdata, childfctx) childann = _annotatepair([p2ann, p1ann], childfctx, childann, False, diffopts) self.assertEqual(childann[0], [ - annotateline('old', 1), - annotateline('c', 2), - annotateline('p1', 3), - annotateline('c', 4), - annotateline('p2', 3), + annotateline(b'old', 1), + annotateline(b'c', 2), + annotateline(b'p1', 3), + annotateline(b'c', 4), + annotateline(b'p2', 3), ]) # Test with skipchild (note the difference caused by ordering) @@ -79,24 +79,24 @@ childann = _annotatepair([p1ann, p2ann], childfctx, childann, True, diffopts) self.assertEqual(childann[0], [ - annotateline('old', 1), - 
annotateline('old', 2, True), + annotateline(b'old', 1), + annotateline(b'old', 2, True), # note that this line was carried over from earlier so it is *not* # marked skipped - annotateline('p2', 2), - annotateline('p2', 2, True), - annotateline('p2', 3), + annotateline(b'p2', 2), + annotateline(b'p2', 2, True), + annotateline(b'p2', 3), ]) childann = decorate(childdata, childfctx) childann = _annotatepair([p2ann, p1ann], childfctx, childann, True, diffopts) self.assertEqual(childann[0], [ - annotateline('old', 1), - annotateline('old', 2, True), - annotateline('p1', 3), - annotateline('p1', 3, True), - annotateline('p2', 3), + annotateline(b'old', 1), + annotateline(b'old', 2, True), + annotateline(b'p1', 3), + annotateline(b'p1', 3, True), + annotateline(b'p2', 3), ]) if __name__ == '__main__':
--- a/tests/test-annotate.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-annotate.t Sun Mar 04 10:42:51 2018 -0500 @@ -814,6 +814,8 @@ [255] $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=[1])' hg: parse error at 43: not a prefix: [ + (followlines(baz, 2:4, startrev=20, descend=[1]) + ^ here) [255] $ hg log -r 'followlines(baz, 2:4, startrev=20, descend=a)' hg: parse error: descend argument must be a boolean
--- a/tests/test-arbitraryfilectx.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-arbitraryfilectx.t Sun Mar 04 10:42:51 2018 -0500 @@ -5,11 +5,11 @@ > from mercurial import commands, context, registrar > cmdtable = {} > command = registrar.command(cmdtable) - > @command(b'eval', [], 'hg eval CMD') + > @command(b'eval', [], b'hg eval CMD') > def eval_(ui, repo, *cmds, **opts): - > cmd = " ".join(cmds) + > cmd = b" ".join(cmds) > res = str(eval(cmd, globals(), locals())) - > ui.warn("%s" % res) + > ui.warn(b"%s" % res) > EOF $ echo "[extensions]" >> $HGRCPATH
--- a/tests/test-atomictempfile.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-atomictempfile.py Sun Mar 04 10:42:51 2018 -0500 @@ -7,14 +7,18 @@ import unittest from mercurial import ( + pycompat, util, ) atomictempfile = util.atomictempfile +if pycompat.ispy3: + xrange = range + class testatomictempfile(unittest.TestCase): def setUp(self): - self._testdir = tempfile.mkdtemp('atomictempfiletest') - self._filename = os.path.join(self._testdir, 'testfilename') + self._testdir = tempfile.mkdtemp(b'atomictempfiletest') + self._filename = os.path.join(self._testdir, b'testfilename') def tearDown(self): shutil.rmtree(self._testdir, True) @@ -24,14 +28,14 @@ self.assertFalse(os.path.isfile(self._filename)) tempfilename = file._tempname self.assertTrue(tempfilename in glob.glob( - os.path.join(self._testdir, '.testfilename-*'))) + os.path.join(self._testdir, b'.testfilename-*'))) file.write(b'argh\n') file.close() self.assertTrue(os.path.isfile(self._filename)) self.assertTrue(tempfilename not in glob.glob( - os.path.join(self._testdir, '.testfilename-*'))) + os.path.join(self._testdir, b'.testfilename-*'))) # discard() removes the temp file without making the write permanent def testdiscard(self): @@ -42,7 +46,7 @@ file.discard() self.assertFalse(os.path.isfile(self._filename)) - self.assertTrue(basename not in os.listdir('.')) + self.assertTrue(basename not in os.listdir(b'.')) # if a programmer screws up and passes bad args to atomictempfile, they # get a plain ordinary TypeError, not infinite recursion @@ -54,7 +58,7 @@ def testcheckambig(self): def atomicwrite(checkambig): f = atomictempfile(self._filename, checkambig=checkambig) - f.write('FOO') + f.write(b'FOO') f.close() # try some times, because reproduction of ambiguity depends on @@ -93,27 +97,27 @@ def testread(self): with open(self._filename, 'wb') as f: f.write(b'foobar\n') - file = atomictempfile(self._filename, mode='rb') + file = atomictempfile(self._filename, mode=b'rb') self.assertTrue(file.read(), 
b'foobar\n') file.discard() def testcontextmanagersuccess(self): """When the context closes, the file is closed""" - with atomictempfile('foo') as f: - self.assertFalse(os.path.isfile('foo')) + with atomictempfile(b'foo') as f: + self.assertFalse(os.path.isfile(b'foo')) f.write(b'argh\n') - self.assertTrue(os.path.isfile('foo')) + self.assertTrue(os.path.isfile(b'foo')) def testcontextmanagerfailure(self): """On exception, the file is discarded""" try: - with atomictempfile('foo') as f: - self.assertFalse(os.path.isfile('foo')) + with atomictempfile(b'foo') as f: + self.assertFalse(os.path.isfile(b'foo')) f.write(b'argh\n') raise ValueError except ValueError: pass - self.assertFalse(os.path.isfile('foo')) + self.assertFalse(os.path.isfile(b'foo')) if __name__ == '__main__': import silenttestrunner
--- a/tests/test-basic.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-basic.t Sun Mar 04 10:42:51 2018 -0500 @@ -59,7 +59,7 @@ $ cat <<EOF > update_to_rev0.py > from mercurial import ui, hg, commands > myui = ui.ui.load() - > repo = hg.repository(myui, path='.') + > repo = hg.repository(myui, path=b'.') > commands.update(myui, repo, rev=0) > EOF $ hg up null @@ -87,6 +87,13 @@ checking files 1 files, 1 changesets, 1 total revisions +Repository root: + + $ hg root + $TESTTMP/t + $ hg log -l1 -T '{reporoot}\n' + $TESTTMP/t + At the end... $ cd ..
--- a/tests/test-bookmarks.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-bookmarks.t Sun Mar 04 10:42:51 2018 -0500 @@ -980,14 +980,14 @@ > tr = orig(self, desc, report) > def sleep(*args, **kwargs): > retry = 20 - > while retry > 0 and not os.path.exists("$TESTTMP/unpause"): + > while retry > 0 and not os.path.exists(b"$TESTTMP/unpause"): > retry -= 1 > time.sleep(0.5) - > if os.path.exists("$TESTTMP/unpause"): - > os.remove("$TESTTMP/unpause") + > if os.path.exists(b"$TESTTMP/unpause"): + > os.remove(b"$TESTTMP/unpause") > # It is important that this finalizer start with 'a', so it runs before > # the changelog finalizer appends to the changelog. - > tr.addfinalize('a-sleep', sleep) + > tr.addfinalize(b'a-sleep', sleep) > return tr > > def extsetup(ui):
--- a/tests/test-bundle2-exchange.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-bundle2-exchange.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,3 +1,13 @@ +#testcases sshv1 sshv2 + +#if sshv2 + $ cat >> $HGRCPATH << EOF + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF +#endif + Test exchange of common information using bundle2
--- a/tests/test-bundle2-pushback.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-bundle2-pushback.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,3 +1,13 @@ +#testcases sshv1 sshv2 + +#if sshv2 + $ cat >> $HGRCPATH << EOF + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF +#endif + $ cat > bundle2.py << EOF > """A small extension to test bundle2 pushback parts. > Current bundle2 implementation doesn't provide a way to generate those
--- a/tests/test-bundle2-remote-changegroup.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-bundle2-remote-changegroup.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,5 +1,15 @@ #require killdaemons +#testcases sshv1 sshv2 + +#if sshv2 + $ cat >> $HGRCPATH << EOF + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF +#endif + Create an extension to test bundle2 remote-changegroup parts $ cat > bundle2.py << EOF
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-cappedreader.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,91 @@ +from __future__ import absolute_import, print_function + +import io +import unittest + +from mercurial import ( + util, +) + +class CappedReaderTests(unittest.TestCase): + def testreadfull(self): + source = io.BytesIO(b'x' * 100) + + reader = util.cappedreader(source, 10) + res = reader.read(10) + self.assertEqual(res, b'x' * 10) + self.assertEqual(source.tell(), 10) + source.seek(0) + + reader = util.cappedreader(source, 15) + res = reader.read(16) + self.assertEqual(res, b'x' * 15) + self.assertEqual(source.tell(), 15) + source.seek(0) + + reader = util.cappedreader(source, 100) + res = reader.read(100) + self.assertEqual(res, b'x' * 100) + self.assertEqual(source.tell(), 100) + source.seek(0) + + reader = util.cappedreader(source, 50) + res = reader.read() + self.assertEqual(res, b'x' * 50) + self.assertEqual(source.tell(), 50) + source.seek(0) + + def testreadnegative(self): + source = io.BytesIO(b'x' * 100) + + reader = util.cappedreader(source, 20) + res = reader.read(-1) + self.assertEqual(res, b'x' * 20) + self.assertEqual(source.tell(), 20) + source.seek(0) + + reader = util.cappedreader(source, 100) + res = reader.read(-1) + self.assertEqual(res, b'x' * 100) + self.assertEqual(source.tell(), 100) + source.seek(0) + + def testreadmultiple(self): + source = io.BytesIO(b'x' * 100) + + reader = util.cappedreader(source, 10) + for i in range(10): + res = reader.read(1) + self.assertEqual(res, b'x') + self.assertEqual(source.tell(), i + 1) + + self.assertEqual(source.tell(), 10) + res = reader.read(1) + self.assertEqual(res, b'') + self.assertEqual(source.tell(), 10) + source.seek(0) + + reader = util.cappedreader(source, 45) + for i in range(4): + res = reader.read(10) + self.assertEqual(res, b'x' * 10) + self.assertEqual(source.tell(), (i + 1) * 10) + + res = reader.read(10) + self.assertEqual(res, b'x' * 5) + 
self.assertEqual(source.tell(), 45) + + def readlimitpasteof(self): + source = io.BytesIO(b'x' * 100) + + reader = util.cappedreader(source, 1024) + res = reader.read(1000) + self.assertEqual(res, b'x' * 100) + self.assertEqual(source.tell(), 100) + res = reader.read(1000) + self.assertEqual(res, b'') + self.assertEqual(source.tell(), 100) + +if __name__ == '__main__': + import silenttestrunner + silenttestrunner.main(__name__)
--- a/tests/test-check-code.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-check-code.t Sun Mar 04 10:42:51 2018 -0500 @@ -13,8 +13,6 @@ > -X mercurial/thirdparty \ > | sed 's-\\-/-g' | "$check_code" --warnings --per-file=0 - || false Skipping i18n/polib.py it has no-che?k-code (glob) - Skipping mercurial/httpclient/__init__.py it has no-che?k-code (glob) - Skipping mercurial/httpclient/_readers.py it has no-che?k-code (glob) Skipping mercurial/statprof.py it has no-che?k-code (glob) Skipping tests/badserverext.py it has no-che?k-code (glob)
--- a/tests/test-check-help.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-check-help.t Sun Mar 04 10:42:51 2018 -0500 @@ -10,9 +10,9 @@ > import os, msvcrt > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) > topics = set() - > topicre = re.compile(r':hg:`help ([a-z0-9\-.]+)`') + > topicre = re.compile(br':hg:`help ([a-z0-9\-.]+)`') > for fname in sys.argv: - > with open(fname) as f: + > with open(fname, 'rb') as f: > topics.update(m.group(1) for m in topicre.finditer(f.read())) > for s in sorted(topics): > print(s)
--- a/tests/test-check-interfaces.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-check-interfaces.py Sun Mar 04 10:42:51 2018 -0500 @@ -51,10 +51,6 @@ pass # Facilitates testing sshpeer without requiring an SSH server. -class testingsshpeer(sshpeer.sshpeer): - def _validaterepo(self, *args, **kwargs): - pass - class badpeer(httppeer.httppeer): def __init__(self): super(badpeer, self).__init__(uimod.ui(), 'http://localhost') @@ -63,13 +59,20 @@ def badmethod(self): pass +class dummypipe(object): + def close(self): + pass + def main(): ui = uimod.ui() checkobject(badpeer()) checkobject(httppeer.httppeer(ui, 'http://localhost')) checkobject(localrepo.localpeer(dummyrepo())) - checkobject(testingsshpeer(ui, 'ssh://localhost/foo')) + checkobject(sshpeer.sshv1peer(ui, 'ssh://localhost/foo', None, dummypipe(), + dummypipe(), None, None)) + checkobject(sshpeer.sshv2peer(ui, 'ssh://localhost/foo', None, dummypipe(), + dummypipe(), None, None)) checkobject(bundlerepo.bundlepeer(dummyrepo())) checkobject(statichttprepo.statichttppeer(dummyrepo())) checkobject(unionrepo.unionpeer(dummyrepo()))
--- a/tests/test-clone.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-clone.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,3 +1,13 @@ +#testcases sshv1 sshv2 + +#if sshv2 + $ cat >> $HGRCPATH << EOF + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF +#endif + Prepare repo a: $ hg init a @@ -10,7 +20,7 @@ Create a non-inlined filelog: - $ $PYTHON -c 'file("data1", "wb").write("".join("%s\n" % x for x in range(10000)))' + $ $PYTHON -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))' $ for j in 0 1 2 3 4 5 6 7 8 9; do > cat data1 >> b > hg commit -m test @@ -1142,12 +1152,14 @@ #if windows $ hg clone "ssh://%26touch%20owned%20/" --debug running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio" + sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg! [255] $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio" + sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg! @@ -1155,12 +1167,14 @@ #else $ hg clone "ssh://%3btouch%20owned%20/" --debug running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio' + sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg! [255] $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio' + sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg! 
@@ -1169,6 +1183,7 @@ $ hg clone "ssh://v-alid.example.com/" --debug running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re) + sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !) sending hello command sending between command abort: no suitable response from remote hg!
--- a/tests/test-clonebundles.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-clonebundles.t Sun Mar 04 10:42:51 2018 -0500 @@ -53,7 +53,7 @@ $ echo 'http://does.not.exist/bundle.hg' > server/.hg/clonebundles.manifest $ hg clone http://localhost:$HGPORT 404-url applying clone bundle from http://does.not.exist/bundle.hg - error fetching bundle: (.* not known|No address associated with hostname) (re) (no-windows !) + error fetching bundle: (.* not known|(\[Errno -?\d+])? No address associated with hostname) (re) (no-windows !) error fetching bundle: [Errno 11004] getaddrinfo failed (windows !) abort: error applying bundle (if this error persists, consider contacting the server operator or disable clone bundles via "--config ui.clonebundles=false")
--- a/tests/test-command-template.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-command-template.t Sun Mar 04 10:42:51 2018 -0500 @@ -214,6 +214,8 @@ abort: template resource not available: ctx [255] + $ hg config -T '{author}' + Quoting for ui.logtemplate $ hg tip --config "ui.logtemplate={rev}\n" @@ -2215,9 +2217,9 @@ >>> from __future__ import absolute_import >>> import datetime - >>> fp = open('a', 'w') + >>> fp = open('a', 'wb') >>> n = datetime.datetime.now() + datetime.timedelta(366 * 7) - >>> fp.write('%d-%d-%d 00:00' % (n.year, n.month, n.day)) + >>> fp.write(b'%d-%d-%d 00:00' % (n.year, n.month, n.day)) and None >>> fp.close() $ hg add a $ hg commit -m future -d "`cat a`" @@ -2232,6 +2234,10 @@ $ hg debugtemplate '{"foo/bar"|basename}|{"foo/"|basename}|{"foo"|basename}|\n' bar||foo| + $ hg debugtemplate '{"foo/bar"|dirname}|{"foo/"|dirname}|{"foo"|dirname}|\n' + foo|foo|| + $ hg debugtemplate '{"foo/bar"|stripdir}|{"foo/"|stripdir}|{"foo"|stripdir}|\n' + foo|foo|foo| Add a dummy commit to make up for the instability of the above: @@ -2760,19 +2766,29 @@ $ hg log -T '{date' hg: parse error at 1: unterminated template expansion + ({date + ^ here) [255] $ hg log -T '{date(}' hg: parse error at 7: not a prefix: end + ({date(} + ^ here) [255] $ hg log -T '{date)}' hg: parse error at 5: invalid token + ({date)} + ^ here) [255] $ hg log -T '{date date}' hg: parse error at 6: invalid token + ({date date} + ^ here) [255] $ hg log -T '{}' hg: parse error at 2: not a prefix: end + ({} + ^ here) [255] $ hg debugtemplate -v '{()}' (template @@ -2821,10 +2837,14 @@ $ hg log -T '{"date' hg: parse error at 2: unterminated string + ({"date + ^ here) [255] $ hg log -T '{"foo{date|?}"}' hg: parse error at 11: syntax error + ({"foo{date|?}"} + ^ here) [255] Thrown an error if a template function doesn't exist @@ -3356,6 +3376,8 @@ -4 $ hg debugtemplate '{(-)}\n' hg: parse error at 3: not a prefix: ) + ({(-)}\n + ^ here) [255] $ hg debugtemplate '{(-a)}\n' hg: parse 
error: negation needs an integer argument @@ -3521,6 +3543,8 @@ foo $ hg log -r 2 -T '{if(rev, "{if(rev, \")}")}\n' hg: parse error at 21: unterminated string + ({if(rev, "{if(rev, \")}")}\n + ^ here) [255] $ hg log -r 2 -T '{if(rev, \"\\"")}\n' hg: parse error: trailing \ in string @@ -4432,7 +4456,7 @@ hg: parse error: trailing \ in string [255] $ hg log -T "\\xy" -R a - hg: parse error: invalid \x escape + hg: parse error: invalid \x escape* (glob) [255] json filter should escape HTML tags so that the output can be embedded in hgweb: @@ -4567,8 +4591,8 @@ $ hg init nonascii $ cd nonascii $ $PYTHON <<EOF - > open('latin1', 'w').write('\xe9') - > open('utf-8', 'w').write('\xc3\xa9') + > open('latin1', 'wb').write(b'\xe9') + > open('utf-8', 'wb').write(b'\xc3\xa9') > EOF $ HGENCODING=utf-8 hg branch -q `cat utf-8` $ HGENCODING=utf-8 hg ci -qAm "non-ascii branch: `cat utf-8`" utf-8 @@ -4616,9 +4640,9 @@ > > templatefunc = registrar.templatefunc() > - > @templatefunc('custom()') + > @templatefunc(b'custom()') > def custom(context, mapping, args): - > return 'custom' + > return b'custom' > EOF $ cat <<EOF > .hg/hgrc > [extensions]
--- a/tests/test-commandserver.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-commandserver.t Sun Mar 04 10:42:51 2018 -0500 @@ -211,7 +211,6 @@ ui.slash=True ui.interactive=False ui.mergemarkers=detailed - ui.usehttp2=true (?) ui.foo=bar ui.nontty=true web.address=localhost @@ -221,7 +220,6 @@ ui.slash=True ui.interactive=False ui.mergemarkers=detailed - ui.usehttp2=true (?) ui.nontty=true $ rm -R foo @@ -411,7 +409,7 @@ ... # load _phasecache._phaserevs and _phasesets ... runcommand(server, ['log', '-qr', 'draft()']) ... # create draft commits by another process - ... for i in xrange(5, 7): + ... for i in range(5, 7): ... f = open('a', 'ab') ... f.seek(0, os.SEEK_END) ... f.write('a\n')
--- a/tests/test-commit.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-commit.t Sun Mar 04 10:42:51 2018 -0500 @@ -644,14 +644,14 @@ $ cat > evil-commit.py <<EOF > from __future__ import absolute_import > from mercurial import context, hg, node, ui as uimod - > notrc = u".h\u200cg".encode('utf-8') + '/hgrc' + > notrc = u".h\u200cg".encode('utf-8') + b'/hgrc' > u = uimod.ui.load() - > r = hg.repository(u, '.') + > r = hg.repository(u, b'.') > def filectxfn(repo, memctx, path): > return context.memfilectx(repo, memctx, path, - > '[hooks]\nupdate = echo owned') - > c = context.memctx(r, [r['tip'].node(), node.nullid], - > 'evil', [notrc], filectxfn, 0) + > b'[hooks]\nupdate = echo owned') + > c = context.memctx(r, [r[b'tip'].node(), node.nullid], + > b'evil', [notrc], filectxfn, 0) > r.commitctx(c) > EOF $ $PYTHON evil-commit.py @@ -670,14 +670,14 @@ $ cat > evil-commit.py <<EOF > from __future__ import absolute_import > from mercurial import context, hg, node, ui as uimod - > notrc = "HG~1/hgrc" + > notrc = b"HG~1/hgrc" > u = uimod.ui.load() - > r = hg.repository(u, '.') + > r = hg.repository(u, b'.') > def filectxfn(repo, memctx, path): > return context.memfilectx(repo, memctx, path, - > '[hooks]\nupdate = echo owned') - > c = context.memctx(r, [r['tip'].node(), node.nullid], - > 'evil', [notrc], filectxfn, 0) + > b'[hooks]\nupdate = echo owned') + > c = context.memctx(r, [r[b'tip'].node(), node.nullid], + > b'evil', [notrc], filectxfn, 0) > r.commitctx(c) > EOF $ $PYTHON evil-commit.py @@ -690,14 +690,14 @@ $ cat > evil-commit.py <<EOF > from __future__ import absolute_import > from mercurial import context, hg, node, ui as uimod - > notrc = "HG8B6C~2/hgrc" + > notrc = b"HG8B6C~2/hgrc" > u = uimod.ui.load() - > r = hg.repository(u, '.') + > r = hg.repository(u, b'.') > def filectxfn(repo, memctx, path): > return context.memfilectx(repo, memctx, path, - > '[hooks]\nupdate = echo owned') - > c = context.memctx(r, [r['tip'].node(), node.nullid], - > 'evil', 
[notrc], filectxfn, 0) + > b'[hooks]\nupdate = echo owned') + > c = context.memctx(r, [r[b'tip'].node(), node.nullid], + > b'evil', [notrc], filectxfn, 0) > r.commitctx(c) > EOF $ $PYTHON evil-commit.py
--- a/tests/test-completion.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-completion.t Sun Mar 04 10:42:51 2018 -0500 @@ -102,6 +102,7 @@ debugnamecomplete debugobsolete debugpathcomplete + debugpeer debugpickmergetool debugpushkey debugpvec @@ -110,6 +111,7 @@ debugrename debugrevlog debugrevspec + debugserve debugsetparents debugssl debugsub @@ -119,6 +121,7 @@ debugupgraderepo debugwalk debugwireargs + debugwireproto Do not show the alias of a debug command if there are other candidates (this should hide rawcommit) @@ -281,6 +284,7 @@ debugnamecomplete: debugobsolete: flags, record-parents, rev, exclusive, index, delete, date, user, template debugpathcomplete: full, normal, added, removed + debugpeer: debugpickmergetool: rev, changedelete, include, exclude, tool debugpushkey: debugpvec: @@ -289,6 +293,7 @@ debugrename: rev debugrevlog: changelog, manifest, dir, dump debugrevspec: optimize, show-revs, show-set, show-stage, no-optimized, verify-optimized + debugserve: sshstdio, logiofd, logiofile debugsetparents: debugssl: debugsub: rev @@ -298,6 +303,7 @@ debugupgraderepo: optimize, run debugwalk: include, exclude debugwireargs: three, four, five, ssh, remotecmd, insecure + debugwireproto: localssh, peer, noreadstderr, ssh, remotecmd, insecure files: rev, print0, include, exclude, template, subrepos graft: rev, continue, edit, log, force, currentdate, currentuser, date, user, tool, dry-run grep: print0, all, text, follow, ignore-case, files-with-matches, line-number, rev, user, date, template, include, exclude
--- a/tests/test-conflict.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-conflict.t Sun Mar 04 10:42:51 2018 -0500 @@ -138,9 +138,9 @@ $ hg up -q --clean . $ $PYTHON <<EOF - > fp = open('logfile', 'w') - > fp.write('12345678901234567890123456789012345678901234567890' + - > '1234567890') # there are 5 more columns for 80 columns + > fp = open('logfile', 'wb') + > fp.write(b'12345678901234567890123456789012345678901234567890' + + > b'1234567890') # there are 5 more columns for 80 columns > > # 2 x 4 = 8 columns, but 3 x 4 = 12 bytes > fp.write(u'\u3042\u3044\u3046\u3048'.encode('utf-8'))
--- a/tests/test-context-metadata.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-context-metadata.t Sun Mar 04 10:42:51 2018 -0500 @@ -13,18 +13,19 @@ $ cat > metaedit.py <<EOF > from __future__ import absolute_import - > from mercurial import context, registrar + > from mercurial import context, pycompat, registrar > cmdtable = {} > command = registrar.command(cmdtable) - > @command('metaedit') + > @command(b'metaedit') > def metaedit(ui, repo, arg): > # Modify commit message to "FOO" - > with repo.wlock(), repo.lock(), repo.transaction('metaedit'): - > old = repo['.'] - > kwargs = dict(s.split('=', 1) for s in arg.split(';')) + > with repo.wlock(), repo.lock(), repo.transaction(b'metaedit'): + > old = repo[b'.'] + > kwargs = dict(s.split(b'=', 1) for s in arg.split(b';')) > if 'parents' in kwargs: - > kwargs['parents'] = kwargs['parents'].split(',') - > new = context.metadataonlyctx(repo, old, **kwargs) + > kwargs[b'parents'] = kwargs[b'parents'].split(b',') + > new = context.metadataonlyctx(repo, old, + > **pycompat.strkwargs(kwargs)) > new.commit() > EOF $ hg --config extensions.metaedit=$TESTTMP/metaedit.py metaedit 'text=Changed'
--- a/tests/test-contrib-perf.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-contrib-perf.t Sun Mar 04 10:42:51 2018 -0500 @@ -114,6 +114,7 @@ perftags (no help text available) perftemplating (no help text available) + perfunidiff benchmark a unified diff between revisions perfvolatilesets benchmark the computation of various volatile set perfwalk (no help text available) @@ -126,6 +127,8 @@ $ hg perfannotate a $ hg perfbdiff -c 1 $ hg perfbdiff --alldata 1 + $ hg perfunidiff -c 1 + $ hg perfunidiff --alldata 1 $ hg perfbookmarks $ hg perfbranchmap $ hg perfcca
--- a/tests/test-contrib.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-contrib.t Sun Mar 04 10:42:51 2018 -0500 @@ -201,7 +201,7 @@ binary file - $ $PYTHON -c "f = file('binary-local', 'w'); f.write('\x00'); f.close()" + $ $PYTHON -c "f = open('binary-local', 'w'); f.write('\x00'); f.close()" $ cat orig >> binary-local $ $PYTHON simplemerge -p binary-local base other warning: binary-local looks like a binary file.
--- a/tests/test-convert-git.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-convert-git.t Sun Mar 04 10:42:51 2018 -0500 @@ -420,7 +420,7 @@ $ mkdir git-repo3 $ cd git-repo3 $ git init-db >/dev/null 2>/dev/null - $ $PYTHON -c 'file("b", "wb").write("".join([chr(i) for i in range(256)])*16)' + $ $PYTHON -c 'import struct; open("b", "wb").write(b"".join([struct.Struct(">B").pack(i) for i in range(256)])*16)' $ git add b $ commit -a -m addbinary $ cd .. @@ -437,7 +437,7 @@ $ cd git-repo3-hg $ hg up -C 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ $PYTHON -c 'print len(file("b", "rb").read())' + $ $PYTHON -c 'print len(open("b", "rb").read())' 4096 $ cd ..
--- a/tests/test-convert-hg-source.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-convert-hg-source.t Sun Mar 04 10:42:51 2018 -0500 @@ -126,9 +126,9 @@ $ cat > rewrite.py <<EOF > import sys > # Interlace LF and CRLF - > lines = [(l.rstrip() + ((i % 2) and '\n' or '\r\n')) - > for i, l in enumerate(file(sys.argv[1]))] - > file(sys.argv[1], 'wb').write(''.join(lines)) + > lines = [(l.rstrip() + ((i % 2) and b'\n' or b'\r\n')) + > for i, l in enumerate(open(sys.argv[1], 'rb'))] + > open(sys.argv[1], 'wb').write(b''.join(lines)) > EOF $ $PYTHON rewrite.py new/.hg/shamap $ cd orig
--- a/tests/test-convert-mtn.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-convert-mtn.t Sun Mar 04 10:42:51 2018 -0500 @@ -43,7 +43,7 @@ $ mkdir dir $ echo b > dir/b $ echo d > dir/d - $ $PYTHON -c 'file("bin", "wb").write("a\\x00b")' + $ $PYTHON -c 'open("bin", "wb").write(b"a\\x00b")' $ echo c > c $ mtn add a dir/b dir/d c bin mtn: adding 'a' to workspace manifest @@ -65,7 +65,7 @@ $ echo b >> dir/b $ mtn drop c mtn: dropping 'c' from workspace manifest - $ $PYTHON -c 'file("bin", "wb").write("b\\x00c")' + $ $PYTHON -c 'open("bin", "wb").write(b"b\\x00c")' $ mtn ci -m update1 mtn: beginning commit on branch 'com.selenic.test' mtn: committed revision 51d0a982464573a2a2cf5ee2c9219c652aaebeff @@ -217,8 +217,8 @@ test large file support (> 32kB) - >>> fp = file('large-file', 'wb') - >>> for x in xrange(10000): fp.write('%d\n' % x) + >>> fp = open('large-file', 'wb') + >>> for x in range(10000): fp.write(b'%d\n' % x) >>> fp.close() $ md5sum.py large-file 5d6de8a95c3b6bf9e0ffb808ba5299c1 large-file
--- a/tests/test-convert-p4-filetypes.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-convert-p4-filetypes.t Sun Mar 04 10:42:51 2018 -0500 @@ -52,7 +52,7 @@ > p4 add -t $T file_$T2 > ;; > binary*) - > $PYTHON -c "file('file_$T2', 'wb').write('this is $T')" + > $PYTHON -c "open('file_$T2', 'wb').write(b'this is $T')" > p4 add -t $T file_$T2 > ;; > *)
--- a/tests/test-debugcommands.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-debugcommands.t Sun Mar 04 10:42:51 2018 -0500 @@ -381,3 +381,25 @@ https stream v2 + +Test debugpeer + + $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" debugpeer ssh://user@dummy/debugrevlog + url: ssh://user@dummy/debugrevlog + local: no + pushable: yes + + $ hg --config ui.ssh="\"$PYTHON\" \"$TESTDIR/dummyssh\"" --debug debugpeer ssh://user@dummy/debugrevlog + running "*" "*/tests/dummyssh" 'user@dummy' 'hg -R debugrevlog serve --stdio' (glob) (no-windows !) + running "*" "*\tests/dummyssh" "user@dummy" "hg -R debugrevlog serve --stdio" (glob) (windows !) + devel-peer-request: hello + sending hello command + devel-peer-request: between + devel-peer-request: pairs: 81 bytes + sending between command + remote: 384 + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + remote: 1 + url: ssh://user@dummy/debugrevlog + local: no + pushable: yes
--- a/tests/test-debugextensions.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-debugextensions.t Sun Mar 04 10:42:51 2018 -0500 @@ -5,8 +5,8 @@ $ cat > extwithoutinfos.py <<EOF > EOF $ cat > extwithinfos.py <<EOF - > testedwith = '3.0 3.1 3.2.1' - > buglink = 'https://example.org/bts' + > testedwith = b'3.0 3.1 3.2.1' + > buglink = b'https://example.org/bts' > EOF $ cat >> $HGRCPATH <<EOF
--- a/tests/test-default-push.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-default-push.t Sun Mar 04 10:42:51 2018 -0500 @@ -142,6 +142,8 @@ $ hg --config 'paths.default:pushrev=(' push pushing to file:/*/$TESTTMP/pushurlsource/../pushurldest (glob) hg: parse error at 1: not a prefix: end + (( + ^ here) [255] $ cd ..
--- a/tests/test-demandimport.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-demandimport.py Sun Mar 04 10:42:51 2018 -0500 @@ -31,6 +31,27 @@ l = rsub("'<[a-z]*>'", "'<whatever>'", l) return l +demandimport.disable() +os.environ['HGDEMANDIMPORT'] = 'disable' +# this enable call should not actually enable demandimport! +demandimport.enable() +from mercurial import node +print("node =", f(node)) +# now enable it for real +del os.environ['HGDEMANDIMPORT'] +demandimport.enable() + +# Test access to special attributes through demandmod proxy +from mercurial import error as errorproxy +print("errorproxy =", f(errorproxy)) +print("errorproxy.__doc__ = %r" + % (' '.join(errorproxy.__doc__.split()[:3]) + ' ...')) +print("errorproxy.__name__ = %r" % errorproxy.__name__) +# __name__ must be accessible via __dict__ so the relative imports can be +# resolved +print("errorproxy.__dict__['__name__'] = %r" % errorproxy.__dict__['__name__']) +print("errorproxy =", f(errorproxy)) + import os print("os =", f(os)) @@ -69,17 +90,6 @@ print("re.stderr =", f(re.stderr)) print("re =", f(re)) -# Test access to special attributes through demandmod proxy -from mercurial import pvec as pvecproxy -print("pvecproxy =", f(pvecproxy)) -print("pvecproxy.__doc__ = %r" - % (' '.join(pvecproxy.__doc__.split()[:3]) + ' ...')) -print("pvecproxy.__name__ = %r" % pvecproxy.__name__) -# __name__ must be accessible via __dict__ so the relative imports can be -# resolved -print("pvecproxy.__dict__['__name__'] = %r" % pvecproxy.__dict__['__name__']) -print("pvecproxy =", f(pvecproxy)) - import contextlib print("contextlib =", f(contextlib)) try: @@ -97,10 +107,3 @@ print("__import__('contextlib', ..., ['unknownattr']) =", f(contextlibimp)) print("hasattr(contextlibimp, 'unknownattr') =", util.safehasattr(contextlibimp, 'unknownattr')) - -demandimport.disable() -os.environ['HGDEMANDIMPORT'] = 'disable' -# this enable call should not actually enable demandimport! 
-demandimport.enable() -from mercurial import node -print("node =", f(node))
--- a/tests/test-demandimport.py.out Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-demandimport.py.out Sun Mar 04 10:42:51 2018 -0500 @@ -1,3 +1,9 @@ +node = <module 'mercurial.node' from '?'> +errorproxy = <unloaded module 'error'> +errorproxy.__doc__ = 'Mercurial exceptions. This ...' +errorproxy.__name__ = 'mercurial.error' +errorproxy.__dict__['__name__'] = 'mercurial.error' +errorproxy = <proxied module 'error'> os = <unloaded module 'os'> os.system = <built-in function system> os = <module 'os' from '?'> @@ -18,13 +24,7 @@ re = <unloaded module 'sys'> re.stderr = <open file '<whatever>', mode 'w' at 0x?> re = <proxied module 'sys'> -pvecproxy = <unloaded module 'pvec'> -pvecproxy.__doc__ = 'A "pvec" is ...' -pvecproxy.__name__ = 'mercurial.pvec' -pvecproxy.__dict__['__name__'] = 'mercurial.pvec' -pvecproxy = <proxied module 'pvec'> contextlib = <unloaded module 'contextlib'> contextlib.unknownattr = ImportError: cannot import name unknownattr __import__('contextlib', ..., ['unknownattr']) = <module 'contextlib' from '?'> hasattr(contextlibimp, 'unknownattr') = False -node = <module 'mercurial.node' from '?'>
--- a/tests/test-devel-warnings.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-devel-warnings.t Sun Mar 04 10:42:51 2018 -0500 @@ -17,7 +17,7 @@ > > @command(b'buggytransaction', [], '') > def buggylocking(ui, repo): - > tr = repo.transaction('buggy') + > tr = repo.transaction(b'buggy') > # make sure we rollback the transaction as we don't want to rely on the__del__ > tr.release() > @@ -26,8 +26,8 @@ > """check that reentrance is fine""" > wl = repo.wlock() > lo = repo.lock() - > tr = repo.transaction('proper') - > tr2 = repo.transaction('proper') + > tr = repo.transaction(b'proper') + > tr2 = repo.transaction(b'proper') > lo2 = repo.lock() > wl2 = repo.wlock() > wl2.release() @@ -46,34 +46,34 @@ > > @command(b'no-wlock-write', [], '') > def nowlockwrite(ui, repo): - > with repo.vfs(b'branch', 'a'): + > with repo.vfs(b'branch', b'a'): > pass > > @command(b'no-lock-write', [], '') > def nolockwrite(ui, repo): - > with repo.svfs(b'fncache', 'a'): + > with repo.svfs(b'fncache', b'a'): > pass > > @command(b'stripintr', [], '') > def stripintr(ui, repo): > lo = repo.lock() - > tr = repo.transaction('foobar') + > tr = repo.transaction(b'foobar') > try: - > repair.strip(repo.ui, repo, [repo['.'].node()]) + > repair.strip(repo.ui, repo, [repo[b'.'].node()]) > finally: > lo.release() > @command(b'oldanddeprecated', [], '') > def oldanddeprecated(ui, repo): > """test deprecation warning API""" > def foobar(ui): - > ui.deprecwarn('foorbar is deprecated, go shopping', '42.1337') + > ui.deprecwarn(b'foorbar is deprecated, go shopping', b'42.1337') > foobar(ui) > @command(b'nouiwarning', [], '') > def nouiwarning(ui, repo): - > util.nouideprecwarn('this is a test', '13.37') + > util.nouideprecwarn(b'this is a test', b'13.37') > @command(b'programmingerror', [], '') > def programmingerror(ui, repo): - > raise error.ProgrammingError('something went wrong', hint='try again') + > raise error.ProgrammingError(b'something went wrong', hint=b'try again') > EOF $ cat << EOF >> 
$HGRCPATH @@ -331,7 +331,7 @@ $ hg nouiwarning $TESTTMP/buggylocking.py:*: DeprecationWarning: this is a test (glob) (compatibility will be dropped after Mercurial-13.37, update your code.) - util.nouideprecwarn('this is a test', '13.37') + util.nouideprecwarn(b'this is a test', b'13.37') (disabled outside of test run) @@ -350,25 +350,25 @@ > configtable = {} > configitem = registrar.configitem(configtable) > - > configitem('test', 'some', default='foo') - > configitem('test', 'dynamic', default=configitems.dynamicdefault) - > configitem('test', 'callable', default=list) + > configitem(b'test', b'some', default=b'foo') + > configitem(b'test', b'dynamic', default=configitems.dynamicdefault) + > configitem(b'test', b'callable', default=list) > # overwrite a core config - > configitem('ui', 'quiet', default=False) - > configitem('ui', 'interactive', default=None) + > configitem(b'ui', b'quiet', default=False) + > configitem(b'ui', b'interactive', default=None) > > @command(b'buggyconfig') > def cmdbuggyconfig(ui, repo): - > repo.ui.config('ui', 'quiet', True) - > repo.ui.config('ui', 'interactive', False) - > repo.ui.config('test', 'some', 'bar') - > repo.ui.config('test', 'some', 'foo') - > repo.ui.config('test', 'dynamic', 'some-required-default') - > repo.ui.config('test', 'dynamic') - > repo.ui.config('test', 'callable', []) - > repo.ui.config('test', 'callable', 'foo') - > repo.ui.config('test', 'unregistered') - > repo.ui.config('unregistered', 'unregistered') + > repo.ui.config(b'ui', b'quiet', True) + > repo.ui.config(b'ui', b'interactive', False) + > repo.ui.config(b'test', b'some', b'bar') + > repo.ui.config(b'test', b'some', b'foo') + > repo.ui.config(b'test', b'dynamic', b'some-required-default') + > repo.ui.config(b'test', b'dynamic') + > repo.ui.config(b'test', b'callable', []) + > repo.ui.config(b'test', b'callable', b'foo') + > repo.ui.config(b'test', b'unregistered') + > repo.ui.config(b'unregistered', b'unregistered') > EOF $ hg --config 
"extensions.buggyconfig=${TESTTMP}/buggyconfig.py" buggyconfig
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-diff-antipatience.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,91 @@ +#testcases bdiff xdiff + +#if xdiff +#require xdiff + $ cat >> $HGRCPATH <<EOF + > [experimental] + > xdiff = true + > EOF +#endif + +Test case that makes use of the weakness of patience diff algorithm + + $ hg init + >>> open('a', 'wb').write(b'\n'.join(list(b'a' + b'x' * 10 + b'u' + b'x' * 30 + b'a\n'))) + $ hg commit -m 1 -A a + >>> open('a', 'wb').write(b'\n'.join(list(b'b' + b'x' * 30 + b'u' + b'x' * 10 + b'b\n'))) +#if xdiff + $ hg diff + diff -r f0aeecb49805 a + --- a/a Thu Jan 01 00:00:00 1970 +0000 + +++ b/a Thu Jan 01 00:00:00 1970 +0000 + @@ -1,4 +1,4 @@ + -a + +b + x + x + x + @@ -9,7 +9,6 @@ + x + x + x + -u + x + x + x + @@ -30,6 +29,7 @@ + x + x + x + +u + x + x + x + @@ -40,5 +40,5 @@ + x + x + x + -a + +b + +#else + $ hg diff + diff -r f0aeecb49805 a + --- a/a Thu Jan 01 00:00:00 1970 +0000 + +++ b/a Thu Jan 01 00:00:00 1970 +0000 + @@ -1,15 +1,4 @@ + -a + -x + -x + -x + -x + -x + -x + -x + -x + -x + -x + -u + +b + x + x + x + @@ -40,5 +29,16 @@ + x + x + x + -a + +u + +x + +x + +x + +x + +x + +x + +x + +x + +x + +x + +b + +#endif
--- a/tests/test-diff-binary-file.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-diff-binary-file.t Sun Mar 04 10:42:51 2018 -0500 @@ -81,7 +81,7 @@ $ cat > writebin.py <<EOF > import sys > path = sys.argv[1] - > open(path, 'wb').write('\x00\x01\x02\x03') + > open(path, 'wb').write(b'\x00\x01\x02\x03') > EOF $ $PYTHON writebin.py binfile.bin $ hg add binfile.bin
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-diff-indent-heuristic.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,339 @@ +#testcases bdiff xdiff + +#if xdiff +#require xdiff + $ cat >> $HGRCPATH <<EOF + > [experimental] + > xdiff = true + > EOF +#endif + + $ hg init + + $ cat > a.c <<'EOF' + > /* + > * This function returns 1. + > */ + > int f() { + > return 1; + > } + > /* + > * This function returns 2. + > */ + > int g() { + > return 2; + > } + > /* + > * This function returns 3. + > */ + > int h() { + > return 3; + > } + > EOF + + $ cat > b.c <<'EOF' + > if (x) { + > do_something(); + > } + > + > if (y) { + > do_something_else(); + > } + > EOF + + $ cat > c.rb <<'EOF' + > #!ruby + > ["foo", "bar", "baz"].map do |i| + > i.upcase + > end + > EOF + + $ cat > d.py <<'EOF' + > try: + > import foo + > except ImportError: + > pass + > try: + > import bar + > except ImportError: + > pass + > EOF + +The below two files are taken from git: t/t4061-diff-indent.sh + + $ cat > spaces.txt <<'EOF' + > 1 + > 2 + > a + > + > b + > 3 + > 4 + > EOF + + $ cat > functions.c <<'EOF' + > 1 + > 2 + > /* function */ + > foo() { + > foo + > } + > + > 3 + > 4 + > EOF + + $ hg commit -m 1 -A . -q + + $ cat > a.c <<'EOF' + > /* + > * This function returns 1. + > */ + > int f() { + > return 1; + > } + > /* + > * This function returns 3. 
+ > */ + > int h() { + > return 3; + > } + > EOF + + $ cat > b.c <<'EOF' + > if (x) { + > do_something(); + > } + > + > if (y) { + > do_another_thing(); + > } + > + > if (y) { + > do_something_else(); + > } + > EOF + + $ cat > c.rb <<'EOF' + > #!ruby + > ["foo", "bar", "baz"].map do |i| + > i + > end + > ["foo", "bar", "baz"].map do |i| + > i.upcase + > end + > EOF + + $ cat > d.py <<'EOF' + > try: + > import foo + > except ImportError: + > pass + > try: + > import baz + > except ImportError: + > pass + > try: + > import bar + > except ImportError: + > pass + > EOF + + $ cat > spaces.txt <<'EOF' + > 1 + > 2 + > a + > + > b + > a + > + > b + > 3 + > 4 + > EOF + + $ cat > functions.c <<'EOF' + > 1 + > 2 + > /* function */ + > bar() { + > foo + > } + > + > /* function */ + > foo() { + > foo + > } + > + > 3 + > 4 + > EOF + +#if xdiff + $ hg diff --git + diff --git a/a.c b/a.c + --- a/a.c + +++ b/a.c + @@ -4,12 +4,6 @@ + int f() { + return 1; + } + -/* + - * This function returns 2. + - */ + -int g() { + - return 2; + -} + /* + * This function returns 3. 
+ */ + diff --git a/b.c b/b.c + --- a/b.c + +++ b/b.c + @@ -2,6 +2,10 @@ + do_something(); + } + + +if (y) { + + do_another_thing(); + +} + + + if (y) { + do_something_else(); + } + diff --git a/c.rb b/c.rb + --- a/c.rb + +++ b/c.rb + @@ -1,4 +1,7 @@ + #!ruby + +["foo", "bar", "baz"].map do |i| + + i + +end + ["foo", "bar", "baz"].map do |i| + i.upcase + end + diff --git a/d.py b/d.py + --- a/d.py + +++ b/d.py + @@ -2,6 +2,10 @@ + import foo + except ImportError: + pass + +try: + + import baz + +except ImportError: + + pass + try: + import bar + except ImportError: + diff --git a/functions.c b/functions.c + --- a/functions.c + +++ b/functions.c + @@ -1,5 +1,10 @@ + 1 + 2 + +/* function */ + +bar() { + + foo + +} + + + /* function */ + foo() { + foo + diff --git a/spaces.txt b/spaces.txt + --- a/spaces.txt + +++ b/spaces.txt + @@ -2,6 +2,9 @@ + 2 + a + + +b + +a + + + b + 3 + 4 +#else + $ hg diff --git + diff --git a/a.c b/a.c + --- a/a.c + +++ b/a.c + @@ -5,12 +5,6 @@ + return 1; + } + /* + - * This function returns 2. + - */ + -int g() { + - return 2; + -} + -/* + * This function returns 3. + */ + int h() { + diff --git a/b.c b/b.c + --- a/b.c + +++ b/b.c + @@ -3,5 +3,9 @@ + } + + if (y) { + + do_another_thing(); + +} + + + +if (y) { + do_something_else(); + } + diff --git a/c.rb b/c.rb + --- a/c.rb + +++ b/c.rb + @@ -1,4 +1,7 @@ + #!ruby + ["foo", "bar", "baz"].map do |i| + + i + +end + +["foo", "bar", "baz"].map do |i| + i.upcase + end + diff --git a/d.py b/d.py + --- a/d.py + +++ b/d.py + @@ -3,6 +3,10 @@ + except ImportError: + pass + try: + + import baz + +except ImportError: + + pass + +try: + import bar + except ImportError: + pass + diff --git a/functions.c b/functions.c + --- a/functions.c + +++ b/functions.c + @@ -1,6 +1,11 @@ + 1 + 2 + /* function */ + +bar() { + + foo + +} + + + +/* function */ + foo() { + foo + } + diff --git a/spaces.txt b/spaces.txt + --- a/spaces.txt + +++ b/spaces.txt + @@ -3,5 +3,8 @@ + a + + b + +a + + + +b + 3 + 4 +#endif
--- a/tests/test-diff-unified.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-diff-unified.t Sun Mar 04 10:42:51 2018 -0500 @@ -386,3 +386,73 @@ } $ cd .. + +Long function names should be abbreviated, but multi-byte character shouldn't +be broken up + + $ hg init longfunc + $ cd longfunc + + >>> with open('a', 'wb') as f: + ... f.write(b'a' * 39 + b'bb' + b'\n') + ... f.write(b' .\n' * 3) + ... f.write(b' 0 b\n') + ... f.write(b' .\n' * 3) + ... f.write(b'a' * 39 + b'\xc3\xa0' + b'\n') + ... f.write(b' .\n' * 3) + ... f.write(b' 0 a with grave (single code point)\n') + ... f.write(b' .\n' * 3) + ... f.write(b'a' * 39 + b'a\xcc\x80' + b'\n') + ... f.write(b' .\n' * 3) + ... f.write(b' 0 a with grave (composition)\n') + ... f.write(b' .\n' * 3) + $ hg ci -qAm0 + + >>> with open('a', 'wb') as f: + ... f.write(b'a' * 39 + b'bb' + b'\n') + ... f.write(b' .\n' * 3) + ... f.write(b' 1 b\n') + ... f.write(b' .\n' * 3) + ... f.write(b'a' * 39 + b'\xc3\xa0' + b'\n') + ... f.write(b' .\n' * 3) + ... f.write(b' 1 a with grave (single code point)\n') + ... f.write(b' .\n' * 3) + ... f.write(b'a' * 39 + b'a\xcc\x80' + b'\n') + ... f.write(b' .\n' * 3) + ... f.write(b' 1 a with grave (composition)\n') + ... f.write(b' .\n' * 3) + $ hg ci -m1 + + $ hg diff -c1 --nodates --show-function + diff -r 3e92dd6fa812 -r a256341606cb a + --- a/a + +++ b/a + @@ -2,7 +2,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaab + . + . + . + - 0 b + + 1 b + . + . + . + @@ -10,7 +10,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\xc3\xa0 (esc) + . + . + . + - 0 a with grave (single code point) + + 1 a with grave (single code point) + . + . + . + @@ -18,7 +18,7 @@ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\xcc\x80 (esc) + . + . + . + - 0 a with grave (composition) + + 1 a with grave (composition) + . + . + . + + $ cd ..
--- a/tests/test-dispatch.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-dispatch.py Sun Mar 04 10:42:51 2018 -0500 @@ -9,27 +9,27 @@ Prints command and result value, but does not handle quoting. """ - print("running: %s" % (cmd,)) + print(b"running: %s" % (cmd,)) req = dispatch.request(cmd.split()) result = dispatch.dispatch(req) - print("result: %r" % (result,)) + print(b"result: %r" % (result,)) -testdispatch("init test1") +testdispatch(b"init test1") os.chdir('test1') # create file 'foo', add and commit f = open('foo', 'wb') -f.write('foo\n') +f.write(b'foo\n') f.close() -testdispatch("add foo") -testdispatch("commit -m commit1 -d 2000-01-01 foo") +testdispatch(b"add foo") +testdispatch(b"commit -m commit1 -d 2000-01-01 foo") # append to file 'foo' and commit f = open('foo', 'ab') -f.write('bar\n') +f.write(b'bar\n') f.close() -testdispatch("commit -m commit2 -d 2000-01-02 foo") +testdispatch(b"commit -m commit2 -d 2000-01-02 foo") # check 88803a69b24 (fancyopts modified command table) -testdispatch("log -r 0") -testdispatch("log -r tip") +testdispatch(b"log -r 0") +testdispatch(b"log -r tip")
--- a/tests/test-doctest.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-doctest.py Sun Mar 04 10:42:51 2018 -0500 @@ -42,6 +42,7 @@ testmod('mercurial.changegroup') testmod('mercurial.changelog') +testmod('mercurial.cmdutil') testmod('mercurial.color') testmod('mercurial.config') testmod('mercurial.context')
--- a/tests/test-encoding-align.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-encoding-align.t Sun Mar 04 10:42:51 2018 -0500 @@ -6,16 +6,16 @@ $ cd t $ $PYTHON << EOF > # (byte, width) = (6, 4) - > s = "\xe7\x9f\xad\xe5\x90\x8d" + > s = b"\xe7\x9f\xad\xe5\x90\x8d" > # (byte, width) = (7, 7): odd width is good for alignment test - > m = "MIDDLE_" + > m = b"MIDDLE_" > # (byte, width) = (18, 12) - > l = "\xe9\x95\xb7\xe3\x81\x84\xe9\x95\xb7\xe3\x81\x84\xe5\x90\x8d\xe5\x89\x8d" - > f = file('s', 'w'); f.write(s); f.close() - > f = file('m', 'w'); f.write(m); f.close() - > f = file('l', 'w'); f.write(l); f.close() + > l = b"\xe9\x95\xb7\xe3\x81\x84\xe9\x95\xb7\xe3\x81\x84\xe5\x90\x8d\xe5\x89\x8d" + > f = open('s', 'wb'); f.write(s); f.close() + > f = open('m', 'wb'); f.write(m); f.close() + > f = open('l', 'wb'); f.write(l); f.close() > # instant extension to show list of options - > f = file('showoptlist.py', 'w'); f.write("""# encoding: utf-8 + > f = open('showoptlist.py', 'wb'); f.write(b"""# encoding: utf-8 > from mercurial import registrar > cmdtable = {} > command = registrar.command(cmdtable)
--- a/tests/test-encoding.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-encoding.t Sun Mar 04 10:42:51 2018 -0500 @@ -15,9 +15,9 @@ $ hg co 1 files updated, 0 files merged, 0 files removed, 0 files unresolved $ $PYTHON << EOF - > f = file('latin-1', 'w'); f.write("latin-1 e' encoded: \xe9"); f.close() - > f = file('utf-8', 'w'); f.write("utf-8 e' encoded: \xc3\xa9"); f.close() - > f = file('latin-1-tag', 'w'); f.write("\xe9"); f.close() + > f = open('latin-1', 'wb'); f.write(b"latin-1 e' encoded: \xe9"); f.close() + > f = open('utf-8', 'wb'); f.write(b"utf-8 e' encoded: \xc3\xa9"); f.close() + > f = open('latin-1-tag', 'wb'); f.write(b"\xe9"); f.close() > EOF should fail with encoding error
--- a/tests/test-eol.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-eol.t Sun Mar 04 10:42:51 2018 -0500 @@ -17,12 +17,12 @@ > msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) > except ImportError: > pass - > (old, new) = sys.argv[1] == 'LF' and ('\n', '\r\n') or ('\r\n', '\n') + > (old, new) = sys.argv[1] == 'LF' and (b'\n', b'\r\n') or (b'\r\n', b'\n') > print("%% switching encoding from %r to %r" % (old, new)) > for path in sys.argv[2:]: - > data = file(path, 'rb').read() + > data = open(path, 'rb').read() > data = data.replace(old, new) - > file(path, 'wb').write(data) + > open(path, 'wb').write(data) > EOF $ seteol () {
--- a/tests/test-export.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-export.t Sun Mar 04 10:42:51 2018 -0500 @@ -184,7 +184,49 @@ $ hg commit -m " !\"#$%&(,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]"'^'"_\`abcdefghijklmnopqrstuvwxyz{|}~" $ hg export -v -o %m.patch tip exporting patch: - ____________0123456789_______ABCDEFGHIJKLMNOPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz____.patch + ___________0123456789_______ABCDEFGHIJKLMNOPQRSTUVWXYZ______abcdefghijklmnopqrstuvwxyz____.patch + +Template fragments in file name: + + $ hg export -v -o '{node|shortest}.patch' tip + exporting patch: + 197e.patch + +Backslash should be preserved because it is a directory separator on Windows: + + $ mkdir out + $ hg export -v -o 'out\{node|shortest}.patch' tip + exporting patch: + out\197e.patch + +Still backslash is taken as an escape character in inner template strings: + + $ hg export -v -o '{"out\{foo}.patch"}' tip + exporting patch: + out{foo}.patch + +Invalid pattern in file name: + + $ hg export -o '%x.patch' tip + abort: invalid format spec '%x' in output filename + [255] + $ hg export -o '%' tip + abort: incomplete format spec in output filename + [255] + $ hg export -o '%{"foo"}' tip + abort: incomplete format spec in output filename + [255] + $ hg export -o '%m{' tip + hg: parse error at 3: unterminated template expansion + (%m{ + ^ here) + [255] + $ hg export -o '%\' tip + abort: invalid format spec '%\' in output filename + [255] + $ hg export -o '\%' tip + abort: incomplete format spec in output filename + [255] Catch exporting unknown revisions (especially empty revsets, see issue3353)
--- a/tests/test-extdiff.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-extdiff.t Sun Mar 04 10:42:51 2018 -0500 @@ -252,8 +252,8 @@ > #!$PYTHON > import time > time.sleep(1) # avoid unchanged-timestamp problems - > file('a/a', 'ab').write('edited\n') - > file('a/b', 'ab').write('edited\n') + > open('a/a', 'ab').write(b'edited\n') + > open('a/b', 'ab').write(b'edited\n') > EOT #if execbit @@ -424,7 +424,8 @@ Test handling of non-ASCII paths in generated docstrings (issue5301) - >>> open("u", "w").write("\xa5\xa5") + >>> with open("u", "wb") as f: + ... n = f.write(b"\xa5\xa5") $ U=`cat u` $ HGPLAIN=1 hg --config hgext.extdiff= --config extdiff.cmd.td=hi help -k xyzzy
--- a/tests/test-extension.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-extension.t Sun Mar 04 10:42:51 2018 -0500 @@ -7,22 +7,22 @@ > command = registrar.command(cmdtable) > configtable = {} > configitem = registrar.configitem(configtable) - > configitem('tests', 'foo', default="Foo") + > configitem(b'tests', b'foo', default=b"Foo") > def uisetup(ui): - > ui.write("uisetup called\\n") + > ui.write(b"uisetup called\\n") > ui.flush() > def reposetup(ui, repo): - > ui.write("reposetup called for %s\\n" % os.path.basename(repo.root)) - > ui.write("ui %s= repo.ui\\n" % (ui == repo.ui and "=" or "!")) + > ui.write(b"reposetup called for %s\\n" % os.path.basename(repo.root)) + > ui.write(b"ui %s= repo.ui\\n" % (ui == repo.ui and b"=" or b"!")) > ui.flush() - > @command(b'foo', [], 'hg foo') + > @command(b'foo', [], b'hg foo') > def foo(ui, *args, **kwargs): - > foo = ui.config('tests', 'foo') + > foo = ui.config(b'tests', b'foo') > ui.write(foo) - > ui.write("\\n") - > @command(b'bar', [], 'hg bar', norepo=True) + > ui.write(b"\\n") + > @command(b'bar', [], b'hg bar', norepo=True) > def bar(ui, *args, **kwargs): - > ui.write("Bar\\n") + > ui.write(b"Bar\\n") > EOF $ abspath=`pwd`/foobar.py @@ -440,12 +440,12 @@ > @command(b'showabsolute', [], norepo=True) > def showabsolute(ui, *args, **opts): > from absextroot import absolute - > ui.write('ABS: %s\n' % '\nABS: '.join(absolute.getresult())) + > ui.write(b'ABS: %s\n' % '\nABS: '.join(absolute.getresult())) > > @command(b'showrelative', [], norepo=True) > def showrelative(ui, *args, **opts): > from . 
import relative - > ui.write('REL: %s\n' % '\nREL: '.join(relative.getresult())) + > ui.write(b'REL: %s\n' % '\nREL: '.join(relative.getresult())) > > # import modules from external library > from extlibroot.lsub1.lsub2 import used as lused, unused as lunused @@ -564,11 +564,11 @@ > from mercurial import registrar > cmdtable = {} > command = registrar.command(cmdtable) - > @command(b'debugfoobar', [], 'hg debugfoobar') + > @command(b'debugfoobar', [], b'hg debugfoobar') > def debugfoobar(ui, repo, *args, **opts): > "yet another debug command" > pass - > @command(b'foo', [], 'hg foo') + > @command(b'foo', [], b'hg foo') > def foo(ui, repo, *args, **opts): > """yet another foo command > This command has been DEPRECATED since forever. @@ -805,7 +805,7 @@ > command = registrar.command(cmdtable) > """multirevs extension > Big multi-line module docstring.""" - > @command(b'multirevs', [], 'ARG', norepo=True) + > @command(b'multirevs', [], b'ARG', norepo=True) > def multirevs(ui, repo, arg, *args, **opts): > """multirevs command""" > pass @@ -880,14 +880,14 @@ > from mercurial import commands, registrar > cmdtable = {} > command = registrar.command(cmdtable) - > @command(b'dodo', [], 'hg dodo') + > @command(b'dodo', [], b'hg dodo') > def dodo(ui, *args, **kwargs): > """Does nothing""" - > ui.write("I do nothing. Yay\\n") - > @command(b'foofoo', [], 'hg foofoo') + > ui.write(b"I do nothing. Yay\\n") + > @command(b'foofoo', [], b'hg foofoo') > def foofoo(ui, *args, **kwargs): > """Writes 'Foo foo'""" - > ui.write("Foo foo\\n") + > ui.write(b"Foo foo\\n") > EOF $ dodopath=$TESTTMP/d/dodo.py @@ -991,14 +991,14 @@ > from mercurial import commands, registrar > cmdtable = {} > command = registrar.command(cmdtable) - > @command(b'something', [], 'hg something') + > @command(b'something', [], b'hg something') > def something(ui, *args, **kwargs): > """Does something""" - > ui.write("I do something. Yaaay\\n") - > @command(b'beep', [], 'hg beep') + > ui.write(b"I do something. 
Yaaay\\n") + > @command(b'beep', [], b'hg beep') > def beep(ui, *args, **kwargs): > """Writes 'Beep beep'""" - > ui.write("Beep beep\\n") + > ui.write(b"Beep beep\\n") > EOF $ dudupath=$TESTTMP/d/dudu.py @@ -1235,7 +1235,7 @@ > cmdtable = {} > command = registrar.command(cmdtable) > class Bogon(Exception): pass - > @command(b'throw', [], 'hg throw', norepo=True) + > @command(b'throw', [], b'hg throw', norepo=True) > def throw(ui, **opts): > """throws an exception""" > raise Bogon() @@ -1278,8 +1278,8 @@ If the extensions declare outdated versions, accuse the older extension first: $ echo "from mercurial import util" >> older.py $ echo "util.version = lambda:'2.2'" >> older.py - $ echo "testedwith = '1.9.3'" >> older.py - $ echo "testedwith = '2.1.1'" >> throw.py + $ echo "testedwith = b'1.9.3'" >> older.py + $ echo "testedwith = b'2.1.1'" >> throw.py $ rm -f throw.pyc throw.pyo $ rm -Rf __pycache__ $ hg --config extensions.throw=throw.py --config extensions.older=older.py \ @@ -1293,7 +1293,7 @@ ** Extensions loaded: throw, older One extension only tested with older, one only with newer versions: - $ echo "util.version = lambda:'2.1'" >> older.py + $ echo "util.version = lambda:b'2.1'" >> older.py $ rm -f older.pyc older.pyo $ rm -Rf __pycache__ $ hg --config extensions.throw=throw.py --config extensions.older=older.py \ @@ -1307,7 +1307,7 @@ ** Extensions loaded: throw, older Older extension is tested with current version, the other only with newer: - $ echo "util.version = lambda:'1.9.3'" >> older.py + $ echo "util.version = lambda:b'1.9.3'" >> older.py $ rm -f older.pyc older.pyo $ rm -Rf __pycache__ $ hg --config extensions.throw=throw.py --config extensions.older=older.py \ @@ -1345,8 +1345,8 @@ ** Extensions loaded: throw Patch version is ignored during compatibility check - $ echo "testedwith = '3.2'" >> throw.py - $ echo "util.version = lambda:'3.2.2'" >> throw.py + $ echo "testedwith = b'3.2'" >> throw.py + $ echo "util.version = lambda:b'3.2.2'" >> 
throw.py $ rm -f throw.pyc throw.pyo $ rm -Rf __pycache__ $ hg --config extensions.throw=throw.py throw 2>&1 | egrep '^\*\*' @@ -1438,8 +1438,8 @@ $ cat > minversion1.py << EOF > from mercurial import util - > util.version = lambda: '3.5.2' - > minimumhgversion = '3.6' + > util.version = lambda: b'3.5.2' + > minimumhgversion = b'3.6' > EOF $ hg --config extensions.minversion=minversion1.py version (third party extension minversion requires version 3.6 or newer of Mercurial; disabling) @@ -1452,8 +1452,8 @@ $ cat > minversion2.py << EOF > from mercurial import util - > util.version = lambda: '3.6' - > minimumhgversion = '3.7' + > util.version = lambda: b'3.6' + > minimumhgversion = b'3.7' > EOF $ hg --config extensions.minversion=minversion2.py version 2>&1 | egrep '\(third' (third party extension minversion requires version 3.7 or newer of Mercurial; disabling) @@ -1462,8 +1462,8 @@ $ cat > minversion2.py << EOF > from mercurial import util - > util.version = lambda: '3.6.1' - > minimumhgversion = '3.6' + > util.version = lambda: b'3.6.1' + > minimumhgversion = b'3.6' > EOF $ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third' [1] @@ -1472,8 +1472,8 @@ $ cat > minversion3.py << EOF > from mercurial import util - > util.version = lambda: '3.5' - > minimumhgversion = '3.5' + > util.version = lambda: b'3.5' + > minimumhgversion = b'3.5' > EOF $ hg --config extensions.minversion=minversion3.py version 2>&1 | egrep '\(third' [1] @@ -1492,7 +1492,7 @@ $ cat > $TESTTMP/reposetuptest.py <<EOF > from mercurial import extensions > def reposetup(ui, repo): - > ui.write('reposetup() for %s\n' % (repo.root)) + > ui.write(b'reposetup() for %s\n' % (repo.root)) > ui.flush() > EOF $ hg init src @@ -1626,7 +1626,7 @@ > def deprecatedcmd(repo, ui): > pass > cmdtable = { - > 'deprecatedcmd': (deprecatedcmd, [], ''), + > b'deprecatedcmd': (deprecatedcmd, [], b''), > } > EOF $ cat <<EOF > .hg/hgrc @@ -1663,7 +1663,7 @@ > docstring = ''' > GREPME make sure 
that this is in the help! > ''' - > extensions.wrapcommand(commands.table, 'bookmarks', exbookmarks, + > extensions.wrapcommand(commands.table, b'bookmarks', exbookmarks, > synopsis, docstring) > EOF $ abspath=`pwd`/exthelp.py @@ -1698,7 +1698,7 @@ > from mercurial import registrar > cmdtable = {} > command = registrar.command(cmdtable) - > @command('dummy', [('', 'opt', u'value', u'help')], 'ext [OPTIONS]') + > @command(b'dummy', [('', 'opt', u'value', u'help')], 'ext [OPTIONS]') > def ext(*args, **opts): > print(opts['opt']) > EOF @@ -1707,8 +1707,8 @@ > test_unicode_default_value = $TESTTMP/test_unicode_default_value.py > EOF $ hg -R $TESTTMP/opt-unicode-default dummy - *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: option 'dummy.opt' has a unicode default value - *** (change the dummy.opt default value to a non-unicode string) + *** failed to import extension test_unicode_default_value from $TESTTMP/test_unicode_default_value.py: unicode u'value' found in cmdtable.dummy + *** (use b'' to make it byte string) hg: unknown command 'dummy' (did you mean summary?) [255]
--- a/tests/test-filecache.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-filecache.py Sun Mar 04 10:42:51 2018 -0500 @@ -11,11 +11,15 @@ extensions, hg, localrepo, + pycompat, ui as uimod, util, vfs as vfsmod, ) +if pycompat.ispy3: + xrange = range + class fakerepo(object): def __init__(self): self._filecache = {}
--- a/tests/test-fileset.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-fileset.t Sun Mar 04 10:42:51 2018 -0500 @@ -180,7 +180,7 @@ Test files properties - >>> file('bin', 'wb').write('\0a') + >>> open('bin', 'wb').write(b'\0a') $ fileset 'binary()' $ fileset 'binary() and unknown()' bin @@ -219,8 +219,8 @@ $ hg --config ui.portablefilenames=ignore add con.xml #endif - >>> file('1k', 'wb').write(' '*1024) - >>> file('2k', 'wb').write(' '*2048) + >>> open('1k', 'wb').write(b' '*1024) + >>> open('2k', 'wb').write(b' '*2048) $ hg add 1k 2k $ fileset 'size("bar")' hg: parse error: couldn't parse size: bar @@ -666,7 +666,11 @@ $ fileset "status(' ', '4', added())" hg: parse error at 1: not a prefix: end + ( + ^ here) [255] $ fileset "status('2', ' ', added())" hg: parse error at 1: not a prefix: end + ( + ^ here) [255]
--- a/tests/test-fncache.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-fncache.t Sun Mar 04 10:42:51 2018 -0500 @@ -236,7 +236,7 @@ > wlock.release() > > def extsetup(ui): - > extensions.wrapcommand(commands.table, "commit", commitwrap) + > extensions.wrapcommand(commands.table, b"commit", commitwrap) > EOF $ extpath=`pwd`/exceptionext.py $ hg init fncachetxn @@ -259,14 +259,14 @@ > def wrapper(orig, self, *args, **kwargs): > tr = orig(self, *args, **kwargs) > def fail(tr): - > raise error.Abort("forced transaction failure") + > raise error.Abort(b"forced transaction failure") > # zzz prefix to ensure it sorted after store.write - > tr.addfinalize('zzz-forcefails', fail) + > tr.addfinalize(b'zzz-forcefails', fail) > return tr > > def uisetup(ui): > extensions.wrapfunction( - > localrepo.localrepository, 'transaction', wrapper) + > localrepo.localrepository, b'transaction', wrapper) > > cmdtable = {} >
--- a/tests/test-glog.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-glog.t Sun Mar 04 10:42:51 2018 -0500 @@ -87,21 +87,22 @@ > cmdutil, > commands, > extensions, + > logcmdutil, > revsetlang, > smartset, > ) > > def logrevset(repo, pats, opts): - > revs = cmdutil._logrevs(repo, opts) + > revs = logcmdutil._initialrevs(repo, opts) > if not revs: > return None - > match, pats, slowpath = cmdutil._makelogmatcher(repo, revs, pats, opts) - > return cmdutil._makelogrevset(repo, match, pats, slowpath, opts) + > match, pats, slowpath = logcmdutil._makematcher(repo, revs, pats, opts) + > return logcmdutil._makerevset(repo, match, pats, slowpath, opts) > > def uisetup(ui): > def printrevset(orig, repo, pats, opts): > revs, filematcher = orig(repo, pats, opts) - > if opts.get('print_revset'): + > if opts.get(b'print_revset'): > expr = logrevset(repo, pats, opts) > if expr: > tree = revsetlang.parse(expr) @@ -109,15 +110,15 @@ > else: > tree = [] > ui = repo.ui - > ui.write('%r\n' % (opts.get('rev', []),)) - > ui.write(revsetlang.prettyformat(tree) + '\n') - > ui.write(smartset.prettyformat(revs) + '\n') + > ui.write(b'%r\n' % (opts.get(b'rev', []),)) + > ui.write(revsetlang.prettyformat(tree) + b'\n') + > ui.write(smartset.prettyformat(revs) + b'\n') > revs = smartset.baseset() # display no revisions > return revs, filematcher - > extensions.wrapfunction(cmdutil, 'getlogrevs', printrevset) - > aliases, entry = cmdutil.findcmd('log', commands.table) - > entry[1].append(('', 'print-revset', False, - > 'print generated revset and exit (DEPRECATED)')) + > extensions.wrapfunction(logcmdutil, 'getrevs', printrevset) + > aliases, entry = cmdutil.findcmd(b'log', commands.table) + > entry[1].append((b'', b'print-revset', False, + > b'print generated revset and exit (DEPRECATED)')) > EOF $ echo "[extensions]" >> $HGRCPATH @@ -2420,7 +2421,7 @@ | ~ -node template with changeset_printer: +node template with changesetprinter: $ hg log -Gqr 5:7 --config ui.graphnodetemplate='"{rev}"' 
7 7:02dbb8e276b8 @@ -2432,7 +2433,7 @@ | ~ -node template with changeset_templater (shared cache variable): +node template with changesettemplater (shared cache variable): $ hg log -Gr 5:7 -T '{latesttag % "{rev} {tag}+{distance}"}\n' \ > --config ui.graphnodetemplate='{ifeq(latesttagdistance, 0, "#", graphnode)}'
--- a/tests/test-grep.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-grep.t Sun Mar 04 10:42:51 2018 -0500 @@ -271,7 +271,7 @@ match in last "line" without newline - $ $PYTHON -c 'fp = open("noeol", "wb"); fp.write("no infinite loop"); fp.close();' + $ $PYTHON -c 'fp = open("noeol", "wb"); fp.write(b"no infinite loop"); fp.close();' $ hg ci -Amnoeol adding noeol $ hg grep loop
--- a/tests/test-help.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-help.t Sun Mar 04 10:42:51 2018 -0500 @@ -274,6 +274,7 @@ purge command to delete untracked files from the working directory relink recreates hardlinks between repository clones + remotenames showing remotebookmarks and remotebranches in UI schemes extend schemes with shortcuts to repository swarms share share a common history between several working directories shelve save and restore changes to the working directory @@ -282,6 +283,11 @@ win32mbcs allow the use of MBCS paths with problematic encodings zeroconf discover and advertise repositories on the local network +Verify that deprecated extensions are included if --verbose: + + $ hg -v help extensions | grep children + children command to display child changesets (DEPRECATED) + Verify that extension keywords appear in help templates $ hg help --config extensions.transplant= templating|grep transplant > /dev/null @@ -948,6 +954,7 @@ debugoptEXP (no help text available) debugpathcomplete complete part or all of a tracked path + debugpeer establish a connection to a peer repository debugpickmergetool examine which merge tool is chosen for specified file debugpushkey access the pushkey key/value protocol @@ -960,6 +967,7 @@ debugrename dump rename information debugrevlog show data and statistics about a revlog debugrevspec parse and apply a revision specification + debugserve run a server with advanced settings debugsetparents manually set the parents of the current working directory debugssl test a secure connection to a server @@ -975,6 +983,8 @@ debugwalk show how files match on given patterns debugwireargs (no help text available) + debugwireproto + send wire protocol commands to a server (use 'hg help -v debug' to show built-in aliases and global options) @@ -986,6 +996,7 @@ To access a subtopic, use "hg help internals.{subtopic-name}" + bundle2 Bundle2 bundles Bundles censor Censor changegroups Changegroups @@ -1492,6 +1503,8 @@ 
Extensions: clonebundles advertise pre-generated bundles to seed clones + narrow create clones which fetch history data for subset of files + (EXPERIMENTAL) prefixedname matched against word "clone" relink recreates hardlinks between repository clones @@ -3050,6 +3063,13 @@ <tr><td colspan="2"><h2><a name="topics" href="#topics">Topics</a></h2></td></tr> <tr><td> + <a href="/help/internals.bundle2"> + bundle2 + </a> + </td><td> + Bundle2 + </td></tr> + <tr><td> <a href="/help/internals.bundles"> bundles </a> @@ -3387,6 +3407,70 @@ </html> + $ get-with-headers.py 127.0.0.1:$HGPORT "help/unknowntopic" + 404 Not Found + + <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"> + <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en-US"> + <head> + <link rel="icon" href="/static/hgicon.png" type="image/png" /> + <meta name="robots" content="index, nofollow" /> + <link rel="stylesheet" href="/static/style-paper.css" type="text/css" /> + <script type="text/javascript" src="/static/mercurial.js"></script> + + <title>test: error</title> + </head> + <body> + + <div class="container"> + <div class="menu"> + <div class="logo"> + <a href="https://mercurial-scm.org/"> + <img src="/static/hglogo.png" width=75 height=90 border=0 alt="mercurial" /></a> + </div> + <ul> + <li><a href="/shortlog">log</a></li> + <li><a href="/graph">graph</a></li> + <li><a href="/tags">tags</a></li> + <li><a href="/bookmarks">bookmarks</a></li> + <li><a href="/branches">branches</a></li> + </ul> + <ul> + <li><a href="/help">help</a></li> + </ul> + </div> + + <div class="main"> + + <h2 class="breadcrumb"><a href="/">Mercurial</a> </h2> + <h3>error</h3> + + + <form class="search" action="/log"> + + <p><input name="rev" id="search1" type="text" size="30" value="" /></p> + <div id="hint">Find changesets by keywords (author, files, the commit message), revision + number or hash, or <a href="/help/revsets">revset expression</a>.</div> + </form> + + <div 
class="description"> + <p> + An error occurred while processing your request: + </p> + <p> + Not Found + </p> + </div> + </div> + </div> + + + + </body> + </html> + + [1] + $ killdaemons.py #endif
--- a/tests/test-hgrc.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-hgrc.t Sun Mar 04 10:42:51 2018 -0500 @@ -58,7 +58,7 @@ unexpected leading whitespace [255] - $ $PYTHON -c "print '[foo]\nbar = a\n b\n c \n de\n fg \nbaz = bif cb \n'" \ + $ $PYTHON -c "from __future__ import print_function; print('[foo]\nbar = a\n b\n c \n de\n fg \nbaz = bif cb \n')" \ > > $HGRC $ hg showconfig foo foo.bar=a\nb\nc\nde\nfg @@ -126,12 +126,16 @@ $ hg showconfig alias defaults alias.log=log -g defaults.identify=-n + $ hg showconfig alias alias + alias.log=log -g + $ hg showconfig alias.log alias.log + alias.log=log -g $ hg showconfig alias defaults.identify - abort: only one config item permitted - [255] + alias.log=log -g + defaults.identify=-n $ hg showconfig alias.log defaults.identify - abort: only one config item permitted - [255] + alias.log=log -g + defaults.identify=-n HGPLAIN
--- a/tests/test-hgweb-auth.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-hgweb-auth.py Sun Mar 04 10:42:51 2018 -0500 @@ -19,7 +19,7 @@ def writeauth(items): ui = origui.copy() - for name, value in items.iteritems(): + for name, value in items.items(): ui.setconfig('auth', name, value) return ui @@ -36,7 +36,7 @@ for name in ('.username', '.password'): if (p + name) not in auth: auth[p + name] = p - auth = dict((k, v) for k, v in auth.iteritems() if v is not None) + auth = dict((k, v) for k, v in auth.items() if v is not None) ui = writeauth(auth)
--- a/tests/test-hgweb-commands.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-hgweb-commands.t Sun Mar 04 10:42:51 2018 -0500 @@ -1914,7 +1914,7 @@ $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities'; echo 200 Script output follows - lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=$BUNDLE2_COMPRESSIONS$ + lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=$BUNDLE2_COMPRESSIONS$ heads @@ -2113,10 +2113,10 @@ (plain version to check the format) - $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | dd ibs=75 count=1 2> /dev/null; echo + $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=capabilities' | dd ibs=76 count=1 2> /dev/null; echo 200 Script output follows - lookup changegroupsubset branchmap pushkey known + lookup branchmap pushkey known getbundle unbundle (spread version to check the content) @@ -2127,13 +2127,13 @@ follows lookup - changegroupsubset branchmap pushkey known getbundle unbundlehash batch + changegroupsubset stream-preferred streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$
--- a/tests/test-hgweb.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-hgweb.t Sun Mar 04 10:42:51 2018 -0500 @@ -333,14 +333,14 @@ Test the access/error files are opened in append mode - $ $PYTHON -c "print len(file('access.log').readlines()), 'log lines written'" + $ $PYTHON -c "print len(open('access.log', 'rb').readlines()), 'log lines written'" 14 log lines written static file $ get-with-headers.py --twice localhost:$HGPORT 'static/style-gitweb.css' - date etag server 200 Script output follows - content-length: 9118 + content-length: 9126 content-type: text/css body { font-family: sans-serif; font-size: 12px; border:solid #d9d8d1; border-width:1px; margin:10px; background: white; color: black; } @@ -374,7 +374,7 @@ div.title_text { padding:6px 0px; border: solid #d9d8d1; border-width:0px 0px 1px; } div.log_body { padding:8px 8px 8px 150px; } .age { white-space:nowrap; } - span.age { position:relative; float:left; width:142px; font-style:italic; } + a.title span.age { position:relative; float:left; width:142px; font-style:italic; } div.log_link { padding:0px 8px; font-size:10px; font-family:sans-serif; font-style:normal;
--- a/tests/test-histedit-arguments.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-histedit-arguments.t Sun Mar 04 10:42:51 2018 -0500 @@ -280,9 +280,9 @@ -------------------------------------------------------------------- $ $PYTHON <<EOF - > fp = open('logfile', 'w') - > fp.write('12345678901234567890123456789012345678901234567890' + - > '12345') # there are 5 more columns for 80 columns + > fp = open('logfile', 'wb') + > fp.write(b'12345678901234567890123456789012345678901234567890' + + > b'12345') # there are 5 more columns for 80 columns > > # 2 x 4 = 8 columns, but 3 x 4 = 12 bytes > fp.write(u'\u3042\u3044\u3046\u3048'.encode('utf-8'))
--- a/tests/test-histedit-fold.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-histedit-fold.t Sun Mar 04 10:42:51 2018 -0500 @@ -154,9 +154,9 @@ > from mercurial import util > def abortfolding(ui, repo, hooktype, **kwargs): > ctx = repo[kwargs.get('node')] - > if set(ctx.files()) == {'c', 'd', 'f'}: + > if set(ctx.files()) == {b'c', b'd', b'f'}: > return True # abort folding commit only - > ui.warn('allow non-folding commit\\n') + > ui.warn(b'allow non-folding commit\\n') > EOF $ cat > .hg/hgrc <<EOF > [hooks]
--- a/tests/test-hook.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-hook.t Sun Mar 04 10:42:51 2018 -0500 @@ -417,9 +417,9 @@ > def printargs(ui, args): > a = list(args.items()) > a.sort() - > ui.write('hook args:\n') + > ui.write(b'hook args:\n') > for k, v in a: - > ui.write(' %s %s\n' % (k, v)) + > ui.write(b' %s %s\n' % (k, v)) > > def passhook(ui, repo, **args): > printargs(ui, args) @@ -432,19 +432,19 @@ > pass > > def raisehook(**args): - > raise LocalException('exception from hook') + > raise LocalException(b'exception from hook') > > def aborthook(**args): - > raise error.Abort('raise abort from hook') + > raise error.Abort(b'raise abort from hook') > > def brokenhook(**args): > return 1 + {} > > def verbosehook(ui, **args): - > ui.note('verbose output from hook\n') + > ui.note(b'verbose output from hook\n') > > def printtags(ui, repo, **args): - > ui.write('%s\n' % sorted(repo.tags())) + > ui.write(b'%s\n' % sorted(repo.tags())) > > class container: > unreachable = 1 @@ -667,7 +667,7 @@ $ cd hooks $ cat > testhooks.py <<EOF > def testhook(ui, **args): - > ui.write('hook works\n') + > ui.write(b'hook works\n') > EOF $ echo '[hooks]' > ../repo/.hg/hgrc $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc @@ -886,7 +886,7 @@ > def uisetup(ui): > class untrustedui(ui.__class__): > def _trusted(self, fp, f): - > if util.normpath(fp.name).endswith('untrusted/.hg/hgrc'): + > if util.normpath(fp.name).endswith(b'untrusted/.hg/hgrc'): > return False > return super(untrustedui, self)._trusted(fp, f) > ui.__class__ = untrustedui
--- a/tests/test-http-bad-server.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-http-bad-server.t Sun Mar 04 10:42:51 2018 -0500 @@ -120,7 +120,7 @@ write(41) -> Content-Type: application/mercurial-0.1\r\n write(21) -> Content-Length: 417\r\n write(2) -> \r\n - write(417) -> lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none + write(417) -> lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none readline(4? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob) readline(1? from -1) -> (1?) 
Accept-Encoding* (glob) read limit reached; closing socket @@ -161,7 +161,7 @@ write(41) -> Content-Type: application/mercurial-0.1\r\n write(21) -> Content-Length: 417\r\n write(2) -> \r\n - write(417) -> lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none + write(417) -> lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none readline(13? from 65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n (glob) readline(1?? from -1) -> (27) Accept-Encoding: identity\r\n (glob) readline(8? 
from -1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n (glob) @@ -218,7 +218,7 @@ write(41) -> Content-Type: application/mercurial-0.1\r\n write(21) -> Content-Length: 430\r\n write(2) -> \r\n - write(430) -> lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httppostargs httpmediatype=0.1rx,0.1tx,0.2tx compression=none + write(430) -> lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httppostargs httpmediatype=0.1rx,0.1tx,0.2tx compression=none readline\(14[67] from 65537\) -> \(2[67]\) POST /\?cmd=batch HTTP/1.1\\r\\n (re) readline\(1(19|20) from -1\) -> \(27\) Accept-Encoding: identity\\r\\n (re) readline(9? 
from -1) -> (41) content-type: application/mercurial-0.1\r\n (glob) @@ -294,7 +294,7 @@ write(41 from 41) -> (43) Content-Type: application/mercurial-0.1\r\n write(21 from 21) -> (22) Content-Length: 417\r\n write(2 from 2) -> (20) \r\n - write(20 from 417) -> (0) lookup changegroupsu + write(20 from 417) -> (0) lookup branchmap pus write limit reached; closing socket $ rm -f error.log @@ -329,7 +329,7 @@ write(41 from 41) -> (558) Content-Type: application/mercurial-0.1\r\n write(21 from 21) -> (537) Content-Length: 417\r\n write(2 from 2) -> (535) \r\n - write(417 from 417) -> (118) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none + write(417 from 417) -> (118) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n readline(-1) -> (27) Accept-Encoding: identity\r\n readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n @@ -377,7 +377,7 @@ write(41 from 41) -> (623) Content-Type: application/mercurial-0.1\r\n write(21 from 21) -> (602) Content-Length: 417\r\n write(2 from 2) -> (600) \r\n - write(417 from 417) -> (183) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 
bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none + write(417 from 417) -> (183) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n readline(-1) -> (27) Accept-Encoding: identity\r\n readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n @@ -429,7 +429,7 @@ write(41 from 41) -> (770) Content-Type: application/mercurial-0.1\r\n write(21 from 21) -> (749) Content-Length: 417\r\n write(2 from 2) -> (747) \r\n - write(417 from 417) -> (330) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none + write(417 from 417) -> (330) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none readline(65537) 
-> (26) GET /?cmd=batch HTTP/1.1\r\n readline(-1) -> (27) Accept-Encoding: identity\r\n readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n @@ -490,7 +490,7 @@ write(41 from 41) -> (808) Content-Type: application/mercurial-0.1\r\n write(21 from 21) -> (787) Content-Length: 417\r\n write(2 from 2) -> (785) \r\n - write(417 from 417) -> (368) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none + write(417 from 417) -> (368) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n readline(-1) -> (27) Accept-Encoding: identity\r\n readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n @@ -553,7 +553,7 @@ write(41 from 41) -> (832) Content-Type: application/mercurial-0.1\r\n write(21 from 21) -> (811) Content-Length: 417\r\n write(2 from 2) -> (809) \r\n - write(417 from 417) -> (392) lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none + 
write(417 from 417) -> (392) lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 bundle2=HG20%0Abookmarks%0Achangegroup%3D01%2C02%0Adigests%3Dmd5%2Csha1%2Csha512%0Aerror%3Dabort%2Cunsupportedcontent%2Cpushraced%2Cpushkey%0Ahgtagsfnodes%0Alistkeys%0Apushkey%0Aremote-changegroup%3Dhttp%2Chttps unbundle=HG10GZ,HG10BZ,HG10UN httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx compression=none readline(65537) -> (26) GET /?cmd=batch HTTP/1.1\r\n readline(-1) -> (27) Accept-Encoding: identity\r\n readline(-1) -> (29) vary: X-HgArg-1,X-HgProto-1\r\n
--- a/tests/test-http-branchmap.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-http-branchmap.t Sun Mar 04 10:42:51 2018 -0500 @@ -68,22 +68,22 @@ > self._file = stdout > > def write(self, data): - > if data == '47\n': + > if data == b'47\n': > # latin1 encoding is one %xx (3 bytes) shorter - > data = '44\n' - > elif data.startswith('%C3%A6 '): + > data = b'44\n' + > elif data.startswith(b'%C3%A6 '): > # translate to latin1 encoding - > data = '%%E6 %s' % data[7:] + > data = b'%%E6 %s' % data[7:] > self._file.write(data) > > def __getattr__(self, name): > return getattr(self._file, name) > - > sys.stdout = StdoutWrapper(sys.stdout) - > sys.stderr = StdoutWrapper(sys.stderr) + > sys.stdout = StdoutWrapper(getattr(sys.stdout, 'buffer', sys.stdout)) + > sys.stderr = StdoutWrapper(getattr(sys.stderr, 'buffer', sys.stderr)) > > myui = ui.ui.load() - > repo = hg.repository(myui, 'a') + > repo = hg.repository(myui, b'a') > commands.serve(myui, repo, stdio=True, cmdserver=False) > EOF $ echo baz >> b/foo
--- a/tests/test-http-bundle1.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-http-bundle1.t Sun Mar 04 10:42:51 2018 -0500 @@ -68,7 +68,7 @@ $ cat > $TESTTMP/removesupportedformat.py << EOF > from mercurial import localrepo > def extsetup(ui): - > localrepo.localrepository.supportedformats.remove('generaldelta') + > localrepo.localrepository.supportedformats.remove(b'generaldelta') > EOF $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3 @@ -181,7 +181,8 @@ > if not auth: > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who', > [('WWW-Authenticate', 'Basic Realm="mercurial"')]) - > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']: + > if base64.b64decode(auth.split()[1]).split(b':', 1) != [b'user', + > b'pass']: > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no') > def extsetup(): > common.permhooks.insert(0, perform_authentication)
--- a/tests/test-i18n.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-i18n.t Sun Mar 04 10:42:51 2018 -0500 @@ -54,8 +54,8 @@ Check i18n cache isn't reused after encoding change: $ cat > $TESTTMP/encodingchange.py << EOF + > from mercurial.i18n import _ > from mercurial import encoding, registrar - > from mercurial.i18n import _ > cmdtable = {} > command = registrar.command(cmdtable) > @command(b'encodingchange', norepo=True)
--- a/tests/test-impexp-branch.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-impexp-branch.t Sun Mar 04 10:42:51 2018 -0500 @@ -74,9 +74,9 @@ $ hg strip --no-backup . 1 files updated, 0 files merged, 0 files removed, 0 files unresolved >>> import re - >>> p = file('../r1.patch', 'rb').read() + >>> p = open('../r1.patch', 'rb').read() >>> p = re.sub(r'Parent\s+', 'Parent ', p) - >>> file('../r1-ws.patch', 'wb').write(p) + >>> open('../r1-ws.patch', 'wb').write(p) $ hg import --exact ../r1-ws.patch applying ../r1-ws.patch
--- a/tests/test-import-bypass.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-import-bypass.t Sun Mar 04 10:42:51 2018 -0500 @@ -227,7 +227,7 @@ (this also tests that editor is not invoked for '--bypass', if the commit message is explicitly specified, regardless of '--edit') - $ $PYTHON -c 'file("a", "wb").write("a\r\n")' + $ $PYTHON -c 'open("a", "wb").write(b"a\r\n")' $ hg ci -m makeacrlf $ HGEDITOR=cat hg import -m 'should fail because of eol' --edit --bypass ../test.diff applying ../test.diff
--- a/tests/test-import-context.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-import-context.t Sun Mar 04 10:42:51 2018 -0500 @@ -7,7 +7,7 @@ > lasteol = sys.argv[2] == '1' > patterns = sys.argv[3:] > - > fp = file(path, 'wb') + > fp = open(path, 'wb') > for i, pattern in enumerate(patterns): > count = int(pattern[0:-1]) > char = pattern[-1] + '\n' @@ -19,7 +19,7 @@ > EOF $ cat > cat.py <<EOF > import sys - > sys.stdout.write(repr(file(sys.argv[1], 'rb').read()) + '\n') + > sys.stdout.write(repr(open(sys.argv[1], 'rb').read()) + '\n') > EOF Initialize the test repository
--- a/tests/test-import-eol.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-import-eol.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,21 +1,21 @@ $ cat > makepatch.py <<EOF - > f = file('eol.diff', 'wb') + > f = open('eol.diff', 'wb') > w = f.write - > w('test message\n') - > w('diff --git a/a b/a\n') - > w('--- a/a\n') - > w('+++ b/a\n') - > w('@@ -1,5 +1,5 @@\n') - > w(' a\n') - > w('-bbb\r\n') - > w('+yyyy\r\n') - > w(' cc\r\n') - > w(' \n') - > w(' d\n') - > w('-e\n') - > w('\ No newline at end of file\n') - > w('+z\r\n') - > w('\ No newline at end of file\r\n') + > w(b'test message\n') + > w(b'diff --git a/a b/a\n') + > w(b'--- a/a\n') + > w(b'+++ b/a\n') + > w(b'@@ -1,5 +1,5 @@\n') + > w(b' a\n') + > w(b'-bbb\r\n') + > w(b'+yyyy\r\n') + > w(b' cc\r\n') + > w(b' \n') + > w(b' d\n') + > w(b'-e\n') + > w(b'\ No newline at end of file\n') + > w(b'+z\r\n') + > w(b'\ No newline at end of file\r\n') > EOF $ hg init repo @@ -25,7 +25,7 @@ Test different --eol values - $ $PYTHON -c 'file("a", "wb").write("a\nbbb\ncc\n\nd\ne")' + $ $PYTHON -c 'open("a", "wb").write(b"a\nbbb\ncc\n\nd\ne")' $ hg ci -Am adda adding .hgignore adding a @@ -89,7 +89,7 @@ auto EOL on CRLF file - $ $PYTHON -c 'file("a", "wb").write("a\r\nbbb\r\ncc\r\n\r\nd\r\ne")' + $ $PYTHON -c 'open("a", "wb").write(b"a\r\nbbb\r\ncc\r\n\r\nd\r\ne")' $ hg commit -m 'switch EOLs in a' $ hg --traceback --config patch.eol='auto' import eol.diff applying eol.diff @@ -105,11 +105,11 @@ auto EOL on new file or source without any EOL - $ $PYTHON -c 'file("noeol", "wb").write("noeol")' + $ $PYTHON -c 'open("noeol", "wb").write(b"noeol")' $ hg add noeol $ hg commit -m 'add noeol' - $ $PYTHON -c 'file("noeol", "wb").write("noeol\r\nnoeol\n")' - $ $PYTHON -c 'file("neweol", "wb").write("neweol\nneweol\r\n")' + $ $PYTHON -c 'open("noeol", "wb").write(b"noeol\r\nnoeol\n")' + $ $PYTHON -c 'open("neweol", "wb").write(b"neweol\nneweol\r\n")' $ hg add neweol $ hg diff --git > noeol.diff $ hg revert --no-backup noeol neweol @@ 
-127,10 +127,10 @@ Test --eol and binary patches - $ $PYTHON -c 'file("b", "wb").write("a\x00\nb\r\nd")' + $ $PYTHON -c 'open("b", "wb").write(b"a\x00\nb\r\nd")' $ hg ci -Am addb adding b - $ $PYTHON -c 'file("b", "wb").write("a\x00\nc\r\nd")' + $ $PYTHON -c 'open("b", "wb").write(b"a\x00\nc\r\nd")' $ hg diff --git > bin.diff $ hg revert --no-backup b
--- a/tests/test-import-git.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-import-git.t Sun Mar 04 10:42:51 2018 -0500 @@ -563,10 +563,10 @@ > Mc$`b*O5$Pw00T?_*Z=?k > > EOF - >>> fp = file('binary.diff', 'rb') + >>> fp = open('binary.diff', 'rb') >>> data = fp.read() >>> fp.close() - >>> file('binary.diff', 'wb').write(data.replace('\n', '\r\n')) + >>> open('binary.diff', 'wb').write(data.replace(b'\n', b'\r\n')) $ rm binary2 $ hg import --no-commit binary.diff applying binary.diff
--- a/tests/test-import.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-import.t Sun Mar 04 10:42:51 2018 -0500 @@ -56,7 +56,7 @@ $ cat > dummypatch.py <<EOF > from __future__ import print_function > print('patching file a') - > file('a', 'wb').write('line2\n') + > open('a', 'wb').write(b'line2\n') > EOF $ hg clone -r0 a b adding changesets @@ -291,7 +291,7 @@ > msg.set_payload('email commit message\n' + patch) > msg['Subject'] = 'email patch' > msg['From'] = 'email patcher' - > file(sys.argv[2], 'wb').write(msg.as_string()) + > open(sys.argv[2], 'wb').write(msg.as_string()) > EOF @@ -389,7 +389,7 @@ > msg.set_payload('email patch\n\nnext line\n---\n' + patch) > msg['Subject'] = '[PATCH] email patch' > msg['From'] = 'email patcher' - > file(sys.argv[2], 'wb').write(msg.as_string()) + > open(sys.argv[2], 'wb').write(msg.as_string()) > EOF @@ -829,7 +829,7 @@ $ hg init binaryremoval $ cd binaryremoval $ echo a > a - $ $PYTHON -c "file('b', 'wb').write('a\x00b')" + $ $PYTHON -c "open('b', 'wb').write(b'a\x00b')" $ hg ci -Am addall adding a adding b
--- a/tests/test-install.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-install.t Sun Mar 04 10:42:51 2018 -0500 @@ -17,7 +17,7 @@ checking "re2" regexp engine \((available|missing)\) (re) checking templates (*mercurial?templates)... (glob) checking default template (*mercurial?templates?map-cmdline.default) (glob) - checking commit editor... (* -c "import sys; sys.exit(0)") (glob) + checking commit editor... (*) (glob) checking username (test) no problems detected @@ -31,7 +31,7 @@ "defaulttemplate": "*mercurial?templates?map-cmdline.default", (glob) "defaulttemplateerror": null, "defaulttemplatenotfound": "default", - "editor": "* -c \"import sys; sys.exit(0)\"", (glob) + "editor": "*", (glob) "editornotfound": false, "encoding": "ascii", "encodingerror": null, @@ -72,7 +72,7 @@ checking "re2" regexp engine \((available|missing)\) (re) checking templates (*mercurial?templates)... (glob) checking default template (*mercurial?templates?map-cmdline.default) (glob) - checking commit editor... (* -c "import sys; sys.exit(0)") (glob) + checking commit editor... (*) (glob) checking username... no username supplied (specify a username in your configuration file) @@ -120,6 +120,35 @@ checking username (test) no problems detected +print out the binary post-shlexsplit in the error message when commit editor is +not found (this is intentionally using backslashes to mimic a windows usecase). + $ HGEDITOR="c:\foo\bar\baz.exe -y -z" hg debuginstall + checking encoding (ascii)... + checking Python executable (*) (glob) + checking Python version (*) (glob) + checking Python lib (*lib*)... (glob) + checking Python security support (*) (glob) + TLS 1.2 not supported by Python install; network connections lack modern security (?) + SNI not supported by Python install; may have connectivity issues with some servers (?) 
+ checking Mercurial version (*) (glob) + checking Mercurial custom build (*) (glob) + checking module policy (*) (glob) + checking installed modules (*mercurial)... (glob) + checking registered compression engines (*zlib*) (glob) + checking available compression engines (*zlib*) (glob) + checking available compression engines for wire protocol (*zlib*) (glob) + checking "re2" regexp engine \((available|missing)\) (re) + checking templates (*mercurial?templates)... (glob) + checking default template (*mercurial?templates?map-cmdline.default) (glob) + checking commit editor... (c:\foo\bar\baz.exe) (windows !) + Can't find editor 'c:\foo\bar\baz.exe' in PATH (windows !) + checking commit editor... (c:foobarbaz.exe) (no-windows !) + Can't find editor 'c:foobarbaz.exe' in PATH (no-windows !) + (specify a commit editor in your configuration file) + checking username (test) + 1 problems detected, please check your install! + [1] + #if test-repo $ . "$TESTDIR/helpers-testrepo.sh"
--- a/tests/test-issue2137.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-issue2137.t Sun Mar 04 10:42:51 2018 -0500 @@ -18,7 +18,7 @@ > tip1 = node.short(repo.changelog.tip()) > tip2 = node.short(repo.lookup(tip1)) > assert tip1 == tip2 - > ui.write('new tip: %s\n' % tip1) + > ui.write(b'new tip: %s\n' % tip1) > return result > repo.__class__ = wraprepo >
--- a/tests/test-issue4074.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-issue4074.t Sun Mar 04 10:42:51 2018 -0500 @@ -4,7 +4,7 @@ $ cat > s.py <<EOF > import random - > for x in xrange(100000): + > for x in range(100000): > print > if random.randint(0, 100) >= 50: > x += 1
--- a/tests/test-journal.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-journal.t Sun Mar 04 10:42:51 2018 -0500 @@ -4,6 +4,7 @@ > # mock out util.getuser() and util.makedate() to supply testable values > import os > from mercurial import util + > from mercurial.utils import dateutil > def mockgetuser(): > return 'foobar' > @@ -19,7 +20,7 @@ > return (time, 0) > > util.getuser = mockgetuser - > util.makedate = mockmakedate + > dateutil.makedate = mockmakedate > EOF $ cat >> $HGRCPATH << EOF
--- a/tests/test-largefiles-small-disk.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-largefiles-small-disk.t Sun Mar 04 10:42:51 2018 -0500 @@ -11,7 +11,7 @@ > _origcopyfileobj = shutil.copyfileobj > def copyfileobj(fsrc, fdst, length=16*1024): > # allow journal files (used by transaction) to be written - > if 'journal.' in fdst.name: + > if b'journal.' in fdst.name: > return _origcopyfileobj(fsrc, fdst, length) > fdst.write(fsrc.read(4)) > raise IOError(errno.ENOSPC, os.strerror(errno.ENOSPC))
--- a/tests/test-largefiles-wireproto.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-largefiles-wireproto.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,3 +1,13 @@ +#testcases sshv1 sshv2 + +#if sshv2 + $ cat >> $HGRCPATH << EOF + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF +#endif + This file contains testcases that tend to be related to the wire protocol part of largefiles.
--- a/tests/test-lfs-largefiles.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-lfs-largefiles.t Sun Mar 04 10:42:51 2018 -0500 @@ -298,7 +298,7 @@ $TESTTMP/nolargefiles/.hg/hgrc:*: extensions.lfs= (glob) $ hg log -r 'all()' -G -T '{rev} {join(lfs_files, ", ")} ({desc})\n' - o 8 (remove large_by_size.bin) + o 8 large_by_size.bin (remove large_by_size.bin) | o 7 large_by_size.bin (large by size) | @@ -338,7 +338,10 @@ No diffs when comparing merge and p1 that kept p1's changes. Diff of lfs to largefiles no longer operates in standin files. - $ hg diff -r 2:3 +This `head -n 20` looks dumb (since we expect no output), but if something +breaks you can get 1048576 lines of +y in the output, which takes a looooooong +time to print. + $ hg diff -r 2:3 | head -n 20 $ hg diff -r 2:6 diff -r e989d0fa3764 -r 752e3a0d8488 large.bin --- a/large.bin Thu Jan 01 00:00:00 1970 +0000
--- a/tests/test-lfs-test-server.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-lfs-test-server.t Sun Mar 04 10:42:51 2018 -0500 @@ -48,6 +48,7 @@ searching for changes lfs: uploading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes) lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b + lfs: uploaded 1 files (12 bytes) 1 changesets found uncompressed size of bundle content: * (changelog) (glob) @@ -65,10 +66,10 @@ $ cd ../repo2 $ hg update tip -v resolving manifests - getting a lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes) lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b + getting a lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store 1 files updated, 0 files merged, 0 files removed, 0 files unresolved @@ -86,6 +87,7 @@ lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 lfs: uploading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes) lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 + lfs: uploaded 2 files (39 bytes) 1 changesets found uncompressed size of bundle content: adding changesets @@ -97,17 +99,18 @@ $ rm -rf `hg config lfs.usercache` $ hg --repo ../repo1 update tip -v resolving manifests - getting b - lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store - getting c + lfs: need to transfer 2 objects (39 bytes) + lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes) + lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache + lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 lfs: downloading 
d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes) lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 + getting b + lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store + getting c lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store getting d - lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes) - lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache - lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store 3 files updated, 0 files merged, 0 files removed, 0 files unresolved @@ -121,11 +124,6 @@ $ hg --repo ../repo1 update -C tip -v resolving manifests - getting a - lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store - getting b - lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store - getting c lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes) abort: corrupt remote lfs object: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 [255] @@ -151,6 +149,75 @@ (run hg verify) [255] +Archive will prefetch blobs in a group + + $ rm -rf .hg/store/lfs `hg config lfs.usercache` + $ hg archive -vr 1 ../archive + lfs: need to transfer 3 objects (51 bytes) + lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes) + lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache + lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b + lfs: downloading 
37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes) + lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache + lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 + lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes) + lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache + lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 + lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store + lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store + lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store + lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store + $ find ../archive | sort + ../archive + ../archive/.hg_archival.txt + ../archive/a + ../archive/b + ../archive/c + ../archive/d + +Cat will prefetch blobs in a group + + $ rm -rf .hg/store/lfs `hg config lfs.usercache` + $ hg cat -vr 1 a b c + lfs: need to transfer 2 objects (31 bytes) + lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes) + lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache + lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b + lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes) + lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache + lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 + lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store + THIS-IS-LFS + lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store + THIS-IS-LFS 
+ lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store + ANOTHER-LARGE-FILE + +Revert will prefetch blobs in a group + + $ rm -rf .hg/store/lfs + $ rm -rf `hg config lfs.usercache` + $ rm * + $ hg revert --all -r 1 -v + adding a + reverting b + reverting c + reverting d + lfs: need to transfer 3 objects (51 bytes) + lfs: downloading 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b (12 bytes) + lfs: adding 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b to the usercache + lfs: processed: 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b + lfs: downloading 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 (20 bytes) + lfs: adding 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 to the usercache + lfs: processed: 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 + lfs: downloading d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 (19 bytes) + lfs: adding d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 to the usercache + lfs: processed: d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 + lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store + lfs: found d11e1a642b60813aee592094109b406089b8dff4cb157157f753418ec7857998 in the local lfs store + lfs: found 37a65ab78d5ecda767e8622c248b5dbff1e68b1678ab0e730d5eb8601ec8ad19 in the local lfs store + lfs: found 31cf46fbc4ecd458a0943c5b4881f1f5a6dd36c53d6167d5b69ac45149b38e5b in the local lfs store + Check error message when the remote missed a blob: $ echo FFFFF > b
--- a/tests/test-lfs.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-lfs.t Sun Mar 04 10:42:51 2018 -0500 @@ -154,10 +154,32 @@ $ hg add . -q $ hg commit -m 'commit with lfs content' + $ hg files -r . 'set:added()' + large + small + $ hg files -r . 'set:added() & lfs()' + large + $ hg mv large l $ hg mv small s + $ hg status 'set:removed()' + R large + R small + $ hg status 'set:removed() & lfs()' + R large $ hg commit -m 'renames' + $ hg files -r . 'set:copied()' + l + s + $ hg files -r . 'set:copied() & lfs()' + l + $ hg status --change . 'set:removed()' + R large + R small + $ hg status --change . 'set:removed() & lfs()' + R large + $ echo SHORT > l $ echo BECOME-LARGER-FROM-SHORTER > s $ hg commit -m 'large to small, small to large' @@ -174,7 +196,7 @@ $ hg log -r 'all()' -T '{rev} {join(lfs_files, ", ")}\n' 0 large - 1 l + 1 l, large 2 s 3 s 4 l @@ -594,8 +616,8 @@ $ cat > $TESTTMP/dumpflog.py << EOF > # print raw revision sizes, flags, and hashes for certain files > import hashlib + > from mercurial.node import short > from mercurial import revlog - > from mercurial.node import short > def hash(rawtext): > h = hashlib.sha512() > h.update(rawtext) @@ -760,7 +782,6 @@ $ hg --config lfs.usercache=emptycache clone -v repo5 fromcorrupt2 updating to branch default resolving manifests - getting l abort: corrupt remote lfs object: 22f66a3fc0b9bf3f012c814303995ec07099b3a9ce02a7af84b5970811074a3b [255] @@ -1007,7 +1028,7 @@ The LFS policy stops when the .hglfs is gone - $ hg rm .hglfs + $ mv .hglfs .hglfs_ $ echo 'largefile3' > lfs.test $ echo '012345678901234567890abc' > nolfs.exclude $ echo '01234567890123456abc' > lfs.catchall @@ -1015,6 +1036,28 @@ $ hg log -r . 
-T '{rev}: {lfs_files % "{file}: {lfsoid}\n"}\n' 4: + $ mv .hglfs_ .hglfs + $ echo '012345678901234567890abc' > lfs.test + $ hg ci -m 'back to lfs' + $ hg rm lfs.test + $ hg ci -qm 'remove lfs' + +{lfs_files} will list deleted files too + + $ hg log -T "{lfs_files % '{rev} {file}: {lfspointer.oid}\n'}" + 6 lfs.test: + 5 lfs.test: sha256:43f8f41171b6f62a6b61ba4ce98a8a6c1649240a47ebafd43120aa215ac9e7f6 + 3 lfs.catchall: sha256:31f43b9c62b540126b0ad5884dc013d21a61c9329b77de1fceeae2fc58511573 + 3 lfs.test: sha256:8acd23467967bc7b8cc5a280056589b0ba0b17ff21dbd88a7b6474d6290378a6 + 2 lfs.catchall: sha256:d4ec46c2869ba22eceb42a729377432052d9dd75d82fc40390ebaadecee87ee9 + 2 lfs.test: sha256:5489e6ced8c36a7b267292bde9fd5242a5f80a7482e8f23fa0477393dfaa4d6c + + $ hg log -r 'file("set:lfs()")' -T '{rev} {join(lfs_files, ", ")}\n' + 2 lfs.catchall, lfs.test + 3 lfs.catchall, lfs.test + 5 lfs.test + 6 lfs.test + $ cd .. Unbundling adds a requirement to a non-lfs repo, if necessary.
--- a/tests/test-lock-badness.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-lock-badness.t Sun Mar 04 10:42:51 2018 -0500 @@ -22,8 +22,8 @@ > def acquiretestlock(repo, releaseexc): > def unlock(): > if releaseexc: - > raise error.Abort('expected release exception') - > l = repo._lock(repo.vfs, 'testlock', False, unlock, None, 'test lock') + > raise error.Abort(b'expected release exception') + > l = repo._lock(repo.vfs, b'testlock', False, unlock, None, b'test lock') > return l > > @command(b'testlockexc') @@ -35,7 +35,7 @@ > try: > testlock = acquiretestlock(repo, False) > except error.LockHeld: - > raise error.Abort('lockfile on disk even after releasing!') + > raise error.Abort(b'lockfile on disk even after releasing!') > testlock.release() > EOF $ cat >> $HGRCPATH << EOF
--- a/tests/test-log-exthook.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-log-exthook.t Sun Mar 04 10:42:51 2018 -0500 @@ -4,8 +4,8 @@ $ cat > $TESTTMP/logexthook.py <<EOF > from __future__ import absolute_import > from mercurial import ( - > cmdutil, > commands, + > logcmdutil, > repair, > ) > def rot13description(self, ctx): @@ -13,7 +13,7 @@ > description = ctx.description().strip().splitlines()[0].encode('rot13') > self.ui.write("%s: %s\n" % (summary, description)) > def reposetup(ui, repo): - > cmdutil.changeset_printer._exthook = rot13description + > logcmdutil.changesetprinter._exthook = rot13description > EOF Prepare the repository
--- a/tests/test-log-linerange.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-log-linerange.t Sun Mar 04 10:42:51 2018 -0500 @@ -172,6 +172,77 @@ +3 +4 + $ hg log -f --graph -L foo,5:7 -p + @ changeset: 5:cfdf972b3971 + | tag: tip + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: foo: 3 -> 3+ and 11+ -> 11-; bar: a -> a+ + | + | diff --git a/foo b/foo + | --- a/foo + | +++ b/foo + | @@ -4,7 +4,7 @@ + | 0 + | 1 + | 2+ + | -3 + | +3+ + | 4 + | 5 + | 6 + | + o changeset: 4:eaec41c1a0c9 + : user: test + : date: Thu Jan 01 00:00:00 1970 +0000 + : summary: 11 -> 11+; leading space before "1" + : + : diff --git a/foo b/foo + : --- a/foo + : +++ b/foo + : @@ -2,7 +2,7 @@ + : 0 + : 0 + : 0 + : -1 + : + 1 + : 2+ + : 3 + : 4 + : + o changeset: 2:63a884426fd0 + : user: test + : date: Thu Jan 01 00:00:00 1970 +0000 + : summary: 2 -> 2+; added bar + : + : diff --git a/foo b/foo + : --- a/foo + : +++ b/foo + : @@ -3,6 +3,6 @@ + : 0 + : 0 + : 1 + : -2 + : +2+ + : 3 + : 4 + : + o changeset: 0:5ae1f82b9a00 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: init + + diff --git a/foo b/foo + new file mode 100644 + --- /dev/null + +++ b/foo + @@ -0,0 +1,5 @@ + +0 + +1 + +2 + +3 + +4 + With --template. @@ -849,9 +920,3 @@ $ hg log -f -L dir/baz,5:7 -p abort: cannot follow file not in parent revision: "dir/baz" [255] - -Graph log does work yet. - - $ hg log -f -L dir/baz,5:7 --graph - abort: graph not supported with line range patterns - [255]
--- a/tests/test-log.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-log.t Sun Mar 04 10:42:51 2018 -0500 @@ -2258,7 +2258,7 @@ > foo = {'foo': repo[0].node()} > names = lambda r: foo.keys() > namemap = lambda r, name: foo.get(name) - > nodemap = lambda r, node: [name for name, n in foo.iteritems() + > nodemap = lambda r, node: [name for name, n in foo.items() > if n == node] > ns = namespaces.namespace( > "bars", templatename="bar", logname="barlog", @@ -2289,6 +2289,25 @@ $ hg --config extensions.names=../names.py log -r 0 --template '{bars}\n' foo +Templater parse errors: + +simple error + $ hg log -r . -T '{shortest(node}' + hg: parse error at 15: unexpected token: end + ({shortest(node} + ^ here) + [255] + +multi-line template with error + $ hg log -r . -T 'line 1 + > line2 + > {shortest(node} + > line4\nline5' + hg: parse error at 28: unexpected token: end + (line 1\nline2\n{shortest(node}\nline4\nline5 + ^ here) + [255] + $ cd .. hg log -f dir across branches
--- a/tests/test-logexchange.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-logexchange.t Sun Mar 04 10:42:51 2018 -0500 @@ -6,6 +6,9 @@ > glog = log -G -T '{rev}:{node|short} {desc}' > [experimental] > remotenames = True + > [extensions] + > remotenames = + > show = > EOF Making a server repo @@ -57,14 +60,27 @@ $ cat .hg/logexchange/bookmarks 0 - 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc) - 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc) + 87d6d66763085b629e6d7ed56778c79827273022\x00default\x00bar (esc) + 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00default\x00foo (esc) $ cat .hg/logexchange/branches 0 - ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc) - 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc) + ec2426147f0e39dbc9cef599b066be6035ce691d\x00default\x00default (esc) + 3e1487808078543b0af6d10dadf5d46943578db0\x00default\x00wat (esc) + + $ hg show work + o 3e14 (wat) (default/wat) added bar + | + ~ + @ ec24 (default/default) Added h + | + ~ + + $ hg update "default/wat" + 1 files updated, 0 files merged, 3 files removed, 0 files unresolved + $ hg identify + 3e1487808078 (wat) tip Making a new server ------------------- @@ -94,15 +110,152 @@ $ cat .hg/logexchange/bookmarks 0 - 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc) - 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc) - 87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server2\x00bar (esc) - 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server2\x00foo (esc) + 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00default\x00foo (esc) + 87d6d66763085b629e6d7ed56778c79827273022\x00default\x00bar (esc) + 87d6d66763085b629e6d7ed56778c79827273022\x00$TESTTMP/server2\x00bar (esc) + 62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00$TESTTMP/server2\x00foo (esc) $ cat .hg/logexchange/branches 0 - 
3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc) - ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc) - ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server2\x00default (esc) - 3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server2\x00wat (esc) + 3e1487808078543b0af6d10dadf5d46943578db0\x00default\x00wat (esc) + ec2426147f0e39dbc9cef599b066be6035ce691d\x00default\x00default (esc) + ec2426147f0e39dbc9cef599b066be6035ce691d\x00$TESTTMP/server2\x00default (esc) + 3e1487808078543b0af6d10dadf5d46943578db0\x00$TESTTMP/server2\x00wat (esc) + + $ hg log -G + @ changeset: 8:3e1487808078 + | branch: wat + | tag: tip + | remote branch: $TESTTMP/server2/wat + | remote branch: default/wat + | parent: 4:aa98ab95a928 + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: added bar + | + | o changeset: 7:ec2426147f0e + | | remote branch: $TESTTMP/server2/default + | | remote branch: default/default + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: Added h + | | + | o changeset: 6:87d6d6676308 + | | bookmark: bar + | | remote bookmark: $TESTTMP/server2/bar + | | remote bookmark: default/bar + | | user: test + | | date: Thu Jan 01 00:00:00 1970 +0000 + | | summary: Added g + | | + | o changeset: 5:825660c69f0c + |/ user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: Added f + | + o changeset: 4:aa98ab95a928 + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: Added e + | + o changeset: 3:62615734edd5 + | bookmark: foo + | remote bookmark: $TESTTMP/server2/foo + | remote bookmark: default/foo + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: Added d + | + o changeset: 2:28ad74487de9 + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: Added c + | + o changeset: 1:29becc82797a + | user: test + | date: Thu Jan 01 00:00:00 1970 +0000 + | summary: Added b + | + o changeset: 0:18d04c59bb5d + user: test + 
date: Thu Jan 01 00:00:00 1970 +0000 + summary: Added a + +Testing the templates provided by remotenames extension + +`remotenames` keyword + + $ hg log -G -T "{rev}:{node|short} {remotenames}\n" + @ 8:3e1487808078 $TESTTMP/server2/wat default/wat + | + | o 7:ec2426147f0e $TESTTMP/server2/default default/default + | | + | o 6:87d6d6676308 $TESTTMP/server2/bar default/bar + | | + | o 5:825660c69f0c + |/ + o 4:aa98ab95a928 + | + o 3:62615734edd5 $TESTTMP/server2/foo default/foo + | + o 2:28ad74487de9 + | + o 1:29becc82797a + | + o 0:18d04c59bb5d + +`remotebookmarks` and `remotebranches` keywords + + $ hg log -G -T "{rev}:{node|short} [{remotebookmarks}] ({remotebranches})" + @ 8:3e1487808078 [] ($TESTTMP/server2/wat default/wat) + | + | o 7:ec2426147f0e [] ($TESTTMP/server2/default default/default) + | | + | o 6:87d6d6676308 [$TESTTMP/server2/bar default/bar] () + | | + | o 5:825660c69f0c [] () + |/ + o 4:aa98ab95a928 [] () + | + o 3:62615734edd5 [$TESTTMP/server2/foo default/foo] () + | + o 2:28ad74487de9 [] () + | + o 1:29becc82797a [] () + | + o 0:18d04c59bb5d [] () + +Testing the revsets provided by remotenames extension + +`remotenames` revset + + $ hg log -r "remotenames()" -GT "{rev}:{node|short} {remotenames}\n" + @ 8:3e1487808078 $TESTTMP/server2/wat default/wat + : + : o 7:ec2426147f0e $TESTTMP/server2/default default/default + : | + : o 6:87d6d6676308 $TESTTMP/server2/bar default/bar + :/ + o 3:62615734edd5 $TESTTMP/server2/foo default/foo + | + ~ + +`remotebranches` revset + + $ hg log -r "remotebranches()" -GT "{rev}:{node|short} {remotenames}\n" + @ 8:3e1487808078 $TESTTMP/server2/wat default/wat + | + ~ + o 7:ec2426147f0e $TESTTMP/server2/default default/default + | + ~ + +`remotebookmarks` revset + + $ hg log -r "remotebookmarks()" -GT "{rev}:{node|short} {remotenames}\n" + o 6:87d6d6676308 $TESTTMP/server2/bar default/bar + : + o 3:62615734edd5 $TESTTMP/server2/foo default/foo + | + ~
--- a/tests/test-mactext.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-mactext.t Sun Mar 04 10:42:51 2018 -0500 @@ -3,9 +3,9 @@ > import sys > > for path in sys.argv[1:]: - > data = file(path, 'rb').read() - > data = data.replace('\n', '\r') - > file(path, 'wb').write(data) + > data = open(path, 'rb').read() + > data = data.replace(b'\n', b'\r') + > open(path, 'wb').write(data) > EOF $ cat > print.py <<EOF > import sys
--- a/tests/test-manifest.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-manifest.py Sun Mar 04 10:42:51 2018 -0500 @@ -11,7 +11,6 @@ ) EMTPY_MANIFEST = b'' -EMTPY_MANIFEST_V2 = b'\0\n' HASH_1 = b'1' * 40 BIN_HASH_1 = binascii.unhexlify(HASH_1) @@ -28,42 +27,6 @@ b'flag2': b'l', } -# Same data as A_SHORT_MANIFEST -A_SHORT_MANIFEST_V2 = ( - b'\0\n' - b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n' - b'\x00foo\0%(flag1)s\n%(hash1)s\n' - ) % {b'hash1': BIN_HASH_1, - b'flag1': b'', - b'hash2': BIN_HASH_2, - b'flag2': b'l', - } - -# Same data as A_SHORT_MANIFEST -A_METADATA_MANIFEST = ( - b'\0foo\0bar\n' - b'\x00bar/baz/qux.py\0%(flag2)s\0foo\0bar\n%(hash2)s\n' # flag and metadata - b'\x00foo\0%(flag1)s\0foo\n%(hash1)s\n' # no flag, but metadata - ) % {b'hash1': BIN_HASH_1, - b'flag1': b'', - b'hash2': BIN_HASH_2, - b'flag2': b'l', - } - -A_STEM_COMPRESSED_MANIFEST = ( - b'\0\n' - b'\x00bar/baz/qux.py\0%(flag2)s\n%(hash2)s\n' - b'\x04qux/foo.py\0%(flag1)s\n%(hash1)s\n' # simple case of 4 stem chars - b'\x0az.py\0%(flag1)s\n%(hash1)s\n' # tricky newline = 10 stem characters - b'\x00%(verylongdir)sx/x\0\n%(hash1)s\n' - b'\xffx/y\0\n%(hash2)s\n' # more than 255 stem chars - ) % {b'hash1': BIN_HASH_1, - b'flag1': b'', - b'hash2': BIN_HASH_2, - b'flag2': b'l', - b'verylongdir': 255 * b'x', - } - A_DEEPER_MANIFEST = ( b'a/b/c/bar.py\0%(hash3)s%(flag1)s\n' b'a/b/c/bar.txt\0%(hash1)s%(flag1)s\n' @@ -111,11 +74,6 @@ self.assertEqual(0, len(m)) self.assertEqual([], list(m)) - def testEmptyManifestv2(self): - m = self.parsemanifest(EMTPY_MANIFEST_V2) - self.assertEqual(0, len(m)) - self.assertEqual([], list(m)) - def testManifest(self): m = self.parsemanifest(A_SHORT_MANIFEST) self.assertEqual([b'bar/baz/qux.py', b'foo'], list(m)) @@ -126,31 +84,6 @@ with self.assertRaises(KeyError): m[b'wat'] - def testParseManifestV2(self): - m1 = self.parsemanifest(A_SHORT_MANIFEST) - m2 = self.parsemanifest(A_SHORT_MANIFEST_V2) - # Should have same content as A_SHORT_MANIFEST - 
self.assertEqual(m1.text(), m2.text()) - - def testParseManifestMetadata(self): - # Metadata is for future-proofing and should be accepted but ignored - m = self.parsemanifest(A_METADATA_MANIFEST) - self.assertEqual(A_SHORT_MANIFEST, m.text()) - - def testParseManifestStemCompression(self): - m = self.parsemanifest(A_STEM_COMPRESSED_MANIFEST) - self.assertIn(b'bar/baz/qux.py', m) - self.assertIn(b'bar/qux/foo.py', m) - self.assertIn(b'bar/qux/foz.py', m) - self.assertIn(256 * b'x' + b'/x', m) - self.assertIn(256 * b'x' + b'/y', m) - self.assertEqual(A_STEM_COMPRESSED_MANIFEST, m.text(usemanifestv2=True)) - - def testTextV2(self): - m1 = self.parsemanifest(A_SHORT_MANIFEST) - v2text = m1.text(usemanifestv2=True) - self.assertEqual(A_SHORT_MANIFEST_V2, v2text) - def testSetItem(self): want = BIN_HASH_1 @@ -223,7 +156,7 @@ self.assertEqual(want, m[b'foo']) self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', BIN_HASH_1 + b'a')], - list(m.iteritems())) + list(m.items())) # Sometimes it even tries a 22-byte fake hash, but we can # return 21 and it'll work out m[b'foo'] = want + b'+' @@ -238,7 +171,7 @@ # suffix with iteration self.assertEqual([(b'bar/baz/qux.py', BIN_HASH_2), (b'foo', want)], - list(m.iteritems())) + list(m.items())) # shows up in diff self.assertEqual({b'foo': ((want, f), (h, b''))}, m.diff(clean))
--- a/tests/test-manifestv2.t Sat Mar 03 22:29:24 2018 -0500 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,102 +0,0 @@ -Create repo with old manifest - - $ cat << EOF >> $HGRCPATH - > [format] - > usegeneraldelta=yes - > EOF - - $ hg init existing - $ cd existing - $ echo footext > foo - $ hg add foo - $ hg commit -m initial - -We're using v1, so no manifestv2 entry is in requires yet. - - $ grep manifestv2 .hg/requires - [1] - -Let's clone this with manifestv2 enabled to switch to the new format for -future commits. - - $ cd .. - $ hg clone --pull existing new --config experimental.manifestv2=1 - requesting all changes - adding changesets - adding manifests - adding file changes - added 1 changesets with 1 changes to 1 files - new changesets 0fc9a4fafa44 - updating to branch default - 1 files updated, 0 files merged, 0 files removed, 0 files unresolved - $ cd new - -Check that entry was added to .hg/requires. - - $ grep manifestv2 .hg/requires - manifestv2 - -Make a new commit. - - $ echo newfootext > foo - $ hg commit -m new - -Check that the manifest actually switched to v2. - - $ hg debugdata -m 0 - foo\x0021e958b1dca695a60ee2e9cf151753204ee0f9e9 (esc) - - $ hg debugdata -m 1 - \x00 (esc) - \x00foo\x00 (esc) - I\xab\x7f\xb8(\x83\xcas\x15\x9d\xc2\xd3\xd3:5\x08\xbad5_ (esc) - -Check that manifestv2 is used if the requirement is present, even if it's -disabled in the config. - - $ echo newerfootext > foo - $ hg --config experimental.manifestv2=False commit -m newer - - $ hg debugdata -m 2 - \x00 (esc) - \x00foo\x00 (esc) - \xa6\xb1\xfb\xef]\x91\xa1\x19`\xf3.#\x90S\xf8\x06 \xe2\x19\x00 (esc) - -Check that we can still read v1 manifests. - - $ hg files -r 0 - foo - - $ cd .. 
- -Check that entry is added to .hg/requires on repo creation - - $ hg --config experimental.manifestv2=True init repo - $ cd repo - $ grep manifestv2 .hg/requires - manifestv2 - -Set up simple repo - - $ echo a > file1 - $ echo b > file2 - $ echo c > file3 - $ hg ci -Aqm 'initial' - $ echo d > file2 - $ hg ci -m 'modify file2' - -Check that 'hg verify', which uses manifest.readdelta(), works - - $ hg verify - checking changesets - checking manifests - crosschecking files in changesets and manifests - checking files - 3 files, 2 changesets, 4 total revisions - -Check that manifest revlog is smaller than for v1 - - $ hg debugindex -m - rev offset length delta linkrev nodeid p1 p2 - 0 0 81 -1 0 57361477c778 000000000000 000000000000 - 1 81 33 0 1 aeaab5a2ef74 57361477c778 000000000000
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-mdiff.py Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,24 @@ +from __future__ import absolute_import +from __future__ import print_function + +import unittest + +from mercurial import ( + mdiff, +) + +class splitnewlinesTests(unittest.TestCase): + + def test_splitnewlines(self): + cases = {b'a\nb\nc\n': [b'a\n', b'b\n', b'c\n'], + b'a\nb\nc': [b'a\n', b'b\n', b'c'], + b'a\nb\nc\n\n': [b'a\n', b'b\n', b'c\n', b'\n'], + b'': [], + b'abcabc': [b'abcabc'], + } + for inp, want in cases.items(): + self.assertEqual(mdiff.splitnewlines(inp), want) + +if __name__ == '__main__': + import silenttestrunner + silenttestrunner.main(__name__)
--- a/tests/test-merge-tools.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-merge-tools.t Sun Mar 04 10:42:51 2018 -0500 @@ -1059,6 +1059,150 @@ # hg resolve --list R f +premerge=keep respects ui.mergemarkers=basic: + + $ beforemerge + [merge-tools] + false.whatever= + true.priority=1 + true.executable=cat + # hg update -C 1 + $ hg merge -r 4 --config merge-tools.true.premerge=keep --config ui.mergemarkers=basic + merging f + <<<<<<< working copy + revision 1 + space + ======= + revision 4 + >>>>>>> merge rev + revision 0 + space + revision 4 + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ aftermerge + # cat f + <<<<<<< working copy + revision 1 + space + ======= + revision 4 + >>>>>>> merge rev + # hg stat + M f + # hg resolve --list + R f + +premerge=keep ignores ui.mergemarkers=basic if true.mergemarkers=detailed: + + $ beforemerge + [merge-tools] + false.whatever= + true.priority=1 + true.executable=cat + # hg update -C 1 + $ hg merge -r 4 --config merge-tools.true.premerge=keep \ + > --config ui.mergemarkers=basic \ + > --config merge-tools.true.mergemarkers=detailed + merging f + <<<<<<< working copy: ef83787e2614 - test: revision 1 + revision 1 + space + ======= + revision 4 + >>>>>>> merge rev: 81448d39c9a0 - test: revision 4 + revision 0 + space + revision 4 + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ aftermerge + # cat f + <<<<<<< working copy: ef83787e2614 - test: revision 1 + revision 1 + space + ======= + revision 4 + >>>>>>> merge rev: 81448d39c9a0 - test: revision 4 + # hg stat + M f + # hg resolve --list + R f + +premerge=keep respects ui.mergemarkertemplate instead of +true.mergemarkertemplate if true.mergemarkers=basic: + + $ beforemerge + [merge-tools] + false.whatever= + true.priority=1 + true.executable=cat + # hg update -C 1 + $ hg merge -r 4 --config merge-tools.true.premerge=keep \ + > --config 
ui.mergemarkertemplate='uitmpl {rev}' \ + > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' + merging f + <<<<<<< working copy: uitmpl 1 + revision 1 + space + ======= + revision 4 + >>>>>>> merge rev: uitmpl 4 + revision 0 + space + revision 4 + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ aftermerge + # cat f + <<<<<<< working copy: uitmpl 1 + revision 1 + space + ======= + revision 4 + >>>>>>> merge rev: uitmpl 4 + # hg stat + M f + # hg resolve --list + R f + +premerge=keep respects true.mergemarkertemplate instead of +true.mergemarkertemplate if true.mergemarkers=detailed: + + $ beforemerge + [merge-tools] + false.whatever= + true.priority=1 + true.executable=cat + # hg update -C 1 + $ hg merge -r 4 --config merge-tools.true.premerge=keep \ + > --config ui.mergemarkertemplate='uitmpl {rev}' \ + > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \ + > --config merge-tools.true.mergemarkers=detailed + merging f + <<<<<<< working copy: tooltmpl ef83787e2614 + revision 1 + space + ======= + revision 4 + >>>>>>> merge rev: tooltmpl 81448d39c9a0 + revision 0 + space + revision 4 + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ aftermerge + # cat f + <<<<<<< working copy: tooltmpl ef83787e2614 + revision 1 + space + ======= + revision 4 + >>>>>>> merge rev: tooltmpl 81448d39c9a0 + # hg stat + M f + # hg resolve --list + R f Tool execution @@ -1190,6 +1334,142 @@ # hg resolve --list R f +Merge using a tool that supports labellocal, labelother, and labelbase, checking +that they're quoted properly as well. 
This is using the default 'basic' +mergemarkers even though ui.mergemarkers is 'detailed', so it's ignoring both +mergemarkertemplate settings: + + $ beforemerge + [merge-tools] + false.whatever= + true.priority=1 + true.executable=cat + # hg update -C 1 + $ cat <<EOF > printargs_merge_tool + > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done + > EOF + $ hg --config merge-tools.true.executable='sh' \ + > --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \ + > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \ + > --config ui.mergemarkertemplate='uitmpl {rev}' \ + > --config ui.mergemarkers=detailed \ + > merge -r 2 + merging f + arg: "ll:working copy" + arg: "lo:" + arg: "merge rev" + arg: "lb:base: */f~base.*" (glob) + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ rm -f 'printargs_merge_tool' + +Merge using a tool that supports labellocal, labelother, and labelbase, checking +that they're quoted properly as well. 
This is using 'detailed' mergemarkers, +even though ui.mergemarkers is 'basic', and using the tool's +mergemarkertemplate: + + $ beforemerge + [merge-tools] + false.whatever= + true.priority=1 + true.executable=cat + # hg update -C 1 + $ cat <<EOF > printargs_merge_tool + > while test \$# -gt 0; do echo arg: \"\$1\"; shift; done + > EOF + $ hg --config merge-tools.true.executable='sh' \ + > --config merge-tools.true.args='./printargs_merge_tool ll:$labellocal lo: $labelother lb:$labelbase": "$base' \ + > --config merge-tools.true.mergemarkers=detailed \ + > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \ + > --config ui.mergemarkertemplate='uitmpl {rev}' \ + > --config ui.mergemarkers=basic \ + > merge -r 2 + merging f + arg: "ll:working copy: tooltmpl ef83787e2614" + arg: "lo:" + arg: "merge rev: tooltmpl 0185f4e0cf02" + arg: "lb:base: */f~base.*" (glob) + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ rm -f 'printargs_merge_tool' + +The merge tool still gets labellocal and labelother as 'basic' even when +premerge=keep is used and has 'detailed' markers: + + $ beforemerge + [merge-tools] + false.whatever= + true.priority=1 + true.executable=cat + # hg update -C 1 + $ cat <<EOF > mytool + > echo labellocal: \"\$1\" + > echo labelother: \"\$2\" + > echo "output (arg)": \"\$3\" + > echo "output (contents)": + > cat "\$3" + > EOF + $ hg --config merge-tools.true.executable='sh' \ + > --config merge-tools.true.args='mytool $labellocal $labelother $output' \ + > --config merge-tools.true.premerge=keep \ + > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \ + > --config ui.mergemarkertemplate='uitmpl {rev}' \ + > --config ui.mergemarkers=detailed \ + > merge -r 2 + merging f + labellocal: "working copy" + labelother: "merge rev" + output (arg): "$TESTTMP/f" + output (contents): + <<<<<<< working copy: uitmpl 1 + revision 1 + ======= + revision 2 + >>>>>>> 
merge rev: uitmpl 2 + space + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ rm -f 'mytool' + +premerge=keep uses the *tool's* mergemarkertemplate if tool's +mergemarkers=detailed; labellocal and labelother also use the tool's template + + $ beforemerge + [merge-tools] + false.whatever= + true.priority=1 + true.executable=cat + # hg update -C 1 + $ cat <<EOF > mytool + > echo labellocal: \"\$1\" + > echo labelother: \"\$2\" + > echo "output (arg)": \"\$3\" + > echo "output (contents)": + > cat "\$3" + > EOF + $ hg --config merge-tools.true.executable='sh' \ + > --config merge-tools.true.args='mytool $labellocal $labelother $output' \ + > --config merge-tools.true.premerge=keep \ + > --config merge-tools.true.mergemarkers=detailed \ + > --config merge-tools.true.mergemarkertemplate='tooltmpl {short(node)}' \ + > --config ui.mergemarkertemplate='uitmpl {rev}' \ + > --config ui.mergemarkers=detailed \ + > merge -r 2 + merging f + labellocal: "working copy: tooltmpl ef83787e2614" + labelother: "merge rev: tooltmpl 0185f4e0cf02" + output (arg): "$TESTTMP/f" + output (contents): + <<<<<<< working copy: tooltmpl ef83787e2614 + revision 1 + ======= + revision 2 + >>>>>>> merge rev: tooltmpl 0185f4e0cf02 + space + 0 files updated, 1 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ rm -f 'mytool' + Issue3581: Merging a filename that needs to be quoted (This test doesn't work on Windows filesystems even on Linux, so check for Unix-like permission)
--- a/tests/test-mq-eol.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-mq-eol.t Sun Mar 04 10:42:51 2018 -0500 @@ -10,29 +10,29 @@ > EOF $ cat > makepatch.py <<EOF - > f = file('eol.diff', 'wb') + > f = open('eol.diff', 'wb') > w = f.write - > w('test message\n') - > w('diff --git a/a b/a\n') - > w('--- a/a\n') - > w('+++ b/a\n') - > w('@@ -1,5 +1,5 @@\n') - > w(' a\n') - > w('-b\r\n') - > w('+y\r\n') - > w(' c\r\n') - > w(' d\n') - > w('-e\n') - > w('\ No newline at end of file\n') - > w('+z\r\n') - > w('\ No newline at end of file\r\n') + > w(b'test message\n') + > w(b'diff --git a/a b/a\n') + > w(b'--- a/a\n') + > w(b'+++ b/a\n') + > w(b'@@ -1,5 +1,5 @@\n') + > w(b' a\n') + > w(b'-b\r\n') + > w(b'+y\r\n') + > w(b' c\r\n') + > w(b' d\n') + > w(b'-e\n') + > w(b'\ No newline at end of file\n') + > w(b'+z\r\n') + > w(b'\ No newline at end of file\r\n') > EOF $ cat > cateol.py <<EOF > import sys - > for line in file(sys.argv[1], 'rb'): - > line = line.replace('\r', '<CR>') - > line = line.replace('\n', '<LF>') + > for line in open(sys.argv[1], 'rb'): + > line = line.replace(b'\r', b'<CR>') + > line = line.replace(b'\n', b'<LF>') > print(line) > EOF @@ -44,7 +44,7 @@ Test different --eol values - $ $PYTHON -c 'file("a", "wb").write("a\nb\nc\nd\ne")' + $ $PYTHON -c 'open("a", "wb").write(b"a\nb\nc\nd\ne")' $ hg ci -Am adda adding .hgignore adding a @@ -152,15 +152,15 @@ $ hg init testeol $ cd testeol - $ $PYTHON -c "file('a', 'wb').write('1\r\n2\r\n3\r\n4')" + $ $PYTHON -c "open('a', 'wb').write(b'1\r\n2\r\n3\r\n4')" $ hg ci -Am adda adding a - $ $PYTHON -c "file('a', 'wb').write('1\r\n2\r\n33\r\n4')" + $ $PYTHON -c "open('a', 'wb').write(b'1\r\n2\r\n33\r\n4')" $ hg qnew patch1 $ hg qpop popping patch1 patch queue now empty - $ $PYTHON -c "file('a', 'wb').write('1\r\n22\r\n33\r\n4')" + $ $PYTHON -c "open('a', 'wb').write(b'1\r\n22\r\n33\r\n4')" $ hg ci -m changea $ hg --config 'patch.eol=LF' qpush
--- a/tests/test-mq-missingfiles.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-mq-missingfiles.t Sun Mar 04 10:42:51 2018 -0500 @@ -9,8 +9,8 @@ > args = sys.argv[2:] > assert (len(args) % 2) == 0 > - > f = file(path, 'wb') - > for i in xrange(len(args)/2): + > f = open(path, 'wb') + > for i in range(len(args) // 2): > count, s = args[2*i:2*i+2] > count = int(count) > s = s.decode('string_escape')
--- a/tests/test-mq-qimport.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-mq-qimport.t Sun Mar 04 10:42:51 2018 -0500 @@ -6,8 +6,8 @@ > args = sys.argv[2:] > assert (len(args) % 2) == 0 > - > f = file(path, 'wb') - > for i in xrange(len(args)/2): + > f = open(path, 'wb') + > for i in range(len(args)/2): > count, s = args[2*i:2*i+2] > count = int(count) > s = s.decode('string_escape')
--- a/tests/test-mq-qrefresh-replace-log-message.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-mq-qrefresh-replace-log-message.t Sun Mar 04 10:42:51 2018 -0500 @@ -119,7 +119,7 @@ > def reposetup(ui, repo): > class commitfailure(repo.__class__): > def commit(self, *args, **kwargs): - > raise error.Abort('emulating unexpected abort') + > raise error.Abort(b'emulating unexpected abort') > repo.__class__ = commitfailure > EOF
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-acl.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,42 @@ +Make a narrow clone then archive it + $ . "$TESTDIR/narrow-library.sh" + + $ hg init master + $ cd master + + $ for x in `$TESTDIR/seq.py 3`; do + > echo $x > "f$x" + > hg add "f$x" + > hg commit -m "Add $x" + > done + $ cat >> .hg/hgrc << EOF + > [narrowhgacl] + > default.includes=f1 f2 + > EOF + $ hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid + $ cat hg.pid >> "$DAEMON_PIDS" + + $ cd .. + $ hg clone http://localhost:$HGPORT1 narrowclone1 + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 2 changes to 2 files + new changesets * (glob) + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + +The clone directory should only contain f1 and f2 + $ ls -1 narrowclone1 | sort + f1 + f2 + +Requirements should contain narrowhg + $ cat narrowclone1/.hg/requires | grep narrowhg + narrowhg-experimental + +NarrowHG should track f1 and f2 + $ hg -R narrowclone1 tracked + I path:f1 + I path:f2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-archive.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,32 @@ +Make a narrow clone then archive it + $ . "$TESTDIR/narrow-library.sh" + + $ hg init master + $ cd master + + $ for x in `$TESTDIR/seq.py 3`; do + > echo $x > "f$x" + > hg add "f$x" + > hg commit -m "Add $x" + > done + + $ hg serve -a localhost -p $HGPORT1 -d --pid-file=hg.pid + $ cat hg.pid >> "$DAEMON_PIDS" + + $ cd .. + $ hg clone --narrow --include f1 --include f2 http://localhost:$HGPORT1/ narrowclone1 + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 2 changes to 2 files + new changesets * (glob) + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + +The tar should only contain f1 and f2 + $ cd narrowclone1 + $ hg archive -t tgz repo.tgz + $ tar tfz repo.tgz + repo/f1 + repo/f2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-clone-no-ellipsis.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,130 @@ + $ . "$TESTDIR/narrow-library.sh" + + $ hg init master + $ cd master + $ mkdir dir + $ mkdir dir/src + $ cd dir/src + $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done + $ cd .. + $ mkdir tests + $ cd tests + $ for x in `$TESTDIR/seq.py 20`; do echo $x > "t$x"; hg add "t$x"; hg commit -m "Commit test $x"; done + $ cd ../../.. + +narrow clone a file, f10 + + $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10" + requesting all changes + adding changesets + adding manifests + adding file changes + added 40 changesets with 1 changes to 1 files + new changesets *:* (glob) + $ cd narrow + $ cat .hg/requires | grep -v generaldelta + dotencode + fncache + narrowhg-experimental + revlogv1 + store + + $ cat .hg/narrowspec + [includes] + path:dir/src/f10 + [excludes] + $ hg update + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ find * | sort + dir + dir/src + dir/src/f10 + $ cat dir/src/f10 + 10 + + $ cd .. 
+ +narrow clone a directory, tests/, except tests/t19 + + $ hg clone --narrow ssh://user@dummy/master narrowdir --noupdate --include "dir/tests/" --exclude "dir/tests/t19" + requesting all changes + adding changesets + adding manifests + adding file changes + added 40 changesets with 19 changes to 19 files + new changesets *:* (glob) + $ cd narrowdir + $ cat .hg/narrowspec + [includes] + path:dir/tests + [excludes] + path:dir/tests/t19 + $ hg update + 19 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ find * | sort + dir + dir/tests + dir/tests/t1 + dir/tests/t10 + dir/tests/t11 + dir/tests/t12 + dir/tests/t13 + dir/tests/t14 + dir/tests/t15 + dir/tests/t16 + dir/tests/t17 + dir/tests/t18 + dir/tests/t2 + dir/tests/t20 + dir/tests/t3 + dir/tests/t4 + dir/tests/t5 + dir/tests/t6 + dir/tests/t7 + dir/tests/t8 + dir/tests/t9 + + $ cd .. + +narrow clone everything but a directory (tests/) + + $ hg clone --narrow ssh://user@dummy/master narrowroot --noupdate --exclude "dir/tests" + requesting all changes + adding changesets + adding manifests + adding file changes + added 40 changesets with 20 changes to 20 files + new changesets *:* (glob) + $ cd narrowroot + $ cat .hg/narrowspec + [includes] + path:. + [excludes] + path:dir/tests + $ hg update + 20 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ find * | sort + dir + dir/src + dir/src/f1 + dir/src/f10 + dir/src/f11 + dir/src/f12 + dir/src/f13 + dir/src/f14 + dir/src/f15 + dir/src/f16 + dir/src/f17 + dir/src/f18 + dir/src/f19 + dir/src/f2 + dir/src/f20 + dir/src/f3 + dir/src/f4 + dir/src/f5 + dir/src/f6 + dir/src/f7 + dir/src/f8 + dir/src/f9 + + $ cd ..
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-clone-non-narrow-server.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,53 @@ +Test attempting a narrow clone against a server that doesn't support narrowhg. + + $ . "$TESTDIR/narrow-library.sh" + + $ hg init master + $ cd master + + $ for x in `$TESTDIR/seq.py 10`; do + > echo $x > "f$x" + > hg add "f$x" + > hg commit -m "Add $x" + > done + + $ hg serve -a localhost -p $HGPORT1 --config extensions.narrow=! -d \ + > --pid-file=hg.pid + $ cat hg.pid >> "$DAEMON_PIDS" + $ hg serve -a localhost -p $HGPORT2 -d --pid-file=hg.pid + $ cat hg.pid >> "$DAEMON_PIDS" + +Verify that narrow is advertised in the bundle2 capabilities: + $ echo hello | hg -R . serve --stdio | \ + > python -c "import sys, urllib; print urllib.unquote_plus(list(sys.stdin)[1])" | grep narrow + narrow=v0 + + $ cd .. + + $ hg clone --narrow --include f1 http://localhost:$HGPORT1/ narrowclone + requesting all changes + abort: server doesn't support narrow clones + [255] + +Make a narrow clone (via HGPORT2), then try to narrow and widen +into it (from HGPORT1) to prove that narrowing is fine and widening fails +gracefully: + $ hg clone -r 0 --narrow --include f1 http://localhost:$HGPORT2/ narrowclone + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files + new changesets * (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrowclone + $ hg tracked --addexclude f2 http://localhost:$HGPORT1/ + comparing with http://localhost:$HGPORT1/ + searching for changes + looking for local changes to affected paths + $ hg tracked --addinclude f1 http://localhost:$HGPORT1/ + comparing with http://localhost:$HGPORT1/ + searching for changes + no changes found + abort: server doesn't support narrow clones + [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-clone-nonlinear.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,148 @@ +Testing narrow clones when changesets modifying a matching file exist on +multiple branches + + $ . "$TESTDIR/narrow-library.sh" + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + + $ hg branch default + marked working directory as branch default + (branches are permanent and global, did you want a bookmark?) + $ for x in `$TESTDIR/seq.py 10`; do + > echo $x > "f$x" + > hg add "f$x" + > hg commit -m "Add $x" + > done + + $ hg branch release-v1 + marked working directory as branch release-v1 + (branches are permanent and global, did you want a bookmark?) + $ hg commit -m "Start release for v1" + + $ hg update default + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ for x in `$TESTDIR/seq.py 10`; do + > echo "$x v2" > "f$x" + > hg commit -m "Update $x to v2" + > done + + $ hg update release-v1 + 10 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg branch release-v1 + marked working directory as branch release-v1 + $ for x in `$TESTDIR/seq.py 1 5`; do + > echo "$x v1 hotfix" > "f$x" + > hg commit -m "Hotfix $x in v1" + > done + + $ hg update default + 10 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg branch release-v2 + marked working directory as branch release-v2 + $ hg commit -m "Start release for v2" + + $ hg update default + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg branch default + marked working directory as branch default + $ for x in `$TESTDIR/seq.py 10`; do + > echo "$x v3" > "f$x" + > hg commit -m "Update $x to v3" + > done + + $ hg update release-v2 + 10 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg branch release-v2 + marked working directory as branch release-v2 + $ for x in `$TESTDIR/seq.py 4 9`; do + > echo "$x v2 hotfix" > 
"f$x" + > hg commit -m "Hotfix $x in v2" + > done + + $ hg heads -T '{rev} <- {p1rev} ({branch}): {desc}\n' + 42 <- 41 (release-v2): Hotfix 9 in v2 + 36 <- 35 (default): Update 10 to v3 + 25 <- 24 (release-v1): Hotfix 5 in v1 + + $ cd .. + +We now have 3 branches: default, which has v3 of all files, release-v1 which +has v1 of all files, and release-v2 with v2 of all files. + +Narrow clone which should get all branches + + $ hg clone --narrow ssh://user@dummy/master narrow --include "f5" + requesting all changes + adding changesets + adding manifests + adding file changes + added 12 changesets with 5 changes to 1 files (+2 heads) + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + $ hg log -G -T "{if(ellipsis, '...')}{node|short} ({branch}): {desc}\n" + o ...031f516143fe (release-v2): Hotfix 9 in v2 + | + o 9cd7f7bb9ca1 (release-v2): Hotfix 5 in v2 + | + o ...37bbc88f3ef0 (release-v2): Hotfix 4 in v2 + | + | @ ...dae2f368ca07 (default): Update 10 to v3 + | | + | o 9c224e89cb31 (default): Update 5 to v3 + | | + | o ...04fb59c7c9dc (default): Update 4 to v3 + |/ + | o b2253e82401f (release-v1): Hotfix 5 in v1 + | | + | o ...960ac37d74fd (release-v1): Hotfix 4 in v1 + | | + o | 986298e3f347 (default): Update 5 to v2 + | | + o | ...75d539c667ec (default): Update 4 to v2 + |/ + o 04c71bd5707f (default): Add 5 + | + o ...881b3891d041 (default): Add 4 + + +Narrow clone the first file, hitting edge condition where unaligned +changeset and manifest revnums cross branches. 
+ + $ hg clone --narrow ssh://user@dummy/master narrow --include "f1" + requesting all changes + adding changesets + adding manifests + adding file changes + added 10 changesets with 4 changes to 1 files (+2 heads) + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + $ hg log -G -T "{if(ellipsis, '...')}{node|short} ({branch}): {desc}\n" + o ...031f516143fe (release-v2): Hotfix 9 in v2 + | + | @ ...dae2f368ca07 (default): Update 10 to v3 + | | + | o 1f5d184b8e96 (default): Update 1 to v3 + |/ + | o ...b2253e82401f (release-v1): Hotfix 5 in v1 + | | + | o 133502f6b7e5 (release-v1): Hotfix 1 in v1 + | | + o | ...79165c83d644 (default): Update 10 to v2 + | | + o | c7b7a5f2f088 (default): Update 1 to v2 + | | + | o ...f0531a3db7a9 (release-v1): Start release for v1 + |/ + o ...6a3f0f0abef3 (default): Add 10 + | + o e012ac15eaaa (default): Add 1 +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-clone.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,225 @@ + $ . "$TESTDIR/narrow-library.sh" + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + $ mkdir dir + $ mkdir dir/src + $ cd dir/src + $ for x in `$TESTDIR/seq.py 20`; do echo $x > "f$x"; hg add "f$x"; hg commit -m "Commit src $x"; done + $ cd .. + $ mkdir tests + $ cd tests + $ for x in `$TESTDIR/seq.py 20`; do echo $x > "t$x"; hg add "t$x"; hg commit -m "Commit test $x"; done + $ cd ../../.. + +narrow clone a file, f10 + + $ hg clone --narrow ssh://user@dummy/master narrow --noupdate --include "dir/src/f10" + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 1 changes to 1 files + new changesets *:* (glob) + $ cd narrow + $ cat .hg/requires | grep -v generaldelta + dotencode + fncache + narrowhg-experimental + revlogv1 + store + + $ cat .hg/narrowspec + [includes] + path:dir/src/f10 + [excludes] + $ hg tracked + I path:dir/src/f10 + $ hg update + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ find * | sort + dir + dir/src + dir/src/f10 + $ cat dir/src/f10 + 10 + + $ cd .. 
+ +narrow clone with a newline should fail + + $ hg clone --narrow ssh://user@dummy/master narrow_fail --noupdate --include 'dir/src/f10 + > ' + requesting all changes + abort: newlines are not allowed in narrowspec paths + [255] + +narrow clone a directory, tests/, except tests/t19 + + $ hg clone --narrow ssh://user@dummy/master narrowdir --noupdate --include "dir/tests/" --exclude "dir/tests/t19" + requesting all changes + adding changesets + adding manifests + adding file changes + added 21 changesets with 19 changes to 19 files + new changesets *:* (glob) + $ cd narrowdir + $ cat .hg/narrowspec + [includes] + path:dir/tests + [excludes] + path:dir/tests/t19 + $ hg tracked + I path:dir/tests + X path:dir/tests/t19 + $ hg update + 19 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ find * | sort + dir + dir/tests + dir/tests/t1 + dir/tests/t10 + dir/tests/t11 + dir/tests/t12 + dir/tests/t13 + dir/tests/t14 + dir/tests/t15 + dir/tests/t16 + dir/tests/t17 + dir/tests/t18 + dir/tests/t2 + dir/tests/t20 + dir/tests/t3 + dir/tests/t4 + dir/tests/t5 + dir/tests/t6 + dir/tests/t7 + dir/tests/t8 + dir/tests/t9 + + $ cd .. + +narrow clone everything but a directory (tests/) + + $ hg clone --narrow ssh://user@dummy/master narrowroot --noupdate --exclude "dir/tests" + requesting all changes + adding changesets + adding manifests + adding file changes + added 21 changesets with 20 changes to 20 files + new changesets *:* (glob) + $ cd narrowroot + $ cat .hg/narrowspec + [includes] + path:. + [excludes] + path:dir/tests + $ hg tracked + I path:. 
+ X path:dir/tests + $ hg update + 20 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ find * | sort + dir + dir/src + dir/src/f1 + dir/src/f10 + dir/src/f11 + dir/src/f12 + dir/src/f13 + dir/src/f14 + dir/src/f15 + dir/src/f16 + dir/src/f17 + dir/src/f18 + dir/src/f19 + dir/src/f2 + dir/src/f20 + dir/src/f3 + dir/src/f4 + dir/src/f5 + dir/src/f6 + dir/src/f7 + dir/src/f8 + dir/src/f9 + + $ cd .. + +narrow clone no paths at all + + $ hg clone --narrow ssh://user@dummy/master narrowempty --noupdate + requesting all changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 0 changes to 0 files + new changesets * (glob) + $ cd narrowempty + $ hg tracked + $ hg update + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ ls + + $ cd .. + +simple clone + $ hg clone ssh://user@dummy/master simpleclone + requesting all changes + adding changesets + adding manifests + adding file changes + added 40 changesets with 40 changes to 40 files + new changesets * (glob) + updating to branch default + 40 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd simpleclone + $ find * | sort + dir + dir/src + dir/src/f1 + dir/src/f10 + dir/src/f11 + dir/src/f12 + dir/src/f13 + dir/src/f14 + dir/src/f15 + dir/src/f16 + dir/src/f17 + dir/src/f18 + dir/src/f19 + dir/src/f2 + dir/src/f20 + dir/src/f3 + dir/src/f4 + dir/src/f5 + dir/src/f6 + dir/src/f7 + dir/src/f8 + dir/src/f9 + dir/tests + dir/tests/t1 + dir/tests/t10 + dir/tests/t11 + dir/tests/t12 + dir/tests/t13 + dir/tests/t14 + dir/tests/t15 + dir/tests/t16 + dir/tests/t17 + dir/tests/t18 + dir/tests/t19 + dir/tests/t2 + dir/tests/t20 + dir/tests/t3 + dir/tests/t4 + dir/tests/t5 + dir/tests/t6 + dir/tests/t7 + dir/tests/t8 + dir/tests/t9 + + $ cd ..
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-commit.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,102 @@ +#testcases flat tree + + $ . "$TESTDIR/narrow-library.sh" + +#if tree + $ cat << EOF >> $HGRCPATH + > [experimental] + > treemanifest = 1 + > EOF +#endif + +create full repo + + $ hg init master + $ cd master + + $ mkdir inside + $ echo inside > inside/f1 + $ mkdir outside + $ echo outside > outside/f1 + $ hg ci -Aqm 'initial' + + $ echo modified > inside/f1 + $ hg ci -qm 'modify inside' + + $ echo modified > outside/f1 + $ hg ci -qm 'modify outside' + + $ cd .. + + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 2 changes to 1 files + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + + $ hg update -q 0 + +Can not modify dirstate outside + + $ mkdir outside + $ touch outside/f1 + $ hg debugwalk -I 'relglob:f1' + matcher: <includematcher includes='(?:(?:|.*/)f1(?:/|$))'> + f inside/f1 inside/f1 + $ hg add outside/f1 + abort: cannot track 'outside/f1' - it is outside the narrow clone + [255] + $ touch outside/f3 + $ hg add outside/f3 + abort: cannot track 'outside/f3' - it is outside the narrow clone + [255] + +But adding a truly excluded file shouldn't count + + $ hg add outside/f3 -X outside/f3 + + $ rm -r outside + +Can modify dirstate inside + + $ echo modified > inside/f1 + $ touch inside/f3 + $ hg add inside/f3 + $ hg status + M inside/f1 + A inside/f3 + $ hg revert -qC . + $ rm inside/f3 + +Can commit changes inside. Leaves outside unchanged. + + $ hg update -q 'desc("initial")' + $ echo modified2 > inside/f1 + $ hg manifest --debug + 4d6a634d5ba06331a60c29ee0db8412490a54fcd 644 inside/f1 + 7fb3bb6356d28d4dc352c5ba52d7350a81b6bd46 644 outside/f1 (flat !) 
+ d0f2f706468ab0e8bec7af87446835fb1b13511b 755 d outside/ (tree !)
+ $ hg commit -m 'modify inside/f1'
+ created new head
+ $ hg files -r .
+ inside/f1
+ outside/f1 (flat !)
+ outside/ (tree !)
+ $ hg manifest --debug
+ 3f4197b4a11b9016e77ebc47fe566944885fd11b 644 inside/f1
+ 7fb3bb6356d28d4dc352c5ba52d7350a81b6bd46 644 outside/f1 (flat !)
+ d0f2f706468ab0e8bec7af87446835fb1b13511b 755 d outside/ (tree !)
+Some filesystems (notably FAT/exFAT) only store timestamps with 2
+seconds of precision, so by sleeping for 3 seconds, we can ensure that
+the timestamps of files stored by dirstate will appear older than the
+dirstate file, and therefore we'll be able to get stable output from
+debugdirstate. If we don't do this, the test can be slightly flaky.
+ $ sleep 3
+ $ hg status
+ $ hg debugdirstate --nodates
+ n 644 10 set inside/f1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-copies.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,57 @@ + + $ . "$TESTDIR/narrow-library.sh" + +create full repo + + $ hg init master + $ cd master + + $ mkdir inside + $ echo inside > inside/f1 + $ mkdir outside + $ echo outside > outside/f2 + $ hg ci -Aqm 'initial' + + $ hg mv outside/f2 inside/f2 + $ hg ci -qm 'move f2 from outside' + + $ echo modified > inside/f2 + $ hg ci -qm 'modify inside/f2' + + $ cd .. + + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 3 changes to 2 files + new changesets *:* (glob) + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + + $ hg co 'desc("move f2")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg status + $ hg diff + $ hg diff --change . --git + diff --git a/inside/f2 b/inside/f2 + new file mode 100644 + --- /dev/null + +++ b/inside/f2 + @@ -0,0 +1,1 @@ + +outside + + $ hg log --follow inside/f2 -r tip + changeset: 2:bcfb756e0ca9 + tag: tip + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: modify inside/f2 + + changeset: 1:5a016133b2bb + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: move f2 from outside +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-debugcommands.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,43 @@ + $ . "$TESTDIR/narrow-library.sh" + $ hg init repo + $ cd repo + $ cat << EOF > .hg/narrowspec + > [includes] + > path:foo + > [excludes] + > EOF + $ echo treemanifest >> .hg/requires + $ echo narrowhg-experimental >> .hg/requires + $ mkdir -p foo/bar + $ echo b > foo/f + $ echo c > foo/bar/f + $ hg commit -Am hi + adding foo/bar/f + adding foo/f + $ hg debugindex -m + rev offset length delta linkrev nodeid p1 p2 + 0 0 47 -1 0 14a5d056d75a 000000000000 000000000000 + $ hg debugindex --dir foo + rev offset length delta linkrev nodeid p1 p2 + 0 0 77 -1 0 e635c7857aef 000000000000 000000000000 + $ hg debugindex --dir foo/ + rev offset length delta linkrev nodeid p1 p2 + 0 0 77 -1 0 e635c7857aef 000000000000 000000000000 + $ hg debugindex --dir foo/bar + rev offset length delta linkrev nodeid p1 p2 + 0 0 44 -1 0 e091d4224761 000000000000 000000000000 + $ hg debugindex --dir foo/bar/ + rev offset length delta linkrev nodeid p1 p2 + 0 0 44 -1 0 e091d4224761 000000000000 000000000000 + $ hg debugdata -m 0 + foo\x00e635c7857aef92ac761ce5741a99da159abbbb24t (esc) + $ hg debugdata --dir foo 0 + bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc) + f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc) + $ hg debugdata --dir foo/ 0 + bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc) + f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc) + $ hg debugdata --dir foo/bar 0 + f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc) + $ hg debugdata --dir foo/bar/ 0 + f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-debugrebuilddirstate.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,31 @@ + $ . "$TESTDIR/narrow-library.sh" + $ hg init master + $ cd master + $ echo treemanifest >> .hg/requires + $ echo 'contents of file' > file + $ mkdir foo + $ echo 'contents of foo/bar' > foo/bar + $ hg ci -Am 'some change' + adding file + adding foo/bar + + $ cd .. + $ hg clone --narrow ssh://user@dummy/master copy --include=foo + requesting all changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files + new changesets * (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd copy + + $ hg debugdirstate + n * 20 unset foo/bar (glob) + $ mv .hg/dirstate .hg/old_dirstate + $ dd bs=40 count=1 if=.hg/old_dirstate of=.hg/dirstate 2>/dev/null + $ hg debugdirstate + $ hg debugrebuilddirstate + $ hg debugdirstate + n * * unset foo/bar (glob)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-exchange-merges.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,207 @@ + + $ . "$TESTDIR/narrow-library.sh" + +create full repo + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + + $ mkdir inside + $ echo 1 > inside/f + $ hg commit -Aqm 'initial inside' + + $ mkdir outside + $ echo 1 > outside/f + $ hg commit -Aqm 'initial outside' + + $ echo 2a > outside/f + $ hg commit -Aqm 'outside 2a' + $ echo 3 > inside/f + $ hg commit -Aqm 'inside 3' + $ echo 4a > outside/f + $ hg commit -Aqm 'outside 4a' + $ hg update '.~3' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ echo 2b > outside/f + $ hg commit -Aqm 'outside 2b' + $ echo 3 > inside/f + $ hg commit -Aqm 'inside 3' + $ echo 4b > outside/f + $ hg commit -Aqm 'outside 4b' + $ hg update '.~3' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ echo 2c > outside/f + $ hg commit -Aqm 'outside 2c' + $ echo 3 > inside/f + $ hg commit -Aqm 'inside 3' + $ echo 4c > outside/f + $ hg commit -Aqm 'outside 4c' + $ hg update '.~3' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ echo 2d > outside/f + $ hg commit -Aqm 'outside 2d' + $ echo 3 > inside/f + $ hg commit -Aqm 'inside 3' + $ echo 4d > outside/f + $ hg commit -Aqm 'outside 4d' + + $ hg update -r 'desc("outside 4a")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)' + merging outside/f + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + $ echo 5 > outside/f + $ rm outside/f.orig + $ hg resolve --mark outside/f + (no more unresolved files) + $ hg commit -m 'merge a/b 5' + $ echo 6 > outside/f + $ hg commit -Aqm 'outside 6' + + $ hg merge -r 'desc("outside 4c")' 2>&1 | egrep 
-v '(warning:|incomplete!)' + merging outside/f + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + $ echo 7 > outside/f + $ rm outside/f.orig + $ hg resolve --mark outside/f + (no more unresolved files) + $ hg commit -Aqm 'merge a/b/c 7' + $ echo 8 > outside/f + $ hg commit -Aqm 'outside 8' + + $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)' + merging outside/f + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + $ echo 9 > outside/f + $ rm outside/f.orig + $ hg resolve --mark outside/f + (no more unresolved files) + $ hg commit -Aqm 'merge a/b/c/d 9' + $ echo 10 > outside/f + $ hg commit -Aqm 'outside 10' + + $ echo 11 > inside/f + $ hg commit -Aqm 'inside 11' + $ echo 12 > outside/f + $ hg commit -Aqm 'outside 12' + + $ hg log -G -T '{rev} {node|short} {desc}\n' + @ 21 8d874d57adea outside 12 + | + o 20 7ef88b4dd4fa inside 11 + | + o 19 2a20009de83e outside 10 + | + o 18 3ac1f5779de3 merge a/b/c/d 9 + |\ + | o 17 38a9c2f7e546 outside 8 + | | + | o 16 094aa62fc898 merge a/b/c 7 + | |\ + | | o 15 f29d083d32e4 outside 6 + | | | + | | o 14 2dc11382541d merge a/b 5 + | | |\ + o | | | 13 27d07ef97221 outside 4d + | | | | + o | | | 12 465567bdfb2d inside 3 + | | | | + o | | | 11 d1c61993ec83 outside 2d + | | | | + | o | | 10 56859a8e33b9 outside 4c + | | | | + | o | | 9 bb96a08b062a inside 3 + | | | | + | o | | 8 b844052e7b3b outside 2c + |/ / / + | | o 7 9db2d8fcc2a6 outside 4b + | | | + | | o 6 6418167787a6 inside 3 + | | | + +---o 5 77344f344d83 outside 2b + | | + | o 4 9cadde08dc9f outside 4a + | | + | o 3 019ef06f125b inside 3 + | | + | o 2 75e40c075a19 outside 2a + |/ + o 1 906d6c682641 initial outside + | + o 0 9f8e82b51004 initial inside + + +Now narrow clone this and get a hopefully correct graph + + $ cd .. 
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 14 changesets with 3 changes to 1 files + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + +To make updating the tests easier, we print the emitted nodes +sorted. This makes it easier to identify when the same node structure +has been emitted, just in a different order. + + $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort + ...094aa62fc898 6418167787a6 bb96a08b062a merge a/b/c 7 + ...2a20009de83e 019ef06f125b 3ac1f5779de3 outside 10 + ...3ac1f5779de3 465567bdfb2d 094aa62fc898 merge a/b/c/d 9 + ...75e40c075a19 9f8e82b51004 000000000000 outside 2a + ...77344f344d83 9f8e82b51004 000000000000 outside 2b + ...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12 + ...b844052e7b3b 9f8e82b51004 000000000000 outside 2c + ...d1c61993ec83 9f8e82b51004 000000000000 outside 2d + 019ef06f125b 75e40c075a19 000000000000 inside 3 + 465567bdfb2d d1c61993ec83 000000000000 inside 3 + 6418167787a6 77344f344d83 000000000000 inside 3 + 7ef88b4dd4fa 2a20009de83e 000000000000 inside 11 + 9f8e82b51004 000000000000 000000000000 initial inside + bb96a08b062a b844052e7b3b 000000000000 inside 3 + +But seeing the graph is also nice: + $ hg log -G -T '{if(ellipsis,"...")}{node|short} {desc}\n' + @ ...8d874d57adea outside 12 + | + o 7ef88b4dd4fa inside 11 + | + o ...2a20009de83e outside 10 + |\ + | o ...3ac1f5779de3 merge a/b/c/d 9 + | |\ + | | o ...094aa62fc898 merge a/b/c 7 + | | |\ + | o | | 465567bdfb2d inside 3 + | | | | + | o | | ...d1c61993ec83 outside 2d + | | | | + | | | o bb96a08b062a inside 3 + | | | | + | +---o ...b844052e7b3b outside 2c + | | | + | | o 6418167787a6 inside 3 + | | | + | | o ...77344f344d83 outside 2b + | |/ + o | 019ef06f125b inside 3 + | | + o | ...75e40c075a19 outside 2a + 
|/ + o 9f8e82b51004 initial inside +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-exchange.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,209 @@ + + $ . "$TESTDIR/narrow-library.sh" + +create full repo + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + + $ mkdir inside + $ echo 1 > inside/f + $ mkdir inside2 + $ echo 1 > inside2/f + $ mkdir outside + $ echo 1 > outside/f + $ hg ci -Aqm 'initial' + + $ echo 2 > inside/f + $ hg ci -qm 'inside 2' + + $ echo 2 > inside2/f + $ hg ci -qm 'inside2 2' + + $ echo 2 > outside/f + $ hg ci -qm 'outside 2' + + $ cd .. + + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 2 changes to 1 files + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ hg clone --narrow ssh://user@dummy/master narrow2 --include inside --include inside2 + requesting all changes + adding changesets + adding manifests + adding file changes + added 4 changesets with 4 changes to 2 files + new changesets *:* (glob) + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + +Can push to wider repo if change does not affect paths in wider repo that are +not also in narrower repo + + $ cd narrow + $ echo 3 > inside/f + $ hg ci -m 'inside 3' + $ hg push ssh://user@dummy/narrow2 + pushing to ssh://user@dummy/narrow2 + searching for changes + remote: adding changesets + remote: adding manifests + remote: adding file changes + remote: added 1 changesets with 1 changes to 1 files + +Can push to narrower repo if change affects only paths within remote's +narrow spec + + $ cd ../narrow2 + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + $ hg co -r 'desc("inside 3")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ echo 4 > inside/f + 
$ hg ci -m 'inside 4' + $ hg push ssh://user@dummy/narrow + pushing to ssh://user@dummy/narrow + searching for changes + remote: adding changesets + remote: adding manifests + remote: adding file changes + remote: added 1 changesets with 1 changes to 1 files + +Can push to narrow repo if change affects only paths outside remote's +narrow spec + + $ echo 3 > inside2/f + $ hg ci -m 'inside2 3' +TODO: this should be successful + $ hg push ssh://user@dummy/narrow + pushing to ssh://user@dummy/narrow + searching for changes + remote: adding changesets + remote: adding manifests + remote: adding file changes + remote: transaction abort! + remote: rollback completed + remote: abort: data/inside2/f.i@4a1aa07735e6: unknown parent! + abort: stream ended unexpectedly (got 0 bytes, expected 4) + [255] + +Can pull from wider repo if change affects only paths outside remote's +narrow spec + $ echo 4 > inside2/f + $ hg ci -m 'inside2 4' + $ hg log -G -T '{rev} {node|short} {files}\n' + @ 7 d78a96df731d inside2/f + | + o 6 8c26f5218962 inside2/f + | + o 5 ba3480e2f9de inside/f + | + o 4 4e5edd526618 inside/f + | + o 3 81e7e07b7ab0 outside/f + | + o 2 f3993b8c0c2b inside2/f + | + o 1 8cd66ca966b4 inside/f + | + o 0 c8057d6f53ab inside/f inside2/f outside/f + + $ cd ../narrow + $ hg log -G -T '{rev} {node|short} {files}\n' + o 4 ba3480e2f9de inside/f + | + @ 3 4e5edd526618 inside/f + | + o 2 81e7e07b7ab0 outside/f + | + o 1 8cd66ca966b4 inside/f + | + o 0 c8057d6f53ab inside/f inside2/f outside/f + + $ hg pull ssh://user@dummy/narrow2 + pulling from ssh://user@dummy/narrow2 + searching for changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 0 changes to 0 files + new changesets d78a96df731d + (run 'hg update' to get a working copy) + +Check that the resulting history is valid in the full repo + + $ cd ../narrow2 + $ hg push ssh://user@dummy/master + pushing to ssh://user@dummy/master + searching for changes + remote: adding changesets + 
remote: adding manifests + remote: adding file changes + remote: added 4 changesets with 4 changes to 2 files + $ cd ../master + $ hg verify + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + 3 files, 8 changesets, 10 total revisions + +Can not push to wider repo if change affects paths in wider repo that are +not also in narrower repo + $ cd ../master + $ hg co -r 'desc("inside2 4")' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ echo 5 > inside2/f + $ hg ci -m 'inside2 5' + $ hg log -G -T '{rev} {node|short} {files}\n' + @ 8 5970befb64ba inside2/f + | + o 7 d78a96df731d inside2/f + | + o 6 8c26f5218962 inside2/f + | + o 5 ba3480e2f9de inside/f + | + o 4 4e5edd526618 inside/f + | + o 3 81e7e07b7ab0 outside/f + | + o 2 f3993b8c0c2b inside2/f + | + o 1 8cd66ca966b4 inside/f + | + o 0 c8057d6f53ab inside/f inside2/f outside/f + + $ cd ../narrow + $ hg pull + pulling from ssh://user@dummy/master + searching for changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 0 changes to 0 files + new changesets * (glob) + (run 'hg update' to get a working copy) +TODO: this should tell the user that their narrow clone does not have the +necessary content to be able to push to the target + $ hg push ssh://user@dummy/narrow2 + pushing to ssh://user@dummy/narrow2 + searching for changes + remote: adding changesets + remote: adding manifests + remote: adding file changes + remote: added 1 changesets with 0 changes to 0 files
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-expanddirstate.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,162 @@ + $ . "$TESTDIR/narrow-library.sh" + + $ hg init master + $ cd master + + $ mkdir inside + $ echo inside > inside/f1 + $ mkdir outside + $ echo outside > outside/f2 + $ mkdir patchdir + $ echo patch_this > patchdir/f3 + $ hg ci -Aqm 'initial' + + $ cd .. + + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files + new changesets dff6a2a6d433 + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ cd narrow + + $ mkdir outside + $ echo other_contents > outside/f2 + $ grep outside .hg/narrowspec + [1] + $ grep outside .hg/dirstate + [1] + $ hg status + +`hg status` did not add outside. + $ grep outside .hg/narrowspec + [1] + $ grep outside .hg/dirstate + [1] + +Unfortunately this is not really a candidate for adding to narrowhg proper, +since it depends on some other source for providing the manifests (when using +treemanifests) and file contents. Something like a virtual filesystem and/or +remotefilelog. We want to be useful when not using those systems, so we do not +have this method available in narrowhg proper at the moment. 
+ $ cat > "$TESTTMP/expand_extension.py" <<EOF + > import os + > import sys + > + > from mercurial import encoding + > from mercurial import extensions + > from mercurial import localrepo + > from mercurial import match as matchmod + > from mercurial import narrowspec + > from mercurial import patch + > from mercurial import util as hgutil + > + > def expandnarrowspec(ui, repo, newincludes=None): + > if not newincludes: + > return + > import sys + > newincludes = set([newincludes]) + > includes, excludes = repo.narrowpats + > currentmatcher = narrowspec.match(repo.root, includes, excludes) + > includes = includes | newincludes + > if not repo.currenttransaction(): + > ui.develwarn(b'expandnarrowspec called outside of transaction!') + > repo.setnarrowpats(includes, excludes) + > newmatcher = narrowspec.match(repo.root, includes, excludes) + > added = matchmod.differencematcher(newmatcher, currentmatcher) + > for f in repo[b'.'].manifest().walk(added): + > repo.dirstate.normallookup(f) + > + > def makeds(ui, repo): + > def wrapds(orig, self): + > ds = orig(self) + > class expandingdirstate(ds.__class__): + > @hgutil.propertycache + > def _map(self): + > ret = super(expandingdirstate, self)._map + > with repo.wlock(), repo.lock(), repo.transaction( + > b'expandnarrowspec'): + > expandnarrowspec(ui, repo, + > encoding.environ.get(b'DIRSTATEINCLUDES')) + > return ret + > ds.__class__ = expandingdirstate + > return ds + > return wrapds + > + > def reposetup(ui, repo): + > extensions.wrapfilecache(localrepo.localrepository, b'dirstate', + > makeds(ui, repo)) + > def overridepatch(orig, *args, **kwargs): + > with repo.wlock(): + > expandnarrowspec(ui, repo, encoding.environ.get(b'PATCHINCLUDES')) + > return orig(*args, **kwargs) + > + > extensions.wrapfunction(patch, b'patch', overridepatch) + > EOF + $ cat >> ".hg/hgrc" <<EOF + > [extensions] + > expand_extension = $TESTTMP/expand_extension.py + > EOF + +Since we do not have the ability to rely on a virtual filesystem or 
+remotefilelog in the test, we just fake it by copying the data from the 'master' +repo. + $ cp -a ../master/.hg/store/data/* .hg/store/data +Do that for patchdir as well. + $ cp -a ../master/patchdir . + +`hg status` will now add outside, but not patchdir. + $ DIRSTATEINCLUDES=path:outside hg status + M outside/f2 + $ grep outside .hg/narrowspec + path:outside + $ grep outside .hg/dirstate > /dev/null + $ grep patchdir .hg/narrowspec + [1] + $ grep patchdir .hg/dirstate + [1] + +Get rid of the modification to outside/f2. + $ hg update -C . + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + +This patch will not apply cleanly at the moment, so `hg import` will break + $ cat > "$TESTTMP/foo.patch" <<EOF + > --- patchdir/f3 + > +++ patchdir/f3 + > @@ -1,1 +1,1 @@ + > -this should be "patch_this", but its not, so patch fails + > +this text is irrelevant + > EOF + $ PATCHINCLUDES=path:patchdir hg import -p0 -e "$TESTTMP/foo.patch" -m ignored + applying $TESTTMP/foo.patch + patching file patchdir/f3 + Hunk #1 FAILED at 0 + 1 out of 1 hunks FAILED -- saving rejects to file patchdir/f3.rej + abort: patch failed to apply + [255] + $ grep patchdir .hg/narrowspec + [1] + $ grep patchdir .hg/dirstate > /dev/null + [1] + +Let's make it apply cleanly and see that it *did* expand properly + $ cat > "$TESTTMP/foo.patch" <<EOF + > --- patchdir/f3 + > +++ patchdir/f3 + > @@ -1,1 +1,1 @@ + > -patch_this + > +patched_this + > EOF + $ PATCHINCLUDES=path:patchdir hg import -p0 -e "$TESTTMP/foo.patch" -m message + applying $TESTTMP/foo.patch + $ cat patchdir/f3 + patched_this + $ grep patchdir .hg/narrowspec + path:patchdir + $ grep patchdir .hg/dirstate > /dev/null
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-merge.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,104 @@ +#testcases flat tree + + $ . "$TESTDIR/narrow-library.sh" + +#if tree + $ cat << EOF >> $HGRCPATH + > [experimental] + > treemanifest = 1 + > EOF +#endif + +create full repo + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + + $ mkdir inside + $ echo inside1 > inside/f1 + $ echo inside2 > inside/f2 + $ mkdir outside + $ echo outside1 > outside/f1 + $ echo outside2 > outside/f2 + $ hg ci -Aqm 'initial' + + $ echo modified > inside/f1 + $ hg ci -qm 'modify inside/f1' + + $ hg update -q 0 + $ echo modified > inside/f2 + $ hg ci -qm 'modify inside/f2' + + $ hg update -q 0 + $ echo modified2 > inside/f1 + $ hg ci -qm 'conflicting inside/f1' + + $ hg update -q 0 + $ echo modified > outside/f1 + $ hg ci -qm 'modify outside/f1' + + $ hg update -q 0 + $ echo modified2 > outside/f1 + $ hg ci -qm 'conflicting outside/f1' + + $ cd .. 
+ + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 6 changesets with 5 changes to 2 files (+4 heads) + new changesets *:* (glob) + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + + $ hg update -q 0 + +Can merge in when no files outside narrow spec are involved + + $ hg update -q 'desc("modify inside/f1")' + $ hg merge 'desc("modify inside/f2")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg commit -m 'merge inside changes' + +Can merge conflicting changes inside narrow spec + + $ hg update -q 'desc("modify inside/f1")' + $ hg merge 'desc("conflicting inside/f1")' 2>&1 | egrep -v '(warning:|incomplete!)' + merging inside/f1 + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + $ echo modified3 > inside/f1 + $ hg resolve -m + (no more unresolved files) + $ hg commit -m 'merge inside/f1' + +TODO: Can merge non-conflicting changes outside narrow spec + + $ hg update -q 'desc("modify inside/f1")' + $ hg merge 'desc("modify outside/f1")' + abort: merge affects file 'outside/f1' outside narrow, which is not yet supported (flat !) + abort: merge affects file 'outside/' outside narrow, which is not yet supported (tree !) + (merging in the other direction may work) + [255] + + $ hg update -q 'desc("modify outside/f1")' + $ hg merge 'desc("modify inside/f1")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ hg ci -m 'merge from inside to outside' + +Refuses merge of conflicting outside changes + + $ hg update -q 'desc("modify outside/f1")' + $ hg merge 'desc("conflicting outside/f1")' + abort: conflict in file 'outside/f1' is outside narrow clone (flat !) 
+ abort: conflict in file 'outside/' is outside narrow clone (tree !) + [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-patch.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,84 @@ +#testcases flat tree + + $ . "$TESTDIR/narrow-library.sh" + +#if tree + $ cat << EOF >> $HGRCPATH + > [experimental] + > treemanifest = 1 + > EOF +#endif + +create full repo + + $ hg init master + $ cd master + + $ mkdir inside + $ echo inside > inside/f1 + $ mkdir outside + $ echo outside > outside/f1 + $ hg ci -Aqm 'initial' + + $ echo modified > inside/f1 + $ hg ci -qm 'modify inside' + + $ echo modified > outside/f1 + $ hg ci -qm 'modify outside' + + $ cd .. + + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 2 changes to 1 files + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + +Can show patch touching paths outside + + $ hg log -p + changeset: 2:* (glob) + tag: tip + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: modify outside + + + changeset: 1:* (glob) + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: modify inside + + diff -r * -r * inside/f1 (glob) + --- a/inside/f1 Thu Jan 01 00:00:00 1970 +0000 + +++ b/inside/f1 Thu Jan 01 00:00:00 1970 +0000 + @@ -1,1 +1,1 @@ + -inside + +modified + + changeset: 0:* (glob) + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: initial + + diff -r 000000000000 -r * inside/f1 (glob) + --- /dev/null Thu Jan 01 00:00:00 1970 +0000 + +++ b/inside/f1 Thu Jan 01 00:00:00 1970 +0000 + @@ -0,0 +1,1 @@ + +inside + + + $ hg status --rev 1 --rev 2 + +Can show copies inside the narrow clone + + $ hg cp inside/f1 inside/f2 + $ hg diff --git + diff --git a/inside/f1 b/inside/f2 + copy from inside/f1 + copy to inside/f2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-patterns.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,435 @@ + $ . "$TESTDIR/narrow-library.sh" + +initialize nested directories to validate complex include/exclude patterns + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + + $ echo root > root + $ hg add root + $ hg commit -m 'add root' + + $ for d in dir1 dir2 dir1/dirA dir1/dirB dir2/dirA dir2/dirB + > do + > mkdir -p $d + > echo $d/foo > $d/foo + > hg add $d/foo + > hg commit -m "add $d/foo" + > echo $d/bar > $d/bar + > hg add $d/bar + > hg commit -m "add $d/bar" + > done +#if execbit + $ chmod +x dir1/dirA/foo + $ hg commit -m "make dir1/dirA/foo executable" +#else + $ hg import --bypass - <<EOF + > # HG changeset patch + > make dir1/dirA/foo executable + > + > diff --git a/dir1/dirA/foo b/dir1/dirA/foo + > old mode 100644 + > new mode 100755 + > EOF + applying patch from stdin + $ hg update -qr tip +#endif + $ hg log -G -T '{rev} {node|short} {files}\n' + @ 13 c87ca422d521 dir1/dirA/foo + | + o 12 951b8a83924e dir2/dirB/bar + | + o 11 01ae5a51b563 dir2/dirB/foo + | + o 10 5eababdf0ac5 dir2/dirA/bar + | + o 9 99d690663739 dir2/dirA/foo + | + o 8 8e80155d5445 dir1/dirB/bar + | + o 7 406760310428 dir1/dirB/foo + | + o 6 623466a5f475 dir1/dirA/bar + | + o 5 06ff3a5be997 dir1/dirA/foo + | + o 4 33227af02764 dir2/bar + | + o 3 5e1f9d8d7c69 dir2/foo + | + o 2 594bc4b13d4a dir1/bar + | + o 1 47f480a08324 dir1/foo + | + o 0 2a4f0c3b67da root + + $ cd .. 
+ +clone a narrow portion of the master, such that we can widen it later + + $ hg clone --narrow ssh://user@dummy/master narrow \ + > --include dir1 \ + > --include dir2 \ + > --exclude dir1/dirA \ + > --exclude dir1/dirB \ + > --exclude dir2/dirA \ + > --exclude dir2/dirB + requesting all changes + adding changesets + adding manifests + adding file changes + added 6 changesets with 4 changes to 4 files + new changesets *:* (glob) + updating to branch default + 4 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ cd narrow + $ cat .hg/narrowspec + [includes] + path:dir1 + path:dir2 + [excludes] + path:dir1/dirA + path:dir1/dirB + path:dir2/dirA + path:dir2/dirB + $ hg manifest -r tip + dir1/bar + dir1/dirA/bar + dir1/dirA/foo + dir1/dirB/bar + dir1/dirB/foo + dir1/foo + dir2/bar + dir2/dirA/bar + dir2/dirA/foo + dir2/dirB/bar + dir2/dirB/foo + dir2/foo + root + $ find * | sort + dir1 + dir1/bar + dir1/foo + dir2 + dir2/bar + dir2/foo + $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n' + @ 5 c87ca422d521... dir1/dirA/foo + | + o 4 33227af02764 dir2/bar + | + o 3 5e1f9d8d7c69 dir2/foo + | + o 2 594bc4b13d4a dir1/bar + | + o 1 47f480a08324 dir1/foo + | + o 0 2a4f0c3b67da... 
root + + +widen the narrow checkout + + $ hg tracked --removeexclude dir1/dirA + comparing with ssh://user@dummy/master + searching for changes + no changes found + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 9 changesets with 6 changes to 6 files + new changesets *:* (glob) + $ cat .hg/narrowspec + [includes] + path:dir1 + path:dir2 + [excludes] + path:dir1/dirB + path:dir2/dirA + path:dir2/dirB + $ find * | sort + dir1 + dir1/bar + dir1/dirA + dir1/dirA/bar + dir1/dirA/foo + dir1/foo + dir2 + dir2/bar + dir2/foo + +#if execbit + $ test -x dir1/dirA/foo && echo executable + executable + $ test -x dir1/dirA/bar || echo not executable + not executable +#endif + + $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n' + @ 8 c87ca422d521 dir1/dirA/foo + | + o 7 951b8a83924e... dir2/dirB/bar + | + o 6 623466a5f475 dir1/dirA/bar + | + o 5 06ff3a5be997 dir1/dirA/foo + | + o 4 33227af02764 dir2/bar + | + o 3 5e1f9d8d7c69 dir2/foo + | + o 2 594bc4b13d4a dir1/bar + | + o 1 47f480a08324 dir1/foo + | + o 0 2a4f0c3b67da... 
root + + +widen narrow spec again, but exclude a file in previously included spec + + $ hg tracked --removeexclude dir2/dirB --addexclude dir1/dirA/bar + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + deleting data/dir1/dirA/bar.i + no changes found + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 11 changesets with 7 changes to 7 files + new changesets *:* (glob) + $ cat .hg/narrowspec + [includes] + path:dir1 + path:dir2 + [excludes] + path:dir1/dirA/bar + path:dir1/dirB + path:dir2/dirA + $ find * | sort + dir1 + dir1/bar + dir1/dirA + dir1/dirA/foo + dir1/foo + dir2 + dir2/bar + dir2/dirB + dir2/dirB/bar + dir2/dirB/foo + dir2/foo + $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n' + @ 10 c87ca422d521 dir1/dirA/foo + | + o 9 951b8a83924e dir2/dirB/bar + | + o 8 01ae5a51b563 dir2/dirB/foo + | + o 7 5eababdf0ac5... dir2/dirA/bar + | + o 6 623466a5f475... dir1/dirA/bar + | + o 5 06ff3a5be997 dir1/dirA/foo + | + o 4 33227af02764 dir2/bar + | + o 3 5e1f9d8d7c69 dir2/foo + | + o 2 594bc4b13d4a dir1/bar + | + o 1 47f480a08324 dir1/foo + | + o 0 2a4f0c3b67da... 
root + + +widen narrow spec yet again, excluding a directory in previous spec + + $ hg tracked --removeexclude dir2/dirA --addexclude dir1/dirA + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + deleting data/dir1/dirA/foo.i + no changes found + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 13 changesets with 8 changes to 8 files + new changesets *:* (glob) + $ cat .hg/narrowspec + [includes] + path:dir1 + path:dir2 + [excludes] + path:dir1/dirA + path:dir1/dirA/bar + path:dir1/dirB + $ find * | sort + dir1 + dir1/bar + dir1/foo + dir2 + dir2/bar + dir2/dirA + dir2/dirA/bar + dir2/dirA/foo + dir2/dirB + dir2/dirB/bar + dir2/dirB/foo + dir2/foo + $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n' + @ 12 c87ca422d521... dir1/dirA/foo + | + o 11 951b8a83924e dir2/dirB/bar + | + o 10 01ae5a51b563 dir2/dirB/foo + | + o 9 5eababdf0ac5 dir2/dirA/bar + | + o 8 99d690663739 dir2/dirA/foo + | + o 7 8e80155d5445... dir1/dirB/bar + | + o 6 623466a5f475... dir1/dirA/bar + | + o 5 06ff3a5be997... dir1/dirA/foo + | + o 4 33227af02764 dir2/bar + | + o 3 5e1f9d8d7c69 dir2/foo + | + o 2 594bc4b13d4a dir1/bar + | + o 1 47f480a08324 dir1/foo + | + o 0 2a4f0c3b67da... 
root + + +include a directory that was previously explicitly excluded + + $ hg tracked --removeexclude dir1/dirA + comparing with ssh://user@dummy/master + searching for changes + no changes found + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 13 changesets with 9 changes to 9 files + new changesets *:* (glob) + $ cat .hg/narrowspec + [includes] + path:dir1 + path:dir2 + [excludes] + path:dir1/dirA/bar + path:dir1/dirB + $ find * | sort + dir1 + dir1/bar + dir1/dirA + dir1/dirA/foo + dir1/foo + dir2 + dir2/bar + dir2/dirA + dir2/dirA/bar + dir2/dirA/foo + dir2/dirB + dir2/dirB/bar + dir2/dirB/foo + dir2/foo + $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n' + @ 12 c87ca422d521 dir1/dirA/foo + | + o 11 951b8a83924e dir2/dirB/bar + | + o 10 01ae5a51b563 dir2/dirB/foo + | + o 9 5eababdf0ac5 dir2/dirA/bar + | + o 8 99d690663739 dir2/dirA/foo + | + o 7 8e80155d5445... dir1/dirB/bar + | + o 6 623466a5f475... dir1/dirA/bar + | + o 5 06ff3a5be997 dir1/dirA/foo + | + o 4 33227af02764 dir2/bar + | + o 3 5e1f9d8d7c69 dir2/foo + | + o 2 594bc4b13d4a dir1/bar + | + o 1 47f480a08324 dir1/foo + | + o 0 2a4f0c3b67da... root + + + $ cd .. 
+ +clone a narrow portion of the master, such that we can widen it later + + $ hg clone --narrow ssh://user@dummy/master narrow2 --include dir1/dirA + requesting all changes + adding changesets + adding manifests + adding file changes + added 5 changesets with 2 changes to 2 files + new changesets *:* (glob) + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow2 + $ find * | sort + dir1 + dir1/dirA + dir1/dirA/bar + dir1/dirA/foo + $ hg tracked --addinclude dir1 + comparing with ssh://user@dummy/master + searching for changes + no changes found + saved backup bundle to $TESTTMP/narrow2/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 10 changesets with 6 changes to 6 files + new changesets *:* (glob) + $ find * | sort + dir1 + dir1/bar + dir1/dirA + dir1/dirA/bar + dir1/dirA/foo + dir1/dirB + dir1/dirB/bar + dir1/dirB/foo + dir1/foo + $ hg log -G -T '{rev} {node|short}{if(ellipsis, "...")} {files}\n' + @ 9 c87ca422d521 dir1/dirA/foo + | + o 8 951b8a83924e... dir2/dirB/bar + | + o 7 8e80155d5445 dir1/dirB/bar + | + o 6 406760310428 dir1/dirB/foo + | + o 5 623466a5f475 dir1/dirA/bar + | + o 4 06ff3a5be997 dir1/dirA/foo + | + o 3 33227af02764... dir2/bar + | + o 2 594bc4b13d4a dir1/bar + | + o 1 47f480a08324 dir1/foo + | + o 0 2a4f0c3b67da... root +
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-pull.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,175 @@ + $ . "$TESTDIR/narrow-library.sh" + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + $ for x in `$TESTDIR/seq.py 10` + > do + > echo $x > "f$x" + > hg add "f$x" + > hg commit -m "Commit f$x" + > done + $ cd .. + +narrow clone a couple files, f2 and f8 + + $ hg clone --narrow ssh://user@dummy/master narrow --include "f2" --include "f8" + requesting all changes + adding changesets + adding manifests + adding file changes + added 5 changesets with 2 changes to 2 files + new changesets *:* (glob) + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + $ ls + f2 + f8 + $ cat f2 f8 + 2 + 8 + + $ cd .. + +change every upstream file twice + + $ cd master + $ for x in `$TESTDIR/seq.py 10` + > do + > echo "update#1 $x" >> "f$x" + > hg commit -m "Update#1 to f$x" "f$x" + > done + $ for x in `$TESTDIR/seq.py 10` + > do + > echo "update#2 $x" >> "f$x" + > hg commit -m "Update#2 to f$x" "f$x" + > done + $ cd .. + +look for incoming changes + + $ cd narrow + $ hg incoming --limit 3 + comparing with ssh://user@dummy/master + searching for changes + changeset: 5:ddc055582556 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: Update#1 to f1 + + changeset: 6:f66eb5ad621d + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: Update#1 to f2 + + changeset: 7:c42ecff04e99 + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: Update#1 to f3 + + +Interrupting the pull is safe + $ hg --config hooks.pretxnchangegroup.bad=false pull -q + transaction abort! + rollback completed + abort: pretxnchangegroup.bad hook exited with status 1 + [255] + $ hg id + 223311e70a6f tip + +pull new changes down to the narrow clone. Should get 8 new changesets: 4 +relevant to the narrow spec, and 4 ellipsis nodes gluing them all together. 
+ + $ hg pull + pulling from ssh://user@dummy/master + searching for changes + adding changesets + adding manifests + adding file changes + added 9 changesets with 4 changes to 2 files + new changesets *:* (glob) + (run 'hg update' to get a working copy) + $ hg log -T '{rev}: {desc}\n' + 13: Update#2 to f10 + 12: Update#2 to f8 + 11: Update#2 to f7 + 10: Update#2 to f2 + 9: Update#2 to f1 + 8: Update#1 to f8 + 7: Update#1 to f7 + 6: Update#1 to f2 + 5: Update#1 to f1 + 4: Commit f10 + 3: Commit f8 + 2: Commit f7 + 1: Commit f2 + 0: Commit f1 + $ hg update tip + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + +add a change and push it + + $ echo "update#3 2" >> f2 + $ hg commit -m "Update#3 to f2" f2 + $ hg log f2 -T '{rev}: {desc}\n' + 14: Update#3 to f2 + 10: Update#2 to f2 + 6: Update#1 to f2 + 1: Commit f2 + $ hg push + pushing to ssh://user@dummy/master + searching for changes + remote: adding changesets + remote: adding manifests + remote: adding file changes + remote: added 1 changesets with 1 changes to 1 files + $ cd .. + + $ cd master + $ hg log f2 -T '{rev}: {desc}\n' + 30: Update#3 to f2 + 21: Update#2 to f2 + 11: Update#1 to f2 + 1: Commit f2 + $ hg log -l 3 -T '{rev}: {desc}\n' + 30: Update#3 to f2 + 29: Update#2 to f10 + 28: Update#2 to f9 + +Can pull into repo with a single commit + + $ cd .. + $ hg clone -q --narrow ssh://user@dummy/master narrow2 --include "f1" -r 0 + $ cd narrow2 + $ hg pull -q -r 1 + transaction abort! + rollback completed + abort: pull failed on remote + [255] + +Can use 'hg share': + $ cat >> $HGRCPATH <<EOF + > [extensions] + > share= + > EOF + + $ cd .. 
+ $ hg share narrow2 narrow2-share + updating working directory + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow2-share + $ hg status + +We should also be able to unshare without breaking everything: + $ hg unshare + devel-warn: write with no wlock: "narrowspec" at: */hgext/narrow/narrowrepo.py:* (unsharenarrowspec) (glob) + $ hg verify + checking changesets + checking manifests + crosschecking files in changesets and manifests + checking files + 1 files, 1 changesets, 1 total revisions
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-rebase.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,93 @@ + + $ . "$TESTDIR/narrow-library.sh" + +create full repo + + $ hg init master + $ cd master + + $ mkdir inside + $ echo inside1 > inside/f1 + $ echo inside2 > inside/f2 + $ mkdir outside + $ echo outside1 > outside/f1 + $ echo outside2 > outside/f2 + $ hg ci -Aqm 'initial' + + $ echo modified > inside/f1 + $ hg ci -qm 'modify inside/f1' + + $ hg update -q 0 + $ echo modified2 > inside/f2 + $ hg ci -qm 'modify inside/f2' + + $ hg update -q 0 + $ echo modified > outside/f1 + $ hg ci -qm 'modify outside/f1' + + $ hg update -q 0 + $ echo modified2 > outside/f1 + $ hg ci -qm 'conflicting outside/f1' + + $ cd .. + + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 5 changesets with 4 changes to 2 files (+3 heads) + new changesets *:* (glob) + updating to branch default + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + $ cat >> $HGRCPATH <<EOF + > [extensions] + > rebase= + > EOF + + $ hg update -q 0 + +Can rebase onto commit where no files outside narrow spec are involved + + $ hg update -q 0 + $ echo modified > inside/f2 + $ hg ci -qm 'modify inside/f2' + $ hg rebase -d 'desc("modify inside/f1")' + rebasing 5:c2f36d04e05d "modify inside/f2" (tip) + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-rebase.hg (glob) + +Can rebase onto conflicting changes inside narrow spec + + $ hg update -q 0 + $ echo conflicting > inside/f1 + $ hg ci -qm 'conflicting inside/f1' + $ hg rebase -d 'desc("modify inside/f1")' 2>&1 | egrep -v '(warning:|incomplete!)' + rebasing 6:cdce97fbf653 "conflicting inside/f1" (tip) + merging inside/f1 + unresolved conflicts (see hg resolve, then hg rebase --continue) + $ echo modified3 > inside/f1 + $ hg resolve -m 2>&1 | grep -v continue: + (no more unresolved files) + $ 
hg rebase --continue + rebasing 6:cdce97fbf653 "conflicting inside/f1" (tip) + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-rebase.hg (glob) + +Can rebase onto non-conflicting changes outside narrow spec + + $ hg update -q 0 + $ echo modified > inside/f2 + $ hg ci -qm 'modify inside/f2' + $ hg rebase -d 'desc("modify outside/f1")' + rebasing 7:c2f36d04e05d "modify inside/f2" (tip) + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-rebase.hg (glob) + +Rebase interrupts on conflicting changes outside narrow spec + + $ hg update -q 'desc("conflicting outside/f1")' + $ hg phase -f -d . + no phases changed + $ hg rebase -d 'desc("modify outside/f1")' + rebasing 4:707c035aadb6 "conflicting outside/f1" + abort: conflict in file 'outside/f1' is outside narrow clone + [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-shallow-merges.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,345 @@ + $ . "$TESTDIR/narrow-library.sh" + +create full repo + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + + $ mkdir inside + $ echo 1 > inside/f + $ hg commit -Aqm 'initial inside' + + $ mkdir outside + $ echo 1 > outside/f + $ hg commit -Aqm 'initial outside' + + $ echo 2a > outside/f + $ hg commit -Aqm 'outside 2a' + $ echo 3 > inside/f + $ hg commit -Aqm 'inside 3' + $ echo 4a > outside/f + $ hg commit -Aqm 'outside 4a' + $ hg update '.~3' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ echo 2b > outside/f + $ hg commit -Aqm 'outside 2b' + $ echo 3 > inside/f + $ hg commit -Aqm 'inside 3' + $ echo 4b > outside/f + $ hg commit -Aqm 'outside 4b' + $ hg update '.~3' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ echo 2c > outside/f + $ hg commit -Aqm 'outside 2c' + $ echo 3 > inside/f + $ hg commit -Aqm 'inside 3' + $ echo 4c > outside/f + $ hg commit -Aqm 'outside 4c' + $ hg update '.~3' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + + $ echo 2d > outside/f + $ hg commit -Aqm 'outside 2d' + $ echo 3 > inside/f + $ hg commit -Aqm 'inside 3' + $ echo 4d > outside/f + $ hg commit -Aqm 'outside 4d' + + $ hg update -r 'desc("outside 4a")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)' + merging outside/f + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + $ echo 5 > outside/f + $ rm outside/f.orig + $ hg resolve --mark outside/f + (no more unresolved files) + $ hg commit -m 'merge a/b 5' + $ echo 6 > outside/f + $ hg commit -Aqm 'outside 6' + + $ hg merge -r 'desc("outside 4c")' 2>&1 | egrep -v 
'(warning:|incomplete!)' + merging outside/f + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + $ echo 7 > outside/f + $ rm outside/f.orig + $ hg resolve --mark outside/f + (no more unresolved files) + $ hg commit -Aqm 'merge a/b/c 7' + $ echo 8 > outside/f + $ hg commit -Aqm 'outside 8' + + $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)' + merging outside/f + 0 files updated, 0 files merged, 0 files removed, 1 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + $ echo 9 > outside/f + $ rm outside/f.orig + $ hg resolve --mark outside/f + (no more unresolved files) + $ hg commit -Aqm 'merge a/b/c/d 9' + $ echo 10 > outside/f + $ hg commit -Aqm 'outside 10' + + $ echo 11 > inside/f + $ hg commit -Aqm 'inside 11' + $ echo 12 > outside/f + $ hg commit -Aqm 'outside 12' + + $ hg log -G -T '{rev} {node|short} {desc}\n' + @ 21 8d874d57adea outside 12 + | + o 20 7ef88b4dd4fa inside 11 + | + o 19 2a20009de83e outside 10 + | + o 18 3ac1f5779de3 merge a/b/c/d 9 + |\ + | o 17 38a9c2f7e546 outside 8 + | | + | o 16 094aa62fc898 merge a/b/c 7 + | |\ + | | o 15 f29d083d32e4 outside 6 + | | | + | | o 14 2dc11382541d merge a/b 5 + | | |\ + o | | | 13 27d07ef97221 outside 4d + | | | | + o | | | 12 465567bdfb2d inside 3 + | | | | + o | | | 11 d1c61993ec83 outside 2d + | | | | + | o | | 10 56859a8e33b9 outside 4c + | | | | + | o | | 9 bb96a08b062a inside 3 + | | | | + | o | | 8 b844052e7b3b outside 2c + |/ / / + | | o 7 9db2d8fcc2a6 outside 4b + | | | + | | o 6 6418167787a6 inside 3 + | | | + +---o 5 77344f344d83 outside 2b + | | + | o 4 9cadde08dc9f outside 4a + | | + | o 3 019ef06f125b inside 3 + | | + | o 2 75e40c075a19 outside 2a + |/ + o 1 906d6c682641 initial outside + | + o 0 9f8e82b51004 initial inside + + +Now narrow and shallow clone this and get a hopefully correct graph + + $ cd .. 
+ $ hg clone --narrow ssh://user@dummy/master narrow --include inside --depth 7 + requesting all changes + adding changesets + adding manifests + adding file changes + added 8 changesets with 3 changes to 1 files + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + +To make updating the tests easier, we print the emitted nodes +sorted. This makes it easier to identify when the same node structure +has been emitted, just in a different order. + + $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n' + @ 7 8d874d57adea... outside 12 + | + o 6 7ef88b4dd4fa inside 11 + | + o 5 2a20009de83e... outside 10 + | + o 4 3ac1f5779de3... merge a/b/c/d 9 + |\ + | o 3 465567bdfb2d inside 3 + | | + | o 2 d1c61993ec83... outside 2d + | + o 1 bb96a08b062a inside 3 + | + o 0 b844052e7b3b... outside 2c + + + $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort + ...2a20009de83e 000000000000 3ac1f5779de3 outside 10 + ...3ac1f5779de3 bb96a08b062a 465567bdfb2d merge a/b/c/d 9 + ...8d874d57adea 7ef88b4dd4fa 000000000000 outside 12 + ...b844052e7b3b 000000000000 000000000000 outside 2c + ...d1c61993ec83 000000000000 000000000000 outside 2d + 465567bdfb2d d1c61993ec83 000000000000 inside 3 + 7ef88b4dd4fa 2a20009de83e 000000000000 inside 11 + bb96a08b062a b844052e7b3b 000000000000 inside 3 + + $ cd .. 
+ +Incremental test case: show a pull can pull in a conflicted merge even if elided + + $ hg init pullmaster + $ cd pullmaster + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + $ mkdir inside outside + $ echo v1 > inside/f + $ echo v1 > outside/f + $ hg add inside/f outside/f + $ hg commit -m init + + $ for line in a b c d + > do + > hg update -r 0 + > echo v2$line > outside/f + > hg commit -m "outside 2$line" + > echo v2$line > inside/f + > hg commit -m "inside 2$line" + > echo v3$line > outside/f + > hg commit -m "outside 3$line" + > echo v4$line > outside/f + > hg commit -m "outside 4$line" + > done + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + created new head + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + created new head + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + created new head + + $ cd .. + $ hg clone --narrow ssh://user@dummy/pullmaster pullshallow \ + > --include inside --depth 3 + requesting all changes + adding changesets + adding manifests + adding file changes + added 12 changesets with 5 changes to 1 files (+3 heads) + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd pullshallow + + $ hg log -G -T '{rev} {node|short}{if(ellipsis,"...")} {desc}\n' + @ 11 0ebbd712a0c8... outside 4d + | + o 10 0d4c867aeb23 inside 2d + | + o 9 e932969c3961... outside 2d + + o 8 33d530345455... outside 4c + | + o 7 0ce6481bfe07 inside 2c + | + o 6 caa65c940632... outside 2c + + o 5 3df233defecc... outside 4b + | + o 4 7162cc6d11a4 inside 2b + | + o 3 f2a632f0082d... outside 2b + + o 2 b8a3da16ba49... outside 4a + | + o 1 53f543eb8e45 inside 2a + | + o 0 1be3e5221c6a... 
outside 2a + + $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort + ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d + ...1be3e5221c6a 000000000000 000000000000 outside 2a + ...33d530345455 0ce6481bfe07 000000000000 outside 4c + ...3df233defecc 7162cc6d11a4 000000000000 outside 4b + ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a + ...caa65c940632 000000000000 000000000000 outside 2c + ...e932969c3961 000000000000 000000000000 outside 2d + ...f2a632f0082d 000000000000 000000000000 outside 2b + 0ce6481bfe07 caa65c940632 000000000000 inside 2c + 0d4c867aeb23 e932969c3961 000000000000 inside 2d + 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a + 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b + + $ cd ../pullmaster + $ hg update -r 'desc("outside 4a")' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge -r 'desc("outside 4b")' 2>&1 | egrep -v '(warning:|incomplete!)' + merging inside/f + merging outside/f + 0 files updated, 0 files merged, 0 files removed, 2 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + $ echo 3 > inside/f + $ echo 5 > outside/f + $ rm -f {in,out}side/f.orig + $ hg resolve --mark inside/f outside/f + (no more unresolved files) + $ hg commit -m 'merge a/b 5' + + $ hg update -r 'desc("outside 4c")' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg merge -r 'desc("outside 4d")' 2>&1 | egrep -v '(warning:|incomplete!)' + merging inside/f + merging outside/f + 0 files updated, 0 files merged, 0 files removed, 2 files unresolved + use 'hg resolve' to retry unresolved file merges or 'hg merge --abort' to abandon + $ echo 3 > inside/f + $ echo 5 > outside/f + $ rm -f {in,out}side/f.orig + $ hg resolve --mark inside/f outside/f + (no more unresolved files) + $ hg commit -m 'merge c/d 5' + + $ hg update -r 'desc("merge a/b 5")' + 2 files updated, 0 files merged, 0 files removed, 0 files 
unresolved + $ hg merge -r 'desc("merge c/d 5")' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + (branch merge, don't forget to commit) + $ echo 6 > outside/f + $ hg commit -m 'outside 6' + $ echo 7 > outside/f + $ hg commit -m 'outside 7' + $ echo 8 > outside/f + $ hg commit -m 'outside 8' + + $ cd ../pullshallow + $ hg pull --depth 3 + pulling from ssh://user@dummy/pullmaster + searching for changes + adding changesets + adding manifests + adding file changes + added 4 changesets with 3 changes to 1 files (-3 heads) + new changesets *:* (glob) + (run 'hg update' to get a working copy) + + $ hg log -T '{if(ellipsis,"...")}{node|short} {p1node|short} {p2node|short} {desc}\n' | sort + ...0ebbd712a0c8 0d4c867aeb23 000000000000 outside 4d + ...1be3e5221c6a 000000000000 000000000000 outside 2a + ...33d530345455 0ce6481bfe07 000000000000 outside 4c + ...3df233defecc 7162cc6d11a4 000000000000 outside 4b + ...b8a3da16ba49 53f543eb8e45 000000000000 outside 4a + ...bf545653453e 968003d40c60 000000000000 outside 8 + ...caa65c940632 000000000000 000000000000 outside 2c + ...e932969c3961 000000000000 000000000000 outside 2d + ...f2a632f0082d 000000000000 000000000000 outside 2b + 0ce6481bfe07 caa65c940632 000000000000 inside 2c + 0d4c867aeb23 e932969c3961 000000000000 inside 2d + 53f543eb8e45 1be3e5221c6a 000000000000 inside 2a + 67d49c0bdbda b8a3da16ba49 3df233defecc merge a/b 5 + 7162cc6d11a4 f2a632f0082d 000000000000 inside 2b + 968003d40c60 67d49c0bdbda e867021d52c2 outside 6 + e867021d52c2 33d530345455 0ebbd712a0c8 merge c/d 5
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-shallow.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,122 @@ + $ . "$TESTDIR/narrow-library.sh" + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + $ for x in `$TESTDIR/seq.py 10` + > do + > echo $x > "f$x" + > hg add "f$x" + > done + $ hg commit -m "Add root files" + $ mkdir d1 d2 + $ for x in `$TESTDIR/seq.py 10` + > do + > echo d1/$x > "d1/f$x" + > hg add "d1/f$x" + > echo d2/$x > "d2/f$x" + > hg add "d2/f$x" + > done + $ hg commit -m "Add d1 and d2" + $ for x in `$TESTDIR/seq.py 10` + > do + > echo f$x rev2 > "f$x" + > echo d1/f$x rev2 > "d1/f$x" + > echo d2/f$x rev2 > "d2/f$x" + > hg commit -m "Commit rev2 of f$x, d1/f$x, d2/f$x" + > done + $ cd .. + +narrow and shallow clone the d2 directory + + $ hg clone --narrow ssh://user@dummy/master shallow --include "d2" --depth 2 + requesting all changes + adding changesets + adding manifests + adding file changes + added 4 changesets with 13 changes to 10 files + new changesets *:* (glob) + updating to branch default + 10 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd shallow + $ hg log -T '{rev}{if(ellipsis,"...")}: {desc}\n' + 3: Commit rev2 of f10, d1/f10, d2/f10 + 2: Commit rev2 of f9, d1/f9, d2/f9 + 1: Commit rev2 of f8, d1/f8, d2/f8 + 0...: Commit rev2 of f7, d1/f7, d2/f7 + $ hg update 0 + 3 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cat d2/f7 d2/f8 + d2/f7 rev2 + d2/8 + + $ cd .. + +change every upstream file once + + $ cd master + $ for x in `$TESTDIR/seq.py 10` + > do + > echo f$x rev3 > "f$x" + > echo d1/f$x rev3 > "d1/f$x" + > echo d2/f$x rev3 > "d2/f$x" + > hg commit -m "Commit rev3 of f$x, d1/f$x, d2/f$x" + > done + $ cd .. + +pull new changes with --depth specified. There were 10 changes to the d2 +directory but the shallow pull should only fetch 3. 
+ + $ cd shallow + $ hg pull --depth 2 + pulling from ssh://user@dummy/master + searching for changes + adding changesets + adding manifests + adding file changes + added 4 changesets with 10 changes to 10 files + new changesets *:* (glob) + (run 'hg update' to get a working copy) + $ hg log -T '{rev}{if(ellipsis,"...")}: {desc}\n' + 7: Commit rev3 of f10, d1/f10, d2/f10 + 6: Commit rev3 of f9, d1/f9, d2/f9 + 5: Commit rev3 of f8, d1/f8, d2/f8 + 4...: Commit rev3 of f7, d1/f7, d2/f7 + 3: Commit rev2 of f10, d1/f10, d2/f10 + 2: Commit rev2 of f9, d1/f9, d2/f9 + 1: Commit rev2 of f8, d1/f8, d2/f8 + 0...: Commit rev2 of f7, d1/f7, d2/f7 + $ hg update 4 + merging d2/f1 + merging d2/f2 + merging d2/f3 + merging d2/f4 + merging d2/f5 + merging d2/f6 + merging d2/f7 + 3 files updated, 7 files merged, 0 files removed, 0 files unresolved + $ cat d2/f7 d2/f8 + d2/f7 rev3 + d2/f8 rev2 + $ hg update 7 + 3 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cat d2/f10 + d2/f10 rev3 + + $ cd .. + +cannot clone with zero or negative depth + + $ hg clone --narrow ssh://user@dummy/master bad --include "d2" --depth 0 + requesting all changes + remote: abort: depth must be positive, got 0 + abort: pull failed on remote + [255] + $ hg clone --narrow ssh://user@dummy/master bad --include "d2" --depth -1 + requesting all changes + remote: abort: depth must be positive, got -1 + abort: pull failed on remote + [255]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-strip.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,163 @@ +#testcases flat tree + + $ . "$TESTDIR/narrow-library.sh" + +#if tree + $ cat << EOF >> $HGRCPATH + > [experimental] + > treemanifest = 1 + > EOF +#endif + +create full repo + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + + $ mkdir inside + $ echo inside > inside/f1 + $ mkdir outside + $ echo outside > outside/f1 + $ hg ci -Aqm 'initial' + + $ echo modified > inside/f1 + $ hg ci -qm 'modify inside' + + $ hg co -q 0 + $ echo modified > outside/f1 + $ hg ci -qm 'modify outside' + + $ echo modified again >> outside/f1 + $ hg ci -qm 'modify outside again' + + $ cd .. + + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 2 changes to 1 files (+1 heads) + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + $ cat >> $HGRCPATH <<EOF + > [extensions] + > strip= + > EOF + +Can strip and recover changesets affecting only files within narrow spec + + $ hg co -r 'desc("modify inside")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ rm -f $TESTTMP/narrow/.hg/strip-backup/*-backup.hg + $ hg strip . 
+ 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob) + $ hg unbundle .hg/strip-backup/*-backup.hg + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files (+1 heads) + new changesets * (glob) + (run 'hg heads' to see heads, 'hg merge' to merge) + +Can strip and recover changesets affecting files outside of narrow spec + + $ hg co -r 'desc("modify outside")' + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg log -G -T '{rev} {desc}\n' + o 2 modify inside + | + | @ 1 modify outside again + |/ + o 0 initial + + $ hg debugdata -m 1 + inside/f1\x004d6a634d5ba06331a60c29ee0db8412490a54fcd (esc) (flat !) + outside/f1\x0084ba604d54dee1f13310ce3d4ac2e8a36636691a (esc) (flat !) + inside\x006a8bc41df94075d501f9740587a0c0e13c170dc5t (esc) (tree !) + outside\x00255c2627ebdd3c7dcaa6945246f9b9f02bd45a09t (esc) (tree !) + + $ rm -f $TESTTMP/narrow/.hg/strip-backup/*-backup.hg + $ hg strip . + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob) + $ hg unbundle .hg/strip-backup/*-backup.hg + adding changesets + adding manifests + adding file changes + added 1 changesets with 0 changes to 0 files (+1 heads) + new changesets * (glob) + (run 'hg heads' to see heads, 'hg merge' to merge) + $ hg log -G -T '{rev} {desc}\n' + o 2 modify outside again + | + | o 1 modify inside + |/ + @ 0 initial + +Check that hash of file outside narrow spec got restored + $ hg debugdata -m 2 + inside/f1\x004d6a634d5ba06331a60c29ee0db8412490a54fcd (esc) (flat !) + outside/f1\x0084ba604d54dee1f13310ce3d4ac2e8a36636691a (esc) (flat !) + inside\x006a8bc41df94075d501f9740587a0c0e13c170dc5t (esc) (tree !) + outside\x00255c2627ebdd3c7dcaa6945246f9b9f02bd45a09t (esc) (tree !) 
+ +Also verify we can apply the bundle with 'hg pull': + $ hg co -r 'desc("modify inside")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ rm .hg/strip-backup/*-backup.hg + $ hg strip . + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob) + $ hg pull .hg/strip-backup/*-backup.hg + pulling from .hg/strip-backup/*-backup.hg (glob) + searching for changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 1 changes to 1 files (+1 heads) + new changesets * (glob) + (run 'hg heads' to see heads, 'hg merge' to merge) + + $ rm .hg/strip-backup/*-backup.hg + $ hg strip 0 + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-backup.hg (glob) + $ hg incoming .hg/strip-backup/*-backup.hg + comparing with .hg/strip-backup/*-backup.hg (glob) + changeset: 0:* (glob) + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: initial + + changeset: 1:9e48d953700d (flat !) + changeset: 1:3888164bccf0 (tree !) + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: modify outside again + + changeset: 2:f505d5e96aa8 (flat !) + changeset: 2:40b66f95a209 (tree !) + tag: tip + parent: 0:a99f4d53924d (flat !) + parent: 0:c2a5fabcca3c (tree !) + user: test + date: Thu Jan 01 00:00:00 1970 +0000 + summary: modify inside + + $ hg pull .hg/strip-backup/*-backup.hg + pulling from .hg/strip-backup/*-backup.hg (glob) + requesting all changes + adding changesets + adding manifests + adding file changes + added 3 changesets with 2 changes to 1 files (+1 heads) + new changesets *:* (glob) + (run 'hg heads' to see heads, 'hg merge' to merge)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-update.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,76 @@ + + $ . "$TESTDIR/narrow-library.sh" + +create full repo + + $ hg init master + $ cd master + $ echo init > init + $ hg ci -Aqm 'initial' + + $ mkdir inside + $ echo inside > inside/f1 + $ mkdir outside + $ echo outside > outside/f1 + $ hg ci -Aqm 'add inside and outside' + + $ echo modified > inside/f1 + $ hg ci -qm 'modify inside' + + $ echo modified > outside/f1 + $ hg ci -qm 'modify outside' + + $ cd .. + + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 4 changesets with 2 changes to 1 files + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + $ hg debugindex -c + rev offset length base linkrev nodeid p1 p2 + 0 0 64 0 0 9958b1af2add 000000000000 000000000000 + 1 64 81 1 1 2db4ce2a3bfe 9958b1af2add 000000000000 + 2 145 75 2 2 0980ee31a742 2db4ce2a3bfe 000000000000 + 3 220 (76|77) 3 3 4410145019b7 0980ee31a742 000000000000 (re) + + $ hg update -q 0 + +Can update to revision with changes inside + + $ hg update -q 'desc("add inside and outside")' + $ hg update -q 'desc("modify inside")' + $ find * + inside + inside/f1 + $ cat inside/f1 + modified + +Can update to revision with changes outside + + $ hg update -q 'desc("modify outside")' + $ find * + inside + inside/f1 + $ cat inside/f1 + modified + +Can update with a deleted file inside + + $ hg rm inside/f1 + $ hg update -q 'desc("modify inside")' + $ hg update -q 'desc("modify outside")' + $ hg update -q 'desc("initial")' + $ hg update -q 'desc("modify inside")' + +Can update with a moved file inside + + $ hg mv inside/f1 inside/f2 + $ hg update -q 'desc("modify outside")' + $ hg update -q 'desc("initial")' + $ hg update -q 'desc("modify inside")'
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow-widen.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,365 @@ +#testcases flat tree + $ . "$TESTDIR/narrow-library.sh" + +#if tree + $ cat << EOF >> $HGRCPATH + > [experimental] + > treemanifest = 1 + > EOF +#endif + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + + $ mkdir inside + $ echo 'inside' > inside/f + $ hg add inside/f + $ hg commit -m 'add inside' + + $ mkdir widest + $ echo 'widest' > widest/f + $ hg add widest/f + $ hg commit -m 'add widest' + + $ mkdir outside + $ echo 'outside' > outside/f + $ hg add outside/f + $ hg commit -m 'add outside' + + $ cd .. + +narrow clone the inside file + + $ hg clone --narrow ssh://user@dummy/master narrow --include inside + requesting all changes + adding changesets + adding manifests + adding file changes + added 2 changesets with 1 changes to 1 files + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + $ hg tracked + I path:inside + $ ls + inside + $ cat inside/f + inside + $ cd .. + +add more upstream files which we will include in a wider narrow spec + + $ cd master + + $ mkdir wider + $ echo 'wider' > wider/f + $ hg add wider/f + $ echo 'widest v2' > widest/f + $ hg commit -m 'add wider, update widest' + + $ echo 'widest v3' > widest/f + $ hg commit -m 'update widest v3' + + $ echo 'inside v2' > inside/f + $ hg commit -m 'update inside' + + $ mkdir outside2 + $ echo 'outside2' > outside2/f + $ hg add outside2/f + $ hg commit -m 'add outside2' + + $ echo 'widest v4' > widest/f + $ hg commit -m 'update widest v4' + + $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n" + *: update widest v4 (glob) + *: add outside2 (glob) + *: update inside (glob) + *: update widest v3 (glob) + *: add wider, update widest (glob) + *: add outside (glob) + *: add widest (glob) + *: add inside (glob) + + $ cd .. 
+ +Widen the narrow spec to see the wider file. This should not get the newly +added upstream revisions. + + $ cd narrow + $ hg tracked --addinclude wider/f + comparing with ssh://user@dummy/master + searching for changes + no changes found + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 2 changesets with 1 changes to 1 files + new changesets *:* (glob) + $ hg tracked + I path:inside + I path:wider/f + +Pull down the newly added upstream revision. + + $ hg pull + pulling from ssh://user@dummy/master + searching for changes + adding changesets + adding manifests + adding file changes + added 4 changesets with 2 changes to 2 files + new changesets *:* (glob) + (run 'hg update' to get a working copy) + $ hg update -r 'desc("add wider")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cat wider/f + wider + + $ hg update -r 'desc("update inside")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cat wider/f + wider + $ cat inside/f + inside v2 + + $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n" + ...*: update widest v4 (glob) + *: update inside (glob) + ...*: update widest v3 (glob) + *: add wider, update widest (glob) + ...*: add outside (glob) + *: add inside (glob) + +Check that widening with a newline fails + + $ hg tracked --addinclude 'widest + > ' + abort: newlines are not allowed in narrowspec paths + [255] + +widen the narrow spec to include the widest file + + $ hg tracked --addinclude widest + comparing with ssh://user@dummy/master + searching for changes + no changes found + saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 8 changesets with 7 changes to 3 files + new changesets *:* (glob) + $ hg tracked + I path:inside + I path:wider/f + I path:widest + $ hg update 'desc("add widest")' + 2 files updated, 0 
files merged, 1 files removed, 0 files unresolved + $ cat widest/f + widest + $ hg update 'desc("add wider, update widest")' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cat wider/f + wider + $ cat widest/f + widest v2 + $ hg update 'desc("update widest v3")' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cat widest/f + widest v3 + $ hg update 'desc("update widest v4")' + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cat widest/f + widest v4 + + $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n" + *: update widest v4 (glob) + ...*: add outside2 (glob) + *: update inside (glob) + *: update widest v3 (glob) + *: add wider, update widest (glob) + ...*: add outside (glob) + *: add widest (glob) + *: add inside (glob) + +separate suite of tests: files from 0-10 modified in changes 0-10. This allows +more obvious precise tests tickling particular corner cases. + + $ cd .. + $ hg init upstream + $ cd upstream + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + $ for x in `$TESTDIR/seq.py 0 10` + > do + > mkdir d$x + > echo $x > d$x/f + > hg add d$x/f + > hg commit -m "add d$x/f" + > done + $ hg log -T "{node|short}: {desc}\n" + *: add d10/f (glob) + *: add d9/f (glob) + *: add d8/f (glob) + *: add d7/f (glob) + *: add d6/f (glob) + *: add d5/f (glob) + *: add d4/f (glob) + *: add d3/f (glob) + *: add d2/f (glob) + *: add d1/f (glob) + *: add d0/f (glob) + +make narrow clone with every third node. + + $ cd .. 
+ $ hg clone --narrow ssh://user@dummy/upstream narrow2 --include d0 --include d3 --include d6 --include d9 + requesting all changes + adding changesets + adding manifests + adding file changes + added 8 changesets with 4 changes to 4 files + new changesets *:* (glob) + updating to branch default + 4 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow2 + $ hg tracked + I path:d0 + I path:d3 + I path:d6 + I path:d9 + $ hg verify + checking changesets + checking manifests + checking directory manifests (tree !) + crosschecking files in changesets and manifests + checking files + 4 files, 8 changesets, 4 total revisions + $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n" + ...*: add d10/f (glob) + *: add d9/f (glob) + ...*: add d8/f (glob) + *: add d6/f (glob) + ...*: add d5/f (glob) + *: add d3/f (glob) + ...*: add d2/f (glob) + *: add d0/f (glob) + $ hg tracked --addinclude d1 + comparing with ssh://user@dummy/upstream + searching for changes + no changes found + saved backup bundle to $TESTTMP/narrow2/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 9 changesets with 5 changes to 5 files + new changesets *:* (glob) + $ hg tracked + I path:d0 + I path:d1 + I path:d3 + I path:d6 + I path:d9 + $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n" + ...*: add d10/f (glob) + *: add d9/f (glob) + ...*: add d8/f (glob) + *: add d6/f (glob) + ...*: add d5/f (glob) + *: add d3/f (glob) + ...*: add d2/f (glob) + *: add d1/f (glob) + *: add d0/f (glob) + +Verify shouldn't claim the repo is corrupt after a widen. + + $ hg verify + checking changesets + checking manifests + checking directory manifests (tree !) + crosschecking files in changesets and manifests + checking files + 5 files, 9 changesets, 5 total revisions + +Widening preserves parent of local commit + + $ cd .. 
+ $ hg clone -q --narrow ssh://user@dummy/upstream narrow3 --include d2 -r 2 + $ cd narrow3 + $ hg log -T "{if(ellipsis, '...')}{node|short}: {desc}\n" + *: add d2/f (glob) + ...*: add d1/f (glob) + $ hg pull -q -r 3 + $ hg co -q tip + $ hg pull -q -r 4 + $ echo local > d2/f + $ hg ci -m local + created new head + $ hg tracked -q --addinclude d0 --addinclude d9 + +Widening preserves bookmarks + + $ cd .. + $ hg clone -q --narrow ssh://user@dummy/upstream narrow-bookmarks --include d4 + $ cd narrow-bookmarks + $ echo local > d4/f + $ hg ci -m local + $ hg bookmarks bookmark + $ hg bookmarks + * bookmark 3:* (glob) + $ hg -q tracked --addinclude d2 + $ hg bookmarks + * bookmark 5:* (glob) + $ hg log -r bookmark -T '{desc}\n' + local + +Widening that fails can be recovered from + + $ cd .. + $ hg clone -q --narrow ssh://user@dummy/upstream interrupted --include d0 + $ cd interrupted + $ echo local > d0/f + $ hg ci -m local + $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n" + 2: local + ...1: add d10/f + 0: add d0/f + $ hg bookmarks bookmark + $ hg --config hooks.pretxnchangegroup.bad=false tracked --addinclude d1 + comparing with ssh://user@dummy/upstream + searching for changes + no changes found + saved backup bundle to $TESTTMP/interrupted/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 3 changesets with 2 changes to 2 files + transaction abort! + rollback completed + abort: pretxnchangegroup.bad hook exited with status 1 + [255] + $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n" + $ hg bookmarks + no bookmarks set + $ hg unbundle .hg/strip-backup/*-widen.hg + adding changesets + adding manifests + adding file changes + added 3 changesets with 2 changes to 1 files + new changesets *:* (glob) + (run 'hg update' to get a working copy) + $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n" + 2: local + ...1: add d10/f + 0: add d0/f + $ hg bookmarks + * bookmark 2:* (glob)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-narrow.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,374 @@ +#testcases flat tree + + $ . "$TESTDIR/narrow-library.sh" + +#if tree + $ cat << EOF >> $HGRCPATH + > [experimental] + > treemanifest = 1 + > EOF +#endif + + $ hg init master + $ cd master + $ cat >> .hg/hgrc <<EOF + > [narrow] + > serveellipses=True + > EOF + $ for x in `$TESTDIR/seq.py 0 10` + > do + > mkdir d$x + > echo $x > d$x/f + > hg add d$x/f + > hg commit -m "add d$x/f" + > done + $ hg log -T "{node|short}: {desc}\n" + *: add d10/f (glob) + *: add d9/f (glob) + *: add d8/f (glob) + *: add d7/f (glob) + *: add d6/f (glob) + *: add d5/f (glob) + *: add d4/f (glob) + *: add d3/f (glob) + *: add d2/f (glob) + *: add d1/f (glob) + *: add d0/f (glob) + $ cd .. + +Error if '.' or '..' are in the directory to track. + $ hg clone --narrow ssh://user@dummy/master foo --include ./asdf + requesting all changes + abort: "." and ".." are not allowed in narrowspec paths + [255] + $ hg clone --narrow ssh://user@dummy/master foo --include asdf/.. + requesting all changes + abort: "." and ".." are not allowed in narrowspec paths + [255] + $ hg clone --narrow ssh://user@dummy/master foo --include a/./c + requesting all changes + abort: "." and ".." are not allowed in narrowspec paths + [255] + +Names with '.' in them are OK. 
+ $ hg clone --narrow ssh://user@dummy/master should-work --include a/.b/c + requesting all changes + adding changesets + adding manifests + adding file changes + added 1 changesets with 0 changes to 0 files + new changesets * (glob) + updating to branch default + 0 files updated, 0 files merged, 0 files removed, 0 files unresolved + +Test repo with local changes + $ hg clone --narrow ssh://user@dummy/master narrow-local-changes --include d0 --include d3 --include d6 + requesting all changes + adding changesets + adding manifests + adding file changes + added 6 changesets with 3 changes to 3 files + new changesets *:* (glob) + updating to branch default + 3 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow-local-changes + $ cat >> $HGRCPATH << EOF + > [experimental] + > evolution=createmarkers + > EOF + $ echo local change >> d0/f + $ hg ci -m 'local change to d0' + $ hg co '.^' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ echo local change >> d3/f + $ hg ci -m 'local hidden change to d3' + created new head + $ hg ci --amend -m 'local change to d3' + $ hg tracked --removeinclude d0 + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + The following changeset(s) or their ancestors have local changes not on the remote: + * (glob) + abort: local changes found + (use --force-delete-local-changes to ignore) + [255] +Check that nothing was removed by the failed attempts + $ hg tracked + I path:d0 + I path:d3 + I path:d6 + $ hg files + d0/f + d3/f + d6/f + $ find * + d0 + d0/f + d3 + d3/f + d6 + d6/f + $ hg verify -q +Force deletion of local changes + $ hg log -T "{node|short}: {desc} {outsidenarrow}\n" + *: local change to d3 (glob) + *: local change to d0 (glob) + *: add d10/f outsidenarrow (glob) + *: add d6/f (glob) + *: add d5/f outsidenarrow (glob) + *: add d3/f (glob) + *: add d2/f outsidenarrow (glob) + *: add d0/f (glob) + $ hg tracked 
--removeinclude d0 --force-delete-local-changes + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + The following changeset(s) or their ancestors have local changes not on the remote: + * (glob) + saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob) + deleting data/d0/f.i + deleting meta/d0/00manifest.i (tree !) + $ hg log -T "{node|short}: {desc} {outsidenarrow}\n" + *: local change to d3 (glob) + *: add d10/f outsidenarrow (glob) + *: add d6/f (glob) + *: add d5/f outsidenarrow (glob) + *: add d3/f (glob) + *: add d2/f outsidenarrow (glob) + *: add d0/f outsidenarrow (glob) +Can restore stripped local changes after widening + $ hg tracked --addinclude d0 -q + $ hg unbundle .hg/strip-backup/*-narrow.hg -q + $ hg --hidden co -r 'desc("local change to d0")' -q + $ cat d0/f + 0 + local change +Pruned commits affecting removed paths should not prevent narrowing + $ hg co '.^' + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ hg debugobsolete `hg log -T '{node}' -r 'desc("local change to d0")'` + obsoleted 1 changesets + $ hg tracked --removeinclude d0 + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob) + deleting data/d0/f.i + deleting meta/d0/00manifest.i (tree !) 
+Updates off of stripped commit if necessary + $ hg co -r 'desc("local change to d3")' -q + $ echo local change >> d6/f + $ hg ci -m 'local change to d6' + $ hg tracked --removeinclude d3 --force-delete-local-changes + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + The following changeset(s) or their ancestors have local changes not on the remote: + * (glob) + * (glob) + 2 files updated, 0 files merged, 0 files removed, 0 files unresolved + saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob) + deleting data/d3/f.i + deleting meta/d3/00manifest.i (tree !) + $ hg log -T '{desc}\n' -r . + add d10/f +Updates to nullid if necessary + $ hg tracked --addinclude d3 -q + $ hg co null -q + $ mkdir d3 + $ echo local change > d3/f + $ hg add d3/f + $ hg ci -m 'local change to d3' + created new head + $ hg tracked --removeinclude d3 --force-delete-local-changes + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + The following changeset(s) or their ancestors have local changes not on the remote: + * (glob) + 0 files updated, 0 files merged, 1 files removed, 0 files unresolved + saved backup bundle to $TESTTMP/narrow-local-changes/.hg/strip-backup/*-narrow.hg (glob) + deleting data/d3/f.i + deleting meta/d3/00manifest.i (tree !) + $ hg id + 000000000000 + $ cd .. 
+ +Can remove last include, making repo empty + $ hg clone --narrow ssh://user@dummy/master narrow-empty --include d0 -r 5 + adding changesets + adding manifests + adding file changes + added 2 changesets with 1 changes to 1 files + new changesets *:* (glob) + updating to branch default + 1 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow-empty + $ hg tracked --removeinclude d0 + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + deleting data/d0/f.i + deleting meta/d0/00manifest.i (tree !) + $ hg tracked + $ hg files + [1] + $ test -d d0 + [1] +Do some work in the empty clone + $ hg diff --change . + $ hg branch foo + marked working directory as branch foo + (branches are permanent and global, did you want a bookmark?) + $ hg ci -m empty + $ hg pull -q +Can widen the empty clone + $ hg tracked --addinclude d0 + comparing with ssh://user@dummy/master + searching for changes + no changes found + saved backup bundle to $TESTTMP/narrow-empty/.hg/strip-backup/*-widen.hg (glob) + adding changesets + adding manifests + adding file changes + added 3 changesets with 1 changes to 1 files + new changesets *:* (glob) + $ hg tracked + I path:d0 + $ hg files + d0/f + $ find * + d0 + d0/f + $ cd .. + +TODO(martinvonz): test including e.g. 
d3/g and then removing it once +https://bitbucket.org/Google/narrowhg/issues/6 is fixed + + $ hg clone --narrow ssh://user@dummy/master narrow --include d0 --include d3 --include d6 --include d9 + requesting all changes + adding changesets + adding manifests + adding file changes + added 8 changesets with 4 changes to 4 files + new changesets *:* (glob) + updating to branch default + 4 files updated, 0 files merged, 0 files removed, 0 files unresolved + $ cd narrow + $ hg tracked + I path:d0 + I path:d3 + I path:d6 + I path:d9 + $ hg tracked --removeinclude d6 + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + deleting data/d6/f.i + deleting meta/d6/00manifest.i (tree !) + $ hg tracked + I path:d0 + I path:d3 + I path:d9 + $ hg debugrebuildfncache + fncache already up to date + $ find * + d0 + d0/f + d3 + d3/f + d9 + d9/f + $ hg verify -q + $ hg tracked --addexclude d3/f + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + deleting data/d3/f.i + $ hg tracked + I path:d0 + I path:d3 + I path:d9 + X path:d3/f + $ hg debugrebuildfncache + fncache already up to date + $ find * + d0 + d0/f + d9 + d9/f + $ hg verify -q + $ hg tracked --addexclude d0 + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + deleting data/d0/f.i + deleting meta/d0/00manifest.i (tree !) 
+ $ hg tracked + I path:d3 + I path:d9 + X path:d0 + X path:d3/f + $ hg debugrebuildfncache + fncache already up to date + $ find * + d9 + d9/f + +Make a 15 of changes to d9 to test the path without --verbose +(Note: using regexes instead of "* (glob)" because if the test fails, it +produces more sensible diffs) + $ hg tracked + I path:d3 + I path:d9 + X path:d0 + X path:d3/f + $ for x in `$TESTDIR/seq.py 1 15` + > do + > echo local change >> d9/f + > hg commit -m "change $x to d9/f" + > done + $ hg tracked --removeinclude d9 + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + The following changeset(s) or their ancestors have local changes not on the remote: + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ...and 5 more, use --verbose to list all + abort: local changes found + (use --force-delete-local-changes to ignore) + [255] +Now test it *with* verbose. + $ hg tracked --removeinclude d9 --verbose + comparing with ssh://user@dummy/master + searching for changes + looking for local changes to affected paths + The following changeset(s) or their ancestors have local changes not on the remote: + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + ^[0-9a-f]{12}$ (re) + abort: local changes found + (use --force-delete-local-changes to ignore) + [255]
--- a/tests/test-notify.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-notify.t Sun Mar 04 10:42:51 2018 -0500 @@ -421,7 +421,7 @@ > test = False > mbox = mbox > EOF - $ $PYTHON -c 'file("a/a", "ab").write("no" * 500 + "\xd1\x84" + "\n")' + $ $PYTHON -c 'open("a/a", "ab").write("no" * 500 + "\xd1\x84" + "\n")' $ hg --cwd a commit -A -m "long line" $ hg --traceback --cwd b pull ../a pulling from ../a
--- a/tests/test-obsolete-divergent.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-obsolete-divergent.t Sun Mar 04 10:42:51 2018 -0500 @@ -621,6 +621,34 @@ a139f71be9da $ hg log -r 'contentdivergent()' +#if serve + + $ hg serve -n test -p $HGPORT -d --pid-file=hg.pid --config web.view=all \ + > -A access.log -E errors.log + $ cat hg.pid >> $DAEMON_PIDS + +check an obsolete changeset that was rewritten and also split + + $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=paper' | egrep 'rewritten|split' + <td>rewritten as <a href="/rev/bed64f5d2f5a?style=paper">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span><br> + split as <a href="/rev/7ae126973a96?style=paper">7ae126973a96</a> <a href="/rev/14608b260df8?style=paper">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td> + $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=coal' | egrep 'rewritten|split' + <td>rewritten as <a href="/rev/bed64f5d2f5a?style=coal">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span><br> + split as <a href="/rev/7ae126973a96?style=coal">7ae126973a96</a> <a href="/rev/14608b260df8?style=coal">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td> + $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=gitweb' | egrep 'rewritten|split' + <td>rewritten as <a class="list" href="/rev/bed64f5d2f5a?style=gitweb">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td> + <td>split as <a class="list" href="/rev/7ae126973a96?style=gitweb">7ae126973a96</a> <a class="list" href="/rev/14608b260df8?style=gitweb">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td> + $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=monoblue' | egrep 'rewritten|split' + <dd>rewritten as <a href="/rev/bed64f5d2f5a?style=monoblue">bed64f5d2f5a</a> by test <span class="age">Thu, 
01 Jan 1970 00:00:00 +0000</span></dd> + <dd>split as <a href="/rev/7ae126973a96?style=monoblue">7ae126973a96</a> <a href="/rev/14608b260df8?style=monoblue">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></dd> + $ get-with-headers.py localhost:$HGPORT 'rev/e442cfc57690?style=spartan' | egrep 'rewritten|split' + <td class="obsolete">rewritten as <a href="/rev/bed64f5d2f5a?style=spartan">bed64f5d2f5a</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td> + <td class="obsolete">split as <a href="/rev/7ae126973a96?style=spartan">7ae126973a96</a> <a href="/rev/14608b260df8?style=spartan">14608b260df8</a> by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td> + + $ killdaemons.py + +#endif + $ cd ..
--- a/tests/test-obsolete.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-obsolete.t Sun Mar 04 10:42:51 2018 -0500 @@ -18,7 +18,7 @@ > def reposetup(ui, repo): > class debugkeysrepo(repo.__class__): > def listkeys(self, namespace): - > ui.write('listkeys %s\n' % (namespace,)) + > ui.write(b'listkeys %s\n' % (namespace,)) > return super(debugkeysrepo, self).listkeys(namespace) > > if repo.local(): @@ -1049,20 +1049,8 @@ $ get-with-headers.py localhost:$HGPORT 'log?rev=first(obsolete())&style=monoblue' | grep '<span class="logtags">' <span class="logtags"><span class="phasetag" title="draft">draft</span> <span class="obsoletetag" title="obsolete">obsolete</span> </span> $ get-with-headers.py localhost:$HGPORT 'log?rev=first(obsolete())&style=spartan' | grep 'class="obsolete"' - <th class="obsolete">obsolete:</th> - <td class="obsolete">pruned</td> - -check an obsolete changeset that has been rewritten - $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=paper' | grep rewritten - <td>rewritten as <a href="/rev/3de5eca88c00?style=paper">3de5eca88c00</a> </td> - $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=coal' | grep rewritten - <td>rewritten as <a href="/rev/3de5eca88c00?style=coal">3de5eca88c00</a> </td> - $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=gitweb' | grep rewritten - <tr><td>obsolete</td><td>rewritten as <a class="list" href="/rev/3de5eca88c00?style=gitweb">3de5eca88c00</a> </td></tr> - $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=monoblue' | grep rewritten - <dt>obsolete</dt><dd>rewritten as <a href="/rev/3de5eca88c00?style=monoblue">3de5eca88c00</a> </dd> - $ get-with-headers.py localhost:$HGPORT 'rev/cda648ca50f5?style=spartan' | grep rewritten - <td class="obsolete">rewritten as <a href="/rev/3de5eca88c00?style=spartan">3de5eca88c00</a> </td> + <th class="obsolete">obsolete:</th> + <td class="obsolete">pruned by test <span class="age">Thu, 01 Jan 1970 00:00:00 +0000</span></td> 
check changeset with instabilities @@ -1291,12 +1279,12 @@ > > cmdtable = {} > command = registrar.command(cmdtable) - > @command(b"amendtransient",[], _('hg amendtransient [rev]')) + > @command(b"amendtransient",[], _(b'hg amendtransient [rev]')) > def amend(ui, repo, *pats, **opts): > opts['message'] = 'Test' > opts['logfile'] = None > cmdutil.amend(ui, repo, repo['.'], {}, pats, opts) - > ui.write('%s\n' % repo.changelog.headrevs()) + > ui.write(b'%s\n' % repo.changelog.headrevs()) > EOF $ cat >> $HGRCPATH << EOF > [extensions] @@ -1331,7 +1319,7 @@ > def trhook(tr): > repo = reporef() > hidden1 = repoview.computehidden(repo) - > hidden = repoview.filterrevs(repo, 'visible') + > hidden = repoview.filterrevs(repo, b'visible') > if sorted(hidden1) != sorted(hidden): > print("cache inconsistency") > bkmstoreinst._repo.currenttransaction().addpostclose('test_extension', trhook)
--- a/tests/test-parseindex.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-parseindex.t Sun Mar 04 10:42:51 2018 -0500 @@ -41,10 +41,17 @@ > def __getattr__(self, key): > return getattr(self.real, key) > + > def __enter__(self): + > self.real.__enter__() + > return self + > + > def __exit__(self, *args, **kwargs): + > return self.real.__exit__(*args, **kwargs) + > > def opener(*args): > o = vfs.vfs(*args) - > def wrapper(*a): - > f = o(*a) + > def wrapper(*a, **kwargs): + > f = o(*a, **kwargs) > return singlebyteread(f) > return wrapper >
--- a/tests/test-patch-offset.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-patch-offset.t Sun Mar 04 10:42:51 2018 -0500 @@ -5,7 +5,7 @@ > path = sys.argv[1] > patterns = sys.argv[2:] > - > fp = file(path, 'wb') + > fp = open(path, 'wb') > for pattern in patterns: > count = int(pattern[0:-1]) > char = pattern[-1] + '\n'
--- a/tests/test-pathencode.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-pathencode.py Sun Mar 04 10:42:51 2018 -0500 @@ -64,7 +64,7 @@ counts.pop(c, None) t = sum(counts.itervalues()) / 100.0 fp.write('probtable = (') - for i, (k, v) in enumerate(sorted(counts.iteritems(), key=lambda x: x[1], + for i, (k, v) in enumerate(sorted(counts.items(), key=lambda x: x[1], reverse=True)): if (i % 5) == 0: fp.write('\n ')
--- a/tests/test-pending.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-pending.t Sun Mar 04 10:42:51 2018 -0500 @@ -44,7 +44,7 @@ > import os, time > from mercurial import ui, localrepo > def rejecthook(ui, repo, hooktype, node, **opts): - > ui.write('hook %s\\n' % repo['tip'].hex()) + > ui.write(b'hook %s\\n' % repo[b'tip'].hex()) > # create the notify file so caller knows we're running > fpath = os.path.join('$d', 'notify') > f = open(fpath, 'w')
--- a/tests/test-pull.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-pull.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,5 +1,15 @@ #require serve +#testcases sshv1 sshv2 + +#if sshv2 + $ cat >> $HGRCPATH << EOF + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF +#endif + $ hg init test $ cd test
--- a/tests/test-pushvars.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-pushvars.t Sun Mar 04 10:42:51 2018 -0500 @@ -11,8 +11,6 @@ $ cat >> $HGRCPATH << EOF > [hooks] > pretxnchangegroup = sh $TESTTMP/pretxnchangegroup.sh - > [experimental] - > bundle2-exp = true > EOF $ hg init repo
--- a/tests/test-rebase-dest.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-rebase-dest.t Sun Mar 04 10:42:51 2018 -0500 @@ -85,20 +85,20 @@ > from mercurial import registrar, revset, revsetlang, smartset > revsetpredicate = registrar.revsetpredicate() > cache = {} - > @revsetpredicate('map') + > @revsetpredicate(b'map') > def map(repo, subset, x): > """(set, mapping)""" - > setarg, maparg = revsetlang.getargs(x, 2, 2, '') + > setarg, maparg = revsetlang.getargs(x, 2, 2, b'') > rset = revset.getset(repo, smartset.fullreposet(repo), setarg) - > mapstr = revsetlang.getstring(maparg, '') - > map = dict(a.split(':') for a in mapstr.split(',')) + > mapstr = revsetlang.getstring(maparg, b'') + > map = dict(a.split(b':') for a in mapstr.split(b',')) > rev = rset.first() > desc = repo[rev].description() > newdesc = map.get(desc) - > if newdesc == 'null': + > if newdesc == b'null': > revs = [-1] > else: - > query = revsetlang.formatspec('desc(%s)', newdesc) + > query = revsetlang.formatspec(b'desc(%s)', newdesc) > revs = repo.revs(query) > return smartset.baseset(revs) > EOF
--- a/tests/test-rebase-obsolete.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-rebase-obsolete.t Sun Mar 04 10:42:51 2018 -0500 @@ -1218,6 +1218,46 @@ o 0:b173517d0057 a +issue5782 + $ hg strip -r 0: + $ hg debugdrawdag <<EOF + > d + > | + > c1 c # replace: c -> c1 + > \ / + > b + > | + > a + > EOF + 1 new orphan changesets + $ hg debugobsolete `hg log -T "{node}" --hidden -r 'desc("c1")'` + obsoleted 1 changesets + $ hg log -G -r 'a': --hidden + * 4:76be324c128b d + | + | x 3:ef8a456de8fa c1 (pruned) + | | + x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa) + |/ + o 1:488e1b7e7341 b + | + o 0:b173517d0057 a + + $ hg rebase -d 0 -r 2 + rebasing 2:a82ac2b38757 "c" (c) + $ hg log -G -r 'a': --hidden + o 5:69ad416a4a26 c + | + | * 4:76be324c128b d + | | + | | x 3:ef8a456de8fa c1 (pruned) + | | | + | x | 2:a82ac2b38757 c (rewritten using replace as 3:ef8a456de8fa rewritten using rebase as 5:69ad416a4a26) + | |/ + | o 1:488e1b7e7341 b + |/ + o 0:b173517d0057 a + $ cd .. Rebase merge where successor of one parent is equal to destination (issue5198)
--- a/tests/test-rebase-scenario-global.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-rebase-scenario-global.t Sun Mar 04 10:42:51 2018 -0500 @@ -954,14 +954,14 @@ > def _rebase(orig, ui, repo, *args, **kwargs): > with repo.wlock(): > with repo.lock(): - > with repo.transaction('wrappedrebase'): + > with repo.transaction(b'wrappedrebase'): > return orig(ui, repo, *args, **kwargs) > def wraprebase(loaded): > assert loaded - > rebasemod = extensions.find('rebase') - > extensions.wrapcommand(rebasemod.cmdtable, 'rebase', _rebase) + > rebasemod = extensions.find(b'rebase') + > extensions.wrapcommand(rebasemod.cmdtable, b'rebase', _rebase) > def extsetup(ui): - > extensions.afterloaded('rebase', wraprebase) + > extensions.afterloaded(b'rebase', wraprebase) > EOF $ cat >> .hg/hgrc <<EOF
--- a/tests/test-relink.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-relink.t Sun Mar 04 10:42:51 2018 -0500 @@ -49,7 +49,7 @@ Test files are read in binary mode - $ $PYTHON -c "file('.hg/store/data/dummy.i', 'wb').write('a\r\nb\n')" + $ $PYTHON -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\r\nb\n')" $ cd .. @@ -68,7 +68,7 @@ $ echo b >> b $ hg ci -m changeb created new head - $ $PYTHON -c "file('.hg/store/data/dummy.i', 'wb').write('a\nb\r\n')" + $ $PYTHON -c "open('.hg/store/data/dummy.i', 'wb').write(b'a\nb\r\n')" relink
--- a/tests/test-resolve.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-resolve.t Sun Mar 04 10:42:51 2018 -0500 @@ -85,24 +85,30 @@ $ cat > $TESTTMP/markdriver.py << EOF > '''mark and unmark files as driver-resolved''' - > from mercurial import merge, registrar, scmutil + > from mercurial import ( + > merge, + > pycompat, + > registrar, + > scmutil, + > ) > cmdtable = {} > command = registrar.command(cmdtable) > @command(b'markdriver', - > [('u', 'unmark', None, '')], - > 'FILE...') + > [(b'u', b'unmark', None, b'')], + > b'FILE...') > def markdriver(ui, repo, *pats, **opts): > wlock = repo.wlock() + > opts = pycompat.byteskwargs(opts) > try: > ms = merge.mergestate.read(repo) > m = scmutil.match(repo[None], pats, opts) > for f in ms: > if not m(f): > continue - > if not opts['unmark']: - > ms.mark(f, 'd') + > if not opts[b'unmark']: + > ms.mark(f, b'd') > else: - > ms.mark(f, 'u') + > ms.mark(f, b'u') > ms.commit() > finally: > wlock.release()
--- a/tests/test-revert-interactive.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-revert-interactive.t Sun Mar 04 10:42:51 2018 -0500 @@ -420,4 +420,13 @@ $ cat a 0 +When specified pattern does not exist, we should exit early (issue5789). + + $ hg files + a + $ hg rev b + b: no such file in rev b40d1912accf + $ hg rev -i b + b: no such file in rev b40d1912accf + $ cd ..
--- a/tests/test-revlog-ancestry.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-revlog-ancestry.py Sun Mar 04 10:42:51 2018 -0500 @@ -8,15 +8,15 @@ u = uimod.ui.load() -repo = hg.repository(u, 'test1', create=1) +repo = hg.repository(u, b'test1', create=1) os.chdir('test1') def commit(text, time): - repo.commit(text=text, date="%d 0" % time) + repo.commit(text=text, date=b"%d 0" % time) def addcommit(name, time): - f = open(name, 'w') - f.write('%s\n' % name) + f = open(name, 'wb') + f.write(b'%s\n' % name) f.close() repo[None].add([name]) commit(name, time) @@ -28,27 +28,27 @@ merge.update(repo, rev, True, False) if __name__ == '__main__': - addcommit("A", 0) - addcommit("B", 1) + addcommit(b"A", 0) + addcommit(b"B", 1) update(0) - addcommit("C", 2) + addcommit(b"C", 2) merge_(1) - commit("D", 3) + commit(b"D", 3) update(2) - addcommit("E", 4) - addcommit("F", 5) + addcommit(b"E", 4) + addcommit(b"F", 5) update(3) - addcommit("G", 6) + addcommit(b"G", 6) merge_(5) - commit("H", 7) + commit(b"H", 7) update(5) - addcommit("I", 8) + addcommit(b"I", 8) # Ancestors print('Ancestors of 5')
--- a/tests/test-revlog-v2.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-revlog-v2.t Sun Mar 04 10:42:51 2018 -0500 @@ -29,7 +29,7 @@ Unknown flags to revlog are rejected >>> with open('.hg/store/00changelog.i', 'wb') as fh: - ... fh.write('\x00\x04\xde\xad') + ... fh.write(b'\x00\x04\xde\xad') $ hg log abort: unknown flags (0x04) in version 57005 revlog 00changelog.i!
--- a/tests/test-revlog.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-revlog.t Sun Mar 04 10:42:51 2018 -0500 @@ -4,7 +4,7 @@ Flags on revlog version 0 are rejected >>> with open('.hg/store/00changelog.i', 'wb') as fh: - ... fh.write('\x00\x01\x00\x00') + ... fh.write(b'\x00\x01\x00\x00') $ hg log abort: unknown flags (0x01) in version 0 revlog 00changelog.i! @@ -13,7 +13,7 @@ Unknown flags on revlog version 1 are rejected >>> with open('.hg/store/00changelog.i', 'wb') as fh: - ... fh.write('\x00\x04\x00\x01') + ... fh.write(b'\x00\x04\x00\x01') $ hg log abort: unknown flags (0x04) in version 1 revlog 00changelog.i! @@ -22,7 +22,7 @@ Unknown version is rejected >>> with open('.hg/store/00changelog.i', 'wb') as fh: - ... fh.write('\x00\x00\x00\x02') + ... fh.write(b'\x00\x00\x00\x02') $ hg log abort: unknown version (2) in revlog 00changelog.i! @@ -34,8 +34,8 @@ $ hg init - >>> open("a.i", "w").write( - ... """eJxjYGZgZIAAYQYGxhgom+k/FMx8YKx9ZUaKSOyqo4cnuKb8mbqHV5cBCVTMWb1Cwqkhe4Gsg9AD + >>> open("a.i", "wb").write( + ... b"""eJxjYGZgZIAAYQYGxhgom+k/FMx8YKx9ZUaKSOyqo4cnuKb8mbqHV5cBCVTMWb1Cwqkhe4Gsg9AD ... Joa3dYtcYYYBAQ8Qr4OqZAYRICPTSr5WKd/42rV36d+8/VmrNpv7NP1jQAXrQE4BqQUARngwVA==""" ... .decode("base64").decode("zlib"))
--- a/tests/test-revset.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-revset.t Sun Mar 04 10:42:51 2018 -0500 @@ -16,7 +16,7 @@ > return baseset() > return baseset([3,3,2,2]) > - > mercurial.revset.symbols['r3232'] = r3232 + > mercurial.revset.symbols[b'r3232'] = r3232 > EOF $ cat >> $HGRCPATH << EOF > [extensions] @@ -47,25 +47,25 @@ > cmdtable = {} > command = registrar.command(cmdtable) > @command(b'debugrevlistspec', - > [('', 'optimize', None, 'print parsed tree after optimizing'), - > ('', 'bin', None, 'unhexlify arguments')]) + > [(b'', b'optimize', None, b'print parsed tree after optimizing'), + > (b'', b'bin', None, b'unhexlify arguments')]) > def debugrevlistspec(ui, repo, fmt, *args, **opts): > if opts['bin']: > args = map(nodemod.bin, args) > expr = revsetlang.formatspec(fmt, list(args)) > if ui.verbose: > tree = revsetlang.parse(expr, lookup=repo.__contains__) - > ui.note(revsetlang.prettyformat(tree), "\n") + > ui.note(revsetlang.prettyformat(tree), b"\n") > if opts["optimize"]: > opttree = revsetlang.optimize(revsetlang.analyze(tree)) - > ui.note("* optimized:\n", revsetlang.prettyformat(opttree), - > "\n") + > ui.note(b"* optimized:\n", revsetlang.prettyformat(opttree), + > b"\n") > func = revset.match(ui, expr, repo) > revs = func(repo) > if ui.verbose: - > ui.note("* set:\n", smartset.prettyformat(revs), "\n") + > ui.note(b"* set:\n", smartset.prettyformat(revs), b"\n") > for c in revs: - > ui.write("%s\n" % c) + > ui.write(b"%d\n" % c) > EOF $ cat <<EOF >> $HGRCPATH > [extensions] @@ -399,6 +399,8 @@ 4 $ log 'date(this is a test)' hg: parse error at 10: unexpected token: symbol + (date(this is a test) + ^ here) [255] $ log 'date()' hg: parse error: date requires a string @@ -408,9 +410,11 @@ [255] $ log 'date(' hg: parse error at 5: not a prefix: end + (date( + ^ here) [255] $ log 'date("\xy")' - hg: parse error: invalid \x escape + hg: parse error: invalid \x escape* (glob) [255] $ log 'date(tip)' hg: parse error: invalid date: 'tip' @@ 
-614,18 +618,28 @@ $ hg debugrevspec '[0]' hg: parse error at 0: not a prefix: [ + ([0] + ^ here) [255] $ hg debugrevspec '.#' hg: parse error at 2: not a prefix: end + (.# + ^ here) [255] $ hg debugrevspec '#rel' hg: parse error at 0: not a prefix: # + (#rel + ^ here) [255] $ hg debugrevspec '.#rel[0' hg: parse error at 7: unexpected token: end + (.#rel[0 + ^ here) [255] $ hg debugrevspec '.]' hg: parse error at 1: invalid token + (.] + ^ here) [255] $ hg debugrevspec '.#generations[a]' @@ -1309,7 +1323,7 @@ (func (symbol 'grep') (string '(')) - hg: parse error: invalid match pattern: unbalanced parenthesis + hg: parse error: invalid match pattern: (unbalanced parenthesis|missing \),.*) (re) [255] $ try 'grep("\bissue\d+")' (func @@ -1330,6 +1344,8 @@ 6 $ try 'grep(r"\")' hg: parse error at 7: unterminated string + (grep(r"\") + ^ here) [255] $ log 'head()' 0 @@ -2774,3 +2790,14 @@ $ cd .. $ cd repo + +test multiline revset with errors + + $ echo > multiline-revset + $ echo '. +' >> multiline-revset + $ echo '.^ +' >> multiline-revset + $ hg log -r "`cat multiline-revset`" + hg: parse error at 9: not a prefix: end + ( . + .^ + + ^ here) + [255]
--- a/tests/test-revset2.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-revset2.t Sun Mar 04 10:42:51 2018 -0500 @@ -420,7 +420,7 @@ test that repeated `-r` options never eat up stack (issue4565) (uses `-r 0::1` to avoid possible optimization at old-style parser) - $ hg log -T '{rev}\n' `$PYTHON -c "for i in xrange(500): print '-r 0::1 ',"` + $ hg log -T '{rev}\n' `$PYTHON -c "for i in range(500): print '-r 0::1 ',"` 0 1 @@ -690,6 +690,8 @@ $ log '1 OR 2' hg: parse error at 2: invalid token + (1 OR 2 + ^ here) [255] or operator should preserve ordering: @@ -1562,6 +1564,8 @@ test error message of bad revset $ hg log -r 'foo\\' hg: parse error at 3: syntax error in revset 'foo\\' + (foo\\ + ^ here) [255] $ cd ..
--- a/tests/test-rollback.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-rollback.t Sun Mar 04 10:42:51 2018 -0500 @@ -220,29 +220,29 @@ > import errno > from mercurial.i18n import _ > from mercurial import ( + > error, > registrar, - > error, > ui as uimod, > ) > > configtable = {} > configitem = registrar.configitem(configtable) > - > configitem('ui', 'ioerrors', + > configitem(b'ui', b'ioerrors', > default=list, > ) > > def pretxncommit(ui, repo, **kwargs): - > ui.warn('warn during pretxncommit\n') + > ui.warn(b'warn during pretxncommit\n') > > def pretxnclose(ui, repo, **kwargs): - > ui.warn('warn during pretxnclose\n') + > ui.warn(b'warn during pretxnclose\n') > > def txnclose(ui, repo, **kwargs): - > ui.warn('warn during txnclose\n') + > ui.warn(b'warn during txnclose\n') > > def txnabort(ui, repo, **kwargs): - > ui.warn('warn during abort\n') + > ui.warn(b'warn during abort\n') > > class fdproxy(object): > def __init__(self, ui, o): @@ -253,25 +253,25 @@ > return getattr(self._o, attr) > > def write(self, msg): - > errors = set(self._ui.configlist('ui', 'ioerrors')) - > pretxncommit = msg == 'warn during pretxncommit\n' - > pretxnclose = msg == 'warn during pretxnclose\n' - > txnclose = msg == 'warn during txnclose\n' - > txnabort = msg == 'warn during abort\n' - > msgabort = msg == _('transaction abort!\n') - > msgrollback = msg == _('rollback completed\n') + > errors = set(self._ui.configlist(b'ui', b'ioerrors')) + > pretxncommit = msg == b'warn during pretxncommit\n' + > pretxnclose = msg == b'warn during pretxnclose\n' + > txnclose = msg == b'warn during txnclose\n' + > txnabort = msg == b'warn during abort\n' + > msgabort = msg == _(b'transaction abort!\n') + > msgrollback = msg == _(b'rollback completed\n') > - > if pretxncommit and 'pretxncommit' in errors: + > if pretxncommit and b'pretxncommit' in errors: > raise IOError(errno.EPIPE, 'simulated epipe') - > if pretxnclose and 'pretxnclose' in errors: + > if pretxnclose and b'pretxnclose' in 
errors: > raise IOError(errno.EIO, 'simulated eio') - > if txnclose and 'txnclose' in errors: + > if txnclose and b'txnclose' in errors: > raise IOError(errno.EBADF, 'simulated badf') - > if txnabort and 'txnabort' in errors: + > if txnabort and b'txnabort' in errors: > raise IOError(errno.EPIPE, 'simulated epipe') - > if msgabort and 'msgabort' in errors: + > if msgabort and b'msgabort' in errors: > raise IOError(errno.EBADF, 'simulated ebadf') - > if msgrollback and 'msgrollback' in errors: + > if msgrollback and b'msgrollback' in errors: > raise IOError(errno.EIO, 'simulated eio') > > return self._o.write(msg) @@ -289,10 +289,10 @@ > ui.__class__ = badui > > def reposetup(ui, repo): - > ui.setconfig('hooks', 'pretxnclose.badui', pretxnclose, 'badui') - > ui.setconfig('hooks', 'txnclose.badui', txnclose, 'badui') - > ui.setconfig('hooks', 'pretxncommit.badui', pretxncommit, 'badui') - > ui.setconfig('hooks', 'txnabort.badui', txnabort, 'badui') + > ui.setconfig(b'hooks', b'pretxnclose.badui', pretxnclose, b'badui') + > ui.setconfig(b'hooks', b'txnclose.badui', txnclose, b'badui') + > ui.setconfig(b'hooks', b'pretxncommit.badui', pretxncommit, b'badui') + > ui.setconfig(b'hooks', b'txnabort.badui', txnabort, b'badui') > EOF $ cat >> $HGRCPATH << EOF
--- a/tests/test-run-tests.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-run-tests.t Sun Mar 04 10:42:51 2018 -0500 @@ -374,6 +374,7 @@ </testsuite> $ cat .testtimes + test-empty.t * (glob) test-failure-unicode.t * (glob) test-failure.t * (glob) test-success.t * (glob) @@ -541,6 +542,12 @@ > EOF $ rt test-serve-fail.t + --- $TESTTMP/test-serve-fail.t + +++ $TESTTMP/test-serve-fail.t.err + @@ -1* +1,2 @@ (glob) + $ echo 'abort: child process failed to start blah' + + abort: child process failed to start blah + ERROR: test-serve-fail.t output changed ! Failed test-serve-fail.t: server failed to start (HGPORT=*) (glob) @@ -914,16 +921,24 @@ ================ $ cat > test-skip.t <<EOF > $ echo xyzzy + > #if true > #require false + > #end + > EOF + $ cat > test-noskip.t <<EOF + > #if false + > #require false + > #endif > EOF $ rt --nodiff - !.s + !.s. Skipped test-skip.t: missing feature: nail clipper Failed test-failure.t: output changed - # Ran 2 tests, 1 skipped, 1 failed. + # Ran 3 tests, 1 skipped, 1 failed. python hash seed: * (glob) [1] + $ rm test-noskip.t $ rt --keyword xyzzy .s Skipped test-skip.t: missing feature: nail clipper
--- a/tests/test-sparse.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-sparse.t Sun Mar 04 10:42:51 2018 -0500 @@ -129,6 +129,10 @@ (include file with `hg debugsparse --include <pattern>` or use `hg add -s <file>` to include file directory while adding) [255] +But adding a truly excluded file shouldn't count + + $ hg add hide3 -X hide3 + Verify deleting sparseness while a file has changes fails $ hg debugsparse --delete 'show*'
--- a/tests/test-split.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-split.t Sun Mar 04 10:42:51 2018 -0500 @@ -2,7 +2,8 @@ $ cat > $TESTTMP/editor.py <<EOF > #!$PYTHON - > import os, sys + > import os + > import sys > path = os.path.join(os.environ['TESTTMP'], 'messages') > messages = open(path).read().split('--\n') > prompt = open(sys.argv[1]).read()
--- a/tests/test-ssh-bundle1.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-ssh-bundle1.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,6 +1,16 @@ This test is a duplicate of 'test-http.t' feel free to factor out parts that are not bundle1/bundle2 specific. +#testcases sshv1 sshv2 + +#if sshv2 + $ cat >> $HGRCPATH << EOF + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF +#endif + $ cat << EOF >> $HGRCPATH > [devel] > # This test is dedicated to interaction through old bundle @@ -465,11 +475,13 @@ $ hg pull --debug ssh://user@dummy/remote pulling from ssh://user@dummy/remote running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re) + sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !) sending hello command sending between command - remote: 384 - remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN - remote: 1 + protocol upgraded to exp-ssh-v2-0001 (sshv2 !) + remote: 384 (sshv1 !) + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + remote: 1 (sshv1 !) preparing listkeys for "bookmarks" sending listkeys command received listkey for "bookmarks": 45 bytes
--- a/tests/test-ssh-clone-r.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-ssh-clone-r.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,5 +1,15 @@ This test tries to exercise the ssh functionality with a dummy script +#testcases sshv1 sshv2 + +#if sshv2 + $ cat >> $HGRCPATH << EOF + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF +#endif + creating 'remote' repo $ hg init remote
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-ssh-proto-unbundle.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,2034 @@ + $ cat > hgrc-sshv2 << EOF + > %include $HGRCPATH + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF + + $ debugwireproto() { + > commands=`cat -` + > echo 'testing ssh1' + > tip=`hg log -r tip -T '{node}'` + > echo "${commands}" | hg --verbose debugwireproto --localssh --noreadstderr + > if [ -n "$1" ]; then + > hg --config extensions.strip= strip --no-backup -r "all() - ::${tip}" + > fi + > echo "" + > echo 'testing ssh2' + > echo "${commands}" | HGRCPATH=$TESTTMP/hgrc-sshv2 hg --verbose debugwireproto --localssh --noreadstderr + > if [ -n "$1" ]; then + > hg --config extensions.strip= strip --no-backup -r "all() - ::${tip}" + > fi + > } + +Generate some bundle files + + $ hg init repo + $ cd repo + $ echo 0 > foo + $ hg -q commit -A -m initial + $ hg bundle --all -t none-v1 ../initial.v1.hg + 1 changesets found + $ cd .. + +Test pushing bundle1 payload to a server with bundle1 disabled + + $ hg init no-bundle1 + $ cd no-bundle1 + $ cat > .hg/hgrc << EOF + > [server] + > bundle1 = false + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 115: + e> abort: incompatible Mercurial client; bundle2 required\n + e> (see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> 
initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 115: + e> abort: incompatible Mercurial client; bundle2 required\n + e> (see https://www.mercurial-scm.org/wiki/IncompatibleClient)\n + + $ cd .. + +Create a pretxnchangegroup hook that fails. Give it multiple modes of printing +output so we can test I/O capture and behavior. 
+ +Test pushing to a server that has a pretxnchangegroup Python hook that fails + + $ cat > $TESTTMP/failhook << EOF + > from __future__ import print_function + > import sys + > def hook1line(ui, repo, **kwargs): + > ui.write(b'ui.write 1 line\n') + > return 1 + > def hook2lines(ui, repo, **kwargs): + > ui.write(b'ui.write 2 lines 1\n') + > ui.write(b'ui.write 2 lines 2\n') + > return 1 + > def hook1lineflush(ui, repo, **kwargs): + > ui.write(b'ui.write 1 line flush\n') + > ui.flush() + > return 1 + > def hookmultiflush(ui, repo, **kwargs): + > ui.write(b'ui.write 1st\n') + > ui.flush() + > ui.write(b'ui.write 2nd\n') + > ui.flush() + > return 1 + > def hookwriteandwriteerr(ui, repo, **kwargs): + > ui.write(b'ui.write 1\n') + > ui.write_err(b'ui.write_err 1\n') + > ui.write(b'ui.write 2\n') + > ui.write_err(b'ui.write_err 2\n') + > return 1 + > def hookprintstdout(ui, repo, **kwargs): + > print('printed line') + > return 1 + > def hookprintandwrite(ui, repo, **kwargs): + > print('print 1') + > ui.write(b'ui.write 1\n') + > print('print 2') + > ui.write(b'ui.write 2\n') + > return 1 + > def hookprintstderrandstdout(ui, repo, **kwargs): + > print('stdout 1') + > print('stderr 1', file=sys.stderr) + > print('stdout 2') + > print('stderr 2', file=sys.stderr) + > return 1 + > EOF + + $ hg init failrepo + $ cd failrepo + +ui.write() in hook is redirected to stderr + + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.fail = python:$TESTTMP/failhook:hook1line + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 196: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1 line\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 196: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1 line\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + +And a variation that writes multiple lines using ui.write + + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.fail = python:$TESTTMP/failhook:hook2lines + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 218: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 2 lines 1\n + e> ui.write 2 lines 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 218: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 2 lines 1\n + e> ui.write 2 lines 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + +And a variation that does a ui.flush() after writing output + + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.fail = python:$TESTTMP/failhook:hook1lineflush + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 202: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1 line flush\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 202: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1 line flush\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + +Multiple writes + flush + + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookmultiflush + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 206: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1st\n + e> ui.write 2nd\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 206: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1st\n + e> ui.write 2nd\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + +ui.write() + ui.write_err() output is captured + + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookwriteandwriteerr + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 232: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1\n + e> ui.write_err 1\n + e> ui.write 2\n + e> ui.write_err 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 232: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1\n + e> ui.write_err 1\n + e> ui.write 2\n + e> ui.write_err 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + +print() output is captured + + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookprintstdout + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 193: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> printed line\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 193: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> printed line\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + +Mixed print() and ui.write() are both captured + + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookprintandwrite + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 218: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1\n + e> ui.write 2\n + e> print 1\n + e> print 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 218: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1\n + e> ui.write 2\n + e> print 1\n + e> print 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + +print() to stdout and stderr both get captured + + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.fail = python:$TESTTMP/failhook:hookprintstderrandstdout + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 216: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> stderr 1\n + e> stderr 2\n + e> stdout 1\n + e> stdout 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 216: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> stderr 1\n + e> stderr 2\n + e> stdout 1\n + e> stdout 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook failed\n + +Shell hook writing to stdout has output captured + + $ cat > $TESTTMP/hook.sh << EOF + > echo 'stdout 1' + > echo 'stdout 2' + > exit 1 + > EOF + + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.fail = sh $TESTTMP/hook.sh + > EOF + + $ debugwireproto << EOF + > command 
unbundle + > # This is "force" in hex. + > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 212: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> stdout 1\n + e> stdout 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook exited with status 1\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 212: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> stdout 1\n + e> stdout 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook exited with status 1\n + +Shell hook writing to stderr has output captured + + $ cat > $TESTTMP/hook.sh << EOF + > echo 'stderr 1' 1>&2 + > echo 'stderr 2' 1>&2 + > exit 1 + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 212: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> stderr 1\n + e> stderr 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook exited with status 1\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 212: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> stderr 1\n + e> stderr 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook exited with status 1\n + +Shell hook writing to stdout and stderr has output captured + + $ cat > $TESTTMP/hook.sh << EOF + > echo 'stdout 1' + > echo 'stderr 1' 1>&2 + > echo 'stdout 2' + > echo 'stderr 2' 1>&2 + > exit 1 + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 230: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> stdout 1\n + e> stderr 1\n + e> stdout 2\n + e> stderr 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook exited with status 1\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 230: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> stdout 1\n + e> stderr 1\n + e> stdout 2\n + e> stderr 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.fail hook exited with status 1\n + +Shell and Python hooks writing to stdout and stderr have output captured + + $ cat > $TESTTMP/hook.sh << EOF + > echo 'shell stdout 1' + > echo 'shell stderr 1' 1>&2 + > echo 'shell stdout 2' + > echo 'shell stderr 2' 1>&2 + > exit 0 + > EOF + + $ cat > .hg/hgrc << EOF 
+ > [hooks] + > pretxnchangegroup.a = sh $TESTTMP/hook.sh + > pretxnchangegroup.b = python:$TESTTMP/failhook:hookprintstderrandstdout + > EOF + + $ debugwireproto << EOF + > command unbundle + > # This is "force" in hex. + > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 273: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> shell stdout 1\n + e> shell stderr 1\n + e> shell stdout 2\n + e> shell stderr 2\n + e> stderr 1\n + e> stderr 2\n + e> stdout 1\n + e> stdout 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.b hook failed\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 0 + result: 0 + remote output: + e> read(-1) -> 273: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> shell stdout 1\n + e> shell stderr 1\n + e> shell stdout 2\n + e> shell stderr 2\n + e> stderr 1\n + e> stderr 2\n + e> stdout 1\n + e> stdout 2\n + e> transaction abort!\n + e> rollback completed\n + e> abort: pretxnchangegroup.b hook failed\n + + $ cd .. + +Pushing a bundle1 with no output + + $ hg init simplerepo + $ cd simplerepo + + $ debugwireproto 1 << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 1 + result: 1 + remote output: + e> read(-1) -> 100: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> 
initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 1 + result: 1 + remote output: + e> read(-1) -> 100: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + + $ cd .. + +Pushing a bundle1 with ui.write() and ui.write_err() + + $ cat > $TESTTMP/hook << EOF + > def hookuiwrite(ui, repo, **kwargs): + > ui.write(b'ui.write 1\n') + > ui.write_err(b'ui.write_err 1\n') + > ui.write(b'ui.write 2\n') + > ui.write_err(b'ui.write_err 2\n') + > EOF + + $ hg init uiwriterepo + $ cd uiwriterepo + $ cat > .hg/hgrc << EOF + > [hooks] + > pretxnchangegroup.hook = python:$TESTTMP/hook:hookuiwrite + > EOF + + $ debugwireproto 1 << EOF + > command unbundle + > # This is "force" in hex. 
+ > heads 666f726365 + > PUSHFILE ../initial.v1.hg + > readavailable + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> 
\x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 1 + result: 1 + remote output: + e> read(-1) -> 152: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1\n + e> ui.write_err 1\n + e> ui.write 2\n + e> ui.write_err 2\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending unbundle command + i> write(9) -> 9: + i> unbundle\n + i> write(9) -> 9: + i> heads 10\n + i> write(10) -> 10: 666f726365 + i> flush() -> None + o> readline() -> 2: + o> 0\n + i> write(4) -> 4: + i> 426\n + i> write(426) -> 426: + i> 
HG10UN\x00\x00\x00\x9eh\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>cba485ca3678256e044428f70f58291196f6e9de\n + i> test\n + i> 0 0\n + i> foo\n + i> \n + i> initial\x00\x00\x00\x00\x00\x00\x00\x8d\xcb\xa4\x85\xca6x%n\x04D(\xf7\x0fX)\x11\x96\xf6\xe9\xde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00-foo\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe\n + i> \x00\x00\x00\x00\x00\x00\x00\x07foo\x00\x00\x00b6/\xef(L\xe2\xca\x02\xae\xcc\x8d\xe6\xd5\xe8\xa1\xc3\xaf\x05V\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\x98b\x13\xbdD\x85\xeaQS55\xe3\xfc\x9ex\x00zq\x1f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x020\n + i> \x00\x00\x00\x00\x00\x00\x00\x00 + i> write(2) -> 2: + i> 0\n + i> flush() -> None + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 1\n + o> read(1) -> 1: 1 + result: 1 + remote output: + e> read(-1) -> 152: + e> adding changesets\n + e> adding manifests\n + e> adding file changes\n + e> added 1 changesets with 1 changes to 1 files\n + e> ui.write 1\n + e> ui.write_err 1\n + e> ui.write 2\n + e> ui.write_err 2\n
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/tests/test-ssh-proto.t Sun Mar 04 10:42:51 2018 -0500 @@ -0,0 +1,2139 @@ + $ cat > hgrc-sshv2 << EOF + > %include $HGRCPATH + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF + +Helper function to run protocol tests against multiple protocol versions. +This is easier than using #testcases because managing differences between +protocols with inline conditional output is hard to read. + + $ debugwireproto() { + > commands=`cat -` + > echo 'testing ssh1' + > echo "${commands}" | hg --verbose debugwireproto --localssh + > echo "" + > echo 'testing ssh2' + > echo "${commands}" | HGRCPATH=$TESTTMP/hgrc-sshv2 hg --verbose debugwireproto --localssh + > } + + $ cat >> $HGRCPATH << EOF + > [ui] + > ssh = $PYTHON "$TESTDIR/dummyssh" + > [devel] + > debug.peer-request = true + > [extensions] + > sshprotoext = $TESTDIR/sshprotoext.py + > EOF + + $ hg init server + $ cd server + $ echo 0 > foo + $ hg -q add foo + $ hg commit -m initial + +A no-op connection performs a handshake + + $ hg debugwireproto --localssh << EOF + > EOF + creating ssh peer from handshake results + +Raw peers don't perform any activity + + $ hg debugwireproto --localssh --peer raw << EOF + > EOF + using raw connection to peer + $ hg debugwireproto --localssh --peer ssh1 << EOF + > EOF + creating ssh peer for wire protocol version 1 + $ hg debugwireproto --localssh --peer ssh2 << EOF + > EOF + creating ssh peer for wire protocol version 2 + +Test a normal behaving server, for sanity + + $ cd .. + + $ hg --debug debugpeer ssh://user@dummy/server + running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) + running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) 
+ devel-peer-request: hello + sending hello command + devel-peer-request: between + devel-peer-request: pairs: 81 bytes + sending between command + remote: 384 + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + remote: 1 + url: ssh://user@dummy/server + local: no + pushable: yes + +Server should answer the "hello" command in isolation + + $ hg -R server debugwireproto --localssh --peer raw << EOF + > raw + > hello\n + > readline + > readline + > EOF + using raw connection to peer + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + +`hg debugserve --sshstdio` works + + $ cd server + $ hg debugserve --sshstdio << EOF + > hello + > EOF + 384 + capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + +I/O logging works + + $ hg debugserve --sshstdio --logiofd 1 << EOF + > hello + > EOF + o> write(4) -> 4: + o> 384\n + o> write(384) -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + 384 + capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> flush() -> None + + $ hg debugserve --sshstdio --logiofile $TESTTMP/io << EOF + > hello + > EOF + 384 + capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ 
unbundle=HG10GZ,HG10BZ,HG10UN + + $ cat $TESTTMP/io + o> write(4) -> 4: + o> 384\n + o> write(384) -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> flush() -> None + + $ cd .. + +>=0.9.1 clients send a "hello" + "between" for the null range as part of handshake. +Server should reply with capabilities and should send "1\n\n" as a successful +reply with empty response to the "between". + + $ hg -R server debugwireproto --localssh --peer raw << EOF + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + +SSH banner is not printed by default, ignored by clients + + $ SSHSERVERMODE=banner hg debugpeer ssh://user@dummy/server + url: ssh://user@dummy/server + local: no + pushable: yes + +--debug will print the banner + + $ SSHSERVERMODE=banner hg --debug debugpeer ssh://user@dummy/server + running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) + running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) 
+ devel-peer-request: hello + sending hello command + devel-peer-request: between + devel-peer-request: pairs: 81 bytes + sending between command + remote: banner: line 0 + remote: banner: line 1 + remote: banner: line 2 + remote: banner: line 3 + remote: banner: line 4 + remote: banner: line 5 + remote: banner: line 6 + remote: banner: line 7 + remote: banner: line 8 + remote: banner: line 9 + remote: 384 + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + remote: 1 + url: ssh://user@dummy/server + local: no + pushable: yes + +And test the banner with the raw protocol + + $ SSHSERVERMODE=banner hg -R server debugwireproto --localssh --peer raw << EOF + > raw + > hello\n + > readline + > readline + > readline + > readline + > readline + > readline + > readline + > readline + > readline + > readline + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(6) -> 6: + i> hello\n + o> readline() -> 15: + o> banner: line 0\n + o> readline() -> 15: + o> banner: line 1\n + o> readline() -> 15: + o> banner: line 2\n + o> readline() -> 15: + o> banner: line 3\n + o> readline() -> 15: + o> banner: line 4\n + o> readline() -> 15: + o> banner: line 5\n + o> readline() -> 15: + o> banner: line 6\n + o> readline() -> 15: + o> banner: line 7\n + o> readline() -> 15: + o> banner: line 8\n + o> readline() -> 15: + o> banner: line 9\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 
0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + +Connecting to a <0.9.1 server that doesn't support the hello command. +The client should refuse, as we dropped support for connecting to such +servers. + + $ SSHSERVERMODE=no-hello hg --debug debugpeer ssh://user@dummy/server + running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) + running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + devel-peer-request: hello + sending hello command + devel-peer-request: between + devel-peer-request: pairs: 81 bytes + sending between command + remote: 0 + remote: 1 + abort: no suitable response from remote hg! + [255] + +Sending an unknown command to the server results in an empty response to that command + + $ hg -R server debugwireproto --localssh --peer raw << EOF + > raw + > pre-hello\n + > readline + > raw + > hello\n + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(10) -> 10: + i> pre-hello\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + + $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-no-args --debug debugpeer ssh://user@dummy/server + running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) 
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + sending no-args command + devel-peer-request: hello + sending hello command + devel-peer-request: between + devel-peer-request: pairs: 81 bytes + sending between command + remote: 0 + remote: 384 + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + remote: 1 + url: ssh://user@dummy/server + local: no + pushable: yes + +Send multiple unknown commands before hello + + $ hg -R server debugwireproto --localssh --peer raw << EOF + > raw + > unknown1\n + > readline + > raw + > unknown2\n + > readline + > raw + > unknown3\n + > readline + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(9) -> 9: + i> unknown1\n + o> readline() -> 2: + o> 0\n + i> write(9) -> 9: + i> unknown2\n + o> readline() -> 2: + o> 0\n + i> write(9) -> 9: + i> unknown3\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + + $ hg --config sshpeer.mode=extra-handshake-commands --config sshpeer.handshake-mode=pre-multiple-no-args --debug debugpeer ssh://user@dummy/server + running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) 
+ running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + sending unknown1 command + sending unknown2 command + sending unknown3 command + devel-peer-request: hello + sending hello command + devel-peer-request: between + devel-peer-request: pairs: 81 bytes + sending between command + remote: 0 + remote: 0 + remote: 0 + remote: 384 + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + remote: 1 + url: ssh://user@dummy/server + local: no + pushable: yes + +Send an unknown command before hello that has arguments + + $ cd server + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > with-args\n + > foo 13\n + > value for foo\n + > bar 13\n + > value for bar\n + > readline + > readline + > readline + > readline + > readline + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(52) -> 52: + i> with-args\n + i> foo 13\n + i> value for foo\n + i> bar 13\n + i> value for bar\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + +Send an unknown command having an argument that looks numeric + + $ hg debugwireproto --localssh 
--peer raw << EOF + > raw + > unknown\n + > foo 1\n + > 0\n + > readline + > readline + > readline + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(16) -> 16: + i> unknown\n + i> foo 1\n + i> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > unknown\n + > foo 1\n + > 1\n + > readline + > readline + > readline + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(16) -> 16: + i> unknown\n + i> foo 1\n + i> 1\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + +When sending a dict argument 
value, it is serialized to +"<arg> <item count>" followed by "<key> <len>\n<value>" for each item +in the dict. + +Dictionary value for unknown command + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > unknown\n + > dict 3\n + > key1 3\n + > foo\n + > key2 3\n + > bar\n + > key3 3\n + > baz\n + > readline + > readline + > readline + > readline + > readline + > readline + > readline + > readline + > raw + > hello\n + > readline + > readline + > EOF + using raw connection to peer + i> write(48) -> 48: + i> unknown\n + i> dict 3\n + i> key1 3\n + i> foo\n + i> key2 3\n + i> bar\n + i> key3 3\n + i> baz\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + +Incomplete dictionary send + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > unknown\n + > dict 3\n + > key1 3\n + > foo\n + > readline + > readline + > readline + > readline + > EOF + using raw connection to peer + i> write(26) -> 26: + i> unknown\n + i> dict 3\n + i> key1 3\n + i> foo\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + +Incomplete value send + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > unknown\n + > dict 3\n + > key1 3\n + > fo + > readline + > readline + > readline + > EOF + using raw connection to peer + i> write(24) -> 24: + i> unknown\n + i> dict 3\n + i> key1 3\n + i> fo + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + +Send a command line with spaces + + 
$ hg debugwireproto --localssh --peer raw << EOF + > raw + > unknown withspace\n + > readline + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(18) -> 18: + i> unknown withspace\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > unknown with multiple spaces\n + > readline + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > EOF + using raw connection to peer + i> write(29) -> 29: + i> unknown with multiple spaces\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > unknown with spaces\n + > key 10\n + > some value\n + > readline + > readline + > readline + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 
0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(38) -> 38: + i> unknown with spaces\n + i> key 10\n + i> some value\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + +Send an unknown command after the "between" + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000unknown + > readline + > readline + > EOF + using raw connection to peer + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(105) -> 105: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000unknown + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + +And one with arguments + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > hello\n + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > readline + > readline + > raw + > unknown\n + > foo 5\n + > \nvalue\n + > bar 3\n + > baz\n + > readline + > readline + > 
readline + > EOF + using raw connection to peer + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + i> write(31) -> 31: + i> unknown\n + i> foo 5\n + i> \n + i> value\n + i> bar 3\n + i> baz\n + o> readline() -> 2: + o> 0\n + o> readline() -> 2: + o> 0\n + o> readline() -> 0: + +Send a valid command before the handshake + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > heads\n + > readline + > raw + > hello\n + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > readline + > readline + > EOF + using raw connection to peer + i> write(6) -> 6: + i> heads\n + o> readline() -> 3: + o> 41\n + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 41: + o> 68986213bd4485ea51533535e3fc9e78007a711f\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + +And a variation that doesn't send the between command + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > heads\n + > readline + > raw + > hello\n + > readline + > readline + > EOF + using raw connection to peer + i> write(6) -> 6: + i> heads\n + o> readline() -> 3: + o> 41\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 41: + o> 
68986213bd4485ea51533535e3fc9e78007a711f\n + o> readline() -> 4: + o> 384\n + +Send an upgrade request to a server that doesn't support that command + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=irrelevant1%2Cirrelevant2\n + > readline + > raw + > hello\n + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > readline + > readline + > EOF + using raw connection to peer + i> write(77) -> 77: + i> upgrade 2e82ab3f-9ce3-4b4e-8f8c-6fd1c0e9e23a proto=irrelevant1%2Cirrelevant2\n + o> readline() -> 2: + o> 0\n + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + + $ cd .. + + $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server + running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) + running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + sending upgrade request: * proto=exp-ssh-v2-0001 (glob) + devel-peer-request: hello + sending hello command + devel-peer-request: between + devel-peer-request: pairs: 81 bytes + sending between command + remote: 0 + remote: 384 + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + remote: 1 + url: ssh://user@dummy/server + local: no + pushable: yes + +Enable version 2 support on server. 
We need to do this in hgrc because we can't +use --config with `hg serve --stdio`. + + $ cat >> server/.hg/hgrc << EOF + > [experimental] + > sshserver.support-v2 = true + > EOF + +Send an upgrade request to a server that supports upgrade + + $ cd server + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade this-is-some-token proto=exp-ssh-v2-0001\n + > hello\n + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > readline + > EOF + using raw connection to peer + i> write(153) -> 153: + i> upgrade this-is-some-token proto=exp-ssh-v2-0001\n + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 44: + o> upgraded this-is-some-token exp-ssh-v2-0001\n + o> readline() -> 4: + o> 383\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + + $ cd .. + + $ hg --config experimental.sshpeer.advertise-v2=true --debug debugpeer ssh://user@dummy/server + running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) + running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) 
+ sending upgrade request: * proto=exp-ssh-v2-0001 (glob) + devel-peer-request: hello + sending hello command + devel-peer-request: between + devel-peer-request: pairs: 81 bytes + sending between command + protocol upgraded to exp-ssh-v2-0001 + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + url: ssh://user@dummy/server + local: no + pushable: yes + +Verify the peer has capabilities + + $ hg --config experimental.sshpeer.advertise-v2=true --debug debugcapabilities ssh://user@dummy/server + running * "*/tests/dummyssh" 'user@dummy' 'hg -R server serve --stdio' (glob) (no-windows !) + running * "*\tests/dummyssh" "user@dummy" "hg -R server serve --stdio" (glob) (windows !) + sending upgrade request: * proto=exp-ssh-v2-0001 (glob) + devel-peer-request: hello + sending hello command + devel-peer-request: between + devel-peer-request: pairs: 81 bytes + sending between command + protocol upgraded to exp-ssh-v2-0001 + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + Main capabilities: + batch + branchmap + $USUAL_BUNDLE2_CAPS_SERVER$ + changegroupsubset + getbundle + known + lookup + pushkey + streamreqs=generaldelta,revlogv1 + unbundle=HG10GZ,HG10BZ,HG10UN + unbundlehash + Bundle2 capabilities: + HG20 + bookmarks + changegroup + 01 + 02 + digests + md5 + sha1 + sha512 + error + abort + unsupportedcontent + pushraced + pushkey + hgtagsfnodes + listkeys + phases + heads + pushkey + remote-changegroup + http + https + +Command after upgrade to version 2 is processed + + $ cd server + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade this-is-some-token proto=exp-ssh-v2-0001\n + > hello\n + > between\n + > pairs 81\n + > 
0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > readline + > raw + > hello\n + > readline + > readline + > EOF + using raw connection to peer + i> write(153) -> 153: + i> upgrade this-is-some-token proto=exp-ssh-v2-0001\n + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 44: + o> upgraded this-is-some-token exp-ssh-v2-0001\n + o> readline() -> 4: + o> 383\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 366\n + o> readline() -> 366: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + +Multiple upgrades is not allowed + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade this-is-some-token proto=exp-ssh-v2-0001\n + > hello\n + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > readline + > raw + > upgrade another-token proto=irrelevant\n + > hello\n + > readline + > readavailable + > EOF + using raw connection to peer + i> write(153) -> 153: + i> upgrade this-is-some-token proto=exp-ssh-v2-0001\n + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 44: + o> upgraded this-is-some-token exp-ssh-v2-0001\n + o> readline() -> 4: + o> 383\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(45) 
-> 45: + i> upgrade another-token proto=irrelevant\n + i> hello\n + o> readline() -> 1: + o> \n + e> read(-1) -> 42: + e> cannot upgrade protocols multiple times\n + e> -\n + +Malformed upgrade request line (not exactly 3 space delimited tokens) + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade\n + > readline + > EOF + using raw connection to peer + i> write(8) -> 8: + i> upgrade\n + o> readline() -> 2: + o> 0\n + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade token\n + > readline + > EOF + using raw connection to peer + i> write(14) -> 14: + i> upgrade token\n + o> readline() -> 2: + o> 0\n + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade token foo=bar extra-token\n + > readline + > EOF + using raw connection to peer + i> write(34) -> 34: + i> upgrade token foo=bar extra-token\n + o> readline() -> 2: + o> 0\n + +Upgrade request to unsupported protocol is ignored + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade this-is-some-token proto=unknown1,unknown2\n + > readline + > raw + > hello\n + > readline + > readline + > raw + > between\n + > pairs 81\n + > 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + > readline + > readline + > EOF + using raw connection to peer + i> write(51) -> 51: + i> upgrade this-is-some-token proto=unknown1,unknown2\n + o> readline() -> 2: + o> 0\n + i> write(6) -> 6: + i> hello\n + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + i> write(98) -> 98: + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + +Upgrade request must be followed by hello + between + + $ hg debugwireproto --localssh 
--peer raw << EOF + > raw + > upgrade token proto=exp-ssh-v2-0001\n + > invalid\n + > readline + > readavailable + > EOF + using raw connection to peer + i> write(44) -> 44: + i> upgrade token proto=exp-ssh-v2-0001\n + i> invalid\n + o> readline() -> 1: + o> \n + e> read(-1) -> 46: + e> malformed handshake protocol: missing hello\n + e> -\n + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade token proto=exp-ssh-v2-0001\n + > hello\n + > invalid\n + > readline + > readavailable + > EOF + using raw connection to peer + i> write(50) -> 50: + i> upgrade token proto=exp-ssh-v2-0001\n + i> hello\n + i> invalid\n + o> readline() -> 1: + o> \n + e> read(-1) -> 48: + e> malformed handshake protocol: missing between\n + e> -\n + + $ hg debugwireproto --localssh --peer raw << EOF + > raw + > upgrade token proto=exp-ssh-v2-0001\n + > hello\n + > between\n + > invalid\n + > readline + > readavailable + > EOF + using raw connection to peer + i> write(58) -> 58: + i> upgrade token proto=exp-ssh-v2-0001\n + i> hello\n + i> between\n + i> invalid\n + o> readline() -> 1: + o> \n + e> read(-1) -> 49: + e> malformed handshake protocol: missing pairs 81\n + e> -\n + +Legacy commands are not exposed to version 2 of protocol + + $ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF + > command branches + > nodes 0000000000000000000000000000000000000000 + > EOF + creating ssh peer from handshake results + sending branches command + response: + + $ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF + > command changegroup + > roots 0000000000000000000000000000000000000000 + > EOF + creating ssh peer from handshake results + sending changegroup command + response: + + $ hg --config experimental.sshpeer.advertise-v2=true debugwireproto --localssh << EOF + > command changegroupsubset + > bases 0000000000000000000000000000000000000000 + > heads 0000000000000000000000000000000000000000 + > EOF + creating ssh 
peer from handshake results + sending changegroupsubset command + response: + + $ cd .. + +Test listkeys for listing namespaces + + $ hg init empty + $ cd empty + $ debugwireproto << EOF + > command listkeys + > namespace namespaces + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(13) -> 13: + i> namespace 10\n + i> write(10) -> 10: namespaces + i> flush() -> None + o> bufferedreadline() -> 3: + o> 30\n + o> bufferedread(30) -> 30: + o> bookmarks \n + o> namespaces \n + o> phases + response: bookmarks \nnamespaces \nphases + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(13) -> 13: + i> namespace 10\n + i> write(10) -> 10: namespaces + i> flush() -> None + o> bufferedreadline() -> 3: + o> 30\n + o> bufferedread(30) -> 30: + o> bookmarks \n + o> namespaces \n + o> phases + 
response: bookmarks \nnamespaces \nphases + + $ cd .. + +Test listkeys for bookmarks + + $ hg init bookmarkrepo + $ cd bookmarkrepo + $ echo 0 > foo + $ hg add foo + $ hg -q commit -m initial + $ echo 1 > foo + $ hg commit -m second + +With no bookmarks set + + $ debugwireproto << EOF + > command listkeys + > namespace bookmarks + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 9\n + i> write(9) -> 9: bookmarks + i> flush() -> None + o> bufferedreadline() -> 2: + o> 0\n + response: + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 9\n + i> write(9) -> 9: bookmarks + i> flush() -> None + o> bufferedreadline() -> 2: + o> 0\n + response: + +With a single bookmark set + + $ hg book -r 0 bookA + $ debugwireproto << EOF + > command 
listkeys + > namespace bookmarks + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 9\n + i> write(9) -> 9: bookmarks + i> flush() -> None + o> bufferedreadline() -> 3: + o> 46\n + o> bufferedread(46) -> 46: bookA 68986213bd4485ea51533535e3fc9e78007a711f + response: bookA 68986213bd4485ea51533535e3fc9e78007a711f + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 9\n + i> write(9) -> 9: bookmarks + i> flush() -> None + o> bufferedreadline() -> 3: + o> 46\n + o> bufferedread(46) -> 46: bookA 68986213bd4485ea51533535e3fc9e78007a711f + response: bookA 68986213bd4485ea51533535e3fc9e78007a711f + +With multiple bookmarks set + + $ hg book -r 1 bookB + $ debugwireproto << EOF + > command listkeys + > namespace bookmarks + > EOF + testing 
ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 9\n + i> write(9) -> 9: bookmarks + i> flush() -> None + o> bufferedreadline() -> 3: + o> 93\n + o> bufferedread(93) -> 93: + o> bookA 68986213bd4485ea51533535e3fc9e78007a711f\n + o> bookB 1880f3755e2e52e3199e0ee5638128b08642f34d + response: bookA 68986213bd4485ea51533535e3fc9e78007a711f\nbookB 1880f3755e2e52e3199e0ee5638128b08642f34d + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 9\n + i> write(9) -> 9: bookmarks + i> flush() -> None + o> bufferedreadline() -> 3: + o> 93\n + o> bufferedread(93) -> 93: + o> bookA 68986213bd4485ea51533535e3fc9e78007a711f\n + o> bookB 1880f3755e2e52e3199e0ee5638128b08642f34d + response: bookA 68986213bd4485ea51533535e3fc9e78007a711f\nbookB 
1880f3755e2e52e3199e0ee5638128b08642f34d + +Test pushkey for bookmarks + + $ debugwireproto << EOF + > command pushkey + > namespace bookmarks + > key remote + > old + > new 68986213bd4485ea51533535e3fc9e78007a711f + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending pushkey command + i> write(8) -> 8: + i> pushkey\n + i> write(6) -> 6: + i> key 6\n + i> write(6) -> 6: remote + i> write(12) -> 12: + i> namespace 9\n + i> write(9) -> 9: bookmarks + i> write(7) -> 7: + i> new 40\n + i> write(40) -> 40: 68986213bd4485ea51533535e3fc9e78007a711f + i> write(6) -> 6: + i> old 0\n + i> flush() -> None + o> bufferedreadline() -> 2: + o> 2\n + o> bufferedread(2) -> 2: + o> 1\n + response: 1\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending pushkey command + i> write(8) -> 8: + i> pushkey\n + i> write(6) -> 6: + i> key 6\n + i> write(6) -> 6: remote + i> write(12) -> 12: + i> namespace 9\n + i> write(9) -> 9: 
bookmarks + i> write(7) -> 7: + i> new 40\n + i> write(40) -> 40: 68986213bd4485ea51533535e3fc9e78007a711f + i> write(6) -> 6: + i> old 0\n + i> flush() -> None + o> bufferedreadline() -> 2: + o> 2\n + o> bufferedread(2) -> 2: + o> 1\n + response: 1\n + + $ hg bookmarks + bookA 0:68986213bd44 + bookB 1:1880f3755e2e + remote 0:68986213bd44 + + $ cd .. + +Test listkeys for phases + + $ hg init phasesrepo + $ cd phasesrepo + +Phases on empty repo + + $ debugwireproto << EOF + > command listkeys + > namespace phases + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> flush() -> None + o> bufferedreadline() -> 3: + o> 15\n + o> bufferedread(15) -> 15: publishing True + response: publishing True + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> 
listkeys\n + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> flush() -> None + o> bufferedreadline() -> 3: + o> 15\n + o> bufferedread(15) -> 15: publishing True + response: publishing True + +Create some commits + + $ echo 0 > foo + $ hg add foo + $ hg -q commit -m initial + $ hg phase --public + $ echo 1 > foo + $ hg commit -m 'head 1 commit 1' + $ echo 2 > foo + $ hg commit -m 'head 1 commit 2' + $ hg -q up 0 + $ echo 1a > foo + $ hg commit -m 'head 2 commit 1' + created new head + $ echo 2a > foo + $ hg commit -m 'head 2 commit 2' + +Two draft heads + + $ debugwireproto << EOF + > command listkeys + > namespace phases + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> flush() -> None + o> bufferedreadline() -> 4: + o> 101\n + o> bufferedread(101) -> 101: + o> 20b8a89289d80036e6c4e87c2083e3bea1586637 1\n + o> c4750011d906c18ea2f0527419cbc1a544435150 1\n + o> publishing True + response: 20b8a89289d80036e6c4e87c2083e3bea1586637 1\nc4750011d906c18ea2f0527419cbc1a544435150 1\npublishing True + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * 
exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> flush() -> None + o> bufferedreadline() -> 4: + o> 101\n + o> bufferedread(101) -> 101: + o> 20b8a89289d80036e6c4e87c2083e3bea1586637 1\n + o> c4750011d906c18ea2f0527419cbc1a544435150 1\n + o> publishing True + response: 20b8a89289d80036e6c4e87c2083e3bea1586637 1\nc4750011d906c18ea2f0527419cbc1a544435150 1\npublishing True + +Single draft head + + $ hg phase --public -r 2 + $ debugwireproto << EOF + > command listkeys + > namespace phases + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> flush() -> None + o> bufferedreadline() -> 3: + o> 58\n + o> bufferedread(58) -> 58: + o> c4750011d906c18ea2f0527419cbc1a544435150 1\n + o> publishing True + response: c4750011d906c18ea2f0527419cbc1a544435150 1\npublishing True + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 
0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> flush() -> None + o> bufferedreadline() -> 3: + o> 58\n + o> bufferedread(58) -> 58: + o> c4750011d906c18ea2f0527419cbc1a544435150 1\n + o> publishing True + response: c4750011d906c18ea2f0527419cbc1a544435150 1\npublishing True + +All public heads + + $ hg phase --public -r 4 + $ debugwireproto << EOF + > command listkeys + > namespace phases + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> flush() -> None + o> bufferedreadline() -> 3: + o> 15\n + o> bufferedread(15) -> 15: publishing True + response: publishing True + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> 
flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending listkeys command + i> write(9) -> 9: + i> listkeys\n + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> flush() -> None + o> bufferedreadline() -> 3: + o> 15\n + o> bufferedread(15) -> 15: publishing True + response: publishing True + +Setting public phase via pushkey + + $ hg phase --draft --force -r . + + $ debugwireproto << EOF + > command pushkey + > namespace phases + > key 7127240a084fd9dc86fe8d1f98e26229161ec82b + > old 1 + > new 0 + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending pushkey command + i> write(8) -> 8: + i> pushkey\n + i> write(7) -> 7: + i> key 40\n + i> write(40) -> 40: 7127240a084fd9dc86fe8d1f98e26229161ec82b + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> write(6) -> 6: + i> new 1\n + i> write(1) -> 1: 0 + i> write(6) -> 6: + i> old 1\n + i> write(1) -> 1: 1 + i> flush() -> None + o> bufferedreadline() -> 2: + o> 2\n + o> bufferedread(2) -> 2: + o> 1\n + response: 1\n + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 
0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending pushkey command + i> write(8) -> 8: + i> pushkey\n + i> write(7) -> 7: + i> key 40\n + i> write(40) -> 40: 7127240a084fd9dc86fe8d1f98e26229161ec82b + i> write(12) -> 12: + i> namespace 6\n + i> write(6) -> 6: phases + i> write(6) -> 6: + i> new 1\n + i> write(1) -> 1: 0 + i> write(6) -> 6: + i> old 1\n + i> write(1) -> 1: 1 + i> flush() -> None + o> bufferedreadline() -> 2: + o> 2\n + o> bufferedread(2) -> 2: + o> 1\n + response: 1\n + + $ hg phase . + 4: public + + $ cd .. + +Test batching of requests + + $ hg init batching + $ cd batching + $ echo 0 > foo + $ hg add foo + $ hg -q commit -m initial + $ hg phase --public + $ echo 1 > foo + $ hg commit -m 'commit 1' + $ hg -q up 0 + $ echo 2 > foo + $ hg commit -m 'commit 2' + created new head + $ hg book -r 1 bookA + $ hg book -r 2 bookB + + $ debugwireproto << EOF + > batchbegin + > command heads + > command listkeys + > namespace bookmarks + > command listkeys + > namespace phases + > batchsubmit + > EOF + testing ssh1 + creating ssh peer from handshake results + i> write(104) -> 104: + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 4: + o> 384\n + o> readline() -> 384: + o> capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN\n + o> readline() -> 2: + o> 1\n + o> readline() -> 1: + o> \n + sending batch with 3 sub-commands + i> 
write(6) -> 6: + i> batch\n + i> write(4) -> 4: + i> * 0\n + i> write(8) -> 8: + i> cmds 61\n + i> write(61) -> 61: heads ;listkeys namespace=bookmarks;listkeys namespace=phases + i> flush() -> None + o> bufferedreadline() -> 4: + o> 278\n + o> bufferedread(278) -> 278: + o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n + o> ;bookA 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n + o> bookB bfebe6bd38eebc6f8202e419c1171268987ea6a6;4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab 1\n + o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 1\n + o> publishing True + response #0: bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n + response #1: bookA 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\nbookB bfebe6bd38eebc6f8202e419c1171268987ea6a6 + response #2: 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab 1\nbfebe6bd38eebc6f8202e419c1171268987ea6a6 1\npublishing True + + testing ssh2 + creating ssh peer from handshake results + i> write(171) -> 171: + i> upgrade * proto=exp-ssh-v2-0001\n (glob) + i> hello\n + i> between\n + i> pairs 81\n + i> 0000000000000000000000000000000000000000-0000000000000000000000000000000000000000 + i> flush() -> None + o> readline() -> 62: + o> upgraded * exp-ssh-v2-0001\n (glob) + o> readline() -> 4: + o> 383\n + o> read(383) -> 383: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + o> read(1) -> 1: + o> \n + sending batch with 3 sub-commands + i> write(6) -> 6: + i> batch\n + i> write(4) -> 4: + i> * 0\n + i> write(8) -> 8: + i> cmds 61\n + i> write(61) -> 61: heads ;listkeys namespace=bookmarks;listkeys namespace=phases + i> flush() -> None + o> bufferedreadline() -> 4: + o> 278\n + o> bufferedread(278) -> 278: + o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n + o> ;bookA 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n + o> bookB 
bfebe6bd38eebc6f8202e419c1171268987ea6a6;4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab 1\n + o> bfebe6bd38eebc6f8202e419c1171268987ea6a6 1\n + o> publishing True + response #0: bfebe6bd38eebc6f8202e419c1171268987ea6a6 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\n + response #1: bookA 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab\nbookB bfebe6bd38eebc6f8202e419c1171268987ea6a6 + response #2: 4ee3fcef1c800fa2bf23e20af7c83ff111d9c7ab 1\nbfebe6bd38eebc6f8202e419c1171268987ea6a6 1\npublishing True
--- a/tests/test-ssh.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-ssh.t Sun Mar 04 10:42:51 2018 -0500 @@ -1,3 +1,12 @@ +#testcases sshv1 sshv2 + +#if sshv2 + $ cat >> $HGRCPATH << EOF + > [experimental] + > sshpeer.advertise-v2 = true + > sshserver.support-v2 = true + > EOF +#endif This test tries to exercise the ssh functionality with a dummy script @@ -481,14 +490,16 @@ $ hg pull --debug ssh://user@dummy/remote --config devel.debug.peer-request=yes pulling from ssh://user@dummy/remote running .* ".*/dummyssh" ['"]user@dummy['"] ('|")hg -R remote serve --stdio('|") (re) + sending upgrade request: * proto=exp-ssh-v2-0001 (glob) (sshv2 !) devel-peer-request: hello sending hello command devel-peer-request: between devel-peer-request: pairs: 81 bytes sending between command - remote: 384 - remote: capabilities: lookup changegroupsubset branchmap pushkey known getbundle unbundlehash batch streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN - remote: 1 + remote: 384 (sshv1 !) + protocol upgraded to exp-ssh-v2-0001 (sshv2 !) + remote: capabilities: lookup branchmap pushkey known getbundle unbundlehash batch changegroupsubset streamreqs=generaldelta,revlogv1 $USUAL_BUNDLE2_CAPS_SERVER$ unbundle=HG10GZ,HG10BZ,HG10UN + remote: 1 (sshv1 !) query 1; heads devel-peer-request: batch devel-peer-request: cmds: 141 bytes
--- a/tests/test-sshserver.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-sshserver.py Sun Mar 04 10:42:51 2018 -0500 @@ -6,30 +6,33 @@ import silenttestrunner from mercurial import ( - sshserver, util, wireproto, + wireprotoserver, ) class SSHServerGetArgsTests(unittest.TestCase): def testparseknown(self): tests = [ - ('* 0\nnodes 0\n', ['', {}]), - ('* 0\nnodes 40\n1111111111111111111111111111111111111111\n', - ['1111111111111111111111111111111111111111', {}]), + (b'* 0\nnodes 0\n', [b'', {}]), + (b'* 0\nnodes 40\n1111111111111111111111111111111111111111\n', + [b'1111111111111111111111111111111111111111', {}]), ] for input, expected in tests: - self.assertparse('known', input, expected) + self.assertparse(b'known', input, expected) def assertparse(self, cmd, input, expected): server = mockserver(input) + proto = wireprotoserver.sshv1protocolhandler(server._ui, + server._fin, + server._fout) _func, spec = wireproto.commands[cmd] - self.assertEqual(server.getargs(spec), expected) + self.assertEqual(proto.getargs(spec), expected) def mockserver(inbytes): ui = mockui(inbytes) repo = mockrepo(ui) - return sshserver.sshserver(ui, repo) + return wireprotoserver.sshserver(ui, repo) class mockrepo(object): def __init__(self, ui):
--- a/tests/test-status.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-status.t Sun Mar 04 10:42:51 2018 -0500 @@ -465,12 +465,12 @@ $ hg init repo5 $ cd repo5 - >>> open("010a", "wb").write("\1\nfoo") + >>> open("010a", r"wb").write(b"\1\nfoo") $ hg ci -q -A -m 'initial checkin' $ hg status -A C 010a - >>> open("010a", "wb").write("\1\nbar") + >>> open("010a", r"wb").write(b"\1\nbar") $ hg status -A M 010a $ hg ci -q -m 'modify 010a'
--- a/tests/test-strip.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-strip.t Sun Mar 04 10:42:51 2018 -0500 @@ -893,17 +893,17 @@ > def test(transaction): > # observe cache inconsistency > try: - > [repo.changelog.node(r) for r in repo.revs("not public()")] + > [repo.changelog.node(r) for r in repo.revs(b"not public()")] > except IndexError: - > repo.ui.status("Index error!\n") + > repo.ui.status(b"Index error!\n") > transaction = orig(repo, desc, *args, **kwargs) > # warm up the phase cache - > list(repo.revs("not public()")) - > if desc != 'strip': - > transaction.addpostclose("phase invalidation test", test) + > list(repo.revs(b"not public()")) + > if desc != b'strip': + > transaction.addpostclose(b"phase invalidation test", test) > return transaction > def extsetup(ui): - > extensions.wrapfunction(localrepo.localrepository, "transaction", + > extensions.wrapfunction(localrepo.localrepository, b"transaction", > transactioncallback) > EOF $ hg up -C 2 @@ -930,9 +930,9 @@ > class crashstriprepo(repo.__class__): > def transaction(self, desc, *args, **kwargs): > tr = super(crashstriprepo, self).transaction(desc, *args, **kwargs) - > if desc == 'strip': - > def crash(tra): raise error.Abort('boom') - > tr.addpostclose('crash', crash) + > if desc == b'strip': + > def crash(tra): raise error.Abort(b'boom') + > tr.addpostclose(b'crash', crash) > return tr > repo.__class__ = crashstriprepo > EOF @@ -1175,16 +1175,16 @@ > from mercurial import commands, registrar, repair > cmdtable = {} > command = registrar.command(cmdtable) - > @command('testdelayedstrip') + > @command(b'testdelayedstrip') > def testdelayedstrip(ui, repo): > def getnodes(expr): > return [repo.changelog.node(r) for r in repo.revs(expr)] > with repo.wlock(): > with repo.lock(): - > with repo.transaction('delayedstrip'): - > repair.delayedstrip(ui, repo, getnodes('B+I+Z+D+E'), 'J') - > repair.delayedstrip(ui, repo, getnodes('G+H+Z'), 'I') - > commands.commit(ui, repo, message='J', date='0 0') + > with 
repo.transaction(b'delayedstrip'): + > repair.delayedstrip(ui, repo, getnodes(b'B+I+Z+D+E'), b'J') + > repair.delayedstrip(ui, repo, getnodes(b'G+H+Z'), b'I') + > commands.commit(ui, repo, message=b'J', date=b'0 0') > EOF $ hg testdelayedstrip --config extensions.t=$TESTTMP/delayedstrip.py warning: orphaned descendants detected, not stripping 08ebfeb61bac, 112478962961, 7fb047a69f22 @@ -1225,7 +1225,7 @@ > from mercurial import registrar, scmutil > cmdtable = {} > command = registrar.command(cmdtable) - > @command('testnodescleanup') + > @command(b'testnodescleanup') > def testnodescleanup(ui, repo): > def nodes(expr): > return [repo.changelog.node(r) for r in repo.revs(expr)] @@ -1233,12 +1233,13 @@ > return nodes(expr)[0] > with repo.wlock(): > with repo.lock(): - > with repo.transaction('delayedstrip'): - > mapping = {node('F'): [node('F2')], - > node('D'): [node('D2')], - > node('G'): [node('G2')]} - > scmutil.cleanupnodes(repo, mapping, 'replace') - > scmutil.cleanupnodes(repo, nodes('((B::)+I+Z)-D2'), 'replace') + > with repo.transaction(b'delayedstrip'): + > mapping = {node(b'F'): [node(b'F2')], + > node(b'D'): [node(b'D2')], + > node(b'G'): [node(b'G2')]} + > scmutil.cleanupnodes(repo, mapping, b'replace') + > scmutil.cleanupnodes(repo, nodes(b'((B::)+I+Z)-D2'), + > b'replace') > EOF $ hg testnodescleanup --config extensions.t=$TESTTMP/scmutilcleanup.py warning: orphaned descendants detected, not stripping 112478962961, 1fc8102cda62, 26805aba1e60
--- a/tests/test-subrepo-missing.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-subrepo-missing.t Sun Mar 04 10:42:51 2018 -0500 @@ -14,7 +14,7 @@ ignore blanklines in .hgsubstate - >>> file('.hgsubstate', 'wb').write('\n\n \t \n \n') + >>> open('.hgsubstate', 'wb').write(b'\n\n \t \n \n') $ hg st --subrepos M .hgsubstate $ hg revert -qC .hgsubstate @@ -22,7 +22,7 @@ abort more gracefully on .hgsubstate parsing error $ cp .hgsubstate .hgsubstate.old - >>> file('.hgsubstate', 'wb').write('\ninvalid') + >>> open('.hgsubstate', 'wb').write(b'\ninvalid') $ hg st --subrepos --cwd $TESTTMP -R $TESTTMP/repo abort: invalid subrepository revision specifier in 'repo/.hgsubstate' line 2 [255]
--- a/tests/test-tag.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-tag.t Sun Mar 04 10:42:51 2018 -0500 @@ -231,8 +231,8 @@ doesn't end with EOL $ $PYTHON << EOF - > f = file('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close() - > f = file('.hg/localtags', 'w'); f.write(last); f.close() + > f = open('.hg/localtags'); last = f.readlines()[-1][:-1]; f.close() + > f = open('.hg/localtags', 'w'); f.write(last); f.close() > EOF $ cat .hg/localtags; echo acb14030fe0a21b60322c440ad2d20cf7685a376 localblah @@ -243,8 +243,8 @@ $ $PYTHON << EOF - > f = file('.hgtags'); last = f.readlines()[-1][:-1]; f.close() - > f = file('.hgtags', 'w'); f.write(last); f.close() + > f = open('.hgtags'); last = f.readlines()[-1][:-1]; f.close() + > f = open('.hgtags', 'w'); f.write(last); f.close() > EOF $ hg ci -m'broken manual edit of .hgtags' $ cat .hgtags; echo
--- a/tests/test-template-engine.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-template-engine.t Sun Mar 04 10:42:51 2018 -0500 @@ -9,17 +9,28 @@ > self._defaults = defaults > self._resources = resources > + > def symbol(self, mapping, key): + > return mapping[key] + > + > def resource(self, mapping, key): + > v = self._resources[key] + > if v is None: + > v = mapping[key] + > return v + > > def process(self, t, map): > tmpl = self.loader(t) > props = self._defaults.copy() > props.update(map) - > for k, v in props.iteritems(): + > for k, v in props.items(): > if k in ('templ', 'ctx', 'repo', 'revcache', 'cache', 'troubles'): > continue - > if hasattr(v, '__call__'): + > if callable(v) and getattr(v, '_requires', None) is None: > props = self._resources.copy() > props.update(map) > v = v(**props) + > elif callable(v): + > v = v(self, props) > v = templater.stringify(v) > tmpl = tmpl.replace('{{%s}}' % k, v) > yield tmpl
--- a/tests/test-transplant.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-transplant.t Sun Mar 04 10:42:51 2018 -0500 @@ -760,7 +760,7 @@ $ cd twin2 $ echo '[patch]' >> .hg/hgrc $ echo 'eol = crlf' >> .hg/hgrc - $ $PYTHON -c "file('b', 'wb').write('b\r\nb\r\n')" + $ $PYTHON -c "open('b', 'wb').write(b'b\r\nb\r\n')" $ hg ci -Am addb adding b $ hg transplant -s ../twin1 tip @@ -838,9 +838,9 @@ $ cd binarysource $ echo a > a $ hg ci -Am adda a - >>> file('b', 'wb').write('\0b1') + >>> open('b', 'wb').write(b'\0b1') $ hg ci -Am addb b - >>> file('b', 'wb').write('\0b2') + >>> open('b', 'wb').write(b'\0b2') $ hg ci -m changeb b $ cd .. @@ -891,14 +891,14 @@ > # emulate that patch.patch() is aborted at patching on "abort" file > from mercurial import error, extensions, patch as patchmod > def patch(orig, ui, repo, patchname, - > strip=1, prefix='', files=None, - > eolmode='strict', similarity=0): + > strip=1, prefix=b'', files=None, + > eolmode=b'strict', similarity=0): > if files is None: > files = set() > r = orig(ui, repo, patchname, > strip=strip, prefix=prefix, files=files, > eolmode=eolmode, similarity=similarity) - > if 'abort' in files: + > if b'abort' in files: > raise error.PatchError('intentional error while patching') > return r > def extsetup(ui):
--- a/tests/test-ui-color.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-ui-color.py Sun Mar 04 10:42:51 2018 -0500 @@ -9,27 +9,27 @@ # ensure errors aren't buffered testui = uimod.ui() testui.pushbuffer() -testui.write(('buffered\n')) -testui.warn(('warning\n')) -testui.write_err('error\n') +testui.write((b'buffered\n')) +testui.warn((b'warning\n')) +testui.write_err(b'error\n') print(repr(testui.popbuffer())) # test dispatch.dispatch with the same ui object -hgrc = open(os.environ["HGRCPATH"], 'w') -hgrc.write('[extensions]\n') -hgrc.write('color=\n') +hgrc = open(os.environ["HGRCPATH"], 'wb') +hgrc.write(b'[extensions]\n') +hgrc.write(b'color=\n') hgrc.close() ui_ = uimod.ui.load() -ui_.setconfig('ui', 'formatted', 'True') +ui_.setconfig(b'ui', b'formatted', b'True') # we're not interested in the output, so write that to devnull -ui_.fout = open(os.devnull, 'w') +ui_.fout = open(os.devnull, 'wb') # call some arbitrary command just so we go through # color's wrapped _runcommand twice. def runcmd(): - dispatch.dispatch(dispatch.request(['version', '-q'], ui_)) + dispatch.dispatch(dispatch.request([b'version', b'-q'], ui_)) runcmd() print("colored? %s" % (ui_._colormode is not None))
--- a/tests/test-ui-verbosity.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-ui-verbosity.py Sun Mar 04 10:42:51 2018 -0500 @@ -2,9 +2,13 @@ import os from mercurial import ( + pycompat, ui as uimod, ) +if pycompat.ispy3: + xrange = range + hgrc = os.environ['HGRCPATH'] f = open(hgrc) basehgrc = f.read()
--- a/tests/test-upgrade-repo.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-upgrade-repo.t Sun Mar 04 10:42:51 2018 -0500 @@ -31,23 +31,18 @@ abort: cannot upgrade repository; unsupported source requirement: shared [255] -Do not yet support upgrading manifestv2 and treemanifest repos - - $ hg --config experimental.manifestv2=true init manifestv2 - $ hg -R manifestv2 debugupgraderepo - abort: cannot upgrade repository; unsupported source requirement: manifestv2 - [255] +Do not yet support upgrading treemanifest repos $ hg --config experimental.treemanifest=true init treemanifest $ hg -R treemanifest debugupgraderepo abort: cannot upgrade repository; unsupported source requirement: treemanifest [255] -Cannot add manifestv2 or treemanifest requirement during upgrade +Cannot add treemanifest requirement during upgrade $ hg init disallowaddedreq - $ hg -R disallowaddedreq --config experimental.manifestv2=true --config experimental.treemanifest=true debugupgraderepo - abort: cannot upgrade repository; do not support adding requirement: manifestv2, treemanifest + $ hg -R disallowaddedreq --config experimental.treemanifest=true debugupgraderepo + abort: cannot upgrade repository; do not support adding requirement: treemanifest [255] An upgrade of a repository created with recommended settings only suggests optimizations
--- a/tests/test-walk.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-walk.t Sun Mar 04 10:42:51 2018 -0500 @@ -304,12 +304,10 @@ f beans/turtle beans/turtle $ hg debugwalk -Xbeans/black beans/black matcher: <differencematcher m1=<patternmatcher patterns='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans\\/black(?:/|$))'>> - f beans/black beans/black exact $ hg debugwalk -Xbeans/black -Ibeans/black matcher: <differencematcher m1=<includematcher includes='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans\\/black(?:/|$))'>> $ hg debugwalk -Xbeans beans/black matcher: <differencematcher m1=<patternmatcher patterns='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>> - f beans/black beans/black exact $ hg debugwalk -Xbeans -Ibeans/black matcher: <differencematcher m1=<includematcher includes='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>> $ hg debugwalk 'glob:mammals/../beans/b*' @@ -345,17 +343,13 @@ [255] Test explicit paths and excludes: -(BROKEN: nothing should be included, but wctx.walk() does) $ hg debugwalk fennel -X fennel matcher: <differencematcher m1=<patternmatcher patterns='(?:fennel(?:/|$))'>, m2=<includematcher includes='(?:fennel(?:/|$))'>> - f fennel fennel exact $ hg debugwalk fennel -X 'f*' matcher: <differencematcher m1=<patternmatcher patterns='(?:fennel(?:/|$))'>, m2=<includematcher includes='(?:f[^/]*(?:/|$))'>> - f fennel fennel exact $ hg debugwalk beans/black -X 'path:beans' matcher: <differencematcher m1=<patternmatcher patterns='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>> - f beans/black beans/black exact $ hg debugwalk -I 'path:beans/black' -X 'path:beans' matcher: <differencematcher m1=<includematcher includes='(?:beans\\/black(?:/|$))'>, m2=<includematcher includes='(?:beans(?:/|$))'>> @@ -494,12 +488,12 @@ Test listfile and listfile0 - $ $PYTHON -c "file('listfile0', 'wb').write('fenugreek\0new\0')" + $ $PYTHON -c 
"open('listfile0', 'wb').write(b'fenugreek\0new\0')" $ hg debugwalk -I 'listfile0:listfile0' matcher: <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$))'> f fenugreek fenugreek f new new - $ $PYTHON -c "file('listfile', 'wb').write('fenugreek\nnew\r\nmammals/skunk\n')" + $ $PYTHON -c "open('listfile', 'wb').write(b'fenugreek\nnew\r\nmammals/skunk\n')" $ hg debugwalk -I 'listfile:listfile' matcher: <includematcher includes='(?:fenugreek(?:/|$)|new(?:/|$)|mammals\\/skunk(?:/|$))'> f fenugreek fenugreek @@ -525,7 +519,7 @@ $ cd t $ echo fennel > overflow.list - $ $PYTHON -c "for i in xrange(20000 / 100): print 'x' * 100" >> overflow.list + $ $PYTHON -c "for i in range(20000 / 100): print 'x' * 100" >> overflow.list $ echo fenugreek >> overflow.list $ hg debugwalk 'listfile:overflow.list' 2>&1 | egrep -v '(^matcher: |^xxx)' f fennel fennel exact
--- a/tests/test-win32text.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-win32text.t Sun Mar 04 10:42:51 2018 -0500 @@ -5,9 +5,9 @@ > import sys > > for path in sys.argv[1:]: - > data = file(path, 'rb').read() - > data = data.replace('\n', '\r\n') - > file(path, 'wb').write(data) + > data = open(path, 'rb').read() + > data = data.replace(b'\n', b'\r\n') + > open(path, 'wb').write(data) > EOF $ echo '[hooks]' >> .hg/hgrc $ echo 'pretxncommit.crlf = python:hgext.win32text.forbidcrlf' >> .hg/hgrc @@ -118,7 +118,7 @@ $ hg rem f $ hg ci -m 4 - $ $PYTHON -c 'file("bin", "wb").write("hello\x00\x0D\x0A")' + $ $PYTHON -c 'open("bin", "wb").write(b"hello\x00\x0D\x0A")' $ hg add bin $ hg ci -m 5 $ hg log -v @@ -342,7 +342,7 @@ $ rm .hg/hgrc $ (echo some; echo text) > f3 - $ $PYTHON -c 'file("f4.bat", "wb").write("rem empty\x0D\x0A")' + $ $PYTHON -c 'open("f4.bat", "wb").write(b"rem empty\x0D\x0A")' $ hg add f3 f4.bat $ hg ci -m 6 $ cat bin @@ -395,7 +395,7 @@ $ cat f4.bat rem empty\r (esc) - $ $PYTHON -c 'file("f5.sh", "wb").write("# empty\x0D\x0A")' + $ $PYTHON -c 'open("f5.sh", "wb").write(b"# empty\x0D\x0A")' $ hg add f5.sh $ hg ci -m 7 $ cat f5.sh
--- a/tests/test-wireproto.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-wireproto.py Sun Mar 04 10:42:51 2018 -0500 @@ -1,8 +1,11 @@ from __future__ import absolute_import, print_function from mercurial import ( + error, + pycompat, util, wireproto, + wireprototypes, ) stringio = util.stringio @@ -11,7 +14,7 @@ self.args = args def getargs(self, spec): args = self.args - args.setdefault('*', {}) + args.setdefault(b'*', {}) names = spec.split() return [args[n] for n in names] @@ -24,7 +27,7 @@ return self.serverrepo.ui def url(self): - return 'test' + return b'test' def local(self): return None @@ -39,10 +42,17 @@ pass def capabilities(self): - return ['batch'] + return [b'batch'] def _call(self, cmd, **args): - return wireproto.dispatch(self.serverrepo, proto(args), cmd) + args = pycompat.byteskwargs(args) + res = wireproto.dispatch(self.serverrepo, proto(args), cmd) + if isinstance(res, wireprototypes.bytesresponse): + return res.data + elif isinstance(res, bytes): + return res + else: + raise error.Abort('dummy client does not support response type') def _callstream(self, cmd, **args): return stringio(self._call(cmd, **args)) @@ -50,31 +60,31 @@ @wireproto.batchable def greet(self, name): f = wireproto.future() - yield {'name': mangle(name)}, f + yield {b'name': mangle(name)}, f yield unmangle(f.value) class serverrepo(object): def greet(self, name): - return "Hello, " + name + return b"Hello, " + name def filtered(self, name): return self def mangle(s): - return ''.join(chr(ord(c) + 1) for c in s) + return b''.join(pycompat.bytechr(ord(c) + 1) for c in pycompat.bytestr(s)) def unmangle(s): - return ''.join(chr(ord(c) - 1) for c in s) + return b''.join(pycompat.bytechr(ord(c) - 1) for c in pycompat.bytestr(s)) def greet(repo, proto, name): return mangle(repo.greet(unmangle(name))) -wireproto.commands['greet'] = (greet, 'name',) +wireproto.commands[b'greet'] = (greet, b'name',) srv = serverrepo() clt = clientpeer(srv) -print(clt.greet("Foobar")) 
+print(clt.greet(b"Foobar")) b = clt.iterbatch() -map(b.greet, ('Fo, =;:<o', 'Bar')) +list(map(b.greet, (b'Fo, =;:<o', b'Bar'))) b.submit() print([r for r in b.results()])
--- a/tests/test-worker.t Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/test-worker.t Sun Mar 04 10:42:51 2018 -0500 @@ -12,7 +12,7 @@ > def abort(ui, args): > if args[0] == 0: > # by first worker for test stability - > raise error.Abort('known exception') + > raise error.Abort(b'known exception') > return runme(ui, []) > def exc(ui, args): > if args[0] == 0: @@ -21,25 +21,25 @@ > return runme(ui, []) > def runme(ui, args): > for arg in args: - > ui.status('run\n') + > ui.status(b'run\n') > yield 1, arg > time.sleep(0.1) # easier to trigger killworkers code path > functable = { - > 'abort': abort, - > 'exc': exc, - > 'runme': runme, + > b'abort': abort, + > b'exc': exc, + > b'runme': runme, > } > cmdtable = {} > command = registrar.command(cmdtable) - > @command(b'test', [], 'hg test [COST] [FUNC]') - > def t(ui, repo, cost=1.0, func='runme'): + > @command(b'test', [], b'hg test [COST] [FUNC]') + > def t(ui, repo, cost=1.0, func=b'runme'): > cost = float(cost) > func = functable[func] - > ui.status('start\n') + > ui.status(b'start\n') > runs = worker.worker(ui, cost, func, (ui,), range(8)) > for n, i in runs: > pass - > ui.status('done\n') + > ui.status(b'done\n') > EOF $ abspath=`pwd`/t.py $ hg init
--- a/tests/testlib/ext-phase-report.py Sat Mar 03 22:29:24 2018 -0500 +++ b/tests/testlib/ext-phase-report.py Sun Mar 04 10:42:51 2018 -0500 @@ -5,18 +5,18 @@ def reposetup(ui, repo): def reportphasemove(tr): - for rev, move in sorted(tr.changes['phases'].iteritems()): + for rev, move in sorted(tr.changes[b'phases'].items()): if move[0] is None: - ui.write(('test-debug-phase: new rev %d: x -> %d\n' + ui.write((b'test-debug-phase: new rev %d: x -> %d\n' % (rev, move[1]))) else: - ui.write(('test-debug-phase: move rev %d: %s -> %d\n' + ui.write((b'test-debug-phase: move rev %d: %d -> %d\n' % (rev, move[0], move[1]))) class reportphaserepo(repo.__class__): def transaction(self, *args, **kwargs): tr = super(reportphaserepo, self).transaction(*args, **kwargs) - tr.addpostclose('report-phase', reportphasemove) + tr.addpostclose(b'report-phase', reportphasemove) return tr repo.__class__ = reportphaserepo