comparison mercurial/hg.py @ 43076:2372284d9457
formatting: blacken the codebase
This is using my patch to black
(https://github.com/psf/black/pull/826) so we don't un-wrap collection
literals.
Done with:
hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"' | xargs black -S
# skip-blame mass-reformatting only
# no-check-commit reformats foo_bar functions
Differential Revision: https://phab.mercurial-scm.org/D6971
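As orientation for the hunks below, here is a small runnable sketch of the two reformatting patterns that account for most of this diff: a long call exploded so its argument sits on its own indented line, and a multi-operand condition broken one operand per line. The helper names (`_abort`, `checkpath`, `cancopy`) are stubs invented for illustration, not Mercurial APIs, and the shapes shown are my reading of the hunks rather than verified `black` output; the `-S` flag in the command above (`--skip-string-normalization`) is why the single-quoted literals are left as they are.

```python
# Illustrative stubs only -- these names are not part of mercurial/hg.py.

def _abort(msg):
    # stand-in for error.Abort so the snippet runs on its own
    return RuntimeError(msg)


# Hand-wrapped call before reformatting:
#     raise error.Abort(_('invalid path %s: %s') % (
#         path, pycompat.bytestr(e)))
# Shape black produces (mirroring the hunk in _local below): the whole
# argument moves onto its own indented line under the opening paren.
def checkpath(path, err):
    raise _abort(
        'invalid path %s: %s' % (path, err)
    )


# Hand-wrapped condition before reformatting:
#     if (srcrepo and srcrepo.cancopy() and islocal(dest)
#         and not phases.hassecret(srcrepo)):
# Shape black produces (mirroring the hunk in clone below): one operand
# per line, closing parenthesis dedented to the statement's indent.
def cancopy(srcrepo, dest, islocal, hassecret):
    return (
        srcrepo
        and srcrepo.cancopy()
        and islocal(dest)
        and not hassecret(srcrepo)
    )
```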
author   | Augie Fackler <augie@google.com>
date     | Sun, 06 Oct 2019 09:45:02 -0400
parents  | ee1ef76d7339
children | 687b865b95ad
43075:57875cf423c9 | 43076:2372284d9457 |
---|---|
13 import os | 13 import os |
14 import shutil | 14 import shutil |
15 import stat | 15 import stat |
16 | 16 |
17 from .i18n import _ | 17 from .i18n import _ |
18 from .node import ( | 18 from .node import nullid |
19 nullid, | |
20 ) | |
21 | 19 |
22 from . import ( | 20 from . import ( |
23 bookmarks, | 21 bookmarks, |
24 bundlerepo, | 22 bundlerepo, |
25 cacheutil, | 23 cacheutil, |
48 util, | 46 util, |
49 verify as verifymod, | 47 verify as verifymod, |
50 vfs as vfsmod, | 48 vfs as vfsmod, |
51 ) | 49 ) |
52 | 50 |
53 from .interfaces import ( | 51 from .interfaces import repository as repositorymod |
54 repository as repositorymod, | |
55 ) | |
56 | 52 |
57 release = lock.release | 53 release = lock.release |
58 | 54 |
59 # shared features | 55 # shared features |
60 sharedbookmarks = 'bookmarks' | 56 sharedbookmarks = 'bookmarks' |
57 | |
61 | 58 |
62 def _local(path): | 59 def _local(path): |
63 path = util.expandpath(util.urllocalpath(path)) | 60 path = util.expandpath(util.urllocalpath(path)) |
64 | 61 |
65 try: | 62 try: |
66 isfile = os.path.isfile(path) | 63 isfile = os.path.isfile(path) |
67 # Python 2 raises TypeError, Python 3 ValueError. | 64 # Python 2 raises TypeError, Python 3 ValueError. |
68 except (TypeError, ValueError) as e: | 65 except (TypeError, ValueError) as e: |
69 raise error.Abort(_('invalid path %s: %s') % ( | 66 raise error.Abort( |
70 path, pycompat.bytestr(e))) | 67 _('invalid path %s: %s') % (path, pycompat.bytestr(e)) |
68 ) | |
71 | 69 |
72 return isfile and bundlerepo or localrepo | 70 return isfile and bundlerepo or localrepo |
73 | 71 |
72 | |
74 def addbranchrevs(lrepo, other, branches, revs): | 73 def addbranchrevs(lrepo, other, branches, revs): |
75 peer = other.peer() # a courtesy to callers using a localrepo for other | 74 peer = other.peer() # a courtesy to callers using a localrepo for other |
76 hashbranch, branches = branches | 75 hashbranch, branches = branches |
77 if not hashbranch and not branches: | 76 if not hashbranch and not branches: |
78 x = revs or None | 77 x = revs or None |
79 if revs: | 78 if revs: |
80 y = revs[0] | 79 y = revs[0] |
112 if hashbranch: | 111 if hashbranch: |
113 if not primary(hashbranch): | 112 if not primary(hashbranch): |
114 revs.append(hashbranch) | 113 revs.append(hashbranch) |
115 return revs, revs[0] | 114 return revs, revs[0] |
116 | 115 |
116 | |
117 def parseurl(path, branches=None): | 117 def parseurl(path, branches=None): |
118 '''parse url#branch, returning (url, (branch, branches))''' | 118 '''parse url#branch, returning (url, (branch, branches))''' |
119 | 119 |
120 u = util.url(path) | 120 u = util.url(path) |
121 branch = None | 121 branch = None |
122 if u.fragment: | 122 if u.fragment: |
123 branch = u.fragment | 123 branch = u.fragment |
124 u.fragment = None | 124 u.fragment = None |
125 return bytes(u), (branch, branches or []) | 125 return bytes(u), (branch, branches or []) |
126 | |
126 | 127 |
127 schemes = { | 128 schemes = { |
128 'bundle': bundlerepo, | 129 'bundle': bundlerepo, |
129 'union': unionrepo, | 130 'union': unionrepo, |
130 'file': _local, | 131 'file': _local, |
131 'http': httppeer, | 132 'http': httppeer, |
132 'https': httppeer, | 133 'https': httppeer, |
133 'ssh': sshpeer, | 134 'ssh': sshpeer, |
134 'static-http': statichttprepo, | 135 'static-http': statichttprepo, |
135 } | 136 } |
137 | |
136 | 138 |
137 def _peerlookup(path): | 139 def _peerlookup(path): |
138 u = util.url(path) | 140 u = util.url(path) |
139 scheme = u.scheme or 'file' | 141 scheme = u.scheme or 'file' |
140 thing = schemes.get(scheme) or schemes['file'] | 142 thing = schemes.get(scheme) or schemes['file'] |
145 # module that implements __call__ | 147 # module that implements __call__ |
146 if not util.safehasattr(thing, 'instance'): | 148 if not util.safehasattr(thing, 'instance'): |
147 raise | 149 raise |
148 return thing | 150 return thing |
149 | 151 |
152 | |
150 def islocal(repo): | 153 def islocal(repo): |
151 '''return true if repo (or path pointing to repo) is local''' | 154 '''return true if repo (or path pointing to repo) is local''' |
152 if isinstance(repo, bytes): | 155 if isinstance(repo, bytes): |
153 try: | 156 try: |
154 return _peerlookup(repo).islocal(repo) | 157 return _peerlookup(repo).islocal(repo) |
155 except AttributeError: | 158 except AttributeError: |
156 return False | 159 return False |
157 return repo.local() | 160 return repo.local() |
158 | 161 |
162 | |
159 def openpath(ui, path, sendaccept=True): | 163 def openpath(ui, path, sendaccept=True): |
160 '''open path with open if local, url.open if remote''' | 164 '''open path with open if local, url.open if remote''' |
161 pathurl = util.url(path, parsequery=False, parsefragment=False) | 165 pathurl = util.url(path, parsequery=False, parsefragment=False) |
162 if pathurl.islocal(): | 166 if pathurl.islocal(): |
163 return util.posixfile(pathurl.localpath(), 'rb') | 167 return util.posixfile(pathurl.localpath(), 'rb') |
164 else: | 168 else: |
165 return url.open(ui, path, sendaccept=sendaccept) | 169 return url.open(ui, path, sendaccept=sendaccept) |
166 | 170 |
171 | |
167 # a list of (ui, repo) functions called for wire peer initialization | 172 # a list of (ui, repo) functions called for wire peer initialization |
168 wirepeersetupfuncs = [] | 173 wirepeersetupfuncs = [] |
169 | 174 |
170 def _peerorrepo(ui, path, create=False, presetupfuncs=None, | 175 |
171 intents=None, createopts=None): | 176 def _peerorrepo( |
177 ui, path, create=False, presetupfuncs=None, intents=None, createopts=None | |
178 ): | |
172 """return a repository object for the specified path""" | 179 """return a repository object for the specified path""" |
173 obj = _peerlookup(path).instance(ui, path, create, intents=intents, | 180 obj = _peerlookup(path).instance( |
174 createopts=createopts) | 181 ui, path, create, intents=intents, createopts=createopts |
182 ) | |
175 ui = getattr(obj, "ui", ui) | 183 ui = getattr(obj, "ui", ui) |
176 for f in presetupfuncs or []: | 184 for f in presetupfuncs or []: |
177 f(ui, obj) | 185 f(ui, obj) |
178 ui.log(b'extension', b'- executing reposetup hooks\n') | 186 ui.log(b'extension', b'- executing reposetup hooks\n') |
179 with util.timedcm('all reposetup') as allreposetupstats: | 187 with util.timedcm('all reposetup') as allreposetupstats: |
181 ui.log(b'extension', b' - running reposetup for %s\n', name) | 189 ui.log(b'extension', b' - running reposetup for %s\n', name) |
182 hook = getattr(module, 'reposetup', None) | 190 hook = getattr(module, 'reposetup', None) |
183 if hook: | 191 if hook: |
184 with util.timedcm('reposetup %r', name) as stats: | 192 with util.timedcm('reposetup %r', name) as stats: |
185 hook(ui, obj) | 193 hook(ui, obj) |
186 ui.log(b'extension', b' > reposetup for %s took %s\n', | 194 ui.log( |
187 name, stats) | 195 b'extension', b' > reposetup for %s took %s\n', name, stats |
196 ) | |
188 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats) | 197 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats) |
189 if not obj.local(): | 198 if not obj.local(): |
190 for f in wirepeersetupfuncs: | 199 for f in wirepeersetupfuncs: |
191 f(ui, obj) | 200 f(ui, obj) |
192 return obj | 201 return obj |
193 | 202 |
194 def repository(ui, path='', create=False, presetupfuncs=None, intents=None, | 203 |
195 createopts=None): | 204 def repository( |
205 ui, path='', create=False, presetupfuncs=None, intents=None, createopts=None | |
206 ): | |
196 """return a repository object for the specified path""" | 207 """return a repository object for the specified path""" |
197 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs, | 208 peer = _peerorrepo( |
198 intents=intents, createopts=createopts) | 209 ui, |
210 path, | |
211 create, | |
212 presetupfuncs=presetupfuncs, | |
213 intents=intents, | |
214 createopts=createopts, | |
215 ) | |
199 repo = peer.local() | 216 repo = peer.local() |
200 if not repo: | 217 if not repo: |
201 raise error.Abort(_("repository '%s' is not local") % | 218 raise error.Abort( |
202 (path or peer.url())) | 219 _("repository '%s' is not local") % (path or peer.url()) |
220 ) | |
203 return repo.filtered('visible') | 221 return repo.filtered('visible') |
222 | |
204 | 223 |
205 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None): | 224 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None): |
206 '''return a repository peer for the specified path''' | 225 '''return a repository peer for the specified path''' |
207 rui = remoteui(uiorrepo, opts) | 226 rui = remoteui(uiorrepo, opts) |
208 return _peerorrepo(rui, path, create, intents=intents, | 227 return _peerorrepo( |
209 createopts=createopts).peer() | 228 rui, path, create, intents=intents, createopts=createopts |
229 ).peer() | |
230 | |
210 | 231 |
211 def defaultdest(source): | 232 def defaultdest(source): |
212 '''return default destination of clone if none is given | 233 '''return default destination of clone if none is given |
213 | 234 |
214 >>> defaultdest(b'foo') | 235 >>> defaultdest(b'foo') |
227 path = util.url(source).path | 248 path = util.url(source).path |
228 if not path: | 249 if not path: |
229 return '' | 250 return '' |
230 return os.path.basename(os.path.normpath(path)) | 251 return os.path.basename(os.path.normpath(path)) |
231 | 252 |
253 | |
232 def sharedreposource(repo): | 254 def sharedreposource(repo): |
233 """Returns repository object for source repository of a shared repo. | 255 """Returns repository object for source repository of a shared repo. |
234 | 256 |
235 If repo is not a shared repository, returns None. | 257 If repo is not a shared repository, returns None. |
236 """ | 258 """ |
245 srcurl, branches = parseurl(source) | 267 srcurl, branches = parseurl(source) |
246 srcrepo = repository(repo.ui, srcurl) | 268 srcrepo = repository(repo.ui, srcurl) |
247 repo.srcrepo = srcrepo | 269 repo.srcrepo = srcrepo |
248 return srcrepo | 270 return srcrepo |
249 | 271 |
250 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None, | 272 |
251 relative=False): | 273 def share( |
274 ui, | |
275 source, | |
276 dest=None, | |
277 update=True, | |
278 bookmarks=True, | |
279 defaultpath=None, | |
280 relative=False, | |
281 ): | |
252 '''create a shared repository''' | 282 '''create a shared repository''' |
253 | 283 |
254 if not islocal(source): | 284 if not islocal(source): |
255 raise error.Abort(_('can only share local repositories')) | 285 raise error.Abort(_('can only share local repositories')) |
256 | 286 |
270 | 300 |
271 shareditems = set() | 301 shareditems = set() |
272 if bookmarks: | 302 if bookmarks: |
273 shareditems.add(sharedbookmarks) | 303 shareditems.add(sharedbookmarks) |
274 | 304 |
275 r = repository(ui, dest, create=True, createopts={ | 305 r = repository( |
276 'sharedrepo': srcrepo, | 306 ui, |
277 'sharedrelative': relative, | 307 dest, |
278 'shareditems': shareditems, | 308 create=True, |
279 }) | 309 createopts={ |
310 'sharedrepo': srcrepo, | |
311 'sharedrelative': relative, | |
312 'shareditems': shareditems, | |
313 }, | |
314 ) | |
280 | 315 |
281 postshare(srcrepo, r, defaultpath=defaultpath) | 316 postshare(srcrepo, r, defaultpath=defaultpath) |
282 r = repository(ui, dest) | 317 r = repository(ui, dest) |
283 _postshareupdate(r, update, checkout=checkout) | 318 _postshareupdate(r, update, checkout=checkout) |
284 return r | 319 return r |
320 | |
285 | 321 |
286 def unshare(ui, repo): | 322 def unshare(ui, repo): |
287 """convert a shared repository to a normal one | 323 """convert a shared repository to a normal one |
288 | 324 |
289 Copy the store data to the repo and remove the sharedpath data. | 325 Copy the store data to the repo and remove the sharedpath data. |
323 | 359 |
324 localrepo.poisonrepository(repo) | 360 localrepo.poisonrepository(repo) |
325 | 361 |
326 return newrepo | 362 return newrepo |
327 | 363 |
364 | |
328 def postshare(sourcerepo, destrepo, defaultpath=None): | 365 def postshare(sourcerepo, destrepo, defaultpath=None): |
329 """Called after a new shared repo is created. | 366 """Called after a new shared repo is created. |
330 | 367 |
331 The new repo only has a requirements file and pointer to the source. | 368 The new repo only has a requirements file and pointer to the source. |
332 This function configures additional shared data. | 369 This function configures additional shared data. |
334 Extensions can wrap this function and write additional entries to | 371 Extensions can wrap this function and write additional entries to |
335 destrepo/.hg/shared to indicate additional pieces of data to be shared. | 372 destrepo/.hg/shared to indicate additional pieces of data to be shared. |
336 """ | 373 """ |
337 default = defaultpath or sourcerepo.ui.config('paths', 'default') | 374 default = defaultpath or sourcerepo.ui.config('paths', 'default') |
338 if default: | 375 if default: |
339 template = ('[paths]\n' | 376 template = '[paths]\n' 'default = %s\n' |
340 'default = %s\n') | |
341 destrepo.vfs.write('hgrc', util.tonativeeol(template % default)) | 377 destrepo.vfs.write('hgrc', util.tonativeeol(template % default)) |
342 if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements: | 378 if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements: |
343 with destrepo.wlock(): | 379 with destrepo.wlock(): |
344 narrowspec.copytoworkingcopy(destrepo) | 380 narrowspec.copytoworkingcopy(destrepo) |
381 | |
345 | 382 |
346 def _postshareupdate(repo, update, checkout=None): | 383 def _postshareupdate(repo, update, checkout=None): |
347 """Maybe perform a working directory update after a shared repo is created. | 384 """Maybe perform a working directory update after a shared repo is created. |
348 | 385 |
349 ``update`` can be a boolean or a revision to update to. | 386 ``update`` can be a boolean or a revision to update to. |
361 uprev = repo.lookup(test) | 398 uprev = repo.lookup(test) |
362 break | 399 break |
363 except error.RepoLookupError: | 400 except error.RepoLookupError: |
364 continue | 401 continue |
365 _update(repo, uprev) | 402 _update(repo, uprev) |
403 | |
366 | 404 |
367 def copystore(ui, srcrepo, destpath): | 405 def copystore(ui, srcrepo, destpath): |
368 '''copy files from store of srcrepo in destpath | 406 '''copy files from store of srcrepo in destpath |
369 | 407 |
370 returns destlock | 408 returns destlock |
388 if f.endswith('data'): | 426 if f.endswith('data'): |
389 # 'dstbase' may be empty (e.g. revlog format 0) | 427 # 'dstbase' may be empty (e.g. revlog format 0) |
390 lockfile = os.path.join(dstbase, "lock") | 428 lockfile = os.path.join(dstbase, "lock") |
391 # lock to avoid premature writing to the target | 429 # lock to avoid premature writing to the target |
392 destlock = lock.lock(dstvfs, lockfile) | 430 destlock = lock.lock(dstvfs, lockfile) |
393 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f), | 431 hardlink, n = util.copyfiles( |
394 hardlink, progress) | 432 srcvfs.join(f), dstvfs.join(f), hardlink, progress |
433 ) | |
395 num += n | 434 num += n |
396 if hardlink: | 435 if hardlink: |
397 ui.debug("linked %d files\n" % num) | 436 ui.debug("linked %d files\n" % num) |
398 else: | 437 else: |
399 ui.debug("copied %d files\n" % num) | 438 ui.debug("copied %d files\n" % num) |
400 return destlock | 439 return destlock |
401 except: # re-raises | 440 except: # re-raises |
402 release(destlock) | 441 release(destlock) |
403 raise | 442 raise |
404 | 443 |
405 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False, | 444 |
406 rev=None, update=True, stream=False): | 445 def clonewithshare( |
446 ui, | |
447 peeropts, | |
448 sharepath, | |
449 source, | |
450 srcpeer, | |
451 dest, | |
452 pull=False, | |
453 rev=None, | |
454 update=True, | |
455 stream=False, | |
456 ): | |
407 """Perform a clone using a shared repo. | 457 """Perform a clone using a shared repo. |
408 | 458 |
409 The store for the repository will be located at <sharepath>/.hg. The | 459 The store for the repository will be located at <sharepath>/.hg. The |
410 specified revisions will be cloned or pulled from "source". A shared repo | 460 specified revisions will be cloned or pulled from "source". A shared repo |
411 will be created at "dest" and a working copy will be created if "update" is | 461 will be created at "dest" and a working copy will be created if "update" is |
412 True. | 462 True. |
413 """ | 463 """ |
414 revs = None | 464 revs = None |
415 if rev: | 465 if rev: |
416 if not srcpeer.capable('lookup'): | 466 if not srcpeer.capable('lookup'): |
417 raise error.Abort(_("src repository does not support " | 467 raise error.Abort( |
418 "revision lookup and so doesn't " | 468 _( |
419 "support clone by revision")) | 469 "src repository does not support " |
470 "revision lookup and so doesn't " | |
471 "support clone by revision" | |
472 ) | |
473 ) | |
420 | 474 |
421 # TODO this is batchable. | 475 # TODO this is batchable. |
422 remoterevs = [] | 476 remoterevs = [] |
423 for r in rev: | 477 for r in rev: |
424 with srcpeer.commandexecutor() as e: | 478 with srcpeer.commandexecutor() as e: |
425 remoterevs.append(e.callcommand('lookup', { | 479 remoterevs.append(e.callcommand('lookup', {'key': r,}).result()) |
426 'key': r, | |
427 }).result()) | |
428 revs = remoterevs | 480 revs = remoterevs |
429 | 481 |
430 # Obtain a lock before checking for or cloning the pooled repo otherwise | 482 # Obtain a lock before checking for or cloning the pooled repo otherwise |
431 # 2 clients may race creating or populating it. | 483 # 2 clients may race creating or populating it. |
432 pooldir = os.path.dirname(sharepath) | 484 pooldir = os.path.dirname(sharepath) |
440 poolvfs = vfsmod.vfs(pooldir) | 492 poolvfs = vfsmod.vfs(pooldir) |
441 basename = os.path.basename(sharepath) | 493 basename = os.path.basename(sharepath) |
442 | 494 |
443 with lock.lock(poolvfs, '%s.lock' % basename): | 495 with lock.lock(poolvfs, '%s.lock' % basename): |
444 if os.path.exists(sharepath): | 496 if os.path.exists(sharepath): |
445 ui.status(_('(sharing from existing pooled repository %s)\n') % | 497 ui.status( |
446 basename) | 498 _('(sharing from existing pooled repository %s)\n') % basename |
499 ) | |
447 else: | 500 else: |
448 ui.status(_('(sharing from new pooled repository %s)\n') % basename) | 501 ui.status(_('(sharing from new pooled repository %s)\n') % basename) |
449 # Always use pull mode because hardlinks in share mode don't work | 502 # Always use pull mode because hardlinks in share mode don't work |
450 # well. Never update because working copies aren't necessary in | 503 # well. Never update because working copies aren't necessary in |
451 # share mode. | 504 # share mode. |
452 clone(ui, peeropts, source, dest=sharepath, pull=True, | 505 clone( |
453 revs=rev, update=False, stream=stream) | 506 ui, |
507 peeropts, | |
508 source, | |
509 dest=sharepath, | |
510 pull=True, | |
511 revs=rev, | |
512 update=False, | |
513 stream=stream, | |
514 ) | |
454 | 515 |
455 # Resolve the value to put in [paths] section for the source. | 516 # Resolve the value to put in [paths] section for the source. |
456 if islocal(source): | 517 if islocal(source): |
457 defaultpath = os.path.abspath(util.urllocalpath(source)) | 518 defaultpath = os.path.abspath(util.urllocalpath(source)) |
458 else: | 519 else: |
459 defaultpath = source | 520 defaultpath = source |
460 | 521 |
461 sharerepo = repository(ui, path=sharepath) | 522 sharerepo = repository(ui, path=sharepath) |
462 destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False, | 523 destrepo = share( |
463 defaultpath=defaultpath) | 524 ui, |
525 sharerepo, | |
526 dest=dest, | |
527 update=False, | |
528 bookmarks=False, | |
529 defaultpath=defaultpath, | |
530 ) | |
464 | 531 |
465 # We need to perform a pull against the dest repo to fetch bookmarks | 532 # We need to perform a pull against the dest repo to fetch bookmarks |
466 # and other non-store data that isn't shared by default. In the case of | 533 # and other non-store data that isn't shared by default. In the case of |
467 # non-existing shared repo, this means we pull from the remote twice. This | 534 # non-existing shared repo, this means we pull from the remote twice. This |
468 # is a bit weird. But at the time it was implemented, there wasn't an easy | 535 # is a bit weird. But at the time it was implemented, there wasn't an easy |
470 exchange.pull(destrepo, srcpeer, heads=revs) | 537 exchange.pull(destrepo, srcpeer, heads=revs) |
471 | 538 |
472 _postshareupdate(destrepo, update) | 539 _postshareupdate(destrepo, update) |
473 | 540 |
474 return srcpeer, peer(ui, peeropts, dest) | 541 return srcpeer, peer(ui, peeropts, dest) |
542 | |
475 | 543 |
476 # Recomputing branch cache might be slow on big repos, | 544 # Recomputing branch cache might be slow on big repos, |
477 # so just copy it | 545 # so just copy it |
478 def _copycache(srcrepo, dstcachedir, fname): | 546 def _copycache(srcrepo, dstcachedir, fname): |
479 """copy a cache from srcrepo to destcachedir (if it exists)""" | 547 """copy a cache from srcrepo to destcachedir (if it exists)""" |
482 if os.path.exists(srcbranchcache): | 550 if os.path.exists(srcbranchcache): |
483 if not os.path.exists(dstcachedir): | 551 if not os.path.exists(dstcachedir): |
484 os.mkdir(dstcachedir) | 552 os.mkdir(dstcachedir) |
485 util.copyfile(srcbranchcache, dstbranchcache) | 553 util.copyfile(srcbranchcache, dstbranchcache) |
486 | 554 |
487 def clone(ui, peeropts, source, dest=None, pull=False, revs=None, | 555 |
488 update=True, stream=False, branch=None, shareopts=None, | 556 def clone( |
489 storeincludepats=None, storeexcludepats=None, depth=None): | 557 ui, |
558 peeropts, | |
559 source, | |
560 dest=None, | |
561 pull=False, | |
562 revs=None, | |
563 update=True, | |
564 stream=False, | |
565 branch=None, | |
566 shareopts=None, | |
567 storeincludepats=None, | |
568 storeexcludepats=None, | |
569 depth=None, | |
570 ): | |
490 """Make a copy of an existing repository. | 571 """Make a copy of an existing repository. |
491 | 572 |
492 Create a copy of an existing repository in a new directory. The | 573 Create a copy of an existing repository in a new directory. The |
493 source and destination are URLs, as passed to the repository | 574 source and destination are URLs, as passed to the repository |
494 function. Returns a pair of repository peers, the source and | 575 function. Returns a pair of repository peers, the source and |
540 if isinstance(source, bytes): | 621 if isinstance(source, bytes): |
541 origsource = ui.expandpath(source) | 622 origsource = ui.expandpath(source) |
542 source, branches = parseurl(origsource, branch) | 623 source, branches = parseurl(origsource, branch) |
543 srcpeer = peer(ui, peeropts, source) | 624 srcpeer = peer(ui, peeropts, source) |
544 else: | 625 else: |
545 srcpeer = source.peer() # in case we were called with a localrepo | 626 srcpeer = source.peer() # in case we were called with a localrepo |
546 branches = (None, branch or []) | 627 branches = (None, branch or []) |
547 origsource = source = srcpeer.url() | 628 origsource = source = srcpeer.url() |
548 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs) | 629 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs) |
549 | 630 |
550 if dest is None: | 631 if dest is None: |
597 # requirement is set, the clone aborts early, before transferring any | 678 # requirement is set, the clone aborts early, before transferring any |
598 # data. | 679 # data. |
599 createopts['lfs'] = True | 680 createopts['lfs'] = True |
600 | 681 |
601 if extensions.disabledext('lfs'): | 682 if extensions.disabledext('lfs'): |
602 ui.status(_('(remote is using large file support (lfs), but it is ' | 683 ui.status( |
603 'explicitly disabled in the local configuration)\n')) | 684 _( |
685 '(remote is using large file support (lfs), but it is ' | |
686 'explicitly disabled in the local configuration)\n' | |
687 ) | |
688 ) | |
604 else: | 689 else: |
605 ui.status(_('(remote is using large file support (lfs); lfs will ' | 690 ui.status( |
606 'be enabled for this repository)\n')) | 691 _( |
692 '(remote is using large file support (lfs); lfs will ' | |
693 'be enabled for this repository)\n' | |
694 ) | |
695 ) | |
607 | 696 |
608 shareopts = shareopts or {} | 697 shareopts = shareopts or {} |
609 sharepool = shareopts.get('pool') | 698 sharepool = shareopts.get('pool') |
610 sharenamemode = shareopts.get('mode') | 699 sharenamemode = shareopts.get('mode') |
611 if sharepool and islocal(dest): | 700 if sharepool and islocal(dest): |
615 # repository. This returns nullid when the remote is empty. It | 704 # repository. This returns nullid when the remote is empty. It |
616 # raises RepoLookupError if revision 0 is filtered or otherwise | 705 # raises RepoLookupError if revision 0 is filtered or otherwise |
617 # not available. If we fail to resolve, sharing is not enabled. | 706 # not available. If we fail to resolve, sharing is not enabled. |
618 try: | 707 try: |
619 with srcpeer.commandexecutor() as e: | 708 with srcpeer.commandexecutor() as e: |
620 rootnode = e.callcommand('lookup', { | 709 rootnode = e.callcommand('lookup', {'key': '0',}).result() |
621 'key': '0', | |
622 }).result() | |
623 | 710 |
624 if rootnode != node.nullid: | 711 if rootnode != node.nullid: |
625 sharepath = os.path.join(sharepool, node.hex(rootnode)) | 712 sharepath = os.path.join(sharepool, node.hex(rootnode)) |
626 else: | 713 else: |
627 ui.status(_('(not using pooled storage: ' | 714 ui.status( |
628 'remote appears to be empty)\n')) | 715 _( |
716 '(not using pooled storage: ' | |
717 'remote appears to be empty)\n' | |
718 ) | |
719 ) | |
629 except error.RepoLookupError: | 720 except error.RepoLookupError: |
630 ui.status(_('(not using pooled storage: ' | 721 ui.status( |
631 'unable to resolve identity of remote)\n')) | 722 _( |
723 '(not using pooled storage: ' | |
724 'unable to resolve identity of remote)\n' | |
725 ) | |
726 ) | |
632 elif sharenamemode == 'remote': | 727 elif sharenamemode == 'remote': |
633 sharepath = os.path.join( | 728 sharepath = os.path.join( |
634 sharepool, node.hex(hashlib.sha1(source).digest())) | 729 sharepool, node.hex(hashlib.sha1(source).digest()) |
730 ) | |
635 else: | 731 else: |
636 raise error.Abort(_('unknown share naming mode: %s') % | 732 raise error.Abort( |
637 sharenamemode) | 733 _('unknown share naming mode: %s') % sharenamemode |
734 ) | |
638 | 735 |
639 # TODO this is a somewhat arbitrary restriction. | 736 # TODO this is a somewhat arbitrary restriction. |
640 if narrow: | 737 if narrow: |
641 ui.status(_('(pooled storage not supported for narrow clones)\n')) | 738 ui.status(_('(pooled storage not supported for narrow clones)\n')) |
642 sharepath = None | 739 sharepath = None |
643 | 740 |
644 if sharepath: | 741 if sharepath: |
645 return clonewithshare(ui, peeropts, sharepath, source, srcpeer, | 742 return clonewithshare( |
646 dest, pull=pull, rev=revs, update=update, | 743 ui, |
647 stream=stream) | 744 peeropts, |
745 sharepath, | |
746 source, | |
747 srcpeer, | |
748 dest, | |
749 pull=pull, | |
750 rev=revs, | |
751 update=update, | |
752 stream=stream, | |
753 ) | |
648 | 754 |
649 srclock = destlock = cleandir = None | 755 srclock = destlock = cleandir = None |
650 srcrepo = srcpeer.local() | 756 srcrepo = srcpeer.local() |
651 try: | 757 try: |
652 abspath = origsource | 758 abspath = origsource |
655 | 761 |
656 if islocal(dest): | 762 if islocal(dest): |
657 cleandir = dest | 763 cleandir = dest |
658 | 764 |
659 copy = False | 765 copy = False |
660 if (srcrepo and srcrepo.cancopy() and islocal(dest) | 766 if ( |
661 and not phases.hassecret(srcrepo)): | 767 srcrepo |
768 and srcrepo.cancopy() | |
769 and islocal(dest) | |
770 and not phases.hassecret(srcrepo) | |
771 ): | |
662 copy = not pull and not revs | 772 copy = not pull and not revs |
663 | 773 |
664 # TODO this is a somewhat arbitrary restriction. | 774 # TODO this is a somewhat arbitrary restriction. |
665 if narrow: | 775 if narrow: |
666 copy = False | 776 copy = False |
687 destpath = hgdir | 797 destpath = hgdir |
688 util.makedir(destpath, notindexed=True) | 798 util.makedir(destpath, notindexed=True) |
689 except OSError as inst: | 799 except OSError as inst: |
690 if inst.errno == errno.EEXIST: | 800 if inst.errno == errno.EEXIST: |
691 cleandir = None | 801 cleandir = None |
692 raise error.Abort(_("destination '%s' already exists") | 802 raise error.Abort( |
693 % dest) | 803 _("destination '%s' already exists") % dest |
804 ) | |
694 raise | 805 raise |
695 | 806 |
696 destlock = copystore(ui, srcrepo, destpath) | 807 destlock = copystore(ui, srcrepo, destpath) |
697 # copy bookmarks over | 808 # copy bookmarks over |
698 srcbookmarks = srcrepo.vfs.join('bookmarks') | 809 srcbookmarks = srcrepo.vfs.join('bookmarks') |
705 _copycache(srcrepo, dstcachedir, cache) | 816 _copycache(srcrepo, dstcachedir, cache) |
706 | 817 |
707 # we need to re-init the repo after manually copying the data | 818 # we need to re-init the repo after manually copying the data |
708 # into it | 819 # into it |
709 destpeer = peer(srcrepo, peeropts, dest) | 820 destpeer = peer(srcrepo, peeropts, dest) |
710 srcrepo.hook('outgoing', source='clone', | 821 srcrepo.hook('outgoing', source='clone', node=node.hex(node.nullid)) |
711 node=node.hex(node.nullid)) | |
712 else: | 822 else: |
713 try: | 823 try: |
714 # only pass ui when no srcrepo | 824 # only pass ui when no srcrepo |
715 destpeer = peer(srcrepo or ui, peeropts, dest, create=True, | 825 destpeer = peer( |
716 createopts=createopts) | 826 srcrepo or ui, |
827 peeropts, | |
828 dest, | |
829 create=True, | |
830 createopts=createopts, | |
831 ) | |
717 except OSError as inst: | 832 except OSError as inst: |
718 if inst.errno == errno.EEXIST: | 833 if inst.errno == errno.EEXIST: |
719 cleandir = None | 834 cleandir = None |
720 raise error.Abort(_("destination '%s' already exists") | 835 raise error.Abort( |
721 % dest) | 836 _("destination '%s' already exists") % dest |
837 ) | |
722 raise | 838 raise |
723 | 839 |
724 if revs: | 840 if revs: |
725 if not srcpeer.capable('lookup'): | 841 if not srcpeer.capable('lookup'): |
726 raise error.Abort(_("src repository does not support " | 842 raise error.Abort( |
727 "revision lookup and so doesn't " | 843 _( |
728 "support clone by revision")) | 844 "src repository does not support " |
845 "revision lookup and so doesn't " | |
846 "support clone by revision" | |
847 ) | |
848 ) | |
729 | 849 |
730 # TODO this is batchable. | 850 # TODO this is batchable. |
731 remoterevs = [] | 851 remoterevs = [] |
732 for rev in revs: | 852 for rev in revs: |
733 with srcpeer.commandexecutor() as e: | 853 with srcpeer.commandexecutor() as e: |
734 remoterevs.append(e.callcommand('lookup', { | 854 remoterevs.append( |
735 'key': rev, | 855 e.callcommand('lookup', {'key': rev,}).result() |
736 }).result()) | 856 ) |
737 revs = remoterevs | 857 revs = remoterevs |
738 | 858 |
739 checkout = revs[0] | 859 checkout = revs[0] |
740 else: | 860 else: |
741 revs = None | 861 revs = None |
755 else: | 875 else: |
756 stream = None | 876 stream = None |
757 # internal config: ui.quietbookmarkmove | 877 # internal config: ui.quietbookmarkmove |
758 overrides = {('ui', 'quietbookmarkmove'): True} | 878 overrides = {('ui', 'quietbookmarkmove'): True} |
759 with local.ui.configoverride(overrides, 'clone'): | 879 with local.ui.configoverride(overrides, 'clone'): |
760 exchange.pull(local, srcpeer, revs, | 880 exchange.pull( |
761 streamclonerequested=stream, | 881 local, |
762 includepats=storeincludepats, | 882 srcpeer, |
763 excludepats=storeexcludepats, | 883 revs, |
764 depth=depth) | 884 streamclonerequested=stream, |
885 includepats=storeincludepats, | |
886 excludepats=storeexcludepats, | |
887 depth=depth, | |
888 ) | |
765 elif srcrepo: | 889 elif srcrepo: |
766 # TODO lift restriction once exchange.push() accepts narrow | 890 # TODO lift restriction once exchange.push() accepts narrow |
767 # push. | 891 # push. |
768 if narrow: | 892 if narrow: |
769 raise error.Abort(_('narrow clone not available for ' | 893 raise error.Abort( |
770 'remote destinations')) | 894 _( |
771 | 895 'narrow clone not available for ' |
772 exchange.push(srcrepo, destpeer, revs=revs, | 896 'remote destinations' |
773 bookmarks=srcrepo._bookmarks.keys()) | 897 ) |
898 ) | |
899 | |
900 exchange.push( | |
901 srcrepo, | |
902 destpeer, | |
903 revs=revs, | |
904 bookmarks=srcrepo._bookmarks.keys(), | |
905 ) | |
774 else: | 906 else: |
775 raise error.Abort(_("clone from remote to remote not supported") | 907 raise error.Abort( |
776 ) | 908 _("clone from remote to remote not supported") |
909 ) | |
777 | 910 |
778 cleandir = None | 911 cleandir = None |
779 | 912 |
780 destrepo = destpeer.local() | 913 destrepo = destpeer.local() |
781 if destrepo: | 914 if destrepo: |
790 logexchange.pullremotenames(destrepo, srcpeer) | 923 logexchange.pullremotenames(destrepo, srcpeer) |
791 | 924 |
792 if update: | 925 if update: |
793 if update is not True: | 926 if update is not True: |
794 with srcpeer.commandexecutor() as e: | 927 with srcpeer.commandexecutor() as e: |
795 checkout = e.callcommand('lookup', { | 928 checkout = e.callcommand( |
796 'key': update, | 929 'lookup', {'key': update,} |
797 }).result() | 930 ).result() |
798 | 931 |
799 uprev = None | 932 uprev = None |
800 status = None | 933 status = None |
801 if checkout is not None: | 934 if checkout is not None: |
802 # Some extensions (at least hg-git and hg-subversion) have | 935 # Some extensions (at least hg-git and hg-subversion) have |
819 update = '@' | 952 update = '@' |
820 bn = destrepo[uprev].branch() | 953 bn = destrepo[uprev].branch() |
821 if bn == 'default': | 954 if bn == 'default': |
822 status = _("updating to bookmark @\n") | 955 status = _("updating to bookmark @\n") |
823 else: | 956 else: |
824 status = (_("updating to bookmark @ on branch %s\n") | 957 status = ( |
825 % bn) | 958 _("updating to bookmark @ on branch %s\n") % bn |
959 ) | |
826 except KeyError: | 960 except KeyError: |
827 try: | 961 try: |
828 uprev = destrepo.branchtip('default') | 962 uprev = destrepo.branchtip('default') |
829 except error.RepoLookupError: | 963 except error.RepoLookupError: |
830 uprev = destrepo.lookup('tip') | 964 uprev = destrepo.lookup('tip') |
841 shutil.rmtree(cleandir, True) | 975 shutil.rmtree(cleandir, True) |
842 if srcpeer is not None: | 976 if srcpeer is not None: |
843 srcpeer.close() | 977 srcpeer.close() |
844 return srcpeer, destpeer | 978 return srcpeer, destpeer |
845 | 979 |
980 | |
846 def _showstats(repo, stats, quietempty=False): | 981 def _showstats(repo, stats, quietempty=False): |
847 if quietempty and stats.isempty(): | 982 if quietempty and stats.isempty(): |
848 return | 983 return |
849 repo.ui.status(_("%d files updated, %d files merged, " | 984 repo.ui.status( |
850 "%d files removed, %d files unresolved\n") % ( | 985 _( |
851 stats.updatedcount, stats.mergedcount, | 986 "%d files updated, %d files merged, " |
852 stats.removedcount, stats.unresolvedcount)) | 987 "%d files removed, %d files unresolved\n" |
988 ) | |
989 % ( | |
990 stats.updatedcount, | |
991 stats.mergedcount, | |
992 stats.removedcount, | |
993 stats.unresolvedcount, | |
994 ) | |
995 ) | |
996 | |
853 | 997 |
854 def updaterepo(repo, node, overwrite, updatecheck=None): | 998 def updaterepo(repo, node, overwrite, updatecheck=None): |
855 """Update the working directory to node. | 999 """Update the working directory to node. |
856 | 1000 |
857 When overwrite is set, changes are clobbered, merged else | 1001 When overwrite is set, changes are clobbered, merged else |
858 | 1002 |
859 returns stats (see pydoc mercurial.merge.applyupdates)""" | 1003 returns stats (see pydoc mercurial.merge.applyupdates)""" |
860 return mergemod.update(repo, node, branchmerge=False, force=overwrite, | 1004 return mergemod.update( |
861 labels=['working copy', 'destination'], | 1005 repo, |
862 updatecheck=updatecheck) | 1006 node, |
1007 branchmerge=False, | |
1008 force=overwrite, | |
1009 labels=['working copy', 'destination'], | |
1010 updatecheck=updatecheck, | |
1011 ) | |
1012 | |
863 | 1013 |
864 def update(repo, node, quietempty=False, updatecheck=None): | 1014 def update(repo, node, quietempty=False, updatecheck=None): |
865 """update the working directory to node""" | 1015 """update the working directory to node""" |
866 stats = updaterepo(repo, node, False, updatecheck=updatecheck) | 1016 stats = updaterepo(repo, node, False, updatecheck=updatecheck) |
867 _showstats(repo, stats, quietempty) | 1017 _showstats(repo, stats, quietempty) |
868 if stats.unresolvedcount: | 1018 if stats.unresolvedcount: |
869 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n")) | 1019 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n")) |
870 return stats.unresolvedcount > 0 | 1020 return stats.unresolvedcount > 0 |
871 | 1021 |
1022 | |
872 # naming conflict in clone() | 1023 # naming conflict in clone() |
873 _update = update | 1024 _update = update |
1025 | |
874 | 1026 |
875 def clean(repo, node, show_stats=True, quietempty=False): | 1027 def clean(repo, node, show_stats=True, quietempty=False): |
876 """forcibly switch the working directory to node, clobbering changes""" | 1028 """forcibly switch the working directory to node, clobbering changes""" |
877 stats = updaterepo(repo, node, True) | 1029 stats = updaterepo(repo, node, True) |
878 repo.vfs.unlinkpath('graftstate', ignoremissing=True) | 1030 repo.vfs.unlinkpath('graftstate', ignoremissing=True) |
879 if show_stats: | 1031 if show_stats: |
880 _showstats(repo, stats, quietempty) | 1032 _showstats(repo, stats, quietempty) |
881 return stats.unresolvedcount > 0 | 1033 return stats.unresolvedcount > 0 |
882 | 1034 |
1035 | |
883 # naming conflict in updatetotally() | 1036 # naming conflict in updatetotally() |
884 _clean = clean | 1037 _clean = clean |
885 | 1038 |
886 _VALID_UPDATECHECKS = {mergemod.UPDATECHECK_ABORT, | 1039 _VALID_UPDATECHECKS = { |
887 mergemod.UPDATECHECK_NONE, | 1040 mergemod.UPDATECHECK_ABORT, |
888 mergemod.UPDATECHECK_LINEAR, | 1041 mergemod.UPDATECHECK_NONE, |
889 mergemod.UPDATECHECK_NO_CONFLICT, | 1042 mergemod.UPDATECHECK_LINEAR, |
1043 mergemod.UPDATECHECK_NO_CONFLICT, | |
890 } | 1044 } |
1045 | |
891 | 1046 |
892 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None): | 1047 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None): |
893 """Update the working directory with extra care for non-file components | 1048 """Update the working directory with extra care for non-file components |
894 | 1049 |
895 This takes care of non-file components below: | 1050 This takes care of non-file components below: |
919 updatecheck = ui.config('commands', 'update.check') | 1074 updatecheck = ui.config('commands', 'update.check') |
920 if updatecheck not in _VALID_UPDATECHECKS: | 1075 if updatecheck not in _VALID_UPDATECHECKS: |
921 # If not configured, or invalid value configured | 1076 # If not configured, or invalid value configured |
922 updatecheck = mergemod.UPDATECHECK_LINEAR | 1077 updatecheck = mergemod.UPDATECHECK_LINEAR |
923 if updatecheck not in _VALID_UPDATECHECKS: | 1078 if updatecheck not in _VALID_UPDATECHECKS: |
924 raise ValueError(r'Invalid updatecheck value %r (can accept %r)' % ( | 1079 raise ValueError( |
925 updatecheck, _VALID_UPDATECHECKS)) | 1080 r'Invalid updatecheck value %r (can accept %r)' |
1081 % (updatecheck, _VALID_UPDATECHECKS) | |
1082 ) | |
926 with repo.wlock(): | 1083 with repo.wlock(): |
927 movemarkfrom = None | 1084 movemarkfrom = None |
928 warndest = False | 1085 warndest = False |
929 if checkout is None: | 1086 if checkout is None: |
930 updata = destutil.destupdate(repo, clean=clean) | 1087 updata = destutil.destupdate(repo, clean=clean) |
939 updatecheck = mergemod.UPDATECHECK_NONE | 1096 updatecheck = mergemod.UPDATECHECK_NONE |
940 ret = _update(repo, checkout, updatecheck=updatecheck) | 1097 ret = _update(repo, checkout, updatecheck=updatecheck) |
941 | 1098 |
942 if not ret and movemarkfrom: | 1099 if not ret and movemarkfrom: |
943 if movemarkfrom == repo['.'].node(): | 1100 if movemarkfrom == repo['.'].node(): |
944 pass # no-op update | 1101 pass # no-op update |
945 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()): | 1102 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()): |
946 b = ui.label(repo._activebookmark, 'bookmarks.active') | 1103 b = ui.label(repo._activebookmark, 'bookmarks.active') |
947 ui.status(_("updating bookmark %s\n") % b) | 1104 ui.status(_("updating bookmark %s\n") % b) |
948 else: | 1105 else: |
949 # this can happen with a non-linear update | 1106 # this can happen with a non-linear update |
964 if warndest: | 1121 if warndest: |
965 destutil.statusotherdests(ui, repo) | 1122 destutil.statusotherdests(ui, repo) |
966 | 1123 |
967 return ret | 1124 return ret |
968 | 1125 |
969 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None, | 1126 |
970 abort=False): | 1127 def merge( |
1128 repo, | |
1129 node, | |
1130 force=None, | |
1131 remind=True, | |
1132 mergeforce=False, | |
1133 labels=None, | |
1134 abort=False, | |
1135 ): | |
971 """Branch merge with node, resolving changes. Return true if any | 1136 """Branch merge with node, resolving changes. Return true if any |
972 unresolved conflicts.""" | 1137 unresolved conflicts.""" |
973 if abort: | 1138 if abort: |
974 return abortmerge(repo.ui, repo) | 1139 return abortmerge(repo.ui, repo) |
975 | 1140 |
976 stats = mergemod.update(repo, node, branchmerge=True, force=force, | 1141 stats = mergemod.update( |
977 mergeforce=mergeforce, labels=labels) | 1142 repo, |
1143 node, | |
1144 branchmerge=True, | |
1145 force=force, | |
1146 mergeforce=mergeforce, | |
1147 labels=labels, | |
1148 ) | |
978 _showstats(repo, stats) | 1149 _showstats(repo, stats) |
979 if stats.unresolvedcount: | 1150 if stats.unresolvedcount: |
980 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges " | 1151 repo.ui.status( |
981 "or 'hg merge --abort' to abandon\n")) | 1152 _( |
1153 "use 'hg resolve' to retry unresolved file merges " | |
1154 "or 'hg merge --abort' to abandon\n" | |
1155 ) | |
1156 ) | |
982 elif remind: | 1157 elif remind: |
983 repo.ui.status(_("(branch merge, don't forget to commit)\n")) | 1158 repo.ui.status(_("(branch merge, don't forget to commit)\n")) |
984 return stats.unresolvedcount > 0 | 1159 return stats.unresolvedcount > 0 |
1160 | |
985 | 1161 |
986 def abortmerge(ui, repo): | 1162 def abortmerge(ui, repo): |
987 ms = mergemod.mergestate.read(repo) | 1163 ms = mergemod.mergestate.read(repo) |
988 if ms.active(): | 1164 if ms.active(): |
989 # there were conflicts | 1165 # there were conflicts |
990 node = ms.localctx.hex() | 1166 node = ms.localctx.hex() |
991 else: | 1167 else: |
992 # there were no conficts, mergestate was not stored | 1168 # there were no conficts, mergestate was not stored |
993 node = repo['.'].hex() | 1169 node = repo['.'].hex() |
994 | 1170 |
995 repo.ui.status(_("aborting the merge, updating back to" | 1171 repo.ui.status( |
996 " %s\n") % node[:12]) | 1172 _("aborting the merge, updating back to" " %s\n") % node[:12] |
1173 ) | |
997 stats = mergemod.update(repo, node, branchmerge=False, force=True) | 1174 stats = mergemod.update(repo, node, branchmerge=False, force=True) |
998 _showstats(repo, stats) | 1175 _showstats(repo, stats) |
999 return stats.unresolvedcount > 0 | 1176 return stats.unresolvedcount > 0 |
1000 | 1177 |
1001 def _incoming(displaychlist, subreporecurse, ui, repo, source, | 1178 |
1002 opts, buffered=False): | 1179 def _incoming( |
1180 displaychlist, subreporecurse, ui, repo, source, opts, buffered=False | |
1181 ): | |
1003 """ | 1182 """ |
1004 Helper for incoming / gincoming. | 1183 Helper for incoming / gincoming. |
1005 displaychlist gets called with | 1184 displaychlist gets called with |
1006 (remoterepo, incomingchangesetlist, displayer) parameters, | 1185 (remoterepo, incomingchangesetlist, displayer) parameters, |
1007 and is supposed to contain only code that can't be unified. | 1186 and is supposed to contain only code that can't be unified. |
1011 ui.status(_('comparing with %s\n') % util.hidepassword(source)) | 1190 ui.status(_('comparing with %s\n') % util.hidepassword(source)) |
1012 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev')) | 1191 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev')) |
1013 | 1192 |
1014 if revs: | 1193 if revs: |
1015 revs = [other.lookup(rev) for rev in revs] | 1194 revs = [other.lookup(rev) for rev in revs] |
1016 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other, | 1195 other, chlist, cleanupfn = bundlerepo.getremotechanges( |
1017 revs, opts["bundle"], opts["force"]) | 1196 ui, repo, other, revs, opts["bundle"], opts["force"] |
1197 ) | |
1018 try: | 1198 try: |
1019 if not chlist: | 1199 if not chlist: |
1020 ui.status(_("no changes found\n")) | 1200 ui.status(_("no changes found\n")) |
1021 return subreporecurse() | 1201 return subreporecurse() |
1022 ui.pager('incoming') | 1202 ui.pager('incoming') |
1023 displayer = logcmdutil.changesetdisplayer(ui, other, opts, | 1203 displayer = logcmdutil.changesetdisplayer( |
1024 buffered=buffered) | 1204 ui, other, opts, buffered=buffered |
1205 ) | |
1025 displaychlist(other, chlist, displayer) | 1206 displaychlist(other, chlist, displayer) |
1026 displayer.close() | 1207 displayer.close() |
1027 finally: | 1208 finally: |
1028 cleanupfn() | 1209 cleanupfn() |
1029 subreporecurse() | 1210 subreporecurse() |
1030 return 0 # exit code is zero since we found incoming changes | 1211 return 0 # exit code is zero since we found incoming changes |
1212 | |
1031 | 1213 |
1032 def incoming(ui, repo, source, opts): | 1214 def incoming(ui, repo, source, opts): |
1033 def subreporecurse(): | 1215 def subreporecurse(): |
1034 ret = 1 | 1216 ret = 1 |
1035 if opts.get('subrepos'): | 1217 if opts.get('subrepos'): |
1050 parents = [p for p in other.changelog.parents(n) if p != nullid] | 1232 parents = [p for p in other.changelog.parents(n) if p != nullid] |
1051 if opts.get('no_merges') and len(parents) == 2: | 1233 if opts.get('no_merges') and len(parents) == 2: |
1052 continue | 1234 continue |
1053 count += 1 | 1235 count += 1 |
1054 displayer.show(other[n]) | 1236 displayer.show(other[n]) |
1237 | |
1055 return _incoming(display, subreporecurse, ui, repo, source, opts) | 1238 return _incoming(display, subreporecurse, ui, repo, source, opts) |
1239 | |
1056 | 1240 |
1057 def _outgoing(ui, repo, dest, opts): | 1241 def _outgoing(ui, repo, dest, opts): |
1058 path = ui.paths.getpath(dest, default=('default-push', 'default')) | 1242 path = ui.paths.getpath(dest, default=('default-push', 'default')) |
1059 if not path: | 1243 if not path: |
1060 raise error.Abort(_('default repository not configured!'), | 1244 raise error.Abort( |
1061 hint=_("see 'hg help config.paths'")) | 1245 _('default repository not configured!'), |
1246 hint=_("see 'hg help config.paths'"), | |
1247 ) | |
1062 dest = path.pushloc or path.loc | 1248 dest = path.pushloc or path.loc |
1063 branches = path.branch, opts.get('branch') or [] | 1249 branches = path.branch, opts.get('branch') or [] |
1064 | 1250 |
1065 ui.status(_('comparing with %s\n') % util.hidepassword(dest)) | 1251 ui.status(_('comparing with %s\n') % util.hidepassword(dest)) |
1066 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev')) | 1252 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev')) |
1067 if revs: | 1253 if revs: |
1068 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)] | 1254 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)] |
1069 | 1255 |
1070 other = peer(repo, opts, dest) | 1256 other = peer(repo, opts, dest) |
1071 outgoing = discovery.findcommonoutgoing(repo, other, revs, | 1257 outgoing = discovery.findcommonoutgoing( |
1072 force=opts.get('force')) | 1258 repo, other, revs, force=opts.get('force') |
1259 ) | |
1073 o = outgoing.missing | 1260 o = outgoing.missing |
1074 if not o: | 1261 if not o: |
1075 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded) | 1262 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded) |
1076 return o, other | 1263 return o, other |
1264 | |
1077 | 1265 |
1078 def outgoing(ui, repo, dest, opts): | 1266 def outgoing(ui, repo, dest, opts): |
1079 def recurse(): | 1267 def recurse(): |
1080 ret = 1 | 1268 ret = 1 |
1081 if opts.get('subrepos'): | 1269 if opts.get('subrepos'): |
1105 count += 1 | 1293 count += 1 |
1106 displayer.show(repo[n]) | 1294 displayer.show(repo[n]) |
1107 displayer.close() | 1295 displayer.close() |
1108 cmdutil.outgoinghooks(ui, repo, other, opts, o) | 1296 cmdutil.outgoinghooks(ui, repo, other, opts, o) |
1109 recurse() | 1297 recurse() |
1110 return 0 # exit code is zero since we found outgoing changes | 1298 return 0 # exit code is zero since we found outgoing changes |
1299 | |
1111 | 1300 |
1112 def verify(repo, level=None): | 1301 def verify(repo, level=None): |
1113 """verify the consistency of a repository""" | 1302 """verify the consistency of a repository""" |
1114 ret = verifymod.verify(repo, level=level) | 1303 ret = verifymod.verify(repo, level=level) |
1115 | 1304 |
1116 # Broken subrepo references in hidden csets don't seem worth worrying about, | 1305 # Broken subrepo references in hidden csets don't seem worth worrying about, |
1117 # since they can't be pushed/pulled, and --hidden can be used if they are a | 1306 # since they can't be pushed/pulled, and --hidden can be used if they are a |
1118 # concern. | 1307 # concern. |
1119 | 1308 |
1120 # pathto() is needed for -R case | 1309 # pathto() is needed for -R case |
1121 revs = repo.revs("filelog(%s)", | 1310 revs = repo.revs( |
1122 util.pathto(repo.root, repo.getcwd(), '.hgsubstate')) | 1311 "filelog(%s)", util.pathto(repo.root, repo.getcwd(), '.hgsubstate') |
1312 ) | |
1123 | 1313 |
1124 if revs: | 1314 if revs: |
1125 repo.ui.status(_('checking subrepo links\n')) | 1315 repo.ui.status(_('checking subrepo links\n')) |
1126 for rev in revs: | 1316 for rev in revs: |
1127 ctx = repo[rev] | 1317 ctx = repo[rev] |
1128 try: | 1318 try: |
1129 for subpath in ctx.substate: | 1319 for subpath in ctx.substate: |
1130 try: | 1320 try: |
1131 ret = (ctx.sub(subpath, allowcreate=False).verify() | 1321 ret = ( |
1132 or ret) | 1322 ctx.sub(subpath, allowcreate=False).verify() or ret |
1323 ) | |
1133 except error.RepoError as e: | 1324 except error.RepoError as e: |
1134 repo.ui.warn(('%d: %s\n') % (rev, e)) | 1325 repo.ui.warn('%d: %s\n' % (rev, e)) |
1135 except Exception: | 1326 except Exception: |
1136 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') % | 1327 repo.ui.warn( |
1137 node.short(ctx.node())) | 1328 _('.hgsubstate is corrupt in revision %s\n') |
1329 % node.short(ctx.node()) | |
1330 ) | |
1138 | 1331 |
1139 return ret | 1332 return ret |
1333 | |
1140 | 1334 |
1141 def remoteui(src, opts): | 1335 def remoteui(src, opts): |
1142 'build a remote ui from ui or repo and opts' | 1336 'build a remote ui from ui or repo and opts' |
1143 if util.safehasattr(src, 'baseui'): # looks like a repository | 1337 if util.safehasattr(src, 'baseui'): # looks like a repository |
1144 dst = src.baseui.copy() # drop repo-specific config | 1338 dst = src.baseui.copy() # drop repo-specific config |
1145 src = src.ui # copy target options from repo | 1339 src = src.ui # copy target options from repo |
1146 else: # assume it's a global ui object | 1340 else: # assume it's a global ui object |
1147 dst = src.copy() # keep all global options | 1341 dst = src.copy() # keep all global options |
1148 | 1342 |
1149 # copy ssh-specific options | 1343 # copy ssh-specific options |
1150 for o in 'ssh', 'remotecmd': | 1344 for o in 'ssh', 'remotecmd': |
1151 v = opts.get(o) or src.config('ui', o) | 1345 v = opts.get(o) or src.config('ui', o) |
1152 if v: | 1346 if v: |
1165 if v: | 1359 if v: |
1166 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied') | 1360 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied') |
1167 | 1361 |
1168 return dst | 1362 return dst |
1169 | 1363 |
1364 | |
1170 # Files of interest | 1365 # Files of interest |
1171 # Used to check if the repository has changed looking at mtime and size of | 1366 # Used to check if the repository has changed looking at mtime and size of |
1172 # these files. | 1367 # these files. |
1173 foi = [('spath', '00changelog.i'), | 1368 foi = [ |
1174 ('spath', 'phaseroots'), # ! phase can change content at the same size | 1369 ('spath', '00changelog.i'), |
1175 ('spath', 'obsstore'), | 1370 ('spath', 'phaseroots'), # ! phase can change content at the same size |
1176 ('path', 'bookmarks'), # ! bookmark can change content at the same size | 1371 ('spath', 'obsstore'), |
1177 ] | 1372 ('path', 'bookmarks'), # ! bookmark can change content at the same size |
1373 ] | |
1374 | |
1178 | 1375 |
1179 class cachedlocalrepo(object): | 1376 class cachedlocalrepo(object): |
1180 """Holds a localrepository that can be cached and reused.""" | 1377 """Holds a localrepository that can be cached and reused.""" |
1181 | 1378 |
1182 def __init__(self, repo): | 1379 def __init__(self, repo): |