comparison mercurial/bundlerepo.py @ 43077:687b865b95ad

formatting: byteify all mercurial/ and hgext/ string literals

Done with

python3.7 contrib/byteify-strings.py -i $(hg files 'set:mercurial/**.py - mercurial/thirdparty/** + hgext/**.py - hgext/fsmonitor/pywatchman/** - mercurial/__init__.py')
black -l 80 -t py33 -S $(hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**" - hgext/fsmonitor/pywatchman/**')

# skip-blame mass-reformatting only

Differential Revision: https://phab.mercurial-scm.org/D6972
author Augie Fackler <augie@google.com>
date Sun, 06 Oct 2019 09:48:39 -0400
parents 2372284d9457
children 4aa72cdf616f
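
The rewrite recorded below works at the token level: every unprefixed string literal in the selected files becomes a bytes literal, and black is then run with -l 80, so lines that grew past 80 columns are re-wrapped (for example, the error.Abort call at old line 298 becomes three new lines). As a rough illustration only, not the actual contrib/byteify-strings.py (which preserves source layout by adjusting token offsets and special-cases docstrings, keyword arguments, and existing prefixes), a minimal token-based rewriter might look like this:

    import io
    import token
    import tokenize

    def byteify(source):
        # Prefix every plain (unprefixed) string literal with b.
        # Output spacing may differ; the real script keeps layout intact.
        out = []
        for tok in tokenize.generate_tokens(io.StringIO(source).readline):
            text = tok.string
            if tok.type == token.STRING and not text.lower().startswith(
                ('b', 'r', 'f', 'u')
            ):
                text = 'b' + text
            out.append((tok.type, text))
        return tokenize.untokenize(out)

    # e.g. byteify("_('unknown parent')") yields source whose literal
    # is b'unknown parent'.
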
compared: 43076:2372284d9457 (parent, left column) → 43077:687b865b95ad (this changeset, right column)
70 continue 70 continue
71 71
72 for p in (p1, p2): 72 for p in (p1, p2):
73 if p not in self.nodemap: 73 if p not in self.nodemap:
74 raise error.LookupError( 74 raise error.LookupError(
75 p, self.indexfile, _("unknown parent") 75 p, self.indexfile, _(b"unknown parent")
76 ) 76 )
77 77
78 if deltabase not in self.nodemap: 78 if deltabase not in self.nodemap:
79 raise LookupError( 79 raise LookupError(
80 deltabase, self.indexfile, _('unknown delta base') 80 deltabase, self.indexfile, _(b'unknown delta base')
81 ) 81 )
82 82
83 baserev = self.rev(deltabase) 83 baserev = self.rev(deltabase)
84 # start, size, full unc. size, base (unused), link, p1, p2, node 84 # start, size, full unc. size, base (unused), link, p1, p2, node
85 e = ( 85 e = (
131 rawtext = self._revisioncache[2] 131 rawtext = self._revisioncache[2]
132 break 132 break
133 chain.append(iterrev) 133 chain.append(iterrev)
134 iterrev = self.index[iterrev][3] 134 iterrev = self.index[iterrev][3]
135 if iterrev == nullrev: 135 if iterrev == nullrev:
136 rawtext = '' 136 rawtext = b''
137 elif rawtext is None: 137 elif rawtext is None:
138 r = super(bundlerevlog, self)._rawtext( 138 r = super(bundlerevlog, self)._rawtext(
139 self.node(iterrev), iterrev, _df=_df 139 self.node(iterrev), iterrev, _df=_df
140 ) 140 )
141 __, rawtext, validated = r 141 __, rawtext, validated = r
168 ) 168 )
169 169
170 170
171 class bundlemanifest(bundlerevlog, manifest.manifestrevlog): 171 class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
172 def __init__( 172 def __init__(
173 self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir='' 173 self, opener, cgunpacker, linkmapper, dirlogstarts=None, dir=b''
174 ): 174 ):
175 manifest.manifestrevlog.__init__(self, opener, tree=dir) 175 manifest.manifestrevlog.__init__(self, opener, tree=dir)
176 bundlerevlog.__init__( 176 bundlerevlog.__init__(
177 self, opener, self.indexfile, cgunpacker, linkmapper 177 self, opener, self.indexfile, cgunpacker, linkmapper
178 ) 178 )
179 if dirlogstarts is None: 179 if dirlogstarts is None:
180 dirlogstarts = {} 180 dirlogstarts = {}
181 if self.bundle.version == "03": 181 if self.bundle.version == b"03":
182 dirlogstarts = _getfilestarts(self.bundle) 182 dirlogstarts = _getfilestarts(self.bundle)
183 self._dirlogstarts = dirlogstarts 183 self._dirlogstarts = dirlogstarts
184 self._linkmapper = linkmapper 184 self._linkmapper = linkmapper
185 185
186 def dirlog(self, d): 186 def dirlog(self, d):
210 210
211 211
212 class bundlephasecache(phases.phasecache): 212 class bundlephasecache(phases.phasecache):
213 def __init__(self, *args, **kwargs): 213 def __init__(self, *args, **kwargs):
214 super(bundlephasecache, self).__init__(*args, **kwargs) 214 super(bundlephasecache, self).__init__(*args, **kwargs)
215 if util.safehasattr(self, 'opener'): 215 if util.safehasattr(self, b'opener'):
216 self.opener = vfsmod.readonlyvfs(self.opener) 216 self.opener = vfsmod.readonlyvfs(self.opener)
217 217
218 def write(self): 218 def write(self):
219 raise NotImplementedError 219 raise NotImplementedError
220 220
228 228
229 229
230 def _getfilestarts(cgunpacker): 230 def _getfilestarts(cgunpacker):
231 filespos = {} 231 filespos = {}
232 for chunkdata in iter(cgunpacker.filelogheader, {}): 232 for chunkdata in iter(cgunpacker.filelogheader, {}):
233 fname = chunkdata['filename'] 233 fname = chunkdata[b'filename']
234 filespos[fname] = cgunpacker.tell() 234 filespos[fname] = cgunpacker.tell()
235 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}): 235 for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
236 pass 236 pass
237 return filespos 237 return filespos
238 238
252 252
253 def __init__(self, bundlepath, url, tempparent): 253 def __init__(self, bundlepath, url, tempparent):
254 self._tempparent = tempparent 254 self._tempparent = tempparent
255 self._url = url 255 self._url = url
256 256
257 self.ui.setconfig('phases', 'publish', False, 'bundlerepo') 257 self.ui.setconfig(b'phases', b'publish', False, b'bundlerepo')
258 258
259 self.tempfile = None 259 self.tempfile = None
260 f = util.posixfile(bundlepath, "rb") 260 f = util.posixfile(bundlepath, b"rb")
261 bundle = exchange.readbundle(self.ui, f, bundlepath) 261 bundle = exchange.readbundle(self.ui, f, bundlepath)
262 262
263 if isinstance(bundle, bundle2.unbundle20): 263 if isinstance(bundle, bundle2.unbundle20):
264 self._bundlefile = bundle 264 self._bundlefile = bundle
265 self._cgunpacker = None 265 self._cgunpacker = None
266 266
267 cgpart = None 267 cgpart = None
268 for part in bundle.iterparts(seekable=True): 268 for part in bundle.iterparts(seekable=True):
269 if part.type == 'changegroup': 269 if part.type == b'changegroup':
270 if cgpart: 270 if cgpart:
271 raise NotImplementedError( 271 raise NotImplementedError(
272 "can't process " "multiple changegroups" 272 b"can't process " b"multiple changegroups"
273 ) 273 )
274 cgpart = part 274 cgpart = part
275 275
276 self._handlebundle2part(bundle, part) 276 self._handlebundle2part(bundle, part)
277 277
278 if not cgpart: 278 if not cgpart:
279 raise error.Abort(_("No changegroups found")) 279 raise error.Abort(_(b"No changegroups found"))
280 280
281 # This is required to placate a later consumer, which expects 281 # This is required to placate a later consumer, which expects
282 # the payload offset to be at the beginning of the changegroup. 282 # the payload offset to be at the beginning of the changegroup.
283 # We need to do this after the iterparts() generator advances 283 # We need to do this after the iterparts() generator advances
284 # because iterparts() will seek to end of payload after the 284 # because iterparts() will seek to end of payload after the
286 cgpart.seek(0, os.SEEK_SET) 286 cgpart.seek(0, os.SEEK_SET)
287 287
288 elif isinstance(bundle, changegroup.cg1unpacker): 288 elif isinstance(bundle, changegroup.cg1unpacker):
289 if bundle.compressed(): 289 if bundle.compressed():
290 f = self._writetempbundle( 290 f = self._writetempbundle(
291 bundle.read, '.hg10un', header='HG10UN' 291 bundle.read, b'.hg10un', header=b'HG10UN'
292 ) 292 )
293 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs) 293 bundle = exchange.readbundle(self.ui, f, bundlepath, self.vfs)
294 294
295 self._bundlefile = bundle 295 self._bundlefile = bundle
296 self._cgunpacker = bundle 296 self._cgunpacker = bundle
297 else: 297 else:
298 raise error.Abort(_('bundle type %s cannot be read') % type(bundle)) 298 raise error.Abort(
299 _(b'bundle type %s cannot be read') % type(bundle)
300 )
299 301
300 # dict with the mapping 'filename' -> position in the changegroup. 302 # dict with the mapping 'filename' -> position in the changegroup.
301 self._cgfilespos = {} 303 self._cgfilespos = {}
302 304
303 self.firstnewrev = self.changelog.repotiprev + 1 305 self.firstnewrev = self.changelog.repotiprev + 1
307 phases.draft, 309 phases.draft,
308 [ctx.node() for ctx in self[self.firstnewrev :]], 310 [ctx.node() for ctx in self[self.firstnewrev :]],
309 ) 311 )
310 312
311 def _handlebundle2part(self, bundle, part): 313 def _handlebundle2part(self, bundle, part):
312 if part.type != 'changegroup': 314 if part.type != b'changegroup':
313 return 315 return
314 316
315 cgstream = part 317 cgstream = part
316 version = part.params.get('version', '01') 318 version = part.params.get(b'version', b'01')
317 legalcgvers = changegroup.supportedincomingversions(self) 319 legalcgvers = changegroup.supportedincomingversions(self)
318 if version not in legalcgvers: 320 if version not in legalcgvers:
319 msg = _('Unsupported changegroup version: %s') 321 msg = _(b'Unsupported changegroup version: %s')
320 raise error.Abort(msg % version) 322 raise error.Abort(msg % version)
321 if bundle.compressed(): 323 if bundle.compressed():
322 cgstream = self._writetempbundle(part.read, '.cg%sun' % version) 324 cgstream = self._writetempbundle(part.read, b'.cg%sun' % version)
323 325
324 self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN') 326 self._cgunpacker = changegroup.getunbundler(version, cgstream, b'UN')
325 327
326 def _writetempbundle(self, readfn, suffix, header=''): 328 def _writetempbundle(self, readfn, suffix, header=b''):
327 """Write a temporary file to disk 329 """Write a temporary file to disk
328 """ 330 """
329 fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-", suffix=suffix) 331 fdtemp, temp = self.vfs.mkstemp(prefix=b"hg-bundle-", suffix=suffix)
330 self.tempfile = temp 332 self.tempfile = temp
331 333
332 with os.fdopen(fdtemp, r'wb') as fptemp: 334 with os.fdopen(fdtemp, r'wb') as fptemp:
333 fptemp.write(header) 335 fptemp.write(header)
334 while True: 336 while True:
335 chunk = readfn(2 ** 18) 337 chunk = readfn(2 ** 18)
336 if not chunk: 338 if not chunk:
337 break 339 break
338 fptemp.write(chunk) 340 fptemp.write(chunk)
339 341
340 return self.vfs.open(self.tempfile, mode="rb") 342 return self.vfs.open(self.tempfile, mode=b"rb")
341 343
342 @localrepo.unfilteredpropertycache 344 @localrepo.unfilteredpropertycache
343 def _phasecache(self): 345 def _phasecache(self):
344 return bundlephasecache(self, self._phasedefaults) 346 return bundlephasecache(self, self._phasedefaults)
345 347
430 432
431 # Check if parents exist in localrepo before setting 433 # Check if parents exist in localrepo before setting
432 def setparents(self, p1, p2=nullid): 434 def setparents(self, p1, p2=nullid):
433 p1rev = self.changelog.rev(p1) 435 p1rev = self.changelog.rev(p1)
434 p2rev = self.changelog.rev(p2) 436 p2rev = self.changelog.rev(p2)
435 msg = _("setting parent to node %s that only exists in the bundle\n") 437 msg = _(b"setting parent to node %s that only exists in the bundle\n")
436 if self.changelog.repotiprev < p1rev: 438 if self.changelog.repotiprev < p1rev:
437 self.ui.warn(msg % nodemod.hex(p1)) 439 self.ui.warn(msg % nodemod.hex(p1))
438 if self.changelog.repotiprev < p2rev: 440 if self.changelog.repotiprev < p2rev:
439 self.ui.warn(msg % nodemod.hex(p2)) 441 self.ui.warn(msg % nodemod.hex(p2))
440 return super(bundlerepository, self).setparents(p1, p2) 442 return super(bundlerepository, self).setparents(p1, p2)
441 443
442 444
443 def instance(ui, path, create, intents=None, createopts=None): 445 def instance(ui, path, create, intents=None, createopts=None):
444 if create: 446 if create:
445 raise error.Abort(_('cannot create new bundle repository')) 447 raise error.Abort(_(b'cannot create new bundle repository'))
446 # internal config: bundle.mainreporoot 448 # internal config: bundle.mainreporoot
447 parentpath = ui.config("bundle", "mainreporoot") 449 parentpath = ui.config(b"bundle", b"mainreporoot")
448 if not parentpath: 450 if not parentpath:
449 # try to find the correct path to the working directory repo 451 # try to find the correct path to the working directory repo
450 parentpath = cmdutil.findrepo(encoding.getcwd()) 452 parentpath = cmdutil.findrepo(encoding.getcwd())
451 if parentpath is None: 453 if parentpath is None:
452 parentpath = '' 454 parentpath = b''
453 if parentpath: 455 if parentpath:
454 # Try to make the full path relative so we get a nice, short URL. 456 # Try to make the full path relative so we get a nice, short URL.
455 # In particular, we don't want temp dir names in test outputs. 457 # In particular, we don't want temp dir names in test outputs.
456 cwd = encoding.getcwd() 458 cwd = encoding.getcwd()
457 if parentpath == cwd: 459 if parentpath == cwd:
458 parentpath = '' 460 parentpath = b''
459 else: 461 else:
460 cwd = pathutil.normasprefix(cwd) 462 cwd = pathutil.normasprefix(cwd)
461 if parentpath.startswith(cwd): 463 if parentpath.startswith(cwd):
462 parentpath = parentpath[len(cwd) :] 464 parentpath = parentpath[len(cwd) :]
463 u = util.url(path) 465 u = util.url(path)
464 path = u.localpath() 466 path = u.localpath()
465 if u.scheme == 'bundle': 467 if u.scheme == b'bundle':
466 s = path.split("+", 1) 468 s = path.split(b"+", 1)
467 if len(s) == 1: 469 if len(s) == 1:
468 repopath, bundlename = parentpath, s[0] 470 repopath, bundlename = parentpath, s[0]
469 else: 471 else:
470 repopath, bundlename = s 472 repopath, bundlename = s
471 else: 473 else:
475 477
476 478
477 def makebundlerepository(ui, repopath, bundlepath): 479 def makebundlerepository(ui, repopath, bundlepath):
478 """Make a bundle repository object based on repo and bundle paths.""" 480 """Make a bundle repository object based on repo and bundle paths."""
479 if repopath: 481 if repopath:
480 url = 'bundle:%s+%s' % (util.expandpath(repopath), bundlepath) 482 url = b'bundle:%s+%s' % (util.expandpath(repopath), bundlepath)
481 else: 483 else:
482 url = 'bundle:%s' % bundlepath 484 url = b'bundle:%s' % bundlepath
483 485
484 # Because we can't make any guarantees about the type of the base 486 # Because we can't make any guarantees about the type of the base
485 # repository, we can't have a static class representing the bundle 487 # repository, we can't have a static class representing the bundle
486 # repository. We also can't make any guarantees about how to even 488 # repository. We also can't make any guarantees about how to even
487 # call the base repository's constructor! 489 # call the base repository's constructor!
563 localrepo = peer.local() 565 localrepo = peer.local()
564 if bundlename or not localrepo: 566 if bundlename or not localrepo:
565 # create a bundle (uncompressed if peer repo is not local) 567 # create a bundle (uncompressed if peer repo is not local)
566 568
567 # developer config: devel.legacy.exchange 569 # developer config: devel.legacy.exchange
568 legexc = ui.configlist('devel', 'legacy.exchange') 570 legexc = ui.configlist(b'devel', b'legacy.exchange')
569 forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc 571 forcebundle1 = b'bundle2' not in legexc and b'bundle1' in legexc
570 canbundle2 = ( 572 canbundle2 = (
571 not forcebundle1 573 not forcebundle1
572 and peer.capable('getbundle') 574 and peer.capable(b'getbundle')
573 and peer.capable('bundle2') 575 and peer.capable(b'bundle2')
574 ) 576 )
575 if canbundle2: 577 if canbundle2:
576 with peer.commandexecutor() as e: 578 with peer.commandexecutor() as e:
577 b2 = e.callcommand( 579 b2 = e.callcommand(
578 'getbundle', 580 b'getbundle',
579 { 581 {
580 'source': 'incoming', 582 b'source': b'incoming',
581 'common': common, 583 b'common': common,
582 'heads': rheads, 584 b'heads': rheads,
583 'bundlecaps': exchange.caps20to10(repo, role='client'), 585 b'bundlecaps': exchange.caps20to10(
584 'cg': True, 586 repo, role=b'client'
587 ),
588 b'cg': True,
585 }, 589 },
586 ).result() 590 ).result()
587 591
588 fname = bundle = changegroup.writechunks( 592 fname = bundle = changegroup.writechunks(
589 ui, b2._forwardchunks(), bundlename 593 ui, b2._forwardchunks(), bundlename
590 ) 594 )
591 else: 595 else:
592 if peer.capable('getbundle'): 596 if peer.capable(b'getbundle'):
593 with peer.commandexecutor() as e: 597 with peer.commandexecutor() as e:
594 cg = e.callcommand( 598 cg = e.callcommand(
595 'getbundle', 599 b'getbundle',
596 { 600 {
597 'source': 'incoming', 601 b'source': b'incoming',
598 'common': common, 602 b'common': common,
599 'heads': rheads, 603 b'heads': rheads,
600 }, 604 },
601 ).result() 605 ).result()
602 elif onlyheads is None and not peer.capable('changegroupsubset'): 606 elif onlyheads is None and not peer.capable(b'changegroupsubset'):
603 # compat with older servers when pulling all remote heads 607 # compat with older servers when pulling all remote heads
604 608
605 with peer.commandexecutor() as e: 609 with peer.commandexecutor() as e:
606 cg = e.callcommand( 610 cg = e.callcommand(
607 'changegroup', 611 b'changegroup',
608 {'nodes': incoming, 'source': 'incoming',}, 612 {b'nodes': incoming, b'source': b'incoming',},
609 ).result() 613 ).result()
610 614
611 rheads = None 615 rheads = None
612 else: 616 else:
613 with peer.commandexecutor() as e: 617 with peer.commandexecutor() as e:
614 cg = e.callcommand( 618 cg = e.callcommand(
615 'changegroupsubset', 619 b'changegroupsubset',
616 { 620 {
617 'bases': incoming, 621 b'bases': incoming,
618 'heads': rheads, 622 b'heads': rheads,
619 'source': 'incoming', 623 b'source': b'incoming',
620 }, 624 },
621 ).result() 625 ).result()
622 626
623 if localrepo: 627 if localrepo:
624 bundletype = "HG10BZ" 628 bundletype = b"HG10BZ"
625 else: 629 else:
626 bundletype = "HG10UN" 630 bundletype = b"HG10UN"
627 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype) 631 fname = bundle = bundle2.writebundle(ui, cg, bundlename, bundletype)
628 # keep written bundle? 632 # keep written bundle?
629 if bundlename: 633 if bundlename:
630 bundle = None 634 bundle = None
631 if not localrepo: 635 if not localrepo:
647 if bundlerepo: 651 if bundlerepo:
648 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]] 652 reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev :]]
649 653
650 with peer.commandexecutor() as e: 654 with peer.commandexecutor() as e:
651 remotephases = e.callcommand( 655 remotephases = e.callcommand(
652 'listkeys', {'namespace': 'phases',} 656 b'listkeys', {b'namespace': b'phases',}
653 ).result() 657 ).result()
654 658
655 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes) 659 pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
656 pullop.trmanager = bundletransactionmanager() 660 pullop.trmanager = bundletransactionmanager()
657 exchange._pullapplyphases(pullop, remotephases) 661 exchange._pullapplyphases(pullop, remotephases)