Mercurial > public > mercurial-scm > hg
comparison mercurial/branchmap.py @ 51452:7a063dd9d64e
branchcache: dispatch the code into the dedicated subclass
The code useful only to the local branchcache has now been moved into the
dedicated subclass. This will help improve the branchcache code without subtly
breaking the remote variants.
author | Pierre-Yves David <pierre-yves.david@octobus.net> |
---|---|
date | Mon, 26 Feb 2024 15:46:24 +0100 |
parents | 84fca6d79e25 |
children | 19b2736c8e45 |
comparison
equal
deleted
inserted
replaced
51451:84fca6d79e25 | 51452:7a063dd9d64e |
---|---|
209 self, | 209 self, |
210 repo: "localrepo.localrepository", | 210 repo: "localrepo.localrepository", |
211 entries: Union[ | 211 entries: Union[ |
212 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]] | 212 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]] |
213 ] = (), | 213 ] = (), |
214 tipnode: Optional[bytes] = None, | 214 closed_nodes: Optional[Set[bytes]] = None, |
215 tiprev: Optional[int] = nullrev, | |
216 filteredhash: Optional[bytes] = None, | |
217 closednodes: Optional[Set[bytes]] = None, | |
218 hasnode: Optional[Callable[[bytes], bool]] = None, | |
219 verify_node: bool = False, | |
220 ) -> None: | 215 ) -> None: |
221 """hasnode is a function which can be used to verify whether changelog | 216 """hasnode is a function which can be used to verify whether changelog |
222 has a given node or not. If it's not provided, we assume that every node | 217 has a given node or not. If it's not provided, we assume that every node |
223 we have exists in changelog""" | 218 we have exists in changelog""" |
224 self._filtername = repo.filtername | |
225 self._delayed = False | |
226 if tipnode is None: | |
227 self.tipnode = repo.nullid | |
228 else: | |
229 self.tipnode = tipnode | |
230 self.tiprev = tiprev | |
231 self.filteredhash = filteredhash | |
232 # closednodes is a set of nodes that close their branch. If the branch | 219 # closednodes is a set of nodes that close their branch. If the branch |
233 # cache has been updated, it may contain nodes that are no longer | 220 # cache has been updated, it may contain nodes that are no longer |
234 # heads. | 221 # heads. |
235 if closednodes is None: | 222 if closed_nodes is None: |
236 self._closednodes = set() | 223 closed_nodes = set() |
237 else: | 224 self._closednodes = set(closed_nodes) |
238 self._closednodes = closednodes | |
239 self._entries = dict(entries) | 225 self._entries = dict(entries) |
240 # Do we need to verify branch at all ? | |
241 self._verify_node = verify_node | |
242 # whether closed nodes are verified or not | |
243 self._closedverified = False | |
244 # branches for which nodes are verified | |
245 self._verifiedbranches = set() | |
246 self._hasnode = None | |
247 if self._verify_node: | |
248 self._hasnode = repo.changelog.hasnode | |
249 | |
250 def _verifyclosed(self): | |
251 """verify the closed nodes we have""" | |
252 if not self._verify_node: | |
253 return | |
254 if self._closedverified: | |
255 return | |
256 assert self._hasnode is not None | |
257 for node in self._closednodes: | |
258 if not self._hasnode(node): | |
259 _unknownnode(node) | |
260 | |
261 self._closedverified = True | |
262 | |
263 def _verifybranch(self, branch): | |
264 """verify head nodes for the given branch.""" | |
265 if not self._verify_node: | |
266 return | |
267 if branch not in self._entries or branch in self._verifiedbranches: | |
268 return | |
269 assert self._hasnode is not None | |
270 for n in self._entries[branch]: | |
271 if not self._hasnode(n): | |
272 _unknownnode(n) | |
273 | |
274 self._verifiedbranches.add(branch) | |
275 | |
276 def _verifyall(self): | |
277 """verifies nodes of all the branches""" | |
278 needverification = set(self._entries.keys()) - self._verifiedbranches | |
279 for b in needverification: | |
280 self._verifybranch(b) | |
281 | 226 |
282 def __iter__(self): | 227 def __iter__(self): |
283 return iter(self._entries) | 228 return iter(self._entries) |
284 | 229 |
285 def __setitem__(self, key, value): | 230 def __setitem__(self, key, value): |
286 self._entries[key] = value | 231 self._entries[key] = value |
287 | 232 |
288 def __getitem__(self, key): | 233 def __getitem__(self, key): |
289 self._verifybranch(key) | |
290 return self._entries[key] | 234 return self._entries[key] |
291 | 235 |
292 def __contains__(self, key): | 236 def __contains__(self, key): |
293 self._verifybranch(key) | |
294 return key in self._entries | 237 return key in self._entries |
295 | 238 |
296 def iteritems(self): | 239 def iteritems(self): |
297 for k, v in self._entries.items(): | 240 return self._entries.items() |
298 self._verifybranch(k) | |
299 yield k, v | |
300 | 241 |
301 items = iteritems | 242 items = iteritems |
302 | 243 |
303 def hasbranch(self, label): | 244 def hasbranch(self, label): |
304 """checks whether a branch of this name exists or not""" | 245 """checks whether a branch of this name exists or not""" |
305 self._verifybranch(label) | |
306 return label in self._entries | 246 return label in self._entries |
307 | |
308 @classmethod | |
309 def fromfile(cls, repo): | |
310 f = None | |
311 try: | |
312 f = repo.cachevfs(cls._filename(repo)) | |
313 lineiter = iter(f) | |
314 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2) | |
315 last, lrev = cachekey[:2] | |
316 last, lrev = bin(last), int(lrev) | |
317 filteredhash = None | |
318 if len(cachekey) > 2: | |
319 filteredhash = bin(cachekey[2]) | |
320 bcache = cls( | |
321 repo, | |
322 tipnode=last, | |
323 tiprev=lrev, | |
324 filteredhash=filteredhash, | |
325 verify_node=True, | |
326 ) | |
327 if not bcache.validfor(repo): | |
328 # invalidate the cache | |
329 raise ValueError('tip differs') | |
330 bcache.load(repo, lineiter) | |
331 except (IOError, OSError): | |
332 return None | |
333 | |
334 except Exception as inst: | |
335 if repo.ui.debugflag: | |
336 msg = b'invalid %s: %s\n' | |
337 repo.ui.debug( | |
338 msg | |
339 % ( | |
340 _branchcachedesc(repo), | |
341 stringutil.forcebytestr(inst), | |
342 ) | |
343 ) | |
344 bcache = None | |
345 | |
346 finally: | |
347 if f: | |
348 f.close() | |
349 | |
350 return bcache | |
351 | |
352 def load(self, repo, lineiter): | |
353 """fully loads the branchcache by reading from the file using the line | |
354 iterator passed""" | |
355 for line in lineiter: | |
356 line = line.rstrip(b'\n') | |
357 if not line: | |
358 continue | |
359 node, state, label = line.split(b" ", 2) | |
360 if state not in b'oc': | |
361 raise ValueError('invalid branch state') | |
362 label = encoding.tolocal(label.strip()) | |
363 node = bin(node) | |
364 self._entries.setdefault(label, []).append(node) | |
365 if state == b'c': | |
366 self._closednodes.add(node) | |
367 | |
368 @staticmethod | |
369 def _filename(repo): | |
370 """name of a branchcache file for a given repo or repoview""" | |
371 filename = b"branch2" | |
372 if repo.filtername: | |
373 filename = b'%s-%s' % (filename, repo.filtername) | |
374 return filename | |
375 | |
376 def validfor(self, repo): | |
377 """check that cache contents are valid for (a subset of) this repo | |
378 | |
379 - False when the order of changesets changed or if we detect a strip. | |
380 - True when cache is up-to-date for the current repo or its subset.""" | |
381 try: | |
382 node = repo.changelog.node(self.tiprev) | |
383 except IndexError: | |
384 # changesets were stripped and now we don't even have enough to | |
385 # find tiprev | |
386 return False | |
387 if self.tipnode != node: | |
388 # tiprev doesn't correspond to tipnode: repo was stripped, or this | |
389 # repo has a different order of changesets | |
390 return False | |
391 tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True) | |
392 # hashes don't match if this repo view has a different set of filtered | |
393 # revisions (e.g. due to phase changes) or obsolete revisions (e.g. | |
394 # history was rewritten) | |
395 return self.filteredhash == tiphash | |
396 | 247 |
397 def _branchtip(self, heads): | 248 def _branchtip(self, heads): |
398 """Return tuple with last open head in heads and false, | 249 """Return tuple with last open head in heads and false, |
399 otherwise return last closed head and true.""" | 250 otherwise return last closed head and true.""" |
400 tip = heads[-1] | 251 tip = heads[-1] |
414 | 265 |
415 def iteropen(self, nodes): | 266 def iteropen(self, nodes): |
416 return (n for n in nodes if n not in self._closednodes) | 267 return (n for n in nodes if n not in self._closednodes) |
417 | 268 |
418 def branchheads(self, branch, closed=False): | 269 def branchheads(self, branch, closed=False): |
419 self._verifybranch(branch) | |
420 heads = self._entries[branch] | 270 heads = self._entries[branch] |
421 if not closed: | 271 if not closed: |
422 heads = list(self.iteropen(heads)) | 272 heads = list(self.iteropen(heads)) |
423 return heads | 273 return heads |
424 | 274 |
426 for bn, heads in self.items(): | 276 for bn, heads in self.items(): |
427 yield (bn, heads) + self._branchtip(heads) | 277 yield (bn, heads) + self._branchtip(heads) |
428 | 278 |
429 def iterheads(self): | 279 def iterheads(self): |
430 """returns all the heads""" | 280 """returns all the heads""" |
431 self._verifyall() | |
432 return self._entries.values() | 281 return self._entries.values() |
433 | |
434 def copy(self, repo): | |
435 """return a deep copy of the branchcache object""" | |
436 other = type(self)( | |
437 repo=repo, | |
438 # we always do a shallow copy of self._entries, and the values are |
439 # always replaced, so no need to deepcopy until the above remains | |
440 # true. | |
441 entries=self._entries, | |
442 tipnode=self.tipnode, | |
443 tiprev=self.tiprev, | |
444 filteredhash=self.filteredhash, | |
445 closednodes=set(self._closednodes), | |
446 verify_node=self._verify_node, | |
447 ) | |
448 # the copy will likely schedule a write anyway, but that does not seem |
449 # to hurt to overschedule | |
450 other._delayed = self._delayed | |
451 # also copy information about the current verification state | |
452 other._closedverified = self._closedverified | |
453 other._verifiedbranches = set(self._verifiedbranches) | |
454 return other | |
455 | |
456 def write(self, repo): | |
457 assert self._filtername == repo.filtername, ( | |
458 self._filtername, | |
459 repo.filtername, | |
460 ) | |
461 tr = repo.currenttransaction() | |
462 if not getattr(tr, 'finalized', True): | |
463 # Avoid premature writing. | |
464 # | |
465 # (The cache warming setup by localrepo will update the file later.) | |
466 self._delayed = True | |
467 return | |
468 try: | |
469 filename = self._filename(repo) | |
470 with repo.cachevfs(filename, b"w", atomictemp=True) as f: | |
471 cachekey = [hex(self.tipnode), b'%d' % self.tiprev] | |
472 if self.filteredhash is not None: | |
473 cachekey.append(hex(self.filteredhash)) | |
474 f.write(b" ".join(cachekey) + b'\n') | |
475 nodecount = 0 | |
476 for label, nodes in sorted(self._entries.items()): | |
477 label = encoding.fromlocal(label) | |
478 for node in nodes: | |
479 nodecount += 1 | |
480 if node in self._closednodes: | |
481 state = b'c' | |
482 else: | |
483 state = b'o' | |
484 f.write(b"%s %s %s\n" % (hex(node), state, label)) | |
485 repo.ui.log( | |
486 b'branchcache', | |
487 b'wrote %s with %d labels and %d nodes\n', | |
488 _branchcachedesc(repo), | |
489 len(self._entries), | |
490 nodecount, | |
491 ) | |
492 self._delayed = False | |
493 except (IOError, OSError, error.Abort) as inst: | |
494 # Abort may be raised by read only opener, so log and continue | |
495 repo.ui.debug( | |
496 b"couldn't write branch cache: %s\n" | |
497 % stringutil.forcebytestr(inst) | |
498 ) | |
499 | 282 |
500 def update(self, repo, revgen): | 283 def update(self, repo, revgen): |
501 """Given a branchhead cache, self, that may have extra nodes or be | 284 """Given a branchhead cache, self, that may have extra nodes or be |
502 missing heads, and a generator of nodes that are strictly a superset of | 285 missing heads, and a generator of nodes that are strictly a superset of |
503 heads missing, this function updates self to be correct. | 286 heads missing, this function updates self to be correct. |
504 """ | 287 """ |
505 assert self._filtername == repo.filtername, ( | |
506 self._filtername, | |
507 repo.filtername, | |
508 ) | |
509 starttime = util.timer() | 288 starttime = util.timer() |
510 cl = repo.changelog | 289 cl = repo.changelog |
511 # collect new branch entries | 290 # collect new branch entries |
512 newbranches = {} | 291 newbranches = {} |
513 getbranchinfo = repo.revbranchcache().branchinfo | 292 getbranchinfo = repo.revbranchcache().branchinfo |
293 max_rev = -1 | |
514 for r in revgen: | 294 for r in revgen: |
515 branch, closesbranch = getbranchinfo(r) | 295 branch, closesbranch = getbranchinfo(r) |
516 newbranches.setdefault(branch, []).append(r) | 296 newbranches.setdefault(branch, []).append(r) |
517 if closesbranch: | 297 if closesbranch: |
518 self._closednodes.add(cl.node(r)) | 298 self._closednodes.add(cl.node(r)) |
519 | 299 max_rev = max(max_rev, r) |
520 # new tip revision which we found after iterating items from new | 300 if max_rev < 0: |
521 # branches | 301 max_rev = None |
522 ntiprev = self.tiprev | |
523 | 302 |
524 # Delay fetching the topological heads until they are needed. | 303 # Delay fetching the topological heads until they are needed. |
525 # A repository without non-continuous branches can skip this part. | 304 # A repository without non-continuous branches can skip this part. |
526 topoheads = None | 305 topoheads = None |
527 | 306 |
611 if floorrev <= max(uncertain): | 390 if floorrev <= max(uncertain): |
612 ancestors = set(cl.ancestors(uncertain, floorrev)) | 391 ancestors = set(cl.ancestors(uncertain, floorrev)) |
613 bheadset -= ancestors | 392 bheadset -= ancestors |
614 if bheadset: | 393 if bheadset: |
615 self[branch] = [cl.node(rev) for rev in sorted(bheadset)] | 394 self[branch] = [cl.node(rev) for rev in sorted(bheadset)] |
616 tiprev = max(newheadrevs) | 395 |
617 if tiprev > ntiprev: | 396 duration = util.timer() - starttime |
618 ntiprev = tiprev | 397 repo.ui.log( |
619 | 398 b'branchcache', |
620 if ntiprev > self.tiprev: | 399 b'updated %s in %.4f seconds\n', |
621 self.tiprev = ntiprev | 400 _branchcachedesc(repo), |
622 self.tipnode = cl.node(ntiprev) | 401 duration, |
402 ) | |
403 return max_rev | |
404 | |
405 | |
406 class branchcache(_BaseBranchCache): | |
407 """Branchmap info for a local repo or repoview""" | |
408 | |
409 def __init__( | |
410 self, | |
411 repo: "localrepo.localrepository", | |
412 entries: Union[ | |
413 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]] | |
414 ] = (), | |
415 tipnode: Optional[bytes] = None, | |
416 tiprev: Optional[int] = nullrev, | |
417 filteredhash: Optional[bytes] = None, | |
418 closednodes: Optional[Set[bytes]] = None, | |
419 hasnode: Optional[Callable[[bytes], bool]] = None, | |
420 verify_node: bool = False, | |
421 ) -> None: | |
422 """hasnode is a function which can be used to verify whether changelog | |
423 has a given node or not. If it's not provided, we assume that every node | |
424 we have exists in changelog""" | |
425 self._filtername = repo.filtername | |
426 self._delayed = False | |
427 if tipnode is None: | |
428 self.tipnode = repo.nullid | |
429 else: | |
430 self.tipnode = tipnode | |
431 self.tiprev = tiprev | |
432 self.filteredhash = filteredhash | |
433 | |
434 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes) | |
435 # closednodes is a set of nodes that close their branch. If the branch | |
436 # cache has been updated, it may contain nodes that are no longer | |
437 # heads. | |
438 | |
439 # Do we need to verify branch at all ? | |
440 self._verify_node = verify_node | |
441 # whether closed nodes are verified or not | |
442 self._closedverified = False | |
443 # branches for which nodes are verified | |
444 self._verifiedbranches = set() | |
445 self._hasnode = None | |
446 if self._verify_node: | |
447 self._hasnode = repo.changelog.hasnode | |
448 | |
449 def validfor(self, repo): | |
450 """check that cache contents are valid for (a subset of) this repo | |
451 | |
452 - False when the order of changesets changed or if we detect a strip. | |
453 - True when cache is up-to-date for the current repo or its subset.""" | |
454 try: | |
455 node = repo.changelog.node(self.tiprev) | |
456 except IndexError: | |
457 # changesets were stripped and now we don't even have enough to | |
458 # find tiprev | |
459 return False | |
460 if self.tipnode != node: | |
461 # tiprev doesn't correspond to tipnode: repo was stripped, or this | |
462 # repo has a different order of changesets | |
463 return False | |
464 tiphash = scmutil.filteredhash(repo, self.tiprev, needobsolete=True) | |
465 # hashes don't match if this repo view has a different set of filtered | |
466 # revisions (e.g. due to phase changes) or obsolete revisions (e.g. | |
467 # history was rewritten) | |
468 return self.filteredhash == tiphash | |
469 | |
470 @classmethod | |
471 def fromfile(cls, repo): | |
472 f = None | |
473 try: | |
474 f = repo.cachevfs(cls._filename(repo)) | |
475 lineiter = iter(f) | |
476 cachekey = next(lineiter).rstrip(b'\n').split(b" ", 2) | |
477 last, lrev = cachekey[:2] | |
478 last, lrev = bin(last), int(lrev) | |
479 filteredhash = None | |
480 if len(cachekey) > 2: | |
481 filteredhash = bin(cachekey[2]) | |
482 bcache = cls( | |
483 repo, | |
484 tipnode=last, | |
485 tiprev=lrev, | |
486 filteredhash=filteredhash, | |
487 verify_node=True, | |
488 ) | |
489 if not bcache.validfor(repo): | |
490 # invalidate the cache | |
491 raise ValueError('tip differs') | |
492 bcache.load(repo, lineiter) | |
493 except (IOError, OSError): | |
494 return None | |
495 | |
496 except Exception as inst: | |
497 if repo.ui.debugflag: | |
498 msg = b'invalid %s: %s\n' | |
499 repo.ui.debug( | |
500 msg | |
501 % ( | |
502 _branchcachedesc(repo), | |
503 stringutil.forcebytestr(inst), | |
504 ) | |
505 ) | |
506 bcache = None | |
507 | |
508 finally: | |
509 if f: | |
510 f.close() | |
511 | |
512 return bcache | |
513 | |
514 def load(self, repo, lineiter): | |
515 """fully loads the branchcache by reading from the file using the line | |
516 iterator passed""" | |
517 for line in lineiter: | |
518 line = line.rstrip(b'\n') | |
519 if not line: | |
520 continue | |
521 node, state, label = line.split(b" ", 2) | |
522 if state not in b'oc': | |
523 raise ValueError('invalid branch state') | |
524 label = encoding.tolocal(label.strip()) | |
525 node = bin(node) | |
526 self._entries.setdefault(label, []).append(node) | |
527 if state == b'c': | |
528 self._closednodes.add(node) | |
529 | |
530 @staticmethod | |
531 def _filename(repo): | |
532 """name of a branchcache file for a given repo or repoview""" | |
533 filename = b"branch2" | |
534 if repo.filtername: | |
535 filename = b'%s-%s' % (filename, repo.filtername) | |
536 return filename | |
537 | |
538 def copy(self, repo): | |
539 """return a deep copy of the branchcache object""" | |
540 other = type(self)( | |
541 repo=repo, | |
542 # we always do a shallow copy of self._entries, and the values are |
543 # always replaced, so no need to deepcopy until the above remains | |
544 # true. | |
545 entries=self._entries, | |
546 tipnode=self.tipnode, | |
547 tiprev=self.tiprev, | |
548 filteredhash=self.filteredhash, | |
549 closednodes=set(self._closednodes), | |
550 verify_node=self._verify_node, | |
551 ) | |
552 # the copy will likely schedule a write anyway, but that does not seem |
553 # to hurt to overschedule | |
554 other._delayed = self._delayed | |
555 # also copy information about the current verification state | |
556 other._closedverified = self._closedverified | |
557 other._verifiedbranches = set(self._verifiedbranches) | |
558 return other | |
559 | |
560 def write(self, repo): | |
561 assert self._filtername == repo.filtername, ( | |
562 self._filtername, | |
563 repo.filtername, | |
564 ) | |
565 tr = repo.currenttransaction() | |
566 if not getattr(tr, 'finalized', True): | |
567 # Avoid premature writing. | |
568 # | |
569 # (The cache warming setup by localrepo will update the file later.) | |
570 self._delayed = True | |
571 return | |
572 try: | |
573 filename = self._filename(repo) | |
574 with repo.cachevfs(filename, b"w", atomictemp=True) as f: | |
575 cachekey = [hex(self.tipnode), b'%d' % self.tiprev] | |
576 if self.filteredhash is not None: | |
577 cachekey.append(hex(self.filteredhash)) | |
578 f.write(b" ".join(cachekey) + b'\n') | |
579 nodecount = 0 | |
580 for label, nodes in sorted(self._entries.items()): | |
581 label = encoding.fromlocal(label) | |
582 for node in nodes: | |
583 nodecount += 1 | |
584 if node in self._closednodes: | |
585 state = b'c' | |
586 else: | |
587 state = b'o' | |
588 f.write(b"%s %s %s\n" % (hex(node), state, label)) | |
589 repo.ui.log( | |
590 b'branchcache', | |
591 b'wrote %s with %d labels and %d nodes\n', | |
592 _branchcachedesc(repo), | |
593 len(self._entries), | |
594 nodecount, | |
595 ) | |
596 self._delayed = False | |
597 except (IOError, OSError, error.Abort) as inst: | |
598 # Abort may be raised by read only opener, so log and continue | |
599 repo.ui.debug( | |
600 b"couldn't write branch cache: %s\n" | |
601 % stringutil.forcebytestr(inst) | |
602 ) | |
603 | |
604 def _verifyclosed(self): | |
605 """verify the closed nodes we have""" | |
606 if not self._verify_node: | |
607 return | |
608 if self._closedverified: | |
609 return | |
610 assert self._hasnode is not None | |
611 for node in self._closednodes: | |
612 if not self._hasnode(node): | |
613 _unknownnode(node) | |
614 | |
615 self._closedverified = True | |
616 | |
617 def _verifybranch(self, branch): | |
618 """verify head nodes for the given branch.""" | |
619 if not self._verify_node: | |
620 return | |
621 if branch not in self._entries or branch in self._verifiedbranches: | |
622 return | |
623 assert self._hasnode is not None | |
624 for n in self._entries[branch]: | |
625 if not self._hasnode(n): | |
626 _unknownnode(n) | |
627 | |
628 self._verifiedbranches.add(branch) | |
629 | |
630 def _verifyall(self): | |
631 """verifies nodes of all the branches""" | |
632 for b in self._entries.keys(): | |
633 if b not in self._verifiedbranches: | |
634 self._verifybranch(b) | |
635 | |
636 def __getitem__(self, key): | |
637 self._verifybranch(key) | |
638 return super().__getitem__(key) | |
639 | |
640 def __contains__(self, key): | |
641 self._verifybranch(key) | |
642 return super().__contains__(key) | |
643 | |
644 def iteritems(self): | |
645 self._verifyall() | |
646 return super().iteritems() | |
647 | |
648 items = iteritems | |
649 | |
650 def iterheads(self): | |
651 """returns all the heads""" | |
652 self._verifyall() | |
653 return super().iterheads() | |
654 | |
655 def hasbranch(self, label): | |
656 """checks whether a branch of this name exists or not""" | |
657 self._verifybranch(label) | |
658 return super().hasbranch(label) | |
659 | |
660 def branchheads(self, branch, closed=False): | |
661 self._verifybranch(branch) | |
662 return super().branchheads(branch, closed=closed) | |
663 | |
664 def update(self, repo, revgen): | |
665 assert self._filtername == repo.filtername, ( | |
666 self._filtername, | |
667 repo.filtername, | |
668 ) | |
669 cl = repo.changelog | |
670 max_rev = super().update(repo, revgen) | |
671 # new tip revision which we found after iterating items from new | |
672 # branches | |
673 if max_rev is not None and max_rev > self.tiprev: | |
674 self.tiprev = max_rev | |
675 self.tipnode = cl.node(max_rev) | |
623 | 676 |
624 if not self.validfor(repo): | 677 if not self.validfor(repo): |
625 # old cache key is now invalid for the repo, but we've just updated | 678 # old cache key is now invalid for the repo, but we've just updated |
626 # the cache and we assume it's valid, so let's make the cache key | 679 # the cache and we assume it's valid, so let's make the cache key |
627 # valid as well by recomputing it from the cached data | 680 # valid as well by recomputing it from the cached data |
639 self.tiprev = tiprev | 692 self.tiprev = tiprev |
640 self.filteredhash = scmutil.filteredhash( | 693 self.filteredhash = scmutil.filteredhash( |
641 repo, self.tiprev, needobsolete=True | 694 repo, self.tiprev, needobsolete=True |
642 ) | 695 ) |
643 | 696 |
644 duration = util.timer() - starttime | |
645 repo.ui.log( | |
646 b'branchcache', | |
647 b'updated %s in %.4f seconds\n', | |
648 _branchcachedesc(repo), | |
649 duration, | |
650 ) | |
651 | |
652 self.write(repo) | 697 self.write(repo) |
653 | |
654 | |
655 class branchcache(_BaseBranchCache): | |
656 """Branchmap info for a local repo or repoview""" | |
657 | 698 |
658 | 699 |
659 class remotebranchcache(_BaseBranchCache): | 700 class remotebranchcache(_BaseBranchCache): |
660 """Branchmap info for a remote connection, should not write locally""" | 701 """Branchmap info for a remote connection, should not write locally""" |
702 | |
703 def __init__( | |
704 self, | |
705 repo: "localrepo.localrepository", | |
706 entries: Union[ | |
707 Dict[bytes, List[bytes]], Iterable[Tuple[bytes, List[bytes]]] | |
708 ] = (), | |
709 closednodes: Optional[Set[bytes]] = None, | |
710 ) -> None: | |
711 super().__init__(repo=repo, entries=entries, closed_nodes=closednodes) | |
661 | 712 |
662 | 713 |
663 # Revision branch info cache | 714 # Revision branch info cache |
664 | 715 |
665 _rbcversion = b'-v1' | 716 _rbcversion = b'-v1' |