comparison: mercurial/localrepo.py @ 18120:88990d3e3d75
branchmap: extract _updatebranchcache from repo

author      Pierre-Yves David <pierre-yves.david@logilab.fr>
date        Wed, 19 Dec 2012 14:49:06 +0100
parents     5264464b5f68
children    f8a13f061a8a
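
In short: the branch-head cache maintenance that lived on `localrepository._updatebranchcache` is deleted from localrepo.py, and every caller now goes through a module-level function in `mercurial.branchmap`. A minimal before/after sketch of the call pattern, assuming a `repo` object and a generator of changectxs named `ctxgen` as in the hunks below (the import line is assumed; `branchmap.update` itself is what the diff introduces):

# before this changeset: the update logic is a private method on the repo
partial = {}                              # {branch: [head nodes]} to repair
ctxgen = (repo[r] for r in repo)          # changectxs to fold into the map
repo._updatebranchcache(partial, ctxgen)

# after this changeset: the same logic takes the repo as an explicit argument
from mercurial import branchmap
branchmap.update(repo, partial, ctxgen)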
--- mercurial/localrepo.py  18119:5264464b5f68
+++ mercurial/localrepo.py  18120:88990d3e3d75
@@ -680,30 +680,30 @@
         # if lrev == catip: cache is already up to date
         # if lrev > catip: we have uncachable element in `partial` can't write
         # on disk
         if lrev < catip:
             ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
-            self._updatebranchcache(partial, ctxgen)
+            branchmap.update(self, partial, ctxgen)
             branchmap.write(self, partial, cl.node(catip), catip)
             lrev = catip
         # If cacheable tip were lower than actual tip, we need to update the
         # cache up to tip. This update (from cacheable to actual tip) is not
         # written to disk since it's not cacheable.
         tiprev = len(self) - 1
         if lrev < tiprev:
             ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
-            self._updatebranchcache(partial, ctxgen)
+            branchmap.update(self, partial, ctxgen)
         self._branchcache = partial
         self._branchcachetip = tip

     def branchmap(self):
         '''returns a dictionary {branch: [branchheads]}'''
         if self.changelog.filteredrevs:
             # some changeset are excluded we can't use the cache
-            branchmap = {}
-            self._updatebranchcache(branchmap, (self[r] for r in self))
-            return branchmap
+            bmap = {}
+            branchmap.update(self, bmap, (self[r] for r in self))
+            return bmap
         else:
             self.updatebranchcache()
             return self._branchcache

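Two things change in the `branchmap()` hunk above: the local dictionary is renamed from `branchmap` to `bmap` so it no longer shadows the `branchmap` module the method now calls, and the filtered-changelog path fills a throwaway map through the module-level function. Read on its own, that path amounts to the following sketch (the helper name `branchmap_nocache` is made up; the body mirrors the new code above):

from mercurial import branchmap

def branchmap_nocache(repo):
    # with filtered revisions the persistent cache cannot be trusted,
    # so build a fresh {branch: [head nodes]} map from every revision
    bmap = {}
    branchmap.update(repo, bmap, (repo[r] for r in repo))
    return bmap
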
@@ -727,70 +727,10 @@
         the branch, open heads come before closed'''
         bt = {}
         for bn, heads in self.branchmap().iteritems():
             bt[bn] = self._branchtip(heads)
         return bt
-
-    def _updatebranchcache(self, partial, ctxgen):
-        """Given a branchhead cache, partial, that may have extra nodes or be
-        missing heads, and a generator of nodes that are at least a superset of
-        heads missing, this function updates partial to be correct.
-        """
-        # collect new branch entries
-        newbranches = {}
-        for c in ctxgen:
-            newbranches.setdefault(c.branch(), []).append(c.node())
-        # if older branchheads are reachable from new ones, they aren't
-        # really branchheads. Note checking parents is insufficient:
-        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
-        for branch, newnodes in newbranches.iteritems():
-            bheads = partial.setdefault(branch, [])
-            # Remove candidate heads that no longer are in the repo (e.g., as
-            # the result of a strip that just happened). Avoid using 'node in
-            # self' here because that dives down into branchcache code somewhat
-            # recursively.
-            bheadrevs = [self.changelog.rev(node) for node in bheads
-                         if self.changelog.hasnode(node)]
-            newheadrevs = [self.changelog.rev(node) for node in newnodes
-                           if self.changelog.hasnode(node)]
-            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
-            # Remove duplicates - nodes that are in newheadrevs and are already
-            # in bheadrevs. This can happen if you strip a node whose parent
-            # was already a head (because they're on different branches).
-            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
-
-            # Starting from tip means fewer passes over reachable. If we know
-            # the new candidates are not ancestors of existing heads, we don't
-            # have to examine ancestors of existing heads
-            if ctxisnew:
-                iterrevs = sorted(newheadrevs)
-            else:
-                iterrevs = list(bheadrevs)
-
-            # This loop prunes out two kinds of heads - heads that are
-            # superseded by a head in newheadrevs, and newheadrevs that are not
-            # heads because an existing head is their descendant.
-            while iterrevs:
-                latest = iterrevs.pop()
-                if latest not in bheadrevs:
-                    continue
-                ancestors = set(self.changelog.ancestors([latest],
-                                                         bheadrevs[0]))
-                if ancestors:
-                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
-            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
-
-        # There may be branches that cease to exist when the last commit in the
-        # branch was stripped. This code filters them out. Note that the
-        # branch that ceased to exist may not be in newbranches because
-        # newbranches is the set of candidate heads, which when you strip the
-        # last commit in a branch will be the parent branch.
-        for branch in partial.keys():
-            nodes = [head for head in partial[branch]
-                     if self.changelog.hasnode(head)]
-            if not nodes:
-                del partial[branch]

     def lookup(self, key):
         return self[key].node()

     def lookupbranch(self, key, remote=None):
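The method removed above is what the commit title says moves into the `branchmap` module; judging from the new call sites, the extracted `update()` presumably keeps this body with `self` replaced by an explicit `repo` argument. As a compact illustration of the contract it has to satisfy, rather than of the real implementation, the hypothetical sketch below recomputes the heads of each affected branch from scratch instead of pruning incrementally:

def update_sketch(repo, partial, ctxgen):
    """Fold the changectxs from ctxgen into partial so that it maps each
    branch name to the nodes that are currently heads of that branch."""
    cl = repo.changelog
    # collect the new head candidates per branch
    newbranches = {}
    for c in ctxgen:
        newbranches.setdefault(c.branch(), []).append(c.node())
    for branch, newnodes in newbranches.items():
        # merge old and new candidates, dropping stripped nodes and duplicates
        candidates = partial.get(branch, []) + newnodes
        revs = sorted(set(cl.rev(n) for n in candidates if cl.hasnode(n)))
        if not revs:
            continue
        # a candidate stays a head unless another candidate descends from it;
        # cl.ancestors(revs, stoprev) does not yield the queried revs themselves
        ancs = set(cl.ancestors(revs, revs[0]))
        partial[branch] = [cl.node(r) for r in revs if r not in ancs]
    # branches whose last commit was stripped disappear from the map entirely
    for branch in list(partial.keys()):
        if not any(cl.hasnode(n) for n in partial[branch]):
            del partial[branch]

The real code's incremental pruning, walking ancestors only down to the oldest known head via the stoprev argument, keeps the update cheap on large repositories, which is why the extracted function is worth preserving as-is rather than simplifying like this.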
@@ -1530 +1470 @@
         # will be caught the next time it is read.
         if newheadnodes:
             tiprev = len(self) - 1
             ctxgen = (self[node] for node in newheadnodes
                       if self.changelog.hasnode(node))
-            self._updatebranchcache(self._branchcache, ctxgen)
+            branchmap.update(self, self._branchcache, ctxgen)
             branchmap.write(self, self._branchcache, self.changelog.tip(),
                             tiprev)

         # Ensure the persistent tag cache is updated. Doing it now
         # means that the tag cache only has to worry about destroyed