diff mercurial/localrepo.py @ 16716:0311a6abd38a

localrepo: strip now incrementally updates the branchheads cache

Destroying history via strip used to invalidate the branchheads cache,
causing it to be regenerated the next time it is read.  This is expensive in
large repos.  This change converts strip to pass info to localrepo.destroyed()
to enable it to incrementally update the cache, improving the performance of
strip and other operations that depend on it (e.g., rebase).

This change also strengthens the integrity checking of the branchheads cache
when it is read, by rejecting the cache if it contains nodes that no longer
exist.
author Joshua Redstone <joshua.redstone@fb.com>
date Fri, 11 May 2012 10:35:54 -0700
parents f8dee1a8f844
children e7bf09acd410
--- a/mercurial/localrepo.py	Sat May 12 16:53:36 2012 +0200
+++ b/mercurial/localrepo.py	Fri May 11 10:35:54 2012 -0700
@@ -540,6 +540,9 @@
                     continue
                 node, label = l.split(" ", 1)
                 label = encoding.tolocal(label.strip())
+                if node not in self:
+                    raise ValueError('invalidating branch cache because node '
+                                     '%s does not exist' % node)
                 partial.setdefault(label, []).append(bin(node))
         except KeyboardInterrupt:
             raise
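
For illustration only, a standalone sketch of the stricter read-time check introduced above; the helper name, its arguments and the sample values are made up, and the real code also converts the node with bin() and decodes the label with encoding.tolocal() before storing it:

    def parsecacheline(line, knownnodes):
        # One cache line is "<hex node> <branch label>"; reject the whole
        # cache if the node is gone (for example because it was stripped).
        node, label = line.split(" ", 1)
        label = label.strip()
        if node not in knownnodes:
            raise ValueError('invalidating branch cache because node '
                             '%s does not exist' % node)
        return node, label

    # parsecacheline("a1b2c3 default\n", {"a1b2c3"}) -> ("a1b2c3", "default")
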
@@ -561,6 +564,10 @@
             pass
 
     def _updatebranchcache(self, partial, ctxgen):
+        """Given a branchhead cache, partial, that may have extra nodes or be
+        missing heads, and a generator of nodes that are at least a superset of
+        heads missing, this function updates partial to be correct.
+        """
         # collect new branch entries
         newbranches = {}
         for c in ctxgen:
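
A rough sketch of the shapes the two arguments take, with made-up node values; in the method itself partial maps a branch name to a list of binary head nodes, and ctxgen yields changectx objects built as a generator expression over candidate revisions:

    from binascii import unhexlify

    # branch name -> list of binary head nodes, possibly stale after a strip
    partial = {'default': [unhexlify('11' * 20)],
               'stable': [unhexlify('22' * 20)]}

    # candidate new head revisions; at least a superset of the missing heads
    candidate_head_revs = [5, 7]
    # in the repo code: ctxgen = (self[rev] for rev in candidate_head_revs)
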
@@ -571,21 +578,42 @@
         for branch, newnodes in newbranches.iteritems():
             bheads = partial.setdefault(branch, [])
             bheads.extend(newnodes)
-            if len(bheads) <= 1:
-                continue
-            bheads = sorted(bheads, key=lambda x: self[x].rev())
-            # starting from tip means fewer passes over reachable
-            while newnodes:
-                latest = newnodes.pop()
-                if latest not in bheads:
-                    continue
-                minbhnode = self[bheads[0]].node()
-                reachable = self.changelog.reachable(latest, minbhnode)
-                reachable.remove(latest)
-                if reachable:
-                    bheads = [b for b in bheads if b not in reachable]
+            # Remove duplicates - nodes that are in newnodes and are already in
+            # bheads.  This can happen if you strip a node and its parent was
+            # already a head (because they're on different branches).
+            bheads = set(bheads)
+
+            # Remove candidate heads that are no longer in the repo (e.g., as
+            # the result of a strip that just happened).
+            # Avoid using 'bhead in self' here because that dives down into
+            # branchcache code somewhat recursively.
+            bheads = [bhead for bhead in bheads
+                      if self.changelog.hasnode(bhead)]
+            if len(bheads) > 1:
+                bheads = sorted(bheads, key=lambda x: self[x].rev())
+                # starting from tip means fewer passes over reachable
+                while newnodes:
+                    latest = newnodes.pop()
+                    if latest not in bheads:
+                        continue
+                    minbhnode = self[bheads[0]].node()
+                    reachable = self.changelog.reachable(latest, minbhnode)
+                    reachable.remove(latest)
+                    if reachable:
+                        bheads = [b for b in bheads if b not in reachable]
             partial[branch] = bheads
 
+        # A branch ceases to exist entirely when its last commit is stripped.
+        # This code filters such branches out.  Note that the vanished branch
+        # may not appear in newbranches, because newbranches holds the
+        # candidate heads, and after stripping the last commit of a branch
+        # those candidates lie on the parent's branch.
+        for branch in partial.keys():
+            nodes = [head for head in partial[branch]
+                     if self.changelog.hasnode(head)]
+            if not nodes:
+                del partial[branch]
+
     def lookup(self, key):
         return self[key].node()
 
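
A minimal standalone model of the filtering steps added above: deduplicate candidate heads, drop heads that no longer exist, and prune branches left with no heads. The function and argument names are illustrative only, and the reachability pass that removes heads reachable from newer heads is omitted:

    def filterheads(partial, newbranches, hasnode):
        # partial: branch -> list of head nodes
        # newbranches: branch -> candidate new heads
        # hasnode: predicate telling whether a node still exists in the repo
        for branch, newnodes in newbranches.items():
            # the set() removes candidates that were already heads
            bheads = set(partial.get(branch, [])) | set(newnodes)
            # drop heads that a strip just removed from the repo
            partial[branch] = [n for n in bheads if hasnode(n)]
        # prune branches whose last head was stripped
        for branch in list(partial):
            partial[branch] = [n for n in partial[branch] if hasnode(n)]
            if not partial[branch]:
                del partial[branch]
        return partial

    # filterheads({'default': ['a']}, {'default': ['b']},
    #             hasnode=lambda n: n != 'a')  ->  {'default': ['b']}
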
@@ -848,6 +876,9 @@
             else:
                 ui.status(_('working directory now based on '
                             'revision %d\n') % parents)
+        # TODO: if we know which new heads may result from this rollback, pass
+        # them to destroyed(), which will prevent the branchheads cache from
+        # being invalidated.
         self.destroyed()
         return 0
 
@@ -1291,12 +1322,27 @@
                 tr.release()
             lock.release()
 
-    def destroyed(self):
+    def destroyed(self, newheadrevs=None):
         '''Inform the repository that nodes have been destroyed.
         Intended for use by strip and rollback, so there's a common
-        place for anything that has to be done after destroying history.'''
-        # XXX it might be nice if we could take the list of destroyed
-        # nodes, but I don't see an easy way for rollback() to do that
+        place for anything that has to be done after destroying history.
+
+        If you know the branchheads cache was up to date before nodes were
+        removed and you also know the candidate set of new heads that may have
+        resulted from the destruction, you can pass newheadrevs.  This enables
+        the code to update the branchheads cache incrementally, rather than
+        having future code decide it is invalid and regenerate it.
+        '''
+        if newheadrevs:
+            tiprev = len(self) - 1
+            ctxgen = (self[rev] for rev in newheadrevs)
+            self._updatebranchcache(self._branchcache, ctxgen)
+            self._writebranchcache(self._branchcache, self.changelog.tip(),
+                                   tiprev)
+        else:
+            # No info to update the cache.  If nodes were destroyed, the cache
+            # is stale and this will be caught the next time it is read.
+            pass
 
         # Ensure the persistent tag cache is updated.  Doing it now
         # means that the tag cache only has to worry about destroyed
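
As a hedged sketch of the caller side, one plausible way a strip-like operation could compute newheadrevs is to take the surviving parents of the revisions it removes; 'striprevs' is a hypothetical name and the real strip code may derive its candidates differently:

    from mercurial.node import nullrev

    def candidatenewheads(repo, striprevs):
        # parents of stripped revisions that survive the strip are the
        # candidate new heads
        newheadrevs = set()
        for rev in striprevs:
            for p in repo.changelog.parentrevs(rev):
                if p != nullrev and p not in striprevs:
                    newheadrevs.add(p)
        return newheadrevs

    # repo.destroyed(newheadrevs=candidatenewheads(repo, striprevs))
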