--- a/mercurial/revlog.py Tue May 10 00:34:57 2005 -0800
+++ b/mercurial/revlog.py Tue May 10 00:40:49 2005 -0800
@@ -227,3 +227,186 @@
# return the unmerged heads for later resolving
return (old, self.tip())
+
+ def group(self, linkmap):
+        # given a list of changeset revs, return a set of deltas and
+        # metadata corresponding to those nodes. the first delta is
+        # parent(nodes[0]) -> nodes[0]; the receiver is guaranteed to
+        # have this parent as it has all history before these
+        # changesets. the parent sent is parent[0].
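+        #
+        # the stream built below (and consumed by addgroup) is laid
+        # out as:
+        #
+        #   4 bytes    total length of the stream, including this
+        #              field (0 for an empty group)
+        #   then, for each delta:
+        #     4 bytes    chunk length l, including this 84-byte header
+        #     20 bytes   node
+        #     20 bytes   p1
+        #     20 bytes   p2
+        #     20 bytes   changeset node (fed to linkmap/linkmapper)
+        #     l - 84 bytes of uncompressed delta text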
+
+ revs = []
+ needed = {}
+
+ # find file nodes/revs that match changeset revs
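+        # (index[i][3] is the link rev: the rev of the changeset that
+        # introduced this file revision)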
+ for i in xrange(0, self.count()):
+ if self.index[i][3] in linkmap:
+ revs.append(i)
+ needed[i] = 1
+
+ # if we don't have any revisions touched by these changesets, bail
+ if not revs: return struct.pack(">l", 0)
+
+ # add the parent of the first rev
+ p = self.parents(self.node(revs[0]))[0]
+ revs.insert(0, self.rev(p))
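+        # (this is rev -1 if that parent is nullid; negative revs are
+        # skipped below)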
+
+ # for each delta that isn't contiguous in the log, we need to
+ # reconstruct the base, reconstruct the result, and then
+ # calculate the delta. We also need to do this where we've
+ # stored a full version and not a delta
+ for i in xrange(0, len(revs) - 1):
+ a, b = revs[i], revs[i + 1]
+ if a + 1 != b or self.base(b) == b:
+ for j in xrange(self.base(a), a + 1):
+ needed[j] = 1
+ for j in xrange(self.base(b), b + 1):
+ needed[j] = 1
+
+ # calculate spans to retrieve from datafile
+ needed = needed.keys()
+ needed.sort()
+ spans = []
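+        # each span is (offset, length, [(rev, chunk length), ...])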
+ for n in needed:
+ if n < 0: continue
+ o = self.start(n)
+ l = self.length(n)
+ spans.append((o, l, [(n, l)]))
+
+        # merge adjacent spans so contiguous data is read in one pass
+ merge = [spans.pop(0)]
+ while spans:
+ e = spans.pop(0)
+ f = merge[-1]
+ if e[0] == f[0] + f[1]:
+ merge[-1] = (f[0], f[1] + e[1], f[2] + e[2])
+ else:
+ merge.append(e)
+
+ # read spans in, divide up chunks
+ chunks = {}
+ for span in merge:
+ # we reopen the file for each span to make http happy for now
+ f = self.opener(self.datafile)
+ f.seek(span[0])
+ data = f.read(span[1])
+
+ # divide up the span
+ pos = 0
+ for r, l in span[2]:
+ chunks[r] = data[pos: pos + l]
+ pos += l
+
+ # helper to reconstruct intermediate versions
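+        # by applying the stored deltas base+1..rev on top of text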
+ def construct(text, base, rev):
+            for r in xrange(base + 1, rev + 1):
+ b = decompress(chunks[r])
+ text = self.patch(text, b)
+ return text
+
+ # build deltas
+ deltas = []
+        for i in xrange(0, len(revs) - 1):
+            a, b = revs[i], revs[i + 1]
+ n = self.node(b)
+
+ if a + 1 != b or self.base(b) == b:
+ if a >= 0:
+ base = self.base(a)
+ ta = decompress(chunks[self.base(a)])
+ ta = construct(ta, base, a)
+ else:
+ ta = ""
+
+ base = self.base(b)
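+                # if a lies on b's delta chain past b's base, reuse ta
+                # and apply only the deltas after a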
+ if a > base:
+ base = a
+ tb = ta
+ else:
+ tb = decompress(chunks[self.base(b)])
+                tb = construct(tb, base, b)
+ d = self.diff(ta, tb)
+ else:
+ d = decompress(chunks[b])
+
+ p = self.parents(n)
+ meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
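+            # meta is node (20), p1 (20), p2 (20), changeset node (20)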
+ l = struct.pack(">l", len(meta) + len(d) + 4)
+ deltas.append(l + meta + d)
+
+ l = struct.pack(">l", sum(map(len, deltas)) + 4)
+ deltas.insert(0, l)
+ return "".join(deltas)
+
+ def addgroup(self, data, linkmapper, transaction):
+        # given a set of deltas, add them to the revision log. the
+        # first delta is against its parent, which should be in our
+        # log; the rest are each against the previous revision.
+
+ if len(data) <= 4: return
+
+ # retrieve the parent revision of the delta chain
+ chain = data[28:48]
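+        # (4 bytes total length, then the first chunk's 4-byte length
+        # and 20-byte node put its p1 at offset 28:48)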
+ text = self.revision(chain)
+
+        # track the state of the current delta chain
+ r = self.count()
+ t = r - 1
+
+ base = prev = -1
+ start = end = 0
+ if r:
+ start = self.start(self.base(t))
+ end = self.end(t)
+ measure = self.length(self.base(t))
+ base = self.base(t)
+ prev = self.tip()
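+            # start..end covers the on-disk chunks of the tip's delta
+            # chain; measure is the stored size of its full version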
+
+ transaction.add(self.datafile, end)
+ transaction.add(self.indexfile, r * struct.calcsize(indexformat))
+ dfh = self.opener(self.datafile, "a")
+ ifh = self.opener(self.indexfile, "a")
+
+ # loop through our set of deltas
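+        # (pos starts past the 4-byte total length at the stream head)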
+ pos = 4
+ while pos < len(data):
+ l, node, p1, p2, cs = struct.unpack(">l20s20s20s20s",
+ data[pos:pos+84])
+ link = linkmapper(cs)
+ delta = data[pos + 84:pos + l]
+ pos += l
+
+ # full versions are inserted when the needed deltas become
+ # comparable to the uncompressed text or when the previous
+ # version is not the one we have a delta against. We use
+ # the size of the previous full rev as a proxy for the
+ # current size.
+
+ if chain == prev:
+ cdelta = compress(delta)
+
+ if chain != prev or (end - start + len(cdelta)) > measure * 2:
+                # flush our writes so self.revision can read them back
+ dfh.flush()
+ ifh.flush()
+                text = self.revision(chain)
+ text = self.patch(text, delta)
+ chk = self.addrevision(text, transaction, link, p1, p2)
+ if chk != node:
+                    raise Exception("consistency error adding group")
+ measure = len(text)
+ else:
+ e = (end, len(cdelta), self.base(t), link, p1, p2, node)
+ self.index.append(e)
+ self.nodemap[node] = r
+ dfh.write(cdelta)
+ ifh.write(struct.pack(indexformat, *e))
+
+ t, r = r, r + 1
+            chain = prev = node
+ start = self.start(self.base(t))
+ end = self.end(t)
+
+ dfh.close()
+ ifh.close()
+ return node