comparison mercurial/revlog.py @ 43076:2372284d9457
formatting: blacken the codebase
This is using my patch to black (https://github.com/psf/black/pull/826) so we don't un-wrap collection literals.
Done with:
hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"' | xargs black -S
# skip-blame mass-reformatting only
# no-check-commit reformats foo_bar functions
Differential Revision: https://phab.mercurial-scm.org/D6971
author:    Augie Fackler <augie@google.com>
date:      Sun, 06 Oct 2019 09:45:02 -0400
parents:   7902001aaf41
children:  687b865b95ad
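The hunks below are almost entirely mechanical. Running `black -S` (the `-S`/`--skip-string-normalization` flag keeps the existing single quotes) explodes any signature, call, or literal that no longer fits on one line into one element per line with a trailing comma, inserts two blank lines between top-level definitions, spaces out slices with compound bounds (`data[off : off + s]`), and uppercases hex digits (`0x7FFFFFFF`); the patched black linked in the message additionally leaves already-wrapped collection literals wrapped. As a minimal, self-contained sketch of the signature/tuple transformation, with invented names rather than anything from revlog.py:

```python
# Illustrative sketch only -- these names are invented for the example and
# are not code from revlog.py.

# Before: the pre-black Mercurial style, with hand-aligned continuations.
def make_index_entry_before(offset, flags, compressedlength, uncompressedlength,
                            baserev, linkrev, p1rev, p2rev, nodeid):
    return (offset << 16 | flags, compressedlength, uncompressedlength,
            baserev, linkrev, p1rev, p2rev, nodeid)


# After: black explodes the overflowing signature and tuple, one element per
# line with a trailing comma; -S leaves string quoting elsewhere untouched.
def make_index_entry_after(
    offset,
    flags,
    compressedlength,
    uncompressedlength,
    baserev,
    linkrev,
    p1rev,
    p2rev,
    nodeid,
):
    return (
        offset << 16 | flags,
        compressedlength,
        uncompressedlength,
        baserev,
        linkrev,
        p1rev,
        p2rev,
        nodeid,
    )


if __name__ == '__main__':
    # The rewrite is purely cosmetic: both spellings return the same tuple.
    args = (7, 1, 20, 40, -1, 5, 3, 4, b'\x00' * 20)
    assert make_index_entry_before(*args) == make_index_entry_after(*args)
```

The same pattern shows up repeatedly in the comparison below, for example in the `revlog.__init__`, `addrevision`, and `_addrevision` signatures.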
comparison
43075:57875cf423c9 (before) | 43076:2372284d9457 (after) |
---|---|
54 REVIDX_FLAGS_ORDER, | 54 REVIDX_FLAGS_ORDER, |
55 REVIDX_ISCENSORED, | 55 REVIDX_ISCENSORED, |
56 REVIDX_RAWTEXT_CHANGING_FLAGS, | 56 REVIDX_RAWTEXT_CHANGING_FLAGS, |
57 REVIDX_SIDEDATA, | 57 REVIDX_SIDEDATA, |
58 ) | 58 ) |
59 from .thirdparty import ( | 59 from .thirdparty import attr |
60 attr, | |
61 ) | |
62 from . import ( | 60 from . import ( |
63 ancestor, | 61 ancestor, |
64 dagop, | 62 dagop, |
65 error, | 63 error, |
66 mdiff, | 64 mdiff, |
116 | 114 |
117 # Flag processors for REVIDX_ELLIPSIS. | 115 # Flag processors for REVIDX_ELLIPSIS. |
118 def ellipsisreadprocessor(rl, text): | 116 def ellipsisreadprocessor(rl, text): |
119 return text, False, {} | 117 return text, False, {} |
120 | 118 |
119 | |
121 def ellipsiswriteprocessor(rl, text, sidedata): | 120 def ellipsiswriteprocessor(rl, text, sidedata): |
122 return text, False | 121 return text, False |
123 | 122 |
123 | |
124 def ellipsisrawprocessor(rl, text): | 124 def ellipsisrawprocessor(rl, text): |
125 return False | 125 return False |
126 | |
126 | 127 |
127 ellipsisprocessor = ( | 128 ellipsisprocessor = ( |
128 ellipsisreadprocessor, | 129 ellipsisreadprocessor, |
129 ellipsiswriteprocessor, | 130 ellipsiswriteprocessor, |
130 ellipsisrawprocessor, | 131 ellipsisrawprocessor, |
131 ) | 132 ) |
132 | 133 |
134 | |
133 def getoffset(q): | 135 def getoffset(q): |
134 return int(q >> 16) | 136 return int(q >> 16) |
135 | 137 |
138 | |
136 def gettype(q): | 139 def gettype(q): |
137 return int(q & 0xFFFF) | 140 return int(q & 0xFFFF) |
141 | |
138 | 142 |
139 def offset_type(offset, type): | 143 def offset_type(offset, type): |
140 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0: | 144 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0: |
141 raise ValueError('unknown revlog index flags') | 145 raise ValueError('unknown revlog index flags') |
142 return int(int(offset) << 16 | type) | 146 return int(int(offset) << 16 | type) |
147 | |
143 | 148 |
144 @attr.s(slots=True, frozen=True) | 149 @attr.s(slots=True, frozen=True) |
145 class _revisioninfo(object): | 150 class _revisioninfo(object): |
146 """Information about a revision that allows building its fulltext | 151 """Information about a revision that allows building its fulltext |
147 node: expected hash of the revision | 152 node: expected hash of the revision |
150 cachedelta: (baserev, uncompressed_delta) or None | 155 cachedelta: (baserev, uncompressed_delta) or None |
151 flags: flags associated to the revision storage | 156 flags: flags associated to the revision storage |
152 | 157 |
153 One of btext[0] or cachedelta must be set. | 158 One of btext[0] or cachedelta must be set. |
154 """ | 159 """ |
160 | |
155 node = attr.ib() | 161 node = attr.ib() |
156 p1 = attr.ib() | 162 p1 = attr.ib() |
157 p2 = attr.ib() | 163 p2 = attr.ib() |
158 btext = attr.ib() | 164 btext = attr.ib() |
159 textlen = attr.ib() | 165 textlen = attr.ib() |
160 cachedelta = attr.ib() | 166 cachedelta = attr.ib() |
161 flags = attr.ib() | 167 flags = attr.ib() |
168 | |
162 | 169 |
163 @interfaceutil.implementer(repository.irevisiondelta) | 170 @interfaceutil.implementer(repository.irevisiondelta) |
164 @attr.s(slots=True) | 171 @attr.s(slots=True) |
165 class revlogrevisiondelta(object): | 172 class revlogrevisiondelta(object): |
166 node = attr.ib() | 173 node = attr.ib() |
171 baserevisionsize = attr.ib() | 178 baserevisionsize = attr.ib() |
172 revision = attr.ib() | 179 revision = attr.ib() |
173 delta = attr.ib() | 180 delta = attr.ib() |
174 linknode = attr.ib(default=None) | 181 linknode = attr.ib(default=None) |
175 | 182 |
183 | |
176 @interfaceutil.implementer(repository.iverifyproblem) | 184 @interfaceutil.implementer(repository.iverifyproblem) |
177 @attr.s(frozen=True) | 185 @attr.s(frozen=True) |
178 class revlogproblem(object): | 186 class revlogproblem(object): |
179 warning = attr.ib(default=None) | 187 warning = attr.ib(default=None) |
180 error = attr.ib(default=None) | 188 error = attr.ib(default=None) |
181 node = attr.ib(default=None) | 189 node = attr.ib(default=None) |
190 | |
182 | 191 |
183 # index v0: | 192 # index v0: |
184 # 4 bytes: offset | 193 # 4 bytes: offset |
185 # 4 bytes: compressed length | 194 # 4 bytes: compressed length |
186 # 4 bytes: base rev | 195 # 4 bytes: base rev |
190 # 20 bytes: nodeid | 199 # 20 bytes: nodeid |
191 indexformatv0 = struct.Struct(">4l20s20s20s") | 200 indexformatv0 = struct.Struct(">4l20s20s20s") |
192 indexformatv0_pack = indexformatv0.pack | 201 indexformatv0_pack = indexformatv0.pack |
193 indexformatv0_unpack = indexformatv0.unpack | 202 indexformatv0_unpack = indexformatv0.unpack |
194 | 203 |
204 | |
195 class revlogoldindex(list): | 205 class revlogoldindex(list): |
196 def __getitem__(self, i): | 206 def __getitem__(self, i): |
197 if i == -1: | 207 if i == -1: |
198 return (0, 0, 0, -1, -1, -1, -1, nullid) | 208 return (0, 0, 0, -1, -1, -1, -1, nullid) |
199 return list.__getitem__(self, i) | 209 return list.__getitem__(self, i) |
210 | |
200 | 211 |
201 class revlogoldio(object): | 212 class revlogoldio(object): |
202 def __init__(self): | 213 def __init__(self): |
203 self.size = indexformatv0.size | 214 self.size = indexformatv0.size |
204 | 215 |
207 index = [] | 218 index = [] |
208 nodemap = {nullid: nullrev} | 219 nodemap = {nullid: nullrev} |
209 n = off = 0 | 220 n = off = 0 |
210 l = len(data) | 221 l = len(data) |
211 while off + s <= l: | 222 while off + s <= l: |
212 cur = data[off:off + s] | 223 cur = data[off : off + s] |
213 off += s | 224 off += s |
214 e = indexformatv0_unpack(cur) | 225 e = indexformatv0_unpack(cur) |
215 # transform to revlogv1 format | 226 # transform to revlogv1 format |
216 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3], | 227 e2 = ( |
217 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6]) | 228 offset_type(e[0], 0), |
229 e[1], | |
230 -1, | |
231 e[2], | |
232 e[3], | |
233 nodemap.get(e[4], nullrev), | |
234 nodemap.get(e[5], nullrev), | |
235 e[6], | |
236 ) | |
218 index.append(e2) | 237 index.append(e2) |
219 nodemap[e[6]] = n | 238 nodemap[e[6]] = n |
220 n += 1 | 239 n += 1 |
221 | 240 |
222 return revlogoldindex(index), nodemap, None | 241 return revlogoldindex(index), nodemap, None |
223 | 242 |
224 def packentry(self, entry, node, version, rev): | 243 def packentry(self, entry, node, version, rev): |
225 if gettype(entry[0]): | 244 if gettype(entry[0]): |
226 raise error.RevlogError(_('index entry flags need revlog ' | 245 raise error.RevlogError( |
227 'version 1')) | 246 _('index entry flags need revlog ' 'version 1') |
228 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4], | 247 ) |
229 node(entry[5]), node(entry[6]), entry[7]) | 248 e2 = ( |
249 getoffset(entry[0]), | |
250 entry[1], | |
251 entry[3], | |
252 entry[4], | |
253 node(entry[5]), | |
254 node(entry[6]), | |
255 entry[7], | |
256 ) | |
230 return indexformatv0_pack(*e2) | 257 return indexformatv0_pack(*e2) |
258 | |
231 | 259 |
232 # index ng: | 260 # index ng: |
233 # 6 bytes: offset | 261 # 6 bytes: offset |
234 # 2 bytes: flags | 262 # 2 bytes: flags |
235 # 4 bytes: compressed length | 263 # 4 bytes: compressed length |
245 versionformat_pack = versionformat.pack | 273 versionformat_pack = versionformat.pack |
246 versionformat_unpack = versionformat.unpack | 274 versionformat_unpack = versionformat.unpack |
247 | 275 |
248 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte | 276 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte |
249 # signed integer) | 277 # signed integer) |
250 _maxentrysize = 0x7fffffff | 278 _maxentrysize = 0x7FFFFFFF |
279 | |
251 | 280 |
252 class revlogio(object): | 281 class revlogio(object): |
253 def __init__(self): | 282 def __init__(self): |
254 self.size = indexformatng.size | 283 self.size = indexformatng.size |
255 | 284 |
261 def packentry(self, entry, node, version, rev): | 290 def packentry(self, entry, node, version, rev): |
262 p = indexformatng_pack(*entry) | 291 p = indexformatng_pack(*entry) |
263 if rev == 0: | 292 if rev == 0: |
264 p = versionformat_pack(version) + p[4:] | 293 p = versionformat_pack(version) + p[4:] |
265 return p | 294 return p |
295 | |
266 | 296 |
267 class revlog(object): | 297 class revlog(object): |
268 """ | 298 """ |
269 the underlying revision storage object | 299 the underlying revision storage object |
270 | 300 |
302 compression for the data content. | 332 compression for the data content. |
303 """ | 333 """ |
304 | 334 |
305 _flagserrorclass = error.RevlogError | 335 _flagserrorclass = error.RevlogError |
306 | 336 |
307 def __init__(self, opener, indexfile, datafile=None, checkambig=False, | 337 def __init__( |
308 mmaplargeindex=False, censorable=False, | 338 self, |
309 upperboundcomp=None): | 339 opener, |
340 indexfile, | |
341 datafile=None, | |
342 checkambig=False, | |
343 mmaplargeindex=False, | |
344 censorable=False, | |
345 upperboundcomp=None, | |
346 ): | |
310 """ | 347 """ |
311 create a revlog object | 348 create a revlog object |
312 | 349 |
313 opener is a function that abstracts the file opening operation | 350 opener is a function that abstracts the file opening operation |
314 and can be used to implement COW semantics or the like. | 351 and can be used to implement COW semantics or the like. |
408 # revlog v0 doesn't have flag processors | 445 # revlog v0 doesn't have flag processors |
409 for flag, processor in opts.get(b'flagprocessors', {}).iteritems(): | 446 for flag, processor in opts.get(b'flagprocessors', {}).iteritems(): |
410 flagutil.insertflagprocessor(flag, processor, self._flagprocessors) | 447 flagutil.insertflagprocessor(flag, processor, self._flagprocessors) |
411 | 448 |
412 if self._chunkcachesize <= 0: | 449 if self._chunkcachesize <= 0: |
413 raise error.RevlogError(_('revlog chunk cache size %r is not ' | 450 raise error.RevlogError( |
414 'greater than 0') % self._chunkcachesize) | 451 _('revlog chunk cache size %r is not ' 'greater than 0') |
452 % self._chunkcachesize | |
453 ) | |
415 elif self._chunkcachesize & (self._chunkcachesize - 1): | 454 elif self._chunkcachesize & (self._chunkcachesize - 1): |
416 raise error.RevlogError(_('revlog chunk cache size %r is not a ' | 455 raise error.RevlogError( |
417 'power of 2') % self._chunkcachesize) | 456 _('revlog chunk cache size %r is not a ' 'power of 2') |
457 % self._chunkcachesize | |
458 ) | |
418 | 459 |
419 indexdata = '' | 460 indexdata = '' |
420 self._initempty = True | 461 self._initempty = True |
421 try: | 462 try: |
422 with self._indexfp() as f: | 463 with self._indexfp() as f: |
423 if (mmapindexthreshold is not None and | 464 if ( |
424 self.opener.fstat(f).st_size >= mmapindexthreshold): | 465 mmapindexthreshold is not None |
466 and self.opener.fstat(f).st_size >= mmapindexthreshold | |
467 ): | |
425 # TODO: should .close() to release resources without | 468 # TODO: should .close() to release resources without |
426 # relying on Python GC | 469 # relying on Python GC |
427 indexdata = util.buffer(util.mmapread(f)) | 470 indexdata = util.buffer(util.mmapread(f)) |
428 else: | 471 else: |
429 indexdata = f.read() | 472 indexdata = f.read() |
443 flags = versionflags & ~0xFFFF | 486 flags = versionflags & ~0xFFFF |
444 fmt = versionflags & 0xFFFF | 487 fmt = versionflags & 0xFFFF |
445 | 488 |
446 if fmt == REVLOGV0: | 489 if fmt == REVLOGV0: |
447 if flags: | 490 if flags: |
448 raise error.RevlogError(_('unknown flags (%#04x) in version %d ' | 491 raise error.RevlogError( |
449 'revlog %s') % | 492 _('unknown flags (%#04x) in version %d ' 'revlog %s') |
450 (flags >> 16, fmt, self.indexfile)) | 493 % (flags >> 16, fmt, self.indexfile) |
494 ) | |
451 | 495 |
452 self._inline = False | 496 self._inline = False |
453 self._generaldelta = False | 497 self._generaldelta = False |
454 | 498 |
455 elif fmt == REVLOGV1: | 499 elif fmt == REVLOGV1: |
456 if flags & ~REVLOGV1_FLAGS: | 500 if flags & ~REVLOGV1_FLAGS: |
457 raise error.RevlogError(_('unknown flags (%#04x) in version %d ' | 501 raise error.RevlogError( |
458 'revlog %s') % | 502 _('unknown flags (%#04x) in version %d ' 'revlog %s') |
459 (flags >> 16, fmt, self.indexfile)) | 503 % (flags >> 16, fmt, self.indexfile) |
504 ) | |
460 | 505 |
461 self._inline = versionflags & FLAG_INLINE_DATA | 506 self._inline = versionflags & FLAG_INLINE_DATA |
462 self._generaldelta = versionflags & FLAG_GENERALDELTA | 507 self._generaldelta = versionflags & FLAG_GENERALDELTA |
463 | 508 |
464 elif fmt == REVLOGV2: | 509 elif fmt == REVLOGV2: |
465 if flags & ~REVLOGV2_FLAGS: | 510 if flags & ~REVLOGV2_FLAGS: |
466 raise error.RevlogError(_('unknown flags (%#04x) in version %d ' | 511 raise error.RevlogError( |
467 'revlog %s') % | 512 _('unknown flags (%#04x) in version %d ' 'revlog %s') |
468 (flags >> 16, fmt, self.indexfile)) | 513 % (flags >> 16, fmt, self.indexfile) |
514 ) | |
469 | 515 |
470 self._inline = versionflags & FLAG_INLINE_DATA | 516 self._inline = versionflags & FLAG_INLINE_DATA |
471 # generaldelta implied by version 2 revlogs. | 517 # generaldelta implied by version 2 revlogs. |
472 self._generaldelta = True | 518 self._generaldelta = True |
473 | 519 |
474 else: | 520 else: |
475 raise error.RevlogError(_('unknown version (%d) in revlog %s') % | 521 raise error.RevlogError( |
476 (fmt, self.indexfile)) | 522 _('unknown version (%d) in revlog %s') % (fmt, self.indexfile) |
523 ) | |
477 # sparse-revlog can't be on without general-delta (issue6056) | 524 # sparse-revlog can't be on without general-delta (issue6056) |
478 if not self._generaldelta: | 525 if not self._generaldelta: |
479 self._sparserevlog = False | 526 self._sparserevlog = False |
480 | 527 |
481 self._storedeltachains = True | 528 self._storedeltachains = True |
484 if self.version == REVLOGV0: | 531 if self.version == REVLOGV0: |
485 self._io = revlogoldio() | 532 self._io = revlogoldio() |
486 try: | 533 try: |
487 d = self._io.parseindex(indexdata, self._inline) | 534 d = self._io.parseindex(indexdata, self._inline) |
488 except (ValueError, IndexError): | 535 except (ValueError, IndexError): |
489 raise error.RevlogError(_("index %s is corrupted") % | 536 raise error.RevlogError(_("index %s is corrupted") % self.indexfile) |
490 self.indexfile) | |
491 self.index, nodemap, self._chunkcache = d | 537 self.index, nodemap, self._chunkcache = d |
492 if nodemap is not None: | 538 if nodemap is not None: |
493 self.nodemap = self._nodecache = nodemap | 539 self.nodemap = self._nodecache = nodemap |
494 if not self._chunkcache: | 540 if not self._chunkcache: |
495 self._chunkclear() | 541 self._chunkclear() |
542 with func() as fp: | 588 with func() as fp: |
543 yield fp | 589 yield fp |
544 | 590 |
545 def tip(self): | 591 def tip(self): |
546 return self.node(len(self.index) - 1) | 592 return self.node(len(self.index) - 1) |
593 | |
547 def __contains__(self, rev): | 594 def __contains__(self, rev): |
548 return 0 <= rev < len(self) | 595 return 0 <= rev < len(self) |
596 | |
549 def __len__(self): | 597 def __len__(self): |
550 return len(self.index) | 598 return len(self.index) |
599 | |
551 def __iter__(self): | 600 def __iter__(self): |
552 return iter(pycompat.xrange(len(self))) | 601 return iter(pycompat.xrange(len(self))) |
602 | |
553 def revs(self, start=0, stop=None): | 603 def revs(self, start=0, stop=None): |
554 """iterate over all rev in this revlog (from start to stop)""" | 604 """iterate over all rev in this revlog (from start to stop)""" |
555 return storageutil.iterrevs(len(self), start=start, stop=stop) | 605 return storageutil.iterrevs(len(self), start=start, stop=stop) |
556 | 606 |
557 @util.propertycache | 607 @util.propertycache |
574 # Disable delta if either rev requires a content-changing flag | 624 # Disable delta if either rev requires a content-changing flag |
575 # processor (ex. LFS). This is because such flag processor can alter | 625 # processor (ex. LFS). This is because such flag processor can alter |
576 # the rawtext content that the delta will be based on, and two clients | 626 # the rawtext content that the delta will be based on, and two clients |
577 # could have a same revlog node with different flags (i.e. different | 627 # could have a same revlog node with different flags (i.e. different |
578 # rawtext contents) and the delta could be incompatible. | 628 # rawtext contents) and the delta could be incompatible. |
579 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) | 629 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or ( |
580 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)): | 630 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS |
631 ): | |
581 return False | 632 return False |
582 return True | 633 return True |
583 | 634 |
584 def clearcaches(self): | 635 def clearcaches(self): |
585 self._revisioncache = None | 636 self._revisioncache = None |
702 return self.start(rev) + self.length(rev) | 753 return self.start(rev) + self.length(rev) |
703 | 754 |
704 def parents(self, node): | 755 def parents(self, node): |
705 i = self.index | 756 i = self.index |
706 d = i[self.rev(node)] | 757 d = i[self.rev(node)] |
707 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline | 758 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline |
708 | 759 |
709 def chainlen(self, rev): | 760 def chainlen(self, rev): |
710 return self._chaininfo(rev)[0] | 761 return self._chaininfo(rev)[0] |
711 | 762 |
712 def _chaininfo(self, rev): | 763 def _chaininfo(self, rev): |
964 roots = list(roots) | 1015 roots = list(roots) |
965 if not roots: | 1016 if not roots: |
966 return nonodes | 1017 return nonodes |
967 lowestrev = min([self.rev(n) for n in roots]) | 1018 lowestrev = min([self.rev(n) for n in roots]) |
968 else: | 1019 else: |
969 roots = [nullid] # Everybody's a descendant of nullid | 1020 roots = [nullid] # Everybody's a descendant of nullid |
970 lowestrev = nullrev | 1021 lowestrev = nullrev |
971 if (lowestrev == nullrev) and (heads is None): | 1022 if (lowestrev == nullrev) and (heads is None): |
972 # We want _all_ the nodes! | 1023 # We want _all_ the nodes! |
973 return ([self.node(r) for r in self], [nullid], list(self.heads())) | 1024 return ([self.node(r) for r in self], [nullid], list(self.heads())) |
974 if heads is None: | 1025 if heads is None: |
1003 r = self.rev(n) | 1054 r = self.rev(n) |
1004 if r >= lowestrev: | 1055 if r >= lowestrev: |
1005 if n not in ancestors: | 1056 if n not in ancestors: |
1006 # If we are possibly a descendant of one of the roots | 1057 # If we are possibly a descendant of one of the roots |
1007 # and we haven't already been marked as an ancestor | 1058 # and we haven't already been marked as an ancestor |
1008 ancestors.add(n) # Mark as ancestor | 1059 ancestors.add(n) # Mark as ancestor |
1009 # Add non-nullid parents to list of nodes to tag. | 1060 # Add non-nullid parents to list of nodes to tag. |
1010 nodestotag.update([p for p in self.parents(n) if | 1061 nodestotag.update( |
1011 p != nullid]) | 1062 [p for p in self.parents(n) if p != nullid] |
1012 elif n in heads: # We've seen it before, is it a fake head? | 1063 ) |
1064 elif n in heads: # We've seen it before, is it a fake head? | |
1013 # So it is, real heads should not be the ancestors of | 1065 # So it is, real heads should not be the ancestors of |
1014 # any other heads. | 1066 # any other heads. |
1015 heads.pop(n) | 1067 heads.pop(n) |
1016 if not ancestors: | 1068 if not ancestors: |
1017 return nonodes | 1069 return nonodes |
1137 else: | 1189 else: |
1138 start = self.rev(start) | 1190 start = self.rev(start) |
1139 | 1191 |
1140 stoprevs = set(self.rev(n) for n in stop or []) | 1192 stoprevs = set(self.rev(n) for n in stop or []) |
1141 | 1193 |
1142 revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start, | 1194 revs = dagop.headrevssubset( |
1143 stoprevs=stoprevs) | 1195 self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs |
1196 ) | |
1144 | 1197 |
1145 return [self.node(rev) for rev in revs] | 1198 return [self.node(rev) for rev in revs] |
1146 | 1199 |
1147 def children(self, node): | 1200 def children(self, node): |
1148 """find the children of a given node""" | 1201 """find the children of a given node""" |
1166 | 1219 |
1167 def _commonancestorsheads(self, *revs): | 1220 def _commonancestorsheads(self, *revs): |
1168 """calculate all the heads of the common ancestors of revs""" | 1221 """calculate all the heads of the common ancestors of revs""" |
1169 try: | 1222 try: |
1170 ancs = self.index.commonancestorsheads(*revs) | 1223 ancs = self.index.commonancestorsheads(*revs) |
1171 except (AttributeError, OverflowError): # C implementation failed | 1224 except (AttributeError, OverflowError): # C implementation failed |
1172 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs) | 1225 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs) |
1173 return ancs | 1226 return ancs |
1174 | 1227 |
1175 def isancestor(self, a, b): | 1228 def isancestor(self, a, b): |
1176 """return True if node a is an ancestor of node b | 1229 """return True if node a is an ancestor of node b |
1197 def reachableroots(self, minroot, heads, roots, includepath=False): | 1250 def reachableroots(self, minroot, heads, roots, includepath=False): |
1198 """return (heads(::<roots> and <roots>::<heads>)) | 1251 """return (heads(::<roots> and <roots>::<heads>)) |
1199 | 1252 |
1200 If includepath is True, return (<roots>::<heads>).""" | 1253 If includepath is True, return (<roots>::<heads>).""" |
1201 try: | 1254 try: |
1202 return self.index.reachableroots2(minroot, heads, roots, | 1255 return self.index.reachableroots2( |
1203 includepath) | 1256 minroot, heads, roots, includepath |
1257 ) | |
1204 except AttributeError: | 1258 except AttributeError: |
1205 return dagop._reachablerootspure(self.parentrevs, | 1259 return dagop._reachablerootspure( |
1206 minroot, roots, heads, includepath) | 1260 self.parentrevs, minroot, roots, heads, includepath |
1261 ) | |
1207 | 1262 |
1208 def ancestor(self, a, b): | 1263 def ancestor(self, a, b): |
1209 """calculate the "best" common ancestor of nodes a and b""" | 1264 """calculate the "best" common ancestor of nodes a and b""" |
1210 | 1265 |
1211 a, b = self.rev(a), self.rev(b) | 1266 a, b = self.rev(a), self.rev(b) |
1225 if len(id) == 20: | 1280 if len(id) == 20: |
1226 # possibly a binary node | 1281 # possibly a binary node |
1227 # odds of a binary node being all hex in ASCII are 1 in 10**25 | 1282 # odds of a binary node being all hex in ASCII are 1 in 10**25 |
1228 try: | 1283 try: |
1229 node = id | 1284 node = id |
1230 self.rev(node) # quick search the index | 1285 self.rev(node) # quick search the index |
1231 return node | 1286 return node |
1232 except error.LookupError: | 1287 except error.LookupError: |
1233 pass # may be partial hex id | 1288 pass # may be partial hex id |
1234 try: | 1289 try: |
1235 # str(rev) | 1290 # str(rev) |
1236 rev = int(id) | 1291 rev = int(id) |
1237 if "%d" % rev != id: | 1292 if "%d" % rev != id: |
1238 raise ValueError | 1293 raise ValueError |
1269 except error.RevlogError: | 1324 except error.RevlogError: |
1270 # parsers.c radix tree lookup gave multiple matches | 1325 # parsers.c radix tree lookup gave multiple matches |
1271 # fast path: for unfiltered changelog, radix tree is accurate | 1326 # fast path: for unfiltered changelog, radix tree is accurate |
1272 if not getattr(self, 'filteredrevs', None): | 1327 if not getattr(self, 'filteredrevs', None): |
1273 raise error.AmbiguousPrefixLookupError( | 1328 raise error.AmbiguousPrefixLookupError( |
1274 id, self.indexfile, _('ambiguous identifier')) | 1329 id, self.indexfile, _('ambiguous identifier') |
1330 ) | |
1275 # fall through to slow path that filters hidden revisions | 1331 # fall through to slow path that filters hidden revisions |
1276 except (AttributeError, ValueError): | 1332 except (AttributeError, ValueError): |
1277 # we are pure python, or key was too short to search radix tree | 1333 # we are pure python, or key was too short to search radix tree |
1278 pass | 1334 pass |
1279 | 1335 |
1282 | 1338 |
1283 if len(id) <= 40: | 1339 if len(id) <= 40: |
1284 try: | 1340 try: |
1285 # hex(node)[:...] | 1341 # hex(node)[:...] |
1286 l = len(id) // 2 # grab an even number of digits | 1342 l = len(id) // 2 # grab an even number of digits |
1287 prefix = bin(id[:l * 2]) | 1343 prefix = bin(id[: l * 2]) |
1288 nl = [e[7] for e in self.index if e[7].startswith(prefix)] | 1344 nl = [e[7] for e in self.index if e[7].startswith(prefix)] |
1289 nl = [n for n in nl if hex(n).startswith(id) and | 1345 nl = [ |
1290 self.hasnode(n)] | 1346 n for n in nl if hex(n).startswith(id) and self.hasnode(n) |
1347 ] | |
1291 if nullhex.startswith(id): | 1348 if nullhex.startswith(id): |
1292 nl.append(nullid) | 1349 nl.append(nullid) |
1293 if len(nl) > 0: | 1350 if len(nl) > 0: |
1294 if len(nl) == 1 and not maybewdir: | 1351 if len(nl) == 1 and not maybewdir: |
1295 self._pcache[id] = nl[0] | 1352 self._pcache[id] = nl[0] |
1296 return nl[0] | 1353 return nl[0] |
1297 raise error.AmbiguousPrefixLookupError( | 1354 raise error.AmbiguousPrefixLookupError( |
1298 id, self.indexfile, _('ambiguous identifier')) | 1355 id, self.indexfile, _('ambiguous identifier') |
1356 ) | |
1299 if maybewdir: | 1357 if maybewdir: |
1300 raise error.WdirUnsupported | 1358 raise error.WdirUnsupported |
1301 return None | 1359 return None |
1302 except TypeError: | 1360 except TypeError: |
1303 pass | 1361 pass |
1316 | 1374 |
1317 raise error.LookupError(id, self.indexfile, _('no match found')) | 1375 raise error.LookupError(id, self.indexfile, _('no match found')) |
1318 | 1376 |
1319 def shortest(self, node, minlength=1): | 1377 def shortest(self, node, minlength=1): |
1320 """Find the shortest unambiguous prefix that matches node.""" | 1378 """Find the shortest unambiguous prefix that matches node.""" |
1379 | |
1321 def isvalid(prefix): | 1380 def isvalid(prefix): |
1322 try: | 1381 try: |
1323 matchednode = self._partialmatch(prefix) | 1382 matchednode = self._partialmatch(prefix) |
1324 except error.AmbiguousPrefixLookupError: | 1383 except error.AmbiguousPrefixLookupError: |
1325 return False | 1384 return False |
1400 # Cache data both forward and backward around the requested | 1459 # Cache data both forward and backward around the requested |
1401 # data, in a fixed size window. This helps speed up operations | 1460 # data, in a fixed size window. This helps speed up operations |
1402 # involving reading the revlog backwards. | 1461 # involving reading the revlog backwards. |
1403 cachesize = self._chunkcachesize | 1462 cachesize = self._chunkcachesize |
1404 realoffset = offset & ~(cachesize - 1) | 1463 realoffset = offset & ~(cachesize - 1) |
1405 reallength = (((offset + length + cachesize) & ~(cachesize - 1)) | 1464 reallength = ( |
1406 - realoffset) | 1465 (offset + length + cachesize) & ~(cachesize - 1) |
1466 ) - realoffset | |
1407 with self._datareadfp(df) as df: | 1467 with self._datareadfp(df) as df: |
1408 df.seek(realoffset) | 1468 df.seek(realoffset) |
1409 d = df.read(reallength) | 1469 d = df.read(reallength) |
1410 | 1470 |
1411 self._cachesegment(realoffset, d) | 1471 self._cachesegment(realoffset, d) |
1412 if offset != realoffset or reallength != length: | 1472 if offset != realoffset or reallength != length: |
1413 startoffset = offset - realoffset | 1473 startoffset = offset - realoffset |
1414 if len(d) - startoffset < length: | 1474 if len(d) - startoffset < length: |
1415 raise error.RevlogError( | 1475 raise error.RevlogError( |
1416 _('partial read of revlog %s; expected %d bytes from ' | 1476 _( |
1417 'offset %d, got %d') % | 1477 'partial read of revlog %s; expected %d bytes from ' |
1418 (self.indexfile if self._inline else self.datafile, | 1478 'offset %d, got %d' |
1419 length, realoffset, len(d) - startoffset)) | 1479 ) |
1480 % ( | |
1481 self.indexfile if self._inline else self.datafile, | |
1482 length, | |
1483 realoffset, | |
1484 len(d) - startoffset, | |
1485 ) | |
1486 ) | |
1420 | 1487 |
1421 return util.buffer(d, startoffset, length) | 1488 return util.buffer(d, startoffset, length) |
1422 | 1489 |
1423 if len(d) < length: | 1490 if len(d) < length: |
1424 raise error.RevlogError( | 1491 raise error.RevlogError( |
1425 _('partial read of revlog %s; expected %d bytes from offset ' | 1492 _( |
1426 '%d, got %d') % | 1493 'partial read of revlog %s; expected %d bytes from offset ' |
1427 (self.indexfile if self._inline else self.datafile, | 1494 '%d, got %d' |
1428 length, offset, len(d))) | 1495 ) |
1496 % ( | |
1497 self.indexfile if self._inline else self.datafile, | |
1498 length, | |
1499 offset, | |
1500 len(d), | |
1501 ) | |
1502 ) | |
1429 | 1503 |
1430 return d | 1504 return d |
1431 | 1505 |
1432 def _getsegment(self, offset, length, df=None): | 1506 def _getsegment(self, offset, length, df=None): |
1433 """Obtain a segment of raw data from the revlog. | 1507 """Obtain a segment of raw data from the revlog. |
1446 # is it in the cache? | 1520 # is it in the cache? |
1447 cachestart = offset - o | 1521 cachestart = offset - o |
1448 cacheend = cachestart + length | 1522 cacheend = cachestart + length |
1449 if cachestart >= 0 and cacheend <= l: | 1523 if cachestart >= 0 and cacheend <= l: |
1450 if cachestart == 0 and cacheend == l: | 1524 if cachestart == 0 and cacheend == l: |
1451 return d # avoid a copy | 1525 return d # avoid a copy |
1452 return util.buffer(d, cachestart, cacheend - cachestart) | 1526 return util.buffer(d, cachestart, cacheend - cachestart) |
1453 | 1527 |
1454 return self._readsegment(offset, length, df=df) | 1528 return self._readsegment(offset, length, df=df) |
1455 | 1529 |
1456 def _getsegmentforrevs(self, startrev, endrev, df=None): | 1530 def _getsegmentforrevs(self, startrev, endrev, df=None): |
1523 ladd = l.append | 1597 ladd = l.append |
1524 | 1598 |
1525 if not self._withsparseread: | 1599 if not self._withsparseread: |
1526 slicedchunks = (revs,) | 1600 slicedchunks = (revs,) |
1527 else: | 1601 else: |
1528 slicedchunks = deltautil.slicechunk(self, revs, | 1602 slicedchunks = deltautil.slicechunk( |
1529 targetsize=targetsize) | 1603 self, revs, targetsize=targetsize |
1604 ) | |
1530 | 1605 |
1531 for revschunk in slicedchunks: | 1606 for revschunk in slicedchunks: |
1532 firstrev = revschunk[0] | 1607 firstrev = revschunk[0] |
1533 # Skip trailing revisions with empty diff | 1608 # Skip trailing revisions with empty diff |
1534 for lastrev in revschunk[::-1]: | 1609 for lastrev in revschunk[::-1]: |
1602 revlog data directly. So this function needs raw revision data. | 1677 revlog data directly. So this function needs raw revision data. |
1603 """ | 1678 """ |
1604 if rev1 != nullrev and self.deltaparent(rev2) == rev1: | 1679 if rev1 != nullrev and self.deltaparent(rev2) == rev1: |
1605 return bytes(self._chunk(rev2)) | 1680 return bytes(self._chunk(rev2)) |
1606 | 1681 |
1607 return mdiff.textdiff(self.rawdata(rev1), | 1682 return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2)) |
1608 self.rawdata(rev2)) | |
1609 | 1683 |
1610 def _processflags(self, text, flags, operation, raw=False): | 1684 def _processflags(self, text, flags, operation, raw=False): |
1611 """deprecated entry point to access flag processors""" | 1685 """deprecated entry point to access flag processors""" |
1612 msg = ('_processflag(...) use the specialized variant') | 1686 msg = '_processflag(...) use the specialized variant' |
1613 util.nouideprecwarn(msg, '5.2', stacklevel=2) | 1687 util.nouideprecwarn(msg, '5.2', stacklevel=2) |
1614 if raw: | 1688 if raw: |
1615 return text, flagutil.processflagsraw(self, text, flags) | 1689 return text, flagutil.processflagsraw(self, text, flags) |
1616 elif operation == 'read': | 1690 elif operation == 'read': |
1617 return flagutil.processflagsread(self, text, flags) | 1691 return flagutil.processflagsread(self, text, flags) |
1618 else: # write operation | 1692 else: # write operation |
1619 return flagutil.processflagswrite(self, text, flags) | 1693 return flagutil.processflagswrite(self, text, flags) |
1620 | 1694 |
1621 def revision(self, nodeorrev, _df=None, raw=False): | 1695 def revision(self, nodeorrev, _df=None, raw=False): |
1622 """return an uncompressed revision of a given node or revision | 1696 """return an uncompressed revision of a given node or revision |
1623 number. | 1697 number. |
1626 raw - an optional argument specifying if the revision data is to be | 1700 raw - an optional argument specifying if the revision data is to be |
1627 treated as raw data when applying flag transforms. 'raw' should be set | 1701 treated as raw data when applying flag transforms. 'raw' should be set |
1628 to True when generating changegroups or in debug commands. | 1702 to True when generating changegroups or in debug commands. |
1629 """ | 1703 """ |
1630 if raw: | 1704 if raw: |
1631 msg = ('revlog.revision(..., raw=True) is deprecated, ' | 1705 msg = ( |
1632 'use revlog.rawdata(...)') | 1706 'revlog.revision(..., raw=True) is deprecated, ' |
1707 'use revlog.rawdata(...)' | |
1708 ) | |
1633 util.nouideprecwarn(msg, '5.2', stacklevel=2) | 1709 util.nouideprecwarn(msg, '5.2', stacklevel=2) |
1634 return self._revisiondata(nodeorrev, _df, raw=raw)[0] | 1710 return self._revisiondata(nodeorrev, _df, raw=raw)[0] |
1635 | 1711 |
1636 def sidedata(self, nodeorrev, _df=None): | 1712 def sidedata(self, nodeorrev, _df=None): |
1637 """a map of extra data related to the changeset but not part of the hash | 1713 """a map of extra data related to the changeset but not part of the hash |
1682 else: | 1758 else: |
1683 try: | 1759 try: |
1684 r = flagutil.processflagsread(self, rawtext, flags) | 1760 r = flagutil.processflagsread(self, rawtext, flags) |
1685 except error.SidedataHashError as exc: | 1761 except error.SidedataHashError as exc: |
1686 msg = _("integrity check failed on %s:%s sidedata key %d") | 1762 msg = _("integrity check failed on %s:%s sidedata key %d") |
1687 msg %= (self.indexfile, pycompat.bytestr(rev), | 1763 msg %= (self.indexfile, pycompat.bytestr(rev), exc.sidedatakey) |
1688 exc.sidedatakey) | |
1689 raise error.RevlogError(msg) | 1764 raise error.RevlogError(msg) |
1690 text, validatehash, sidedata = r | 1765 text, validatehash, sidedata = r |
1691 if validatehash: | 1766 if validatehash: |
1692 self.checkhash(text, node, rev=rev) | 1767 self.checkhash(text, node, rev=rev) |
1693 if not validated: | 1768 if not validated: |
1733 if basetext is None: | 1808 if basetext is None: |
1734 basetext = bytes(bins[0]) | 1809 basetext = bytes(bins[0]) |
1735 bins = bins[1:] | 1810 bins = bins[1:] |
1736 | 1811 |
1737 rawtext = mdiff.patches(basetext, bins) | 1812 rawtext = mdiff.patches(basetext, bins) |
1738 del basetext # let us have a chance to free memory early | 1813 del basetext # let us have a chance to free memory early |
1739 return (rev, rawtext, False) | 1814 return (rev, rawtext, False) |
1740 | 1815 |
1741 def rawdata(self, nodeorrev, _df=None): | 1816 def rawdata(self, nodeorrev, _df=None): |
1742 """return an uncompressed raw data of a given node or revision number. | 1817 """return an uncompressed raw data of a given node or revision number. |
1743 | 1818 |
1773 self._revisioncache = None | 1848 self._revisioncache = None |
1774 | 1849 |
1775 revornode = rev | 1850 revornode = rev |
1776 if revornode is None: | 1851 if revornode is None: |
1777 revornode = templatefilters.short(hex(node)) | 1852 revornode = templatefilters.short(hex(node)) |
1778 raise error.RevlogError(_("integrity check failed on %s:%s") | 1853 raise error.RevlogError( |
1779 % (self.indexfile, pycompat.bytestr(revornode))) | 1854 _("integrity check failed on %s:%s") |
1855 % (self.indexfile, pycompat.bytestr(revornode)) | |
1856 ) | |
1780 except error.RevlogError: | 1857 except error.RevlogError: |
1781 if self._censorable and storageutil.iscensoredtext(text): | 1858 if self._censorable and storageutil.iscensoredtext(text): |
1782 raise error.CensoredNodeError(self.indexfile, node, text) | 1859 raise error.CensoredNodeError(self.indexfile, node, text) |
1783 raise | 1860 raise |
1784 | 1861 |
1788 This should be called after revisions are added to the revlog. If the | 1865 This should be called after revisions are added to the revlog. If the |
1789 revlog has grown too large to be an inline revlog, it will convert it | 1866 revlog has grown too large to be an inline revlog, it will convert it |
1790 to use multiple index and data files. | 1867 to use multiple index and data files. |
1791 """ | 1868 """ |
1792 tiprev = len(self) - 1 | 1869 tiprev = len(self) - 1 |
1793 if (not self._inline or | 1870 if ( |
1794 (self.start(tiprev) + self.length(tiprev)) < _maxinline): | 1871 not self._inline |
1872 or (self.start(tiprev) + self.length(tiprev)) < _maxinline | |
1873 ): | |
1795 return | 1874 return |
1796 | 1875 |
1797 trinfo = tr.find(self.indexfile) | 1876 trinfo = tr.find(self.indexfile) |
1798 if trinfo is None: | 1877 if trinfo is None: |
1799 raise error.RevlogError(_("%s not found in the transaction") | 1878 raise error.RevlogError( |
1800 % self.indexfile) | 1879 _("%s not found in the transaction") % self.indexfile |
1880 ) | |
1801 | 1881 |
1802 trindex = trinfo[2] | 1882 trindex = trinfo[2] |
1803 if trindex is not None: | 1883 if trindex is not None: |
1804 dataoff = self.start(trindex) | 1884 dataoff = self.start(trindex) |
1805 else: | 1885 else: |
1836 | 1916 |
1837 def _nodeduplicatecallback(self, transaction, node): | 1917 def _nodeduplicatecallback(self, transaction, node): |
1838 """called when trying to add a node already stored. | 1918 """called when trying to add a node already stored. |
1839 """ | 1919 """ |
1840 | 1920 |
1841 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None, | 1921 def addrevision( |
1842 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None, | 1922 self, |
1843 sidedata=None): | 1923 text, |
1924 transaction, | |
1925 link, | |
1926 p1, | |
1927 p2, | |
1928 cachedelta=None, | |
1929 node=None, | |
1930 flags=REVIDX_DEFAULT_FLAGS, | |
1931 deltacomputer=None, | |
1932 sidedata=None, | |
1933 ): | |
1844 """add a revision to the log | 1934 """add a revision to the log |
1845 | 1935 |
1846 text - the revision data to add | 1936 text - the revision data to add |
1847 transaction - the transaction object used for rollback | 1937 transaction - the transaction object used for rollback |
1848 link - the linkrev data to add | 1938 link - the linkrev data to add |
1854 flags - the known flags to set on the revision | 1944 flags - the known flags to set on the revision |
1855 deltacomputer - an optional deltacomputer instance shared between | 1945 deltacomputer - an optional deltacomputer instance shared between |
1856 multiple calls | 1946 multiple calls |
1857 """ | 1947 """ |
1858 if link == nullrev: | 1948 if link == nullrev: |
1859 raise error.RevlogError(_("attempted to add linkrev -1 to %s") | 1949 raise error.RevlogError( |
1860 % self.indexfile) | 1950 _("attempted to add linkrev -1 to %s") % self.indexfile |
1951 ) | |
1861 | 1952 |
1862 if sidedata is None: | 1953 if sidedata is None: |
1863 sidedata = {} | 1954 sidedata = {} |
1864 flags = flags & ~REVIDX_SIDEDATA | 1955 flags = flags & ~REVIDX_SIDEDATA |
1865 elif not self.hassidedata: | 1956 elif not self.hassidedata: |
1866 raise error.ProgrammingError( | 1957 raise error.ProgrammingError( |
1867 _("trying to add sidedata to a revlog who don't support them") | 1958 _("trying to add sidedata to a revlog who don't support them") |
1868 ) | 1959 ) |
1869 else: | 1960 else: |
1870 flags |= REVIDX_SIDEDATA | 1961 flags |= REVIDX_SIDEDATA |
1871 | 1962 |
1872 if flags: | 1963 if flags: |
1873 node = node or self.hash(text, p1, p2) | 1964 node = node or self.hash(text, p1, p2) |
1874 | 1965 |
1875 rawtext, validatehash = flagutil.processflagswrite(self, text, flags, | 1966 rawtext, validatehash = flagutil.processflagswrite( |
1876 sidedata=sidedata) | 1967 self, text, flags, sidedata=sidedata |
1968 ) | |
1877 | 1969 |
1878 # If the flag processor modifies the revision data, ignore any provided | 1970 # If the flag processor modifies the revision data, ignore any provided |
1879 # cachedelta. | 1971 # cachedelta. |
1880 if rawtext != text: | 1972 if rawtext != text: |
1881 cachedelta = None | 1973 cachedelta = None |
1882 | 1974 |
1883 if len(rawtext) > _maxentrysize: | 1975 if len(rawtext) > _maxentrysize: |
1884 raise error.RevlogError( | 1976 raise error.RevlogError( |
1885 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB") | 1977 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB") |
1886 % (self.indexfile, len(rawtext))) | 1978 % (self.indexfile, len(rawtext)) |
1979 ) | |
1887 | 1980 |
1888 node = node or self.hash(rawtext, p1, p2) | 1981 node = node or self.hash(rawtext, p1, p2) |
1889 if node in self.nodemap: | 1982 if node in self.nodemap: |
1890 return node | 1983 return node |
1891 | 1984 |
1892 if validatehash: | 1985 if validatehash: |
1893 self.checkhash(rawtext, node, p1=p1, p2=p2) | 1986 self.checkhash(rawtext, node, p1=p1, p2=p2) |
1894 | 1987 |
1895 return self.addrawrevision(rawtext, transaction, link, p1, p2, node, | 1988 return self.addrawrevision( |
1896 flags, cachedelta=cachedelta, | 1989 rawtext, |
1897 deltacomputer=deltacomputer) | 1990 transaction, |
1898 | 1991 link, |
1899 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags, | 1992 p1, |
1900 cachedelta=None, deltacomputer=None): | 1993 p2, |
1994 node, | |
1995 flags, | |
1996 cachedelta=cachedelta, | |
1997 deltacomputer=deltacomputer, | |
1998 ) | |
1999 | |
2000 def addrawrevision( | |
2001 self, | |
2002 rawtext, | |
2003 transaction, | |
2004 link, | |
2005 p1, | |
2006 p2, | |
2007 node, | |
2008 flags, | |
2009 cachedelta=None, | |
2010 deltacomputer=None, | |
2011 ): | |
1901 """add a raw revision with known flags, node and parents | 2012 """add a raw revision with known flags, node and parents |
1902 useful when reusing a revision not stored in this revlog (ex: received | 2013 useful when reusing a revision not stored in this revlog (ex: received |
1903 over wire, or read from an external bundle). | 2014 over wire, or read from an external bundle). |
1904 """ | 2015 """ |
1905 dfh = None | 2016 dfh = None |
1906 if not self._inline: | 2017 if not self._inline: |
1907 dfh = self._datafp("a+") | 2018 dfh = self._datafp("a+") |
1908 ifh = self._indexfp("a+") | 2019 ifh = self._indexfp("a+") |
1909 try: | 2020 try: |
1910 return self._addrevision(node, rawtext, transaction, link, p1, p2, | 2021 return self._addrevision( |
1911 flags, cachedelta, ifh, dfh, | 2022 node, |
1912 deltacomputer=deltacomputer) | 2023 rawtext, |
2024 transaction, | |
2025 link, | |
2026 p1, | |
2027 p2, | |
2028 flags, | |
2029 cachedelta, | |
2030 ifh, | |
2031 dfh, | |
2032 deltacomputer=deltacomputer, | |
2033 ) | |
1913 finally: | 2034 finally: |
1914 if dfh: | 2035 if dfh: |
1915 dfh.close() | 2036 dfh.close() |
1916 ifh.close() | 2037 ifh.close() |
1917 | 2038 |
1964 | 2085 |
1965 if t == 'x': | 2086 if t == 'x': |
1966 try: | 2087 try: |
1967 return _zlibdecompress(data) | 2088 return _zlibdecompress(data) |
1968 except zlib.error as e: | 2089 except zlib.error as e: |
1969 raise error.RevlogError(_('revlog decompress error: %s') % | 2090 raise error.RevlogError( |
1970 stringutil.forcebytestr(e)) | 2091 _('revlog decompress error: %s') |
2092 % stringutil.forcebytestr(e) | |
2093 ) | |
1971 # '\0' is more common than 'u' so it goes first. | 2094 # '\0' is more common than 'u' so it goes first. |
1972 elif t == '\0': | 2095 elif t == '\0': |
1973 return data | 2096 return data |
1974 elif t == 'u': | 2097 elif t == 'u': |
1975 return util.buffer(data, 1) | 2098 return util.buffer(data, 1) |
1984 except KeyError: | 2107 except KeyError: |
1985 raise error.RevlogError(_('unknown compression type %r') % t) | 2108 raise error.RevlogError(_('unknown compression type %r') % t) |
1986 | 2109 |
1987 return compressor.decompress(data) | 2110 return compressor.decompress(data) |
1988 | 2111 |
1989 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags, | 2112 def _addrevision( |
1990 cachedelta, ifh, dfh, alwayscache=False, | 2113 self, |
1991 deltacomputer=None): | 2114 node, |
2115 rawtext, | |
2116 transaction, | |
2117 link, | |
2118 p1, | |
2119 p2, | |
2120 flags, | |
2121 cachedelta, | |
2122 ifh, | |
2123 dfh, | |
2124 alwayscache=False, | |
2125 deltacomputer=None, | |
2126 ): | |
1992 """internal function to add revisions to the log | 2127 """internal function to add revisions to the log |
1993 | 2128 |
1994 see addrevision for argument descriptions. | 2129 see addrevision for argument descriptions. |
1995 | 2130 |
1996 note: "addrevision" takes non-raw text, "_addrevision" takes raw text. | 2131 note: "addrevision" takes non-raw text, "_addrevision" takes raw text. |
2001 invariants: | 2136 invariants: |
2002 - rawtext is optional (can be None); if not set, cachedelta must be set. | 2137 - rawtext is optional (can be None); if not set, cachedelta must be set. |
2003 if both are set, they must correspond to each other. | 2138 if both are set, they must correspond to each other. |
2004 """ | 2139 """ |
2005 if node == nullid: | 2140 if node == nullid: |
2006 raise error.RevlogError(_("%s: attempt to add null revision") % | 2141 raise error.RevlogError( |
2007 self.indexfile) | 2142 _("%s: attempt to add null revision") % self.indexfile |
2143 ) | |
2008 if node == wdirid or node in wdirfilenodeids: | 2144 if node == wdirid or node in wdirfilenodeids: |
2009 raise error.RevlogError(_("%s: attempt to add wdir revision") % | 2145 raise error.RevlogError( |
2010 self.indexfile) | 2146 _("%s: attempt to add wdir revision") % self.indexfile |
2147 ) | |
2011 | 2148 |
2012 if self._inline: | 2149 if self._inline: |
2013 fh = ifh | 2150 fh = ifh |
2014 else: | 2151 else: |
2015 fh = dfh | 2152 fh = dfh |
2025 # become comparable to the uncompressed text | 2162 # become comparable to the uncompressed text |
2026 if rawtext is None: | 2163 if rawtext is None: |
2027 # need rawtext size, before changed by flag processors, which is | 2164 # need rawtext size, before changed by flag processors, which is |
2028 # the non-raw size. use revlog explicitly to avoid filelog's extra | 2165 # the non-raw size. use revlog explicitly to avoid filelog's extra |
2029 # logic that might remove metadata size. | 2166 # logic that might remove metadata size. |
2030 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]), | 2167 textlen = mdiff.patchedsize( |
2031 cachedelta[1]) | 2168 revlog.size(self, cachedelta[0]), cachedelta[1] |
2169 ) | |
2032 else: | 2170 else: |
2033 textlen = len(rawtext) | 2171 textlen = len(rawtext) |
2034 | 2172 |
2035 if deltacomputer is None: | 2173 if deltacomputer is None: |
2036 deltacomputer = deltautil.deltacomputer(self) | 2174 deltacomputer = deltautil.deltacomputer(self) |
2037 | 2175 |
2038 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags) | 2176 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags) |
2039 | 2177 |
2040 deltainfo = deltacomputer.finddeltainfo(revinfo, fh) | 2178 deltainfo = deltacomputer.finddeltainfo(revinfo, fh) |
2041 | 2179 |
2042 e = (offset_type(offset, flags), deltainfo.deltalen, textlen, | 2180 e = ( |
2043 deltainfo.base, link, p1r, p2r, node) | 2181 offset_type(offset, flags), |
2182 deltainfo.deltalen, | |
2183 textlen, | |
2184 deltainfo.base, | |
2185 link, | |
2186 p1r, | |
2187 p2r, | |
2188 node, | |
2189 ) | |
2044 self.index.append(e) | 2190 self.index.append(e) |
2045 self.nodemap[node] = curr | 2191 self.nodemap[node] = curr |
2046 | 2192 |
2047 # Reset the pure node cache start lookup offset to account for new | 2193 # Reset the pure node cache start lookup offset to account for new |
2048 # revision. | 2194 # revision. |
2049 if self._nodepos is not None: | 2195 if self._nodepos is not None: |
2050 self._nodepos = curr | 2196 self._nodepos = curr |
2051 | 2197 |
2052 entry = self._io.packentry(e, self.node, self.version, curr) | 2198 entry = self._io.packentry(e, self.node, self.version, curr) |
2053 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data, | 2199 self._writeentry( |
2054 link, offset) | 2200 transaction, ifh, dfh, entry, deltainfo.data, link, offset |
2201 ) | |
2055 | 2202 |
2056 rawtext = btext[0] | 2203 rawtext = btext[0] |
2057 | 2204 |
2058 if alwayscache and rawtext is None: | 2205 if alwayscache and rawtext is None: |
2059 rawtext = deltacomputer.buildtext(revinfo, fh) | 2206 rawtext = deltacomputer.buildtext(revinfo, fh) |
2060 | 2207 |
2061 if type(rawtext) == bytes: # only accept immutable objects | 2208 if type(rawtext) == bytes: # only accept immutable objects |
2062 self._revisioncache = (node, curr, rawtext) | 2209 self._revisioncache = (node, curr, rawtext) |
2063 self._chainbasecache[curr] = deltainfo.chainbase | 2210 self._chainbasecache[curr] = deltainfo.chainbase |
2064 return node | 2211 return node |
2065 | 2212 |
2066 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset): | 2213 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset): |
2124 dfh = None | 2271 dfh = None |
2125 else: | 2272 else: |
2126 transaction.add(self.indexfile, isize, r) | 2273 transaction.add(self.indexfile, isize, r) |
2127 transaction.add(self.datafile, end) | 2274 transaction.add(self.datafile, end) |
2128 dfh = self._datafp("a+") | 2275 dfh = self._datafp("a+") |
2276 | |
2129 def flush(): | 2277 def flush(): |
2130 if dfh: | 2278 if dfh: |
2131 dfh.flush() | 2279 dfh.flush() |
2132 ifh.flush() | 2280 ifh.flush() |
2133 | 2281 |
2148 # this can happen if two branches make the same change | 2296 # this can happen if two branches make the same change |
2149 continue | 2297 continue |
2150 | 2298 |
2151 for p in (p1, p2): | 2299 for p in (p1, p2): |
2152 if p not in self.nodemap: | 2300 if p not in self.nodemap: |
2153 raise error.LookupError(p, self.indexfile, | 2301 raise error.LookupError( |
2154 _('unknown parent')) | 2302 p, self.indexfile, _('unknown parent') |
2303 ) | |
2155 | 2304 |
2156 if deltabase not in self.nodemap: | 2305 if deltabase not in self.nodemap: |
2157 raise error.LookupError(deltabase, self.indexfile, | 2306 raise error.LookupError( |
2158 _('unknown delta base')) | 2307 deltabase, self.indexfile, _('unknown delta base') |
2308 ) | |
2159 | 2309 |
2160 baserev = self.rev(deltabase) | 2310 baserev = self.rev(deltabase) |
2161 | 2311 |
2162 if baserev != nullrev and self.iscensored(baserev): | 2312 if baserev != nullrev and self.iscensored(baserev): |
2163 # if base is censored, delta must be full replacement in a | 2313 # if base is censored, delta must be full replacement in a |
2164 # single patch operation | 2314 # single patch operation |
2165 hlen = struct.calcsize(">lll") | 2315 hlen = struct.calcsize(">lll") |
2166 oldlen = self.rawsize(baserev) | 2316 oldlen = self.rawsize(baserev) |
2167 newlen = len(delta) - hlen | 2317 newlen = len(delta) - hlen |
2168 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen): | 2318 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen): |
2169 raise error.CensoredBaseError(self.indexfile, | 2319 raise error.CensoredBaseError( |
2170 self.node(baserev)) | 2320 self.indexfile, self.node(baserev) |
2321 ) | |
2171 | 2322 |
2172 if not flags and self._peek_iscensored(baserev, delta, flush): | 2323 if not flags and self._peek_iscensored(baserev, delta, flush): |
2173 flags |= REVIDX_ISCENSORED | 2324 flags |= REVIDX_ISCENSORED |
2174 | 2325 |
2175 # We assume consumers of addrevisioncb will want to retrieve | 2326 # We assume consumers of addrevisioncb will want to retrieve |
2177 # revision(). revision() will fast path if there is a cache | 2328 # revision(). revision() will fast path if there is a cache |
2178 # hit. So, we tell _addrevision() to always cache in this case. | 2329 # hit. So, we tell _addrevision() to always cache in this case. |
2179 # We're only using addgroup() in the context of changegroup | 2330 # We're only using addgroup() in the context of changegroup |
2180 # generation so the revision data can always be handled as raw | 2331 # generation so the revision data can always be handled as raw |
2181 # by the flagprocessor. | 2332 # by the flagprocessor. |
2182 self._addrevision(node, None, transaction, link, | 2333 self._addrevision( |
2183 p1, p2, flags, (baserev, delta), | 2334 node, |
2184 ifh, dfh, | 2335 None, |
2185 alwayscache=bool(addrevisioncb), | 2336 transaction, |
2186 deltacomputer=deltacomputer) | 2337 link, |
2338 p1, | |
2339 p2, | |
2340 flags, | |
2341 (baserev, delta), | |
2342 ifh, | |
2343 dfh, | |
2344 alwayscache=bool(addrevisioncb), | |
2345 deltacomputer=deltacomputer, | |
2346 ) | |
2187 | 2347 |
2188 if addrevisioncb: | 2348 if addrevisioncb: |
2189 addrevisioncb(self, node) | 2349 addrevisioncb(self, node) |
2190 | 2350 |
2191 if not dfh and not self._inline: | 2351 if not dfh and not self._inline: |
2222 """find the minimum rev that must be stripped to strip the linkrev | 2382 """find the minimum rev that must be stripped to strip the linkrev |
2223 | 2383 |
2224 Returns a tuple containing the minimum rev and a set of all revs that | 2384 Returns a tuple containing the minimum rev and a set of all revs that |
2225 have linkrevs that will be broken by this strip. | 2385 have linkrevs that will be broken by this strip. |
2226 """ | 2386 """ |
2227 return storageutil.resolvestripinfo(minlink, len(self) - 1, | 2387 return storageutil.resolvestripinfo( |
2228 self.headrevs(), | 2388 minlink, |
2229 self.linkrev, self.parentrevs) | 2389 len(self) - 1, |
2390 self.headrevs(), | |
2391 self.linkrev, | |
2392 self.parentrevs, | |
2393 ) | |
2230 | 2394 |
2231 def strip(self, minlink, transaction): | 2395 def strip(self, minlink, transaction): |
2232 """truncate the revlog on the first revision with a linkrev >= minlink | 2396 """truncate the revlog on the first revision with a linkrev >= minlink |
2233 | 2397 |
2234 This function is called when we're stripping revision minlink and | 2398 This function is called when we're stripping revision minlink and |
2317 res = [self.indexfile] | 2481 res = [self.indexfile] |
2318 if not self._inline: | 2482 if not self._inline: |
2319 res.append(self.datafile) | 2483 res.append(self.datafile) |
2320 return res | 2484 return res |
2321 | 2485 |
2322 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False, | 2486 def emitrevisions( |
2323 assumehaveparentrevisions=False, | 2487 self, |
2324 deltamode=repository.CG_DELTAMODE_STD): | 2488 nodes, |
2489 nodesorder=None, | |
2490 revisiondata=False, | |
2491 assumehaveparentrevisions=False, | |
2492 deltamode=repository.CG_DELTAMODE_STD, | |
2493 ): | |
2325 if nodesorder not in ('nodes', 'storage', 'linear', None): | 2494 if nodesorder not in ('nodes', 'storage', 'linear', None): |
2326 raise error.ProgrammingError('unhandled value for nodesorder: %s' % | 2495 raise error.ProgrammingError( |
2327 nodesorder) | 2496 'unhandled value for nodesorder: %s' % nodesorder |
2497 ) | |
2328 | 2498 |
2329 if nodesorder is None and not self._generaldelta: | 2499 if nodesorder is None and not self._generaldelta: |
2330 nodesorder = 'storage' | 2500 nodesorder = 'storage' |
2331 | 2501 |
2332 if (not self._storedeltachains and | 2502 if ( |
2333 deltamode != repository.CG_DELTAMODE_PREV): | 2503 not self._storedeltachains |
2504 and deltamode != repository.CG_DELTAMODE_PREV | |
2505 ): | |
2334 deltamode = repository.CG_DELTAMODE_FULL | 2506 deltamode = repository.CG_DELTAMODE_FULL |
2335 | 2507 |
2336 return storageutil.emitrevisions( | 2508 return storageutil.emitrevisions( |
2337 self, nodes, nodesorder, revlogrevisiondelta, | 2509 self, |
2510 nodes, | |
2511 nodesorder, | |
2512 revlogrevisiondelta, | |
2338 deltaparentfn=self.deltaparent, | 2513 deltaparentfn=self.deltaparent, |
2339 candeltafn=self.candelta, | 2514 candeltafn=self.candelta, |
2340 rawsizefn=self.rawsize, | 2515 rawsizefn=self.rawsize, |
2341 revdifffn=self.revdiff, | 2516 revdifffn=self.revdiff, |
2342 flagsfn=self.flags, | 2517 flagsfn=self.flags, |
2343 deltamode=deltamode, | 2518 deltamode=deltamode, |
2344 revisiondata=revisiondata, | 2519 revisiondata=revisiondata, |
2345 assumehaveparentrevisions=assumehaveparentrevisions) | 2520 assumehaveparentrevisions=assumehaveparentrevisions, |
2521 ) | |
2346 | 2522 |
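A minimal sketch of driving emitrevisions() with the keyword names visible above; the attribute names on the yielded objects (node, revision, delta) are assumptions based on revlogrevisiondelta and are not shown in this hunk:

    def iter_fulltexts_or_deltas(rl, nodes):
        for d in rl.emitrevisions(
            nodes,
            nodesorder='storage',   # must be 'nodes', 'storage', 'linear' or None
            revisiondata=True,      # request fulltexts/deltas, not just metadata
        ):
            # each emitted object carries either a full revision or a delta
            yield d.node, (d.revision if d.revision is not None else d.delta)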
2347 DELTAREUSEALWAYS = 'always' | 2523 DELTAREUSEALWAYS = 'always' |
2348 DELTAREUSESAMEREVS = 'samerevs' | 2524 DELTAREUSESAMEREVS = 'samerevs' |
2349 DELTAREUSENEVER = 'never' | 2525 DELTAREUSENEVER = 'never' |
2350 | 2526 |
2351 DELTAREUSEFULLADD = 'fulladd' | 2527 DELTAREUSEFULLADD = 'fulladd' |
2352 | 2528 |
2353 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'} | 2529 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'} |
2354 | 2530 |
2355 def clone(self, tr, destrevlog, addrevisioncb=None, | 2531 def clone( |
2356 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None): | 2532 self, |
2533 tr, | |
2534 destrevlog, | |
2535 addrevisioncb=None, | |
2536 deltareuse=DELTAREUSESAMEREVS, | |
2537 forcedeltabothparents=None, | |
2538 ): | |
2357 """Copy this revlog to another, possibly with format changes. | 2539 """Copy this revlog to another, possibly with format changes. |
2358 | 2540 |
2359 The destination revlog will contain the same revisions and nodes. | 2541 The destination revlog will contain the same revisions and nodes. |
2360 However, it may not be bit-for-bit identical due to e.g. delta encoding | 2542 However, it may not be bit-for-bit identical due to e.g. delta encoding |
2361 differences. | 2543 differences. |
2423 destrevlog._lazydeltabase = False | 2605 destrevlog._lazydeltabase = False |
2424 destrevlog._lazydelta = False | 2606 destrevlog._lazydelta = False |
2425 | 2607 |
2426 destrevlog._deltabothparents = forcedeltabothparents or oldamd | 2608 destrevlog._deltabothparents = forcedeltabothparents or oldamd |
2427 | 2609 |
2428 self._clone(tr, destrevlog, addrevisioncb, deltareuse, | 2610 self._clone( |
2429 forcedeltabothparents) | 2611 tr, destrevlog, addrevisioncb, deltareuse, forcedeltabothparents |
2612 ) | |
2430 | 2613 |
2431 finally: | 2614 finally: |
2432 destrevlog._lazydelta = oldlazydelta | 2615 destrevlog._lazydelta = oldlazydelta |
2433 destrevlog._lazydeltabase = oldlazydeltabase | 2616 destrevlog._lazydeltabase = oldlazydeltabase |
2434 destrevlog._deltabothparents = oldamd | 2617 destrevlog._deltabothparents = oldamd |
2435 | 2618 |
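A minimal sketch of the public clone() entry point reformatted above, assuming an open transaction and an already-constructed destination revlog; the callback shape (source revlog, rev, node) matches the _clone() call site later in this diff:

    def redeltify(tr, srcrevlog, destrevlog):
        def onclone(rl, rev, node):
            pass  # called once per copied revision, e.g. to report progress

        srcrevlog.clone(
            tr,
            destrevlog,
            addrevisioncb=onclone,
            deltareuse=srcrevlog.DELTAREUSENEVER,  # recompute every delta
        )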
2436 def _clone(self, tr, destrevlog, addrevisioncb, deltareuse, | 2619 def _clone( |
2437 forcedeltabothparents): | 2620 self, tr, destrevlog, addrevisioncb, deltareuse, forcedeltabothparents |
2621 ): | |
2438 """perform the core duty of `revlog.clone` after parameter processing""" | 2622 """perform the core duty of `revlog.clone` after parameter processing""" |
2439 deltacomputer = deltautil.deltacomputer(destrevlog) | 2623 deltacomputer = deltautil.deltacomputer(destrevlog) |
2440 index = self.index | 2624 index = self.index |
2441 for rev in self: | 2625 for rev in self: |
2442 entry = index[rev] | 2626 entry = index[rev] |
2443 | 2627 |
2444 # Some classes override linkrev to take filtered revs into | 2628 # Some classes override linkrev to take filtered revs into |
2445 # account. Use raw entry from index. | 2629 # account. Use raw entry from index. |
2446 flags = entry[0] & 0xffff | 2630 flags = entry[0] & 0xFFFF |
2447 linkrev = entry[4] | 2631 linkrev = entry[4] |
2448 p1 = index[entry[5]][7] | 2632 p1 = index[entry[5]][7] |
2449 p2 = index[entry[6]][7] | 2633 p2 = index[entry[6]][7] |
2450 node = entry[7] | 2634 node = entry[7] |
2451 | 2635 |
2453 # the revlog chunk is a delta. | 2637 # the revlog chunk is a delta. |
2454 cachedelta = None | 2638 cachedelta = None |
2455 rawtext = None | 2639 rawtext = None |
2456 if deltareuse == self.DELTAREUSEFULLADD: | 2640 if deltareuse == self.DELTAREUSEFULLADD: |
2457 text = self.revision(rev) | 2641 text = self.revision(rev) |
2458 destrevlog.addrevision(text, tr, linkrev, p1, p2, | 2642 destrevlog.addrevision( |
2459 cachedelta=cachedelta, | 2643 text, |
2460 node=node, flags=flags, | 2644 tr, |
2461 deltacomputer=deltacomputer) | 2645 linkrev, |
2646 p1, | |
2647 p2, | |
2648 cachedelta=cachedelta, | |
2649 node=node, | |
2650 flags=flags, | |
2651 deltacomputer=deltacomputer, | |
2652 ) | |
2462 else: | 2653 else: |
2463 if destrevlog._lazydelta: | 2654 if destrevlog._lazydelta: |
2464 dp = self.deltaparent(rev) | 2655 dp = self.deltaparent(rev) |
2465 if dp != nullrev: | 2656 if dp != nullrev: |
2466 cachedelta = (dp, bytes(self._chunk(rev))) | 2657 cachedelta = (dp, bytes(self._chunk(rev))) |
2467 | 2658 |
2468 if not cachedelta: | 2659 if not cachedelta: |
2469 rawtext = self.rawdata(rev) | 2660 rawtext = self.rawdata(rev) |
2470 | 2661 |
2471 ifh = destrevlog.opener(destrevlog.indexfile, 'a+', | 2662 ifh = destrevlog.opener( |
2472 checkambig=False) | 2663 destrevlog.indexfile, 'a+', checkambig=False |
2664 ) | |
2473 dfh = None | 2665 dfh = None |
2474 if not destrevlog._inline: | 2666 if not destrevlog._inline: |
2475 dfh = destrevlog.opener(destrevlog.datafile, 'a+') | 2667 dfh = destrevlog.opener(destrevlog.datafile, 'a+') |
2476 try: | 2668 try: |
2477 destrevlog._addrevision(node, rawtext, tr, linkrev, p1, | 2669 destrevlog._addrevision( |
2478 p2, flags, cachedelta, ifh, dfh, | 2670 node, |
2479 deltacomputer=deltacomputer) | 2671 rawtext, |
2672 tr, | |
2673 linkrev, | |
2674 p1, | |
2675 p2, | |
2676 flags, | |
2677 cachedelta, | |
2678 ifh, | |
2679 dfh, | |
2680 deltacomputer=deltacomputer, | |
2681 ) | |
2480 finally: | 2682 finally: |
2481 if dfh: | 2683 if dfh: |
2482 dfh.close() | 2684 dfh.close() |
2483 ifh.close() | 2685 ifh.close() |
2484 | 2686 |
2485 if addrevisioncb: | 2687 if addrevisioncb: |
2486 addrevisioncb(self, rev, node) | 2688 addrevisioncb(self, rev, node) |
2487 | 2689 |
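Grounded only in the two call sites visible in this diff: addgroup() invokes its callback as (revlog, node) while _clone() uses (revlog, rev, node), so a callback shared between the two paths has to tolerate both shapes. A hedged sketch:

    def onnewrevision(rl, *args):
        if len(args) == 1:        # addgroup path: (node,)
            node, = args
            rev = rl.rev(node)
        else:                     # clone path: (rev, node)
            rev, node = args
        return rev, node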
2488 def censorrevision(self, tr, censornode, tombstone=b''): | 2690 def censorrevision(self, tr, censornode, tombstone=b''): |
2489 if (self.version & 0xFFFF) == REVLOGV0: | 2691 if (self.version & 0xFFFF) == REVLOGV0: |
2490 raise error.RevlogError(_('cannot censor with version %d revlogs') % | 2692 raise error.RevlogError( |
2491 self.version) | 2693 _('cannot censor with version %d revlogs') % self.version |
2694 ) | |
2492 | 2695 |
2493 censorrev = self.rev(censornode) | 2696 censorrev = self.rev(censornode) |
2494 tombstone = storageutil.packmeta({b'censored': tombstone}, b'') | 2697 tombstone = storageutil.packmeta({b'censored': tombstone}, b'') |
2495 | 2698 |
2496 if len(tombstone) > self.rawsize(censorrev): | 2699 if len(tombstone) > self.rawsize(censorrev): |
2497 raise error.Abort(_('censor tombstone must be no longer than ' | 2700 raise error.Abort( |
2498 'censored data')) | 2701 _('censor tombstone must be no longer than ' 'censored data') |
2702 ) | |
2499 | 2703 |
2500 # Rewriting the revlog in place is hard. Our strategy for censoring is | 2704 # Rewriting the revlog in place is hard. Our strategy for censoring is |
2501 # to create a new revlog, copy all revisions to it, then replace the | 2705 # to create a new revlog, copy all revisions to it, then replace the |
2502 # revlogs on transaction close. | 2706 # revlogs on transaction close. |
2503 | 2707 |
2504 newindexfile = self.indexfile + b'.tmpcensored' | 2708 newindexfile = self.indexfile + b'.tmpcensored' |
2505 newdatafile = self.datafile + b'.tmpcensored' | 2709 newdatafile = self.datafile + b'.tmpcensored' |
2506 | 2710 |
2507 # This is a bit dangerous. We could easily have a mismatch of state. | 2711 # This is a bit dangerous. We could easily have a mismatch of state. |
2508 newrl = revlog(self.opener, newindexfile, newdatafile, | 2712 newrl = revlog(self.opener, newindexfile, newdatafile, censorable=True) |
2509 censorable=True) | |
2510 newrl.version = self.version | 2713 newrl.version = self.version |
2511 newrl._generaldelta = self._generaldelta | 2714 newrl._generaldelta = self._generaldelta |
2512 newrl._io = self._io | 2715 newrl._io = self._io |
2513 | 2716 |
2514 for rev in self.revs(): | 2717 for rev in self.revs(): |
2515 node = self.node(rev) | 2718 node = self.node(rev) |
2516 p1, p2 = self.parents(node) | 2719 p1, p2 = self.parents(node) |
2517 | 2720 |
2518 if rev == censorrev: | 2721 if rev == censorrev: |
2519 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev), | 2722 newrl.addrawrevision( |
2520 p1, p2, censornode, REVIDX_ISCENSORED) | 2723 tombstone, |
2724 tr, | |
2725 self.linkrev(censorrev), | |
2726 p1, | |
2727 p2, | |
2728 censornode, | |
2729 REVIDX_ISCENSORED, | |
2730 ) | |
2521 | 2731 |
2522 if newrl.deltaparent(rev) != nullrev: | 2732 if newrl.deltaparent(rev) != nullrev: |
2523 raise error.Abort(_('censored revision stored as delta; ' | 2733 raise error.Abort( |
2524 'cannot censor'), | 2734 _( |
2525 hint=_('censoring of revlogs is not ' | 2735 'censored revision stored as delta; ' |
2526 'fully implemented; please report ' | 2736 'cannot censor' |
2527 'this bug')) | 2737 ), |
2738 hint=_( | |
2739 'censoring of revlogs is not ' | |
2740 'fully implemented; please report ' | |
2741 'this bug' | |
2742 ), | |
2743 ) | |
2528 continue | 2744 continue |
2529 | 2745 |
2530 if self.iscensored(rev): | 2746 if self.iscensored(rev): |
2531 if self.deltaparent(rev) != nullrev: | 2747 if self.deltaparent(rev) != nullrev: |
2532 raise error.Abort(_('cannot censor due to censored ' | 2748 raise error.Abort( |
2533 'revision having delta stored')) | 2749 _( |
2750 'cannot censor due to censored ' | |
2751 'revision having delta stored' | |
2752 ) | |
2753 ) | |
2534 rawtext = self._chunk(rev) | 2754 rawtext = self._chunk(rev) |
2535 else: | 2755 else: |
2536 rawtext = self.rawdata(rev) | 2756 rawtext = self.rawdata(rev) |
2537 | 2757 |
2538 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node, | 2758 newrl.addrawrevision( |
2539 self.flags(rev)) | 2759 rawtext, tr, self.linkrev(rev), p1, p2, node, self.flags(rev) |
2760 ) | |
2540 | 2761 |
2541 tr.addbackup(self.indexfile, location='store') | 2762 tr.addbackup(self.indexfile, location='store') |
2542 if not self._inline: | 2763 if not self._inline: |
2543 tr.addbackup(self.datafile, location='store') | 2764 tr.addbackup(self.datafile, location='store') |
2544 | 2765 |
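A minimal sketch of censoring one file revision, assuming a localrepo-style repo object and the usual wlock/lock/transaction handling; only the censorrevision() signature and the tombstone-length constraint come from this hunk, the rest is assumed:

    def censor_file_revision(repo, path, censornode, tombstone=b''):
        with repo.wlock(), repo.lock(), repo.transaction(b'censor') as tr:
            fl = repo.file(path)  # filelogs are revlog-backed
            # the packed tombstone must fit within the censored data's raw
            # size, per the length check above
            fl.censorrevision(tr, censornode, tombstone=tombstone)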
2564 version = self.version & 0xFFFF | 2785 version = self.version & 0xFFFF |
2565 | 2786 |
2566 # The verifier tells us what version revlog we should be. | 2787 # The verifier tells us what version revlog we should be. |
2567 if version != state['expectedversion']: | 2788 if version != state['expectedversion']: |
2568 yield revlogproblem( | 2789 yield revlogproblem( |
2569 warning=_("warning: '%s' uses revlog format %d; expected %d") % | 2790 warning=_("warning: '%s' uses revlog format %d; expected %d") |
2570 (self.indexfile, version, state['expectedversion'])) | 2791 % (self.indexfile, version, state['expectedversion']) |
2792 ) | |
2571 | 2793 |
2572 state['skipread'] = set() | 2794 state['skipread'] = set() |
2573 | 2795 |
2574 for rev in self: | 2796 for rev in self: |
2575 node = self.node(rev) | 2797 node = self.node(rev) |
2637 l2 = len(self.rawdata(node)) | 2859 l2 = len(self.rawdata(node)) |
2638 | 2860 |
2639 if l1 != l2: | 2861 if l1 != l2: |
2640 yield revlogproblem( | 2862 yield revlogproblem( |
2641 error=_('unpacked size is %d, %d expected') % (l2, l1), | 2863 error=_('unpacked size is %d, %d expected') % (l2, l1), |
2642 node=node) | 2864 node=node, |
2865 ) | |
2643 | 2866 |
2644 except error.CensoredNodeError: | 2867 except error.CensoredNodeError: |
2645 if state['erroroncensored']: | 2868 if state['erroroncensored']: |
2646 yield revlogproblem(error=_('censored file data'), | 2869 yield revlogproblem( |
2647 node=node) | 2870 error=_('censored file data'), node=node |
2871 ) | |
2648 state['skipread'].add(node) | 2872 state['skipread'].add(node) |
2649 except Exception as e: | 2873 except Exception as e: |
2650 yield revlogproblem( | 2874 yield revlogproblem( |
2651 error=_('unpacking %s: %s') % (short(node), | 2875 error=_('unpacking %s: %s') |
2652 stringutil.forcebytestr(e)), | 2876 % (short(node), stringutil.forcebytestr(e)), |
2653 node=node) | 2877 node=node, |
2878 ) | |
2654 state['skipread'].add(node) | 2879 state['skipread'].add(node) |
2655 | 2880 |
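A minimal sketch of consuming the integrity-check generator above; the method name verifyintegrity and the revlogproblem attributes (warning, error) are assumptions, while the state keys come straight from this hunk:

    def collect_problems(rl, expectedversion):
        state = {
            'expectedversion': expectedversion,
            'erroroncensored': False,  # censored data yields a problem when True
        }
        problems = []
        for problem in rl.verifyintegrity(state):
            problems.append(problem.error or problem.warning)
        # nodes whose data could not be unpacked end up in state['skipread']
        return problems, state['skipread']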
2656 def storageinfo(self, exclusivefiles=False, sharedfiles=False, | 2881 def storageinfo( |
2657 revisionscount=False, trackedsize=False, | 2882 self, |
2658 storedsize=False): | 2883 exclusivefiles=False, |
2884 sharedfiles=False, | |
2885 revisionscount=False, | |
2886 trackedsize=False, | |
2887 storedsize=False, | |
2888 ): | |
2659 d = {} | 2889 d = {} |
2660 | 2890 |
2661 if exclusivefiles: | 2891 if exclusivefiles: |
2662 d['exclusivefiles'] = [(self.opener, self.indexfile)] | 2892 d['exclusivefiles'] = [(self.opener, self.indexfile)] |
2663 if not self._inline: | 2893 if not self._inline: |
2671 | 2901 |
2672 if trackedsize: | 2902 if trackedsize: |
2673 d['trackedsize'] = sum(map(self.rawsize, iter(self))) | 2903 d['trackedsize'] = sum(map(self.rawsize, iter(self))) |
2674 | 2904 |
2675 if storedsize: | 2905 if storedsize: |
2676 d['storedsize'] = sum(self.opener.stat(path).st_size | 2906 d['storedsize'] = sum( |
2677 for path in self.files()) | 2907 self.opener.stat(path).st_size for path in self.files() |
2908 ) | |
2678 | 2909 |
2679 return d | 2910 return d |
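A minimal sketch of querying the size accounting assembled above; only keyword arguments and dictionary keys visible in this hunk are used:

    def size_summary(rl):
        info = rl.storageinfo(trackedsize=True, storedsize=True)
        return {
            'trackedsize': info['trackedsize'],  # sum of raw revision sizes
            'storedsize': info['storedsize'],    # on-disk bytes across rl.files()
        }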