comparison mercurial/revlogutils/deltas.py @ 43076:2372284d9457
formatting: blacken the codebase
This is using my patch to black
(https://github.com/psf/black/pull/826) so we don't un-wrap collection
literals.
Done with:
hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"' | xargs black -S
# skip-blame mass-reformatting only
# no-check-commit reformats foo_bar functions
Differential Revision: https://phab.mercurial-scm.org/D6971
author | Augie Fackler <augie@google.com> |
date | Sun, 06 Oct 2019 09:45:02 -0400 |
parents | dff95420480f |
children | 687b865b95ad |
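
Most of the churn in the comparison below falls into two patterns: parenthesized imports of a single name are collapsed onto one line (for example, from ..thirdparty import attr), and calls or conditions that no longer fit the line width are exploded with one argument per line. The sketch that follows shows how one of the reflowed hunks can be approximated through black's Python API. It is illustrative only: it assumes a recent black release exposing format_str and Mode, the line_length=80 value is an assumption inferred from where the hunks wrap, and the output may differ from this changeset, which used the patched black linked above. string_normalization=False is the API counterpart of the -S (skip string normalization) flag in the command from the commit message.

import black

# Snippet shaped like the slicechunk() hunk in the comparison below; black only
# parses this string, it is never executed, so the undefined names do not matter.
SRC = """\
def slicechunk(revlog, revs, targetsize=None):
    for chunk in densityslicing(revs,
                                revlog._srdensitythreshold,
                                revlog._srmingapsize):
        yield chunk
"""

# line_length=80 is an assumption (inferred from the reflowed lines below);
# string_normalization=False mirrors the -S flag used in the commit message.
mode = black.Mode(line_length=80, string_normalization=False)
print(black.format_str(SRC, mode=mode))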
comparison
43075:57875cf423c9 | 43076:2372284d9457 |
11 | 11 |
12 import collections | 12 import collections |
13 import struct | 13 import struct |
14 | 14 |
15 # import stuff from node for others to import from revlog | 15 # import stuff from node for others to import from revlog |
16 from ..node import ( | 16 from ..node import nullrev |
17 nullrev, | |
18 ) | |
19 from ..i18n import _ | 17 from ..i18n import _ |
20 | 18 |
21 from .constants import ( | 19 from .constants import ( |
22 REVIDX_ISCENSORED, | 20 REVIDX_ISCENSORED, |
23 REVIDX_RAWTEXT_CHANGING_FLAGS, | 21 REVIDX_RAWTEXT_CHANGING_FLAGS, |
24 ) | 22 ) |
25 | 23 |
26 from ..thirdparty import ( | 24 from ..thirdparty import attr |
27 attr, | |
28 ) | |
29 | 25 |
30 from .. import ( | 26 from .. import ( |
31 error, | 27 error, |
32 mdiff, | 28 mdiff, |
33 util, | 29 util, |
34 ) | 30 ) |
35 | 31 |
36 from . import ( | 32 from . import flagutil |
37 flagutil, | |
38 ) | |
39 | 33 |
40 # maximum <delta-chain-data>/<revision-text-length> ratio | 34 # maximum <delta-chain-data>/<revision-text-length> ratio |
41 LIMIT_DELTA2TEXT = 2 | 35 LIMIT_DELTA2TEXT = 2 |
36 | |
42 | 37 |
43 class _testrevlog(object): | 38 class _testrevlog(object): |
44 """minimalist fake revlog to use in doctests""" | 39 """minimalist fake revlog to use in doctests""" |
45 | 40 |
46 def __init__(self, data, density=0.5, mingap=0, snapshot=()): | 41 def __init__(self, data, density=0.5, mingap=0, snapshot=()): |
71 | 66 |
72 def issnapshot(self, rev): | 67 def issnapshot(self, rev): |
73 if rev == nullrev: | 68 if rev == nullrev: |
74 return True | 69 return True |
75 return rev in self._snapshot | 70 return rev in self._snapshot |
71 | |
76 | 72 |
77 def slicechunk(revlog, revs, targetsize=None): | 73 def slicechunk(revlog, revs, targetsize=None): |
78 """slice revs to reduce the amount of unrelated data to be read from disk. | 74 """slice revs to reduce the amount of unrelated data to be read from disk. |
79 | 75 |
80 ``revs`` is sliced into groups that should be read in one time. | 76 ``revs`` is sliced into groups that should be read in one time. |
139 # targetsize should not be specified when evaluating delta candidates: | 135 # targetsize should not be specified when evaluating delta candidates: |
140 # * targetsize is used to ensure we stay within specification when reading, | 136 # * targetsize is used to ensure we stay within specification when reading, |
141 densityslicing = getattr(revlog.index, 'slicechunktodensity', None) | 137 densityslicing = getattr(revlog.index, 'slicechunktodensity', None) |
142 if densityslicing is None: | 138 if densityslicing is None: |
143 densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z) | 139 densityslicing = lambda x, y, z: _slicechunktodensity(revlog, x, y, z) |
144 for chunk in densityslicing(revs, | 140 for chunk in densityslicing( |
145 revlog._srdensitythreshold, | 141 revs, revlog._srdensitythreshold, revlog._srmingapsize |
146 revlog._srmingapsize): | 142 ): |
147 for subchunk in _slicechunktosize(revlog, chunk, targetsize): | 143 for subchunk in _slicechunktosize(revlog, chunk, targetsize): |
148 yield subchunk | 144 yield subchunk |
145 | |
149 | 146 |
150 def _slicechunktosize(revlog, revs, targetsize=None): | 147 def _slicechunktosize(revlog, revs, targetsize=None): |
151 """slice revs to match the target size | 148 """slice revs to match the target size |
152 | 149 |
153 This is intended to be used on chunk that density slicing selected by that | 150 This is intended to be used on chunk that density slicing selected by that |
255 return | 252 return |
256 | 253 |
257 startrevidx = 0 | 254 startrevidx = 0 |
258 endrevidx = 1 | 255 endrevidx = 1 |
259 iterrevs = enumerate(revs) | 256 iterrevs = enumerate(revs) |
260 next(iterrevs) # skip first rev. | 257 next(iterrevs) # skip first rev. |
261 # first step: get snapshots out of the way | 258 # first step: get snapshots out of the way |
262 for idx, r in iterrevs: | 259 for idx, r in iterrevs: |
263 span = revlog.end(r) - startdata | 260 span = revlog.end(r) - startdata |
264 snapshot = revlog.issnapshot(r) | 261 snapshot = revlog.issnapshot(r) |
265 if span <= targetsize and snapshot: | 262 if span <= targetsize and snapshot: |
280 # focuses on quickly converging toward valid chunks. | 277 # focuses on quickly converging toward valid chunks. |
281 nbitem = len(revs) | 278 nbitem = len(revs) |
282 while (enddata - startdata) > targetsize: | 279 while (enddata - startdata) > targetsize: |
283 endrevidx = nbitem | 280 endrevidx = nbitem |
284 if nbitem - startrevidx <= 1: | 281 if nbitem - startrevidx <= 1: |
285 break # protect against individual chunk larger than limit | 282 break # protect against individual chunk larger than limit |
286 localenddata = revlog.end(revs[endrevidx - 1]) | 283 localenddata = revlog.end(revs[endrevidx - 1]) |
287 span = localenddata - startdata | 284 span = localenddata - startdata |
288 while span > targetsize: | 285 while span > targetsize: |
289 if endrevidx - startrevidx <= 1: | 286 if endrevidx - startrevidx <= 1: |
290 break # protect against individual chunk larger than limit | 287 break # protect against individual chunk larger than limit |
291 endrevidx -= (endrevidx - startrevidx) // 2 | 288 endrevidx -= (endrevidx - startrevidx) // 2 |
292 localenddata = revlog.end(revs[endrevidx - 1]) | 289 localenddata = revlog.end(revs[endrevidx - 1]) |
293 span = localenddata - startdata | 290 span = localenddata - startdata |
294 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx) | 291 chunk = _trimchunk(revlog, revs, startrevidx, endrevidx) |
295 if chunk: | 292 if chunk: |
299 | 296 |
300 chunk = _trimchunk(revlog, revs, startrevidx) | 297 chunk = _trimchunk(revlog, revs, startrevidx) |
301 if chunk: | 298 if chunk: |
302 yield chunk | 299 yield chunk |
303 | 300 |
304 def _slicechunktodensity(revlog, revs, targetdensity=0.5, | 301 |
305 mingapsize=0): | 302 def _slicechunktodensity(revlog, revs, targetdensity=0.5, mingapsize=0): |
306 """slice revs to reduce the amount of unrelated data to be read from disk. | 303 """slice revs to reduce the amount of unrelated data to be read from disk. |
307 | 304 |
308 ``revs`` is sliced into groups that should be read in one time. | 305 ``revs`` is sliced into groups that should be read in one time. |
309 Assume that revs are sorted. | 306 Assume that revs are sorted. |
310 | 307 |
425 | 422 |
426 chunk = _trimchunk(revlog, revs, previdx) | 423 chunk = _trimchunk(revlog, revs, previdx) |
427 if chunk: | 424 if chunk: |
428 yield chunk | 425 yield chunk |
429 | 426 |
427 | |
430 def _trimchunk(revlog, revs, startidx, endidx=None): | 428 def _trimchunk(revlog, revs, startidx, endidx=None): |
431 """returns revs[startidx:endidx] without empty trailing revs | 429 """returns revs[startidx:endidx] without empty trailing revs |
432 | 430 |
433 Doctest Setup | 431 Doctest Setup |
434 >>> revlog = _testrevlog([ | 432 >>> revlog = _testrevlog([ |
471 endidx = len(revs) | 469 endidx = len(revs) |
472 | 470 |
473 # If we have a non-emtpy delta candidate, there are nothing to trim | 471 # If we have a non-emtpy delta candidate, there are nothing to trim |
474 if revs[endidx - 1] < len(revlog): | 472 if revs[endidx - 1] < len(revlog): |
475 # Trim empty revs at the end, except the very first revision of a chain | 473 # Trim empty revs at the end, except the very first revision of a chain |
476 while (endidx > 1 | 474 while ( |
477 and endidx > startidx | 475 endidx > 1 and endidx > startidx and length(revs[endidx - 1]) == 0 |
478 and length(revs[endidx - 1]) == 0): | 476 ): |
479 endidx -= 1 | 477 endidx -= 1 |
480 | 478 |
481 return revs[startidx:endidx] | 479 return revs[startidx:endidx] |
480 | |
482 | 481 |
483 def segmentspan(revlog, revs): | 482 def segmentspan(revlog, revs): |
484 """Get the byte span of a segment of revisions | 483 """Get the byte span of a segment of revisions |
485 | 484 |
486 revs is a sorted array of revision numbers | 485 revs is a sorted array of revision numbers |
507 if not revs: | 506 if not revs: |
508 return 0 | 507 return 0 |
509 end = revlog.end(revs[-1]) | 508 end = revlog.end(revs[-1]) |
510 return end - revlog.start(revs[0]) | 509 return end - revlog.start(revs[0]) |
511 | 510 |
511 | |
512 def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode): | 512 def _textfromdelta(fh, revlog, baserev, delta, p1, p2, flags, expectednode): |
513 """build full text from a (base, delta) pair and other metadata""" | 513 """build full text from a (base, delta) pair and other metadata""" |
514 # special case deltas which replace entire base; no need to decode | 514 # special case deltas which replace entire base; no need to decode |
515 # base revision. this neatly avoids censored bases, which throw when | 515 # base revision. this neatly avoids censored bases, which throw when |
516 # they're decoded. | 516 # they're decoded. |
517 hlen = struct.calcsize(">lll") | 517 hlen = struct.calcsize(">lll") |
518 if delta[:hlen] == mdiff.replacediffheader(revlog.rawsize(baserev), | 518 if delta[:hlen] == mdiff.replacediffheader( |
519 len(delta) - hlen): | 519 revlog.rawsize(baserev), len(delta) - hlen |
520 ): | |
520 fulltext = delta[hlen:] | 521 fulltext = delta[hlen:] |
521 else: | 522 else: |
522 # deltabase is rawtext before changed by flag processors, which is | 523 # deltabase is rawtext before changed by flag processors, which is |
523 # equivalent to non-raw text | 524 # equivalent to non-raw text |
524 basetext = revlog.revision(baserev, _df=fh, raw=False) | 525 basetext = revlog.revision(baserev, _df=fh, raw=False) |
527 try: | 528 try: |
528 validatehash = flagutil.processflagsraw(revlog, fulltext, flags) | 529 validatehash = flagutil.processflagsraw(revlog, fulltext, flags) |
529 if validatehash: | 530 if validatehash: |
530 revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2) | 531 revlog.checkhash(fulltext, expectednode, p1=p1, p2=p2) |
531 if flags & REVIDX_ISCENSORED: | 532 if flags & REVIDX_ISCENSORED: |
532 raise error.StorageError(_('node %s is not censored') % | 533 raise error.StorageError( |
533 expectednode) | 534 _('node %s is not censored') % expectednode |
535 ) | |
534 except error.CensoredNodeError: | 536 except error.CensoredNodeError: |
535 # must pass the censored index flag to add censored revisions | 537 # must pass the censored index flag to add censored revisions |
536 if not flags & REVIDX_ISCENSORED: | 538 if not flags & REVIDX_ISCENSORED: |
537 raise | 539 raise |
538 return fulltext | 540 return fulltext |
541 | |
539 | 542 |
540 @attr.s(slots=True, frozen=True) | 543 @attr.s(slots=True, frozen=True) |
541 class _deltainfo(object): | 544 class _deltainfo(object): |
542 distance = attr.ib() | 545 distance = attr.ib() |
543 deltalen = attr.ib() | 546 deltalen = attr.ib() |
546 chainbase = attr.ib() | 549 chainbase = attr.ib() |
547 chainlen = attr.ib() | 550 chainlen = attr.ib() |
548 compresseddeltalen = attr.ib() | 551 compresseddeltalen = attr.ib() |
549 snapshotdepth = attr.ib() | 552 snapshotdepth = attr.ib() |
550 | 553 |
554 | |
551 def isgooddeltainfo(revlog, deltainfo, revinfo): | 555 def isgooddeltainfo(revlog, deltainfo, revinfo): |
552 """Returns True if the given delta is good. Good means that it is within | 556 """Returns True if the given delta is good. Good means that it is within |
553 the disk span, disk size, and chain length bounds that we know to be | 557 the disk span, disk size, and chain length bounds that we know to be |
554 performant.""" | 558 performant.""" |
555 if deltainfo is None: | 559 if deltainfo is None: |
563 | 567 |
564 textlen = revinfo.textlen | 568 textlen = revinfo.textlen |
565 defaultmax = textlen * 4 | 569 defaultmax = textlen * 4 |
566 maxdist = revlog._maxdeltachainspan | 570 maxdist = revlog._maxdeltachainspan |
567 if not maxdist: | 571 if not maxdist: |
568 maxdist = deltainfo.distance # ensure the conditional pass | 572 maxdist = deltainfo.distance # ensure the conditional pass |
569 maxdist = max(maxdist, defaultmax) | 573 maxdist = max(maxdist, defaultmax) |
570 | 574 |
571 # Bad delta from read span: | 575 # Bad delta from read span: |
572 # | 576 # |
573 # If the span of data read is larger than the maximum allowed. | 577 # If the span of data read is larger than the maximum allowed. |
594 return False | 598 return False |
595 | 599 |
596 # Bad delta from chain length: | 600 # Bad delta from chain length: |
597 # | 601 # |
598 # If the number of delta in the chain gets too high. | 602 # If the number of delta in the chain gets too high. |
599 if (revlog._maxchainlen | 603 if revlog._maxchainlen and revlog._maxchainlen < deltainfo.chainlen: |
600 and revlog._maxchainlen < deltainfo.chainlen): | |
601 return False | 604 return False |
602 | 605 |
603 # bad delta from intermediate snapshot size limit | 606 # bad delta from intermediate snapshot size limit |
604 # | 607 # |
605 # If an intermediate snapshot size is higher than the limit. The | 608 # If an intermediate snapshot size is higher than the limit. The |
606 # limit exist to prevent endless chain of intermediate delta to be | 609 # limit exist to prevent endless chain of intermediate delta to be |
607 # created. | 610 # created. |
608 if (deltainfo.snapshotdepth is not None and | 611 if ( |
609 (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen): | 612 deltainfo.snapshotdepth is not None |
613 and (textlen >> deltainfo.snapshotdepth) < deltainfo.deltalen | |
614 ): | |
610 return False | 615 return False |
611 | 616 |
612 # bad delta if new intermediate snapshot is larger than the previous | 617 # bad delta if new intermediate snapshot is larger than the previous |
613 # snapshot | 618 # snapshot |
614 if (deltainfo.snapshotdepth | 619 if ( |
615 and revlog.length(deltainfo.base) < deltainfo.deltalen): | 620 deltainfo.snapshotdepth |
621 and revlog.length(deltainfo.base) < deltainfo.deltalen | |
622 ): | |
616 return False | 623 return False |
617 | 624 |
618 return True | 625 return True |
626 | |
619 | 627 |
620 # If a revision's full text is that much bigger than a base candidate full | 628 # If a revision's full text is that much bigger than a base candidate full |
621 # text's, it is very unlikely that it will produce a valid delta. We no longer | 629 # text's, it is very unlikely that it will produce a valid delta. We no longer |
622 # consider these candidates. | 630 # consider these candidates. |
623 LIMIT_BASE2TEXT = 500 | 631 LIMIT_BASE2TEXT = 500 |
632 | |
624 | 633 |
625 def _candidategroups(revlog, textlen, p1, p2, cachedelta): | 634 def _candidategroups(revlog, textlen, p1, p2, cachedelta): |
626 """Provides group of revision to be tested as delta base | 635 """Provides group of revision to be tested as delta base |
627 | 636 |
628 This top level function focus on emitting groups with unique and worthwhile | 637 This top level function focus on emitting groups with unique and worthwhile |
647 if temptative is None: | 656 if temptative is None: |
648 break | 657 break |
649 group = [] | 658 group = [] |
650 for rev in temptative: | 659 for rev in temptative: |
651 # skip over empty delta (no need to include them in a chain) | 660 # skip over empty delta (no need to include them in a chain) |
652 while (revlog._generaldelta | 661 while revlog._generaldelta and not ( |
653 and not (rev == nullrev | 662 rev == nullrev or rev in tested or deltalength(rev) |
654 or rev in tested | 663 ): |
655 or deltalength(rev))): | |
656 tested.add(rev) | 664 tested.add(rev) |
657 rev = deltaparent(rev) | 665 rev = deltaparent(rev) |
658 # no need to try a delta against nullrev, this will be done as a | 666 # no need to try a delta against nullrev, this will be done as a |
659 # last resort. | 667 # last resort. |
660 if rev == nullrev: | 668 if rev == nullrev: |
713 # impacting performances. Some bounding or slicing mecanism | 721 # impacting performances. Some bounding or slicing mecanism |
714 # would help to reduce this impact. | 722 # would help to reduce this impact. |
715 good = yield tuple(group) | 723 good = yield tuple(group) |
716 yield None | 724 yield None |
717 | 725 |
726 | |
718 def _findsnapshots(revlog, cache, start_rev): | 727 def _findsnapshots(revlog, cache, start_rev): |
719 """find snapshot from start_rev to tip""" | 728 """find snapshot from start_rev to tip""" |
720 if util.safehasattr(revlog.index, 'findsnapshots'): | 729 if util.safehasattr(revlog.index, 'findsnapshots'): |
721 revlog.index.findsnapshots(cache, start_rev) | 730 revlog.index.findsnapshots(cache, start_rev) |
722 else: | 731 else: |
723 deltaparent = revlog.deltaparent | 732 deltaparent = revlog.deltaparent |
724 issnapshot = revlog.issnapshot | 733 issnapshot = revlog.issnapshot |
725 for rev in revlog.revs(start_rev): | 734 for rev in revlog.revs(start_rev): |
726 if issnapshot(rev): | 735 if issnapshot(rev): |
727 cache[deltaparent(rev)].append(rev) | 736 cache[deltaparent(rev)].append(rev) |
737 | |
728 | 738 |
729 def _refinedgroups(revlog, p1, p2, cachedelta): | 739 def _refinedgroups(revlog, p1, p2, cachedelta): |
730 good = None | 740 good = None |
731 # First we try to reuse a the delta contained in the bundle. | 741 # First we try to reuse a the delta contained in the bundle. |
732 # (or from the source revlog) | 742 # (or from the source revlog) |
771 children = tuple(sorted(c for c in snapshots[good])) | 781 children = tuple(sorted(c for c in snapshots[good])) |
772 good = yield children | 782 good = yield children |
773 | 783 |
774 # we have found nothing | 784 # we have found nothing |
775 yield None | 785 yield None |
786 | |
776 | 787 |
777 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None): | 788 def _rawgroups(revlog, p1, p2, cachedelta, snapshots=None): |
778 """Provides group of revision to be tested as delta base | 789 """Provides group of revision to be tested as delta base |
779 | 790 |
780 This lower level function focus on emitting delta theorically interresting | 791 This lower level function focus on emitting delta theorically interresting |
892 if not sparse: | 903 if not sparse: |
893 # other approach failed try against prev to hopefully save us a | 904 # other approach failed try against prev to hopefully save us a |
894 # fulltext. | 905 # fulltext. |
895 yield (prev,) | 906 yield (prev,) |
896 | 907 |
908 | |
897 class deltacomputer(object): | 909 class deltacomputer(object): |
898 def __init__(self, revlog): | 910 def __init__(self, revlog): |
899 self.revlog = revlog | 911 self.revlog = revlog |
900 | 912 |
901 def buildtext(self, revinfo, fh): | 913 def buildtext(self, revinfo, fh): |
912 revlog = self.revlog | 924 revlog = self.revlog |
913 cachedelta = revinfo.cachedelta | 925 cachedelta = revinfo.cachedelta |
914 baserev = cachedelta[0] | 926 baserev = cachedelta[0] |
915 delta = cachedelta[1] | 927 delta = cachedelta[1] |
916 | 928 |
917 fulltext = btext[0] = _textfromdelta(fh, revlog, baserev, delta, | 929 fulltext = btext[0] = _textfromdelta( |
918 revinfo.p1, revinfo.p2, | 930 fh, |
919 revinfo.flags, revinfo.node) | 931 revlog, |
932 baserev, | |
933 delta, | |
934 revinfo.p1, | |
935 revinfo.p2, | |
936 revinfo.flags, | |
937 revinfo.node, | |
938 ) | |
920 return fulltext | 939 return fulltext |
921 | 940 |
922 def _builddeltadiff(self, base, revinfo, fh): | 941 def _builddeltadiff(self, base, revinfo, fh): |
923 revlog = self.revlog | 942 revlog = self.revlog |
924 t = self.buildtext(revinfo, fh) | 943 t = self.buildtext(revinfo, fh) |
951 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase): | 970 if deltabase not in (p1, p2) and revlog.issnapshot(deltabase): |
952 snapshotdepth = len(revlog._deltachain(deltabase)[0]) | 971 snapshotdepth = len(revlog._deltachain(deltabase)[0]) |
953 delta = None | 972 delta = None |
954 if revinfo.cachedelta: | 973 if revinfo.cachedelta: |
955 cachebase, cachediff = revinfo.cachedelta | 974 cachebase, cachediff = revinfo.cachedelta |
956 #check if the diff still apply | 975 # check if the diff still apply |
957 currentbase = cachebase | 976 currentbase = cachebase |
958 while (currentbase != nullrev | 977 while ( |
959 and currentbase != base | 978 currentbase != nullrev |
960 and self.revlog.length(currentbase) == 0): | 979 and currentbase != base |
980 and self.revlog.length(currentbase) == 0 | |
981 ): | |
961 currentbase = self.revlog.deltaparent(currentbase) | 982 currentbase = self.revlog.deltaparent(currentbase) |
962 if self.revlog._lazydelta and currentbase == base: | 983 if self.revlog._lazydelta and currentbase == base: |
963 delta = revinfo.cachedelta[1] | 984 delta = revinfo.cachedelta[1] |
964 if delta is None: | 985 if delta is None: |
965 delta = self._builddeltadiff(base, revinfo, fh) | 986 delta = self._builddeltadiff(base, revinfo, fh) |
977 dist = deltalen + offset - revlog.start(chainbase) | 998 dist = deltalen + offset - revlog.start(chainbase) |
978 chainlen, compresseddeltalen = revlog._chaininfo(base) | 999 chainlen, compresseddeltalen = revlog._chaininfo(base) |
979 chainlen += 1 | 1000 chainlen += 1 |
980 compresseddeltalen += deltalen | 1001 compresseddeltalen += deltalen |
981 | 1002 |
982 return _deltainfo(dist, deltalen, (header, data), deltabase, | 1003 return _deltainfo( |
983 chainbase, chainlen, compresseddeltalen, | 1004 dist, |
984 snapshotdepth) | 1005 deltalen, |
1006 (header, data), | |
1007 deltabase, | |
1008 chainbase, | |
1009 chainlen, | |
1010 compresseddeltalen, | |
1011 snapshotdepth, | |
1012 ) | |
985 | 1013 |
986 def _fullsnapshotinfo(self, fh, revinfo): | 1014 def _fullsnapshotinfo(self, fh, revinfo): |
987 curr = len(self.revlog) | 1015 curr = len(self.revlog) |
988 rawtext = self.buildtext(revinfo, fh) | 1016 rawtext = self.buildtext(revinfo, fh) |
989 data = self.revlog.compress(rawtext) | 1017 data = self.revlog.compress(rawtext) |
990 compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0]) | 1018 compresseddeltalen = deltalen = dist = len(data[1]) + len(data[0]) |
991 deltabase = chainbase = curr | 1019 deltabase = chainbase = curr |
992 snapshotdepth = 0 | 1020 snapshotdepth = 0 |
993 chainlen = 1 | 1021 chainlen = 1 |
994 | 1022 |
995 return _deltainfo(dist, deltalen, data, deltabase, | 1023 return _deltainfo( |
996 chainbase, chainlen, compresseddeltalen, | 1024 dist, |
997 snapshotdepth) | 1025 deltalen, |
1026 data, | |
1027 deltabase, | |
1028 chainbase, | |
1029 chainlen, | |
1030 compresseddeltalen, | |
1031 snapshotdepth, | |
1032 ) | |
998 | 1033 |
999 def finddeltainfo(self, revinfo, fh): | 1034 def finddeltainfo(self, revinfo, fh): |
1000 """Find an acceptable delta against a candidate revision | 1035 """Find an acceptable delta against a candidate revision |
1001 | 1036 |
1002 revinfo: information about the revision (instance of _revisioninfo) | 1037 revinfo: information about the revision (instance of _revisioninfo) |
1023 p2 = revinfo.p2 | 1058 p2 = revinfo.p2 |
1024 revlog = self.revlog | 1059 revlog = self.revlog |
1025 | 1060 |
1026 deltainfo = None | 1061 deltainfo = None |
1027 p1r, p2r = revlog.rev(p1), revlog.rev(p2) | 1062 p1r, p2r = revlog.rev(p1), revlog.rev(p2) |
1028 groups = _candidategroups(self.revlog, revinfo.textlen, | 1063 groups = _candidategroups( |
1029 p1r, p2r, cachedelta) | 1064 self.revlog, revinfo.textlen, p1r, p2r, cachedelta |
1065 ) | |
1030 candidaterevs = next(groups) | 1066 candidaterevs = next(groups) |
1031 while candidaterevs is not None: | 1067 while candidaterevs is not None: |
1032 nominateddeltas = [] | 1068 nominateddeltas = [] |
1033 if deltainfo is not None: | 1069 if deltainfo is not None: |
1034 # if we already found a good delta, | 1070 # if we already found a good delta, |