Mercurial > public > mercurial-scm > hg-stable
comparison mercurial/patch.py @ 43076:2372284d9457
formatting: blacken the codebase
This is using my patch to black
(https://github.com/psf/black/pull/826) so we don't un-wrap collection
literals.
Done with:
hg files 'set:**.py - mercurial/thirdparty/** - "contrib/python-zstandard/**"' | xargs black -S
# skip-blame mass-reformatting only
# no-check-commit reformats foo_bar functions
Differential Revision: https://phab.mercurial-scm.org/D6971
author | Augie Fackler <augie@google.com> |
---|---|
date | Sun, 06 Oct 2019 09:45:02 -0400 |
parents | db33e4f25729 |
children | 687b865b95ad |
comparison
equal
deleted
inserted
replaced
43075:57875cf423c9 | 43076:2372284d9457 |
---|---|
47 | 47 |
48 stringio = util.stringio | 48 stringio = util.stringio |
49 | 49 |
50 gitre = re.compile(br'diff --git a/(.*) b/(.*)') | 50 gitre = re.compile(br'diff --git a/(.*) b/(.*)') |
51 tabsplitter = re.compile(br'(\t+|[^\t]+)') | 51 tabsplitter = re.compile(br'(\t+|[^\t]+)') |
52 wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|' | 52 wordsplitter = re.compile( |
53 b'[^ \ta-zA-Z0-9_\x80-\xff])') | 53 br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|' b'[^ \ta-zA-Z0-9_\x80-\xff])' |
54 ) | |
54 | 55 |
55 PatchError = error.PatchError | 56 PatchError = error.PatchError |
56 | 57 |
57 # public functions | 58 # public functions |
59 | |
58 | 60 |
59 def split(stream): | 61 def split(stream): |
60 '''return an iterator of individual patches from a stream''' | 62 '''return an iterator of individual patches from a stream''' |
63 | |
61 def isheader(line, inheader): | 64 def isheader(line, inheader): |
62 if inheader and line.startswith((' ', '\t')): | 65 if inheader and line.startswith((' ', '\t')): |
63 # continuation | 66 # continuation |
64 return True | 67 return True |
65 if line.startswith((' ', '-', '+')): | 68 if line.startswith((' ', '-', '+')): |
183 # Not enough info, keep reading | 186 # Not enough info, keep reading |
184 | 187 |
185 # if we are here, we have a very plain patch | 188 # if we are here, we have a very plain patch |
186 return remainder(cur) | 189 return remainder(cur) |
187 | 190 |
191 | |
188 ## Some facility for extensible patch parsing: | 192 ## Some facility for extensible patch parsing: |
189 # list of pairs ("header to match", "data key") | 193 # list of pairs ("header to match", "data key") |
190 patchheadermap = [('Date', 'date'), | 194 patchheadermap = [ |
191 ('Branch', 'branch'), | 195 ('Date', 'date'), |
192 ('Node ID', 'nodeid'), | 196 ('Branch', 'branch'), |
193 ] | 197 ('Node ID', 'nodeid'), |
198 ] | |
199 | |
194 | 200 |
195 @contextlib.contextmanager | 201 @contextlib.contextmanager |
196 def extract(ui, fileobj): | 202 def extract(ui, fileobj): |
197 '''extract patch from data read from fileobj. | 203 '''extract patch from data read from fileobj. |
198 | 204 |
216 yield _extract(ui, fileobj, tmpname, tmpfp) | 222 yield _extract(ui, fileobj, tmpname, tmpfp) |
217 finally: | 223 finally: |
218 tmpfp.close() | 224 tmpfp.close() |
219 os.unlink(tmpname) | 225 os.unlink(tmpname) |
220 | 226 |
227 | |
221 def _extract(ui, fileobj, tmpname, tmpfp): | 228 def _extract(ui, fileobj, tmpname, tmpfp): |
222 | 229 |
223 # attempt to detect the start of a patch | 230 # attempt to detect the start of a patch |
224 # (this heuristic is borrowed from quilt) | 231 # (this heuristic is borrowed from quilt) |
225 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |' | 232 diffre = re.compile( |
226 br'retrieving revision [0-9]+(\.[0-9]+)*$|' | 233 br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |' |
227 br'---[ \t].*?^\+\+\+[ \t]|' | 234 br'retrieving revision [0-9]+(\.[0-9]+)*$|' |
228 br'\*\*\*[ \t].*?^---[ \t])', | 235 br'---[ \t].*?^\+\+\+[ \t]|' |
229 re.MULTILINE | re.DOTALL) | 236 br'\*\*\*[ \t].*?^---[ \t])', |
237 re.MULTILINE | re.DOTALL, | |
238 ) | |
230 | 239 |
231 data = {} | 240 data = {} |
232 | 241 |
233 msg = mail.parse(fileobj) | 242 msg = mail.parse(fileobj) |
234 | 243 |
235 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject']) | 244 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject']) |
236 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From']) | 245 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From']) |
237 if not subject and not data['user']: | 246 if not subject and not data['user']: |
238 # Not an email, restore parsed headers if any | 247 # Not an email, restore parsed headers if any |
239 subject = '\n'.join(': '.join(map(encoding.strtolocal, h)) | 248 subject = ( |
240 for h in msg.items()) + '\n' | 249 '\n'.join( |
250 ': '.join(map(encoding.strtolocal, h)) for h in msg.items() | |
251 ) | |
252 + '\n' | |
253 ) | |
241 | 254 |
242 # should try to parse msg['Date'] | 255 # should try to parse msg['Date'] |
243 parents = [] | 256 parents = [] |
244 | 257 |
245 if subject: | 258 if subject: |
246 if subject.startswith('[PATCH'): | 259 if subject.startswith('[PATCH'): |
247 pend = subject.find(']') | 260 pend = subject.find(']') |
248 if pend >= 0: | 261 if pend >= 0: |
249 subject = subject[pend + 1:].lstrip() | 262 subject = subject[pend + 1 :].lstrip() |
250 subject = re.sub(br'\n[ \t]+', ' ', subject) | 263 subject = re.sub(br'\n[ \t]+', ' ', subject) |
251 ui.debug('Subject: %s\n' % subject) | 264 ui.debug('Subject: %s\n' % subject) |
252 if data['user']: | 265 if data['user']: |
253 ui.debug('From: %s\n' % data['user']) | 266 ui.debug('From: %s\n' % data['user']) |
254 diffs_seen = 0 | 267 diffs_seen = 0 |
267 ignoretext = False | 280 ignoretext = False |
268 | 281 |
269 ui.debug('found patch at byte %d\n' % m.start(0)) | 282 ui.debug('found patch at byte %d\n' % m.start(0)) |
270 diffs_seen += 1 | 283 diffs_seen += 1 |
271 cfp = stringio() | 284 cfp = stringio() |
272 for line in payload[:m.start(0)].splitlines(): | 285 for line in payload[: m.start(0)].splitlines(): |
273 if line.startswith('# HG changeset patch') and not hgpatch: | 286 if line.startswith('# HG changeset patch') and not hgpatch: |
274 ui.debug('patch generated by hg export\n') | 287 ui.debug('patch generated by hg export\n') |
275 hgpatch = True | 288 hgpatch = True |
276 hgpatchheader = True | 289 hgpatchheader = True |
277 # drop earlier commit message content | 290 # drop earlier commit message content |
286 parents.append(line[9:].lstrip()) | 299 parents.append(line[9:].lstrip()) |
287 elif line.startswith("# "): | 300 elif line.startswith("# "): |
288 for header, key in patchheadermap: | 301 for header, key in patchheadermap: |
289 prefix = '# %s ' % header | 302 prefix = '# %s ' % header |
290 if line.startswith(prefix): | 303 if line.startswith(prefix): |
291 data[key] = line[len(prefix):] | 304 data[key] = line[len(prefix) :] |
292 ui.debug('%s: %s\n' % (header, data[key])) | 305 ui.debug('%s: %s\n' % (header, data[key])) |
293 else: | 306 else: |
294 hgpatchheader = False | 307 hgpatchheader = False |
295 elif line == '---': | 308 elif line == '---': |
296 ignoretext = True | 309 ignoretext = True |
317 if diffs_seen: | 330 if diffs_seen: |
318 data['filename'] = tmpname | 331 data['filename'] = tmpname |
319 | 332 |
320 return data | 333 return data |
321 | 334 |
335 | |
322 class patchmeta(object): | 336 class patchmeta(object): |
323 """Patched file metadata | 337 """Patched file metadata |
324 | 338 |
325 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY | 339 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY |
326 or COPY. 'path' is patched file path. 'oldpath' is set to the | 340 or COPY. 'path' is patched file path. 'oldpath' is set to the |
327 origin file when 'op' is either COPY or RENAME, None otherwise. If | 341 origin file when 'op' is either COPY or RENAME, None otherwise. If |
328 file mode is changed, 'mode' is a tuple (islink, isexec) where | 342 file mode is changed, 'mode' is a tuple (islink, isexec) where |
329 'islink' is True if the file is a symlink and 'isexec' is True if | 343 'islink' is True if the file is a symlink and 'isexec' is True if |
330 the file is executable. Otherwise, 'mode' is None. | 344 the file is executable. Otherwise, 'mode' is None. |
331 """ | 345 """ |
346 | |
332 def __init__(self, path): | 347 def __init__(self, path): |
333 self.path = path | 348 self.path = path |
334 self.oldpath = None | 349 self.oldpath = None |
335 self.mode = None | 350 self.mode = None |
336 self.op = 'MODIFY' | 351 self.op = 'MODIFY' |
362 def ispatching(self, afile, bfile): | 377 def ispatching(self, afile, bfile): |
363 return self._ispatchinga(afile) and self._ispatchingb(bfile) | 378 return self._ispatchinga(afile) and self._ispatchingb(bfile) |
364 | 379 |
365 def __repr__(self): | 380 def __repr__(self): |
366 return r"<patchmeta %s %r>" % (self.op, self.path) | 381 return r"<patchmeta %s %r>" % (self.op, self.path) |
382 | |
367 | 383 |
368 def readgitpatch(lr): | 384 def readgitpatch(lr): |
369 """extract git-style metadata about patches from <patchname>""" | 385 """extract git-style metadata about patches from <patchname>""" |
370 | 386 |
371 # Filter patch for git information | 387 # Filter patch for git information |
407 if gp: | 423 if gp: |
408 gitpatches.append(gp) | 424 gitpatches.append(gp) |
409 | 425 |
410 return gitpatches | 426 return gitpatches |
411 | 427 |
428 | |
412 class linereader(object): | 429 class linereader(object): |
413 # simple class to allow pushing lines back into the input stream | 430 # simple class to allow pushing lines back into the input stream |
414 def __init__(self, fp): | 431 def __init__(self, fp): |
415 self.fp = fp | 432 self.fp = fp |
416 self.buf = [] | 433 self.buf = [] |
426 return l | 443 return l |
427 return self.fp.readline() | 444 return self.fp.readline() |
428 | 445 |
429 def __iter__(self): | 446 def __iter__(self): |
430 return iter(self.readline, '') | 447 return iter(self.readline, '') |
448 | |
431 | 449 |
432 class abstractbackend(object): | 450 class abstractbackend(object): |
433 def __init__(self, ui): | 451 def __init__(self, ui): |
434 self.ui = ui | 452 self.ui = ui |
435 | 453 |
460 def exists(self, fname): | 478 def exists(self, fname): |
461 raise NotImplementedError | 479 raise NotImplementedError |
462 | 480 |
463 def close(self): | 481 def close(self): |
464 raise NotImplementedError | 482 raise NotImplementedError |
483 | |
465 | 484 |
466 class fsbackend(abstractbackend): | 485 class fsbackend(abstractbackend): |
467 def __init__(self, ui, basedir): | 486 def __init__(self, ui, basedir): |
468 super(fsbackend, self).__init__(ui) | 487 super(fsbackend, self).__init__(ui) |
469 self.opener = vfsmod.vfs(basedir) | 488 self.opener = vfsmod.vfs(basedir) |
502 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir) | 521 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir) |
503 | 522 |
504 def writerej(self, fname, failed, total, lines): | 523 def writerej(self, fname, failed, total, lines): |
505 fname = fname + ".rej" | 524 fname = fname + ".rej" |
506 self.ui.warn( | 525 self.ui.warn( |
507 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") % | 526 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") |
508 (failed, total, fname)) | 527 % (failed, total, fname) |
528 ) | |
509 fp = self.opener(fname, 'w') | 529 fp = self.opener(fname, 'w') |
510 fp.writelines(lines) | 530 fp.writelines(lines) |
511 fp.close() | 531 fp.close() |
512 | 532 |
513 def exists(self, fname): | 533 def exists(self, fname): |
514 return self.opener.lexists(fname) | 534 return self.opener.lexists(fname) |
535 | |
515 | 536 |
516 class workingbackend(fsbackend): | 537 class workingbackend(fsbackend): |
517 def __init__(self, ui, repo, similarity): | 538 def __init__(self, ui, repo, similarity): |
518 super(workingbackend, self).__init__(ui, repo.root) | 539 super(workingbackend, self).__init__(ui, repo.root) |
519 self.repo = repo | 540 self.repo = repo |
555 changed.discard(f) | 576 changed.discard(f) |
556 if changed: | 577 if changed: |
557 scmutil.marktouched(self.repo, changed, self.similarity) | 578 scmutil.marktouched(self.repo, changed, self.similarity) |
558 return sorted(self.changed) | 579 return sorted(self.changed) |
559 | 580 |
581 | |
560 class filestore(object): | 582 class filestore(object): |
561 def __init__(self, maxsize=None): | 583 def __init__(self, maxsize=None): |
562 self.opener = None | 584 self.opener = None |
563 self.files = {} | 585 self.files = {} |
564 self.created = 0 | 586 self.created = 0 |
565 self.maxsize = maxsize | 587 self.maxsize = maxsize |
566 if self.maxsize is None: | 588 if self.maxsize is None: |
567 self.maxsize = 4*(2**20) | 589 self.maxsize = 4 * (2 ** 20) |
568 self.size = 0 | 590 self.size = 0 |
569 self.data = {} | 591 self.data = {} |
570 | 592 |
571 def setfile(self, fname, data, mode, copied=None): | 593 def setfile(self, fname, data, mode, copied=None): |
572 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize: | 594 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize: |
592 | 614 |
593 def close(self): | 615 def close(self): |
594 if self.opener: | 616 if self.opener: |
595 shutil.rmtree(self.opener.base) | 617 shutil.rmtree(self.opener.base) |
596 | 618 |
619 | |
597 class repobackend(abstractbackend): | 620 class repobackend(abstractbackend): |
598 def __init__(self, ui, repo, ctx, store): | 621 def __init__(self, ui, repo, ctx, store): |
599 super(repobackend, self).__init__(ui) | 622 super(repobackend, self).__init__(ui) |
600 self.repo = repo | 623 self.repo = repo |
601 self.ctx = ctx | 624 self.ctx = ctx |
634 return fname in self.ctx | 657 return fname in self.ctx |
635 | 658 |
636 def close(self): | 659 def close(self): |
637 return self.changed | self.removed | 660 return self.changed | self.removed |
638 | 661 |
662 | |
639 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 | 663 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1 |
640 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@') | 664 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@') |
641 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)') | 665 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)') |
642 eolmodes = ['strict', 'crlf', 'lf', 'auto'] | 666 eolmodes = ['strict', 'crlf', 'lf', 'auto'] |
667 | |
643 | 668 |
644 class patchfile(object): | 669 class patchfile(object): |
645 def __init__(self, ui, gp, backend, store, eolmode='strict'): | 670 def __init__(self, ui, gp, backend, store, eolmode='strict'): |
646 self.fname = gp.path | 671 self.fname = gp.path |
647 self.eolmode = eolmode | 672 self.eolmode = eolmode |
684 self.missing = False | 709 self.missing = False |
685 if self.mode is None: | 710 if self.mode is None: |
686 self.mode = (False, False) | 711 self.mode = (False, False) |
687 if self.missing: | 712 if self.missing: |
688 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname) | 713 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname) |
689 self.ui.warn(_("(use '--prefix' to apply patch relative to the " | 714 self.ui.warn( |
690 "current directory)\n")) | 715 _( |
716 "(use '--prefix' to apply patch relative to the " | |
717 "current directory)\n" | |
718 ) | |
719 ) | |
691 | 720 |
692 self.hash = {} | 721 self.hash = {} |
693 self.dirty = 0 | 722 self.dirty = 0 |
694 self.offset = 0 | 723 self.offset = 0 |
695 self.skew = 0 | 724 self.skew = 0 |
724 s = _("patching file %s\n") % self.fname | 753 s = _("patching file %s\n") % self.fname |
725 if warn: | 754 if warn: |
726 self.ui.warn(s) | 755 self.ui.warn(s) |
727 else: | 756 else: |
728 self.ui.note(s) | 757 self.ui.note(s) |
729 | |
730 | 758 |
731 def findlines(self, l, linenum): | 759 def findlines(self, l, linenum): |
732 # looks through the hash and finds candidate lines. The | 760 # looks through the hash and finds candidate lines. The |
733 # result is a list of line numbers sorted based on distance | 761 # result is a list of line numbers sorted based on distance |
734 # from linenum | 762 # from linenum |
755 lines.append("\n\\ No newline at end of file\n") | 783 lines.append("\n\\ No newline at end of file\n") |
756 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines) | 784 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines) |
757 | 785 |
758 def apply(self, h): | 786 def apply(self, h): |
759 if not h.complete(): | 787 if not h.complete(): |
760 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") % | 788 raise PatchError( |
761 (h.number, h.desc, len(h.a), h.lena, len(h.b), | 789 _("bad hunk #%d %s (%d %d %d %d)") |
762 h.lenb)) | 790 % (h.number, h.desc, len(h.a), h.lena, len(h.b), h.lenb) |
791 ) | |
763 | 792 |
764 self.hunks += 1 | 793 self.hunks += 1 |
765 | 794 |
766 if self.missing: | 795 if self.missing: |
767 self.rej.append(h) | 796 self.rej.append(h) |
768 return -1 | 797 return -1 |
769 | 798 |
770 if self.exists and self.create: | 799 if self.exists and self.create: |
771 if self.copysource: | 800 if self.copysource: |
772 self.ui.warn(_("cannot create %s: destination already " | 801 self.ui.warn( |
773 "exists\n") % self.fname) | 802 _("cannot create %s: destination already " "exists\n") |
803 % self.fname | |
804 ) | |
774 else: | 805 else: |
775 self.ui.warn(_("file %s already exists\n") % self.fname) | 806 self.ui.warn(_("file %s already exists\n") % self.fname) |
776 self.rej.append(h) | 807 self.rej.append(h) |
777 return -1 | 808 return -1 |
778 | 809 |
785 self.offset += len(l) | 816 self.offset += len(l) |
786 self.dirty = True | 817 self.dirty = True |
787 return 0 | 818 return 0 |
788 | 819 |
789 horig = h | 820 horig = h |
790 if (self.eolmode in ('crlf', 'lf') | 821 if ( |
791 or self.eolmode == 'auto' and self.eol): | 822 self.eolmode in ('crlf', 'lf') |
823 or self.eolmode == 'auto' | |
824 and self.eol | |
825 ): | |
792 # If new eols are going to be normalized, then normalize | 826 # If new eols are going to be normalized, then normalize |
793 # hunk data before patching. Otherwise, preserve input | 827 # hunk data before patching. Otherwise, preserve input |
794 # line-endings. | 828 # line-endings. |
795 h = h.getnormalized() | 829 h = h.getnormalized() |
796 | 830 |
803 # fast case code | 837 # fast case code |
804 if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart): | 838 if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart): |
805 if self.remove: | 839 if self.remove: |
806 self.backend.unlink(self.fname) | 840 self.backend.unlink(self.fname) |
807 else: | 841 else: |
808 self.lines[oldstart:oldstart + len(old)] = new | 842 self.lines[oldstart : oldstart + len(old)] = new |
809 self.offset += len(new) - len(old) | 843 self.offset += len(new) - len(old) |
810 self.dirty = True | 844 self.dirty = True |
811 return 0 | 845 return 0 |
812 | 846 |
813 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it | 847 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it |
833 self.offset += len(new) - len(old) | 867 self.offset += len(new) - len(old) |
834 self.skew = l - orig_start | 868 self.skew = l - orig_start |
835 self.dirty = True | 869 self.dirty = True |
836 offset = l - orig_start - fuzzlen | 870 offset = l - orig_start - fuzzlen |
837 if fuzzlen: | 871 if fuzzlen: |
838 msg = _("Hunk #%d succeeded at %d " | 872 msg = _( |
839 "with fuzz %d " | 873 "Hunk #%d succeeded at %d " |
840 "(offset %d lines).\n") | 874 "with fuzz %d " |
875 "(offset %d lines).\n" | |
876 ) | |
841 self.printfile(True) | 877 self.printfile(True) |
842 self.ui.warn(msg % | 878 self.ui.warn( |
843 (h.number, l + 1, fuzzlen, offset)) | 879 msg % (h.number, l + 1, fuzzlen, offset) |
880 ) | |
844 else: | 881 else: |
845 msg = _("Hunk #%d succeeded at %d " | 882 msg = _( |
846 "(offset %d lines).\n") | 883 "Hunk #%d succeeded at %d " |
884 "(offset %d lines).\n" | |
885 ) | |
847 self.ui.note(msg % (h.number, l + 1, offset)) | 886 self.ui.note(msg % (h.number, l + 1, offset)) |
848 return fuzzlen | 887 return fuzzlen |
849 self.printfile(True) | 888 self.printfile(True) |
850 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start)) | 889 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start)) |
851 self.rej.append(horig) | 890 self.rej.append(horig) |
855 if self.dirty: | 894 if self.dirty: |
856 self.writelines(self.fname, self.lines, self.mode) | 895 self.writelines(self.fname, self.lines, self.mode) |
857 self.write_rej() | 896 self.write_rej() |
858 return len(self.rej) | 897 return len(self.rej) |
859 | 898 |
899 | |
860 class header(object): | 900 class header(object): |
861 """patch header | 901 """patch header |
862 """ | 902 """ |
903 | |
863 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$') | 904 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$') |
864 diff_re = re.compile('diff -r .* (.*)$') | 905 diff_re = re.compile('diff -r .* (.*)$') |
865 allhunks_re = re.compile('(?:index|deleted file) ') | 906 allhunks_re = re.compile('(?:index|deleted file) ') |
866 pretty_re = re.compile('(?:new file|deleted file) ') | 907 pretty_re = re.compile('(?:new file|deleted file) ') |
867 special_re = re.compile('(?:index|deleted|copy|rename|new mode) ') | 908 special_re = re.compile('(?:index|deleted|copy|rename|new mode) ') |
883 fp.write(h) | 924 fp.write(h) |
884 if self.binary(): | 925 if self.binary(): |
885 fp.write(_('this is a binary file\n')) | 926 fp.write(_('this is a binary file\n')) |
886 break | 927 break |
887 if h.startswith('---'): | 928 if h.startswith('---'): |
888 fp.write(_('%d hunks, %d lines changed\n') % | 929 fp.write( |
889 (len(self.hunks), | 930 _('%d hunks, %d lines changed\n') |
890 sum([max(h.added, h.removed) for h in self.hunks]))) | 931 % ( |
932 len(self.hunks), | |
933 sum([max(h.added, h.removed) for h in self.hunks]), | |
934 ) | |
935 ) | |
891 break | 936 break |
892 fp.write(h) | 937 fp.write(h) |
893 | 938 |
894 def write(self, fp): | 939 def write(self, fp): |
895 fp.write(''.join(self.header)) | 940 fp.write(''.join(self.header)) |
924 # cannot take some of it. | 969 # cannot take some of it. |
925 # Newly added files are special if they are empty, they are not special | 970 # Newly added files are special if they are empty, they are not special |
926 # if they have some content as we want to be able to change it | 971 # if they have some content as we want to be able to change it |
927 nocontent = len(self.header) == 2 | 972 nocontent = len(self.header) == 2 |
928 emptynewfile = self.isnewfile() and nocontent | 973 emptynewfile = self.isnewfile() and nocontent |
929 return (emptynewfile | 974 return emptynewfile or any( |
930 or any(self.special_re.match(h) for h in self.header)) | 975 self.special_re.match(h) for h in self.header |
976 ) | |
977 | |
931 | 978 |
932 class recordhunk(object): | 979 class recordhunk(object): |
933 """patch hunk | 980 """patch hunk |
934 | 981 |
935 XXX shouldn't we merge this with the other hunk class? | 982 XXX shouldn't we merge this with the other hunk class? |
936 """ | 983 """ |
937 | 984 |
938 def __init__(self, header, fromline, toline, proc, before, hunk, after, | 985 def __init__( |
939 maxcontext=None): | 986 self, |
987 header, | |
988 fromline, | |
989 toline, | |
990 proc, | |
991 before, | |
992 hunk, | |
993 after, | |
994 maxcontext=None, | |
995 ): | |
940 def trimcontext(lines, reverse=False): | 996 def trimcontext(lines, reverse=False): |
941 if maxcontext is not None: | 997 if maxcontext is not None: |
942 delta = len(lines) - maxcontext | 998 delta = len(lines) - maxcontext |
943 if delta > 0: | 999 if delta > 0: |
944 if reverse: | 1000 if reverse: |
958 | 1014 |
959 def __eq__(self, v): | 1015 def __eq__(self, v): |
960 if not isinstance(v, recordhunk): | 1016 if not isinstance(v, recordhunk): |
961 return False | 1017 return False |
962 | 1018 |
963 return ((v.hunk == self.hunk) and | 1019 return ( |
964 (v.proc == self.proc) and | 1020 (v.hunk == self.hunk) |
965 (self.fromline == v.fromline) and | 1021 and (v.proc == self.proc) |
966 (self.header.files() == v.header.files())) | 1022 and (self.fromline == v.fromline) |
1023 and (self.header.files() == v.header.files()) | |
1024 ) | |
967 | 1025 |
968 def __hash__(self): | 1026 def __hash__(self): |
969 return hash((tuple(self.hunk), | 1027 return hash( |
970 tuple(self.header.files()), | 1028 ( |
971 self.fromline, | 1029 tuple(self.hunk), |
972 self.proc)) | 1030 tuple(self.header.files()), |
1031 self.fromline, | |
1032 self.proc, | |
1033 ) | |
1034 ) | |
973 | 1035 |
974 def countchanges(self, hunk): | 1036 def countchanges(self, hunk): |
975 """hunk -> (n+,n-)""" | 1037 """hunk -> (n+,n-)""" |
976 add = len([h for h in hunk if h.startswith('+')]) | 1038 add = len([h for h in hunk if h.startswith('+')]) |
977 rem = len([h for h in hunk if h.startswith('-')]) | 1039 rem = len([h for h in hunk if h.startswith('-')]) |
984 that, swap fromline/toline and +/- signs while keep other things | 1046 that, swap fromline/toline and +/- signs while keep other things |
985 unchanged. | 1047 unchanged. |
986 """ | 1048 """ |
987 m = {'+': '-', '-': '+', '\\': '\\'} | 1049 m = {'+': '-', '-': '+', '\\': '\\'} |
988 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk] | 1050 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk] |
989 return recordhunk(self.header, self.toline, self.fromline, self.proc, | 1051 return recordhunk( |
990 self.before, hunk, self.after) | 1052 self.header, |
1053 self.toline, | |
1054 self.fromline, | |
1055 self.proc, | |
1056 self.before, | |
1057 hunk, | |
1058 self.after, | |
1059 ) | |
991 | 1060 |
992 def write(self, fp): | 1061 def write(self, fp): |
993 delta = len(self.before) + len(self.after) | 1062 delta = len(self.before) + len(self.after) |
994 if self.after and self.after[-1] == '\\ No newline at end of file\n': | 1063 if self.after and self.after[-1] == '\\ No newline at end of file\n': |
995 delta -= 1 | 1064 delta -= 1 |
996 fromlen = delta + self.removed | 1065 fromlen = delta + self.removed |
997 tolen = delta + self.added | 1066 tolen = delta + self.added |
998 fp.write('@@ -%d,%d +%d,%d @@%s\n' % | 1067 fp.write( |
999 (self.fromline, fromlen, self.toline, tolen, | 1068 '@@ -%d,%d +%d,%d @@%s\n' |
1000 self.proc and (' ' + self.proc))) | 1069 % ( |
1070 self.fromline, | |
1071 fromlen, | |
1072 self.toline, | |
1073 tolen, | |
1074 self.proc and (' ' + self.proc), | |
1075 ) | |
1076 ) | |
1001 fp.write(''.join(self.before + self.hunk + self.after)) | 1077 fp.write(''.join(self.before + self.hunk + self.after)) |
1002 | 1078 |
1003 pretty = write | 1079 pretty = write |
1004 | 1080 |
1005 def filename(self): | 1081 def filename(self): |
1006 return self.header.filename() | 1082 return self.header.filename() |
1007 | 1083 |
1008 def __repr__(self): | 1084 def __repr__(self): |
1009 return '<hunk %r@%d>' % (self.filename(), self.fromline) | 1085 return '<hunk %r@%d>' % (self.filename(), self.fromline) |
1086 | |
1010 | 1087 |
1011 def getmessages(): | 1088 def getmessages(): |
1012 return { | 1089 return { |
1013 'multiple': { | 1090 'multiple': { |
1014 'apply': _("apply change %d/%d to '%s'?"), | 1091 'apply': _("apply change %d/%d to '%s'?"), |
1021 'discard': _("discard this change to '%s'?"), | 1098 'discard': _("discard this change to '%s'?"), |
1022 'keep': _("keep this change to '%s'?"), | 1099 'keep': _("keep this change to '%s'?"), |
1023 'record': _("record this change to '%s'?"), | 1100 'record': _("record this change to '%s'?"), |
1024 }, | 1101 }, |
1025 'help': { | 1102 'help': { |
1026 'apply': _('[Ynesfdaq?]' | 1103 'apply': _( |
1027 '$$ &Yes, apply this change' | 1104 '[Ynesfdaq?]' |
1028 '$$ &No, skip this change' | 1105 '$$ &Yes, apply this change' |
1029 '$$ &Edit this change manually' | 1106 '$$ &No, skip this change' |
1030 '$$ &Skip remaining changes to this file' | 1107 '$$ &Edit this change manually' |
1031 '$$ Apply remaining changes to this &file' | 1108 '$$ &Skip remaining changes to this file' |
1032 '$$ &Done, skip remaining changes and files' | 1109 '$$ Apply remaining changes to this &file' |
1033 '$$ Apply &all changes to all remaining files' | 1110 '$$ &Done, skip remaining changes and files' |
1034 '$$ &Quit, applying no changes' | 1111 '$$ Apply &all changes to all remaining files' |
1035 '$$ &? (display help)'), | 1112 '$$ &Quit, applying no changes' |
1036 'discard': _('[Ynesfdaq?]' | 1113 '$$ &? (display help)' |
1037 '$$ &Yes, discard this change' | 1114 ), |
1038 '$$ &No, skip this change' | 1115 'discard': _( |
1039 '$$ &Edit this change manually' | 1116 '[Ynesfdaq?]' |
1040 '$$ &Skip remaining changes to this file' | 1117 '$$ &Yes, discard this change' |
1041 '$$ Discard remaining changes to this &file' | 1118 '$$ &No, skip this change' |
1042 '$$ &Done, skip remaining changes and files' | 1119 '$$ &Edit this change manually' |
1043 '$$ Discard &all changes to all remaining files' | 1120 '$$ &Skip remaining changes to this file' |
1044 '$$ &Quit, discarding no changes' | 1121 '$$ Discard remaining changes to this &file' |
1045 '$$ &? (display help)'), | 1122 '$$ &Done, skip remaining changes and files' |
1046 'keep': _('[Ynesfdaq?]' | 1123 '$$ Discard &all changes to all remaining files' |
1047 '$$ &Yes, keep this change' | 1124 '$$ &Quit, discarding no changes' |
1048 '$$ &No, skip this change' | 1125 '$$ &? (display help)' |
1049 '$$ &Edit this change manually' | 1126 ), |
1050 '$$ &Skip remaining changes to this file' | 1127 'keep': _( |
1051 '$$ Keep remaining changes to this &file' | 1128 '[Ynesfdaq?]' |
1052 '$$ &Done, skip remaining changes and files' | 1129 '$$ &Yes, keep this change' |
1053 '$$ Keep &all changes to all remaining files' | 1130 '$$ &No, skip this change' |
1054 '$$ &Quit, keeping all changes' | 1131 '$$ &Edit this change manually' |
1055 '$$ &? (display help)'), | 1132 '$$ &Skip remaining changes to this file' |
1056 'record': _('[Ynesfdaq?]' | 1133 '$$ Keep remaining changes to this &file' |
1057 '$$ &Yes, record this change' | 1134 '$$ &Done, skip remaining changes and files' |
1058 '$$ &No, skip this change' | 1135 '$$ Keep &all changes to all remaining files' |
1059 '$$ &Edit this change manually' | 1136 '$$ &Quit, keeping all changes' |
1060 '$$ &Skip remaining changes to this file' | 1137 '$$ &? (display help)' |
1061 '$$ Record remaining changes to this &file' | 1138 ), |
1062 '$$ &Done, skip remaining changes and files' | 1139 'record': _( |
1063 '$$ Record &all changes to all remaining files' | 1140 '[Ynesfdaq?]' |
1064 '$$ &Quit, recording no changes' | 1141 '$$ &Yes, record this change' |
1065 '$$ &? (display help)'), | 1142 '$$ &No, skip this change' |
1066 } | 1143 '$$ &Edit this change manually' |
1144 '$$ &Skip remaining changes to this file' | |
1145 '$$ Record remaining changes to this &file' | |
1146 '$$ &Done, skip remaining changes and files' | |
1147 '$$ Record &all changes to all remaining files' | |
1148 '$$ &Quit, recording no changes' | |
1149 '$$ &? (display help)' | |
1150 ), | |
1151 }, | |
1067 } | 1152 } |
1153 | |
1068 | 1154 |
1069 def filterpatch(ui, headers, match, operation=None): | 1155 def filterpatch(ui, headers, match, operation=None): |
1070 """Interactively filter patch chunks into applied-only chunks""" | 1156 """Interactively filter patch chunks into applied-only chunks""" |
1071 messages = getmessages() | 1157 messages = getmessages() |
1072 | 1158 |
1092 resps = messages['help'][operation] | 1178 resps = messages['help'][operation] |
1093 # IMPORTANT: keep the last line of this prompt short (<40 english | 1179 # IMPORTANT: keep the last line of this prompt short (<40 english |
1094 # chars is a good target) because of issue6158. | 1180 # chars is a good target) because of issue6158. |
1095 r = ui.promptchoice("%s\n(enter ? for help) %s" % (query, resps)) | 1181 r = ui.promptchoice("%s\n(enter ? for help) %s" % (query, resps)) |
1096 ui.write("\n") | 1182 ui.write("\n") |
1097 if r == 8: # ? | 1183 if r == 8: # ? |
1098 for c, t in ui.extractchoices(resps)[1]: | 1184 for c, t in ui.extractchoices(resps)[1]: |
1099 ui.write('%s - %s\n' % (c, encoding.lower(t))) | 1185 ui.write('%s - %s\n' % (c, encoding.lower(t))) |
1100 continue | 1186 continue |
1101 elif r == 0: # yes | 1187 elif r == 0: # yes |
1102 ret = True | 1188 ret = True |
1103 elif r == 1: # no | 1189 elif r == 1: # no |
1104 ret = False | 1190 ret = False |
1105 elif r == 2: # Edit patch | 1191 elif r == 2: # Edit patch |
1106 if chunk is None: | 1192 if chunk is None: |
1107 ui.write(_('cannot edit patch for whole file')) | 1193 ui.write(_('cannot edit patch for whole file')) |
1108 ui.write("\n") | 1194 ui.write("\n") |
1109 continue | 1195 continue |
1110 if chunk.header.binary(): | 1196 if chunk.header.binary(): |
1111 ui.write(_('cannot edit patch for binary file')) | 1197 ui.write(_('cannot edit patch for binary file')) |
1112 ui.write("\n") | 1198 ui.write("\n") |
1113 continue | 1199 continue |
1114 # Patch comment based on the Git one (based on comment at end of | 1200 # Patch comment based on the Git one (based on comment at end of |
1115 # https://mercurial-scm.org/wiki/RecordExtension) | 1201 # https://mercurial-scm.org/wiki/RecordExtension) |
1116 phelp = '---' + _(""" | 1202 phelp = '---' + _( |
1203 """ | |
1117 To remove '-' lines, make them ' ' lines (context). | 1204 To remove '-' lines, make them ' ' lines (context). |
1118 To remove '+' lines, delete them. | 1205 To remove '+' lines, delete them. |
1119 Lines starting with # will be removed from the patch. | 1206 Lines starting with # will be removed from the patch. |
1120 | 1207 |
1121 If the patch applies cleanly, the edited hunk will immediately be | 1208 If the patch applies cleanly, the edited hunk will immediately be |
1122 added to the record list. If it does not apply cleanly, a rejects | 1209 added to the record list. If it does not apply cleanly, a rejects |
1123 file will be generated: you can use that when you try again. If | 1210 file will be generated: you can use that when you try again. If |
1124 all lines of the hunk are removed, then the edit is aborted and | 1211 all lines of the hunk are removed, then the edit is aborted and |
1125 the hunk is left unchanged. | 1212 the hunk is left unchanged. |
1126 """) | 1213 """ |
1127 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-", | 1214 ) |
1128 suffix=".diff") | 1215 (patchfd, patchfn) = pycompat.mkstemp( |
1216 prefix="hg-editor-", suffix=".diff" | |
1217 ) | |
1129 ncpatchfp = None | 1218 ncpatchfp = None |
1130 try: | 1219 try: |
1131 # Write the initial patch | 1220 # Write the initial patch |
1132 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb')) | 1221 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb')) |
1133 chunk.header.write(f) | 1222 chunk.header.write(f) |
1134 chunk.write(f) | 1223 chunk.write(f) |
1135 f.write(''.join(['# ' + i + '\n' | 1224 f.write( |
1136 for i in phelp.splitlines()])) | 1225 ''.join(['# ' + i + '\n' for i in phelp.splitlines()]) |
1226 ) | |
1137 f.close() | 1227 f.close() |
1138 # Start the editor and wait for it to complete | 1228 # Start the editor and wait for it to complete |
1139 editor = ui.geteditor() | 1229 editor = ui.geteditor() |
1140 ret = ui.system("%s \"%s\"" % (editor, patchfn), | 1230 ret = ui.system( |
1141 environ={'HGUSER': ui.username()}, | 1231 "%s \"%s\"" % (editor, patchfn), |
1142 blockedtag='filterpatch') | 1232 environ={'HGUSER': ui.username()}, |
1233 blockedtag='filterpatch', | |
1234 ) | |
1143 if ret != 0: | 1235 if ret != 0: |
1144 ui.warn(_("editor exited with exit code %d\n") % ret) | 1236 ui.warn(_("editor exited with exit code %d\n") % ret) |
1145 continue | 1237 continue |
1146 # Remove comment lines | 1238 # Remove comment lines |
1147 patchfp = open(patchfn, r'rb') | 1239 patchfp = open(patchfn, r'rb') |
1157 os.unlink(patchfn) | 1249 os.unlink(patchfn) |
1158 del ncpatchfp | 1250 del ncpatchfp |
1159 # Signal that the chunk shouldn't be applied as-is, but | 1251 # Signal that the chunk shouldn't be applied as-is, but |
1160 # provide the new patch to be used instead. | 1252 # provide the new patch to be used instead. |
1161 ret = False | 1253 ret = False |
1162 elif r == 3: # Skip | 1254 elif r == 3: # Skip |
1163 ret = skipfile = False | 1255 ret = skipfile = False |
1164 elif r == 4: # file (Record remaining) | 1256 elif r == 4: # file (Record remaining) |
1165 ret = skipfile = True | 1257 ret = skipfile = True |
1166 elif r == 5: # done, skip remaining | 1258 elif r == 5: # done, skip remaining |
1167 ret = skipall = False | 1259 ret = skipall = False |
1168 elif r == 6: # all | 1260 elif r == 6: # all |
1169 ret = skipall = True | 1261 ret = skipall = True |
1170 elif r == 7: # quit | 1262 elif r == 7: # quit |
1171 raise error.Abort(_('user quit')) | 1263 raise error.Abort(_('user quit')) |
1172 return ret, skipfile, skipall, newpatches | 1264 return ret, skipfile, skipall, newpatches |
1173 | 1265 |
1174 seen = set() | 1266 seen = set() |
1175 applied = {} # 'filename' -> [] of chunks | 1267 applied = {} # 'filename' -> [] of chunks |
1176 skipfile, skipall = None, None | 1268 skipfile, skipall = None, None |
1177 pos, total = 1, sum(len(h.hunks) for h in headers) | 1269 pos, total = 1, sum(len(h.hunks) for h in headers) |
1178 for h in headers: | 1270 for h in headers: |
1179 pos += len(h.hunks) | 1271 pos += len(h.hunks) |
1180 skipfile = None | 1272 skipfile = None |
1184 continue | 1276 continue |
1185 seen.add(hdr) | 1277 seen.add(hdr) |
1186 if skipall is None: | 1278 if skipall is None: |
1187 h.pretty(ui) | 1279 h.pretty(ui) |
1188 files = h.files() | 1280 files = h.files() |
1189 msg = (_('examine changes to %s?') % | 1281 msg = _('examine changes to %s?') % _(' and ').join( |
1190 _(' and ').join("'%s'" % f for f in files)) | 1282 "'%s'" % f for f in files |
1283 ) | |
1191 if all(match.exact(f) for f in files): | 1284 if all(match.exact(f) for f in files): |
1192 r, skipall, np = True, None, None | 1285 r, skipall, np = True, None, None |
1193 else: | 1286 else: |
1194 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None) | 1287 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None) |
1195 if not r: | 1288 if not r: |
1203 chunk.pretty(ui) | 1296 chunk.pretty(ui) |
1204 if total == 1: | 1297 if total == 1: |
1205 msg = messages['single'][operation] % chunk.filename() | 1298 msg = messages['single'][operation] % chunk.filename() |
1206 else: | 1299 else: |
1207 idx = pos - len(h.hunks) + i | 1300 idx = pos - len(h.hunks) + i |
1208 msg = messages['multiple'][operation] % (idx, total, | 1301 msg = messages['multiple'][operation] % ( |
1209 chunk.filename()) | 1302 idx, |
1210 r, skipfile, skipall, newpatches = prompt(skipfile, | 1303 total, |
1211 skipall, msg, chunk) | 1304 chunk.filename(), |
1305 ) | |
1306 r, skipfile, skipall, newpatches = prompt( | |
1307 skipfile, skipall, msg, chunk | |
1308 ) | |
1212 if r: | 1309 if r: |
1213 if fixoffset: | 1310 if fixoffset: |
1214 chunk = copy.copy(chunk) | 1311 chunk = copy.copy(chunk) |
1215 chunk.toline += fixoffset | 1312 chunk.toline += fixoffset |
1216 applied[chunk.filename()].append(chunk) | 1313 applied[chunk.filename()].append(chunk) |
1220 if fixoffset: | 1317 if fixoffset: |
1221 newhunk.toline += fixoffset | 1318 newhunk.toline += fixoffset |
1222 applied[newhunk.filename()].append(newhunk) | 1319 applied[newhunk.filename()].append(newhunk) |
1223 else: | 1320 else: |
1224 fixoffset += chunk.removed - chunk.added | 1321 fixoffset += chunk.removed - chunk.added |
1225 return (sum([h for h in applied.itervalues() | 1322 return ( |
1226 if h[0].special() or len(h) > 1], []), {}) | 1323 sum( |
1324 [h for h in applied.itervalues() if h[0].special() or len(h) > 1], | |
1325 [], | |
1326 ), | |
1327 {}, | |
1328 ) | |
1329 | |
1330 | |
1227 class hunk(object): | 1331 class hunk(object): |
1228 def __init__(self, desc, num, lr, context): | 1332 def __init__(self, desc, num, lr, context): |
1229 self.number = num | 1333 self.number = num |
1230 self.desc = desc | 1334 self.desc = desc |
1231 self.hunk = [desc] | 1335 self.hunk = [desc] |
1277 else: | 1381 else: |
1278 self.lenb = int(self.lenb) | 1382 self.lenb = int(self.lenb) |
1279 self.starta = int(self.starta) | 1383 self.starta = int(self.starta) |
1280 self.startb = int(self.startb) | 1384 self.startb = int(self.startb) |
1281 try: | 1385 try: |
1282 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb, | 1386 diffhelper.addlines( |
1283 self.a, self.b) | 1387 lr, self.hunk, self.lena, self.lenb, self.a, self.b |
1388 ) | |
1284 except error.ParseError as e: | 1389 except error.ParseError as e: |
1285 raise PatchError(_("bad hunk #%d: %s") % (self.number, e)) | 1390 raise PatchError(_("bad hunk #%d: %s") % (self.number, e)) |
1286 # if we hit eof before finishing out the hunk, the last line will | 1391 # if we hit eof before finishing out the hunk, the last line will |
1287 # be zero length. Lets try to fix it up. | 1392 # be zero length. Lets try to fix it up. |
1288 while len(self.hunk[-1]) == 0: | 1393 while len(self.hunk[-1]) == 0: |
1315 if l.startswith('- ') or l.startswith('! '): | 1420 if l.startswith('- ') or l.startswith('! '): |
1316 u = '-' + s | 1421 u = '-' + s |
1317 elif l.startswith(' '): | 1422 elif l.startswith(' '): |
1318 u = ' ' + s | 1423 u = ' ' + s |
1319 else: | 1424 else: |
1320 raise PatchError(_("bad hunk #%d old text line %d") % | 1425 raise PatchError( |
1321 (self.number, x)) | 1426 _("bad hunk #%d old text line %d") % (self.number, x) |
1427 ) | |
1322 self.a.append(u) | 1428 self.a.append(u) |
1323 self.hunk.append(u) | 1429 self.hunk.append(u) |
1324 | 1430 |
1325 l = lr.readline() | 1431 l = lr.readline() |
1326 if l.startswith(br'\ '): | 1432 if l.startswith(br'\ '): |
1361 elif len(self.b) == 0: | 1467 elif len(self.b) == 0: |
1362 # line deletions, new block is empty | 1468 # line deletions, new block is empty |
1363 lr.push(l) | 1469 lr.push(l) |
1364 break | 1470 break |
1365 else: | 1471 else: |
1366 raise PatchError(_("bad hunk #%d old text line %d") % | 1472 raise PatchError( |
1367 (self.number, x)) | 1473 _("bad hunk #%d old text line %d") % (self.number, x) |
1474 ) | |
1368 self.b.append(s) | 1475 self.b.append(s) |
1369 while True: | 1476 while True: |
1370 if hunki >= len(self.hunk): | 1477 if hunki >= len(self.hunk): |
1371 h = "" | 1478 h = "" |
1372 else: | 1479 else: |
1389 # this happens when lines were only deleted from the hunk | 1496 # this happens when lines were only deleted from the hunk |
1390 for x in self.hunk: | 1497 for x in self.hunk: |
1391 if x.startswith('+') or x.startswith(' '): | 1498 if x.startswith('+') or x.startswith(' '): |
1392 self.b.append(x[1:]) | 1499 self.b.append(x[1:]) |
1393 # @@ -start,len +start,len @@ | 1500 # @@ -start,len +start,len @@ |
1394 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena, | 1501 self.desc = "@@ -%d,%d +%d,%d @@\n" % ( |
1395 self.startb, self.lenb) | 1502 self.starta, |
1503 self.lena, | |
1504 self.startb, | |
1505 self.lenb, | |
1506 ) | |
1396 self.hunk[0] = self.desc | 1507 self.hunk[0] = self.desc |
1397 self._fixnewline(lr) | 1508 self._fixnewline(lr) |
1398 | 1509 |
1399 def _fixnewline(self, lr): | 1510 def _fixnewline(self, lr): |
1400 l = lr.readline() | 1511 l = lr.readline() |
1428 else: | 1539 else: |
1429 break | 1540 break |
1430 | 1541 |
1431 bot = min(fuzz, bot) | 1542 bot = min(fuzz, bot) |
1432 top = min(fuzz, top) | 1543 top = min(fuzz, top) |
1433 return old[top:len(old) - bot], new[top:len(new) - bot], top | 1544 return old[top : len(old) - bot], new[top : len(new) - bot], top |
1434 return old, new, 0 | 1545 return old, new, 0 |
1435 | 1546 |
1436 def fuzzit(self, fuzz, toponly): | 1547 def fuzzit(self, fuzz, toponly): |
1437 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly) | 1548 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly) |
1438 oldstart = self.starta + top | 1549 oldstart = self.starta + top |
1442 oldstart -= 1 | 1553 oldstart -= 1 |
1443 if self.lenb and newstart > 0: | 1554 if self.lenb and newstart > 0: |
1444 newstart -= 1 | 1555 newstart -= 1 |
1445 return old, oldstart, new, newstart | 1556 return old, oldstart, new, newstart |
1446 | 1557 |
1558 | |
1447 class binhunk(object): | 1559 class binhunk(object): |
1448 'A binary patch file.' | 1560 'A binary patch file.' |
1561 | |
1449 def __init__(self, lr, fname): | 1562 def __init__(self, lr, fname): |
1450 self.text = None | 1563 self.text = None |
1451 self.delta = False | 1564 self.delta = False |
1452 self.hunk = ['GIT binary patch\n'] | 1565 self.hunk = ['GIT binary patch\n'] |
1453 self._fname = fname | 1566 self._fname = fname |
1468 return l.rstrip('\r\n') | 1581 return l.rstrip('\r\n') |
1469 | 1582 |
1470 while True: | 1583 while True: |
1471 line = getline(lr, self.hunk) | 1584 line = getline(lr, self.hunk) |
1472 if not line: | 1585 if not line: |
1473 raise PatchError(_('could not extract "%s" binary data') | 1586 raise PatchError( |
1474 % self._fname) | 1587 _('could not extract "%s" binary data') % self._fname |
1588 ) | |
1475 if line.startswith('literal '): | 1589 if line.startswith('literal '): |
1476 size = int(line[8:].rstrip()) | 1590 size = int(line[8:].rstrip()) |
1477 break | 1591 break |
1478 if line.startswith('delta '): | 1592 if line.startswith('delta '): |
1479 size = int(line[6:].rstrip()) | 1593 size = int(line[6:].rstrip()) |
1488 else: | 1602 else: |
1489 l = ord(l) - ord('a') + 27 | 1603 l = ord(l) - ord('a') + 27 |
1490 try: | 1604 try: |
1491 dec.append(util.b85decode(line[1:])[:l]) | 1605 dec.append(util.b85decode(line[1:])[:l]) |
1492 except ValueError as e: | 1606 except ValueError as e: |
1493 raise PatchError(_('could not decode "%s" binary patch: %s') | 1607 raise PatchError( |
1494 % (self._fname, stringutil.forcebytestr(e))) | 1608 _('could not decode "%s" binary patch: %s') |
1609 % (self._fname, stringutil.forcebytestr(e)) | |
1610 ) | |
1495 line = getline(lr, self.hunk) | 1611 line = getline(lr, self.hunk) |
1496 text = zlib.decompress(''.join(dec)) | 1612 text = zlib.decompress(''.join(dec)) |
1497 if len(text) != size: | 1613 if len(text) != size: |
1498 raise PatchError(_('"%s" length is %d bytes, should be %d') | 1614 raise PatchError( |
1499 % (self._fname, len(text), size)) | 1615 _('"%s" length is %d bytes, should be %d') |
1616 % (self._fname, len(text), size) | |
1617 ) | |
1500 self.text = text | 1618 self.text = text |
1619 | |
1501 | 1620 |
1502 def parsefilename(str): | 1621 def parsefilename(str): |
1503 # --- filename \t|space stuff | 1622 # --- filename \t|space stuff |
1504 s = str[4:].rstrip('\r\n') | 1623 s = str[4:].rstrip('\r\n') |
1505 i = s.find('\t') | 1624 i = s.find('\t') |
1506 if i < 0: | 1625 if i < 0: |
1507 i = s.find(' ') | 1626 i = s.find(' ') |
1508 if i < 0: | 1627 if i < 0: |
1509 return s | 1628 return s |
1510 return s[:i] | 1629 return s[:i] |
1630 | |
1511 | 1631 |
1512 def reversehunks(hunks): | 1632 def reversehunks(hunks): |
1513 '''reverse the signs in the hunks given as argument | 1633 '''reverse the signs in the hunks given as argument |
1514 | 1634 |
1515 This function operates on hunks coming out of patch.filterpatch, that is | 1635 This function operates on hunks coming out of patch.filterpatch, that is |
1569 for c in hunks: | 1689 for c in hunks: |
1570 if util.safehasattr(c, 'reversehunk'): | 1690 if util.safehasattr(c, 'reversehunk'): |
1571 c = c.reversehunk() | 1691 c = c.reversehunk() |
1572 newhunks.append(c) | 1692 newhunks.append(c) |
1573 return newhunks | 1693 return newhunks |
1694 | |
1574 | 1695 |
1575 def parsepatch(originalchunks, maxcontext=None): | 1696 def parsepatch(originalchunks, maxcontext=None): |
1576 """patch -> [] of headers -> [] of hunks | 1697 """patch -> [] of headers -> [] of hunks |
1577 | 1698 |
1578 If maxcontext is not None, trim context lines if necessary. | 1699 If maxcontext is not None, trim context lines if necessary. |
1613 7 | 1734 7 |
1614 @@ -8,1 +9,2 @@ | 1735 @@ -8,1 +9,2 @@ |
1615 8 | 1736 8 |
1616 +9 | 1737 +9 |
1617 """ | 1738 """ |
1739 | |
1618 class parser(object): | 1740 class parser(object): |
1619 """patch parsing state machine""" | 1741 """patch parsing state machine""" |
1742 | |
1620 def __init__(self): | 1743 def __init__(self): |
1621 self.fromline = 0 | 1744 self.fromline = 0 |
1622 self.toline = 0 | 1745 self.toline = 0 |
1623 self.proc = '' | 1746 self.proc = '' |
1624 self.header = None | 1747 self.header = None |
1634 self.toline = int(tostart) | 1757 self.toline = int(tostart) |
1635 self.proc = proc | 1758 self.proc = proc |
1636 | 1759 |
1637 def addcontext(self, context): | 1760 def addcontext(self, context): |
1638 if self.hunk: | 1761 if self.hunk: |
1639 h = recordhunk(self.header, self.fromline, self.toline, | 1762 h = recordhunk( |
1640 self.proc, self.before, self.hunk, context, maxcontext) | 1763 self.header, |
1764 self.fromline, | |
1765 self.toline, | |
1766 self.proc, | |
1767 self.before, | |
1768 self.hunk, | |
1769 context, | |
1770 maxcontext, | |
1771 ) | |
1641 self.header.hunks.append(h) | 1772 self.header.hunks.append(h) |
1642 self.fromline += len(self.before) + h.removed | 1773 self.fromline += len(self.before) + h.removed |
1643 self.toline += len(self.before) + h.added | 1774 self.toline += len(self.before) + h.added |
1644 self.before = [] | 1775 self.before = [] |
1645 self.hunk = [] | 1776 self.hunk = [] |
1658 h = header(hdr) | 1789 h = header(hdr) |
1659 self.headers.append(h) | 1790 self.headers.append(h) |
1660 self.header = h | 1791 self.header = h |
1661 | 1792 |
1662 def addother(self, line): | 1793 def addother(self, line): |
1663 pass # 'other' lines are ignored | 1794 pass # 'other' lines are ignored |
1664 | 1795 |
1665 def finished(self): | 1796 def finished(self): |
1666 self.addcontext([]) | 1797 self.addcontext([]) |
1667 return self.headers | 1798 return self.headers |
1668 | 1799 |
1669 transitions = { | 1800 transitions = { |
1670 'file': {'context': addcontext, | 1801 'file': { |
1671 'file': newfile, | 1802 'context': addcontext, |
1672 'hunk': addhunk, | 1803 'file': newfile, |
1673 'range': addrange}, | 1804 'hunk': addhunk, |
1674 'context': {'file': newfile, | 1805 'range': addrange, |
1675 'hunk': addhunk, | 1806 }, |
1676 'range': addrange, | 1807 'context': { |
1677 'other': addother}, | 1808 'file': newfile, |
1678 'hunk': {'context': addcontext, | 1809 'hunk': addhunk, |
1679 'file': newfile, | 1810 'range': addrange, |
1680 'range': addrange}, | 1811 'other': addother, |
1681 'range': {'context': addcontext, | 1812 }, |
1682 'hunk': addhunk}, | 1813 'hunk': {'context': addcontext, 'file': newfile, 'range': addrange}, |
1814 'range': {'context': addcontext, 'hunk': addhunk}, | |
1683 'other': {'other': addother}, | 1815 'other': {'other': addother}, |
1684 } | 1816 } |
1685 | 1817 |
1686 p = parser() | 1818 p = parser() |
1687 fp = stringio() | 1819 fp = stringio() |
1688 fp.write(''.join(originalchunks)) | 1820 fp.write(''.join(originalchunks)) |
1689 fp.seek(0) | 1821 fp.seek(0) |
1691 state = 'context' | 1823 state = 'context' |
1692 for newstate, data in scanpatch(fp): | 1824 for newstate, data in scanpatch(fp): |
1693 try: | 1825 try: |
1694 p.transitions[state][newstate](p, data) | 1826 p.transitions[state][newstate](p, data) |
1695 except KeyError: | 1827 except KeyError: |
1696 raise PatchError('unhandled transition: %s -> %s' % | 1828 raise PatchError( |
1697 (state, newstate)) | 1829 'unhandled transition: %s -> %s' % (state, newstate) |
1830 ) | |
1698 state = newstate | 1831 state = newstate |
1699 del fp | 1832 del fp |
1700 return p.finished() | 1833 return p.finished() |
1834 | |
1701 | 1835 |
1702 def pathtransform(path, strip, prefix): | 1836 def pathtransform(path, strip, prefix): |
1703 '''turn a path from a patch into a path suitable for the repository | 1837 '''turn a path from a patch into a path suitable for the repository |
1704 | 1838 |
1705 prefix, if not empty, is expected to be normalized with a / at the end. | 1839 prefix, if not empty, is expected to be normalized with a / at the end. |
1726 return '', prefix + path.rstrip() | 1860 return '', prefix + path.rstrip() |
1727 count = strip | 1861 count = strip |
1728 while count > 0: | 1862 while count > 0: |
1729 i = path.find('/', i) | 1863 i = path.find('/', i) |
1730 if i == -1: | 1864 if i == -1: |
1731 raise PatchError(_("unable to strip away %d of %d dirs from %s") % | 1865 raise PatchError( |
1732 (count, strip, path)) | 1866 _("unable to strip away %d of %d dirs from %s") |
1867 % (count, strip, path) | |
1868 ) | |
1733 i += 1 | 1869 i += 1 |
1734 # consume '//' in the path | 1870 # consume '//' in the path |
1735 while i < pathlen - 1 and path[i:i + 1] == '/': | 1871 while i < pathlen - 1 and path[i : i + 1] == '/': |
1736 i += 1 | 1872 i += 1 |
1737 count -= 1 | 1873 count -= 1 |
1738 return path[:i].lstrip(), prefix + path[i:].rstrip() | 1874 return path[:i].lstrip(), prefix + path[i:].rstrip() |
1875 | |
1739 | 1876 |
1740 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix): | 1877 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix): |
1741 nulla = afile_orig == "/dev/null" | 1878 nulla = afile_orig == "/dev/null" |
1742 nullb = bfile_orig == "/dev/null" | 1879 nullb = bfile_orig == "/dev/null" |
1743 create = nulla and hunk.starta == 0 and hunk.lena == 0 | 1880 create = nulla and hunk.starta == 0 and hunk.lena == 0 |
1751 goodb = not nullb and backend.exists(bfile) | 1888 goodb = not nullb and backend.exists(bfile) |
1752 missing = not goodb and not gooda and not create | 1889 missing = not goodb and not gooda and not create |
1753 | 1890 |
1754 # some diff programs apparently produce patches where the afile is | 1891 # some diff programs apparently produce patches where the afile is |
1755 # not /dev/null, but afile starts with bfile | 1892 # not /dev/null, but afile starts with bfile |
1756 abasedir = afile[:afile.rfind('/') + 1] | 1893 abasedir = afile[: afile.rfind('/') + 1] |
1757 bbasedir = bfile[:bfile.rfind('/') + 1] | 1894 bbasedir = bfile[: bfile.rfind('/') + 1] |
1758 if (missing and abasedir == bbasedir and afile.startswith(bfile) | 1895 if ( |
1759 and hunk.starta == 0 and hunk.lena == 0): | 1896 missing |
1897 and abasedir == bbasedir | |
1898 and afile.startswith(bfile) | |
1899 and hunk.starta == 0 | |
1900 and hunk.lena == 0 | |
1901 ): | |
1760 create = True | 1902 create = True |
1761 missing = False | 1903 missing = False |
1762 | 1904 |
1763 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the | 1905 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the |
1764 # diff is between a file and its backup. In this case, the original | 1906 # diff is between a file and its backup. In this case, the original |
1765 # file should be patched (see original mpatch code). | 1907 # file should be patched (see original mpatch code). |
1766 isbackup = (abase == bbase and bfile.startswith(afile)) | 1908 isbackup = abase == bbase and bfile.startswith(afile) |
1767 fname = None | 1909 fname = None |
1768 if not missing: | 1910 if not missing: |
1769 if gooda and goodb: | 1911 if gooda and goodb: |
1770 if isbackup: | 1912 if isbackup: |
1771 fname = afile | 1913 fname = afile |
1790 gp.op = 'ADD' | 1932 gp.op = 'ADD' |
1791 elif remove: | 1933 elif remove: |
1792 gp.op = 'DELETE' | 1934 gp.op = 'DELETE' |
1793 return gp | 1935 return gp |
1794 | 1936 |
1937 | |
1795 def scanpatch(fp): | 1938 def scanpatch(fp): |
1796 """like patch.iterhunks, but yield different events | 1939 """like patch.iterhunks, but yield different events |
1797 | 1940 |
1798 - ('file', [header_lines + fromfile + tofile]) | 1941 - ('file', [header_lines + fromfile + tofile]) |
1799 - ('context', [context_lines]) | 1942 - ('context', [context_lines]) |
1814 break | 1957 break |
1815 return lines | 1958 return lines |
1816 | 1959 |
1817 for line in iter(lr.readline, ''): | 1960 for line in iter(lr.readline, ''): |
1818 if line.startswith('diff --git a/') or line.startswith('diff -r '): | 1961 if line.startswith('diff --git a/') or line.startswith('diff -r '): |
1962 | |
1819 def notheader(line): | 1963 def notheader(line): |
1820 s = line.split(None, 1) | 1964 s = line.split(None, 1) |
1821 return not s or s[0] not in ('---', 'diff') | 1965 return not s or s[0] not in ('---', 'diff') |
1966 | |
1822 header = scanwhile(line, notheader) | 1967 header = scanwhile(line, notheader) |
1823 fromfile = lr.readline() | 1968 fromfile = lr.readline() |
1824 if fromfile.startswith('---'): | 1969 if fromfile.startswith('---'): |
1825 tofile = lr.readline() | 1970 tofile = lr.readline() |
1826 header += [fromfile, tofile] | 1971 header += [fromfile, tofile] |
1838 if m: | 1983 if m: |
1839 yield 'range', m.groups() | 1984 yield 'range', m.groups() |
1840 else: | 1985 else: |
1841 yield 'other', line | 1986 yield 'other', line |
1842 | 1987 |
1988 | |
1843 def scangitpatch(lr, firstline): | 1989 def scangitpatch(lr, firstline): |
1844 """ | 1990 """ |
1845 Git patches can emit: | 1991 Git patches can emit: |
1846 - rename a to b | 1992 - rename a to b |
1847 - change b | 1993 - change b |
1864 gitlr.push(firstline) | 2010 gitlr.push(firstline) |
1865 gitpatches = readgitpatch(gitlr) | 2011 gitpatches = readgitpatch(gitlr) |
1866 fp.seek(pos) | 2012 fp.seek(pos) |
1867 return gitpatches | 2013 return gitpatches |
1868 | 2014 |
2015 | |
1869 def iterhunks(fp): | 2016 def iterhunks(fp): |
1870 """Read a patch and yield the following events: | 2017 """Read a patch and yield the following events: |
1871 - ("file", afile, bfile, firsthunk): select a new target file. | 2018 - ("file", afile, bfile, firsthunk): select a new target file. |
1872 - ("hunk", hunk): a new hunk is ready to be applied, follows a | 2019 - ("hunk", hunk): a new hunk is ready to be applied, follows a |
1873 "file" event. | 2020 "file" event. |
1888 | 2035 |
1889 for x in iter(lr.readline, ''): | 2036 for x in iter(lr.readline, ''): |
1890 if state == BFILE and ( | 2037 if state == BFILE and ( |
1891 (not context and x.startswith('@')) | 2038 (not context and x.startswith('@')) |
1892 or (context is not False and x.startswith('***************')) | 2039 or (context is not False and x.startswith('***************')) |
1893 or x.startswith('GIT binary patch')): | 2040 or x.startswith('GIT binary patch') |
2041 ): | |
1894 gp = None | 2042 gp = None |
1895 if (gitpatches and | 2043 if gitpatches and gitpatches[-1].ispatching(afile, bfile): |
1896 gitpatches[-1].ispatching(afile, bfile)): | |
1897 gp = gitpatches.pop() | 2044 gp = gitpatches.pop() |
1898 if x.startswith('GIT binary patch'): | 2045 if x.startswith('GIT binary patch'): |
1899 h = binhunk(lr, gp.path) | 2046 h = binhunk(lr, gp.path) |
1900 else: | 2047 else: |
1901 if context is None and x.startswith('***************'): | 2048 if context is None and x.startswith('***************'): |
1911 if not m: | 2058 if not m: |
1912 continue | 2059 continue |
1913 if gitpatches is None: | 2060 if gitpatches is None: |
1914 # scan whole input for git metadata | 2061 # scan whole input for git metadata |
1915 gitpatches = scangitpatch(lr, x) | 2062 gitpatches = scangitpatch(lr, x) |
1916 yield 'git', [g.copy() for g in gitpatches | 2063 yield 'git', [ |
1917 if g.op in ('COPY', 'RENAME')] | 2064 g.copy() for g in gitpatches if g.op in ('COPY', 'RENAME') |
2065 ] | |
1918 gitpatches.reverse() | 2066 gitpatches.reverse() |
1919 afile = 'a/' + m.group(1) | 2067 afile = 'a/' + m.group(1) |
1920 bfile = 'b/' + m.group(2) | 2068 bfile = 'b/' + m.group(2) |
1921 while gitpatches and not gitpatches[-1].ispatching(afile, bfile): | 2069 while gitpatches and not gitpatches[-1].ispatching(afile, bfile): |
1922 gp = gitpatches.pop() | 2070 gp = gitpatches.pop() |
1923 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) | 2071 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) |
1924 if not gitpatches: | 2072 if not gitpatches: |
1925 raise PatchError(_('failed to synchronize metadata for "%s"') | 2073 raise PatchError( |
1926 % afile[2:]) | 2074 _('failed to synchronize metadata for "%s"') % afile[2:] |
2075 ) | |
1927 newfile = True | 2076 newfile = True |
1928 elif x.startswith('---'): | 2077 elif x.startswith('---'): |
1929 # check for a unified diff | 2078 # check for a unified diff |
1930 l2 = lr.readline() | 2079 l2 = lr.readline() |
1931 if not l2.startswith('+++'): | 2080 if not l2.startswith('+++'): |
1959 | 2108 |
1960 while gitpatches: | 2109 while gitpatches: |
1961 gp = gitpatches.pop() | 2110 gp = gitpatches.pop() |
1962 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) | 2111 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy()) |
1963 | 2112 |
2113 | |
1964 def applybindelta(binchunk, data): | 2114 def applybindelta(binchunk, data): |
1965 """Apply a binary delta hunk | 2115 """Apply a binary delta hunk |
1966 The algorithm used is the algorithm from git's patch-delta.c | 2116 The algorithm used is the algorithm from git's patch-delta.c |
1967 """ | 2117 """ |
2118 | |
1968 def deltahead(binchunk): | 2119 def deltahead(binchunk): |
1969 i = 0 | 2120 i = 0 |
1970 for c in pycompat.bytestr(binchunk): | 2121 for c in pycompat.bytestr(binchunk): |
1971 i += 1 | 2122 i += 1 |
1972 if not (ord(c) & 0x80): | 2123 if not (ord(c) & 0x80): |
1973 return i | 2124 return i |
1974 return i | 2125 return i |
2126 | |
1975 out = "" | 2127 out = "" |
1976 s = deltahead(binchunk) | 2128 s = deltahead(binchunk) |
1977 binchunk = binchunk[s:] | 2129 binchunk = binchunk[s:] |
1978 s = deltahead(binchunk) | 2130 s = deltahead(binchunk) |
1979 binchunk = binchunk[s:] | 2131 binchunk = binchunk[s:] |
1980 i = 0 | 2132 i = 0 |
1981 while i < len(binchunk): | 2133 while i < len(binchunk): |
1982 cmd = ord(binchunk[i:i + 1]) | 2134 cmd = ord(binchunk[i : i + 1]) |
1983 i += 1 | 2135 i += 1 |
1984 if (cmd & 0x80): | 2136 if cmd & 0x80: |
1985 offset = 0 | 2137 offset = 0 |
1986 size = 0 | 2138 size = 0 |
1987 if (cmd & 0x01): | 2139 if cmd & 0x01: |
1988 offset = ord(binchunk[i:i + 1]) | 2140 offset = ord(binchunk[i : i + 1]) |
1989 i += 1 | 2141 i += 1 |
1990 if (cmd & 0x02): | 2142 if cmd & 0x02: |
1991 offset |= ord(binchunk[i:i + 1]) << 8 | 2143 offset |= ord(binchunk[i : i + 1]) << 8 |
1992 i += 1 | 2144 i += 1 |
1993 if (cmd & 0x04): | 2145 if cmd & 0x04: |
1994 offset |= ord(binchunk[i:i + 1]) << 16 | 2146 offset |= ord(binchunk[i : i + 1]) << 16 |
1995 i += 1 | 2147 i += 1 |
1996 if (cmd & 0x08): | 2148 if cmd & 0x08: |
1997 offset |= ord(binchunk[i:i + 1]) << 24 | 2149 offset |= ord(binchunk[i : i + 1]) << 24 |
1998 i += 1 | 2150 i += 1 |
1999 if (cmd & 0x10): | 2151 if cmd & 0x10: |
2000 size = ord(binchunk[i:i + 1]) | 2152 size = ord(binchunk[i : i + 1]) |
2001 i += 1 | 2153 i += 1 |
2002 if (cmd & 0x20): | 2154 if cmd & 0x20: |
2003 size |= ord(binchunk[i:i + 1]) << 8 | 2155 size |= ord(binchunk[i : i + 1]) << 8 |
2004 i += 1 | 2156 i += 1 |
2005 if (cmd & 0x40): | 2157 if cmd & 0x40: |
2006 size |= ord(binchunk[i:i + 1]) << 16 | 2158 size |= ord(binchunk[i : i + 1]) << 16 |
2007 i += 1 | 2159 i += 1 |
2008 if size == 0: | 2160 if size == 0: |
2009 size = 0x10000 | 2161 size = 0x10000 |
2010 offset_end = offset + size | 2162 offset_end = offset + size |
2011 out += data[offset:offset_end] | 2163 out += data[offset:offset_end] |
2015 i += cmd | 2167 i += cmd |
2016 else: | 2168 else: |
2017 raise PatchError(_('unexpected delta opcode 0')) | 2169 raise PatchError(_('unexpected delta opcode 0')) |
2018 return out | 2170 return out |
2019 | 2171 |
2172 | |
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and attempt to apply it.

    Returns 0 for a clean application, -1 when any hunks were rejected,
    and 1 when the patch applied with fuzz.

    With eolmode 'strict' both the patch content and the patched files
    are handled in binary mode; any other mode ignores line endings
    while patching and then normalizes them according to 'eolmode'.
    """
    # Thin wrapper: delegate to the generic driver with the default
    # patchfile factory.
    return _applydiff(
        ui, fp, patchfile, backend, store,
        strip=strip, prefix=prefix, eolmode=eolmode,
    )
2193 | |
2032 | 2194 |
2033 def _canonprefix(repo, prefix): | 2195 def _canonprefix(repo, prefix): |
2034 if prefix: | 2196 if prefix: |
2035 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix) | 2197 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix) |
2036 if prefix != '': | 2198 if prefix != '': |
2037 prefix += '/' | 2199 prefix += '/' |
2038 return prefix | 2200 return prefix |
2039 | 2201 |
2040 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='', | 2202 |
2041 eolmode='strict'): | 2203 def _applydiff( |
2204 ui, fp, patcher, backend, store, strip=1, prefix='', eolmode='strict' | |
2205 ): | |
2042 prefix = _canonprefix(backend.repo, prefix) | 2206 prefix = _canonprefix(backend.repo, prefix) |
2207 | |
2043 def pstrip(p): | 2208 def pstrip(p): |
2044 return pathtransform(p, strip - 1, prefix)[1] | 2209 return pathtransform(p, strip - 1, prefix)[1] |
2045 | 2210 |
2046 rejects = 0 | 2211 rejects = 0 |
2047 err = 0 | 2212 err = 0 |
2062 if gp: | 2227 if gp: |
2063 gp.path = pstrip(gp.path) | 2228 gp.path = pstrip(gp.path) |
2064 if gp.oldpath: | 2229 if gp.oldpath: |
2065 gp.oldpath = pstrip(gp.oldpath) | 2230 gp.oldpath = pstrip(gp.oldpath) |
2066 else: | 2231 else: |
2067 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip, | 2232 gp = makepatchmeta( |
2068 prefix) | 2233 backend, afile, bfile, first_hunk, strip, prefix |
2234 ) | |
2069 if gp.op == 'RENAME': | 2235 if gp.op == 'RENAME': |
2070 backend.unlink(gp.oldpath) | 2236 backend.unlink(gp.oldpath) |
2071 if not first_hunk: | 2237 if not first_hunk: |
2072 if gp.op == 'DELETE': | 2238 if gp.op == 'DELETE': |
2073 backend.unlink(gp.path) | 2239 backend.unlink(gp.path) |
2075 data, mode = None, None | 2241 data, mode = None, None |
2076 if gp.op in ('RENAME', 'COPY'): | 2242 if gp.op in ('RENAME', 'COPY'): |
2077 data, mode = store.getfile(gp.oldpath)[:2] | 2243 data, mode = store.getfile(gp.oldpath)[:2] |
2078 if data is None: | 2244 if data is None: |
2079 # This means that the old path does not exist | 2245 # This means that the old path does not exist |
2080 raise PatchError(_("source file '%s' does not exist") | 2246 raise PatchError( |
2081 % gp.oldpath) | 2247 _("source file '%s' does not exist") % gp.oldpath |
2248 ) | |
2082 if gp.mode: | 2249 if gp.mode: |
2083 mode = gp.mode | 2250 mode = gp.mode |
2084 if gp.op == 'ADD': | 2251 if gp.op == 'ADD': |
2085 # Added files without content have no hunk and | 2252 # Added files without content have no hunk and |
2086 # must be created | 2253 # must be created |
2087 data = '' | 2254 data = '' |
2088 if data or mode: | 2255 if data or mode: |
2089 if (gp.op in ('ADD', 'RENAME', 'COPY') | 2256 if gp.op in ('ADD', 'RENAME', 'COPY') and backend.exists( |
2090 and backend.exists(gp.path)): | 2257 gp.path |
2091 raise PatchError(_("cannot create %s: destination " | 2258 ): |
2092 "already exists") % gp.path) | 2259 raise PatchError( |
2260 _("cannot create %s: destination " "already exists") | |
2261 % gp.path | |
2262 ) | |
2093 backend.setfile(gp.path, data, mode, gp.oldpath) | 2263 backend.setfile(gp.path, data, mode, gp.oldpath) |
2094 continue | 2264 continue |
2095 try: | 2265 try: |
2096 current_file = patcher(ui, gp, backend, store, | 2266 current_file = patcher(ui, gp, backend, store, eolmode=eolmode) |
2097 eolmode=eolmode) | |
2098 except PatchError as inst: | 2267 except PatchError as inst: |
2099 ui.warn(str(inst) + '\n') | 2268 ui.warn(str(inst) + '\n') |
2100 current_file = None | 2269 current_file = None |
2101 rejects += 1 | 2270 rejects += 1 |
2102 continue | 2271 continue |
2120 | 2289 |
2121 if rejects: | 2290 if rejects: |
2122 return -1 | 2291 return -1 |
2123 return err | 2292 return err |
2124 | 2293 |
2125 def _externalpatch(ui, repo, patcher, patchname, strip, files, | 2294 |
2126 similarity): | 2295 def _externalpatch(ui, repo, patcher, patchname, strip, files, similarity): |
2127 """use <patcher> to apply <patchname> to the working directory. | 2296 """use <patcher> to apply <patchname> to the working directory. |
2128 returns whether patch was applied with fuzz factor.""" | 2297 returns whether patch was applied with fuzz factor.""" |
2129 | 2298 |
2130 fuzz = False | 2299 fuzz = False |
2131 args = [] | 2300 args = [] |
2132 cwd = repo.root | 2301 cwd = repo.root |
2133 if cwd: | 2302 if cwd: |
2134 args.append('-d %s' % procutil.shellquote(cwd)) | 2303 args.append('-d %s' % procutil.shellquote(cwd)) |
2135 cmd = ('%s %s -p%d < %s' | 2304 cmd = '%s %s -p%d < %s' % ( |
2136 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname))) | 2305 patcher, |
2306 ' '.join(args), | |
2307 strip, | |
2308 procutil.shellquote(patchname), | |
2309 ) | |
2137 ui.debug('Using external patch tool: %s\n' % cmd) | 2310 ui.debug('Using external patch tool: %s\n' % cmd) |
2138 fp = procutil.popen(cmd, 'rb') | 2311 fp = procutil.popen(cmd, 'rb') |
2139 try: | 2312 try: |
2140 for line in util.iterfile(fp): | 2313 for line in util.iterfile(fp): |
2141 line = line.rstrip() | 2314 line = line.rstrip() |
2160 finally: | 2333 finally: |
2161 if files: | 2334 if files: |
2162 scmutil.marktouched(repo, files, similarity) | 2335 scmutil.marktouched(repo, files, similarity) |
2163 code = fp.close() | 2336 code = fp.close() |
2164 if code: | 2337 if code: |
2165 raise PatchError(_("patch command failed: %s") % | 2338 raise PatchError( |
2166 procutil.explainexit(code)) | 2339 _("patch command failed: %s") % procutil.explainexit(code) |
2340 ) | |
2167 return fuzz | 2341 return fuzz |
2168 | 2342 |
2169 def patchbackend(ui, backend, patchobj, strip, prefix, files=None, | 2343 |
2170 eolmode='strict'): | 2344 def patchbackend( |
2345 ui, backend, patchobj, strip, prefix, files=None, eolmode='strict' | |
2346 ): | |
2171 if files is None: | 2347 if files is None: |
2172 files = set() | 2348 files = set() |
2173 if eolmode is None: | 2349 if eolmode is None: |
2174 eolmode = ui.config('patch', 'eol') | 2350 eolmode = ui.config('patch', 'eol') |
2175 if eolmode.lower() not in eolmodes: | 2351 if eolmode.lower() not in eolmodes: |
2180 try: | 2356 try: |
2181 fp = open(patchobj, 'rb') | 2357 fp = open(patchobj, 'rb') |
2182 except TypeError: | 2358 except TypeError: |
2183 fp = patchobj | 2359 fp = patchobj |
2184 try: | 2360 try: |
2185 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix, | 2361 ret = applydiff( |
2186 eolmode=eolmode) | 2362 ui, fp, backend, store, strip=strip, prefix=prefix, eolmode=eolmode |
2363 ) | |
2187 finally: | 2364 finally: |
2188 if fp != patchobj: | 2365 if fp != patchobj: |
2189 fp.close() | 2366 fp.close() |
2190 files.update(backend.close()) | 2367 files.update(backend.close()) |
2191 store.close() | 2368 store.close() |
2192 if ret < 0: | 2369 if ret < 0: |
2193 raise PatchError(_('patch failed to apply')) | 2370 raise PatchError(_('patch failed to apply')) |
2194 return ret > 0 | 2371 return ret > 0 |
2195 | 2372 |
def internalpatch(
    ui,
    repo,
    patchobj,
    strip,
    prefix='',
    files=None,
    eolmode='strict',
    similarity=0,
):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    # Patch through a working-directory backend, then hand off to the
    # common patchbackend() driver.
    wcbackend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, wcbackend, patchobj, strip, prefix, files, eolmode)
2202 | 2388 |
def patchrepo(
    ui, repo, ctx, store, patchobj, strip, prefix, files=None, eolmode='strict'
):
    """Apply <patchobj> through a repobackend built from ctx and store."""
    repobe = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, repobe, patchobj, strip, prefix, files, eolmode)
2207 | 2395 |
def patch(
    ui,
    repo,
    patchname,
    strip=1,
    prefix='',
    files=None,
    eolmode='strict',
    similarity=0,
):
    """Apply <patchname> to the working directory.

    Returns whether the patch was applied with fuzz factor.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    (other modes normalize line endings according to 'eolmode'; see
    patchbackend for the accepted values)

    If the 'ui.patch' config option names an external patch tool, that
    tool is used; otherwise the builtin patcher is invoked.
    """
    if files is None:
        files = set()
    patcher = ui.config('ui', 'patch')
    if not patcher:
        return internalpatch(
            ui, repo, patchname, strip, prefix, files, eolmode, similarity
        )
    return _externalpatch(
        ui, repo, patcher, patchname, strip, files, similarity
    )
2428 | |
2229 | 2429 |
2230 def changedfiles(ui, repo, patchpath, strip=1, prefix=''): | 2430 def changedfiles(ui, repo, patchpath, strip=1, prefix=''): |
2231 backend = fsbackend(ui, repo.root) | 2431 backend = fsbackend(ui, repo.root) |
2232 prefix = _canonprefix(repo, prefix) | 2432 prefix = _canonprefix(repo, prefix) |
2233 with open(patchpath, 'rb') as fp: | 2433 with open(patchpath, 'rb') as fp: |
2236 if state == 'file': | 2436 if state == 'file': |
2237 afile, bfile, first_hunk, gp = values | 2437 afile, bfile, first_hunk, gp = values |
2238 if gp: | 2438 if gp: |
2239 gp.path = pathtransform(gp.path, strip - 1, prefix)[1] | 2439 gp.path = pathtransform(gp.path, strip - 1, prefix)[1] |
2240 if gp.oldpath: | 2440 if gp.oldpath: |
2241 gp.oldpath = pathtransform(gp.oldpath, strip - 1, | 2441 gp.oldpath = pathtransform( |
2242 prefix)[1] | 2442 gp.oldpath, strip - 1, prefix |
2443 )[1] | |
2243 else: | 2444 else: |
2244 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip, | 2445 gp = makepatchmeta( |
2245 prefix) | 2446 backend, afile, bfile, first_hunk, strip, prefix |
2447 ) | |
2246 changed.add(gp.path) | 2448 changed.add(gp.path) |
2247 if gp.op == 'RENAME': | 2449 if gp.op == 'RENAME': |
2248 changed.add(gp.oldpath) | 2450 changed.add(gp.oldpath) |
2249 elif state not in ('hunk', 'git'): | 2451 elif state not in ('hunk', 'git'): |
2250 raise error.Abort(_('unsupported parser state: %s') % state) | 2452 raise error.Abort(_('unsupported parser state: %s') % state) |
2251 return changed | 2453 return changed |
2252 | 2454 |
2455 | |
class GitDiffRequired(Exception):
    """Raised when plain diff output would lose data (flags, binary,
    copies); callers catch it and retry with git-style diffs enabled."""
2458 | |
2255 | 2459 |
# Re-exports: the diff-options factories live in diffutil; they are
# bound here as well, presumably so existing callers of
# patch.diffopts & co. keep working — note 'diffopts' and
# 'diffallopts' name the same function.
diffopts = diffutil.diffallopts
diffallopts = diffutil.diffallopts
difffeatureopts = diffutil.difffeatureopts
2259 | 2463 |
2260 def diff(repo, node1=None, node2=None, match=None, changes=None, | 2464 |
2261 opts=None, losedatafn=None, pathfn=None, copy=None, | 2465 def diff( |
2262 copysourcematch=None, hunksfilterfn=None): | 2466 repo, |
2467 node1=None, | |
2468 node2=None, | |
2469 match=None, | |
2470 changes=None, | |
2471 opts=None, | |
2472 losedatafn=None, | |
2473 pathfn=None, | |
2474 copy=None, | |
2475 copysourcematch=None, | |
2476 hunksfilterfn=None, | |
2477 ): | |
2263 '''yields diff of changes to files between two nodes, or node and | 2478 '''yields diff of changes to files between two nodes, or node and |
2264 working directory. | 2479 working directory. |
2265 | 2480 |
2266 if node1 is None, use first dirstate parent instead. | 2481 if node1 is None, use first dirstate parent instead. |
2267 if node2 is None, compare node1 with working directory. | 2482 if node2 is None, compare node1 with working directory. |
2294 | 2509 |
2295 ctx1 = repo[node1] | 2510 ctx1 = repo[node1] |
2296 ctx2 = repo[node2] | 2511 ctx2 = repo[node2] |
2297 | 2512 |
2298 for fctx1, fctx2, hdr, hunks in diffhunks( | 2513 for fctx1, fctx2, hdr, hunks in diffhunks( |
2299 repo, ctx1=ctx1, ctx2=ctx2, match=match, changes=changes, opts=opts, | 2514 repo, |
2300 losedatafn=losedatafn, pathfn=pathfn, copy=copy, | 2515 ctx1=ctx1, |
2301 copysourcematch=copysourcematch): | 2516 ctx2=ctx2, |
2517 match=match, | |
2518 changes=changes, | |
2519 opts=opts, | |
2520 losedatafn=losedatafn, | |
2521 pathfn=pathfn, | |
2522 copy=copy, | |
2523 copysourcematch=copysourcematch, | |
2524 ): | |
2302 if hunksfilterfn is not None: | 2525 if hunksfilterfn is not None: |
2303 # If the file has been removed, fctx2 is None; but this should | 2526 # If the file has been removed, fctx2 is None; but this should |
2304 # not occur here since we catch removed files early in | 2527 # not occur here since we catch removed files early in |
2305 # logcmdutil.getlinerangerevs() for 'hg log -L'. | 2528 # logcmdutil.getlinerangerevs() for 'hg log -L'. |
2306 assert fctx2 is not None, ( | 2529 assert ( |
2307 'fctx2 unexpectly None in diff hunks filtering') | 2530 fctx2 is not None |
2531 ), 'fctx2 unexpectly None in diff hunks filtering' | |
2308 hunks = hunksfilterfn(fctx2, hunks) | 2532 hunks = hunksfilterfn(fctx2, hunks) |
2309 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), [])) | 2533 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), [])) |
2310 if hdr and (text or len(hdr) > 1): | 2534 if hdr and (text or len(hdr) > 1): |
2311 yield '\n'.join(hdr) + '\n' | 2535 yield '\n'.join(hdr) + '\n' |
2312 if text: | 2536 if text: |
2313 yield text | 2537 yield text |
2314 | 2538 |
2315 def diffhunks(repo, ctx1, ctx2, match=None, changes=None, opts=None, | 2539 |
2316 losedatafn=None, pathfn=None, copy=None, copysourcematch=None): | 2540 def diffhunks( |
2541 repo, | |
2542 ctx1, | |
2543 ctx2, | |
2544 match=None, | |
2545 changes=None, | |
2546 opts=None, | |
2547 losedatafn=None, | |
2548 pathfn=None, | |
2549 copy=None, | |
2550 copysourcematch=None, | |
2551 ): | |
2317 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples | 2552 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples |
2318 where `header` is a list of diff headers and `hunks` is an iterable of | 2553 where `header` is a list of diff headers and `hunks` is an iterable of |
2319 (`hunkrange`, `hunklines`) tuples. | 2554 (`hunkrange`, `hunklines`) tuples. |
2320 | 2555 |
2321 See diff() for the meaning of parameters. | 2556 See diff() for the meaning of parameters. |
2325 opts = mdiff.defaultopts | 2560 opts = mdiff.defaultopts |
2326 | 2561 |
2327 def lrugetfilectx(): | 2562 def lrugetfilectx(): |
2328 cache = {} | 2563 cache = {} |
2329 order = collections.deque() | 2564 order = collections.deque() |
2565 | |
2330 def getfilectx(f, ctx): | 2566 def getfilectx(f, ctx): |
2331 fctx = ctx.filectx(f, filelog=cache.get(f)) | 2567 fctx = ctx.filectx(f, filelog=cache.get(f)) |
2332 if f not in cache: | 2568 if f not in cache: |
2333 if len(cache) > 20: | 2569 if len(cache) > 20: |
2334 del cache[order.popleft()] | 2570 del cache[order.popleft()] |
2335 cache[f] = fctx.filelog() | 2571 cache[f] = fctx.filelog() |
2336 else: | 2572 else: |
2337 order.remove(f) | 2573 order.remove(f) |
2338 order.append(f) | 2574 order.append(f) |
2339 return fctx | 2575 return fctx |
2576 | |
2340 return getfilectx | 2577 return getfilectx |
2578 | |
2341 getfilectx = lrugetfilectx() | 2579 getfilectx = lrugetfilectx() |
2342 | 2580 |
2343 if not changes: | 2581 if not changes: |
2344 changes = ctx1.status(ctx2, match=match) | 2582 changes = ctx1.status(ctx2, match=match) |
2345 modified, added, removed = changes[:3] | 2583 modified, added, removed = changes[:3] |
2359 copy = copies.pathcopies(ctx1, ctx2, match=match) | 2597 copy = copies.pathcopies(ctx1, ctx2, match=match) |
2360 | 2598 |
2361 if copysourcematch: | 2599 if copysourcematch: |
2362 # filter out copies where source side isn't inside the matcher | 2600 # filter out copies where source side isn't inside the matcher |
2363 # (copies.pathcopies() already filtered out the destination) | 2601 # (copies.pathcopies() already filtered out the destination) |
2364 copy = {dst: src for dst, src in copy.iteritems() | 2602 copy = { |
2365 if copysourcematch(src)} | 2603 dst: src for dst, src in copy.iteritems() if copysourcematch(src) |
2604 } | |
2366 | 2605 |
2367 modifiedset = set(modified) | 2606 modifiedset = set(modified) |
2368 addedset = set(added) | 2607 addedset = set(added) |
2369 removedset = set(removed) | 2608 removedset = set(removed) |
2370 for f in modified: | 2609 for f in modified: |
2386 # Files merged in during a merge and then copied/renamed are | 2625 # Files merged in during a merge and then copied/renamed are |
2387 # reported as copies. We want to show them in the diff as additions. | 2626 # reported as copies. We want to show them in the diff as additions. |
2388 del copy[dst] | 2627 del copy[dst] |
2389 | 2628 |
2390 prefetchmatch = scmutil.matchfiles( | 2629 prefetchmatch = scmutil.matchfiles( |
2391 repo, list(modifiedset | addedset | removedset)) | 2630 repo, list(modifiedset | addedset | removedset) |
2631 ) | |
2392 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch) | 2632 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch) |
2393 | 2633 |
2394 def difffn(opts, losedata): | 2634 def difffn(opts, losedata): |
2395 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed, | 2635 return trydiff( |
2396 copy, getfilectx, opts, losedata, pathfn) | 2636 repo, |
2637 revs, | |
2638 ctx1, | |
2639 ctx2, | |
2640 modified, | |
2641 added, | |
2642 removed, | |
2643 copy, | |
2644 getfilectx, | |
2645 opts, | |
2646 losedata, | |
2647 pathfn, | |
2648 ) | |
2649 | |
2397 if opts.upgrade and not opts.git: | 2650 if opts.upgrade and not opts.git: |
2398 try: | 2651 try: |
2652 | |
2399 def losedata(fn): | 2653 def losedata(fn): |
2400 if not losedatafn or not losedatafn(fn=fn): | 2654 if not losedatafn or not losedatafn(fn=fn): |
2401 raise GitDiffRequired | 2655 raise GitDiffRequired |
2656 | |
2402 # Buffer the whole output until we are sure it can be generated | 2657 # Buffer the whole output until we are sure it can be generated |
2403 return list(difffn(opts.copy(git=False), losedata)) | 2658 return list(difffn(opts.copy(git=False), losedata)) |
2404 except GitDiffRequired: | 2659 except GitDiffRequired: |
2405 return difffn(opts.copy(git=True), None) | 2660 return difffn(opts.copy(git=True), None) |
2406 else: | 2661 else: |
2407 return difffn(opts, None) | 2662 return difffn(opts, None) |
2663 | |
2408 | 2664 |
2409 def diffsinglehunk(hunklines): | 2665 def diffsinglehunk(hunklines): |
2410 """yield tokens for a list of lines in a single hunk""" | 2666 """yield tokens for a list of lines in a single hunk""" |
2411 for line in hunklines: | 2667 for line in hunklines: |
2412 # chomp | 2668 # chomp |
2424 yield (token, 'diff.tab') | 2680 yield (token, 'diff.tab') |
2425 else: | 2681 else: |
2426 yield (token, label) | 2682 yield (token, label) |
2427 | 2683 |
2428 if chompline != stripline: | 2684 if chompline != stripline: |
2429 yield (chompline[len(stripline):], 'diff.trailingwhitespace') | 2685 yield (chompline[len(stripline) :], 'diff.trailingwhitespace') |
2430 if chompline != line: | 2686 if chompline != line: |
2431 yield (line[len(chompline):], '') | 2687 yield (line[len(chompline) :], '') |
2688 | |
2432 | 2689 |
2433 def diffsinglehunkinline(hunklines): | 2690 def diffsinglehunkinline(hunklines): |
2434 """yield tokens for a list of lines in a single hunk, with inline colors""" | 2691 """yield tokens for a list of lines in a single hunk, with inline colors""" |
2435 # prepare deleted, and inserted content | 2692 # prepare deleted, and inserted content |
2436 a = '' | 2693 a = '' |
2465 atokens.append((changed, token)) | 2722 atokens.append((changed, token)) |
2466 for token in mdiff.splitnewlines(''.join(bl[b1:b2])): | 2723 for token in mdiff.splitnewlines(''.join(bl[b1:b2])): |
2467 btokens.append((changed, token)) | 2724 btokens.append((changed, token)) |
2468 | 2725 |
2469 # yield deleted tokens, then inserted ones | 2726 # yield deleted tokens, then inserted ones |
2470 for prefix, label, tokens in [('-', 'diff.deleted', atokens), | 2727 for prefix, label, tokens in [ |
2471 ('+', 'diff.inserted', btokens)]: | 2728 ('-', 'diff.deleted', atokens), |
2729 ('+', 'diff.inserted', btokens), | |
2730 ]: | |
2472 nextisnewline = True | 2731 nextisnewline = True |
2473 for changed, token in tokens: | 2732 for changed, token in tokens: |
2474 if nextisnewline: | 2733 if nextisnewline: |
2475 yield (prefix, label) | 2734 yield (prefix, label) |
2476 nextisnewline = False | 2735 nextisnewline = False |
2477 # special handling line end | 2736 # special handling line end |
2478 isendofline = token.endswith('\n') | 2737 isendofline = token.endswith('\n') |
2479 if isendofline: | 2738 if isendofline: |
2480 chomp = token[:-1] # chomp | 2739 chomp = token[:-1] # chomp |
2481 if chomp.endswith('\r'): | 2740 if chomp.endswith('\r'): |
2482 chomp = chomp[:-1] | 2741 chomp = chomp[:-1] |
2483 endofline = token[len(chomp):] | 2742 endofline = token[len(chomp) :] |
2484 token = chomp.rstrip() # detect spaces at the end | 2743 token = chomp.rstrip() # detect spaces at the end |
2485 endspaces = chomp[len(token):] | 2744 endspaces = chomp[len(token) :] |
2486 # scan tabs | 2745 # scan tabs |
2487 for maybetab in tabsplitter.findall(token): | 2746 for maybetab in tabsplitter.findall(token): |
2488 if b'\t' == maybetab[0:1]: | 2747 if b'\t' == maybetab[0:1]: |
2489 currentlabel = 'diff.tab' | 2748 currentlabel = 'diff.tab' |
2490 else: | 2749 else: |
2497 if endspaces: | 2756 if endspaces: |
2498 yield (endspaces, 'diff.trailingwhitespace') | 2757 yield (endspaces, 'diff.trailingwhitespace') |
2499 yield (endofline, '') | 2758 yield (endofline, '') |
2500 nextisnewline = True | 2759 nextisnewline = True |
2501 | 2760 |
2761 | |
2502 def difflabel(func, *args, **kw): | 2762 def difflabel(func, *args, **kw): |
2503 '''yields 2-tuples of (output, label) based on the output of func()''' | 2763 '''yields 2-tuples of (output, label) based on the output of func()''' |
2504 if kw.get(r'opts') and kw[r'opts'].worddiff: | 2764 if kw.get(r'opts') and kw[r'opts'].worddiff: |
2505 dodiffhunk = diffsinglehunkinline | 2765 dodiffhunk = diffsinglehunkinline |
2506 else: | 2766 else: |
2507 dodiffhunk = diffsinglehunk | 2767 dodiffhunk = diffsinglehunk |
2508 headprefixes = [('diff', 'diff.diffline'), | 2768 headprefixes = [ |
2509 ('copy', 'diff.extended'), | 2769 ('diff', 'diff.diffline'), |
2510 ('rename', 'diff.extended'), | 2770 ('copy', 'diff.extended'), |
2511 ('old', 'diff.extended'), | 2771 ('rename', 'diff.extended'), |
2512 ('new', 'diff.extended'), | 2772 ('old', 'diff.extended'), |
2513 ('deleted', 'diff.extended'), | 2773 ('new', 'diff.extended'), |
2514 ('index', 'diff.extended'), | 2774 ('deleted', 'diff.extended'), |
2515 ('similarity', 'diff.extended'), | 2775 ('index', 'diff.extended'), |
2516 ('---', 'diff.file_a'), | 2776 ('similarity', 'diff.extended'), |
2517 ('+++', 'diff.file_b')] | 2777 ('---', 'diff.file_a'), |
2518 textprefixes = [('@', 'diff.hunk'), | 2778 ('+++', 'diff.file_b'), |
2519 # - and + are handled by diffsinglehunk | 2779 ] |
2520 ] | 2780 textprefixes = [ |
2781 ('@', 'diff.hunk'), | |
2782 # - and + are handled by diffsinglehunk | |
2783 ] | |
2521 head = False | 2784 head = False |
2522 | 2785 |
2523 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes. | 2786 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes. |
2524 hunkbuffer = [] | 2787 hunkbuffer = [] |
2788 | |
2525 def consumehunkbuffer(): | 2789 def consumehunkbuffer(): |
2526 if hunkbuffer: | 2790 if hunkbuffer: |
2527 for token in dodiffhunk(hunkbuffer): | 2791 for token in dodiffhunk(hunkbuffer): |
2528 yield token | 2792 yield token |
2529 hunkbuffer[:] = [] | 2793 hunkbuffer[:] = [] |
2558 stripline = line.rstrip() | 2822 stripline = line.rstrip() |
2559 for prefix, label in prefixes: | 2823 for prefix, label in prefixes: |
2560 if stripline.startswith(prefix): | 2824 if stripline.startswith(prefix): |
2561 yield (stripline, label) | 2825 yield (stripline, label) |
2562 if line != stripline: | 2826 if line != stripline: |
2563 yield (line[len(stripline):], | 2827 yield ( |
2564 'diff.trailingwhitespace') | 2828 line[len(stripline) :], |
2829 'diff.trailingwhitespace', | |
2830 ) | |
2565 break | 2831 break |
2566 else: | 2832 else: |
2567 yield (line, '') | 2833 yield (line, '') |
2568 if i + 1 < linecount: | 2834 if i + 1 < linecount: |
2569 yield ('\n', '') | 2835 yield ('\n', '') |
2570 for token in consumehunkbuffer(): | 2836 for token in consumehunkbuffer(): |
2571 yield token | 2837 yield token |
2572 | 2838 |
2839 | |
def diffui(*args, **kw):
    """Labelled variant of diff() for ui.write().

    Yields 2-tuples of (output, label) instead of raw diff text.
    """
    return difflabel(diff, *args, **kw)
2843 | |
2576 | 2844 |
2577 def _filepairs(modified, added, removed, copy, opts): | 2845 def _filepairs(modified, added, removed, copy, opts): |
2578 '''generates tuples (f1, f2, copyop), where f1 is the name of the file | 2846 '''generates tuples (f1, f2, copyop), where f1 is the name of the file |
2579 before and f2 is the the name after. For added files, f1 will be None, | 2847 before and f2 is the the name after. For added files, f1 will be None, |
2580 and for removed files, f2 will be None. copyop may be set to None, 'copy' | 2848 and for removed files, f2 will be None. copyop may be set to None, 'copy' |
2600 copyop = 'copy' | 2868 copyop = 'copy' |
2601 elif f in removedset: | 2869 elif f in removedset: |
2602 f2 = None | 2870 f2 = None |
2603 if opts.git: | 2871 if opts.git: |
2604 # have we already reported a copy above? | 2872 # have we already reported a copy above? |
2605 if (f in copyto and copyto[f] in addedset | 2873 if ( |
2606 and copy[copyto[f]] == f): | 2874 f in copyto |
2875 and copyto[f] in addedset | |
2876 and copy[copyto[f]] == f | |
2877 ): | |
2607 continue | 2878 continue |
2608 yield f1, f2, copyop | 2879 yield f1, f2, copyop |
2609 | 2880 |
2610 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed, | 2881 |
2611 copy, getfilectx, opts, losedatafn, pathfn): | 2882 def trydiff( |
2883 repo, | |
2884 revs, | |
2885 ctx1, | |
2886 ctx2, | |
2887 modified, | |
2888 added, | |
2889 removed, | |
2890 copy, | |
2891 getfilectx, | |
2892 opts, | |
2893 losedatafn, | |
2894 pathfn, | |
2895 ): | |
2612 '''given input data, generate a diff and yield it in blocks | 2896 '''given input data, generate a diff and yield it in blocks |
2613 | 2897 |
2614 If generating a diff would lose data like flags or binary data and | 2898 If generating a diff would lose data like flags or binary data and |
2615 losedatafn is not None, it will be called. | 2899 losedatafn is not None, it will be called. |
2616 | 2900 |
2666 binary = False | 2950 binary = False |
2667 else: | 2951 else: |
2668 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None) | 2952 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None) |
2669 | 2953 |
2670 if losedatafn and not opts.git: | 2954 if losedatafn and not opts.git: |
2671 if (binary or | 2955 if ( |
2956 binary | |
2957 or | |
2672 # copy/rename | 2958 # copy/rename |
2673 f2 in copy or | 2959 f2 in copy |
2960 or | |
2674 # empty file creation | 2961 # empty file creation |
2675 (not f1 and isempty(fctx2)) or | 2962 (not f1 and isempty(fctx2)) |
2963 or | |
2676 # empty file deletion | 2964 # empty file deletion |
2677 (isempty(fctx1) and not f2) or | 2965 (isempty(fctx1) and not f2) |
2966 or | |
2678 # create with flags | 2967 # create with flags |
2679 (not f1 and flag2) or | 2968 (not f1 and flag2) |
2969 or | |
2680 # change flags | 2970 # change flags |
2681 (f1 and f2 and flag1 != flag2)): | 2971 (f1 and f2 and flag1 != flag2) |
2972 ): | |
2682 losedatafn(f2 or f1) | 2973 losedatafn(f2 or f1) |
2683 | 2974 |
2684 path1 = pathfn(f1 or f2) | 2975 path1 = pathfn(f1 or f2) |
2685 path2 = pathfn(f2 or f1) | 2976 path2 = pathfn(f2 or f1) |
2686 header = [] | 2977 header = [] |
2687 if opts.git: | 2978 if opts.git: |
2688 header.append('diff --git %s%s %s%s' % | 2979 header.append( |
2689 (aprefix, path1, bprefix, path2)) | 2980 'diff --git %s%s %s%s' % (aprefix, path1, bprefix, path2) |
2690 if not f1: # added | 2981 ) |
2982 if not f1: # added | |
2691 header.append('new file mode %s' % gitmode[flag2]) | 2983 header.append('new file mode %s' % gitmode[flag2]) |
2692 elif not f2: # removed | 2984 elif not f2: # removed |
2693 header.append('deleted file mode %s' % gitmode[flag1]) | 2985 header.append('deleted file mode %s' % gitmode[flag1]) |
2694 else: # modified/copied/renamed | 2986 else: # modified/copied/renamed |
2695 mode1, mode2 = gitmode[flag1], gitmode[flag2] | 2987 mode1, mode2 = gitmode[flag1], gitmode[flag2] |
2696 if mode1 != mode2: | 2988 if mode1 != mode2: |
2697 header.append('old mode %s' % mode1) | 2989 header.append('old mode %s' % mode1) |
2714 # yes | no yes yes 0 | summary | no | 3006 # yes | no yes yes 0 | summary | no |
2715 # yes | no yes yes >0 | summary | semi [1] | 3007 # yes | no yes yes >0 | summary | semi [1] |
2716 # yes | yes * * * | text diff | yes | 3008 # yes | yes * * * | text diff | yes |
2717 # no | * * * * | text diff | yes | 3009 # no | * * * * | text diff | yes |
2718 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked | 3010 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked |
2719 if binary and (not opts.git or (opts.git and opts.nobinary and not | 3011 if binary and ( |
2720 opts.index)): | 3012 not opts.git or (opts.git and opts.nobinary and not opts.index) |
3013 ): | |
2721 # fast path: no binary content will be displayed, content1 and | 3014 # fast path: no binary content will be displayed, content1 and |
2722 # content2 are only used for equivalent test. cmp() could have a | 3015 # content2 are only used for equivalent test. cmp() could have a |
2723 # fast path. | 3016 # fast path. |
2724 if fctx1 is not None: | 3017 if fctx1 is not None: |
2725 content1 = b'\0' | 3018 content1 = b'\0' |
2726 if fctx2 is not None: | 3019 if fctx2 is not None: |
2727 if fctx1 is not None and not fctx1.cmp(fctx2): | 3020 if fctx1 is not None and not fctx1.cmp(fctx2): |
2728 content2 = b'\0' # not different | 3021 content2 = b'\0' # not different |
2729 else: | 3022 else: |
2730 content2 = b'\0\0' | 3023 content2 = b'\0\0' |
2731 else: | 3024 else: |
2732 # normal path: load contents | 3025 # normal path: load contents |
2733 if fctx1 is not None: | 3026 if fctx1 is not None: |
2736 content2 = fctx2.data() | 3029 content2 = fctx2.data() |
2737 | 3030 |
2738 if binary and opts.git and not opts.nobinary: | 3031 if binary and opts.git and not opts.nobinary: |
2739 text = mdiff.b85diff(content1, content2) | 3032 text = mdiff.b85diff(content1, content2) |
2740 if text: | 3033 if text: |
2741 header.append('index %s..%s' % | 3034 header.append( |
2742 (gitindex(content1), gitindex(content2))) | 3035 'index %s..%s' % (gitindex(content1), gitindex(content2)) |
2743 hunks = (None, [text]), | 3036 ) |
3037 hunks = ((None, [text]),) | |
2744 else: | 3038 else: |
2745 if opts.git and opts.index > 0: | 3039 if opts.git and opts.index > 0: |
2746 flag = flag1 | 3040 flag = flag1 |
2747 if flag is None: | 3041 if flag is None: |
2748 flag = flag2 | 3042 flag = flag2 |
2749 header.append('index %s..%s %s' % | 3043 header.append( |
2750 (gitindex(content1)[0:opts.index], | 3044 'index %s..%s %s' |
2751 gitindex(content2)[0:opts.index], | 3045 % ( |
2752 gitmode[flag])) | 3046 gitindex(content1)[0 : opts.index], |
2753 | 3047 gitindex(content2)[0 : opts.index], |
2754 uheaders, hunks = mdiff.unidiff(content1, date1, | 3048 gitmode[flag], |
2755 content2, date2, | 3049 ) |
2756 path1, path2, | 3050 ) |
2757 binary=binary, opts=opts) | 3051 |
3052 uheaders, hunks = mdiff.unidiff( | |
3053 content1, | |
3054 date1, | |
3055 content2, | |
3056 date2, | |
3057 path1, | |
3058 path2, | |
3059 binary=binary, | |
3060 opts=opts, | |
3061 ) | |
2758 header.extend(uheaders) | 3062 header.extend(uheaders) |
2759 yield fctx1, fctx2, header, hunks | 3063 yield fctx1, fctx2, header, hunks |
3064 | |
2760 | 3065 |
2761 def diffstatsum(stats): | 3066 def diffstatsum(stats): |
2762 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False | 3067 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False |
2763 for f, a, r, b in stats: | 3068 for f, a, r, b in stats: |
2764 maxfile = max(maxfile, encoding.colwidth(f)) | 3069 maxfile = max(maxfile, encoding.colwidth(f)) |
2766 addtotal += a | 3071 addtotal += a |
2767 removetotal += r | 3072 removetotal += r |
2768 binary = binary or b | 3073 binary = binary or b |
2769 | 3074 |
2770 return maxfile, maxtotal, addtotal, removetotal, binary | 3075 return maxfile, maxtotal, addtotal, removetotal, binary |
3076 | |
2771 | 3077 |
2772 def diffstatdata(lines): | 3078 def diffstatdata(lines): |
2773 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$') | 3079 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$') |
2774 | 3080 |
2775 results = [] | 3081 results = [] |
2800 inheader = False | 3106 inheader = False |
2801 elif line.startswith('+') and not inheader: | 3107 elif line.startswith('+') and not inheader: |
2802 adds += 1 | 3108 adds += 1 |
2803 elif line.startswith('-') and not inheader: | 3109 elif line.startswith('-') and not inheader: |
2804 removes += 1 | 3110 removes += 1 |
2805 elif (line.startswith('GIT binary patch') or | 3111 elif line.startswith('GIT binary patch') or line.startswith( |
2806 line.startswith('Binary file')): | 3112 'Binary file' |
3113 ): | |
2807 isbinary = True | 3114 isbinary = True |
2808 elif line.startswith('rename from'): | 3115 elif line.startswith('rename from'): |
2809 filename = line[12:] | 3116 filename = line[12:] |
2810 elif line.startswith('rename to'): | 3117 elif line.startswith('rename to'): |
2811 filename += ' => %s' % line[10:] | 3118 filename += ' => %s' % line[10:] |
2812 addresult() | 3119 addresult() |
2813 return results | 3120 return results |
2814 | 3121 |
3122 | |
2815 def diffstat(lines, width=80): | 3123 def diffstat(lines, width=80): |
2816 output = [] | 3124 output = [] |
2817 stats = diffstatdata(lines) | 3125 stats = diffstatdata(lines) |
2818 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats) | 3126 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats) |
2819 | 3127 |
2837 count = 'Bin' | 3145 count = 'Bin' |
2838 else: | 3146 else: |
2839 count = '%d' % (adds + removes) | 3147 count = '%d' % (adds + removes) |
2840 pluses = '+' * scale(adds) | 3148 pluses = '+' * scale(adds) |
2841 minuses = '-' * scale(removes) | 3149 minuses = '-' * scale(removes) |
2842 output.append(' %s%s | %*s %s%s\n' % | 3150 output.append( |
2843 (filename, ' ' * (maxname - encoding.colwidth(filename)), | 3151 ' %s%s | %*s %s%s\n' |
2844 countwidth, count, pluses, minuses)) | 3152 % ( |
3153 filename, | |
3154 ' ' * (maxname - encoding.colwidth(filename)), | |
3155 countwidth, | |
3156 count, | |
3157 pluses, | |
3158 minuses, | |
3159 ) | |
3160 ) | |
2845 | 3161 |
2846 if stats: | 3162 if stats: |
2847 output.append(_(' %d files changed, %d insertions(+), ' | 3163 output.append( |
2848 '%d deletions(-)\n') | 3164 _(' %d files changed, %d insertions(+), ' '%d deletions(-)\n') |
2849 % (len(stats), totaladds, totalremoves)) | 3165 % (len(stats), totaladds, totalremoves) |
3166 ) | |
2850 | 3167 |
2851 return ''.join(output) | 3168 return ''.join(output) |
3169 | |
2852 | 3170 |
2853 def diffstatui(*args, **kw): | 3171 def diffstatui(*args, **kw): |
2854 '''like diffstat(), but yields 2-tuples of (output, label) for | 3172 '''like diffstat(), but yields 2-tuples of (output, label) for |
2855 ui.write() | 3173 ui.write() |
2856 ''' | 3174 ''' |