60 if pycompat.iswindows: |
72 if pycompat.iswindows: |
61 from . import scmwindows as scmplatform |
73 from . import scmwindows as scmplatform |
62 else: |
74 else: |
63 from . import scmposix as scmplatform |
75 from . import scmposix as scmplatform |
64 |
76 |
|
77 if typing.TYPE_CHECKING: |
|
78 from . import ( |
|
79 ui as uimod, |
|
80 ) |
|
81 |
65 parsers = policy.importmod('parsers') |
82 parsers = policy.importmod('parsers') |
66 rustrevlog = policy.importrust('revlog') |
83 rustrevlog = policy.importrust('revlog') |
67 |
84 |
68 termsize = scmplatform.termsize |
85 termsize = scmplatform.termsize |
69 |
86 |
74 |
91 |
75 The 'deleted', 'unknown' and 'ignored' properties are only |
92 The 'deleted', 'unknown' and 'ignored' properties are only |
76 relevant to the working copy. |
93 relevant to the working copy. |
77 """ |
94 """ |
78 |
95 |
79 modified = attr.ib(default=attr.Factory(list)) |
96 modified = attr.ib(default=attr.Factory(list), type=List[bytes]) |
80 added = attr.ib(default=attr.Factory(list)) |
97 added = attr.ib(default=attr.Factory(list), type=List[bytes]) |
81 removed = attr.ib(default=attr.Factory(list)) |
98 removed = attr.ib(default=attr.Factory(list), type=List[bytes]) |
82 deleted = attr.ib(default=attr.Factory(list)) |
99 deleted = attr.ib(default=attr.Factory(list), type=List[bytes]) |
83 unknown = attr.ib(default=attr.Factory(list)) |
100 unknown = attr.ib(default=attr.Factory(list), type=List[bytes]) |
84 ignored = attr.ib(default=attr.Factory(list)) |
101 ignored = attr.ib(default=attr.Factory(list), type=List[bytes]) |
85 clean = attr.ib(default=attr.Factory(list)) |
102 clean = attr.ib(default=attr.Factory(list), type=List[bytes]) |
86 |
103 |
def __iter__(self) -> Iterator[List[bytes]]:
    # Yield the seven status categories in their canonical order, so
    # callers can unpack a status object like a 7-tuple of file lists.
    for category in (
        self.modified,
        self.added,
        self.removed,
        self.deleted,
        self.unknown,
        self.ignored,
        self.clean,
    ):
        yield category
95 |
112 |
def __repr__(self) -> str:
    # Render each category through the byte-safe pretty-printer, then
    # decode for a native-str repr.
    template = (
        r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
        r'unknown=%s, ignored=%s, clean=%s>'
    )
    printed = tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)
    return template % printed
101 |
118 |
124 # against itself. |
141 # against itself. |
125 for subpath in missing: |
142 for subpath in missing: |
126 yield subpath, ctx2.nullsub(subpath, ctx1) |
143 yield subpath, ctx2.nullsub(subpath, ctx1) |
127 |
144 |
128 |
145 |
129 def nochangesfound(ui, repo, excluded=None): |
146 def nochangesfound(ui: "uimod.ui", repo, excluded=None) -> None: |
130 """Report no changes for push/pull, excluded is None or a list of |
147 """Report no changes for push/pull, excluded is None or a list of |
131 nodes excluded from the push/pull. |
148 nodes excluded from the push/pull. |
132 """ |
149 """ |
133 secretlist = [] |
150 secretlist = [] |
134 if excluded: |
151 if excluded: |
144 ) |
161 ) |
145 else: |
162 else: |
146 ui.status(_(b"no changes found\n")) |
163 ui.status(_(b"no changes found\n")) |
147 |
164 |
148 |
165 |
149 def callcatch(ui, func): |
166 def callcatch(ui: "uimod.ui", func: Callable[[], int]) -> int: |
150 """call func() with global exception handling |
167 """call func() with global exception handling |
151 |
168 |
152 return func() if no exception happens. otherwise do some error handling |
169 return func() if no exception happens. otherwise do some error handling |
153 and return an exit code accordingly. does not handle all exceptions. |
170 and return an exit code accordingly. does not handle all exceptions. |
154 """ |
171 """ |
266 return detailed_exit_code |
283 return detailed_exit_code |
267 else: |
284 else: |
268 return coarse_exit_code |
285 return coarse_exit_code |
269 |
286 |
270 |
287 |
271 def checknewlabel(repo, lbl, kind): |
288 def checknewlabel(repo, lbl: bytes, kind) -> None: |
272 # Do not use the "kind" parameter in ui output. |
289 # Do not use the "kind" parameter in ui output. |
273 # It makes strings difficult to translate. |
290 # It makes strings difficult to translate. |
274 if lbl in [b'tip', b'.', b'null']: |
291 if lbl in [b'tip', b'.', b'null']: |
275 raise error.InputError(_(b"the name '%s' is reserved") % lbl) |
292 raise error.InputError(_(b"the name '%s' is reserved") % lbl) |
276 for c in (b':', b'\0', b'\n', b'\r'): |
293 for c in (b':', b'\0', b'\n', b'\r'): |
292 raise error.InputError( |
309 raise error.InputError( |
293 _(b"leading or trailing whitespace in name %r") % lbl |
310 _(b"leading or trailing whitespace in name %r") % lbl |
294 ) |
311 ) |
295 |
312 |
296 |
313 |
def checkfilename(f: bytes) -> None:
    '''Check that the filename f is an acceptable filename for a tracked file'''
    # CR and LF would corrupt manifest/dirstate line-based storage.
    for forbidden in (b'\r', b'\n'):
        if forbidden in f:
            raise error.InputError(
                _(b"'\\n' and '\\r' disallowed in filenames: %r")
                % pycompat.bytestr(f)
            )
304 |
321 |
305 |
322 |
306 def checkportable(ui, f): |
323 def checkportable(ui: "uimod.ui", f: bytes) -> None: |
307 '''Check if filename f is portable and warn or abort depending on config''' |
324 '''Check if filename f is portable and warn or abort depending on config''' |
308 checkfilename(f) |
325 checkfilename(f) |
309 abort, warn = checkportabilityalert(ui) |
326 abort, warn = checkportabilityalert(ui) |
310 if abort or warn: |
327 if abort or warn: |
311 msg = util.checkwinfilename(f) |
328 msg = util.checkwinfilename(f) |
314 if abort: |
331 if abort: |
315 raise error.InputError(msg) |
332 raise error.InputError(msg) |
316 ui.warn(_(b"warning: %s\n") % msg) |
333 ui.warn(_(b"warning: %s\n") % msg) |
317 |
334 |
318 |
335 |
319 def checkportabilityalert(ui): |
336 def checkportabilityalert(ui: "uimod.ui") -> Tuple[bool, bool]: |
320 """check if the user's config requests nothing, a warning, or abort for |
337 """check if the user's config requests nothing, a warning, or abort for |
321 non-portable filenames""" |
338 non-portable filenames""" |
322 val = ui.config(b'ui', b'portablefilenames') |
339 val = ui.config(b'ui', b'portablefilenames') |
323 lval = val.lower() |
340 lval = val.lower() |
324 bval = stringutil.parsebool(val) |
341 bval = stringutil.parsebool(val) |
330 ) |
347 ) |
331 return abort, warn |
348 return abort, warn |
332 |
349 |
333 |
350 |
334 class casecollisionauditor: |
351 class casecollisionauditor: |
def __init__(self, ui: "uimod.ui", abort: bool, dirstate) -> None:
    """Prepare a case-folding collision checker over the given dirstate."""
    self._ui = ui
    self._abort = abort
    # Pre-compute the lower-cased form of every tracked filename for
    # O(1) collision checks; the join/split round-trip lowers all names
    # in one C-level pass instead of a per-file Python loop.
    joined = b'\0'.join(dirstate)
    self._loweredfiles = set(encoding.lower(joined).split(b'\0'))
    self._dirstate = dirstate
    # _newfiles suppresses duplicate complaints when this object is
    # called twice with the same filename.
    self._newfiles = set()
345 |
362 |
346 def __call__(self, f): |
363 def __call__(self, f: bytes) -> None: |
347 if f in self._newfiles: |
364 if f in self._newfiles: |
348 return |
365 return |
349 fl = encoding.lower(f) |
366 fl = encoding.lower(f) |
350 if fl in self._loweredfiles and f not in self._dirstate: |
367 if fl in self._loweredfiles and f not in self._dirstate: |
351 msg = _(b'possible case-folding collision for %s') % f |
368 msg = _(b'possible case-folding collision for %s') % f |
354 self._ui.warn(_(b"warning: %s\n") % msg) |
371 self._ui.warn(_(b"warning: %s\n") % msg) |
355 self._loweredfiles.add(fl) |
372 self._loweredfiles.add(fl) |
356 self._newfiles.add(f) |
373 self._newfiles.add(f) |
357 |
374 |
358 |
375 |
359 def combined_filtered_and_obsolete_hash(repo, maxrev, needobsolete=False): |
376 def combined_filtered_and_obsolete_hash( |
|
377 repo, maxrev, needobsolete: bool = False |
|
378 ): |
360 """build hash of filtered revisions in the current repoview. |
379 """build hash of filtered revisions in the current repoview. |
361 |
380 |
362 Multiple caches perform up-to-date validation by checking that the |
381 Multiple caches perform up-to-date validation by checking that the |
363 tiprev and tipnode stored in the cache file match the current repository. |
382 tiprev and tipnode stored in the cache file match the current repository. |
364 However, this is not sufficient for validating repoviews because the set |
383 However, this is not sufficient for validating repoviews because the set |
435 filtered_set = set(r for r in filtered_set if r <= max_rev) |
454 filtered_set = set(r for r in filtered_set if r <= max_rev) |
436 obs_set = set(r for r in obs_set if r <= max_rev) |
455 obs_set = set(r for r in obs_set if r <= max_rev) |
437 return (filtered_set, obs_set) |
456 return (filtered_set, obs_set) |
438 |
457 |
439 |
458 |
def _hash_revs(revs: Iterable[int]) -> bytes:
    """return a hash from a list of revision numbers"""
    digest = hashutil.sha1()
    for rev in revs:
        # ';' separator keeps e.g. (1, 23) distinct from (12, 3)
        digest.update(b'%d;' % rev)
    return digest.digest()
446 |
465 |
447 |
466 |
448 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False): |
467 def walkrepos( |
|
468 path, |
|
469 followsym: bool = False, |
|
470 seen_dirs: Optional[List[bytes]] = None, |
|
471 recurse: bool = False, |
|
472 ) -> Iterable[bytes]: |
449 """yield every hg repository under path, always recursively. |
473 """yield every hg repository under path, always recursively. |
450 The recurse flag will only control recursion into repo working dirs""" |
474 The recurse flag will only control recursion into repo working dirs""" |
451 |
475 |
452 def errhandler(err): |
476 def errhandler(err): |
453 if err.filename == path: |
477 if err.filename == path: |
492 else: |
516 else: |
493 newdirs.append(d) |
517 newdirs.append(d) |
494 dirs[:] = newdirs |
518 dirs[:] = newdirs |
495 |
519 |
496 |
520 |
def binnode(ctx) -> bytes:
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is not None:
        return node
    # The working directory has no real node; use the sentinel wdir id.
    return ctx.repo().nodeconstants.wdirid
503 |
527 |
504 |
528 |
def intrev(ctx) -> int:
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    # The working directory reports None; map it to the wdir sentinel rev.
    return wdirrev if rev is None else rev
512 |
536 |
513 |
537 |
def formatchangeid(ctx) -> bytes:
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
519 |
543 |
520 |
544 |
def formatrevnode(ui: "uimod.ui", rev: int, node: bytes) -> bytes:
    """Format given revision and node depending on the current verbosity"""
    # Full-length hashes in debug mode, abbreviated ones otherwise.
    hexfunc = hex if ui.debugflag else short
    return b'%d:%s' % (rev, hexfunc(node))
528 |
552 |
529 |
553 |
530 def resolvehexnodeidprefix(repo, prefix): |
554 def resolvehexnodeidprefix(repo, prefix: bytes): |
531 if prefix.startswith(b'x'): |
555 if prefix.startswith(b'x'): |
532 prefix = prefix[1:] |
556 prefix = prefix[1:] |
533 try: |
557 try: |
534 # Uses unfiltered repo because it's faster when prefix is ambiguous/ |
558 # Uses unfiltered repo because it's faster when prefix is ambiguous/ |
535 # This matches the shortesthexnodeidprefix() function below. |
559 # This matches the shortesthexnodeidprefix() function below. |
557 return |
581 return |
558 repo.changelog.rev(node) # make sure node isn't filtered |
582 repo.changelog.rev(node) # make sure node isn't filtered |
559 return node |
583 return node |
560 |
584 |
561 |
585 |
562 def mayberevnum(repo, prefix): |
586 def mayberevnum(repo, prefix: bytes) -> bool: |
563 """Checks if the given prefix may be mistaken for a revision number""" |
587 """Checks if the given prefix may be mistaken for a revision number""" |
564 try: |
588 try: |
565 i = int(prefix) |
589 i = int(prefix) |
566 # if we are a pure int, then starting with zero will not be |
590 # if we are a pure int, then starting with zero will not be |
567 # confused as a rev; or, obviously, if the int is larger |
591 # confused as a rev; or, obviously, if the int is larger |
572 return True |
596 return True |
573 except ValueError: |
597 except ValueError: |
574 return False |
598 return False |
575 |
599 |
576 |
600 |
577 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None): |
601 def shortesthexnodeidprefix(repo, node: bytes, minlength: int = 1, cache=None): |
578 """Find the shortest unambiguous prefix that matches hexnode. |
602 """Find the shortest unambiguous prefix that matches hexnode. |
579 |
603 |
580 If "cache" is not None, it must be a dictionary that can be used for |
604 If "cache" is not None, it must be a dictionary that can be used for |
581 caching between calls to this method. |
605 caching between calls to this method. |
582 """ |
606 """ |
584 # which would be unacceptably slow. so we look for hash collision in |
608 # which would be unacceptably slow. so we look for hash collision in |
585 # unfiltered space, which means some hashes may be slightly longer. |
609 # unfiltered space, which means some hashes may be slightly longer. |
586 |
610 |
587 minlength = max(minlength, 1) |
611 minlength = max(minlength, 1) |
588 |
612 |
589 def disambiguate(prefix): |
613 def disambiguate(prefix: bytes): |
590 """Disambiguate against revnums.""" |
614 """Disambiguate against revnums.""" |
591 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'): |
615 if repo.ui.configbool(b'experimental', b'revisions.prefixhexnode'): |
592 if mayberevnum(repo, prefix): |
616 if mayberevnum(repo, prefix): |
593 return b'x' + prefix |
617 return b'x' + prefix |
594 else: |
618 else: |
649 return disambiguate(cl.shortest(node, minlength)) |
673 return disambiguate(cl.shortest(node, minlength)) |
650 except error.LookupError: |
674 except error.LookupError: |
651 raise error.RepoLookupError() |
675 raise error.RepoLookupError() |
652 |
676 |
653 |
677 |
654 def isrevsymbol(repo, symbol): |
678 def isrevsymbol(repo, symbol: bytes) -> bool: |
655 """Checks if a symbol exists in the repo. |
679 """Checks if a symbol exists in the repo. |
656 |
680 |
657 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the |
681 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the |
658 symbol is an ambiguous nodeid prefix. |
682 symbol is an ambiguous nodeid prefix. |
659 """ |
683 """ |
662 return True |
686 return True |
663 except error.RepoLookupError: |
687 except error.RepoLookupError: |
664 return False |
688 return False |
665 |
689 |
666 |
690 |
667 def revsymbol(repo, symbol): |
691 def revsymbol(repo, symbol: bytes): |
668 """Returns a context given a single revision symbol (as string). |
692 """Returns a context given a single revision symbol (as string). |
669 |
693 |
670 This is similar to revsingle(), but accepts only a single revision symbol, |
694 This is similar to revsingle(), but accepts only a single revision symbol, |
671 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but |
695 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but |
672 not "max(public())". |
696 not "max(public())". |
729 error.FilteredRepoLookupError, |
753 error.FilteredRepoLookupError, |
730 ): |
754 ): |
731 raise _filterederror(repo, symbol) |
755 raise _filterederror(repo, symbol) |
732 |
756 |
733 |
757 |
734 def _filterederror(repo, changeid): |
758 def _filterederror(repo, changeid: bytes) -> error.FilteredRepoLookupError: |
735 """build an exception to be raised about a filtered changeid |
759 """build an exception to be raised about a filtered changeid |
736 |
760 |
737 This is extracted in a function to help extensions (eg: evolve) to |
761 This is extracted in a function to help extensions (eg: evolve) to |
738 experiment with various message variants.""" |
762 experiment with various message variants.""" |
739 if repo.filtername.startswith(b'visible'): |
763 if repo.filtername.startswith(b'visible'): |
764 if not l: |
788 if not l: |
765 raise error.InputError(_(b'empty revision set')) |
789 raise error.InputError(_(b'empty revision set')) |
766 return repo[l.last()] |
790 return repo[l.last()] |
767 |
791 |
768 |
792 |
769 def _pairspec(revspec): |
793 def _pairspec(revspec) -> bool: |
770 tree = revsetlang.parse(revspec) |
794 tree = revsetlang.parse(revspec) |
771 return tree and tree[0] in ( |
795 return tree and tree[0] in ( |
772 b'range', |
796 b'range', |
773 b'rangepre', |
797 b'rangepre', |
774 b'rangepost', |
798 b'rangepost', |
829 spec = revsetlang.formatspec(b'%d', spec) |
853 spec = revsetlang.formatspec(b'%d', spec) |
830 allspecs.append(spec) |
854 allspecs.append(spec) |
831 return repo.anyrevs(allspecs, user=True, localalias=localalias) |
855 return repo.anyrevs(allspecs, user=True, localalias=localalias) |
832 |
856 |
833 |
857 |
def increasingwindows(
    windowsize: int = 8, sizelimit: int = 512
) -> Iterable[int]:
    """Yield window sizes that double each step, then plateau at sizelimit.

    This is an infinite generator; callers take as many sizes as needed.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
839 |
865 |
895 if parents[0].rev() >= intrev(ctx) - 1: |
921 if parents[0].rev() >= intrev(ctx) - 1: |
896 return [] |
922 return [] |
897 return parents |
923 return parents |
898 |
924 |
899 |
925 |
900 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None): |
926 def getuipathfn( |
|
927 repo, |
|
928 legacyrelativevalue: bool = False, |
|
929 forcerelativevalue: Optional[bool] = None, |
|
930 ) -> typelib.UiPathFn: |
901 """Return a function that produced paths for presenting to the user. |
931 """Return a function that produced paths for presenting to the user. |
902 |
932 |
903 The returned function takes a repo-relative path and produces a path |
933 The returned function takes a repo-relative path and produces a path |
904 that can be presented in the UI. |
934 that can be presented in the UI. |
905 |
935 |
935 return lambda f: f |
965 return lambda f: f |
936 else: |
966 else: |
937 return util.localpath |
967 return util.localpath |
938 |
968 |
939 |
969 |
def subdiruipathfn(
    subpath: bytes, uipathfn: "typelib.UiPathFn"
) -> "typelib.UiPathFn":
    '''Create a new uipathfn that treats the file as relative to subpath.'''

    def prefixed(f: bytes) -> bytes:
        # Repo paths always use '/' separators, hence posixpath.
        return uipathfn(posixpath.join(subpath, f))

    return prefixed
943 |
975 |
944 |
976 |
def anypats(pats, opts) -> bool:
    """Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    """
    if pats:
        return True
    return bool(opts.get(b'include') or opts.get(b'exclude'))
952 |
984 |
953 |
985 |
954 def expandpats(pats): |
986 def expandpats(pats: Iterable[bytes]) -> List[bytes]: |
955 """Expand bare globs when running on windows. |
987 """Expand bare globs when running on windows. |
956 On posix we assume it already has already been done by sh.""" |
988 On posix we assume it already has already been done by sh.""" |
957 if not util.expandglobs: |
989 if not util.expandglobs: |
958 return list(pats) |
990 return list(pats) |
959 ret = [] |
991 ret = [] |
970 ret.append(kindpat) |
1002 ret.append(kindpat) |
971 return ret |
1003 return ret |
972 |
1004 |
973 |
1005 |
974 def matchandpats( |
1006 def matchandpats( |
975 ctx, pats=(), opts=None, globbed=False, default=b'relpath', badfn=None |
1007 ctx, |
|
1008 pats=(), |
|
1009 opts=None, |
|
1010 globbed: bool = False, |
|
1011 default: bytes = b'relpath', |
|
1012 badfn=None, |
976 ): |
1013 ): |
977 """Return a matcher and the patterns that were used. |
1014 """Return a matcher and the patterns that were used. |
978 The matcher will warn about bad matches, unless an alternate badfn callback |
1015 The matcher will warn about bad matches, unless an alternate badfn callback |
979 is provided.""" |
1016 is provided.""" |
980 if opts is None: |
1017 if opts is None: |
1003 pats = [] |
1040 pats = [] |
1004 return m, pats |
1041 return m, pats |
1005 |
1042 |
1006 |
1043 |
def match(
    ctx,
    pats=(),
    opts=None,
    globbed: bool = False,
    default: bytes = b'relpath',
    badfn=None,
):
    '''Return a matcher that will warn about bad matches.'''
    # Delegate to matchandpats() and discard the expanded pattern list.
    matcher, _discarded = matchandpats(
        ctx, pats, opts, globbed, default, badfn=badfn
    )
    return matcher
1012 |
1054 |
1013 |
1055 |
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # repo is unused but kept for interface parity with match()/matchfiles().
    return matchmod.always()
1017 |
1059 |
1018 |
1060 |
def matchfiles(repo, files, badfn=None) -> "matchmod.exactmatcher":
    '''Return a matcher that will efficiently match exactly these files.'''
    # repo is unused but kept for interface parity with match()/matchall().
    return matchmod.exact(files, badfn=badfn)
1022 |
1064 |
1023 |
1065 |
1024 def parsefollowlinespattern(repo, rev, pat, msg): |
1066 def parsefollowlinespattern(repo, rev, pat: bytes, msg: bytes) -> bytes: |
1025 """Return a file name from `pat` pattern suitable for usage in followlines |
1067 """Return a file name from `pat` pattern suitable for usage in followlines |
1026 logic. |
1068 logic. |
1027 """ |
1069 """ |
1028 if not matchmod.patkind(pat): |
1070 if not matchmod.patkind(pat): |
1029 return pathutil.canonpath(repo.root, repo.getcwd(), pat) |
1071 return pathutil.canonpath(repo.root, repo.getcwd(), pat) |
1034 if len(files) != 1: |
1076 if len(files) != 1: |
1035 raise error.ParseError(msg) |
1077 raise error.ParseError(msg) |
1036 return files[0] |
1078 return files[0] |
1037 |
1079 |
1038 |
1080 |
def getorigvfs(ui: "uimod.ui", repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config(b'ui', b'origbackuppath')
    if origbackuppath:
        # Interpret the configured path relative to the working directory.
        return vfs.vfs(repo.wvfs.join(origbackuppath))
    return None
1047 |
1089 |
1048 |
1090 |
1049 def backuppath(ui, repo, filepath): |
1091 def backuppath(ui: "uimod.ui", repo, filepath: bytes) -> bytes: |
1050 """customize where working copy backup files (.orig files) are created |
1092 """customize where working copy backup files (.orig files) are created |
1051 |
1093 |
1052 Fetch user defined path from config file: [ui] origbackuppath = <path> |
1094 Fetch user defined path from config file: [ui] origbackuppath = <path> |
1053 Fall back to default (filepath with .orig suffix) if not specified |
1095 Fall back to default (filepath with .orig suffix) if not specified |
1054 |
1096 |
1087 |
1129 |
1088 def __init__(self, repo, revcontainer): |
1130 def __init__(self, repo, revcontainer): |
1089 self._torev = repo.changelog.rev |
1131 self._torev = repo.changelog.rev |
1090 self._revcontains = revcontainer.__contains__ |
1132 self._revcontains = revcontainer.__contains__ |
1091 |
1133 |
def __contains__(self, node) -> bool:
    # Translate the node to a revision number, then delegate membership
    # to the wrapped revision container.
    rev = self._torev(node)
    return self._revcontains(rev)
1094 |
1136 |
1095 |
1137 |
1096 def cleanupnodes( |
1138 def cleanupnodes( |
1097 repo, |
1139 repo, |
1274 repair.delayedstrip( |
1316 repair.delayedstrip( |
1275 repo.ui, repo, tostrip, operation, backup=backup |
1317 repo.ui, repo, tostrip, operation, backup=backup |
1276 ) |
1318 ) |
1277 |
1319 |
1278 |
1320 |
1279 def addremove(repo, matcher, prefix, uipathfn, opts=None, open_tr=None): |
1321 def addremove( |
|
1322 repo, |
|
1323 matcher, |
|
1324 prefix: bytes, |
|
1325 uipathfn: typelib.UiPathFn, |
|
1326 opts=None, |
|
1327 open_tr=None, |
|
1328 ) -> int: |
1280 if opts is None: |
1329 if opts is None: |
1281 opts = {} |
1330 opts = {} |
1282 m = matcher |
1331 m = matcher |
1283 dry_run = opts.get(b'dry_run') |
1332 dry_run = opts.get(b'dry_run') |
1284 try: |
1333 try: |
1345 if f in m.files(): |
1394 if f in m.files(): |
1346 return 1 |
1395 return 1 |
1347 return ret |
1396 return ret |
1348 |
1397 |
1349 |
1398 |
1350 def marktouched(repo, files, similarity=0.0): |
1399 def marktouched(repo, files, similarity: float = 0.0) -> int: |
1351 """Assert that files have somehow been operated upon. files are relative to |
1400 """Assert that files have somehow been operated upon. files are relative to |
1352 the repo root.""" |
1401 the repo root.""" |
1353 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x)) |
1402 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x)) |
1354 rejected = [] |
1403 rejected = [] |
1355 |
1404 |
1380 if f in m.files(): |
1429 if f in m.files(): |
1381 return 1 |
1430 return 1 |
1382 return 0 |
1431 return 0 |
1383 |
1432 |
1384 |
1433 |
1385 def _interestingfiles(repo, matcher): |
1434 def _interestingfiles( |
|
1435 repo, matcher |
|
1436 ) -> Tuple[List[bytes], List[bytes], List[bytes], List[bytes], List[bytes]]: |
1386 """Walk dirstate with matcher, looking for files that addremove would care |
1437 """Walk dirstate with matcher, looking for files that addremove would care |
1387 about. |
1438 about. |
1388 |
1439 |
1389 This is different from dirstate.status because it doesn't care about |
1440 This is different from dirstate.status because it doesn't care about |
1390 whether files are modified or clean.""" |
1441 whether files are modified or clean.""" |
1416 added.append(abs) |
1467 added.append(abs) |
1417 |
1468 |
1418 return added, unknown, deleted, removed, forgotten |
1469 return added, unknown, deleted, removed, forgotten |
1419 |
1470 |
1420 |
1471 |
1421 def _findrenames(repo, matcher, added, removed, similarity, uipathfn): |
1472 def _findrenames( |
|
1473 repo, matcher, added, removed, similarity, uipathfn: typelib.UiPathFn |
|
1474 ) -> Dict[bytes, bytes]: |
1422 '''Find renames from removed files to added ones.''' |
1475 '''Find renames from removed files to added ones.''' |
1423 renames = {} |
1476 renames = {} |
1424 if similarity > 0: |
1477 if similarity > 0: |
1425 for old, new, score in similar.findrenames( |
1478 for old, new, score in similar.findrenames( |
1426 repo, added, removed, similarity |
1479 repo, added, removed, similarity |
1439 ) |
1492 ) |
1440 renames[new] = old |
1493 renames[new] = old |
1441 return renames |
1494 return renames |
1442 |
1495 |
1443 |
1496 |
1444 def _markchanges(repo, unknown, deleted, renames): |
1497 def _markchanges(repo, unknown, deleted, renames) -> None: |
1445 """Marks the files in unknown as added, the files in deleted as removed, |
1498 """Marks the files in unknown as added, the files in deleted as removed, |
1446 and the files in renames as copied.""" |
1499 and the files in renames as copied.""" |
1447 wctx = repo[None] |
1500 wctx = repo[None] |
1448 with repo.wlock(): |
1501 with repo.wlock(): |
1449 wctx.forget(deleted) |
1502 wctx.forget(deleted) |
1522 return copies |
1575 return copies |
1523 |
1576 |
1524 return copiesfn |
1577 return copiesfn |
1525 |
1578 |
1526 |
1579 |
1527 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None): |
1580 def dirstatecopy( |
|
1581 ui: "uimod.ui", |
|
1582 repo, |
|
1583 wctx, |
|
1584 src, |
|
1585 dst, |
|
1586 dryrun: bool = False, |
|
1587 cwd: Optional[bytes] = None, |
|
1588 ) -> None: |
1528 """Update the dirstate to reflect the intent of copying src to dst. For |
1589 """Update the dirstate to reflect the intent of copying src to dst. For |
1529 different reasons it might not end with dst being marked as copied from src. |
1590 different reasons it might not end with dst being marked as copied from src. |
1530 """ |
1591 """ |
1531 origsrc = repo.dirstate.copied(src) or src |
1592 origsrc = repo.dirstate.copied(src) or src |
1532 if dst == origsrc: # copying back a copy? |
1593 if dst == origsrc: # copying back a copy? |
1547 wctx.add([dst]) |
1608 wctx.add([dst]) |
1548 elif not dryrun: |
1609 elif not dryrun: |
1549 wctx.copy(origsrc, dst) |
1610 wctx.copy(origsrc, dst) |
1550 |
1611 |
1551 |
1612 |
1552 def movedirstate(repo, newctx, match=None): |
1613 def movedirstate(repo, newctx, match=None) -> None: |
1553 """Move the dirstate to newctx and adjust it as necessary. |
1614 """Move the dirstate to newctx and adjust it as necessary. |
1554 |
1615 |
1555 A matcher can be provided as an optimization. It is probably a bug to pass |
1616 A matcher can be provided as an optimization. It is probably a bug to pass |
1556 a matcher that doesn't match all the differences between the parent of the |
1617 a matcher that doesn't match all the differences between the parent of the |
1557 working copy and newctx. |
1618 working copy and newctx. |
1600 store.add(r) |
1661 store.add(r) |
1601 return wc, store |
1662 return wc, store |
1602 return requirements, None |
1663 return requirements, None |
1603 |
1664 |
1604 |
1665 |
1605 def istreemanifest(repo): |
1666 def istreemanifest(repo) -> bool: |
1606 """returns whether the repository is using treemanifest or not""" |
1667 """returns whether the repository is using treemanifest or not""" |
1607 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements |
1668 return requirementsmod.TREEMANIFEST_REQUIREMENT in repo.requirements |
1608 |
1669 |
1609 |
1670 |
1610 def writereporequirements(repo, requirements=None): |
1671 def writereporequirements(repo, requirements=None) -> None: |
1611 """writes requirements for the repo |
1672 """writes requirements for the repo |
1612 |
1673 |
1613 Requirements are written to .hg/requires and .hg/store/requires based |
1674 Requirements are written to .hg/requires and .hg/store/requires based |
1614 on whether share-safe mode is enabled and which requirements are wdir |
1675 on whether share-safe mode is enabled and which requirements are wdir |
1615 requirements and which are store requirements |
1676 requirements and which are store requirements |
1624 elif repo.ui.configbool(b'format', b'usestore'): |
1685 elif repo.ui.configbool(b'format', b'usestore'): |
1625 # only remove store requires if we are using store |
1686 # only remove store requires if we are using store |
1626 repo.svfs.tryunlink(b'requires') |
1687 repo.svfs.tryunlink(b'requires') |
1627 |
1688 |
1628 |
1689 |
1629 def writerequires(opener, requirements): |
1690 def writerequires(opener, requirements) -> None: |
1630 with opener(b'requires', b'w', atomictemp=True) as fp: |
1691 with opener(b'requires', b'w', atomictemp=True) as fp: |
1631 for r in sorted(requirements): |
1692 for r in sorted(requirements): |
1632 fp.write(b"%s\n" % r) |
1693 fp.write(b"%s\n" % r) |
1633 |
1694 |
1634 |
1695 |
1635 class filecachesubentry: |
1696 class filecachesubentry: |
1636 def __init__(self, path, stat): |
1697 _cacheable: Optional[bool] = None |
|
1698 |
|
1699 def __init__(self, path, stat: bool): |
1637 self.path = path |
1700 self.path = path |
1638 self.cachestat = None |
1701 self.cachestat = None |
1639 self._cacheable = None |
1702 self._cacheable = None |
1640 |
1703 |
1641 if stat: |
1704 if stat: |
1645 self._cacheable = self.cachestat.cacheable() |
1708 self._cacheable = self.cachestat.cacheable() |
1646 else: |
1709 else: |
1647 # None means we don't know yet |
1710 # None means we don't know yet |
1648 self._cacheable = None |
1711 self._cacheable = None |
1649 |
1712 |
1650 def refresh(self): |
1713 def refresh(self) -> None: |
1651 if self.cacheable(): |
1714 if self.cacheable(): |
1652 self.cachestat = filecachesubentry.stat(self.path) |
1715 self.cachestat = filecachesubentry.stat(self.path) |
1653 |
1716 |
1654 def cacheable(self): |
1717 def cacheable(self) -> bool: |
1655 if self._cacheable is not None: |
1718 if self._cacheable is not None: |
1656 return self._cacheable |
1719 return self._cacheable |
1657 |
1720 |
1658 # we don't know yet, assume it is for now |
1721 # we don't know yet, assume it is for now |
1659 return True |
1722 return True |
1660 |
1723 |
1661 def changed(self): |
1724 def changed(self) -> bool: |
1662 # no point in going further if we can't cache it |
1725 # no point in going further if we can't cache it |
1663 if not self.cacheable(): |
1726 if not self.cacheable(): |
1664 return True |
1727 return True |
1665 |
1728 |
1666 newstat = filecachesubentry.stat(self.path) |
1729 newstat = filecachesubentry.stat(self.path) |
1678 return True |
1741 return True |
1679 else: |
1742 else: |
1680 return False |
1743 return False |
1681 |
1744 |
1682 @staticmethod |
1745 @staticmethod |
1683 def stat(path): |
1746 def stat(path: bytes) -> Optional[typelib.CacheStat]: |
1684 try: |
1747 try: |
1685 return util.cachestat(path) |
1748 return util.cachestat(path) |
1686 except FileNotFoundError: |
1749 except FileNotFoundError: |
1687 pass |
1750 pass |
1688 |
1751 |
1689 |
1752 |
1690 class filecacheentry: |
1753 class filecacheentry: |
1691 def __init__(self, paths, stat=True): |
1754 def __init__(self, paths, stat: bool = True) -> None: |
1692 self._entries = [] |
1755 self._entries = [] |
1693 for path in paths: |
1756 for path in paths: |
1694 self._entries.append(filecachesubentry(path, stat)) |
1757 self._entries.append(filecachesubentry(path, stat)) |
1695 |
1758 |
1696 def changed(self): |
1759 def changed(self) -> bool: |
1697 '''true if any entry has changed''' |
1760 '''true if any entry has changed''' |
1698 for entry in self._entries: |
1761 for entry in self._entries: |
1699 if entry.changed(): |
1762 if entry.changed(): |
1700 return True |
1763 return True |
1701 return False |
1764 return False |
1702 |
1765 |
1703 def refresh(self): |
1766 def refresh(self) -> None: |
1704 for entry in self._entries: |
1767 for entry in self._entries: |
1705 entry.refresh() |
1768 entry.refresh() |
1706 |
1769 |
1707 |
1770 |
1708 class filecache: |
1771 class filecache: |
1729 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached |
1792 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached |
1730 method result as well as possibly calling ``del obj._filecache[attr]`` to |
1793 method result as well as possibly calling ``del obj._filecache[attr]`` to |
1731 remove the ``filecacheentry``. |
1794 remove the ``filecacheentry``. |
1732 """ |
1795 """ |
1733 |
1796 |
1734 def __init__(self, *paths): |
1797 paths: Tuple[bytes, ...] |
|
1798 |
|
1799 def __init__(self, *paths: bytes) -> None: |
1735 self.paths = paths |
1800 self.paths = paths |
1736 |
1801 |
1737 def tracked_paths(self, obj): |
1802 def tracked_paths(self, obj): |
1738 return [self.join(obj, path) for path in self.paths] |
1803 return [self.join(obj, path) for path in self.paths] |
1739 |
1804 |
1740 def join(self, obj, fname): |
1805 def join(self, obj, fname: bytes): |
1741 """Used to compute the runtime path of a cached file. |
1806 """Used to compute the runtime path of a cached file. |
1742 |
1807 |
1743 Users should subclass filecache and provide their own version of this |
1808 Users should subclass filecache and provide their own version of this |
1744 function to call the appropriate join function on 'obj' (an instance |
1809 function to call the appropriate join function on 'obj' (an instance |
1745 of the class that its member function was decorated). |
1810 of the class that its member function was decorated). |
1796 |
1861 |
1797 ce.obj = value # update cached copy |
1862 ce.obj = value # update cached copy |
1798 obj.__dict__[self.sname] = value # update copy returned by obj.x |
1863 obj.__dict__[self.sname] = value # update copy returned by obj.x |
1799 |
1864 |
1800 |
1865 |
1801 def extdatasource(repo, source): |
1866 def extdatasource(repo, source: bytes): |
1802 """Gather a map of rev -> value dict from the specified source |
1867 """Gather a map of rev -> value dict from the specified source |
1803 |
1868 |
1804 A source spec is treated as a URL, with a special case shell: type |
1869 A source spec is treated as a URL, with a special case shell: type |
1805 for parsing the output from a shell command. |
1870 for parsing the output from a shell command. |
1806 |
1871 |
1880 return self |
1959 return self |
1881 |
1960 |
1882 def __exit__(self, exc_type, exc_value, exc_tb): |
1961 def __exit__(self, exc_type, exc_value, exc_tb): |
1883 self.complete() |
1962 self.complete() |
1884 |
1963 |
1885 def update(self, pos, item=b"", total=None): |
1964 def update( |
|
1965 self, pos: int, item: bytes = b"", total: Optional[int] = None |
|
1966 ) -> None: |
1886 assert pos is not None |
1967 assert pos is not None |
1887 if total: |
1968 if total: |
1888 self.total = total |
1969 self.total = total |
1889 self.pos = pos |
1970 self.pos = pos |
1890 self._updatebar(self.topic, self.pos, item, self.unit, self.total) |
1971 self._updatebar(self.topic, self.pos, item, self.unit, self.total) |
1891 if self.debug: |
1972 if self.debug: |
1892 self._printdebug(item) |
1973 self._printdebug(item) |
1893 |
1974 |
1894 def increment(self, step=1, item=b"", total=None): |
1975 def increment( |
|
1976 self, step: int = 1, item: bytes = b"", total: Optional[int] = None |
|
1977 ) -> None: |
1895 self.update(self.pos + step, item, total) |
1978 self.update(self.pos + step, item, total) |
1896 |
1979 |
1897 def complete(self): |
1980 def complete(self) -> None: |
1898 self.pos = None |
1981 self.pos = None |
1899 self.unit = b"" |
1982 self.unit = b"" |
1900 self.total = None |
1983 self.total = None |
1901 self._updatebar(self.topic, self.pos, b"", self.unit, self.total) |
1984 self._updatebar(self.topic, self.pos, b"", self.unit, self.total) |
1902 |
1985 |
1903 def _printdebug(self, item): |
1986 def _printdebug(self, item: bytes) -> None: |
1904 unit = b'' |
1987 unit = b'' |
1905 if self.unit: |
1988 if self.unit: |
1906 unit = b' ' + self.unit |
1989 unit = b' ' + self.unit |
1907 if item: |
1990 if item: |
1908 item = b' ' + item |
1991 item = b' ' + item |
1915 ) |
1998 ) |
1916 else: |
1999 else: |
1917 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit)) |
2000 self.ui.debug(b'%s:%s %d%s\n' % (self.topic, item, self.pos, unit)) |
1918 |
2001 |
1919 |
2002 |
1920 def gdinitconfig(ui): |
2003 def gdinitconfig(ui: "uimod.ui"): |
1921 """helper function to know if a repo should be created as general delta""" |
2004 """helper function to know if a repo should be created as general delta""" |
1922 # experimental config: format.generaldelta |
2005 # experimental config: format.generaldelta |
1923 return ui.configbool(b'format', b'generaldelta') or ui.configbool( |
2006 return ui.configbool(b'format', b'generaldelta') or ui.configbool( |
1924 b'format', b'usegeneraldelta' |
2007 b'format', b'usegeneraldelta' |
1925 ) |
2008 ) |
1926 |
2009 |
1927 |
2010 |
1928 def gddeltaconfig(ui): |
2011 def gddeltaconfig(ui: "uimod.ui"): |
1929 """helper function to know if incoming deltas should be optimized |
2012 """helper function to know if incoming deltas should be optimized |
1930 |
2013 |
1931 The `format.generaldelta` config is an old form of the config that also |
2014 The `format.generaldelta` config is an old form of the config that also |
1932 implies that incoming delta-bases should be never be trusted. This function |
2015 implies that incoming delta-bases should be never be trusted. This function |
1933 exists for this purpose. |
2016 exists for this purpose. |
1942 Keys must be alphanumerics and start with a letter, values must not |
2025 Keys must be alphanumerics and start with a letter, values must not |
1943 contain '\n' characters""" |
2026 contain '\n' characters""" |
1944 |
2027 |
1945 firstlinekey = b'__firstline' |
2028 firstlinekey = b'__firstline' |
1946 |
2029 |
1947 def __init__(self, vfs, path, keys=None): |
2030 def __init__(self, vfs, path: bytes, keys=None) -> None: |
1948 self.vfs = vfs |
2031 self.vfs = vfs |
1949 self.path = path |
2032 self.path = path |
1950 |
2033 |
1951 def read(self, firstlinenonkeyval=False): |
2034 def read(self, firstlinenonkeyval: bool = False): |
1952 """Read the contents of a simple key-value file |
2035 """Read the contents of a simple key-value file |
1953 |
2036 |
1954 'firstlinenonkeyval' indicates whether the first line of file should |
2037 'firstlinenonkeyval' indicates whether the first line of file should |
1955 be treated as a key-value pair or reuturned fully under the |
2038 be treated as a key-value pair or reuturned fully under the |
1956 __firstline key.""" |
2039 __firstline key.""" |
1977 d.update(updatedict) |
2060 d.update(updatedict) |
1978 except ValueError as e: |
2061 except ValueError as e: |
1979 raise error.CorruptedState(stringutil.forcebytestr(e)) |
2062 raise error.CorruptedState(stringutil.forcebytestr(e)) |
1980 return d |
2063 return d |
1981 |
2064 |
1982 def write(self, data, firstline=None): |
2065 def write(self, data, firstline: Optional[bytes] = None) -> None: |
1983 """Write key=>value mapping to a file |
2066 """Write key=>value mapping to a file |
1984 data is a dict. Keys must be alphanumerical and start with a letter. |
2067 data is a dict. Keys must be alphanumerical and start with a letter. |
1985 Values must not contain newline characters. |
2068 Values must not contain newline characters. |
1986 |
2069 |
1987 If 'firstline' is not None, it is written to file before |
2070 If 'firstline' is not None, it is written to file before |
2006 lines.append(b"%s=%s\n" % (k, v)) |
2089 lines.append(b"%s=%s\n" % (k, v)) |
2007 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp: |
2090 with self.vfs(self.path, mode=b'wb', atomictemp=True) as fp: |
2008 fp.write(b''.join(lines)) |
2091 fp.write(b''.join(lines)) |
2009 |
2092 |
2010 |
2093 |
2011 _reportobsoletedsource = [ |
2094 _reportobsoletedsource: List[bytes] = [ |
2012 b'debugobsolete', |
2095 b'debugobsolete', |
2013 b'pull', |
2096 b'pull', |
2014 b'push', |
2097 b'push', |
2015 b'serve', |
2098 b'serve', |
2016 b'unbundle', |
2099 b'unbundle', |
2017 ] |
2100 ] |
2018 |
2101 |
2019 _reportnewcssource = [ |
2102 _reportnewcssource: List[bytes] = [ |
2020 b'pull', |
2103 b'pull', |
2021 b'unbundle', |
2104 b'unbundle', |
2022 ] |
2105 ] |
2023 |
2106 |
2024 |
2107 |
2025 def prefetchfiles(repo, revmatches): |
2108 def prefetchfiles(repo, revmatches) -> None: |
2026 """Invokes the registered file prefetch functions, allowing extensions to |
2109 """Invokes the registered file prefetch functions, allowing extensions to |
2027 ensure the corresponding files are available locally, before the command |
2110 ensure the corresponding files are available locally, before the command |
2028 uses them. |
2111 uses them. |
2029 |
2112 |
2030 Args: |
2113 Args: |
2049 |
2132 |
2050 # a list of (repo, revs, match) prefetch functions |
2133 # a list of (repo, revs, match) prefetch functions |
2051 fileprefetchhooks = util.hooks() |
2134 fileprefetchhooks = util.hooks() |
2052 |
2135 |
2053 # A marker that tells the evolve extension to suppress its own reporting |
2136 # A marker that tells the evolve extension to suppress its own reporting |
2054 _reportstroubledchangesets = True |
2137 _reportstroubledchangesets: bool = True |
2055 |
2138 |
2056 |
2139 |
2057 def registersummarycallback(repo, otr, txnname=b'', as_validator=False): |
2140 def registersummarycallback( |
|
2141 repo, otr, txnname: bytes = b'', as_validator: bool = False |
|
2142 ) -> None: |
2058 """register a callback to issue a summary after the transaction is closed |
2143 """register a callback to issue a summary after the transaction is closed |
2059 |
2144 |
2060 If as_validator is true, then the callbacks are registered as transaction |
2145 If as_validator is true, then the callbacks are registered as transaction |
2061 validators instead |
2146 validators instead |
2062 """ |
2147 """ |
2223 if as_validator: |
2308 if as_validator: |
2224 msg = _(b'%d local changesets will be published\n') |
2309 msg = _(b'%d local changesets will be published\n') |
2225 repo.ui.status(msg % len(published)) |
2310 repo.ui.status(msg % len(published)) |
2226 |
2311 |
2227 |
2312 |
2228 def getinstabilitymessage(delta, instability): |
2313 def getinstabilitymessage(delta: int, instability: bytes) -> Optional[bytes]: |
2229 """function to return the message to show warning about new instabilities |
2314 """function to return the message to show warning about new instabilities |
2230 |
2315 |
2231 exists as a separate function so that extension can wrap to show more |
2316 exists as a separate function so that extension can wrap to show more |
2232 information like how to fix instabilities""" |
2317 information like how to fix instabilities""" |
2233 if delta > 0: |
2318 if delta > 0: |
2234 return _(b'%i new %s changesets\n') % (delta, instability) |
2319 return _(b'%i new %s changesets\n') % (delta, instability) |
2235 |
2320 |
2236 |
2321 |
2237 def nodesummaries(repo, nodes, maxnumnodes=4): |
2322 def nodesummaries(repo, nodes, maxnumnodes: int = 4) -> bytes: |
2238 if len(nodes) <= maxnumnodes or repo.ui.verbose: |
2323 if len(nodes) <= maxnumnodes or repo.ui.verbose: |
2239 return b' '.join(short(h) for h in nodes) |
2324 return b' '.join(short(h) for h in nodes) |
2240 first = b' '.join(short(h) for h in nodes[:maxnumnodes]) |
2325 first = b' '.join(short(h) for h in nodes[:maxnumnodes]) |
2241 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes) |
2326 return _(b"%s and %d others") % (first, len(nodes) - maxnumnodes) |
2242 |
2327 |
2243 |
2328 |
2244 def enforcesinglehead(repo, tr, desc, accountclosed, filtername): |
2329 def enforcesinglehead(repo, tr, desc: bytes, accountclosed, filtername) -> None: |
2245 """check that no named branch has multiple heads""" |
2330 """check that no named branch has multiple heads""" |
2246 if desc in (b'strip', b'repair'): |
2331 if desc in (b'strip', b'repair'): |
2247 # skip the logic during strip |
2332 # skip the logic during strip |
2248 return |
2333 return |
2249 visible = repo.filtered(filtername) |
2334 visible = repo.filtered(filtername) |
2264 before it is used, whether or not the convert extension was formally loaded. |
2349 before it is used, whether or not the convert extension was formally loaded. |
2265 """ |
2350 """ |
2266 return sink |
2351 return sink |
2267 |
2352 |
2268 |
2353 |
2269 def unhidehashlikerevs(repo, specs, hiddentype): |
2354 def unhidehashlikerevs(repo, specs, hiddentype: bytes): |
2270 """parse the user specs and unhide changesets whose hash or revision number |
2355 """parse the user specs and unhide changesets whose hash or revision number |
2271 is passed. |
2356 is passed. |
2272 |
2357 |
2273 hiddentype can be: 1) 'warn': warn while unhiding changesets |
2358 hiddentype can be: 1) 'warn': warn while unhiding changesets |
2274 2) 'nowarn': don't warn while unhiding changesets |
2359 2) 'nowarn': don't warn while unhiding changesets |
2317 # we have to use new filtername to separate branch/tags cache until we can |
2402 # we have to use new filtername to separate branch/tags cache until we can |
2318 # disbale these cache when revisions are dynamically pinned. |
2403 # disbale these cache when revisions are dynamically pinned. |
2319 return repo.filtered(b'visible-hidden', revs) |
2404 return repo.filtered(b'visible-hidden', revs) |
2320 |
2405 |
2321 |
2406 |
2322 def _getrevsfromsymbols(repo, symbols): |
2407 def _getrevsfromsymbols(repo, symbols) -> Set[int]: |
2323 """parse the list of symbols and returns a set of revision numbers of hidden |
2408 """parse the list of symbols and returns a set of revision numbers of hidden |
2324 changesets present in symbols""" |
2409 changesets present in symbols""" |
2325 revs = set() |
2410 revs = set() |
2326 unfi = repo.unfiltered() |
2411 unfi = repo.unfiltered() |
2327 unficl = unfi.changelog |
2412 unficl = unfi.changelog |
2352 revs.add(rev) |
2437 revs.add(rev) |
2353 |
2438 |
2354 return revs |
2439 return revs |
2355 |
2440 |
2356 |
2441 |
2357 def bookmarkrevs(repo, mark): |
2442 def bookmarkrevs(repo, mark: bytes): |
2358 """Select revisions reachable by a given bookmark |
2443 """Select revisions reachable by a given bookmark |
2359 |
2444 |
2360 If the bookmarked revision isn't a head, an empty set will be returned. |
2445 If the bookmarked revision isn't a head, an empty set will be returned. |
2361 """ |
2446 """ |
2362 return repo.revs(format_bookmark_revspec(mark)) |
2447 return repo.revs(format_bookmark_revspec(mark)) |
2363 |
2448 |
2364 |
2449 |
2365 def format_bookmark_revspec(mark): |
2450 def format_bookmark_revspec(mark: bytes) -> bytes: |
2366 """Build a revset expression to select revisions reachable by a given |
2451 """Build a revset expression to select revisions reachable by a given |
2367 bookmark""" |
2452 bookmark""" |
2368 mark = b'literal:' + mark |
2453 mark = b'literal:' + mark |
2369 return revsetlang.formatspec( |
2454 return revsetlang.formatspec( |
2370 b"ancestors(bookmark(%s)) - " |
2455 b"ancestors(bookmark(%s)) - " |
2374 mark, |
2459 mark, |
2375 mark, |
2460 mark, |
2376 ) |
2461 ) |
2377 |
2462 |
2378 |
2463 |
2379 def ismember(ui, username, userlist): |
2464 def ismember(ui: "uimod.ui", username: bytes, userlist: List[bytes]) -> bool: |
2380 """Check if username is a member of userlist. |
2465 """Check if username is a member of userlist. |
2381 |
2466 |
2382 If userlist has a single '*' member, all users are considered members. |
2467 If userlist has a single '*' member, all users are considered members. |
2383 Can be overridden by extensions to provide more complex authorization |
2468 Can be overridden by extensions to provide more complex authorization |
2384 schemes. |
2469 schemes. |
2385 """ |
2470 """ |
2386 return userlist == [b'*'] or username in userlist |
2471 return userlist == [b'*'] or username in userlist |
2387 |
2472 |
2388 |
2473 |
2389 RESOURCE_HIGH = 3 |
2474 RESOURCE_HIGH: int = 3 |
2390 RESOURCE_MEDIUM = 2 |
2475 RESOURCE_MEDIUM: int = 2 |
2391 RESOURCE_LOW = 1 |
2476 RESOURCE_LOW: int = 1 |
2392 RESOURCE_DEFAULT = 0 |
2477 RESOURCE_DEFAULT: int = 0 |
2393 |
2478 |
2394 RESOURCE_MAPPING = { |
2479 RESOURCE_MAPPING: Dict[bytes, int] = { |
2395 b'default': RESOURCE_DEFAULT, |
2480 b'default': RESOURCE_DEFAULT, |
2396 b'low': RESOURCE_LOW, |
2481 b'low': RESOURCE_LOW, |
2397 b'medium': RESOURCE_MEDIUM, |
2482 b'medium': RESOURCE_MEDIUM, |
2398 b'high': RESOURCE_HIGH, |
2483 b'high': RESOURCE_HIGH, |
2399 } |
2484 } |
2400 |
2485 |
2401 DEFAULT_RESOURCE = RESOURCE_MEDIUM |
2486 DEFAULT_RESOURCE: int = RESOURCE_MEDIUM |
2402 |
2487 |
2403 |
2488 |
2404 def get_resource_profile(ui, dimension=None): |
2489 def get_resource_profile( |
|
2490 ui: "uimod.ui", dimension: Optional[bytes] = None |
|
2491 ) -> int: |
2405 """return the resource profile for a dimension |
2492 """return the resource profile for a dimension |
2406 |
2493 |
2407 If no dimension is specified, the generic value is returned""" |
2494 If no dimension is specified, the generic value is returned""" |
2408 generic_name = ui.config(b'usage', b'resources') |
2495 generic_name = ui.config(b'usage', b'resources') |
2409 value = RESOURCE_MAPPING.get(generic_name, RESOURCE_DEFAULT) |
2496 value = RESOURCE_MAPPING.get(generic_name, RESOURCE_DEFAULT) |