96 """This internal method provides a way for child objects to override the |
97 """This internal method provides a way for child objects to override the |
97 match operator. |
98 match operator. |
98 """ |
99 """ |
99 return match |
100 return match |
100 |
101 |
101 def _buildstatus(self, other, s, match, listignored, listclean, |
102 def _buildstatus( |
102 listunknown): |
103 self, other, s, match, listignored, listclean, listunknown |
|
104 ): |
103 """build a status with respect to another context""" |
105 """build a status with respect to another context""" |
104 # Load earliest manifest first for caching reasons. More specifically, |
106 # Load earliest manifest first for caching reasons. More specifically, |
105 # if you have revisions 1000 and 1001, 1001 is probably stored as a |
107 # if you have revisions 1000 and 1001, 1001 is probably stored as a |
106 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct |
108 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct |
107 # 1000 and cache it so that when you read 1001, we just need to apply a |
109 # 1000 and cache it so that when you read 1001, we just need to apply a |
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)
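
    # Illustrative sketch (not part of the original class): a caller can turn a
    # fileset expression into a matcher and use it as a predicate over the
    # manifest, e.g.
    #
    #     m = ctx.matchfileset("**.py and size('>1k')")
    #     large_python_files = [f for f in ctx.manifest() if m(f)]
    #
    # where 'ctx' is any basectx instance; the expression syntax is the one
    # understood by mercurial.fileset.
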
    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _('not found in manifest')
                )
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _('not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]
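
    # Illustrative sketch (not part of the original class): the manifest lookup
    # above backs the per-file accessors, e.g.
    #
    #     node = ctx.filenode('README')   # 20-byte file revision node
    #     flag = ctx.flags('README')      # '', 'x' (executable) or 'l' (symlink)
    #
    # filenode() raises ManifestLookupError when the path is not present in
    # the changeset.
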
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default='glob',
        listsubrepos=False,
        badfn=None,
    ):
        r = self._repo
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

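    # Illustrative sketch (not part of the original class): diff() is a
    # generator of patch chunks, so callers usually just join the pieces, e.g.
    #
    #     text = b''.join(ctx.diff())      # ctx against its first parent
    #     text = b''.join(ctx.diff(ctx2))  # changes going from ctx2 to ctx
    #
    # where 'ctx' and 'ctx2' are basectx instances.
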
    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r


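# Illustrative sketch (not part of the original module): how the two-sided
# status() above is typically consumed.  'repo' is assumed to be an existing
# localrepository, so repo[rev] yields a changectx and repo[None] a workingctx.
def _example_status_between(repo, rev1, rev2):
    # Changes going from rev1 to rev2, as a scmutil.status tuple whose fields
    # (modified, added, removed, ...) are sorted lists of file names.
    st = repo[rev1].status(repo[rev2])
    return st.modified, st.added, st.removed

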
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)
        p1copies, p2copies = super(changectx, self)._copies
        return p1copies, p2copies

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))

    def closesbranch(self):
        return 'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _("note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + ''.join(
                        _(
                            " alternatively, use --config "
                            "merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)
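
    # Illustrative note (not part of the original class): when several equally
    # good ancestors exist, the hint printed above refers to a configuration
    # such as
    #
    #     [merge]
    #     preferancestor = <node hash>
    #
    # while programmatically the choice is simply
    #
    #     base = ctx_a.ancestor(ctx_b, warn=True)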

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/') for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)


class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

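    # Illustrative note (not part of the original class): equality above is by
    # (type, path, filenode), so two file contexts obtained through different
    # changesets compare equal whenever they refer to the same file revision:
    #
    #     ctx1['f'] == ctx2['f']   # True if 'f' was not touched in between
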
    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path
        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors(
                    [p.rev() for p in base.parents()], inclusive=True
                )
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(
            base, parents, skiprevs=skiprevs, diffopts=diffopts
        )

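    # Illustrative sketch (not part of the original class): annotate() returns
    # one annotateline object per line of the file, each carrying the context
    # that introduced it, e.g.
    #
    #     for line in fctx.annotate():
    #         print(line.fctx.rev(), line.text)
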
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())


class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), "bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
        return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(
                _("censored node: %s") % short(self._filenode),
                hint=_("set censor.policy to ignore errors"),
            )

    def size(self):
        return self._filelog.size(self._filerev)

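    # Illustrative note (not part of the original class): the fallback above is
    # controlled by configuration, e.g.
    #
    #     [censor]
    #     policy = ignore
    #
    # which makes data() return an empty string for a censored file revision
    # instead of aborting.
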
    @propertycache
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]


class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text="",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra['branch'])

    def closesbranch(self):
        return 'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False
    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx


        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False


class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """

    def __init__(
        self, repo, text="", user=None, date=None, extra=None, changes=None
    ):
        branch = None
        if not extra or 'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return ''  # punt for conflicts

        return func

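    # Illustrative worked example (not part of the original class) of the
    # three-way flag merge above: with parent flags fl1 = 'x', fl2 = '' and
    # ancestor flag fla = '', we have fl2 == fla, so the executable bit
    # introduced on the first parent wins and 'x' is returned; only when all
    # three disagree is the conflict punted and '' returned.
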
    @propertycache
    def _flagfunc(self):
        except OSError:
            return ''

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return (
            (merge and self.p2())
            or (branch and self.branch() != self.p1().branch())
            or self.modified()
            or self.added()
            or self.removed()
            or (missing and self.deleted())
        )

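    # Illustrative sketch (not part of the original class): callers usually ask
    # the working context directly whether anything needs committing, e.g.
    #
    #     wctx = repo[None]
    #     if wctx.dirty(missing=True):
    #         ...  # uncommitted changes (or missing files) are present
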
    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(
                        _(
                            "%s: up to %d MB of RAM may be required "
                            "to manage this file\n"
                            "(use 'hg revert %s' to cancel the "
                            "pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            "%s not added: only files and symlinks "
                            "supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _("%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _("copy failed: %s is not a file or a " "symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    ds.add(dest)
                elif ds[dest] in 'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default='glob',
        listsubrepos=False,
        badfn=None,
    ):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            'skip updating dirstate: ' 'identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s

        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in (
            (addednodeid, status.added),
            (modifiednodeid, status.modified),
        ):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
            if f in man:
                del man[f]

        return man

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns


        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        '%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(
            self._repo.dirstate.walk(
                self._repo.narrowmatch(match),
                subrepos=sorted(self.substate),
                unknown=True,
                ignored=False,
            )
        )

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)


class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
1734 |
1964 |
1735 for pc in pcl[1:]: |
1965 for pc in pcl[1:]: |
1736 pl.append((path, filenode(pc, path), fl)) |
1966 pl.append((path, filenode(pc, path), fl)) |
1737 |
1967 |
1738 return [self._parentfilectx(p, fileid=n, filelog=l) |
1968 return [ |
1739 for p, n, l in pl if n != nullid] |
1969 self._parentfilectx(p, fileid=n, filelog=l) |
|
1970 for p, n, l in pl |
|
1971 if n != nullid |
|
1972 ] |
1740 |
1973 |
1741 def children(self): |
1974 def children(self): |
1742 return [] |
1975 return [] |
|
1976 |
1743 |
1977 |
1744 class workingfilectx(committablefilectx): |
1978 class workingfilectx(committablefilectx): |
1745 """A workingfilectx object makes access to data related to a particular |
1979 """A workingfilectx object makes access to data related to a particular |
1746 file in the working directory convenient.""" |
1980 file in the working directory convenient.""" |
|
1981 |
1747 def __init__(self, repo, path, filelog=None, workingctx=None): |
1982 def __init__(self, repo, path, filelog=None, workingctx=None): |
1748 super(workingfilectx, self).__init__(repo, path, filelog, workingctx) |
1983 super(workingfilectx, self).__init__(repo, path, filelog, workingctx) |
1749 |
1984 |
1750 @propertycache |
1985 @propertycache |
1751 def _changectx(self): |
1986 def _changectx(self): |
1752 return workingctx(self._repo) |
1987 return workingctx(self._repo) |
1753 |
1988 |
1754 def data(self): |
1989 def data(self): |
1755 return self._repo.wread(self._path) |
1990 return self._repo.wread(self._path) |
|
1991 |
1756 def copysource(self): |
1992 def copysource(self): |
1757 return self._repo.dirstate.copied(self._path) |
1993 return self._repo.dirstate.copied(self._path) |
1758 |
1994 |
1759 def size(self): |
1995 def size(self): |
1760 return self._repo.wvfs.lstat(self._path).st_size |
1996 return self._repo.wvfs.lstat(self._path).st_size |
|
1997 |
1761 def lstat(self): |
1998 def lstat(self): |
1762 return self._repo.wvfs.lstat(self._path) |
1999 return self._repo.wvfs.lstat(self._path) |
|
2000 |
1763 def date(self): |
2001 def date(self): |
1764 t, tz = self._changectx.date() |
2002 t, tz = self._changectx.date() |
1765 try: |
2003 try: |
1766 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz) |
2004 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz) |
1767 except OSError as err: |
2005 except OSError as err: |
1788 return fctx.cmp(self) |
2026 return fctx.cmp(self) |
1789 |
2027 |
1790 def remove(self, ignoremissing=False): |
2028 def remove(self, ignoremissing=False): |
1791 """wraps unlink for a repo's working directory""" |
2029 """wraps unlink for a repo's working directory""" |
1792 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs') |
2030 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs') |
1793 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing, |
2031 self._repo.wvfs.unlinkpath( |
1794 rmdir=rmdir) |
2032 self._path, ignoremissing=ignoremissing, rmdir=rmdir |
|
2033 ) |
1795 |
2034 |
1796 def write(self, data, flags, backgroundclose=False, **kwargs): |
2035 def write(self, data, flags, backgroundclose=False, **kwargs): |
1797 """wraps repo.wwrite""" |
2036 """wraps repo.wwrite""" |
1798 return self._repo.wwrite(self._path, data, flags, |
2037 return self._repo.wwrite( |
1799 backgroundclose=backgroundclose, |
2038 self._path, data, flags, backgroundclose=backgroundclose, **kwargs |
1800 **kwargs) |
2039 ) |
1801 |
2040 |
1802 def markcopied(self, src): |
2041 def markcopied(self, src): |
1803 """marks this file a copy of `src`""" |
2042 """marks this file a copy of `src`""" |
1804 self._repo.dirstate.copy(src, self._path) |
2043 self._repo.dirstate.copy(src, self._path) |
1805 |
2044 |
1825 wvfs.removedirs(f) |
2064 wvfs.removedirs(f) |
1826 |
2065 |
1827 def setflags(self, l, x): |
2066 def setflags(self, l, x): |
1828 self._repo.wvfs.setflags(self._path, l, x) |
2067 self._repo.wvfs.setflags(self._path, l, x) |
1829 |
2068 |
|
2069 |
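
workingfilectx above mostly forwards to the repository's working-directory layer (wread, wvfs.lstat, wvfs.unlinkpath). A rough standalone analogue of that delegation using os directly, just to make the shape of the class visible (class and attribute names are invented for the sketch):

import os
import stat

class workingfilesketch(object):
    """Toy analogue of workingfilectx: state is read straight from the
    working directory instead of from a stored revision."""

    def __init__(self, root, path):
        self._abs = os.path.join(root, path)

    def data(self):
        with open(self._abs, 'rb') as fp:
            return fp.read()

    def size(self):
        return os.lstat(self._abs).st_size

    def date(self, tz=0):
        # mtime on disk, paired with a timezone offset (the real class
        # borrows the offset from its changectx).
        return (os.lstat(self._abs)[stat.ST_MTIME], tz)

    def remove(self, ignoremissing=False):
        try:
            os.unlink(self._abs)
        except OSError:
            if not ignoremissing:
                raise
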
1830 class overlayworkingctx(committablectx): |
2070 class overlayworkingctx(committablectx): |
1831 """Wraps another mutable context with a write-back cache that can be |
2071 """Wraps another mutable context with a write-back cache that can be |
1832 converted into a commit context. |
2072 converted into a commit context. |
1833 |
2073 |
1834 self._cache[path] maps to a dict with keys: { |
2074 self._cache[path] maps to a dict with keys: { |
1861 return self._cache[path]['data'] |
2101 return self._cache[path]['data'] |
1862 else: |
2102 else: |
1863 # Must fallback here, too, because we only set flags. |
2103 # Must fallback here, too, because we only set flags. |
1864 return self._wrappedctx[path].data() |
2104 return self._wrappedctx[path].data() |
1865 else: |
2105 else: |
1866 raise error.ProgrammingError("No such file or directory: %s" % |
2106 raise error.ProgrammingError( |
1867 path) |
2107 "No such file or directory: %s" % path |
|
2108 ) |
1868 else: |
2109 else: |
1869 return self._wrappedctx[path].data() |
2110 return self._wrappedctx[path].data() |
1870 |
2111 |
1871 @propertycache |
2112 @propertycache |
1872 def _manifest(self): |
2113 def _manifest(self): |
1886 |
2127 |
1887 @propertycache |
2128 @propertycache |
1888 def _flagfunc(self): |
2129 def _flagfunc(self): |
1889 def f(path): |
2130 def f(path): |
1890 return self._cache[path]['flags'] |
2131 return self._cache[path]['flags'] |
|
2132 |
1891 return f |
2133 return f |
1892 |
2134 |
1893 def files(self): |
2135 def files(self): |
1894 return sorted(self.added() + self.modified() + self.removed()) |
2136 return sorted(self.added() + self.modified() + self.removed()) |
1895 |
2137 |
1896 def modified(self): |
2138 def modified(self): |
1897 return [f for f in self._cache.keys() if self._cache[f]['exists'] and |
2139 return [ |
1898 self._existsinparent(f)] |
2140 f |
|
2141 for f in self._cache.keys() |
|
2142 if self._cache[f]['exists'] and self._existsinparent(f) |
|
2143 ] |
1899 |
2144 |
1900 def added(self): |
2145 def added(self): |
1901 return [f for f in self._cache.keys() if self._cache[f]['exists'] and |
2146 return [ |
1902 not self._existsinparent(f)] |
2147 f |
|
2148 for f in self._cache.keys() |
|
2149 if self._cache[f]['exists'] and not self._existsinparent(f) |
|
2150 ] |
1903 |
2151 |
1904 def removed(self): |
2152 def removed(self): |
1905 return [f for f in self._cache.keys() if |
2153 return [ |
1906 not self._cache[f]['exists'] and self._existsinparent(f)] |
2154 f |
|
2155 for f in self._cache.keys() |
|
2156 if not self._cache[f]['exists'] and self._existsinparent(f) |
|
2157 ] |
1907 |
2158 |
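
modified(), added() and removed() above classify every dirty cache entry from two bits only: whether the entry still exists in the overlay and whether the path exists in the wrapped parent. A compact standalone model of that classification (a plain dict cache and a set of parent files instead of real contexts):

def classify(cache, parent_files):
    """cache: {path: {'exists': bool}}; parent_files: set of paths in the
    wrapped context. Returns (modified, added, removed) as sorted lists."""
    modified = sorted(f for f, e in cache.items()
                      if e['exists'] and f in parent_files)
    added = sorted(f for f, e in cache.items()
                   if e['exists'] and f not in parent_files)
    removed = sorted(f for f, e in cache.items()
                     if not e['exists'] and f in parent_files)
    return modified, added, removed

cache = {'a': {'exists': True}, 'new': {'exists': True}, 'gone': {'exists': False}}
assert classify(cache, {'a', 'gone'}) == (['a'], ['new'], ['gone'])
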
1908 def p1copies(self): |
2159 def p1copies(self): |
1909 copies = self._wrappedctx.p1copies().copy() |
2160 copies = self._wrappedctx.p1copies().copy() |
1910 narrowmatch = self._repo.narrowmatch() |
2161 narrowmatch = self._repo.narrowmatch() |
1911 for f in self._cache.keys(): |
2162 for f in self._cache.keys(): |
1912 if not narrowmatch(f): |
2163 if not narrowmatch(f): |
1913 continue |
2164 continue |
1914 copies.pop(f, None) # delete if it exists |
2165 copies.pop(f, None) # delete if it exists |
1915 source = self._cache[f]['copied'] |
2166 source = self._cache[f]['copied'] |
1916 if source: |
2167 if source: |
1917 copies[f] = source |
2168 copies[f] = source |
1918 return copies |
2169 return copies |
1919 |
2170 |
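
p1copies() starts from the wrapped context's copy map, drops the record for any file the overlay has touched, then re-adds whatever copy source the cache remembers. The same merge, reduced to dicts (narrowmatch filtering left out):

def merged_copies(parent_copies, cache):
    """parent_copies: {dest: source}; cache: {path: {'copied': source_or_None}}."""
    copies = dict(parent_copies)
    for f, entry in cache.items():
        copies.pop(f, None)          # the overlay's record wins, even if empty
        if entry.get('copied'):
            copies[f] = entry['copied']
    return copies

assert merged_copies({'b': 'a'}, {'b': {'copied': None}, 'd': {'copied': 'c'}}) \
    == {'d': 'c'}
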
1937 return self._cache[path]['date'] |
2188 return self._cache[path]['date'] |
1938 else: |
2189 else: |
1939 return self._wrappedctx[path].date() |
2190 return self._wrappedctx[path].date() |
1940 |
2191 |
1941 def markcopied(self, path, origin): |
2192 def markcopied(self, path, origin): |
1942 self._markdirty(path, exists=True, date=self.filedate(path), |
2193 self._markdirty( |
1943 flags=self.flags(path), copied=origin) |
2194 path, |
|
2195 exists=True, |
|
2196 date=self.filedate(path), |
|
2197 flags=self.flags(path), |
|
2198 copied=origin, |
|
2199 ) |
1944 |
2200 |
1945 def copydata(self, path): |
2201 def copydata(self, path): |
1946 if self.isdirty(path): |
2202 if self.isdirty(path): |
1947 return self._cache[path]['copied'] |
2203 return self._cache[path]['copied'] |
1948 else: |
2204 else: |
1951 def flags(self, path): |
2207 def flags(self, path): |
1952 if self.isdirty(path): |
2208 if self.isdirty(path): |
1953 if self._cache[path]['exists']: |
2209 if self._cache[path]['exists']: |
1954 return self._cache[path]['flags'] |
2210 return self._cache[path]['flags'] |
1955 else: |
2211 else: |
1956 raise error.ProgrammingError("No such file or directory: %s" % |
2212 raise error.ProgrammingError( |
1957 path) |
2213 "No such file or directory: %s" % path |
|
2214 ) |
1958 else: |
2215 else: |
1959 return self._wrappedctx[path].flags() |
2216 return self._wrappedctx[path].flags() |
1960 |
2217 |
1961 def __contains__(self, key): |
2218 def __contains__(self, key): |
1962 if key in self._cache: |
2219 if key in self._cache: |
1978 |
2235 |
1979 Since we never write to the filesystem and never call `applyupdates` in |
2236 Since we never write to the filesystem and never call `applyupdates` in |
1980 IMM, we'll never check that a path is actually writable -- e.g., because |
2237 IMM, we'll never check that a path is actually writable -- e.g., because |
1981 it adds `a/foo`, but `a` is actually a file in the other commit. |
2238 it adds `a/foo`, but `a` is actually a file in the other commit. |
1982 """ |
2239 """ |
|
2240 |
1983 def fail(path, component): |
2241 def fail(path, component): |
1984 # p1() is the base and we're receiving "writes" for p2()'s |
2242 # p1() is the base and we're receiving "writes" for p2()'s |
1985 # files. |
2243 # files. |
1986 if 'l' in self.p1()[component].flags(): |
2244 if 'l' in self.p1()[component].flags(): |
1987 raise error.Abort("error: %s conflicts with symlink %s " |
2245 raise error.Abort( |
1988 "in %d." % (path, component, |
2246 "error: %s conflicts with symlink %s " |
1989 self.p1().rev())) |
2247 "in %d." % (path, component, self.p1().rev()) |
|
2248 ) |
1990 else: |
2249 else: |
1991 raise error.Abort("error: '%s' conflicts with file '%s' in " |
2250 raise error.Abort( |
1992 "%d." % (path, component, |
2251 "error: '%s' conflicts with file '%s' in " |
1993 self.p1().rev())) |
2252 "%d." % (path, component, self.p1().rev()) |
|
2253 ) |
1994 |
2254 |
1995 # Test that each new directory to be created to write this path from p2 |
2255 # Test that each new directory to be created to write this path from p2 |
1996 # is not a file in p1. |
2256 # is not a file in p1. |
1997 components = path.split('/') |
2257 components = path.split('/') |
1998 for i in pycompat.xrange(len(components)): |
2258 for i in pycompat.xrange(len(components)): |
2010 return |
2270 return |
2011 # omit the files which are deleted in current IMM wctx |
2271 # omit the files which are deleted in current IMM wctx |
2012 mfiles = [m for m in mfiles if m in self] |
2272 mfiles = [m for m in mfiles if m in self] |
2013 if not mfiles: |
2273 if not mfiles: |
2014 return |
2274 return |
2015 raise error.Abort("error: file '%s' cannot be written because " |
2275 raise error.Abort( |
2016 " '%s/' is a directory in %s (containing %d " |
2276 "error: file '%s' cannot be written because " |
2017 "entries: %s)" |
2277 " '%s/' is a directory in %s (containing %d " |
2018 % (path, path, self.p1(), len(mfiles), |
2278 "entries: %s)" |
2019 ', '.join(mfiles))) |
2279 % (path, path, self.p1(), len(mfiles), ', '.join(mfiles)) |
|
2280 ) |
2020 |
2281 |
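
The audit above walks every directory prefix of the incoming path and aborts when that prefix is already a file (or symlink) in p1, because writing the new file would silently require turning that entry into a directory. A standalone sketch of just the prefix check (sets instead of manifests, a plain exception instead of error.Abort; the reverse check for an existing directory is not repeated here):

def audit_path_conflicts(path, p1_files):
    """Raise if any ancestor "directory" of `path` is actually a file in p1.
    p1_files: set of file paths present in the base commit."""
    components = path.split('/')
    for i in range(1, len(components)):
        prefix = '/'.join(components[:i])
        if prefix in p1_files:
            raise ValueError(
                "error: '%s' conflicts with file '%s' in the base commit"
                % (path, prefix))

audit_path_conflicts('a/b/c', {'x', 'a/b/other'})   # fine
try:
    audit_path_conflicts('a/foo', {'a'})            # 'a' is a file in p1
except ValueError:
    pass
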
2021 def write(self, path, data, flags='', **kwargs): |
2282 def write(self, path, data, flags='', **kwargs): |
2022 if data is None: |
2283 if data is None: |
2023 raise error.ProgrammingError("data must be non-None") |
2284 raise error.ProgrammingError("data must be non-None") |
2024 self._auditconflicts(path) |
2285 self._auditconflicts(path) |
2025 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(), |
2286 self._markdirty( |
2026 flags=flags) |
2287 path, exists=True, data=data, date=dateutil.makedate(), flags=flags |
|
2288 ) |
2027 |
2289 |
2028 def setflags(self, path, l, x): |
2290 def setflags(self, path, l, x): |
2029 flag = '' |
2291 flag = '' |
2030 if l: |
2292 if l: |
2031 flag = 'l' |
2293 flag = 'l' |
2032 elif x: |
2294 elif x: |
2033 flag = 'x' |
2295 flag = 'x' |
2034 self._markdirty(path, exists=True, date=dateutil.makedate(), |
2296 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag) |
2035 flags=flag) |
|
2036 |
2297 |
2037 def remove(self, path): |
2298 def remove(self, path): |
2038 self._markdirty(path, exists=False) |
2299 self._markdirty(path, exists=False) |
2039 |
2300 |
2040 def exists(self, path): |
2301 def exists(self, path): |
2042 return False if they are broken. |
2303 return False if they are broken. |
2043 """ |
2304 """ |
2044 if self.isdirty(path): |
2305 if self.isdirty(path): |
2045 # If this path exists and is a symlink, "follow" it by calling |
2306 # If this path exists and is a symlink, "follow" it by calling |
2046 # exists on the destination path. |
2307 # exists on the destination path. |
2047 if (self._cache[path]['exists'] and |
2308 if ( |
2048 'l' in self._cache[path]['flags']): |
2309 self._cache[path]['exists'] |
|
2310 and 'l' in self._cache[path]['flags'] |
|
2311 ): |
2049 return self.exists(self._cache[path]['data'].strip()) |
2312 return self.exists(self._cache[path]['data'].strip()) |
2050 else: |
2313 else: |
2051 return self._cache[path]['exists'] |
2314 return self._cache[path]['exists'] |
2052 |
2315 |
2053 return self._existsinparent(path) |
2316 return self._existsinparent(path) |
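
exists() above treats a dirty entry carrying the 'l' flag as an in-memory symlink and re-resolves its target, so a dangling link reports False. A small standalone model of that lookup (flat dict cache, parent manifest as a set; like the original, no guard against symlink cycles):

def exists(path, cache, parent_files):
    """cache: {path: {'exists': bool, 'flags': str, 'data': str}};
    parent_files: set of paths in the wrapped context."""
    if path in cache:                       # dirty in the overlay
        entry = cache[path]
        if entry['exists'] and 'l' in entry['flags']:
            # "Follow" the in-memory symlink by resolving its target.
            return exists(entry['data'].strip(), cache, parent_files)
        return entry['exists']
    return path in parent_files             # fall back to the wrapped context

cache = {'link': {'exists': True, 'flags': 'l', 'data': 'target'},
         'dangling': {'exists': True, 'flags': 'l', 'data': 'missing'}}
assert exists('link', cache, {'target'}) is True
assert exists('dangling', cache, {'other'}) is False
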
2062 def size(self, path): |
2325 def size(self, path): |
2063 if self.isdirty(path): |
2326 if self.isdirty(path): |
2064 if self._cache[path]['exists']: |
2327 if self._cache[path]['exists']: |
2065 return len(self._cache[path]['data']) |
2328 return len(self._cache[path]['data']) |
2066 else: |
2329 else: |
2067 raise error.ProgrammingError("No such file or directory: %s" % |
2330 raise error.ProgrammingError( |
2068 path) |
2331 "No such file or directory: %s" % path |
|
2332 ) |
2069 return self._wrappedctx[path].size() |
2333 return self._wrappedctx[path].size() |
2070 |
2334 |
2071 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None, |
2335 def tomemctx( |
2072 user=None, editor=None): |
2336 self, |
|
2337 text, |
|
2338 branch=None, |
|
2339 extra=None, |
|
2340 date=None, |
|
2341 parents=None, |
|
2342 user=None, |
|
2343 editor=None, |
|
2344 ): |
2073 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be |
2345 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be |
2074 committed. |
2346 committed. |
2075 |
2347 |
2076 ``text`` is the commit message. |
2348 ``text`` is the commit message. |
2077 ``parents`` (optional) are rev numbers. |
2349 ``parents`` (optional) are rev numbers. |
2087 parents = (self._repo[parents[0]], None) |
2359 parents = (self._repo[parents[0]], None) |
2088 else: |
2360 else: |
2089 parents = (self._repo[parents[0]], self._repo[parents[1]]) |
2361 parents = (self._repo[parents[0]], self._repo[parents[1]]) |
2090 |
2362 |
2091 files = self.files() |
2363 files = self.files() |
|
2364 |
2092 def getfile(repo, memctx, path): |
2365 def getfile(repo, memctx, path): |
2093 if self._cache[path]['exists']: |
2366 if self._cache[path]['exists']: |
2094 return memfilectx(repo, memctx, path, |
2367 return memfilectx( |
2095 self._cache[path]['data'], |
2368 repo, |
2096 'l' in self._cache[path]['flags'], |
2369 memctx, |
2097 'x' in self._cache[path]['flags'], |
2370 path, |
2098 self._cache[path]['copied']) |
2371 self._cache[path]['data'], |
|
2372 'l' in self._cache[path]['flags'], |
|
2373 'x' in self._cache[path]['flags'], |
|
2374 self._cache[path]['copied'], |
|
2375 ) |
2099 else: |
2376 else: |
2100 # Returning None, but including the path in `files`, is |
2377 # Returning None, but including the path in `files`, is |
2101 # necessary for memctx to register a deletion. |
2378 # necessary for memctx to register a deletion. |
2102 return None |
2379 return None |
2103 return memctx(self._repo, parents, text, files, getfile, date=date, |
2380 |
2104 extra=extra, user=user, branch=branch, editor=editor) |
2381 return memctx( |
|
2382 self._repo, |
|
2383 parents, |
|
2384 text, |
|
2385 files, |
|
2386 getfile, |
|
2387 date=date, |
|
2388 extra=extra, |
|
2389 user=user, |
|
2390 branch=branch, |
|
2391 editor=editor, |
|
2392 ) |
2105 |
2393 |
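
tomemctx() boils the cache down to the two inputs memctx needs: the list of touched paths and a getfile callback that yields content for surviving files and None for deletions. A minimal standalone model of that conversion; the callback here returns a plain dict rather than constructing a real memfilectx:

def cache_to_memctx_inputs(cache):
    """cache: {path: {'exists': bool, 'data': bytes, 'flags': str,
    'copied': source_or_None}}. Returns (files, getfile)."""
    files = sorted(cache)

    def getfile(path):
        entry = cache[path]
        if not entry['exists']:
            # None, with the path still listed in `files`, records a deletion.
            return None
        return {
            'data': entry['data'],
            'islink': 'l' in entry['flags'],
            'isexec': 'x' in entry['flags'],
            'copysource': entry['copied'],
        }

    return files, getfile

files, getfile = cache_to_memctx_inputs(
    {'a': {'exists': True, 'data': b'x', 'flags': 'x', 'copied': None},
     'b': {'exists': False, 'data': None, 'flags': '', 'copied': None}})
assert files == ['a', 'b'] and getfile('b') is None and getfile('a')['isexec']
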
2106 def isdirty(self, path): |
2394 def isdirty(self, path): |
2107 return path in self._cache |
2395 return path in self._cache |
2108 |
2396 |
2109 def isempty(self): |
2397 def isempty(self): |
2124 """ |
2412 """ |
2125 keys = [] |
2413 keys = [] |
2126 # This won't be perfect, but can help performance significantly when |
2414 # This won't be perfect, but can help performance significantly when |
2127 # using things like remotefilelog. |
2415 # using things like remotefilelog. |
2128 scmutil.prefetchfiles( |
2416 scmutil.prefetchfiles( |
2129 self.repo(), [self.p1().rev()], |
2417 self.repo(), |
2130 scmutil.matchfiles(self.repo(), self._cache.keys())) |
2418 [self.p1().rev()], |
|
2419 scmutil.matchfiles(self.repo(), self._cache.keys()), |
|
2420 ) |
2131 |
2421 |
2132 for path in self._cache.keys(): |
2422 for path in self._cache.keys(): |
2133 cache = self._cache[path] |
2423 cache = self._cache[path] |
2134 try: |
2424 try: |
2135 underlying = self._wrappedctx[path] |
2425 underlying = self._wrappedctx[path] |
2136 if (underlying.data() == cache['data'] and |
2426 if ( |
2137 underlying.flags() == cache['flags']): |
2427 underlying.data() == cache['data'] |
|
2428 and underlying.flags() == cache['flags'] |
|
2429 ): |
2138 keys.append(path) |
2430 keys.append(path) |
2139 except error.ManifestLookupError: |
2431 except error.ManifestLookupError: |
2140 # Path not in the underlying manifest (created). |
2432 # Path not in the underlying manifest (created). |
2141 continue |
2433 continue |
2142 |
2434 |
2143 for path in keys: |
2435 for path in keys: |
2144 del self._cache[path] |
2436 del self._cache[path] |
2145 return keys |
2437 return keys |
2146 |
2438 |
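
The loop above compacts the cache: any entry whose data and flags already match the wrapped context is dropped, while entries the underlying manifest has never heard of (newly created files) are kept. The same idea with dicts, using KeyError in place of ManifestLookupError:

def compact(cache, underlying):
    """cache: {path: {'data': ..., 'flags': ...}};
    underlying: {path: (data, flags)} for the wrapped context."""
    clean = []
    for path, entry in cache.items():
        try:
            data, flags = underlying[path]
        except KeyError:
            continue                      # created in the overlay: keep it
        if data == entry['data'] and flags == entry['flags']:
            clean.append(path)
    for path in clean:
        del cache[path]
    return clean

cache = {'same': {'data': b'x', 'flags': ''}, 'new': {'data': b'y', 'flags': ''}}
assert compact(cache, {'same': (b'x', '')}) == ['same']
assert list(cache) == ['new']
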
2147 def _markdirty(self, path, exists, data=None, date=None, flags='', |
2439 def _markdirty( |
2148 copied=None): |
2440 self, path, exists, data=None, date=None, flags='', copied=None |
|
2441 ): |
2149 # data not provided, let's see if we already have some; if not, let's |
2442 # data not provided, let's see if we already have some; if not, let's |
2150 # grab it from our underlying context, so that we always have data if |
2443 # grab it from our underlying context, so that we always have data if |
2151 # the file is marked as existing. |
2444 # the file is marked as existing. |
2152 if exists and data is None: |
2445 if exists and data is None: |
2153 oldentry = self._cache.get(path) or {} |
2446 oldentry = self._cache.get(path) or {} |
2162 'flags': flags, |
2455 'flags': flags, |
2163 'copied': copied, |
2456 'copied': copied, |
2164 } |
2457 } |
2165 |
2458 |
2166 def filectx(self, path, filelog=None): |
2459 def filectx(self, path, filelog=None): |
2167 return overlayworkingfilectx(self._repo, path, parent=self, |
2460 return overlayworkingfilectx( |
2168 filelog=filelog) |
2461 self._repo, path, parent=self, filelog=filelog |
|
2462 ) |
|
2463 |
2169 |
2464 |
2170 class overlayworkingfilectx(committablefilectx): |
2465 class overlayworkingfilectx(committablefilectx): |
2171 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory |
2466 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory |
2172 cache, which can be flushed through later by calling ``flush()``.""" |
2467 cache, which can be flushed through later by calling ``flush()``.""" |
2173 |
2468 |
2174 def __init__(self, repo, path, filelog=None, parent=None): |
2469 def __init__(self, repo, path, filelog=None, parent=None): |
2175 super(overlayworkingfilectx, self).__init__(repo, path, filelog, |
2470 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent) |
2176 parent) |
|
2177 self._repo = repo |
2471 self._repo = repo |
2178 self._parent = parent |
2472 self._parent = parent |
2179 self._path = path |
2473 self._path = path |
2180 |
2474 |
2181 def cmp(self, fctx): |
2475 def cmp(self, fctx): |
2221 return self._parent.remove(self._path) |
2515 return self._parent.remove(self._path) |
2222 |
2516 |
2223 def clearunknown(self): |
2517 def clearunknown(self): |
2224 pass |
2518 pass |
2225 |
2519 |
|
2520 |
2226 class workingcommitctx(workingctx): |
2521 class workingcommitctx(workingctx): |
2227 """A workingcommitctx object makes access to data related to |
2522 """A workingcommitctx object makes access to data related to |
2228 the revision being committed convenient. |
2523 the revision being committed convenient. |
2229 |
2524 |
2230 This hides changes in the working directory, if they aren't |
2525 This hides changes in the working directory, if they aren't |
2231 committed in this context. |
2526 committed in this context. |
2232 """ |
2527 """ |
2233 def __init__(self, repo, changes, |
2528 |
2234 text="", user=None, date=None, extra=None): |
2529 def __init__( |
2235 super(workingcommitctx, self).__init__(repo, text, user, date, extra, |
2530 self, repo, changes, text="", user=None, date=None, extra=None |
2236 changes) |
2531 ): |
|
2532 super(workingcommitctx, self).__init__( |
|
2533 repo, text, user, date, extra, changes |
|
2534 ) |
2237 |
2535 |
2238 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False): |
2536 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False): |
2239 """Return matched files only in ``self._status`` |
2537 """Return matched files only in ``self._status`` |
2240 |
2538 |
2241 Uncommitted files appear "clean" via this context, even if |
2539 Uncommitted files appear "clean" via this context, even if |
2243 """ |
2541 """ |
2244 if clean: |
2542 if clean: |
2245 clean = [f for f in self._manifest if f not in self._changedset] |
2543 clean = [f for f in self._manifest if f not in self._changedset] |
2246 else: |
2544 else: |
2247 clean = [] |
2545 clean = [] |
2248 return scmutil.status([f for f in self._status.modified if match(f)], |
2546 return scmutil.status( |
2249 [f for f in self._status.added if match(f)], |
2547 [f for f in self._status.modified if match(f)], |
2250 [f for f in self._status.removed if match(f)], |
2548 [f for f in self._status.added if match(f)], |
2251 [], [], [], clean) |
2549 [f for f in self._status.removed if match(f)], |
|
2550 [], |
|
2551 [], |
|
2552 [], |
|
2553 clean, |
|
2554 ) |
2252 |
2555 |
2253 @propertycache |
2556 @propertycache |
2254 def _changedset(self): |
2557 def _changedset(self): |
2255 """Return the set of files changed in this context |
2558 """Return the set of files changed in this context |
2256 """ |
2559 """ |
2257 changed = set(self._status.modified) |
2560 changed = set(self._status.modified) |
2258 changed.update(self._status.added) |
2561 changed.update(self._status.added) |
2259 changed.update(self._status.removed) |
2562 changed.update(self._status.removed) |
2260 return changed |
2563 return changed |
|
2564 |
2261 |
2565 |
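
_dirstatestatus() above narrows the recorded status to files accepted by the commit's matcher and, when asked, reports every other manifest file as clean. A compressed standalone version of that filtering (plain lists and a predicate instead of the status object and matcher):

def commit_status(modified, added, removed, manifest, changed, match,
                  listclean=False):
    """Filter a recorded status down to `match`; optionally report the
    untouched manifest files as clean."""
    clean = [f for f in manifest if f not in changed] if listclean else []
    return ([f for f in modified if match(f)],
            [f for f in added if match(f)],
            [f for f in removed if match(f)],
            clean)

m, a, r, c = commit_status(['x', 'y'], ['z'], [], ['x', 'y', 'z', 'w'],
                           changed={'x', 'y', 'z'},
                           match=lambda f: f != 'y', listclean=True)
assert (m, a, r, c) == (['x'], ['z'], [], ['w'])
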
2262 def makecachingfilectxfn(func): |
2566 def makecachingfilectxfn(func): |
2263 """Create a filectxfn that caches based on the path. |
2567 """Create a filectxfn that caches based on the path. |
2264 |
2568 |
2265 We can't use util.cachefunc because it uses all arguments as the cache |
2569 We can't use util.cachefunc because it uses all arguments as the cache |
2273 cache[path] = func(repo, memctx, path) |
2577 cache[path] = func(repo, memctx, path) |
2274 return cache[path] |
2578 return cache[path] |
2275 |
2579 |
2276 return getfilectx |
2580 return getfilectx |
2277 |
2581 |
|
2582 |
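
makecachingfilectxfn memoizes on the path alone, since the repo and memctx arguments are not useful cache keys. The wrapper on its own, runnable as-is (the wrapped function below is a stand-in, not a real filectxfn):

def make_caching_by_path(func):
    """Memoize func(repo, memctx, path) keyed only on `path`."""
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

calls = []
def expensive(repo, memctx, path):
    calls.append(path)
    return 'fctx:%s' % path

getfilectx = make_caching_by_path(expensive)
getfilectx(None, None, 'a'); getfilectx(None, None, 'a')
assert calls == ['a']             # second lookup served from the cache
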
2278 def memfilefromctx(ctx): |
2583 def memfilefromctx(ctx): |
2279 """Given a context return a memfilectx for ctx[path] |
2584 """Given a context return a memfilectx for ctx[path] |
2280 |
2585 |
2281 This is a convenience method for building a memctx based on another |
2586 This is a convenience method for building a memctx based on another |
2282 context. |
2587 context. |
2283 """ |
2588 """ |
|
2589 |
2284 def getfilectx(repo, memctx, path): |
2590 def getfilectx(repo, memctx, path): |
2285 fctx = ctx[path] |
2591 fctx = ctx[path] |
2286 copysource = fctx.copysource() |
2592 copysource = fctx.copysource() |
2287 return memfilectx(repo, memctx, path, fctx.data(), |
2593 return memfilectx( |
2288 islink=fctx.islink(), isexec=fctx.isexec(), |
2594 repo, |
2289 copysource=copysource) |
2595 memctx, |
|
2596 path, |
|
2597 fctx.data(), |
|
2598 islink=fctx.islink(), |
|
2599 isexec=fctx.isexec(), |
|
2600 copysource=copysource, |
|
2601 ) |
2290 |
2602 |
2291 return getfilectx |
2603 return getfilectx |
|
2604 |
2292 |
2605 |
2293 def memfilefrompatch(patchstore): |
2606 def memfilefrompatch(patchstore): |
2294 """Given a patch (e.g. patchstore object) return a memfilectx |
2607 """Given a patch (e.g. patchstore object) return a memfilectx |
2295 |
2608 |
2296 This is a convenience method for building a memctx based on a patchstore. |
2609 This is a convenience method for building a memctx based on a patchstore. |
2297 """ |
2610 """ |
|
2611 |
2298 def getfilectx(repo, memctx, path): |
2612 def getfilectx(repo, memctx, path): |
2299 data, mode, copysource = patchstore.getfile(path) |
2613 data, mode, copysource = patchstore.getfile(path) |
2300 if data is None: |
2614 if data is None: |
2301 return None |
2615 return None |
2302 islink, isexec = mode |
2616 islink, isexec = mode |
2303 return memfilectx(repo, memctx, path, data, islink=islink, |
2617 return memfilectx( |
2304 isexec=isexec, copysource=copysource) |
2618 repo, |
|
2619 memctx, |
|
2620 path, |
|
2621 data, |
|
2622 islink=islink, |
|
2623 isexec=isexec, |
|
2624 copysource=copysource, |
|
2625 ) |
2305 |
2626 |
2306 return getfilectx |
2627 return getfilectx |
|
2628 |
2307 |
2629 |
2308 class memctx(committablectx): |
2630 class memctx(committablectx): |
2309 """Use memctx to perform in-memory commits via localrepo.commitctx(). |
2631 """Use memctx to perform in-memory commits via localrepo.commitctx(). |
2310 |
2632 |
2311 Revision information is supplied at initialization time while |
2633 Revision information is supplied at initialization time while |
2336 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files. |
2658 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files. |
2337 # Extensions that need to retain compatibility across Mercurial 3.1 can use |
2659 # Extensions that need to retain compatibility across Mercurial 3.1 can use |
2338 # this field to determine what to do in filectxfn. |
2660 # this field to determine what to do in filectxfn. |
2339 _returnnoneformissingfiles = True |
2661 _returnnoneformissingfiles = True |
2340 |
2662 |
2341 def __init__(self, repo, parents, text, files, filectxfn, user=None, |
2663 def __init__( |
2342 date=None, extra=None, branch=None, editor=False): |
2664 self, |
2343 super(memctx, self).__init__(repo, text, user, date, extra, |
2665 repo, |
2344 branch=branch) |
2666 parents, |
|
2667 text, |
|
2668 files, |
|
2669 filectxfn, |
|
2670 user=None, |
|
2671 date=None, |
|
2672 extra=None, |
|
2673 branch=None, |
|
2674 editor=False, |
|
2675 ): |
|
2676 super(memctx, self).__init__( |
|
2677 repo, text, user, date, extra, branch=branch |
|
2678 ) |
2345 self._rev = None |
2679 self._rev = None |
2346 self._node = None |
2680 self._node = None |
2347 parents = [(p or nullid) for p in parents] |
2681 parents = [(p or nullid) for p in parents] |
2348 p1, p2 = parents |
2682 p1, p2 = parents |
2349 self._parents = [self._repo[p] for p in (p1, p2)] |
2683 self._parents = [self._repo[p] for p in (p1, p2)] |
2418 else: |
2752 else: |
2419 removed.append(f) |
2753 removed.append(f) |
2420 |
2754 |
2421 return scmutil.status(modified, added, removed, [], [], [], []) |
2755 return scmutil.status(modified, added, removed, [], [], [], []) |
2422 |
2756 |
|
2757 |
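
Tying memctx, memfilectx and a filectxfn together: a hedged sketch of how a caller might build an in-memory commit from a plain dict of file contents, assuming `repo` is an already-open localrepository handle; locking and error handling are omitted. The memctx and memfilectx signatures follow the definitions shown in this file, and commitctx is the entry point named in the memctx docstring.

from mercurial.context import memctx, memfilectx

def commit_in_memory(repo, files, text, user=None):
    """files: {path: bytes_or_None}; a None value marks a deletion.
    Returns the node of the new commit."""
    def getfilectx(repo, mctx, path):
        data = files[path]
        if data is None:
            # None, with the path still listed in `files`, records a deletion.
            return None
        return memfilectx(repo, mctx, path, data)

    p1 = repo['.'].node()
    mctx = memctx(repo, [p1, None], text, sorted(files), getfilectx, user=user)
    return repo.commitctx(mctx)
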
2423 class memfilectx(committablefilectx): |
2758 class memfilectx(committablefilectx): |
2424 """memfilectx represents an in-memory file to commit. |
2759 """memfilectx represents an in-memory file to commit. |
2425 |
2760 |
2426 See memctx and committablefilectx for more details. |
2761 See memctx and committablefilectx for more details. |
2427 """ |
2762 """ |
2428 def __init__(self, repo, changectx, path, data, islink=False, |
2763 |
2429 isexec=False, copysource=None): |
2764 def __init__( |
|
2765 self, |
|
2766 repo, |
|
2767 changectx, |
|
2768 path, |
|
2769 data, |
|
2770 islink=False, |
|
2771 isexec=False, |
|
2772 copysource=None, |
|
2773 ): |
2430 """ |
2774 """ |
2431 path is the normalized file path relative to repository root. |
2775 path is the normalized file path relative to repository root. |
2432 data is the file content as a string. |
2776 data is the file content as a string. |
2433 islink is True if the file is a symbolic link. |
2777 islink is True if the file is a symbolic link. |
2434 isexec is True if the file is executable. |
2778 isexec is True if the file is executable. |
2476 user receives the committer name and defaults to current repository |
2820 user receives the committer name and defaults to current repository |
2477 username, date is the commit date in any format supported by |
2821 username, date is the commit date in any format supported by |
2478 dateutil.parsedate() and defaults to current date, extra is a dictionary of |
2822 dateutil.parsedate() and defaults to current date, extra is a dictionary of |
2479 metadata or is left empty. |
2823 metadata or is left empty. |
2480 """ |
2824 """ |
2481 def __init__(self, repo, originalctx, parents=None, text=None, user=None, |
2825 |
2482 date=None, extra=None, editor=False): |
2826 def __init__( |
|
2827 self, |
|
2828 repo, |
|
2829 originalctx, |
|
2830 parents=None, |
|
2831 text=None, |
|
2832 user=None, |
|
2833 date=None, |
|
2834 extra=None, |
|
2835 editor=False, |
|
2836 ): |
2483 if text is None: |
2837 if text is None: |
2484 text = originalctx.description() |
2838 text = originalctx.description() |
2485 super(metadataonlyctx, self).__init__(repo, text, user, date, extra) |
2839 super(metadataonlyctx, self).__init__(repo, text, user, date, extra) |
2486 self._rev = None |
2840 self._rev = None |
2487 self._node = None |
2841 self._node = None |
2498 |
2852 |
2499 # sanity check to ensure that the reused manifest parents are |
2853 # sanity check to ensure that the reused manifest parents are |
2500 # manifests of our commit parents |
2854 # manifests of our commit parents |
2501 mp1, mp2 = self.manifestctx().parents |
2855 mp1, mp2 = self.manifestctx().parents |
2502 if p1 != nullid and p1.manifestnode() != mp1: |
2856 if p1 != nullid and p1.manifestnode() != mp1: |
2503 raise RuntimeError(r"can't reuse the manifest: its p1 " |
2857 raise RuntimeError( |
2504 r"doesn't match the new ctx p1") |
2858 r"can't reuse the manifest: its p1 " |
|
2859 r"doesn't match the new ctx p1" |
|
2860 ) |
2505 if p2 != nullid and p2.manifestnode() != mp2: |
2861 if p2 != nullid and p2.manifestnode() != mp2: |
2506 raise RuntimeError(r"can't reuse the manifest: " |
2862 raise RuntimeError( |
2507 r"its p2 doesn't match the new ctx p2") |
2863 r"can't reuse the manifest: " |
|
2864 r"its p2 doesn't match the new ctx p2" |
|
2865 ) |
2508 |
2866 |
2509 self._files = originalctx.files() |
2867 self._files = originalctx.files() |
2510 self.substate = {} |
2868 self.substate = {} |
2511 |
2869 |
2512 if editor: |
2870 if editor: |
2556 else: |
2914 else: |
2557 removed.append(f) |
2915 removed.append(f) |
2558 |
2916 |
2559 return scmutil.status(modified, added, removed, [], [], [], []) |
2917 return scmutil.status(modified, added, removed, [], [], [], []) |
2560 |
2918 |
|
2919 |
2561 class arbitraryfilectx(object): |
2920 class arbitraryfilectx(object): |
2562 """Allows you to use filectx-like functions on a file in an arbitrary |
2921 """Allows you to use filectx-like functions on a file in an arbitrary |
2563 location on disk, possibly not in the working directory. |
2922 location on disk, possibly not in the working directory. |
2564 """ |
2923 """ |
|
2924 |
2565 def __init__(self, path, repo=None): |
2925 def __init__(self, path, repo=None): |
2566 # Repo is optional because contrib/simplemerge uses this class. |
2926 # Repo is optional because contrib/simplemerge uses this class. |
2567 self._repo = repo |
2927 self._repo = repo |
2568 self._path = path |
2928 self._path = path |
2569 |
2929 |
2570 def cmp(self, fctx): |
2930 def cmp(self, fctx): |
2571 # filecmp follows symlinks whereas `cmp` should not, so skip the fast |
2931 # filecmp follows symlinks whereas `cmp` should not, so skip the fast |
2572 # path if either side is a symlink. |
2932 # path if either side is a symlink. |
2573 symlinks = ('l' in self.flags() or 'l' in fctx.flags()) |
2933 symlinks = 'l' in self.flags() or 'l' in fctx.flags() |
2574 if not symlinks and isinstance(fctx, workingfilectx) and self._repo: |
2934 if not symlinks and isinstance(fctx, workingfilectx) and self._repo: |
2575 # Add a fast-path for merge if both sides are disk-backed. |
2935 # Add a fast-path for merge if both sides are disk-backed. |
2576 # Note that filecmp uses the opposite return values (True if same) |
2936 # Note that filecmp uses the opposite return values (True if same) |
2577 # from our cmp functions (True if different). |
2937 # from our cmp functions (True if different). |
2578 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path())) |
2938 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path())) |