changeset 6876:077f1e637cd8
parent    6875:0d714a48ab53
parent    6840:80e51429cb9a
child     6884:11229144aa01

comparison: 6875:0d714a48ab53 (left column, removed lines) | 6876:077f1e637cd8 (right column, added lines)
7 |
7 |
8 from node import bin, hex, nullid, nullrev, short |
8 from node import bin, hex, nullid, nullrev, short |
9 from i18n import _ |
9 from i18n import _ |
10 import repo, changegroup |
10 import repo, changegroup |
11 import changelog, dirstate, filelog, manifest, context, weakref |
11 import changelog, dirstate, filelog, manifest, context, weakref |
12 import lock, transaction, stat, errno, ui |
12 import lock, transaction, stat, errno, ui, store |
13 import os, revlog, time, util, extensions, hook, inspect |
13 import os, revlog, time, util, extensions, hook, inspect |
14 import match as match_ |
|
14 |
15 |
15 class localrepository(repo.repository): |
16 class localrepository(repo.repository): |
16 capabilities = util.set(('lookup', 'changegroupsubset')) |
17 capabilities = util.set(('lookup', 'changegroupsubset')) |
17 supported = ('revlogv1', 'store') |
18 supported = ('revlogv1', 'store') |
18 |
19 |
57 # check them |
58 # check them |
58 for r in requirements: |
59 for r in requirements: |
59 if r not in self.supported: |
60 if r not in self.supported: |
60 raise repo.RepoError(_("requirement '%s' not supported") % r) |
61 raise repo.RepoError(_("requirement '%s' not supported") % r) |
61 |
62 |
6875 (removed), lines 62-86:
62         # setup store
63         if "store" in requirements:
64             self.encodefn = util.encodefilename
65             self.decodefn = util.decodefilename
66             self.spath = os.path.join(self.path, "store")
67         else:
68             self.encodefn = lambda x: x
69             self.decodefn = lambda x: x
70             self.spath = self.path
71
72         try:
73             # files in .hg/ will be created using this mode
74             mode = os.stat(self.spath).st_mode
75             # avoid some useless chmods
76             if (0777 & ~util._umask) == (0777 & mode):
77                 mode = None
78         except OSError:
79             mode = None
80
81         self._createmode = mode
82         self.opener.createmode = mode
83         sopener = util.opener(self.spath)
84         sopener.createmode = mode
85         self.sopener = util.encodedopener(sopener, self.encodefn)
86
6876 (added), lines 63-70:
63         self.store = store.store(requirements, self.path)
64
65         self.spath = self.store.path
66         self.sopener = self.store.opener
67         self.sjoin = self.store.join
68         self._createmode = self.store.createmode
69         self.opener.createmode = self.store.createmode
70
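The hunk above replaces the hand-rolled store setup (filename encode/decode functions, the store path, createmode probing, and the encoded opener) with a single store.store(requirements, self.path) object that exposes path, opener, join, and createmode. Below is a minimal standalone sketch of the opener-wrapping idea the removed code used and that the new store module is assumed to encapsulate; makeopener and encodedopener here are illustrative stand-ins, not Mercurial's API.

    import os

    def makeopener(base):
        """Return an opener rooted at `base`; creates parent dirs for writes."""
        def opener(path, mode='r'):
            full = os.path.join(base, path)
            if 'w' in mode or 'a' in mode:
                d = os.path.dirname(full)
                if d and not os.path.isdir(d):
                    os.makedirs(d)
            return open(full, mode)
        return opener

    def encodedopener(opener, encodefn):
        """Wrap an opener so every filename is passed through encodefn first."""
        def wrapped(path, mode='r'):
            return opener(encodefn(path), mode)
        return wrapped

    # illustrative use, mirroring the removed 6875 code path:
    # sopener = encodedopener(makeopener('.hg/store'), lambda p: p.lower())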
87 self.ui = ui.ui(parentui=parentui) |
71 self.ui = ui.ui(parentui=parentui) |
88 try: |
72 try: |
89 self.ui.readconfig(self.join("hgrc"), self.root) |
73 self.ui.readconfig(self.join("hgrc"), self.root) |
90 extensions.loadall(self.ui) |
74 extensions.loadall(self.ui) |
114 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root) |
98 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root) |
115 return self.dirstate |
99 return self.dirstate |
116 else: |
100 else: |
117 raise AttributeError, name |
101 raise AttributeError, name |
118 |
102 |
103 def __getitem__(self, changeid): |
|
104 if changeid == None: |
|
105 return context.workingctx(self) |
|
106 return context.changectx(self, changeid) |
|
107 |
|
108 def __nonzero__(self): |
|
109 return True |
|
110 |
|
111 def __len__(self): |
|
112 return len(self.changelog) |
|
113 |
|
114 def __iter__(self): |
|
115 for i in xrange(len(self)): |
|
116 yield i |
|
117 |
|
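With __getitem__, __nonzero__, __len__ and __iter__ added above, a localrepository can now be used like a container: repo[changeid] returns a changectx (repo[None] the working-directory context), len(repo) is the number of changesets, and iterating the repo yields revision numbers. A minimal standalone sketch of that protocol (Python 2, matching the codebase; the class and data are stand-ins, not Mercurial objects):

    class revcontainer(object):
        """Toy container mimicking the protocol localrepository gains in 6876."""
        def __init__(self, changesets):
            self._changesets = changesets      # stand-in for the changelog

        def __getitem__(self, changeid):
            if changeid is None:
                return 'workingctx'            # repo[None] -> working directory
            return self._changesets[changeid]  # repo[rev] -> that changeset

        def __nonzero__(self):
            return True                        # a repository object is always truthy

        def __len__(self):
            return len(self._changesets)       # number of revisions

        def __iter__(self):
            for i in xrange(len(self)):        # yield revision numbers 0..tip
                yield i

    # e.g. list(revcontainer(['rev0', 'rev1'])) == [0, 1]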
119 def url(self): |
118 def url(self): |
120 return 'file:' + self.root |
119 return 'file:' + self.root |
121 |
120 |
122 def hook(self, name, throw=False, **args): |
121 def hook(self, name, throw=False, **args): |
123 return hook.hook(self.ui, self, name, throw, **args) |
122 return hook.hook(self.ui, self, name, throw, **args) |
144 def writetags(fp, names, munge, prevtags): |
143 def writetags(fp, names, munge, prevtags): |
145 fp.seek(0, 2) |
144 fp.seek(0, 2) |
146 if prevtags and prevtags[-1] != '\n': |
145 if prevtags and prevtags[-1] != '\n': |
147 fp.write('\n') |
146 fp.write('\n') |
148 for name in names: |
147 for name in names: |
149 fp.write('%s %s\n' % (hex(node), munge and munge(name) or name)) |
148 m = munge and munge(name) or name |
149 if self._tagstypecache and name in self._tagstypecache: |
|
150 old = self.tagscache.get(name, nullid) |
|
151 fp.write('%s %s\n' % (hex(old), m)) |
|
152 fp.write('%s %s\n' % (hex(node), m)) |
|
150 fp.close() |
153 fp.close() |
151 |
154 |
152 prevtags = '' |
155 prevtags = '' |
153 if local: |
156 if local: |
154 try: |
157 try: |
300 self._tagstypecache = {} |
303 self._tagstypecache = {} |
301 for k,nh in globaltags.items(): |
304 for k,nh in globaltags.items(): |
302 n = nh[0] |
305 n = nh[0] |
303 if n != nullid: |
306 if n != nullid: |
304 self.tagscache[k] = n |
307 self.tagscache[k] = n |
305 self._tagstypecache[k] = tagtypes[k] |
308 self._tagstypecache[k] = tagtypes[k] |
306 self.tagscache['tip'] = self.changelog.tip() |
309 self.tagscache['tip'] = self.changelog.tip() |
307 |
|
308 return self.tagscache |
310 return self.tagscache |
309 |
311 |
310 def tagtype(self, tagname): |
312 def tagtype(self, tagname): |
311 ''' |
313 ''' |
312 return the type of the given tag. result can be: |
314 return the type of the given tag. result can be: |
324 heads = self.heads() |
326 heads = self.heads() |
325 heads.reverse() |
327 heads.reverse() |
326 last = {} |
328 last = {} |
327 ret = [] |
329 ret = [] |
328 for node in heads: |
330 for node in heads: |
329 c = self.changectx(node) |
331 c = self[node] |
330 rev = c.rev() |
332 rev = c.rev() |
331 try: |
333 try: |
332 fnode = c.filenode('.hgtags') |
334 fnode = c.filenode('.hgtags') |
333 except revlog.LookupError: |
335 except revlog.LookupError: |
334 continue |
336 continue |
345 try: |
347 try: |
346 r = self.changelog.rev(n) |
348 r = self.changelog.rev(n) |
347 except: |
349 except: |
348 r = -2 # sort to the beginning of the list if unknown |
350 r = -2 # sort to the beginning of the list if unknown |
349 l.append((r, t, n)) |
351 l.append((r, t, n)) |
350 l.sort() |
352 return [(t, n) for r, t, n in util.sort(l)] |
351 return [(t, n) for r, t, n in l] |
|
352 |
353 |
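This call site, and several later ones (heads(), _commitctx(), and the changegroup code), replace the sort-in-place-then-iterate pattern with util.sort(...), used as an expression that yields the items in sorted order. A hedged sketch of that helper's apparent behavior, inferred only from how it is called in this diff; the real util.sort may differ in details:

    def sort(l):
        """Return the elements of any iterable as a new sorted list."""
        l = list(l)
        l.sort()
        return l

    # e.g. [(t, n) for r, t, n in sort([(2, 'b', 'n2'), (1, 'a', 'n1')])]
    #      -> [('a', 'n1'), ('b', 'n2')]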
353 def nodetags(self, node): |
354 def nodetags(self, node): |
354 '''return the tags associated with a node''' |
355 '''return the tags associated with a node''' |
355 if not self.nodetagscache: |
356 if not self.nodetagscache: |
356 self.nodetagscache = {} |
357 self.nodetagscache = {} |
357 for t, n in self.tags().items(): |
358 for t, n in self.tags().items(): |
358 self.nodetagscache.setdefault(n, []).append(t) |
359 self.nodetagscache.setdefault(n, []).append(t) |
359 return self.nodetagscache.get(node, []) |
360 return self.nodetagscache.get(node, []) |
360 |
361 |
361 def _branchtags(self, partial, lrev): |
362 def _branchtags(self, partial, lrev): |
362 tiprev = self.changelog.count() - 1 |
363 tiprev = len(self) - 1 |
363 if lrev != tiprev: |
364 if lrev != tiprev: |
364 self._updatebranchcache(partial, lrev+1, tiprev+1) |
365 self._updatebranchcache(partial, lrev+1, tiprev+1) |
365 self._writebranchcache(partial, self.changelog.tip(), tiprev) |
366 self._writebranchcache(partial, self.changelog.tip(), tiprev) |
366 |
367 |
367 return partial |
368 return partial |
402 return {}, nullid, nullrev |
403 return {}, nullid, nullrev |
403 |
404 |
404 try: |
405 try: |
405 last, lrev = lines.pop(0).split(" ", 1) |
406 last, lrev = lines.pop(0).split(" ", 1) |
406 last, lrev = bin(last), int(lrev) |
407 last, lrev = bin(last), int(lrev) |
407 if not (lrev < self.changelog.count() and |
408 if lrev >= len(self) or self[lrev].node() != last: |
408 self.changelog.node(lrev) == last): # sanity check |
|
409 # invalidate the cache |
409 # invalidate the cache |
410 raise ValueError('invalidating branch cache (tip differs)') |
410 raise ValueError('invalidating branch cache (tip differs)') |
411 for l in lines: |
411 for l in lines: |
412 if not l: continue |
412 if not l: continue |
413 node, label = l.split(" ", 1) |
413 node, label = l.split(" ", 1) |
430 except (IOError, OSError): |
430 except (IOError, OSError): |
431 pass |
431 pass |
432 |
432 |
433 def _updatebranchcache(self, partial, start, end): |
433 def _updatebranchcache(self, partial, start, end): |
434 for r in xrange(start, end): |
434 for r in xrange(start, end): |
435 c = self.changectx(r) |
435 c = self[r] |
436 b = c.branch() |
436 b = c.branch() |
437 partial[b] = c.node() |
437 partial[b] = c.node() |
438 |
438 |
439 def lookup(self, key): |
439 def lookup(self, key): |
440 if key == '.': |
440 if key == '.': |
6875 (removed), lines 441-446:
441             key, second = self.dirstate.parents()
442             if key == nullid:
443                 raise repo.RepoError(_("no revision checked out"))
444             if second != nullid:
445                 self.ui.warn(_("warning: working directory has two parents, "
446                                "tag '.' uses the first\n"))
6876 (added), line 441:
441             return self.dirstate.parents()[0]
447 elif key == 'null': |
442 elif key == 'null': |
448 return nullid |
443 return nullid |
449 n = self.changelog._match(key) |
444 n = self.changelog._match(key) |
450 if n: |
445 if n: |
451 return n |
446 return n |
467 return True |
462 return True |
468 |
463 |
469 def join(self, f): |
464 def join(self, f): |
470 return os.path.join(self.path, f) |
465 return os.path.join(self.path, f) |
471 |
466 |
472 def sjoin(self, f): |
|
473 f = self.encodefn(f) |
|
474 return os.path.join(self.spath, f) |
|
475 |
|
476 def wjoin(self, f): |
467 def wjoin(self, f): |
477 return os.path.join(self.root, f) |
468 return os.path.join(self.root, f) |
469 |
|
470 def rjoin(self, f): |
|
471 return os.path.join(self.root, util.pconvert(f)) |
|
478 |
472 |
479 def file(self, f): |
473 def file(self, f): |
480 if f[0] == '/': |
474 if f[0] == '/': |
481 f = f[1:] |
475 f = f[1:] |
482 return filelog.filelog(self.sopener, f) |
476 return filelog.filelog(self.sopener, f) |
483 |
477 |
484 def changectx(self, changeid=None): |
478 def changectx(self, changeid): |
485 return context.changectx(self, changeid) |
479 return self[changeid] |
486 |
|
487 def workingctx(self): |
|
488 return context.workingctx(self) |
|
489 |
480 |
490 def parents(self, changeid=None): |
481 def parents(self, changeid=None): |
491 ''' |
482 '''get list of changectxs for parents of changeid''' |
492 get list of changectxs for parents of changeid or working directory |
483 return self[changeid].parents() |
493 ''' |
|
494 if changeid is None: |
|
495 pl = self.dirstate.parents() |
|
496 else: |
|
497 n = self.changelog.lookup(changeid) |
|
498 pl = self.changelog.parents(n) |
|
499 if pl[1] == nullid: |
|
500 return [self.changectx(pl[0])] |
|
501 return [self.changectx(pl[0]), self.changectx(pl[1])] |
|
502 |
484 |
503 def filectx(self, path, changeid=None, fileid=None): |
485 def filectx(self, path, changeid=None, fileid=None): |
504 """changeid can be a changeset revision, node, or tag. |
486 """changeid can be a changeset revision, node, or tag. |
505 fileid can be a file revision or node.""" |
487 fileid can be a file revision or node.""" |
506 return context.filectx(self, path, changeid, fileid) |
488 return context.filectx(self, path, changeid, fileid) |
674 self.dirstate.invalidate, _('working directory of %s') % |
656 self.dirstate.invalidate, _('working directory of %s') % |
675 self.origroot) |
657 self.origroot) |
676 self._wlockref = weakref.ref(l) |
658 self._wlockref = weakref.ref(l) |
677 return l |
659 return l |
678 |
660 |
679 def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist): |
661 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist): |
680 """ |
662 """ |
681 commit an individual file as part of a larger transaction |
663 commit an individual file as part of a larger transaction |
682 """ |
664 """ |
683 |
665 |
684 t = self.wread(fn) |
666 fn = fctx.path() |
667 t = fctx.data() |
|
685 fl = self.file(fn) |
668 fl = self.file(fn) |
686 fp1 = manifest1.get(fn, nullid) |
669 fp1 = manifest1.get(fn, nullid) |
687 fp2 = manifest2.get(fn, nullid) |
670 fp2 = manifest2.get(fn, nullid) |
688 |
671 |
689 meta = {} |
672 meta = {} |
690 cf = self.dirstate.copied(fn) |
673 cp = fctx.renamed() |
691 if cf and cf != fn: |
674 if cp and cp[0] != fn: |
692 # Mark the new revision of this file as a copy of another |
675 # Mark the new revision of this file as a copy of another |
693 # file. This copy data will effectively act as a parent |
676 # file. This copy data will effectively act as a parent |
694 # of this new revision. If this is a merge, the first |
677 # of this new revision. If this is a merge, the first |
695 # parent will be the nullid (meaning "look up the copy data") |
678 # parent will be the nullid (meaning "look up the copy data") |
696 # and the second one will be the other parent. For example: |
679 # and the second one will be the other parent. For example: |
706 # 0 --- 1 --- 3 rev4 reverts the content change from rev2 |
689 # 0 --- 1 --- 3 rev4 reverts the content change from rev2 |
707 # \ / merging rev3 and rev4 should use bar@rev2 |
690 # \ / merging rev3 and rev4 should use bar@rev2 |
708 # \- 2 --- 4 as the merge base |
691 # \- 2 --- 4 as the merge base |
709 # |
692 # |
710 |
693 |
694 cf = cp[0] |
|
711 cr = manifest1.get(cf) |
695 cr = manifest1.get(cf) |
712 nfp = fp2 |
696 nfp = fp2 |
713 |
697 |
714 if manifest2: # branch merge |
698 if manifest2: # branch merge |
715 if fp2 == nullid: # copied on remote side |
699 if fp2 == nullid: # copied on remote side |
719 |
703 |
720 # find source in nearest ancestor if we've lost track |
704 # find source in nearest ancestor if we've lost track |
721 if not cr: |
705 if not cr: |
722 self.ui.debug(_(" %s: searching for copy revision for %s\n") % |
706 self.ui.debug(_(" %s: searching for copy revision for %s\n") % |
723 (fn, cf)) |
707 (fn, cf)) |
6875 (removed), lines 724-737:
724                 p1 = self.dirstate.parents()[0]
725                 rev = self.changelog.rev(p1)
726                 seen = {-1:None}
727                 visit = [rev]
728                 while visit:
729                     for p in self.changelog.parentrevs(visit.pop(0)):
730                         if p not in seen:
731                             seen[p] = True
732                             visit.append(p)
733                             ctx = self.changectx(p)
734                             if cf in ctx:
735                                 cr = ctx[cf].filenode()
736                                 break
737
6876 (added), lines 708-712:
708                 for a in self['.'].ancestors():
709                     if cf in a:
710                         cr = a[cf].filenode()
711                         break
712
738 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr))) |
713 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr))) |
739 meta["copy"] = cf |
714 meta["copy"] = cf |
740 meta["copyrev"] = hex(cr) |
715 meta["copyrev"] = hex(cr) |
741 fp1, fp2 = nullid, nfp |
716 fp1, fp2 = nullid, nfp |
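In filecommit, which now takes an fctx instead of a filename, the hand-written walk over changelog.parentrevs that searched for a lost copy source is replaced by iterating self['.'].ancestors(). A self-contained sketch of the same search, written over caller-supplied callables (parentrevs, has_file, filenode are illustrative parameters, not Mercurial's API):

    def find_copy_source(parentrevs, startrev, has_file, filenode, cf):
        """Walk ancestors of startrev breadth-first and return the filenode of
        `cf` in the first ancestor found that contains it, else None."""
        seen = {-1: None}                  # -1 plays the role of nullrev
        visit = [startrev]
        while visit:
            for p in parentrevs(visit.pop(0)):
                if p not in seen:
                    seen[p] = True
                    visit.append(p)
                    if has_file(p, cf):    # first hit ends the search
                        return filenode(p, cf)
        return None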
759 p1, p2 = self.dirstate.parents() |
734 p1, p2 = self.dirstate.parents() |
760 return self.commit(files=files, text=text, user=user, date=date, |
735 return self.commit(files=files, text=text, user=user, date=date, |
761 p1=p1, p2=p2, extra=extra, empty_ok=True) |
736 p1=p1, p2=p2, extra=extra, empty_ok=True) |
762 |
737 |
763 def commit(self, files=None, text="", user=None, date=None, |
738 def commit(self, files=None, text="", user=None, date=None, |
764 match=util.always, force=False, force_editor=False, |
739 match=None, force=False, force_editor=False, |
765 p1=None, p2=None, extra={}, empty_ok=False): |
740 p1=None, p2=None, extra={}, empty_ok=False): |
766 wlock = lock = tr = None |
741 wlock = lock = None |
767 valid = 0 # don't save the dirstate if this isn't set |
|
768 if files: |
742 if files: |
769 files = util.unique(files) |
743 files = util.unique(files) |
770 try: |
744 try: |
771 wlock = self.wlock() |
745 wlock = self.wlock() |
772 lock = self.lock() |
746 lock = self.lock() |
773 commit = [] |
|
774 remove = [] |
|
775 changed = [] |
|
776 use_dirstate = (p1 is None) # not rawcommit |
747 use_dirstate = (p1 is None) # not rawcommit |
777 extra = extra.copy() |
|
778 |
748 |
779 if use_dirstate: |
749 if use_dirstate: |
750 p1, p2 = self.dirstate.parents() |
|
751 update_dirstate = True |
|
752 |
|
753 if (not force and p2 != nullid and |
|
754 (match and (match.files() or match.anypats()))): |
|
755 raise util.Abort(_('cannot partially commit a merge ' |
|
756 '(do not specify files or patterns)')) |
|
757 |
|
780 if files: |
758 if files: |
759 modified, removed = [], [] |
|
781 for f in files: |
760 for f in files: |
782 s = self.dirstate[f] |
761 s = self.dirstate[f] |
783 if s in 'nma': |
762 if s in 'nma': |
784 commit.append(f) |
763 modified.append(f) |
785 elif s == 'r': |
764 elif s == 'r': |
786 remove.append(f) |
765 removed.append(f) |
787 else: |
766 else: |
788 self.ui.warn(_("%s not tracked!\n") % f) |
767 self.ui.warn(_("%s not tracked!\n") % f) |
768 changes = [modified, [], removed, [], []] |
|
789 else: |
769 else: |
790 changes = self.status(match=match)[:5] |
770 changes = self.status(match=match) |
791 modified, added, removed, deleted, unknown = changes |
|
792 commit = modified + added |
|
793 remove = removed |
|
794 else: |
|
795 commit = files |
|
796 |
|
797 if use_dirstate: |
|
798 p1, p2 = self.dirstate.parents() |
|
799 update_dirstate = True |
|
800 |
|
801 if (not force and p2 != nullid and |
|
802 (files or match != util.always)): |
|
803 raise util.Abort(_('cannot partially commit a merge ' |
|
804 '(do not specify files or patterns)')) |
|
805 else: |
771 else: |
806 p1, p2 = p1, p2 or nullid |
772 p1, p2 = p1, p2 or nullid |
807 update_dirstate = (self.dirstate.parents()[0] == p1) |
773 update_dirstate = (self.dirstate.parents()[0] == p1) |
808 |
774 changes = [files, [], [], [], []] |
775 |
|
776 wctx = context.workingctx(self, (p1, p2), text, user, date, |
|
777 extra, changes) |
|
778 return self._commitctx(wctx, force, force_editor, empty_ok, |
|
779 use_dirstate, update_dirstate) |
|
780 finally: |
|
781 del lock, wlock |
|
782 |
|
783 def commitctx(self, ctx): |
|
784 wlock = lock = None |
|
785 try: |
|
786 wlock = self.wlock() |
|
787 lock = self.lock() |
|
788 return self._commitctx(ctx, force=True, force_editor=False, |
|
789 empty_ok=True, use_dirstate=False, |
|
790 update_dirstate=False) |
|
791 finally: |
|
792 del lock, wlock |
|
793 |
|
794 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False, |
|
795 use_dirstate=True, update_dirstate=True): |
|
796 tr = None |
|
797 valid = 0 # don't save the dirstate if this isn't set |
|
798 try: |
|
799 commit = util.sort(wctx.modified() + wctx.added()) |
|
800 remove = wctx.removed() |
|
801 extra = wctx.extra().copy() |
|
802 branchname = extra['branch'] |
|
803 user = wctx.user() |
|
804 text = wctx.description() |
|
805 |
|
806 p1, p2 = [p.node() for p in wctx.parents()] |
|
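The commit path is restructured above: commit() now collects its arguments into a context.workingctx and delegates to the new _commitctx(), while the added commitctx(ctx) commits an externally built context under the locks without touching the dirstate. A toy sketch of that delegation shape, with placeholder bookkeeping standing in for the real filecommit/manifest/changelog work:

    class minirepo(object):
        """Toy illustration of the commit()/commitctx()/_commitctx() split."""

        def __init__(self):
            self._log = []            # stand-in for the changelog
            self._clean = set()       # stand-in for dirstate bookkeeping

        def commit(self, files, text, user):
            # gather working-directory state into a context-like object, then defer
            wctx = {'files': sorted(files), 'text': text, 'user': user}
            return self._commitctx(wctx, use_dirstate=True)

        def commitctx(self, ctx):
            # commit a pre-built context without touching the dirstate
            return self._commitctx(ctx, use_dirstate=False)

        def _commitctx(self, ctx, use_dirstate):
            # stand-in for the real filecommit / manifest / changelog.add sequence
            self._log.append(ctx)
            if use_dirstate:
                self._clean.update(ctx['files'])   # real code: dirstate.normal(f)
            return len(self._log) - 1              # "node" of the new commit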
809 c1 = self.changelog.read(p1) |
807 c1 = self.changelog.read(p1) |
810 c2 = self.changelog.read(p2) |
808 c2 = self.changelog.read(p2) |
811 m1 = self.manifest.read(c1[0]).copy() |
809 m1 = self.manifest.read(c1[0]).copy() |
812 m2 = self.manifest.read(c2[0]) |
810 m2 = self.manifest.read(c2[0]) |
813 |
|
814 if use_dirstate: |
|
815 branchname = self.workingctx().branch() |
|
816 try: |
|
817 branchname = branchname.decode('UTF-8').encode('UTF-8') |
|
818 except UnicodeDecodeError: |
|
819 raise util.Abort(_('branch name not in UTF-8!')) |
|
820 else: |
|
821 branchname = "" |
|
822 |
811 |
823 if use_dirstate: |
812 if use_dirstate: |
824 oldname = c1[5].get("branch") # stored in UTF-8 |
813 oldname = c1[5].get("branch") # stored in UTF-8 |
825 if (not commit and not remove and not force and p2 == nullid |
814 if (not commit and not remove and not force and p2 == nullid |
826 and branchname == oldname): |
815 and branchname == oldname): |
836 tr = self.transaction() |
825 tr = self.transaction() |
837 trp = weakref.proxy(tr) |
826 trp = weakref.proxy(tr) |
838 |
827 |
839 # check in files |
828 # check in files |
840 new = {} |
829 new = {} |
841 linkrev = self.changelog.count() |
830 changed = [] |
842 commit.sort() |
831 linkrev = len(self) |
843 is_exec = util.execfunc(self.root, m1.execf) |
|
844 is_link = util.linkfunc(self.root, m1.linkf) |
|
845 for f in commit: |
832 for f in commit: |
846 self.ui.note(f + "\n") |
833 self.ui.note(f + "\n") |
847 try: |
834 try: |
848 new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed) |
835 fctx = wctx.filectx(f) |
849 new_exec = is_exec(f) |
836 newflags = fctx.flags() |
850 new_link = is_link(f) |
837 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed) |
851 if ((not changed or changed[-1] != f) and |
838 if ((not changed or changed[-1] != f) and |
852 m2.get(f) != new[f]): |
839 m2.get(f) != new[f]): |
853 # mention the file in the changelog if some |
840 # mention the file in the changelog if some |
854 # flag changed, even if there was no content |
841 # flag changed, even if there was no content |
855 # change. |
842 # change. |
856 old_exec = m1.execf(f) |
843 if m1.flags(f) != newflags: |
857 old_link = m1.linkf(f) |
|
858 if old_exec != new_exec or old_link != new_link: |
|
859 changed.append(f) |
844 changed.append(f) |
860 m1.set(f, new_exec, new_link) |
845 m1.set(f, newflags) |
861 if use_dirstate: |
846 if use_dirstate: |
862 self.dirstate.normal(f) |
847 self.dirstate.normal(f) |
863 |
848 |
864 except (OSError, IOError): |
849 except (OSError, IOError): |
865 if use_dirstate: |
850 if use_dirstate: |
868 else: |
853 else: |
869 remove.append(f) |
854 remove.append(f) |
870 |
855 |
871 # update manifest |
856 # update manifest |
872 m1.update(new) |
857 m1.update(new) |
873 remove.sort() |
|
874 removed = [] |
858 removed = [] |
875 |
859 |
876 for f in remove: |
860 for f in util.sort(remove): |
877 if f in m1: |
861 if f in m1: |
878 del m1[f] |
862 del m1[f] |
879 removed.append(f) |
863 removed.append(f) |
880 elif f in m2: |
864 elif f in m2: |
881 removed.append(f) |
865 removed.append(f) |
882 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0], |
866 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0], |
883 (new, removed)) |
867 (new, removed)) |
884 |
868 |
885 # add changeset |
869 # add changeset |
886 new = new.keys() |
|
887 new.sort() |
|
888 |
|
889 user = user or self.ui.username() |
|
890 if (not empty_ok and not text) or force_editor: |
870 if (not empty_ok and not text) or force_editor: |
891 edittext = [] |
871 edittext = [] |
892 if text: |
872 if text: |
893 edittext.append(text) |
873 edittext.append(text) |
894 edittext.append("") |
874 edittext.append("") |
909 olddir = os.getcwd() |
889 olddir = os.getcwd() |
910 os.chdir(self.root) |
890 os.chdir(self.root) |
911 text = self.ui.edit("\n".join(edittext), user) |
891 text = self.ui.edit("\n".join(edittext), user) |
912 os.chdir(olddir) |
892 os.chdir(olddir) |
913 |
893 |
914 if branchname: |
|
915 extra["branch"] = branchname |
|
916 |
|
917 lines = [line.rstrip() for line in text.rstrip().splitlines()] |
894 lines = [line.rstrip() for line in text.rstrip().splitlines()] |
918 while lines and not lines[0]: |
895 while lines and not lines[0]: |
919 del lines[0] |
896 del lines[0] |
920 if not lines and use_dirstate: |
897 if not lines and use_dirstate: |
921 raise util.Abort(_("empty commit message")) |
898 raise util.Abort(_("empty commit message")) |
922 text = '\n'.join(lines) |
899 text = '\n'.join(lines) |
923 |
900 |
924 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2, |
901 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2, |
925 user, date, extra) |
902 user, wctx.date(), extra) |
926 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, |
903 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1, |
927 parent2=xp2) |
904 parent2=xp2) |
928 tr.close() |
905 tr.close() |
929 |
906 |
930 if self.branchcache: |
907 if self.branchcache: |
940 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2) |
917 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2) |
941 return n |
918 return n |
942 finally: |
919 finally: |
943 if not valid: # don't save our updated dirstate |
920 if not valid: # don't save our updated dirstate |
944 self.dirstate.invalidate() |
921 self.dirstate.invalidate() |
945 del tr, lock, wlock |
922 del tr |
946 |
923 |
947 def walk(self, node=None, files=[], match=util.always, badmatch=None): |
924 def walk(self, match, node=None): |
948 ''' |
925 ''' |
949 walk recursively through the directory tree or a given |
926 walk recursively through the directory tree or a given |
950 changeset, finding all files matched by the match |
927 changeset, finding all files matched by the match |
951 function |
928 function |
952 |
|
953 results are yielded in a tuple (src, filename), where src |
|
954 is one of: |
|
955 'f' the file was found in the directory tree |
|
956 'm' the file was only in the dirstate and not in the tree |
|
957 'b' file was not found and matched badmatch |
|
958 ''' |
929 ''' |
959 |
930 return self[node].walk(match) |
960 if node: |
931 |
961 fdict = dict.fromkeys(files) |
932 def status(self, node1='.', node2=None, match=None, |
962 # for dirstate.walk, files=['.'] means "walk the whole tree". |
933 ignored=False, clean=False, unknown=False): |
963 # follow that here, too |
|
964 fdict.pop('.', None) |
|
965 mdict = self.manifest.read(self.changelog.read(node)[0]) |
|
966 mfiles = mdict.keys() |
|
967 mfiles.sort() |
|
968 for fn in mfiles: |
|
969 for ffn in fdict: |
|
970 # match if the file is the exact name or a directory |
|
971 if ffn == fn or fn.startswith("%s/" % ffn): |
|
972 del fdict[ffn] |
|
973 break |
|
974 if match(fn): |
|
975 yield 'm', fn |
|
976 ffiles = fdict.keys() |
|
977 ffiles.sort() |
|
978 for fn in ffiles: |
|
979 if badmatch and badmatch(fn): |
|
980 if match(fn): |
|
981 yield 'b', fn |
|
982 else: |
|
983 self.ui.warn(_('%s: No such file in rev %s\n') |
|
984 % (self.pathto(fn), short(node))) |
|
985 else: |
|
986 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch): |
|
987 yield src, fn |
|
988 |
|
989 def status(self, node1=None, node2=None, files=[], match=util.always, |
|
990 list_ignored=False, list_clean=False, list_unknown=True): |
|
991 """return status of files between two nodes or node and working directory |
934 """return status of files between two nodes or node and working directory |
992 |
935 |
993 If node1 is None, use the first dirstate parent instead. |
936 If node1 is None, use the first dirstate parent instead. |
994 If node2 is None, compare node1 with working directory. |
937 If node2 is None, compare node1 with working directory. |
995 """ |
938 """ |
996 |
939 |
997 def fcmp(fn, getnode): |
940 def mfmatches(ctx): |
998 t1 = self.wread(fn) |
941 mf = ctx.manifest().copy() |
999 return self.file(fn).cmp(getnode(fn), t1) |
|
1000 |
|
1001 def mfmatches(node): |
|
1002 change = self.changelog.read(node) |
|
1003 mf = self.manifest.read(change[0]).copy() |
|
1004 for fn in mf.keys(): |
942 for fn in mf.keys(): |
1005 if not match(fn): |
943 if not match(fn): |
1006 del mf[fn] |
944 del mf[fn] |
1007 return mf |
945 return mf |
1008 |
946 |
1009 modified, added, removed, deleted, unknown = [], [], [], [], [] |
947 ctx1 = self[node1] |
1010 ignored, clean = [], [] |
948 ctx2 = self[node2] |
1011 |
949 working = ctx2 == self[None] |
1012 compareworking = False |
950 parentworking = working and ctx1 == self['.'] |
1013 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]): |
951 match = match or match_.always(self.root, self.getcwd()) |
1014 compareworking = True |
952 listignored, listclean, listunknown = ignored, clean, unknown |
1015 |
953 |
1016 if not compareworking: |
954 if working: # we need to scan the working dir |
1017 # read the manifest from node1 before the manifest from node2, |
955 s = self.dirstate.status(match, listignored, listclean, listunknown) |
1018 # so that we'll hit the manifest cache if we're going through |
956 cmp, modified, added, removed, deleted, unknown, ignored, clean = s |
1019 # all the revisions in parent->child order. |
957 |
1020 mf1 = mfmatches(node1) |
958 # check for any possibly clean files |
1021 |
959 if parentworking and cmp: |
1022 # are we comparing the working directory? |
960 fixup = [] |
1023 if not node2: |
961 # do a full compare of any files that might have changed |
1024 (lookup, modified, added, removed, deleted, unknown, |
962 for f in cmp: |
1025 ignored, clean) = self.dirstate.status(files, match, |
963 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f) |
1026 list_ignored, list_clean, |
964 or ctx1[f].cmp(ctx2[f].data())): |
1027 list_unknown) |
965 modified.append(f) |
1028 |
966 else: |
1029 # are we comparing working dir against its parent? |
967 fixup.append(f) |
1030 if compareworking: |
968 |
1031 if lookup: |
969 if listclean: |
1032 fixup = [] |
970 clean += fixup |
1033 # do a full compare of any files that might have changed |
971 |
1034 ctx = self.changectx() |
972 # update dirstate for files that are actually clean |
1035 mexec = lambda f: 'x' in ctx.fileflags(f) |
973 if fixup: |
1036 mlink = lambda f: 'l' in ctx.fileflags(f) |
974 wlock = None |
1037 is_exec = util.execfunc(self.root, mexec) |
975 try: |
1038 is_link = util.linkfunc(self.root, mlink) |
|
1039 def flags(f): |
|
1040 return is_link(f) and 'l' or is_exec(f) and 'x' or '' |
|
1041 for f in lookup: |
|
1042 if (f not in ctx or flags(f) != ctx.fileflags(f) |
|
1043 or ctx[f].cmp(self.wread(f))): |
|
1044 modified.append(f) |
|
1045 else: |
|
1046 fixup.append(f) |
|
1047 if list_clean: |
|
1048 clean.append(f) |
|
1049 |
|
1050 # update dirstate for files that are actually clean |
|
1051 if fixup: |
|
1052 wlock = None |
|
1053 try: |
976 try: |
1054 try: |
977 wlock = self.wlock(False) |
1055 wlock = self.wlock(False) |
978 for f in fixup: |
1056 except lock.LockException: |
979 self.dirstate.normal(f) |
1057 pass |
980 except lock.LockException: |
1058 if wlock: |
981 pass |
1059 for f in fixup: |
982 finally: |
1060 self.dirstate.normal(f) |
983 del wlock |
1061 finally: |
984 |
1062 del wlock |
985 if not parentworking: |
1063 else: |
986 mf1 = mfmatches(ctx1) |
987 if working: |
|
1064 # we are comparing working dir against non-parent |
988 # we are comparing working dir against non-parent |
1065 # generate a pseudo-manifest for the working dir |
989 # generate a pseudo-manifest for the working dir |
1066 # XXX: create it in dirstate.py ? |
990 mf2 = mfmatches(self['.']) |
1067 mf2 = mfmatches(self.dirstate.parents()[0]) |
991 for f in cmp + modified + added: |
1068 is_exec = util.execfunc(self.root, mf2.execf) |
992 mf2[f] = None |
1069 is_link = util.linkfunc(self.root, mf2.linkf) |
993 mf2.set(f, ctx2.flags(f)) |
1070 for f in lookup + modified + added: |
|
1071 mf2[f] = "" |
|
1072 mf2.set(f, is_exec(f), is_link(f)) |
|
1073 for f in removed: |
994 for f in removed: |
1074 if f in mf2: |
995 if f in mf2: |
1075 del mf2[f] |
996 del mf2[f] |
1076 |
997 else: |
1077 else: |
998 # we are comparing two revisions |
1078 # we are comparing two revisions |
999 deleted, unknown, ignored = [], [], [] |
1079 mf2 = mfmatches(node2) |
1000 mf2 = mfmatches(ctx2) |
1080 |
1001 |
1081 if not compareworking: |
|
1082 # flush lists from dirstate before comparing manifests |
|
1083 modified, added, clean = [], [], [] |
1002 modified, added, clean = [], [], [] |
1084 |
1003 for fn in mf2: |
1085 # make sure to sort the files so we talk to the disk in a |
|
1086 # reasonable order |
|
1087 mf2keys = mf2.keys() |
|
1088 mf2keys.sort() |
|
1089 getnode = lambda fn: mf1.get(fn, nullid) |
|
1090 for fn in mf2keys: |
|
1091 if fn in mf1: |
1004 if fn in mf1: |
1092 if (mf1.flags(fn) != mf2.flags(fn) or |
1005 if (mf1.flags(fn) != mf2.flags(fn) or |
1093 (mf1[fn] != mf2[fn] and |
1006 (mf1[fn] != mf2[fn] and |
1094 (mf2[fn] != "" or fcmp(fn, getnode)))): |
1007 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))): |
1095 modified.append(fn) |
1008 modified.append(fn) |
1096 elif list_clean: |
1009 elif listclean: |
1097 clean.append(fn) |
1010 clean.append(fn) |
1098 del mf1[fn] |
1011 del mf1[fn] |
1099 else: |
1012 else: |
1100 added.append(fn) |
1013 added.append(fn) |
1101 |
|
1102 removed = mf1.keys() |
1014 removed = mf1.keys() |
1103 |
1015 |
1104 # sort and return results: |
1016 r = modified, added, removed, deleted, unknown, ignored, clean |
1105 for l in modified, added, removed, deleted, unknown, ignored, clean: |
1017 [l.sort() for l in r] |
1106 l.sort() |
1018 return r |
1107 return (modified, added, removed, deleted, unknown, ignored, clean) |
|
1108 |
1019 |
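status() is rewritten above around contexts and a match object (match_.always when none is given): the dirstate now also reports a cmp list of files whose stat data is inconclusive, contexts are compared directly, and files that turn out to be unchanged are written back as clean ("fixup"). A small sketch of that fixup split, with the content-reading callables supplied by the caller; the names are illustrative, not Mercurial's API:

    def split_fixups(maybe_changed, recorded_data, working_data):
        """Separate files whose stat info was inconclusive into really-modified
        files and 'fixup' files whose content is unchanged (only stat was stale)."""
        modified, fixup = [], []
        for f in maybe_changed:
            if recorded_data(f) != working_data(f):
                modified.append(f)     # content really differs
            else:
                fixup.append(f)        # content identical: refresh dirstate entry
        return modified, fixup

    # the caller then marks every fixup file clean, e.g. dirstate.normal(f),
    # so later status calls can trust the stat data again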
1109 def add(self, list): |
1020 def add(self, list): |
1110 wlock = self.wlock() |
1021 wlock = self.wlock() |
1111 try: |
1022 try: |
1112 rejected = [] |
1023 rejected = [] |
1207 |
1118 |
1208 def heads(self, start=None): |
1119 def heads(self, start=None): |
1209 heads = self.changelog.heads(start) |
1120 heads = self.changelog.heads(start) |
1210 # sort the output in rev descending order |
1121 # sort the output in rev descending order |
1211 heads = [(-self.changelog.rev(h), h) for h in heads] |
1122 heads = [(-self.changelog.rev(h), h) for h in heads] |
1212 heads.sort() |
1123 return [n for (r, n) in util.sort(heads)] |
1213 return [n for (r, n) in heads] |
1124 |
1214 |
1125 def branchheads(self, branch=None, start=None): |
1215 def branchheads(self, branch, start=None): |
1126 if branch is None: |
1127 branch = self[None].branch() |
|
1216 branches = self.branchtags() |
1128 branches = self.branchtags() |
1217 if branch not in branches: |
1129 if branch not in branches: |
1218 return [] |
1130 return [] |
1219 # The basic algorithm is this: |
1131 # The basic algorithm is this: |
1220 # |
1132 # |
1248 ancestors = set(self.changelog.parentrevs(heads[0])) |
1160 ancestors = set(self.changelog.parentrevs(heads[0])) |
1249 for rev in xrange(heads[0] - 1, nullrev, -1): |
1161 for rev in xrange(heads[0] - 1, nullrev, -1): |
1250 if rev in ancestors: |
1162 if rev in ancestors: |
1251 ancestors.update(self.changelog.parentrevs(rev)) |
1163 ancestors.update(self.changelog.parentrevs(rev)) |
1252 ancestors.remove(rev) |
1164 ancestors.remove(rev) |
1253 elif self.changectx(rev).branch() == branch: |
1165 elif self[rev].branch() == branch: |
1254 heads.append(rev) |
1166 heads.append(rev) |
1255 ancestors.update(self.changelog.parentrevs(rev)) |
1167 ancestors.update(self.changelog.parentrevs(rev)) |
1256 heads = [self.changelog.node(rev) for rev in heads] |
1168 heads = [self.changelog.node(rev) for rev in heads] |
1257 if start is not None: |
1169 if start is not None: |
1258 heads = self.changelog.nodesbetween([start], heads)[2] |
1170 heads = self.changelog.nodesbetween([start], heads)[2] |
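branchheads() now defaults to the working directory's branch (self[None].branch()) and uses self[rev].branch() in its reverse walk. The walk keeps a set of ancestors of heads already found and only counts a revision as a new head if it is on the branch and not in that set. A compact, simplified sketch of that idea over a caller-supplied parentrevs function (standalone illustration, not the exact code above):

    def branch_heads(parentrevs, branchof, tiprev, branch):
        """Return revs on `branch` with no same-branch descendants,
        walking from tiprev down to 0."""
        heads = []
        ancestors = set()
        for rev in xrange(tiprev, -1, -1):
            if rev in ancestors:
                # an ancestor of a known head cannot itself be a head
                ancestors.update(parentrevs(rev))
                ancestors.discard(rev)
            elif branchof(rev) == branch:
                heads.append(rev)
                ancestors.update(parentrevs(rev))
        return heads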
1663 # We don't know which manifests are missing yet |
1575 # We don't know which manifests are missing yet |
1664 msng_mnfst_set = {} |
1576 msng_mnfst_set = {} |
1665 # Nor do we know which filenodes are missing. |
1577 # Nor do we know which filenodes are missing. |
1666 msng_filenode_set = {} |
1578 msng_filenode_set = {} |
1667 |
1579 |
1668 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex |
1580 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex |
1669 junk = None |
1581 junk = None |
1670 |
1582 |
1671 # A changeset always belongs to itself, so the changenode lookup |
1583 # A changeset always belongs to itself, so the changenode lookup |
1672 # function for a changenode is identity. |
1584 # function for a changenode is identity. |
1673 def identity(x): |
1585 def identity(x): |
1858 if isinstance(fname, int): |
1770 if isinstance(fname, int): |
1859 continue |
1771 continue |
1860 add_extra_nodes(fname, |
1772 add_extra_nodes(fname, |
1861 msng_filenode_set.setdefault(fname, {})) |
1773 msng_filenode_set.setdefault(fname, {})) |
1862 changedfiles[fname] = 1 |
1774 changedfiles[fname] = 1 |
1863 changedfiles = changedfiles.keys() |
|
1864 changedfiles.sort() |
|
1865 # Go through all our files in order sorted by name. |
1775 # Go through all our files in order sorted by name. |
1866 for fname in changedfiles: |
1776 for fname in util.sort(changedfiles): |
1867 filerevlog = self.file(fname) |
1777 filerevlog = self.file(fname) |
1868 if filerevlog.count() == 0: |
1778 if not len(filerevlog): |
1869 raise util.Abort(_("empty or missing revlog for %s") % fname) |
1779 raise util.Abort(_("empty or missing revlog for %s") % fname) |
1870 # Toss out the filenodes that the recipient isn't really |
1780 # Toss out the filenodes that the recipient isn't really |
1871 # missing. |
1781 # missing. |
1872 if fname in msng_filenode_set: |
1782 if fname in msng_filenode_set: |
1873 prune_filenodes(fname, filerevlog) |
1783 prune_filenodes(fname, filerevlog) |
1914 self.changegroupinfo(nodes, source) |
1824 self.changegroupinfo(nodes, source) |
1915 |
1825 |
1916 def identity(x): |
1826 def identity(x): |
1917 return x |
1827 return x |
1918 |
1828 |
1919 def gennodelst(revlog): |
1829 def gennodelst(log): |
1920 for r in xrange(0, revlog.count()): |
1830 for r in log: |
1921 n = revlog.node(r) |
1831 n = log.node(r) |
1922 if revlog.linkrev(n) in revset: |
1832 if log.linkrev(n) in revset: |
1923 yield n |
1833 yield n |
1924 |
1834 |
1925 def changed_file_collector(changedfileset): |
1835 def changed_file_collector(changedfileset): |
1926 def collect_changed_files(clnode): |
1836 def collect_changed_files(clnode): |
1927 c = cl.read(clnode) |
1837 c = cl.read(clnode) |
1939 changedfiles = {} |
1849 changedfiles = {} |
1940 |
1850 |
1941 for chnk in cl.group(nodes, identity, |
1851 for chnk in cl.group(nodes, identity, |
1942 changed_file_collector(changedfiles)): |
1852 changed_file_collector(changedfiles)): |
1943 yield chnk |
1853 yield chnk |
1944 changedfiles = changedfiles.keys() |
|
1945 changedfiles.sort() |
|
1946 |
1854 |
1947 mnfst = self.manifest |
1855 mnfst = self.manifest |
1948 nodeiter = gennodelst(mnfst) |
1856 nodeiter = gennodelst(mnfst) |
1949 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)): |
1857 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)): |
1950 yield chnk |
1858 yield chnk |
1951 |
1859 |
1952 for fname in changedfiles: |
1860 for fname in util.sort(changedfiles): |
1953 filerevlog = self.file(fname) |
1861 filerevlog = self.file(fname) |
1954 if filerevlog.count() == 0: |
1862 if not len(filerevlog): |
1955 raise util.Abort(_("empty or missing revlog for %s") % fname) |
1863 raise util.Abort(_("empty or missing revlog for %s") % fname) |
1956 nodeiter = gennodelst(filerevlog) |
1864 nodeiter = gennodelst(filerevlog) |
1957 nodeiter = list(nodeiter) |
1865 nodeiter = list(nodeiter) |
1958 if nodeiter: |
1866 if nodeiter: |
1959 yield changegroup.chunkheader(len(fname)) |
1867 yield changegroup.chunkheader(len(fname)) |
1978 - less heads than before: -1-removed heads (-2..-n) |
1886 - less heads than before: -1-removed heads (-2..-n) |
1979 - number of heads stays the same: 1 |
1887 - number of heads stays the same: 1 |
1980 """ |
1888 """ |
1981 def csmap(x): |
1889 def csmap(x): |
1982 self.ui.debug(_("add changeset %s\n") % short(x)) |
1890 self.ui.debug(_("add changeset %s\n") % short(x)) |
1983 return cl.count() |
1891 return len(cl) |
1984 |
1892 |
1985 def revmap(x): |
1893 def revmap(x): |
1986 return cl.rev(x) |
1894 return cl.rev(x) |
1987 |
1895 |
1988 if not source: |
1896 if not source: |
2001 tr = self.transaction() |
1909 tr = self.transaction() |
2002 try: |
1910 try: |
2003 trp = weakref.proxy(tr) |
1911 trp = weakref.proxy(tr) |
2004 # pull off the changeset group |
1912 # pull off the changeset group |
2005 self.ui.status(_("adding changesets\n")) |
1913 self.ui.status(_("adding changesets\n")) |
2006 cor = cl.count() - 1 |
1914 cor = len(cl) - 1 |
2007 chunkiter = changegroup.chunkiter(source) |
1915 chunkiter = changegroup.chunkiter(source) |
2008 if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok: |
1916 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok: |
2009 raise util.Abort(_("received changelog group is empty")) |
1917 raise util.Abort(_("received changelog group is empty")) |
2010 cnr = cl.count() - 1 |
1918 cnr = len(cl) - 1 |
2011 changesets = cnr - cor |
1919 changesets = cnr - cor |
2012 |
1920 |
2013 # pull off the manifest group |
1921 # pull off the manifest group |
2014 self.ui.status(_("adding manifests\n")) |
1922 self.ui.status(_("adding manifests\n")) |
2015 chunkiter = changegroup.chunkiter(source) |
1923 chunkiter = changegroup.chunkiter(source) |
2025 f = changegroup.getchunk(source) |
1933 f = changegroup.getchunk(source) |
2026 if not f: |
1934 if not f: |
2027 break |
1935 break |
2028 self.ui.debug(_("adding %s revisions\n") % f) |
1936 self.ui.debug(_("adding %s revisions\n") % f) |
2029 fl = self.file(f) |
1937 fl = self.file(f) |
2030 o = fl.count() |
1938 o = len(fl) |
2031 chunkiter = changegroup.chunkiter(source) |
1939 chunkiter = changegroup.chunkiter(source) |
2032 if fl.addgroup(chunkiter, revmap, trp) is None: |
1940 if fl.addgroup(chunkiter, revmap, trp) is None: |
2033 raise util.Abort(_("received file revlog group is empty")) |
1941 raise util.Abort(_("received file revlog group is empty")) |
2034 revisions += fl.count() - o |
1942 revisions += len(fl) - o |
2035 files += 1 |
1943 files += 1 |
2036 |
1944 |
2037 # make changelog see real files again |
1945 # make changelog see real files again |
2038 cl.finalize(trp) |
1946 cl.finalize(trp) |
2039 |
1947 |
2137 |
2045 |
2138 if stream and not heads and remote.capable('stream'): |
2046 if stream and not heads and remote.capable('stream'): |
2139 return self.stream_in(remote) |
2047 return self.stream_in(remote) |
2140 return self.pull(remote, heads) |
2048 return self.pull(remote, heads) |
2141 |
2049 |
2050 def storefiles(self): |
|
2051 '''get all *.i and *.d files in the store |
|
2052 |
|
2053 Returns (list of (filename, size), total_bytes)''' |
|
2054 |
|
2055 lock = None |
|
2056 try: |
|
2057 self.ui.debug('scanning\n') |
|
2058 entries = [] |
|
2059 total_bytes = 0 |
|
2060 # get consistent snapshot of repo, lock during scan |
|
2061 lock = self.lock() |
|
2062 for name, size in self.store.walk(): |
|
2063 entries.append((name, size)) |
|
2064 total_bytes += size |
|
2065 return entries, total_bytes |
|
2066 finally: |
|
2067 del lock |
|
2068 |
|
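The new storefiles() method above takes the repository lock, walks the store via self.store.walk(), and returns the (name, size) entries plus the total byte count, presumably for streaming transfers. A rough sketch of such a walk over an on-disk store directory, keeping only revlog .i/.d files; path handling is simplified and the real store.walk() also deals with filename encoding:

    import os

    def storefiles(spath):
        """Return ([(relative_name, size), ...], total_bytes) for revlog files
        under the store directory `spath`."""
        entries, total = [], 0
        for root, dirs, files in os.walk(spath):
            for f in files:
                if f.endswith('.i') or f.endswith('.d'):
                    full = os.path.join(root, f)
                    size = os.path.getsize(full)
                    name = full[len(spath) + 1:]   # path relative to the store
                    entries.append((name, size))
                    total += size
        return entries, total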
2142 # used to avoid circular references so destructors work |
2069 # used to avoid circular references so destructors work |
2143 def aftertrans(files): |
2070 def aftertrans(files): |
2144 renamefiles = [tuple(t) for t in files] |
2071 renamefiles = [tuple(t) for t in files] |
2145 def a(): |
2072 def a(): |
2146 for src, dest in renamefiles: |
2073 for src, dest in renamefiles: |