             if v:
                 self._repo.ui.setconfig(s, k, v)
         self._repo.ui.setconfig('ui', '_usedassubrepo', 'True')
         self._initrepo(r, state[0], create)
 
+    def storeclean(self, path):
+        clean = True
+        lock = self._repo.lock()
+        itercache = self._calcstorehash(path)
+        try:
+            for filehash in self._readstorehashcache(path):
+                if filehash != itercache.next():
+                    clean = False
+                    break
+        except StopIteration:
+            # the cached and current pull states have a different size
+            clean = False
+        if clean:
+            try:
+                itercache.next()
+                # the cached and current pull states have a different size
+                clean = False
+            except StopIteration:
+                pass
+        lock.release()
+        return clean
+
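storeclean therefore reports the store as clean only when the cached hash lines and the freshly computed ones match line for line, in both content and count. A minimal standalone sketch of that comparison, outside Mercurial (the names below are mine, not the patch's):

    def lines_match(cached, current):
        # 'cached' is a list of hash lines, 'current' an iterable of the
        # freshly computed ones, mirroring _readstorehashcache and
        # _calcstorehash above
        current = iter(current)
        try:
            for line in cached:
                if line != next(current):
                    return False      # a store file's hash changed
        except StopIteration:
            return False              # current state has fewer lines
        try:
            next(current)
            return False              # current state has extra lines
        except StopIteration:
            return True

    assert lines_match(['a\n', 'b\n'], ['a\n', 'b\n'])
    assert not lines_match(['a\n'], ['a\n', 'b\n'])
    assert not lines_match(['a\n', 'b\n'], ['a\n'])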
|
+    def _calcstorehash(self, remotepath):
+        '''calculate a unique "store hash"
+
+        This method is used to detect when there are changes that may
+        require a push to a given remote path.'''
+        # the files to be hashed are listed in increasing (likely) file size
+        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
+        yield '# %s\n' % remotepath
+        for relname in filelist:
+            absname = os.path.normpath(self._repo.join(relname))
+            yield '%s = %s\n' % (absname, _calcfilehash(absname))
+
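_calcfilehash is a module-level helper that is not part of this hunk. A plausible sketch, assuming it returns the SHA-1 hex digest of a file's contents and hashes empty input when the file does not exist:

    import hashlib
    import os

    def _calcfilehash(filename):
        # assumed behavior: a missing file hashes as empty content, so the
        # generator above always yields one line per store file
        data = b''
        if os.path.exists(filename):
            fd = open(filename, 'rb')
            data = fd.read()
            fd.close()
        return hashlib.sha1(data).hexdigest()

Each run of _calcstorehash then yields a header line naming the remote, followed by one '<absolute path> = <sha1>' line per store file.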
|
+    def _getstorehashcachepath(self, remotepath):
+        '''get a unique path for the store hash cache'''
+        return self._repo.join(os.path.join(
+            'cache', 'storehash', _getstorehashcachename(remotepath)))
+
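_getstorehashcachename is likewise defined elsewhere in subrepo.py. Assuming it turns an arbitrary remote path into a short, filesystem-safe file name, a sketch could be:

    import hashlib

    def _getstorehashcachename(remotepath):
        # assumed: a truncated SHA-1 of the remote path keeps the cache
        # file name short and free of path separators
        return hashlib.sha1(remotepath.encode('utf-8')).hexdigest()[0:12]

so each remote gets its own file under .hg/cache/storehash/.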
|
+    def _readstorehashcache(self, remotepath):
+        '''read the store hash cache for a given remote repository'''
+        cachefile = self._getstorehashcachepath(remotepath)
+        if not os.path.exists(cachefile):
+            return ''
+        fd = open(cachefile, 'r')
+        pullstate = fd.readlines()
+        fd.close()
+        return pullstate
+
|
+    def _cachestorehash(self, remotepath):
+        '''cache the current store hash
+
+        Each remote repo requires its own store hash cache, because a subrepo
+        store may be "clean" versus a given remote repo, but not versus another
+        '''
+        cachefile = self._getstorehashcachepath(remotepath)
+        lock = self._repo.lock()
+        storehash = list(self._calcstorehash(remotepath))
+        cachedir = os.path.dirname(cachefile)
+        if not os.path.exists(cachedir):
+            util.makedirs(cachedir, notindexed=True)
+        fd = open(cachefile, 'w')
+        fd.writelines(storehash)
+        fd.close()
+        lock.release()
+
     @annotatesubrepoerror
     def _initrepo(self, parentrepo, source, create):
         self._repo._subparent = parentrepo
         self._repo._subsource = source
 
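Taken together, _cachestorehash writes exactly the lines that _calcstorehash yields, so a cache file under .hg/cache/storehash/ looks like the listing below (the remote URL and paths are illustrative placeholders; da39a3... is the well-known SHA-1 of empty input):

    # ssh://example.com/repo
    /path/to/sub/.hg/bookmarks = da39a3ee5e6b4b0d3255bfef95601890afd80709
    /path/to/sub/.hg/store/phaseroots = da39a3ee5e6b4b0d3255bfef95601890afd80709
    /path/to/sub/.hg/store/00changelog.i = <sha1 of the changelog index>

_readstorehashcache returns these lines verbatim, or '' when no cache exists yet, which is exactly what storeclean compares against.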
                 other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                          other, self._repo.root,
                                          update=False)
                 self._repo = cloned.local()
                 self._initrepo(parentrepo, source, create=True)
+                self._cachestorehash(srcurl)
             else:
                 self._repo.ui.status(_('pulling subrepo %s from %s\n')
                                      % (subrelpath(self), srcurl))
+                cleansub = self.storeclean(srcurl)
                 remotebookmarks = other.listkeys('bookmarks')
                 self._repo.pull(other)
                 bookmarks.updatefromremote(self._repo.ui, self._repo,
                                            remotebookmarks, srcurl)
+                if cleansub:
+                    # keep the repo clean after pull
+                    self._cachestorehash(srcurl)
 
     @annotatesubrepoerror
     def get(self, state, overwrite=False):
         self._get(state)
         source, revision, kind = state
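Note the ordering in the pull branch: cleanliness versus srcurl is sampled before the pull mutates the store, and the cache is refreshed only when the store was already clean, since a pull from srcurl can only add data that srcurl already has. Restated as a sketch (names mirror the hunk above):

    cleansub = self.storeclean(srcurl)   # sample before the pull changes the store
    self._repo.pull(other)
    if cleansub:
        # everything new came from srcurl itself, so the store is still
        # clean versus srcurl; re-anchor the cache at the pulled state
        self._cachestorehash(srcurl)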
 
         dsturl = _abssource(self._repo, True)
         self._repo.ui.status(_('pushing subrepo %s to %s\n') %
                              (subrelpath(self), dsturl))
         other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
-        return self._repo.push(other, force, newbranch=newbranch)
+        res = self._repo.push(other, force, newbranch=newbranch)
+
+        # the repo is now clean
+        self._cachestorehash(dsturl)
+        return res
 
     @annotatesubrepoerror
     def outgoing(self, ui, dest, opts):
         return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)
 
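Once the push completes, the local store has nothing left to push to dsturl, hence the unconditional recache before returning the push result. The net effect, using only the methods this patch defines (sub is a hypothetical hgsubrepo instance and url an assumed remote path string):

    sub._cachestorehash(url)      # what push() now does after pushing
    assert sub.storeclean(url)    # clean versus url from this point on,
    # until a commit or pull touches bookmarks, store/phaseroots or
    # store/00changelog.i, after which storeclean(url) is False again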