Mercurial > public > mercurial-scm > hg-stable
diff mercurial/streamclone.py @ 47453:377d8fc20e34
clone: reuse the stream clone logic for local clone
Streaming clone and local (non `--pull`) clone do mostly the same thing,
however they were using different logic to do so. This means the logic
frequently went out of sync and that new cases had to be dealt with twice.
This is fragile and annoying. So we replace this with a re-use of the logic we
use for streaming clone.
I can see various test changes:
- a more precise progress output,
- harmless fncache loading during clone,
- fncache is no longer hardlinked (since we write it by hand).
I am not reinstalling the `reposimplestore` specific output, as far as I
understand this variant has been broken for years.
Differential Revision: https://phab.mercurial-scm.org/D10855
author | Pierre-Yves David <pierre-yves.david@octobus.net> |
---|---|
date | Tue, 08 Jun 2021 02:06:45 +0200 |
parents | 2f4ca4807033 |
children | d370256636fe |
line wrap: on
line diff
--- a/mercurial/streamclone.py Wed Jun 09 15:33:58 2021 +0200 +++ b/mercurial/streamclone.py Tue Jun 08 02:06:45 2021 +0200 @@ -8,6 +8,7 @@ from __future__ import absolute_import import contextlib +import errno import os import struct @@ -15,6 +16,7 @@ from .pycompat import open from .interfaces import repository from . import ( + bookmarks, cacheutil, error, narrowspec, @@ -25,6 +27,9 @@ store, util, ) +from .utils import ( + stringutil, +) def canperformstreamclone(pullop, bundle2=False): @@ -771,3 +776,104 @@ repo.ui, repo.requirements, repo.features ) scmutil.writereporequirements(repo) + + +def _copy_files(src_vfs_map, dst_vfs_map, entries, progress): + hardlink = [True] + + def copy_used(): + hardlink[0] = False + progress.topic = _(b'copying') + + for k, path, size in entries: + src_vfs = src_vfs_map[k] + dst_vfs = dst_vfs_map[k] + src_path = src_vfs.join(path) + dst_path = dst_vfs.join(path) + dirname = dst_vfs.dirname(path) + if not dst_vfs.exists(dirname): + dst_vfs.makedirs(dirname) + dst_vfs.register_file(path) + # XXX we could use the #nb_bytes argument. 
+ util.copyfile( + src_path, + dst_path, + hardlink=hardlink[0], + no_hardlink_cb=copy_used, + check_fs_hardlink=False, + ) + progress.increment() + return hardlink[0] + + +def local_copy(src_repo, dest_repo): + """copy all content from one local repository to another + + This is useful for local clone""" + src_store_requirements = { + r + for r in src_repo.requirements + if r not in requirementsmod.WORKING_DIR_REQUIREMENTS + } + dest_store_requirements = { + r + for r in dest_repo.requirements + if r not in requirementsmod.WORKING_DIR_REQUIREMENTS + } + assert src_store_requirements == dest_store_requirements + + with dest_repo.lock(): + with src_repo.lock(): + entries, totalfilesize = _v2_walk( + src_repo, + includes=None, + excludes=None, + includeobsmarkers=True, + ) + src_vfs_map = _makemap(src_repo) + dest_vfs_map = _makemap(dest_repo) + progress = src_repo.ui.makeprogress( + topic=_(b'linking'), + total=len(entries), + unit=_(b'files'), + ) + # copy files + # + # We could copy the full file while the source repository is locked + # and the other one without the lock. However, in the linking case, + # this would also requires checks that nobody is appending any data + # to the files while we do the clone, so this is not done yet. We + # could do this blindly when copying files. 
+ files = ((k, path, size) for k, path, ftype, size in entries) + hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress) + + # copy bookmarks over + src_book_vfs = bookmarks.bookmarksvfs(src_repo) + srcbookmarks = src_book_vfs.join(b'bookmarks') + dst_book_vfs = bookmarks.bookmarksvfs(dest_repo) + dstbookmarks = dst_book_vfs.join(b'bookmarks') + if os.path.exists(srcbookmarks): + util.copyfile(srcbookmarks, dstbookmarks) + progress.complete() + if hardlink: + msg = b'linked %d files\n' + else: + msg = b'copied %d files\n' + src_repo.ui.debug(msg % len(entries)) + + with dest_repo.transaction(b"localclone") as tr: + dest_repo.store.write(tr) + + # clean up transaction file as they do not make sense + undo_files = [(dest_repo.svfs, b'undo.backupfiles')] + undo_files.extend(dest_repo.undofiles()) + for undovfs, undofile in undo_files: + try: + undovfs.unlink(undofile) + except OSError as e: + if e.errno != errno.ENOENT: + msg = _(b'error removing %s: %s\n') + path = undovfs.join(undofile) + e_msg = stringutil.forcebytestr(e) + msg %= (path, e_msg) + dest_repo.ui.warn(msg)