diff hgext/largefiles/lfutil.py @ 30180:736f92c44656

largefiles: always use filechunkiter when iterating files

Before, we would sometimes use the default iterator over large files. That iterator is line-based and would add extra buffering and use odd chunk sizes, which could give some overhead.

copyandhash can't just apply a filechunkiter itself, as it is sometimes passed a genuine generator when downloading remotely.
author Mads Kiilerich <madski@unity3d.com>
date Wed, 12 Oct 2016 12:22:18 +0200
parents 3dcaf1c4e90d
children 7356e6b1f5b8
--- a/hgext/largefiles/lfutil.py	Fri Oct 14 23:33:00 2016 +0900
+++ b/hgext/largefiles/lfutil.py	Wed Oct 12 12:22:18 2016 +0200
@@ -231,7 +231,8 @@
     # don't use atomic writes in the working copy.
     with open(path, 'rb') as srcfd:
         with wvfs(filename, 'wb') as destfd:
-            gothash = copyandhash(srcfd, destfd)
+            gothash = copyandhash(
+                util.filechunkiter(srcfd), destfd)
     if gothash != hash:
         repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                      % (filename, path, gothash))
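
The sketch below illustrates the distinction the commit message draws, in plain Python. The names filechunkiter and copy_and_hash here are illustrative stand-ins, not Mercurial's actual util.filechunkiter or lfutil.copyandhash implementations, and the 128k chunk size is only an assumed default.

    import hashlib

    def filechunkiter(f, size=131072):
        # Yield fixed-size binary chunks instead of iterating the file
        # line by line, avoiding line-oriented buffering and odd chunk
        # sizes on large binary files.
        while True:
            chunk = f.read(size)
            if not chunk:
                break
            yield chunk

    def copy_and_hash(instream, outfile):
        # Accept any iterable of byte chunks: a filechunkiter wrapping a
        # local file, or a genuine generator streaming from a remote
        # store. This is why the caller, not copy_and_hash, wraps the
        # file object in a chunk iterator.
        hasher = hashlib.sha1()
        for chunk in instream:
            hasher.update(chunk)
            outfile.write(chunk)
        return hasher.hexdigest()

    # Mirrors the shape of the change in the diff above:
    #     gothash = copy_and_hash(filechunkiter(srcfd), destfd)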