comparison mercurial/revlog.py @ 34824:e2ad93bcc084

revlog: introduce an experimental flag to slice chunk reads when too sparse

Delta chains can become quite sparse when there is a lot of unrelated data between the relevant pieces. Right now, revlog always reads all the data needed for a delta chain in one single read, which can pull in a large amount of unrelated data (see issue5482 for more details). One can use the `experimental.maxdeltachainspan` option with a large value (or -1) to easily produce a very sparse delta chain.

This change introduces the ability to slice the chunk retrieval into multiple reads, skipping large sections of unrelated data. Preliminary testing shows interesting results: for example, the peak memory consumption when reading a manifest on a large repository drops from 600MB to 250MB (200MB without maxdeltachainspan). However, the slicing itself and the multiple reads can have a negative impact on performance, which is why the new feature is hidden behind an experimental flag. A toy walkthrough of the density heuristic it relies on follows the comparison header below.

Future changesets will add various parameters to control the slicing heuristics. We hope to experiment with a wide variety of repositories during the 4.4 cycle and, hopefully, to turn the feature on by default in 4.5. As a first attempt, the algorithm itself is likely to change deeply; for now, we mainly wish to define the APIs and establish a baseline to build on.
author Paul Morelle <paul.morelle@octobus.net>
date Tue, 10 Oct 2017 17:50:27 +0200
parents 7891d243d821
children 4d5d5009bd75
comparison: 34823:7891d243d821 → 34824:e2ad93bcc084
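As promised above, a worked example of the density heuristic; the byte counts are invented for illustration, and 0.25 is the default threshold set later in this patch:

    # A delta chain needing 110 useful bytes spread over a 1050-byte
    # span of the data file (toy numbers):
    textlen = 20 + 20 + 20 + 50   # bytes the delta chain actually uses
    deltachainspan = 1050         # bytes one single read would fetch

    density = textlen / float(deltachainspan)
    print(density)  # ~0.105 < 0.25, so the span is sliced at its
                    # largest hole rather than read whole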
158 158         b = p1
159 159     s = hashlib.sha1(a)
160 160     s.update(b)
161 161     s.update(text)
162 162     return s.digest()
163
164 def _slicechunk(revlog, revs):
165     """slice revs to reduce the amount of unrelated data to be read from disk.
166
167     ``revs`` is sliced into groups that should be read in one go.
168     Assume that revs are sorted.
169     """
170     start = revlog.start
171     length = revlog.length
172
173     chunkqueue = collections.deque()
174     chunkqueue.append((revs, 0))
175
176     while chunkqueue:
177         revs, depth = chunkqueue.popleft()
178
179         startbyte = start(revs[0])
180         endbyte = start(revs[-1]) + length(revs[-1])
181         deltachainspan = endbyte - startbyte
182
183         if len(revs) <= 1:
184             yield revs
185             continue
186
187         # Find the largest hole (this is where we would split) and
188         # sum up the lengths of useful data to compute the density of the span
189         textlen = 0
190         prevend = None
191         largesthole = 0
192         idxlargesthole = -1
193         for i, rev in enumerate(revs):
194             revstart = start(rev)
195             revlen = length(rev)
196
197             if prevend is not None:
198                 hole = revstart - prevend
199                 if hole > largesthole:
200                     largesthole = hole
201                     idxlargesthole = i
202
203             textlen += revlen
204             prevend = revstart + revlen
205
206         density = textlen / float(deltachainspan) if deltachainspan > 0 else 1.0
207
208         if density > revlog._srdensitythreshold:
209             yield revs
210             continue
211
212         # Add the left and right parts so that they will be sliced
213         # recursively too
214         chunkqueue.append((revs[:idxlargesthole], depth + 1))
215         chunkqueue.append((revs[idxlargesthole:], depth + 1))
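As an aside, a toy driver showing how the generator above slices a sparse chain; the stub revlog and all byte offsets are invented for illustration, and `_slicechunk` is assumed to be in scope as defined above:

    import collections  # needed by _slicechunk

    class stubrevlog(object):
        # Minimal stand-in for a revlog: revision i spans
        # offsets[i] .. offsets[i] + lengths[i] in the data file.
        _srdensitythreshold = 0.25

        def __init__(self, offsets, lengths):
            self._offsets = offsets
            self._lengths = lengths

        def start(self, rev):
            return self._offsets[rev]

        def length(self, rev):
            return self._lengths[rev]

    # Three adjacent revisions, a 940-byte hole, then a fourth revision:
    rl = stubrevlog(offsets=[0, 20, 40, 1000], lengths=[20, 20, 20, 50])
    print(list(_slicechunk(rl, [0, 1, 2, 3])))
    # density = 110/1050 ~= 0.10 < 0.25, so the chain is split at the
    # hole: [[0, 1, 2], [3]]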
163 216
164 217 # index v0:
165 218 # 4 bytes: offset
166 219 # 4 bytes: compressed length
167 220 # 4 bytes: base rev
303 356         # Mapping of revision integer to full node.
304 357         self._nodecache = {nullid: nullrev}
305 358         self._nodepos = None
306 359         self._compengine = 'zlib'
307 360         self._maxdeltachainspan = -1
361         self._withsparseread = False
362         self._srdensitythreshold = 0.25
308 363
309 364         mmapindexthreshold = None
310 365         v = REVLOG_DEFAULT_VERSION
311 366         opts = getattr(opener, 'options', None)
312 367         if opts is not None:
329 384             self._compengine = opts['compengine']
330 385             if 'maxdeltachainspan' in opts:
331 386                 self._maxdeltachainspan = opts['maxdeltachainspan']
332 387             if mmaplargeindex and 'mmapindexthreshold' in opts:
333 388                 mmapindexthreshold = opts['mmapindexthreshold']
389             self._withsparseread = bool(opts.get('with-sparse-read', False))
390             if 'sparse-read-density-threshold' in opts:
391                 self._srdensitythreshold = opts['sparse-read-density-threshold']
334 392
335 393         if self._chunkcachesize <= 0:
336 394             raise RevlogError(_('revlog chunk cache size %r is not greater '
337 395                                 'than 0') % self._chunkcachesize)
338 396         elif self._chunkcachesize & (self._chunkcachesize - 1):
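A minimal sketch of how these opener options reach the revlog; the opener class below is a hypothetical stand-in (the real `opener.options` dict is filled from repository configuration outside this file), and only the two option keys are taken from the hunk above:

    class fakeopener(object):
        # hypothetical stand-in for the vfs opener
        options = {
            'with-sparse-read': True,
            'sparse-read-density-threshold': 0.5,
        }

    # mirrors the parsing in __init__ above
    withsparseread = False
    srdensitythreshold = 0.25
    opts = getattr(fakeopener, 'options', None)
    if opts is not None:
        withsparseread = bool(opts.get('with-sparse-read', False))
        if 'sparse-read-density-threshold' in opts:
            srdensitythreshold = opts['sparse-read-density-threshold']
    print(withsparseread, srdensitythreshold)  # True 0.5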
1325 1383         buffer = util.buffer
1326 1384
1327 1385         l = []
1328 1386         ladd = l.append
1329 1387
deleted (1330-1349):

1330         firstrev = revs[0]
1331         # Skip trailing revisions with empty diff
1332         for lastrev in revs[::-1]:
1333             if length(lastrev) != 0:
1334                 break
1335
1336         try:
1337             offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1338         except OverflowError:
1339             # issue4215 - we can't cache a run of chunks greater than
1340             # 2G on Windows
1341             return [self._chunk(rev, df=df) for rev in revs]
1342
1343         decomp = self.decompress
1344         for rev in revs:
1345             chunkstart = start(rev)
1346             if inline:
1347                 chunkstart += (rev + 1) * iosize
1348             chunklength = length(rev)
1349             ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

inserted (1388-1413):

1388         if not self._withsparseread:
1389             slicedchunks = (revs,)
1390         else:
1391             slicedchunks = _slicechunk(self, revs)
1392
1393         for revschunk in slicedchunks:
1394             firstrev = revschunk[0]
1395             # Skip trailing revisions with empty diff
1396             for lastrev in revschunk[::-1]:
1397                 if length(lastrev) != 0:
1398                     break
1399
1400             try:
1401                 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1402             except OverflowError:
1403                 # issue4215 - we can't cache a run of chunks greater than
1404                 # 2G on Windows
1405                 return [self._chunk(rev, df=df) for rev in revschunk]
1406
1407             decomp = self.decompress
1408             for rev in revschunk:
1409                 chunkstart = start(rev)
1410                 if inline:
1411                     chunkstart += (rev + 1) * iosize
1412                 chunklength = length(rev)
1413                 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1350 1414
1351 1415         return l
1352 1416
1353 1417     def _chunkclear(self):
1354 1418         """Clear the raw chunk cache."""