64 |
64 |
class simplestoreerror(error.StorageError):
    """Storage error raised by the simple store implementation."""
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class simplestorerevisiondelta(object):
    """Value object implementing ``repository.irevisiondelta``.

    Attrs require non-default fields first, so ``linknode`` (which may be
    unknown at construction time) comes last with a ``None`` default.
    """
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    # Optional: filled in lazily when the linknode is resolved.
    linknode = attr.ib(default=None)
80 |
|
@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class simplefilestoreproblem(object):
    """A problem found during storage verification.

    Exactly one of ``warning`` or ``error`` is typically set; ``node``
    identifies the affected revision when known.
    """
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)
80 |
87 |
81 @interfaceutil.implementer(repository.ifilestorage) |
88 @interfaceutil.implementer(repository.ifilestorage) |
82 class filestorage(object): |
89 class filestorage(object): |
83 """Implements storage for a tracked path. |
90 """Implements storage for a tracked path. |
84 |
91 |
190 def node(self, rev): |
197 def node(self, rev): |
191 validaterev(rev) |
198 validaterev(rev) |
192 |
199 |
193 return self._indexbyrev[rev][b'node'] |
200 return self._indexbyrev[rev][b'node'] |
194 |
201 |
|
202 def hasnode(self, node): |
|
203 validatenode(node) |
|
204 return node in self._indexbynode |
|
205 |
|
206 def censorrevision(self, tr, censornode, tombstone=b''): |
|
207 raise NotImplementedError('TODO') |
|
208 |
195 def lookup(self, node): |
209 def lookup(self, node): |
196 if isinstance(node, int): |
210 if isinstance(node, int): |
197 return self.node(node) |
211 return self.node(node) |
198 |
212 |
199 if len(node) == 20: |
213 if len(node) == 20: |
288 p1, p2 = self.parents(node) |
302 p1, p2 = self.parents(node) |
289 if node != storageutil.hashrevisionsha1(text, p1, p2): |
303 if node != storageutil.hashrevisionsha1(text, p1, p2): |
290 raise simplestoreerror(_("integrity check failed on %s") % |
304 raise simplestoreerror(_("integrity check failed on %s") % |
291 self._path) |
305 self._path) |
292 |
306 |
293 def revision(self, node, raw=False): |
307 def revision(self, nodeorrev, raw=False): |
|
308 if isinstance(nodeorrev, int): |
|
309 node = self.node(nodeorrev) |
|
310 else: |
|
311 node = nodeorrev |
294 validatenode(node) |
312 validatenode(node) |
295 |
313 |
296 if node == nullid: |
314 if node == nullid: |
297 return b'' |
315 return b'' |
298 |
316 |
407 # recording. |
425 # recording. |
408 entries = [f for f in entries if not f.startswith('undo.backup.')] |
426 entries = [f for f in entries if not f.startswith('undo.backup.')] |
409 |
427 |
410 return [b'/'.join((self._storepath, f)) for f in entries] |
428 return [b'/'.join((self._storepath, f)) for f in entries] |
411 |
429 |
|
430 def storageinfo(self, exclusivefiles=False, sharedfiles=False, |
|
431 revisionscount=False, trackedsize=False, |
|
432 storedsize=False): |
|
433 # TODO do a real implementation of this |
|
434 return { |
|
435 'exclusivefiles': [], |
|
436 'sharedfiles': [], |
|
437 'revisionscount': len(self), |
|
438 'trackedsize': 0, |
|
439 'storedsize': None, |
|
440 } |
|
441 |
|
442 def verifyintegrity(self, state): |
|
443 state['skipread'] = set() |
|
444 for rev in self: |
|
445 node = self.node(rev) |
|
446 try: |
|
447 self.revision(node) |
|
448 except Exception as e: |
|
449 yield simplefilestoreproblem( |
|
450 error='unpacking %s: %s' % (node, e), |
|
451 node=node) |
|
452 state['skipread'].add(node) |
|
453 |
|
454 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False, |
|
455 assumehaveparentrevisions=False, |
|
456 deltamode=repository.CG_DELTAMODE_STD): |
|
457 # TODO this will probably break on some ordering options. |
|
458 nodes = [n for n in nodes if n != nullid] |
|
459 if not nodes: |
|
460 return |
|
461 for delta in storageutil.emitrevisions( |
|
462 self, nodes, nodesorder, simplestorerevisiondelta, |
|
463 revisiondata=revisiondata, |
|
464 assumehaveparentrevisions=assumehaveparentrevisions, |
|
465 deltamode=deltamode): |
|
466 yield delta |
|
467 |
412 def add(self, text, meta, transaction, linkrev, p1, p2): |
468 def add(self, text, meta, transaction, linkrev, p1, p2): |
413 if meta or text.startswith(b'\1\n'): |
469 if meta or text.startswith(b'\1\n'): |
414 text = storageutil.packmeta(meta, text) |
470 text = storageutil.packmeta(meta, text) |
415 |
471 |
416 return self.addrevision(text, transaction, linkrev, p1, p2) |
472 return self.addrevision(text, transaction, linkrev, p1, p2) |
487 self._addrawrevision(node, text, transaction, linkrev, p1, p2, |
543 self._addrawrevision(node, text, transaction, linkrev, p1, p2, |
488 flags) |
544 flags) |
489 |
545 |
490 if addrevisioncb: |
546 if addrevisioncb: |
491 addrevisioncb(self, node) |
547 addrevisioncb(self, node) |
492 |
|
493 return nodes |
548 return nodes |
|
549 |
|
550 def _headrevs(self): |
|
551 # Assume all revisions are heads by default. |
|
552 revishead = {rev: True for rev in self._indexbyrev} |
|
553 |
|
554 for rev, entry in self._indexbyrev.items(): |
|
555 # Unset head flag for all seen parents. |
|
556 revishead[self.rev(entry[b'p1'])] = False |
|
557 revishead[self.rev(entry[b'p2'])] = False |
|
558 |
|
559 return [rev for rev, ishead in sorted(revishead.items()) |
|
560 if ishead] |
494 |
561 |
495 def heads(self, start=None, stop=None): |
562 def heads(self, start=None, stop=None): |
496 # This is copied from revlog.py. |
563 # This is copied from revlog.py. |
497 if start is None and stop is None: |
564 if start is None and stop is None: |
498 if not len(self): |
565 if not len(self): |
499 return [nullid] |
566 return [nullid] |
500 return [self.node(r) for r in self.headrevs()] |
567 return [self.node(r) for r in self._headrevs()] |
501 |
568 |
502 if start is None: |
569 if start is None: |
503 start = nullid |
570 start = nullid |
504 if stop is None: |
571 if stop is None: |
505 stop = [] |
572 stop = [] |
535 elif p == nullrev: |
602 elif p == nullrev: |
536 c.append(self.node(r)) |
603 c.append(self.node(r)) |
537 return c |
604 return c |
538 |
605 |
539 def getstrippoint(self, minlink): |
606 def getstrippoint(self, minlink): |
540 |
607 return storageutil.resolvestripinfo( |
541 # This is largely a copy of revlog.getstrippoint(). |
608 minlink, len(self) - 1, self._headrevs(), self.linkrev, |
542 brokenrevs = set() |
609 self.parentrevs) |
543 strippoint = len(self) |
|
544 |
|
545 heads = {} |
|
546 futurelargelinkrevs = set() |
|
547 for head in self.heads(): |
|
548 headlinkrev = self.linkrev(self.rev(head)) |
|
549 heads[head] = headlinkrev |
|
550 if headlinkrev >= minlink: |
|
551 futurelargelinkrevs.add(headlinkrev) |
|
552 |
|
553 # This algorithm involves walking down the rev graph, starting at the |
|
554 # heads. Since the revs are topologically sorted according to linkrev, |
|
555 # once all head linkrevs are below the minlink, we know there are |
|
556 # no more revs that could have a linkrev greater than minlink. |
|
557 # So we can stop walking. |
|
558 while futurelargelinkrevs: |
|
559 strippoint -= 1 |
|
560 linkrev = heads.pop(strippoint) |
|
561 |
|
562 if linkrev < minlink: |
|
563 brokenrevs.add(strippoint) |
|
564 else: |
|
565 futurelargelinkrevs.remove(linkrev) |
|
566 |
|
567 for p in self.parentrevs(strippoint): |
|
568 if p != nullrev: |
|
569 plinkrev = self.linkrev(p) |
|
570 heads[p] = plinkrev |
|
571 if plinkrev >= minlink: |
|
572 futurelargelinkrevs.add(plinkrev) |
|
573 |
|
574 return strippoint, brokenrevs |
|
575 |
610 |
576 def strip(self, minlink, transaction): |
611 def strip(self, minlink, transaction): |
577 if not len(self): |
612 if not len(self): |
578 return |
613 return |
579 |
614 |
629 repo.__class__ = simplestorerepo |
664 repo.__class__ = simplestorerepo |
630 |
665 |
def featuresetup(ui, supported):
    """Add the simple store requirement to the supported set."""
    supported.add(REQUIREMENT)
633 |
668 |
634 def newreporequirements(orig, ui): |
669 def newreporequirements(orig, ui, createopts): |
635 """Modifies default requirements for new repos to use the simple store.""" |
670 """Modifies default requirements for new repos to use the simple store.""" |
636 requirements = orig(ui) |
671 requirements = orig(ui, createopts) |
637 |
672 |
638 # These requirements are only used to affect creation of the store |
673 # These requirements are only used to affect creation of the store |
639 # object. We have our own store. So we can remove them. |
674 # object. We have our own store. So we can remove them. |
640 # TODO do this once we feel like taking the test hit. |
675 # TODO do this once we feel like taking the test hit. |
641 #if 'fncache' in requirements: |
676 #if 'fncache' in requirements: |
663 def extsetup(ui): |
698 def extsetup(ui): |
664 localrepo.featuresetupfuncs.add(featuresetup) |
699 localrepo.featuresetupfuncs.add(featuresetup) |
665 |
700 |
666 extensions.wrapfunction(localrepo, 'newreporequirements', |
701 extensions.wrapfunction(localrepo, 'newreporequirements', |
667 newreporequirements) |
702 newreporequirements) |
668 extensions.wrapfunction(store, 'store', makestore) |
703 extensions.wrapfunction(localrepo, 'makestore', makestore) |
669 extensions.wrapfunction(verify.verifier, '__init__', verifierinit) |
704 extensions.wrapfunction(verify.verifier, '__init__', verifierinit) |