comparison mercurial/graphmod.py @ 23567:1f080c9c6a35
groupbranchiter: support for non-contiguous revsets
The algorithm now works when some revisions are skipped. We now use "first
included ancestors" instead of just "parent" to link changesets with each other.
author:   Pierre-Yves David <pierre-yves.david@fb.com>
date:     Thu, 04 Sep 2014 19:05:36 +0200
parents:  fee7a30cfdf5
children: 740ae54573a3
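Before the side-by-side comparison, here is a concrete sense of what "first included ancestors" means, as a small standalone helper (not code from this patch; the helper name, the toy parent map and the revset below are invented for illustration). When a revision's parent has been filtered out of the set being displayed, the graph edge is drawn to the nearest ancestors that are still in the set:

    def first_included_ancestors(rev, included, parentsfunc):
        """Return the nearest ancestors of `rev` that belong to `included`.

        `parentsfunc(rev)` returns the parent revision numbers (-1 for none),
        like the `parentsfunc` argument of groupbranchiter in the diff below.
        """
        found = set()
        seen = set()
        stack = [p for p in parentsfunc(rev) if p >= 0]
        while stack:
            p = stack.pop()
            if p in seen:
                continue
            seen.add(p)
            if p in included:
                found.add(p)      # an included ancestor: stop descending here
            else:
                stack.extend(q for q in parentsfunc(p) if q >= 0)
        return found

    # Hypothetical history (rev -> parent revs); revs 3 and 1 are filtered out
    # of the set we want to display.
    parents = {0: (-1,), 1: (0,), 2: (1,), 3: (2,), 4: (3, 1)}
    revset = [4, 2, 0]
    for r in revset:
        print(r, sorted(first_included_ancestors(r, set(revset), lambda x: parents[x])))
    # 4 [0, 2]  -- skipped parent 3 collapses onto 2, skipped parent 1 onto 0
    # 2 [0]
    # 0 []

The patch never builds these sets explicitly; it obtains the same linking lazily by pushing parents onto a pending heap (`pendingheap`/`pendingset` in the right-hand column below) and only recording a revision in a branch group when `rev == currentrev`. A second sketch mirroring that loop follows the groupbranchiter portion of the comparison below.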
23566:fee7a30cfdf5 (old) | 23567:1f080c9c6a35 (new) |
---|---|
17 Data depends on type. | 17 Data depends on type. |
18 """ | 18 """ |
19 | 19 |
20 from mercurial.node import nullrev | 20 from mercurial.node import nullrev |
21 import util | 21 import util |
| 22 |
| 23 import heapq |
22 | 24 |
23 CHANGESET = 'C' | 25 CHANGESET = 'C' |
24 | 26 |
25 def groupbranchiter(revs, parentsfunc): | 27 def groupbranchiter(revs, parentsfunc): |
26 """yield revision from heads to roots one (topo) branch after the other. | 28 """yield revision from heads to roots one (topo) branch after the other. |
38 | | | 40 | | |
39 | o 2 | 41 | o 2 |
40 |/ | 42 |/ |
41 o 0 | 43 o 0 |
42 | 44 |
43 Currently does not handle non-contiguous <revs> input. | |
44 | |
45 Currently considers every changeset under a merge to be on the same branch | 45 Currently considers every changeset under a merge to be on the same branch |
46 using revision number to sort them. | 46 using revision number to sort them. |
47 | 47 |
48 Could be easily extended to give priority to an initial branch.""" | 48 Could be easily extended to give priority to an initial branch.""" |
49 ### Quick summary of the algorithm | 49 ### Quick summary of the algorithm |
101 # were already emitted. The 'revs' list is expected to be empty and the | 101 # were already emitted. The 'revs' list is expected to be empty and the |
102 # 'blocked' set contains the parent revisions of already emitted revisions. | 102 # 'blocked' set contains the parent revisions of already emitted revisions. |
103 # | 103 # |
104 # You could pre-seed the <parents> set of groups[0] to specific | 104 # You could pre-seed the <parents> set of groups[0] to specific |
105 # changesets to select what the first emitted branch should be. | 105 # changesets to select what the first emitted branch should be. |
106 # | |
107 # We do not support revisions with holes yet, but adding such support would | |
108 # be easy. The iteration will have to be done using both input revisions and | |
109 # parents (see cl.ancestors function + a few tweaks) but only revisions | |
110 # that are part of the initial set should be emitted. | |
111 groups = [([], unblocked)] | 106 groups = [([], unblocked)] |
112 for current in revs: | 107 pendingheap = [] |
113 if True: | 108 pendingset = set() |
| 109 |
| 110 heapq.heapify(pendingheap) |
| 111 heappop = heapq.heappop |
| 112 heappush = heapq.heappush |
| 113 for currentrev in revs: |
| 114 # Heap works with the smallest element; we want the highest, so we invert |
| 115 if currentrev not in pendingset: |
| 116 heappush(pendingheap, -currentrev) |
| 117 pendingset.add(currentrev) |
| 118 # iterate on pending revs until after the current rev has been |
| 119 # processed. |
| 120 rev = None |
| 121 while rev != currentrev: |
| 122 rev = -heappop(pendingheap) |
| 123 pendingset.remove(rev) |
| 124 |
114 # Seek for a subgroup blocked, waiting for the current revision. | 125 # Seek for a subgroup blocked, waiting for the current revision. |
115 matching = [i for i, g in enumerate(groups) if current in g[1]] | 126 matching = [i for i, g in enumerate(groups) if rev in g[1]] |
116 | 127 |
117 if matching: | 128 if matching: |
118 # The main idea is to gather together all sets that await on | 129 # The main idea is to gather together all sets that await on |
119 # the same revision. | 130 # the same revision. |
120 # | 131 # |
138 for i in reversed(matching): | 149 for i in reversed(matching): |
139 del groups[i] | 150 del groups[i] |
140 else: | 151 else: |
141 # This is a new head. We create a new subgroup for it. | 152 # This is a new head. We create a new subgroup for it. |
142 targetidx = len(groups) | 153 targetidx = len(groups) |
143 groups.append(([], set([current]))) | 154 groups.append(([], set([rev]))) |
144 | 155 |
145 gr = groups[targetidx] | 156 gr = groups[targetidx] |
146 | 157 |
147 # We now add the current node to this subgroup. This is done | 158 # We now add the current node to this subgroup. This is done |
148 # after the subgroup merging because all elements from a subgroup | 159 # after the subgroup merging because all elements from a subgroup |
149 # that relied on this rev must precede it. | 160 # that relied on this rev must precede it. |
150 # | 161 # |
151 # We also update the <parents> set to include the parents of the | 162 # We also update the <parents> set to include the parents of the |
152 # new nodes. | 163 # new nodes. |
153 gr[0].append(current) | 164 if rev == currentrev: # only display stuff in rev |
154 gr[1].remove(current) | 165 gr[0].append(rev) |
155 gr[1].update([p for p in parentsfunc(current) if p > nullrev]) | 166 gr[1].remove(rev) |
| 167 parents = [p for p in parentsfunc(rev) if p > nullrev] |
| 168 gr[1].update(parents) |
| 169 for p in parents: |
| 170 if p not in pendingset: |
| 171 pendingset.add(p) |
| 172 heappush(pendingheap, -p) |
156 | 173 |
157 # Look for a subgroup to display | 174 # Look for a subgroup to display |
158 # | 175 # |
159 # When unblocked is empty (if clause), we are not waiting on any | 176 # When unblocked is empty (if clause), we are not waiting on any |
160 # revision during the first iteration (if no priority was given) or | 177 # revision during the first iteration (if no priority was given) or |
183 # unless it is groups[0] in which case you just empty it. | 200 # unless it is groups[0] in which case you just empty it. |
184 if targetidx: | 201 if targetidx: |
185 del groups[targetidx] | 202 del groups[targetidx] |
186 else: | 203 else: |
187 gr[0][:] = [] | 204 gr[0][:] = [] |
| 205 # Check if we have some subgroup waiting for revisions we are not going to |
| 206 # iterate over. |
| 207 for g in groups: |
| 208 for r in g[0]: |
| 209 yield r |
188 | 210 |
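To summarise the control flow introduced above: a minimal, self-contained sketch of the pending-heap traversal (simplified, and not the Mercurial implementation; it drops the subgroup bookkeeping and yields matching revisions directly, so skipped revisions are walked through but never emitted). It assumes, as in Mercurial, that parents have lower revision numbers than their descendants and that `revs` is in decreasing order:

    import heapq

    def iter_with_gaps(revs, parentsfunc):
        """Yield exactly the revisions of `revs` (given in decreasing order),
        while still walking through skipped ancestors so parent links are
        followed across the holes."""
        pendingheap = []          # min-heap of negated revs, i.e. highest first
        pendingset = set()
        for currentrev in revs:
            if currentrev not in pendingset:
                heapq.heappush(pendingheap, -currentrev)
                pendingset.add(currentrev)
            rev = None
            # Iterate on pending revisions until the current one is processed.
            while rev != currentrev:
                rev = -heapq.heappop(pendingheap)
                pendingset.remove(rev)
                if rev == currentrev:
                    yield rev     # only revisions from the input set get here
                for p in parentsfunc(rev):
                    if p >= 0 and p not in pendingset:
                        pendingset.add(p)
                        heapq.heappush(pendingheap, -p)

    # Reusing the toy history from the first sketch:
    parents = {0: (-1,), 1: (0,), 2: (1,), 3: (2,), 4: (3, 1)}
    assert list(iter_with_gaps([4, 2, 0], lambda r: parents[r])) == [4, 2, 0]

In groupbranchiter itself the `yield` is replaced by `gr[0].append(rev)` under the same `rev == currentrev` guard, and the final `for g in groups` loop above flushes subgroups that were still waiting on revisions outside the iterated set.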
189 def dagwalker(repo, revs): | 211 def dagwalker(repo, revs): |
190 """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples | 212 """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples |
191 | 213 |
192 This generator function walks through revisions (which should be ordered | 214 This generator function walks through revisions (which should be ordered |