  47   47  baseset = smartset.baseset
  48   48  generatorset = smartset.generatorset
  49   49  spanset = smartset.spanset
  50   50  fullreposet = smartset.fullreposet
  51   51
  52  def _revancestors(repo, revs, followfirst):
  53      """Like revlog.ancestors(), but supports followfirst."""
  54      if followfirst:
  55          cut = 1
  56      else:
  57          cut = None
  58      cl = repo.changelog
  59
  60      def iterate():
  61          revs.sort(reverse=True)
  62          irevs = iter(revs)
  63          h = []
  64
  65          inputrev = next(irevs, None)
  66          if inputrev is not None:
  67              heapq.heappush(h, -inputrev)
  68
  69          seen = set()
  70          while h:
  71              current = -heapq.heappop(h)
  72              if current == inputrev:
  73                  inputrev = next(irevs, None)
  74                  if inputrev is not None:
  75                      heapq.heappush(h, -inputrev)
  76              if current not in seen:
  77                  seen.add(current)
  78                  yield current
  79                  try:
  80                      for parent in cl.parentrevs(current)[:cut]:
  81                          if parent != node.nullrev:
  82                              heapq.heappush(h, -parent)
  83                  except error.WdirUnsupported:
  84                      for parent in repo[current].parents()[:cut]:
  85                          if parent.rev() != node.nullrev:
  86                              heapq.heappush(h, -parent.rev())
  87
  88      return generatorset(iterate(), iterasc=False)
  89
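The heap holds negated revision numbers, so Python's min-heap pops the highest revision first and ancestors stream out in descending order, with the seen set filtering duplicate visits. A minimal standalone sketch of the same idea, assuming a plain dict-based parent map and made-up revision numbers rather than a real changelog:

    import heapq

    def ancestors_desc(parents, revs, followfirst=False):
        """Yield revs and their ancestors, highest revision first."""
        cut = 1 if followfirst else None
        h = [-r for r in revs]            # negate so heapq behaves like a max-heap
        heapq.heapify(h)
        seen = set()
        while h:
            current = -heapq.heappop(h)
            if current in seen:
                continue
            seen.add(current)
            yield current
            for p in parents[current][:cut]:
                if p != -1:               # -1 stands in for nullrev here
                    heapq.heappush(h, -p)

    # hypothetical DAG: 1 and 2 branch off 0, 3 merges 1 (first parent) and 2
    parents = {0: [-1, -1], 1: [0, -1], 2: [0, -1], 3: [1, 2]}
    print(list(ancestors_desc(parents, [3])))        # [3, 2, 1, 0]
    print(list(ancestors_desc(parents, [3], True)))  # [3, 1, 0] -- first parents only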
|
  90  def _revdescendants(repo, revs, followfirst):
  91      """Like revlog.descendants() but supports followfirst."""
  92      if followfirst:
  93          cut = 1
  94      else:
  95          cut = None
  96
  97      def iterate():
  98          cl = repo.changelog
  99          # XXX this should be 'parentset.min()' assuming 'parentset' is a
 100          # smartset (and if it is not, it should.)
 101          first = min(revs)
 102          nullrev = node.nullrev
 103          if first == nullrev:
 104              # Are there nodes with a null first parent and a non-null
 105              # second one? Maybe. Do we care? Probably not.
 106              for i in cl:
 107                  yield i
 108          else:
 109              seen = set(revs)
 110              for i in cl.revs(first + 1):
 111                  for x in cl.parentrevs(i)[:cut]:
 112                      if x != nullrev and x in seen:
 113                          seen.add(i)
 114                          yield i
 115                          break
 116
 117      return generatorset(iterate(), iterasc=True)
 118
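Because a revision's parents always carry smaller numbers than the revision itself, descendants can be collected in a single ascending pass: a revision is a descendant as soon as one of its (first cut) parents is already in the accumulated set. A rough standalone sketch of that idea over a hypothetical parent map:

    def descendants_asc(parents, revs, followfirst=False):
        """Yield descendants of revs (excluding revs themselves), ascending."""
        cut = 1 if followfirst else None
        seen = set(revs)
        for i in range(min(revs) + 1, max(parents) + 1):
            if any(p in seen for p in parents[i][:cut] if p != -1):
                seen.add(i)
                yield i

    # same hypothetical DAG as above: 3 merges 1 (first parent) and 2
    parents = {0: [-1, -1], 1: [0, -1], 2: [0, -1], 3: [1, 2]}
    print(list(descendants_asc(parents, [2])))        # [3]
    print(list(descendants_asc(parents, [2], True)))  # []  -- 3 follows 2 only via its second parent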
|
 119  def _reachablerootspure(repo, minroot, roots, heads, includepath):
 120      """return (heads(::<roots> and ::<heads>))
 121
 122      If includepath is True, return (<roots>::<heads>)."""
 123      if not roots:
 124          return []
 125      parentrevs = repo.changelog.parentrevs
 126      roots = set(roots)
 127      visit = list(heads)
 128      reachable = set()
 129      seen = {}
 130      # prefetch all the things! (because python is slow)
 131      reached = reachable.add
 132      dovisit = visit.append
 133      nextvisit = visit.pop
 134      # open-code the post-order traversal due to the tiny size of
 135      # sys.getrecursionlimit()
 136      while visit:
 137          rev = nextvisit()
 138          if rev in roots:
 139              reached(rev)
 140              if not includepath:
 141                  continue
 142          parents = parentrevs(rev)
 143          seen[rev] = parents
 144          for parent in parents:
 145              if parent >= minroot and parent not in seen:
 146                  dovisit(parent)
 147      if not reachable:
 148          return baseset()
 149      if not includepath:
 150          return reachable
 151      for rev in sorted(seen):
 152          for parent in seen[rev]:
 153              if parent in reachable:
 154                  reached(rev)
 155      return reachable
 156
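The pure-Python version works in two phases: first it walks up from the heads (never going below minroot), recording every visited revision's parents and flagging any root it reaches; then, only when includepath is requested, it sweeps the recorded revisions in ascending order so reachability propagates from the roots back towards the heads. A condensed sketch of the same two phases over a hypothetical parent map:

    def reachable_pure(parents, minroot, roots, heads, includepath=False):
        """Which of 'roots' are ancestors of 'heads'; with includepath,
        every revision on a path from those roots to the heads."""
        roots = set(roots)
        visit = list(heads)
        reachable = set()
        seen = {}
        while visit:                              # phase 1: ancestors of heads
            rev = visit.pop()
            if rev in roots:
                reachable.add(rev)
                if not includepath:
                    continue
            prs = [p for p in parents[rev] if p != -1]
            seen[rev] = prs
            for p in prs:
                if p >= minroot and p not in seen:
                    visit.append(p)
        if not reachable or not includepath:
            return reachable
        for rev in sorted(seen):                  # phase 2: push reachability forward
            if any(p in reachable for p in seen[rev]):
                reachable.add(rev)
        return reachable

    # hypothetical DAG: 1 and 2 branch off 0, 3 merges them, 4 sits on top of 3
    parents = {0: [-1, -1], 1: [0, -1], 2: [0, -1], 3: [1, 2], 4: [3, -1]}
    print(sorted(reachable_pure(parents, 1, [1], [4])))        # [1]
    print(sorted(reachable_pure(parents, 1, [1], [4], True)))  # [1, 3, 4]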
|
 157  def reachableroots(repo, roots, heads, includepath=False):
 158      """return (heads(::<roots> and ::<heads>))
 159
 160      If includepath is True, return (<roots>::<heads>)."""
 161      if not roots:
 162          return baseset()
 163      minroot = roots.min()
 164      roots = list(roots)
 165      heads = list(heads)
 166      try:
 167          revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
 168      except AttributeError:
 169          revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
 170      revs = baseset(revs)
 171      revs.sort()
 172      return revs
 173
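The try/except AttributeError here is the usual optional-fast-path dispatch: the call is attempted against the changelog first, and when no native (C-backed) implementation is available the failed attribute lookup routes execution to the pure-Python _reachablerootspure above. The shape of that pattern, with hypothetical names:

    def compute(index, *args):
        try:
            fast = index.reachableroots       # assumed to exist only on the fast backend
        except AttributeError:
            return reachable_pure(*args)      # hypothetical pure-Python fallback, as sketched above
        return fast(*args)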
|
 174   52  # helpers
 175   53
 176   54  def getset(repo, subset, x):
 177   55      if not x:
 178   56          raise error.ParseError(_("missing argument"))
1855 1733          return revs
1856 1734      elif keyflags[0][0] == "topo":
1857 1735          firstbranch = ()
1858 1736          if 'topo.firstbranch' in opts:
1859 1737              firstbranch = getset(repo, subset, opts['topo.firstbranch'])
1860                   revs = baseset(_toposort(revs, repo.changelog.parentrevs, firstbranch),
     1738              revs = baseset(dagop.toposort(revs, repo.changelog.parentrevs,
     1739                                            firstbranch),
1861 1740                             istopo=True)
1862 1741          if keyflags[0][1]:
1863 1742              revs.reverse()
1864 1743          return revs
1865 1744
1866 1745      # sort() is guaranteed to be stable
1867 1746      ctxs = [repo[r] for r in revs]
1868 1747      for k, reverse in reversed(keyflags):
1869 1748          ctxs.sort(key=_sortkeyfuncs[k], reverse=reverse)
1870 1749      return baseset([c.rev() for c in ctxs])
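
This is the branch of the sort() revset that handles the topo key, with topo.firstbranch selecting which branch the topological grouping should start from. As a usage illustration (the revision number is hypothetical), something like the following asks for a graph log grouped branch by branch, starting with the branch that contains revision 25:

    hg log --graph -r 'sort(all(), topo, topo.firstbranch=(25))'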
1871
1872  def _toposort(revs, parentsfunc, firstbranch=()):
1873      """Yield revisions from heads to roots one (topo) branch at a time.
1874
1875      This function aims to be used by a graph generator that wishes to minimize
1876      the number of parallel branches and their interleaving.
1877
1878      Example iteration order (numbers show the "true" order in a changelog):
1879
1880        o  4
1881        |
1882        o  1
1883        |
1884        | o  3
1885        | |
1886        | o  2
1887        |/
1888        o  0
1889
1890      Note that the ancestors of merges are understood by the current
1891      algorithm to be on the same branch. This means no reordering will
1892      occur behind a merge.
1893      """
1894
1895      ### Quick summary of the algorithm
1896      #
1897      # This function is based around a "retention" principle. We keep revisions
1898      # in memory until we are ready to emit a whole branch that immediately
1899      # "merges" into an existing one. This reduces the number of parallel
1900      # branches with interleaved revisions.
1901      #
1902      # During iteration revs are split into two groups:
1903      # A) revisions already emitted
1904      # B) revisions in "retention". They are stored as different subgroups.
1905      #
1906      # For each REV, we apply the following logic:
1907      #
1908      #   1) if REV is a parent of (A), we will emit it. If there is a
1909      #   retention group ((B) above) that is blocked on REV being
1910      #   available, we emit all the revisions out of that retention
1911      #   group first.
1912      #
1913      #   2) else, we search for a subgroup in (B) waiting for REV to become
1914      #   available; if such a subgroup exists, we add REV to it and the subgroup
1915      #   is now waiting for REV.parents() to become available.
1916      #
1917      #   3) finally, if no such group exists in (B), we create a new subgroup.
1918      #
1919      #
1920      # To bootstrap the algorithm, we emit the tipmost revision (which
1921      # puts it in group (A) from above).
1922
1923      revs.sort(reverse=True)
1924
1925      # Set of parents of revisions that have been emitted. They can be considered
1926      # unblocked as the graph generator is already aware of them so there is no
1927      # need to delay the revisions that reference them.
1928      #
1929      # If someone wants to prioritize a branch over the others, pre-filling this
1930      # set will force all other branches to wait until this branch is ready to be
1931      # emitted.
1932      unblocked = set(firstbranch)
1933
1934      # list of groups waiting to be displayed, each group is defined by:
1935      #
1936      #   (revs:    list of revs waiting to be displayed,
1937      #    blocked: set of revs that cannot be displayed before those in 'revs')
1938      #
1939      # The second value ('blocked') corresponds to the parents of any revision in
1940      # the group ('revs') that is not itself contained in the group. The main idea
1941      # of this algorithm is to delay as much as possible the emission of any
1942      # revision. This means waiting for the moment we are about to display
1943      # these parents to display the revs in a group.
1944      #
1945      # This first implementation is smart until it encounters a merge: it will
1946      # emit revs as soon as any parent is about to be emitted and can grow an
1947      # arbitrary number of revs in 'blocked'. In practice this means we properly
1948      # retain new branches but give up on any special ordering for ancestors
1949      # of merges. The implementation can be improved to handle this better.
1950      #
1951      # The first subgroup is special. It corresponds to all the revisions that
1952      # were already emitted. Its 'revs' list is expected to be empty and its
1953      # 'blocked' set contains the parents of the revisions already emitted.
1954      #
1955      # You could pre-seed the <parents> set of groups[0] with a specific
1956      # changeset to select what the first emitted branch should be.
1957      groups = [([], unblocked)]
1958      pendingheap = []
1959      pendingset = set()
1960
1961      heapq.heapify(pendingheap)
1962      heappop = heapq.heappop
1963      heappush = heapq.heappush
1964      for currentrev in revs:
1965          # The heap pops the smallest element, but we want the highest rev, so we invert
1966          if currentrev not in pendingset:
1967              heappush(pendingheap, -currentrev)
1968              pendingset.add(currentrev)
1969          # iterate on pending revs until the current rev has been
1970          # processed.
1971          rev = None
1972          while rev != currentrev:
1973              rev = -heappop(pendingheap)
1974              pendingset.remove(rev)
1975
1976              # Look for a blocked subgroup waiting for the current revision.
1977              matching = [i for i, g in enumerate(groups) if rev in g[1]]
1978
1979              if matching:
1980                  # The main idea is to gather together all sets that are blocked
1981                  # on the same revision.
1982                  #
1983                  # Groups are merged when a common blocking ancestor is
1984                  # observed. For example, given two groups:
1985                  #
1986                  # revs [5, 4] waiting for 1
1987                  # revs [3, 2] waiting for 1
1988                  #
1989                  # These two groups will be merged when we process
1990                  # 1. In theory, we could have merged the groups when
1991                  # we added 2 to the group it is now in (we could have
1992                  # noticed the groups were both blocked on 1 then), but
1993                  # the way it works now makes the algorithm simpler.
1994                  #
1995                  # We also always keep the oldest subgroup first. We could
1996                  # probably improve the behavior by putting the longest set
1997                  # first. That way, graph algorithms could minimise the length
1998                  # of parallel lines in their drawing. This is currently not done.
1999                  targetidx = matching.pop(0)
2000                  trevs, tparents = groups[targetidx]
2001                  for i in matching:
2002                      gr = groups[i]
2003                      trevs.extend(gr[0])
2004                      tparents |= gr[1]
2005                  # delete all merged subgroups (except the one we kept)
2006                  # (starting from the last subgroup for performance and
2007                  # sanity reasons)
2008                  for i in reversed(matching):
2009                      del groups[i]
2010              else:
2011                  # This is a new head. We create a new subgroup for it.
2012                  targetidx = len(groups)
2013                  groups.append(([], {rev}))
2014
2015              gr = groups[targetidx]
2016
2017              # We now add the current node to this subgroup. This is done
2018              # after the subgroup merging because all elements from a subgroup
2019              # that relied on this rev must precede it.
2020              #
2021              # We also update the <parents> set to include the parents of the
2022              # new nodes.
2023              if rev == currentrev: # only display stuff in rev
2024                  gr[0].append(rev)
2025              gr[1].remove(rev)
2026              parents = [p for p in parentsfunc(rev) if p > node.nullrev]
2027              gr[1].update(parents)
2028              for p in parents:
2029                  if p not in pendingset:
2030                      pendingset.add(p)
2031                      heappush(pendingheap, -p)
2032
2033              # Look for a subgroup to display
2034              #
2035              # When unblocked is empty (if clause), we were not waiting for any
2036              # revisions during the first iteration (if no priority was given) or
2037              # we emitted a whole disconnected set of the graph (reached a
2038              # root). In that case we arbitrarily take the oldest known
2039              # subgroup. The heuristic could probably be better.
2040              #
2041              # Otherwise (elif clause), if the subgroup is blocked on
2042              # a revision we just emitted, we can safely emit it as
2043              # well.
2044              if not unblocked:
2045                  if len(groups) > 1: # display other subset
2046                      targetidx = 1
2047                      gr = groups[1]
2048              elif not gr[1] & unblocked:
2049                  gr = None
2050
2051              if gr is not None:
2052                  # update the set of awaited revisions with the ones from the
2053                  # subgroup
2054                  unblocked |= gr[1]
2055                  # output all revisions in the subgroup
2056                  for r in gr[0]:
2057                      yield r
2058                  # delete the subgroup that you just output,
2059                  # unless it is groups[0], in which case you just empty it.
2060                  if targetidx:
2061                      del groups[targetidx]
2062                  else:
2063                      gr[0][:] = []
2064      # Check if we have some subgroup waiting for revisions we are not going to
2065      # iterate over
2066      for g in groups:
2067          for r in g[0]:
2068              yield r
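
For a concrete feel of the ordering, here is a hypothetical driver over the docstring's example graph (the parent map and revision numbers are made up; only the call shape of parentsfunc matches what the sort() predicate passes in):

    # docstring graph: 4 sits on 1 which sits on 0; 3 sits on 2 which sits on 0
    parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (2, -1), 4: (1, -1)}
    print(list(_toposort([0, 1, 2, 3, 4], lambda rev: parents[rev])))
    # [4, 1, 3, 2, 0] -- each branch comes out contiguously, heads before roots
    print(list(_toposort([0, 1, 2, 3, 4], lambda rev: parents[rev], firstbranch={2})))
    # [3, 2, 4, 1, 0] -- pre-seeding firstbranch emits the branch containing rev 2 first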
|
2069 1750
2070 1751  @predicate('subrepo([pattern])')
2071 1752  def subrepo(repo, subset, x):
2072 1753      """Changesets that add, modify or remove the given subrepo. If no subrepo
2073 1754      pattern is named, any subrepo changes are returned.