changeset 52458:a814534aaedd
run-tests: add a "--tail-report" argument to analyse run parallelism
This flag displays the final running test for each thread and the timing of its
conclusion. It helps spot when a very slow test is hurting parallelism.
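As a rough illustration of the idea, here is a minimal sketch, not code from this changeset (the TailReport class and its method names are invented for the example): the report boils down to recording, each time a test finishes while the runner can no longer keep every job slot busy, which test ended, how many tests are still running, and when.

import time


class TailReport:
    """Toy model of the --tail-report bookkeeping (illustration only)."""

    def __init__(self, jobs):
        self.jobs = jobs
        self.start = time.monotonic()
        # (finished test name, tests still running, monotonic timestamp)
        self.tail = []

    def record(self, finished, running):
        # Only interesting once the runner can no longer fill every job
        # slot: from that point a single slow test stalls the whole run.
        if finished is not None and running < self.jobs:
            self.tail.append((finished, running, time.monotonic()))

    def render(self):
        lines = ['### test tail-report ###']
        for name, remaining, end_time in self.tail:
            lines.append(
                '[% 13.6f s] %d tests still running; finished %s'
                % (end_time - self.start, remaining, name)
            )
        return '\n'.join(lines)


if __name__ == '__main__':
    report = TailReport(jobs=2)
    report.record('test-fast.t', running=1)
    report.record('test-slow.t', running=0)
    print(report.render())

The patch itself gathers the same data inline in TestSuite.run() and prints it under iolock once the run is over; see the run-tests.py hunks below.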
author    Pierre-Yves David <pierre-yves.david@octobus.net>
date      Wed, 04 Dec 2024 06:17:32 +0100
parents   8dede0df9de9
children  db85e2aaf66e
files     contrib/heptapod-ci.yml tests/run-tests.py tests/test-run-tests.t
diffstat  3 files changed, 45 insertions(+), 4 deletions(-)
--- a/contrib/heptapod-ci.yml  Mon Nov 18 17:25:30 2024 +0100
+++ b/contrib/heptapod-ci.yml  Wed Dec 04 06:17:32 2024 +0100
@@ -290,6 +290,7 @@
     - HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO"
       "$PYTHON" tests/run-tests.py
       --color=always
+      --tail-report
       $PORT_ARG
       $WHEEL_ARG
       $FLAVOR
@@ -548,6 +549,7 @@
       && HGTESTS_ALLOW_NETIO="$TEST_HGTESTS_ALLOW_NETIO"
         $PYTHON tests/run-tests.py
         --color=always
+        --tail-report
         $WHEEL_ARG
         $FLAVOR
         --port `expr 19051 + 1009 "*" $CI_CONCURRENT_ID`
--- a/tests/run-tests.py  Mon Nov 18 17:25:30 2024 +0100
+++ b/tests/run-tests.py  Wed Dec 04 06:17:32 2024 +0100
@@ -728,6 +728,11 @@
     reporting.add_argument(
         "--time", action="store_true", help="time how long each test takes"
     )
+    reporting.add_argument(
+        "--tail-report",
+        action="store_true",
+        help="display information about the final test of each thread",
+    )
     reporting.add_argument("--view", help="external diff viewer")
     reporting.add_argument(
         "--xunit", help="record xunit results at specified path"
@@ -2564,6 +2569,7 @@
         runs_per_test=1,
         loadtest=None,
         showchannels=False,
+        tail_report=False,
         *args,
         **kwargs,
     ):
@@ -2601,6 +2607,7 @@
         self._runs_per_test = runs_per_test
         self._loadtest = loadtest
         self._showchannels = showchannels
+        self._tail_report = tail_report
 
     def run(self, result):
         # We have a number of filters that need to be applied. We do this
@@ -2663,7 +2670,7 @@
                     raise ValueError('Could not find output channel')
                 channels[channel] = "=" + test.name[5:].split(".")[0]
 
-            r = None
+            r = (None, test, None)
             try:
                 test(result)
             except KeyboardInterrupt:
@@ -2703,11 +2710,14 @@
             statthread = threading.Thread(target=stat, name="stat")
             statthread.start()
 
+        start_time = time.monotonic()
+        tail_data = []
         try:
             while tests or running:
+                finished = None
                 if not done.empty() or running == self._jobs or not tests:
                     try:
-                        done.get(True, 1)
+                        _, finished, _ = done.get(True, 1)
                         running -= 1
                         if result and result.shouldStop:
                             stoppedearly = True
@@ -2730,6 +2740,8 @@
                     )
                     t.start()
                     running += 1
+                if finished is not None and running < self._jobs:
+                    tail_data.append((finished, running, time.monotonic()))
 
             # If we stop early we still need to wait on started tests to
             # finish. Otherwise, there is a race between the test completing
@@ -2746,7 +2758,16 @@
             for test in runtests:
                 test.abort()
 
-        channels = []
+        if self._tail_report:
+            with iolock:
+                sys.stdout.write('\n### test tail-report ###\n')
+                sys.stdout.flush()
+                channels = []
+                for test, remaining, end_time in tail_data:
+                    m = "[% 13.6f s] %d tests still running; finished %s\n"
+                    m %= (end_time - start_time, remaining, test)
+                    sys.stdout.write(m)
+                    sys.stdout.flush()
 
         return result
@@ -2840,7 +2861,8 @@
             ignored = len(self._result.ignored)
 
         with iolock:
-            self.stream.writeln('')
+            if not self._runner.options.tail_report:
+                self.stream.writeln('')
 
             if not self._runner.options.noskips:
                 for test, msg in sorted(
@@ -3655,6 +3677,7 @@
             loop=self.options.loop,
             runs_per_test=self.options.runs_per_test,
             showchannels=self.options.showchannels,
+            tail_report=self.options.tail_report,
            tests=tests,
            loadtest=_reloadtest,
        )
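Because the report goes to stdout, it can also be post-processed. Below is a hypothetical helper, not part of this changeset, that assumes the report lines keep the exact format string used above ("[% 13.6f s] %d tests still running; finished %s") and measures how long a run spent draining its final tests:

import re
import sys

# Matches tail-report lines emitted by run-tests.py --tail-report, e.g.
# "[     12.345678 s] 1 tests still running; finished test-foo.t"
LINE = re.compile(
    r'\[\s*(?P<t>[\d.]+) s\] (?P<running>\d+) tests still running; '
    r'finished (?P<test>\S+)'
)


def tail_duration(lines):
    """Return (first, last) tail-report timestamps, or None if absent."""
    times = [float(m.group('t')) for m in map(LINE.match, lines) if m]
    if not times:
        return None
    return times[0], times[-1]


if __name__ == '__main__':
    span = tail_duration(sys.stdin.read().splitlines())
    if span:
        print('tail phase: %.1fs to %.1fs (%.1fs spent draining)'
              % (span[0], span[1], span[1] - span[0]))

Piping the output of a --tail-report run through such a script gives a single number for how long the run was limited by its slowest remaining tests.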
--- a/tests/test-run-tests.t  Mon Nov 18 17:25:30 2024 +0100
+++ b/tests/test-run-tests.t  Wed Dec 04 06:17:32 2024 +0100
@@ -1065,6 +1065,22 @@
   start   end     cuser   csys    real      Test
   \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8} \s*[\d\.]{5,8}   test-success.t (re)
 
+test for --tail-report
+====================================
+
+  $ rt --tail-report --jobs 2 --nodiff
+  running 2 tests using 2 parallel processes
+  ?? (glob)
+  ### test tail-report ###
+  [* s] 1 tests still running; finished test-*.t (glob)
+  [* s] 0 tests still running; finished test-*.t (glob)
+  Failed test-failure.t: output changed
+  # Ran 2 tests, 0 skipped, 1 failed.
+  python hash seed: * (glob)
+  [1]
+
+
+
 Skips
 ================
   $ cat > test-skip.t <<EOF