Merge bitcoin/bitcoin#23799: test: Let test_runner.py start multiple jobs per timeslot
975097f424
Let test_runner.py start multiple jobs per timeslot (Pieter Wuille)

Pull request description:

  test_runner.py currently only checks every 0.5s whether any job has
  finished, and if so, starts at most one new job. At higher parallelism
  it becomes increasingly likely that multiple jobs have finished at the
  same time. Fix this by always noticing *all* finished jobs every
  timeslot, and starting as many new ones.

ACKs for top commit:
  laanwj:
    Code review and lightly tested ACK 975097f424
  prayank23:
    ACK 975097f424

Tree-SHA512: b70c51f05efcde9bc25475c192b86e86b4c399495b42dee20576af3e6b99e8298be8b9e82146abdabbaedb24a13ee158a7c8947518b16fc4f33a3b434935b550
commit 121d47afe3
test/functional/test_runner.py: 1 changed file with 33 additions and 28 deletions
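As a rough illustration of why this matters, here is a standalone simulation (not part of the commit; the job counts and durations below are invented) contrasting the old behavior, which handles at most one finished job per 0.5s polling timeslot, with the new behavior, which drains every job that finished in that timeslot:

def simulate(num_jobs, job_time, parallelism, drain_all):
    """Simulated wall-clock seconds to run all jobs, polling every 0.5s."""
    TICK = 0.5
    clock = 0.0
    running = []  # finish times of currently running jobs
    started = finished = 0
    while finished < num_jobs:
        # Fill every free slot with a new job.
        while len(running) < parallelism and started < num_jobs:
            running.append(clock + job_time)
            started += 1
        clock += TICK  # one polling timeslot elapses
        done = sorted(t for t in running if t <= clock)
        if drain_all:
            # New behavior: notice *all* jobs that finished this timeslot.
            for t in done:
                running.remove(t)
                finished += 1
        elif done:
            # Old behavior: handle at most one finished job per timeslot.
            running.remove(done[0])
            finished += 1
    return clock

# 64 one-second jobs on 16 parallel slots:
print(simulate(64, 1.0, 16, drain_all=False))  # 32.5: throughput capped at ~2 jobs/s
print(simulate(64, 1.0, 16, drain_all=True))   # 4.0

With short tests and high parallelism, the old scheme degenerates into reaping (and therefore starting) roughly two jobs per second no matter how many slots are free; draining all finished jobs removes that cap.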
@@ -530,33 +530,35 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
     max_len_name = len(max(test_list, key=len))
     test_count = len(test_list)
-    for i in range(test_count):
-        test_result, testdir, stdout, stderr = job_queue.get_next()
-        test_results.append(test_result)
-        done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
-        if test_result.status == "Passed":
-            logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
-        elif test_result.status == "Skipped":
-            logging.debug("%s skipped" % (done_str))
-        else:
-            print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
-            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
-            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
-            if combined_logs_len and os.path.isdir(testdir):
-                # Print the final `combinedlogslen` lines of the combined logs
-                print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
-                print('\n============')
-                print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
-                print('============\n')
-                combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
-                if BOLD[0]:
-                    combined_logs_args += ['--color']
-                combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
-                print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
+    i = 0
+    while i < test_count:
+        for test_result, testdir, stdout, stderr in job_queue.get_next():
+            test_results.append(test_result)
+            i += 1
+            done_str = "{}/{} - {}{}{}".format(i, test_count, BOLD[1], test_result.name, BOLD[0])
+            if test_result.status == "Passed":
+                logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
+            elif test_result.status == "Skipped":
+                logging.debug("%s skipped" % (done_str))
+            else:
+                print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
+                print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
+                print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
+                if combined_logs_len and os.path.isdir(testdir):
+                    # Print the final `combinedlogslen` lines of the combined logs
+                    print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
+                    print('\n============')
+                    print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
+                    print('============\n')
+                    combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
+                    if BOLD[0]:
+                        combined_logs_args += ['--color']
+                    combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
+                    print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
 
-            if failfast:
-                logging.debug("Early exiting after test failure")
-                break
+                if failfast:
+                    logging.debug("Early exiting after test failure")
+                    break
 
     print_results(test_results, max_len_name, (int(time.time() - start_time)))
@@ -650,8 +652,9 @@ class TestHandler:
         dot_count = 0
         while True:
-            # Return first proc that finishes
+            # Return all procs that have finished, if any. Otherwise sleep until there is one.
             time.sleep(.5)
+            ret = []
             for job in self.jobs:
                 (name, start_time, proc, testdir, log_out, log_err) = job
                 if proc.poll() is not None:
@@ -670,7 +673,9 @@ class TestHandler:
                         clearline = '\r' + (' ' * dot_count) + '\r'
                         print(clearline, end='', flush=True)
                     dot_count = 0
-                    return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
+                    ret.append((TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr))
+            if ret:
+                return ret
             if self.use_term_control:
                 print('.', end='', flush=True)
             dot_count += 1
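So get_next()'s contract changes from returning a single (TestResult, testdir, stdout, stderr) tuple to returning a non-empty list of them; the new "if ret: return ret" preserves the blocking semantics of sleeping until at least one job has exited. The same drain-all polling pattern, reduced to plain subprocesses (a self-contained sketch assuming a Unix sleep binary, not code from the commit):

import subprocess
import time

def get_finished(jobs):
    """Block until at least one process exits, then return all that have."""
    while True:
        time.sleep(.5)
        ret = [proc for proc in jobs if proc.poll() is not None]
        if ret:
            for proc in ret:
                jobs.remove(proc)
            return ret

jobs = [subprocess.Popen(["sleep", arg]) for arg in ("1", "1", "2")]
while jobs:
    for proc in get_finished(jobs):
        print("pid {} exited with status {}".format(proc.pid, proc.returncode))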