Mirror of https://github.com/bitcoin/bitcoin.git, synced 2025-03-05 14:06:27 -05:00
Merge bitcoin/bitcoin#30291: test: write functional test results to csv
ad06e68399
test: write functional test results to csv (tdb3)

Pull request description:

Adds argument `--resultsfile` to test_runner.py. Enables functional test results to be written to a CSV file for processing by other applications (or for historical archiving). Test name, status, and duration are written to the file provided with the argument.

Since `test_runner.py` is being touched, this also fixes a misspelling (linter warning). It can be split into its own commit if desired.

#### Notes

- The total runtime of the functional tests seems to have increased on my development machines over the past few months (more tests added, individual test runtimes growing, etc.). Was interested in recording test runtime data over time to detect trends. Initially searched `doc/benchmarking.md`, existing PRs, and issues, but didn't immediately see this type of capability or an alternate solution (please chime in if you know of one!). Thought it would be beneficial to add this capability to `test_runner` to facilitate this type of data analysis (and potentially other use cases).
- Saw https://github.com/bitcoin/bitcoin/blob/master/test/functional/README.md#benchmarking-with-perf, and this PR's higher-level data seems complementary to it.
- Was on the fence as to whether to expand `print_results()` (i.e. take advantage of the same loop over `test_results`) or implement a separate `write_results()` function. Decided on the latter for now, but am interested in reviewers' thoughts.

#### Example 1: all tests pass

```
$ test/functional/test_runner.py --resultsfile functional_test_results.csv --cachedir=/mnt/tmp/cache --tmpdir=/mnt/tmp feature_blocksdir wallet_startup feature_config_args mempool_accept
Temporary test directory at /mnt/tmp/test_runner_₿_🏃_20240614_201625
Test results will be written to functional_test_results.csv
...
$ cat functional_test_results.csv
test,status,duration(seconds)
feature_blocksdir.py,Passed,1
feature_config_args.py,Passed,29
mempool_accept.py,Passed,9
wallet_startup.py,Passed,2
ALL,Passed,29
```

#### Example 2: one test failure

```
$ cat functional_test_results.csv
test,status,duration(seconds)
feature_blocksdir.py,Passed,1
feature_config_args.py,Passed,28
wallet_startup.py,Passed,2
mempool_accept.py,Failed,1
ALL,Failed,28
```

ACKs for top commit:
  maflcko: re-ACK ad06e68399
  kevkevinpal: tACK ad06e68399
  achow101: ACK ad06e68399
  rkrux: tACK ad06e68399
  brunoerg: ACK ad06e68399
  marcofleon: Good idea, tested ACK ad06e68399
Tree-SHA512: 561194406cc744905518aa5ac6850c07c4aaecdaf5d4d8b250671b6e90093d4fc458f050e8a85374e66359cc0e0eaceba5eb24092c55f0d8f349d744a32ef76c
Commit 808898fddf

1 changed file, test/functional/test_runner.py, with 28 additions and 3 deletions
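The description's motivation is tracking runtime trends over time. As a hypothetical sketch (not part of this PR), a file produced by `--resultsfile` could be post-processed like this; the filename and the "slowest tests" report are assumptions:

```python
import csv

# Read a results file written by test_runner.py --resultsfile and report the
# slowest tests. The filename and the top-5 cut are illustrative choices.
with open("functional_test_results.csv", encoding="utf8") as f:
    rows = [r for r in csv.DictReader(f) if r["test"] != "ALL"]  # drop summary row

rows.sort(key=lambda r: int(r["duration(seconds)"]), reverse=True)
for row in rows[:5]:
    print(f"{row['test']}: {row['status']} in {row['duration(seconds)']}s")
```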
```diff
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -15,8 +15,10 @@ For a description of arguments recognized by test scripts, see
 import argparse
 from collections import deque
 import configparser
+import csv
 import datetime
 import os
+import pathlib
 import platform
 import time
 import shutil
```
```diff
@@ -439,6 +441,7 @@ def main():
     parser.add_argument('--filter', help='filter scripts to run by regular expression')
     parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
                         help="Leave bitcoinds and test.* datadir on exit or error")
+    parser.add_argument('--resultsfile', '-r', help='store test results (as CSV) to the provided file')

     args, unknown_args = parser.parse_known_args()
```
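A self-contained sketch of how the new flag behaves in argparse (a toy parser, not the runner's real one): `-r` is a short alias, and `args.resultsfile` defaults to `None` when the flag is omitted.

```python
import argparse

# Toy parser reproducing just the new option.
parser = argparse.ArgumentParser()
parser.add_argument('--resultsfile', '-r', help='store test results (as CSV) to the provided file')

args, unknown_args = parser.parse_known_args(['-r', 'functional_test_results.csv'])
print(args.resultsfile)  # functional_test_results.csv

args, unknown_args = parser.parse_known_args([])
print(args.resultsfile)  # None: no results file is written
```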
```diff
@@ -471,6 +474,13 @@ def main():

     logging.debug("Temporary test directory at %s" % tmpdir)

+    results_filepath = None
+    if args.resultsfile:
+        results_filepath = pathlib.Path(args.resultsfile)
+        # Stop early if the parent directory doesn't exist
+        assert results_filepath.parent.exists(), "Results file parent directory does not exist"
+        logging.debug("Test results will be written to " + str(results_filepath))
+
     enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")

     if not enable_bitcoind:
```
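The assert makes a bad path fail before any tests run, rather than at write time after a long run. A small illustration of the `pathlib` check (the paths are made up):

```python
import pathlib

# A bare filename has parent ".", which exists; a path under a missing
# directory trips the check before any test time is spent.
for candidate in ("functional_test_results.csv", "no_such_dir/results.csv"):
    p = pathlib.Path(candidate)
    print(p, "->", "ok" if p.parent.exists() else "parent directory does not exist")
```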
```diff
@@ -557,9 +567,10 @@ def main():
         combined_logs_len=args.combinedlogslen,
         failfast=args.failfast,
         use_term_control=args.ansi,
+        results_filepath=results_filepath,
     )

-def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control):
+def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control, results_filepath=None):
     args = args or []

     # Warn if bitcoind is already running
```
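Since `run_tests` declares keyword-only parameters (the leading `*`), the new `results_filepath` must be passed by name and falls back to `None`, meaning no file is written. A toy illustration of the pattern:

```python
# Toy signature mirroring the keyword-only style of run_tests.
def run_tests(*, test_list, results_filepath=None):
    return results_filepath

print(run_tests(test_list=["feature_blocksdir.py"]))        # None: no results file
print(run_tests(test_list=[], results_filepath="out.csv"))  # out.csv
# run_tests(["feature_blocksdir.py"]) would raise TypeError: arguments are keyword-only
```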
```diff
@@ -651,11 +662,14 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
                 break

         if "[Errno 28] No space left on device" in stdout:
-            sys.exit(f"Early exiting after test failure due to insuffient free space in {tmpdir}\n"
+            sys.exit(f"Early exiting after test failure due to insufficient free space in {tmpdir}\n"
                      f"Test execution data left in {tmpdir}.\n"
                      f"Additional storage is needed to execute testing.")

-    print_results(test_results, max_len_name, (int(time.time() - start_time)))
+    runtime = int(time.time() - start_time)
+    print_results(test_results, max_len_name, runtime)
+    if results_filepath:
+        write_results(test_results, results_filepath, runtime)

     if coverage:
         coverage_passed = coverage.report_rpc_coverage()
```
```diff
@@ -702,6 +716,17 @@ def print_results(test_results, max_len_name, runtime):
     results += "Runtime: %s s\n" % (runtime)
     print(results)


+def write_results(test_results, filepath, total_runtime):
+    with open(filepath, mode="w", encoding="utf8") as results_file:
+        results_writer = csv.writer(results_file)
+        results_writer.writerow(['test', 'status', 'duration(seconds)'])
+        all_passed = True
+        for test_result in test_results:
+            all_passed = all_passed and test_result.was_successful
+            results_writer.writerow([test_result.name, test_result.status, str(test_result.time)])
+        results_writer.writerow(['ALL', ("Passed" if all_passed else "Failed"), str(total_runtime)])
+
+
 class TestHandler:
     """
     Trigger the test scripts passed in via the list.
```
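To see the CSV shape `write_results()` emits, here is a hypothetical harness around a copy of the function; `FakeResult` is a stand-in assuming only the `name`, `status`, `time`, and `was_successful` attributes the function actually reads:

```python
import csv
from collections import namedtuple

FakeResult = namedtuple("FakeResult", ["name", "status", "time", "was_successful"])

def write_results(test_results, filepath, total_runtime):
    # Copy of the function added above.
    with open(filepath, mode="w", encoding="utf8") as results_file:
        results_writer = csv.writer(results_file)
        results_writer.writerow(['test', 'status', 'duration(seconds)'])
        all_passed = True
        for test_result in test_results:
            all_passed = all_passed and test_result.was_successful
            results_writer.writerow([test_result.name, test_result.status, str(test_result.time)])
        results_writer.writerow(['ALL', ("Passed" if all_passed else "Failed"), str(total_runtime)])

write_results([FakeResult("feature_blocksdir.py", "Passed", 1, True),
               FakeResult("mempool_accept.py", "Failed", 9, False)],
              "demo_results.csv", 10)
with open("demo_results.csv", encoding="utf8") as f:
    print(f.read())
# test,status,duration(seconds)
# feature_blocksdir.py,Passed,1
# mempool_accept.py,Failed,9
# ALL,Failed,10
```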