diff --git a/integtest/3ru_1df_multirun_test.py b/integtest/3ru_1df_multirun_test.py
index 6f84b57..d04835c 100644
--- a/integtest/3ru_1df_multirun_test.py
+++ b/integtest/3ru_1df_multirun_test.py
@@ -1,22 +1,20 @@
 import pytest
-import os
-import re
 import copy
-import psutil
 import math
 import urllib.request
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 number_of_data_producers = 3
@@ -99,8 +97,6 @@
 resource_validator.free_memory_needs(20, 32) # 25% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 # The next three variable declarations *must* be present as globals in the test
 # file. They're read by the "fixtures" in conftest.py to determine how
@@ -148,7 +144,7 @@
 
 confgen_arguments = {
     "WIBEth_System": conf_dict,
-    "Software_TPG_System": swtpg_conf,
+    "WIBEth_TPG_System": swtpg_conf,
 }
 
 # The commands to run in dunerc, as a list
@@ -172,26 +168,17 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=True)
 
 
 def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         )
 
 
@@ -218,11 +205,15 @@ def test_data_files(run_dunerc):
     fragment_check_list.append(triggeractivity_frag_params)
 
     # Run some tests on the output data file
-    assert len(run_dunerc.data_files) == expected_number_of_data_files
+    all_ok = len(run_dunerc.data_files) == expected_number_of_data_files
+    if all_ok:
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+            print(f"\n\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({expected_number_of_data_files})")
+    else:
+        print(f"\n\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {expected_number_of_data_files}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
 
-    all_ok = True
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
diff --git a/integtest/3ru_3df_multirun_test.py b/integtest/3ru_3df_multirun_test.py
index d40769d..aa95fab 100644
--- a/integtest/3ru_3df_multirun_test.py
+++ b/integtest/3ru_3df_multirun_test.py
@@ -1,20 +1,19 @@
 import pytest
-import os
-import re
 import copy
 import urllib.request
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 number_of_data_producers = 2
@@ -122,8 +121,6 @@
 resource_validator.free_memory_needs(15, 24) # 25% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 # 29-Dec-2025, KAB: The following comment about three variables is out-of-date.
 # It will be replaced soon, and the comment block above is a start on that.
@@ -169,7 +166,7 @@
 
 confgen_arguments = {
     "WIBEth_System": conf_dict,
-    "Software_TPG_System": swtpg_conf,
+    "WIBEth_TPG_System": swtpg_conf,
 }
 
 # 29-Dec-2025, KAB: added sample process manager choices.
@@ -200,26 +197,17 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=True)
 
 
 def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         )
 
 
@@ -260,7 +248,7 @@ def test_data_files(run_dunerc):
 
     all_ok = True
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
diff --git a/integtest/example_system_test.py b/integtest/example_system_test.py
index a87686a..bcffbcf 100755
--- a/integtest/example_system_test.py
+++ b/integtest/example_system_test.py
@@ -1,6 +1,6 @@
 import pytest
-import os
 import copy
+import os
 import re
 import random
 import string
@@ -8,15 +8,16 @@
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 run_duration = 20 # seconds
@@ -88,8 +89,6 @@
 resource_validator.free_memory_needs(15, 24) # 25% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 # The arguments to pass to the config generator, excluding the json
 # output directory (the test framework handles that)
@@ -152,33 +151,13 @@ def host_is_at_ehn1(hostname):
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    if not host_is_at_ehn1(hostname) and "EHN1" in current_test:
-        pytest.skip(
-            f"This computer ({hostname}) is not at EHN1, not running EHN1 sessions"
-        )
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=True)
 
 
 def test_log_files(run_dunerc):
     current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    if not host_is_at_ehn1(hostname) and "EHN1" in current_test:
-        pytest.skip(
-            f"This computer ({hostname}) is not at EHN1, not running EHN1 sessions"
-        )
-
     if host_is_at_ehn1(hostname) and "EHN1" in current_test:
         log_dir = pathlib.Path("/log")
         run_dunerc.log_files += list(log_dir.glob(f"log_*_{run_dunerc.daq_session_name}*.txt"))
@@ -201,17 +180,13 @@ def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         )
 
 
 def test_data_files(run_dunerc):
     current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    if not host_is_at_ehn1(hostname) and "EHN1" in current_test:
-        pytest.skip(
-            f"This computer ({hostname}) is not at EHN1, not running EHN1 sessions"
-        )
-
     datafile_params = {
         "Local 1x1 Conf": {"expected_fragment_count": 4, "expected_file_count": 1},
         "Local 2x3 Conf": {"expected_fragment_count": 8, "expected_file_count": 3},
@@ -259,7 +234,7 @@ def test_data_files(run_dunerc):
 
     all_ok = True
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
diff --git a/integtest/fake_data_producer_test.py b/integtest/fake_data_producer_test.py
index cbbdad1..37d58a4 100644
--- a/integtest/fake_data_producer_test.py
+++ b/integtest/fake_data_producer_test.py
@@ -1,21 +1,21 @@
 import pytest
 import os
-import re
 import copy
 import urllib.request
 import math
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 run_duration = 20 # seconds
@@ -58,8 +58,6 @@
 resource_validator.free_memory_needs(4) # double what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 # The next three variable declarations *must* be present as globals in the test
 # file. They're read by the "fixtures" in conftest.py to determine how
@@ -123,26 +121,17 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=True)
 
 
 def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         )
 
 
@@ -163,16 +152,15 @@ def test_data_files(run_dunerc):
     fragment_check_list = [frag_params]
 
     # Run some tests on the output data file
-    all_ok = True
-    all_ok &= len(run_dunerc.data_files) == expected_number_of_data_files
-    print("") # Clear potential dot from pytest
+    all_ok = len(run_dunerc.data_files) == expected_number_of_data_files
     if all_ok:
-        print(f"\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({expected_number_of_data_files})")
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+            print(f"\n\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({expected_number_of_data_files})")
     else:
-        print(f"\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {expected_number_of_data_files}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
+        print(f"\n\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {expected_number_of_data_files}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
 
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
diff --git a/integtest/long_window_readout_test.py b/integtest/long_window_readout_test.py
index ffd9fe4..0f10465 100644
--- a/integtest/long_window_readout_test.py
+++ b/integtest/long_window_readout_test.py
@@ -14,22 +14,20 @@
 #
 import pytest
 import os
-import re
 import copy
-import shutil
-import psutil
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# 20-May-2025, KAB: tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 output_path_parameter = "."
@@ -84,8 +82,6 @@
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 25) # 25% more than what we need
 resource_validator.total_disk_space_needs(actual_output_path, recommended_total_disk_space=40) # double what we need
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 # The next three variable declarations *must* be present as globals in the test
 # file. They're read by the "fixtures" in conftest.py to determine how
@@ -185,26 +181,17 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=False)
 
 
 def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         )
 
 
@@ -214,17 +201,16 @@ def test_data_files(run_dunerc):
     fragment_check_list = [triggercandidate_frag_params]
     fragment_check_list.append(wibeth_frag_params) # WIBEth
 
-    all_ok = True
     # Run some tests on the output data file
-    all_ok &= len(run_dunerc.data_files) == expected_number_of_data_files
-    print("") # Clear potential dot from pytest
+    all_ok = len(run_dunerc.data_files) == expected_number_of_data_files
    if all_ok:
-        print(f"\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({expected_number_of_data_files})")
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+            print(f"\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({expected_number_of_data_files})")
     else:
-        print(f"\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {expected_number_of_data_files}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
+        print(f"\n\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {expected_number_of_data_files}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
 
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
@@ -249,17 +235,19 @@ def test_cleanup(run_dunerc):
             pathlist_string += " " + str(data_file.parent)
 
     if pathlist_string and filelist_string:
-        print("============================================")
-        print("Listing the hdf5 files before deleting them:")
-        print("============================================")
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.integtest_debug):
+            print("============================================")
+            print("Listing the hdf5 files before deleting them:")
+            print("============================================")
 
-        os.system(f"df -h {pathlist_string}")
-        print("--------------------")
-        os.system(f"ls -alF {filelist_string}")
+            os.system(f"df -h {pathlist_string}")
+            print("--------------------")
+            os.system(f"ls -alF {filelist_string}")
 
         for data_file in run_dunerc.data_files:
             data_file.unlink()
 
-        print("--------------------")
-        os.system(f"df -h {pathlist_string}")
-        print("============================================")
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.integtest_debug):
+            print("--------------------")
+            os.system(f"df -h {pathlist_string}")
+            print("============================================")
diff --git a/integtest/minimal_system_quick_test.py b/integtest/minimal_system_quick_test.py
index e09f9b7..d936e1f 100644
--- a/integtest/minimal_system_quick_test.py
+++ b/integtest/minimal_system_quick_test.py
@@ -1,20 +1,19 @@
 import pytest
-import os
-import re
 import urllib.request
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.resource_validation as resource_validation
 import integrationtest.opmon_metric_checks as opmon_metric_checks
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 number_of_data_producers = 2
@@ -64,8 +63,6 @@
 resource_validator.free_memory_needs(5, 8) # 25% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 # The next three variable declarations *must* be present as globals in the test
 # file. They're read by the "fixtures" in conftest.py to determine how
@@ -117,19 +114,9 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=False)
 
 
 def test_log_files(run_dunerc):
@@ -151,25 +138,26 @@ def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         )
 
 
 def test_data_files(run_dunerc):
     # Run some tests on the output data file
     all_ok = len(run_dunerc.data_files) == expected_number_of_data_files
-    print("") # Clear potential dot from pytest
     if all_ok:
-        print(f"\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({expected_number_of_data_files})")
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+            print(f"\n\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({expected_number_of_data_files})")
     else:
-        print(f"\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {expected_number_of_data_files}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
+        print(f"\n\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {expected_number_of_data_files}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
 
     fragment_check_list = [triggercandidate_frag_params, hsi_frag_params]
     fragment_check_list.append(wibeth_frag_params)
     nontrig_fragment_check_list = [hsi_frag_params, wibeth_frag_params]
 
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
@@ -190,7 +178,8 @@ def test_data_files(run_dunerc):
 
 # 26-Nov-2025, KAB: added some sample opmon metric checks, for demonstration purposes
 def test_metric_files(run_dunerc):
-    print("") # Clear potential dot from pytest
+    if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+        print("") # Clear potential dot from pytest
 
     # 10-Dec-2025, KAB: we have noticed that sometimes drunc transitions (or other parts of
    # a run control session) take a little longer than expected. This can cause extra metric
@@ -231,10 +220,13 @@ def test_metric_files(run_dunerc):
     # a 20-second run will likely result in 3 metric samples (at 10-second intervals), so a range
     # of 1..5 should always succeed
     all_ok &= opmon_metric_checks.check_metric_sample_count(metric_data, metric_key_list, min_count=1,
-                                                            max_count=max_metric_sample_count)
+                                                            max_count=max_metric_sample_count,
+                                                            verbosity_helper=run_dunerc.verbosity_helper)
 
     # the number of triggers expected in this test is based on the run duration, so we check for
     # a reported number of generated trigger records between slightly above/below that
     all_ok &= opmon_metric_checks.check_metric_value_sum(metric_data, metric_key_list, min_value_sum=run_duration-3,
-                                                         max_value_sum=run_duration+3)
+                                                         max_value_sum=run_duration+3,
+                                                         verbosity_helper=run_dunerc.verbosity_helper)
 
+    assert all_ok
diff --git a/integtest/readout_type_scan_test.py b/integtest/readout_type_scan_test.py
index 0410b62..278f025 100644
--- a/integtest/readout_type_scan_test.py
+++ b/integtest/readout_type_scan_test.py
@@ -1,20 +1,20 @@
 import pytest
 import os
-import re
 import copy
 import urllib.request
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 number_of_data_producers = 2
@@ -171,8 +171,6 @@
 resource_validator.free_memory_needs(6, 10) # 20% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 # The next three variable declarations *must* be present as globals in the test
 # file. They're read by the "fixtures" in conftest.py to determine how
@@ -380,26 +378,17 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=True)
 
 
 def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         )
 
 
@@ -464,16 +453,15 @@ def test_data_files(run_dunerc):
     )
 
     # Run some tests on the output data file
-    all_ok = True
-    all_ok &= len(run_dunerc.data_files) == expected_number_of_data_files
-    print("") # Clear potential dot from pytest
+    all_ok = len(run_dunerc.data_files) == expected_number_of_data_files
     if all_ok:
-        print(f"\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({expected_number_of_data_files})")
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+            print(f"\n\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({expected_number_of_data_files})")
     else:
-        print(f"\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {expected_number_of_data_files}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
+        print(f"\n\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {expected_number_of_data_files}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
 
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
diff --git a/integtest/sample_ehn1_multihost_test.py b/integtest/sample_ehn1_multihost_test.py
index d9fe8bb..891c44e 100644
--- a/integtest/sample_ehn1_multihost_test.py
+++ b/integtest/sample_ehn1_multihost_test.py
@@ -1,10 +1,9 @@
 # 29-Jan-2026, KAB: Steps to run this test:
 # - Log into any np04-srv-XYZ computer and set up a software area with the
 #   appropriate branch of daqsystemtest.
-# - 'cd $DBT_AREA_ROOT/sourcecode/daqsystemtest/integtest'
 # - 'mkdir -p $HOME/dunedaq/scratch' # only need to do this once per user account
 # - 'export PYTEST_DEBUG_TEMPROOT=$HOME/dunedaq/scratch' # once per login/shell
-# - 'pytest -s ./sample_ehn1_multihost_test.py'
+# - 'pytest -s $DBT_AREA_ROOT/sourcecode/daqsystemtest/integtest/sample_ehn1_multihost_test.py'
 #
 # This test currently puts the various DAQ processes on the following computers:
 # - np04-srv-021: ru-01, ru-controller
@@ -18,24 +17,27 @@
 # on a computer other than "localhost" was also to show that it can be done.
 #
 # To enable the capturing of TRACE messages on all of the computers...
-# - 'export TRACE_FILE=/tmp/pytest-of-${USER}/log/${USER}_dunedaq.trace' # once per login/shell
 # - 'mkdir -p /tmp/pytest-of-${USER}/log' # only need to do this once per computer
 # - 'ssh np04-srv-021 "mkdir -p /tmp/pytest-of-${USER}/log"' # only once per user
 # - 'ssh np04-srv-022 "mkdir -p /tmp/pytest-of-${USER}/log"' # only once per user
 # - 'ssh np04-srv-028 "mkdir -p /tmp/pytest-of-${USER}/log"' # only once per user
 # - 'ssh np04-srv-029 "mkdir -p /tmp/pytest-of-${USER}/log"' # only once per user
-# - 'pytest -s ./sample_ehn1_multihost_test.py'
+# - 'export TRACE_FILE=/tmp/pytest-of-${USER}/log/${USER}_dunedaq.trace' # once per login/shell
+# - 'pytest -s $DBT_AREA_ROOT/sourcecode/daqsystemtest/integtest/sample_ehn1_multihost_test.py'
 
 import pytest
 import os
 import copy
-import re
 import string
-import pathlib
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
+
+import functools
+print = functools.partial(print, flush=True) # always flush print() output
 
 pytest_plugins = "integrationtest.integrationtest_drunc"
 
@@ -47,7 +49,6 @@
 expected_event_count = run_duration * (1.0 + 3.0) # 1 from RTCM, 3 from FakeHSI
 ta_prescale = 1000
 expected_event_count_tolerance = expected_event_count / 10.0
-hostname = os.uname().nodename
 
 wibeth_frag_params = {
     "fragment_type_description": "WIBEth",
@@ -118,38 +119,30 @@
 # the software release from CVMFS onto all of those computers (so the startup of
 # DAQ apps such as the ConnectivityServer don't take a long time initially).
 import subprocess
+import socket
+computers_that_are_needed = ["np04-srv-021", "np04-srv-022", "np04-srv-028", "np04-srv-029"]
 computers_that_are_unreachable = []
+hostname = socket.getfqdn()
 sw_area_root = os.environ.get("DBT_AREA_ROOT")
-if sw_area_root is not None:
-    print("")
-    needed_computer="np04-srv-021"
-    print(f"Confirming that we can ssh to {needed_computer}...")
-    proc = subprocess.Popen(f"ssh {needed_computer} 'cd {sw_area_root}; . ./env.sh'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    proc.communicate()
-    retval = proc.returncode
-    if retval != 0:
-        computers_that_are_unreachable.append(needed_computer)
-    needed_computer="np04-srv-022"
-    print(f"Confirming that we can ssh to {needed_computer}...")
-    proc = subprocess.Popen(f"ssh {needed_computer} 'cd {sw_area_root}; . ./env.sh'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    proc.communicate()
-    retval = proc.returncode
-    if retval != 0:
-        computers_that_are_unreachable.append(needed_computer)
-    needed_computer="np04-srv-028"
-    print(f"Confirming that we can ssh to {needed_computer}...")
-    proc = subprocess.Popen(f"ssh {needed_computer} 'cd {sw_area_root}; . ./env.sh'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    proc.communicate()
-    retval = proc.returncode
-    if retval != 0:
-        computers_that_are_unreachable.append(needed_computer)
-    needed_computer="np04-srv-029"
-    print(f"Confirming that we can ssh to {needed_computer}...")
-    proc = subprocess.Popen(f"ssh {needed_computer} 'cd {sw_area_root}; . ./env.sh'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    proc.communicate()
-    retval = proc.returncode
-    if retval != 0:
-        computers_that_are_unreachable.append(needed_computer)
+if sw_area_root is not None and ".cern.ch" in hostname:
+    sw_setup_script = ""
+    if os.path.exists(f"{sw_area_root}/env.sh"):
+        sw_setup_script = "env.sh"
+    elif os.path.exists(f"{sw_area_root}/dbt-setup-release-env.sh"):
+        sw_setup_script = "dbt-setup-release-env.sh"
+    if sw_setup_script:
+        print("")
+        for needed_computer in computers_that_are_needed:
+            print(f"Confirming that we can ssh to {needed_computer}...")
+            proc = subprocess.Popen(f"ssh {needed_computer} 'cd {sw_area_root}; . ./{sw_setup_script}'", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            proc.communicate()
+            retval = proc.returncode
+            if retval != 0:
+                computers_that_are_unreachable.append(needed_computer)
+    else:
+        computers_that_are_unreachable = ["Unable to determine which software area setup script to use"]
+elif ".cern.ch" not in hostname:
+    computers_that_are_unreachable = [f"This test is meant to be run at CERN (hostname {hostname} does not contain .cern.ch)"]
 else:
     computers_that_are_unreachable = ["Unable to determine the value of the DBT_AREA_ROOT env var"]
@@ -296,7 +289,12 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc, capsys):
+def test_dunerc_success(run_dunerc, capsys, caplog):
+    if ".cern.ch" not in hostname:
+        with capsys.disabled():
+            print(f"\n\n\N{LARGE YELLOW CIRCLE} It is not possible to run this test on this computer ({hostname}):")
+            print(f"    {computers_that_are_unreachable}")
+        pytest.skip(f"One or more needed computers are unreachable ({computers_that_are_unreachable}).")
     if len(computers_that_are_unreachable) > 0:
         with capsys.disabled():
             print(f"\n\n\N{LARGE YELLOW CIRCLE} The following computers are needed for this test but are unreachable from this computer ({hostname}) via ssh:")
@@ -311,27 +309,19 @@ def test_dunerc_success(run_dunerc, capsys):
             print("\N{LARGE YELLOW CIRCLE} Please see the comments at the top of this integtest for more information.")
         pytest.skip("The PYTEST_DEBUG_TEMPROOT env var has not been set to point to a valid directory.")
 
-    with capsys.disabled():
-        print("")
-        print("\n\n\N{LARGE YELLOW CIRCLE} PLEASE NOTE: this script is cleaning up stale _gunicorn_ processes on np04-srv-028...")
-        print("")
-    proc = subprocess.Popen(f"ssh np04-srv-028 killall gunicorn", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    proc.communicate()
-    retval = proc.returncode
-    if retval != 0:
-        print("*** WARNING: the cleanup of stale _gunicorn_ process on np04-srv-028 did not succeed...")
-
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_dunerc0\].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
+    # 08-Apr-2026, KAB: maybe gunicorn shutdown is working now?
+    #with capsys.disabled():
+    #    print("")
+    #    print("\n\n\N{LARGE YELLOW CIRCLE} PLEASE NOTE: this script is cleaning up stale _gunicorn_ processes on np04-srv-028...")
+    #    print("")
+    #proc = subprocess.Popen(f"ssh np04-srv-028 killall gunicorn", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    #proc.communicate()
+    #retval = proc.returncode
+    #if retval != 0:
+    #    print("*** WARNING: the cleanup of stale _gunicorn_ process on np04-srv-028 did not succeed...")
 
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+    # check on run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=False)
 
 
 def test_log_files(run_dunerc):
@@ -358,7 +348,8 @@ def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         )
 
 
@@ -413,7 +404,7 @@ def test_data_files(run_dunerc):
 
     all_ok = True
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
@@ -445,12 +436,12 @@ def test_tpstream_files(run_dunerc):
 
     assert len(tpstream_files) == 1 # one for each run
 
-    print("")
+    #print("")
     all_ok = True
     for idx in range(len(tpstream_files)):
         base_filename = os.path.basename(tpstream_files[idx])
-        print(f"Checking {base_filename}...")
-        data_file = data_file_checks.DataFile(tpstream_files[idx])
+        #print(f"Checking {base_filename}...")
+        data_file = data_file_checks.DataFile(tpstream_files[idx], run_dunerc.verbosity_helper)
         # all_ok &= data_file_checks.sanity_check(data_file) # Sanity check doesn't work for stream files
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
diff --git a/integtest/small_footprint_quick_test.py b/integtest/small_footprint_quick_test.py
index 053911f..ff1934f 100644
--- a/integtest/small_footprint_quick_test.py
+++ b/integtest/small_footprint_quick_test.py
@@ -1,19 +1,18 @@
 import pytest
-import os
-import re
 import urllib.request
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 number_of_data_producers = 1
@@ -64,8 +63,6 @@
 resource_validator.free_memory_needs(4, 6) # 33% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 # The next three variable declarations *must* be present as globals in the test
 # file. They're read by the "fixtures" in conftest.py to determine how
@@ -104,26 +101,17 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=False)
 
 
 def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         )
 
 
@@ -136,7 +124,7 @@ def test_data_files(run_dunerc):
 
     all_ok = True
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
diff --git a/integtest/tpg_state_collection_test.py b/integtest/tpg_state_collection_test.py
index 6132c63..f833a19 100644
--- a/integtest/tpg_state_collection_test.py
+++ b/integtest/tpg_state_collection_test.py
@@ -1,20 +1,19 @@
 import pytest
-import os
-import re
 import urllib.request
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.opmon_metric_checks as opmon_metric_checks
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 number_of_data_producers = 2
@@ -113,8 +112,6 @@
 resource_validator.free_memory_needs(6, 10) # 20% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 object_databases = ["config/daqsystemtest/integrationtest-objects.data.xml"]
 
@@ -189,7 +186,7 @@
     )
 )
 
-confgen_arguments = {"Software_TPG_System": conf_dict}
+confgen_arguments = {"WIBEth_TPG_System": conf_dict}
 
 # The commands to run in dunerc, as a list
 dunerc_command_list = (
@@ -206,26 +203,17 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=False)
 
 
 def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
        )
 
 
@@ -261,7 +249,7 @@ def test_data_files(run_dunerc):
 
     all_ok = True
     for idx in range(len(run_dunerc.data_files)):
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[idx], run_dunerc.verbosity_helper)
         all_ok &= data_file_checks.sanity_check(data_file)
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
@@ -289,7 +277,7 @@ def test_tpstream_files(run_dunerc):
 
     all_ok = True
     for idx in range(len(tpstream_files)):
-        data_file = data_file_checks.DataFile(tpstream_files[idx])
+        data_file = data_file_checks.DataFile(tpstream_files[idx], run_dunerc.verbosity_helper)
         # all_ok &= data_file_checks.sanity_check(data_file) # Sanity check doesn't work for stream files
         all_ok &= data_file_checks.check_file_attributes(data_file)
         all_ok &= data_file_checks.check_event_count(
@@ -308,7 +296,8 @@ def test_tpstream_files(run_dunerc):
 
 # 26-Nov-2025, KAB: added checking of opmon metrics to verify that the ones that are
 # specifically enabled in this test work as expected.
 def test_metric_files(run_dunerc):
-    print("") # Clear potential dot from pytest
+    if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+        print("") # Clear potential dot from pytest
 
     metric_data = opmon_metric_checks.collate_opmon_data_from_files(run_dunerc.opmon_files)
 
@@ -317,8 +306,10 @@ def test_metric_files(run_dunerc):
 
    # *** Check that the pedestal subtraction processor metrics are being produced as expected.
     # DLH-0, 'accum' metrics
     metric_key_list = [run_dunerc.daq_session_name, "ru-det-conn-0", "DLH-0", "WIBEthFrameProcessor", "def-wib-processor", "datahandlinglibs.TPGProcessorInfo", "*", "accum"]
-    all_ok &= opmon_metric_checks.check_metric_sample_count(metric_data, metric_key_list, min_count=1)
-    all_ok &= opmon_metric_checks.check_metric_value_sum(metric_data, metric_key_list, min_value_sum=1)
+    all_ok &= opmon_metric_checks.check_metric_sample_count(metric_data, metric_key_list, min_count=1,
+                                                            verbosity_helper=run_dunerc.verbosity_helper)
+    all_ok &= opmon_metric_checks.check_metric_value_sum(metric_data, metric_key_list, min_value_sum=1,
+                                                         verbosity_helper=run_dunerc.verbosity_helper)
 
     # DLH-0, 'pedestal' metrics
     # (In this test, the calculation of the pedestal for each WIB channel [used in TP
@@ -331,13 +322,17 @@ def test_metric_files(run_dunerc):
     # Because of all of that, the pedestal value check in this section can verify that the metric
     # reporting system sees a pedestal value of zero.)
     metric_key_list = [run_dunerc.daq_session_name, "ru-det-conn-0", "DLH-0", "WIBEthFrameProcessor", "def-wib-processor", "datahandlinglibs.TPGProcessorInfo", "*", "pedestal"]
-    all_ok &= opmon_metric_checks.check_metric_sample_count(metric_data, metric_key_list, min_count=1)
-    all_ok &= opmon_metric_checks.check_metric_value_sum(metric_data, metric_key_list, min_value_sum=0, max_value_sum=0)
+    all_ok &= opmon_metric_checks.check_metric_sample_count(metric_data, metric_key_list, min_count=1,
+                                                            verbosity_helper=run_dunerc.verbosity_helper)
+    all_ok &= opmon_metric_checks.check_metric_value_sum(metric_data, metric_key_list, min_value_sum=0, max_value_sum=0,
+                                                         verbosity_helper=run_dunerc.verbosity_helper)
 
     # DLH-1, 'accum' metrics
     metric_key_list = [run_dunerc.daq_session_name, "ru-det-conn-0", "DLH-1", "WIBEthFrameProcessor", "def-wib-processor", "datahandlinglibs.TPGProcessorInfo", "*", "accum"]
-    all_ok &= opmon_metric_checks.check_metric_sample_count(metric_data, metric_key_list, min_count=1)
-    all_ok &= opmon_metric_checks.check_metric_value_sum(metric_data, metric_key_list, min_value_sum=1)
+    all_ok &= opmon_metric_checks.check_metric_sample_count(metric_data, metric_key_list, min_count=1,
+                                                            verbosity_helper=run_dunerc.verbosity_helper)
+    all_ok &= opmon_metric_checks.check_metric_value_sum(metric_data, metric_key_list, min_value_sum=1,
+                                                         verbosity_helper=run_dunerc.verbosity_helper)
 
     # DLH-1, 'pedestal' metrics
     # (In this test, the calculation of the pedestal for each WIB channel [used in TP
@@ -350,7 +345,9 @@ def test_metric_files(run_dunerc):
     # Because of all of that, the pedestal value check in this section can verify that the metric
     # reporting system sees a pedestal value of zero.)
     metric_key_list = [run_dunerc.daq_session_name, "ru-det-conn-0", "DLH-1", "WIBEthFrameProcessor", "def-wib-processor", "datahandlinglibs.TPGProcessorInfo", "*", "pedestal"]
-    all_ok &= opmon_metric_checks.check_metric_sample_count(metric_data, metric_key_list, min_count=1)
-    all_ok &= opmon_metric_checks.check_metric_value_sum(metric_data, metric_key_list, min_value_sum=0, max_value_sum=0)
+    all_ok &= opmon_metric_checks.check_metric_sample_count(metric_data, metric_key_list, min_count=1,
+                                                            verbosity_helper=run_dunerc.verbosity_helper)
+    all_ok &= opmon_metric_checks.check_metric_value_sum(metric_data, metric_key_list, min_value_sum=0, max_value_sum=0,
+                                                         verbosity_helper=run_dunerc.verbosity_helper)
 
     assert all_ok
diff --git a/integtest/tpreplay_test.py b/integtest/tpreplay_test.py
index 1366813..1cb3f81 100644
--- a/integtest/tpreplay_test.py
+++ b/integtest/tpreplay_test.py
@@ -28,7 +28,6 @@
 import pathlib
 import pytest
 import random
-import re
 import shutil
 import string
 import tempfile
@@ -36,8 +35,10 @@
 import integrationtest.data_classes as data_classes
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
 from daqconf.consolidate import copy_configuration
 from pathlib import Path
@@ -47,11 +48,10 @@ def _cleanup_tmpdir():
     if os.path.exists(tmpdirname):
         shutil.rmtree(tmpdirname)
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Run setup
 run_duration = 20 # seconds
@@ -81,8 +81,6 @@ def _cleanup_tmpdir():
 resource_validator.free_memory_needs(3, 4) # 50% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 ### Config setup
 # Create temp config
@@ -255,19 +253,9 @@ def _cleanup_tmpdir():
 
 ### Tests
 # Run control
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=True)
 
 # Log files
 def test_log_files(run_dunerc):
@@ -296,7 +284,8 @@ def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         ), f"Errors found in log files: {run_dunerc.log_files}"
 
 # Data files
@@ -315,26 +304,27 @@ def test_data_files(run_dunerc):
     for key in datafile_params.keys():
         if key in current_test:
             selected_params = datafile_params[key]
-            print("Selected params for", key, ":", selected_params)
+            if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.integtest_debug):
+                print("Selected params for", key, ":", selected_params)
             break
     if not selected_params:
         print(f"\n*** ERROR: unable to determine the datafile_params for test {current_test}.")
 
     ### Run some tests on the output data file
-    all_ok = True
-
-    all_ok &= len(run_dunerc.data_files) == selected_params["n_data_files"]
+    all_ok = len(run_dunerc.data_files) == selected_params["n_data_files"]
     if all_ok:
-        print(f"\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({selected_params['n_data_files']})")
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+            print(f"\n\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({selected_params['n_data_files']})")
     else:
-        print(f"\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {selected_params['n_data_files']}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
+        print(f"\n\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {selected_params['n_data_files']}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")
 
     ## Other test
     # number of SIDs
-    data_file = data_file_checks.DataFile(run_dunerc.data_files[0])
+    data_file = data_file_checks.DataFile(run_dunerc.data_files[0], run_dunerc.verbosity_helper)
     all_ok &= data_file_checks.check_n_unique_sids(data_file, selected_params['n_sids_tp'], selected_params['n_sids_ta'], selected_params['n_sids_tc'] )
     if all_ok:
-        print(f"\N{WHITE HEAVY CHECK MARK} The expected number of unique Source IDs was found ({selected_params['n_sids_tp'], selected_params['n_sids_ta'], selected_params['n_sids_tc']})")
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+            print(f"\N{WHITE HEAVY CHECK MARK} The expected number of unique Source IDs was found ({selected_params['n_sids_tp'], selected_params['n_sids_ta'], selected_params['n_sids_tc']})")
     else:
         print(f"\N{POLICE CARS REVOLVING LIGHT} The number of unique Source IDs is NOT as expected ({selected_params['n_sids_tp'], selected_params['n_sids_ta'], selected_params['n_sids_tc']})! \N{POLICE CARS REVOLVING LIGHT}")
diff --git a/integtest/tpstream_writing_test.py b/integtest/tpstream_writing_test.py
index e78954f..2783fc3 100644
--- a/integtest/tpstream_writing_test.py
+++ b/integtest/tpstream_writing_test.py
@@ -1,19 +1,18 @@
 import pytest
-import os
-import re
 import urllib.request
 
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.data_classes as data_classes
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels
 
-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True) # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"
 
 # Values that help determine the running conditions
 number_of_data_producers = 2
@@ -112,8 +111,6 @@
 resource_validator.free_memory_needs(6, 10) # 20% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1) # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")
 
 object_databases = ["config/daqsystemtest/integrationtest-objects.data.xml"]
 
@@ -168,7 +165,7 @@
     )
 )
 
-confgen_arguments = {"Software_TPG_System": conf_dict}
+confgen_arguments = {"WIBEth_TPG_System": conf_dict}
 
 # The commands to run in dunerc, as a list
 dunerc_command_list = (
@@ -185,26 +182,17 @@
 
 # The tests themselves
 
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
diff --git a/integtest/trigger_bitwords_test.py b/integtest/trigger_bitwords_test.py
index eda7313..9ae24e7 100644
--- a/integtest/trigger_bitwords_test.py
+++ b/integtest/trigger_bitwords_test.py
@@ -29,20 +29,20 @@
 import pathlib
 import pytest
 import random
-import re
 import string

 import integrationtest.data_classes as data_classes
 import integrationtest.data_file_checks as data_file_checks
 import integrationtest.log_file_checks as log_file_checks
+import integrationtest.basic_checks as basic_checks
 import integrationtest.resource_validation as resource_validation
 from integrationtest.get_pytest_tmpdir import get_pytest_tmpdir
+from integrationtest.verbosity_helper import IntegtestVerbosityLevels

-pytest_plugins = "integrationtest.integrationtest_drunc"
-
-# tweak the print() statement default behavior so that it always flushes the output.
 import functools
-print = functools.partial(print, flush=True)
+print = functools.partial(print, flush=True)  # always flush print() output
+
+pytest_plugins = "integrationtest.integrationtest_drunc"

 # Run setup
 run_duration = 15  # seconds
@@ -67,8 +67,6 @@
 resource_validator.free_memory_needs(9, 14)  # 30% more than what we observe being used ('free -h')
 actual_output_path = get_pytest_tmpdir()
 resource_validator.free_disk_space_needs(actual_output_path, 1)  # more than what we observe
-resval_debug_string = resource_validator.get_debug_string()
-print(f"{resval_debug_string}")

 ### Config setup
 common_config_obj = data_classes.drunc_config()
@@ -249,19 +247,9 @@
 ### Tests
 # Run control
-def test_dunerc_success(run_dunerc):
-    # print the name of the current test
-    current_test = os.environ.get("PYTEST_CURRENT_TEST")
-    match_obj = re.search(r".*\[(.+)-run_.*rc.*\d].*", current_test)
-    if match_obj:
-        current_test = match_obj.group(1)
-    banner_line = re.sub(".", "=", current_test)
-    print(banner_line)
-    print(current_test)
-    print(banner_line)
-
-    # Check that dunerc completed correctly
-    assert run_dunerc.completed_process.returncode == 0
+def test_dunerc_success(run_dunerc, caplog):
+    # checks for run control success, problems during pytest setup, etc.
+    basic_checks.basic_checks(run_dunerc, caplog, print_test_name=True)
 # Log files
 def test_log_files(run_dunerc):
@@ -290,7 +278,8 @@ def test_log_files(run_dunerc):
     if check_for_logfile_errors:
         # Check that there are no warnings or errors in the log files
         assert log_file_checks.logs_are_error_free(
-            run_dunerc.log_files, True, True, ignored_logfile_problems
+            run_dunerc.log_files, True, True, ignored_logfile_problems,
+            verbosity_helper=run_dunerc.verbosity_helper
         ), f"Errors found in log files: {run_dunerc.log_files}"

 # Data files
@@ -298,7 +287,7 @@ def test_data_files(run_dunerc):
     current_test = os.environ.get("PYTEST_CURRENT_TEST")

     # sanity checks
-    data_file_checks.trigger_sanity_checks()
+    data_file_checks.trigger_sanity_checks(run_dunerc.verbosity_helper)

     datafile_params = {
         "no-bit": {"n_data_files": 1, "expected_trigger_types": ["kTiming", "kPrescale", "kRandom"], "multi_required": False},
@@ -316,36 +305,39 @@ def test_data_files(run_dunerc):
     for key in datafile_params.keys():
         if key in current_test:
             selected_params = datafile_params[key]
-            print("Selected params for", key, ":", selected_params)
+            if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.integtest_debug):
+                print("Selected params for", key, ":", selected_params)
             break
     if not selected_params:
         print(f"\n*** ERROR: unable to determine the datafile_params for test {current_test}.")

     ### Run some tests on the output data file
-    all_ok = True

     ## N of data files
-    all_ok &= len(run_dunerc.data_files) == selected_params["n_data_files"]
+    all_ok = len(run_dunerc.data_files) == selected_params["n_data_files"]
     if all_ok:
-        print(f"\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({selected_params['n_data_files']})")
+        if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+            print(f"\N{WHITE HEAVY CHECK MARK} The correct number of raw data files was found ({selected_params['n_data_files']})")
     else:
         print(f"\N{POLICE CARS REVOLVING LIGHT} An incorrect number of raw data files was found, expected {selected_params['n_data_files']}, found {len(run_dunerc.data_files)} \N{POLICE CARS REVOLVING LIGHT}")

     ## Other test
     if selected_params["n_data_files"] > 0:
-        data_file = data_file_checks.DataFile(run_dunerc.data_files[0])
+        data_file = data_file_checks.DataFile(run_dunerc.data_files[0], run_dunerc.verbosity_helper)

         # TR types
         all_ok &= data_file_checks.check_tr_trigger_types(data_file, selected_params['expected_trigger_types'])
         if all_ok:
-            print(f"\N{WHITE HEAVY CHECK MARK} All expected TC bits were found ({selected_params['expected_trigger_types']})")
+            if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+                print(f"\N{WHITE HEAVY CHECK MARK} All expected TC bits were found ({selected_params['expected_trigger_types']})")
         else:
             print(f"\N{POLICE CARS REVOLVING LIGHT} The extracted TC bits do not correspond to the expected ones! \N{POLICE CARS REVOLVING LIGHT}")

         # TR multiplicity
         all_ok &= data_file_checks.check_tr_type_multiplicity(data_file, selected_params['multi_required'])
         if all_ok:
-            print(f"\N{WHITE HEAVY CHECK MARK} The TR type multiplicity was found as expected ({selected_params['multi_required']})")
+            if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions):
+                print(f"\N{WHITE HEAVY CHECK MARK} The TR type multiplicity was found as expected ({selected_params['multi_required']})")
         else:
             print(f"\N{POLICE CARS REVOLVING LIGHT} The TR type multiplicity is NOT as expected ({selected_params['multi_required']})! \N{POLICE CARS REVOLVING LIGHT}")
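The bundle-script changes below forward two new pytest options, --integtest-verbosity and --dunerc-fullprint-watch-string. Their registration on the Python side is not part of this changeset; a plausible conftest.py sketch (the option names come from the script below, while the default value and help text are assumptions) would be:

    def pytest_addoption(parser):
        # option names taken from dunedaq_integtest_bundle.sh below;
        # defaults and help text are assumed
        parser.addoption("--integtest-verbosity", action="store", type=int, default=3,
                         help="console verbosity, 1 (least) through 6 (DRUNC debug)")
        parser.addoption("--dunerc-fullprint-watch-string", action="store", default="",
                         help="phrase (Python regex) that triggers full run-control output")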
\N{POLICE CARS REVOLVING LIGHT}") # TR multiplicity all_ok &= data_file_checks.check_tr_type_multiplicity(data_file, selected_params['multi_required']) if all_ok: - print(f"\N{WHITE HEAVY CHECK MARK} The TR type multiplicity was found as expected ({selected_params['multi_required']})") + if run_dunerc.verbosity_helper.compare_level(IntegtestVerbosityLevels.drunc_transitions): + print(f"\N{WHITE HEAVY CHECK MARK} The TR type multiplicity was found as expected ({selected_params['multi_required']})") else: print(f"\N{POLICE CARS REVOLVING LIGHT} The TR type multiplicity is NOT as expected ({selected_params['multi_required']})! \N{POLICE CARS REVOLVING LIGHT}") diff --git a/scripts/dunedaq_integtest_bundle.sh b/scripts/dunedaq_integtest_bundle.sh index 8cbb401..f58a543 100755 --- a/scripts/dunedaq_integtest_bundle.sh +++ b/scripts/dunedaq_integtest_bundle.sh @@ -21,10 +21,14 @@ Options: -n -N --stop-on-failure : causes the script to stop when one of the integtests reports a failure + --verbosity : requested level of console messages, in range 1-6, where 1 is least, 6 is DRUNC debug + --trigger-full-rc-output + - the phrase can be a Python regex, which can be useful in handling colorized text --concise-output : suppresses run control and DAQApp messages in order to focus on test results - --tmpdir : specifies a root directory to use for test output, e.g. a directory instead of '/tmp' + - this is equivalent to \"--verbosity 1\" + --tmpdir : specifies a root directory to use for test output, e.g. a directory instead of '/tmp' --list-only : list the tests that match the requested patterns without running them - --pytest-options : string with one or more dunedaq-specific command-line options to pass to Pytest + --pytest-options : string with one or more dunedaq-specific command-line options to pass to Pytest - available options include the following: --dunerc-path : Path to DUNE run control. Default is to search in \$PATH --skip-resource-checks : Whether to skip the node resource (CPU/Memory) checks for this test @@ -50,7 +54,7 @@ CaptureOutput() { tee -a $1 } -GETOPT_TEMP=`getopt -o hr:k:x:n:N: --long help,stop-on-failure,concise-output,include:,exclude:,tmpdir:,list-only,pytest-options: -- "$@"` +GETOPT_TEMP=`getopt -o hr:k:x:n:N: --long help,stop-on-failure,concise-output,include:,exclude:,tmpdir:,verbosity:,trigger-full-rc-output:,list-only,pytest-options: -- "$@"` if [ $? 
@@ -64,6 +68,7 @@
 requested_test_names=
 excluded_test_names=
 only_list_tests=""
 PYTEST_COMMAND="pytest -s --tb=short" # our core pytest command, with DAQ printout included and short pytest traceback
+PYTEST_OPTIONS=""

 while true; do
     case "$1" in
@@ -126,8 +131,7 @@ while true; do
             shift
             ;;
         --concise-output)
-            # replace the pytest "-s" option with "-rs" to suppress all output except pytest.skip messages
-            PYTEST_COMMAND="`echo ${PYTEST_COMMAND} | sed 's/ -s/ -rs/'`"
+            PYTEST_OPTIONS="$PYTEST_OPTIONS --integtest-verbosity 1"
             shift
             ;;
         --tmpdir)
@@ -135,8 +139,21 @@ while true; do
             tmpdir_root=$2
             export PYTEST_DEBUG_TEMPROOT=${tmpdir_root}
             shift 2
             ;;
+        --verbosity)
+            PYTEST_OPTIONS="$PYTEST_OPTIONS --integtest-verbosity $2"
+            let level=$2
+            if [[ $level -ge 6 ]]; then
+                PYTEST_OPTIONS="$PYTEST_OPTIONS --dunerc-option log-level debug"
+            fi
+            shift 2
+            ;;
+        --trigger-full-rc-output)
+            watch_string=`echo "$2" | sed 's/ /_SPC_/g'`
+            PYTEST_OPTIONS="$PYTEST_OPTIONS --dunerc-fullprint-watch-string $watch_string"
+            shift 2
+            ;;
         --pytest-options)
-            PYTEST_COMMAND="${PYTEST_COMMAND} $2 --" # Add the specified options to the pytest command
+            PYTEST_OPTIONS="$PYTEST_OPTIONS $2"
             shift 2
             ;;
         --list-only)
@@ -149,6 +166,9 @@ while true; do
             ;;
     esac
 done
+if [[ "${PYTEST_OPTIONS}" != "" ]]; then
+    PYTEST_COMMAND="${PYTEST_COMMAND} ${PYTEST_OPTIONS} --" # Add the requested options to the pytest command
+fi

 # run the integtests from the daqsystemtest repo if no repo was specified
 if [[ "${integtest_list}" == "" ]]; then
@@ -345,7 +365,6 @@ done

 # print out summary information
 echo "" | CaptureOutput ${ITGRUNNER_LOG_FILE}
-echo "" | CaptureOutput ${ITGRUNNER_LOG_FILE}
 echo "+++++++++++++++++++++++++++++++++++++++++++++++++" | CaptureOutput ${ITGRUNNER_LOG_FILE}
 echo "++++++++++++++++++++ SUMMARY ++++++++++++++++++++" | CaptureOutput ${ITGRUNNER_LOG_FILE}
 echo "+++++++++++++++++++++++++++++++++++++++++++++++++" | CaptureOutput ${ITGRUNNER_LOG_FILE}
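One detail of the --trigger-full-rc-output handling above: because the watch phrase passes through shell word splitting, the script encodes each space as "_SPC_" before appending it to the pytest command line, so the Python side must undo that encoding before treating the phrase as a regex. A sketch of that decode step follows; the function names here are hypothetical, since the actual consumer of --dunerc-fullprint-watch-string is not shown in this changeset.

    import re

    def decode_watch_string(encoded: str) -> str:
        # undo the shell-side encoding: the bundle script turns every space
        # in the --trigger-full-rc-output phrase into "_SPC_"
        return encoded.replace("_SPC_", " ")

    def line_triggers_full_output(line: str, encoded_watch: str) -> bool:
        pattern = decode_watch_string(encoded_watch)
        # the phrase is treated as a Python regex, which helps match
        # colorized (ANSI-escaped) run-control output
        return bool(pattern) and re.search(pattern, line) is not None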