From 7a50cca13132738b6cd590de1ada2f3463c73b6c Mon Sep 17 00:00:00 2001 From: Philipp Grete Date: Mon, 9 Sep 2024 11:36:39 +0200 Subject: [PATCH] Bump Parthenon and Kokkos (#114) * bump Parthenon * Bump Parth to include restart ghost fix * update parthenon to openpmd branch * Bump Kokkos 4.4.0 and Parthenon 24.08 * Fix history filename * Update interface (cleanup and workaround) * cpp-py-formatter * Fix typo * Fix unit handling in hse test * Bump CI image to be used * Change CI machine * And back to the old machine * Limit timestep * Fix logical location * Back to good old CI container (with slightly updated python modules) * adjust container resources * Disable CUDA IPC in CI container --------- Co-authored-by: Ben Wibking Co-authored-by: par-hermes --- .github/workflows/ci.yml | 9 +++-- CHANGELOG.md | 8 ++++ external/Kokkos | 2 +- external/parthenon | 2 +- src/pgen/cloud.cpp | 3 +- src/pgen/cluster.cpp | 3 +- src/pgen/pgen.hpp | 6 ++- src/pgen/turbulence.cpp | 3 +- src/utils/few_modes_ft.cpp | 12 +++--- .../aniso_therm_cond_gauss_conv.py | 13 ++++++- .../aniso_therm_cond_ring_conv.py | 11 ++++-- .../aniso_therm_cond_ring_multid.py | 10 +++-- .../test_suites/cluster_hse/cluster_hse.py | 38 ++++++++----------- .../cluster_tabular_cooling.py | 30 ++++++--------- .../test_suites/field_loop/field_loop.py | 6 +-- .../riemann_hydro/riemann_hydro.py | 12 +++--- .../test_suites/turbulence/turbulence.py | 8 ++-- 17 files changed, 99 insertions(+), 77 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 05049ba7..1d8fbe03 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -14,6 +14,9 @@ env: CMAKE_BUILD_PARALLEL_LEVEL: 8 # num threads for build MACHINE_CFG: external/parthenon/cmake/machinecfg/CI.cmake OMPI_MCA_mpi_common_cuda_event_max: 1000 + # disable CUDA IPC within container entirely as it's causing issues for + # unknown reasons, see https://github.com/parthenon-hpc-lab/athenapk/pull/114 + OMPI_MCA_btl_smcuda_use_cuda_ipc: 0 jobs: regression: @@ -22,9 +25,9 @@ jobs: parallel: ['serial', 'mpi'] runs-on: [self-hosted, A100] container: - image: ghcr.io/parthenon-hpc-lab/cuda11.6-mpi-hdf5-ascent + image: ghcr.io/parthenon-hpc-lab/cuda11.6-noascent # map to local user id on CI machine to allow writing to build cache - options: --user 1001 + options: --user 1001 --cap-add CAP_SYS_PTRACE --shm-size="8g" --ulimit memlock=134217728 steps: - uses: actions/checkout@v3 with: @@ -56,6 +59,6 @@ jobs: build/tst/regression/outputs/aniso_therm_cond_gauss_conv/cond.png build/tst/regression/outputs/field_loop/field_loop.png build/tst/regression/outputs/riemann_hydro/shock_tube.png - build/tst/regression/outputs/turbulence/parthenon.hst + build/tst/regression/outputs/turbulence/parthenon.out1.hst retention-days: 3 diff --git a/CHANGELOG.md b/CHANGELOG.md index 17e1877b..e5957e43 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ ### Fixed (not changing behavior/API/variables/...) ### Infrastructure +- [[PR 114]](https://github.com/parthenon-hpc-lab/athenapk/pull/114) Bump Parthenon to 24.08 and Kokkos to 4.4.00 - [[PR 112]](https://github.com/parthenon-hpc-lab/athenapk/pull/112) Add dev container configuration - [[PR 105]](https://github.com/parthenon-hpc-lab/athenapk/pull/105) Bump Parthenon to latest develop (2024-03-13) - [[PR 84]](https://github.com/parthenon-hpc-lab/athenapk/pull/84) Added `CHANGELOG.md` @@ -19,6 +20,13 @@ ### Removed (removing behavior/API/varaibles/...) ### Incompatibilities (i.e. 
breaking changes) +- [[PR 114]](https://github.com/parthenon-hpc-lab/athenapk/pull/114) Bump Parthenon to 24.08 and Kokkos to 4.4.00 + - Changed the signature of `UserWorkBeforeOutput` to include `SimTime` as the last parameter + - Restarts of AMR simulations are now bitwise identical (the derefinement counter is now included) + - Order of operations in flux-correction has changed (expect round-off error differences compared to previous results for AMR sims) + - History outputs now carry the output block number, i.e., a file previously called `parthenon.hst` might now be called `parthenon.out1.hst` + - History outputs now contain two additional columns (cycle number and meshblock counts), which shifts the column indices (hint: parse the contents using the column headers rather than fixed indices, as indices may also vary between problem generators due to custom, pgen-dependent entries in the history file; see the header-parsing sketch appended below) + - Given the introduction of a forest of trees (rather than a single tree), the logical locations of each meshblock (`pmb->loc`) are now local to their tree and no longer global. To recover the original global index use `auto loc = pmb->pmy_mesh->Forest().GetLegacyTreeLocation(pmb->loc);` - [[PR 97]](https://github.com/parthenon-hpc-lab/athenapk/pull/97) - Removes original `schure.cooling` cooling curve as it had unknown origin. - To avoid confusion, only cooling table for a single solar metallicity are supported diff --git a/external/Kokkos b/external/Kokkos index 62d2b6c8..08ceff92 160000 --- a/external/Kokkos +++ b/external/Kokkos @@ -1 +1 @@ -Subproject commit 62d2b6c879b74b6ae7bd06eb3e5e80139c4708e6 +Subproject commit 08ceff92bcf3a828844480bc1e6137eb74028517 diff --git a/external/parthenon b/external/parthenon index a66670bb..ec61c9cb 160000 --- a/external/parthenon +++ b/external/parthenon @@ -1 +1 @@ -Subproject commit a66670bbc767fe03ef48a33794c6cd288ee517ed +Subproject commit ec61c9cb102d6a67a4dd03e7d7fa4e10c82c1032 diff --git a/src/pgen/cloud.cpp b/src/pgen/cloud.cpp index db53990a..890f52dc 100644 --- a/src/pgen/cloud.cpp +++ b/src/pgen/cloud.cpp @@ -229,9 +229,10 @@ void InflowWindX2(std::shared_ptr> &mbd, bool coarse) { const auto Bx_ = Bx; const auto By_ = By; const auto Bz_ = Bz; + const bool fine = false; pmb->par_for_bndry( "InflowWindX2", nb, IndexDomain::inner_x2, parthenon::TopologicalElement::CC, - coarse, KOKKOS_LAMBDA(const int, const int &k, const int &j, const int &i) { + coarse, fine, KOKKOS_LAMBDA(const int &, const int &k, const int &j, const int &i) { cons(IDN, k, j, i) = rho_wind_; cons(IM2, k, j, i) = mom_wind_; cons(IEN, k, j, i) = rhoe_wind_ + 0.5 * mom_wind_ * mom_wind_ / rho_wind_; diff --git a/src/pgen/cluster.cpp b/src/pgen/cluster.cpp index 1e73d8b5..ecc19336 100644 --- a/src/pgen/cluster.cpp +++ b/src/pgen/cluster.cpp @@ -812,7 +812,8 @@ void ProblemGenerator(Mesh *pmesh, ParameterInput *pin, MeshData *md) { } } -void UserWorkBeforeOutput(MeshBlock *pmb, ParameterInput *pin) { +void UserWorkBeforeOutput(MeshBlock *pmb, ParameterInput *pin, + const parthenon::SimTime & /*tm*/) { // get hydro auto pkg = pmb->packages.Get("Hydro"); const Real gam = pin->GetReal("hydro", "gamma"); diff --git a/src/pgen/pgen.hpp b/src/pgen/pgen.hpp index 182d8497..b3e55aad 100644 --- a/src/pgen/pgen.hpp +++ b/src/pgen/pgen.hpp @@ -99,7 +99,8 @@ using namespace parthenon::driver::prelude; void ProblemInitPackageData(ParameterInput *pin, parthenon::StateDescriptor *pkg); void InitUserMeshData(ParameterInput *pin); void ProblemGenerator(Mesh *pmesh, ParameterInput *pin, MeshData *md); -void 
UserWorkBeforeOutput(MeshBlock *pmb, ParameterInput *pin); +void UserWorkBeforeOutput(MeshBlock *pmb, ParameterInput *pin, + const parthenon::SimTime &tm); void ClusterUnsplitSrcTerm(MeshData *md, const parthenon::SimTime &tm, const Real beta_dt); void ClusterSplitSrcTerm(MeshData *md, const parthenon::SimTime &tm, @@ -120,7 +121,8 @@ void ProblemGenerator(Mesh *pm, parthenon::ParameterInput *pin, MeshData * void ProblemInitPackageData(ParameterInput *pin, parthenon::StateDescriptor *pkg); void Driving(MeshData *md, const parthenon::SimTime &tm, const Real dt); void SetPhases(MeshBlock *pmb, ParameterInput *pin); -void UserWorkBeforeOutput(MeshBlock *pmb, ParameterInput *pin); +void UserWorkBeforeOutput(MeshBlock *pmb, ParameterInput *pin, + const parthenon::SimTime &tm); void Cleanup(); } // namespace turbulence diff --git a/src/pgen/turbulence.cpp b/src/pgen/turbulence.cpp index 2c9ff9a7..b137a9ad 100644 --- a/src/pgen/turbulence.cpp +++ b/src/pgen/turbulence.cpp @@ -450,7 +450,8 @@ void Driving(MeshData *md, const parthenon::SimTime &tm, const Real dt) { Perturb(md, dt); } -void UserWorkBeforeOutput(MeshBlock *pmb, ParameterInput *pin) { +void UserWorkBeforeOutput(MeshBlock *pmb, ParameterInput *pin, + const parthenon::SimTime & /*tm*/) { auto hydro_pkg = pmb->packages.Get("Hydro"); // Store (common) acceleration field in spectral space diff --git a/src/utils/few_modes_ft.cpp b/src/utils/few_modes_ft.cpp index e42edde2..5d22d042 100644 --- a/src/utils/few_modes_ft.cpp +++ b/src/utils/few_modes_ft.cpp @@ -9,18 +9,15 @@ // C++ headers #include -#include // Parthenon headers #include "basic_types.hpp" #include "config.hpp" #include "globals.hpp" #include "kokkos_abstraction.hpp" -#include "mesh/domain.hpp" #include "mesh/meshblock_pack.hpp" // AthenaPK headers -#include "../main.hpp" #include "few_modes_ft.hpp" #include "utils/error_checking.hpp" @@ -130,9 +127,12 @@ void FewModesFT::SetPhases(MeshBlock *pmb, ParameterInput *pin) { const auto nx2 = pmb->block_size.nx(X2DIR); const auto nx3 = pmb->block_size.nx(X3DIR); - const auto gis = pmb->loc.lx1() * pmb->block_size.nx(X1DIR); - const auto gjs = pmb->loc.lx2() * pmb->block_size.nx(X2DIR); - const auto gks = pmb->loc.lx3() * pmb->block_size.nx(X3DIR); + // Need to use legacy locations (which are global) because locations now are local + // to the tree, which results in inconsistencies for meshes with multiple trees. + const auto loc = pmb->pmy_mesh->Forest().GetLegacyTreeLocation(pmb->loc); + const auto gis = loc.lx1() * pmb->block_size.nx(X1DIR); + const auto gjs = loc.lx2() * pmb->block_size.nx(X2DIR); + const auto gks = loc.lx3() * pmb->block_size.nx(X3DIR); // make local ref to capure in lambda const auto num_modes = num_modes_; diff --git a/tst/regression/test_suites/aniso_therm_cond_gauss_conv/aniso_therm_cond_gauss_conv.py b/tst/regression/test_suites/aniso_therm_cond_gauss_conv/aniso_therm_cond_gauss_conv.py index 9f002723..fb2a187b 100644 --- a/tst/regression/test_suites/aniso_therm_cond_gauss_conv/aniso_therm_cond_gauss_conv.py +++ b/tst/regression/test_suites/aniso_therm_cond_gauss_conv/aniso_therm_cond_gauss_conv.py @@ -98,6 +98,10 @@ def Prepare(self, parameters, step): "parthenon/output0/id=%s" % outname, "hydro/gamma=2.0", "parthenon/time/tlim=%f" % tlim, + # Workaround for the RKL2 integrator, which, by default, does not limit the + # timestep; in newer versions of Parthenon this triggers a fail-safe given + # the default init value of numeric_limits::max(). 
+ "parthenon/time/dt_ceil=%f" % tlim, "diffusion/conduction=%s" % conduction, "diffusion/thermal_diff_coeff_code=0.25", "diffusion/integrator=%s" % int_cfg, @@ -137,10 +141,15 @@ def get_ref(x, Bx, field_cfg): outname = get_outname(all_cfgs[step]) data_filename = f"{parameters.output_path}/parthenon.{outname}.final.phdf" data_file = phdf.phdf(data_filename) - prim = data_file.Get("prim") zz, yy, xx = data_file.GetVolumeLocations() mask = yy == yy[0] - temp = prim[4][mask] + # Flatten=true (default) is currently (Sep 24) broken so we manually flatten + components = data_file.GetComponents( + data_file.Info["ComponentNames"], flatten=False + ) + temp = components["prim_pressure"].ravel()[ + mask + ] # because of gamma = 2.0 and rho = 1 -> p = e = T x = xx[mask] res, field_cfg, int_cfg = all_cfgs[step] row = res_cfgs.index(res) diff --git a/tst/regression/test_suites/aniso_therm_cond_ring_conv/aniso_therm_cond_ring_conv.py b/tst/regression/test_suites/aniso_therm_cond_ring_conv/aniso_therm_cond_ring_conv.py index db2ae4e4..ba13129e 100644 --- a/tst/regression/test_suites/aniso_therm_cond_ring_conv/aniso_therm_cond_ring_conv.py +++ b/tst/regression/test_suites/aniso_therm_cond_ring_conv/aniso_therm_cond_ring_conv.py @@ -35,7 +35,6 @@ class TestCase(utils.test_case.TestCaseAbs): def Prepare(self, parameters, step): - assert parameters.num_ranks <= 4, "Use <= 4 ranks for diffusion test." res = res_cfgs[step - 1] @@ -78,8 +77,14 @@ def Analyse(self, parameters): for res in res_cfgs: data_filename = f"{parameters.output_path}/parthenon.{res}.final.phdf" data_file = phdf.phdf(data_filename) - prim = data_file.Get("prim") - T = prim[4] # because of gamma = 2.0 and rho = 1 -> p = e = T + # Flatten=true (default) is currently (Sep 24) broken so we manually flatten + components = data_file.GetComponents( + data_file.Info["ComponentNames"], flatten=False + ) + T = components[ + "prim_pressure" + ].ravel() # because of gamma = 2.0 and rho = 1 -> p = e = T + zz, yy, xx = data_file.GetVolumeLocations() r = np.sqrt(xx**2 + yy**2) diff --git a/tst/regression/test_suites/aniso_therm_cond_ring_multid/aniso_therm_cond_ring_multid.py b/tst/regression/test_suites/aniso_therm_cond_ring_multid/aniso_therm_cond_ring_multid.py index bf1e0416..3f933507 100644 --- a/tst/regression/test_suites/aniso_therm_cond_ring_multid/aniso_therm_cond_ring_multid.py +++ b/tst/regression/test_suites/aniso_therm_cond_ring_multid/aniso_therm_cond_ring_multid.py @@ -35,7 +35,6 @@ class TestCase(utils.test_case.TestCaseAbs): def Prepare(self, parameters, step): - assert parameters.num_ranks <= 4, "Use <= 4 ranks for diffusion test." 
# 2D reference case again @@ -110,8 +109,13 @@ def Analyse(self, parameters): for step in range(1, 5): data_filename = f"{parameters.output_path}/parthenon.{step}.final.phdf" data_file = phdf.phdf(data_filename) - prim = data_file.Get("prim") - T = prim[4] # because of gamma = 2.0 and rho = 1 -> p = e = T + # Flatten=true (default) is currently (Sep 24) broken so we manually flatten + components = data_file.GetComponents( + data_file.Info["ComponentNames"], flatten=False + ) + T = components[ + "prim_pressure" + ].ravel() # because of gamma = 2.0 and rho = 1 -> p = e = T zz, yy, xx = data_file.GetVolumeLocations() if step == 1 or step == 2: r = np.sqrt(xx**2 + yy**2) diff --git a/tst/regression/test_suites/cluster_hse/cluster_hse.py b/tst/regression/test_suites/cluster_hse/cluster_hse.py index 0f567e4f..9c8ec012 100644 --- a/tst/regression/test_suites/cluster_hse/cluster_hse.py +++ b/tst/regression/test_suites/cluster_hse/cluster_hse.py @@ -211,21 +211,15 @@ def AnalyseInitPert(self, parameters): # flatten array as prim var are also flattended cell_vol = cell_vol.ravel() - prim = data_file.Get("prim") - - # FIXME: For now this is hard coded - a component mapping should be done by phdf - prim_col_dict = { - "velocity_1": 1, - "velocity_2": 2, - "velocity_3": 3, - "magnetic_field_1": 5, - "magnetic_field_2": 6, - "magnetic_field_3": 7, - } - - vx = prim[prim_col_dict["velocity_1"]] - vy = prim[prim_col_dict["velocity_2"]] - vz = prim[prim_col_dict["velocity_3"]] + # Flatten=true (default) is currently (Sep 24) broken so we manually flatten + components = data_file.GetComponents( + data_file.Info["ComponentNames"], flatten=False + ) + rho = components["prim_density"].ravel() + vx = components["prim_velocity_1"].ravel() + vy = components["prim_velocity_2"].ravel() + vz = components["prim_velocity_3"].ravel() + pres = components["prim_pressure"].ravel() # volume weighted rms velocity rms_v = np.sqrt( @@ -243,9 +237,9 @@ def AnalyseInitPert(self, parameters): f"Expected {self.sigma_v.in_units('code_velocity')} but got {rms_v}\n" ) - bx = prim[prim_col_dict["magnetic_field_1"]] - by = prim[prim_col_dict["magnetic_field_2"]] - bz = prim[prim_col_dict["magnetic_field_3"]] + bx = components["prim_magnetic_field_1"].ravel() + by = components["prim_magnetic_field_2"].ravel() + bz = components["prim_magnetic_field_3"].ravel() # volume weighted rms magnetic field rms_b = np.sqrt( @@ -394,11 +388,11 @@ def rk4(f, y0, T): # Prepare two array of R leading inwards and outwards from the virial radius, to integrate R_in = unyt.unyt_array( - np.hstack((R[R < self.R_fix], self.R_fix.in_units("code_length"))), + np.hstack((R[R < self.R_fix].v, self.R_fix.in_units("code_length").v)), "code_length", ) R_out = unyt.unyt_array( - np.hstack((self.R_fix.in_units("code_length"), R[R > self.R_fix])), + np.hstack((self.R_fix.in_units("code_length").v, R[R > self.R_fix].v)), "code_length", ) @@ -412,7 +406,7 @@ def rk4(f, y0, T): # Put the two pieces of P together P = unyt.unyt_array( np.hstack( - (P_in[:-1].in_units("dyne/cm**2"), P_out[1:].in_units("dyne/cm**2")) + (P_in[:-1].in_units("dyne/cm**2").v, P_out[1:].in_units("dyne/cm**2").v) ), "dyne/cm**2", ) @@ -537,7 +531,7 @@ def rk4(f, y0, T): # Compare the initial output to the analytic model def analytic_gold(Z, Y, X, analytic_var): - r = np.sqrt(X**2 + Y**2 + Z**2) + r = unyt.unyt_array(np.sqrt(X**2 + Y**2 + Z**2), "code_length") analytic_interp = unyt.unyt_array( np.interp(r, analytic_R, analytic_var), analytic_var.units ) diff --git 
a/tst/regression/test_suites/cluster_tabular_cooling/cluster_tabular_cooling.py b/tst/regression/test_suites/cluster_tabular_cooling/cluster_tabular_cooling.py index 1b4b0077..77a8cfeb 100644 --- a/tst/regression/test_suites/cluster_tabular_cooling/cluster_tabular_cooling.py +++ b/tst/regression/test_suites/cluster_tabular_cooling/cluster_tabular_cooling.py @@ -310,19 +310,16 @@ def zero_corrected_linf_err(gold, test): analyze_status = False data_file = phdf.phdf(data_filename) - prim = data_file.Get("prim") - - # FIXME: TODO(forrestglines) For now this is hard coded - a component mapping should be done by phdf - prim_col_dict = { - "density": 0, - "pressure": 4, - } + # Flatten=true (default) is currently (Sep 24) broken so we manually flatten + components = data_file.GetComponents( + data_file.Info["ComponentNames"], flatten=False + ) rho = unyt.unyt_array( - prim[prim_col_dict["density"], :], "code_mass*code_length**-3" + components["prim_density"].ravel(), "code_mass*code_length**-3" ) pres = unyt.unyt_array( - prim[prim_col_dict["pressure"], :], + components["prim_pressure"].ravel(), "code_mass*code_length**-1*code_time**-2", ) @@ -364,19 +361,16 @@ def zero_corrected_linf_err(gold, test): analyze_status = False data_file = phdf.phdf(data_filename) - prim = data_file.Get("prim") - - # FIXME: TODO(forrestglines) For now this is hard coded - a component mapping should be done by phdf - prim_col_dict = { - "density": 0, - "pressure": 4, - } + # Flatten=true (default) is currently (Sep 24) broken so we manually flatten + components = data_file.GetComponents( + data_file.Info["ComponentNames"], flatten=False + ) rho = unyt.unyt_array( - prim[prim_col_dict["density"]], "code_mass*code_length**-3" + components["prim_density"].ravel(), "code_mass*code_length**-3" ) pres = unyt.unyt_array( - prim[prim_col_dict["pressure"]], + components["prim_pressure"].ravel(), "code_mass*code_length**-1*code_time**-2", ) diff --git a/tst/regression/test_suites/field_loop/field_loop.py b/tst/regression/test_suites/field_loop/field_loop.py index b752f535..07e032b0 100644 --- a/tst/regression/test_suites/field_loop/field_loop.py +++ b/tst/regression/test_suites/field_loop/field_loop.py @@ -135,13 +135,13 @@ def Analyse(self, parameters): for step in range(len(all_cfgs)): outname = get_outname(all_cfgs[step]) - data_filename = f"{parameters.output_path}/{outname}.hst" + data_filename = f"{parameters.output_path}/{outname}.out1.hst" data = np.genfromtxt(data_filename) res, method = all_cfgs[step] row = method_cfgs.index(method) - p[row, 0].plot(data[:, 0], data[:, 8] / data[0, 8], label=outname) - p[row, 1].plot(data[:, 0], data[:, 10], label=outname) + p[row, 0].plot(data[:, 0], data[:, 10] / data[0, 10], label=outname) + p[row, 1].plot(data[:, 0], data[:, 12], label=outname) p[0, 0].set_title("Emag(t)/Emag(0)") p[0, 1].set_title("rel DivB") diff --git a/tst/regression/test_suites/riemann_hydro/riemann_hydro.py b/tst/regression/test_suites/riemann_hydro/riemann_hydro.py index 9d0fc1f4..1a926371 100644 --- a/tst/regression/test_suites/riemann_hydro/riemann_hydro.py +++ b/tst/regression/test_suites/riemann_hydro/riemann_hydro.py @@ -58,7 +58,6 @@ class TestCase(utils.test_case.TestCaseAbs): def Prepare(self, parameters, step): - method, init_cond = all_cfgs[step - 1] nx1 = method["nx1"] @@ -126,10 +125,13 @@ def Analyse(self, parameters): data_filename = f"{parameters.output_path}/parthenon.{step + 1}.final.phdf" data_file = phdf.phdf(data_filename) - prim = data_file.Get("prim") - rho = prim[0] - vx = prim[1] - 
pres = prim[4] + # Flatten=true (default) is currently (Sep 24) broken so we manually flatten + components = data_file.GetComponents( + data_file.Info["ComponentNames"], flatten=False + ) + rho = components["prim_density"].ravel() + vx = components["prim_velocity_1"].ravel() + pres = components["prim_pressure"].ravel() zz, yy, xx = data_file.GetVolumeLocations() label = ( diff --git a/tst/regression/test_suites/turbulence/turbulence.py b/tst/regression/test_suites/turbulence/turbulence.py index eddbd38e..c5a492a1 100644 --- a/tst/regression/test_suites/turbulence/turbulence.py +++ b/tst/regression/test_suites/turbulence/turbulence.py @@ -22,7 +22,6 @@ class TestCase(utils.test_case.TestCaseAbs): def Prepare(self, parameters, step): - parameters.driver_cmd_line_args = [ "parthenon/output2/dt=-1", "parthenon/output3/dt=-1", @@ -31,7 +30,6 @@ def Prepare(self, parameters, step): return parameters def Analyse(self, parameters): - sys.path.insert( 1, parameters.parthenon_path @@ -40,17 +38,17 @@ def Analyse(self, parameters): success = True - data_filename = f"{parameters.output_path}/parthenon.hst" + data_filename = f"{parameters.output_path}/parthenon.out1.hst" data = np.genfromtxt(data_filename) # Check Ms if not (data[-1, -3] > 0.45 and data[-1, -3] < 0.50): - print("ERROR: Mismatch in Ms") + print(f"ERROR: Mismatch in Ms={data[-1, -3]}") success = False # Check Ma if not (data[-1, -2] > 12.8 and data[-1, -2] < 13.6): - print("ERROR: Mismatch in Ma") + print(f"ERROR: Mismatch in Ma={data[-1, -2]}") success = False return success
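
Note on the history-output change above: with the two extra columns and pgen-dependent entries, fixed column indices (as previously used in field_loop.py and turbulence.py) are fragile. The sketch below parses a history file by its column labels instead; it assumes numpy is available and that the last comment line of the .hst header lists "[n]=name" tokens, which may differ between Parthenon versions.

import re
import numpy as np

def read_hst(filename):
    # Collect the comment header; the last header line is assumed to hold the
    # column labels, e.g. "# [1]=time [2]=dt ..." (verify for your version).
    with open(filename) as f:
        header = [line for line in f if line.startswith("#")]
    labels = re.findall(r"\[\d+\]=(\S+)", header[-1])
    data = np.genfromtxt(filename)
    # Map each label to its column so analysis does not rely on fixed indices.
    return {name: data[:, i] for i, name in enumerate(labels)}

# Example: the turbulence history file now carries the output block number.
hst = read_hst("parthenon.out1.hst")
print(hst["time"][-1])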
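The same GetComponents workaround (flatten=True is currently broken, so fetch unflattened arrays and ravel them manually) is repeated in every updated test above. Extracted as a helper it could look like the following sketch; the helper name read_prim_components and the bare import are illustrative and assume parthenon_tools is already on sys.path, as the regression test suites arrange.

import phdf  # from parthenon_tools; the test suites put this on sys.path

def read_prim_components(filename, names):
    # flatten=True (default) is currently broken, so request unflattened
    # arrays and ravel them manually, the same pattern used in the tests.
    data_file = phdf.phdf(filename)
    components = data_file.GetComponents(
        data_file.Info["ComponentNames"], flatten=False
    )
    return data_file, {name: components[name].ravel() for name in names}

data_file, prim = read_prim_components(
    "parthenon.1.final.phdf", ["prim_density", "prim_velocity_1", "prim_pressure"]
)
zz, yy, xx = data_file.GetVolumeLocations()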
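cluster_hse.py above now strips units with `.v` before calling np.hstack and re-attaches them explicitly, presumably to avoid unit-handling issues when concatenating unyt quantities with unyt arrays (the exact failure mode is not stated in the patch). A minimal sketch of that pattern, using kpc in place of the test's code_length unit:

import numpy as np
import unyt

R = unyt.unyt_array(np.linspace(0.5, 2.0, 8), "kpc")
R_fix = unyt.unyt_quantity(1.0, "kpc")

# Strip to raw values before hstack, then re-wrap with explicit units so the
# concatenated array cannot silently lose or mix units.
R_in = unyt.unyt_array(
    np.hstack((R[R < R_fix].v, R_fix.in_units("kpc").v)),
    "kpc",
)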