diff --git a/script/app-mlperf-inference-nvidia/_cm.yaml b/script/app-mlperf-inference-nvidia/_cm.yaml
index 0cc58ed22..21f1515f0 100644
--- a/script/app-mlperf-inference-nvidia/_cm.yaml
+++ b/script/app-mlperf-inference-nvidia/_cm.yaml
@@ -262,6 +262,9 @@ deps:
       CM_MLPERF_NVIDIA_HARNESS_RUN_MODE:
       - run_harness
 
+  - tags: get,generic-python-lib,_package.pycuda
+    version: "2022.2.2"
+
   - tags: get,generic-python-lib,_package.nvmitten
     update_tags_from_env_with_prefix:
       _path.:
diff --git a/script/build-mlperf-inference-server-nvidia/_cm.yaml b/script/build-mlperf-inference-server-nvidia/_cm.yaml
index dd13fe569..460a86394 100644
--- a/script/build-mlperf-inference-server-nvidia/_cm.yaml
+++ b/script/build-mlperf-inference-server-nvidia/_cm.yaml
@@ -111,6 +111,7 @@ deps:
 
   # Detect pycuda
   - tags: get,generic-python-lib,_pycuda
+    version: "2022.2.2"
     skip_if_env:
       CM_RUN_STATE_DOCKER:
       - 'yes'
diff --git a/script/get-nvidia-mitten/_cm.json b/script/get-nvidia-mitten/_cm.json
index 94675091b..8329b6c21 100644
--- a/script/get-nvidia-mitten/_cm.json
+++ b/script/get-nvidia-mitten/_cm.json
@@ -17,7 +17,8 @@
       "tags": "get,python3"
     },
     {
-      "tags": "get,generic-python-lib,_pycuda"
+      "tags": "get,generic-python-lib,_pycuda",
+      "version": "2022.2.2"
     },
     {
       "tags": "get,git,_repo.https://github.com/NVIDIA/mitten",
diff --git a/script/run-all-mlperf-models/run-pruned-bert.sh b/script/run-all-mlperf-models/run-pruned-bert.sh
index 8c6d8bd1d..211e3019c 100644
--- a/script/run-all-mlperf-models/run-pruned-bert.sh
+++ b/script/run-all-mlperf-models/run-pruned-bert.sh
@@ -38,10 +38,10 @@ rerun=""
 power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15 --env.CM_MLPERF_SKIP_POWER_CHECKS=yes"
 power=" --power=yes --adr.mlperf-power-client.power_server=192.168.0.15"
 power=""
-max_batchsize=128
 max_batchsize=1
-scenario="Offline"
+max_batchsize=128
 scenario="SingleStream"
+scenario="Offline"
 
 if [[ $scenario == "Offline" ]]; then
 for stub in ${zoo_stub_list[@]}; do
@@ -55,7 +55,6 @@ cmd="cm run script --tags=run,mlperf,inference,generate-run-cmds,_find-performance \
     --scenario=Offline \
     --test_query_count=15000 \
     --adr.mlperf-inference-implementation.max_batchsize=$max_batchsize \
-    --results_dir=$HOME/results_dir \
     --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \
     ${rerun} \
     --quiet"
@@ -77,7 +76,6 @@ for stub in ${zoo_stub_list[@]}; do
     --execution_mode=valid \
     --adr.mlperf-inference-implementation.max_batchsize=$max_batchsize \
     ${power} \
-    --results_dir=$HOME/results_dir \
     --env.CM_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB=$stub \
     --quiet"
 echo ${cmd}
diff --git a/script/run-mlperf-inference-submission-checker/customize.py b/script/run-mlperf-inference-submission-checker/customize.py
index f9158bf06..2206e3d2e 100644
--- a/script/run-mlperf-inference-submission-checker/customize.py
+++ b/script/run-mlperf-inference-submission-checker/customize.py
@@ -53,7 +53,7 @@ def preprocess(i):
 
     x_version = ' --version ' + version +' ' if version!='' else ''
 
-    CMD = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + submission_checker_file + ' --input "' + submission_dir + '"' + \
+    CMD = env['CM_PYTHON_BIN_WITH_PATH'] + ' \'' + submission_checker_file + '\' --input \'' + submission_dir + '\'' + \
         x_submitter + \
         x_version + \
         skip_compliance + extra_map + power_check + extra_args
@@ -61,7 +61,8 @@ def preprocess(i):
 
     report_generator_file = os.path.join(env['CM_MLPERF_INFERENCE_SOURCE'], "tools", "submission", "generate_final_report.py")
     env['CM_RUN_CMD'] = CMD
-    env['CM_POST_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + ' ' + report_generator_file + ' --input summary.csv'
+    print(CMD)
+    env['CM_POST_RUN_CMD'] = env['CM_PYTHON_BIN_WITH_PATH'] + ' \'' + report_generator_file + '\' --input summary.csv'
 
     return {'return':0}