diff --git a/script/app-mlperf-inference-mlcommons-python/_cm.yaml b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
index 7900d8ec5..2cb9367dd 100644
--- a/script/app-mlperf-inference-mlcommons-python/_cm.yaml
+++ b/script/app-mlperf-inference-mlcommons-python/_cm.yaml
@@ -934,6 +934,9 @@ variations:
       - tags: get,generic-python-lib,_package.nltk
         names:
         - nltk
+      - tags: get,generic-python-lib,_package.numpy
+        names:
+        - numpy
       - tags: get,generic-python-lib,_package.rouge-score
         names:
         - rouge-score
diff --git a/script/app-mlperf-inference/_cm.yaml b/script/app-mlperf-inference/_cm.yaml
index a8d843fca..087a89fcb 100644
--- a/script/app-mlperf-inference/_cm.yaml
+++ b/script/app-mlperf-inference/_cm.yaml
@@ -522,6 +522,7 @@ variations:
   3d-unet_:
     env:
       CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
+      CM_MLPERF_INFERENCE_TEST_QPS: "0.01"
     posthook_deps:
       - enable_if_env:
           CM_MLPERF_LOADGEN_MODE:
@@ -541,8 +542,8 @@
     group:
       model
     env:
-      CM_MODEL:
-        stable-diffusion-xl
+      CM_MODEL: stable-diffusion-xl
+      CM_MLPERF_INFERENCE_TEST_QPS: "0.05"
     default_variations:
       precision: float16
       device: cuda
@@ -567,6 +568,7 @@
   llama2-70b_:
     env:
       CM_MLPERF_MODEL_EQUAL_ISSUE_MODE: 'yes'
+      CM_MLPERF_INFERENCE_TEST_QPS: "0.01"
     posthook_deps:
       - enable_if_env:
           CM_MLPERF_LOADGEN_MODE:
@@ -1359,6 +1361,7 @@
     - "${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}:${{ CM_MLPERF_INFERENCE_RESULTS_DIR }}"
     - "${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}:${{ CM_MLPERF_INFERENCE_SUBMISSION_DIR }}"
     - "${{ GPTJ_CHECKPOINT_PATH }}:${{ GPTJ_CHECKPOINT_PATH }}"
+    - "${{ LLAMA2_CHECKPOINT_PATH }}:${{ LLAMA2_CHECKPOINT_PATH }}"
     - "${{ DLRM_DATA_PATH }}:/home/mlperf_inf_dlrmv2"
   skip_run_cmd: 'no'
   shm_size: '32gb'
diff --git a/script/get-ml-model-llama2/_cm.json b/script/get-ml-model-llama2/_cm.json
index 330465f8c..3fc4381b9 100644
--- a/script/get-ml-model-llama2/_cm.json
+++ b/script/get-ml-model-llama2/_cm.json
@@ -11,6 +11,9 @@
   "input_mapping": {
     "checkpoint": "LLAMA2_CHECKPOINT_PATH"
   },
+  "docker": {
+    "real_run": false
+  },
   "new_env_keys": [
     "CM_ML_MODEL_*",
     "LLAMA2_CHECKPOINT_PATH"
diff --git a/script/get-preprocessed-dataset-openorca/_cm.json b/script/get-preprocessed-dataset-openorca/_cm.json
index 7934799c7..94a1336a0 100644
--- a/script/get-preprocessed-dataset-openorca/_cm.json
+++ b/script/get-preprocessed-dataset-openorca/_cm.json
@@ -110,5 +110,8 @@
       },
       "group": "dataset-type"
     }
+  },
+  "docker": {
+    "real_run": false
   }
 }