diff --git a/mlperf_logging/benchmark_meta.py b/mlperf_logging/benchmark_meta.py
index d323050..cf7c3e3 100644
--- a/mlperf_logging/benchmark_meta.py
+++ b/mlperf_logging/benchmark_meta.py
@@ -20,6 +20,9 @@
         'rgat': 10,
         'llama2_70b_lora': 10,
         'llama31_405b': 3,
+        # TODO: Update with official values
+        'llama31_8b': 10,
+        'flux1': 10,
     },

     'hpc' : {
@@ -143,7 +146,16 @@
             'llama2_70b_lora',
             'rgat',
             'llama31_405b'
-        ]
+        ],
+        '5.1': [
+            'llama31_8b',
+            'dlrm_dcnv2',
+            'retinanet',
+            'flux1',
+            'llama2_70b_lora',
+            'rgat',
+            'llama31_405b'
+        ]
     },

     'hpc': {
diff --git a/mlperf_logging/compliance_checker/README.md b/mlperf_logging/compliance_checker/README.md
index d9f3dee..48c6ed5 100644
--- a/mlperf_logging/compliance_checker/README.md
+++ b/mlperf_logging/compliance_checker/README.md
@@ -12,7 +12,7 @@ To check a log file for compliance:

 By default, 5.1.0 training edition rules are used and the default config is set to `5.1.0/common.yaml`.
 This config will check all common keys and enqueue benchmark specific config to be checked as well.
-Old training editions, still supported are 4.0.0, 3.1.0, 3.0.0, 2.1.0, 2.0.0, 1.1.0, 1.0.0, 0.7.0 and 0.6.0
+Old training editions still supported are 5.0.0, 4.1.0, 4.0.0, 3.1.0, 3.0.0, 2.1.0, 2.0.0, 1.1.0, 1.0.0, 0.7.0 and 0.6.0

 To check hpc compliance rules (only 1.0.0 ruleset is supported), set --usage hpc --ruleset 1.0.0.
@@ -26,17 +26,19 @@ As log examples use [NVIDIA's training logs](https://github.com/mlperf/training_
     5.1.0/closed_common.yaml - the common rules file for closed submissions. These rules apply to all benchmarks
     5.1.0/open_common.yaml - the common rules file for open submissions. These rules apply to all benchmarks
     5.1.0/closed_retinanet.yaml - Per-benchmark rules, closed submissions.
-    5.1.0/closed_bert.yaml
+    5.1.0/closed_llama31_8b.yaml
+    5.1.0/closed_llama31_405b.yaml
     5.1.0/closed_dlrm_dcnv2.yaml
     5.1.0/closed_rgat.yaml
     5.1.0/closed_llama2_70b_lora.yaml
-    5.1.0/closed_flux.yaml
+    5.1.0/closed_flux1.yaml

     5.1.0/open_retinanet.yaml - Per-benchmark rules, open submissions.
-    5.1.0/open_bert.yaml
+    5.1.0/open_llama31_8b.yaml
+    5.1.0/open_llama31_405b.yaml
     5.1.0/open_dlrm_dcnv2.yaml
     5.1.0/open_rgat.yaml
     5.1.0/open_llama2_70b_lora.yaml
-    5.1.0/open_flux.yaml
+    5.1.0/open_flux1.yaml

 ### Existing config files for HPC submissions
diff --git a/mlperf_logging/compliance_checker/training_5.1.0/closed_common.yaml b/mlperf_logging/compliance_checker/training_5.1.0/closed_common.yaml
index 2c49169..8639eeb 100755
--- a/mlperf_logging/compliance_checker/training_5.1.0/closed_common.yaml
+++ b/mlperf_logging/compliance_checker/training_5.1.0/closed_common.yaml
@@ -2,7 +2,7 @@
 - KEY:
     NAME: submission_benchmark
     REQ: EXACTLY_ONE
-    CHECK: " v['value'] in ['retinanet', 'stable_diffusion', 'dlrm_dcnv2', 'bert', 'rgat', 'llama2_70b_lora', 'flux'] "
+    CHECK: " v['value'] in ['retinanet', 'flux1', 'dlrm_dcnv2', 'llama31_8b', 'rgat', 'llama2_70b_lora', 'llama31_405b'] "
     POST: " enqueue_config('training_5.1.0/closed_{}.yaml'.format(v['value'])) "

 - KEY:
diff --git a/mlperf_logging/compliance_checker/training_5.1.0/closed_flux1.yaml b/mlperf_logging/compliance_checker/training_5.1.0/closed_flux1.yaml
new file mode 100644
index 0000000..23955bc
--- /dev/null
+++ b/mlperf_logging/compliance_checker/training_5.1.0/closed_flux1.yaml
@@ -0,0 +1,58 @@
+# Stable diffusion uses two metrics, FID and CLIP.
+# These metrics can be calculated offline, using different scripts
+# and logged separately. Therefore, we create a virtual key
+# called aggregated_eval_accuracy, which aggregates
+# both metrics into a single log line
+
+- KEY:
+    NAME: global_batch_size
+    REQ: AT_LEAST_ONE
+    CHECK: " v['value'] >= 0 "
+
+- KEY:
+    NAME: opt_name
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 'adamw' "
+
+- KEY:
+    NAME: opt_adamw_beta_1
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 0.9 "
+
+- KEY:
+    NAME: opt_adamw_beta_2
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 0.95 "
+
+- KEY:
+    NAME: opt_adamw_epsilon
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 1e-08 "
+
+- KEY:
+    NAME: opt_adamw_weight_decay
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 0.1 "
+
+- KEY:
+    NAME: opt_base_learning_rate
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] >= 0.0 "
+
+- KEY:
+    NAME: opt_learning_rate_warmup_steps
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] >= 0 "
+
+- KEY:
+    NAME: opt_gradient_clip_norm
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 1.0 "
+
+# TODO: Update with official metric name
+- KEY:
+    NAME: eval_accuracy
+    REQ: AT_LEAST_ONE
+    CHECK:
+        - "'epoch_num' in v['metadata']"
+    ATLEAST_ONE_CHECK: "v['value'] <= 0.586 and v['value'] > 0.0"
diff --git a/mlperf_logging/compliance_checker/training_5.1.0/closed_llama31_405b.yaml b/mlperf_logging/compliance_checker/training_5.1.0/closed_llama31_405b.yaml
new file mode 100644
index 0000000..c47fd87
--- /dev/null
+++ b/mlperf_logging/compliance_checker/training_5.1.0/closed_llama31_405b.yaml
@@ -0,0 +1,85 @@
+- KEY:
+    NAME: global_batch_size
+    REQ: EXACTLY_ONE
+    POST: >
+        s['global_batch_size'] = v['value']
+
+- KEY:
+    NAME: max_sequence_length
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 8192 "
+
+- KEY:
+    NAME: opt_name
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 'adamw' "
+
+- KEY:
+    NAME: opt_base_learning_rate
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] * 1152 == s['global_batch_size'] * 8e-5 "
+
+- KEY:
+    NAME: opt_end_learning_rate
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_learning_rate_decay_steps
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_learning_rate_warmup_steps
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_learning_rate_decay_schedule
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 'cosine with linear warmup' "
+
+- KEY:
+    NAME: opt_adamw_beta_1
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 0.9 "
+
+- KEY:
+    NAME: opt_adamw_beta_2
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 0.95 "
+
+- KEY:
+    NAME: opt_adamw_epsilon
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 1e-05 "
+
+- KEY:
+    NAME: opt_adamw_weight_decay
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 0.1 "
+
+- KEY:
+    NAME: opt_gradient_clip_norm
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 1.0 "
+
+- KEY:
+    NAME: gradient_accumulation_steps
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] > 0 "
+
+- KEY:
+    NAME: eval_samples
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 5760 "
+
+- KEY:
+    NAME: eval_accuracy
+    REQ: AT_LEAST_ONE
+    CHECK:
+        - "'samples_count' in v['metadata']"
+    ATLEAST_ONE_CHECK: "(v['value'] <= 5.6) and v['value'] > 0.0"
+
+- KEY:
+    NAME: init_checkpoint_step
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 0 "
+
diff --git a/mlperf_logging/compliance_checker/training_5.1.0/closed_flux.yaml b/mlperf_logging/compliance_checker/training_5.1.0/closed_llama31_8b.yaml
similarity index 51%
rename from mlperf_logging/compliance_checker/training_5.1.0/closed_flux.yaml
rename to mlperf_logging/compliance_checker/training_5.1.0/closed_llama31_8b.yaml
index d0ed330..3619827 100644
--- a/mlperf_logging/compliance_checker/training_5.1.0/closed_flux.yaml
+++ b/mlperf_logging/compliance_checker/training_5.1.0/closed_llama31_8b.yaml
@@ -4,38 +4,46 @@
     POST: >
       s['global_batch_size'] = v['value']

+# TODO: Update with official compliance requirements
+- KEY:
+    NAME: opt_base_learning_rate
+    REQ: EXACTLY_ONE
+
 - KEY:
-    NAME: opt_learning_rate_warmup_steps
+    NAME: opt_lamb_epsilon
     REQ: EXACTLY_ONE

 - KEY:
-    NAME: opt_base_learning_rate
+    NAME: opt_learning_rate_training_steps
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_learning_rate_warmup_steps
     REQ: EXACTLY_ONE

 - KEY:
-    NAME: opt_gradient_clip_norm
+    NAME: num_warmup_steps
     REQ: EXACTLY_ONE

 - KEY:
-    NAME: opt_adamw_weight_decay
+    NAME: start_warmup_step
     REQ: EXACTLY_ONE

 - KEY:
-    NAME: opt_adamw_epsilon
+    NAME: opt_lamb_beta_1
     REQ: EXACTLY_ONE

 - KEY:
-    NAME: opt_adamw_beta_1
+    NAME: opt_lamb_beta_2
     REQ: EXACTLY_ONE

 - KEY:
-    NAME: opt_adamw_beta_2
+    NAME: opt_lamb_weight_decay_rate
     REQ: EXACTLY_ONE

 - KEY:
     NAME: eval_accuracy
     REQ: AT_LEAST_ONE
     CHECK:
-        - "'samples_count' in v['metadata']"
-    ATLEAST_ONE_CHECK: "(v['value'] <= 0.6) and v['value'] > 0.0"
+        - "'epoch_num' in v['metadata']"
+    ATLEAST_ONE_CHECK: "(v['value'] >= 0.720) and v['value'] < 1.0"

diff --git a/mlperf_logging/compliance_checker/training_5.1.0/common.yaml b/mlperf_logging/compliance_checker/training_5.1.0/common.yaml
index cfdd5a6..360854c 100755
--- a/mlperf_logging/compliance_checker/training_5.1.0/common.yaml
+++ b/mlperf_logging/compliance_checker/training_5.1.0/common.yaml
@@ -107,13 +107,13 @@
     NAME: epoch_start
     REQ: AT_LEAST_ONE_OR(block_start)
     CHECK:
-        - "'epoch_num' in v['metadata']"
+        - "('epoch_num' in v['metadata']) | ('samples_count' in v['metadata'])"

 - KEY:
     NAME: epoch_stop
     REQ: AT_LEAST_ONE_OR(block_stop)
     CHECK:
-        - "'epoch_num' in v['metadata']"
+        - "('epoch_num' in v['metadata']) | ('samples_count' in v['metadata'])"

 # making sure previous eval did print it's accuracy result
 - KEY:
diff --git a/mlperf_logging/compliance_checker/training_5.1.0/open_common.yaml b/mlperf_logging/compliance_checker/training_5.1.0/open_common.yaml
index 97abafc..41015a8 100644
--- a/mlperf_logging/compliance_checker/training_5.1.0/open_common.yaml
+++ b/mlperf_logging/compliance_checker/training_5.1.0/open_common.yaml
@@ -2,5 +2,5 @@
 - KEY:
     NAME: submission_benchmark
     REQ: EXACTLY_ONE
-    CHECK: " v['value'] in ['retinanet', 'dlrm_dcnv2', 'bert', 'rgat', 'llama2_70b_lora', 'flux'] "
+    CHECK: " v['value'] in ['retinanet', 'flux1', 'dlrm_dcnv2', 'llama31_8b', 'rgat', 'llama2_70b_lora', 'llama31_405b'] "
     POST: " enqueue_config('training_5.1.0/open_{}.yaml'.format(v['value'])) "
diff --git a/mlperf_logging/compliance_checker/training_5.1.0/open_flux.yaml b/mlperf_logging/compliance_checker/training_5.1.0/open_flux.yaml
deleted file mode 100644
index f732825..0000000
--- a/mlperf_logging/compliance_checker/training_5.1.0/open_flux.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-- KEY:
-    NAME: eval_accuracy
-    REQ: AT_LEAST_ONE
-    CHECK:
-        - "'samples_count' in v['metadata']"
-    ATLEAST_ONE_CHECK: "(v['value'] <= 0.6) and v['value'] > 0.0"
diff --git a/mlperf_logging/compliance_checker/training_5.1.0/open_flux1.yaml b/mlperf_logging/compliance_checker/training_5.1.0/open_flux1.yaml
new file mode 100644
index 0000000..4144e05
--- /dev/null
+++ b/mlperf_logging/compliance_checker/training_5.1.0/open_flux1.yaml
@@ -0,0 +1,13 @@
+# Stable diffusion uses two metrics, FID and CLIP.
+# These metrics can be calculated offline, using different scripts
+# and logged separately. Therefore, we create a virtual key
+# called aggregated_eval_accuracy, which aggregates
+# both metrics into a single log line
+
+# TODO: Update with official metric name
+- KEY:
+    NAME: averaged_validation_loss
+    REQ: AT_LEAST_ONE
+    CHECK:
+        - "'epoch_num' in v['metadata']"
+    ATLEAST_ONE_CHECK: "v['value'] <= 0.586 and v['value'] > 0.0"
diff --git a/mlperf_logging/compliance_checker/training_5.1.0/open_llama31_405b.yaml b/mlperf_logging/compliance_checker/training_5.1.0/open_llama31_405b.yaml
new file mode 100644
index 0000000..0a29e8b
--- /dev/null
+++ b/mlperf_logging/compliance_checker/training_5.1.0/open_llama31_405b.yaml
@@ -0,0 +1,78 @@
+- KEY:
+    NAME: global_batch_size
+    REQ: EXACTLY_ONE
+    POST: >
+        s['global_batch_size'] = v['value']
+
+- KEY:
+    NAME: max_sequence_length
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 8192 "
+
+- KEY:
+    NAME: opt_name
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 'adamw' "
+
+- KEY:
+    NAME: opt_base_learning_rate
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_end_learning_rate
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_learning_rate_decay_steps
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_learning_rate_warmup_steps
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_learning_rate_decay_schedule
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_adamw_beta_1
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_adamw_beta_2
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_adamw_epsilon
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_adamw_weight_decay
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: opt_gradient_clip_norm
+    REQ: EXACTLY_ONE
+
+- KEY:
+    NAME: gradient_accumulation_steps
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] > 0 "
+
+- KEY:
+    NAME: eval_samples
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 5760 "
+
+- KEY:
+    NAME: eval_accuracy
+    REQ: AT_LEAST_ONE
+    CHECK:
+        - "'epoch_num' in v['metadata']"
+    ATLEAST_ONE_CHECK: "(v['value'] <= 5.6) and v['value'] > 0.0"
+
+- KEY:
+    NAME: init_checkpoint_step
+    REQ: EXACTLY_ONE
+    CHECK: " v['value'] == 0 "
+
diff --git a/mlperf_logging/compliance_checker/training_5.1.0/open_llama31_8b.yaml b/mlperf_logging/compliance_checker/training_5.1.0/open_llama31_8b.yaml
new file mode 100644
index 0000000..ff3f204
--- /dev/null
+++ b/mlperf_logging/compliance_checker/training_5.1.0/open_llama31_8b.yaml
@@ -0,0 +1,8 @@
+
+# TODO: Update with official compliance requirements
+- KEY:
+    NAME: eval_accuracy
+    REQ: AT_LEAST_ONE
+    CHECK:
+        - "'epoch_num' in v['metadata']"
+    ATLEAST_ONE_CHECK: "v['value'] < 1.0"
diff --git a/mlperf_logging/mllog/constants.py b/mlperf_logging/mllog/constants.py
index d272c1e..880a814 100644
--- a/mlperf_logging/mllog/constants.py
+++ b/mlperf_logging/mllog/constants.py
@@ -55,6 +55,8 @@
 GNN = "gnn"
 RGAT = "rgat"
 LLAMA31_405B = "llama31_405b"
+LLAMA31_8B = "llama31_8b"
+FLUX1 = "flux1"

 # Constant values - model info
 ADAGRAD = "adagrad"
diff --git a/mlperf_logging/rcp_checker/rcp_checker.py b/mlperf_logging/rcp_checker/rcp_checker.py
index 3806a90..c08f4ea 100644
--- a/mlperf_logging/rcp_checker/rcp_checker.py
+++ b/mlperf_logging/rcp_checker/rcp_checker.py
@@ -32,7 +32,7 @@
         'gnn': 10,
         'rgat': 10,
         'llama2_70b_lora': 10,
-        'flux': 10,
+        'flux1': 10,
         'llama31_405b': 3,
     },
     "hpc": {
@@ -83,7 +83,7 @@ def read_submission_file(result_file, ruleset, use_train_samples):
                     eval_metric = json.loads(eval_accuracy_str)["metadata"]["metric"]
                     eval_score = json.loads(eval_accuracy_str)["value"]
                     stable_diffusion_eval_results[eval_step][eval_metric] = eval_score
-                elif benchmark in {"llama2_70b_lora", "flux", "llama31_405b"} and ("eval_error" in str or "eval_accuracy" in str):
benchmark in {"llama2_70b_lora", "flux1", "llama31_405b"} and ("eval_error" in str or "eval_accuracy" in str): eval_accuracy_str = str conv_epoch = json.loads(eval_accuracy_str)["metadata"]["samples_count"] eval_score = json.loads(eval_accuracy_str)["value"] @@ -210,7 +210,7 @@ def _process_raw_rcp_data(self, raw_rcp_data): ''' processed_rcps = {} for record, record_contents in raw_rcp_data.items(): - conv_unit = "samples to converge" if record_contents['Benchmark'] in ['llama2_70b_lora', 'flux'] else "Epochs to converge" + conv_unit = "samples to converge" if record_contents['Benchmark'] in ['llama2_70b_lora', 'flux1'] else "Epochs to converge" processed_record = {'Benchmark': record_contents['Benchmark'], 'BS': record_contents['BS'], 'Hyperparams': record_contents['Hyperparams'], @@ -441,7 +441,7 @@ def _set_results_scaling(self, scale_factor, results_dir): def _eval_submission_record(self, rcp_record, subm_epochs, results_dir): '''Compare reference and submission convergence.''' - if self.ruleset == "5.0.0" and self.benchmark == "llama31_405b": + if self.ruleset in ["5.0.0", "5.1.0"] and self.benchmark == "llama31_405b": rcp_record['Max Speedup'] = rcp_record['RCP Mean'] / (rcp_record['Min Epochs'] - 46080) subm_epochs.sort() diff --git a/mlperf_logging/rcp_checker/training_5.1.0/rcps_flux.json b/mlperf_logging/rcp_checker/training_5.1.0/rcps_flux1.json similarity index 93% rename from mlperf_logging/rcp_checker/training_5.1.0/rcps_flux.json rename to mlperf_logging/rcp_checker/training_5.1.0/rcps_flux1.json index ba15ef9..3fb9815 100644 --- a/mlperf_logging/rcp_checker/training_5.1.0/rcps_flux.json +++ b/mlperf_logging/rcp_checker/training_5.1.0/rcps_flux1.json @@ -1,6 +1,6 @@ { "flux_ref_1024": { - "Benchmark": "flux", + "Benchmark": "flux1", "Creator": "NVIDIA", "When": "Reference RCPs before v5.1", "Platform": "8xDGX-B200", @@ -21,7 +21,7 @@ ] }, "flux_ref_2048": { - "Benchmark": "flux", + "Benchmark": "flux1", "Creator": "NVIDIA", "When": "Reference RCPs before v5.1", "Platform": "8xDGX-B200", @@ -42,7 +42,7 @@ ] }, "flux_ref_4096": { - "Benchmark": "flux", + "Benchmark": "flux1", "Creator": "NVIDIA", "When": "Reference RCPs before v5.1", "Platform": "8xDGX-B200", diff --git a/mlperf_logging/rcp_checker/training_5.1.0/rcps_llama31_405b.json b/mlperf_logging/rcp_checker/training_5.1.0/rcps_llama31_405b.json new file mode 100644 index 0000000..70adaf5 --- /dev/null +++ b/mlperf_logging/rcp_checker/training_5.1.0/rcps_llama31_405b.json @@ -0,0 +1,106 @@ +{ + "llama31_405b_ref_1008": + { + "Benchmark": "llama31_405b", + "Creator": "NVIDIA", + "When": "Reference RCPs before 5.0 submission", + "Platform": "288xDGX-H100", + "BS": 1008, + "Hyperparams": { + "opt_base_learning_rate": 7e-05, + "opt_learning_rate_warmup_steps": 9143, + "gradient_accumulation_steps": 126 + }, + "Epochs to converge": [ + 324576,324576,324576, + 324576,324576,324576 + ] + }, + "llama31_405b_ref_1152": + { + "Benchmark": "llama31_405b", + "Creator": "NVIDIA", + "When": "Reference RCPs before 5.0 submission", + "Platform": "288xDGX-H100", + "BS": 1152, + "Hyperparams": { + "opt_base_learning_rate": 8e-05, + "opt_learning_rate_warmup_steps": 8000, + "gradient_accumulation_steps": 144 + }, + "Epochs to converge": [ + 322560,322560,322560, + 322560,322560,322560 + ] + }, + + "llama31_405b_ref_2304": + { + "Benchmark": "llama31_405b", + "Creator": "NVIDIA", + "When": "Reference RCPs before 5.0 submission", + "Platform": "288xDGX-H100", + "BS": 2304, + "Hyperparams": { + "opt_base_learning_rate": 16e-05, + 
"opt_learning_rate_warmup_steps": 4000, + "gradient_accumulation_steps": 288 + }, + "Epochs to converge": [ + 368640,368640,368640, + 368640,414720,414720 + ] + }, + "llama31_405b_ref_4608": + { + "Benchmark": "llama31_405b", + "Creator": "NVIDIA", + "When": "Reference RCPs before 5.0 submission", + "Platform": "288xDGX-H100", + "BS": 4608, + "Hyperparams": { + "opt_base_learning_rate": 32e-05, + "opt_learning_rate_warmup_steps": 2000, + "gradient_accumulation_steps": 576 + }, + "Epochs to converge": [ + 460800,460800,506880, + 506880,506880,506880 + ] + }, + "llama31_405b_ref_6912": + { + "Benchmark": "llama31_405b", + "Creator": "NVIDIA", + "When": "Reference RCPs before 5.0 submission", + "Platform": "72xDGX-H100", + "BS": 6912, + "Hyperparams": { + "opt_base_learning_rate": 48e-05, + "opt_learning_rate_warmup_steps": 1334, + "gradient_accumulation_steps": 3456 + }, + "Epochs to converge": [ + 580608,580608,580608, + 628992,628992,628992 + ] + }, + "llama31_405b_ref_9216": + { + "Benchmark": "llama31_405b", + "Creator": "NVIDIA", + "When": "Reference RCPs before 5.0 submission", + "Platform": "288xDGX-H100", + "BS": 9216, + "Hyperparams": { + "opt_base_learning_rate": 64e-05, + "opt_learning_rate_warmup_steps": 1000, + "gradient_accumulation_steps": 1152 + }, + "Epochs to converge": [ + 645120,645120,691200, + 691200,737280,737280 + ] + } + } + \ No newline at end of file diff --git a/mlperf_logging/rcp_checker/training_5.1.0/rcps_llama31_8b.json b/mlperf_logging/rcp_checker/training_5.1.0/rcps_llama31_8b.json new file mode 100644 index 0000000..bed1d1c --- /dev/null +++ b/mlperf_logging/rcp_checker/training_5.1.0/rcps_llama31_8b.json @@ -0,0 +1,25 @@ +{ + + "llama31_8b_ref_X": + { + "Benchmark": "", + "Creator": "", + "When": "", + "Platform": "", + "BS": 0, + "Hyperparams": { + "opt_base_learning_rate": 0, + "opt_epsilon": 0, + "opt_learning_rate_training_steps": 0, + "num_warmup_steps": 0, + "start_warmup_step": 0, + "opt_lamb_beta_1": 0, + "opt_lamb_beta_2": 0, + "opt_lamb_weight_decay_rate": 0, + "gradient_accumulation_steps": 0 + }, + "Epochs to converge": [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + } +} diff --git a/mlperf_logging/rcp_checker/visualization_scripts/rcp_viewer.py b/mlperf_logging/rcp_checker/visualization_scripts/rcp_viewer.py index 1c4b968..cba24d7 100755 --- a/mlperf_logging/rcp_checker/visualization_scripts/rcp_viewer.py +++ b/mlperf_logging/rcp_checker/visualization_scripts/rcp_viewer.py @@ -22,7 +22,7 @@ def main(): parser.add_argument('--usage', type=str, default='training', choices=['training', 'hpc'], help="the WG that produced the benchmark") - parser.add_argument('--version', type=str, default='5.0.0', + parser.add_argument('--version', type=str, default='5.1.0', help='what version of the ruleset') parser.add_argument('--verbose', action='store_true') parser.add_argument('--unpruned', action='store_true', diff --git a/mlperf_logging/result_summarizer/config.yaml b/mlperf_logging/result_summarizer/config.yaml index e0306fa..897d29a 100644 --- a/mlperf_logging/result_summarizer/config.yaml +++ b/mlperf_logging/result_summarizer/config.yaml @@ -94,12 +94,12 @@ columns: llama31_405b: ["Benchmark results (minutes)", "LLM", "C4", "Llama31-405B"] default: [" ", " ", " "] "5.1.0": - bert: ["Benchmark results (minutes)", "NLP", "Wikipedia", "BERT"] dlrm_dcnv2: ["Benchmark results (minutes)", "Recommendation", "1TB Multihot Clickthrough", "DLRM DCNv2"] retinanet: ["Benchmark results (minutes)", "Object detection, light-weight", "OpenImages", 
"RetinaNet"] - flux: ["Benchmark results (minutes)", "Text to image", "CC12M and Coco-2014", "Flux"] + flux1: ["Benchmark results (minutes)", "Text to image", "CC12M and Coco-2014 for eval", "Flux1"] llama2_70b_lora: ["Benchmark results (minutes)", "LLM-Finetune", "SCROLSS Gov Report", "LLama2-70B-LoRA"] rgat: ["Benchmark results (minutes)", "Graph node classification", "IGBH-Full", "R-GAT"] + llama31_8b: ["Benchmark results (minutes)", "Small LLM", "C4", "Llama31-8b"] llama31_405b: ["Benchmark results (minutes)", "LLM", "C4", "Llama31-405B"] default: [" ", " ", " "]