Push benchmark artifacts for auto-validation (#2157)
* Upload benchmark artifacts for auto-validation
agunapal committed Mar 7, 2023
1 parent 86d4400 commit fd8f1b3
Showing 3 changed files with 159 additions and 0 deletions.
15 changes: 15 additions & 0 deletions .github/workflows/benchmark_nightly_cpu.yml
@@ -43,6 +43,21 @@ jobs:
        with:
          name: nightly cpu artifact
          path: /tmp/ts_benchmark
      - name: Download benchmark artifacts for auto validation
        uses: dawidd6/action-download-artifact@v2
        with:
          workflow: ${{ github.event.workflow_run.workflow_id }}
          workflow_conclusion: success
          if_no_artifact_found: ignore
          path: /tmp/ts_artifacts
          name: cpu_benchmark_validation
      - name: Update benchmark artifacts for auto validation
        run: python benchmarks/utils/update_artifacts.py --output /tmp/ts_artifacts/cpu_benchmark_validation
      - name: Upload the updated benchmark artifacts for auto validation
        uses: actions/upload-artifact@v2
        with:
          name: cpu_benchmark_validation
          path: /tmp/ts_artifacts
      - name: Open issue on failure
        if: ${{ failure() && github.event_name == 'schedule' }}
        uses: dacbd/create-issue-action@v1
15 changes: 15 additions & 0 deletions .github/workflows/benchmark_nightly_gpu.yml
@@ -43,3 +43,18 @@ jobs:
        with:
          name: nightly gpu artifact
          path: /tmp/ts_benchmark
      - name: Download benchmark artifacts for auto validation
        uses: dawidd6/action-download-artifact@v2
        with:
          workflow: ${{ github.event.workflow_run.workflow_id }}
          workflow_conclusion: success
          if_no_artifact_found: ignore
          path: /tmp/ts_artifacts
          name: gpu_benchmark_validation
      - name: Update benchmark artifacts for auto validation
        run: python benchmarks/utils/update_artifacts.py --output /tmp/ts_artifacts/gpu_benchmark_validation
      - name: Upload the updated benchmark artifacts for auto validation
        uses: actions/upload-artifact@v2
        with:
          name: gpu_benchmark_validation
          path: /tmp/ts_artifacts
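
Both workflows invoke the new script with --output, while the script (added below) declares its option as --output_dir; this still resolves because argparse accepts unambiguous option-name prefixes by default. A minimal standalone sketch of that behavior, reusing the path from the CPU workflow:

import argparse

# ArgumentParser defaults to allow_abbrev=True, so the unambiguous prefix
# --output matches the declared --output_dir option.
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", default="/tmp/ts_artifacts")
args = parser.parse_args(["--output", "/tmp/ts_artifacts/cpu_benchmark_validation"])
print(args.output_dir)  # /tmp/ts_artifacts/cpu_benchmark_validation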
129 changes: 129 additions & 0 deletions benchmarks/utils/update_artifacts.py
@@ -0,0 +1,129 @@
import argparse
import os
import shutil

BENCHMARK_REPORT_PATH = "/tmp/ts_benchmark"
BENCHMARK_ARTIFACTS_PATH = "/tmp/ts_artifacts"
BENCHMARK_REPORT_FILE = "ab_report.csv"
WINDOW_LEN = 8
WINDOW_START = 0

################################################################
# This is an example directory structure for the artifacts.
# Here, report_id 1 is missing: the new report would be added under 1
# and report_id 2 would be removed.
# .
# └── tmp/
#     └── ts_artifacts/
#         ├── 0/
#         │   ├── eager_mode_mnist_w4_b1/
#         │   │   └── ab_report.csv
#         │   ├── eager_mode_mnist_w4_b2/
#         │   │   └── ab_report.csv
#         │   └── ...
#         ├── 2/
#         │   ├── eager_mode_mnist_w4_b1/
#         │   │   └── ab_report.csv
#         │   ├── eager_mode_mnist_w4_b2/
#         │   │   └── ab_report.csv
#         │   └── ...
#         ├── 3/
#         │   ├── eager_mode_mnist_w4_b1/
#         │   │   └── ab_report.csv
#         │   ├── eager_mode_mnist_w4_b2/
#         │   │   └── ab_report.csv
#         │   └── ...
#         ├── ...
#         └── 6/
#             ├── eager_mode_mnist_w4_b1/
#             │   └── ab_report.csv
#             ├── eager_mode_mnist_w4_b2/
#             │   └── ab_report.csv
#             └── ...
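#
# With WINDOW_LEN = 8, at most WINDOW_LEN - 1 = 7 report_ids are kept at a
# time. While fewer than 7 ids exist, each run adds a new one; once 7 exist,
# each run fills the missing id in the cycle and removes the id that follows
# it (modulo WINDOW_LEN), which is the oldest remaining report.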
################################################################


# Copy BENCHMARK_REPORT_FILE to artifacts
def copy_benchmark_reports(input, output):

    for dir in os.listdir(input):
        if os.path.isdir(os.path.join(input, dir)):
            new_dir = os.path.join(output, dir)
            os.makedirs(new_dir, exist_ok=True)
            shutil.copy(os.path.join(input, dir, BENCHMARK_REPORT_FILE), new_dir)


# Save the new report and delete the oldest report
def update_new_report(input_dir, output_dir, add_report_id, del_report_id):

    # Add the new report
    new_dir = os.path.join(output_dir, str(add_report_id))
    print("Creating artifacts ", new_dir)
    copy_benchmark_reports(input_dir, new_dir)

    # Remove the old report
    if isinstance(del_report_id, int):
        rm_dir = os.path.join(output_dir, str(del_report_id % WINDOW_LEN))
        print("Removing artifacts ", rm_dir)
        shutil.rmtree(rm_dir, ignore_errors=True)


# Create artifacts for a rolling window of WINDOW_LEN-1 reports
def update_artifacts(input_dir, output_dir):

    # Create a directory where artifacts will be stored
    os.makedirs(output_dir, exist_ok=True)

    # Get the sorted list of existing report_ids
    list_dirs = sorted(map(lambda x: int(x), os.listdir(output_dir)))
    num_reports = len(list_dirs)

    # Initial case: when there are fewer than WINDOW_LEN-1 reports
    if num_reports < WINDOW_LEN - 1:
        add_report_id, del_report_id = num_reports, None
        update_new_report(input_dir, output_dir, add_report_id, del_report_id)
        return

    # When there are WINDOW_LEN-1 reports, add the new report
    # and remove the oldest one
    for i, report_id in enumerate(list_dirs):

        if i != report_id or (i + 1 == WINDOW_LEN - 1):
            if i != report_id:
                # A report_id is missing from the sequence: fill the gap at i
                add_report_id, del_report_id = i, report_id
            else:
                # report_id WINDOW_LEN-1 is the missing one
                add_report_id, del_report_id = i + 1, (i + 2) % WINDOW_LEN
            update_new_report(input_dir, output_dir, add_report_id, del_report_id)
            break


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--input_dir",
        nargs="?",
        help="directory containing the model benchmark result subdirectories",
        const=BENCHMARK_REPORT_PATH,
        type=str,
        default=BENCHMARK_REPORT_PATH,
    )

    parser.add_argument(
        "--output_dir",
        nargs="?",
        help="directory where the model benchmark artifacts are stored",
        const=BENCHMARK_ARTIFACTS_PATH,
        type=str,
        default=BENCHMARK_ARTIFACTS_PATH,
    )

    args = parser.parse_args()

    update_artifacts(args.input_dir, args.output_dir)


if __name__ == "__main__":
    main()
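
For reference, the rolling-window behavior can be exercised with a short local dry run. The sketch below is illustrative only: it assumes it is executed from benchmarks/utils so that update_artifacts.py is importable, and it substitutes temporary directories and a made-up ab_report.csv for the real nightly output.

import csv
import os
import tempfile

from update_artifacts import BENCHMARK_REPORT_FILE, update_artifacts

# Build a fake benchmark report in the layout the script expects:
# <input_dir>/<model_config>/ab_report.csv
input_dir = tempfile.mkdtemp(prefix="ts_benchmark_")
config_dir = os.path.join(input_dir, "eager_mode_mnist_w4_b1")
os.makedirs(config_dir)
with open(os.path.join(config_dir, BENCHMARK_REPORT_FILE), "w", newline="") as f:
    csv.writer(f).writerows([["metric", "value"], ["latency_mean_ms", "12.3"]])

# Each call adds one report_id; once WINDOW_LEN - 1 ids exist, every further
# call also evicts the oldest id.
output_dir = tempfile.mkdtemp(prefix="ts_artifacts_")
for _ in range(3):
    update_artifacts(input_dir, output_dir)
print(sorted(os.listdir(output_dir)))  # expected: ['0', '1', '2']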
