From 81a87cf4decec10d9b5980acd71f971e33318e55 Mon Sep 17 00:00:00 2001
From: Dennis Kobert
Date: Sat, 28 Jun 2025 09:02:14 +0200
Subject: [PATCH] Include graph runtime benchmarks in ci regression run

---
 .../workflows/comment-profiling-changes.yaml | 159 ++++++++++++------
 Cargo.lock                                   |   1 +
 node-graph/interpreted-executor/Cargo.toml   |  13 ++
 .../benches/run_cached_iai.rs                |  27 +++
 .../benches/run_once_iai.rs                  |  21 +++
 .../benches/update_executor_iai.rs           |  23 +++
 6 files changed, 194 insertions(+), 50 deletions(-)
 create mode 100644 node-graph/interpreted-executor/benches/run_cached_iai.rs
 create mode 100644 node-graph/interpreted-executor/benches/run_once_iai.rs
 create mode 100644 node-graph/interpreted-executor/benches/update_executor_iai.rs

diff --git a/.github/workflows/comment-profiling-changes.yaml b/.github/workflows/comment-profiling-changes.yaml
index d83e6d67e4..26e46aac8a 100644
--- a/.github/workflows/comment-profiling-changes.yaml
+++ b/.github/workflows/comment-profiling-changes.yaml
@@ -45,7 +45,15 @@ jobs:
 
       - name: Run baseline benchmarks
         run: |
+          # Compile benchmarks
           cargo bench --bench compile_demo_art_iai -- --save-baseline=master
+
+          # Runtime benchmarks
+          cd node-graph/interpreted-executor
+          cargo bench --bench update_executor_iai -- --save-baseline=master
+          cargo bench --bench run_once_iai -- --save-baseline=master
+          cargo bench --bench run_cached_iai -- --save-baseline=master
+          cd ../..
 
       - name: Checkout PR branch
         run: |
@@ -54,9 +62,31 @@
       - name: Run PR benchmarks
         id: benchmark
         run: |
-          BENCH_OUTPUT=$(cargo bench --bench compile_demo_art_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
-          echo "BENCHMARK_OUTPUT<<EOF" >> $GITHUB_OUTPUT
-          echo "$BENCH_OUTPUT" >> $GITHUB_OUTPUT
+          # Compile benchmarks
+          COMPILE_OUTPUT=$(cargo bench --bench compile_demo_art_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
+
+          # Runtime benchmarks
+          cd node-graph/interpreted-executor
+          UPDATE_OUTPUT=$(cargo bench --bench update_executor_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
+          RUN_ONCE_OUTPUT=$(cargo bench --bench run_once_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
+          RUN_CACHED_OUTPUT=$(cargo bench --bench run_cached_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
+          cd ../..
+
+          # Store outputs
+          echo "COMPILE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
+          echo "$COMPILE_OUTPUT" >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT
+
+          echo "UPDATE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
+          echo "$UPDATE_OUTPUT" >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT
+
+          echo "RUN_ONCE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
+          echo "$RUN_ONCE_OUTPUT" >> $GITHUB_OUTPUT
+          echo "EOF" >> $GITHUB_OUTPUT
+
+          echo "RUN_CACHED_OUTPUT<<EOF" >> $GITHUB_OUTPUT
+          echo "$RUN_CACHED_OUTPUT" >> $GITHUB_OUTPUT
           echo "EOF" >> $GITHUB_OUTPUT
 
       - name: Make old comments collapsed by default
@@ -89,7 +119,11 @@
         with:
           github-token: ${{secrets.GITHUB_TOKEN}}
           script: |
-            const benchmarkOutput = JSON.parse(`${{ steps.benchmark.outputs.BENCHMARK_OUTPUT }}`);
+            const compileOutput = JSON.parse(`${{ steps.benchmark.outputs.COMPILE_OUTPUT }}`);
+            const updateOutput = JSON.parse(`${{ steps.benchmark.outputs.UPDATE_OUTPUT }}`);
+            const runOnceOutput = JSON.parse(`${{ steps.benchmark.outputs.RUN_ONCE_OUTPUT }}`);
+            const runCachedOutput = JSON.parse(`${{ steps.benchmark.outputs.RUN_CACHED_OUTPUT }}`);
+
             let significantChanges = false;
             let commentBody = "";
@@ -110,58 +144,83 @@ jobs:
               return str.padStart(len);
             }
 
-            for (const benchmark of benchmarkOutput) {
-              if (benchmark.callgrind_summary && benchmark.callgrind_summary.summaries) {
-                const summary = benchmark.callgrind_summary.summaries[0];
-                const irDiff = summary.events.Ir;
-
-                if (irDiff.diff_pct !== null) {
-                  const changePercentage = formatPercentage(irDiff.diff_pct);
-                  const color = irDiff.diff_pct > 0 ? "red" : "lime";
-
-                  commentBody += "---\n\n";
-                  commentBody += `${benchmark.module_path} ${benchmark.id}:${benchmark.details}\n`;
-                  commentBody += `Instructions: \`${formatNumber(irDiff.old)}\` (master) -> \`${formatNumber(irDiff.new)}\` (HEAD) : `;
-                  commentBody += `$$\\color{${color}}${changePercentage.replace("%", "\\\\%")}$$\n\n`;
-
-                  commentBody += "<details>\n<summary>Detailed metrics</summary>\n\n```\n";
-                  commentBody += `Baselines: master| HEAD\n`;
+            function processBenchmarkOutput(benchmarkOutput, sectionTitle) {
+              let sectionBody = "";
+              let hasResults = false;
+
+              for (const benchmark of benchmarkOutput) {
+                if (benchmark.callgrind_summary && benchmark.callgrind_summary.summaries) {
+                  const summary = benchmark.callgrind_summary.summaries[0];
+                  const irDiff = summary.events.Ir;
 
-                  for (const [eventKind, costsDiff] of Object.entries(summary.events)) {
-                    if (costsDiff.diff_pct !== null) {
-                      const changePercentage = formatPercentage(costsDiff.diff_pct);
-                      const line = `${padRight(eventKind, 20)} ${padLeft(formatNumber(costsDiff.old), 11)}|${padLeft(formatNumber(costsDiff.new), 11)} ${padLeft(changePercentage, 15)}`;
-                      commentBody += `${line}\n`;
+                  if (irDiff.diff_pct !== null) {
+                    hasResults = true;
+                    const changePercentage = formatPercentage(irDiff.diff_pct);
+                    const color = irDiff.diff_pct > 0 ? "red" : "lime";
+
+                    sectionBody += `**${benchmark.module_path} ${benchmark.id}:${benchmark.details}**\n`;
+                    sectionBody += `Instructions: \`${formatNumber(irDiff.old)}\` (master) → \`${formatNumber(irDiff.new)}\` (HEAD) : `;
+                    sectionBody += `$$\\color{${color}}${changePercentage.replace("%", "\\\\%")}$$\n\n`;
+
+                    sectionBody += "<details>\n<summary>Detailed metrics</summary>\n\n```\n";
+                    sectionBody += `Baselines: master| HEAD\n`;
+
+                    for (const [eventKind, costsDiff] of Object.entries(summary.events)) {
+                      if (costsDiff.diff_pct !== null) {
+                        const changePercentage = formatPercentage(costsDiff.diff_pct);
+                        const line = `${padRight(eventKind, 20)} ${padLeft(formatNumber(costsDiff.old), 11)}|${padLeft(formatNumber(costsDiff.new), 11)} ${padLeft(changePercentage, 15)}`;
+                        sectionBody += `${line}\n`;
+                      }
+                    }
+
+                    sectionBody += "```\n</details>\n\n";
+
+                    if (Math.abs(irDiff.diff_pct) > 5) {
+                      significantChanges = true;
                     }
-                  }
-
-                  commentBody += "```\n</details>\n\n";
-
-                  if (Math.abs(irDiff.diff_pct) > 5) {
-                    significantChanges = true;
-                  }
                 }
               }
             }
+
+              if (hasResults) {
+                return `## ${sectionTitle}\n\n${sectionBody}`;
+              }
+              return "";
             }
-            const output = `
-            <details>
-            <summary>
-            Performance Benchmark Results
-            </summary>
-            ${commentBody}
-
-            </details>
-            `;
-
-            if (significantChanges) {
-              github.rest.issues.createComment({
-                issue_number: context.issue.number,
-                owner: context.repo.owner,
-                repo: context.repo.repo,
-                body: output
-              });
+            // Process each benchmark category
+            const compileSection = processBenchmarkOutput(compileOutput, "🔧 Compile Time");
+            const updateSection = processBenchmarkOutput(updateOutput, "🔄 Executor Update");
+            const runOnceSection = processBenchmarkOutput(runOnceOutput, "🚀 Cold Execution");
+            const runCachedSection = processBenchmarkOutput(runCachedOutput, "⚡ Cached Execution");
+
+            // Combine all sections
+            commentBody = [compileSection, updateSection, runOnceSection, runCachedSection]
+              .filter(section => section.length > 0)
+              .join("---\n\n");
+
+            if (commentBody.length > 0) {
+              const output = `
+              <details>
+              <summary>
+              Performance Benchmark Results
+              </summary>
+              ${commentBody}
+
+              </details>
+              `;
+
+              if (significantChanges) {
+                github.rest.issues.createComment({
+                  issue_number: context.issue.number,
+                  owner: context.repo.owner,
+                  repo: context.repo.repo,
+                  body: output
+                });
+              } else {
+                console.log("No significant performance changes detected. Skipping comment.");
+                console.log(output);
+              }
             } else {
-              console.log("No significant performance changes detected. Skipping comment.");
-              console.log(output);
+              console.log("No benchmark results to display.");
             }
diff --git a/Cargo.lock b/Cargo.lock
index c7ae0cdb13..f9b03010fe 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2991,6 +2991,7 @@ dependencies = [
  "graphene-core",
  "graphene-path-bool",
  "graphene-std",
+ "iai-callgrind",
  "log",
  "once_cell",
  "serde",
diff --git a/node-graph/interpreted-executor/Cargo.toml b/node-graph/interpreted-executor/Cargo.toml
index 119b8affb6..1a6185c88a 100644
--- a/node-graph/interpreted-executor/Cargo.toml
+++ b/node-graph/interpreted-executor/Cargo.toml
@@ -28,6 +28,7 @@ serde = { workspace = true }
 # Workspace dependencies
 graph-craft = { workspace = true, features = ["loading"] }
 criterion = { workspace = true }
+iai-callgrind = { workspace = true }
 
 # Benchmarks
 [[bench]]
@@ -42,3 +43,15 @@ harness = false
 name = "run_cached"
 harness = false
 
+[[bench]]
+name = "update_executor_iai"
+harness = false
+
+[[bench]]
+name = "run_once_iai"
+harness = false
+
+[[bench]]
+name = "run_cached_iai"
+harness = false
+
diff --git a/node-graph/interpreted-executor/benches/run_cached_iai.rs b/node-graph/interpreted-executor/benches/run_cached_iai.rs
new file mode 100644
index 0000000000..14aec4ae26
--- /dev/null
+++ b/node-graph/interpreted-executor/benches/run_cached_iai.rs
@@ -0,0 +1,27 @@
+use graph_craft::util::*;
+use graphene_std::Context;
+use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
+use interpreted_executor::dynamic_executor::DynamicExecutor;
+
+fn setup_run_cached(name: &str) -> DynamicExecutor {
+    let network = load_from_name(name);
+    let proto_network = compile(network);
+    let executor = futures::executor::block_on(DynamicExecutor::new(proto_network)).unwrap();
+
+    // Warm up the cache by running once
+    let context: Context = None;
+    let _ = futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), context.clone()));
+
+    executor
+}
+
+#[library_benchmark]
+#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_cached)]
+pub fn run_cached(executor: DynamicExecutor) {
+    let context: Context = None;
+    black_box(futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context))).unwrap());
+}
+
+library_benchmark_group!(name = run_cached_group; benchmarks = run_cached);
+
+main!(library_benchmark_groups = run_cached_group);
diff --git a/node-graph/interpreted-executor/benches/run_once_iai.rs b/node-graph/interpreted-executor/benches/run_once_iai.rs
new file mode 100644
index 0000000000..318c533875
--- /dev/null
+++ b/node-graph/interpreted-executor/benches/run_once_iai.rs
@@ -0,0 +1,21 @@
+use graph_craft::util::*;
+use graphene_std::Context;
+use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
+use interpreted_executor::dynamic_executor::DynamicExecutor;
+
+fn setup_run_once(name: &str) -> DynamicExecutor {
+    let network = load_from_name(name);
+    let proto_network = compile(network);
+    futures::executor::block_on(DynamicExecutor::new(proto_network)).unwrap()
+}
+
+#[library_benchmark]
+#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_once)]
+pub fn run_once(executor: DynamicExecutor) {
+    let context: Context = None;
+    black_box(futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context))).unwrap());
+}
+
+library_benchmark_group!(name = run_once_group; benchmarks = run_once);
+
+main!(library_benchmark_groups = run_once_group);
diff --git a/node-graph/interpreted-executor/benches/update_executor_iai.rs b/node-graph/interpreted-executor/benches/update_executor_iai.rs
new file mode 100644
index 0000000000..949366a439
--- /dev/null
+++ b/node-graph/interpreted-executor/benches/update_executor_iai.rs
@@ -0,0 +1,23 @@
+use graph_craft::proto::ProtoNetwork;
+use graph_craft::util::*;
+use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
+use interpreted_executor::dynamic_executor::DynamicExecutor;
+
+fn setup_update_executor(name: &str) -> (DynamicExecutor, ProtoNetwork) {
+    let network = load_from_name(name);
+    let proto_network = compile(network);
+    let empty = ProtoNetwork::default();
+    let executor = futures::executor::block_on(DynamicExecutor::new(empty)).unwrap();
+    (executor, proto_network)
+}
+
+#[library_benchmark]
+#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_update_executor)]
+pub fn update_executor(setup: (DynamicExecutor, ProtoNetwork)) {
+    let (mut executor, network) = setup;
+    let _ = black_box(futures::executor::block_on(executor.update(black_box(network))));
+}
+
+library_benchmark_group!(name = update_group; benchmarks = update_executor);
+
+main!(library_benchmark_groups = update_group);
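Note (not part of the patch): a sketch of how the new runtime benchmarks could be exercised locally, mirroring the workflow's baseline/compare steps. It assumes Valgrind is available and that an iai-callgrind-runner matching the iai-callgrind crate version is installed; those prerequisites come from iai-callgrind itself, not from this change.

    # assumed prerequisite: runner version must match the iai-callgrind crate
    cargo install iai-callgrind-runner
    cd node-graph/interpreted-executor
    # record a baseline, then compare a later run against it (same flags the CI job uses)
    cargo bench --bench run_once_iai -- --save-baseline=master
    cargo bench --bench run_once_iai -- --baseline=master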