
Include graph runtime benchmarks in CI regression run #2780


Draft · wants to merge 1 commit into master
159 changes: 109 additions & 50 deletions .github/workflows/comment-profiling-changes.yaml
@@ -45,7 +45,15 @@ jobs:

- name: Run baseline benchmarks
run: |
# Compile benchmarks
cargo bench --bench compile_demo_art_iai -- --save-baseline=master

# Runtime benchmarks
cd node-graph/interpreted-executor
cargo bench --bench update_executor_iai -- --save-baseline=master
cargo bench --bench run_once_iai -- --save-baseline=master
cargo bench --bench run_cached_iai -- --save-baseline=master
cd ../..

- name: Checkout PR branch
run: |
@@ -54,9 +62,31 @@
- name: Run PR benchmarks
id: benchmark
run: |
BENCH_OUTPUT=$(cargo bench --bench compile_demo_art_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
echo "BENCHMARK_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$BENCH_OUTPUT" >> $GITHUB_OUTPUT
# Compile benchmarks
COMPILE_OUTPUT=$(cargo bench --bench compile_demo_art_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')

# Runtime benchmarks
cd node-graph/interpreted-executor
UPDATE_OUTPUT=$(cargo bench --bench update_executor_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
RUN_ONCE_OUTPUT=$(cargo bench --bench run_once_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
RUN_CACHED_OUTPUT=$(cargo bench --bench run_cached_iai -- --baseline=master --output-format=json | jq -sc | sed 's/\\"//g')
cd ../..

# Store outputs
echo "COMPILE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$COMPILE_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT

echo "UPDATE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$UPDATE_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT

echo "RUN_ONCE_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$RUN_ONCE_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT

echo "RUN_CACHED_OUTPUT<<EOF" >> $GITHUB_OUTPUT
echo "$RUN_CACHED_OUTPUT" >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT

- name: Make old comments collapsed by default
@@ -89,7 +119,11 @@ jobs:
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
const benchmarkOutput = JSON.parse(`${{ steps.benchmark.outputs.BENCHMARK_OUTPUT }}`);
const compileOutput = JSON.parse(`${{ steps.benchmark.outputs.COMPILE_OUTPUT }}`);
const updateOutput = JSON.parse(`${{ steps.benchmark.outputs.UPDATE_OUTPUT }}`);
const runOnceOutput = JSON.parse(`${{ steps.benchmark.outputs.RUN_ONCE_OUTPUT }}`);
const runCachedOutput = JSON.parse(`${{ steps.benchmark.outputs.RUN_CACHED_OUTPUT }}`);

let significantChanges = false;
let commentBody = "";

@@ -110,58 +144,83 @@
return str.padStart(len);
}

for (const benchmark of benchmarkOutput) {
if (benchmark.callgrind_summary && benchmark.callgrind_summary.summaries) {
const summary = benchmark.callgrind_summary.summaries[0];
const irDiff = summary.events.Ir;

if (irDiff.diff_pct !== null) {
const changePercentage = formatPercentage(irDiff.diff_pct);
const color = irDiff.diff_pct > 0 ? "red" : "lime";

commentBody += "---\n\n";
commentBody += `${benchmark.module_path} ${benchmark.id}:${benchmark.details}\n`;
commentBody += `Instructions: \`${formatNumber(irDiff.old)}\` (master) -> \`${formatNumber(irDiff.new)}\` (HEAD) : `;
commentBody += `$$\\color{${color}}${changePercentage.replace("%", "\\\\%")}$$\n\n`;

commentBody += "<details>\n<summary>Detailed metrics</summary>\n\n```\n";
commentBody += `Baselines: master| HEAD\n`;
function processBenchmarkOutput(benchmarkOutput, sectionTitle) {
let sectionBody = "";
let hasResults = false;

for (const benchmark of benchmarkOutput) {
if (benchmark.callgrind_summary && benchmark.callgrind_summary.summaries) {
const summary = benchmark.callgrind_summary.summaries[0];
const irDiff = summary.events.Ir;

for (const [eventKind, costsDiff] of Object.entries(summary.events)) {
if (costsDiff.diff_pct !== null) {
const changePercentage = formatPercentage(costsDiff.diff_pct);
const line = `${padRight(eventKind, 20)} ${padLeft(formatNumber(costsDiff.old), 11)}|${padLeft(formatNumber(costsDiff.new), 11)} ${padLeft(changePercentage, 15)}`;
commentBody += `${line}\n`;
if (irDiff.diff_pct !== null) {
hasResults = true;
const changePercentage = formatPercentage(irDiff.diff_pct);
const color = irDiff.diff_pct > 0 ? "red" : "lime";

sectionBody += `**${benchmark.module_path} ${benchmark.id}:${benchmark.details}**\n`;
sectionBody += `Instructions: \`${formatNumber(irDiff.old)}\` (master) → \`${formatNumber(irDiff.new)}\` (HEAD) : `;
sectionBody += `$$\\color{${color}}${changePercentage.replace("%", "\\\\%")}$$\n\n`;

sectionBody += "<details>\n<summary>Detailed metrics</summary>\n\n```\n";
sectionBody += `Baselines: master| HEAD\n`;

for (const [eventKind, costsDiff] of Object.entries(summary.events)) {
if (costsDiff.diff_pct !== null) {
const changePercentage = formatPercentage(costsDiff.diff_pct);
const line = `${padRight(eventKind, 20)} ${padLeft(formatNumber(costsDiff.old), 11)}|${padLeft(formatNumber(costsDiff.new), 11)} ${padLeft(changePercentage, 15)}`;
sectionBody += `${line}\n`;
}
}

sectionBody += "```\n</details>\n\n";

if (Math.abs(irDiff.diff_pct) > 5) {
significantChanges = true;
}
}

commentBody += "```\n</details>\n\n";

if (Math.abs(irDiff.diff_pct) > 5) {
significantChanges = true;
}
}
}

if (hasResults) {
return `## ${sectionTitle}\n\n${sectionBody}`;
}
return "";
}

const output = `
<details open>

<summary>Performance Benchmark Results</summary>

${commentBody}

</details>
`;

if (significantChanges) {
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: output
});
// Process each benchmark category
const compileSection = processBenchmarkOutput(compileOutput, "🔧 Compile Time");
const updateSection = processBenchmarkOutput(updateOutput, "🔄 Executor Update");
const runOnceSection = processBenchmarkOutput(runOnceOutput, "🚀 Cold Execution");
const runCachedSection = processBenchmarkOutput(runCachedOutput, "⚡ Cached Execution");

// Combine all sections
commentBody = [compileSection, updateSection, runOnceSection, runCachedSection]
.filter(section => section.length > 0)
.join("---\n\n");

if (commentBody.length > 0) {
const output = `
<details open>

<summary>Performance Benchmark Results</summary>

${commentBody}

</details>
`;

if (significantChanges) {
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: output
});
} else {
console.log("No significant performance changes detected. Skipping comment.");
console.log(output);
}
} else {
console.log("No significant performance changes detected. Skipping comment.");
console.log(output);
console.log("No benchmark results to display.");
}
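
A note on the output plumbing in the workflow above: iai-callgrind with `--output-format=json` emits one JSON object per benchmark, so each `jq -sc` call slurps those objects into a single compact array, and the `sed 's/\\"//g'` strips escaped quotes so the result survives interpolation into the `JSON.parse(` template literal in the github-script step. Multiline values also have to be written to `$GITHUB_OUTPUT` with the `NAME<<EOF ... EOF` delimiter syntax, as the step does. A minimal local sketch of the same extraction, assuming `jq` is installed and a `master` baseline has already been saved as in the workflow (the `.callgrind_summary.summaries[0].events.Ir` path matches what the comment script reads):

cd node-graph/interpreted-executor
# One JSON object per benchmark -> slurp into one compact array, as the workflow does.
cargo bench --bench run_once_iai -- --baseline=master --output-format=json | jq -sc '.' > out.json
# Pull out the instruction-count change (percent) that the comment script keys on.
jq '.[].callgrind_summary.summaries[0].events.Ir.diff_pct' out.json
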
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

13 changes: 13 additions & 0 deletions node-graph/interpreted-executor/Cargo.toml
@@ -28,6 +28,7 @@ serde = { workspace = true }
# Workspace dependencies
graph-craft = { workspace = true, features = ["loading"] }
criterion = { workspace = true }
iai-callgrind = { workspace = true }

# Benchmarks
[[bench]]
@@ -42,3 +43,15 @@ harness = false
name = "run_cached"
harness = false

[[bench]]
name = "update_executor_iai"
harness = false

[[bench]]
name = "run_once_iai"
harness = false

[[bench]]
name = "run_cached_iai"
harness = false
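
Worth noting for anyone running these locally: the three new `*_iai` benches use the iai-callgrind harness, which, as a general property of iai-callgrind rather than anything this diff configures, needs Valgrind available plus an `iai-callgrind-runner` binary whose version matches the `iai-callgrind` crate. A hedged sketch of the local setup:

sudo apt-get install valgrind        # assumption: Debian/Ubuntu-style host
cargo install iai-callgrind-runner   # runner version must match the iai-callgrind crate
cd node-graph/interpreted-executor
cargo bench --bench run_cached_iai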

27 changes: 27 additions & 0 deletions node-graph/interpreted-executor/benches/run_cached_iai.rs
@@ -0,0 +1,27 @@
use graph_craft::util::*;
use graphene_std::Context;
use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
use interpreted_executor::dynamic_executor::DynamicExecutor;

fn setup_run_cached(name: &str) -> DynamicExecutor {
let network = load_from_name(name);
let proto_network = compile(network);
let executor = futures::executor::block_on(DynamicExecutor::new(proto_network)).unwrap();

// Warm up the cache by running once
let context: Context = None;
let _ = futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), context.clone()));

executor
}

#[library_benchmark]
#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_cached)]
pub fn run_cached(executor: DynamicExecutor) {
let context: Context = None;
black_box(futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context))).unwrap());
}

library_benchmark_group!(name = run_cached_group; benchmarks = run_cached);

main!(library_benchmark_groups = run_cached_group);
21 changes: 21 additions & 0 deletions node-graph/interpreted-executor/benches/run_once_iai.rs
@@ -0,0 +1,21 @@
use graph_craft::util::*;
use graphene_std::Context;
use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
use interpreted_executor::dynamic_executor::DynamicExecutor;

fn setup_run_once(name: &str) -> DynamicExecutor {
let network = load_from_name(name);
let proto_network = compile(network);
futures::executor::block_on(DynamicExecutor::new(proto_network)).unwrap()
}

#[library_benchmark]
#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_run_once)]
pub fn run_once(executor: DynamicExecutor) {
let context: Context = None;
black_box(futures::executor::block_on(executor.tree().eval_tagged_value(executor.output(), black_box(context))).unwrap());
}

library_benchmark_group!(name = run_once_group; benchmarks = run_once);

main!(library_benchmark_groups = run_once_group);
23 changes: 23 additions & 0 deletions node-graph/interpreted-executor/benches/update_executor_iai.rs
@@ -0,0 +1,23 @@
use graph_craft::proto::ProtoNetwork;
use graph_craft::util::*;
use iai_callgrind::{black_box, library_benchmark, library_benchmark_group, main};
use interpreted_executor::dynamic_executor::DynamicExecutor;

fn setup_update_executor(name: &str) -> (DynamicExecutor, ProtoNetwork) {
let network = load_from_name(name);
let proto_network = compile(network);
let empty = ProtoNetwork::default();
let executor = futures::executor::block_on(DynamicExecutor::new(empty)).unwrap();
(executor, proto_network)
}

#[library_benchmark]
#[benches::with_setup(args = ["isometric-fountain", "painted-dreams", "procedural-string-lights", "parametric-dunescape", "red-dress", "valley-of-spires"], setup = setup_update_executor)]
pub fn update_executor(setup: (DynamicExecutor, ProtoNetwork)) {
let (mut executor, network) = setup;
let _ = black_box(futures::executor::block_on(executor.update(black_box(network))));
}

library_benchmark_group!(name = update_group; benchmarks = update_executor);

main!(library_benchmark_groups = update_group);
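
To reproduce the CI comparison end to end, the same save-then-compare flow from the workflow applies locally: record a baseline on master, switch to the PR branch, and rerun against it. A minimal sketch (the branch name is hypothetical):

# On master: record the baseline the workflow calls "master".
git checkout master
cd node-graph/interpreted-executor
cargo bench --bench update_executor_iai -- --save-baseline=master
# On the PR branch: compare against that baseline.
git checkout my-feature-branch   # hypothetical branch name
cargo bench --bench update_executor_iai -- --baseline=master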