From 79bdfd5a18a415aaa328cfa70156b4a2c099d00b Mon Sep 17 00:00:00 2001 From: Tejas Badadare Date: Mon, 7 Jul 2025 20:26:19 -0700 Subject: [PATCH 1/5] use AnyPool for generic SQL cxn, add postgres migrations --- apps/fortuna/Cargo.toml | 7 ++- ...07000000_postgres_complete_schema.down.sql | 3 ++ ...0707000000_postgres_complete_schema.up.sql | 42 ++++++++++++++++ .../20250502164500_init.down.sql | 0 .../20250502164500_init.up.sql | 0 .../20250521203448_gas.down.sql | 0 .../20250521203448_gas.up.sql | 0 ...7_add_indices_for_advanced_search.down.sql | 0 ...757_add_indices_for_advanced_search.up.sql | 0 ...0605165549_re-add_tx_hash_indices.down.sql | 0 ...250605165549_re-add_tx_hash_indices.up.sql | 0 apps/fortuna/src/api.rs | 10 ++++ apps/fortuna/src/api/explorer.rs | 2 +- apps/fortuna/src/history.rs | 50 ++++++++++--------- 14 files changed, 89 insertions(+), 25 deletions(-) create mode 100644 apps/fortuna/migrations/20250707000000_postgres_complete_schema.down.sql create mode 100644 apps/fortuna/migrations/20250707000000_postgres_complete_schema.up.sql rename apps/fortuna/{migrations => sqlite_migrations}/20250502164500_init.down.sql (100%) rename apps/fortuna/{migrations => sqlite_migrations}/20250502164500_init.up.sql (100%) rename apps/fortuna/{migrations => sqlite_migrations}/20250521203448_gas.down.sql (100%) rename apps/fortuna/{migrations => sqlite_migrations}/20250521203448_gas.up.sql (100%) rename apps/fortuna/{migrations => sqlite_migrations}/20250605004757_add_indices_for_advanced_search.down.sql (100%) rename apps/fortuna/{migrations => sqlite_migrations}/20250605004757_add_indices_for_advanced_search.up.sql (100%) rename apps/fortuna/{migrations => sqlite_migrations}/20250605165549_re-add_tx_hash_indices.down.sql (100%) rename apps/fortuna/{migrations => sqlite_migrations}/20250605165549_re-add_tx_hash_indices.up.sql (100%) diff --git a/apps/fortuna/Cargo.toml b/apps/fortuna/Cargo.toml index 80d5895dc6..097cb5cf70 100644 --- a/apps/fortuna/Cargo.toml +++ b/apps/fortuna/Cargo.toml @@ -46,7 +46,12 @@ chrono = { version = "0.4.38", features = [ backoff = { version = "0.4.0", features = ["futures", "tokio"] } thiserror = "1.0.61" futures-locks = "0.7.1" -sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite", "chrono"] } +sqlx = { version = "0.8", features = [ + "runtime-tokio", + "sqlite", + "postgres", + "chrono", +] } num-traits = "0.2.19" [dev-dependencies] diff --git a/apps/fortuna/migrations/20250707000000_postgres_complete_schema.down.sql b/apps/fortuna/migrations/20250707000000_postgres_complete_schema.down.sql new file mode 100644 index 0000000000..f6390ab56b --- /dev/null +++ b/apps/fortuna/migrations/20250707000000_postgres_complete_schema.down.sql @@ -0,0 +1,3 @@ +-- PostgreSQL migration rollback - drops all tables and indexes created in the up migration + +DROP TABLE IF EXISTS request; diff --git a/apps/fortuna/migrations/20250707000000_postgres_complete_schema.up.sql b/apps/fortuna/migrations/20250707000000_postgres_complete_schema.up.sql new file mode 100644 index 0000000000..7044a6b642 --- /dev/null +++ b/apps/fortuna/migrations/20250707000000_postgres_complete_schema.up.sql @@ -0,0 +1,42 @@ +-- PostgreSQL migration combining all SQLite migrations into a single comprehensive schema +-- Equivalent to: init.up.sql + gas.up.sql + add_indices_for_advanced_search.up.sql + re-add_tx_hash_indices.up.sql + +-- Create the main request table with all fields (including gas fields added later in SQLite) +CREATE TABLE request( + chain_id VARCHAR(20) NOT NULL, + 
network_id INTEGER NOT NULL, + provider VARCHAR(40) NOT NULL, + sequence INTEGER NOT NULL, + created_at TIMESTAMP NOT NULL, + last_updated_at TIMESTAMP NOT NULL, + state VARCHAR(10) NOT NULL, + request_block_number INTEGER NOT NULL, + request_tx_hash VARCHAR(64) NOT NULL, + user_random_number VARCHAR(64) NOT NULL, + sender VARCHAR(40) NOT NULL, + reveal_block_number INTEGER, + reveal_tx_hash VARCHAR(64), + provider_random_number VARCHAR(64), + info TEXT, + gas_used VARCHAR(100), + gas_limit VARCHAR(100) NOT NULL, + PRIMARY KEY (network_id, sequence, provider, request_tx_hash) +); + +-- Create all the optimized indexes from the final SQLite schema +CREATE INDEX request__network_id__state__created_at ON request(network_id, state, created_at); +CREATE INDEX request__network_id__created_at ON request(network_id, created_at); +CREATE INDEX request__sender__network_id__state__created_at ON request(sender, network_id, state, created_at); +CREATE INDEX request__sender__network_id__created_at ON request(sender, network_id, created_at); +CREATE INDEX request__sender__state__created_at ON request(sender, state, created_at); +CREATE INDEX request__sender__created_at ON request(sender, created_at); +CREATE INDEX request__sequence__network_id__state__created_at ON request(sequence, network_id, state, created_at); +CREATE INDEX request__sequence__network_id__created_at ON request(sequence, network_id, created_at); +CREATE INDEX request__sequence__state__created_at ON request(sequence, state, created_at); +CREATE INDEX request__sequence__created_at ON request(sequence, created_at); +CREATE INDEX request__state__created_at ON request(state, created_at); +CREATE INDEX request__created_at ON request(created_at); + +-- Create transaction hash indexes with conditional WHERE clauses +CREATE INDEX request__request_tx_hash ON request (request_tx_hash) WHERE request_tx_hash IS NOT NULL; +CREATE INDEX request__reveal_tx_hash ON request (reveal_tx_hash) WHERE reveal_tx_hash IS NOT NULL; diff --git a/apps/fortuna/migrations/20250502164500_init.down.sql b/apps/fortuna/sqlite_migrations/20250502164500_init.down.sql similarity index 100% rename from apps/fortuna/migrations/20250502164500_init.down.sql rename to apps/fortuna/sqlite_migrations/20250502164500_init.down.sql diff --git a/apps/fortuna/migrations/20250502164500_init.up.sql b/apps/fortuna/sqlite_migrations/20250502164500_init.up.sql similarity index 100% rename from apps/fortuna/migrations/20250502164500_init.up.sql rename to apps/fortuna/sqlite_migrations/20250502164500_init.up.sql diff --git a/apps/fortuna/migrations/20250521203448_gas.down.sql b/apps/fortuna/sqlite_migrations/20250521203448_gas.down.sql similarity index 100% rename from apps/fortuna/migrations/20250521203448_gas.down.sql rename to apps/fortuna/sqlite_migrations/20250521203448_gas.down.sql diff --git a/apps/fortuna/migrations/20250521203448_gas.up.sql b/apps/fortuna/sqlite_migrations/20250521203448_gas.up.sql similarity index 100% rename from apps/fortuna/migrations/20250521203448_gas.up.sql rename to apps/fortuna/sqlite_migrations/20250521203448_gas.up.sql diff --git a/apps/fortuna/migrations/20250605004757_add_indices_for_advanced_search.down.sql b/apps/fortuna/sqlite_migrations/20250605004757_add_indices_for_advanced_search.down.sql similarity index 100% rename from apps/fortuna/migrations/20250605004757_add_indices_for_advanced_search.down.sql rename to apps/fortuna/sqlite_migrations/20250605004757_add_indices_for_advanced_search.down.sql diff --git 
a/apps/fortuna/migrations/20250605004757_add_indices_for_advanced_search.up.sql b/apps/fortuna/sqlite_migrations/20250605004757_add_indices_for_advanced_search.up.sql
similarity index 100%
rename from apps/fortuna/migrations/20250605004757_add_indices_for_advanced_search.up.sql
rename to apps/fortuna/sqlite_migrations/20250605004757_add_indices_for_advanced_search.up.sql
diff --git a/apps/fortuna/migrations/20250605165549_re-add_tx_hash_indices.down.sql b/apps/fortuna/sqlite_migrations/20250605165549_re-add_tx_hash_indices.down.sql
similarity index 100%
rename from apps/fortuna/migrations/20250605165549_re-add_tx_hash_indices.down.sql
rename to apps/fortuna/sqlite_migrations/20250605165549_re-add_tx_hash_indices.down.sql
diff --git a/apps/fortuna/migrations/20250605165549_re-add_tx_hash_indices.up.sql b/apps/fortuna/sqlite_migrations/20250605165549_re-add_tx_hash_indices.up.sql
similarity index 100%
rename from apps/fortuna/migrations/20250605165549_re-add_tx_hash_indices.up.sql
rename to apps/fortuna/sqlite_migrations/20250605165549_re-add_tx_hash_indices.up.sql
diff --git a/apps/fortuna/src/api.rs b/apps/fortuna/src/api.rs
index a14e7e49fa..5f7ab2cf24 100644
--- a/apps/fortuna/src/api.rs
+++ b/apps/fortuna/src/api.rs
@@ -42,6 +42,16 @@ pub enum StateTag {
     Failed,
 }

+impl std::fmt::Display for StateTag {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            StateTag::Pending => write!(f, "Pending"),
+            StateTag::Completed => write!(f, "Completed"),
+            StateTag::Failed => write!(f, "Failed"),
+        }
+    }
+}
+
 #[derive(Clone, Debug, Hash, PartialEq, Eq, EncodeLabelSet)]
 pub struct RequestLabel {
     pub value: String,
diff --git a/apps/fortuna/src/api/explorer.rs b/apps/fortuna/src/api/explorer.rs
index 48bc59d7a2..09593cfeb3 100644
--- a/apps/fortuna/src/api/explorer.rs
+++ b/apps/fortuna/src/api/explorer.rs
@@ -110,7 +110,7 @@ pub struct ExplorerQueryParams {
 #[derive(Debug, serde::Serialize, utoipa::ToSchema)]
 pub struct ExplorerResponse {
     pub requests: Vec<RequestStatus>,
-    pub total_results: u64,
+    pub total_results: i64,
 }

 /// Returns the logs of all requests captured by the keeper.
diff --git a/apps/fortuna/src/history.rs b/apps/fortuna/src/history.rs
index 245e5dea18..2fb53fce3a 100644
--- a/apps/fortuna/src/history.rs
+++ b/apps/fortuna/src/history.rs
@@ -1,7 +1,7 @@
 use {
     crate::api::{ChainId, NetworkId, StateTag},
     anyhow::Result,
-    chrono::{DateTime, NaiveDateTime},
+    chrono::DateTime,
     ethers::{
         core::utils::hex::ToHex,
         prelude::TxHash,
@@ -10,7 +10,7 @@ use {
     },
     serde::Serialize,
     serde_with::serde_as,
-    sqlx::{migrate, FromRow, Pool, QueryBuilder, Sqlite, SqlitePool},
+    sqlx::{migrate, Any, AnyPool, FromRow, QueryBuilder},
     std::{str::FromStr, sync::Arc},
     tokio::{spawn, sync::mpsc},
     utoipa::ToSchema,
@@ -105,8 +105,8 @@ struct RequestRow {
     network_id: i64,
     provider: String,
     sequence: i64,
-    created_at: NaiveDateTime,
-    last_updated_at: NaiveDateTime,
+    created_at: i64,      // Unix timestamp
+    last_updated_at: i64, // Unix timestamp
     state: String,
     request_block_number: i64,
     request_tx_hash: String,
@@ -128,8 +128,10 @@ impl TryFrom<RequestRow> for RequestStatus {
         let network_id = row.network_id as u64;
         let provider = row.provider.parse()?;
         let sequence = row.sequence as u64;
-        let created_at = row.created_at.and_utc();
-        let last_updated_at = row.last_updated_at.and_utc();
+        let created_at = DateTime::from_timestamp(row.created_at, 0)
+            .ok_or(anyhow::anyhow!("Invalid created_at timestamp"))?;
+        let last_updated_at = DateTime::from_timestamp(row.last_updated_at, 0)
+            .ok_or(anyhow::anyhow!("Invalid last_updated_at timestamp"))?;
         let request_block_number = row.request_block_number as u64;
         let user_random_number = hex::FromHex::from_hex(row.user_random_number)?;
         let request_tx_hash = row.request_tx_hash.parse()?;
@@ -211,7 +213,7 @@ impl From<RequestRow> for Option<RequestStatus> {
 }

 pub struct History {
-    pool: Pool<Sqlite>,
+    pool: AnyPool,
     write_queue: mpsc::Sender<RequestStatus>,
     _writer_thread: Arc<tokio::task::JoinHandle<()>>,
 }
@@ -219,7 +221,9 @@ pub struct History {
 impl History {
     const MAX_WRITE_QUEUE: usize = 1_000;
     pub async fn new() -> Result<Self> {
-        Self::new_with_url("sqlite:fortuna.db?mode=rwc").await
+        let database_url = std::env::var("DATABASE_URL")
+            .unwrap_or_else(|_| "sqlite:fortuna.db?mode=rwc".to_string());
+        Self::new_with_url(&database_url).await
     }

     pub async fn new_in_memory() -> Result<Self> {
@@ -227,12 +231,12 @@ impl History {
     }

     pub async fn new_with_url(url: &str) -> Result<Self> {
-        let pool = SqlitePool::connect(url).await?;
+        let pool = AnyPool::connect(url).await?;
         let migrator = migrate!("./migrations");
         migrator.run(&pool).await?;
         Self::new_with_pool(pool).await
     }
-    pub async fn new_with_pool(pool: Pool<Sqlite>) -> Result<Self> {
+    pub async fn new_with_pool(pool: AnyPool) -> Result<Self> {
         let (sender, mut receiver) = mpsc::channel(Self::MAX_WRITE_QUEUE);
         let pool_write_connection = pool.clone();
         let writer_thread = spawn(async move {
@@ -247,7 +251,7 @@ impl History {
         })
     }

-    async fn update_request_status(pool: &Pool<Sqlite>, new_status: RequestStatus) {
+    async fn update_request_status(pool: &AnyPool, new_status: RequestStatus) {
         let sequence = new_status.sequence as i64;
         let chain_id = new_status.chain_id;
         let network_id = new_status.network_id as i64;
@@ -264,8 +268,8 @@ impl History {
                     .bind(network_id)
                     .bind(provider.clone())
                     .bind(sequence)
-                    .bind(new_status.created_at)
-                    .bind(new_status.last_updated_at)
+                    .bind(new_status.created_at.timestamp())
+                    .bind(new_status.last_updated_at.timestamp())
                     .bind("Pending")
                     .bind(block_number)
                     .bind(request_tx_hash.clone())
@@ -288,7 +292,7 @@ impl History {
                 let gas_used: String = gas_used.to_string();
                 let result = sqlx::query("UPDATE request SET state = ?, last_updated_at = ?, reveal_block_number = ?, reveal_tx_hash = ?, provider_random_number =?, gas_used = ? WHERE network_id = ? AND sequence = ? AND provider = ? AND request_tx_hash = ?")
                     .bind("Completed")
-                    .bind(new_status.last_updated_at)
+                    .bind(new_status.last_updated_at.timestamp())
                     .bind(reveal_block_number)
                     .bind(reveal_tx_hash)
                     .bind(provider_random_number)
@@ -314,7 +318,7 @@ impl History {
                     .map(|provider_random_number| provider_random_number.encode_hex());
                 sqlx::query("UPDATE request SET state = ?, last_updated_at = ?, info = ?, provider_random_number = ? WHERE network_id = ? AND sequence = ? AND provider = ? AND request_tx_hash = ? AND state = 'Pending'")
                     .bind("Failed")
-                    .bind(new_status.last_updated_at)
+                    .bind(new_status.last_updated_at.timestamp())
                     .bind(reason)
                     .bind(provider_random_number)
                     .bind(network_id)
@@ -343,7 +347,7 @@

 #[derive(Debug, Clone)]
 pub struct RequestQueryBuilder<'a> {
-    pool: &'a Pool<Sqlite>,
+    pool: &'a AnyPool,
     pub search: Option<SearchField>,
     pub network_id: Option<i64>,
     pub state: Option<StateTag>,
@@ -354,7 +358,7 @@ pub struct RequestQueryBuilder<'a> {
 }

 impl<'a> RequestQueryBuilder<'a> {
-    fn new(pool: &'a Pool<Sqlite>) -> Self {
+    fn new(pool: &'a AnyPool) -> Self {
         Self {
             pool,
             search: None,
@@ -442,21 +446,21 @@ impl<'a> RequestQueryBuilder<'a> {
         Ok(result?.into_iter().filter_map(|row| row.into()).collect())
     }

-    pub async fn count_results(&self) -> Result<u64> {
+    pub async fn count_results(&self) -> Result<i64> {
         self.build_query("COUNT(*) AS count")
-            .build_query_scalar::<u64>()
+            .build_query_scalar::<i64>()
             .fetch_one(self.pool)
             .await
             .map_err(|err| err.into())
     }

-    fn build_query(&self, columns: &str) -> QueryBuilder<Sqlite> {
+    fn build_query(&self, columns: &str) -> QueryBuilder<Any> {
         let mut query_builder = QueryBuilder::new(format!(
             "SELECT {columns} FROM request WHERE created_at BETWEEN "
         ));
-        query_builder.push_bind(self.min_timestamp);
+        query_builder.push_bind(self.min_timestamp.timestamp());
         query_builder.push(" AND ");
-        query_builder.push_bind(self.max_timestamp);
+        query_builder.push_bind(self.max_timestamp.timestamp());

         match &self.search {
             Some(SearchField::TxHash(tx_hash)) => {
@@ -486,7 +490,7 @@ impl<'a> RequestQueryBuilder<'a> {

         if let Some(state) = &self.state {
             query_builder.push(" AND state = ");
-            query_builder.push_bind(state);
+            query_builder.push_bind(state.to_string());
         }

         query_builder.push(" ORDER BY created_at DESC");

From fad52e1b95a84429a44d054013cc7693f7ac20d7 Mon Sep 17 00:00:00 2001
From: Tejas Badadare
Date: Tue, 8 Jul 2025 13:36:57 -0700
Subject: [PATCH 2/5] migrations

---
 apps/fortuna/.gitignore                                      | 1 +
 apps/fortuna/migrations/20250707000000_init.down.sql         | 1 +
 ...es_complete_schema.up.sql => 20250707000000_init.up.sql}  | 6 ------
 .../20250707000000_postgres_complete_schema.down.sql         | 3 ---
 4 files changed, 2 insertions(+), 9 deletions(-)
 create mode 100644 apps/fortuna/migrations/20250707000000_init.down.sql
 rename apps/fortuna/migrations/{20250707000000_postgres_complete_schema.up.sql => 20250707000000_init.up.sql} (81%)
 delete mode 100644 apps/fortuna/migrations/20250707000000_postgres_complete_schema.down.sql

diff --git a/apps/fortuna/.gitignore b/apps/fortuna/.gitignore
index b978440e2e..707493b53c 100644
--- a/apps/fortuna/.gitignore
+++ b/apps/fortuna/.gitignore
@@ -4,3 +4,4 @@
 *private-key*
 .envrc
 fortuna.db*
+.env*
diff --git a/apps/fortuna/migrations/20250707000000_init.down.sql b/apps/fortuna/migrations/20250707000000_init.down.sql
new file mode 100644
index 0000000000..b02621d63a
--- /dev/null
+++ b/apps/fortuna/migrations/20250707000000_init.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS request;
diff --git
a/apps/fortuna/migrations/20250707000000_postgres_complete_schema.up.sql b/apps/fortuna/migrations/20250707000000_init.up.sql similarity index 81% rename from apps/fortuna/migrations/20250707000000_postgres_complete_schema.up.sql rename to apps/fortuna/migrations/20250707000000_init.up.sql index 7044a6b642..ff9e0cd804 100644 --- a/apps/fortuna/migrations/20250707000000_postgres_complete_schema.up.sql +++ b/apps/fortuna/migrations/20250707000000_init.up.sql @@ -1,7 +1,3 @@ --- PostgreSQL migration combining all SQLite migrations into a single comprehensive schema --- Equivalent to: init.up.sql + gas.up.sql + add_indices_for_advanced_search.up.sql + re-add_tx_hash_indices.up.sql - --- Create the main request table with all fields (including gas fields added later in SQLite) CREATE TABLE request( chain_id VARCHAR(20) NOT NULL, network_id INTEGER NOT NULL, @@ -23,7 +19,6 @@ CREATE TABLE request( PRIMARY KEY (network_id, sequence, provider, request_tx_hash) ); --- Create all the optimized indexes from the final SQLite schema CREATE INDEX request__network_id__state__created_at ON request(network_id, state, created_at); CREATE INDEX request__network_id__created_at ON request(network_id, created_at); CREATE INDEX request__sender__network_id__state__created_at ON request(sender, network_id, state, created_at); @@ -37,6 +32,5 @@ CREATE INDEX request__sequence__created_at ON request(sequence, created_at); CREATE INDEX request__state__created_at ON request(state, created_at); CREATE INDEX request__created_at ON request(created_at); --- Create transaction hash indexes with conditional WHERE clauses CREATE INDEX request__request_tx_hash ON request (request_tx_hash) WHERE request_tx_hash IS NOT NULL; CREATE INDEX request__reveal_tx_hash ON request (reveal_tx_hash) WHERE reveal_tx_hash IS NOT NULL; diff --git a/apps/fortuna/migrations/20250707000000_postgres_complete_schema.down.sql b/apps/fortuna/migrations/20250707000000_postgres_complete_schema.down.sql deleted file mode 100644 index f6390ab56b..0000000000 --- a/apps/fortuna/migrations/20250707000000_postgres_complete_schema.down.sql +++ /dev/null @@ -1,3 +0,0 @@ --- PostgreSQL migration rollback - drops all tables and indexes created in the up migration - -DROP TABLE IF EXISTS request; From 8a986c9f57714819cd43334fbd3b6fdf9e2b6297 Mon Sep 17 00:00:00 2001 From: Tejas Badadare Date: Tue, 8 Jul 2025 17:18:51 -0700 Subject: [PATCH 3/5] fix sqlite issues --- Cargo.lock | 18 ++ ...0b88336dd2aab632411114f02ce8dd8fe07e8.json | 12 -- ...9fa50da70066f30b74f354e5d3a843ba6a2c0.json | 12 -- ...b5d72360326593407518770fe537ac3da1e10.json | 12 -- apps/fortuna/Cargo.toml | 3 + apps/fortuna/README.md | 35 ++-- .../migrations/20250707000000_init.up.sql | 4 +- apps/fortuna/src/command/run.rs | 2 + apps/fortuna/src/history.rs | 198 ++++++++++++++---- 9 files changed, 196 insertions(+), 100 deletions(-) delete mode 100644 apps/fortuna/.sqlx/query-03901bcfb28b127d99fe8a53e480b88336dd2aab632411114f02ce8dd8fe07e8.json delete mode 100644 apps/fortuna/.sqlx/query-4c8c05ec08e128d847faafdd3d79fa50da70066f30b74f354e5d3a843ba6a2c0.json delete mode 100644 apps/fortuna/.sqlx/query-b0d9afebb3825c3509ad80e5ebab5d72360326593407518770fe537ac3da1e10.json diff --git a/Cargo.lock b/Cargo.lock index 6ce85720ee..f614c91983 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2294,6 +2294,12 @@ dependencies = [ "const-random", ] +[[package]] +name = "dotenv" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f" + [[package]] name = "dotenvy" version = "0.15.7" @@ -3058,6 +3064,7 @@ dependencies = [ "byteorder", "chrono", "clap", + "dotenv", "ethabi", "ethers", "futures", @@ -9310,6 +9317,7 @@ dependencies = [ "memchr", "once_cell", "percent-encoding", + "rustls 0.23.28", "serde", "serde_json", "sha2 0.10.9", @@ -9319,6 +9327,7 @@ dependencies = [ "tokio-stream", "tracing", "url", + "webpki-roots 0.26.11", ] [[package]] @@ -10630,6 +10639,15 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.1", +] + [[package]] name = "webpki-roots" version = "1.0.1" diff --git a/apps/fortuna/.sqlx/query-03901bcfb28b127d99fe8a53e480b88336dd2aab632411114f02ce8dd8fe07e8.json b/apps/fortuna/.sqlx/query-03901bcfb28b127d99fe8a53e480b88336dd2aab632411114f02ce8dd8fe07e8.json deleted file mode 100644 index 28642818e3..0000000000 --- a/apps/fortuna/.sqlx/query-03901bcfb28b127d99fe8a53e480b88336dd2aab632411114f02ce8dd8fe07e8.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE request SET state = ?, last_updated_at = ?, info = ?, provider_random_number = ? WHERE network_id = ? AND sequence = ? AND provider = ? AND request_tx_hash = ? AND state = 'Pending'", - "describe": { - "columns": [], - "parameters": { - "Right": 8 - }, - "nullable": [] - }, - "hash": "03901bcfb28b127d99fe8a53e480b88336dd2aab632411114f02ce8dd8fe07e8" -} diff --git a/apps/fortuna/.sqlx/query-4c8c05ec08e128d847faafdd3d79fa50da70066f30b74f354e5d3a843ba6a2c0.json b/apps/fortuna/.sqlx/query-4c8c05ec08e128d847faafdd3d79fa50da70066f30b74f354e5d3a843ba6a2c0.json deleted file mode 100644 index 2fc58d302c..0000000000 --- a/apps/fortuna/.sqlx/query-4c8c05ec08e128d847faafdd3d79fa50da70066f30b74f354e5d3a843ba6a2c0.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "SQLite", - "query": "UPDATE request SET state = ?, last_updated_at = ?, reveal_block_number = ?, reveal_tx_hash = ?, provider_random_number =?, gas_used = ? WHERE network_id = ? AND sequence = ? AND provider = ? 
AND request_tx_hash = ?",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 10
-    },
-    "nullable": []
-  },
-  "hash": "4c8c05ec08e128d847faafdd3d79fa50da70066f30b74f354e5d3a843ba6a2c0"
-}
diff --git a/apps/fortuna/.sqlx/query-b0d9afebb3825c3509ad80e5ebab5d72360326593407518770fe537ac3da1e10.json b/apps/fortuna/.sqlx/query-b0d9afebb3825c3509ad80e5ebab5d72360326593407518770fe537ac3da1e10.json
deleted file mode 100644
index 2888b76e36..0000000000
--- a/apps/fortuna/.sqlx/query-b0d9afebb3825c3509ad80e5ebab5d72360326593407518770fe537ac3da1e10.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "db_name": "SQLite",
-  "query": "INSERT INTO request(chain_id, network_id, provider, sequence, created_at, last_updated_at, state, request_block_number, request_tx_hash, user_random_number, sender, gas_limit) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Right": 12
-    },
-    "nullable": []
-  },
-  "hash": "b0d9afebb3825c3509ad80e5ebab5d72360326593407518770fe537ac3da1e10"
-}
diff --git a/apps/fortuna/Cargo.toml b/apps/fortuna/Cargo.toml
index 097cb5cf70..077dd69b48 100644
--- a/apps/fortuna/Cargo.toml
+++ b/apps/fortuna/Cargo.toml
@@ -48,11 +48,14 @@ thiserror = "1.0.61"
 futures-locks = "0.7.1"
 sqlx = { version = "0.8", features = [
     "runtime-tokio",
+    "tls-rustls",
     "sqlite",
+    "any",
     "postgres",
     "chrono",
 ] }
 num-traits = "0.2.19"
+dotenv = "0.15.0"

 [dev-dependencies]
 axum-test = "13.1.1"
diff --git a/apps/fortuna/README.md b/apps/fortuna/README.md
index 274f3eaf31..6b7be72185 100644
--- a/apps/fortuna/README.md
+++ b/apps/fortuna/README.md
@@ -10,35 +10,38 @@ Each blockchain is configured in `config.yaml`.

 ## Build & Test

-We use sqlx query macros to check the SQL queries at compile time. This requires
-a database to be available at build time. Create a `.env` file in the root of the project with the following content:
+Fortuna uses Cargo for building and dependency management.
+Simply run `cargo build` and `cargo test` to build and test the project.
+To run Fortuna locally, see the [Local Development](#local-development) section below.

+### Connect a database
+Fortuna stores request history in a SQL database and serves it from its explorer API.
+Any SQLite or Postgres database is supported. The database connection is sourced from the `DATABASE_URL` env var.
+Create a `.env` file in the root of the project with a DB connection string.
 ```
 DATABASE_URL="sqlite:fortuna.db?mode=rwc"
 ```
+If not provided, Fortuna will create and use a SQLite file-based database at `./fortuna.db`, as in the example above.

+### Database migrations
+Fortuna will automatically apply the schema migrations in the `./migrations` directory when connecting to the database.
+To manually administer the migrations, use the `sqlx` CLI. The tool automatically uses the
+database connection in the `.env` file.

-Install sqlx for cargo with:
+Install the `sqlx` CLI:

 ```bash
-cargo install sqlx
+cargo install sqlx-cli
 ```

-Next, you need to create the database and apply the schema migrations. You can do this by running:
+To create the database if needed and apply the migrations:

 ```bash
-cargo sqlx migrate run # automatically picks up the .env file
+cargo sqlx migrate run
 ```

-This will create a SQLite database file called `fortuna.db` in the root of the project and apply the schema migrations to it.
-This will allow `cargo check` to check the queries against the existing database.
-
-Fortuna uses Cargo for building and dependency management.
-Simply run `cargo build` and `cargo test` to build and test the project.
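+For Postgres, point `DATABASE_URL` at the server instead. For example (illustrative
+credentials, substitute your own):
+```
+DATABASE_URL="postgres://user:password@localhost:5432/fortuna"
+```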
-
-If you have changed any queries in the code, you need to update the .sqlx folder with the new queries:
+To restore the database to a fresh state (drop, recreate, apply migrations):

 ```bash
-cargo sqlx prepare
+cargo sqlx database reset
 ```

-Please add the changed files in the `.sqlx` folder to your git commit.

 ## Command-Line Interface
@@ -124,7 +127,7 @@ To start an instance of the webserver for local testing, you first need to perfo
 1. Run `cargo run -- setup-provider` to register a randomness provider for this
    service. This command will update the on-chain contracts such that the
    configured provider key is a randomness provider, and its on-chain configuration matches `config.yaml`.
-
+1. Review the [Connect a database](#connect-a-database) section above. The default configuration will create a file-based DB.

 Once you've completed the setup, simply run the following command to start the service:

 ```bash
diff --git a/apps/fortuna/migrations/20250707000000_init.up.sql b/apps/fortuna/migrations/20250707000000_init.up.sql
index ff9e0cd804..609351b70f 100644
--- a/apps/fortuna/migrations/20250707000000_init.up.sql
+++ b/apps/fortuna/migrations/20250707000000_init.up.sql
@@ -3,8 +3,8 @@ CREATE TABLE request(
     network_id INTEGER NOT NULL,
     provider VARCHAR(40) NOT NULL,
     sequence INTEGER NOT NULL,
-    created_at TIMESTAMP NOT NULL,
-    last_updated_at TIMESTAMP NOT NULL,
+    created_at INTEGER NOT NULL,
+    last_updated_at INTEGER NOT NULL,
     state VARCHAR(10) NOT NULL,
     request_block_number INTEGER NOT NULL,
     request_tx_hash VARCHAR(64) NOT NULL,
diff --git a/apps/fortuna/src/command/run.rs b/apps/fortuna/src/command/run.rs
index af588192cc..a06909de78 100644
--- a/apps/fortuna/src/command/run.rs
+++ b/apps/fortuna/src/command/run.rs
@@ -84,6 +84,8 @@ pub async fn run_api(
 }

 pub async fn run(opts: &RunOptions) -> Result<()> {
+    // Load environment variables from a .env file if present
+    let _ = dotenv::dotenv();
     let config = Config::load(&opts.config.config)?;
     let secret = config.provider.secret.load()?.ok_or(anyhow!(
         "Please specify a provider secret in the config file."
diff --git a/apps/fortuna/src/history.rs b/apps/fortuna/src/history.rs
index 2fb53fce3a..850b3f2eaa 100644
--- a/apps/fortuna/src/history.rs
+++ b/apps/fortuna/src/history.rs
@@ -10,13 +10,16 @@ use {
     },
     serde::Serialize,
     serde_with::serde_as,
-    sqlx::{migrate, Any, AnyPool, FromRow, QueryBuilder},
+    sqlx::{any::AnyPoolOptions, migrate, AnyPool, FromRow},
     std::{str::FromStr, sync::Arc},
     tokio::{spawn, sync::mpsc},
     utoipa::ToSchema,
 };

 const LOG_RETURN_LIMIT: u64 = 1000;
+const ONE_DAY: u64 = 60 * 60 * 24;
+const ONE_HOUR: u64 = 60 * 60;
+const DEFAULT_DATABASE_URL: &str = "sqlite:fortuna.db?mode=rwc";

 #[serde_as]
 #[derive(Clone, Debug, Serialize, ToSchema, PartialEq)]
@@ -221,17 +224,41 @@ pub struct History {
 impl History {
     const MAX_WRITE_QUEUE: usize = 1_000;
     pub async fn new() -> Result<Self> {
-        let database_url = std::env::var("DATABASE_URL")
-            .unwrap_or_else(|_| "sqlite:fortuna.db?mode=rwc".to_string());
+        let database_url =
+            std::env::var("DATABASE_URL").unwrap_or_else(|_| DEFAULT_DATABASE_URL.to_string());
         Self::new_with_url(&database_url).await
     }

+    /// Create a History instance with an ephemeral in-memory DB.
+    /// Useful for testing.
     pub async fn new_in_memory() -> Result<Self> {
-        Self::new_with_url("sqlite::memory:").await
+        sqlx::any::install_default_drivers();
+        // Connect to an in-memory SQLite database
+        // Don't let the pool drop the cxn, otherwise the database will be deleted
+        let pool = AnyPoolOptions::new()
+            .min_connections(1)
+            .max_connections(1)
+            .idle_timeout(None)
+            .max_lifetime(None)
+            .connect("sqlite::memory:")
+            .await?;
+        let migrator = migrate!("./migrations");
+        migrator.run(&pool).await?;
+        Self::new_with_pool(pool).await
     }

+    /// Create a History instance with production DB parameters
     pub async fn new_with_url(url: &str) -> Result<Self> {
-        let pool = AnyPool::connect(url).await?;
+        sqlx::any::install_default_drivers();
+        let pool = AnyPoolOptions::new()
+            .min_connections(0)
+            .max_connections(10)
+            // Allow the cloud DB to spin down after 1 hour of inactivity (cost savings)
+            .idle_timeout(std::time::Duration::from_secs(ONE_HOUR))
+            // Retire the connection after 1 day to avoid memory leaks in the DB
+            .max_lifetime(std::time::Duration::from_secs(ONE_DAY))
+            .connect(url)
+            .await?;
         let migrator = migrate!("./migrations");
         migrator.run(&pool).await?;
         Self::new_with_pool(pool).await
@@ -263,7 +290,7 @@ impl History {
         let block_number = new_status.request_block_number as i64;
         let sender: String = new_status.sender.encode_hex();
         let user_random_number: String = new_status.user_random_number.encode_hex();
-        sqlx::query("INSERT INTO request(chain_id, network_id, provider, sequence, created_at, last_updated_at, state, request_block_number, request_tx_hash, user_random_number, sender, gas_limit) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
+        sqlx::query("INSERT INTO request(chain_id, network_id, provider, sequence, created_at, last_updated_at, state, request_block_number, request_tx_hash, user_random_number, sender, gas_limit) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)")
             .bind(chain_id.clone())
             .bind(network_id)
             .bind(provider.clone())
@@ -290,7 +317,7 @@ impl History {
                 let reveal_tx_hash: String = reveal_tx_hash.encode_hex();
                 let provider_random_number: String = provider_random_number.encode_hex();
                 let gas_used: String = gas_used.to_string();
-                let result = sqlx::query("UPDATE request SET state = ?, last_updated_at = ?, reveal_block_number = ?, reveal_tx_hash = ?, provider_random_number =?, gas_used = ? WHERE network_id = ? AND sequence = ? AND provider = ? AND request_tx_hash = ?")
+                let result = sqlx::query("UPDATE request SET state = $1, last_updated_at = $2, reveal_block_number = $3, reveal_tx_hash = $4, provider_random_number = $5, gas_used = $6 WHERE network_id = $7 AND sequence = $8 AND provider = $9 AND request_tx_hash = $10")
                     .bind("Completed")
                     .bind(new_status.last_updated_at.timestamp())
                     .bind(reveal_block_number)
@@ -316,7 +343,7 @@ impl History {
             } => {
                 let provider_random_number: Option<String> = provider_random_number
                     .map(|provider_random_number| provider_random_number.encode_hex());
-                sqlx::query("UPDATE request SET state = ?, last_updated_at = ?, info = ?, provider_random_number = ? WHERE network_id = ? AND sequence = ? AND provider = ? AND request_tx_hash = ? AND state = 'Pending'")
+                sqlx::query("UPDATE request SET state = $1, last_updated_at = $2, info = $3, provider_random_number = $4 WHERE network_id = $5 AND sequence = $6 AND provider = $7 AND request_tx_hash = $8 AND state = 'Pending'")
                     .bind("Failed")
                     .bind(new_status.last_updated_at.timestamp())
                     .bind(reason)
@@ -430,14 +457,76 @@ impl<'a> RequestQueryBuilder<'a> {
     }

     pub async fn execute(&self) -> Result<Vec<RequestStatus>> {
-        let mut query_builder = self.build_query("*");
-        query_builder.push(" LIMIT ");
-        query_builder.push_bind(self.limit);
-        query_builder.push(" OFFSET ");
-        query_builder.push_bind(self.offset);
+        let mut sql = "SELECT * FROM request WHERE created_at BETWEEN $1 AND $2".to_string();
+        let mut param_count = 2;

-        let result: sqlx::Result<Vec<RequestRow>> =
-            query_builder.build_query_as().fetch_all(self.pool).await;
+        // Build the SQL string with parameter placeholders
+        match &self.search {
+            Some(SearchField::TxHash(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND (request_tx_hash = ${}", param_count));
+                param_count += 1;
+                sql.push_str(&format!(" OR reveal_tx_hash = ${})", param_count));
+            }
+            Some(SearchField::Sender(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND sender = ${}", param_count));
+            }
+            Some(SearchField::SequenceNumber(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND sequence = ${}", param_count));
+            }
+            None => (),
+        }
+
+        if self.network_id.is_some() {
+            param_count += 1;
+            sql.push_str(&format!(" AND network_id = ${}", param_count));
+        }
+
+        if self.state.is_some() {
+            param_count += 1;
+            sql.push_str(&format!(" AND state = ${}", param_count));
+        }
+
+        sql.push_str(" ORDER BY created_at DESC");
+
+        param_count += 1;
+        sql.push_str(&format!(" LIMIT ${}", param_count));
+        param_count += 1;
+        sql.push_str(&format!(" OFFSET ${}", param_count));
+
+        // Now bind all parameters in order
+        let mut query = sqlx::query_as::<_, RequestRow>(&sql)
+            .bind(self.min_timestamp.timestamp())
+            .bind(self.max_timestamp.timestamp());
+
+        match &self.search {
+            Some(SearchField::TxHash(tx_hash)) => {
+                let tx_hash: String = tx_hash.encode_hex();
+                query = query.bind(tx_hash.clone()).bind(tx_hash);
+            }
+            Some(SearchField::Sender(sender)) => {
+                let sender: String = sender.encode_hex();
+                query = query.bind(sender);
+            }
+            Some(SearchField::SequenceNumber(sequence_number)) => {
+                query = query.bind(sequence_number);
+            }
+            None => (),
+        }
+
+        if let Some(network_id) = &self.network_id {
+            query = query.bind(network_id);
+        }
+
+        if let Some(state) = &self.state {
+            query = query.bind(state.to_string());
+        }
+
+        query = query.bind(self.limit).bind(self.offset);
+
+        let result: sqlx::Result<Vec<RequestRow>> = query.fetch_all(self.pool).await;

         if let Err(e) = &result {
             tracing::error!("Failed to fetch request: {}", e);
@@ -447,54 +536,67 @@ impl<'a> RequestQueryBuilder<'a> {
     }

     pub async fn count_results(&self) -> Result<i64> {
-        self.build_query("COUNT(*) AS count")
-            .build_query_scalar::<i64>()
-            .fetch_one(self.pool)
-            .await
-            .map_err(|err| err.into())
-    }
+        let mut sql = "SELECT COUNT(*) FROM request WHERE created_at BETWEEN $1 AND $2".to_string();
+        let mut param_count = 2;
+
+        // Build the SQL string with parameter placeholders
+        match &self.search {
+            Some(SearchField::TxHash(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND (request_tx_hash = ${}", param_count));
+                param_count += 1;
+                sql.push_str(&format!(" OR reveal_tx_hash = ${})", param_count));
+            }
+            Some(SearchField::Sender(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND sender = ${}", param_count));
+            }
+            Some(SearchField::SequenceNumber(_)) => {
+                param_count += 1;
+                sql.push_str(&format!(" AND sequence = ${}", param_count));
+            }
+            None => (),
+        }
+
+        if self.network_id.is_some() {
+            param_count += 1;
+            sql.push_str(&format!(" AND network_id = ${}", param_count));
+        }
+
+        if self.state.is_some() {
+            param_count += 1;
+            sql.push_str(&format!(" AND state = ${}", param_count));
+        }

-    fn build_query(&self, columns: &str) -> QueryBuilder<Any> {
-        let mut query_builder = QueryBuilder::new(format!(
-            "SELECT {columns} FROM request WHERE created_at BETWEEN "
-        ));
-        query_builder.push_bind(self.min_timestamp.timestamp());
-        query_builder.push(" AND ");
-        query_builder.push_bind(self.max_timestamp.timestamp());
+        // Now bind all parameters in order
+        let mut query = sqlx::query_scalar::<_, i64>(&sql)
+            .bind(self.min_timestamp.timestamp())
+            .bind(self.max_timestamp.timestamp());

         match &self.search {
             Some(SearchField::TxHash(tx_hash)) => {
                 let tx_hash: String = tx_hash.encode_hex();
-                query_builder.push(" AND (request_tx_hash = ");
-                query_builder.push_bind(tx_hash.clone());
-                query_builder.push(" OR reveal_tx_hash = ");
-                query_builder.push_bind(tx_hash);
-                query_builder.push(")");
+                query = query.bind(tx_hash.clone()).bind(tx_hash);
             }
             Some(SearchField::Sender(sender)) => {
                 let sender: String = sender.encode_hex();
-                query_builder.push(" AND sender = ");
-                query_builder.push_bind(sender);
+                query = query.bind(sender);
             }
             Some(SearchField::SequenceNumber(sequence_number)) => {
-                query_builder.push(" AND sequence = ");
-                query_builder.push_bind(sequence_number);
+                query = query.bind(sequence_number);
             }
             None => (),
         }

         if let Some(network_id) = &self.network_id {
-            query_builder.push(" AND network_id = ");
-            query_builder.push_bind(network_id);
+            query = query.bind(network_id);
         }

         if let Some(state) = &self.state {
-            query_builder.push(" AND state = ");
-            query_builder.push_bind(state.to_string());
+            query = query.bind(state.to_string());
         }

-        query_builder.push(" ORDER BY created_at DESC");
-        query_builder
+        query.fetch_one(self.pool).await.map_err(|err| err.into())
     }
 }
@@ -514,7 +616,11 @@ pub enum SearchField {

 #[cfg(test)]
 mod test {
-    use {super::*, chrono::Duration, tokio::time::sleep};
+    use {
+        super::*,
+        chrono::{Duration, Timelike},
+        tokio::time::sleep,
+    };

     fn get_random_request_status() -> RequestStatus {
         RequestStatus {
@@ -522,8 +628,8 @@ mod test {
             network_id: 121,
             provider: Address::random(),
             sequence: 1,
-            created_at: chrono::Utc::now(),
-            last_updated_at: chrono::Utc::now(),
+            created_at: chrono::Utc::now().with_nanosecond(0).unwrap(),
+            last_updated_at: chrono::Utc::now().with_nanosecond(0).unwrap(),
             request_block_number: 1,
             request_tx_hash: TxHash::random(),
             user_random_number: [20; 32],

From 8683539cac5d0082145ba09ed22e46b508373a6d Mon Sep 17 00:00:00 2001
From: Tejas Badadare
Date: Tue, 8 Jul 2025 17:20:21 -0700
Subject: [PATCH 4/5] fix sqlite issues

---
 apps/fortuna/src/history.rs | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/apps/fortuna/src/history.rs b/apps/fortuna/src/history.rs
index 850b3f2eaa..9c7304774a 100644
--- a/apps/fortuna/src/history.rs
+++ b/apps/fortuna/src/history.rs
@@ -464,37 +464,37 @@ impl<'a> RequestQueryBuilder<'a> {
         match &self.search {
             Some(SearchField::TxHash(_)) => {
                 param_count += 1;
-                sql.push_str(&format!(" AND (request_tx_hash = ${}", param_count));
+                sql.push_str(&format!(" AND (request_tx_hash = ${param_count}"));
                 param_count += 1;
-                sql.push_str(&format!(" OR reveal_tx_hash = ${})", param_count));
+                sql.push_str(&format!(" OR reveal_tx_hash = ${param_count})"));
             }
             Some(SearchField::Sender(_)) => {
                 param_count += 1;
-                sql.push_str(&format!(" AND sender = ${}", param_count));
+                sql.push_str(&format!(" AND sender = ${param_count}"));
             }
             Some(SearchField::SequenceNumber(_)) => {
                 param_count += 1;
-                sql.push_str(&format!(" AND sequence = ${}", param_count));
+                sql.push_str(&format!(" AND sequence = ${param_count}"));
             }
             None => (),
         }

         if self.network_id.is_some() {
             param_count += 1;
-            sql.push_str(&format!(" AND network_id = ${}", param_count));
+            sql.push_str(&format!(" AND network_id = ${param_count}"));
         }

         if self.state.is_some() {
             param_count += 1;
-            sql.push_str(&format!(" AND state = ${}", param_count));
+            sql.push_str(&format!(" AND state = ${param_count}"));
         }

         sql.push_str(" ORDER BY created_at DESC");

         param_count += 1;
-        sql.push_str(&format!(" LIMIT ${}", param_count));
+        sql.push_str(&format!(" LIMIT ${param_count}"));
         param_count += 1;
-        sql.push_str(&format!(" OFFSET ${}", param_count));
+        sql.push_str(&format!(" OFFSET ${param_count}"));

         // Now bind all parameters in order
         let mut query = sqlx::query_as::<_, RequestRow>(&sql)
@@ -543,29 +543,29 @@ impl<'a> RequestQueryBuilder<'a> {
         match &self.search {
             Some(SearchField::TxHash(_)) => {
                 param_count += 1;
-                sql.push_str(&format!(" AND (request_tx_hash = ${}", param_count));
+                sql.push_str(&format!(" AND (request_tx_hash = ${param_count}"));
                 param_count += 1;
-                sql.push_str(&format!(" OR reveal_tx_hash = ${})", param_count));
+                sql.push_str(&format!(" OR reveal_tx_hash = ${param_count})"));
             }
             Some(SearchField::Sender(_)) => {
                 param_count += 1;
-                sql.push_str(&format!(" AND sender = ${}", param_count));
+                sql.push_str(&format!(" AND sender = ${param_count}"));
             }
             Some(SearchField::SequenceNumber(_)) => {
                 param_count += 1;
-                sql.push_str(&format!(" AND sequence = ${}", param_count));
+                sql.push_str(&format!(" AND sequence = ${param_count}"));
             }
             None => (),
         }

         if self.network_id.is_some() {
             param_count += 1;
-            sql.push_str(&format!(" AND network_id = ${}", param_count));
+            sql.push_str(&format!(" AND network_id = ${param_count}"));
         }

         if self.state.is_some() {
             param_count += 1;
-            sql.push_str(&format!(" AND state = ${}", param_count));
+            sql.push_str(&format!(" AND state = ${param_count}"));
         }

         // Now bind all parameters in order

From 1e27f1099c97aef7cd5812da21da835f606b31f0 Mon Sep 17 00:00:00 2001
From: Tejas Badadare
Date: Wed, 9 Jul 2025 11:03:35 -0700
Subject: [PATCH 5/5] use requested V2 event

---
 Cargo.lock                               |  2 +-
 apps/fortuna/Cargo.toml                  |  2 +-
 apps/fortuna/src/chain/ethereum.rs       | 31 ++++++++----------------
 apps/fortuna/src/chain/reader.rs         | 14 +++++++----
 apps/fortuna/src/history.rs              | 12 ++++-----
 apps/fortuna/src/keeper/process_event.rs | 10 ++++----
 6 files changed, 32 insertions(+), 39 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f614c91983..643f150126 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3052,7 +3052,7 @@ dependencies = [

 [[package]]
 name = "fortuna"
-version = "8.1.0"
+version = "9.0.0"
 dependencies = [
     "anyhow",
     "axum 0.6.20",
diff --git a/apps/fortuna/Cargo.toml b/apps/fortuna/Cargo.toml
index 077dd69b48..df4181d330 100644
--- a/apps/fortuna/Cargo.toml
+++ b/apps/fortuna/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "fortuna"
-version = "8.1.0"
+version = "9.0.0"
 edition = "2021"

 [lib]
diff --git a/apps/fortuna/src/chain/ethereum.rs b/apps/fortuna/src/chain/ethereum.rs
index fe6464ce85..f296061c34 100644
--- a/apps/fortuna/src/chain/ethereum.rs
+++ b/apps/fortuna/src/chain/ethereum.rs
@@ -3,10 +3,7 @@ use {
     crate::{
         api::ChainId,
-        chain::reader::{
-            self, BlockNumber, BlockStatus, EntropyReader, EntropyRequestInfo,
-            RequestedWithCallbackEvent,
-        },
+        chain::reader::{self, BlockNumber, BlockStatus, EntropyReader, RequestedV2Event},
         config::EthereumConfig,
         eth_utils::{
             eth_gas_oracle::EthProviderOracle,
@@ -280,6 +277,7 @@ impl EntropyReader for PythRandom<Provider<Http>> {
             block_number: request.block_number,
             use_blockhash: request.use_blockhash,
             callback_status: reader::RequestCallbackStatus::try_from(request.callback_status)?,
+            gas_limit_10k: request.gas_limit_1_0k,
         }))
     }

@@ -302,8 +300,8 @@ impl EntropyReader for PythRandom<Provider<Http>> {
         from_block: BlockNumber,
         to_block: BlockNumber,
         provider: Address,
-    ) -> Result<Vec<RequestedWithCallbackEvent>> {
-        let mut event = self.requested_with_callback_filter();
+    ) -> Result<Vec<RequestedV2Event>> {
+        let mut event = self.requested_2_filter();
         event.filter = event
             .filter
             .address(self.address())
             .from_block(from_block)
             .to_block(to_block)
             .topic1(provider);

-        let res: Vec<(RequestedWithCallbackFilter, LogMeta)> = event.query_with_meta().await?;
+        let res: Vec<(Requested2Filter, LogMeta)> = event.query_with_meta().await?;

         Ok(res
             .into_iter()
-            .map(|(r, meta)| RequestedWithCallbackEvent {
+            .map(|(r, meta)| RequestedV2Event {
                 sequence_number: r.sequence_number,
-                user_random_number: r.user_random_number,
-                provider_address: r.request.provider,
-                requestor: r.requestor,
-                request: EntropyRequestInfo {
-                    provider: r.request.provider,
-                    sequence_number: r.request.sequence_number,
-                    num_hashes: r.request.num_hashes,
-                    commitment: r.request.commitment,
-                    block_number: r.request.block_number,
-                    requester: r.request.requester,
-                    use_blockhash: r.request.use_blockhash,
-                    is_request_with_callback: r.request.is_request_with_callback,
-                },
+                user_random_number: r.user_contribution,
+                provider_address: r.provider,
+                sender: r.caller,
+                gas_limit: r.gas_limit,
                 log_meta: meta,
             })
             .filter(|r| r.provider_address == provider)
diff --git a/apps/fortuna/src/chain/reader.rs b/apps/fortuna/src/chain/reader.rs
index 44130f85c3..6814ac5731 100644
--- a/apps/fortuna/src/chain/reader.rs
+++ b/apps/fortuna/src/chain/reader.rs
@@ -45,12 +45,12 @@ pub struct EntropyRequestInfo {
 }

 #[derive(Clone)]
-pub struct RequestedWithCallbackEvent {
+pub struct RequestedV2Event {
     pub sequence_number: u64,
     pub user_random_number: [u8; 32],
     pub provider_address: Address,
-    pub requestor: Address,
-    pub request: EntropyRequestInfo,
+    pub sender: Address,
+    pub gas_limit: u32,
     pub log_meta: LogMeta,
 }

@@ -73,7 +73,7 @@ pub trait EntropyReader: Send + Sync {
         from_block: BlockNumber,
         to_block: BlockNumber,
         provider: Address,
-    ) -> Result<Vec<RequestedWithCallbackEvent>>;
+    ) -> Result<Vec<RequestedV2Event>>;

     /// Estimate the gas required to reveal a random number with a callback.
     async fn estimate_reveal_with_callback_gas(
@@ -97,6 +97,8 @@ pub struct Request {
     pub block_number: BlockNumber,
     pub use_blockhash: bool,
     pub callback_status: RequestCallbackStatus,
+    /// The gas limit for the request, in 10k gas units. (i.e., 2 = 20k gas).
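+    /// Illustrative conversion (an assumption from the stated convention, not
+    /// part of this patch): effective gas = u32::from(gas_limit_10k) * 10_000.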
+    pub gas_limit_10k: u16,
 }

 /// Status values for Request.callback_status
@@ -169,6 +171,7 @@ pub mod mock {
                         block_number: b,
                         use_blockhash: u,
                         callback_status: RequestCallbackStatus::CallbackNotNecessary,
+                        gas_limit_10k: 0,
                     })
                     .collect(),
             ),
@@ -189,6 +192,7 @@ pub mod mock {
                 block_number,
                 use_blockhash,
                 callback_status: RequestCallbackStatus::CallbackNotNecessary,
+                gas_limit_10k: 0,
             });
             self
         }
@@ -227,7 +231,7 @@ pub mod mock {
             _from_block: BlockNumber,
             _to_block: BlockNumber,
             _provider: Address,
-        ) -> Result<Vec<RequestedWithCallbackEvent>> {
+        ) -> Result<Vec<RequestedV2Event>> {
             Ok(vec![])
         }

diff --git a/apps/fortuna/src/history.rs b/apps/fortuna/src/history.rs
index 9c7304774a..310f17436a 100644
--- a/apps/fortuna/src/history.rs
+++ b/apps/fortuna/src/history.rs
@@ -78,8 +78,7 @@ pub struct RequestStatus {
-    /// Gas limit for the callback in the smallest unit of the chain.
-    /// For example, if the native currency is ETH, this will be in wei.
+    /// Gas limit for the callback, in gas units.
     #[schema(example = "500000", value_type = String)]
-    #[serde(with = "crate::serde::u256")]
-    pub gas_limit: U256,
+    pub gas_limit: u32,
     /// The user contribution to the random number.
     #[schema(example = "a905ab56567d31a7fda38ed819d97bc257f3ebe385fc5c72ce226d3bb855f0fe")]
     #[serde_as(as = "serde_with::hex::Hex")]
@@ -139,7 +138,9 @@ impl TryFrom<RequestRow> for RequestStatus {
         let user_random_number = hex::FromHex::from_hex(row.user_random_number)?;
         let request_tx_hash = row.request_tx_hash.parse()?;
         let sender = row.sender.parse()?;
-        let gas_limit = U256::from_dec_str(&row.gas_limit)
+        let gas_limit = row
+            .gas_limit
+            .parse::<u32>()
             .map_err(|_| anyhow::anyhow!("Failed to parse gas limit"))?;

         let state = match row.state.as_str() {
@@ -233,8 +234,7 @@ impl History {
     /// Useful for testing.
     pub async fn new_in_memory() -> Result<Self> {
         sqlx::any::install_default_drivers();
-        // Connect to an in-memory SQLite database
-        // Don't let the pool drop the cxn, otherwise the database will be deleted
+        // Prevent the pool from dropping the cxn, otherwise the database will be deleted
         let pool = AnyPoolOptions::new()
             .min_connections(1)
             .max_connections(1)
@@ -635,7 +635,7 @@ mod test {
             user_random_number: [20; 32],
             sender: Address::random(),
             state: RequestEntryState::Pending,
-            gas_limit: U256::from(500_000),
+            gas_limit: 500_000,
         }
     }

diff --git a/apps/fortuna/src/keeper/process_event.rs b/apps/fortuna/src/keeper/process_event.rs
index 7a91253be7..344093a13c 100644
--- a/apps/fortuna/src/keeper/process_event.rs
+++ b/apps/fortuna/src/keeper/process_event.rs
@@ -3,14 +3,14 @@ use {
     crate::{
         chain::{
             ethereum::PythRandomErrorsErrors,
-            reader::{RequestCallbackStatus, RequestedWithCallbackEvent},
+            reader::{RequestCallbackStatus, RequestedV2Event},
         },
         eth_utils::utils::{submit_tx_with_backoff, SubmitTxError},
         history::{RequestEntryState, RequestStatus},
         keeper::block::ProcessParams,
     },
     anyhow::{anyhow, Result},
-    ethers::{abi::AbiDecode, contract::ContractError, types::U256},
+    ethers::{abi::AbiDecode, contract::ContractError},
     std::time::Duration,
     tracing,
 };
@@ -20,7 +20,7 @@ use {
     sequence_number = event.sequence_number
 ))]
 pub async fn process_event_with_backoff(
-    event: RequestedWithCallbackEvent,
+    event: RequestedV2Event,
     process_param: ProcessParams,
 ) -> Result<()> {
     let ProcessParams {
@@ -110,10 +110,10 @@ pub async fn process_event_with_backoff(
         last_updated_at: chrono::Utc::now(),
         request_block_number: event.log_meta.block_number.as_u64(),
         request_tx_hash: event.log_meta.transaction_hash,
-        sender: event.requestor,
+        sender: event.sender,
         user_random_number: event.user_random_number,
         state: RequestEntryState::Pending,
- gas_limit: U256::from(0), // FIXME(Tejas): set this properly + gas_limit: event.gas_limit, }; history.add(&status);
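Taken together, this series swaps Fortuna's SQLite-only `SqlitePool` for sqlx's driver-agnostic `AnyPool`, with `$1`-style placeholders that both backends accept. A minimal, self-contained sketch of that pattern — assuming sqlx 0.8 with the `runtime-tokio`, `any`, `sqlite`, and `postgres` features and tokio with `macros`; the `request` table and `state` column mirror the migration above, everything else here is illustrative rather than part of the Fortuna API:

```rust
use sqlx::any::{install_default_drivers, AnyPoolOptions};

#[tokio::main]
async fn main() -> Result<(), sqlx::Error> {
    // Register the SQLite/Postgres drivers compiled into sqlx; without this,
    // AnyPool cannot resolve a driver from the URL scheme at runtime.
    install_default_drivers();

    // Either backend works: "sqlite:fortuna.db?mode=rwc" or
    // "postgres://user:password@localhost:5432/fortuna".
    let url = std::env::var("DATABASE_URL").unwrap_or_else(|_| "sqlite::memory:".to_string());

    // A single connection keeps the demo deterministic: with `sqlite::memory:`,
    // every new connection would otherwise see its own empty database (the same
    // reason `new_in_memory` above pins min_connections = max_connections = 1).
    let pool = AnyPoolOptions::new().max_connections(1).connect(&url).await?;

    // Stand-in for the migration above, so the sketch runs on an empty DB.
    sqlx::query("CREATE TABLE IF NOT EXISTS request (state VARCHAR(10) NOT NULL)")
        .execute(&pool)
        .await?;

    // $1-style placeholders are accepted by both SQLite and Postgres, which is
    // why the queries in this series moved away from bare `?` markers.
    let pending: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM request WHERE state = $1")
        .bind("Pending")
        .fetch_one(&pool)
        .await?;
    println!("pending requests: {pending}");
    Ok(())
}
```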