From 37a6b27ffb095898cd9dc15a687261c932eb821d Mon Sep 17 00:00:00 2001
From: lemunozm
Date: Fri, 11 Aug 2023 11:21:39 +0200
Subject: [PATCH 1/4] generic nuke migration

---
 pallets/loans/src/lib.rs             |   2 +-
 pallets/loans/src/migrations/nuke.rs | 120 ++++++----
 runtime/altair/src/lib.rs            |   4 +-
 runtime/altair/src/migrations.rs     | 327 +--------------------------
 4 files changed, 79 insertions(+), 374 deletions(-)

diff --git a/pallets/loans/src/lib.rs b/pallets/loans/src/lib.rs
index ae6c6ee9fd..299fad9d08 100644
--- a/pallets/loans/src/lib.rs
+++ b/pallets/loans/src/lib.rs
@@ -127,7 +127,7 @@ pub mod pallet {
 	pub type ChangeOf = Change<::LoanId, ::Rate, ::MaxWriteOffPolicySize>;
 
-	const STORAGE_VERSION: StorageVersion = StorageVersion::new(1);
+	pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2);
 
 	#[pallet::pallet]
 	#[pallet::generate_store(pub(super) trait Store)]
diff --git a/pallets/loans/src/migrations/nuke.rs b/pallets/loans/src/migrations/nuke.rs
index 3a21fb8f63..36d5c50a64 100644
--- a/pallets/loans/src/migrations/nuke.rs
+++ b/pallets/loans/src/migrations/nuke.rs
@@ -1,91 +1,111 @@
 #[cfg(feature = "try-runtime")]
 use frame_support::ensure;
 use frame_support::{
-	pallet_prelude::ValueQuery,
-	storage, storage_alias,
-	traits::{Get, OnRuntimeUpgrade},
-	weights::Weight,
-	Blake2_128Concat,
+	dispatch::GetStorageVersion,
+	storage,
+	traits::{Get, OnRuntimeUpgrade, PalletInfoAccess, StorageVersion},
+	weights::{RuntimeDbWeight, Weight},
 };
 #[cfg(feature = "try-runtime")]
 use sp_std::vec::Vec;
 
 use crate::*;
 
-mod old {
-	use super::*;
-
-	/// This storage comes from the previous pallet loans.
-	/// It is used as an indicator that the previous pallet loans still exists.
-	/// If this storage is not found, the nuking process is aborted.
-	#[storage_alias]
-	pub(crate) type NextLoanId =
-		StorageMap, Blake2_128Concat, ::PoolId, u128, ValueQuery>;
-}
-
-/// This migration nukes all storages from the pallet individually.
-pub struct Migration(sp_std::marker::PhantomData);
-
-impl OnRuntimeUpgrade for Migration {
+/// This upgrade nukes all storages from the pallet individually.
+/// This upgrade is only executed if pallet version has changed.
+///
+/// To handle possible issues forgeting removing the upgrade,
+/// you must specify the PREV_VERSION,
+/// which represent the expected on-chain version when the upgrade is done
+/// If these numbers mistmatch, the upgrade will not take effect.
+pub struct Migration( + sp_std::marker::PhantomData<(Pallet, DbWeight)>, +); + +impl OnRuntimeUpgrade + for Migration +where + Pallet: GetStorageVersion + PalletInfoAccess, + DbWeight: Get, +{ #[cfg(feature = "try-runtime")] fn pre_upgrade() -> Result, &'static str> { ensure!( - contains_prefixed_key(&loan_prefix()), - "Pallet loans prefix doesn't exists" + Pallet::on_chain_storage_version() == StorageVersion::new(PREV_VERSION), + "Pallet on-chain version must match with PREV_VERSION" ); ensure!( - old::NextLoanId::::iter_values().count() == 1, - "Pallet loans contains doesn't contain old data" + Pallet::on_chain_storage_version() < STORAGE_VERSION, + "Pallet is already updated" + ); + + ensure!( + storage::unhashed::contains_prefixed_key(&pallet_prefix::()), + "Pallet prefix doesn't exists" ); Ok(Vec::new()) } fn on_runtime_upgrade() -> Weight { - let old_values = old::NextLoanId::::iter_values().count(); - if old_values > 0 { - let result = storage::unhashed::clear_prefix(&loan_prefix(), None, None); + if Pallet::on_chain_storage_version() == StorageVersion::new(PREV_VERSION) { + log::error!( + "Nuke-{}: Nuke aborted. This upgrade must be removed!", + Pallet::name() + ); + return Weight::zero(); + } + if Pallet::on_chain_storage_version() < STORAGE_VERSION { + log::info!("Nuke-{}: Nuking pallet...", Pallet::name()); + + // TODO: Future improvements of this upgrade should loop over `clear_prefix` + // calls removing the entire storage. + let result = storage::unhashed::clear_prefix(&pallet_prefix::(), None, None); + log::info!( + "Nuke-{}: iteration result. backend: {} unique: {} loops: {}", + Pallet::name(), + result.backend, + result.unique, + result.loops, + ); match result.maybe_cursor { - None => log::info!("Loans: storage cleared successful"), - Some(_) => log::error!("Loans: storage not totally cleared"), + None => log::info!("Nuke-{}: storage cleared successful", Pallet::name()), + Some(_) => log::error!("Nuke-{}: storage not totally cleared", Pallet::name()), } - T::DbWeight::get().writes(result.unique.into()) - + T::DbWeight::get().reads(result.loops.into()) - } else { - log::warn!("Loans: storage was already clear. This migration can be removed."); + Pallet::current_storage_version().put::(); - T::DbWeight::get().reads(old_values as u64) + DbWeight::get().writes(result.unique.into()) + + DbWeight::get().reads(result.loops.into()) + + DbWeight::get().reads_writes(1, 1) // Version read & writen + } else { + log::warn!( + "Nuke-{}: pallet on-chain version is not {STORAGE_VERSION:?}. This upgrade can be removed.", + Pallet::name() + ); + DbWeight::get().reads(1) } } #[cfg(feature = "try-runtime")] fn post_upgrade(_: Vec) -> Result<(), &'static str> { - ensure!( - !contains_prefixed_key(&loan_prefix()), - "Pallet loans prefix still exists!" + assert_eq!( + Pallet::on_chain_storage_version(), + STORAGE_VERSION, + "on-chain storage version should have been updated" ); ensure!( - old::NextLoanId::::iter_values().count() == 0, - "Pallet loans still contains old data" + !storage::unhashed::contains_prefixed_key(&pallet_prefix::()), + "Pallet prefix still exists!" ); Ok(()) } } -fn loan_prefix() -> [u8; 16] { - sp_io::hashing::twox_128(b"Loans") -} - -#[cfg(feature = "try-runtime")] -fn contains_prefixed_key(prefix: &[u8]) -> bool { - // Implementation extracted from a newer version of `frame_support`. 
- match sp_io::storage::next_key(prefix) { - Some(key) => key.starts_with(prefix), - None => false, - } +fn pallet_prefix() -> [u8; 16] { + sp_io::hashing::twox_128(Pallet::name().as_bytes()) } diff --git a/runtime/altair/src/lib.rs b/runtime/altair/src/lib.rs index 889a8c9977..e191034f06 100644 --- a/runtime/altair/src/lib.rs +++ b/runtime/altair/src/lib.rs @@ -124,7 +124,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("altair"), impl_name: create_runtime_str!("altair"), authoring_version: 1, - spec_version: 1028, + spec_version: 1029, impl_version: 1, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, @@ -1724,7 +1724,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, - migrations::UpgradeAltair1028, + migrations::UpgradeAltair1029, >; impl fp_self_contained::SelfContainedCall for RuntimeCall { diff --git a/runtime/altair/src/migrations.rs b/runtime/altair/src/migrations.rs index 81e46eea46..30be78e697 100644 --- a/runtime/altair/src/migrations.rs +++ b/runtime/altair/src/migrations.rs @@ -14,328 +14,13 @@ use cfg_types::tokens::CurrencyId; use codec::{Decode, Encode}; #[cfg(feature = "try-runtime")] use frame_support::ensure; -use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; +use frame_support::{ + traits::OnRuntimeUpgrade, + weights::{constants::RocksDbWeight, Weight}, +}; use sp_std::vec::Vec; use crate::Runtime; -pub type UpgradeAltair1028 = ( - asset_registry::CrossChainTransferabilityMigration, - orml_tokens_migration::CurrencyIdRefactorMigration, - pool_system::MigrateAUSDPools, -); - -const DEPRECATED_AUSD_CURRENCY_ID: CurrencyId = CurrencyId::AUSD; -const NEW_AUSD_CURRENCY_ID: CurrencyId = CurrencyId::ForeignAsset(2); - -mod asset_registry { - use cfg_types::{tokens as v1, tokens::CustomMetadata}; - use frame_support::{pallet_prelude::OptionQuery, storage_alias, Twox64Concat}; - use orml_traits::asset_registry::AssetMetadata; - - use super::*; - use crate::VERSION; - - /// Migrate all the registered asset's metadata to the new version of - /// `CustomMetadata` which contains a `CrossChainTransferability` property. - /// At this point in time, the `transferability` of Tranche tokens should be - /// set to `CrossChainTransferability::Xcm` and for all other tokens to - /// `CrossChainTransferability::Xcm`, with the exception of - /// `Currency::Staking` tokens which are not registered in the first place. 
- pub struct CrossChainTransferabilityMigration; - - // The old orml_asset_registry Metadata storage using v0::CustomMetadata - #[storage_alias] - type Metadata = StorageMap< - orml_asset_registry::Pallet, - Twox64Concat, - CurrencyId, - AssetMetadata, - OptionQuery, - >; - - impl OnRuntimeUpgrade for CrossChainTransferabilityMigration { - fn on_runtime_upgrade() -> Weight { - if VERSION.spec_version > 1028 { - return Weight::zero(); - } - - orml_asset_registry::Metadata::::translate( - |asset_id: CurrencyId, old_metadata: AssetMetadata| { - match asset_id { - CurrencyId::Staking(_) => None, - CurrencyId::Tranche(_, _) => Some(to_metadata_v1( - old_metadata, - v1::CrossChainTransferability::Connectors, - )), - _ => Some(to_metadata_v1( - old_metadata.clone(), - v1::CrossChainTransferability::Xcm(old_metadata.additional.xcm), - )), - } - }, - ); - - let n = orml_asset_registry::Metadata::::iter().count() as u64; - ::DbWeight::get().reads_writes(n, n) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - let old_state: Vec<(CurrencyId, AssetMetadata)> = - Metadata::::iter().collect::>(); - - Ok(old_state.encode()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(old_state_encoded: Vec) -> Result<(), &'static str> { - use crate::OrmlAssetRegistry; - - let old_state = sp_std::vec::Vec::<( - CurrencyId, - AssetMetadata, - )>::decode(&mut old_state_encoded.as_ref()) - .map_err(|_| "Error decoding pre-upgrade state")?; - - for (asset_id, old_metadata) in old_state { - let new_metadata = OrmlAssetRegistry::metadata(asset_id) - .ok_or_else(|| "New state lost the metadata of an asset")?; - - match asset_id { - CurrencyId::Tranche(_, _) => ensure!(new_metadata == to_metadata_v1( - old_metadata, - v1::CrossChainTransferability::Connectors, - ), "The metadata of a tranche token wasn't just updated by setting `transferability` to `Connectors `"), - _ => ensure!(new_metadata == to_metadata_v1( - old_metadata.clone(), - v1::CrossChainTransferability::Xcm(old_metadata.additional.xcm), - ), "The metadata of a NON tranche token wasn't just updated by setting `transferability` to `Xcm`"), - } - } - - Ok(()) - } - } - - mod v0 { - use cfg_types::xcm::XcmMetadata; - use codec::{Decode, Encode, MaxEncodedLen}; - use scale_info::TypeInfo; - #[cfg(feature = "std")] - use serde::{Deserialize, Serialize}; - - // The `CustomMetadata` type as it was prior to adding the `transferability` - // field and prior to removing the `xcm` field. 
- #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] - #[derive( - Clone, - Copy, - Default, - PartialOrd, - Ord, - PartialEq, - Eq, - Debug, - Encode, - Decode, - TypeInfo, - MaxEncodedLen, - )] - pub struct CustomMetadata { - pub xcm: XcmMetadata, - pub mintable: bool, - pub permissioned: bool, - pub pool_currency: bool, - } - } - - fn to_metadata_v1( - old: AssetMetadata, - transferability: v1::CrossChainTransferability, - ) -> AssetMetadata { - AssetMetadata { - decimals: old.decimals, - name: old.name, - symbol: old.symbol, - existential_deposit: old.existential_deposit, - location: old.location, - additional: CustomMetadata { - mintable: old.additional.mintable, - permissioned: old.additional.permissioned, - pool_currency: old.additional.pool_currency, - transferability, - }, - } - } -} - -mod orml_tokens_migration { - use cfg_primitives::AccountId; - use orml_tokens::AccountData; - - use super::*; - - /// As we dropped `CurrencyId::KSM` and `CurrencyId::AUSD`, we need to - /// migrate the balances under the dropped variants in favour of the new, - /// corresponding `CurrencyId::ForeignAsset`. We have never transferred KSM - /// so we only need to deal with AUSD. - pub struct CurrencyIdRefactorMigration; - - #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] - pub struct OldState { - pub total_issuance: Balance, - pub entries: Vec<(AccountId, AccountData)>, - } - - impl OnRuntimeUpgrade for CurrencyIdRefactorMigration { - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - let total_issuance = - orml_tokens::TotalIssuance::::get(DEPRECATED_AUSD_CURRENCY_ID); - let entries: Vec<(AccountId, AccountData)> = - orml_tokens::Accounts::::iter() - .filter(|(_, old_currency_id, _)| { - *old_currency_id == DEPRECATED_AUSD_CURRENCY_ID - }) - .map(|(account, _, account_data)| (account, account_data)) - .collect::<_>(); - - Ok(OldState { - total_issuance, - entries, - } - .encode()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - use crate::OrmlTokens; - - let old_state = OldState::decode(&mut state.as_ref()) - .map_err(|_| "Error decoding pre-upgrade state")?; - - let new_total_issuance = - orml_tokens::TotalIssuance::::get(NEW_AUSD_CURRENCY_ID); - - ensure!( - old_state.total_issuance == new_total_issuance, - "The old AUSD issuance differs from the new one" - ); - - for (account, account_data) in old_state.entries { - ensure!( - OrmlTokens::accounts(&account, NEW_AUSD_CURRENCY_ID) == account_data.clone(), - "The account data under the new AUSD Currency does NOT match the old one" - ); - } - - Ok(()) - } - - fn on_runtime_upgrade() -> Weight { - use frame_support::traits::tokens::fungibles::Mutate; - - let mut migrated_entries = 0; - - // Burn all AUSD tokens under the old CurrencyId and mint them under the new one - orml_tokens::Accounts::::iter() - .filter(|(_, old_currency_id, _)| *old_currency_id == DEPRECATED_AUSD_CURRENCY_ID) - .for_each(|(account, _, account_data)| { - let balance = account_data.free; - // Burn the amount under the old, hardcoded CurrencyId - as Mutate>::burn_from( - DEPRECATED_AUSD_CURRENCY_ID, - &account, - balance, - ) - .map_err(|e| { - log::error!( - "Failed to call burn_from({:?}, {:?}, {balance}): {:?}", - DEPRECATED_AUSD_CURRENCY_ID, - account, - e - ) - }) - .ok(); - // Now mint the amount under the new CurrencyID - as Mutate>::mint_into( - NEW_AUSD_CURRENCY_ID, - &account, - balance, - ) - .map_err(|e| { - log::error!( - "Failed to mint_into burn_from({:?}, {:?}, 
{balance}): {:?}", - NEW_AUSD_CURRENCY_ID, - account, - e - ) - }) - .ok(); - - migrated_entries += 1; - }); - - // Approximate weight given for every entry migration there are two calls being - // made, so counting the reads and writes for each call. - ::DbWeight::get() - .reads_writes(migrated_entries * 5, migrated_entries * 4) - } - } -} - -mod pool_system { - #[cfg(feature = "try-runtime")] - use cfg_primitives::PoolId; - use pallet_pool_system::pool_types::PoolDetails; - - use super::*; - - pub struct MigrateAUSDPools; - - impl OnRuntimeUpgrade for MigrateAUSDPools { - fn on_runtime_upgrade() -> Weight { - pallet_pool_system::Pool::::translate( - |_, mut details: PoolDetails| { - if details.currency == DEPRECATED_AUSD_CURRENCY_ID { - details.currency = NEW_AUSD_CURRENCY_ID; - } - - Some(details) - }, - ); - - let n = pallet_pool_system::Pool::::iter().count() as u64; - ::DbWeight::get().reads_writes(n, n) - } - - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - let ausd_pools: Vec = pallet_pool_system::Pool::::iter() - .filter(|(_, details)| details.currency == DEPRECATED_AUSD_CURRENCY_ID) - .map(|(pool_id, _)| pool_id) - .collect::<_>(); - - Ok(ausd_pools.encode()) - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(state: Vec) -> Result<(), &'static str> { - let ausd_pools = sp_std::vec::Vec::::decode(&mut state.as_ref()) - .map_err(|_| "Error decoding pre-upgrade state")?; - - for pool_id in ausd_pools { - let pool = pallet_pool_system::Pool::::get(pool_id) - .expect("AUSD Pool should exist after the migration was executed"); - - ensure!( - pool.currency == NEW_AUSD_CURRENCY_ID, - "A AUSD pool was NOT migrated to the new AUSD CurrencyId (ForeignAsset(2))", - ) - } - - Ok(()) - } - } -} +pub type UpgradeAltair1029 = + pallet_loans::migrations::nuke::Migration; From 0b89c3e3dc486b8ad900ec46a2956557ba1f948b Mon Sep 17 00:00:00 2001 From: lemunozm Date: Fri, 11 Aug 2023 11:46:23 +0200 Subject: [PATCH 2/4] moved to runtime-common to use it generically --- Cargo.lock | 2 +- pallets/loans/Cargo.toml | 6 -- pallets/loans/src/lib.rs | 6 +- pallets/loans/src/migrations/nuke.rs | 111 ---------------------- runtime/altair/src/migrations.rs | 17 +--- runtime/common/Cargo.toml | 8 +- runtime/common/src/lib.rs | 4 + runtime/common/src/migrations/nuke.rs | 127 ++++++++++++++++++++++++++ 8 files changed, 144 insertions(+), 137 deletions(-) delete mode 100644 pallets/loans/src/migrations/nuke.rs create mode 100644 runtime/common/src/migrations/nuke.rs diff --git a/Cargo.lock b/Cargo.lock index c5b3901925..6e6909efe1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7577,7 +7577,6 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "log", "orml-traits", "pallet-balances", "pallet-interest-accrual", @@ -10819,6 +10818,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal 0.2.2", + "log", "orml-oracle", "orml-traits", "pallet-anchors", diff --git a/pallets/loans/Cargo.toml b/pallets/loans/Cargo.toml index f8bd1d4297..fe6a03300e 100644 --- a/pallets/loans/Cargo.toml +++ b/pallets/loans/Cargo.toml @@ -30,10 +30,6 @@ strum = { version = "0.24", default-features = false, features = ["derive"] } # Optionals for benchmarking frame-benchmarking = { git = "https://github.com/paritytech/substrate", default-features = false, optional = true, branch = "polkadot-v0.9.38" } -# Used for migrations (no longer needed once migratios is done) -log = "0.4" -sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = 
"polkadot-v0.9.38" } - [dev-dependencies] sp-core = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" } sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.38" } @@ -49,7 +45,6 @@ cfg-mocks = { path = "../../libs/mocks" } default = ["std"] std = [ "codec/std", - "log/std", "scale-info/std", "frame-support/std", "frame-system/std", @@ -60,7 +55,6 @@ std = [ "cfg-traits/std", "cfg-types/std", "frame-benchmarking/std", - "sp-io/std", "strum/std", "orml-traits/std", ] diff --git a/pallets/loans/src/lib.rs b/pallets/loans/src/lib.rs index 299fad9d08..66ebb8dbdc 100644 --- a/pallets/loans/src/lib.rs +++ b/pallets/loans/src/lib.rs @@ -40,10 +40,6 @@ //! [`Pallet::update_portfolio_valuation()`] that should go through all active //! loans. -pub mod migrations { - pub mod nuke; -} - /// High level types that uses `pallet::Config` pub mod entities { pub mod interest; @@ -127,7 +123,7 @@ pub mod pallet { pub type ChangeOf = Change<::LoanId, ::Rate, ::MaxWriteOffPolicySize>; - pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); + const STORAGE_VERSION: StorageVersion = StorageVersion::new(2); #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] diff --git a/pallets/loans/src/migrations/nuke.rs b/pallets/loans/src/migrations/nuke.rs deleted file mode 100644 index 36d5c50a64..0000000000 --- a/pallets/loans/src/migrations/nuke.rs +++ /dev/null @@ -1,111 +0,0 @@ -#[cfg(feature = "try-runtime")] -use frame_support::ensure; -use frame_support::{ - dispatch::GetStorageVersion, - storage, - traits::{Get, OnRuntimeUpgrade, PalletInfoAccess, StorageVersion}, - weights::{RuntimeDbWeight, Weight}, -}; -#[cfg(feature = "try-runtime")] -use sp_std::vec::Vec; - -use crate::*; - -/// This upgrade nukes all storages from the pallet individually. -/// This upgrade is only executed if pallet version has changed. -/// -/// To handle possible issues forgeting removing the upgrade, -/// you must specify the PREV_VERSION, -/// which represent the expected on-chain version when the upgrade is done -/// If these numbers mistmatch, the upgrade will not take effect. -pub struct Migration( - sp_std::marker::PhantomData<(Pallet, DbWeight)>, -); - -impl OnRuntimeUpgrade - for Migration -where - Pallet: GetStorageVersion + PalletInfoAccess, - DbWeight: Get, -{ - #[cfg(feature = "try-runtime")] - fn pre_upgrade() -> Result, &'static str> { - ensure!( - Pallet::on_chain_storage_version() == StorageVersion::new(PREV_VERSION), - "Pallet on-chain version must match with PREV_VERSION" - ); - - ensure!( - Pallet::on_chain_storage_version() < STORAGE_VERSION, - "Pallet is already updated" - ); - - ensure!( - storage::unhashed::contains_prefixed_key(&pallet_prefix::()), - "Pallet prefix doesn't exists" - ); - - Ok(Vec::new()) - } - - fn on_runtime_upgrade() -> Weight { - if Pallet::on_chain_storage_version() == StorageVersion::new(PREV_VERSION) { - log::error!( - "Nuke-{}: Nuke aborted. This upgrade must be removed!", - Pallet::name() - ); - return Weight::zero(); - } - - if Pallet::on_chain_storage_version() < STORAGE_VERSION { - log::info!("Nuke-{}: Nuking pallet...", Pallet::name()); - - // TODO: Future improvements of this upgrade should loop over `clear_prefix` - // calls removing the entire storage. - let result = storage::unhashed::clear_prefix(&pallet_prefix::(), None, None); - log::info!( - "Nuke-{}: iteration result. 
backend: {} unique: {} loops: {}", - Pallet::name(), - result.backend, - result.unique, - result.loops, - ); - match result.maybe_cursor { - None => log::info!("Nuke-{}: storage cleared successful", Pallet::name()), - Some(_) => log::error!("Nuke-{}: storage not totally cleared", Pallet::name()), - } - - Pallet::current_storage_version().put::(); - - DbWeight::get().writes(result.unique.into()) - + DbWeight::get().reads(result.loops.into()) - + DbWeight::get().reads_writes(1, 1) // Version read & writen - } else { - log::warn!( - "Nuke-{}: pallet on-chain version is not {STORAGE_VERSION:?}. This upgrade can be removed.", - Pallet::name() - ); - DbWeight::get().reads(1) - } - } - - #[cfg(feature = "try-runtime")] - fn post_upgrade(_: Vec) -> Result<(), &'static str> { - assert_eq!( - Pallet::on_chain_storage_version(), - STORAGE_VERSION, - "on-chain storage version should have been updated" - ); - - ensure!( - !storage::unhashed::contains_prefixed_key(&pallet_prefix::()), - "Pallet prefix still exists!" - ); - - Ok(()) - } -} - -fn pallet_prefix() -> [u8; 16] { - sp_io::hashing::twox_128(Pallet::name().as_bytes()) -} diff --git a/runtime/altair/src/migrations.rs b/runtime/altair/src/migrations.rs index 30be78e697..7e9947a086 100644 --- a/runtime/altair/src/migrations.rs +++ b/runtime/altair/src/migrations.rs @@ -9,18 +9,9 @@ // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. -use cfg_primitives::Balance; -use cfg_types::tokens::CurrencyId; -use codec::{Decode, Encode}; -#[cfg(feature = "try-runtime")] -use frame_support::ensure; -use frame_support::{ - traits::OnRuntimeUpgrade, - weights::{constants::RocksDbWeight, Weight}, -}; -use sp_std::vec::Vec; -use crate::Runtime; +use frame_support::weights::constants::RocksDbWeight; -pub type UpgradeAltair1029 = - pallet_loans::migrations::nuke::Migration; +use crate::Loans; + +pub type UpgradeAltair1029 = runtime_common::migrations::nuke::Migration; diff --git a/runtime/common/Cargo.toml b/runtime/common/Cargo.toml index b139f2efa1..dd8c5ad2df 100644 --- a/runtime/common/Cargo.toml +++ b/runtime/common/Cargo.toml @@ -57,14 +57,19 @@ pallet-investments = { path = "../../pallets/investments", default-features = fa pallet-loans = { path = "../../pallets/loans", default-features = false } pallet-pool-system = { path = "../../pallets/pool-system", default-features = false } +# Used for migrations +log = "0.4" +sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.38" } + [dev-dependencies] hex-literal = "0.2.1" -sp-io = { git = "https://github.com/paritytech/substrate", default-features = false, branch = "polkadot-v0.9.38" } +sp-io = { git = "https://github.com/paritytech/substrate", default-features = true, branch = "polkadot-v0.9.38" } [features] default = ["std"] std = [ "codec/std", + "log/std", "frame-support/std", "frame-system/std", "pallet-authorship/std", @@ -82,6 +87,7 @@ std = [ "sp-arithmetic/std", "sp-core/std", "sp-runtime/std", + "sp-io/std", "cfg-types/std", "pallet-anchors/std", "frame-support/std", diff --git a/runtime/common/src/lib.rs b/runtime/common/src/lib.rs index 6469ccc875..928f23f189 100644 --- a/runtime/common/src/lib.rs +++ b/runtime/common/src/lib.rs @@ -18,6 +18,10 @@ #[cfg(test)] mod tests; +pub mod migrations { + pub mod nuke; +} + pub mod account_conversion; pub mod apis; pub mod evm; diff --git a/runtime/common/src/migrations/nuke.rs 
b/runtime/common/src/migrations/nuke.rs
new file mode 100644
index 0000000000..297ea029c2
--- /dev/null
+++ b/runtime/common/src/migrations/nuke.rs
@@ -0,0 +1,127 @@
+// Copyright 2023 Centrifuge Foundation (centrifuge.io).
+// This file is part of Centrifuge chain project.
+
+// Centrifuge is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version (see http://www.gnu.org/licenses).
+
+// Centrifuge is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+#[cfg(feature = "try-runtime")]
+use frame_support::ensure;
+use frame_support::{
+	dispatch::GetStorageVersion,
+	storage::unhashed,
+	traits::{Get, OnRuntimeUpgrade, PalletInfoAccess, StorageVersion},
+	weights::{RuntimeDbWeight, Weight},
+};
+#[cfg(feature = "try-runtime")]
+use sp_std::vec::Vec;
+
+/// This upgrade nukes all storages from the pallet individually.
+/// This upgrade is only executed if the pallet version has changed.
+///
+/// To handle possible issues forgetting to remove the upgrade,
+/// you must specify the ON_CHAIN_VERSION,
+/// which represents the expected previous on-chain version when the upgrade
+/// is done. If these numbers mismatch, the upgrade will not take effect.
+pub struct Migration<Pallet, DbWeight, const ON_CHAIN_VERSION: u16>(
+	sp_std::marker::PhantomData<(Pallet, DbWeight)>,
+);
+
+impl<Pallet, DbWeight, const ON_CHAIN_VERSION: u16> OnRuntimeUpgrade
+	for Migration<Pallet, DbWeight, ON_CHAIN_VERSION>
+where
+	Pallet: GetStorageVersion + PalletInfoAccess,
+	DbWeight: Get<RuntimeDbWeight>,
+{
+	#[cfg(feature = "try-runtime")]
+	fn pre_upgrade() -> Result<Vec<u8>, &'static str> {
+		ensure!(
+			Pallet::on_chain_storage_version() == StorageVersion::new(ON_CHAIN_VERSION),
+			"Pallet on-chain version must match with ON_CHAIN_VERSION"
+		);
+
+		ensure!(
+			Pallet::on_chain_storage_version() < Pallet::current_storage_version(),
+			"Pallet is already updated"
+		);
+
+		ensure!(
+			unhashed::contains_prefixed_key(&pallet_prefix::<Pallet>()),
+			"Pallet prefix doesn't exist"
+		);
+
+		Ok(Vec::new())
+	}
+
+	fn on_runtime_upgrade() -> Weight {
+		if Pallet::on_chain_storage_version() != StorageVersion::new(ON_CHAIN_VERSION) {
+			log::error!(
+				"Nuke-{}: nuke aborted. This upgrade must be removed!",
+				Pallet::name()
+			);
+			return Weight::zero();
+		}
+
+		if Pallet::on_chain_storage_version() < Pallet::current_storage_version() {
+			log::info!("Nuke-{}: nuking pallet...", Pallet::name());
+
+			// TODO: How we can set the maximum for this? Currently hardcode
+			let result = unhashed::clear_prefix(&pallet_prefix::<Pallet>(), Some(1000), None);
+			match result.maybe_cursor {
+				None => log::info!("Nuke-{}: storage cleared successful", Pallet::name()),
+				Some(_) => {
+					// TODO: Should we loop over maybe_cursor as a new prefix?
+					// By now, returning error.
+					log::error!("Nuke-{}: storage not totally cleared", Pallet::name())
+				}
+			}
+
+			log::info!(
+				"Nuke-{}: iteration result. backend: {} unique: {} loops: {}",
+				Pallet::name(),
+				result.backend,
+				result.unique,
+				result.loops,
+			);
+
+			Pallet::current_storage_version().put::<Pallet>();
+
+			DbWeight::get().writes(result.unique.into())
+				+ DbWeight::get().reads(result.loops.into())
+				+ DbWeight::get().reads_writes(1, 1) // Version read & written
+		} else {
+			log::warn!(
+				"Nuke-{}: pallet on-chain version is not {:?}.
This upgrade can be removed.", + Pallet::name(), + Pallet::current_storage_version() + ); + DbWeight::get().reads(1) + } + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_: Vec) -> Result<(), &'static str> { + assert_eq!( + Pallet::on_chain_storage_version(), + Pallet::current_storage_version(), + "on-chain storage version should have been updated" + ); + + ensure!( + !unhashed::contains_prefixed_key(&pallet_prefix::()), + "Pallet prefix still exists!" + ); + + Ok(()) + } +} + +fn pallet_prefix() -> [u8; 16] { + sp_io::hashing::twox_128(Pallet::name().as_bytes()) +} From cbb3c597601dda9dab1a0066315e961eac5c8383 Mon Sep 17 00:00:00 2001 From: lemunozm Date: Fri, 11 Aug 2023 12:11:37 +0200 Subject: [PATCH 3/4] restore altair runtime --- runtime/altair/src/lib.rs | 4 +- runtime/altair/src/migrations.rs | 330 ++++++++++++++++++++++++++++++- 2 files changed, 329 insertions(+), 5 deletions(-) diff --git a/runtime/altair/src/lib.rs b/runtime/altair/src/lib.rs index e191034f06..889a8c9977 100644 --- a/runtime/altair/src/lib.rs +++ b/runtime/altair/src/lib.rs @@ -124,7 +124,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("altair"), impl_name: create_runtime_str!("altair"), authoring_version: 1, - spec_version: 1029, + spec_version: 1028, impl_version: 1, #[cfg(not(feature = "disable-runtime-api"))] apis: RUNTIME_API_VERSIONS, @@ -1724,7 +1724,7 @@ pub type Executive = frame_executive::Executive< frame_system::ChainContext, Runtime, AllPalletsWithSystem, - migrations::UpgradeAltair1029, + migrations::UpgradeAltair1028, >; impl fp_self_contained::SelfContainedCall for RuntimeCall { diff --git a/runtime/altair/src/migrations.rs b/runtime/altair/src/migrations.rs index 7e9947a086..81e46eea46 100644 --- a/runtime/altair/src/migrations.rs +++ b/runtime/altair/src/migrations.rs @@ -9,9 +9,333 @@ // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. +use cfg_primitives::Balance; +use cfg_types::tokens::CurrencyId; +use codec::{Decode, Encode}; +#[cfg(feature = "try-runtime")] +use frame_support::ensure; +use frame_support::{traits::OnRuntimeUpgrade, weights::Weight}; +use sp_std::vec::Vec; -use frame_support::weights::constants::RocksDbWeight; +use crate::Runtime; -use crate::Loans; +pub type UpgradeAltair1028 = ( + asset_registry::CrossChainTransferabilityMigration, + orml_tokens_migration::CurrencyIdRefactorMigration, + pool_system::MigrateAUSDPools, +); -pub type UpgradeAltair1029 = runtime_common::migrations::nuke::Migration; +const DEPRECATED_AUSD_CURRENCY_ID: CurrencyId = CurrencyId::AUSD; +const NEW_AUSD_CURRENCY_ID: CurrencyId = CurrencyId::ForeignAsset(2); + +mod asset_registry { + use cfg_types::{tokens as v1, tokens::CustomMetadata}; + use frame_support::{pallet_prelude::OptionQuery, storage_alias, Twox64Concat}; + use orml_traits::asset_registry::AssetMetadata; + + use super::*; + use crate::VERSION; + + /// Migrate all the registered asset's metadata to the new version of + /// `CustomMetadata` which contains a `CrossChainTransferability` property. + /// At this point in time, the `transferability` of Tranche tokens should be + /// set to `CrossChainTransferability::Xcm` and for all other tokens to + /// `CrossChainTransferability::Xcm`, with the exception of + /// `Currency::Staking` tokens which are not registered in the first place. 
+ pub struct CrossChainTransferabilityMigration; + + // The old orml_asset_registry Metadata storage using v0::CustomMetadata + #[storage_alias] + type Metadata = StorageMap< + orml_asset_registry::Pallet, + Twox64Concat, + CurrencyId, + AssetMetadata, + OptionQuery, + >; + + impl OnRuntimeUpgrade for CrossChainTransferabilityMigration { + fn on_runtime_upgrade() -> Weight { + if VERSION.spec_version > 1028 { + return Weight::zero(); + } + + orml_asset_registry::Metadata::::translate( + |asset_id: CurrencyId, old_metadata: AssetMetadata| { + match asset_id { + CurrencyId::Staking(_) => None, + CurrencyId::Tranche(_, _) => Some(to_metadata_v1( + old_metadata, + v1::CrossChainTransferability::Connectors, + )), + _ => Some(to_metadata_v1( + old_metadata.clone(), + v1::CrossChainTransferability::Xcm(old_metadata.additional.xcm), + )), + } + }, + ); + + let n = orml_asset_registry::Metadata::::iter().count() as u64; + ::DbWeight::get().reads_writes(n, n) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + let old_state: Vec<(CurrencyId, AssetMetadata)> = + Metadata::::iter().collect::>(); + + Ok(old_state.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(old_state_encoded: Vec) -> Result<(), &'static str> { + use crate::OrmlAssetRegistry; + + let old_state = sp_std::vec::Vec::<( + CurrencyId, + AssetMetadata, + )>::decode(&mut old_state_encoded.as_ref()) + .map_err(|_| "Error decoding pre-upgrade state")?; + + for (asset_id, old_metadata) in old_state { + let new_metadata = OrmlAssetRegistry::metadata(asset_id) + .ok_or_else(|| "New state lost the metadata of an asset")?; + + match asset_id { + CurrencyId::Tranche(_, _) => ensure!(new_metadata == to_metadata_v1( + old_metadata, + v1::CrossChainTransferability::Connectors, + ), "The metadata of a tranche token wasn't just updated by setting `transferability` to `Connectors `"), + _ => ensure!(new_metadata == to_metadata_v1( + old_metadata.clone(), + v1::CrossChainTransferability::Xcm(old_metadata.additional.xcm), + ), "The metadata of a NON tranche token wasn't just updated by setting `transferability` to `Xcm`"), + } + } + + Ok(()) + } + } + + mod v0 { + use cfg_types::xcm::XcmMetadata; + use codec::{Decode, Encode, MaxEncodedLen}; + use scale_info::TypeInfo; + #[cfg(feature = "std")] + use serde::{Deserialize, Serialize}; + + // The `CustomMetadata` type as it was prior to adding the `transferability` + // field and prior to removing the `xcm` field. 
+ #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] + #[derive( + Clone, + Copy, + Default, + PartialOrd, + Ord, + PartialEq, + Eq, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + )] + pub struct CustomMetadata { + pub xcm: XcmMetadata, + pub mintable: bool, + pub permissioned: bool, + pub pool_currency: bool, + } + } + + fn to_metadata_v1( + old: AssetMetadata, + transferability: v1::CrossChainTransferability, + ) -> AssetMetadata { + AssetMetadata { + decimals: old.decimals, + name: old.name, + symbol: old.symbol, + existential_deposit: old.existential_deposit, + location: old.location, + additional: CustomMetadata { + mintable: old.additional.mintable, + permissioned: old.additional.permissioned, + pool_currency: old.additional.pool_currency, + transferability, + }, + } + } +} + +mod orml_tokens_migration { + use cfg_primitives::AccountId; + use orml_tokens::AccountData; + + use super::*; + + /// As we dropped `CurrencyId::KSM` and `CurrencyId::AUSD`, we need to + /// migrate the balances under the dropped variants in favour of the new, + /// corresponding `CurrencyId::ForeignAsset`. We have never transferred KSM + /// so we only need to deal with AUSD. + pub struct CurrencyIdRefactorMigration; + + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] + pub struct OldState { + pub total_issuance: Balance, + pub entries: Vec<(AccountId, AccountData)>, + } + + impl OnRuntimeUpgrade for CurrencyIdRefactorMigration { + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + let total_issuance = + orml_tokens::TotalIssuance::::get(DEPRECATED_AUSD_CURRENCY_ID); + let entries: Vec<(AccountId, AccountData)> = + orml_tokens::Accounts::::iter() + .filter(|(_, old_currency_id, _)| { + *old_currency_id == DEPRECATED_AUSD_CURRENCY_ID + }) + .map(|(account, _, account_data)| (account, account_data)) + .collect::<_>(); + + Ok(OldState { + total_issuance, + entries, + } + .encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + use crate::OrmlTokens; + + let old_state = OldState::decode(&mut state.as_ref()) + .map_err(|_| "Error decoding pre-upgrade state")?; + + let new_total_issuance = + orml_tokens::TotalIssuance::::get(NEW_AUSD_CURRENCY_ID); + + ensure!( + old_state.total_issuance == new_total_issuance, + "The old AUSD issuance differs from the new one" + ); + + for (account, account_data) in old_state.entries { + ensure!( + OrmlTokens::accounts(&account, NEW_AUSD_CURRENCY_ID) == account_data.clone(), + "The account data under the new AUSD Currency does NOT match the old one" + ); + } + + Ok(()) + } + + fn on_runtime_upgrade() -> Weight { + use frame_support::traits::tokens::fungibles::Mutate; + + let mut migrated_entries = 0; + + // Burn all AUSD tokens under the old CurrencyId and mint them under the new one + orml_tokens::Accounts::::iter() + .filter(|(_, old_currency_id, _)| *old_currency_id == DEPRECATED_AUSD_CURRENCY_ID) + .for_each(|(account, _, account_data)| { + let balance = account_data.free; + // Burn the amount under the old, hardcoded CurrencyId + as Mutate>::burn_from( + DEPRECATED_AUSD_CURRENCY_ID, + &account, + balance, + ) + .map_err(|e| { + log::error!( + "Failed to call burn_from({:?}, {:?}, {balance}): {:?}", + DEPRECATED_AUSD_CURRENCY_ID, + account, + e + ) + }) + .ok(); + // Now mint the amount under the new CurrencyID + as Mutate>::mint_into( + NEW_AUSD_CURRENCY_ID, + &account, + balance, + ) + .map_err(|e| { + log::error!( + "Failed to mint_into burn_from({:?}, {:?}, 
{balance}): {:?}", + NEW_AUSD_CURRENCY_ID, + account, + e + ) + }) + .ok(); + + migrated_entries += 1; + }); + + // Approximate weight given for every entry migration there are two calls being + // made, so counting the reads and writes for each call. + ::DbWeight::get() + .reads_writes(migrated_entries * 5, migrated_entries * 4) + } + } +} + +mod pool_system { + #[cfg(feature = "try-runtime")] + use cfg_primitives::PoolId; + use pallet_pool_system::pool_types::PoolDetails; + + use super::*; + + pub struct MigrateAUSDPools; + + impl OnRuntimeUpgrade for MigrateAUSDPools { + fn on_runtime_upgrade() -> Weight { + pallet_pool_system::Pool::::translate( + |_, mut details: PoolDetails| { + if details.currency == DEPRECATED_AUSD_CURRENCY_ID { + details.currency = NEW_AUSD_CURRENCY_ID; + } + + Some(details) + }, + ); + + let n = pallet_pool_system::Pool::::iter().count() as u64; + ::DbWeight::get().reads_writes(n, n) + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, &'static str> { + let ausd_pools: Vec = pallet_pool_system::Pool::::iter() + .filter(|(_, details)| details.currency == DEPRECATED_AUSD_CURRENCY_ID) + .map(|(pool_id, _)| pool_id) + .collect::<_>(); + + Ok(ausd_pools.encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(state: Vec) -> Result<(), &'static str> { + let ausd_pools = sp_std::vec::Vec::::decode(&mut state.as_ref()) + .map_err(|_| "Error decoding pre-upgrade state")?; + + for pool_id in ausd_pools { + let pool = pallet_pool_system::Pool::::get(pool_id) + .expect("AUSD Pool should exist after the migration was executed"); + + ensure!( + pool.currency == NEW_AUSD_CURRENCY_ID, + "A AUSD pool was NOT migrated to the new AUSD CurrencyId (ForeignAsset(2))", + ) + } + + Ok(()) + } + } +} From a882f4afedb7bdb8e6b0afb9f21002f4c2ba0492 Mon Sep 17 00:00:00 2001 From: lemunozm Date: Fri, 11 Aug 2023 16:26:10 +0200 Subject: [PATCH 4/4] remove limit when nuking --- runtime/common/src/migrations/nuke.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/runtime/common/src/migrations/nuke.rs b/runtime/common/src/migrations/nuke.rs index 297ea029c2..65b2dc1cf0 100644 --- a/runtime/common/src/migrations/nuke.rs +++ b/runtime/common/src/migrations/nuke.rs @@ -71,8 +71,7 @@ where if Pallet::on_chain_storage_version() < Pallet::current_storage_version() { log::info!("Nuke-{}: nuking pallet...", Pallet::name()); - // TODO: How we can set the maximum for this? Currently hardcode - let result = unhashed::clear_prefix(&pallet_prefix::(), Some(1000), None); + let result = unhashed::clear_prefix(&pallet_prefix::(), None, None); match result.maybe_cursor { None => log::info!("Nuke-{}: storage cleared successful", Pallet::name()), Some(_) => {
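
For reference, this is how a runtime is expected to wire the generic nuke migration after this series, following the `UpgradeAltair1029` alias introduced in patch 2. The generic arguments below are a sketch: `Loans` (the runtime's `pallet_loans` instance), `RocksDbWeight`, and `1` as the previous on-chain storage version are assumptions taken from context and may differ in the final code.

    use frame_support::weights::constants::RocksDbWeight;

    use crate::Loans;

    // Nukes pallet-loans storage only if its on-chain storage version is still 1,
    // then bumps it to the pallet's current storage version (2).
    pub type UpgradeAltair1029 =
        runtime_common::migrations::nuke::Migration<Loans, RocksDbWeight, 1>;

    // `UpgradeAltair1029` is then passed as the last type parameter of
    // `frame_executive::Executive<...>` in runtime/altair/src/lib.rs, as patch 1 shows.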