From daf6723d4390db875c8e3d4a8bf8818f5e837351 Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Tue, 11 Feb 2025 13:33:36 +0100
Subject: [PATCH] [AHM] Vesting (#575)

Merging into the AHM working branch. Depends on https://github.com/polkadot-fellows/runtimes/pull/579

# Pallet Vesting

Pallet vesting has one storage map to hold the vesting schedules and one storage value to track the current version of the pallet. The version can be migrated easily, but the schedules are a bit more difficult.

## Storage: Vesting

The vesting schedules are already measured in relay blocks, as can be seen [here](https://github.com/polkadot-fellows/runtimes/blob/b613b54d94af5f4702533a56c6260651a14bdccb/system-parachains/asset-hubs/asset-hub-polkadot/src/lib.rs#L297). This means that we can just integrate the existing schedules. The only possible issue is when there are lots of pre-existing schedules. The maximum number of schedules is 28, both on the Relay chain and on AH.

We cannot use the merge functionality of the vesting pallet since it can be used as an attack vector: anyone can send 28 vested transfers with a very long unlock duration and a low amount to force all other schedules to adopt this long unlock period. This would reduce the amount unlocked per block, which is bad for the affected account.

For now, we are writing all colliding AH schedules into a storage item for manual inspection later. It could still happen that non-malicious users end up with more than 28 schedules, but this is unlikely since nobody has used vested transfers on AH yet.

Q: Maybe we should disable vested transfers with the next runtime upgrade on AH.

## Storage: StorageVersion

The vesting pallet does not use the proper FRAME version tracking; rather, it tracks its version in the `StorageVersion` value. It does this incorrectly though, with Asset Hub reporting version 0 instead of 1. We ignore the stored value and correct it by writing 1 to storage.

## User Impact

This affects users that have vesting schedules on the Relay chain or on Asset Hub. There is a risk that the total number of schedules exceeds 28, which means that they will no longer fit into storage. We then prioritize the schedules from AH, keep up to 28 in total, and pause and stash all schedules that do not fit.
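To illustrate why the merge path described under "Storage: Vesting" is an attack vector, here is a minimal standalone sketch. It is plain Rust with made-up numbers, not the pallet's actual code: it only models the merge rule that the combined schedule ends at the later end block and releases the combined remaining balance linearly, which is enough to show how a dust schedule with a huge duration drags down the per-block unlock of an honest schedule.

```rust
/// Simplified model of pallet-vesting's schedule merge: the merged schedule
/// ends at the later end block and unlocks the combined remaining balance
/// linearly until then. (Sketch only; the real pallet also accounts for the
/// start block, rounding, and minimum transfer amounts.)
fn merged_per_block(locked_a: u128, end_a: u64, locked_b: u128, end_b: u64, now: u64) -> u128 {
    let end = end_a.max(end_b);
    let duration = end.saturating_sub(now).max(1) as u128;
    (locked_a + locked_b) / duration
}

fn main() {
    // Honest schedule: 1_000_000 units unlocking over 100 blocks -> 10_000 per block.
    let honest = merged_per_block(1_000_000, 100, 0, 0, 0);
    // Merged with an attacker's 1-unit schedule that runs for 10_000_000 blocks:
    // integer division collapses the per-block unlock to 0, effectively freezing the funds.
    let attacked = merged_per_block(1_000_000, 100, 1, 10_000_000, 0);
    assert!(attacked < honest);
    println!("per block before: {honest}, after merge with dust schedule: {attacked}");
}
```

This is why the migration appends incoming RC schedules instead of merging them, and stashes anything beyond the 28-entry limit for manual inspection.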
- [x] Does not require a CHANGELOG entry --------- Signed-off-by: Oliver Tale-Yazdi --- Cargo.lock | 2 + pallets/ah-migrator/Cargo.toml | 4 + pallets/ah-migrator/src/benchmarking.rs | 118 ++++++++++++++++++++++++ pallets/ah-migrator/src/lib.rs | 50 ++++++++-- pallets/ah-migrator/src/vesting.rs | 88 ++++++++++++++++++ pallets/rc-migrator/Cargo.toml | 4 + pallets/rc-migrator/src/lib.rs | 40 ++++++++ pallets/rc-migrator/src/types.rs | 20 ++-- pallets/rc-migrator/src/vesting.md | 36 ++++++++ pallets/rc-migrator/src/vesting.rs | 96 +++++++++++++++++++ 10 files changed, 440 insertions(+), 18 deletions(-) create mode 100644 pallets/ah-migrator/src/benchmarking.rs create mode 100644 pallets/ah-migrator/src/vesting.rs create mode 100644 pallets/rc-migrator/src/vesting.md create mode 100644 pallets/rc-migrator/src/vesting.rs diff --git a/Cargo.lock b/Cargo.lock index 7e506b4147..182cbc6898 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7669,6 +7669,7 @@ dependencies = [ "pallet-staking", "pallet-state-trie-migration", "pallet-treasury", + "pallet-vesting", "parity-scale-codec", "polkadot-parachain-primitives", "polkadot-runtime-common", @@ -8905,6 +8906,7 @@ dependencies = [ "pallet-scheduler", "pallet-staking", "pallet-treasury", + "pallet-vesting", "parity-scale-codec", "polkadot-parachain-primitives", "polkadot-runtime-common", diff --git a/pallets/ah-migrator/Cargo.toml b/pallets/ah-migrator/Cargo.toml index dc840a5ac0..93ada94d27 100644 --- a/pallets/ah-migrator/Cargo.toml +++ b/pallets/ah-migrator/Cargo.toml @@ -29,6 +29,7 @@ pallet-referenda = { workspace = true } pallet-scheduler = { workspace = true } pallet-staking = { workspace = true } pallet-state-trie-migration = { workspace = true } +pallet-vesting = { workspace = true } pallet-treasury = { workspace = true } polkadot-parachain-primitives = { workspace = true } polkadot-runtime-common = { workspace = true } @@ -69,6 +70,7 @@ std = [ "pallet-staking/std", "pallet-state-trie-migration/std", "pallet-treasury/std", + "pallet-vesting/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "runtime-parachains/std", @@ -101,6 +103,7 @@ runtime-benchmarks = [ "pallet-staking/runtime-benchmarks", "pallet-state-trie-migration/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", + "pallet-vesting/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", "runtime-parachains/runtime-benchmarks", @@ -126,6 +129,7 @@ try-runtime = [ "pallet-staking/try-runtime", "pallet-state-trie-migration/try-runtime", "pallet-treasury/try-runtime", + "pallet-vesting/try-runtime", "polkadot-runtime-common/try-runtime", "runtime-parachains/try-runtime", "sp-runtime/try-runtime", diff --git a/pallets/ah-migrator/src/benchmarking.rs b/pallets/ah-migrator/src/benchmarking.rs new file mode 100644 index 0000000000..201a59563f --- /dev/null +++ b/pallets/ah-migrator/src/benchmarking.rs @@ -0,0 +1,118 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! To run these benchmarks, you will need a modified version of `frame-omni-bencher` that can load +//! snapshots of the relay and asset hub. You can find it on branch `oty-ahm-omni-bencher` of the +//! SDK. Install it with +//! `cargo install --path substrate/utils/frame/omni-bencher --profile production` +//! +//! ```bash +//! frame-omni-bencher v1 benchmark pallet --runtime=target/release/wbuild/asset-hub-polkadot-runtime/asset_hub_polkadot_runtime.wasm --pallet "pallet-ah-migrator" --extrinsic "" --snap=ah-polkadot.snap --rc-snap=polkadot.snap +//! ``` + +use crate::*; +use core::str::FromStr; +use cumulus_primitives_core::{AggregateMessageOrigin, InboundDownwardMessage}; +use frame_benchmarking::v2::*; +use frame_support::{traits::EnqueueMessage, weights::WeightMeter}; +use frame_system::RawOrigin; +use pallet_rc_migrator::types::PalletMigration; +use xcm::VersionedXcm; + +#[benchmarks(where T: pallet_balances::Config)] +mod benchmarks { + use super::*; + + #[benchmark] + fn receive_multisigs() { + verify_snapshot::(); + let (messages, _cursor) = relay_snapshot(|| { + unwrap_no_debug(pallet_rc_migrator::multisig::MultisigMigrator::::migrate_out_many( + None, + &mut WeightMeter::new(), + )) + }); + + #[extrinsic_call] + _(RawOrigin::Root, messages); + + // TODO assert event + } + + #[benchmark] + fn receive_nom_pools_messages_pool_members() { + verify_snapshot::(); + let (messages, _cursor) = relay_snapshot(|| { + unwrap_no_debug(pallet_rc_migrator::staking::nom_pools::NomPoolsMigrator::::migrate_many( + None, + &mut WeightMeter::new(), + )) + }); + + #[extrinsic_call] + _(RawOrigin::Root, messages); + + // TODO assert event + } +} + +/// Unwrap something that does not implement Debug. Otherwise we would need to require +/// `pallet_rc_migrator::Config` on out runtime `T`. +pub fn unwrap_no_debug(result: Result) -> T { + match result { + Ok(t) => t, + Err(_) => panic!("unwrap_no_debug"), + } +} + +/// Check that Oliver's account has some balance on AH and Relay. +/// +/// This serves as sanity check that the snapshots were loaded correctly. +fn verify_snapshot() { + let raw_acc: [u8; 32] = + hex::decode("6c9e3102dd2c24274667d416e07570ebce6f20ab80ee3fc9917bf4a7568b8fd2") + .unwrap() + .try_into() + .unwrap(); + let acc = AccountId32::from(raw_acc); + frame_system::Pallet::::reset_events(); + + // Sanity check that this is the right account + let ah_acc = frame_system::Account::::get(&acc); + if ah_acc.data.free == 0 { + panic!("No or broken snapshot: account does not have any balance"); + } + + let key = frame_system::Account::::hashed_key_for(&acc); + let raw_acc = relay_snapshot(|| { + frame_support::storage::unhashed::get::< + pallet_balances::AccountData<::Balance>, + >(key.as_ref()) + }).unwrap(); + + if raw_acc.free == 0 { + panic!("No or broken snapshot: account does not have any balance"); + } +} + +/// Read something from the relay chain snapshot instead of the asset hub one. 
+fn relay_snapshot R>(f: F) -> R { + sp_io::storage::get(b"relay_chain_enable"); + let result = f(); + sp_io::storage::get(b"relay_chain_disable"); + result +} diff --git a/pallets/ah-migrator/src/lib.rs b/pallets/ah-migrator/src/lib.rs index 00ef36dae7..c56c513bfe 100644 --- a/pallets/ah-migrator/src/lib.rs +++ b/pallets/ah-migrator/src/lib.rs @@ -45,6 +45,7 @@ pub mod referenda; pub mod scheduler; pub mod staking; pub mod types; +pub mod vesting; pub use pallet::*; pub use pallet_rc_migrator::types::ZeroWeightOr; @@ -73,6 +74,7 @@ use pallet_rc_migrator::{ fast_unstake::{FastUnstakeMigrator, RcFastUnstakeMessage}, nom_pools::*, }, + vesting::RcVestingSchedule, }; use pallet_referenda::TrackIdOf; use polkadot_runtime_common::claims as pallet_claims; @@ -100,6 +102,7 @@ pub enum PalletEventName { Indices, FastUnstake, BagsList, + Vesting, Bounties, } @@ -122,6 +125,7 @@ pub mod pallet { + pallet_fast_unstake::Config + pallet_bags_list::Config + pallet_scheduler::Config + + pallet_vesting::Config + pallet_indices::Config + pallet_conviction_voting::Config + pallet_bounties::Config @@ -195,6 +199,9 @@ pub mod pallet { FailedToConvertCall, /// Failed to bound a call. FailedToBoundCall, + /// Failed to integrate a vesting schedule. + FailedToIntegrateVestingSchedule, + Unreachable, } #[pallet::event] @@ -344,6 +351,21 @@ pub mod pallet { /// How many scheduler messages failed to integrate. count_bad: u32, }, + /// Should not happen. Manual intervention by the Fellowship required. + /// + /// Can happen when existing AH and incoming RC vesting schedules have more combined + /// entries than allowed. This triggers the merging logic which has henceforth failed + /// with the given inner pallet-vesting error. + FailedToMergeVestingSchedules { + /// The account that failed to merge the schedules. + who: AccountId32, + /// The first schedule index that failed to merge. + schedule1: u32, + /// The second schedule index that failed to merge. + schedule2: u32, + /// The index of the pallet-vesting error that occurred. + pallet_vesting_error_index: Option, + }, ConvictionVotingMessagesReceived { /// How many conviction voting messages are in the batch. count: u32, @@ -465,6 +487,16 @@ pub mod pallet { } #[pallet::call_index(8)] + pub fn receive_vesting_schedules( + origin: OriginFor, + schedules: Vec>, + ) -> DispatchResult { + ensure_root(origin)?; + + Self::do_receive_vesting_schedules(schedules).map_err(Into::into) + } + + #[pallet::call_index(9)] pub fn receive_fast_unstake_messages( origin: OriginFor, messages: Vec>, @@ -475,7 +507,7 @@ pub mod pallet { } /// Receive referendum counts, deciding counts, votes for the track queue. - #[pallet::call_index(9)] + #[pallet::call_index(10)] pub fn receive_referenda_values( origin: OriginFor, referendum_count: u32, @@ -491,7 +523,7 @@ pub mod pallet { } /// Receive referendums from the Relay Chain. 
- #[pallet::call_index(10)] + #[pallet::call_index(11)] pub fn receive_referendums( origin: OriginFor, referendums: Vec<(u32, RcReferendumInfoOf)>, @@ -501,7 +533,7 @@ pub mod pallet { Self::do_receive_referendums(referendums).map_err(Into::into) } - #[pallet::call_index(11)] + #[pallet::call_index(12)] pub fn receive_claims( origin: OriginFor, messages: Vec>, @@ -511,7 +543,7 @@ pub mod pallet { Self::do_receive_claims(messages).map_err(Into::into) } - #[pallet::call_index(12)] + #[pallet::call_index(13)] pub fn receive_bags_list_messages( origin: OriginFor, messages: Vec>, @@ -521,7 +553,7 @@ pub mod pallet { Self::do_receive_bags_list_messages(messages).map_err(Into::into) } - #[pallet::call_index(13)] + #[pallet::call_index(14)] pub fn receive_scheduler_messages( origin: OriginFor, messages: Vec>, @@ -531,7 +563,7 @@ pub mod pallet { Self::do_receive_scheduler_messages(messages).map_err(Into::into) } - #[pallet::call_index(14)] + #[pallet::call_index(15)] pub fn receive_indices( origin: OriginFor, indices: Vec>, @@ -541,7 +573,7 @@ pub mod pallet { Self::do_receive_indices(indices).map_err(Into::into) } - #[pallet::call_index(15)] + #[pallet::call_index(16)] pub fn receive_conviction_voting_messages( origin: OriginFor, messages: Vec>, @@ -551,7 +583,7 @@ pub mod pallet { Self::do_receive_conviction_voting_messages(messages).map_err(Into::into) } - #[pallet::call_index(16)] + #[pallet::call_index(17)] pub fn receive_bounties_messages( origin: OriginFor, messages: Vec>, @@ -561,7 +593,7 @@ pub mod pallet { Self::do_receive_bounties_messages(messages).map_err(Into::into) } - #[pallet::call_index(17)] + #[pallet::call_index(18)] pub fn receive_asset_rates( origin: OriginFor, rates: Vec<(::AssetKind, FixedU128)>, diff --git a/pallets/ah-migrator/src/vesting.rs b/pallets/ah-migrator/src/vesting.rs new file mode 100644 index 0000000000..5e77d03024 --- /dev/null +++ b/pallets/ah-migrator/src/vesting.rs @@ -0,0 +1,88 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; + +impl Pallet { + pub fn do_receive_vesting_schedules( + messages: Vec>, + ) -> Result<(), Error> { + alias::StorageVersion::::put(alias::Releases::V1); + log::info!(target: LOG_TARGET, "Integrating {} vesting schedules", messages.len()); + Self::deposit_event(Event::BatchReceived { + pallet: PalletEventName::Vesting, + count: messages.len() as u32, + }); + let (mut count_good, mut count_bad) = (0, 0); + + for message in messages { + match Self::do_process_vesting_schedule(message) { + Ok(()) => count_good += 1, + Err(e) => { + count_bad += 1; + log::error!(target: LOG_TARGET, "Error while integrating vesting: {:?}", e); + }, + } + } + + Self::deposit_event(Event::BatchProcessed { + pallet: PalletEventName::Vesting, + count_good, + count_bad, + }); + + Ok(()) + } + + /// Integrate vesting schedules. 
+ pub fn do_process_vesting_schedule(message: RcVestingSchedule) -> Result<(), Error> { + let mut ah_schedules = pallet_vesting::Vesting::::get(&message.who).unwrap_or_default(); + + if !ah_schedules.is_empty() { + defensive!("We disabled vesting, looks like someone used it. Manually verify this and then remove this defensive assert."); + } + + for schedule in message.schedules { + ah_schedules + .try_push(schedule) + .defensive() + .map_err(|_| Error::::FailedToIntegrateVestingSchedule)?; + } + + pallet_vesting::Vesting::::insert(&message.who, &ah_schedules); + log::warn!(target: LOG_TARGET, "Integrated vesting schedule for {:?}, len {}", message.who, ah_schedules.len()); + + Ok(()) + } +} + +pub mod alias { + use super::*; + + #[frame_support::storage_alias(pallet_name)] + pub type StorageVersion = + StorageValue, Releases, ValueQuery>; + + #[derive( + Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug, MaxEncodedLen, Default, TypeInfo, + )] + pub enum Releases { + #[default] + V0, + V1, + } +} diff --git a/pallets/rc-migrator/Cargo.toml b/pallets/rc-migrator/Cargo.toml index 11728c3c8f..566fca5528 100644 --- a/pallets/rc-migrator/Cargo.toml +++ b/pallets/rc-migrator/Cargo.toml @@ -33,6 +33,7 @@ pallet-preimage = { workspace = true } pallet-treasury = { workspace = true } pallet-fast-unstake = { workspace = true } pallet-referenda = { workspace = true } +pallet-vesting = { workspace = true } pallet-nomination-pools = { workspace = true } polkadot-runtime-common = { workspace = true } runtime-parachains = { workspace = true } @@ -64,6 +65,7 @@ std = [ "pallet-scheduler/std", "pallet-staking/std", "pallet-treasury/std", + "pallet-vesting/std", "polkadot-parachain-primitives/std", "polkadot-runtime-common/std", "runtime-parachains/std", @@ -95,6 +97,7 @@ runtime-benchmarks = [ "pallet-scheduler/runtime-benchmarks", "pallet-staking/runtime-benchmarks", "pallet-treasury/runtime-benchmarks", + "pallet-vesting/runtime-benchmarks", "polkadot-parachain-primitives/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", "runtime-parachains/runtime-benchmarks", @@ -119,6 +122,7 @@ try-runtime = [ "pallet-scheduler/try-runtime", "pallet-staking/try-runtime", "pallet-treasury/try-runtime", + "pallet-vesting/try-runtime", "polkadot-runtime-common/try-runtime", "runtime-parachains/try-runtime", "sp-runtime/try-runtime", diff --git a/pallets/rc-migrator/src/lib.rs b/pallets/rc-migrator/src/lib.rs index aa851ef82d..f3f414b3ef 100644 --- a/pallets/rc-migrator/src/lib.rs +++ b/pallets/rc-migrator/src/lib.rs @@ -40,6 +40,7 @@ pub mod proxy; pub mod referenda; pub mod staking; pub mod types; +pub mod vesting; mod weights; pub use pallet::*; pub mod asset_rate; @@ -82,6 +83,7 @@ use staking::{ }; use storage::TransactionOutcome; use types::{AhWeightInfo, PalletMigration}; +use vesting::VestingMigrator; use weights::WeightInfo; use xcm::prelude::*; @@ -184,6 +186,12 @@ pub enum MigrationStage, + }, + VestingMigrationDone, + FastUnstakeMigrationInit, FastUnstakeMigrationOngoing { next_key: Option>, @@ -256,6 +264,7 @@ impl Result { Ok(match s { + "skip-accounts" => MigrationStage::AccountsMigrationDone, "preimage" => MigrationStage::PreimageMigrationInit, "referenda" => MigrationStage::ReferendaMigrationInit, "multisig" => MigrationStage::MultisigMigrationInit, @@ -294,6 +303,7 @@ pub mod pallet { + pallet_fast_unstake::Config + pallet_bags_list::Config + pallet_scheduler::Config + + pallet_vesting::Config + pallet_indices::Config + pallet_conviction_voting::Config + pallet_bounties::Config @@ 
-694,6 +704,36 @@ pub mod pallet { } }, MigrationStage::NomPoolsMigrationDone => { + Self::transition(MigrationStage::VestingMigrationInit); + }, + + MigrationStage::VestingMigrationInit => { + Self::transition(MigrationStage::VestingMigrationOngoing { next_key: None }); + }, + MigrationStage::VestingMigrationOngoing { next_key } => { + let res = with_transaction_opaque_err::, Error, _>(|| { + match VestingMigrator::::migrate_many(next_key, &mut weight_counter) { + Ok(last_key) => TransactionOutcome::Commit(Ok(last_key)), + Err(e) => TransactionOutcome::Rollback(Err(e)), + } + }) + .expect("Always returning Ok; qed"); + + match res { + Ok(None) => { + Self::transition(MigrationStage::VestingMigrationDone); + }, + Ok(Some(next_key)) => { + Self::transition(MigrationStage::VestingMigrationOngoing { + next_key: Some(next_key), + }); + }, + e => { + defensive!("Error while migrating vesting: {:?}", e); + }, + } + }, + MigrationStage::VestingMigrationDone => { Self::transition(MigrationStage::FastUnstakeMigrationInit); }, MigrationStage::FastUnstakeMigrationInit => { diff --git a/pallets/rc-migrator/src/types.rs b/pallets/rc-migrator/src/types.rs index 743d2bf204..98bb8e2e99 100644 --- a/pallets/rc-migrator/src/types.rs +++ b/pallets/rc-migrator/src/types.rs @@ -49,30 +49,32 @@ pub enum AhMigratorCall { #[codec(index = 7)] ReceiveNomPoolsMessages { messages: Vec> }, #[codec(index = 8)] - ReceiveFastUnstakeMessages { messages: Vec> }, + ReceiveVestingSchedules { messages: Vec> }, #[codec(index = 9)] + ReceiveFastUnstakeMessages { messages: Vec> }, + #[codec(index = 10)] ReceiveReferendaValues { referendum_count: u32, deciding_count: Vec<(TrackIdOf, u32)>, track_queue: Vec<(TrackIdOf, Vec<(u32, u128)>)>, }, - #[codec(index = 10)] - ReceiveReferendums { referendums: Vec<(u32, ReferendumInfoOf)> }, #[codec(index = 11)] - ReceiveClaimsMessages { messages: Vec> }, + ReceiveReferendums { referendums: Vec<(u32, ReferendumInfoOf)> }, #[codec(index = 12)] - ReceiveBagsListMessages { messages: Vec> }, + ReceiveClaimsMessages { messages: Vec> }, #[codec(index = 13)] - ReceiveSchedulerMessages { messages: Vec> }, + ReceiveBagsListMessages { messages: Vec> }, #[codec(index = 14)] - ReceiveIndices { indices: Vec> }, + ReceiveSchedulerMessages { messages: Vec> }, #[codec(index = 15)] + ReceiveIndices { indices: Vec> }, + #[codec(index = 16)] ReceiveConvictionVotingMessages { messages: Vec>, }, - #[codec(index = 16)] - ReceiveBountiesMessages { messages: Vec> }, #[codec(index = 17)] + ReceiveBountiesMessages { messages: Vec> }, + #[codec(index = 18)] ReceiveAssetRates { asset_rates: Vec<(::AssetKind, FixedU128)> }, } diff --git a/pallets/rc-migrator/src/vesting.md b/pallets/rc-migrator/src/vesting.md new file mode 100644 index 0000000000..eb6cf87916 --- /dev/null +++ b/pallets/rc-migrator/src/vesting.md @@ -0,0 +1,36 @@ +# Pallet Vesting + +Pallet vesting has one storage map to hold the vesting schedules and one storage value to track the +current version of the pallet. The version can be easily migrated, but for the schedules it is a bit difficult. + +## Storage: Vesting + +The vesting schedules are already measured in relay blocks, as can be seen +[here](https://github.com/polkadot-fellows/runtimes/blob/b613b54d94af5f4702533a56c6260651a14bdccb/system-parachains/asset-hubs/asset-hub-polkadot/src/lib.rs#L297). +This means that we can just integrate the existing schedules. The only possibly issue is when there +are lots of pre-existing schedules. The maximal number of schedules is 28; both on Relay and AH. 
+We cannot use the merge functionality of the vesting pallet since it can be used as an attack +vector: anyone can send 28 vested transfers with a very long unlock duration and a low amount to force +all other schedules to adopt this long unlock period. This would reduce the amount unlocked per block, which +is bad for the affected account. +For now, we are writing all colliding AH schedules into a storage item for manual inspection later. +It could still happen that non-malicious users end up with more than 28 schedules, but this is unlikely since nobody has +used vested transfers on AH yet. + +Q: Maybe we should disable vested transfers with the next runtime upgrade on AH. + +## Storage: StorageVersion + +The vesting pallet does not use the proper FRAME version tracking; rather, it tracks its version in +the `StorageVersion` value. It does this incorrectly though, with Asset Hub reporting version 0 +instead of 1. We ignore the stored value and correct it by writing 1 to storage. + +## User Impact + +This affects users that have vesting schedules on the Relay chain or on Asset Hub. There is a +risk that the total number of schedules exceeds 28, which means that they will no longer fit into +storage. + +We then prioritize the schedules from AH, keep up to 28 in total, and pause and stash all schedules that do not +fit. diff --git a/pallets/rc-migrator/src/vesting.rs b/pallets/rc-migrator/src/vesting.rs new file mode 100644 index 0000000000..efe09049ed --- /dev/null +++ b/pallets/rc-migrator/src/vesting.rs @@ -0,0 +1,96 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +use crate::*; +use frame_support::traits::Currency; +use pallet_vesting::MaxVestingSchedulesGet; + +pub type BalanceOf = <::Currency as Currency< + ::AccountId, +>>::Balance; + +#[derive( + Encode, Decode, CloneNoBound, PartialEqNoBound, EqNoBound, TypeInfo, MaxEncodedLen, DebugNoBound, +)] +#[codec(mel_bound(T: pallet_vesting::Config))] +#[scale_info(skip_type_params(T))] +pub struct RcVestingSchedule { + pub who: ::AccountId, + pub schedules: BoundedVec< + pallet_vesting::VestingInfo, BlockNumberFor>, + MaxVestingSchedulesGet, + >, +} + +pub struct VestingMigrator { + _phantom: PhantomData, +} + +impl PalletMigration for VestingMigrator { + type Key = T::AccountId; + type Error = Error; + + fn migrate_many( + current_key: Option, + weight_counter: &mut WeightMeter, + ) -> Result, Self::Error> { + let mut inner_key = current_key; + let mut messages = Vec::new(); + + loop { + if weight_counter + .try_consume(::DbWeight::get().reads_writes(1, 1)) + .is_err() + { + if messages.is_empty() { + return Err(Error::OutOfWeight); + } else { + break; + } + } + if messages.len() > 10_000 { + log::warn!("Weight allowed very big batch, stopping"); + break; + } + + let mut iter = match inner_key { + Some(who) => pallet_vesting::Vesting::::iter_from_key(who), + None => pallet_vesting::Vesting::::iter(), + }; + + match iter.next() { + Some((who, schedules)) => { + messages.push(RcVestingSchedule { who: who.clone(), schedules }); + log::debug!(target: LOG_TARGET, "Migrating vesting schedules for {:?}", who); + inner_key = Some(who); + }, + None => { + inner_key = None; + break; + }, + } + } + + if !messages.is_empty() { + Pallet::::send_chunked_xcm(messages, |messages| { + types::AhMigratorCall::ReceiveVestingSchedules { messages } + })?; + } + + Ok(inner_key) + } +}