Skip to content

Commit

Permalink
Merge pull request casper-network#4119 from darthsiroftardis/amend-upgrade-logic
Browse files Browse the repository at this point in the history

Amend upgrade logic
  • Loading branch information
darthsiroftardis authored Jul 18, 2023
2 parents 0161fa0 + 6ef6387 commit fb7b0eb
Show file tree
Hide file tree
Showing 2 changed files with 114 additions and 202 deletions.
228 changes: 112 additions & 116 deletions execution_engine/src/core/engine_state/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -330,6 +330,8 @@ where
CLValue::from_t(upgrade_config.chainspec_registry().clone())
.map_err(|error| Error::Bytesrepr(error.to_string()))?;

// We write the checksums of the chainspec settings to global state
// allowing verification of the chainspec data reported via the RPC.
tracking_copy.borrow_mut().write(
Key::ChainspecRegistry,
StoredValue::CLValue(cl_value_chainspec_registry),
Expand Down Expand Up @@ -417,92 +419,13 @@ where
.write(locked_funds_period_key, value);
}

// apply the arbitrary modifications
// apply the accepted modifications to global state.
for (key, value) in upgrade_config.global_state_update() {
tracking_copy.borrow_mut().write(*key, value.clone());
}

// This is a one time data transformation which will be removed
// in a following upgrade.
// TODO: CRef={https://github.com/casper-network/casper-node/issues/2479}
{
let withdraw_keys = tracking_copy
.borrow_mut()
.get_keys(correlation_id, &KeyTag::Withdraw)
.map_err(|_| Error::FailedToGetWithdrawKeys)?;

let (unbonding_delay, current_era_id) = {
let auction_contract = tracking_copy
.borrow_mut()
.get_contract(correlation_id, *auction_hash)?;

let unbonding_delay_key = auction_contract.named_keys()[UNBONDING_DELAY_KEY];
let delay = tracking_copy
.borrow_mut()
.read(correlation_id, &unbonding_delay_key)
.map_err(|error| error.into())?
.ok_or(Error::FailedToRetrieveUnbondingDelay)?
.as_cl_value()
.ok_or_else(|| Error::Bytesrepr("unbonding_delay".to_string()))?
.clone()
.into_t::<u64>()
.map_err(execution::Error::from)?;

let era_id_key = auction_contract.named_keys()[ERA_ID_KEY];

let era_id = tracking_copy
.borrow_mut()
.read(correlation_id, &era_id_key)
.map_err(|error| error.into())?
.ok_or(Error::FailedToRetrieveEraId)?
.as_cl_value()
.ok_or_else(|| Error::Bytesrepr("era_id".to_string()))?
.clone()
.into_t::<EraId>()
.map_err(execution::Error::from)?;

(delay, era_id)
};

for key in withdraw_keys {
// Transform only those withdraw purses that are still to be
// processed in the unbonding queue.
let withdraw_purses = tracking_copy
.borrow_mut()
.read(correlation_id, &key)
.map_err(|_| Error::FailedToGetWithdrawKeys)?
.ok_or(Error::FailedToGetStoredWithdraws)?
.as_withdraw()
.ok_or(Error::FailedToGetWithdrawPurses)?
.to_owned();

// Ensure that sufficient balance exists for all unbond purses that are to be
// migrated.
Self::fail_upgrade_if_withdraw_purses_lack_sufficient_balance(
&withdraw_purses,
&tracking_copy,
correlation_id,
)?;

let unbonding_purses: Vec<UnbondingPurse> = withdraw_purses
.into_iter()
.filter_map(|purse| {
if purse.era_of_creation() + unbonding_delay >= current_era_id {
return Some(UnbondingPurse::from(purse));
}
None
})
.collect();

let unbonding_key = key
.withdraw_to_unbond()
.ok_or_else(|| Error::Bytesrepr("unbond".to_string()))?;

tracking_copy
.borrow_mut()
.write(unbonding_key, StoredValue::Unbonding(unbonding_purses));
}
}
// Migrate the withdraw purses if needed.
self.migrate_withdraw_purses(correlation_id, &tracking_copy, *auction_hash)?;

// We insert the new unbonding delay once the purses to be paid out have been transformed
// based on the previous unbonding delay.
Expand All @@ -519,40 +442,6 @@ where
tracking_copy.borrow_mut().write(unbonding_delay_key, value);
}

// Perform global state migrations that require state.

if let Some(activation_point) = upgrade_config.activation_point() {
// The highest stored era is the immediate predecessor of the activation point.
let highest_era_info_id = activation_point.saturating_sub(1);
let highest_era_info_key = Key::EraInfo(highest_era_info_id);

let get_result = tracking_copy
.borrow_mut()
.get(correlation_id, &highest_era_info_key)
.map_err(|error| Error::Exec(error.into()))?;

match get_result {
Some(stored_value @ StoredValue::EraInfo(_)) => {
tracking_copy
.borrow_mut()
.write(Key::EraSummary, stored_value);
}
Some(other_stored_value) => {
// This should not happen as we only write EraInfo variants.
error!(stored_value_type_name=%other_stored_value.type_name(),
"EraInfo key contains unexpected StoredValue variant");
return Err(Error::ProtocolUpgrade(
ProtocolUpgradeError::UnexpectedStoredValueVariant,
));
}
None => {
// Can't find key
// Most likely this chain did not yet ran an auction, or recently completed a
// prune
}
};
}

let execution_effect = tracking_copy.borrow().effect();

// commit
Expand Down Expand Up @@ -2270,6 +2159,113 @@ where
maybe_proof.ok_or(Error::MissingChecksumRegistry)
}

    // This is a one time data transformation which will be removed
    // in a following upgrade.
    // TODO: CRef={https://github.com/casper-network/casper-node/issues/2479}
    /// Transform the withdraw purses that have yet to be paid out into Unbonding purses.
    ///
    /// Idempotent: if any `Unbond` keys already exist in global state the migration has
    /// already run and this returns `Ok(())` immediately; likewise if there are no
    /// `Withdraw` keys there is nothing to migrate.
    ///
    /// For each withdraw key still present, only purses whose unbonding window
    /// (`era_of_creation + unbonding_delay`) has not yet elapsed relative to the current
    /// era are carried over into the corresponding unbonding key; fully-elapsed purses
    /// are dropped. Before writing, the total balance backing the purses is verified via
    /// `fail_upgrade_if_withdraw_purses_lack_sufficient_balance` so the upgrade aborts
    /// rather than migrating under-funded unbonds.
    fn migrate_withdraw_purses(
        &self,
        correlation_id: CorrelationId,
        tracking_copy: &Rc<RefCell<TrackingCopy<<S as StateProvider>::Reader>>>,
        auction_hash: ContractHash,
    ) -> Result<(), Error> {
        let unbonding_keys = tracking_copy
            .borrow_mut()
            .get_keys(correlation_id, &KeyTag::Unbond)
            .map_err(Into::into)?;

        // If Unbonding keys have already been written to global state it
        // must mean the transformation has been performed and we can early exit.
        if !unbonding_keys.is_empty() {
            return Ok(());
        }

        let withdraw_keys = tracking_copy
            .borrow_mut()
            .get_keys(correlation_id, &KeyTag::Withdraw)
            .map_err(|_| Error::FailedToGetWithdrawKeys)?;

        // Nothing to migrate.
        if withdraw_keys.is_empty() {
            return Ok(());
        }

        {
            // Read `unbonding_delay` and the current era id out of the auction
            // contract's named keys; both are needed to decide which purses are
            // still within their unbonding window.
            let (unbonding_delay, current_era_id) = {
                let auction_contract = tracking_copy
                    .borrow_mut()
                    .get_contract(correlation_id, auction_hash)?;

                let unbonding_delay_key = auction_contract.named_keys()[UNBONDING_DELAY_KEY];
                let delay = tracking_copy
                    .borrow_mut()
                    .read(correlation_id, &unbonding_delay_key)
                    .map_err(|error| error.into())?
                    .ok_or(Error::FailedToRetrieveUnbondingDelay)?
                    .as_cl_value()
                    .ok_or_else(|| Error::Bytesrepr("unbonding_delay".to_string()))?
                    .clone()
                    .into_t::<u64>()
                    .map_err(execution::Error::from)?;

                let era_id_key = auction_contract.named_keys()[ERA_ID_KEY];

                let era_id = tracking_copy
                    .borrow_mut()
                    .read(correlation_id, &era_id_key)
                    .map_err(|error| error.into())?
                    .ok_or(Error::FailedToRetrieveEraId)?
                    .as_cl_value()
                    .ok_or_else(|| Error::Bytesrepr("era_id".to_string()))?
                    .clone()
                    .into_t::<EraId>()
                    .map_err(execution::Error::from)?;

                (delay, era_id)
            };

            for key in withdraw_keys {
                // Transform only those withdraw purses that are still to be
                // processed in the unbonding queue.
                let withdraw_purses = tracking_copy
                    .borrow_mut()
                    .read(correlation_id, &key)
                    .map_err(|_| Error::FailedToGetWithdrawKeys)?
                    .ok_or(Error::FailedToGetStoredWithdraws)?
                    .as_withdraw()
                    .ok_or(Error::FailedToGetWithdrawPurses)?
                    .to_owned();

                // Ensure that sufficient balance exists for all unbond purses that are to be
                // migrated.
                Self::fail_upgrade_if_withdraw_purses_lack_sufficient_balance(
                    &withdraw_purses,
                    tracking_copy,
                    correlation_id,
                )?;

                // Keep only purses whose unbonding window has not yet elapsed;
                // purses past `era_of_creation + unbonding_delay` are dropped.
                let unbonding_purses: Vec<UnbondingPurse> = withdraw_purses
                    .into_iter()
                    .filter_map(|purse| {
                        if purse.era_of_creation() + unbonding_delay >= current_era_id {
                            return Some(UnbondingPurse::from(purse));
                        }
                        None
                    })
                    .collect();

                // Map the Withdraw key to its Unbond counterpart before writing.
                let unbonding_key = key
                    .withdraw_to_unbond()
                    .ok_or_else(|| Error::Bytesrepr("unbond".to_string()))?;

                tracking_copy
                    .borrow_mut()
                    .write(unbonding_key, StoredValue::Unbonding(unbonding_purses));
            }
        }

        Ok(())
    }

/// As the name suggests, used to ensure commit_upgrade fails if we lack sufficient balances.
fn fail_upgrade_if_withdraw_purses_lack_sufficient_balance(
withdraw_purses: &[WithdrawPurse],
Expand Down
88 changes: 2 additions & 86 deletions execution_engine_testing/tests/src/test/regression/gh_3710.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
use std::{collections::BTreeSet, convert::TryInto, fmt, iter::FromIterator};

use casper_engine_test_support::{
ExecuteRequestBuilder, InMemoryWasmTestBuilder, StepRequestBuilder, UpgradeRequestBuilder,
WasmTestBuilder, DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY,
PRODUCTION_RUN_GENESIS_REQUEST,
ExecuteRequestBuilder, InMemoryWasmTestBuilder, StepRequestBuilder, WasmTestBuilder,
DEFAULT_ACCOUNT_ADDR, DEFAULT_ACCOUNT_PUBLIC_KEY, PRODUCTION_RUN_GENESIS_REQUEST,
};
use casper_execution_engine::{
core::{
Expand All @@ -25,89 +24,6 @@ const FIXTURE_N_ERAS: usize = 10;

const GH_3710_FIXTURE: &str = "gh_3710";

#[ignore]
#[test]
fn gh_3710_should_copy_latest_era_info_to_stable_key_at_upgrade_point() {
    // Bring up a builder from the pre-recorded LMDB global-state fixture.
    let (mut builder, fixture_state, _temp_dir) =
        lmdb_fixture::builder_from_global_state_fixture(GH_3710_FIXTURE);

    // Pull the auction delay out of the fixture's genesis request JSON.
    let auction_delay: u64 = fixture_state
        .genesis_request
        .get("ee_config")
        .expect("should have ee_config")
        .get("auction_delay")
        .expect("should have auction delay")
        .as_i64()
        .expect("auction delay should be integer")
        .try_into()
        .expect("auction delay should be positive");

    // The newest era with stored info precedes the activation point of the upgrade.
    let newest_era_with_info = EraId::new(auction_delay + FIXTURE_N_ERAS as u64);
    let activation_era = newest_era_with_info.successor();

    let state_root_before_upgrade = builder.get_post_state_hash();

    let old_protocol_version = fixture_state.genesis_protocol_version();
    let current_protocol_version = fixture_state.genesis_protocol_version();

    // Bump only the patch component to form the post-upgrade version.
    let next_protocol_version = ProtocolVersion::from_parts(
        current_protocol_version.value().major,
        current_protocol_version.value().minor,
        current_protocol_version.value().patch + 1,
    );

    let era_info_keys_before_upgrade = builder
        .get_keys(KeyTag::EraInfo)
        .expect("should return all the era info keys");

    // One EraInfo per era from genesis (inclusive) through the fixture's last era.
    assert_eq!(
        era_info_keys_before_upgrade.len(),
        auction_delay as usize + 1 + FIXTURE_N_ERAS,
    );

    let mut upgrade_request = UpgradeRequestBuilder::new()
        .with_current_protocol_version(old_protocol_version)
        .with_new_protocol_version(next_protocol_version)
        .with_activation_point(activation_era)
        .build();

    builder
        .upgrade_with_upgrade_request(*builder.get_engine_state().config(), &mut upgrade_request)
        .expect_upgrade_success();

    let upgrade_result = builder.get_upgrade_result(0).expect("result");
    let upgrade_success = upgrade_result.as_ref().expect("success");
    assert_eq!(
        upgrade_success.post_state_hash,
        builder.get_post_state_hash(),
        "sanity check"
    );

    // The upgrade must not add or remove any EraInfo keys.
    let era_info_keys_after_upgrade = builder
        .get_keys(KeyTag::EraInfo)
        .expect("should return all the era info keys");
    assert_eq!(era_info_keys_after_upgrade, era_info_keys_before_upgrade);

    // The newest pre-upgrade EraInfo value should now also be readable
    // under the stable EraSummary key.
    let newest_era_info_value = builder
        .query(
            Some(state_root_before_upgrade),
            Key::EraInfo(newest_era_with_info),
            &[],
        )
        .expect("should query pre-upgrade stored value");

    let era_summary = builder
        .query(None, Key::EraSummary, &[])
        .expect("should query stable key after the upgrade");

    assert_eq!(newest_era_info_value, era_summary);
}

#[ignore]
#[test]
fn gh_3710_commit_prune_with_empty_keys_should_be_noop() {
Expand Down

0 comments on commit fb7b0eb

Please sign in to comment.