feat: make AssetHub DOT reserve #896

Closed · wants to merge 6 commits
3,436 changes: 2,063 additions & 1,373 deletions Cargo.lock

Large diffs are not rendered by default.

839 changes: 494 additions & 345 deletions Cargo.toml

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion integration-tests/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "runtime-integration-tests"
version = "1.23.4"
version = "1.23.5"
description = "Integration tests"
authors = ["GalacticCouncil"]
edition = "2021"
160 changes: 140 additions & 20 deletions integration-tests/src/cross_chain_transfer.rs
@@ -648,38 +648,31 @@ fn transfer_foreign_asset_from_acala_to_hydra_should_not_work() {
}

#[test]
fn transfer_dot_reserve_from_asset_hub_to_hydra_should_not_work() {
fn transfer_dot_reserve_from_asset_hub_to_hydra_should_work() {
//Arrange
TestNet::reset();

Hydra::execute_with(|| {
let _ = with_transaction(|| {
register_foreign_asset();
assert_ok!(hydradx_runtime::AssetRegistry::set_location(
DOT,
hydradx_runtime::AssetLocation(MultiLocation::new(1, polkadot_xcm::opaque::v3::Junctions::Here))
));
add_currency_price(DOT, FixedU128::from(1));

add_currency_price(FOREIGN_ASSET, FixedU128::from(1));
add_currency_price(DOT, FixedU128::from(1));
assert_ok!(hydradx_runtime::Tokens::deposit(
DOT,
&AccountId::from(ALICE),
3000 * UNITS
));

TransactionOutcome::Commit(DispatchResult::Ok(()))
});
assert_ok!(hydradx_runtime::AssetRegistry::set_location(
DOT,
hydradx_runtime::AssetLocation(MultiLocation::new(1, polkadot_xcm::opaque::v3::Junctions::Here))
));
});

AssetHub::execute_with(|| {
let _ = with_transaction(|| {
register_foreign_asset();
register_dot();
TransactionOutcome::Commit(DispatchResult::Ok(()))
});

assert_ok!(hydradx_runtime::Tokens::deposit(
FOREIGN_ASSET,
&AccountId::from(ALICE),
3000 * UNITS
));

assert_ok!(hydradx_runtime::Tokens::deposit(
DOT,
&AccountId::from(ALICE),
@@ -699,7 +692,8 @@ fn transfer_dot_reserve_from_asset_hub_to_hydra_should_not_work() {
}])),
);

let xcm = xcm_for_deposit_reserve_asset_to_hydra::<hydradx_runtime::RuntimeCall>(dot, bob_beneficiary);
let xcm =
xcm_transfer_reserve_asset_and_deposit_asset_to_hydra::<hydradx_runtime::RuntimeCall>(dot, bob_beneficiary);

//Act
let res = hydradx_runtime::PolkadotXcm::execute(
@@ -719,7 +713,70 @@

//Assert
Hydra::execute_with(|| {
assert_xcm_message_processing_failed();
assert_xcm_message_processing_passed();

let fee = hydradx_runtime::Tokens::free_balance(DOT, &hydradx_runtime::Treasury::account_id());
assert!(fee > 0, "treasury should have received fees");
//Check that DOT reserve-transferred from AssetHub has been deposited successfully
assert_eq!(
hydradx_runtime::Currencies::free_balance(DOT, &AccountId::from(BOB)),
100 * UNITS - fee
);
});
}

#[test]
fn transfer_dot_from_hydra_to_asset_hub() {
AssetHub::execute_with(|| {
let _ = with_transaction(|| {
register_dot();
add_currency_price(DOT, FixedU128::from(1));
TransactionOutcome::Commit(DispatchResult::Ok(()))
});
});

//Arrange
Hydra::execute_with(|| {
assert_ok!(hydradx_runtime::AssetRegistry::set_location(
DOT,
hydradx_runtime::AssetLocation(MultiLocation::new(1, polkadot_xcm::opaque::v3::Junctions::Here))
));

//Act
assert_ok!(hydradx_runtime::XTokens::transfer(
hydradx_runtime::RuntimeOrigin::signed(ALICE.into()),
DOT,
3 * UNITS,
Box::new(
MultiLocation::new(
1,
X2(
Junction::Parachain(ASSET_HUB_PARA_ID),
Junction::AccountId32 { id: BOB, network: None }
)
)
.into_versioned()
),
WeightLimit::Unlimited,
));

//Assert
assert_eq!(
hydradx_runtime::Tokens::free_balance(DOT, &AccountId::from(ALICE)),
2000 * UNITS - 3 * UNITS
);
});

//Needed to process horizontal xcm messages
Rococo::execute_with(|| {});

AssetHub::execute_with(|| {
assert_xcm_message_processing_passed();

assert_eq!(
hydradx_runtime::Currencies::free_balance(DOT, &AccountId::from(BOB)),
2_899_374_643_624 // 3 * UNITS - fee
);
});
}

@@ -862,6 +919,69 @@ fn xcm_for_deposit_reserve_asset_to_hydra<RC: Decode + GetDispatchInfo>(
VersionedXcm::from(message)
}

fn xcm_transfer_reserve_asset_and_deposit_asset_to_hydra<RC: Decode + GetDispatchInfo>(
assets: Asset,
beneficiary: Location,
) -> VersionedXcm<RC> {
use rococo_runtime::xcm_config::BaseXcmWeight;
use xcm_builder::FixedWeightBounds;
use xcm_executor::traits::WeightBounds;

type Weigher<RC> = FixedWeightBounds<BaseXcmWeight, RC, ConstU32<100>>;

let dest = Location::new(
1,
cumulus_primitives_core::Junctions::X1(Arc::new([cumulus_primitives_core::Junction::Parachain(HYDRA_PARA_ID)])),
);

let max_assets = 1 + 1;

let context = cumulus_primitives_core::Junctions::X2(Arc::new([
cumulus_primitives_core::Junction::GlobalConsensus(cumulus_primitives_core::NetworkId::Polkadot),
cumulus_primitives_core::Junction::Parachain(ASSET_HUB_PARA_ID),
]));

let fee_asset = assets.clone().reanchored(&dest, &context).expect("should reanchor");
let fees = fee_asset.clone();

let weight_limit = {
let mut remote_message = Xcm(vec![
ReserveAssetDeposited::<RC>(assets.clone().into()),
ClearOrigin,
BuyExecution {
fees: fees.clone(),
weight_limit: Limited(Weight::zero()),
},
DepositAsset {
assets: Wild(AllCounted(max_assets)),
beneficiary: beneficiary.clone(),
},
]);
// use local weight for remote message and hope for the best.
let remote_weight = Weigher::weight(&mut remote_message).expect("weighing should not fail");
Limited(remote_weight)
};
// executed on remote (on hydra)
let xcm = Xcm(vec![
//ReserveAssetDeposited(assets.clone()),
BuyExecution { fees, weight_limit },
DepositAsset {
assets: Wild(AllCounted(max_assets)),
beneficiary,
},
]);
// executed on local (AssetHub)
let message = Xcm(vec![
SetFeesMode { jit_withdraw: true },
TransferReserveAsset {
assets: assets.into(),
dest,
xcm,
},
]);
VersionedXcm::from(message)
}

fn register_foreign_asset() {
assert_ok!(AssetRegistry::register_sufficient_asset(
Some(FOREIGN_ASSET),
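For orientation: the new helper `xcm_transfer_reserve_asset_and_deposit_asset_to_hydra` only puts `BuyExecution` and `DepositAsset` into its inner `xcm`, yet its weigher also counts `ReserveAssetDeposited` and `ClearOrigin` (and the old `ReserveAssetDeposited` line is commented out). That is because the XCM executor on AssetHub expands `TransferReserveAsset` itself: it credits the DOT to Hydra's sovereign account locally and prepends those two instructions to the forwarded program. A minimal sketch of that expansion, assuming the same v4 `polkadot_xcm` prelude used elsewhere in this test crate (not code from this PR):

```rust
// Sketch of the xcm-executor's handling of TransferReserveAsset { assets, dest, xcm }.
// Assumption: `polkadot_xcm` is the staging-xcm crate alias used by this test crate.
use polkadot_xcm::v4::prelude::*;

/// Onward message the reserve chain (AssetHub) sends to `dest` (Hydra) after it
/// has moved `assets` into Hydra's sovereign account on its own chain.
fn onward_message<Call>(assets: Assets, xcm: Xcm<Call>) -> Xcm<Call> {
    let mut message = Xcm(vec![
        // Tell the destination the assets are already backed on the reserve chain.
        ReserveAssetDeposited(assets),
        // Drop the AssetHub origin before the user-supplied tail runs.
        ClearOrigin,
    ]);
    // Tail supplied in TransferReserveAsset: here BuyExecution + DepositAsset.
    message.0.extend(xcm.0);
    message
}
```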
12 changes: 5 additions & 7 deletions integration-tests/src/polkadot_test_net.rs
@@ -2,7 +2,7 @@
use frame_support::{
assert_ok,
sp_runtime::{
traits::{AccountIdConversion, Block as BlockT, Dispatchable},
traits::{AccountIdConversion, Block as BlockT, Dispatchable, HashingFor},
BuildStorage, FixedU128, Permill,
},
traits::{GetCallMetadata, OnInitialize},
@@ -18,7 +18,7 @@ use hex_literal::hex;
use hydradx_runtime::{evm::WETH_ASSET_LOCATION, Referrals, RuntimeOrigin};
pub use hydradx_traits::{evm::InspectEvmAccounts, registry::Mutate};
use pallet_referrals::{FeeDistribution, Level};
pub use polkadot_primitives::v6::{BlockNumber, MAX_CODE_SIZE, MAX_POV_SIZE};
pub use polkadot_primitives::v7::{BlockNumber, MAX_CODE_SIZE, MAX_POV_SIZE};
use polkadot_runtime_parachains::configuration::HostConfiguration;
use sp_consensus_beefy::ecdsa_crypto::AuthorityId as BeefyId;
use sp_core::storage::Storage;
@@ -120,7 +120,7 @@ decl_test_networks! {
}

decl_test_relay_chains! {
#[api_version(10)]
#[api_version(11)]
pub struct RococoRelayChain {
genesis = rococo::genesis(),
on_init = {
@@ -259,8 +259,6 @@ pub mod rococo {
max_code_size: MAX_CODE_SIZE,
max_pov_size: MAX_POV_SIZE,
max_head_data_size: 32 * 1024,
group_rotation_frequency: 20,
paras_availability_period: 4,
max_upward_queue_count: 8,
max_upward_queue_size: 1024 * 1024,
max_downward_message_size: 1024,
@@ -378,7 +376,7 @@ pub mod rococo {
},
babe: rococo_runtime::BabeConfig {
authorities: Default::default(),
epoch_config: Some(rococo_runtime::BABE_GENESIS_EPOCH_CONFIG),
epoch_config: rococo_runtime::BABE_GENESIS_EPOCH_CONFIG,
..Default::default()
},
..Default::default()
@@ -767,7 +765,7 @@ pub fn rococo_run_to_block(to: BlockNumber) {

pub fn hydra_live_ext(
path_to_snapshot: &str,
) -> frame_remote_externalities::RemoteExternalities<hydradx_runtime::Block> {
) -> frame_remote_externalities::RemoteExternalities<HashingFor<hydradx_runtime::Block>> {
let ext = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
4 changes: 2 additions & 2 deletions node/Cargo.toml
@@ -19,8 +19,8 @@ hydra-dx-build-script-utils = { workspace = true }

[dependencies]
codec = { package = "parity-scale-codec", version = "3.4.0" }
hex-literal = "0.3.4"
jsonrpsee = { version = "0.20.3", features = ["server", "macros"] }
hex-literal = { workspace = true }
jsonrpsee = { version = "0.22.5", features = ["server", "macros"] }
log = "0.4.17"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0.85"
11 changes: 3 additions & 8 deletions node/src/command.rs
@@ -20,6 +20,7 @@ use crate::cli::{Cli, RelayChainCli, Subcommand};
use crate::service::new_partial;

use codec::Encode;
use cumulus_client_service::storage_proof_size::HostFunctions as ReclaimHostFunctions;
use cumulus_primitives_core::ParaId;
use frame_benchmarking_cli::{BenchmarkCmd, SUBSTRATE_REFERENCE_HARDWARE};
use hydradx_runtime::Block;
@@ -28,7 +29,6 @@ use sc_cli::{
ChainSpec, CliConfiguration, DefaultConfigurationValues, ImportParams, KeystoreParams, NetworkParams, Result,
RuntimeVersion, SharedParams, SubstrateCli,
};
use sc_executor::sp_wasm_interface::ExtendedHostFunctions;
use sc_service::config::{BasePath, PrometheusConfig};
use sp_core::hexdisplay::HexDisplay;
use sp_runtime::{
@@ -194,12 +194,7 @@ pub fn run() -> sc_cli::Result<()> {
match cmd {
BenchmarkCmd::Pallet(cmd) => {
if cfg!(feature = "runtime-benchmarks") {
runner.sync_run(|config| {
cmd.run::<Block, ExtendedHostFunctions<
sp_io::SubstrateHostFunctions,
frame_benchmarking::benchmarking::HostFunctions,
>>(config)
})
runner.sync_run(|config| cmd.run_with_spec::<sp_runtime::traits::HashingFor<Block>, ReclaimHostFunctions>(Some(config.chain_spec)))
} else {
Err("Benchmarking wasn't enabled when building the node. \
You can enable it with `--features runtime-benchmarks`."
@@ -296,7 +291,7 @@ pub fn run() -> sc_cli::Result<()> {
let id = ParaId::from(para_id);

let parachain_account =
AccountIdConversion::<polkadot_primitives::v6::AccountId>::into_account_truncating(&id);
AccountIdConversion::<polkadot_primitives::v7::AccountId>::into_account_truncating(&id);

let state_version = Cli::runtime_version().state_version();

38 changes: 20 additions & 18 deletions node/src/rpc.rs
@@ -25,22 +25,16 @@ use cumulus_primitives_core::PersistedValidationData;
use cumulus_primitives_parachain_inherent::ParachainInherentData;
use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder;
use fc_db::kv::Backend as FrontierBackend;
pub use fc_rpc::{
EthBlockDataCacheTask, OverrideHandle, RuntimeApiStorageOverride, SchemaV1Override, SchemaV2Override,
SchemaV3Override, StorageOverride,
};
use fc_rpc_core::types::CallRequest;
pub use fc_rpc::{EthBlockDataCacheTask, StorageOverride, StorageOverrideHandler};
use fc_rpc_core::types::TransactionRequest;
pub use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool};
use fp_rpc::{ConvertTransaction, ConvertTransactionRuntimeApi, EthereumRuntimeRPCApi};
use hydradx_runtime::{
opaque::{Block, Hash},
AccountId, Balance, Index,
};
use hydradx_runtime::{opaque::Block, AccountId, Balance, Index};
use sc_client_api::{
backend::{Backend, StateBackend, StorageProvider},
client::BlockchainEvents,
};
use sc_network::NetworkService;
use sc_network::service::traits::NetworkService;
use sc_network_sync::SyncingService;
use sc_rpc::SubscriptionTaskExecutor;
pub use sc_rpc_api::DenyUnsafe;
@@ -54,18 +48,26 @@ use sp_runtime::traits::{BlakeTwo256, Block as BlockT};
pub struct HydraDxEGA;

impl fc_rpc::EstimateGasAdapter for HydraDxEGA {
fn adapt_request(mut request: CallRequest) -> CallRequest {
fn adapt_request(mut request: TransactionRequest) -> TransactionRequest {
// Redirect any call to batch precompile:
// force usage of batchAll method for estimation
use sp_core::H160;
const BATCH_PRECOMPILE_ADDRESS: H160 = H160(hex_literal::hex!("0000000000000000000000000000000000000808"));
const BATCH_PRECOMPILE_BATCH_ALL_SELECTOR: [u8; 4] = hex_literal::hex!("96e292b8");
if request.to == Some(BATCH_PRECOMPILE_ADDRESS) {
if let Some(ref mut data) = request.data {
if data.0.len() >= 4 {
data.0[..4].copy_from_slice(&BATCH_PRECOMPILE_BATCH_ALL_SELECTOR);
match (&mut request.data.input, &mut request.data.data) {
(Some(ref mut input), _) => {
if input.0.len() >= 4 {
input.0[..4].copy_from_slice(&BATCH_PRECOMPILE_BATCH_ALL_SELECTOR);
}
}
(None, Some(ref mut data)) => {
if data.0.len() >= 4 {
data.0[..4].copy_from_slice(&BATCH_PRECOMPILE_BATCH_ALL_SELECTOR);
}
}
}
(_, _) => {}
};
}
request
}
@@ -109,13 +111,13 @@ pub struct Deps<C, P, A: ChainApi, CT> {
/// Whether to enable dev signer
pub enable_dev_signer: bool,
/// Network service
pub network: Arc<NetworkService<Block, Hash>>,
pub network: Arc<dyn NetworkService>,
/// Chain syncing service
pub sync: Arc<SyncingService<Block>>,
/// Frontier Backend.
pub frontier_backend: Arc<FrontierBackend<Block>>,
pub frontier_backend: Arc<FrontierBackend<Block, C>>,
/// Ethereum data access overrides.
pub overrides: Arc<OverrideHandle<Block>>,
pub overrides: Arc<dyn StorageOverride<Block>>,
/// Cache for Ethereum block data.
pub block_data_cache: Arc<EthBlockDataCacheTask<Block>>,
/// EthFilterApi pool.
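One note on the `EstimateGasAdapter` change above: with Frontier's newer `TransactionRequest`, call data may arrive in either the `input` field or the legacy `data` field, so the adapter now checks both before rewriting the selector. A self-contained sketch of that rewrite on raw calldata (hypothetical helper, not the node's code; only the `0x96e292b8` selector is taken from the diff):

```rust
/// batchAll selector used by the adapter above (0x96e292b8).
const BATCH_PRECOMPILE_BATCH_ALL_SELECTOR: [u8; 4] = [0x96, 0xe2, 0x92, 0xb8];

/// Overwrite the 4-byte function selector so gas is estimated as if `batchAll`
/// had been called with the same encoded arguments.
fn force_batch_all_selector(calldata: &mut [u8]) {
    if calldata.len() >= 4 {
        calldata[..4].copy_from_slice(&BATCH_PRECOMPILE_BATCH_ALL_SELECTOR);
    }
}

fn main() {
    // Placeholder calldata: some other selector followed by encoded arguments.
    let mut calldata = vec![0xde, 0xad, 0xbe, 0xef, 0x01, 0x02, 0x03];
    force_batch_all_selector(&mut calldata);
    assert_eq!(&calldata[..4], &BATCH_PRECOMPILE_BATCH_ALL_SELECTOR[..]);
}
```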