diff --git a/Cargo.lock b/Cargo.lock index 877cce7a5..cb1ee7b67 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1197,7 +1197,7 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "beacon" version = "0.1.0" -source = "git+https://github.com/helium/proto?branch=master#5e38bab06b06321c3c13f42a6bb2382bbeaf2998" +source = "git+https://github.com/helium/proto?branch=master#e4b935efc2d6743d0506198d2208c49540762235" dependencies = [ "base64 0.21.0", "byteorder", @@ -1207,7 +1207,7 @@ dependencies = [ "rand_chacha 0.3.0", "rust_decimal", "serde", - "sha2 0.10.6", + "sha2 0.9.9", "thiserror", ] @@ -3047,7 +3047,7 @@ dependencies = [ [[package]] name = "helium-proto" version = "0.1.0" -source = "git+https://github.com/helium/proto?branch=master#5e38bab06b06321c3c13f42a6bb2382bbeaf2998" +source = "git+https://github.com/helium/proto?branch=master#e4b935efc2d6743d0506198d2208c49540762235" dependencies = [ "bytes", "prost", @@ -4166,6 +4166,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "backon", "base64 0.21.0", "chrono", "clap 4.4.8", @@ -4198,6 +4199,7 @@ dependencies = [ "sqlx", "thiserror", "tokio", + "tokio-stream", "tonic", "tracing", "tracing-subscriber", @@ -8330,7 +8332,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "sha2 0.10.6", + "sha2 0.9.9", "thiserror", "twox-hash", "xorf", diff --git a/Cargo.toml b/Cargo.toml index acc156d5a..2828939f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,7 +78,7 @@ rust_decimal_macros = "1" base64 = ">=0.21" sha2 = "0.10" tonic = {version = "0", features = ["tls", "tls-roots"]} -http = "0" +http = "<=0.2" triggered = "0" futures = "*" futures-util = "*" diff --git a/file_store/src/cli/dump.rs b/file_store/src/cli/dump.rs index b03b82094..6f9f0e494 100644 --- a/file_store/src/cli/dump.rs +++ b/file_store/src/cli/dump.rs @@ -63,6 +63,7 @@ impl Cmd { "operation_mode": msg.report.operation_mode, "location_validation_timestamp": msg.report.location_validation_timestamp, }); + // print_json(&msg)?; print_json(&json)?; } FileType::CellSpeedtest => { @@ -204,7 +205,6 @@ impl Cmd { print_json(&json!({ "cbsd_id": heartbeat.cbsd_id, "pub_key": PublicKey::try_from(heartbeat.pub_key)?, - "reward_multiplier": heartbeat.reward_multiplier, "timestamp": heartbeat.timestamp, "cell_type": heartbeat.cell_type, "validity": heartbeat.validity, @@ -226,6 +226,14 @@ impl Cmd { "subscriber_id": reward.subscriber_id, "discovery_location_amount": reward.discovery_location_amount, }))?, + Some(Reward::ServiceProviderReward(reward)) => print_json(&json!({ + "service_provider": reward.service_provider_id, + "amount": reward.amount, + }))?, + Some(Reward::UnallocatedReward(reward)) => print_json(&json!({ + "unallocated_reward_type": reward.reward_type, + "amount": reward.amount, + }))?, _ => (), } } diff --git a/file_store/src/file_sink.rs b/file_store/src/file_sink.rs index 1dc6eea54..773f53425 100644 --- a/file_store/src/file_sink.rs +++ b/file_store/src/file_sink.rs @@ -165,8 +165,8 @@ impl FileSinkBuilder { #[derive(Debug, Clone)] pub struct FileSinkClient { - sender: MessageSender, - metric: &'static str, + pub sender: MessageSender, + pub metric: &'static str, } const OK_LABEL: Label = Label::from_static_parts("status", "ok"); @@ -536,7 +536,7 @@ impl FileSink { } } -fn file_name(path_buf: &Path) -> Result { +pub fn file_name(path_buf: &Path) -> Result { path_buf .file_name() .map(|os_str| os_str.to_string_lossy().to_string()) diff --git a/file_store/src/heartbeat.rs b/file_store/src/heartbeat.rs index 
11510c39f..f61834dfc 100644 --- a/file_store/src/heartbeat.rs +++ b/file_store/src/heartbeat.rs @@ -95,7 +95,6 @@ pub mod cli { pub struct ValidatedHeartbeat { pub cbsd_id: String, pub pub_key: PublicKeyBinary, - pub reward_multiplier: f32, pub timestamp: DateTime, pub cell_type: CellType, pub validity: HeartbeatValidity, @@ -113,7 +112,6 @@ pub mod cli { Ok(Self { cbsd_id: v.cbsd_id.clone(), pub_key: v.pub_key.clone().into(), - reward_multiplier: v.reward_multiplier, timestamp: Utc .timestamp_opt(v.timestamp as i64, 0) .single() diff --git a/file_store/src/traits/msg_verify.rs b/file_store/src/traits/msg_verify.rs index ec578c932..9eacfd810 100644 --- a/file_store/src/traits/msg_verify.rs +++ b/file_store/src/traits/msg_verify.rs @@ -79,6 +79,8 @@ impl_msg_verify!(mobile_config::AuthorizationListReqV1, signature); impl_msg_verify!(mobile_config::AuthorizationListResV1, signature); impl_msg_verify!(mobile_config::EntityVerifyReqV1, signature); impl_msg_verify!(mobile_config::EntityVerifyResV1, signature); +impl_msg_verify!(mobile_config::CarrierKeyToEntityReqV1, signature); +impl_msg_verify!(mobile_config::CarrierKeyToEntityResV1, signature); impl_msg_verify!(mobile_config::GatewayInfoReqV1, signature); impl_msg_verify!(mobile_config::GatewayInfoStreamReqV1, signature); impl_msg_verify!(mobile_config::GatewayInfoResV1, signature); diff --git a/iot_config/src/route_service.rs b/iot_config/src/route_service.rs index 751779375..a0b8262e3 100644 --- a/iot_config/src/route_service.rs +++ b/iot_config/src/route_service.rs @@ -381,7 +381,7 @@ impl iot_config::Route for RouteService { self.verify_stream_request_signature(&signer, &request)?; let since = Utc - .timestamp_millis_opt(request.since as i64) + .timestamp_opt(request.since as i64, 0) .single() .ok_or_else(|| Status::invalid_argument("unable to parse since timestamp"))?; diff --git a/iot_config/tests/route_service.rs b/iot_config/tests/route_service.rs index c65556c10..8deca131e 100644 --- a/iot_config/tests/route_service.rs +++ b/iot_config/tests/route_service.rs @@ -168,9 +168,9 @@ async fn stream_only_sends_data_modified_since(pool: Pool) { ) .await; - tokio::time::sleep(std::time::Duration::from_millis(1)).await; + tokio::time::sleep(std::time::Duration::from_secs(1)).await; let since = Utc::now(); - tokio::time::sleep(std::time::Duration::from_millis(1)).await; + tokio::time::sleep(std::time::Duration::from_secs(1)).await; let route2 = create_route(&mut client, &org, &admin_keypair).await; @@ -195,7 +195,7 @@ async fn stream_only_sends_data_modified_since(pool: Pool) { let response = client .stream(route_stream_req_v1( &client_keypair, - since.timestamp_millis() as u64, + since.timestamp() as u64, )) .await .expect("stream request"); @@ -331,7 +331,7 @@ where fn route_stream_req_v1(signer: &Keypair, since: u64) -> RouteStreamReqV1 { let mut request = RouteStreamReqV1 { - timestamp: Utc::now().timestamp_millis() as u64, + timestamp: Utc::now().timestamp() as u64, signer: signer.public_key().to_vec(), since, signature: vec![], @@ -408,7 +408,7 @@ async fn create_org(port: u64, admin_keypair: &Keypair) -> proto::OrgResV1 { owner: generate_keypair().public_key().to_vec(), payer: generate_keypair().public_key().to_vec(), devaddrs: 8, - timestamp: Utc::now().timestamp_millis() as u64, + timestamp: Utc::now().timestamp() as u64, signature: vec![], delegate_keys: vec![], signer: admin_keypair.public_key().into(), @@ -455,7 +455,7 @@ async fn create_route( locked: false, ignore_empty_skf: true, }), - timestamp: Utc::now().timestamp_millis() 
as u64, + timestamp: Utc::now().timestamp() as u64, signature: vec![], signer: signing_keypair.public_key().into(), }; @@ -493,7 +493,7 @@ async fn create_euis( let mut request = proto::RouteUpdateEuisReqV1 { action: proto::ActionV1::Add as i32, eui_pair: Some(pair), - timestamp: Utc::now().timestamp_millis() as u64, + timestamp: Utc::now().timestamp() as u64, signature: vec![], signer: signing_keypair.public_key().into(), }; @@ -528,7 +528,7 @@ async fn create_devaddr_ranges( let mut request = proto::RouteUpdateDevaddrRangesReqV1 { action: proto::ActionV1::Add as i32, devaddr_range: Some(range), - timestamp: Utc::now().timestamp_millis() as u64, + timestamp: Utc::now().timestamp() as u64, signature: vec![], signer: signing_keypair.public_key().into(), }; @@ -570,7 +570,7 @@ async fn create_skf( let mut request = proto::RouteSkfUpdateReqV1 { route_id: route.id.clone(), updates, - timestamp: Utc::now().timestamp_millis() as u64, + timestamp: Utc::now().timestamp() as u64, signature: vec![], signer: signing_keypair.public_key().into(), }; diff --git a/iot_verifier/src/poc.rs b/iot_verifier/src/poc.rs index 5976268e6..2bee5171f 100644 --- a/iot_verifier/src/poc.rs +++ b/iot_verifier/src/poc.rs @@ -52,6 +52,11 @@ lazy_static! { /// from density scaling calculations and not finding a value on subsequent lookups /// would disqualify the hotspot from validating further beacons static ref DEFAULT_TX_SCALE: Decimal = Decimal::new(2000, 4); + /// max permitted lag between the first witness and all subsequent witnesses + static ref MAX_WITNESS_LAG: Duration = Duration::milliseconds(1500); + /// max permitted lag between the beaconer and a witness + static ref MAX_BEACON_TO_WITNESS_LAG: Duration = Duration::milliseconds(4000); + } #[derive(Debug, PartialEq)] pub struct InvalidResponse { @@ -190,43 +195,47 @@ impl Poc { let mut failed_witnesses: Vec = Vec::new(); let mut existing_gateways: Vec = Vec::new(); let witnesses = self.witness_reports.clone(); - for witness_report in witnesses { - // have we already processed a witness report from this gateway ? - // if not, run verifications - // if so, skip verifications and declare the report a dup - if !existing_gateways.contains(&witness_report.report.pub_key) { - // not a dup, run the verifications - match self - .verify_witness( - deny_list, - &witness_report, - beacon_info, - gateway_cache, - hex_density_map, - ) - .await - { - Ok(verified_witness) => { - // track which gateways we have saw a witness report from - existing_gateways.push(verified_witness.report.pub_key.clone()); - verified_witnesses.push(verified_witness) + if !witnesses.is_empty() { + let witness_earliest_received_ts = witnesses[0].received_timestamp; + for witness_report in witnesses { + // have we already processed a witness report from this gateway ? 
+ // if not, run verifications + // if so, skip verifications and declare the report a dup + if !existing_gateways.contains(&witness_report.report.pub_key) { + // not a dup, run the verifications + match self + .verify_witness( + deny_list, + &witness_report, + beacon_info, + gateway_cache, + hex_density_map, + witness_earliest_received_ts, + ) + .await + { + Ok(verified_witness) => { + // track which gateways we have saw a witness report from + existing_gateways.push(verified_witness.report.pub_key.clone()); + verified_witnesses.push(verified_witness) + } + Err(_) => failed_witnesses.push(witness_report), } - Err(_) => failed_witnesses.push(witness_report), + } else { + // the report is a dup + let dup_witness = IotVerifiedWitnessReport::invalid( + InvalidReason::Duplicate, + None, + &witness_report.report, + witness_report.received_timestamp, + None, + // if location is None, default gain and elevation to zero + 0, + 0, + InvalidParticipantSide::Witness, + ); + verified_witnesses.push(dup_witness) } - } else { - // the report is a dup - let dup_witness = IotVerifiedWitnessReport::invalid( - InvalidReason::Duplicate, - None, - &witness_report.report, - witness_report.received_timestamp, - None, - // if location is None, default gain and elevation to zero - 0, - 0, - InvalidParticipantSide::Witness, - ); - verified_witnesses.push(dup_witness) } } let resp = VerifyWitnessesResult { @@ -243,6 +252,7 @@ impl Poc { beaconer_info: &GatewayInfo, gateway_cache: &GatewayCache, hex_density_map: &HexDensityMap, + witness_first_ts: DateTime, ) -> Result> { let witness = &witness_report.report; let witness_pub_key = witness.pub_key.clone(); @@ -304,6 +314,7 @@ impl Poc { &witness_info, &self.beacon_report, beaconer_metadata, + witness_first_ts, ) { Ok(()) => { let tx_scale = hex_density_map @@ -378,6 +389,7 @@ pub fn do_beacon_verifications( Ok(()) } +#[allow(clippy::too_many_arguments)] pub fn do_witness_verifications( deny_list: &DenyList, entropy_start: DateTime, @@ -386,6 +398,7 @@ pub fn do_witness_verifications( witness_info: &GatewayInfo, beacon_report: &IotBeaconIngestReport, beaconer_metadata: &GatewayMetadata, + witness_first_ts: DateTime, ) -> GenericVerifyResult { tracing::debug!( "verifying witness from gateway: {:?}", @@ -416,6 +429,11 @@ pub fn do_witness_verifications( entropy_end, witness_report.received_timestamp, )?; + verify_witness_lag( + beacon_report.received_timestamp, + witness_first_ts, + witness_report.received_timestamp, + )?; verify_witness_data(&beacon_report.report.data, &witness_report.report.data)?; verify_gw_capability(witness_info.is_full_hotspot)?; verify_witness_freq( @@ -658,6 +676,38 @@ fn verify_gw_capability(is_full_hotspot: bool) -> GenericVerifyResult { Ok(()) } +/// verify witness lag +/// if the first received event is the beacon then, +/// all witnesses must be received within MAX_BEACON_TO_WITNESS_LAG of the beacon +/// if the first received event is a witness then, +/// all subsequent witnesses must be received within MAX_WITNESS_LAG of that first witness +fn verify_witness_lag( + beacon_received_ts: DateTime, + first_witness_ts: DateTime, + received_ts: DateTime, +) -> GenericVerifyResult { + let (first_event_ts, max_permitted_lag) = if beacon_received_ts <= first_witness_ts { + (beacon_received_ts, *MAX_BEACON_TO_WITNESS_LAG) + } else { + (first_witness_ts, *MAX_WITNESS_LAG) + }; + let this_witness_lag = received_ts - first_event_ts; + if this_witness_lag > max_permitted_lag { + tracing::debug!( + reason = ?InvalidReason::TooLate, + %received_ts, + 
%beacon_received_ts, + %first_witness_ts, + "witness verification failed" + ); + return Err(InvalidResponse { + reason: InvalidReason::TooLate, + details: None, + }); + } + Ok(()) +} + /// verify witness report is not in response to its own beacon fn verify_self_witness( beacon_pub_key: &PublicKeyBinary, @@ -1259,6 +1309,57 @@ mod tests { ); } + #[test] + fn test_verify_witness_lag() { + let now = Utc::now(); + // a beacon is received first and our test witness is within the acceptable lag from that beacon + assert!(verify_witness_lag( + now - Duration::seconds(60), + now - Duration::seconds(59), + now - Duration::seconds(58) + ) + .is_ok()); + // a witness is received first and our test witness is within the acceptable lag from that first witness + assert!(verify_witness_lag( + now - Duration::seconds(60), + now - Duration::seconds(64), + now - Duration::seconds(63) + ) + .is_ok()); + // a beacon is received first and our test witness is over the acceptable lag from that beacon + assert_eq!( + Err(InvalidResponse { + reason: InvalidReason::TooLate, + details: None + }), + verify_witness_lag( + now - Duration::seconds(60), + now - Duration::seconds(59), + now - Duration::seconds(55) + ) + ); + + // a witness is received first and our test witness is over the acceptable lag from that first witness + assert_eq!( + Err(InvalidResponse { + reason: InvalidReason::TooLate, + details: None + }), + verify_witness_lag( + now - Duration::seconds(55), + now - Duration::seconds(60), + now - Duration::seconds(58) + ) + ); + // a witness is received first and our test witness is that same first witness + assert!(verify_witness_lag( + now - Duration::seconds(55), + now - Duration::seconds(60), + now - Duration::seconds(60) + ) + .is_ok()); + } + #[test] fn test_verify_self_witness() { let key1 = PublicKeyBinary::from_str(PUBKEY1).unwrap(); @@ -1604,6 +1705,7 @@ mod tests { &witness_info, &beacon_report, &beaconer_metadata, + witness_report1.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1623,6 +1725,7 @@ mod tests { &witness_info, &beacon_report, &beaconer_metadata, + witness_report2.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1642,6 +1745,7 @@ mod tests { &witness_info, &beacon_report, &beaconer_metadata, + witness_report3.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1662,6 +1766,7 @@ mod tests { &witness_info4, &beacon_report, &beaconer_metadata, + witness_report4.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1681,6 +1786,7 @@ mod tests { &witness_info, &beacon_report, &beaconer_metadata, + witness_report5.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1701,6 +1807,7 @@ mod tests { &witness_info6, &beacon_report, &beaconer_metadata, + witness_report6.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1721,6 +1828,7 @@ mod tests { &witness_info7, &beacon_report, &beaconer_metadata, + witness_report7.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1741,6 +1849,7 @@ mod tests { &witness_info8, &beacon_report, &beaconer_metadata, + witness_report8.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1760,6 +1869,7 @@ mod tests { &witness_info, &beacon_report, &beaconer_metadata, + witness_report9.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1780,6 +1890,7 @@ mod tests { &witness_info10, &beacon_report, &beaconer_metadata, + witness_report10.received_timestamp, ); assert_eq!( Err(InvalidResponse { @@ -1789,9 +1900,10 @@ mod tests { resp10 ); - // for completeness, confirm our valid 
witness report is sane + // test witness lag from first received event let witness_report11 = valid_witness_report(PUBKEY2, entropy_start + Duration::minutes(2)); let witness_info11 = witness_gateway_info(Some(LOC4), ProtoRegion::Eu868, true); + let resp11 = do_witness_verifications( &deny_list, entropy_start, @@ -1800,8 +1912,30 @@ mod tests { &witness_info11, &beacon_report, &beaconer_metadata, + witness_report11.received_timestamp - Duration::milliseconds(6000), + ); + assert_eq!( + Err(InvalidResponse { + reason: InvalidReason::TooLate, + details: None + }), + resp11 + ); + + // for completeness, confirm our valid witness report is sane + let witness_report12 = valid_witness_report(PUBKEY2, entropy_start + Duration::minutes(2)); + let witness_info12 = witness_gateway_info(Some(LOC4), ProtoRegion::Eu868, true); + let resp12 = do_witness_verifications( + &deny_list, + entropy_start, + entropy_end, + &witness_report12, + &witness_info12, + &beacon_report, + &beaconer_metadata, + witness_report12.received_timestamp, ); - assert_eq!(Ok(()), resp11); + assert_eq!(Ok(()), resp12); } fn beaconer_gateway_info( diff --git a/iot_verifier/src/reward_share.rs b/iot_verifier/src/reward_share.rs index 16639dfb1..5186fa990 100644 --- a/iot_verifier/src/reward_share.rs +++ b/iot_verifier/src/reward_share.rs @@ -18,13 +18,15 @@ const DEFAULT_PREC: u32 = 15; lazy_static! { // TODO: year 1 emissions allocate 30% of total to PoC with 6% to beacons and 24% to witnesses but subsequent years back // total PoC percentage off 1.5% each year; determine how beacons and witnesses will split the subsequent years' allocations - static ref REWARDS_PER_DAY: Decimal = (Decimal::from(32_500_000_000_u64) / Decimal::from(366)) * Decimal::from(1_000_000); // 88_797_814_207_650.273224043715847 + pub static ref REWARDS_PER_DAY: Decimal = (Decimal::from(32_500_000_000_u64) / Decimal::from(366)) * Decimal::from(1_000_000); // 88_797_814_207_650.273224043715847 static ref BEACON_REWARDS_PER_DAY_PERCENT: Decimal = dec!(0.06); static ref WITNESS_REWARDS_PER_DAY_PERCENT: Decimal = dec!(0.24); // Data transfer is allocated 50% of daily rewards static ref DATA_TRANSFER_REWARDS_PER_DAY_PERCENT: Decimal = dec!(0.50); // Operations fund is allocated 7% of daily rewards static ref OPERATIONS_REWARDS_PER_DAY_PERCENT: Decimal = dec!(0.07); + // Oracles fund is allocated 7% of daily rewards + static ref ORACLES_REWARDS_PER_DAY_PERCENT: Decimal = dec!(0.07); // dc remainer distributed at ration of 4:1 in favour of witnesses // ie WITNESS_REWARDS_PER_DAY_PERCENT:BEACON_REWARDS_PER_DAY_PERCENT static ref WITNESS_DC_REMAINER_PERCENT: Decimal = dec!(0.80); @@ -32,13 +34,13 @@ lazy_static! 
{ static ref DC_USD_PRICE: Decimal = dec!(0.00001); } -fn get_tokens_by_duration(tokens: Decimal, duration: Duration) -> Decimal { +pub fn get_tokens_by_duration(tokens: Decimal, duration: Duration) -> Decimal { ((tokens / Decimal::from(Duration::hours(24).num_seconds())) * Decimal::from(duration.num_seconds())) .round_dp_with_strategy(DEFAULT_PREC, RoundingStrategy::MidpointNearestEven) } -fn get_scheduled_poc_tokens( +pub fn get_scheduled_poc_tokens( duration: Duration, dc_transfer_remainder: Decimal, ) -> (Decimal, Decimal) { @@ -52,21 +54,25 @@ fn get_scheduled_poc_tokens( ) } -fn get_scheduled_dc_tokens(duration: Duration) -> Decimal { +pub fn get_scheduled_dc_tokens(duration: Duration) -> Decimal { get_tokens_by_duration( *REWARDS_PER_DAY * *DATA_TRANSFER_REWARDS_PER_DAY_PERCENT, duration, ) } -fn get_scheduled_ops_fund_tokens(duration: Duration) -> u64 { +pub fn get_scheduled_ops_fund_tokens(duration: Duration) -> Decimal { get_tokens_by_duration( *REWARDS_PER_DAY * *OPERATIONS_REWARDS_PER_DAY_PERCENT, duration, ) - .round_dp_with_strategy(0, RoundingStrategy::ToZero) - .to_u64() - .unwrap_or(0) +} + +pub fn get_scheduled_oracle_tokens(duration: Duration) -> Decimal { + get_tokens_by_duration( + *REWARDS_PER_DAY * *ORACLES_REWARDS_PER_DAY_PERCENT, + duration, + ) } #[derive(sqlx::FromRow)] @@ -213,21 +219,16 @@ impl RewardShares { } } +pub type GatewayRewardShares = HashMap; + #[derive(Default)] pub struct GatewayShares { - pub shares: HashMap, + pub shares: GatewayRewardShares, } impl GatewayShares { - pub async fn aggregate( - db: impl sqlx::PgExecutor<'_> + Copy, - reward_period: &Range>, - ) -> Result { - let mut shares = Self::default(); - // get all the shares, poc and dc - shares.aggregate_poc_shares(db, reward_period).await?; - shares.aggregate_dc_shares(db, reward_period).await?; - Ok(shares) + pub fn new(shares: GatewayRewardShares) -> anyhow::Result { + Ok(Self { shares }) } pub async fn clear_rewarded_shares( @@ -247,67 +248,58 @@ impl GatewayShares { .map(|_| ()) } - pub fn total_shares(&self) -> (Decimal, Decimal, Decimal) { - self.shares.iter().fold( - (Decimal::ZERO, Decimal::ZERO, Decimal::ZERO), - |(beacon_sum, witness_sum, dc_sum), (_, reward_shares)| { + pub fn into_iot_reward_shares( + self, + reward_period: &'_ Range>, + beacon_rewards_per_share: Decimal, + witness_rewards_per_share: Decimal, + dc_transfer_rewards_per_share: Decimal, + ) -> impl Iterator + '_ { + self.shares + .into_iter() + .map(move |(hotspot_key, reward_shares)| { + let beacon_amount = + compute_rewards(beacon_rewards_per_share, reward_shares.beacon_shares); + let witness_amount = + compute_rewards(witness_rewards_per_share, reward_shares.witness_shares); + let dc_transfer_amount = + compute_rewards(dc_transfer_rewards_per_share, reward_shares.dc_shares); + proto::GatewayReward { + hotspot_key: hotspot_key.into(), + beacon_amount, + witness_amount, + dc_transfer_amount, + } + }) + .filter(|reward_share| { + reward_share.beacon_amount > 0 + || reward_share.witness_amount > 0 + || reward_share.dc_transfer_amount > 0 + }) + .map(|gateway_reward| { + let total_gateway_reward = gateway_reward.dc_transfer_amount + + gateway_reward.beacon_amount + + gateway_reward.witness_amount; ( - beacon_sum + reward_shares.beacon_shares, - witness_sum + reward_shares.witness_shares, - dc_sum + reward_shares.dc_shares, + total_gateway_reward, + proto::IotRewardShare { + start_period: reward_period.start.encode_timestamp(), + end_period: reward_period.end.encode_timestamp(), + reward: 
Some(ProtoReward::GatewayReward(gateway_reward)), + }, ) - }, - ) - } - - async fn aggregate_poc_shares( - &mut self, - db: impl sqlx::PgExecutor<'_> + Copy, - reward_period: &Range>, - ) -> Result<(), sqlx::Error> { - let mut rows = sqlx::query_as::<_, GatewayPocShare>( - "select * from gateway_shares where reward_timestamp > $1 and reward_timestamp <= $2", - ) - .bind(reward_period.start) - .bind(reward_period.end) - .fetch(db); - while let Some(gateway_share) = rows.try_next().await? { - self.shares - .entry(gateway_share.hotspot_key.clone()) - .or_default() - .add_poc_reward(&gateway_share) - } - Ok(()) - } - - async fn aggregate_dc_shares( - &mut self, - db: impl sqlx::PgExecutor<'_> + Copy, - reward_period: &Range>, - ) -> Result<(), sqlx::Error> { - let mut rows = sqlx::query_as::<_, GatewayDCShare>( - "select hotspot_key, reward_timestamp, num_dcs::numeric, id from gateway_dc_shares where reward_timestamp > $1 and reward_timestamp <= $2", - ) - .bind(reward_period.start) - .bind(reward_period.end) - .fetch(db); - while let Some(gateway_share) = rows.try_next().await? { - self.shares - .entry(gateway_share.hotspot_key.clone()) - .or_default() - .add_dc_reward(&gateway_share) - } - Ok(()) + }) } - pub fn into_iot_reward_shares( - self, + pub async fn calculate_rewards_per_share( + &self, reward_period: &'_ Range>, iot_price: Decimal, - ) -> impl Iterator + '_ { + ) -> anyhow::Result<(Decimal, Decimal, Decimal)> { // the total number of shares for beacons, witnesses and data transfer // dc shares here is the sum of all spent data transfer DC this epoch let (total_beacon_shares, total_witness_shares, total_dc_shares) = self.total_shares(); + // the total number of iot rewards for dc transfer this epoch let total_dc_transfer_rewards = get_scheduled_dc_tokens(reward_period.end - reward_period.start); @@ -329,14 +321,13 @@ impl GatewayShares { reward_period.end - reward_period.start, dc_transfer_rewards_unused, ); - // work out the rewards per share for beacons, witnesses and dc transfer let beacon_rewards_per_share = rewards_per_share(total_beacon_rewards, total_beacon_shares); let witness_rewards_per_share = rewards_per_share(total_witness_rewards, total_witness_shares); let dc_transfer_rewards_per_share = rewards_per_share(total_dc_transfer_rewards_capped, total_dc_shares); - // compute the awards per hotspot + tracing::info!( %total_dc_shares, %total_dc_transfer_rewards_used, @@ -344,48 +335,24 @@ impl GatewayShares { %dc_transfer_rewards_per_share, "data transfer rewards" ); - self.shares - .into_iter() - .map(move |(hotspot_key, reward_shares)| proto::GatewayReward { - hotspot_key: hotspot_key.into(), - beacon_amount: compute_rewards( - beacon_rewards_per_share, - reward_shares.beacon_shares, - ), - witness_amount: compute_rewards( - witness_rewards_per_share, - reward_shares.witness_shares, - ), - dc_transfer_amount: compute_rewards( - dc_transfer_rewards_per_share, - reward_shares.dc_shares, - ), - }) - .filter(|reward_share| { - reward_share.beacon_amount > 0 - || reward_share.witness_amount > 0 - || reward_share.dc_transfer_amount > 0 - }) - .map(|gateway_reward| proto::IotRewardShare { - start_period: reward_period.start.encode_timestamp(), - end_period: reward_period.end.encode_timestamp(), - reward: Some(ProtoReward::GatewayReward(gateway_reward)), - }) + Ok(( + beacon_rewards_per_share, + witness_rewards_per_share, + dc_transfer_rewards_per_share, + )) } -} - -pub mod operational_rewards { - use super::*; - pub fn compute(reward_period: &Range>) -> proto::IotRewardShare { - 
let op_fund_reward = proto::OperationalReward { - amount: get_scheduled_ops_fund_tokens(reward_period.end - reward_period.start), - }; - proto::IotRewardShare { - start_period: reward_period.start.encode_timestamp(), - end_period: reward_period.end.encode_timestamp(), - reward: Some(ProtoReward::OperationalReward(op_fund_reward)), - } + pub fn total_shares(&self) -> (Decimal, Decimal, Decimal) { + self.shares.iter().fold( + (Decimal::ZERO, Decimal::ZERO, Decimal::ZERO), + |(beacon_sum, witness_sum, dc_sum), (_, reward_shares)| { + ( + beacon_sum + reward_shares.beacon_shares, + witness_sum + reward_shares.witness_shares, + dc_sum + reward_shares.dc_shares, + ) + }, + ) } } @@ -446,9 +413,62 @@ fn compute_rewards(rewards_per_share: Decimal, shares: Decimal) -> u64 { .unwrap_or(0) } +pub async fn aggregate_reward_shares( + db: impl sqlx::PgExecutor<'_> + Copy, + reward_period: &Range>, +) -> Result { + let mut shares = GatewayRewardShares::default(); + aggregate_poc_shares(&mut shares, db, reward_period).await?; + aggregate_dc_shares(&mut shares, db, reward_period).await?; + Ok(shares) +} + +async fn aggregate_poc_shares( + // &mut self, + shares: &mut GatewayRewardShares, + db: impl sqlx::PgExecutor<'_> + Copy, + reward_period: &Range>, +) -> Result<(), sqlx::Error> { + let mut rows = sqlx::query_as::<_, GatewayPocShare>( + "select * from gateway_shares where reward_timestamp > $1 and reward_timestamp <= $2", + ) + .bind(reward_period.start) + .bind(reward_period.end) + .fetch(db); + while let Some(gateway_share) = rows.try_next().await? { + shares + .entry(gateway_share.hotspot_key.clone()) + .or_default() + .add_poc_reward(&gateway_share) + } + Ok(()) +} + +async fn aggregate_dc_shares( + // &mut self, + shares: &mut GatewayRewardShares, + db: impl sqlx::PgExecutor<'_> + Copy, + reward_period: &Range>, +) -> Result<(), sqlx::Error> { + let mut rows = sqlx::query_as::<_, GatewayDCShare>( + "select hotspot_key, reward_timestamp, num_dcs::numeric, id from gateway_dc_shares where reward_timestamp > $1 and reward_timestamp <= $2", + ) + .bind(reward_period.start) + .bind(reward_period.end) + .fetch(db); + while let Some(gateway_share) = rows.try_next().await? 
{ + shares + .entry(gateway_share.hotspot_key.clone()) + .or_default() + .add_dc_reward(&gateway_share) + } + Ok(()) +} + #[cfg(test)] mod test { use super::*; + use crate::reward_share; fn reward_shares_in_dec( beacon_shares: Decimal, @@ -473,15 +493,18 @@ mod test { println!("total_tokens_for_period: {total_tokens_for_period}"); let operation_tokens_for_period = get_scheduled_ops_fund_tokens(epoch_duration); - assert_eq!(258_993_624_772, operation_tokens_for_period); + assert_eq!( + dec!(258_993_624_772.313296903460838), + operation_tokens_for_period + ); } - #[test] + #[tokio::test] // test reward distribution where there is a fixed dc spend per gateway // with the total dc spend across all gateways being significantly lower than the // total epoch dc rewards amount // this results in a significant redistribution of dc rewards to POC - fn test_reward_share_calculation_fixed_dc_spend_with_transfer_distribution() { + async fn test_reward_share_calculation_fixed_dc_spend_with_transfer_distribution() { let iot_price = dec!(359); let gw1: PublicKeyBinary = "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6" .parse() @@ -552,17 +575,37 @@ mod test { reward_shares_in_dec(dec!(150), dec!(350), gw6_dc_spend), ); // 0.0150, 0.0350 - let gw_shares = GatewayShares { shares }; + let gw_shares = GatewayShares::new(shares).unwrap(); + let (beacon_rewards_per_share, witness_rewards_per_share, dc_transfer_rewards_per_share) = + gw_shares + .calculate_rewards_per_share(&reward_period, iot_price) + .await + .unwrap(); + + let (total_beacon_rewards, total_witness_rewards) = reward_share::get_scheduled_poc_tokens( + reward_period.end - reward_period.start, + dec!(0.0), + ); + let total_dc_rewards = + reward_share::get_scheduled_dc_tokens(reward_period.end - reward_period.start); + let total_poc_dc_reward_allocation = + total_beacon_rewards + total_witness_rewards + total_dc_rewards; + let mut rewards: HashMap = HashMap::new(); - let gw_reward_shares: Vec = gw_shares - .into_iot_reward_shares(&reward_period, iot_price) - .collect(); - for reward in gw_reward_shares { + let mut allocated_gateway_rewards = 0_u64; + for (reward_amount, reward) in gw_shares.into_iot_reward_shares( + &reward_period, + beacon_rewards_per_share, + witness_rewards_per_share, + dc_transfer_rewards_per_share, + ) { if let Some(ProtoReward::GatewayReward(gateway_reward)) = reward.reward { - rewards.insert( - gateway_reward.hotspot_key.clone().try_into().unwrap(), - gateway_reward, - ); + let gateway_reward_total = gateway_reward.beacon_amount + + gateway_reward.witness_amount + + gateway_reward.dc_transfer_amount; + rewards.insert(gateway_reward.hotspot_key.clone().into(), gateway_reward); + assert_eq!(reward_amount, gateway_reward_total); + allocated_gateway_rewards += reward_amount; } } @@ -652,16 +695,18 @@ mod test { let exp_sum_poc_tokens = exp_total_beacon_tokens + exp_total_witness_tokens; println!("max poc rewards: {exp_sum_poc_tokens}"); println!("total actual poc rewards distributed: {sum_poc_amounts}"); - let poc_diff = exp_sum_poc_tokens.to_i64().unwrap() - sum_poc_amounts as i64; - // the sum of rewards distributed should not exceed the epoch amount - // but due to rounding whilst going to u64 in compute_rewards, - // is permitted to be a few bones less - assert_eq!(poc_diff, 5); + + // confirm the unallocated poc reward/dc amounts + // we can loose up to 1 bone per gateway for each of beacon_amount, witness_amount and dc_amount + // due to going from decimal to u64 + let unallocated_poc_reward_amount = + 
total_poc_dc_reward_allocation - Decimal::from(allocated_gateway_rewards); + assert_eq!(unallocated_poc_reward_amount.to_u64().unwrap(), 6); } - #[test] + #[tokio::test] // test reward distribution where there is zero transfer of dc rewards to poc - fn test_reward_share_calculation_without_data_transfer_distribution() { + async fn test_reward_share_calculation_without_data_transfer_distribution() { let iot_price = dec!(359); let gw1: PublicKeyBinary = "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6" .parse() @@ -736,17 +781,34 @@ mod test { reward_shares_in_dec(dec!(150), dec!(350), gw6_dc_spend), ); // 0.0150, 0.0350 - let gw_shares = GatewayShares { shares }; + let gw_shares = GatewayShares::new(shares).unwrap(); + let (beacon_rewards_per_share, witness_rewards_per_share, dc_transfer_rewards_per_share) = + gw_shares + .calculate_rewards_per_share(&reward_period, iot_price) + .await + .unwrap(); + + let (total_beacon_rewards, total_witness_rewards) = + get_scheduled_poc_tokens(reward_period.end - reward_period.start, dec!(0.0)); + let total_dc_rewards = get_scheduled_dc_tokens(reward_period.end - reward_period.start); + let total_poc_dc_reward_allocation = + total_beacon_rewards + total_witness_rewards + total_dc_rewards; + let mut rewards: HashMap = HashMap::new(); - let gw_reward_shares: Vec = gw_shares - .into_iot_reward_shares(&reward_period, iot_price) - .collect(); - for reward in gw_reward_shares { + let mut allocated_gateway_rewards = 0_u64; + for (reward_amount, reward) in gw_shares.into_iot_reward_shares( + &reward_period, + beacon_rewards_per_share, + witness_rewards_per_share, + dc_transfer_rewards_per_share, + ) { if let Some(ProtoReward::GatewayReward(gateway_reward)) = reward.reward { - rewards.insert( - gateway_reward.hotspot_key.clone().try_into().unwrap(), - gateway_reward, - ); + let gateway_reward_total = gateway_reward.beacon_amount + + gateway_reward.witness_amount + + gateway_reward.dc_transfer_amount; + rewards.insert(gateway_reward.hotspot_key.clone().into(), gateway_reward); + assert_eq!(reward_amount, gateway_reward_total); + allocated_gateway_rewards += reward_amount; } } @@ -828,16 +890,18 @@ mod test { let exp_sum_poc_tokens = exp_total_beacon_tokens + exp_total_witness_tokens; println!("max poc rewards: {exp_sum_poc_tokens}"); println!("total actual poc rewards distributed: {sum_poc_amounts}"); - let poc_diff = exp_sum_poc_tokens.to_i64().unwrap() - sum_poc_amounts as i64; - // the sum of rewards distributed should not exceed the epoch amount - // but due to rounding whilst going to u64 in compute_rewards, - // is permitted to be a few bones less - assert_eq!(poc_diff, 6); + + // confirm the unallocated poc reward/dc amounts + // we can loose up to 1 bone per gateway for each of beacon_amount, witness_amount and dc_amount + // due to going from decimal to u64 + let unallocated_poc_reward_amount = + total_poc_dc_reward_allocation - Decimal::from(allocated_gateway_rewards); + assert_eq!(unallocated_poc_reward_amount.to_u64().unwrap(), 8); } - #[test] + #[tokio::test] // test reward distribution where there is transfer of dc rewards to poc - fn test_reward_share_calculation_with_data_transfer_distribution() { + async fn test_reward_share_calculation_with_data_transfer_distribution() { let iot_price = dec!(359); let gw1: PublicKeyBinary = "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6" .parse() @@ -904,17 +968,34 @@ mod test { reward_shares_in_dec(dec!(150), dec!(350), gw6_dc_spend), ); // 0.0150, 0.0350 - let gw_shares = GatewayShares { shares }; + 
let gw_shares = GatewayShares::new(shares).unwrap(); + let (beacon_rewards_per_share, witness_rewards_per_share, dc_transfer_rewards_per_share) = + gw_shares + .calculate_rewards_per_share(&reward_period, iot_price) + .await + .unwrap(); + + let (total_beacon_rewards, total_witness_rewards) = + get_scheduled_poc_tokens(reward_period.end - reward_period.start, dec!(0.0)); + let total_dc_rewards = get_scheduled_dc_tokens(reward_period.end - reward_period.start); + let total_poc_dc_reward_allocation = + total_beacon_rewards + total_witness_rewards + total_dc_rewards; + let mut rewards: HashMap = HashMap::new(); - let gw_reward_shares: Vec = gw_shares - .into_iot_reward_shares(&reward_period, iot_price) - .collect(); - for reward in gw_reward_shares { + let mut allocated_gateway_rewards = 0_u64; + for (reward_amount, reward) in gw_shares.into_iot_reward_shares( + &reward_period, + beacon_rewards_per_share, + witness_rewards_per_share, + dc_transfer_rewards_per_share, + ) { if let Some(ProtoReward::GatewayReward(gateway_reward)) = reward.reward { - rewards.insert( - gateway_reward.hotspot_key.clone().try_into().unwrap(), - gateway_reward, - ); + let gateway_reward_total = gateway_reward.beacon_amount + + gateway_reward.witness_amount + + gateway_reward.dc_transfer_amount; + rewards.insert(gateway_reward.hotspot_key.clone().into(), gateway_reward); + assert_eq!(reward_amount, gateway_reward_total); + allocated_gateway_rewards += reward_amount; } } @@ -994,11 +1075,13 @@ mod test { let exp_sum_poc_tokens = exp_total_beacon_tokens + exp_total_witness_tokens; println!("max poc rewards: {exp_sum_poc_tokens}"); println!("total actual poc rewards distributed: {sum_poc_amounts}"); - let poc_diff = exp_sum_poc_tokens.to_u64().unwrap() - sum_poc_amounts; - // the sum of rewards distributed should not exceed the epoch amount - // but due to rounding whilst going to u64 in compute_rewards, - // is permitted to be a few bones less - assert_eq!(poc_diff, 7); + + // confirm the unallocated poc reward/dc amounts + // we can loose up to 1 bone per gateway for each of beacon_amount, witness_amount and dc_amount + // due to going from decimal to u64 + let unallocated_poc_reward_amount = + total_poc_dc_reward_allocation - Decimal::from(allocated_gateway_rewards); + assert_eq!(unallocated_poc_reward_amount.to_u64().unwrap(), 7); } #[test] diff --git a/iot_verifier/src/rewarder.rs b/iot_verifier/src/rewarder.rs index fcd1002ff..1db4be290 100644 --- a/iot_verifier/src/rewarder.rs +++ b/iot_verifier/src/rewarder.rs @@ -1,15 +1,19 @@ use crate::{ - reward_share::{operational_rewards, GatewayShares}, + reward_share::{self, GatewayShares}, telemetry, }; use chrono::{DateTime, Duration, TimeZone, Utc}; use db_store::meta; use file_store::{file_sink, traits::TimestampEncode}; use futures::future::LocalBoxFuture; +use helium_proto::services::poc_lora as proto; +use helium_proto::services::poc_lora::iot_reward_share::Reward as ProtoReward; +use helium_proto::services::poc_lora::{UnallocatedReward, UnallocatedRewardType}; use helium_proto::RewardManifest; use price::PriceTracker; use reward_scheduler::Scheduler; use rust_decimal::prelude::*; +use rust_decimal_macros::dec; use sqlx::{PgExecutor, PgPool, Pool, Postgres}; use std::ops::Range; use task_manager::ManagedTask; @@ -107,29 +111,23 @@ impl Rewarder { scheduler: &Scheduler, iot_price: Decimal, ) -> anyhow::Result<()> { - let gateway_reward_shares = - GatewayShares::aggregate(&self.pool, &scheduler.reward_period).await?; - - for reward_share in - 
gateway_reward_shares.into_iot_reward_shares(&scheduler.reward_period, iot_price) - { - self.rewards_sink - .write(reward_share, []) - .await? - // Await the returned oneshot to ensure we wrote the file - .await??; - } + let reward_period = &scheduler.reward_period; - self.rewards_sink - .write(operational_rewards::compute(&scheduler.reward_period), []) - .await? - // Await the returned oneshot to ensure we wrote the file - .await??; + // process rewards for poc and dc + reward_poc_and_dc(&self.pool, &self.rewards_sink, reward_period, iot_price).await?; + // process rewards for the operational fund + reward_operational(&self.rewards_sink, reward_period).await?; + // process rewards for the oracle + reward_oracles(&self.rewards_sink, reward_period).await?; + + // commit the filesink let written_files = self.rewards_sink.commit().await?.await??; + // purge db let mut transaction = self.pool.begin().await?; // Clear gateway shares table period to end of reward period - GatewayShares::clear_rewarded_shares(&mut transaction, scheduler.reward_period.end).await?; + GatewayShares::clear_rewarded_shares(&mut transaction, scheduler.reward_period.start) + .await?; save_rewarded_timestamp( "last_rewarded_end_time", &scheduler.reward_period.end, @@ -206,6 +204,145 @@ impl Rewarder { } } +pub async fn reward_poc_and_dc( + pool: &Pool, + rewards_sink: &file_sink::FileSinkClient, + reward_period: &Range>, + iot_price: Decimal, +) -> anyhow::Result<()> { + let reward_shares = reward_share::aggregate_reward_shares(pool, reward_period).await?; + let gateway_shares = GatewayShares::new(reward_shares)?; + let (beacon_rewards_per_share, witness_rewards_per_share, dc_transfer_rewards_per_share) = + gateway_shares + .calculate_rewards_per_share(reward_period, iot_price) + .await?; + + // get the total poc and dc rewards for the period + let (total_beacon_rewards, total_witness_rewards) = + reward_share::get_scheduled_poc_tokens(reward_period.end - reward_period.start, dec!(0.0)); + let total_dc_rewards = + reward_share::get_scheduled_dc_tokens(reward_period.end - reward_period.start); + let total_poc_dc_reward_allocation = + total_beacon_rewards + total_witness_rewards + total_dc_rewards; + + let mut allocated_gateway_rewards = 0_u64; + for (gateway_reward_amount, reward_share) in gateway_shares.into_iot_reward_shares( + reward_period, + beacon_rewards_per_share, + witness_rewards_per_share, + dc_transfer_rewards_per_share, + ) { + rewards_sink + .write(reward_share, []) + .await? 
+ // Await the returned oneshot to ensure we wrote the file + .await??; + allocated_gateway_rewards += gateway_reward_amount; + } + // write out any unallocated poc reward + let unallocated_poc_reward_amount = (total_poc_dc_reward_allocation + - Decimal::from(allocated_gateway_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + write_unallocated_reward( + rewards_sink, + UnallocatedRewardType::Poc, + unallocated_poc_reward_amount, + reward_period, + ) + .await?; + Ok(()) +} + +pub async fn reward_operational( + rewards_sink: &file_sink::FileSinkClient, + reward_period: &Range>, +) -> anyhow::Result<()> { + let total_operational_rewards = + reward_share::get_scheduled_ops_fund_tokens(reward_period.end - reward_period.start); + let allocated_operational_rewards = total_operational_rewards + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + let op_fund_reward = proto::OperationalReward { + amount: allocated_operational_rewards, + }; + rewards_sink + .write( + proto::IotRewardShare { + start_period: reward_period.start.encode_timestamp(), + end_period: reward_period.end.encode_timestamp(), + reward: Some(ProtoReward::OperationalReward(op_fund_reward)), + }, + [], + ) + .await? + .await??; + // write out any unallocated operation rewards + // which for the operational fund can only relate to rounding issue + // in practice this should always be zero as there can be a max of + // one bone lost due to rounding when going from decimal to u64 + // but we run it anyway and if it is indeed zero nothing gets + // written out anyway + let unallocated_operation_reward_amount = (total_operational_rewards + - Decimal::from(allocated_operational_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + write_unallocated_reward( + rewards_sink, + UnallocatedRewardType::Operation, + unallocated_operation_reward_amount, + reward_period, + ) + .await?; + Ok(()) +} + +pub async fn reward_oracles( + rewards_sink: &file_sink::FileSinkClient, + reward_period: &Range>, +) -> anyhow::Result<()> { + // atm 100% of oracle rewards are assigned to 'unallocated' + let total_oracle_rewards = + reward_share::get_scheduled_oracle_tokens(reward_period.end - reward_period.start); + let allocated_oracle_rewards = 0_u64; + let unallocated_oracle_reward_amount = (total_oracle_rewards + - Decimal::from(allocated_oracle_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + write_unallocated_reward( + rewards_sink, + UnallocatedRewardType::Oracle, + unallocated_oracle_reward_amount, + reward_period, + ) + .await?; + Ok(()) +} + +async fn write_unallocated_reward( + rewards_sink: &file_sink::FileSinkClient, + unallocated_type: UnallocatedRewardType, + unallocated_amount: u64, + reward_period: &'_ Range>, +) -> anyhow::Result<()> { + if unallocated_amount > 0 { + let unallocated_reward = proto::IotRewardShare { + start_period: reward_period.start.encode_timestamp(), + end_period: reward_period.end.encode_timestamp(), + reward: Some(ProtoReward::UnallocatedReward(UnallocatedReward { + reward_type: unallocated_type as i32, + amount: unallocated_amount, + })), + }; + rewards_sink.write(unallocated_reward, []).await?.await??; + }; + Ok(()) +} + pub async fn fetch_rewarded_timestamp( timestamp_key: &str, db: impl PgExecutor<'_>, diff --git a/iot_verifier/src/runner.rs b/iot_verifier/src/runner.rs index 6a2ea1220..48252de6a 100644 --- a/iot_verifier/src/runner.rs +++ 
b/iot_verifier/src/runner.rs @@ -690,7 +690,7 @@ mod tests { assert_eq!(1, included_witnesses.len()); assert_eq!( InvalidReason::Stale, - excluded_witnesses.get(0).unwrap().invalid_reason + excluded_witnesses.first().unwrap().invalid_reason ); assert_eq!( InvalidReason::Duplicate, diff --git a/iot_verifier/tests/common/mod.rs b/iot_verifier/tests/common/mod.rs index f6e36c36b..1e4558475 100644 --- a/iot_verifier/tests/common/mod.rs +++ b/iot_verifier/tests/common/mod.rs @@ -9,8 +9,9 @@ use file_store::{ use helium_crypto::PublicKeyBinary; use helium_proto::{ services::poc_lora::{ + iot_reward_share::Reward as IotReward, GatewayReward, IotRewardShare, LoraBeaconIngestReportV1, LoraInvalidBeaconReportV1, LoraInvalidWitnessReportV1, LoraPocV1, - LoraWitnessIngestReportV1, + LoraWitnessIngestReportV1, OperationalReward, UnallocatedReward, }, DataRate, Region as ProtoRegion, }; @@ -29,7 +30,8 @@ use std::{self, ops::DerefMut, str::FromStr}; use tokio::{sync::mpsc::error::TryRecvError, sync::Mutex, time::timeout}; pub fn create_file_sink() -> (FileSinkClient, MockFileSinkReceiver) { - let (tx, rx) = tokio::sync::mpsc::channel(5); + let (tx, rx) = tokio::sync::mpsc::channel(10); + ( FileSinkClient::new(tx, "metric"), MockFileSinkReceiver { receiver: rx }, @@ -42,15 +44,22 @@ pub struct MockFileSinkReceiver { #[allow(dead_code)] impl MockFileSinkReceiver { - pub async fn receive(&mut self) -> SinkMessage { + pub async fn receive(&mut self) -> Option> { match timeout(seconds(2), self.receiver.recv()).await { - Ok(Some(msg)) => msg, - Ok(None) => panic!("server closed connection while waiting for message"), - Err(_) => panic!("timeout while waiting for message"), + Ok(Some(SinkMessage::Data(on_write_tx, msg))) => { + let _ = on_write_tx.send(Ok(())); + Some(msg) + } + Ok(None) => None, + Err(e) => panic!("timeout while waiting for message1 {:?}", e), + Ok(Some(unexpected_msg)) => { + println!("ignoring unexpected msg {:?}", unexpected_msg); + None + } } } - pub fn assert_no_messages(mut self) { + pub fn assert_no_messages(&mut self) { let Err(TryRecvError::Empty) = self.receiver.try_recv() else { panic!("receiver should have been empty") }; @@ -58,26 +67,71 @@ impl MockFileSinkReceiver { pub async fn receive_valid_poc(&mut self) -> LoraPocV1 { match self.receive().await { - SinkMessage::Data(_, bytes) => { - LoraPocV1::decode(bytes.as_slice()).expect("decode beacon report") + Some(bytes) => { + LoraPocV1::decode(bytes.as_slice()).expect("failed to decode expected valid poc") } - _ => panic!("invalid beacon message"), + None => panic!("failed to receive valid poc"), } } pub async fn receive_invalid_beacon(&mut self) -> LoraInvalidBeaconReportV1 { match self.receive().await { - SinkMessage::Data(_, bytes) => LoraInvalidBeaconReportV1::decode(bytes.as_slice()) - .expect("decode invalid beacon report"), - _ => panic!("invalid beacon message"), + Some(bytes) => LoraInvalidBeaconReportV1::decode(bytes.as_slice()) + .expect("failed to decode expected invalid beacon report"), + None => panic!("failed to receive invalid beacon"), } } pub async fn receive_invalid_witness(&mut self) -> LoraInvalidWitnessReportV1 { match self.receive().await { - SinkMessage::Data(_, bytes) => LoraInvalidWitnessReportV1::decode(bytes.as_slice()) - .expect("decode invalid witness report"), - _ => panic!("invalid witness message"), + Some(bytes) => LoraInvalidWitnessReportV1::decode(bytes.as_slice()) + .expect("failed to decode expected invalid witness report"), + None => panic!("failed to receive invalid witness"), + } + } + + 
pub async fn receive_gateway_reward(&mut self) -> GatewayReward { + match self.receive().await { + Some(bytes) => { + let iot_reward = IotRewardShare::decode(bytes.as_slice()) + .expect("failed to decode expected gateway reward"); + println!("iot_reward: {:?}", iot_reward); + match iot_reward.reward { + Some(IotReward::GatewayReward(r)) => r, + _ => panic!("failed to get gateway reward"), + } + } + None => panic!("failed to receive gateway reward"), + } + } + + pub async fn receive_operational_reward(&mut self) -> OperationalReward { + match self.receive().await { + Some(bytes) => { + let iot_reward = IotRewardShare::decode(bytes.as_slice()) + .expect("failed to decode expected operational reward"); + println!("iot_reward: {:?}", iot_reward); + match iot_reward.reward { + Some(IotReward::OperationalReward(r)) => r, + _ => panic!("failed to get operational reward"), + } + } + None => panic!("failed to receive operational reward"), + } + } + + pub async fn receive_unallocated_reward(&mut self) -> UnallocatedReward { + match self.receive().await { + Some(bytes) => { + let iot_reward = IotRewardShare::decode(bytes.as_slice()) + .expect("failed to decode expected unallocated reward"); + println!("iot_reward: {:?}", iot_reward); + match iot_reward.reward { + Some(IotReward::UnallocatedReward(r)) => r, + _ => panic!("failed to get unallocated reward"), + } + } + None => panic!("failed to receive unallocated reward"), } } } @@ -86,6 +140,7 @@ fn seconds(s: u64) -> std::time::Duration { std::time::Duration::from_secs(s) } +#[allow(dead_code)] pub fn create_valid_beacon_report( pubkey: &str, received_timestamp: DateTime, @@ -353,6 +408,7 @@ pub const POC_DATA: [u8; 51] = [ 203, 122, 146, 49, 241, 156, 148, 74, 246, 68, 17, 8, 212, 48, 6, 152, 58, 221, 158, 186, 101, 37, 59, 135, 126, 18, 72, 244, 65, 174, ]; +#[allow(dead_code)] pub const ENTROPY_TIMESTAMP: i64 = 1677163710000; const EU868_PARAMS: &[u8] = &[ diff --git a/iot_verifier/tests/rewarder_operations.rs b/iot_verifier/tests/rewarder_operations.rs new file mode 100644 index 000000000..041158def --- /dev/null +++ b/iot_verifier/tests/rewarder_operations.rs @@ -0,0 +1,34 @@ +mod common; +use chrono::{Duration as ChronoDuration, Utc}; +use iot_verifier::{reward_share, rewarder}; +use rust_decimal::{prelude::ToPrimitive, Decimal, RoundingStrategy}; +use rust_decimal_macros::dec; + +#[tokio::test] +async fn test_operations() -> anyhow::Result<()> { + let (iot_rewards_client, mut iot_rewards) = common::create_file_sink(); + let now = Utc::now(); + let epoch = (now - ChronoDuration::hours(24))..now; + tokio::select!( + _ = rewarder::reward_operational(&iot_rewards_client, &epoch) => { println!("point 1")}, + ops_reward = iot_rewards.receive_operational_reward() => + { + println!("ops reward {:?}", ops_reward); + // confirm the total rewards allocated matches expectations + let expected_total = reward_share::get_scheduled_ops_fund_tokens(epoch.end - epoch.start) + .to_u64() + .unwrap(); + assert_eq!(ops_reward.amount, 6_215_846_994_535); + assert_eq!(ops_reward.amount, expected_total); + + // confirm the ops percentage amount matches expectations + let daily_total = *reward_share::REWARDS_PER_DAY; + let ops_percent = (Decimal::from(ops_reward.amount) / daily_total).round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven); + assert_eq!(ops_percent, dec!(0.07)); + + // should be no further msgs + iot_rewards.assert_no_messages(); + }, + ); + Ok(()) +} diff --git a/iot_verifier/tests/rewarder_oracles.rs b/iot_verifier/tests/rewarder_oracles.rs 
new file mode 100644 index 000000000..e9822ee67 --- /dev/null +++ b/iot_verifier/tests/rewarder_oracles.rs @@ -0,0 +1,36 @@ +mod common; +use chrono::{Duration as ChronoDuration, Utc}; +use iot_verifier::{reward_share, rewarder}; +use rust_decimal::{prelude::ToPrimitive, Decimal, RoundingStrategy}; +use rust_decimal_macros::dec; +use sqlx::PgPool; + +#[sqlx::test] +async fn test_oracles(_pool: PgPool) -> anyhow::Result<()> { + let (iot_rewards_client, mut iot_rewards) = common::create_file_sink(); + let now = Utc::now(); + let epoch = (now - ChronoDuration::hours(24))..now; + tokio::select!( + _ = rewarder::reward_oracles(&iot_rewards_client, &epoch) => {}, + // oracles rewards are 100% unallocated atm + unallocated_oracle_reward = iot_rewards.receive_unallocated_reward() => + { + println!("unallocated oracles reward {:?}", unallocated_oracle_reward); + // confirm the total rewards matches expectations + let expected_total = reward_share::get_scheduled_oracle_tokens(epoch.end - epoch.start) + .to_u64() + .unwrap(); + assert_eq!(unallocated_oracle_reward.amount, 6_215_846_994_535); + assert_eq!(unallocated_oracle_reward.amount, expected_total); + + // confirm the ops percentage amount matches expectations + let daily_total = *reward_share::REWARDS_PER_DAY; + let oracle_percent = (Decimal::from(unallocated_oracle_reward.amount) / daily_total).round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven); + assert_eq!(oracle_percent, dec!(0.07)); + + // should be no further msgs + iot_rewards.assert_no_messages(); + }, + ); + Ok(()) +} diff --git a/iot_verifier/tests/rewarder_poc_dc.rs b/iot_verifier/tests/rewarder_poc_dc.rs new file mode 100644 index 000000000..bfc450e8e --- /dev/null +++ b/iot_verifier/tests/rewarder_poc_dc.rs @@ -0,0 +1,199 @@ +mod common; +use crate::common::MockFileSinkReceiver; +use chrono::{DateTime, Duration as ChronoDuration, Utc}; +use helium_crypto::PublicKeyBinary; +use helium_proto::services::poc_lora::{GatewayReward, UnallocatedReward, UnallocatedRewardType}; +use iot_verifier::{ + poc_report::ReportType, + reward_share::{self, GatewayDCShare, GatewayPocShare}, + rewarder, +}; +use prost::Message; +use rust_decimal::{prelude::ToPrimitive, Decimal, RoundingStrategy}; +use rust_decimal_macros::dec; +use sqlx::{PgPool, Postgres, Transaction}; +use std::{self, str::FromStr}; + +const HOTSPOT_1: &str = "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6"; +const HOTSPOT_2: &str = "11uJHS2YaEWJqgqC7yza9uvSmpv5FWoMQXiP8WbxBGgNUmifUJf"; +const HOTSPOT_3: &str = "11sctWiP9r5wDJVuDe1Th4XSL2vaawaLLSQF8f8iokAoMAJHxqp"; +const HOTSPOT_4: &str = "11eX55faMbqZB7jzN4p67m6w7ScPMH6ubnvCjCPLh72J49PaJEL"; + +#[sqlx::test] +async fn test_poc_and_dc_rewards(pool: PgPool) -> anyhow::Result<()> { + let (iot_rewards_client, mut iot_rewards) = common::create_file_sink(); + let now = Utc::now(); + let epoch = (now - ChronoDuration::hours(24))..now; + + // seed all the things + let mut txn = pool.clone().begin().await?; + seed_pocs(epoch.start, &mut txn).await?; + seed_dc(epoch.start, &mut txn).await?; + txn.commit().await?; + + // run rewards for poc and dc + tokio::select!( + _ = rewarder::reward_poc_and_dc(&pool, &iot_rewards_client, &epoch, dec!(0.0001)) => {}, + Ok((gateway_rewards, unallocated_poc_reward)) = receive_expected_rewards(&mut iot_rewards) => { + + // assert the gateway rewards + assert_eq!( + gateway_rewards[0].hotspot_key, + PublicKeyBinary::from_str(HOTSPOT_1).unwrap().as_ref() + ); + assert_eq!(gateway_rewards[0].beacon_amount, 1_775_956_284_153); + 
assert_eq!(gateway_rewards[0].witness_amount, 0); + assert_eq!(gateway_rewards[0].dc_transfer_amount, 14_799_635_701_275); + + assert_eq!( + gateway_rewards[1].hotspot_key, + PublicKeyBinary::from_str(HOTSPOT_2).unwrap().as_ref() + ); + assert_eq!(gateway_rewards[1].beacon_amount, 0); + assert_eq!(gateway_rewards[1].witness_amount, 8_524_590_163_934); + assert_eq!(gateway_rewards[1].dc_transfer_amount, 29_599_271_402_550); + // hotspot 2 should have double the dc rewards of hotspot 1 + assert_eq!( + gateway_rewards[1].dc_transfer_amount, + gateway_rewards[0].dc_transfer_amount * 2 + ); + + assert_eq!( + gateway_rewards[2].hotspot_key, + PublicKeyBinary::from_str(HOTSPOT_3).unwrap().as_ref() + ); + // hotspot 2 has double reward scale of hotspot 1 and thus double the beacon amount + assert_eq!(gateway_rewards[2].beacon_amount, 3_551_912_568_306); + assert_eq!( + gateway_rewards[2].beacon_amount, + gateway_rewards[0].beacon_amount * 2 + ); + assert_eq!(gateway_rewards[2].witness_amount, 0); + assert_eq!(gateway_rewards[2].dc_transfer_amount, 0); + + assert_eq!( + gateway_rewards[3].hotspot_key, + PublicKeyBinary::from_str(HOTSPOT_4).unwrap().as_ref() + ); + assert_eq!(gateway_rewards[3].beacon_amount, 0); + assert_eq!(gateway_rewards[3].witness_amount, 12_786_885_245_901); + assert_eq!(gateway_rewards[3].dc_transfer_amount, 0); + + // assert our unallocated reward + assert_eq!( + UnallocatedRewardType::Poc as i32, + unallocated_poc_reward.reward_type + ); + assert_eq!(1, unallocated_poc_reward.amount); + + // confirm the total rewards allocated matches expectations + let poc_sum: u64 = gateway_rewards + .iter() + .map(|r| r.beacon_amount + r.witness_amount) + .sum(); + let dc_sum: u64 = gateway_rewards.iter().map(|r| r.dc_transfer_amount).sum(); + let unallocated_sum: u64 = unallocated_poc_reward.amount; + + let expected_dc = reward_share::get_scheduled_dc_tokens(epoch.end - epoch.start); + let (expected_beacon_sum, expected_witness_sum) = + reward_share::get_scheduled_poc_tokens(epoch.end - epoch.start, expected_dc); + let expected_total = + expected_beacon_sum.to_u64().unwrap() + expected_witness_sum.to_u64().unwrap(); + assert_eq!(expected_total, poc_sum + dc_sum + unallocated_sum); + + // confirm the poc & dc percentage amount matches expectations + let daily_total = *reward_share::REWARDS_PER_DAY; + let poc_dc_percent = (Decimal::from(poc_sum + dc_sum + unallocated_sum) / daily_total).round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven); + assert_eq!(poc_dc_percent, dec!(0.8)); + + } + ); + Ok(()) +} + +async fn receive_expected_rewards( + iot_rewards: &mut MockFileSinkReceiver, +) -> anyhow::Result<(Vec, UnallocatedReward)> { + // get the filestore outputs from rewards run + // we will have 3 gateway rewards and one unallocated reward + let gateway_reward1 = iot_rewards.receive_gateway_reward().await; + let gateway_reward2 = iot_rewards.receive_gateway_reward().await; + let gateway_reward3 = iot_rewards.receive_gateway_reward().await; + let gateway_reward4 = iot_rewards.receive_gateway_reward().await; + let unallocated_poc_reward = iot_rewards.receive_unallocated_reward().await; + // should be no further msgs + iot_rewards.assert_no_messages(); + + // ordering is not guaranteed, so stick the rewards into a vec and sort + let mut gateway_rewards = vec![ + gateway_reward1, + gateway_reward2, + gateway_reward3, + gateway_reward4, + ]; + gateway_rewards.sort_by(|a, b| b.hotspot_key.cmp(&a.hotspot_key)); + Ok((gateway_rewards, unallocated_poc_reward)) +} +async fn seed_pocs(ts: 
DateTime, txn: &mut Transaction<'_, Postgres>) -> anyhow::Result<()> { + let poc_beacon_1 = GatewayPocShare { + hotspot_key: HOTSPOT_1.to_string().parse().unwrap(), + reward_type: ReportType::Beacon, + reward_timestamp: ts + ChronoDuration::hours(1), + hex_scale: dec!(1.0), + reward_unit: dec!(1.0), + poc_id: "poc_id_1".to_string().encode_to_vec(), + }; + + let poc_witness_1 = GatewayPocShare { + hotspot_key: HOTSPOT_2.to_string().parse().unwrap(), + reward_type: ReportType::Witness, + reward_timestamp: ts + ChronoDuration::hours(1), + hex_scale: dec!(1.0), + reward_unit: dec!(1.0), + poc_id: "poc_id_1".to_string().encode_to_vec(), + }; + + let poc_beacon_2 = GatewayPocShare { + hotspot_key: HOTSPOT_3.to_string().parse().unwrap(), + reward_type: ReportType::Beacon, + reward_timestamp: ts + ChronoDuration::hours(1), + hex_scale: dec!(1.0), + reward_unit: dec!(2.0), + poc_id: "poc_id_2".to_string().encode_to_vec(), + }; + + let poc_witness_2 = GatewayPocShare { + hotspot_key: HOTSPOT_4.to_string().parse().unwrap(), + reward_type: ReportType::Witness, + reward_timestamp: ts + ChronoDuration::hours(1), + hex_scale: dec!(1.0), + reward_unit: dec!(1.5), + poc_id: "poc_id_2".to_string().encode_to_vec(), + }; + poc_beacon_1.save(txn).await?; + poc_witness_1.save(txn).await?; + + poc_beacon_2.save(txn).await?; + poc_witness_2.save(txn).await?; + Ok(()) +} + +async fn seed_dc(ts: DateTime, txn: &mut Transaction<'_, Postgres>) -> anyhow::Result<()> { + let dc_share_1 = GatewayDCShare { + hotspot_key: HOTSPOT_1.to_string().parse().unwrap(), + reward_timestamp: ts + ChronoDuration::hours(1), + num_dcs: dec!(1000), + id: "dc_id_1".to_string().encode_to_vec(), + }; + + let dc_share_2 = GatewayDCShare { + hotspot_key: HOTSPOT_2.to_string().parse().unwrap(), + reward_timestamp: ts + ChronoDuration::hours(1), + num_dcs: dec!(2000), + id: "dc_id_2".to_string().encode_to_vec(), + }; + + dc_share_1.save(txn).await?; + dc_share_2.save(txn).await?; + Ok(()) +} diff --git a/iot_verifier/tests/runner_tests.rs b/iot_verifier/tests/runner_tests.rs index 0c7334548..1de1598b5 100644 --- a/iot_verifier/tests/runner_tests.rs +++ b/iot_verifier/tests/runner_tests.rs @@ -164,6 +164,34 @@ async fn valid_beacon_and_witness(pool: PgPool) -> anyhow::Result<()> { Ok(()) } +#[sqlx::test] +async fn valid_beacon_and_no_witness(pool: PgPool) -> anyhow::Result<()> { + let mut ctx = TestContext::setup(pool.clone()).await?; + + // test with a valid beacon and no witnesses + let beacon_to_inject = common::create_valid_beacon_report(common::BEACONER1, ctx.entropy_ts); + common::inject_beacon_report(pool.clone(), beacon_to_inject.clone()).await?; + ctx.runner.handle_db_tick().await?; + + let valid_poc = ctx.valid_pocs.receive_valid_poc().await; + assert_eq!(0, valid_poc.selected_witnesses.len()); + assert_eq!(0, valid_poc.unselected_witnesses.len()); + let valid_beacon = valid_poc.beacon_report.unwrap().report.clone().unwrap(); + // assert the pubkeys in the outputted reports + // match those which we injected + assert_eq!( + PublicKeyBinary::from(valid_beacon.pub_key.clone()), + PublicKeyBinary::from_str(common::BEACONER1).unwrap() + ); + // assert the beacon report outputted to filestore + // is unmodified from that submitted + assert_eq!( + valid_beacon, + LoraBeaconReportReqV1::from(beacon_to_inject.clone()) + ); + Ok(()) +} + #[sqlx::test] async fn invalid_beacon_gateway_not_found(pool: PgPool) -> anyhow::Result<()> { let mut ctx = TestContext::setup(pool.clone()).await?; @@ -362,7 +390,7 @@ async fn 
invalid_beacon_gateway_not_found_no_witnesses(pool: PgPool) -> anyhow:: #[sqlx::test] async fn invalid_beacon_bad_payload(pool: PgPool) -> anyhow::Result<()> { - let ctx = TestContext::setup(pool.clone()).await?; + let mut ctx = TestContext::setup(pool.clone()).await?; // // test with an invalid beacon, no witnesses // the beacon will have an invalid payload, resulting in an error diff --git a/mobile_config/migrations/5_carrier_service.sql b/mobile_config/migrations/5_carrier_service.sql new file mode 100644 index 000000000..a70fbc99a --- /dev/null +++ b/mobile_config/migrations/5_carrier_service.sql @@ -0,0 +1,6 @@ +create table carrier_keys ( + pubkey text primary key not null, + entity_key text not null, + created_at timestamptz not null default now(), + updated_at timestamptz not null default now() +); diff --git a/mobile_config/src/carrier_service.rs b/mobile_config/src/carrier_service.rs new file mode 100644 index 000000000..22feb6af2 --- /dev/null +++ b/mobile_config/src/carrier_service.rs @@ -0,0 +1,78 @@ +use crate::{key_cache::KeyCache, telemetry, verify_public_key, GrpcResult}; +use chrono::Utc; +use file_store::traits::{MsgVerify, TimestampEncode}; +use helium_crypto::{Keypair, PublicKey, Sign}; +use helium_proto::{ + services::mobile_config::{self, CarrierKeyToEntityReqV1, CarrierKeyToEntityResV1}, + Message, +}; +use sqlx::{Pool, Postgres}; +use tonic::{Request, Response, Status}; + +pub struct CarrierService { + key_cache: KeyCache, + pool: Pool, + signing_key: Keypair, +} + +impl CarrierService { + pub fn new(key_cache: KeyCache, pool: Pool, signing_key: Keypair) -> Self { + Self { + key_cache, + pool, + signing_key, + } + } + + fn verify_request_signature(&self, signer: &PublicKey, request: &R) -> Result<(), Status> + where + R: MsgVerify, + { + if self.key_cache.verify_signature(signer, request).is_ok() { + tracing::info!(signer = signer.to_string(), "request authorized"); + return Ok(()); + } + Err(Status::permission_denied("unauthorized request signature")) + } + + fn sign_response(&self, response: &[u8]) -> Result, Status> { + self.signing_key + .sign(response) + .map_err(|_| Status::internal("response signing error")) + } + + async fn key_to_entity(&self, pubkey: &String) -> Result { + let entity_key = sqlx::query_scalar::<_, String>( + " select entity_key from carrier_keys where pubkey = $1 ", + ) + .bind(pubkey) + .fetch_one(&self.pool) + .await + .map_err(|_| Status::internal("carrier entity key not found"))?; + Ok(entity_key) + } +} + +#[tonic::async_trait] +impl mobile_config::CarrierService for CarrierService { + async fn key_to_entity( + &self, + request: Request, + ) -> GrpcResult { + let request = request.into_inner(); + telemetry::count_request("carrier_service", "key_to_entity"); + + let signer = verify_public_key(&request.signer)?; + self.verify_request_signature(&signer, &request)?; + + let entity_key = self.key_to_entity(&request.pubkey).await?; + let mut response = CarrierKeyToEntityResV1 { + entity_key, + timestamp: Utc::now().encode_timestamp(), + signer: self.signing_key.public_key().into(), + signature: vec![], + }; + response.signature = self.sign_response(&response.encode_to_vec())?; + Ok(Response::new(response)) + } +} diff --git a/mobile_config/src/client/carrier_service_client.rs b/mobile_config/src/client/carrier_service_client.rs new file mode 100644 index 000000000..672612595 --- /dev/null +++ b/mobile_config/src/client/carrier_service_client.rs @@ -0,0 +1,84 @@ +use super::{call_with_retry, ClientError, Settings, CACHE_EVICTION_FREQUENCY}; 
+use async_trait::async_trait; +use file_store::traits::MsgVerify; +use helium_crypto::{Keypair, PublicKey, Sign}; +use helium_proto::{ + services::{mobile_config, Channel}, + Message, ServiceProvider, +}; +use retainer::Cache; +use std::{str::FromStr, sync::Arc, time::Duration}; +#[async_trait] +pub trait CarrierServiceVerifier { + type Error; + async fn payer_key_to_service_provider<'a>( + &self, + payer: &str, + ) -> Result; +} +#[derive(Clone)] +pub struct CarrierServiceClient { + client: mobile_config::CarrierServiceClient, + signing_key: Arc, + config_pubkey: PublicKey, + cache: Arc>, + cache_ttl: Duration, +} + +#[async_trait] +impl CarrierServiceVerifier for CarrierServiceClient { + type Error = ClientError; + + async fn payer_key_to_service_provider<'a>( + &self, + payer: &str, + ) -> Result { + if let Some(carrier_found) = self.cache.get(&payer.to_string()).await { + return Ok(*carrier_found.value()); + } + + let mut request = mobile_config::CarrierKeyToEntityReqV1 { + pubkey: payer.to_string(), + signer: self.signing_key.public_key().into(), + signature: vec![], + }; + request.signature = self.signing_key.sign(&request.encode_to_vec())?; + tracing::debug!(?payer, "getting service provider for payer key"); + let response = match call_with_retry!(self.client.clone().key_to_entity(request.clone())) { + Ok(verify_res) => { + let response = verify_res.into_inner(); + response.verify(&self.config_pubkey)?; + ServiceProvider::from_str(&response.entity_key) + .map_err(|_| ClientError::UnknownServiceProvider(payer.to_string()))? + } + Err(status) if status.code() == tonic::Code::NotFound => { + Err(ClientError::UnknownServiceProvider(payer.to_string()))? + } + Err(status) => Err(status)?, + }; + self.cache + .insert(payer.to_string(), response, self.cache_ttl) + .await; + Ok(response) + } +} + +impl CarrierServiceClient { + pub fn from_settings(settings: &Settings) -> Result> { + let cache = Arc::new(Cache::new()); + let cloned_cache = cache.clone(); + tokio::spawn(async move { + cloned_cache + .monitor(4, 0.25, CACHE_EVICTION_FREQUENCY) + .await + }); + + Ok(Self { + client: settings.connect_carrier_service_client(), + signing_key: settings.signing_keypair()?, + config_pubkey: settings.config_pubkey()?, + cache_ttl: settings.cache_ttl(), + cache, + }) + } +} diff --git a/mobile_config/src/client/mod.rs b/mobile_config/src/client/mod.rs index 81eb20d38..68699dd0d 100644 --- a/mobile_config/src/client/mod.rs +++ b/mobile_config/src/client/mod.rs @@ -1,4 +1,5 @@ pub mod authorization_client; +pub mod carrier_service_client; pub mod entity_client; pub mod gateway_client; mod settings; @@ -6,6 +7,7 @@ mod settings; use std::time::Duration; pub use authorization_client::AuthorizationClient; +pub use carrier_service_client::CarrierServiceClient; pub use entity_client::EntityClient; pub use gateway_client::GatewayClient; pub use settings::Settings; @@ -22,6 +24,8 @@ pub enum ClientError { VerificationError(#[from] file_store::Error), #[error("error parsing gateway location {0}")] LocationParseError(#[from] std::num::ParseIntError), + #[error("unknown service provider {0}")] + UnknownServiceProvider(String), } macro_rules! 
call_with_retry { diff --git a/mobile_config/src/client/settings.rs b/mobile_config/src/client/settings.rs index bebaeffaf..2bebc3f0b 100644 --- a/mobile_config/src/client/settings.rs +++ b/mobile_config/src/client/settings.rs @@ -56,6 +56,11 @@ impl Settings { mobile_config::EntityClient::new(channel) } + pub fn connect_carrier_service_client(&self) -> mobile_config::CarrierServiceClient { + let channel = connect_channel(self); + mobile_config::CarrierServiceClient::new(channel) + } + pub fn signing_keypair( &self, ) -> Result, Box> { diff --git a/mobile_config/src/lib.rs b/mobile_config/src/lib.rs index 017e14829..ebd48dd69 100644 --- a/mobile_config/src/lib.rs +++ b/mobile_config/src/lib.rs @@ -6,6 +6,7 @@ use tonic::{Response, Status}; pub mod admin_service; pub mod authorization_service; +pub mod carrier_service; pub mod client; pub mod entity_service; pub mod gateway_info; diff --git a/mobile_config/src/main.rs b/mobile_config/src/main.rs index 50e9d8d79..4a6ff6052 100644 --- a/mobile_config/src/main.rs +++ b/mobile_config/src/main.rs @@ -3,12 +3,12 @@ use clap::Parser; use futures::future::LocalBoxFuture; use futures_util::TryFutureExt; use helium_proto::services::mobile_config::{ - AdminServer, AuthorizationServer, EntityServer, GatewayServer, + AdminServer, AuthorizationServer, CarrierServiceServer, EntityServer, GatewayServer, }; use mobile_config::{ admin_service::AdminService, authorization_service::AuthorizationService, - entity_service::EntityService, gateway_service::GatewayService, key_cache::KeyCache, - settings::Settings, + carrier_service::CarrierService, entity_service::EntityService, + gateway_service::GatewayService, key_cache::KeyCache, settings::Settings, }; use std::{net::SocketAddr, path::PathBuf, time::Duration}; use task_manager::{ManagedTask, TaskManager}; @@ -86,6 +86,8 @@ impl Daemon { metadata_pool.clone(), settings.signing_keypair()?, ); + let carrier_svc = + CarrierService::new(key_cache.clone(), pool.clone(), settings.signing_keypair()?); let grpc_server = GrpcServer { listen_addr, @@ -93,6 +95,7 @@ impl Daemon { gateway_svc, auth_svc, entity_svc, + carrier_svc, }; TaskManager::builder().add_task(grpc_server).start().await @@ -105,6 +108,7 @@ pub struct GrpcServer { gateway_svc: GatewayService, auth_svc: AuthorizationService, entity_svc: EntityService, + carrier_svc: CarrierService, } impl ManagedTask for GrpcServer { @@ -121,6 +125,7 @@ impl ManagedTask for GrpcServer { .add_service(GatewayServer::new(self.gateway_svc)) .add_service(AuthorizationServer::new(self.auth_svc)) .add_service(EntityServer::new(self.entity_svc)) + .add_service(CarrierServiceServer::new(self.carrier_svc)) .serve_with_shutdown(self.listen_addr, shutdown) .map_err(Error::from) .await diff --git a/mobile_config_cli/src/client.rs b/mobile_config_cli/src/client.rs index d218a5a5e..97d0d3284 100644 --- a/mobile_config_cli/src/client.rs +++ b/mobile_config_cli/src/client.rs @@ -127,7 +127,7 @@ impl AuthClient { keypair: &Keypair, ) -> Result> { let mut request = AuthorizationListReqV1 { - role: role.try_into()?, + role: role.into(), signer: keypair.public_key().into(), signature: vec![], }; diff --git a/mobile_verifier/Cargo.toml b/mobile_verifier/Cargo.toml index f07b40d30..30020ba2c 100644 --- a/mobile_verifier/Cargo.toml +++ b/mobile_verifier/Cargo.toml @@ -34,6 +34,7 @@ humantime = {workspace = true} rust_decimal = {workspace = true} rust_decimal_macros = {workspace = true} tonic = {workspace = true} +tokio-stream = { workspace = true } metrics = {workspace = true} 
metrics-exporter-prometheus = {workspace = true} mobile-config = {path = "../mobile_config"} @@ -46,3 +47,6 @@ rand = {workspace = true} async-trait = {workspace = true} retainer = {workspace = true} uuid = {workspace = true} + +[dev-dependencies] +backon = "0" diff --git a/mobile_verifier/migrations/24_location_trust_multiplier.sql b/mobile_verifier/migrations/24_location_trust_multiplier.sql new file mode 100644 index 000000000..f80bd4397 --- /dev/null +++ b/mobile_verifier/migrations/24_location_trust_multiplier.sql @@ -0,0 +1,18 @@ +ALTER TABLE wifi_heartbeats ADD COLUMN location_trust_score_multiplier DECIMAL; + +UPDATE wifi_heartbeats SET location_trust_score_multiplier = + CASE WHEN location_validation_timestamp IS NULL THEN + 0.25 + WHEN distance_to_asserted > 100 THEN + 0.25 + ELSE + 1.0 + END; + +ALTER TABLE wifi_heartbeats ALTER COLUMN location_trust_score_multiplier SET NOT NULL; + +ALTER TABLE cbrs_heartbeats ADD COLUMN location_trust_score_multiplier DECIMAL; + +UPDATE cbrs_heartbeats SET location_trust_score_multiplier = 1.0; + +ALTER TABLE cbrs_heartbeats ALTER COLUMN location_trust_score_multiplier SET NOT NULL; diff --git a/mobile_verifier/src/cli/reward_from_db.rs b/mobile_verifier/src/cli/reward_from_db.rs index bd77e5f1e..e484abfef 100644 --- a/mobile_verifier/src/cli/reward_from_db.rs +++ b/mobile_verifier/src/cli/reward_from_db.rs @@ -1,6 +1,6 @@ use crate::{ heartbeats::HeartbeatReward, - reward_shares::{get_scheduled_tokens_for_poc_and_dc, CoveragePoints}, + reward_shares::{get_scheduled_tokens_for_poc, CoveragePoints}, speedtests_average::SpeedtestAverages, Settings, }; @@ -30,14 +30,12 @@ impl Cmd { tracing::info!("Rewarding shares from the following time range: {start} to {end}"); let epoch = start..end; - let expected_rewards = get_scheduled_tokens_for_poc_and_dc(epoch.end - epoch.start); + let expected_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start); let (shutdown_trigger, _shutdown_listener) = triggered::trigger(); let pool = settings.database.connect(env!("CARGO_PKG_NAME")).await?; - let heartbeats = - HeartbeatReward::validated(&pool, &epoch, settings.max_asserted_distance_deviation) - .await?; + let heartbeats = HeartbeatReward::validated(&pool, &epoch); let speedtest_averages = SpeedtestAverages::aggregate_epoch_averages(epoch.end, &pool).await?; let reward_shares = @@ -48,7 +46,7 @@ impl Cmd { let radio_rewards = reward_shares .into_rewards(Decimal::ZERO, &epoch) .ok_or(anyhow::anyhow!("no rewardable events"))?; - for reward in radio_rewards { + for (_reward_amount, reward) in radio_rewards { if let Some(proto::mobile_reward_share::Reward::RadioReward(proto::RadioReward { hotspot_key, poc_reward, diff --git a/mobile_verifier/src/cli/server.rs b/mobile_verifier/src/cli/server.rs index 2a3c5d7fd..054b072d1 100644 --- a/mobile_verifier/src/cli/server.rs +++ b/mobile_verifier/src/cli/server.rs @@ -15,7 +15,9 @@ use file_store::{ FileType, }; use futures_util::TryFutureExt; -use mobile_config::client::{entity_client::EntityClient, AuthorizationClient, GatewayClient}; +use mobile_config::client::{ + entity_client::EntityClient, AuthorizationClient, CarrierServiceClient, GatewayClient, +}; use price::PriceTracker; use tokio::signal; @@ -53,6 +55,7 @@ impl Cmd { let gateway_client = GatewayClient::from_settings(&settings.config_client)?; let auth_client = AuthorizationClient::from_settings(&settings.config_client)?; let entity_client = EntityClient::from_settings(&settings.config_client)?; + let carrier_client = 
CarrierServiceClient::from_settings(&settings.config_client)?; // price tracker let (price_tracker, tracker_process) = @@ -111,6 +114,7 @@ impl Cmd { gateway_client.clone(), cbrs_heartbeats, settings.modeled_coverage_start(), + settings.max_asserted_distance_deviation, valid_heartbeats.clone(), seniority_updates.clone(), ); @@ -120,6 +124,7 @@ impl Cmd { gateway_client.clone(), wifi_heartbeats, settings.modeled_coverage_start(), + settings.max_asserted_distance_deviation, valid_heartbeats, seniority_updates, ); @@ -218,12 +223,12 @@ impl Cmd { let rewarder = Rewarder::new( pool.clone(), + carrier_client, Duration::hours(reward_period_hours), Duration::minutes(settings.reward_offset_minutes), mobile_rewards, reward_manifests, price_tracker, - settings.max_asserted_distance_deviation, ); // subscriber location diff --git a/mobile_verifier/src/data_session.rs b/mobile_verifier/src/data_session.rs index aef0e4bcb..3f4a24679 100644 --- a/mobile_verifier/src/data_session.rs +++ b/mobile_verifier/src/data_session.rs @@ -5,7 +5,9 @@ use futures::{ TryFutureExt, }; use helium_crypto::PublicKeyBinary; -use sqlx::{PgPool, Postgres, Transaction}; +use helium_proto::ServiceProvider; +use rust_decimal::Decimal; +use sqlx::{PgPool, Postgres, Row, Transaction}; use std::{collections::HashMap, ops::Range, time::Instant}; use tokio::sync::mpsc::Receiver; @@ -13,7 +15,19 @@ pub struct DataSessionIngestor { pub pool: PgPool, } -pub type HotspotMap = HashMap; +#[derive(Default)] +pub struct HotspotReward { + pub rewardable_bytes: u64, + pub rewardable_dc: u64, +} + +#[derive(Clone, Debug)] +pub struct ServiceProviderDataSession { + pub service_provider: ServiceProvider, + pub total_dcs: Decimal, +} + +pub type HotspotMap = HashMap; impl DataSessionIngestor { pub fn new(pool: sqlx::Pool) -> Self { @@ -108,6 +122,7 @@ impl HotspotDataSession { .await?; Ok(()) } + fn from_valid_data_session( v: ValidDataTransferSession, received_timestamp: DateTime, @@ -140,13 +155,40 @@ pub async fn aggregate_hotspot_data_sessions_to_dc<'a>( data_sessions_to_dc(stream).await } +pub async fn sum_data_sessions_to_dc_by_payer<'a>( + exec: impl sqlx::PgExecutor<'a> + Copy + 'a, + epoch: &'a Range>, +) -> Result, sqlx::Error> { + Ok(sqlx::query( + r#" + SELECT payer as sp, sum(num_dcs)::bigint as total_dcs + FROM hotspot_data_transfer_sessions + WHERE received_timestamp >= $1 and received_timestamp < $2 + GROUP BY payer + "#, + ) + .bind(epoch.start) + .bind(epoch.end) + .fetch_all(exec) + .await? + .iter() + .map(|row| { + let sp = row.get::("sp"); + let dcs: u64 = row.get::("total_dcs") as u64; + (sp, dcs) + }) + .collect::>()) +} + pub async fn data_sessions_to_dc<'a>( stream: impl Stream>, ) -> Result { tokio::pin!(stream); let mut map = HotspotMap::new(); while let Some(session) = stream.try_next().await? 
{ - *map.entry(session.pub_key).or_default() += session.num_dcs as u64 + let rewards = map.entry(session.pub_key).or_default(); + rewards.rewardable_dc += session.num_dcs as u64; + rewards.rewardable_bytes += session.upload_bytes as u64 + session.download_bytes as u64; } Ok(map) } diff --git a/mobile_verifier/src/heartbeats/cbrs.rs b/mobile_verifier/src/heartbeats/cbrs.rs index 923a53ea1..45f0ab327 100644 --- a/mobile_verifier/src/heartbeats/cbrs.rs +++ b/mobile_verifier/src/heartbeats/cbrs.rs @@ -22,6 +22,7 @@ pub struct HeartbeatDaemon { gateway_info_resolver: GIR, heartbeats: Receiver>, modeled_coverage_start: DateTime, + max_distance_to_asserted: u32, heartbeat_sink: FileSinkClient, seniority_sink: FileSinkClient, } @@ -35,6 +36,7 @@ where gateway_info_resolver: GIR, heartbeats: Receiver>, modeled_coverage_start: DateTime, + max_distance_to_asserted: u32, heartbeat_sink: FileSinkClient, seniority_sink: FileSinkClient, ) -> Self { @@ -43,6 +45,7 @@ where gateway_info_resolver, heartbeats, modeled_coverage_start, + max_distance_to_asserted, heartbeat_sink, seniority_sink, } @@ -94,7 +97,7 @@ where async fn process_file( &self, file: FileInfoStream, - heartbeat_cache: &Arc), ()>>, + heartbeat_cache: &Cache<(String, DateTime), ()>, coverage_claim_time_cache: &CoverageClaimTimeCache, coverage_objects: &CoverageObjects, ) -> anyhow::Result<()> { @@ -102,21 +105,16 @@ where let mut transaction = self.pool.begin().await?; let epoch = (file.file_info.timestamp - Duration::hours(3)) ..(file.file_info.timestamp + Duration::minutes(30)); - let heartbeat_cache_clone = heartbeat_cache.clone(); let heartbeats = file .into_stream(&mut transaction) .await? - .map(Heartbeat::from) - .filter(move |h| { - let hb_cache = heartbeat_cache_clone.clone(); - let id = h.id().unwrap(); - async move { hb_cache.get(&id).await.is_none() } - }); + .map(Heartbeat::from); process_validated_heartbeats( ValidatedHeartbeat::validate_heartbeats( &self.gateway_info_resolver, heartbeats, coverage_objects, + self.max_distance_to_asserted, &epoch, ), heartbeat_cache, diff --git a/mobile_verifier/src/heartbeats/mod.rs b/mobile_verifier/src/heartbeats/mod.rs index 7608d060b..c7fd15a3f 100644 --- a/mobile_verifier/src/heartbeats/mod.rs +++ b/mobile_verifier/src/heartbeats/mod.rs @@ -12,14 +12,15 @@ use file_store::{ file_sink::FileSinkClient, heartbeat::CbrsHeartbeatIngestReport, wifi_heartbeat::WifiHeartbeatIngestReport, }; -use futures::stream::{Stream, StreamExt, TryStreamExt}; +use futures::stream::{Stream, StreamExt}; use h3o::{CellIndex, LatLng}; use helium_crypto::PublicKeyBinary; use helium_proto::services::poc_mobile as proto; use retainer::Cache; -use rust_decimal::Decimal; +use rust_decimal::{prelude::ToPrimitive, Decimal}; +use rust_decimal_macros::dec; use sqlx::{postgres::PgTypeInfo, Decode, Encode, Postgres, Transaction, Type}; -use std::{collections::HashMap, ops::Range, pin::pin, time}; +use std::{ops::Range, pin::pin, time}; use uuid::Uuid; /// Minimum number of heartbeats required to give a reward to the hotspot. 
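For reference, a minimal sketch of the location-trust rule the heartbeat hunks below introduce (the helper names here are illustrative, not from the patch): a wifi heartbeat keeps full weight only when its location is validated and within the asserted-distance limit, otherwise it is degraded to 0.25, mirroring the 24_location_trust_multiplier migration and the wifi branch of ValidatedHeartbeat::validate; the proto carries the multiplier as an integer scaled by 1000.

use chrono::{DateTime, Utc};
use rust_decimal::{prelude::ToPrimitive, Decimal};
use rust_decimal_macros::dec;

// Hypothetical helper: restates the wifi rule from ValidatedHeartbeat::validate
// and the 24_location_trust_multiplier migration (validated location within the
// limit => 1.0, otherwise 0.25). CBRS heartbeats are assigned 1.0 by the migration.
fn wifi_location_trust_score(
    location_validation_timestamp: Option<DateTime<Utc>>,
    distance_to_asserted: i64,
    max_distance_to_asserted: u32,
) -> Decimal {
    if location_validation_timestamp.is_some()
        && distance_to_asserted <= max_distance_to_asserted as i64
    {
        dec!(1.0)
    } else {
        dec!(0.25)
    }
}

// The proto reports the multiplier as a u32 scaled by 1000 (0.25 -> 250, 1.0 -> 1000),
// matching the `* dec!(1000)).to_u32()` conversions in the hunks that follow.
fn proto_multiplier(multiplier: Decimal) -> u32 {
    (multiplier * dec!(1000)).to_u32().unwrap_or_default()
}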
@@ -262,28 +263,14 @@ impl From for Heartbeat { } } -#[derive(Debug, Clone, sqlx::FromRow)] -pub struct HeartbeatRow { - pub hotspot_key: PublicKeyBinary, - // cell hb only - pub cbsd_id: Option, - pub cell_type: CellType, - // wifi hb only - pub location_validation_timestamp: Option>, - pub distance_to_asserted: Option, - pub coverage_object: Uuid, - pub latest_timestamp: DateTime, -} - -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, sqlx::FromRow)] pub struct HeartbeatReward { pub hotspot_key: PublicKeyBinary, - pub cell_type: CellType, // cell hb only pub cbsd_id: Option, + pub cell_type: CellType, pub location_trust_score_multiplier: Decimal, pub coverage_object: Uuid, - pub latest_timestamp: DateTime, } impl HeartbeatReward { @@ -309,69 +296,15 @@ impl HeartbeatReward { self.location_trust_score_multiplier } - pub async fn validated<'a>( + pub fn validated<'a>( exec: impl sqlx::PgExecutor<'a> + Copy + 'a, epoch: &'a Range>, - max_distance_to_asserted: u32, - ) -> anyhow::Result + 'a> { - let heartbeat_rows = - sqlx::query_as::<_, HeartbeatRow>(include_str!("valid_heartbeats.sql")) - .bind(epoch.start) - .bind(epoch.end) - .bind(MINIMUM_HEARTBEAT_COUNT) - .fetch(exec) - .try_fold( - HashMap::<(PublicKeyBinary, Option), Vec>::new(), - |mut map, row| async move { - map.entry((row.hotspot_key.clone(), row.cbsd_id.clone())) - .or_default() - .push(row); - - Ok(map) - }, - ) - .await?; - - Ok( - futures::stream::iter(heartbeat_rows).map(move |((hotspot_key, cbsd_id), rows)| { - let first = rows.first().unwrap(); - let average_location_trust_score = rows - .iter() - .map(|row| { - row.cell_type.location_weight( - row.location_validation_timestamp, - row.distance_to_asserted, - max_distance_to_asserted, - ) - }) - .sum::() - / Decimal::new(rows.len() as i64, 0); - - HeartbeatReward { - hotspot_key, - cell_type: first.cell_type, - cbsd_id, - location_trust_score_multiplier: average_location_trust_score, - coverage_object: first.coverage_object, - latest_timestamp: first.latest_timestamp, - } - }), - ) - } - - pub fn from_heartbeat_row(value: HeartbeatRow, max_distance_to_asserted: u32) -> Self { - Self { - hotspot_key: value.hotspot_key, - cell_type: value.cell_type, - cbsd_id: value.cbsd_id, - location_trust_score_multiplier: value.cell_type.location_weight( - value.location_validation_timestamp, - value.distance_to_asserted, - max_distance_to_asserted, - ), - coverage_object: value.coverage_object, - latest_timestamp: value.latest_timestamp, - } + ) -> impl Stream> + 'a { + sqlx::query_as::<_, HeartbeatReward>(include_str!("valid_radios.sql")) + .bind(epoch.start) + .bind(epoch.end) + .bind(MINIMUM_HEARTBEAT_COUNT) + .fetch(exec) } } @@ -379,6 +312,7 @@ impl HeartbeatReward { pub struct ValidatedHeartbeat { pub heartbeat: Heartbeat, pub cell_type: CellType, + pub location_trust_score_multiplier: Decimal, pub distance_to_asserted: Option, pub coverage_summary: Option, pub validity: proto::HeartbeatValidity, @@ -393,24 +327,182 @@ impl ValidatedHeartbeat { self.heartbeat.timestamp.duration_trunc(Duration::hours(1)) } + pub fn new( + heartbeat: Heartbeat, + cell_type: CellType, + location_trust_score_multiplier: Decimal, + distance_to_asserted: Option, + coverage_summary: Option, + validity: proto::HeartbeatValidity, + ) -> Self { + Self { + heartbeat, + cell_type, + location_trust_score_multiplier, + distance_to_asserted, + coverage_summary, + validity, + } + } + + /// Validate a heartbeat in the given epoch. 
+ pub async fn validate( + heartbeat: Heartbeat, + gateway_info_resolver: &impl GatewayResolver, + coverage_cache: &CoverageObjects, + max_distance_to_asserted: u32, + epoch: &Range>, + ) -> anyhow::Result { + let Some(coverage_object) = heartbeat.coverage_object else { + return Ok(Self::new( + heartbeat, + CellType::CellTypeNone, + dec!(0), + None, + None, + proto::HeartbeatValidity::BadCoverageObject, + )); + }; + + let Some(coverage_summary) = coverage_cache + .coverage_summary(&coverage_object, heartbeat.key()) + .await? + else { + return Ok(Self::new( + heartbeat, + CellType::CellTypeNone, + dec!(0), + None, + None, + proto::HeartbeatValidity::NoSuchCoverageObject, + )); + }; + + let cell_type = match heartbeat.hb_type { + HbType::Cbrs => match heartbeat.cbsd_id.as_ref() { + Some(cbsd_id) => match CellType::from_cbsd_id(cbsd_id) { + Some(ty) => ty, + _ => { + return Ok(Self::new( + heartbeat, + CellType::CellTypeNone, + dec!(0), + None, + Some(coverage_summary), + proto::HeartbeatValidity::BadCbsdId, + )); + } + }, + None => { + return Ok(Self::new( + heartbeat, + CellType::CellTypeNone, + dec!(0), + None, + Some(coverage_summary), + proto::HeartbeatValidity::BadCbsdId, + )); + } + }, + HbType::Wifi => { + if coverage_summary.indoor { + CellType::NovaGenericWifiIndoor + } else { + CellType::NovaGenericWifiOutdoor + } + } + }; + + if !heartbeat.operation_mode { + return Ok(Self::new( + heartbeat, + cell_type, + dec!(0), + None, + Some(coverage_summary), + proto::HeartbeatValidity::NotOperational, + )); + } + + if !epoch.contains(&heartbeat.timestamp) { + return Ok(Self::new( + heartbeat, + cell_type, + dec!(0), + None, + Some(coverage_summary), + proto::HeartbeatValidity::HeartbeatOutsideRange, + )); + } + + match gateway_info_resolver + .resolve_gateway(&heartbeat.hotspot_key) + .await? 
+ { + GatewayResolution::GatewayNotFound => Ok(Self::new( + heartbeat, + cell_type, + dec!(0), + None, + Some(coverage_summary), + proto::HeartbeatValidity::GatewayNotFound, + )), + GatewayResolution::GatewayNotAsserted if heartbeat.hb_type == HbType::Wifi => { + Ok(Self::new( + heartbeat, + cell_type, + dec!(0), + None, + Some(coverage_summary), + proto::HeartbeatValidity::GatewayNotAsserted, + )) + } + GatewayResolution::AssertedLocation(location) if heartbeat.hb_type == HbType::Wifi => { + let distance_to_asserted = heartbeat.asserted_distance(location)?; + let location_trust_score_multiplier = + if heartbeat.location_validation_timestamp.is_some() + && distance_to_asserted <= max_distance_to_asserted as i64 + { + dec!(1.0) + } else { + dec!(0.25) + }; + Ok(Self::new( + heartbeat, + cell_type, + location_trust_score_multiplier, + Some(distance_to_asserted), + Some(coverage_summary), + proto::HeartbeatValidity::Valid, + )) + } + _ => Ok(Self::new( + heartbeat, + cell_type, + dec!(1.0), + None, + Some(coverage_summary), + proto::HeartbeatValidity::Valid, + )), + } + } + pub fn validate_heartbeats<'a>( gateway_info_resolver: &'a impl GatewayResolver, heartbeats: impl Stream + 'a, coverage_cache: &'a CoverageObjects, + max_distance_to_asserted: u32, epoch: &'a Range>, ) -> impl Stream> + 'a { heartbeats.then(move |heartbeat| async move { - let (cell_type, distance_to_asserted, coverage_summary, validity) = - validate_heartbeat(&heartbeat, gateway_info_resolver, coverage_cache, epoch) - .await?; - - Ok(Self { + Self::validate( heartbeat, - cell_type, - distance_to_asserted, - coverage_summary, - validity, - }) + gateway_info_resolver, + coverage_cache, + max_distance_to_asserted, + epoch, + ) + .await }) } @@ -420,10 +512,13 @@ impl ValidatedHeartbeat { proto::Heartbeat { cbsd_id: self.heartbeat.cbsd_id.clone().unwrap_or_default(), pub_key: self.heartbeat.hotspot_key.as_ref().into(), - reward_multiplier: 1.0, cell_type: self.cell_type as i32, validity: self.validity as i32, timestamp: self.heartbeat.timestamp.timestamp() as u64, + location_trust_score_multiplier: (self.location_trust_score_multiplier + * dec!(1000)) + .to_u32() + .unwrap_or_default(), coverage_object: self .heartbeat .coverage_object @@ -436,6 +531,7 @@ impl ValidatedHeartbeat { .location_validation_timestamp .map_or(0, |v| v.timestamp() as u64), distance_to_asserted: self.distance_to_asserted.map_or(0, |v| v as u64), + ..Default::default() }, &[("validity", self.validity.as_str_name())], ) @@ -471,8 +567,8 @@ impl ValidatedHeartbeat { let truncated_timestamp = self.truncated_timestamp()?; sqlx::query( r#" - INSERT INTO cbrs_heartbeats (cbsd_id, hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object) - VALUES ($1, $2, $3, $4, $5, $6) + INSERT INTO cbrs_heartbeats (cbsd_id, hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object, location_trust_score_multiplier) + VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT (cbsd_id, truncated_timestamp) DO UPDATE SET latest_timestamp = EXCLUDED.latest_timestamp, coverage_object = EXCLUDED.coverage_object @@ -484,6 +580,7 @@ impl ValidatedHeartbeat { .bind(self.heartbeat.timestamp) .bind(truncated_timestamp) .bind(self.heartbeat.coverage_object) + .bind(self.location_trust_score_multiplier) .execute(&mut *exec) .await?; Ok(()) @@ -493,9 +590,8 @@ impl ValidatedHeartbeat { let truncated_timestamp = self.truncated_timestamp()?; sqlx::query( r#" - INSERT INTO wifi_heartbeats (hotspot_key, cell_type, location_validation_timestamp, 
distance_to_asserted, - latest_timestamp, truncated_timestamp, coverage_object) - VALUES ($1, $2, $3, $4, $5, $6, $7) + INSERT INTO wifi_heartbeats (hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object, location_trust_score_multiplier) + VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (hotspot_key, truncated_timestamp) DO UPDATE SET latest_timestamp = EXCLUDED.latest_timestamp, coverage_object = EXCLUDED.coverage_object @@ -503,134 +599,17 @@ impl ValidatedHeartbeat { ) .bind(self.heartbeat.hotspot_key) .bind(self.cell_type) - .bind(self.heartbeat.location_validation_timestamp) - .bind(self.distance_to_asserted) .bind(self.heartbeat.timestamp) .bind(truncated_timestamp) .bind(self.heartbeat.coverage_object) + .bind(self.location_trust_score_multiplier) .execute(&mut *exec) .await?; Ok(()) } } -/// Validate a heartbeat in the given epoch. -// TODO(map): This needs to be changed to provide a struct instead of a tuple. -pub async fn validate_heartbeat( - heartbeat: &Heartbeat, - gateway_info_resolver: &impl GatewayResolver, - coverage_cache: &CoverageObjects, - epoch: &Range>, -) -> anyhow::Result<( - CellType, - Option, - Option, - proto::HeartbeatValidity, -)> { - let Some(coverage_object) = heartbeat.coverage_object else { - return Ok(( - CellType::CellTypeNone, - None, - None, - proto::HeartbeatValidity::BadCoverageObject, - )); - }; - - let Some(coverage_summary) = coverage_cache - .coverage_summary(&coverage_object, heartbeat.key()) - .await? - else { - return Ok(( - CellType::CellTypeNone, - None, - None, - proto::HeartbeatValidity::NoSuchCoverageObject, - )); - }; - - let cell_type = match heartbeat.hb_type { - HbType::Cbrs => match heartbeat.cbsd_id.as_ref() { - Some(cbsd_id) => match CellType::from_cbsd_id(cbsd_id) { - Some(ty) => ty, - _ => { - return Ok(( - CellType::CellTypeNone, - None, - Some(coverage_summary), - proto::HeartbeatValidity::BadCbsdId, - )) - } - }, - None => { - return Ok(( - CellType::CellTypeNone, - None, - Some(coverage_summary), - proto::HeartbeatValidity::BadCbsdId, - )) - } - }, - HbType::Wifi => { - if coverage_summary.indoor { - CellType::NovaGenericWifiIndoor - } else { - CellType::NovaGenericWifiOutdoor - } - } - }; - - if !heartbeat.operation_mode { - return Ok(( - cell_type, - None, - Some(coverage_summary), - proto::HeartbeatValidity::NotOperational, - )); - } - - if !epoch.contains(&heartbeat.timestamp) { - return Ok(( - cell_type, - None, - Some(coverage_summary), - proto::HeartbeatValidity::HeartbeatOutsideRange, - )); - } - - let distance_to_asserted = match gateway_info_resolver - .resolve_gateway(&heartbeat.hotspot_key) - .await? - { - GatewayResolution::GatewayNotFound => { - return Ok(( - cell_type, - None, - Some(coverage_summary), - proto::HeartbeatValidity::GatewayNotFound, - )) - } - GatewayResolution::GatewayNotAsserted if heartbeat.hb_type == HbType::Wifi => { - return Ok(( - cell_type, - None, - Some(coverage_summary), - proto::HeartbeatValidity::GatewayNotAsserted, - )) - } - GatewayResolution::AssertedLocation(location) if heartbeat.hb_type == HbType::Wifi => { - Some(heartbeat.asserted_distance(location)?) 
- } - _ => None, - }; - - Ok(( - cell_type, - distance_to_asserted, - Some(coverage_summary), - proto::HeartbeatValidity::Valid, - )) -} - +#[allow(clippy::too_many_arguments)] pub(crate) async fn process_validated_heartbeats( validated_heartbeats: impl Stream>, heartbeat_cache: &Cache<(String, DateTime), ()>, @@ -873,6 +852,7 @@ mod test { location_validation_timestamp: None, }, validity: Default::default(), + location_trust_score_multiplier: dec!(1.0), distance_to_asserted: None, coverage_summary: None, } diff --git a/mobile_verifier/src/heartbeats/valid_heartbeats.sql b/mobile_verifier/src/heartbeats/valid_heartbeats.sql deleted file mode 100644 index 59d67bab7..000000000 --- a/mobile_verifier/src/heartbeats/valid_heartbeats.sql +++ /dev/null @@ -1,110 +0,0 @@ -WITH cbrs_coverage_objs AS ( - SELECT - t1.cbsd_id, - t1.coverage_object, - t1.latest_timestamp - FROM - cbrs_heartbeats t1 - WHERE - t1.latest_timestamp = ( - SELECT - MAX(t2.latest_timestamp) - FROM - cbrs_heartbeats t2 - WHERE - t2.cbsd_id = t1.cbsd_id - AND truncated_timestamp >= $1 - AND truncated_timestamp < $2) -), -wifi_coverage_objs AS ( - SELECT - t1.hotspot_key, - t1.coverage_object, - t1.latest_timestamp - FROM - wifi_heartbeats t1 - WHERE - t1.latest_timestamp = ( - SELECT - MAX(t2.latest_timestamp) - FROM - wifi_heartbeats t2 - WHERE - t2.hotspot_key = t1.hotspot_key - AND truncated_timestamp >= $1 - AND truncated_timestamp < $2) -), -latest_hotspots AS ( - SELECT - t1.cbsd_id, - t1.hotspot_key, - t1.latest_timestamp - FROM - cbrs_heartbeats t1 - WHERE - t1.latest_timestamp = ( - SELECT - MAX(t2.latest_timestamp) - FROM - cbrs_heartbeats t2 - WHERE - t2.cbsd_id = t1.cbsd_id - AND truncated_timestamp >= $1 - AND truncated_timestamp < $2)) -SELECT - latest_hotspots.hotspot_key, - cbrs_heartbeats.cbsd_id, - cell_type, - cbrs_coverage_objs.coverage_object, - cbrs_coverage_objs.latest_timestamp, - NULL AS location_validation_timestamp, - NULL AS distance_to_asserted -FROM - cbrs_heartbeats - INNER JOIN latest_hotspots ON cbrs_heartbeats.cbsd_id = latest_hotspots.cbsd_id - INNER JOIN cbrs_coverage_objs ON cbrs_heartbeats.cbsd_id = cbrs_coverage_objs.cbsd_id -WHERE - truncated_timestamp >= $1 - AND truncated_timestamp < $2 -GROUP BY - cbrs_heartbeats.cbsd_id, - latest_hotspots.hotspot_key, - cell_type, - cbrs_coverage_objs.coverage_object, - cbrs_coverage_objs.latest_timestamp -HAVING - count(*) >= $3 -UNION ALL -SELECT - wifi_grouped.hotspot_key, - NULL AS cbsd_id, - cell_type, - wifi_coverage_objs.coverage_object, - wifi_coverage_objs.latest_timestamp, - b.location_validation_timestamp, - b.distance_to_asserted -FROM ( - SELECT - hotspot_key, - cell_type - FROM - wifi_heartbeats - WHERE - truncated_timestamp >= $1 - AND truncated_timestamp < $2 - GROUP BY - hotspot_key, - cell_type - HAVING - count(*) >= $3) AS wifi_grouped - INNER JOIN ( - SELECT - hotspot_key, - location_validation_timestamp, - distance_to_asserted - FROM - wifi_heartbeats - WHERE - wifi_heartbeats.truncated_timestamp >= $1 - AND wifi_heartbeats.truncated_timestamp < $2) AS b ON b.hotspot_key = wifi_grouped.hotspot_key - INNER JOIN wifi_coverage_objs ON wifi_grouped.hotspot_key = wifi_coverage_objs.hotspot_key diff --git a/mobile_verifier/src/heartbeats/valid_radios.sql b/mobile_verifier/src/heartbeats/valid_radios.sql new file mode 100644 index 000000000..3c4a8e522 --- /dev/null +++ b/mobile_verifier/src/heartbeats/valid_radios.sql @@ -0,0 +1,94 @@ +WITH latest_cbrs_hotspot AS ( + SELECT DISTINCT ON (cbsd_id) + cbsd_id, + hotspot_key + FROM + 
cbrs_heartbeats + WHERE + truncated_timestamp >= $1 + AND truncated_timestamp < $2 + ORDER BY + cbsd_id, + latest_timestamp DESC +), +heartbeats AS ( + SELECT + lch.hotspot_key, + ch.cbsd_id, + ch.cell_type, + CASE WHEN count(*) >= $3 THEN + 1.0 + ELSE + 0.0 + END AS heartbeat_multiplier, + AVG(ch.location_trust_score_multiplier) as location_trust_score_multiplier + FROM + cbrs_heartbeats ch + INNER JOIN latest_cbrs_hotspot lch ON ch.cbsd_id = lch.cbsd_id + WHERE + ch.truncated_timestamp >= $1 + AND ch.truncated_timestamp < $2 + GROUP BY + ch.cbsd_id, + lch.hotspot_key, + ch.cell_type + UNION + SELECT + hotspot_key, + NULL AS cbsd_id, + cell_type, + CASE WHEN count(*) >= $3 THEN + 1.0 + ELSE + 0.0 + END AS heartbeat_multiplier, + AVG(location_trust_score_multiplier) as location_trust_score_multiplier +FROM + wifi_heartbeats + WHERE + truncated_timestamp >= $1 + AND truncated_timestamp < $2 + GROUP BY + hotspot_key, + cell_type +), +latest_uuids AS (( SELECT DISTINCT ON (hotspot_key, + cbsd_id) + hotspot_key, + cbsd_id, + coverage_object + FROM + cbrs_heartbeats ch + WHERE + truncated_timestamp >= $1 + AND truncated_timestamp < $2 + ORDER BY + hotspot_key, + cbsd_id, + truncated_timestamp DESC) + UNION ( SELECT DISTINCT ON (hotspot_key) + hotspot_key, + NULL AS cbsd_id, + coverage_object + FROM + wifi_heartbeats wh + WHERE + truncated_timestamp >= $1 + AND truncated_timestamp < $2 + ORDER BY + hotspot_key, + truncated_timestamp DESC)) +SELECT + hb.hotspot_key, + hb.cbsd_id, + hb.cell_type, + hb.location_trust_score_multiplier, + u.coverage_object +FROM + heartbeats hb + INNER JOIN latest_uuids u ON hb.hotspot_key = u.hotspot_key + AND (hb.cbsd_id = u.cbsd_id + OR (hb.cbsd_id IS NULL + AND u.cbsd_id IS NULL)) +WHERE + hb.heartbeat_multiplier = 1.0 diff --git a/mobile_verifier/src/heartbeats/wifi.rs b/mobile_verifier/src/heartbeats/wifi.rs index 7095e1d1f..bf75e717e 100644 --- a/mobile_verifier/src/heartbeats/wifi.rs +++ b/mobile_verifier/src/heartbeats/wifi.rs @@ -21,6 +21,7 @@ pub struct HeartbeatDaemon { gateway_info_resolver: GIR, heartbeats: Receiver>, modeled_coverage_start: DateTime, + max_distance_to_asserted: u32, heartbeat_sink: FileSinkClient, seniority_sink: FileSinkClient, } @@ -34,6 +35,7 @@ where gateway_info_resolver: GIR, heartbeats: Receiver>, modeled_coverage_start: DateTime, + max_distance_to_asserted: u32, heartbeat_sink: FileSinkClient, seniority_sink: FileSinkClient, ) -> Self { @@ -42,6 +44,7 @@ where gateway_info_resolver, heartbeats, modeled_coverage_start, + max_distance_to_asserted, heartbeat_sink, seniority_sink, } @@ -110,6 +113,7 @@ where &self.gateway_info_resolver, heartbeats, coverage_objects, + self.max_distance_to_asserted, &epoch, ), heartbeat_cache, diff --git a/mobile_verifier/src/lib.rs b/mobile_verifier/src/lib.rs index 0729475d9..ee491cd0f 100644 --- a/mobile_verifier/src/lib.rs +++ b/mobile_verifier/src/lib.rs @@ -1,15 +1,15 @@ pub mod cell_type; pub mod cli; pub mod coverage; -mod data_session; +pub mod data_session; pub mod heartbeats; pub mod reward_shares; pub mod rewarder; mod settings; pub mod speedtests; pub mod speedtests_average; -mod subscriber_location; -mod telemetry; +pub mod subscriber_location; +pub mod telemetry; pub use settings::Settings; diff --git a/mobile_verifier/src/reward_shares.rs b/mobile_verifier/src/reward_shares.rs index 384dc9820..57c336284 100644 --- a/mobile_verifier/src/reward_shares.rs +++ b/mobile_verifier/src/reward_shares.rs @@ -1,6 +1,6 @@ use crate::{ coverage::{CoverageReward, CoveredHexStream, CoveredHexes}, - 
data_session::HotspotMap, + data_session::{HotspotMap, ServiceProviderDataSession}, heartbeats::HeartbeatReward, speedtests_average::{SpeedtestAverage, SpeedtestAverages}, subscriber_location::SubscriberValidatedLocations, @@ -9,8 +9,17 @@ use chrono::{DateTime, Duration, Utc}; use file_store::traits::TimestampEncode; use futures::{Stream, StreamExt}; use helium_crypto::PublicKeyBinary; -use helium_proto::services::poc_mobile as proto; -use helium_proto::services::poc_mobile::mobile_reward_share::Reward as ProtoReward; +use helium_proto::{ + services::{ + poc_mobile as proto, + poc_mobile::{ + mobile_reward_share::Reward as ProtoReward, UnallocatedReward, UnallocatedRewardType, + }, + }, + ServiceProvider, +}; + +use mobile_config::client::{carrier_service_client::CarrierServiceVerifier, ClientError}; use rust_decimal::prelude::*; use rust_decimal_macros::dec; use std::{collections::HashMap, ops::Range}; @@ -35,10 +44,24 @@ const MAPPERS_REWARDS_PERCENT: Decimal = dec!(0.2); /// shares of the mappers pool allocated per eligible subscriber for discovery mapping const DISCOVERY_MAPPING_SHARES: Decimal = dec!(30); +// Percent of total emissions allocated for service provider rewards +const SERVICE_PROVIDER_PERCENT: Decimal = dec!(0.1); + +// Percent of total emissions allocated for oracles +const ORACLES_PERCENT: Decimal = dec!(0.04); + +#[derive(Debug)] pub struct TransferRewards { reward_scale: Decimal, - rewards: HashMap, + rewards: HashMap, reward_sum: Decimal, + mobile_bone_price: Decimal, +} + +#[derive(Copy, Clone, Debug)] +pub struct TransferReward { + bones: Decimal, + bytes_rewarded: u64, } impl TransferRewards { @@ -52,7 +75,19 @@ impl TransferRewards { #[cfg(test)] fn reward(&self, hotspot: &PublicKeyBinary) -> Decimal { - self.rewards.get(hotspot).copied().unwrap_or(Decimal::ZERO) * self.reward_scale + self.rewards + .get(hotspot) + .copied() + .map(|x| x.bones) + .unwrap_or(Decimal::ZERO) + * self.reward_scale + } + + pub fn total(&self) -> Decimal { + self.rewards + .values() + .map(|v| v.bones * self.reward_scale) + .sum() } pub async fn from_transfer_sessions( @@ -64,10 +99,17 @@ impl TransferRewards { let rewards = transfer_sessions .into_iter() // Calculate rewards per hotspot - .map(|(pub_key, dc_amount)| { - let bones = dc_to_mobile_bones(Decimal::from(dc_amount), mobile_bone_price); + .map(|(pub_key, rewardable)| { + let bones = + dc_to_mobile_bones(Decimal::from(rewardable.rewardable_dc), mobile_bone_price); reward_sum += bones; - (pub_key, bones) + ( + pub_key, + TransferReward { + bones, + bytes_rewarded: rewardable.rewardable_bytes, + }, + ) }) .collect(); @@ -99,13 +141,14 @@ impl TransferRewards { reward_scale, rewards, reward_sum: reward_sum * reward_scale, + mobile_bone_price, } } pub fn into_rewards( self, epoch: &'_ Range>, - ) -> impl Iterator + '_ { + ) -> impl Iterator + '_ { let Self { reward_scale, rewards, @@ -115,25 +158,30 @@ impl TransferRewards { let end_period = epoch.end.encode_timestamp(); rewards .into_iter() - .map(move |(hotspot_key, reward)| proto::MobileRewardShare { - start_period, - end_period, - reward: Some(proto::mobile_reward_share::Reward::GatewayReward( - proto::GatewayReward { - hotspot_key: hotspot_key.into(), - dc_transfer_reward: (reward * reward_scale) - .round_dp_with_strategy(0, RoundingStrategy::ToZero) - .to_u64() - .unwrap_or(0), + .map(move |(hotspot_key, reward)| { + let dc_transfer_reward = (reward.bones * reward_scale) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + ( + 
dc_transfer_reward, + proto::MobileRewardShare { + start_period, + end_period, + reward: Some(proto::mobile_reward_share::Reward::GatewayReward( + proto::GatewayReward { + hotspot_key: hotspot_key.into(), + dc_transfer_reward, + rewardable_bytes: reward.bytes_rewarded, + price: (self.mobile_bone_price * dec!(1_000_000) * dec!(1_000_000)) + .to_u64() + .unwrap_or_default(), + }, + )), }, - )), - }) - .filter(|mobile_reward| match mobile_reward.reward { - Some(proto::mobile_reward_share::Reward::GatewayReward(ref gateway_reward)) => { - gateway_reward.dc_transfer_reward > 0 - } - _ => false, + ) }) + .filter(|(dc_transfer_reward, _mobile_reward)| *dc_transfer_reward > 0) } } @@ -149,17 +197,11 @@ impl MapperShares { } } - pub fn rewards_per_share( - &self, - reward_period: &'_ Range>, - ) -> anyhow::Result { + pub fn rewards_per_share(&self, total_mappers_pool: Decimal) -> anyhow::Result { // note: currently rewards_per_share calculation only takes into // consideration discovery mapping shares // in the future it will also need to take into account // verification mapping shares - let duration: Duration = reward_period.end - reward_period.start; - let total_mappers_pool = get_scheduled_tokens_for_mappers(duration); - // the number of subscribers eligible for discovery location rewards let discovery_mappers_count = Decimal::from(self.discovery_mapping_shares.len()); @@ -178,7 +220,7 @@ impl MapperShares { self, reward_period: &'_ Range>, reward_per_share: Decimal, - ) -> impl Iterator + '_ { + ) -> impl Iterator + '_ { self.discovery_mapping_shares .into_iter() .map(move |subscriber_id| proto::SubscriberReward { @@ -186,14 +228,140 @@ impl MapperShares { discovery_location_amount: (DISCOVERY_MAPPING_SHARES * reward_per_share) .round_dp_with_strategy(0, RoundingStrategy::ToZero) .to_u64() - .unwrap_or(0), + .unwrap_or_default(), }) .filter(|subscriber_reward| subscriber_reward.discovery_location_amount > 0) - .map(|subscriber_reward| proto::MobileRewardShare { - start_period: reward_period.start.encode_timestamp(), - end_period: reward_period.end.encode_timestamp(), - reward: Some(ProtoReward::SubscriberReward(subscriber_reward)), + .map(|subscriber_reward| { + ( + subscriber_reward.discovery_location_amount, + proto::MobileRewardShare { + start_period: reward_period.start.encode_timestamp(), + end_period: reward_period.end.encode_timestamp(), + reward: Some(ProtoReward::SubscriberReward(subscriber_reward)), + }, + ) + }) + } +} + +#[derive(Default)] +pub struct ServiceProviderShares { + pub shares: Vec, +} + +impl ServiceProviderShares { + pub fn new(shares: Vec) -> Self { + Self { shares } + } + + pub async fn from_payers_dc( + payer_shares: HashMap, + client: &impl CarrierServiceVerifier, + ) -> anyhow::Result { + let mut sp_shares = ServiceProviderShares::default(); + for (payer, total_dcs) in payer_shares { + let service_provider = Self::payer_key_to_service_provider(&payer, client).await?; + sp_shares.shares.push(ServiceProviderDataSession { + service_provider, + total_dcs: Decimal::from(total_dcs), + }) + } + Ok(sp_shares) + } + + fn total_dc(&self) -> Decimal { + self.shares.iter().map(|v| v.total_dcs).sum() + } + + pub fn rewards_per_share( + &self, + total_sp_rewards: Decimal, + mobile_bone_price: Decimal, + ) -> anyhow::Result { + // the total amount of DC spent across all service providers + let total_sp_dc = self.total_dc(); + // the total amount of service provider rewards in bones based on the spent DC + let total_sp_rewards_used = dc_to_mobile_bones(total_sp_dc, 
mobile_bone_price); + // cap the service provider rewards if used > pool total + let capped_sp_rewards_used = + Self::maybe_cap_service_provider_rewards(total_sp_rewards_used, total_sp_rewards); + Ok(Self::calc_rewards_per_share( + capped_sp_rewards_used, + total_sp_dc, + )) + } + + pub fn into_service_provider_rewards( + self, + reward_period: &'_ Range>, + reward_per_share: Decimal, + ) -> impl Iterator + '_ { + self.shares + .into_iter() + .map(move |share| proto::ServiceProviderReward { + service_provider_id: share.service_provider as i32, + amount: (share.total_dcs * reward_per_share) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0), }) + .filter(|service_provider_reward| service_provider_reward.amount > 0) + .map(|service_provider_reward| { + ( + service_provider_reward.amount, + proto::MobileRewardShare { + start_period: reward_period.start.encode_timestamp(), + end_period: reward_period.end.encode_timestamp(), + reward: Some(ProtoReward::ServiceProviderReward(service_provider_reward)), + }, + ) + }) + } + + pub fn into_unallocated_reward( + unallocated_amount: Decimal, + reward_period: &'_ Range>, + ) -> anyhow::Result { + let reward = UnallocatedReward { + reward_type: UnallocatedRewardType::ServiceProvider as i32, + amount: unallocated_amount + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0), + }; + Ok(proto::MobileRewardShare { + start_period: reward_period.start.encode_timestamp(), + end_period: reward_period.end.encode_timestamp(), + reward: Some(ProtoReward::UnallocatedReward(reward)), + }) + } + + fn maybe_cap_service_provider_rewards( + total_sp_rewards_used: Decimal, + total_sp_rewards: Decimal, + ) -> Decimal { + match total_sp_rewards_used <= total_sp_rewards { + true => total_sp_rewards_used, + false => total_sp_rewards, + } + } + + fn calc_rewards_per_share(total_rewards: Decimal, total_shares: Decimal) -> Decimal { + if total_shares > Decimal::ZERO { + (total_rewards / total_shares) + .round_dp_with_strategy(DEFAULT_PREC, RoundingStrategy::MidpointNearestEven) + } else { + Decimal::ZERO + } + } + + async fn payer_key_to_service_provider( + payer: &str, + client: &impl CarrierServiceVerifier, + ) -> anyhow::Result { + tracing::info!(payer, "getting service provider for payer"); + let sp = client.payer_key_to_service_provider(payer).await?; + Ok(sp) } } @@ -206,16 +374,20 @@ pub fn dc_to_mobile_bones(dc_amount: Decimal, mobile_bone_price: Decimal) -> Dec #[derive(Debug)] struct RadioPoints { - heartbeat_multiplier: Decimal, + location_trust_score_multiplier: Decimal, coverage_object: Uuid, seniority: DateTime, points: Decimal, } impl RadioPoints { - fn new(heartbeat_multiplier: Decimal, coverage_object: Uuid, seniority: DateTime) -> Self { + fn new( + location_trust_score_multiplier: Decimal, + coverage_object: Uuid, + seniority: DateTime, + ) -> Self { Self { - heartbeat_multiplier, + location_trust_score_multiplier, seniority, coverage_object, points: Decimal::ZERO, @@ -223,7 +395,7 @@ impl RadioPoints { } fn points(&self) -> Decimal { - (self.heartbeat_multiplier * self.points).max(Decimal::ZERO) + (self.location_trust_score_multiplier * self.points).max(Decimal::ZERO) } } @@ -262,14 +434,14 @@ pub struct CoveragePoints { impl CoveragePoints { pub async fn aggregate_points( hex_streams: &impl CoveredHexStream, - heartbeats: impl Stream, + heartbeats: impl Stream>, speedtests: &SpeedtestAverages, period_end: DateTime, ) -> Result { let mut heartbeats = std::pin::pin!(heartbeats); let mut covered_hexes 
= CoveredHexes::default(); let mut coverage_points = HashMap::new(); - while let Some(heartbeat) = heartbeats.next().await { + while let Some(heartbeat) = heartbeats.next().await.transpose()? { let speedtest_multiplier = speedtests .get_average(&heartbeat.hotspot_key) .as_ref() @@ -335,12 +507,10 @@ impl CoveragePoints { pub fn into_rewards( self, - transfer_rewards_sum: Decimal, + available_poc_rewards: Decimal, epoch: &'_ Range>, - ) -> Option + '_> { + ) -> Option + '_> { let total_shares = self.total_shares(); - let available_poc_rewards = - get_scheduled_tokens_for_poc_and_dc(epoch.end - epoch.start) - transfer_rewards_sum; available_poc_rewards .checked_div(total_shares) .map(|poc_rewards_per_share| { @@ -359,12 +529,7 @@ impl CoveragePoints { hotspot_points.radio_points.into_iter(), ) }) - .filter(|mobile_reward| match mobile_reward.reward { - Some(proto::mobile_reward_share::Reward::RadioReward(ref radio_reward)) => { - radio_reward.poc_reward > 0 - } - _ => false, - }) + .filter(|(poc_reward, _mobile_reward)| *poc_reward > 0) }) } } @@ -376,7 +541,7 @@ fn radio_points_into_rewards( poc_rewards_per_share: Decimal, speedtest_multiplier: Decimal, radio_points: impl Iterator, RadioPoints)>, -) -> impl Iterator { +) -> impl Iterator { radio_points.map(move |(cbsd_id, radio_points)| { new_radio_reward( cbsd_id, @@ -398,31 +563,42 @@ fn new_radio_reward( poc_rewards_per_share: Decimal, speedtest_multiplier: Decimal, radio_points: RadioPoints, -) -> proto::MobileRewardShare { +) -> (u64, proto::MobileRewardShare) { let poc_reward = poc_rewards_per_share * speedtest_multiplier - * radio_points.heartbeat_multiplier + * radio_points.location_trust_score_multiplier * radio_points.points; let hotspot_key: Vec = hotspot_key.clone().into(); let cbsd_id = cbsd_id.unwrap_or_default(); - proto::MobileRewardShare { - start_period, - end_period, - reward: Some(proto::mobile_reward_share::Reward::RadioReward( - proto::RadioReward { - hotspot_key, - cbsd_id, - poc_reward: poc_reward - .round_dp_with_strategy(0, RoundingStrategy::ToZero) - .to_u64() - .unwrap_or(0), - coverage_points: radio_points.points.to_u64().unwrap_or(0), - seniority_timestamp: radio_points.seniority.encode_timestamp(), - coverage_object: Vec::from(radio_points.coverage_object.into_bytes()), - ..Default::default() - }, - )), - } + let poc_reward = poc_reward + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + ( + poc_reward, + proto::MobileRewardShare { + start_period, + end_period, + reward: Some(proto::mobile_reward_share::Reward::RadioReward( + proto::RadioReward { + hotspot_key, + cbsd_id, + poc_reward, + coverage_points: radio_points.points.to_u64().unwrap_or(0), + seniority_timestamp: radio_points.seniority.encode_timestamp(), + coverage_object: Vec::from(radio_points.coverage_object.into_bytes()), + location_trust_score_multiplier: (radio_points.location_trust_score_multiplier + * dec!(1000)) + .to_u32() + .unwrap_or_default(), + speedtest_multiplier: (speedtest_multiplier * dec!(1000)) + .to_u32() + .unwrap_or_default(), + ..Default::default() + }, + )), + }, + ) } pub fn get_total_scheduled_tokens(duration: Duration) -> Decimal { @@ -430,7 +606,7 @@ pub fn get_total_scheduled_tokens(duration: Duration) -> Decimal { * Decimal::from(duration.num_seconds()) } -pub fn get_scheduled_tokens_for_poc_and_dc(duration: Duration) -> Decimal { +pub fn get_scheduled_tokens_for_poc(duration: Duration) -> Decimal { get_total_scheduled_tokens(duration) * dec!(0.6) } @@ -438,15 +614,24 @@ pub fn 
get_scheduled_tokens_for_mappers(duration: Duration) -> Decimal { get_total_scheduled_tokens(duration) * MAPPERS_REWARDS_PERCENT } +pub fn get_scheduled_tokens_for_service_providers(duration: Duration) -> Decimal { + get_total_scheduled_tokens(duration) * SERVICE_PROVIDER_PERCENT +} + +pub fn get_scheduled_tokens_for_oracles(duration: Duration) -> Decimal { + get_total_scheduled_tokens(duration) * ORACLES_PERCENT +} + #[cfg(test)] mod test { use super::*; use crate::{ cell_type::CellType, coverage::{CoveredHexStream, HexCoverage, Seniority}, - data_session, data_session::HotspotDataSession, - heartbeats::{HeartbeatReward, HeartbeatRow, KeyType, OwnedKeyType}, + data_session::{self, HotspotReward}, + heartbeats::{HeartbeatReward, KeyType, OwnedKeyType}, + reward_shares, speedtests::Speedtest, speedtests_average::SpeedtestAverage, subscriber_location::SubscriberValidatedLocations, @@ -454,7 +639,9 @@ mod test { use chrono::{Duration, Utc}; use file_store::speedtest::CellSpeedtest; use futures::stream::{self, BoxStream}; - use helium_proto::services::poc_mobile::mobile_reward_share::Reward as MobileReward; + use helium_proto::{ + services::poc_mobile::mobile_reward_share::Reward as MobileReward, ServiceProvider, + }; use prost::Message; use std::collections::HashMap; use uuid::Uuid; @@ -471,6 +658,12 @@ mod test { ); } + fn mobile_bones_to_dc(mobile_bones_amount: Decimal, mobile_bones_price: Decimal) -> Decimal { + let mobile_value = mobile_bones_amount * mobile_bones_price; + (mobile_value / DC_USD_PRICE) + .round_dp_with_strategy(0, RoundingStrategy::ToNegativeInfinity) + } + #[tokio::test] async fn discover_mapping_amount() { // test based on example defined at https://github.com/helium/oracles/issues/422 @@ -491,7 +684,11 @@ mod test { // translate location shares into discovery mapping shares let mapping_shares = MapperShares::new(location_shares); - let rewards_per_share = mapping_shares.rewards_per_share(&epoch).unwrap(); + let total_mappers_pool = + reward_shares::get_scheduled_tokens_for_mappers(epoch.end - epoch.start); + let rewards_per_share = mapping_shares + .rewards_per_share(total_mappers_pool) + .unwrap(); // verify total rewards for the epoch let total_epoch_rewards = get_total_scheduled_tokens(epoch.end - epoch.start) @@ -510,23 +707,26 @@ mod test { let expected_reward_per_subscriber = total_mapper_rewards / NUM_SUBSCRIBERS; // get the summed rewards allocated to subscribers for discovery location - let mut total_discovery_mapping_rewards = 0_u64; - for subscriber_share in mapping_shares.into_subscriber_rewards(&epoch, rewards_per_share) { + let mut allocated_mapper_rewards = 0_u64; + for (reward_amount, subscriber_share) in + mapping_shares.into_subscriber_rewards(&epoch, rewards_per_share) + { if let Some(MobileReward::SubscriberReward(r)) = subscriber_share.reward { - total_discovery_mapping_rewards += r.discovery_location_amount; assert_eq!(expected_reward_per_subscriber, r.discovery_location_amount); + assert_eq!(reward_amount, r.discovery_location_amount); + allocated_mapper_rewards += reward_amount; } } - // verify the total rewards awared for discovery mapping - assert_eq!(16_393_442_620_000, total_discovery_mapping_rewards); + // verify the total rewards awarded for discovery mapping + assert_eq!(16_393_442_620_000, allocated_mapper_rewards); - // the sum of rewards distributed should not exceed the epoch amount - // but due to rounding whilst going to u64 for each subscriber, - // we will be some bones short of the full epoch amount - // the difference in bones 
cannot be more than the total number of subscribers ( 10 k) - let diff = total_mapper_rewards - total_discovery_mapping_rewards; - assert!(diff < NUM_SUBSCRIBERS); + // confirm the unallocated mapper reward amounts + // this should not be more than the total number of subscribers (10k) + // as we can at max drop one bone per subscriber due to rounding + let unallocated_mapper_reward_amount = total_mapper_rewards - allocated_mapper_rewards; + assert_eq!(unallocated_mapper_reward_amount, 2950); + assert!(unallocated_mapper_reward_amount < NUM_SUBSCRIBERS); } /// Test to ensure that the correct data transfer amount is rewarded. @@ -551,12 +751,15 @@ mod test { let mut data_transfer_map = HotspotMap::new(); data_transfer_map.insert( data_transfer_session.pub_key, - data_transfer_session.num_dcs as u64, + HotspotReward { + rewardable_bytes: 0, // Not used + rewardable_dc: data_transfer_session.num_dcs as u64, + }, ); let now = Utc::now(); let epoch = (now - Duration::hours(1))..now; - let total_rewards = get_scheduled_tokens_for_poc_and_dc(epoch.end - epoch.start); + let total_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start); // confirm our hourly rewards add up to expected 24hr amount // total_rewards will be in bones @@ -570,7 +773,7 @@ assert_eq!(data_transfer_rewards.reward(&owner), dec!(0.00002)); assert_eq!(data_transfer_rewards.reward_scale(), dec!(1.0)); - let available_poc_rewards = get_scheduled_tokens_for_poc_and_dc(epoch.end - epoch.start) + let available_poc_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start) - data_transfer_rewards.reward_sum; assert_eq!( available_poc_rewards, @@ -621,7 +824,7 @@ mod test { // allotted reward amount for data transfer, which is 40% of the daily tokens. We check to // ensure that amount of tokens remaining for POC is no less than 20% of the rewards allocated // for POC and data transfer (which is 60% of the daily total emissions).
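// Illustrative sketch (not part of the patch): the "allocate, then emit the remainder"
// pattern this change repeats across the mapper, POC, DC, oracle and service-provider
// paths. Per-recipient amounts are truncated to u64, so the pool minus the allocated sum
// is rounded toward zero and written out as a single unallocated reward. The helper name
// below is illustrative only; only the dependencies already used by this crate are assumed.
use rust_decimal::{prelude::ToPrimitive, Decimal, RoundingStrategy};

fn unallocated_amount(total_pool: Decimal, allocated: u64) -> u64 {
    // never round up: the unallocated amount may under-report by less than one bone,
    // but the pool can never be over-distributed
    (total_pool - Decimal::from(allocated))
        .round_dp_with_strategy(0, RoundingStrategy::ToZero)
        .to_u64()
        .unwrap_or(0)
}

#[test]
fn unallocated_amount_rounds_toward_zero() {
    use rust_decimal_macros::dec;
    // a 100.7-bone pool with 98 bones handed out leaves 2 whole unallocated bones
    assert_eq!(unallocated_amount(dec!(100.7), 98), 2);
}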
- let available_poc_rewards = get_scheduled_tokens_for_poc_and_dc(epoch.end - epoch.start) + let available_poc_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start) - data_transfer_rewards.reward_sum; assert_eq!(available_poc_rewards.trunc(), dec!(16_393_442_622_950)); assert_eq!( @@ -840,146 +1043,118 @@ mod test { let now = Utc::now(); let timestamp = now - Duration::minutes(20); - let max_asserted_distance_deviation: u32 = 300; // setup heartbeats - let heartbeat_keys = vec![ - HeartbeatRow { + let heartbeat_rewards = vec![ + HeartbeatReward { cbsd_id: Some(c2.clone()), hotspot_key: gw2.clone(), coverage_object: cov_obj_2, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c2).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c4.clone()), hotspot_key: gw3.clone(), coverage_object: cov_obj_4, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c4).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c5.clone()), hotspot_key: gw4.clone(), coverage_object: cov_obj_5, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c5).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c6.clone()), hotspot_key: gw4.clone(), coverage_object: cov_obj_6, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c6).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c7.clone()), hotspot_key: gw4.clone(), coverage_object: cov_obj_7, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c7).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c8.clone()), hotspot_key: gw4.clone(), coverage_object: cov_obj_8, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c8).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c9.clone()), hotspot_key: gw4.clone(), coverage_object: cov_obj_9, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c9).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c10.clone()), hotspot_key: gw4.clone(), coverage_object: cov_obj_10, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c10).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c11.clone()), hotspot_key: gw4.clone(), coverage_object: cov_obj_11, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c11).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: 
Some(c12.clone()), hotspot_key: gw5.clone(), coverage_object: cov_obj_12, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c12).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c13.clone()), hotspot_key: gw6.clone(), coverage_object: cov_obj_13, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c13).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c14.clone()), hotspot_key: gw7.clone(), coverage_object: cov_obj_14, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c14).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: None, hotspot_key: gw9.clone(), cell_type: CellType::NovaGenericWifiIndoor, coverage_object: cov_obj_15, - latest_timestamp: DateTime::::MIN_UTC, - location_validation_timestamp: Some(timestamp), - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: None, hotspot_key: gw10.clone(), cell_type: CellType::NovaGenericWifiIndoor, coverage_object: cov_obj_16, - latest_timestamp: DateTime::::MIN_UTC, - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(0.25), }, - HeartbeatRow { + HeartbeatReward { cbsd_id: None, hotspot_key: gw11.clone(), cell_type: CellType::NovaGenericWifiIndoor, coverage_object: cov_obj_17, - latest_timestamp: DateTime::::MIN_UTC, - location_validation_timestamp: Some(timestamp), - distance_to_asserted: Some(10000), + location_trust_score_multiplier: dec!(0.25), }, - ]; + ] + .into_iter() + .map(Ok) + .collect::>>(); // Setup hex coverages let mut hex_coverage = HashMap::new(); @@ -1044,11 +1219,6 @@ mod test { simple_hex_coverage(&gw11, 0x8c2681a306607ff), ); - let heartbeat_rewards: Vec = heartbeat_keys - .into_iter() - .map(|row| HeartbeatReward::from_heartbeat_row(row, max_asserted_distance_deviation)) - .collect(); - // setup speedtests let last_speedtest = timestamp - Duration::hours(12); let gw1_speedtests = vec![ @@ -1118,8 +1288,14 @@ mod test { // calculate the rewards for the sample group let mut owner_rewards = HashMap::::new(); + + let duration = Duration::hours(1); + let epoch = (now - duration)..now; + let total_poc_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start); + let mut allocated_poc_rewards = 0_u64; + let epoch = (now - Duration::hours(1))..now; - for mobile_reward in CoveragePoints::aggregate_points( + for (reward_amount, mobile_reward) in CoveragePoints::aggregate_points( &hex_coverage, stream::iter(heartbeat_rewards), &speedtest_avgs, @@ -1128,7 +1304,7 @@ mod test { ) .await .unwrap() - .into_rewards(Decimal::ZERO, &epoch) + .into_rewards(total_poc_rewards, &epoch) .unwrap() { let radio_reward = match mobile_reward.reward { @@ -1139,7 +1315,8 @@ mod test { .get(&PublicKeyBinary::from(radio_reward.hotspot_key)) .expect("Could not find owner") .clone(); - + assert_eq!(reward_amount, radio_reward.poc_reward); + allocated_poc_rewards += reward_amount; *owner_rewards.entry(owner).or_default() += radio_reward.poc_reward; } @@ -1189,22 +1366,16 @@ mod test { // and thus its reward scale is reduced assert_eq!((owner5_reward as f64 * 
0.25) as u64, owner7_reward); - // total emissions for 1 hour - let expected_total_rewards = get_scheduled_tokens_for_poc_and_dc(Duration::hours(1)) - .to_u64() - .unwrap(); - // the emissions actually distributed for the hour - let mut distributed_total_rewards = 0; - for val in owner_rewards.values() { - distributed_total_rewards += *val - } - assert_eq!(distributed_total_rewards, 2_049_180_327_865); + // confirm total sum of allocated poc rewards + assert_eq!(allocated_poc_rewards, 2_049_180_327_865); - let diff = expected_total_rewards as i128 - distributed_total_rewards as i128; - // the sum of rewards distributed should not exceed the epoch amount - // but due to rounding whilst going to u64 when computing rewards, - // is permitted to be a few bones less - assert!(diff.abs() <= 5); + // confirm the unallocated poc reward amounts + let unallocated_sp_reward_amount = (total_poc_rewards + - Decimal::from(allocated_poc_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + assert_eq!(unallocated_sp_reward_amount, 3); } #[tokio::test] @@ -1230,7 +1401,6 @@ mod test { let now = Utc::now(); let timestamp = now - Duration::minutes(20); - let max_asserted_distance_deviation: u32 = 300; let g1_cov_obj = Uuid::new_v4(); let g2_cov_obj = Uuid::new_v4(); @@ -1239,33 +1409,27 @@ mod test { let c2 = "P27-SCE4255W".to_string(); // sercom indoor // setup heartbeats - let heartbeat_keys = vec![ + let heartbeat_rewards = vec![ // add wifi indoor HB - HeartbeatRow { + HeartbeatReward { cbsd_id: None, hotspot_key: gw1.clone(), cell_type: CellType::NovaGenericWifiIndoor, coverage_object: g1_cov_obj, - latest_timestamp: DateTime::::MIN_UTC, - location_validation_timestamp: Some(timestamp), - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, // add sercomm indoor HB - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c2.clone()), hotspot_key: gw2.clone(), cell_type: CellType::from_cbsd_id(&c2).unwrap(), - latest_timestamp: DateTime::::MIN_UTC, coverage_object: g2_cov_obj, - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - ]; - - let heartbeat_rewards: Vec = heartbeat_keys - .into_iter() - .map(|row| HeartbeatReward::from_heartbeat_row(row, max_asserted_distance_deviation)) - .collect(); + ] + .into_iter() + .map(Ok) + .collect::>>(); // setup speedtests let last_speedtest = timestamp - Duration::hours(12); @@ -1299,7 +1463,8 @@ mod test { let mut owner_rewards = HashMap::::new(); let duration = Duration::hours(1); let epoch = (now - duration)..now; - for mobile_reward in CoveragePoints::aggregate_points( + let total_poc_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start); + for (_reward_amount, mobile_reward) in CoveragePoints::aggregate_points( &hex_coverage, stream::iter(heartbeat_rewards), &speedtest_avgs, @@ -1307,7 +1472,7 @@ mod test { ) .await .unwrap() - .into_rewards(Decimal::ZERO, &epoch) + .into_rewards(total_poc_rewards, &epoch) .unwrap() { let radio_reward = match mobile_reward.reward { @@ -1321,6 +1486,7 @@ mod test { *owner_rewards.entry(owner).or_default() += radio_reward.poc_reward; } + println!("owner rewards {:?}", owner_rewards); // These were different, now they are the same: @@ -1360,7 +1526,6 @@ mod test { let now = Utc::now(); let timestamp = now - Duration::minutes(20); - let max_asserted_distance_deviation: u32 = 300; // init cells and cell_types let c2 = "P27-SCE4255W".to_string(); // sercom indoor @@ -1369,35 +1534,29 @@ mod 
test { let g2_cov_obj = Uuid::new_v4(); // setup heartbeats - let heartbeat_keys = vec![ + let heartbeat_rewards = vec![ // add wifi indoor HB // with distance to asserted > than max allowed // this results in reward scale dropping to 0.25 - HeartbeatRow { + HeartbeatReward { cbsd_id: None, hotspot_key: gw1.clone(), cell_type: CellType::NovaGenericWifiIndoor, coverage_object: g1_cov_obj, - latest_timestamp: DateTime::::MIN_UTC, - location_validation_timestamp: Some(timestamp), - distance_to_asserted: Some(1000), + location_trust_score_multiplier: dec!(0.25), }, // add sercomm indoor HB - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c2.clone()), hotspot_key: gw2.clone(), coverage_object: g2_cov_obj, - latest_timestamp: DateTime::::MIN_UTC, cell_type: CellType::from_cbsd_id(&c2).unwrap(), - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - ]; - - let heartbeat_rewards: Vec = heartbeat_keys - .into_iter() - .map(|row| HeartbeatReward::from_heartbeat_row(row, max_asserted_distance_deviation)) - .collect(); + ] + .into_iter() + .map(Ok) + .collect::>>(); // setup speedtests let last_speedtest = timestamp - Duration::hours(12); @@ -1432,7 +1591,8 @@ mod test { let mut owner_rewards = HashMap::::new(); let duration = Duration::hours(1); let epoch = (now - duration)..now; - for mobile_reward in CoveragePoints::aggregate_points( + let total_poc_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start); + for (_reward_amount, mobile_reward) in CoveragePoints::aggregate_points( &hex_coverage, stream::iter(heartbeat_rewards), &speedtest_avgs, @@ -1440,7 +1600,7 @@ mod test { ) .await .unwrap() - .into_rewards(Decimal::ZERO, &epoch) + .into_rewards(total_poc_rewards, &epoch) .unwrap() { let radio_reward = match mobile_reward.reward { @@ -1495,7 +1655,6 @@ mod test { let now = Utc::now(); let timestamp = now - Duration::minutes(20); - let max_asserted_distance_deviation: u32 = 300; let g1_cov_obj = Uuid::new_v4(); let g2_cov_obj = Uuid::new_v4(); @@ -1504,33 +1663,27 @@ mod test { let c2 = "P27-SCE4255W".to_string(); // sercom indoor // setup heartbeats - let heartbeat_keys = vec![ + let heartbeat_rewards = vec![ // add wifi indoor HB - HeartbeatRow { + HeartbeatReward { cbsd_id: None, hotspot_key: gw1.clone(), cell_type: CellType::NovaGenericWifiOutdoor, coverage_object: g1_cov_obj, - latest_timestamp: DateTime::::MIN_UTC, - location_validation_timestamp: Some(timestamp), - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, // add sercomm indoor HB - HeartbeatRow { + HeartbeatReward { cbsd_id: Some(c2.clone()), hotspot_key: gw2.clone(), cell_type: CellType::from_cbsd_id(&c2).unwrap(), - latest_timestamp: DateTime::::MIN_UTC, coverage_object: g2_cov_obj, - location_validation_timestamp: None, - distance_to_asserted: Some(1), + location_trust_score_multiplier: dec!(1.0), }, - ]; - - let heartbeat_rewards: Vec = heartbeat_keys - .into_iter() - .map(|row| HeartbeatReward::from_heartbeat_row(row, max_asserted_distance_deviation)) - .collect(); + ] + .into_iter() + .map(Ok) + .collect::>>(); // setup speedtests let last_speedtest = timestamp - Duration::hours(12); @@ -1564,7 +1717,8 @@ mod test { let mut owner_rewards = HashMap::::new(); let duration = Duration::hours(1); let epoch = (now - duration)..now; - for mobile_reward in CoveragePoints::aggregate_points( + let total_poc_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start); + for (_reward_amount, mobile_reward) in 
CoveragePoints::aggregate_points( &hex_coverage, stream::iter(heartbeat_rewards), &speedtest_avgs, @@ -1572,7 +1726,7 @@ mod test { ) .await .unwrap() - .into_rewards(Decimal::ZERO, &epoch) + .into_rewards(total_poc_rewards, &epoch) .unwrap() { let radio_reward = match mobile_reward.reward { @@ -1588,7 +1742,7 @@ mod test { } // These were different, now they are the same: - + println!("owner rewards {:?}", owner_rewards); // wifi let owner1_reward = *owner_rewards .get(&owner1) @@ -1627,7 +1781,7 @@ mod test { radio_points: vec![( Some(c1), RadioPoints { - heartbeat_multiplier: dec!(1.0), + location_trust_score_multiplier: dec!(1.0), seniority: DateTime::default(), coverage_object: Uuid::new_v4(), points: dec!(10.0), @@ -1645,7 +1799,7 @@ mod test { ( Some(c2), RadioPoints { - heartbeat_multiplier: dec!(1.0), + location_trust_score_multiplier: dec!(1.0), seniority: DateTime::default(), coverage_object: Uuid::new_v4(), points: dec!(-1.0), @@ -1654,7 +1808,7 @@ mod test { ( Some(c3), RadioPoints { - heartbeat_multiplier: dec!(1.0), + location_trust_score_multiplier: dec!(1.0), points: dec!(0.0), seniority: DateTime::default(), coverage_object: Uuid::new_v4(), @@ -1671,8 +1825,12 @@ mod test { // less than or equal to zero. let coverage_points = CoveragePoints { coverage_points }; let epoch = now - Duration::hours(1)..now; + let total_poc_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start); let expected_hotspot = gw1; - for mobile_reward in coverage_points.into_rewards(Decimal::ZERO, &epoch).unwrap() { + for (_reward_amount, mobile_reward) in coverage_points + .into_rewards(total_poc_rewards, &epoch) + .unwrap() + { let radio_reward = match mobile_reward.reward { Some(proto::mobile_reward_share::Reward::RadioReward(radio_reward)) => radio_reward, _ => unreachable!(), @@ -1690,9 +1848,199 @@ mod test { let now = Utc::now(); let epoch = now - Duration::hours(1)..now; - + let total_poc_rewards = get_scheduled_tokens_for_poc(epoch.end - epoch.start); assert!(coverage_points - .into_rewards(Decimal::ZERO, &epoch) + .into_rewards(total_poc_rewards, &epoch) .is_none()); } + + #[tokio::test] + async fn service_provider_reward_amounts() { + let mobile_bone_price = dec!(0.00001); + + let sp1 = ServiceProvider::HeliumMobile; + + let now = Utc::now(); + let epoch = (now - Duration::hours(1))..now; + + let service_provider_sessions = vec![ServiceProviderDataSession { + service_provider: sp1, + total_dcs: dec!(1000), + }]; + let sp_shares = ServiceProviderShares::new(service_provider_sessions); + let total_sp_rewards = get_scheduled_tokens_for_service_providers(epoch.end - epoch.start); + let rewards_per_share = sp_shares + .rewards_per_share(total_sp_rewards, mobile_bone_price) + .unwrap(); + + let mut sp_rewards = HashMap::::new(); + let mut allocated_sp_rewards = 0_u64; + for (reward_amount, sp_reward) in + sp_shares.into_service_provider_rewards(&epoch, rewards_per_share) + { + if let Some(MobileReward::ServiceProviderReward(r)) = sp_reward.reward { + sp_rewards.insert(r.service_provider_id, r.amount); + assert_eq!(reward_amount, r.amount); + allocated_sp_rewards += reward_amount; + } + } + + let sp1_reward_amount = *sp_rewards + .get(&(sp1 as i32)) + .expect("Could not fetch sp1 shares"); + assert_eq!(sp1_reward_amount, 1000); + + // confirm the unallocated service provider reward amounts + let unallocated_sp_reward_amount = (total_sp_rewards - Decimal::from(allocated_sp_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + 
assert_eq!(unallocated_sp_reward_amount, 341_530_053_644); + } + + #[tokio::test] + async fn service_provider_reward_amounts_capped() { + let mobile_bone_price = dec!(1.0); + let sp1 = ServiceProvider::HeliumMobile; + + let now = Utc::now(); + let epoch = (now - Duration::hours(1))..now; + + let total_sp_rewards_in_bones = dec!(100_000_000); + let total_rewards_value_in_dc = + mobile_bones_to_dc(total_sp_rewards_in_bones, mobile_bone_price); + + let service_provider_sessions = vec![ServiceProviderDataSession { + service_provider: ServiceProvider::HeliumMobile, + // force the service provider to have spend more DC than total rewardable + total_dcs: total_rewards_value_in_dc * dec!(2.0), + }]; + + let sp_shares = ServiceProviderShares::new(service_provider_sessions); + let rewards_per_share = sp_shares + .rewards_per_share(total_sp_rewards_in_bones, mobile_bone_price) + .unwrap(); + + let mut sp_rewards = HashMap::new(); + let mut allocated_sp_rewards = 0_u64; + for (reward_amount, sp_reward) in + sp_shares.into_service_provider_rewards(&epoch, rewards_per_share) + { + if let Some(MobileReward::ServiceProviderReward(r)) = sp_reward.reward { + sp_rewards.insert(r.service_provider_id, r.amount); + assert_eq!(reward_amount, r.amount); + allocated_sp_rewards += reward_amount; + } + } + let sp1_reward_amount = *sp_rewards + .get(&(sp1 as i32)) + .expect("Could not fetch sp1 shares"); + + assert_eq!(Decimal::from(sp1_reward_amount), total_sp_rewards_in_bones); + assert_eq!(sp1_reward_amount, 100_000_000); + + // confirm the unallocated service provider reward amounts + let unallocated_sp_reward_amount = (total_sp_rewards_in_bones + - Decimal::from(allocated_sp_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + assert_eq!(unallocated_sp_reward_amount, 0); + } + + #[tokio::test] + async fn service_provider_reward_hip87_ex1() { + // mobile price from hip example and converted to bones + let mobile_bone_price = dec!(0.0001) / dec!(1_000_000); + let sp1 = ServiceProvider::HeliumMobile; + + let now = Utc::now(); + let epoch = (now - Duration::hours(1))..now; + let total_sp_rewards_in_bones = dec!(500_000_000) * dec!(1_000_000); + + let service_provider_sessions = vec![ServiceProviderDataSession { + service_provider: sp1, + total_dcs: dec!(100_000_000), + }]; + + let sp_shares = ServiceProviderShares::new(service_provider_sessions); + let rewards_per_share = sp_shares + .rewards_per_share(total_sp_rewards_in_bones, mobile_bone_price) + .unwrap(); + + let mut sp_rewards = HashMap::new(); + let mut allocated_sp_rewards = 0_u64; + for (reward_amount, sp_reward) in + sp_shares.into_service_provider_rewards(&epoch, rewards_per_share) + { + if let Some(MobileReward::ServiceProviderReward(r)) = sp_reward.reward { + sp_rewards.insert(r.service_provider_id, r.amount); + assert_eq!(reward_amount, r.amount); + allocated_sp_rewards += reward_amount; + } + } + + let sp1_reward_amount_in_bones = *sp_rewards + .get(&(sp1 as i32)) + .expect("Could not fetch sp1 shares"); + // example in HIP gives expected reward amount in mobile whereas we use bones + // assert expected value in bones + assert_eq!(sp1_reward_amount_in_bones, 10_000_000 * 1_000_000); + + // confirm the unallocated service provider reward amounts + let unallocated_sp_reward_amount = (total_sp_rewards_in_bones + - Decimal::from(allocated_sp_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + assert_eq!(unallocated_sp_reward_amount, 490_000_000_000_000); + } + + 
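// Illustrative worked check of the HIP-87 example above (a sketch, not the crate's code):
// it assumes DC_USD_PRICE is $0.00001 per DC, which is not shown in this diff but is
// consistent with the asserted amounts, and it models the cap on a service provider's
// reward with a plain `min` against the epoch pool.
#[test]
fn hip87_ex1_arithmetic_sketch() {
    use rust_decimal_macros::dec;

    let dc_usd_price = dec!(0.00001); // assumed DC price in USD
    let mobile_bone_price = dec!(0.0001) / dec!(1_000_000); // USD per bone, as in the test
    let total_dcs = dec!(100_000_000); // the service provider's DC spend

    // value the DC spend in USD, then express that value in bones at the bone price
    let spend_usd = total_dcs * dc_usd_price; // $1,000
    let spend_in_bones = spend_usd / mobile_bone_price; // 10,000,000 MOBILE, in bones
    assert_eq!(spend_in_bones, dec!(10_000_000) * dec!(1_000_000));

    // the payout is capped at the epoch's service provider pool; whatever remains
    // becomes the unallocated service provider reward asserted above
    let total_sp_rewards = dec!(500_000_000) * dec!(1_000_000);
    let sp_reward = spend_in_bones.min(total_sp_rewards);
    assert_eq!(total_sp_rewards - sp_reward, dec!(490_000_000_000_000));
}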
#[tokio::test] + async fn service_provider_reward_hip87_ex2() { + // mobile price from hip example and converted to bones + let mobile_bone_price = dec!(0.0001) / dec!(1_000_000); + let sp1 = ServiceProvider::HeliumMobile; + + let now = Utc::now(); + let epoch = (now - Duration::hours(24))..now; + let total_sp_rewards_in_bones = dec!(500_000_000) * dec!(1_000_000); + + let service_provider_sessions = vec![ServiceProviderDataSession { + service_provider: sp1, + total_dcs: dec!(100_000_000_000), + }]; + + let sp_shares = ServiceProviderShares::new(service_provider_sessions); + let rewards_per_share = sp_shares + .rewards_per_share(total_sp_rewards_in_bones, mobile_bone_price) + .unwrap(); + + let mut sp_rewards = HashMap::new(); + let mut allocated_sp_rewards = 0_u64; + for (reward_amount, sp_reward) in + sp_shares.into_service_provider_rewards(&epoch, rewards_per_share) + { + if let Some(MobileReward::ServiceProviderReward(r)) = sp_reward.reward { + sp_rewards.insert(r.service_provider_id, r.amount); + assert_eq!(reward_amount, r.amount); + allocated_sp_rewards += reward_amount; + } + } + + let sp1_reward_amount_in_bones = *sp_rewards + .get(&(sp1 as i32)) + .expect("Could not fetch sp1 shares"); + // example in HIP gives expected reward amount in mobile whereas we use bones + // assert expected value in bones + assert_eq!(sp1_reward_amount_in_bones, 500_000_000 * 1_000_000); + + // confirm the unallocated service provider reward amounts + let unallocated_sp_reward_amount = (total_sp_rewards_in_bones + - Decimal::from(allocated_sp_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + assert_eq!(unallocated_sp_reward_amount, 0); + } } diff --git a/mobile_verifier/src/rewarder.rs b/mobile_verifier/src/rewarder.rs index 4bedc8c62..283b74b22 100644 --- a/mobile_verifier/src/rewarder.rs +++ b/mobile_verifier/src/rewarder.rs @@ -1,7 +1,7 @@ use crate::{ coverage, data_session, heartbeats::{self, HeartbeatReward}, - reward_shares::{CoveragePoints, MapperShares, TransferRewards}, + reward_shares::{self, CoveragePoints, MapperShares, ServiceProviderShares, TransferRewards}, speedtests, speedtests_average::SpeedtestAverages, subscriber_location, telemetry, @@ -10,10 +10,16 @@ use anyhow::bail; use chrono::{DateTime, Duration, TimeZone, Utc}; use db_store::meta; use file_store::{file_sink::FileSinkClient, traits::TimestampEncode}; + +use helium_proto::services::{ + poc_mobile as proto, poc_mobile::mobile_reward_share::Reward as ProtoReward, + poc_mobile::UnallocatedReward, poc_mobile::UnallocatedRewardType, +}; use helium_proto::RewardManifest; +use mobile_config::client::{carrier_service_client::CarrierServiceVerifier, ClientError}; use price::PriceTracker; use reward_scheduler::Scheduler; -use rust_decimal::{prelude::ToPrimitive, Decimal}; +use rust_decimal::{prelude::*, Decimal}; use rust_decimal_macros::dec; use sqlx::{PgExecutor, Pool, Postgres}; use std::ops::Range; @@ -21,35 +27,38 @@ use tokio::time::sleep; const REWARDS_NOT_CURRENT_DELAY_PERIOD: i64 = 5; -pub struct Rewarder { +pub struct Rewarder { pool: Pool, + carrier_client: A, reward_period_duration: Duration, reward_offset: Duration, - mobile_rewards: FileSinkClient, + pub mobile_rewards: FileSinkClient, reward_manifests: FileSinkClient, price_tracker: PriceTracker, - max_distance_to_asserted: u32, } -impl Rewarder { +impl Rewarder +where + A: CarrierServiceVerifier, +{ #[allow(clippy::too_many_arguments)] pub fn new( pool: Pool, + carrier_client: A, reward_period_duration: Duration, 
reward_offset: Duration, mobile_rewards: FileSinkClient, reward_manifests: FileSinkClient, price_tracker: PriceTracker, - max_distance_to_asserted: u32, ) -> Self { Self { pool, + carrier_client, reward_period_duration, reward_offset, mobile_rewards, reward_manifests, price_tracker, - max_distance_to_asserted, } } @@ -113,7 +122,19 @@ impl Rewarder { .await? == 0 { - tracing::info!("No heartbeats found past reward period"); + tracing::info!("No cbrs heartbeats found past reward period"); + return Ok(false); + } + + if sqlx::query_scalar::<_, i64>( + "SELECT COUNT(*) FROM wifi_heartbeats WHERE latest_timestamp >= $1", + ) + .bind(reward_period.end) + .fetch_one(&self.pool) + .await? + == 0 + { + tracing::info!("No wifi heartbeats found past reward period"); return Ok(false); } @@ -142,18 +163,6 @@ impl Rewarder { reward_period.end ); - let heartbeats = - HeartbeatReward::validated(&self.pool, reward_period, self.max_distance_to_asserted) - .await?; - let speedtest_averages = - SpeedtestAverages::aggregate_epoch_averages(reward_period.end, &self.pool).await?; - let coverage_points = CoveragePoints::aggregate_points( - &self.pool, - heartbeats, - &speedtest_averages, - reward_period.end, - ) - .await?; let mobile_price = self .price_tracker .price(&helium_proto::BlockchainTokenTypeV1::Mobile) @@ -163,72 +172,39 @@ impl Rewarder { let mobile_bone_price = Decimal::from(mobile_price) / dec!(1_000_000) // Per Mobile token / dec!(1_000_000); // Per Bone - let transfer_rewards = TransferRewards::from_transfer_sessions( - mobile_bone_price, - data_session::aggregate_hotspot_data_sessions_to_dc(&self.pool, reward_period).await?, + + // process rewards for poc and data transfer + reward_poc_and_dc( + &self.pool, + &self.mobile_rewards, reward_period, + mobile_bone_price, ) - .await; + .await?; - // It's important to gauge the scale metric. If this value is < 1.0, we are in - // big trouble. - let Some(scale) = transfer_rewards.reward_scale().to_f64() else { - bail!("The data transfer rewards scale cannot be converted to a float"); - }; - telemetry::data_transfer_rewards_scale(scale); - - if let Some(mobile_reward_shares) = - coverage_points.into_rewards(transfer_rewards.reward_sum(), reward_period) - { - for mobile_reward_share in mobile_reward_shares { - self.mobile_rewards - .write(mobile_reward_share, []) - .await? - // Await the returned one shot to ensure that we wrote the file - .await??; - } + // process rewards for mappers + reward_mappers(&self.pool, &self.mobile_rewards, reward_period).await?; - for mobile_reward_share in transfer_rewards.into_rewards(reward_period) { - self.mobile_rewards - .write(mobile_reward_share, []) - .await? - // Await the returned one shot to ensure that we wrote the file - .await??; - } - } + // process rewards for service providers + reward_service_providers( + &self.pool, + &self.carrier_client, + &self.mobile_rewards, + reward_period, + mobile_bone_price, + ) + .await?; - // Mapper rewards currently include rewards for discovery mapping only. 
- // Verification mapping rewards to be added - // Any subscriber for which the carrier has submitted a location sharing report - // during the epoch will be eligible for discovery mapping rewards - - // get subscriber location shares this epoch - let location_shares = - subscriber_location::aggregate_location_shares(&self.pool, reward_period).await?; - - // determine mapping shares based on location shares and data transferred - let mapping_shares = MapperShares::new(location_shares); - let rewards_per_share = mapping_shares.rewards_per_share(reward_period)?; - - // translate discovery mapping shares into subscriber rewards - for mapping_share in - mapping_shares.into_subscriber_rewards(reward_period, rewards_per_share) - { - self.mobile_rewards - .write(mapping_share.clone(), []) - .await? - // Await the returned one shot to ensure that we wrote the file - .await??; - } + // process rewards for oracles + reward_oracles(&self.mobile_rewards, reward_period).await?; let written_files = self.mobile_rewards.commit().await?.await??; let mut transaction = self.pool.begin().await?; - // clear out the various db tables heartbeats::clear_heartbeats(&mut transaction, &reward_period.start).await?; speedtests::clear_speedtests(&mut transaction, &reward_period.start).await?; - data_session::clear_hotspot_data_sessions(&mut transaction, &reward_period.end).await?; + data_session::clear_hotspot_data_sessions(&mut transaction, &reward_period.start).await?; coverage::clear_coverage_objects(&mut transaction, &reward_period.start).await?; // subscriber_location::clear_location_shares(&mut transaction, &reward_period.end).await?; @@ -256,6 +232,242 @@ impl Rewarder { } } +pub async fn reward_poc_and_dc( + pool: &Pool, + mobile_rewards: &FileSinkClient, + reward_period: &Range>, + mobile_bone_price: Decimal, +) -> anyhow::Result<()> { + let transfer_rewards = TransferRewards::from_transfer_sessions( + mobile_bone_price, + data_session::aggregate_hotspot_data_sessions_to_dc(pool, reward_period).await?, + reward_period, + ) + .await; + let transfer_rewards_sum = transfer_rewards.reward_sum(); + // It's important to gauge the scale metric. If this value is < 1.0, we are in + // big trouble. + let Some(scale) = transfer_rewards.reward_scale().to_f64() else { + bail!("The data transfer rewards scale cannot be converted to a float"); + }; + telemetry::data_transfer_rewards_scale(scale); + + reward_poc(pool, mobile_rewards, reward_period, transfer_rewards_sum).await?; + + reward_dc(mobile_rewards, reward_period, transfer_rewards).await?; + + Ok(()) +} + +async fn reward_poc( + pool: &Pool, + mobile_rewards: &FileSinkClient, + reward_period: &Range>, + transfer_reward_sum: Decimal, +) -> anyhow::Result<()> { + let total_poc_rewards = + reward_shares::get_scheduled_tokens_for_poc(reward_period.end - reward_period.start) + - transfer_reward_sum; + + let heartbeats = HeartbeatReward::validated(pool, reward_period); + let speedtest_averages = + SpeedtestAverages::aggregate_epoch_averages(reward_period.end, pool).await?; + let coverage_points = + CoveragePoints::aggregate_points(pool, heartbeats, &speedtest_averages, reward_period.end) + .await?; + + if let Some(mobile_reward_shares) = + coverage_points.into_rewards(total_poc_rewards, reward_period) + { + // handle poc reward outputs + let mut allocated_poc_rewards = 0_u64; + for (poc_reward_amount, mobile_reward_share) in mobile_reward_shares { + allocated_poc_rewards += poc_reward_amount; + mobile_rewards + .write(mobile_reward_share, []) + .await? 
+ // Await the returned one shot to ensure that we wrote the file + .await??; + } + // write out any unallocated poc reward + let unallocated_poc_reward_amount = (total_poc_rewards + - Decimal::from(allocated_poc_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + write_unallocated_reward( + mobile_rewards, + UnallocatedRewardType::Poc, + unallocated_poc_reward_amount, + reward_period, + ) + .await?; + }; + Ok(()) +} + +pub async fn reward_dc( + mobile_rewards: &FileSinkClient, + reward_period: &Range>, + transfer_rewards: TransferRewards, +) -> anyhow::Result<()> { + // handle dc reward outputs + let mut allocated_dc_rewards = 0_u64; + let total_dc_rewards = transfer_rewards.total(); + for (dc_reward_amount, mobile_reward_share) in transfer_rewards.into_rewards(reward_period) { + allocated_dc_rewards += dc_reward_amount; + mobile_rewards + .write(mobile_reward_share, []) + .await? + // Await the returned one shot to ensure that we wrote the file + .await??; + } + // write out any unallocated dc reward + let unallocated_dc_reward_amount = (total_dc_rewards - Decimal::from(allocated_dc_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + write_unallocated_reward( + mobile_rewards, + UnallocatedRewardType::Data, + unallocated_dc_reward_amount, + reward_period, + ) + .await?; + Ok(()) +} + +pub async fn reward_mappers( + pool: &Pool, + mobile_rewards: &FileSinkClient, + reward_period: &Range>, +) -> anyhow::Result<()> { + // Mapper rewards currently include rewards for discovery mapping only. + // Verification mapping rewards to be added + // get subscriber location shares this epoch + let location_shares = + subscriber_location::aggregate_location_shares(pool, reward_period).await?; + + // determine mapping shares based on location shares and data transferred + let mapping_shares = MapperShares::new(location_shares); + let total_mappers_pool = + reward_shares::get_scheduled_tokens_for_mappers(reward_period.end - reward_period.start); + let rewards_per_share = mapping_shares.rewards_per_share(total_mappers_pool)?; + + // translate discovery mapping shares into subscriber rewards + let mut allocated_mapping_rewards = 0_u64; + for (reward_amount, mapping_share) in + mapping_shares.into_subscriber_rewards(reward_period, rewards_per_share) + { + allocated_mapping_rewards += reward_amount; + mobile_rewards + .write(mapping_share.clone(), []) + .await? 
+ // Await the returned one shot to ensure that we wrote the file + .await??; + } + + // write out any unallocated mapping rewards + let unallocated_mapping_reward_amount = (total_mappers_pool + - Decimal::from(allocated_mapping_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + write_unallocated_reward( + mobile_rewards, + UnallocatedRewardType::Mapper, + unallocated_mapping_reward_amount, + reward_period, + ) + .await?; + Ok(()) +} + +pub async fn reward_oracles( + mobile_rewards: &FileSinkClient, + reward_period: &Range>, +) -> anyhow::Result<()> { + // atm 100% of oracle rewards are assigned to 'unallocated' + let total_oracle_rewards = + reward_shares::get_scheduled_tokens_for_oracles(reward_period.end - reward_period.start); + let allocated_oracle_rewards = 0_u64; + let unallocated_oracle_reward_amount = (total_oracle_rewards + - Decimal::from(allocated_oracle_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + write_unallocated_reward( + mobile_rewards, + UnallocatedRewardType::Oracle, + unallocated_oracle_reward_amount, + reward_period, + ) + .await?; + Ok(()) +} + +pub async fn reward_service_providers( + pool: &Pool, + carrier_client: &impl CarrierServiceVerifier, + mobile_rewards: &FileSinkClient, + reward_period: &Range>, + mobile_bone_price: Decimal, +) -> anyhow::Result<()> { + let payer_dc_sessions = + data_session::sum_data_sessions_to_dc_by_payer(pool, reward_period).await?; + let sp_shares = + ServiceProviderShares::from_payers_dc(payer_dc_sessions, carrier_client).await?; + let total_sp_rewards = reward_shares::get_scheduled_tokens_for_service_providers( + reward_period.end - reward_period.start, + ); + let rewards_per_share = sp_shares.rewards_per_share(total_sp_rewards, mobile_bone_price)?; + // translate service provider shares into service provider rewards + // track the amount of allocated reward value as we go + let mut allocated_sp_rewards = 0_u64; + for (amount, sp_share) in + sp_shares.into_service_provider_rewards(reward_period, rewards_per_share) + { + allocated_sp_rewards += amount; + mobile_rewards.write(sp_share.clone(), []).await?.await??; + } + // write out any unallocated service provider reward + let unallocated_sp_reward_amount = (total_sp_rewards - Decimal::from(allocated_sp_rewards)) + .round_dp_with_strategy(0, RoundingStrategy::ToZero) + .to_u64() + .unwrap_or(0); + write_unallocated_reward( + mobile_rewards, + UnallocatedRewardType::ServiceProvider, + unallocated_sp_reward_amount, + reward_period, + ) + .await?; + Ok(()) +} + +async fn write_unallocated_reward( + mobile_rewards: &FileSinkClient, + unallocated_type: UnallocatedRewardType, + unallocated_amount: u64, + reward_period: &'_ Range>, +) -> anyhow::Result<()> { + if unallocated_amount > 0 { + let unallocated_reward = proto::MobileRewardShare { + start_period: reward_period.start.encode_timestamp(), + end_period: reward_period.end.encode_timestamp(), + reward: Some(ProtoReward::UnallocatedReward(UnallocatedReward { + reward_type: unallocated_type as i32, + amount: unallocated_amount, + })), + }; + mobile_rewards + .write(unallocated_reward, []) + .await? 
+ .await??; + }; + Ok(()) +} + pub async fn last_rewarded_end_time(db: &Pool) -> db_store::Result> { Utc.timestamp_opt(meta::fetch(db, "last_rewarded_end_time").await?, 0) .single() diff --git a/mobile_verifier/src/speedtests.rs b/mobile_verifier/src/speedtests.rs index 6824419f5..c336f2635 100644 --- a/mobile_verifier/src/speedtests.rs +++ b/mobile_verifier/src/speedtests.rs @@ -149,7 +149,7 @@ where speedtest_report: CellSpeedtestIngestReport, result: SpeedtestVerificationResult, ) -> anyhow::Result<()> { - let ingest_report: SpeedtestIngestReportV1 = speedtest_report.try_into()?; + let ingest_report: SpeedtestIngestReportV1 = speedtest_report.into(); let timestamp: u64 = Utc::now().timestamp_millis() as u64; let proto = VerifiedSpeedtestProto { report: Some(ingest_report), diff --git a/mobile_verifier/src/subscriber_location.rs b/mobile_verifier/src/subscriber_location.rs index 6e61002fa..0a8d8822a 100644 --- a/mobile_verifier/src/subscriber_location.rs +++ b/mobile_verifier/src/subscriber_location.rs @@ -92,7 +92,7 @@ where // if the report is valid then save to the db // and thus available to be rewarded if verified_report_status == SubscriberReportVerificationStatus::Valid { - self.save(&loc_ingest_report, &mut transaction).await?; + save(&loc_ingest_report, &mut transaction).await?; } // write out paper trail of verified report, valid or invalid @@ -120,24 +120,6 @@ where Ok(()) } - pub async fn save( - &self, - loc_ingest_report: &SubscriberLocationIngestReport, - db: &mut Transaction<'_, Postgres>, - ) -> Result<(), sqlx::Error> { - sqlx::query( - r#" - INSERT INTO subscriber_loc_verified (subscriber_id, received_timestamp) - VALUES ($1, $2) - "#, - ) - .bind(loc_ingest_report.report.subscriber_id.clone()) - .bind(loc_ingest_report.received_timestamp) - .execute(&mut *db) - .await?; - Ok(()) - } - async fn verify_report( &self, report: &SubscriberLocationReq, @@ -174,6 +156,23 @@ where } } +pub async fn save( + loc_ingest_report: &SubscriberLocationIngestReport, + db: &mut Transaction<'_, Postgres>, +) -> Result<(), sqlx::Error> { + sqlx::query( + r#" + INSERT INTO subscriber_loc_verified (subscriber_id, received_timestamp) + VALUES ($1, $2) + "#, + ) + .bind(loc_ingest_report.report.subscriber_id.clone()) + .bind(loc_ingest_report.received_timestamp) + .execute(&mut *db) + .await?; + Ok(()) +} + #[derive(sqlx::FromRow)] pub struct SubscriberLocationShare { pub subscriber_id: Vec, diff --git a/mobile_verifier/tests/common/mod.rs b/mobile_verifier/tests/common/mod.rs new file mode 100644 index 000000000..a81dfa10f --- /dev/null +++ b/mobile_verifier/tests/common/mod.rs @@ -0,0 +1,135 @@ +use file_store::file_sink::{FileSinkClient, Message as SinkMessage}; +use helium_proto::{ + services::poc_mobile::{ + mobile_reward_share::Reward as MobileReward, GatewayReward, MobileRewardShare, RadioReward, + ServiceProviderReward, SubscriberReward, UnallocatedReward, + }, + Message, +}; +use std::collections::HashMap; +use tokio::{sync::mpsc::error::TryRecvError, time::timeout}; + +pub type ValidSpMap = HashMap; + +#[derive(Debug, Clone)] +pub struct MockCarrierServiceClient { + pub valid_sps: ValidSpMap, +} + +pub struct MockFileSinkReceiver { + pub receiver: tokio::sync::mpsc::Receiver, +} + +#[allow(dead_code)] +impl MockFileSinkReceiver { + pub async fn receive(&mut self) -> Option> { + match timeout(seconds(2), self.receiver.recv()).await { + Ok(Some(SinkMessage::Data(on_write_tx, msg))) => { + let _ = on_write_tx.send(Ok(())); + Some(msg) + } + Ok(None) => None, + Err(e) => 
panic!("timeout while waiting for message1 {:?}", e), + Ok(Some(unexpected_msg)) => { + println!("ignoring unexpected msg {:?}", unexpected_msg); + None + } + } + } + + pub fn assert_no_messages(&mut self) { + let Err(TryRecvError::Empty) = self.receiver.try_recv() else { + panic!("receiver should have been empty") + }; + } + + pub async fn receive_radio_reward(&mut self) -> RadioReward { + match self.receive().await { + Some(bytes) => { + let mobile_reward = MobileRewardShare::decode(bytes.as_slice()) + .expect("failed to decode expected radio reward"); + println!("mobile_reward: {:?}", mobile_reward); + match mobile_reward.reward { + Some(MobileReward::RadioReward(r)) => r, + _ => panic!("failed to get radio reward"), + } + } + None => panic!("failed to receive radio reward"), + } + } + + pub async fn receive_gateway_reward(&mut self) -> GatewayReward { + match self.receive().await { + Some(bytes) => { + let mobile_reward = MobileRewardShare::decode(bytes.as_slice()) + .expect("failed to decode expected gateway reward"); + println!("mobile_reward: {:?}", mobile_reward); + match mobile_reward.reward { + Some(MobileReward::GatewayReward(r)) => r, + _ => panic!("failed to get gateway reward"), + } + } + None => panic!("failed to receive gateway reward"), + } + } + + pub async fn receive_service_provider_reward(&mut self) -> ServiceProviderReward { + match self.receive().await { + Some(bytes) => { + let mobile_reward = MobileRewardShare::decode(bytes.as_slice()) + .expect("failed to decode expected service provider reward"); + println!("mobile_reward: {:?}", mobile_reward); + match mobile_reward.reward { + Some(MobileReward::ServiceProviderReward(r)) => r, + _ => panic!("failed to get service provider reward"), + } + } + None => panic!("failed to receive service provider reward"), + } + } + + pub async fn receive_subscriber_reward(&mut self) -> SubscriberReward { + match self.receive().await { + Some(bytes) => { + let mobile_reward = MobileRewardShare::decode(bytes.as_slice()) + .expect("failed to decode expected subscriber reward"); + println!("mobile_reward: {:?}", mobile_reward); + match mobile_reward.reward { + Some(MobileReward::SubscriberReward(r)) => r, + _ => panic!("failed to get subscriber reward"), + } + } + None => panic!("failed to receive subscriber reward"), + } + } + + pub async fn receive_unallocated_reward(&mut self) -> UnallocatedReward { + match self.receive().await { + Some(bytes) => { + let mobile_reward = MobileRewardShare::decode(bytes.as_slice()) + .expect("failed to decode expected unallocated reward"); + println!("mobile_reward: {:?}", mobile_reward); + match mobile_reward.reward { + Some(MobileReward::UnallocatedReward(r)) => r, + _ => panic!("failed to get unallocated reward"), + } + } + None => panic!("failed to receive unallocated reward"), + } + } +} + +pub fn create_file_sink() -> (FileSinkClient, MockFileSinkReceiver) { + let (tx, rx) = tokio::sync::mpsc::channel(20); + ( + FileSinkClient { + sender: tx, + metric: "metric", + }, + MockFileSinkReceiver { receiver: rx }, + ) +} + +pub fn seconds(s: u64) -> std::time::Duration { + std::time::Duration::from_secs(s) +} diff --git a/mobile_verifier/tests/heartbeats.rs b/mobile_verifier/tests/heartbeats.rs index 716d100a3..9b5700fbb 100644 --- a/mobile_verifier/tests/heartbeats.rs +++ b/mobile_verifier/tests/heartbeats.rs @@ -1,5 +1,5 @@ use chrono::{DateTime, Utc}; -use futures_util::StreamExt; +use futures_util::TryStreamExt; use helium_crypto::PublicKeyBinary; use 
helium_proto::services::poc_mobile::HeartbeatValidity; use mobile_verifier::cell_type::CellType; @@ -30,6 +30,7 @@ async fn test_save_wifi_heartbeat(pool: PgPool) -> anyhow::Result<()> { cell_type: CellType::SercommIndoor, distance_to_asserted: None, coverage_summary: None, + location_trust_score_multiplier: dec!(1.0), validity: HeartbeatValidity::Valid, }; @@ -67,6 +68,7 @@ async fn test_save_cbrs_heartbeat(pool: PgPool) -> anyhow::Result<()> { cell_type: CellType::SercommIndoor, distance_to_asserted: None, coverage_summary: None, + location_trust_score_multiplier: dec!(1.0), validity: HeartbeatValidity::Valid, }; @@ -95,32 +97,32 @@ async fn only_fetch_latest_hotspot(pool: PgPool) -> anyhow::Result<()> { "11sctWiP9r5wDJVuDe1Th4XSL2vaawaLLSQF8f8iokAoMAJHxqp".parse()?; sqlx::query( r#" -INSERT INTO cbrs_heartbeats (cbsd_id, hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object) +INSERT INTO cbrs_heartbeats (cbsd_id, hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object, location_trust_score_multiplier) VALUES - ($1, $2, 'sercommindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 11:00:00+00', '2023-08-25 11:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 12:00:00+00', '2023-08-25 12:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 13:00:00+00', '2023-08-25 13:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 14:00:00+00', '2023-08-25 14:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 15:00:00+00', '2023-08-25 15:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 16:00:00+00', '2023-08-25 16:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 17:00:00+00', '2023-08-25 17:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 18:00:00+00', '2023-08-25 18:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 19:00:00+00', '2023-08-25 19:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 20:00:00+00', '2023-08-25 20:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 21:00:00+00', '2023-08-25 21:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 22:00:00+00', '2023-08-25 22:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 23:00:00+00', '2023-08-25 23:00:00+00', $4) + ($1, $2, 'sercommindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', 
$4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 11:00:00+00', '2023-08-25 11:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 12:00:00+00', '2023-08-25 12:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 13:00:00+00', '2023-08-25 13:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 14:00:00+00', '2023-08-25 14:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 15:00:00+00', '2023-08-25 15:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 16:00:00+00', '2023-08-25 16:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 17:00:00+00', '2023-08-25 17:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 18:00:00+00', '2023-08-25 18:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 19:00:00+00', '2023-08-25 19:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 20:00:00+00', '2023-08-25 20:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 21:00:00+00', '2023-08-25 21:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 22:00:00+00', '2023-08-25 22:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 23:00:00+00', '2023-08-25 23:00:00+00', $4, 1.0) "#, ) .bind(&cbsd_id) @@ -132,17 +134,9 @@ VALUES let start_period: DateTime = "2023-08-25 00:00:00.000000000 UTC".parse()?; let end_period: DateTime = "2023-08-26 00:00:00.000000000 UTC".parse()?; - let latest_timestamp: DateTime = "2023-08-25 23:00:00.000000000 UTC".parse()?; - let max_asserted_distance_deviation: u32 = 300; - - let heartbeat_reward: Vec<_> = HeartbeatReward::validated( - &pool, - &(start_period..end_period), - max_asserted_distance_deviation, - ) - .await? 
- .collect() - .await; + let heartbeat_reward: Vec<_> = HeartbeatReward::validated(&pool, &(start_period..end_period)) + .try_collect() + .await?; assert_eq!( heartbeat_reward, @@ -151,7 +145,6 @@ VALUES cell_type, cbsd_id: Some(cbsd_id), location_trust_score_multiplier: Decimal::ONE, - latest_timestamp, coverage_object, }] ); @@ -171,20 +164,20 @@ async fn ensure_hotspot_does_not_affect_count(pool: PgPool) -> anyhow::Result<() "11sctWiP9r5wDJVuDe1Th4XSL2vaawaLLSQF8f8iokAoMAJHxqp".parse()?; sqlx::query( r#" -INSERT INTO cbrs_heartbeats (cbsd_id, hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object) +INSERT INTO cbrs_heartbeats (cbsd_id, hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object, location_trust_score_multiplier) VALUES - ($1, $2, 'sercommindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', $4), - ($1, $2, 'sercommindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', $4), - ($1, $3, 'sercommindoor', '2023-08-25 11:00:00+00', '2023-08-25 11:00:00+00', $4) + ($1, $2, 'sercommindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', $4, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', $4, 1.0), + ($1, $3, 'sercommindoor', '2023-08-25 11:00:00+00', '2023-08-25 11:00:00+00', $4, 1.0) "#, ) .bind(&cbsd_id) @@ -196,16 +189,9 @@ VALUES let start_period: DateTime = "2023-08-25 00:00:00.000000000 UTC".parse()?; let end_period: DateTime = "2023-08-26 00:00:00.000000000 UTC".parse()?; - let latest_timestamp: DateTime = "2023-08-25 11:00:00.000000000 UTC".parse()?; - let max_asserted_distance_deviation: u32 = 300; - let heartbeat_reward: Vec<_> = HeartbeatReward::validated( - &pool, - &(start_period..end_period), - max_asserted_distance_deviation, - ) - .await? 
- .collect() - .await; + let heartbeat_reward: Vec<_> = HeartbeatReward::validated(&pool, &(start_period..end_period)) + .try_collect() + .await?; assert_eq!( heartbeat_reward, @@ -214,7 +200,6 @@ VALUES cell_type, cbsd_id: Some(cbsd_id), location_trust_score_multiplier: Decimal::ONE, - latest_timestamp, coverage_object, }] ); @@ -231,19 +216,19 @@ async fn ensure_minimum_count(pool: PgPool) -> anyhow::Result<()> { "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6".parse()?; sqlx::query( r#" -INSERT INTO cbrs_heartbeats (cbsd_id, hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object) +INSERT INTO cbrs_heartbeats (cbsd_id, hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object, location_trust_score_multiplier) VALUES - ($1, $2, 'sercommindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', $3), - ($1, $2, 'sercommindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', $3) + ($1, $2, 'sercommindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', $3, 1.0), + ($1, $2, 'sercommindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', $3, 1.0) "#, ) .bind(&cbsd_id) @@ -254,16 +239,9 @@ VALUES let start_period: DateTime = "2023-08-25 00:00:00.000000000 UTC".parse()?; let end_period: DateTime = "2023-08-26 00:00:00.000000000 UTC".parse()?; - let max_asserted_distance_deviation: u32 = 300; - - let heartbeat_reward: Vec<_> = HeartbeatReward::validated( - &pool, - &(start_period..end_period), - max_asserted_distance_deviation, - ) - .await? 
- .collect() - .await; + let heartbeat_reward: Vec<_> = HeartbeatReward::validated(&pool, &(start_period..end_period)) + .try_collect() + .await?; assert!(heartbeat_reward.is_empty()); @@ -279,20 +257,20 @@ async fn ensure_wifi_hotspots_are_rewarded(pool: PgPool) -> anyhow::Result<()> { "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6".parse()?; sqlx::query( r#" -INSERT INTO wifi_heartbeats (hotspot_key, cell_type, latest_timestamp, truncated_timestamp, location_validation_timestamp, distance_to_asserted, coverage_object) +INSERT INTO wifi_heartbeats (hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object, location_trust_score_multiplier) VALUES - ($1, 'novagenericwifiindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 11:00:00+00', '2023-08-25 11:00:00+00', NOW(), 300, $3) + ($1, 'novagenericwifiindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 11:00:00+00', '2023-08-25 11:00:00+00', $3, 1.0) "#, ) .bind(&hotspot) @@ -303,16 +281,9 @@ VALUES let start_period: DateTime = "2023-08-25 00:00:00.000000000 UTC".parse()?; let end_period: DateTime = "2023-08-26 00:00:00.000000000 UTC".parse()?; - let latest_timestamp: DateTime = "2023-08-25 11:00:00.000000000 UTC".parse()?; - let max_asserted_distance_deviation: u32 = 300; - let heartbeat_reward: Vec<_> = HeartbeatReward::validated( - &pool, - &(start_period..end_period), - max_asserted_distance_deviation, - ) - .await? 
- .collect() - .await; + let heartbeat_reward: Vec<_> = HeartbeatReward::validated(&pool, &(start_period..end_period)) + .try_collect() + .await?; assert_eq!( heartbeat_reward, @@ -321,7 +292,6 @@ VALUES cell_type: CellType::NovaGenericWifiIndoor, cbsd_id: None, location_trust_score_multiplier: dec!(1.0), - latest_timestamp, coverage_object: latest_coverage_object, }] ); @@ -338,20 +308,20 @@ async fn ensure_wifi_hotspots_use_average_location_trust_score(pool: PgPool) -> "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6".parse()?; sqlx::query( r#" -INSERT INTO wifi_heartbeats (hotspot_key, cell_type, latest_timestamp, truncated_timestamp, location_validation_timestamp, distance_to_asserted, coverage_object) +INSERT INTO wifi_heartbeats (hotspot_key, cell_type, latest_timestamp, truncated_timestamp, coverage_object, location_trust_score_multiplier) VALUES - ($1, 'novagenericwifiindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', NOW(), 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', null, 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', null, 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', null, 300, $2), - ($1, 'novagenericwifiindoor', '2023-08-25 11:00:00+00', '2023-08-25 11:00:00+00', null, 300, $3) + ($1, 'novagenericwifiindoor', '2023-08-25 00:00:00+00', '2023-08-25 00:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 01:00:00+00', '2023-08-25 01:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 02:00:00+00', '2023-08-25 02:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 03:00:00+00', '2023-08-25 03:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 04:00:00+00', '2023-08-25 04:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 05:00:00+00', '2023-08-25 05:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 06:00:00+00', '2023-08-25 06:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 07:00:00+00', '2023-08-25 07:00:00+00', $2, 1.0), + ($1, 'novagenericwifiindoor', '2023-08-25 08:00:00+00', '2023-08-25 08:00:00+00', $2, 0.25), + ($1, 'novagenericwifiindoor', '2023-08-25 09:00:00+00', '2023-08-25 09:00:00+00', $2, 0.25), + ($1, 'novagenericwifiindoor', '2023-08-25 10:00:00+00', '2023-08-25 10:00:00+00', $2, 0.25), + ($1, 'novagenericwifiindoor', '2023-08-25 11:00:00+00', '2023-08-25 11:00:00+00', $3, 0.25) "#, ) .bind(&hotspot) @@ -362,16 +332,9 @@ VALUES let start_period: DateTime = "2023-08-25 00:00:00.000000000 UTC".parse()?; let end_period: DateTime = "2023-08-26 00:00:00.000000000 UTC".parse()?; - let latest_timestamp: DateTime = "2023-08-25 11:00:00.000000000 UTC".parse()?; - let max_asserted_distance_deviation: u32 = 300; - let 
heartbeat_reward: Vec<_> = HeartbeatReward::validated( - &pool, - &(start_period..end_period), - max_asserted_distance_deviation, - ) - .await? - .collect() - .await; + let heartbeat_reward: Vec<_> = HeartbeatReward::validated(&pool, &(start_period..end_period)) + .try_collect() + .await?; assert_eq!( heartbeat_reward, @@ -380,7 +343,6 @@ VALUES cell_type: CellType::NovaGenericWifiIndoor, cbsd_id: None, location_trust_score_multiplier: dec!(0.75), - latest_timestamp, coverage_object: latest_coverage_object, }] ); diff --git a/mobile_verifier/tests/modeled_coverage.rs b/mobile_verifier/tests/modeled_coverage.rs index d65f64749..c70ce8589 100644 --- a/mobile_verifier/tests/modeled_coverage.rs +++ b/mobile_verifier/tests/modeled_coverage.rs @@ -392,6 +392,7 @@ async fn process_input( &AllOwnersValid, stream::iter(heartbeats.map(Heartbeat::from)), &coverage_objects, + 2000, epoch, )); while let Some(heartbeat) = heartbeats.next().await.transpose()? { @@ -466,7 +467,7 @@ async fn scenario_one(pool: PgPool) -> anyhow::Result<()> { let speedtest_avgs = SpeedtestAverages { averages }; let reward_period = start..end; - let heartbeats = HeartbeatReward::validated(&pool, &reward_period, 1000).await?; + let heartbeats = HeartbeatReward::validated(&pool, &reward_period); let coverage_points = CoveragePoints::aggregate_points(&pool, heartbeats, &speedtest_avgs, end).await?; @@ -559,7 +560,7 @@ async fn scenario_two(pool: PgPool) -> anyhow::Result<()> { let speedtest_avgs = SpeedtestAverages { averages }; let reward_period = start..end; - let heartbeats = HeartbeatReward::validated(&pool, &reward_period, 1000).await?; + let heartbeats = HeartbeatReward::validated(&pool, &reward_period); let coverage_points = CoveragePoints::aggregate_points(&pool, heartbeats, &speedtest_avgs, end).await?; @@ -793,7 +794,7 @@ async fn scenario_three(pool: PgPool) -> anyhow::Result<()> { let speedtest_avgs = SpeedtestAverages { averages }; let reward_period = start..end; - let heartbeats = HeartbeatReward::validated(&pool, &reward_period, 1000).await?; + let heartbeats = HeartbeatReward::validated(&pool, &reward_period); let coverage_points = CoveragePoints::aggregate_points(&pool, heartbeats, &speedtest_avgs, end).await?; @@ -858,7 +859,7 @@ async fn scenario_four(pool: PgPool) -> anyhow::Result<()> { let speedtest_avgs = SpeedtestAverages { averages }; let reward_period = start..end; - let heartbeats = HeartbeatReward::validated(&pool, &reward_period, 1000).await?; + let heartbeats = HeartbeatReward::validated(&pool, &reward_period); let coverage_points = CoveragePoints::aggregate_points(&pool, heartbeats, &speedtest_avgs, end).await?; @@ -950,7 +951,7 @@ async fn scenario_five(pool: PgPool) -> anyhow::Result<()> { let speedtest_avgs = SpeedtestAverages { averages }; let reward_period = start..end; - let heartbeats = HeartbeatReward::validated(&pool, &reward_period, 1000).await?; + let heartbeats = HeartbeatReward::validated(&pool, &reward_period); let coverage_points = CoveragePoints::aggregate_points(&pool, heartbeats, &speedtest_avgs, end).await?; @@ -1190,7 +1191,7 @@ async fn scenario_six(pool: PgPool) -> anyhow::Result<()> { let speedtest_avgs = SpeedtestAverages { averages }; let reward_period = start..end; - let heartbeats = HeartbeatReward::validated(&pool, &reward_period, 1000).await?; + let heartbeats = HeartbeatReward::validated(&pool, &reward_period); let coverage_points = CoveragePoints::aggregate_points(&pool, heartbeats, &speedtest_avgs, end).await?; diff --git a/mobile_verifier/tests/rewarder_mappers.rs 
b/mobile_verifier/tests/rewarder_mappers.rs
new file mode 100644
index 000000000..8b61025b9
--- /dev/null
+++ b/mobile_verifier/tests/rewarder_mappers.rs
@@ -0,0 +1,161 @@
+mod common;
+use crate::common::MockFileSinkReceiver;
+use chrono::{DateTime, Duration as ChronoDuration, Utc};
+use file_store::mobile_subscriber::{SubscriberLocationIngestReport, SubscriberLocationReq};
+use helium_crypto::PublicKeyBinary;
+use helium_proto::{
+    services::poc_mobile::{SubscriberReward, UnallocatedReward, UnallocatedRewardType},
+    Message,
+};
+use mobile_verifier::{reward_shares, rewarder, subscriber_location};
+use rust_decimal::prelude::*;
+use rust_decimal_macros::dec;
+use sqlx::{PgPool, Postgres, Transaction};
+use std::{str::FromStr, string::ToString};
+
+const SUBSCRIBER_1: &str = "subscriber1";
+const SUBSCRIBER_2: &str = "subscriber2";
+const SUBSCRIBER_3: &str = "subscriber3";
+const HOTSPOT_1: &str = "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6";
+
+#[sqlx::test]
+async fn test_mapper_rewards(pool: PgPool) -> anyhow::Result<()> {
+    let (mobile_rewards_client, mut mobile_rewards) = common::create_file_sink();
+    let now = Utc::now();
+    let epoch = (now - ChronoDuration::hours(24))..now;
+
+    // seed db
+    let mut txn = pool.clone().begin().await?;
+    seed_mapping_data(epoch.end, &mut txn).await?;
+    txn.commit().await.expect("db txn failed");
+
+    let (_, rewards) = tokio::join!(
+        rewarder::reward_mappers(&pool, &mobile_rewards_client, &epoch),
+        receive_expected_rewards(&mut mobile_rewards)
+    );
+    if let Ok((subscriber_rewards, unallocated_reward)) = rewards {
+        // assert the mapper rewards
+        // all 3 subscribers will have an equal share,
+        // requirement is 1 qualifying mapping criteria report per epoch
+        // subscriber 1 has two qualifying mapping criteria reports,
+        // the other two subscribers have one qualifying report each
+        assert_eq!(
+            SUBSCRIBER_1.to_string().encode_to_vec(),
+            subscriber_rewards[0].subscriber_id
+        );
+        assert_eq!(
+            5_464_480_874_316,
+            subscriber_rewards[0].discovery_location_amount
+        );
+
+        assert_eq!(
+            SUBSCRIBER_2.to_string().encode_to_vec(),
+            subscriber_rewards[1].subscriber_id
+        );
+        assert_eq!(
+            5_464_480_874_316,
+            subscriber_rewards[1].discovery_location_amount
+        );
+
+        assert_eq!(
+            SUBSCRIBER_3.to_string().encode_to_vec(),
+            subscriber_rewards[2].subscriber_id
+        );
+        assert_eq!(
+            5_464_480_874_316,
+            subscriber_rewards[2].discovery_location_amount
+        );
+
+        // confirm our unallocated amount
+        assert_eq!(
+            UnallocatedRewardType::Mapper as i32,
+            unallocated_reward.reward_type
+        );
+        assert_eq!(2, unallocated_reward.amount);
+
+        // confirm the total rewards allocated matches expectations
+        let expected_sum = reward_shares::get_scheduled_tokens_for_mappers(epoch.end - epoch.start)
+            .to_u64()
+            .unwrap();
+        let subscriber_sum = subscriber_rewards[0].discovery_location_amount
+            + subscriber_rewards[1].discovery_location_amount
+            + subscriber_rewards[2].discovery_location_amount
+            + unallocated_reward.amount;
+        assert_eq!(expected_sum, subscriber_sum);
+
+        // confirm the rewarded percentage amount matches expectations
+        let daily_total = reward_shares::get_total_scheduled_tokens(epoch.end - epoch.start);
+        let percent = (Decimal::from(subscriber_sum) / daily_total)
+            .round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven);
+        assert_eq!(percent, dec!(0.2));
+    } else {
+        panic!("no rewards received");
+    };
+    Ok(())
+}
+
+async fn receive_expected_rewards(
+    mobile_rewards: &mut MockFileSinkReceiver,
+) -> anyhow::Result<(Vec<SubscriberReward>, UnallocatedReward)> {
+    // get the filestore outputs from rewards run
+    // we will have 3 subscriber rewards, one per subscriber
+    let subscriber_reward1 = mobile_rewards.receive_subscriber_reward().await;
+    let subscriber_reward2 = mobile_rewards.receive_subscriber_reward().await;
+    let subscriber_reward3 = mobile_rewards.receive_subscriber_reward().await;
+    let subscriber_rewards = vec![subscriber_reward1, subscriber_reward2, subscriber_reward3];
+
+    // expect one unallocated reward
+    let unallocated_reward = mobile_rewards.receive_unallocated_reward().await;
+
+    // should be no further msgs
+    mobile_rewards.assert_no_messages();
+
+    Ok((subscriber_rewards, unallocated_reward))
+}
+
+async fn seed_mapping_data(
+    ts: DateTime<Utc>,
+    txn: &mut Transaction<'_, Postgres>,
+) -> anyhow::Result<()> {
+    // subscriber 1 has two qualifying mapping criteria reports
+    // subscribers 2 and 3 have a single qualifying mapping criteria report
+
+    let report1 = SubscriberLocationIngestReport {
+        received_timestamp: ts - ChronoDuration::hours(1),
+        report: SubscriberLocationReq {
+            subscriber_id: SUBSCRIBER_1.to_string().encode_to_vec(),
+            timestamp: ts - ChronoDuration::hours(1),
+            carrier_pub_key: PublicKeyBinary::from_str(HOTSPOT_1).unwrap(),
+        },
+    };
+    let report2 = SubscriberLocationIngestReport {
+        received_timestamp: ts - ChronoDuration::hours(1),
+        report: SubscriberLocationReq {
+            subscriber_id: SUBSCRIBER_1.to_string().encode_to_vec(),
+            timestamp: ts - ChronoDuration::hours(2),
+            carrier_pub_key: PublicKeyBinary::from_str(HOTSPOT_1).unwrap(),
+        },
+    };
+    let report3 = SubscriberLocationIngestReport {
+        received_timestamp: ts - ChronoDuration::hours(1),
+        report: SubscriberLocationReq {
+            subscriber_id: SUBSCRIBER_2.to_string().encode_to_vec(),
+            timestamp: ts - ChronoDuration::hours(3),
+            carrier_pub_key: PublicKeyBinary::from_str(HOTSPOT_1).unwrap(),
+        },
+    };
+    let report4 = SubscriberLocationIngestReport {
+        received_timestamp: ts - ChronoDuration::hours(1),
+        report: SubscriberLocationReq {
+            subscriber_id: SUBSCRIBER_3.to_string().encode_to_vec(),
+            timestamp: ts - ChronoDuration::hours(3),
+            carrier_pub_key: PublicKeyBinary::from_str(HOTSPOT_1).unwrap(),
+        },
+    };
+    subscriber_location::save(&report1, txn).await?;
+    subscriber_location::save(&report2, txn).await?;
+    subscriber_location::save(&report3, txn).await?;
+    subscriber_location::save(&report4, txn).await?;
+
+    Ok(())
+}
diff --git a/mobile_verifier/tests/rewarder_oracles.rs b/mobile_verifier/tests/rewarder_oracles.rs
new file mode 100644
index 000000000..a9407c0f6
--- /dev/null
+++ b/mobile_verifier/tests/rewarder_oracles.rs
@@ -0,0 +1,57 @@
+mod common;
+use crate::common::MockFileSinkReceiver;
+use chrono::{Duration as ChronoDuration, Utc};
+use helium_proto::services::poc_mobile::{UnallocatedReward, UnallocatedRewardType};
+use mobile_verifier::{reward_shares, rewarder};
+use rust_decimal::prelude::*;
+use rust_decimal_macros::dec;
+use sqlx::PgPool;
+
+#[sqlx::test]
+async fn test_oracle_rewards(_pool: PgPool) -> anyhow::Result<()> {
+    let (mobile_rewards_client, mut mobile_rewards) = common::create_file_sink();
+    let now = Utc::now();
+    let epoch = (now - ChronoDuration::hours(24))..now;
+
+    let (_, rewards) = tokio::join!(
+        // run rewards for oracles
+        rewarder::reward_oracles(&mobile_rewards_client, &epoch),
+        receive_expected_rewards(&mut mobile_rewards)
+    );
+    if let Ok(unallocated_reward) = rewards {
+        assert_eq!(
+            UnallocatedRewardType::Oracle as i32,
+            unallocated_reward.reward_type
+        );
+        // confirm our unallocated amount
+        assert_eq!(3_278_688_524_590, unallocated_reward.amount);
+
+        // confirm the total rewards allocated matches expectations
+        let expected_sum = reward_shares::get_scheduled_tokens_for_oracles(epoch.end - epoch.start)
+            .to_u64()
+            .unwrap();
+        assert_eq!(expected_sum, unallocated_reward.amount);
+
+        // confirm the rewarded percentage amount matches expectations
+        let daily_total = reward_shares::get_total_scheduled_tokens(epoch.end - epoch.start);
+        let percent = (Decimal::from(unallocated_reward.amount) / daily_total)
+            .round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven);
+        assert_eq!(percent, dec!(0.04));
+    } else {
+        panic!("no rewards received");
+    };
+    Ok(())
+}
+
+async fn receive_expected_rewards(
+    mobile_rewards: &mut MockFileSinkReceiver,
+) -> anyhow::Result<UnallocatedReward> {
+    // expect one unallocated reward
+    // as oracle rewards are currently 100% unallocated
+    let unallocated_reward = mobile_rewards.receive_unallocated_reward().await;
+
+    // should be no further msgs
+    mobile_rewards.assert_no_messages();
+
+    Ok(unallocated_reward)
+}
diff --git a/mobile_verifier/tests/rewarder_poc_dc.rs b/mobile_verifier/tests/rewarder_poc_dc.rs
new file mode 100644
index 000000000..43f354654
--- /dev/null
+++ b/mobile_verifier/tests/rewarder_poc_dc.rs
@@ -0,0 +1,373 @@
+mod common;
+use crate::common::MockFileSinkReceiver;
+use chrono::{DateTime, Duration as ChronoDuration, Utc};
+use file_store::{
+    coverage::{CoverageObject as FSCoverageObject, KeyType, RadioHexSignalLevel},
+    speedtest::CellSpeedtest,
+};
+use helium_crypto::PublicKeyBinary;
+use helium_proto::services::poc_mobile::{
+    CoverageObjectValidity, GatewayReward, HeartbeatValidity, RadioReward, SeniorityUpdateReason,
+    SignalLevel, UnallocatedReward, UnallocatedRewardType,
+};
+use mobile_verifier::{
+    cell_type::CellType,
+    coverage::CoverageObject,
+    data_session,
+    heartbeats::{HbType, Heartbeat, ValidatedHeartbeat},
+    reward_shares, rewarder, speedtests,
+};
+use rust_decimal::prelude::*;
+use rust_decimal_macros::dec;
+use sqlx::{PgPool, Postgres, Transaction};
+use uuid::Uuid;
+
+const HOTSPOT_1: &str = "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6";
+const HOTSPOT_2: &str = "11uJHS2YaEWJqgqC7yza9uvSmpv5FWoMQXiP8WbxBGgNUmifUJf";
+const HOTSPOT_3: &str = "11sctWiP9r5wDJVuDe1Th4XSL2vaawaLLSQF8f8iokAoMAJHxqp";
+const PAYER_1: &str = "11eX55faMbqZB7jzN4p67m6w7ScPMH6ubnvCjCPLh72J49PaJEL";
+
+#[sqlx::test]
+async fn test_poc_and_dc_rewards(pool: PgPool) -> anyhow::Result<()> {
+    let (mobile_rewards_client, mut mobile_rewards) = common::create_file_sink();
+    let now = Utc::now();
+    let epoch = (now - ChronoDuration::hours(24))..now;
+
+    // seed all the things
+    let mut txn = pool.clone().begin().await?;
+    seed_heartbeats(epoch.start, &mut txn).await?;
+    seed_speedtests(epoch.end, &mut txn).await?;
+    seed_data_sessions(epoch.start, &mut txn).await?;
+    txn.commit().await?;
+
+    let (_, rewards) = tokio::join!(
+        // run rewards for poc and dc
+        rewarder::reward_poc_and_dc(&pool, &mobile_rewards_client, &epoch, dec!(0.0001)),
+        receive_expected_rewards(&mut mobile_rewards)
+    );
+    if let Ok((poc_rewards, dc_rewards, unallocated_poc_reward)) = rewards {
+        // assert poc reward outputs
+        assert_eq!(24_108_003_121_986, poc_rewards[0].poc_reward);
+        assert_eq!(
+            HOTSPOT_1.to_string(),
+            PublicKeyBinary::from(poc_rewards[0].hotspot_key.clone()).to_string()
+        );
+        assert_eq!(964_320_124_879, poc_rewards[1].poc_reward);
+        assert_eq!(
+            HOTSPOT_2.to_string(),
+            PublicKeyBinary::from(poc_rewards[1].hotspot_key.clone()).to_string()
+        );
+        assert_eq!(24_108_003_121_986, poc_rewards[2].poc_reward);
+        assert_eq!(
+            HOTSPOT_3.to_string(),
+            PublicKeyBinary::from(poc_rewards[2].hotspot_key.clone()).to_string()
+        );
+
+        // assert unallocated amount
+        assert_eq!(
+            UnallocatedRewardType::Poc as i32,
+            unallocated_poc_reward.reward_type
+        );
+        assert_eq!(1, unallocated_poc_reward.amount);
+
+        // assert the dc reward outputs
+        assert_eq!(500_000, dc_rewards[0].dc_transfer_reward);
+        assert_eq!(
+            HOTSPOT_1.to_string(),
+            PublicKeyBinary::from(dc_rewards[0].hotspot_key.clone()).to_string()
+        );
+        assert_eq!(500_000, dc_rewards[1].dc_transfer_reward);
+        assert_eq!(
+            HOTSPOT_2.to_string(),
+            PublicKeyBinary::from(dc_rewards[1].hotspot_key.clone()).to_string()
+        );
+        assert_eq!(500_000, dc_rewards[2].dc_transfer_reward);
+        assert_eq!(
+            HOTSPOT_3.to_string(),
+            PublicKeyBinary::from(dc_rewards[2].hotspot_key.clone()).to_string()
+        );
+
+        // confirm the total rewards allocated matches expectations
+        let poc_sum: u64 = poc_rewards.iter().map(|r| r.poc_reward).sum();
+        let dc_sum: u64 = dc_rewards.iter().map(|r| r.dc_transfer_reward).sum();
+        let unallocated_sum: u64 = unallocated_poc_reward.amount;
+        let total = poc_sum + dc_sum + unallocated_sum;
+
+        let expected_sum = reward_shares::get_scheduled_tokens_for_poc(epoch.end - epoch.start)
+            .to_u64()
+            .unwrap();
+        assert_eq!(expected_sum, total);
+
+        // confirm the rewarded percentage amount matches expectations
+        let daily_total = reward_shares::get_total_scheduled_tokens(epoch.end - epoch.start);
+        let percent = (Decimal::from(total) / daily_total)
+            .round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven);
+        assert_eq!(percent, dec!(0.6));
+    } else {
+        panic!("no rewards received");
+    };
+    Ok(())
+}
+
+async fn receive_expected_rewards(
+    mobile_rewards: &mut MockFileSinkReceiver,
+) -> anyhow::Result<(Vec<RadioReward>, Vec<GatewayReward>, UnallocatedReward)> {
+    // get the filestore outputs from rewards run
+    // we will have 3 radio rewards, 1 wifi radio and 2 cbrs radios
+    let radio_reward1 = mobile_rewards.receive_radio_reward().await;
+    let radio_reward2 = mobile_rewards.receive_radio_reward().await;
+    let radio_reward3 = mobile_rewards.receive_radio_reward().await;
+    // ordering is not guaranteed, so stick the rewards into a vec and sort
+    let mut poc_rewards = vec![radio_reward1, radio_reward2, radio_reward3];
+    // after sorting reward 1 = cbrs radio1, 2 = cbrs radio2, 3 = wifi radio
+    poc_rewards.sort_by(|a, b| b.hotspot_key.cmp(&a.hotspot_key));
+
+    // expect one unallocated reward for poc
+    let unallocated_poc_reward = mobile_rewards.receive_unallocated_reward().await;
+
+    // expect 3 gateway rewards for dc transfer
+    let dc_reward1 = mobile_rewards.receive_gateway_reward().await;
+    let dc_reward2 = mobile_rewards.receive_gateway_reward().await;
+    let dc_reward3 = mobile_rewards.receive_gateway_reward().await;
+    let mut dc_rewards = vec![dc_reward1, dc_reward2, dc_reward3];
+    dc_rewards.sort_by(|a, b| b.hotspot_key.cmp(&a.hotspot_key));
+
+    // should be no further msgs
+    mobile_rewards.assert_no_messages();
+
+    Ok((poc_rewards, dc_rewards, unallocated_poc_reward))
+}
+
+async fn seed_heartbeats(
+    ts: DateTime<Utc>,
+    txn: &mut Transaction<'_, Postgres>,
+) -> anyhow::Result<()> {
+    for n in 0..24 {
+        let hotspot_key1: PublicKeyBinary = HOTSPOT_1.to_string().parse().unwrap();
+        let cbsd_id_1 = "P27-SCE4255W0001".to_string();
+        let cov_obj_1 = create_coverage_object(
+            ts + ChronoDuration::hours(n),
+            Some(cbsd_id_1.clone()),
+            hotspot_key1.clone(),
+            0x8a1fb466d2dffff_u64,
+            true,
+        );
+        let cbrs_heartbeat_1 = ValidatedHeartbeat {
+            heartbeat: Heartbeat {
+                hb_type: HbType::Cbrs,
+                hotspot_key: hotspot_key1,
+                cbsd_id: Some(cbsd_id_1),
+                operation_mode: true,
+                lat: 0.0,
+                lon: 0.0,
+                coverage_object: Some(cov_obj_1.coverage_object.uuid),
+                location_validation_timestamp: None,
+                timestamp: ts + ChronoDuration::hours(n),
+            },
+            cell_type: CellType::SercommIndoor,
+            distance_to_asserted: None,
+            coverage_summary: None,
+            location_trust_score_multiplier: dec!(1.0),
+            validity: HeartbeatValidity::Valid,
+        };
+
+        let hotspot_key2: PublicKeyBinary = HOTSPOT_2.to_string().parse().unwrap();
+        let cbsd_id_2 = "P27-SCE4255W0002".to_string();
+        let cov_obj_2 = create_coverage_object(
+            ts + ChronoDuration::hours(n),
+            Some(cbsd_id_2.clone()),
+            hotspot_key2.clone(),
+            0x8a1fb49642dffff_u64,
+            false,
+        );
+        let cbrs_heartbeat_2 = ValidatedHeartbeat {
+            heartbeat: Heartbeat {
+                hb_type: HbType::Cbrs,
+                hotspot_key: hotspot_key2,
+                cbsd_id: Some(cbsd_id_2),
+                operation_mode: true,
+                lat: 0.0,
+                lon: 0.0,
+                coverage_object: Some(cov_obj_2.coverage_object.uuid),
+                location_validation_timestamp: None,
+                timestamp: ts + ChronoDuration::hours(n),
+            },
+            cell_type: CellType::SercommOutdoor,
+            distance_to_asserted: None,
+            coverage_summary: None,
+            location_trust_score_multiplier: dec!(1.0),
+            validity: HeartbeatValidity::Valid,
+        };
+
+        let hotspot_key3: PublicKeyBinary = HOTSPOT_3.to_string().parse().unwrap();
+        let cov_obj_3 = create_coverage_object(
+            ts + ChronoDuration::hours(n),
+            None,
+            hotspot_key3.clone(),
+            0x8c2681a306607ff_u64,
+            true,
+        );
+        let wifi_heartbeat = ValidatedHeartbeat {
+            heartbeat: Heartbeat {
+                hb_type: HbType::Wifi,
+                hotspot_key: hotspot_key3,
+                cbsd_id: None,
+                operation_mode: true,
+                lat: 0.0,
+                lon: 0.0,
+                coverage_object: Some(cov_obj_3.coverage_object.uuid),
+                location_validation_timestamp: Some(ts - ChronoDuration::hours(24)),
+                timestamp: ts + ChronoDuration::hours(n),
+            },
+            cell_type: CellType::NovaGenericWifiIndoor,
+            distance_to_asserted: Some(10),
+            coverage_summary: None,
+            location_trust_score_multiplier: dec!(1.0),
+            validity: HeartbeatValidity::Valid,
+        };
+
+        save_seniority_object(ts + ChronoDuration::hours(n), &wifi_heartbeat, txn).await?;
+        save_seniority_object(ts + ChronoDuration::hours(n), &cbrs_heartbeat_1, txn).await?;
+        save_seniority_object(ts + ChronoDuration::hours(n), &cbrs_heartbeat_2, txn).await?;
+
+        cbrs_heartbeat_1.save(txn).await?;
+        cbrs_heartbeat_2.save(txn).await?;
+        wifi_heartbeat.save(txn).await?;
+
+        cov_obj_1.save(txn).await?;
+        cov_obj_2.save(txn).await?;
+        cov_obj_3.save(txn).await?;
+    }
+    Ok(())
+}
+
+async fn seed_speedtests(
+    ts: DateTime<Utc>,
+    txn: &mut Transaction<'_, Postgres>,
+) -> anyhow::Result<()> {
+    for n in 0..24 {
+        let hotspot1_speedtest = CellSpeedtest {
+            pubkey: HOTSPOT_1.parse().unwrap(),
+            serial: "serial1".to_string(),
+            timestamp: ts - ChronoDuration::hours(n * 4),
+            upload_speed: 100_000_000,
+            download_speed: 100_000_000,
+            latency: 50,
+        };
+
+        let hotspot2_speedtest = CellSpeedtest {
+            pubkey: HOTSPOT_2.parse().unwrap(),
+            serial: "serial2".to_string(),
+            timestamp: ts - ChronoDuration::hours(n * 4),
+            upload_speed: 100_000_000,
+            download_speed: 100_000_000,
+            latency: 50,
+        };
+
+        let hotspot3_speedtest = CellSpeedtest {
+            pubkey: HOTSPOT_3.parse().unwrap(),
+            serial: "serial3".to_string(),
+            timestamp: ts - ChronoDuration::hours(n * 4),
+            upload_speed: 100_000_000,
+            download_speed: 100_000_000,
+            latency: 50,
+        };
+
+        speedtests::save_speedtest(&hotspot1_speedtest, txn).await?;
+        speedtests::save_speedtest(&hotspot2_speedtest, txn).await?;
+        speedtests::save_speedtest(&hotspot3_speedtest, txn).await?;
+    }
+    Ok(())
+}
+
+async fn seed_data_sessions(
+    ts: DateTime<Utc>,
+    txn: &mut Transaction<'_, Postgres>,
+) -> anyhow::Result<()> {
+    let data_session_1 = data_session::HotspotDataSession {
+        pub_key: HOTSPOT_1.parse().unwrap(),
+        payer: PAYER_1.parse().unwrap(),
+        upload_bytes: 1024 * 1000,
+        download_bytes: 1024 * 50000,
+        num_dcs: 5000000,
+        received_timestamp: ts + ChronoDuration::hours(1),
+    };
+    let data_session_2 = data_session::HotspotDataSession {
+        pub_key: HOTSPOT_2.parse().unwrap(),
+        payer: PAYER_1.parse().unwrap(),
+        upload_bytes: 1024 * 1000,
+        download_bytes: 1024 * 50000,
+        num_dcs: 5000000,
+        received_timestamp: ts + ChronoDuration::hours(1),
+    };
+    let data_session_3 = data_session::HotspotDataSession {
+        pub_key: HOTSPOT_3.parse().unwrap(),
+        payer: PAYER_1.parse().unwrap(),
+        upload_bytes: 1024 * 1000,
+        download_bytes: 1024 * 50000,
+        num_dcs: 5000000,
+        received_timestamp: ts + ChronoDuration::hours(1),
+    };
+    data_session_1.save(txn).await?;
+    data_session_2.save(txn).await?;
+    data_session_3.save(txn).await?;
+    Ok(())
+}
+
+fn create_coverage_object(
+    ts: DateTime<Utc>,
+    cbsd_id: Option<String>,
+    pub_key: PublicKeyBinary,
+    hex: u64,
+    indoor: bool,
+) -> CoverageObject {
+    let location = h3o::CellIndex::try_from(hex).unwrap();
+    let key_type = match cbsd_id {
+        Some(s) => KeyType::CbsdId(s),
+        None => KeyType::HotspotKey(pub_key.clone()),
+    };
+    let report = FSCoverageObject {
+        pub_key,
+        uuid: Uuid::new_v4(),
+        key_type,
+        coverage_claim_time: ts,
+        coverage: vec![RadioHexSignalLevel {
+            location,
+            signal_level: SignalLevel::High,
+            signal_power: 1000,
+        }],
+        indoor,
+        trust_score: 1000,
+        signature: Vec::new(),
+    };
+    CoverageObject {
+        coverage_object: report,
+        validity: CoverageObjectValidity::Valid,
+    }
+}
+
+//TODO: use existing save methods instead of manual sql
+async fn save_seniority_object(
+    ts: DateTime<Utc>,
+    hb: &ValidatedHeartbeat,
+    exec: &mut Transaction<'_, Postgres>,
+) -> anyhow::Result<()> {
+    sqlx::query(
+        r#"
+        INSERT INTO seniority
+        (radio_key, last_heartbeat, uuid, seniority_ts, inserted_at, update_reason, radio_type)
+        VALUES
+        ($1, $2, $3, $4, $5, $6, $7)
+        "#,
+    )
+    .bind(hb.heartbeat.key())
+    .bind(hb.heartbeat.timestamp)
+    .bind(hb.heartbeat.coverage_object)
+    .bind(ts)
+    .bind(ts)
+    .bind(SeniorityUpdateReason::NewCoverageClaimTime as i32)
+    .bind(hb.heartbeat.hb_type)
+    .execute(&mut *exec)
+    .await?;
+    Ok(())
+}
diff --git a/mobile_verifier/tests/rewarder_sp_rewards.rs b/mobile_verifier/tests/rewarder_sp_rewards.rs
new file mode 100644
index 000000000..e542b8095
--- /dev/null
+++ b/mobile_verifier/tests/rewarder_sp_rewards.rs
@@ -0,0 +1,213 @@
+use std::collections::HashMap;
+use std::string::ToString;
+
+use async_trait::async_trait;
+use chrono::{DateTime, Duration as ChronoDuration, Utc};
+use helium_proto::{
+    services::poc_mobile::{ServiceProviderReward, UnallocatedReward, UnallocatedRewardType},
+    ServiceProvider,
+};
+use rust_decimal::prelude::*;
+use rust_decimal_macros::dec;
+use sqlx::{PgPool, Postgres, Transaction};
+
+use common::MockCarrierServiceClient;
+use common::ValidSpMap;
+use mobile_config::client::{carrier_service_client::CarrierServiceVerifier, ClientError};
+use mobile_verifier::{data_session, reward_shares, rewarder};
+
+use crate::common::MockFileSinkReceiver;
+
+mod common;
+
+const HOTSPOT_1: &str = "112NqN2WWMwtK29PMzRby62fDydBJfsCLkCAf392stdok48ovNT6";
+const HOTSPOT_2: &str = "11eX55faMbqZB7jzN4p67m6w7ScPMH6ubnvCjCPLh72J49PaJEL";
+const PAYER_1: &str = "11uJHS2YaEWJqgqC7yza9uvSmpv5FWoMQXiP8WbxBGgNUmifUJf";
+const PAYER_2: &str = "11sctWiP9r5wDJVuDe1Th4XSL2vaawaLLSQF8f8iokAoMAJHxqp";
+const SP_1: &str = "Helium Mobile";
+
+impl MockCarrierServiceClient {
+    fn new(valid_sps: ValidSpMap) -> Self {
+        Self { valid_sps }
+    }
+}
+
+#[async_trait]
+impl CarrierServiceVerifier for MockCarrierServiceClient {
+    type Error = ClientError;
+
+    async fn payer_key_to_service_provider<'a>(
+        &self,
+        pubkey: &str,
+    ) -> Result<ServiceProvider, Self::Error> {
+        match self.valid_sps.get(pubkey) {
+            Some(v) => Ok(ServiceProvider::from_str(v)
+                .map_err(|_| ClientError::UnknownServiceProvider(pubkey.to_string()))?),
+            None => Err(ClientError::UnknownServiceProvider(pubkey.to_string())),
+        }
+    }
+}
+
+#[sqlx::test]
+async fn test_service_provider_rewards(pool: PgPool) -> anyhow::Result<()> {
+    let mut valid_sps = HashMap::<String, String>::new();
+    valid_sps.insert(PAYER_1.to_string(), SP_1.to_string());
+    let carrier_client = MockCarrierServiceClient::new(valid_sps);
+    let (mobile_rewards_client, mut mobile_rewards) = common::create_file_sink();
+
+    let now = Utc::now();
+    let epoch = (now - ChronoDuration::hours(24))..now;
+
+    // seed db with test specific data
+    let mut txn = pool.clone().begin().await?;
+    seed_hotspot_data(epoch.end, &mut txn).await?;
+    txn.commit().await?;
+
+    let (_, rewards) = tokio::join!(
+        rewarder::reward_service_providers(
+            &pool,
+            &carrier_client,
+            &mobile_rewards_client,
+            &epoch,
+            dec!(0.0001),
+        ),
+        receive_expected_rewards(&mut mobile_rewards)
+    );
+    if let Ok((sp_reward, unallocated_reward)) = rewards {
+        assert_eq!(
+            SP_1.to_string(),
+            ServiceProvider::from_i32(sp_reward.service_provider_id)
+                .unwrap()
+                .to_string()
+        );
+        assert_eq!(6000, sp_reward.amount);
+
+        assert_eq!(
+            UnallocatedRewardType::ServiceProvider as i32,
+            unallocated_reward.reward_type
+        );
+        assert_eq!(8_196_721_305_475, unallocated_reward.amount);
+        // confirm the total rewards allocated matches expectations
+        let expected_sum =
+            reward_shares::get_scheduled_tokens_for_service_providers(epoch.end - epoch.start)
+                .to_u64()
+                .unwrap();
+        assert_eq!(expected_sum, sp_reward.amount + unallocated_reward.amount);
+
+        // confirm the rewarded percentage amount matches expectations
+        let daily_total = reward_shares::get_total_scheduled_tokens(epoch.end - epoch.start);
+        let percent = (Decimal::from(unallocated_reward.amount) / daily_total)
+            .round_dp_with_strategy(2, RoundingStrategy::MidpointNearestEven);
+        assert_eq!(percent, dec!(0.1));
+    } else {
+        panic!("no rewards received");
+    }
+
+    Ok(())
+}
+
+#[sqlx::test]
+async fn test_service_provider_rewards_invalid_sp(pool: PgPool) -> anyhow::Result<()> {
+    // only payer 1 has a corresponding SP key
+    // data sessions from payer 2 will result in an error, halting rewards
+    let mut valid_sps = HashMap::<String, String>::new();
+    valid_sps.insert(PAYER_1.to_string(), SP_1.to_string());
+    let carrier_client = MockCarrierServiceClient::new(valid_sps);
+
+    let (mobile_rewards_client, mut mobile_rewards) = common::create_file_sink();
+    let now = Utc::now();
+    let epoch = (now - ChronoDuration::hours(24))..now;
+
+    let mut txn = pool.clone().begin().await?;
+    seed_hotspot_data_invalid_sp(epoch.end, &mut txn).await?;
+    txn.commit().await.expect("db txn failed");
+
+    let resp = rewarder::reward_service_providers(
+        &pool.clone(),
+        &carrier_client.clone(),
+        &mobile_rewards_client,
+        &epoch,
+        dec!(0.0001),
+    )
+    .await;
+    assert_eq!(
+        resp.unwrap_err().to_string(),
+        "unknown service provider ".to_string() + PAYER_2
+    );
+
+    // confirm we get no msgs as rewards halted
+    mobile_rewards.assert_no_messages();
+    Ok(())
+}
+
+async fn receive_expected_rewards(
+    mobile_rewards: &mut MockFileSinkReceiver,
+) -> anyhow::Result<(ServiceProviderReward, UnallocatedReward)> {
+    // get the filestore outputs from rewards run
+    // we expect a single service provider reward
+    let sp_reward1 = mobile_rewards.receive_service_provider_reward().await;
+    // let sp_reward2 = mobile_rewards.receive_service_provider_reward().await;
+    // dump the sp rewards into a vec and sort to get a deterministic order
+
+    // expect one unallocated reward
+    let unallocated_reward = mobile_rewards.receive_unallocated_reward().await;
+
+    // should be no further msgs
+    mobile_rewards.assert_no_messages();
+
+    Ok((sp_reward1, unallocated_reward))
+}
+
+async fn seed_hotspot_data(
+    ts: DateTime<Utc>,
+    txn: &mut Transaction<'_, Postgres>,
+) -> anyhow::Result<()> {
+    let data_session_1 = data_session::HotspotDataSession {
+        pub_key: HOTSPOT_1.parse().unwrap(),
+        payer: PAYER_1.parse().unwrap(),
+        upload_bytes: 1024 * 1000,
+        download_bytes: 1024 * 10000,
+        num_dcs: 10000,
+        received_timestamp: ts - ChronoDuration::hours(1),
+    };
+
+    let data_session_2 = data_session::HotspotDataSession {
+        pub_key: HOTSPOT_1.parse().unwrap(),
+        payer: PAYER_1.parse().unwrap(),
+        upload_bytes: 1024 * 1000,
+        download_bytes: 1024 * 50000,
+        num_dcs: 50000,
+        received_timestamp: ts - ChronoDuration::hours(2),
+    };
+
+    data_session_1.save(txn).await?;
+    data_session_2.save(txn).await?;
+    Ok(())
+}
+
+async fn seed_hotspot_data_invalid_sp(
+    ts: DateTime<Utc>,
+    txn: &mut Transaction<'_, Postgres>,
+) -> anyhow::Result<()> {
+    let data_session_1 = data_session::HotspotDataSession {
+        pub_key: HOTSPOT_1.parse().unwrap(),
+        payer: PAYER_1.parse().unwrap(),
+        upload_bytes: 1024 * 1000,
+        download_bytes: 1024 * 10000,
+        num_dcs: 10000,
+        received_timestamp: ts - ChronoDuration::hours(2),
+    };
+
+    let data_session_2 = data_session::HotspotDataSession {
+        pub_key: HOTSPOT_2.parse().unwrap(),
+        payer: PAYER_2.parse().unwrap(),
+        upload_bytes: 1024 * 1000,
+        download_bytes: 1024 * 50000,
+        num_dcs: 50000,
+        received_timestamp: ts - ChronoDuration::hours(2),
+    };
+
+    data_session_1.save(txn).await?;
+    data_session_2.save(txn).await?;
+    Ok(())
+}
diff --git a/mobile_verifier/tests/seniority.rs b/mobile_verifier/tests/seniority.rs
index 0ff634b75..9c1139d52 100644
--- a/mobile_verifier/tests/seniority.rs
+++ b/mobile_verifier/tests/seniority.rs
@@ -5,6 +5,7 @@ use mobile_verifier::coverage::Seniority;
 use mobile_verifier::heartbeats::{
     HbType, Heartbeat, SeniorityUpdate, SeniorityUpdateAction, ValidatedHeartbeat,
 };
+use rust_decimal_macros::dec;
 use sqlx::PgPool;
 use uuid::Uuid;
@@ -29,6 +30,7 @@ async fn test_seniority_updates(pool: PgPool) -> anyhow::Result<()> {
         cell_type: CellType::SercommIndoor,
         distance_to_asserted: None,
         coverage_summary: None,
+        location_trust_score_multiplier: dec!(1.0),
         validity: HeartbeatValidity::Valid,
     };
     let mut transaction = pool.begin().await?;
diff --git a/reward_index/migrations/10_add_service_provider_reward_type.sql b/reward_index/migrations/10_add_service_provider_reward_type.sql
new file mode 100644
index 000000000..cebb4fe2c
--- /dev/null
+++ b/reward_index/migrations/10_add_service_provider_reward_type.sql
@@ -0,0 +1 @@
+ALTER TYPE reward_type ADD VALUE 'mobile_service_provider';
diff --git a/reward_index/migrations/8_add_mobile_unallocated_reward_type.sql b/reward_index/migrations/8_add_mobile_unallocated_reward_type.sql
new file mode 100644
index 000000000..e1a75b83f
--- /dev/null
+++ b/reward_index/migrations/8_add_mobile_unallocated_reward_type.sql
@@ -0,0 +1 @@
+ALTER TYPE reward_type ADD VALUE 'mobile_unallocated';
diff --git a/reward_index/migrations/9_add_iot_unallocated_reward_type.sql b/reward_index/migrations/9_add_iot_unallocated_reward_type.sql
new file mode 100644
index 000000000..755146802
--- /dev/null
+++ b/reward_index/migrations/9_add_iot_unallocated_reward_type.sql
@@ -0,0 +1 @@
+ALTER TYPE reward_type ADD VALUE 'iot_unallocated';
diff --git a/reward_index/src/indexer.rs b/reward_index/src/indexer.rs
index e576cf3dc..64afd4a5d 100644
--- a/reward_index/src/indexer.rs
+++ b/reward_index/src/indexer.rs
@@ -9,7 +9,7 @@ use helium_crypto::PublicKeyBinary;
 use helium_proto::{
     services::poc_lora::{iot_reward_share::Reward as IotReward, IotRewardShare},
     services::poc_mobile::{mobile_reward_share::Reward as MobileReward, MobileRewardShare},
-    Message,
+    Message, ServiceProvider,
 };
 use poc_metrics::record_duration;
 use sqlx::{Pool, Postgres, Transaction};
@@ -21,6 +21,7 @@ pub struct Indexer {
     verifier_store: FileStore,
     mode: settings::Mode,
     op_fund_key: String,
+    unallocated_reward_key: String,
 }
 
 #[derive(sqlx::Type, Debug, Clone, PartialEq, Eq, Hash)]
@@ -30,6 +31,9 @@ pub enum RewardType {
     IotGateway,
     IotOperational,
     MobileSubscriber,
+    MobileServiceProvider,
+    MobileUnallocated,
+    IotUnallocated,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
@@ -50,6 +54,9 @@ impl Indexer {
                     .ok_or_else(|| anyhow!("operation fund key is required for IOT mode"))?,
                 settings::Mode::Mobile => String::new(),
             },
+            unallocated_reward_key: settings
+                .unallocated_reward_entity_key()
+                .ok_or_else(|| anyhow!("missing unallocated reward key"))?,
         })
     }
@@ -150,6 +157,26 @@ impl Indexer {
                 },
                 r.discovery_location_amount,
             )),
+            Some(MobileReward::ServiceProviderReward(r)) => {
+                if let Some(sp) = ServiceProvider::from_i32(r.service_provider_id) {
+                    Ok((
+                        RewardKey {
+                            key: sp.to_string(),
+                            reward_type: RewardType::MobileServiceProvider,
+                        },
+                        r.amount,
+                    ))
+                } else {
+                    bail!("failed to decode service provider")
+                }
+            }
+            Some(MobileReward::UnallocatedReward(r)) => Ok((
+                RewardKey {
+                    key: self.unallocated_reward_key.clone(),
+                    reward_type: RewardType::MobileUnallocated,
+                },
+                r.amount,
+            )),
             _ => bail!("got an invalid reward share"),
         }
     }
@@ -170,6 +197,13 @@ impl Indexer {
                 },
                 r.amount,
             )),
+            Some(IotReward::UnallocatedReward(r)) => Ok((
+                RewardKey {
+                    key: self.unallocated_reward_key.clone(),
+                    reward_type: RewardType::IotUnallocated,
+                },
+                r.amount,
+            )),
             _ => bail!("got an invalid iot reward share"),
         }
     }
diff --git a/reward_index/src/settings.rs b/reward_index/src/settings.rs
index 1ab3776ac..790f1375b 100644
--- a/reward_index/src/settings.rs
+++ b/reward_index/src/settings.rs
@@ -36,6 +36,7 @@ pub struct Settings {
     pub verifier: file_store::Settings,
     pub metrics: poc_metrics::Settings,
     pub operation_fund_key: Option<String>,
+    pub unallocated_reward_entity_key: Option<String>,
     #[serde(default = "default_start_after")]
     pub start_after: u64,
 }
@@ -78,6 +79,10 @@ impl Settings {
     pub fn operation_fund_key(&self) -> Option<String> {
         self.operation_fund_key.clone()
     }
+
+    pub fn unallocated_reward_entity_key(&self) -> Option<String> {
+        self.unallocated_reward_entity_key.clone()
+    }
 }
 
 fn default_interval() -> i64 {