diff --git a/.gitmodules b/.gitmodules index dce6843bae85e0..4a3dc37f5a8e93 100644 --- a/.gitmodules +++ b/.gitmodules @@ -203,10 +203,10 @@ gclient-condition = checkout_crubit [submodule "third_party/depot_tools"] path = third_party/depot_tools - url = https://chromium.googlesource.com/chromium/tools/depot_tools + url = https://github.com/columbia/depot_tools.git [submodule "third_party/devtools-frontend/src"] path = third_party/devtools-frontend/src - url = https://chromium.googlesource.com/devtools/devtools-frontend + url = https://github.com/columbia/devtools-frontend [submodule "third_party/devtools-frontend-internal"] path = third_party/devtools-frontend-internal url = https://chrome-internal.googlesource.com/devtools/devtools-internal diff --git a/components/attribution_reporting/BUILD.gn b/components/attribution_reporting/BUILD.gn index d00195628f1486..b78c16abc8dca1 100644 --- a/components/attribution_reporting/BUILD.gn +++ b/components/attribution_reporting/BUILD.gn @@ -77,6 +77,10 @@ component("attribution_reporting") { "trigger_config.h", "trigger_registration.cc", "trigger_registration.h", + "global_epsilon.cc", + "global_epsilon.h", + "attribution_window.cc", + "attribution_window.h", ] configs += [ "//build/config/compiler:wexit_time_destructors" ] diff --git a/components/attribution_reporting/attribution_window.cc b/components/attribution_reporting/attribution_window.cc new file mode 100644 index 00000000000000..e188ee50750f34 --- /dev/null +++ b/components/attribution_reporting/attribution_window.cc @@ -0,0 +1,115 @@ +#include "components/attribution_reporting/attribution_window.h" + +#include +#include +#include + +#include "base/check.h" +#include "base/ranges/algorithm.h" +#include "base/types/expected.h" +#include "base/types/expected_macros.h" +#include "base/values.h" +#include "components/attribution_reporting/parsing_utils.h" +#include "components/attribution_reporting/trigger_registration_error.mojom.h" + +namespace attribution_reporting { + +namespace { + +using ::attribution_reporting::mojom::TriggerRegistrationError; + +constexpr char kEpochStart[] = "epoch_start"; +constexpr char kEpochEnd[] = "epoch_end"; + +bool IsAttributionWindowValid(const uint64_t epoch_start, uint64_t epoch_end) { + return epoch_start <= epoch_end && epoch_start >=0 && epoch_end >=0; +} + +base::expected ParseAttributionWindowStart( + const base::Value::Dict& registration) { + const base::Value* v = registration.Find(kEpochStart); + if (!v) { + return base::unexpected( + TriggerRegistrationError::kAttributionWindowStartMissing); + } + if (std::optional epoch_start = v->GetIfInt()) { + if (*epoch_start < 0) { + return base::unexpected(TriggerRegistrationError::kAttributionWindowValueInvalid); + } + return *epoch_start; + } + return base::unexpected( + TriggerRegistrationError::kAttributionWindowStartMissing); +} + +base::expected ParseAttributionWindowEnd( + const base::Value::Dict& registration) { + const base::Value* v = registration.Find(kEpochEnd); + if (!v) { + return base::unexpected( + TriggerRegistrationError::kAttributionWindowEndMissing); + } + if (std::optional epoch_end = v->GetIfInt()) { + if (*epoch_end < 0) { + return base::unexpected(TriggerRegistrationError::kAttributionWindowValueInvalid); + } + return *epoch_end; + } + return base::unexpected( + TriggerRegistrationError::kAttributionWindowEndMissing); +} + + +} // namespace + +// static +std::optional AttributionWindow::Create(uint64_t epoch_start, uint64_t epoch_end) { + if (!IsAttributionWindowValid(epoch_start, 
epoch_end)) + return std::nullopt; + return AttributionWindow(epoch_start, epoch_end); +} + +// static +base::expected +AttributionWindow::FromJSON(const base::Value* value) { + const base::Value::Dict* dict = value->GetIfDict(); + if (!dict) { + return base::unexpected( + TriggerRegistrationError::kAttributionWindowWrongType); + } + + ASSIGN_OR_RETURN(auto epoch_start, ParseAttributionWindowStart(*dict)); + ASSIGN_OR_RETURN(auto epoch_end, ParseAttributionWindowEnd(*dict)); + return AttributionWindow(epoch_start, epoch_end); +} + +AttributionWindow::AttributionWindow() = default; + +AttributionWindow::AttributionWindow(uint64_t epoch_start, uint64_t epoch_end) + : epoch_start_(epoch_start), epoch_end_(epoch_end) { + DCHECK(IsAttributionWindowValid(epoch_start_, epoch_end_)); +} + +AttributionWindow::~AttributionWindow() = default; + +AttributionWindow::AttributionWindow( + const AttributionWindow&) = default; + +AttributionWindow& AttributionWindow::operator=( + const AttributionWindow&) = default; + +AttributionWindow::AttributionWindow(AttributionWindow&&) = + default; + +AttributionWindow& AttributionWindow::operator=( + AttributionWindow&&) = default; + +base::Value::Dict AttributionWindow::ToJson() const { + base::Value::Dict dict; + + SerializeUint64(dict, kEpochStart, epoch_start_); + SerializeUint64(dict, kEpochEnd, epoch_end_); + return dict; +} + +} // namespace attribution_reporting diff --git a/components/attribution_reporting/attribution_window.h b/components/attribution_reporting/attribution_window.h new file mode 100644 index 00000000000000..c8d663eafd00d9 --- /dev/null +++ b/components/attribution_reporting/attribution_window.h @@ -0,0 +1,49 @@ +#ifndef COMPONENTS_ATTRIBUTION_REPORTING_ATTRIBUTION_WINDOW_H_ +#define COMPONENTS_ATTRIBUTION_REPORTING_ATTRIBUTION_WINDOW_H_ + +#include +#include +#include + +#include "base/component_export.h" +#include "base/types/expected.h" +#include "base/values.h" +#include "components/attribution_reporting/trigger_registration_error.mojom-forward.h" + +namespace attribution_reporting { + +class COMPONENT_EXPORT(ATTRIBUTION_REPORTING) AttributionWindow { + public: + static std::optional Create(uint64_t epoch_start, uint64_t epoch_end); + + static base::expected + FromJSON(const base::Value* value); + + AttributionWindow(); + + ~AttributionWindow(); + + AttributionWindow(const AttributionWindow&); + AttributionWindow& operator=(const AttributionWindow&); + + AttributionWindow(AttributionWindow&&); + AttributionWindow& operator=(AttributionWindow&&); + + uint64_t epoch_start() const { return epoch_start_; } + uint64_t epoch_end() const { return epoch_end_; } + uint64_t size() const { return epoch_end_ - epoch_start_ + 1; } + + base::Value::Dict ToJson() const; + + friend bool operator==(const AttributionWindow&, const AttributionWindow&) = default; + + private: + AttributionWindow(uint64_t epoch_start, uint64_t epoch_end); + + uint64_t epoch_start_ = 0; + uint64_t epoch_end_ = 0; +}; + +} // namespace attribution_reporting + +#endif // COMPONENTS_ATTRIBUTION_REPORTING_ATTRIBUTION_WINDOW_H_ diff --git a/components/attribution_reporting/filters.cc b/components/attribution_reporting/filters.cc index 308924d36ec5b4..d9000cb74ad4b2 100644 --- a/components/attribution_reporting/filters.cc +++ b/components/attribution_reporting/filters.cc @@ -318,6 +318,45 @@ bool FilterData::Matches(mojom::SourceType source_type, }); } +bool FilterData::MatchesM2M(const FiltersDisjunction& filters, bool negated) const { + if (filters.empty()) { + return true; + } 
+ + return base::ranges::any_of(filters, [&](const FilterConfig& config) { + return base::ranges::all_of( + config.filter_values(), [&](const auto& trigger_filter) { + + auto source_filter = filter_values_.find(trigger_filter.first); + if (source_filter == filter_values_.end()) { + return true; + } + + // Desired behavior is to treat any empty set of values as a + // single unique value itself. This means: + // - x:[] match x:[] is false when negated, and true otherwise. + // - x:[1,2,3] match x:[] is true when negated, and false + // otherwise. + if (trigger_filter.second.empty()) { + return negated != source_filter->second.empty(); + } + + bool has_intersection = base::ranges::any_of( + trigger_filter.second, [&](const std::string& value) { + return base::Contains(source_filter->second, value); + }); + // Negating filters are considered matched if the intersection of + // the filter values is empty. + return negated != has_intersection; + }); + }); +} + +bool FilterData::MatchesM2M(const FilterPair& filters) const { + return MatchesM2M(filters.positive, /*negated=*/false) && + MatchesM2M(filters.negative, /*negated=*/true); +} + bool FilterData::MatchesForTesting(mojom::SourceType source_type, const base::Time& source_time, const base::Time& trigger_time, diff --git a/components/attribution_reporting/filters.h b/components/attribution_reporting/filters.h index ade8f31e16d284..2533bf047bc7a5 100644 --- a/components/attribution_reporting/filters.h +++ b/components/attribution_reporting/filters.h @@ -58,6 +58,8 @@ class COMPONENT_EXPORT(ATTRIBUTION_REPORTING) FilterData { const base::Time& trigger_time, const FilterPair&) const; + bool MatchesM2M(const FilterPair&) const; + bool MatchesForTesting(mojom::SourceType, const base::Time& source_time, const base::Time& trigger_time, @@ -75,6 +77,9 @@ class COMPONENT_EXPORT(ATTRIBUTION_REPORTING) FilterData { const FiltersDisjunction&, bool negated) const; + bool MatchesM2M(const FiltersDisjunction&, + bool negated) const; + FilterValues filter_values_; }; diff --git a/components/attribution_reporting/global_epsilon.cc b/components/attribution_reporting/global_epsilon.cc new file mode 100644 index 00000000000000..d3188a66f86c5e --- /dev/null +++ b/components/attribution_reporting/global_epsilon.cc @@ -0,0 +1,80 @@ +#include "components/attribution_reporting/global_epsilon.h" + +#include + +#include "base/check.h" +#include "base/check_op.h" +#include "base/types/expected.h" +#include "base/values.h" +#include "components/attribution_reporting/trigger_registration_error.mojom.h" + +namespace attribution_reporting { + +namespace { + +// using ::attribution_reporting::mojom::SourceRegistrationError; +using ::attribution_reporting::mojom::TriggerRegistrationError; + +constexpr char kGlobalEpsilon[] = "global_epsilon"; + +double g_max_global_epsilon = 14; + +bool IsGlobalEpsilonValid(double epsilon) { + return epsilon >= 0 && epsilon <= g_max_global_epsilon; +} + +} // namespace + +// static +base::expected +GlobalEpsilon::Parse(const base::Value::Dict& dict) { + const base::Value* value = dict.Find(kGlobalEpsilon); + if (!value) { + return GlobalEpsilon(); + } + + std::optional d = value->GetIfDouble(); + if (!d.has_value()) { + return base::unexpected( + TriggerRegistrationError::kGlobalEpsilonWrongType); + } + + if (!IsGlobalEpsilonValid(*d)) { + return base::unexpected( + TriggerRegistrationError::kGlobalEpsilonValueInvalid); + } + + return GlobalEpsilon(*d); +} + +GlobalEpsilon::GlobalEpsilon() + : GlobalEpsilon(g_max_global_epsilon) {} + 
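A minimal standalone sketch of the per-key rule implemented by FilterData::MatchesM2M in the filters.cc hunk above, restated over plain STL containers so the negated and empty-value-list cases can be exercised in isolation. ConfigMatches and FilterMap are illustrative names only, not Chromium types; the real code operates on FilterConfig/FilterValues as shown in the diff.

#include <map>
#include <set>
#include <string>

// Illustrative stand-in for attribution_reporting::FilterValues.
using FilterMap = std::map<std::string, std::set<std::string>>;

// Returns whether a single trigger filter config matches the source's filter
// data under the MatchesM2M rule: keys absent from the source are ignored, an
// empty value list only "intersects" another empty list, and `negated` flips
// the per-key result (the not_filters side of the FilterPair).
bool ConfigMatches(const FilterMap& source, const FilterMap& trigger,
                   bool negated) {
  for (const auto& [key, trigger_values] : trigger) {
    auto it = source.find(key);
    if (it == source.end()) {
      continue;
    }
    bool intersects;
    if (trigger_values.empty()) {
      intersects = it->second.empty();
    } else {
      intersects = false;
      for (const std::string& value : trigger_values) {
        if (it->second.count(value)) {
          intersects = true;
          break;
        }
      }
    }
    if (negated == intersects) {
      return false;
    }
  }
  return true;
}

For example, a trigger filter {"x": []} against source data {"x": []} passes with negated=false and fails with negated=true, which is the behaviour described in the hunk's comment; a disjunction of configs matches if any single config does, and an empty disjunction matches unconditionally.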
+GlobalEpsilon::GlobalEpsilon(double epsilon) : epsilon_(epsilon) { + CHECK(IsGlobalEpsilonValid(epsilon_)); +} + +bool GlobalEpsilon::SetIfValid(double epsilon) { + if (!IsGlobalEpsilonValid(epsilon)) { + return false; + } + epsilon_ = epsilon; + return true; +} + +void GlobalEpsilon::Serialize(base::Value::Dict& dict) const { + dict.Set(kGlobalEpsilon, epsilon_); +} + +ScopedMaxGlobalEpsilonForTesting::ScopedMaxGlobalEpsilonForTesting( + double epsilon) + : previous_(g_max_global_epsilon) { + CHECK_GE(epsilon, 0); + g_max_global_epsilon = epsilon; +} + +ScopedMaxGlobalEpsilonForTesting::~ScopedMaxGlobalEpsilonForTesting() { + g_max_global_epsilon = previous_; +} + +} // namespace attribution_reporting diff --git a/components/attribution_reporting/global_epsilon.h b/components/attribution_reporting/global_epsilon.h new file mode 100644 index 00000000000000..cb440611dd3e57 --- /dev/null +++ b/components/attribution_reporting/global_epsilon.h @@ -0,0 +1,72 @@ +#ifndef COMPONENTS_ATTRIBUTION_REPORTING_GLOBAL_EPSILON_H_ +#define COMPONENTS_ATTRIBUTION_REPORTING_GLOBAL_EPSILON_H_ + +#include "base/component_export.h" +#include "base/types/expected.h" +#include "base/values.h" +#include "components/attribution_reporting/trigger_registration_error.mojom-forward.h" + +namespace attribution_reporting { + +// Controls the epsilon parameter requested to be consumed by the user +class COMPONENT_EXPORT(ATTRIBUTION_REPORTING) GlobalEpsilon { + public: + static base::expected + Parse(const base::Value::Dict&); + + // Creates an epsilon with the maximum allowed value. + GlobalEpsilon(); + + // `CHECK()`s that the given value is non-negative and less than the maximum. + explicit GlobalEpsilon(double); + + ~GlobalEpsilon() = default; + + GlobalEpsilon(const GlobalEpsilon&) = default; + GlobalEpsilon& operator=(const GlobalEpsilon&) = default; + + GlobalEpsilon(GlobalEpsilon&&) = default; + GlobalEpsilon& operator=(GlobalEpsilon&&) = default; + + // This implicit conversion is allowed to ease drop-in use of + // this type in places currently requiring `int` with prior validation. + operator double() const { // NOLINT + return epsilon_; + } + + double global_epsilon() const { + return epsilon_; + } + + [[nodiscard]] bool SetIfValid(double); + + void Serialize(base::Value::Dict&) const; + + private: + double epsilon_; +}; + +class COMPONENT_EXPORT(ATTRIBUTION_REPORTING) + ScopedMaxGlobalEpsilonForTesting { + public: + explicit ScopedMaxGlobalEpsilonForTesting(double); + + ~ScopedMaxGlobalEpsilonForTesting(); + + ScopedMaxGlobalEpsilonForTesting( + const ScopedMaxGlobalEpsilonForTesting&) = delete; + ScopedMaxGlobalEpsilonForTesting& operator=( + const ScopedMaxGlobalEpsilonForTesting&) = delete; + + ScopedMaxGlobalEpsilonForTesting(ScopedMaxGlobalEpsilonForTesting&&) = + delete; + ScopedMaxGlobalEpsilonForTesting& operator=( + ScopedMaxGlobalEpsilonForTesting&&) = delete; + + private: + double previous_; +}; + +} // namespace attribution_reporting + +#endif // COMPONENTS_ATTRIBUTION_REPORTING_GLOBAL_EPSILON_H_ diff --git a/components/attribution_reporting/registration.mojom b/components/attribution_reporting/registration.mojom index c0c8aef5df42a7..85460f5a41ebf2 100644 --- a/components/attribution_reporting/registration.mojom +++ b/components/attribution_reporting/registration.mojom @@ -59,6 +59,11 @@ struct AggregatableTriggerData { FilterPair filters; }; +struct AttributionWindow { + uint64 epoch_start; + uint64 epoch_end; +}; + // Target site(s) where a source can be attributed. 
struct DestinationSet { array destinations; @@ -129,6 +134,8 @@ struct SourceRegistration { // the containing source registration. Must be greater than or equal to 0. // https://wicg.github.io/attribution-reporting-api/#obtain-a-randomized-source-response double event_level_epsilon; + + uint64 source_epoch; }; // Mojo representation of the trigger configuration provided by a reporting @@ -198,6 +205,22 @@ struct TriggerRegistration { // Specifies the context ID associated with the trigger. The context ID // cannot be empty or longer than 64 bytes. string? trigger_context_id; + + // Controls the epsilon parameter requested to be consumed by user. Must be greater than or equal to 0. + double global_epsilon; + + // The attribution_window in which the attribution logic will look for matching sources + AttributionWindow attribution_window; + + // A map of aggregation key identifier and the corresponding cap value. + map aggregatable_cap_values; + + // The attribution logic + string attribution_logic; + + // The partitioning logic + string partitioning_logic; + }; // Represents a source or trigger registration item that will be passed to the OS, if supported. diff --git a/components/attribution_reporting/registration_mojom_traits.cc b/components/attribution_reporting/registration_mojom_traits.cc index caca38fe761910..a716677593adb5 100644 --- a/components/attribution_reporting/registration_mojom_traits.cc +++ b/components/attribution_reporting/registration_mojom_traits.cc @@ -252,6 +252,7 @@ bool StructTraitsdebug_key = data.debug_key(); out->debug_reporting = data.debug_reporting(); out->trigger_data_matching = data.trigger_data_matching(); + out->source_epoch = data.source_epoch(); return out->IsValid(); } @@ -278,6 +279,16 @@ bool StructTraits:: + Read(attribution_reporting::mojom::AttributionWindowDataView data, + attribution_reporting::AttributionWindow* out) { + *out = std::move(*attribution_reporting::AttributionWindow::Create(data.epoch_start(), data.epoch_end())); + return true; +} + + // static bool StructTraits:: @@ -352,6 +363,20 @@ bool StructTraitsaggregatable_values = std::move(*aggregatable_values); + attribution_reporting::AggregatableValues::Values cap_values; + if (!data.ReadAggregatableCapValues(&cap_values)) { + return false; + } + + auto aggregatable_cap_values = + attribution_reporting::AggregatableValues::Create(std::move(cap_values)); + if (!aggregatable_cap_values) { + return false; + } + + out->aggregatable_cap_values = std::move(*aggregatable_cap_values); + + if (!data.ReadAggregatableDedupKeys(&out->aggregatable_dedup_keys)) { return false; } @@ -378,6 +403,23 @@ bool StructTraitsdebug_key = data.debug_key(); out->debug_reporting = data.debug_reporting(); + + if (!out->global_epsilon.SetIfValid(data.global_epsilon())) { + return false; + } + + if (!data.ReadAttributionWindow(&out->attribution_window)) { + return false; + } + + if (!data.ReadAttributionLogic(&out->attribution_logic)) { + return false; + } + + if (!data.ReadPartitioningLogic(&out->partitioning_logic)) { + return false; + } + return true; } diff --git a/components/attribution_reporting/registration_mojom_traits.h b/components/attribution_reporting/registration_mojom_traits.h index 41e4ee103b54fc..6859f8badd3b45 100644 --- a/components/attribution_reporting/registration_mojom_traits.h +++ b/components/attribution_reporting/registration_mojom_traits.h @@ -232,6 +232,11 @@ struct COMPONENT_EXPORT(ATTRIBUTION_REPORTING_REGISTRATION_MOJOM_TRAITS) return source.event_level_epsilon; } + static uint64_t 
source_epoch( + const attribution_reporting::SourceRegistration& source) { + return source.source_epoch; + } + static bool Read( attribution_reporting::mojom::SourceRegistrationDataView data, attribution_reporting::SourceRegistration* out); @@ -281,6 +286,23 @@ struct COMPONENT_EXPORT(ATTRIBUTION_REPORTING_REGISTRATION_MOJOM_TRAITS) attribution_reporting::EventTriggerData* out); }; +template <> +struct COMPONENT_EXPORT(ATTRIBUTION_REPORTING_REGISTRATION_MOJOM_TRAITS) + StructTraits { + static uint64_t epoch_start(const attribution_reporting::AttributionWindow& attribution_window) { + return attribution_window.epoch_start(); + } + + static uint64_t epoch_end(const attribution_reporting::AttributionWindow& attribution_window) { + return attribution_window.epoch_end(); + } + + static bool Read( + attribution_reporting::mojom::AttributionWindowDataView data, + attribution_reporting::AttributionWindow* out); +}; + template <> struct COMPONENT_EXPORT(ATTRIBUTION_REPORTING_REGISTRATION_MOJOM_TRAITS) StructTraits debug_key( const attribution_reporting::TriggerRegistration& trigger) { return trigger.debug_key; @@ -364,6 +392,26 @@ struct COMPONENT_EXPORT(ATTRIBUTION_REPORTING_REGISTRATION_MOJOM_TRAITS) const attribution_reporting::TriggerRegistration& trigger) { return trigger.aggregatable_trigger_config.trigger_context_id(); } + + static double global_epsilon( + const attribution_reporting::TriggerRegistration& trigger) { + return trigger.global_epsilon; + } + + static const attribution_reporting::AttributionWindow& attribution_window( + const attribution_reporting::TriggerRegistration& trigger) { + return trigger.attribution_window; + } + + static std::string attribution_logic( + const attribution_reporting::TriggerRegistration& trigger) { + return trigger.attribution_logic; + } + + static std::string partitioning_logic( + const attribution_reporting::TriggerRegistration& trigger) { + return trigger.partitioning_logic; + } static bool Read( attribution_reporting::mojom::TriggerRegistrationDataView data, diff --git a/components/attribution_reporting/source_registration.cc b/components/attribution_reporting/source_registration.cc index 1aa9baf15ed3e4..19cd529293153e 100644 --- a/components/attribution_reporting/source_registration.cc +++ b/components/attribution_reporting/source_registration.cc @@ -46,6 +46,7 @@ constexpr char kDestination[] = "destination"; constexpr char kExpiry[] = "expiry"; constexpr char kFilterData[] = "filter_data"; constexpr char kSourceEventId[] = "source_event_id"; +constexpr char kSourceEpoch[] = "epoch"; base::TimeDelta AdjustExpiry(base::TimeDelta expiry, SourceType source_type) { switch (source_type) { @@ -61,7 +62,7 @@ base::TimeDelta AdjustExpiry(base::TimeDelta expiry, SourceType source_type) { void RecordSourceRegistrationError(SourceRegistrationError error) { static_assert( SourceRegistrationError::kMaxValue == - SourceRegistrationError::kEventLevelEpsilonValueInvalid, + SourceRegistrationError::kSourceEpochValueInvalid, "Bump version of Conversions.SourceRegistrationError10 histogram."); base::UmaHistogramEnumeration("Conversions.SourceRegistrationError10", error); } @@ -148,6 +149,13 @@ SourceRegistration::Parse(base::Value::Dict registration, ASSIGN_OR_RETURN(result.event_level_epsilon, EventLevelEpsilon::Parse(registration)); + ASSIGN_OR_RETURN(result.source_epoch, + ParseUint64(registration, kSourceEpoch) + .transform(&ValueOrZero), + [](absl::monostate) { + return SourceRegistrationError::kSourceEpochValueInvalid; + }); + result.debug_key = 
ParseDebugKey(registration); result.debug_reporting = ParseDebugReporting(registration); @@ -213,6 +221,8 @@ base::Value::Dict SourceRegistration::ToJson() const { event_level_epsilon.Serialize(dict); + SerializeUint64(dict, kSourceEpoch, source_epoch); + return dict; } diff --git a/components/attribution_reporting/source_registration.h b/components/attribution_reporting/source_registration.h index 517ed317ebd776..3e76860ade6f56 100644 --- a/components/attribution_reporting/source_registration.h +++ b/components/attribution_reporting/source_registration.h @@ -78,6 +78,7 @@ struct COMPONENT_EXPORT(ATTRIBUTION_REPORTING) SourceRegistration { mojom::TriggerDataMatching trigger_data_matching = mojom::TriggerDataMatching::kModulus; EventLevelEpsilon event_level_epsilon; + uint64_t source_epoch; }; } // namespace attribution_reporting diff --git a/components/attribution_reporting/source_registration_error.mojom b/components/attribution_reporting/source_registration_error.mojom index 820803aa56d4a5..5aefb41826eabd 100644 --- a/components/attribution_reporting/source_registration_error.mojom +++ b/components/attribution_reporting/source_registration_error.mojom @@ -78,4 +78,6 @@ enum SourceRegistrationError { kEventLevelEpsilonWrongType = 58, kEventLevelEpsilonValueInvalid = 59, + + kSourceEpochValueInvalid = 60, }; diff --git a/components/attribution_reporting/source_registration_fuzzer_corpus/all_params.textproto b/components/attribution_reporting/source_registration_fuzzer_corpus/all_params.textproto index 95369c6e7a1e07..81af75ff22f2f8 100644 --- a/components/attribution_reporting/source_registration_fuzzer_corpus/all_params.textproto +++ b/components/attribution_reporting/source_registration_fuzzer_corpus/all_params.textproto @@ -142,4 +142,12 @@ object_value { } } } + field { + name: "source_epoch" + value { + integer_value { + value: 0 + } + } + } } diff --git a/components/attribution_reporting/trigger_registration.cc b/components/attribution_reporting/trigger_registration.cc index b172a3b489467d..b28fbd65cd14bd 100644 --- a/components/attribution_reporting/trigger_registration.cc +++ b/components/attribution_reporting/trigger_registration.cc @@ -8,6 +8,8 @@ #include #include +#include +#include "base/logging.h" #include "base/feature_list.h" #include "base/functional/function_ref.h" #include "base/json/json_reader.h" @@ -17,6 +19,8 @@ #include "base/values.h" #include "components/aggregation_service/features.h" #include "components/aggregation_service/parsing_utils.h" +#include "components/attribution_reporting/attribution_window.h" +#include "components/attribution_reporting/global_epsilon.h" #include "components/attribution_reporting/aggregatable_dedup_key.h" #include "components/attribution_reporting/aggregatable_trigger_config.h" #include "components/attribution_reporting/aggregatable_trigger_data.h" @@ -41,6 +45,11 @@ constexpr char kAggregatableDeduplicationKeys[] = constexpr char kAggregatableTriggerData[] = "aggregatable_trigger_data"; constexpr char kAggregatableValues[] = "aggregatable_values"; constexpr char kEventTriggerData[] = "event_trigger_data"; +constexpr char kAttributionWindow[] = "attribution_window"; +constexpr char kAggregatableCapValues[] = "aggregatable_cap_values"; +constexpr char kAttributionLogic[] = "attribution_logic"; +constexpr char kPartitioningLogic[] = "partitioning_logic"; + base::expected, TriggerRegistrationError> ParseAggregationCoordinator(const base::Value* value) { @@ -69,6 +78,12 @@ ParseAggregationCoordinator(const base::Value* value) { return 
*aggregation_coordinator_origin; } +template > +struct has_to_json : std::false_type {}; + +template +struct has_to_json().ToJson())>> : std::true_type {}; + template void SerializeListIfNotEmpty(base::Value::Dict& dict, std::string_view key, @@ -78,8 +93,14 @@ void SerializeListIfNotEmpty(base::Value::Dict& dict, } base::Value::List list; - for (const auto& value : vec) { - list.Append(value.ToJson()); + if constexpr (has_to_json::value) { + for (const auto& value : vec) { + list.Append(value.ToJson()); + } + } else { + for (const auto& value : vec) { + list.Append(base::NumberToString(value)); + } } dict.Set(key, std::move(list)); } @@ -116,7 +137,7 @@ void RecordTriggerRegistrationError(TriggerRegistrationError error) { static_assert( TriggerRegistrationError::kMaxValue == TriggerRegistrationError:: - kTriggerContextIdInvalidSourceRegistrationTimeConfig, + kAttributionOrPartitioningLogicValueInvalid, "Bump version of Conversions.TriggerRegistrationError9 histogram."); base::UmaHistogramEnumeration("Conversions.TriggerRegistrationError9", error); } @@ -151,6 +172,33 @@ TriggerRegistration::Parse(base::Value::Dict dict) { ASSIGN_OR_RETURN( registration.aggregatable_values, AggregatableValues::FromJSON(dict.Find(kAggregatableValues))); + + ASSIGN_OR_RETURN( + registration.aggregatable_cap_values, + AggregatableValues::FromJSON(dict.Find(kAggregatableCapValues))); + + ASSIGN_OR_RETURN(registration.global_epsilon, + GlobalEpsilon::Parse(dict)); + + ASSIGN_OR_RETURN( + registration.attribution_window, + AttributionWindow::FromJSON(dict.Find(kAttributionWindow))); + + auto parseStringLambda = [](base::Value& value) -> base::expected { + const std::string* str = value.GetIfString(); + if (!str) { + return base::unexpected(TriggerRegistrationError::kAttributionOrPartitioningLogicValueInvalid); + } + return *str; + }; + + ASSIGN_OR_RETURN( + registration.attribution_logic, + parseStringLambda(*dict.Find(kAttributionLogic))); + + ASSIGN_OR_RETURN( + registration.partitioning_logic, + parseStringLambda(*dict.Find(kPartitioningLogic))); if (base::FeatureList::IsEnabled( aggregation_service::kAggregationServiceMultipleCloudProviders)) { @@ -165,6 +213,9 @@ TriggerRegistration::Parse(base::Value::Dict dict) { ASSIGN_OR_RETURN(registration.aggregatable_trigger_config, AggregatableTriggerConfig::Parse(dict)); + LOG(INFO) << "PARSE TRIGGER REGISTRATION" ; + LOG(INFO) << registration.ToJson() ; + return registration; } @@ -221,6 +272,10 @@ base::Value::Dict TriggerRegistration::ToJson() const { dict.Set(kAggregatableValues, aggregatable_values.ToJson()); } + if (!aggregatable_cap_values.values().empty()) { + dict.Set(kAggregatableCapValues, aggregatable_cap_values.ToJson()); + } + SerializeDebugKey(dict, debug_key); SerializeDebugReporting(dict, debug_reporting); @@ -233,7 +288,10 @@ base::Value::Dict TriggerRegistration::ToJson() const { } aggregatable_trigger_config.Serialize(dict); - + global_epsilon.Serialize(dict); + dict.Set(kAttributionWindow, attribution_window.ToJson()); + dict.Set(kAttributionLogic, attribution_logic); + dict.Set(kPartitioningLogic, partitioning_logic); return dict; } diff --git a/components/attribution_reporting/trigger_registration.h b/components/attribution_reporting/trigger_registration.h index e672c2bdea0176..e95caa4a50dffb 100644 --- a/components/attribution_reporting/trigger_registration.h +++ b/components/attribution_reporting/trigger_registration.h @@ -14,6 +14,8 @@ #include "base/component_export.h" #include "base/types/expected.h" #include "base/values.h" +#include 
"components/attribution_reporting/global_epsilon.h" +#include "components/attribution_reporting/attribution_window.h" #include "components/attribution_reporting/aggregatable_trigger_config.h" #include "components/attribution_reporting/aggregatable_values.h" #include "components/attribution_reporting/filters.h" @@ -54,6 +56,7 @@ struct COMPONENT_EXPORT(ATTRIBUTION_REPORTING) TriggerRegistration { friend bool operator==(const TriggerRegistration&, const TriggerRegistration&) = default; + FilterPair filters; std::optional debug_key; std::vector aggregatable_dedup_keys; @@ -63,6 +66,13 @@ struct COMPONENT_EXPORT(ATTRIBUTION_REPORTING) TriggerRegistration { bool debug_reporting = false; std::optional aggregation_coordinator_origin; AggregatableTriggerConfig aggregatable_trigger_config; + + GlobalEpsilon global_epsilon; + AttributionWindow attribution_window; + AggregatableValues aggregatable_cap_values; + std::string attribution_logic; + std::string partitioning_logic; + }; } // namespace attribution_reporting diff --git a/components/attribution_reporting/trigger_registration_error.mojom b/components/attribution_reporting/trigger_registration_error.mojom index 5b5bd7d58058ca..2fa1a6808ffc6e 100644 --- a/components/attribution_reporting/trigger_registration_error.mojom +++ b/components/attribution_reporting/trigger_registration_error.mojom @@ -48,4 +48,15 @@ enum TriggerRegistrationError { kTriggerContextIdInvalidValue = 37, kTriggerContextIdInvalidSourceRegistrationTimeConfig = 38, + + kGlobalEpsilonWrongType = 39, + kGlobalEpsilonValueInvalid = 40, + + kAttributionWindowWrongType = 41, + kAttributionWindowValueInvalid = 42, + kAttributionWindowStartMissing = 43, + kAttributionWindowEndMissing = 44, + kAttributionWindowListWrongType = 45, + + kAttributionOrPartitioningLogicValueInvalid = 46, }; diff --git a/components/attribution_reporting/trigger_registration_fuzzer_corpus/all_params.textproto b/components/attribution_reporting/trigger_registration_fuzzer_corpus/all_params.textproto index 75f39a9507f765..afa08ccebb5ca9 100644 --- a/components/attribution_reporting/trigger_registration_fuzzer_corpus/all_params.textproto +++ b/components/attribution_reporting/trigger_registration_fuzzer_corpus/all_params.textproto @@ -402,4 +402,80 @@ object_value { } } } -} + field { + name: "global_epsilon" + value { + number_value { + float_value { + value: 4.2 + } + } + } + } + field { + name: "attribution_window" + value { + object_value { + field { + name: "epoch_start" + value: { + integer_value { + value: 0 + } + } + } + field { + name: "epoch_end" + value: { + integer_value { + value: 1 + } + } + } + } + } + } + field { + name: "aggregatable_cap_values" + value { + object_value { + field { + name: "a" + value { + number_value { + integer_value { + value: 123 + } + } + } + } + field { + name: "b" + value { + number_value { + integer_value { + value: 456 + } + } + } + } + } + } + } + field { + name: "attribution_logic" + value { + string_value { + "123" + } + } + } + field { + name: "partitioning_logic" + value { + string_value { + "123" + } + } + } +} \ No newline at end of file diff --git a/content/browser/BUILD.gn b/content/browser/BUILD.gn index 4b2dd7b09dd4f6..06acc9080db26a 100644 --- a/content/browser/BUILD.gn +++ b/content/browser/BUILD.gn @@ -467,6 +467,8 @@ source_set("browser") { "attribution_reporting/attribution_suitable_context.h", "attribution_reporting/attribution_trigger.cc", "attribution_reporting/attribution_trigger.h", + "attribution_reporting/partition.cc", + 
"attribution_reporting/partition.h", "attribution_reporting/attribution_utils.cc", "attribution_reporting/attribution_utils.h", "attribution_reporting/common_source_info.cc", diff --git a/content/browser/attribution_reporting/aggregatable_attribution_utils.cc b/content/browser/attribution_reporting/aggregatable_attribution_utils.cc index 6aeab96cad4657..ba2cc112b81a8f 100644 --- a/content/browser/attribution_reporting/aggregatable_attribution_utils.cc +++ b/content/browser/attribution_reporting/aggregatable_attribution_utils.cc @@ -4,11 +4,15 @@ #include "content/browser/attribution_reporting/aggregatable_attribution_utils.h" +#include #include #include #include #include +#include "base/logging.h" +#include "base/metrics/histogram_functions.h" +#include "third_party/abseil-cpp/absl/numeric/int128.h" #include "base/check.h" #include "base/functional/overloaded.h" #include "base/metrics/histogram_functions.h" @@ -31,6 +35,7 @@ #include "content/browser/attribution_reporting/aggregatable_histogram_contribution.h" #include "content/browser/attribution_reporting/attribution_info.h" #include "content/browser/attribution_reporting/attribution_report.h" +#include "content/browser/attribution_reporting/partition.h" #include "net/base/schemeful_site.h" #include "third_party/abseil-cpp/absl/types/variant.h" #include "third_party/blink/public/mojom/private_aggregation/aggregatable_report.mojom.h" @@ -39,6 +44,9 @@ namespace content { namespace { +// using AggregatableResult = ::content::AttributionTrigger::AggregatableResult; + + // Note: use the same time serialization as in aggregatable_report.cc. // Consider sharing logic if more call-sites need this. std::string SerializeTimeRoundedDownToWholeDayInSeconds(base::Time time) { @@ -121,6 +129,141 @@ std::vector CreateAggregatableHistogram( return contributions; } + +// TODO(kelly): Organize in classes +void AttributionLogicLastTouch(Partition& partition, + base::flat_map>& trigger_keypieces_per_source) { + + auto& attribution_window = partition.attribution_window; + auto& sources_per_epoch = partition.sources_per_epoch; + + std::optional latest_source; + + for (uint64_t i=attribution_window.epoch_end(); + i>= attribution_window.epoch_start(); i--) { + + auto it = sources_per_epoch.find(i); + if (it != sources_per_epoch.end()) { + // Obtaining latest source (we fetched them in order from the database) + if (!sources_per_epoch[i].empty()) { + latest_source = sources_per_epoch[i].back(); + // Keep latest source to display in user logs + partition.logging_source = latest_source; + } + // Stop searching for more sources in other epochs + break; + } + } + + // Populate partition.report_value_pairs[*].report for all source_keys + if (latest_source.has_value()) { + auto aggregation_keys = (*latest_source)->aggregation_keys().keys(); + for (auto& pair : aggregation_keys) { + auto& source_key = pair.first; + auto& source_keypiece = pair.second; + auto& report_value_pair = partition.report_value_pairs[source_key]; + auto& trigger_keypieces = trigger_keypieces_per_source[source_key]; + + // Extend the source key_pieces for source_key + for (auto& trigger_keypiece : trigger_keypieces) { + source_keypiece |= trigger_keypiece; + } + report_value_pair.report.emplace_back(source_keypiece, report_value_pair.value); + } + } +} + +void AttributionLogicUniform(Partition& partition, + base::flat_map>& trigger_keypieces_per_source) { + + auto& attribution_window = partition.attribution_window; + auto& sources_per_epoch = partition.sources_per_epoch; + + double 
total_sources_count = 0; + + base::flat_map> + source_counts_per_sourcekey; + + + for (uint64_t i=attribution_window.epoch_start(); + i <= attribution_window.epoch_end(); i++) { + + // Ignore empty epochs + auto it = sources_per_epoch.find(i); + if (it == sources_per_epoch.end()) { + continue; + } + + // Count occurrences per source keypiece across all epochs + for (StoredSource* source : sources_per_epoch[i]) { + // Keep latest source to display in user logs + partition.logging_source = source; + auto aggregation_keys = source->aggregation_keys().keys(); + for (auto& pair : aggregation_keys) { + auto& source_key = pair.first; + auto& key_piece = pair.second; + + auto it1 = source_counts_per_sourcekey.find(source_key); + if (it1 == source_counts_per_sourcekey.end()) { + source_counts_per_sourcekey[source_key] = {}; + } + + auto& source_counts = source_counts_per_sourcekey[source_key]; + auto it2 = source_counts.find(key_piece); + if (it2 == source_counts.end()) { + source_counts[key_piece] = 0; + } + source_counts[key_piece] += 1; + } + total_sources_count++; + } + } + + // Populate partition.report_value_pairs[*].report for all source_keys + for (auto& outer : source_counts_per_sourcekey) { + auto source_key = outer.first; + auto& report_value_pair = partition.report_value_pairs[source_key]; + auto& trigger_keypieces = trigger_keypieces_per_source[source_key]; + + for (auto& inner : outer.second) { + auto source_keypiece = inner.first; + auto source_count = inner.second; + + // Extend the source key_pieces for source_key + for (auto& trigger_keypiece : trigger_keypieces) { + source_keypiece |= trigger_keypiece; + } + + double contribution_value = (source_count / total_sources_count) * report_value_pair.value; + report_value_pair.report.emplace_back(source_keypiece, contribution_value); + } + } + } + +void CreateAggregatableHistogramM2M( + Partition& partition, + const std::vector& aggregatable_trigger_data) { + + // Collect trigger keypieces per source_key + base::flat_map> trigger_keypieces_per_source; + for (const auto& data : aggregatable_trigger_data) { + for (const auto& source_key : data.source_keys()) { + + auto it = trigger_keypieces_per_source.find(source_key); + if (it == trigger_keypieces_per_source.end()) { + trigger_keypieces_per_source[source_key] = {}; + } + trigger_keypieces_per_source[source_key].push_back(data.key_piece()); + } + } + // Apply "attribution_logic" on the union of all epochs + if (partition.attribution_logic == "last_touch") { + AttributionLogicLastTouch(partition, trigger_keypieces_per_source); + } else if (partition.attribution_logic == "uniform") { + AttributionLogicUniform(partition, trigger_keypieces_per_source); + } +} + std::optional CreateAggregatableReportRequest( const AttributionReport& report) { base::Time source_time; diff --git a/content/browser/attribution_reporting/aggregatable_attribution_utils.h b/content/browser/attribution_reporting/aggregatable_attribution_utils.h index ed64a565ba4add..b226e55116151b 100644 --- a/content/browser/attribution_reporting/aggregatable_attribution_utils.h +++ b/content/browser/attribution_reporting/aggregatable_attribution_utils.h @@ -7,9 +7,13 @@ #include #include +#include +#include "base/metrics/histogram_functions.h" +#include "third_party/abseil-cpp/absl/numeric/int128.h" #include "components/attribution_reporting/source_type.mojom-forward.h" #include "content/common/content_export.h" +#include "content/browser/attribution_reporting/partition.h" namespace attribution_reporting { class 
AggregatableTriggerData; @@ -39,6 +43,16 @@ CreateAggregatableHistogram( const std::vector&, const attribution_reporting::AggregatableValues&); +CONTENT_EXPORT void AttributionLogicLastTouch(Partition& partition, + const base::flat_map>& trigger_keypieces_per_source); + +CONTENT_EXPORT void AttributionLogicUniform(Partition& partition, + const base::flat_map>& trigger_keypieces_per_source); + +CONTENT_EXPORT void CreateAggregatableHistogramM2M( + Partition& partition, + const std::vector& aggregatable_trigger_data); + // These values are persisted to logs. Entries should not be renumbered and // numeric values should never be reused. enum class AssembleAggregatableReportStatus { diff --git a/content/browser/attribution_reporting/aggregatable_histogram_contribution.cc b/content/browser/attribution_reporting/aggregatable_histogram_contribution.cc index 3e6d115ac761e3..b5d640c37bc487 100644 --- a/content/browser/attribution_reporting/aggregatable_histogram_contribution.cc +++ b/content/browser/attribution_reporting/aggregatable_histogram_contribution.cc @@ -10,9 +10,9 @@ namespace content { AggregatableHistogramContribution::AggregatableHistogramContribution( absl::uint128 key, - uint32_t value) + double value) : key_(key), value_(value) { - DCHECK_GT(value, 0u); + // DCHECK_GT(value, 0u); } } // namespace content diff --git a/content/browser/attribution_reporting/aggregatable_histogram_contribution.h b/content/browser/attribution_reporting/aggregatable_histogram_contribution.h index dd1020d010f980..9939888fb3171d 100644 --- a/content/browser/attribution_reporting/aggregatable_histogram_contribution.h +++ b/content/browser/attribution_reporting/aggregatable_histogram_contribution.h @@ -14,7 +14,7 @@ namespace content { class CONTENT_EXPORT AggregatableHistogramContribution { public: - AggregatableHistogramContribution(absl::uint128 key, uint32_t value); + AggregatableHistogramContribution(absl::uint128 key, double value); AggregatableHistogramContribution(const AggregatableHistogramContribution&) = default; AggregatableHistogramContribution& operator=( @@ -27,14 +27,14 @@ class CONTENT_EXPORT AggregatableHistogramContribution { absl::uint128 key() const { return key_; } - uint32_t value() const { return value_; } + double value() const { return value_; } friend bool operator==(const AggregatableHistogramContribution&, const AggregatableHistogramContribution&) = default; private: absl::uint128 key_; - uint32_t value_; + double value_; }; } // namespace content diff --git a/content/browser/attribution_reporting/attribution_internals.mojom b/content/browser/attribution_reporting/attribution_internals.mojom index 5cf8b98c00dd0c..8a239667fc7b6e 100644 --- a/content/browser/attribution_reporting/attribution_internals.mojom +++ b/content/browser/attribution_reporting/attribution_internals.mojom @@ -31,7 +31,7 @@ struct WebUIReportEventLevelData { struct AggregatableHistogramContribution { // Hex-encoded unsigned 128-bit integer. string key; - uint32 value; + double value; }; // Contains aggregatable attribution data to be displayed. 
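A minimal standalone sketch of the credit split performed by AttributionLogicUniform in the aggregatable_attribution_utils.cc hunk above: each attributed source contributes count/total of the trigger's value for a given aggregation key, and the report key is the source key piece OR'ed with every trigger key piece registered for that key. UniformSplit and Contribution are illustrative stand-ins, and uint64_t key pieces are used only to keep the sketch standard; the patch works with absl::uint128 and StoredSource.

#include <cstdint>
#include <map>
#include <vector>

// Illustrative stand-in for AggregatableHistogramContribution.
struct Contribution {
  uint64_t key;
  double value;
};

// source_counts: source key piece -> number of attributed sources carrying it,
// accumulated over every epoch in the attribution window.
std::vector<Contribution> UniformSplit(
    const std::map<uint64_t, int>& source_counts,
    const std::vector<uint64_t>& trigger_key_pieces,
    double trigger_value) {
  double total_sources = 0;
  for (const auto& entry : source_counts) {
    total_sources += entry.second;
  }

  std::vector<Contribution> contributions;
  for (const auto& [key_piece, count] : source_counts) {
    uint64_t key = key_piece;
    for (uint64_t trigger_piece : trigger_key_pieces) {
      key |= trigger_piece;  // Extend the source key piece, as in the patch.
    }
    // Each source gets an equal share of the trigger's value for this key.
    contributions.push_back({key, (count / total_sources) * trigger_value});
  }
  return contributions;
}

The last_touch variant in the same hunk instead walks epochs from newest to oldest, takes the most recent source in the first non-empty epoch, and gives it the full value.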
diff --git a/content/browser/attribution_reporting/attribution_manager_impl.cc b/content/browser/attribution_reporting/attribution_manager_impl.cc
index e02177f5840a02..fc39837eca902e 100644
--- a/content/browser/attribution_reporting/attribution_manager_impl.cc
+++ b/content/browser/attribution_reporting/attribution_manager_impl.cc
@@ -10,6 +10,7 @@
 #include
 #include
+#include "base/logging.h"
 #include "base/barrier_closure.h"
 #include "base/check.h"
 #include "base/check_op.h"
@@ -651,6 +652,7 @@ void AttributionManagerImpl::OnSourceStored(
 void AttributionManagerImpl::HandleTrigger(
     AttributionTrigger trigger,
     GlobalRenderFrameHostId render_frame_id) {
+
   MaybeEnqueueEvent(SourceOrTriggerRFH{.source_or_trigger = std::move(trigger),
                                        .rfh_id = render_frame_id});
 }
@@ -663,7 +665,7 @@ void AttributionManagerImpl::StoreTrigger(AttributionTrigger trigger,
     std::exchange(trigger.registration().debug_key, std::nullopt);
   }
 
-  attribution_storage_.AsyncCall(&AttributionStorage::MaybeCreateAndStoreReport)
+  attribution_storage_.AsyncCall(&AttributionStorage::MaybeCreateAndStoreReportM2M)
       .WithArgs(trigger)
       .Then(base::BindOnce(&AttributionManagerImpl::OnReportStored,
                            weak_factory_.GetWeakPtr(), std::move(trigger),
@@ -852,7 +854,7 @@ void AttributionManagerImpl::OnReportStored(
     min_new_report_time = report->report_time();
     MaybeSendDebugReport(std::move(*report));
   }
-
+  // TODO(kelly): here we might need to change the timing of the aggregatable report
   if (auto& report = result.new_aggregatable_report()) {
     min_new_report_time = AttributionReport::MinReportTime(
         min_new_report_time, report->report_time());
diff --git a/content/browser/attribution_reporting/attribution_storage.h b/content/browser/attribution_reporting/attribution_storage.h
index 0ce49abc16355f..f00d5958f789f6 100644
--- a/content/browser/attribution_reporting/attribution_storage.h
+++ b/content/browser/attribution_reporting/attribution_storage.h
@@ -56,6 +56,9 @@ class AttributionStorage {
   virtual CreateReportResult MaybeCreateAndStoreReport(
       const AttributionTrigger& trigger) = 0;
 
+  virtual CreateReportResult MaybeCreateAndStoreReportM2M(
+      const AttributionTrigger& trigger) = 0;
+
   // Returns all of the reports that should be sent before
   // |max_report_time|. This call is logically const, and does not modify the
   // underlying storage.
|limit| limits the number of reports to return; use diff --git a/content/browser/attribution_reporting/attribution_storage_sql.cc b/content/browser/attribution_reporting/attribution_storage_sql.cc index 4a132d91839d6e..af03a1ba9ac7cc 100644 --- a/content/browser/attribution_reporting/attribution_storage_sql.cc +++ b/content/browser/attribution_reporting/attribution_storage_sql.cc @@ -6,7 +6,8 @@ #include #include - +#include "base/logging.h" +#include "assert.h" #include #include #include @@ -17,6 +18,8 @@ #include #include #include +#include +#include #include "base/check.h" #include "base/check_op.h" @@ -52,6 +55,7 @@ #include "components/attribution_reporting/trigger_config.h" #include "components/attribution_reporting/trigger_data_matching.mojom.h" #include "components/attribution_reporting/trigger_registration.h" +#include "content/browser/attribution_reporting/partition.h" #include "content/browser/attribution_reporting/aggregatable_attribution_utils.h" #include "content/browser/attribution_reporting/aggregatable_histogram_contribution.h" #include "content/browser/attribution_reporting/attribution_features.h" @@ -89,6 +93,10 @@ namespace content { namespace { +constexpr double kInitialBudget = 10.0; +uint32_t kOptimization = 2; +char kSensitivityMetric[] = "L1"; + using AggregatableResult = ::content::AttributionTrigger::AggregatableResult; using EventLevelResult = ::content::AttributionTrigger::EventLevelResult; @@ -215,7 +223,7 @@ std::optional ColumnUint64OrNull(sql::Statement& statement, int col) { DeserializeUint64(statement.ColumnInt64(col))); } -constexpr int kSourceColumnCount = 19; +constexpr int kSourceColumnCount = 20; int64_t StorageFileSizeKB(const base::FilePath& path_to_database) { int64_t file_size = -1; @@ -263,6 +271,7 @@ AttributionStorageSql::ReadSourceFromStatement(sql::Statement& statement) { StoredSource::Id source_id(statement.ColumnInt64(col++)); uint64_t source_event_id = DeserializeUint64(statement.ColumnInt64(col++)); + uint64_t source_epoch = DeserializeUint64(statement.ColumnInt64(col++)); std::optional source_origin = SuitableOrigin::Deserialize(statement.ColumnString(col++)); std::optional reporting_origin = @@ -420,7 +429,7 @@ AttributionStorageSql::ReadSourceFromStatement(sql::Statement& statement) { std::optional stored_source = StoredSource::Create( CommonSourceInfo(std::move(*source_origin), std::move(*reporting_origin), *source_type), - source_event_id, std::move(*destination_set), source_time, expiry_time, + source_event_id, source_epoch, std::move(*destination_set), source_time, expiry_time, std::move(trigger_specs), aggregatable_report_window_time, max_event_level_reports, priority, std::move(*filter_data), debug_key, std::move(*aggregation_keys), *attribution_logic, *active_state, @@ -510,7 +519,8 @@ StoreSourceResult AttributionStorageSql::StoreSource( DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); CHECK(!source.registration().debug_key.has_value() || debug_cookie_set); - + LOG(INFO) << "STORING SOURCE" ; + LOG(INFO) << source.registration().ToJson() ; // Force the creation of the database if it doesn't exist, as we need to // persist the source. 
if (!LazyInit(DbCreationPolicy::kCreateIfAbsent)) { @@ -519,76 +529,78 @@ StoreSourceResult AttributionStorageSql::StoreSource( const base::Time source_time = base::Time::Now(); - if (StoreSourceResult result = CheckDestinationRateLimit(source, source_time); - !absl::holds_alternative(result.result())) { - return result; - } + // TODO(kelly): re-enable deleting sources (when is it OK to delete a source?) + + // if (StoreSourceResult result = CheckDestinationRateLimit(source, source_time); + // !absl::holds_alternative(result.result())) { + // return result; + // } // Only delete expired impressions periodically to avoid excessive DB // operations. - const base::TimeDelta delete_frequency = - delegate_->GetDeleteExpiredSourcesFrequency(); - DCHECK_GE(delete_frequency, base::TimeDelta()); - if (source_time - last_deleted_expired_sources_ >= delete_frequency) { - if (!DeleteExpiredSources()) { - return StoreSourceResult::InternalError(); - } - last_deleted_expired_sources_ = source_time; - } + // const base::TimeDelta delete_frequency = + // delegate_->GetDeleteExpiredSourcesFrequency(); + // DCHECK_GE(delete_frequency, base::TimeDelta()); + // if (source_time - last_deleted_expired_sources_ >= delete_frequency) { + // if (!DeleteExpiredSources()) { + // return StoreSourceResult::InternalError(); + // } + // last_deleted_expired_sources_ = source_time; + // } const CommonSourceInfo& common_info = source.common_info(); const std::string serialized_source_origin = common_info.source_origin().Serialize(); - if (!HasCapacityForStoringSource(serialized_source_origin, source_time)) { - if (int64_t file_size = StorageFileSizeKB(path_to_database_); - file_size > -1) { - base::UmaHistogramCounts10M( - "Conversions.Storage.Sql.FileSizeSourcesPerOriginLimitReached2", - file_size); - std::optional number_of_sources = NumberOfSources(); - if (number_of_sources.has_value()) { - CHECK_GT(*number_of_sources, 0); - base::UmaHistogramCounts1M( - "Conversions.Storage.Sql.FileSizeSourcesPerOriginLimitReached2." 
- "PerSource", - file_size * 1024 / *number_of_sources); - } - } - return StoreSourceResult::InsufficientSourceCapacity( - delegate_->GetMaxSourcesPerOrigin()); - } - - switch (rate_limit_table_.SourceAllowedForDestinationLimit(&db_, source, - source_time)) { - case RateLimitResult::kAllowed: - break; - case RateLimitResult::kNotAllowed: - return StoreSourceResult::InsufficientUniqueDestinationCapacity( - delegate_->GetMaxDestinationsPerSourceSiteReportingSite()); - case RateLimitResult::kError: - return StoreSourceResult::InternalError(); - } - - switch (rate_limit_table_.SourceAllowedForReportingOriginLimit(&db_, source, - source_time)) { - case RateLimitResult::kAllowed: - break; - case RateLimitResult::kNotAllowed: - return StoreSourceResult::ExcessiveReportingOrigins(); - case RateLimitResult::kError: - return StoreSourceResult::InternalError(); - } - - switch (rate_limit_table_.SourceAllowedForReportingOriginPerSiteLimit( - &db_, source, source_time)) { - case RateLimitResult::kAllowed: - break; - case RateLimitResult::kNotAllowed: - return StoreSourceResult::ReportingOriginsPerSiteLimitReached(); - case RateLimitResult::kError: - return StoreSourceResult::InternalError(); - } + // if (!HasCapacityForStoringSource(serialized_source_origin, source_time)) { + // if (int64_t file_size = StorageFileSizeKB(path_to_database_); + // file_size > -1) { + // base::UmaHistogramCounts10M( + // "Conversions.Storage.Sql.FileSizeSourcesPerOriginLimitReached2", + // file_size); + // std::optional number_of_sources = NumberOfSources(); + // if (number_of_sources.has_value()) { + // CHECK_GT(*number_of_sources, 0); + // base::UmaHistogramCounts1M( + // "Conversions.Storage.Sql.FileSizeSourcesPerOriginLimitReached2." + // "PerSource", + // file_size * 1024 / *number_of_sources); + // } + // } + // return StoreSourceResult::InsufficientSourceCapacity( + // delegate_->GetMaxSourcesPerOrigin()); + // } + + // switch (rate_limit_table_.SourceAllowedForDestinationLimit(&db_, source, + // source_time)) { + // case RateLimitResult::kAllowed: + // break; + // case RateLimitResult::kNotAllowed: + // return StoreSourceResult::InsufficientUniqueDestinationCapacity( + // delegate_->GetMaxDestinationsPerSourceSiteReportingSite()); + // case RateLimitResult::kError: + // return StoreSourceResult::InternalError(); + // } + + // switch (rate_limit_table_.SourceAllowedForReportingOriginLimit(&db_, source, + // source_time)) { + // case RateLimitResult::kAllowed: + // break; + // case RateLimitResult::kNotAllowed: + // return StoreSourceResult::ExcessiveReportingOrigins(); + // case RateLimitResult::kError: + // return StoreSourceResult::InternalError(); + // } + + // switch (rate_limit_table_.SourceAllowedForReportingOriginPerSiteLimit( + // &db_, source, source_time)) { + // case RateLimitResult::kAllowed: + // break; + // case RateLimitResult::kNotAllowed: + // return StoreSourceResult::ReportingOriginsPerSiteLimitReached(); + // case RateLimitResult::kError: + // return StoreSourceResult::InternalError(); + // } sql::Transaction transaction(&db_); if (!transaction.Begin()) { @@ -629,40 +641,40 @@ StoreSourceResult AttributionStorageSql::StoreSource( static constexpr char kInsertImpressionSql[] = "INSERT INTO sources" - "(source_event_id,source_origin," + "(source_event_id,source_epoch,source_origin," "reporting_origin,source_time," "expiry_time,aggregatable_report_window_time," "source_type,attribution_logic,priority,source_site," "num_attributions,event_level_active,aggregatable_active,debug_key," 
"aggregatable_budget_consumed,num_aggregatable_reports," "aggregatable_source,filter_data,read_only_source_data)" - "VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,0,0,?,?,?)"; + "VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,0,0,?,?,?)"; sql::Statement statement( db_.GetCachedStatement(SQL_FROM_HERE, kInsertImpressionSql)); statement.BindInt64(0, SerializeUint64(reg.source_event_id)); - statement.BindString(1, serialized_source_origin); - statement.BindString(2, common_info.reporting_origin().Serialize()); - statement.BindTime(3, source_time); - statement.BindTime(4, expiry_time); - statement.BindTime(5, aggregatable_report_window_time); - statement.BindInt(6, SerializeSourceType(common_info.source_type())); - statement.BindInt(7, SerializeAttributionLogic(attribution_logic)); - statement.BindInt64(8, reg.priority); - statement.BindString(9, common_info.source_site().Serialize()); - statement.BindInt(10, num_conversions); - statement.BindBool(11, event_level_active); - statement.BindBool(12, aggregatable_active); - - BindUint64OrNull(statement, 13, reg.debug_key); + statement.BindInt64(1, SerializeUint64(reg.source_epoch)); + statement.BindString(2, serialized_source_origin); + statement.BindString(3, common_info.reporting_origin().Serialize()); + statement.BindTime(4, source_time); + statement.BindTime(5, expiry_time); + statement.BindTime(6, aggregatable_report_window_time); + statement.BindInt(7, SerializeSourceType(common_info.source_type())); + statement.BindInt(8, SerializeAttributionLogic(attribution_logic)); + statement.BindInt64(9, reg.priority); + statement.BindString(10, common_info.source_site().Serialize()); + statement.BindInt(11, num_conversions); + statement.BindBool(12, event_level_active); + statement.BindBool(13, aggregatable_active); + + BindUint64OrNull(statement, 14, reg.debug_key); std::optional active_state = GetSourceActiveState(event_level_active, aggregatable_active); DCHECK(active_state.has_value()); - statement.BindBlob(14, SerializeAggregationKeys(reg.aggregation_keys)); - statement.BindBlob(15, SerializeFilterData(reg.filter_data)); - statement.BindBlob( - 16, SerializeReadOnlySourceData( + statement.BindBlob(15, SerializeAggregationKeys(reg.aggregation_keys)); + statement.BindBlob(16, SerializeFilterData(reg.filter_data)); + statement.BindBlob(17, SerializeReadOnlySourceData( reg.event_report_windows, reg.max_event_level_reports, randomized_response_data.rate(), reg.trigger_data_matching, debug_cookie_set)); @@ -688,7 +700,7 @@ StoreSourceResult AttributionStorageSql::StoreSource( } std::optional stored_source = StoredSource::Create( - source.common_info(), reg.source_event_id, reg.destination_set, + source.common_info(), reg.source_event_id, reg.source_epoch, reg.destination_set, source_time, expiry_time, std::move(trigger_specs), aggregatable_report_window_time, reg.max_event_level_reports, reg.priority, reg.filter_data, reg.debug_key, reg.aggregation_keys, @@ -929,13 +941,365 @@ bool HasAggregatableData( } // namespace -CreateReportResult AttributionStorageSql::MaybeCreateAndStoreReport( +CreateReportResult AttributionStorageSql::MaybeCreateAndStoreReportM2M( const AttributionTrigger& trigger) { DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); const attribution_reporting::TriggerRegistration& trigger_registration = trigger.registration(); + + const base::Time trigger_time = base::Time::Now(); + + AttributionInfo attribution_info( + trigger_time, trigger_registration.debug_key, + /*context_origin=*/trigger.destination_origin()); + + // Declarations for all of the various 
pieces of information which may be + // collected and/or returned as a result of computing new reports in order to + // produce a `CreateReportResult`. + + std::optional aggregatable_status; + std::optional new_aggregatable_report; + std::vector sources_to_attribute; + std::optional min_null_aggregatable_report_time; + CreateReportResult::Limits limits; + + auto assemble_report_result = + [&](std::optional new_aggregatable_status) { + aggregatable_status = aggregatable_status.has_value() + ? aggregatable_status + : new_aggregatable_status; + DCHECK(aggregatable_status.has_value()); + + if (!IsSuccessResult(*aggregatable_status)) { + new_aggregatable_report = std::nullopt; + } + + if (aggregatable_status == AggregatableResult::kInternalError) { + min_null_aggregatable_report_time.reset(); + } + // Not changing CreateReportResult to minimize changes + return CreateReportResult( + trigger_time, EventLevelResult::kNotRegistered, *aggregatable_status, + std::move(std::nullopt), + std::move(std::nullopt), + std::move(new_aggregatable_report), + !sources_to_attribute.empty() + ? std::make_optional(std::move(sources_to_attribute[0])) + : std::nullopt, + limits, std::move(std::nullopt), + min_null_aggregatable_report_time); + }; + + auto generate_null_reports_and_assemble_report_result = + [&](std::optional new_aggregatable_status) + VALID_CONTEXT_REQUIRED(sequence_checker_) { + DCHECK(!new_aggregatable_report.has_value()); + + if (!GenerateNullAggregatableReportsAndStoreReports( + trigger, attribution_info, new_aggregatable_report, + min_null_aggregatable_report_time)) { + min_null_aggregatable_report_time.reset(); + } + + return assemble_report_result(new_aggregatable_status); + }; + + + if (!HasAggregatableData(trigger_registration)) { + aggregatable_status = AggregatableResult::kNotRegistered; + } + + if (aggregatable_status.has_value()) { + return assemble_report_result(std::nullopt); + } + + if (!LazyInit(DbCreationPolicy::kCreateIfAbsent)) { + return assemble_report_result(AggregatableResult::kInternalError); + } + + std::vector source_ids_to_attribute; + if (!FindMatchingSourceForTriggerM2M(trigger, source_ids_to_attribute)) { + return assemble_report_result(AggregatableResult::kInternalError); + } + if (source_ids_to_attribute.empty()) { + return generate_null_reports_and_assemble_report_result( + AggregatableResult::kNoMatchingImpressions); + } + + for (auto source_id_to_attribute : source_ids_to_attribute) { + StoredSource source_to_attribute = ReadSourceToAttribute(source_id_to_attribute)->source; + if (source_to_attribute.filter_data().MatchesM2M(trigger_registration.filters)) { + sources_to_attribute.push_back(source_to_attribute); + } + } + + if (sources_to_attribute.empty()) { + return generate_null_reports_and_assemble_report_result( + AggregatableResult::kNoMatchingSourceFilterData); + } + + std::vector partitions; + if (!aggregatable_status.has_value()) { + if (AggregatableResult create_aggregatable_status = + MaybeCreateAggregatableAttributionReportM2M( + sources_to_attribute, trigger, partitions); + create_aggregatable_status != AggregatableResult::kSuccess) { + aggregatable_status = create_aggregatable_status; + } + } + + if (aggregatable_status == AggregatableResult::kInternalError) { + return assemble_report_result(AggregatableResult::kInternalError); + } + + if (aggregatable_status.has_value()) { + return generate_null_reports_and_assemble_report_result(std::nullopt); + } + + sql::Transaction transaction(&db_); + if (!transaction.Begin()) { + return 
assemble_report_result(AggregatableResult::kInternalError); + } + + std::optional store_aggregatable_status; + if (!aggregatable_status.has_value()) { + // DCHECK(new_aggregatable_report.has_value()); + // make sure function populates new_aggregatable report with final report + store_aggregatable_status = MaybeStoreAggregatableAttributionReportDataM2M( + attribution_info, + partitions, + new_aggregatable_report, + trigger, + sources_to_attribute[0] + ); + } + + if (store_aggregatable_status == AggregatableResult::kInternalError) { + return assemble_report_result(AggregatableResult::kInternalError); + } + + if (!IsSuccessResult(store_aggregatable_status)) { + new_aggregatable_report.reset(); + } + + // Stores null reports and the aggregatable report here to be in the same + // transaction. + if (!GenerateNullAggregatableReportsAndStoreReports( + trigger, attribution_info, new_aggregatable_report, + min_null_aggregatable_report_time)) { + min_null_aggregatable_report_time.reset(); + return assemble_report_result(AggregatableResult::kInternalError); + } + + // Early exit if done modifying the storage. Noised reports still need to + // clean sources. + if (!IsSuccessResult(store_aggregatable_status)) { + if (!transaction.Commit()) { + return assemble_report_result(AggregatableResult::kInternalError); + } + + return assemble_report_result(store_aggregatable_status); + } + + // Reports which are dropped do not need to make any further changes. + if (!IsSuccessResult(store_aggregatable_status)) { + if (!transaction.Commit()) { + return assemble_report_result(AggregatableResult::kInternalError); + } + + return assemble_report_result(store_aggregatable_status); + } + + if (!transaction.Commit()) { + return assemble_report_result(AggregatableResult::kInternalError); + } + + return assemble_report_result(store_aggregatable_status); +} + +bool AttributionStorageSql::FindMatchingSourceForTriggerM2M( + const AttributionTrigger& trigger, + std::vector& source_ids_to_attribute) { + auto start = std::chrono::high_resolution_clock::now(); + // TODO(kelly): sometimes the querying origin might be the publisher/source - extend in the future + const SuitableOrigin& querying_origin = trigger.destination_origin(); + + const attribution_reporting::TriggerRegistration& trigger_registration = + trigger.registration(); + + const attribution_reporting::AttributionWindow attribution_window = + trigger_registration.attribution_window; + // Get all sources from this attribution window - will filter them later + sql::Statement statement(db_.GetCachedStatement( + SQL_FROM_HERE, attribution_queries::kGetMatchingSourcesSqlM2M)); + + statement.BindString(0, net::SchemefulSite(querying_origin).Serialize()); + statement.BindInt64(1, SerializeUint64(attribution_window.epoch_start())); + statement.BindInt64(2, SerializeUint64(attribution_window.epoch_end())); + + while (statement.Step()) { + source_ids_to_attribute.push_back(StoredSource::Id(statement.ColumnInt64(0))); + } + auto end = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(end - start); + LOG(INFO) << "Source Time:"<& partitions, + const attribution_reporting::TriggerRegistration& trigger_registration) { + + auto& attribution_window = trigger_registration.attribution_window; + + // options = {"", "uniform", "weighted"} + if (trigger_registration.partitioning_logic == "") { + // No partitioning - take union of all epochs + Partition partition(attribution_window, + trigger_registration.attribution_logic); + + for (auto& pair : 
trigger_registration.aggregatable_values.values()) { + Partition::ReportValuePair report_value_pair; + report_value_pair.value = (double) pair.second; + partition.report_value_pairs[pair.first] = report_value_pair; + } + partitions.push_back(partition); + return true; + } + + // One epoch per partition + for (uint64_t i=attribution_window.epoch_start(); i <= attribution_window.epoch_end(); i++) { + Partition partition(*attribution_reporting::AttributionWindow().Create(i, i), + trigger_registration.attribution_logic); + partitions.push_back(partition); + } + + if (trigger_registration.partitioning_logic == "uniform") { + // Split Value uniformly across partitions + for (auto& pair : trigger_registration.aggregatable_values.values()) { + double per_partition_value = (double) pair.second / partitions.size(); + + for (auto& partition : partitions) { + Partition::ReportValuePair report_value_pair; + report_value_pair.value = per_partition_value; + partition.report_value_pairs[pair.first] = report_value_pair; + } + } + } else { + return false; + } + + return true; +} + +AggregatableResult +AttributionStorageSql::MaybeCreateAggregatableAttributionReportM2M( + std::vector& sources_to_attribute, + const AttributionTrigger& trigger, + std::vector& partitions) { + const attribution_reporting::TriggerRegistration& trigger_registration = + trigger.registration(); + + const attribution_reporting::AttributionWindow attribution_window = + trigger_registration.attribution_window; + + //-------------------------------------------------------------------------------- + // Sanity check: every source_key from trigger_data must exist in source aggregation_keys too + for (auto& pair : trigger_registration.aggregatable_values.values()) { + for (StoredSource& source_to_attribute : sources_to_attribute) { + auto aggregation_keys = source_to_attribute.aggregation_keys().keys(); + auto it = aggregation_keys.find(pair.first); + if (it == aggregation_keys.end()) { + return AggregatableResult::kInternalError; + } + } + } + //-------------------------------------------------------------------------------- + + //-------------------------------------------------------------------------------- + // Partitions epochs and splits Value among the partitions + if (!GetPartitions(partitions, trigger_registration)) { + return AggregatableResult::kInternalError; + } + + LOG(INFO) << "Partitions:"; + for (const auto& partition : partitions) { + LOG(INFO) << "Partition: " << partition.attribution_window.epoch_start() << "-" << partition.attribution_window.epoch_end() << std::endl; + for (const auto& pair : partition.report_value_pairs) { + LOG(INFO) << " Source Key: " << pair.first << ", value: " << pair.second.value << std::endl; + } + } + //-------------------------------------------------------------------------------- + + //-------------------------------------------------------------------------------- + // Groups sources_to_attribute by epoch + base::flat_map> sources_per_epoch; + for (StoredSource& source_to_attribute : sources_to_attribute) { + uint64_t epoch = source_to_attribute.source_epoch(); + + auto it = sources_per_epoch.find(epoch); + if (it == sources_per_epoch.end()) { + sources_per_epoch[epoch] = {}; + } + // Push source_to_attribute to epoch's sources + sources_per_epoch[epoch].push_back(&source_to_attribute); + } + + LOG(INFO) << "Sources per epoch:"; + for (const auto& pair : sources_per_epoch) { + LOG(INFO) << " Epoch: " << pair.first << std::endl; + for (const StoredSource* source : pair.second) { + LOG(INFO) 
<< " Source keypiece: " << source->source_event_id() << std::endl; + } + } + + // Move each epoch-group to the relevant partition + for (auto& partition : partitions) { + for (uint64_t i=partition.attribution_window.epoch_start(); + i <= partition.attribution_window.epoch_end(); i++) { + auto it = sources_per_epoch.find(i); + if (it != sources_per_epoch.end()) { + partition.sources_per_epoch[i] = std::move(sources_per_epoch[i]); + } + } + } + //-------------------------------------------------------------------------------- + + //-------------------------------------------------------------------------------- + // Create a report per partition + for (auto& partition : partitions) { + CreateAggregatableHistogramM2M( + partition, + trigger_registration.aggregatable_trigger_data); + } + + for (auto& partition : partitions) { + LOG(INFO) << "Partition...:"; + for (auto& pair : partition.report_value_pairs) { + auto& source_key = pair.first; + auto& report_value_pair = pair.second; + LOG(INFO) << "Source_key " << source_key << " value " << report_value_pair.value; + for (auto& keyvalue : report_value_pair.report) { + LOG(INFO) << " key " << keyvalue.key() << " value " << keyvalue.value(); + } + } + } + //-------------------------------------------------------------------------------- + + return AggregatableResult::kSuccess; +} + +CreateReportResult AttributionStorageSql::MaybeCreateAndStoreReport( + const AttributionTrigger& trigger) { + auto start = std::chrono::high_resolution_clock::now(); + DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_); + + const attribution_reporting::TriggerRegistration& trigger_registration = + trigger.registration(); + const base::Time trigger_time = base::Time::Now(); AttributionInfo attribution_info( @@ -1024,6 +1388,9 @@ CreateReportResult AttributionStorageSql::MaybeCreateAndStoreReport( } if (event_level_status.has_value() && aggregatable_status.has_value()) { + auto end = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(end - start); + LOG(INFO) << "Budget Time:"<(end - start); + LOG(INFO) << "Budget Time:"<(end - start); + LOG(INFO) << "Budget Time:"<(end - start); + LOG(INFO) << "Budget Time:"<(end - start); + LOG(INFO) << "Budget Time:"<(end - start); + LOG(INFO) << "Budget Time:"< + // |consumed_budget| is the budget that has been consumed so far from the filter . + static constexpr char kPerOriginFiltersTableSql[] = + "CREATE TABLE per_origin_filters(" + "epoch INTEGER NOT NULL," + "origin TEXT NOT NULL," + "initial_budget FLOAT NOT NULL," + "consumed_budget FLOAT NOT NULL," + "PRIMARY KEY (epoch, origin))"; + if (!db_.Execute(kPerOriginFiltersTableSql)) { + return false; + } + + // // All columns in this table are const except |budget_consumed| and + // // which are updated when a non null report is about to be send. + // // |epoch| is a primary key and represents the period of time covered by the filter + // // |initial_budget| is the initial budget capacity of the filter + // // |consumed_budget| is the budget that has been consumed so far from the filter . 
+ // static constexpr char kAllOriginsFiltersTableSql[] = + // "CREATE TABLE all_origins_filters(" + // "epoch INTEGER PRIMARY KEY NOT NULL," + // // "querying_origin_type TEXT NOT NULL," + // "initial_budget FLOAT NOT NULL," + // "consumed_budget FLOAT NOT NULL)"; + // if (!db_.Execute(kAllOriginsFiltersTableSql)) { + // return false; + // } + if (!rate_limit_table_.CreateTable(&db_)) { return false; } @@ -2999,6 +3415,272 @@ bool AttributionStorageSql::StoreAttributionReport(AttributionReport& report) { return true; } +AggregatableResult AttributionStorageSql::PayAllOrNothing( + attribution_reporting::AttributionWindow attribution_window, + const attribution_reporting::SuitableOrigin& querying_origin, + double required_budget) { + + std::vector attribution_epochs; + for (uint64_t i=attribution_window.epoch_start(); + i<=attribution_window.epoch_end(); ++i) { + attribution_epochs.push_back(i); + } + return PayAllOrNothing(attribution_epochs, querying_origin, required_budget); +} + +AggregatableResult AttributionStorageSql::PayAllOrNothing( + std::vector attribution_epochs, + const attribution_reporting::SuitableOrigin& querying_origin, + double required_budget) { + + // Starting atomic operation + sql::Transaction transaction(&db_); + if (!transaction.Begin()) { + return AggregatableResult::kInternalError; + } + + // Obtain remaining budget per epoch for all attribution epochs + std::ostringstream oss; + oss << \ + "SELECT F.epoch, (F.initial_budget - F.consumed_budget) AS remaining_budgets " + "FROM per_origin_filters F " + "WHERE F.origin=? " + "AND F.epoch IN ("; + + auto& items = attribution_epochs; + if (!items.empty()) { + std::copy(items.begin(), items.end() - 1, std::ostream_iterator(oss, ", ")); + oss << items.back(); + } + oss << ")"; + + sql::Statement statement(db_.GetUniqueStatement(oss.str().c_str())); + statement.BindString(0, net::SchemefulSite(querying_origin).Serialize()); + + base::flat_map remaining_budgets; + while (statement.Step()) { + remaining_budgets[statement.ColumnInt64(0)] = statement.ColumnDouble(1); + } + + // For epochs not already stored in the DB initialize their + // remaining budget and mark them to be inserted now + std::vector epochs_to_insert; + std::vector epochs_to_update; + for (auto& epoch : attribution_epochs) { + auto it = remaining_budgets.find(epoch); + if (it == remaining_budgets.end()) { + epochs_to_insert.push_back(epoch); + remaining_budgets[epoch] = kInitialBudget; + } else { + epochs_to_update.push_back(epoch); + } + } + + // ------------------------------------------------------- + LOG(INFO) << "Epochs to insert:"; + for (auto epoch : epochs_to_insert) { + LOG(INFO) << epoch; + } + LOG(INFO) << "Epochs to update:"; + for (auto epoch : epochs_to_update) { + LOG(INFO) << epoch; + } + + LOG(INFO) << "Remaining Budgets:"; + for (const auto& pair : remaining_budgets) { + LOG(INFO) << "- Epoch " << pair.first << " Budget " << pair.second; + } +// ------------------------------------------------------- + + // Check if all remaining budgets are enough for required_budget + for (const auto& pair : remaining_budgets) { + if (pair.second < required_budget) { + return AggregatableResult::kInsufficientBudget; + } + } + + // Insert epochs not already stored + static constexpr char kInsertEpochOrigin[] = + "INSERT INTO per_origin_filters" + "(epoch,origin,initial_budget,consumed_budget)" + "VALUES(?,?,?,?)"; + + for (auto& epoch : epochs_to_insert) { + sql::Statement stmt(db_.GetCachedStatement(SQL_FROM_HERE, kInsertEpochOrigin)); + stmt.BindInt64(0, 
epoch); + stmt.BindString(1, net::SchemefulSite(querying_origin).Serialize()); + stmt.BindDouble(2, kInitialBudget); + stmt.BindDouble(3, required_budget); + if (!stmt.Run()) { + return AggregatableResult::kInternalError; + } + } + + // Update epochs already stored + static constexpr char kAdjustBudgetConsumed[] = + "UPDATE per_origin_filters " + "SET consumed_budget=consumed_budget+?" + "WHERE epoch=? and origin=?"; + + for (auto& epoch : epochs_to_update) { + sql::Statement stmt(db_.GetCachedStatement(SQL_FROM_HERE, kAdjustBudgetConsumed)); + stmt.BindDouble(0, required_budget); + stmt.BindInt64(1, epoch); + stmt.BindString(2, net::SchemefulSite(querying_origin).Serialize()); + if (!stmt.Run()) { + return AggregatableResult::kInternalError; + } + } + + // TODO(kelly): optimize, don't do insertions/updates in a loop? + + // Finishing atomic operation + if (!transaction.Commit()) { + return AggregatableResult::kInternalError; + } + + return AggregatableResult::kSuccess; +} + + +AggregatableResult +AttributionStorageSql::MaybeStoreAggregatableAttributionReportDataM2M( + const AttributionInfo& attribution_info, + std::vector& partitions, + std::optional& report, + const AttributionTrigger& trigger, + StoredSource& source /*for retrocompatibility*/) { + + // Computes the budget-required per requested epoch for the querying origin using + // different optimization methods. Tries to consume budget from every requested epoch from the + // querying origin. Partitions whose epochs don't have remaining budget will have their reports NULLed. + // Sums the reports across partitions to create one attribution report. + + const SuitableOrigin& querying_origin = trigger.destination_origin(); + + const attribution_reporting::TriggerRegistration& trigger_registration = + trigger.registration(); + + const attribution_reporting::AggregatableValues& aggregatable_cap_values = + trigger_registration.aggregatable_cap_values; + + const double global_epsilon = trigger_registration.global_epsilon; + + // Compute global sensitivity + double global_sensitivity = 0; + if (std::strcmp(kSensitivityMetric, "L1") == 0) { + for (auto& pair : aggregatable_cap_values.values()) { + global_sensitivity += pair.second; + } + } else if (std::strcmp(kSensitivityMetric, "L2") == 0) { + // TODO(kelly) + // sum up histograms, raise each bucket value to power of 2 sum them up and sqrt + return AggregatableResult::kInternalError; + } else { + return AggregatableResult::kInternalError; + } + LOG(INFO) << "Global Sensitivity " << kSensitivityMetric << " " << global_sensitivity; + + // Budget accounting + for (auto& partition : partitions) { + if (kOptimization == 0) { + // No optimizations. Epochs in this partition pay worst case budget + if (PayAllOrNothing(partition.attribution_window, querying_origin, + global_epsilon) != AggregatableResult::kSuccess) { + partition.null_report(); + } + continue; + } + + if (partition.attribution_window.size() == 1) { + // Partition covers only one epoch. 
+ // The epoch in this partition pays budget based on its individual sensitivity. + // Assuming the Laplace mechanism. + double noise_scale = global_sensitivity / global_epsilon; + double p_individual_epsilon = + partition.compute_sensitivity(kSensitivityMetric) / noise_scale; + + LOG(INFO) << "One epoch case: noise scale " << noise_scale + << " individual epsilon " << p_individual_epsilon; + + if (PayAllOrNothing(partition.attribution_window, querying_origin, + p_individual_epsilon) != AggregatableResult::kSuccess) { + partition.null_report(); + } + } else { + // Partition is a union of at least two epochs. + if (kOptimization == 1) { + // Optimization 1 only optimizes single-epoch partitions, so this + // partition pays the worst-case budget. + if (PayAllOrNothing(partition.attribution_window, querying_origin, + global_epsilon) != AggregatableResult::kSuccess) { + partition.null_report(); + } + } else if (kOptimization == 2) { + std::vector<uint64_t> active_epochs; + for (uint64_t i=partition.attribution_window.epoch_start(); + i<=partition.attribution_window.epoch_end(); ++i) { + // Epochs with no sources do not pay any budget. + auto it = partition.sources_per_epoch.find(i); + if (it != partition.sources_per_epoch.end()) { + active_epochs.push_back(i); + } + } + if (PayAllOrNothing(active_epochs, querying_origin, + global_epsilon) != AggregatableResult::kSuccess) { + partition.null_report(); + } + } else { + return AggregatableResult::kInternalError; + } + } + } + + // Aggregate the per-source_key reports across all partitions. + base::flat_map value_per_bucket; + for (auto& partition : partitions) { + for (auto& pair : partition.report_value_pairs) { + for (auto& histogram_contribution : pair.second.report) { + auto key = histogram_contribution.key(); + auto it = value_per_bucket.find(key); + if (it == value_per_bucket.end()) { + value_per_bucket[key] = 0; + } + value_per_bucket[key] += histogram_contribution.value(); + } + } + } + + // Collect the histogram contributions in the expected format. + std::vector contributions; + for (auto& pair : value_per_bucket) { + contributions.emplace_back(pair.first, pair.second); + } + + LOG(INFO) << "Final Report"; + for (auto& contribution : contributions) { + LOG(INFO) << contribution.key() << " " << contribution.value(); + } + + // Not modifying AttributionReport to limit changes, + // so the "source" argument is kept for backward compatibility. + base::Time report_time = + GetAggregatableReportTime(trigger, attribution_info.time); + + report = AttributionReport( + attribution_info, AttributionReport::Id(kUnsetRecordId), report_time, + /*initial_report_time=*/report_time, delegate_->NewReportID(), + /*failed_send_attempts=*/0, + AttributionReport::AggregatableAttributionData( + AttributionReport::CommonAggregatableData( + trigger_registration.aggregation_coordinator_origin, + /*verification_token=*/std::nullopt, + trigger_registration.aggregatable_trigger_config), + std::move(contributions), source)); + + return AggregatableResult::kSuccess; +} + AggregatableResult AttributionStorageSql::MaybeStoreAggregatableAttributionReportData( AttributionReport& report, diff --git a/content/browser/attribution_reporting/attribution_storage_sql.h b/content/browser/attribution_reporting/attribution_storage_sql.h index a50b9bbeb02cf3..299738c4bc1d6a 100644 --- a/content/browser/attribution_reporting/attribution_storage_sql.h +++ b/content/browser/attribution_reporting/attribution_storage_sql.h @@ -18,6 +18,7 @@ #include "base/thread_annotations.h" #include "base/time/time.h" #include "base/types/expected.h" +#include
"content/browser/attribution_reporting/partition.h" #include "content/browser/attribution_reporting/attribution_report.h" #include "content/browser/attribution_reporting/attribution_storage.h" #include "content/browser/attribution_reporting/attribution_trigger.h" @@ -178,6 +179,9 @@ class CONTENT_EXPORT AttributionStorageSql : public AttributionStorage { bool delete_rate_limit_data) override; void SetDelegate(std::unique_ptr) override; + CreateReportResult MaybeCreateAndStoreReportM2M( + const AttributionTrigger& trigger) override; + [[nodiscard]] StoreSourceResult CheckDestinationRateLimit( const StorableSource& source, base::Time source_time); @@ -299,6 +303,15 @@ class CONTENT_EXPORT AttributionStorageSql : public AttributionStorage { std::vector& source_ids_to_deactivate) VALID_CONTEXT_REQUIRED(sequence_checker_); + bool FindMatchingSourceForTriggerM2M( + const AttributionTrigger& trigger, + std::vector& source_ids_to_attribute) + VALID_CONTEXT_REQUIRED(sequence_checker_); + +bool GetPartitions( + std::vector& partitions, + const attribution_reporting::TriggerRegistration& trigger_registration); + AttributionTrigger::EventLevelResult MaybeCreateEventLevelReport( const AttributionInfo& attribution_info, const StoredSource&, @@ -383,6 +396,23 @@ class CONTENT_EXPORT AttributionStorageSql : public AttributionStorage { std::optional& max_aggregatable_reports_per_destination) VALID_CONTEXT_REQUIRED(sequence_checker_); + AttributionTrigger::AggregatableResult + MaybeCreateAggregatableAttributionReportM2M( + std::vector& sources_to_attribute, + const AttributionTrigger& trigger, + std::vector& partitions) + VALID_CONTEXT_REQUIRED(sequence_checker_); + + AttributionTrigger::AggregatableResult PayAllOrNothing( + std::vector attribution_epochs, + const attribution_reporting::SuitableOrigin& querying_origin, + double required_budget) VALID_CONTEXT_REQUIRED(sequence_checker_); + + AttributionTrigger::AggregatableResult PayAllOrNothing( + attribution_reporting::AttributionWindow attribution_window, + const attribution_reporting::SuitableOrigin& querying_origin, + double required_budget) VALID_CONTEXT_REQUIRED(sequence_checker_); + // Stores the data associated with the aggregatable report, e.g. budget // consumed and dedup keys. The report itself will be stored in // `GenerateNullAggregatableReportsAndStoreReports()`. 
@@ -395,6 +425,15 @@ class CONTENT_EXPORT AttributionStorageSql : public AttributionStorage { std::optional& max_aggregatable_reports_per_source) VALID_CONTEXT_REQUIRED(sequence_checker_); + AttributionTrigger::AggregatableResult + MaybeStoreAggregatableAttributionReportDataM2M( + const AttributionInfo& attribution_info, + std::vector& partitions, + std::optional& report, + const AttributionTrigger& trigger, + StoredSource& source) + VALID_CONTEXT_REQUIRED(sequence_checker_); + [[nodiscard]] bool StoreAttributionReport(AttributionReport& report) VALID_CONTEXT_REQUIRED(sequence_checker_); diff --git a/content/browser/attribution_reporting/partition.cc b/content/browser/attribution_reporting/partition.cc new file mode 100644 index 00000000000000..d7b58fd3a708c4 --- /dev/null +++ b/content/browser/attribution_reporting/partition.cc @@ -0,0 +1,63 @@ + +#include "content/browser/attribution_reporting/partition.h" + +#include "components/attribution_reporting/attribution_window.h" +#include "content/browser/attribution_reporting/stored_source.h" +#include "content/browser/attribution_reporting/attribution_trigger.h" + + +namespace content { + +Partition::ReportValuePair::ReportValuePair() {} + +Partition::ReportValuePair::ReportValuePair(const ReportValuePair&) = + default; + +Partition::ReportValuePair& Partition::ReportValuePair::operator=( + const ReportValuePair&) = default; + +Partition::ReportValuePair::ReportValuePair(ReportValuePair&&) = default; + +Partition::ReportValuePair& Partition::ReportValuePair::operator=( + ReportValuePair&&) = default; + +Partition::ReportValuePair::~ReportValuePair() = default; + +Partition::Partition( + attribution_reporting::AttributionWindow attribution_window, + std::string attribution_logic) + : attribution_window(attribution_window), + attribution_logic(attribution_logic) {} + +Partition::~Partition() = default; + +Partition::Partition(const Partition&) = default; + +Partition::Partition(Partition&&) = default; + +double Partition::compute_sensitivity(const char* sensitivity_metric) { + double individual_sensitivity = 0; + if (std::strcmp(sensitivity_metric, "L1") == 0) { + for (auto& pair : report_value_pairs) { + for (auto& bucket : pair.second.report) { + individual_sensitivity += bucket.value(); + } + } + return individual_sensitivity; + } else if (std::strcmp(sensitivity_metric, "L2") == 0) { + // TODO(kelly) + // return AggregatableResult::kInternalError; + } else { + // return AggregatableResult::kInternalError; + } + return -1; +} + +void Partition::null_report() { + for (auto& pair : report_value_pairs) { + pair.second.report = {}; + } +} + + +} // namespace content diff --git a/content/browser/attribution_reporting/partition.h b/content/browser/attribution_reporting/partition.h new file mode 100644 index 00000000000000..056da9cb077755 --- /dev/null +++ b/content/browser/attribution_reporting/partition.h @@ -0,0 +1,50 @@ +#ifndef CONTENT_BROWSER_ATTRIBUTION_REPORTING_PARTITION_H_ +#define CONTENT_BROWSER_ATTRIBUTION_REPORTING_PARTITION_H_ + +#include "components/attribution_reporting/attribution_window.h" +#include "content/browser/attribution_reporting/stored_source.h" +#include "content/browser/attribution_reporting/aggregatable_histogram_contribution.h" + +namespace content { + + +// Groups together possibly many epochs +class CONTENT_EXPORT Partition { + public: + + struct CONTENT_EXPORT ReportValuePair { + ReportValuePair(); + ReportValuePair(const ReportValuePair&); + ReportValuePair& operator=(const ReportValuePair&); + 
ReportValuePair(ReportValuePair&&); + ReportValuePair& operator=(ReportValuePair&&); + ~ReportValuePair(); + + std::vector report; + double value; + }; + + Partition(attribution_reporting::AttributionWindow attribution_window, + std::string attribution_logic); + + ~Partition(); + + Partition(const Partition&); + Partition(Partition&&); + + double compute_sensitivity(const char* sensitivity_metric); + + void null_report(); + + + const attribution_reporting::AttributionWindow attribution_window; + const std::string attribution_logic; + base::flat_map report_value_pairs; + base::flat_map> sources_per_epoch; + std::optional logging_source; + +}; + +} // namespace content + +#endif // CONTENT_BROWSER_ATTRIBUTION_REPORTING_PARTITION_H_ diff --git a/content/browser/attribution_reporting/sql_queries.h b/content/browser/attribution_reporting/sql_queries.h index 51fef6021e066e..29d03ef173ddbd 100644 --- a/content/browser/attribution_reporting/sql_queries.h +++ b/content/browser/attribution_reporting/sql_queries.h @@ -31,6 +31,14 @@ inline constexpr const char kGetMatchingSourcesSql[] = "AND I.expiry_time>? " "ORDER BY I.priority DESC,I.source_id DESC"; +inline constexpr const char kGetMatchingSourcesSqlM2M[] = + "SELECT I.source_id " + "FROM sources I " + "JOIN source_destinations D " + "ON D.source_id=I.source_id AND D.destination_site=? " + "WHERE I.source_epoch>=? " + "AND I.source_epoch<=? "; + inline constexpr const char kSelectExpiredSourcesSql[] = "SELECT source_id FROM sources " "WHERE expiry_time<=? AND " @@ -113,6 +121,7 @@ inline constexpr const char kSetReportTimeSql[] = #define ATTRIBUTION_SOURCE_COLUMNS_SQL(prefix) \ prefix "source_id," \ prefix "source_event_id," \ + prefix "source_epoch," \ prefix "source_origin," \ prefix "reporting_origin," \ prefix "source_time," \ diff --git a/content/browser/attribution_reporting/stored_source.cc b/content/browser/attribution_reporting/stored_source.cc index c223ba42e58355..06792f913c355c 100644 --- a/content/browser/attribution_reporting/stored_source.cc +++ b/content/browser/attribution_reporting/stored_source.cc @@ -35,13 +35,14 @@ bool IsExpiryOrReportWindowTimeValid(base::Time expiry_or_report_window_time, } bool AreFieldsValid(int64_t aggregatable_budget_consumed, + uint64_t source_epoch, double randomized_response_rate, base::Time source_time, base::Time expiry_time, base::Time aggregatable_report_window_time, std::optional debug_key, bool debug_cookie_set) { - return aggregatable_budget_consumed >= 0 && randomized_response_rate >= 0 && + return aggregatable_budget_consumed >= 0 && source_epoch >=0 && randomized_response_rate >= 0 && randomized_response_rate <= 1 && IsExpiryOrReportWindowTimeValid(expiry_time, source_time) && IsExpiryOrReportWindowTimeValid(aggregatable_report_window_time, @@ -55,6 +56,7 @@ bool AreFieldsValid(int64_t aggregatable_budget_consumed, std::optional StoredSource::Create( CommonSourceInfo common_info, uint64_t source_event_id, + uint64_t source_epoch, attribution_reporting::DestinationSet destination_sites, base::Time source_time, base::Time expiry_time, @@ -73,13 +75,13 @@ std::optional StoredSource::Create( attribution_reporting::mojom::TriggerDataMatching trigger_data_matching, attribution_reporting::EventLevelEpsilon event_level_epsilon, bool debug_cookie_set) { - if (!AreFieldsValid(aggregatable_budget_consumed, randomized_response_rate, + if (!AreFieldsValid(aggregatable_budget_consumed, source_epoch, randomized_response_rate, source_time, expiry_time, aggregatable_report_window_time, debug_key, 
debug_cookie_set)) { return std::nullopt; } - return StoredSource(std::move(common_info), source_event_id, + return StoredSource(std::move(common_info), source_event_id, source_epoch, std::move(destination_sites), source_time, expiry_time, std::move(trigger_specs), aggregatable_report_window_time, max_event_level_reports, priority, std::move(filter_data), @@ -92,6 +94,7 @@ std::optional StoredSource::Create( StoredSource::StoredSource( CommonSourceInfo common_info, uint64_t source_event_id, + uint64_t source_epoch, attribution_reporting::DestinationSet destination_sites, base::Time source_time, base::Time expiry_time, @@ -112,6 +115,7 @@ StoredSource::StoredSource( bool debug_cookie_set) : common_info_(std::move(common_info)), source_event_id_(source_event_id), + source_epoch_(source_epoch), destination_sites_(std::move(destination_sites)), source_time_(source_time), expiry_time_(expiry_time), @@ -130,7 +134,7 @@ StoredSource::StoredSource( trigger_data_matching_(std::move(trigger_data_matching)), event_level_epsilon_(event_level_epsilon), debug_cookie_set_(debug_cookie_set) { - DCHECK(AreFieldsValid(aggregatable_budget_consumed_, + DCHECK(AreFieldsValid(aggregatable_budget_consumed_, source_epoch, randomized_response_rate_, source_time_, expiry_time_, aggregatable_report_window_time_, debug_key_, debug_cookie_set_)); diff --git a/content/browser/attribution_reporting/stored_source.h b/content/browser/attribution_reporting/stored_source.h index 7ac24b6c58a810..66c0c2580b445a 100644 --- a/content/browser/attribution_reporting/stored_source.h +++ b/content/browser/attribution_reporting/stored_source.h @@ -50,6 +50,7 @@ class CONTENT_EXPORT StoredSource { static std::optional Create( CommonSourceInfo common_info, uint64_t source_event_id, + uint64_t source_epoch, attribution_reporting::DestinationSet, base::Time source_time, base::Time expiry_time, @@ -81,6 +82,8 @@ class CONTENT_EXPORT StoredSource { uint64_t source_event_id() const { return source_event_id_; } + uint64_t source_epoch() const { return source_epoch_; } + const attribution_reporting::DestinationSet& destination_sites() const { return destination_sites_; } @@ -151,6 +154,7 @@ class CONTENT_EXPORT StoredSource { private: StoredSource(CommonSourceInfo common_info, uint64_t source_event_id, + uint64_t source_epoch, attribution_reporting::DestinationSet, base::Time source_time, base::Time expiry_time, @@ -173,6 +177,7 @@ class CONTENT_EXPORT StoredSource { CommonSourceInfo common_info_; uint64_t source_event_id_; + uint64_t source_epoch_; attribution_reporting::DestinationSet destination_sites_; base::Time source_time_; base::Time expiry_time_; diff --git a/content/browser/devtools/protocol/storage_handler.cc b/content/browser/devtools/protocol/storage_handler.cc index c5593a987fbccc..826fdd1740b047 100644 --- a/content/browser/devtools/protocol/storage_handler.cc +++ b/content/browser/devtools/protocol/storage_handler.cc @@ -5,6 +5,7 @@ #include "content/browser/devtools/protocol/storage_handler.h" #include +#include "base/logging.h" #include #include @@ -12,7 +13,6 @@ #include #include #include - #include "base/functional/bind.h" #include "base/notreached.h" #include "base/scoped_observation.h" @@ -2008,6 +2008,14 @@ ToEventTriggerData(const std::vector& return out; } +std::unique_ptr +ToAttributionWindow(const attribution_reporting::AttributionWindow& attribution_window) { + return Storage::AttributionReportingAttributionWindow::Create() + .SetEpochStart(base::NumberToString(attribution_window.epoch_start())) + 
.SetEpochEnd(base::NumberToString(attribution_window.epoch_end())) + .Build(); + } + std::unique_ptr> ToAggregatableTriggerData( const std::vector& @@ -2099,6 +2107,7 @@ void StorageHandler::OnSourceHandled( registration.aggregatable_report_window.InSeconds()) .SetTriggerDataMatching( ToTriggerDataMatching(registration.trigger_data_matching)) + .SetEpoch(base::NumberToString(registration.source_epoch)) .Build(); if (registration.debug_key.has_value()) { @@ -2130,6 +2139,12 @@ void StorageHandler::OnTriggerHandled(const AttributionTrigger& trigger, .SetSourceRegistrationTimeConfig(ToSourceRegistrationTimeConfig( registration.aggregatable_trigger_config .source_registration_time_config())) + .SetGlobalEpsilon(registration.global_epsilon) + .SetAttributionWindow(ToAttributionWindow(registration.attribution_window)) + .SetAggregatableCapValues( + ToAggregatableValueEntries(registration.aggregatable_cap_values)) + .SetAttributionLogic(registration.attribution_logic) + .SetPartitioningLogic(registration.partitioning_logic) .Build(); if (registration.debug_key.has_value()) { diff --git a/third_party/blink/public/devtools_protocol/browser_protocol.pdl b/third_party/blink/public/devtools_protocol/browser_protocol.pdl index f616dddd8845fb..7f711a782d17eb 100644 --- a/third_party/blink/public/devtools_protocol/browser_protocol.pdl +++ b/third_party/blink/public/devtools_protocol/browser_protocol.pdl @@ -10164,6 +10164,7 @@ experimental domain Storage array of AttributionReportingAggregationKeysEntry aggregationKeys optional UnsignedInt64AsBase10 debugKey AttributionReportingTriggerDataMatching triggerDataMatching + UnsignedInt64AsBase10 epoch experimental type AttributionReportingSourceRegistrationResult extends string enum @@ -10210,6 +10211,11 @@ experimental domain Storage array of string sourceKeys AttributionReportingFilterPair filters + experimental type AttributionReportingAttributionWindow extends object + properties + UnsignedInt64AsBase10 epochStart + UnsignedInt64AsBase10 epochEnd + experimental type AttributionReportingAggregatableDedupKey extends object properties optional UnsignedInt64AsBase10 dedupKey @@ -10227,6 +10233,11 @@ experimental domain Storage optional string aggregationCoordinatorOrigin AttributionReportingSourceRegistrationTimeConfig sourceRegistrationTimeConfig optional string triggerContextId + optional number globalEpsilon + AttributionReportingAttributionWindow attributionWindow + array of AttributionReportingAggregatableValueEntry aggregatableCapValues + optional string attributionLogic + optional string partitioningLogic experimental type AttributionReportingEventLevelResult extends string enum diff --git a/third_party/blink/renderer/core/frame/attribution_src_loader.cc b/third_party/blink/renderer/core/frame/attribution_src_loader.cc index 5d49715c6061b6..b130c30aa09e22 100644 --- a/third_party/blink/renderer/core/frame/attribution_src_loader.cc +++ b/third_party/blink/renderer/core/frame/attribution_src_loader.cc @@ -9,6 +9,7 @@ #include #include +#include "base/logging.h" #include "base/check.h" #include "base/check_op.h" #include "base/feature_list.h" @@ -868,6 +869,9 @@ void AttributionSrcLoader::ResourceClient::HandleSourceRegistration( attribution_reporting::SuitableOrigin reporting_origin) { DCHECK_NE(eligibility_, RegistrationEligibility::kTrigger); + LOG(INFO) << "HandleSourceRegistration" ; + LOG(INFO) << StringUTF8Adaptor(headers.web_trigger).AsStringPiece(); + headers.MaybeLogAllTriggerHeadersIgnored(loader_->local_frame_->DomWindow()); if 
(!HasEitherWebOrOsHeader(headers.source_count(), headers.request_id)) { @@ -922,6 +926,9 @@ void AttributionSrcLoader::ResourceClient::HandleSourceRegistration( /*invalid_parameter=*/headers.os_source); return; } + LOG(INFO) << "HandledSourceRegistration" ; + // LOG(INFO) << StringUTF8Adaptor(headers.web_trigger).AsStringPiece(); + data_host_->OsSourceDataAvailable(std::move(registration_items)); ++num_registrations_; } @@ -947,6 +954,8 @@ void AttributionSrcLoader::ResourceClient::HandleTriggerRegistration( headers.LogTriggerIgnored(loader_->local_frame_->DomWindow()); return; } + LOG(INFO) << "HandleTriggerRegistration" ; + LOG(INFO) << StringUTF8Adaptor(headers.web_trigger).AsStringPiece(); auto trigger_data = attribution_reporting::TriggerRegistration::Parse( StringUTF8Adaptor(headers.web_trigger).AsStringPiece()); diff --git a/third_party/depot_tools b/third_party/depot_tools index 32769fe9391bcb..4d2864f3a1e991 160000 --- a/third_party/depot_tools +++ b/third_party/depot_tools @@ -1 +1 @@ -Subproject commit 32769fe9391bcbe32eba867db2b358d47c170562 +Subproject commit 4d2864f3a1e991754ab8ab984e33ad399ee335f7 diff --git a/third_party/devtools-frontend/src b/third_party/devtools-frontend/src index f9e4033459b0e0..fda3b5e8f75617 160000 --- a/third_party/devtools-frontend/src +++ b/third_party/devtools-frontend/src @@ -1 +1 @@ -Subproject commit f9e4033459b0e0a9c70aa03d1d90c83e4f2077ba +Subproject commit fda3b5e8f756172438a4659613de80a3a7150c23 diff --git a/tools/metrics/histograms/enums.xml b/tools/metrics/histograms/enums.xml index 2cd66ef64076df..2bd55eaf1e741a 100644 --- a/tools/metrics/histograms/enums.xml +++ b/tools/metrics/histograms/enums.xml @@ -3887,6 +3887,7 @@ Called by update_net_error_codes.py.--> + @@ -3989,6 +3990,17 @@ Called by update_net_error_codes.py.--> + + + + + + + + + + +
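
The GetPartitions helper added in this patch splits the trigger's attribution window into partitions and divides each requested aggregatable value across them: an empty partitioning_logic produces a single partition over the whole window, while "uniform" produces one partition per epoch with the value split evenly ("weighted" is named in the comment but not implemented, and is omitted here too). A minimal self-contained sketch of that behaviour, where ToyPartition and MakePartitions are hypothetical stand-ins for the patch's Partition class and GetPartitions method:

#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace sketch {

struct ToyPartition {
  uint64_t epoch_start;
  uint64_t epoch_end;
  std::map<std::string, double> value_per_source_key;
};

// `values` maps a source key (e.g. "campaignCounts") to the value the
// trigger asks to attribute; `partitioning_logic` is "" or "uniform".
std::vector<ToyPartition> MakePartitions(
    uint64_t window_start,
    uint64_t window_end,
    const std::map<std::string, double>& values,
    const std::string& partitioning_logic) {
  std::vector<ToyPartition> partitions;
  if (partitioning_logic.empty()) {
    // No partitioning: one partition covering the whole window receives the
    // full value for every source key.
    ToyPartition p{window_start, window_end, {}};
    for (const auto& [key, value] : values) {
      p.value_per_source_key[key] = value;
    }
    partitions.push_back(std::move(p));
    return partitions;
  }
  // "uniform": one partition per epoch, each receiving an equal share.
  const double num_epochs = static_cast<double>(window_end - window_start + 1);
  for (uint64_t epoch = window_start; epoch <= window_end; ++epoch) {
    ToyPartition p{epoch, epoch, {}};
    for (const auto& [key, value] : values) {
      p.value_per_source_key[key] = value / num_epochs;
    }
    partitions.push_back(std::move(p));
  }
  return partitions;
}

}  // namespace sketch

For example, a window [1, 4] with {"campaignCounts": 400} and "uniform" yields four single-epoch partitions worth 100 each, matching the per-partition values the patch later turns into histogram contributions.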
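
The two PayAllOrNothing overloads implement per-(epoch, origin) privacy-budget filters with all-or-nothing semantics: every epoch touched by the query must have enough remaining budget, otherwise nothing is deducted anywhere and the caller nulls the partition's report. A small in-memory sketch of that contract, standing in for the per_origin_filters table and its transaction; PerOriginFilters is a hypothetical name and the kInitialBudget value is an illustrative assumption, not the patch's real constant:

#include <cstdint>
#include <map>
#include <string>
#include <utility>
#include <vector>

namespace sketch {

// Illustrative per-epoch capacity; the real value comes from the patch's
// kInitialBudget constant, which this diff does not show.
constexpr double kInitialBudget = 10.0;

class PerOriginFilters {
 public:
  // Deducts `required_budget` from every (epoch, origin) filter in `epochs`
  // and returns true, or deducts nothing and returns false if any single
  // epoch lacks the budget, mirroring the SQL transaction above.
  bool PayAllOrNothing(const std::vector<uint64_t>& epochs,
                       const std::string& origin,
                       double required_budget) {
    // Phase 1: check that every requested epoch can pay.
    for (uint64_t epoch : epochs) {
      auto it = remaining_.find({epoch, origin});
      const double remaining =
          it == remaining_.end() ? kInitialBudget : it->second;
      if (remaining < required_budget) {
        return false;  // Insufficient budget somewhere: charge nothing.
      }
    }
    // Phase 2: deduct everywhere, lazily initializing unseen epochs.
    for (uint64_t epoch : epochs) {
      auto it = remaining_.try_emplace({epoch, origin}, kInitialBudget).first;
      it->second -= required_budget;
    }
    return true;
  }

 private:
  std::map<std::pair<uint64_t, std::string>, double> remaining_;
};

}  // namespace sketch

With this shape, a partition spanning epochs {3, 4, 5} either charges all three epochs for the same epsilon or leaves all of them untouched and is reported as null.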
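
For a single-epoch partition, the budget accounting charges an individual epsilon derived from that partition's own contribution rather than the trigger's global epsilon: the Laplace noise scale is fixed from the worst case, b = global_sensitivity / global_epsilon, and the epoch then pays epsilon_individual = partition_sensitivity / b. A tiny numeric sketch of that arithmetic; the concrete numbers are made up for illustration:

#include <iostream>

int main() {
  // Assumed example inputs; in the patch they come from the trigger
  // registration and Partition::compute_sensitivity().
  const double global_epsilon = 1.0;          // epsilon requested by the trigger
  const double global_sensitivity = 400.0;    // L1 sum of the aggregatable cap values
  const double partition_sensitivity = 50.0;  // L1 mass this partition actually contributes

  // Noise scale is chosen once, from the worst case.
  const double noise_scale = global_sensitivity / global_epsilon;         // 400
  // Budget actually charged to the partition's single epoch.
  const double individual_epsilon = partition_sensitivity / noise_scale;  // 0.125

  std::cout << "noise scale " << noise_scale
            << ", individual epsilon " << individual_epsilon << "\n";
  return 0;
}

So a partition that contributes only an eighth of the worst-case mass is charged an eighth of the global epsilon, which is what the single-epoch branch computes before calling PayAllOrNothing.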