diff --git a/Cargo.lock b/Cargo.lock
index 0d4cb818..1fbf9dda 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -606,6 +606,7 @@ name = "databroker"
version = "0.4.7-dev.0"
dependencies = [
"anyhow",
+ "async-trait",
"axum",
"chrono",
"clap",
@@ -630,6 +631,7 @@ dependencies = [
"tokio-stream",
"tonic",
"tonic-mock",
+ "tonic-reflection",
"tracing",
"tracing-subscriber",
"uuid",
@@ -3140,6 +3142,19 @@ dependencies = [
"tonic",
]
+[[package]]
+name = "tonic-reflection"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7"
+dependencies = [
+ "prost",
+ "prost-types",
+ "tokio",
+ "tokio-stream",
+ "tonic",
+]
+
[[package]]
name = "tower"
version = "0.4.13"
diff --git a/README.md b/README.md
index 70751d2b..a7711a38 100644
--- a/README.md
+++ b/README.md
@@ -60,7 +60,7 @@ The [COVESA Vehicle Signal Specification](https://covesa.github.io/vehicle_signa
However, VSS does not define how these signals are to be collected and managed within a vehicle, nor does it prescribe how other components in the vehicle can read or write signal values from and to the tree.
-**Kuksa Databroker** is a resource efficient implementation of the VSS signal tree and is intended to be run within a vehicle on a microprocessor based platform. It allows applications in the vehicle to interact with the vehicle's sensors and actuators using a uniform, high level gRPC API for querying signals, updating current and target values of sensors and actuators and getting notified about changes to signals of interest.
+**Kuksa Databroker** is a resource efficient implementation of the VSS signal tree and is intended to be run within a vehicle on a microprocessor based platform. It allows applications in the vehicle to interact with the vehicle's sensors and actuators using a uniform, high level gRPC API for querying signals, updating values of sensors and actuators and getting notified about changes to signals of interest.
@@ -88,6 +88,21 @@ Data is usually exchanged with ECUs by means of a CAN bus or Ethernet based prot
(back to top )
+
+## Kuksa analysis
+Extended [Kuksa analysis](./doc/kuksa_analysis.md) containing functional requirements, use case diagrams, latest and new API definition `kuksa.val.v2` as well as new design discussions for future developments and improvements.
+
+### APIs supported by Databroker
+
+Kuksa Databroker implements the following service interfaces:
+
+- Enabled on Databroker by default [kuksa.val.v2.VAL](proto/kuksa/val/v2/val.proto) (recommended to use)
+- Enabled on Databroker by default [kuksa.val.v1.VAL](proto/kuksa/val/v1/val.proto)
+- Disabled on Databroker by default [sdv.databroker.v1.Broker](proto/sdv/databroker/v1/broker.proto)
+- Disabled on Databroker by default [sdv.databroker.v1.Collector](proto/sdv/databroker/v1/collector.proto)
+
+(back to top )
+
## Getting started
@@ -120,7 +135,8 @@ The quickest possible way to get Kuksa Databroker up and running.
### Reading and writing VSS data using the CLI
1. Start the CLI in a container attached to the _kuksa_ bridge network and connect to the Databroker container:
- The databroker supports both of `sdv.databroker.v1` and `kuksa.val.v1` as an API. Per default the databroker-cli uses the `sdv.databroker.v1` interface. To change it use `--protocol` option when starting. Choose either one of `kuksa-val-v1` and `sdv-databroker-v1`.
+
+    The databroker supports the latest API `kuksa.val.v2` and `kuksa.val.v1` by default, `sdv.databroker.v1` must be enabled using `--enable-databroker-v1`. Per default the databroker-cli uses the `kuksa.val.v1` interface, which can be changed by supplying the `--protocol` option when starting. Choose either `kuksa.val.v1` or `sdv.databroker.v1`, as databroker-cli still does not support `kuksa.val.v2`.
```sh
# in a new terminal
@@ -252,6 +268,15 @@ cargo test --all-targets
(back to top )
+## Performance
+The Kuksa team has released an official tool to measure the latency and throughput of the Databroker for all supported APIs:
+[kuksa-perf](https://github.com/eclipse-kuksa/kuksa-perf)
+
+The use case measures the time it takes for a signal to be transferred from the Provider to the Signal Consumer:
+Signal Consumer(stream subscribe) <- Databroker <- Provider(stream publish)
+
+Feel free to use it and share your results with us!
+
## Contributing
Please refer to the [Kuksa Contributing Guide](CONTRIBUTING.md).
diff --git a/data/vss-core/README.md b/data/vss-core/README.md
index dbba2cff..73289905 100644
--- a/data/vss-core/README.md
+++ b/data/vss-core/README.md
@@ -60,7 +60,7 @@ use the full name. When official release is created replace the copied *.json-fi
Build and run kuksa_databroker using the new VSS file according to [documentation](../../README.md), e.g.
```sh
-$cargo run --bin databroker -- --metadata ../data/vss-core/vss_release_4.0.json
+$cargo run --bin databroker -- --metadata ./data/vss-core/vss_release_4.0.json
```
Use the client to verify that changes in VSS are reflected, by doing e.g. set/get on some new or renamed signals.
diff --git a/databroker-cli/src/sdv_cli.rs b/databroker-cli/src/sdv_cli.rs
index e52dd642..0010c2a7 100644
--- a/databroker-cli/src/sdv_cli.rs
+++ b/databroker-cli/src/sdv_cli.rs
@@ -1263,6 +1263,8 @@ mod test {
change_type: proto::v1::ChangeType::OnChange.into(),
description: "".into(),
allowed: None,
+ min: None,
+ max: None,
},
proto::v1::Metadata {
id: 2,
@@ -1272,6 +1274,8 @@ mod test {
change_type: proto::v1::ChangeType::OnChange.into(),
description: "".into(),
allowed: None,
+ min: None,
+ max: None,
},
proto::v1::Metadata {
id: 3,
@@ -1281,6 +1285,8 @@ mod test {
change_type: proto::v1::ChangeType::OnChange.into(),
description: "".into(),
allowed: None,
+ min: None,
+ max: None,
},
]
.to_vec();
diff --git a/databroker-proto/build.rs b/databroker-proto/build.rs
index d02a006d..1a832c9f 100644
--- a/databroker-proto/build.rs
+++ b/databroker-proto/build.rs
@@ -11,6 +11,8 @@
* SPDX-License-Identifier: Apache-2.0
********************************************************************************/
+use std::{env, path::PathBuf};
+
fn main() -> Result<(), Box> {
std::env::set_var("PROTOC", protobuf_src::protoc());
tonic_build::configure()
@@ -23,8 +25,23 @@ fn main() -> Result<(), Box> {
"proto/sdv/databroker/v1/collector.proto",
"proto/kuksa/val/v1/val.proto",
"proto/kuksa/val/v1/types.proto",
+ "proto/kuksa/val/v2/val.proto",
+ "proto/kuksa/val/v2/types.proto",
],
&["proto"],
)?;
+
+ let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
+ tonic_build::configure()
+ .file_descriptor_set_path(out_dir.join("kuksa.val.v2_descriptor.bin"))
+ .compile(
+ &[
+ "proto/kuksa/val/v2/val.proto",
+ "proto/kuksa/val/v2/types.proto",
+ ],
+ &["proto"],
+ )
+ .unwrap();
+
Ok(())
}
diff --git a/databroker-proto/src/lib.rs b/databroker-proto/src/lib.rs
index 83fe2005..9bc5552c 100644
--- a/databroker-proto/src/lib.rs
+++ b/databroker-proto/src/lib.rs
@@ -143,5 +143,11 @@ pub mod kuksa {
}
}
}
+ pub mod v2 {
+ tonic::include_proto!("kuksa.val.v2");
+
+ pub const FILE_DESCRIPTOR_SET: &[u8] =
+ tonic::include_file_descriptor_set!("kuksa.val.v2_descriptor");
+ }
}
}
diff --git a/databroker/Cargo.toml b/databroker/Cargo.toml
index 492b0db7..ddfdd0dc 100644
--- a/databroker/Cargo.toml
+++ b/databroker/Cargo.toml
@@ -27,6 +27,7 @@ kuksa-common = { path = "../lib/common"}
kuksa = { path = "../lib/kuksa"}
databroker-proto = { workspace = true }
tonic = { workspace = true, features = ["transport", "channel", "prost"] }
+tonic-reflection = "0.11.0"
prost = { workspace = true }
prost-types = { workspace = true }
tokio = { workspace = true, features = [
@@ -60,9 +61,11 @@ jemallocator = { version = "0.5.0", optional = true }
lazy_static = "1.4.0"
thiserror = "1.0.47"
+futures = { version = "0.3.28" }
+async-trait = "0.1.82"
+
# VISS
axum = { version = "0.6.20", optional = true, features = ["ws"] }
-futures = { version = "0.3.28", optional = true }
chrono = { version = "0.4.31", optional = true, features = ["std"] }
uuid = { version = "1.4.1", optional = true, features = ["v4"] }
@@ -74,7 +77,7 @@ sd-notify = "0.4.1"
default = ["tls"]
tls = ["tonic/tls"]
jemalloc = ["dep:jemallocator"]
-viss = ["dep:axum", "dep:chrono", "dep:futures", "dep:uuid"]
+viss = ["dep:axum", "dep:chrono", "dep:uuid"]
libtest = []
[build-dependencies]
diff --git a/databroker/src/broker.rs b/databroker/src/broker.rs
index 4e098067..219b0464 100644
--- a/databroker/src/broker.rs
+++ b/databroker/src/broker.rs
@@ -34,13 +34,28 @@ use tracing::{debug, info, warn};
use crate::glob;
#[derive(Debug)]
-pub enum UpdateError {
+pub enum ActuationError {
NotFound,
WrongType,
OutOfBounds,
UnsupportedType,
PermissionDenied,
PermissionExpired,
+ ProviderNotAvailable,
+ ProviderAlreadyExists,
+ TransmissionFailure,
+}
+
+#[derive(Debug, PartialEq)]
+pub enum UpdateError {
+ NotFound,
+ WrongType,
+ OutOfBoundsAllowed,
+ OutOfBoundsMinMax,
+ OutOfBoundsType,
+ UnsupportedType,
+ PermissionDenied,
+ PermissionExpired,
}
#[derive(Debug, Clone)]
@@ -66,6 +81,9 @@ pub struct Metadata {
pub entry_type: EntryType,
pub change_type: ChangeType,
pub description: String,
+ // Min and Max are typically never arrays
+ pub min: Option,
+ pub max: Option,
pub allowed: Option,
pub unit: Option,
}
@@ -101,6 +119,7 @@ pub struct Database {
#[derive(Default)]
pub struct Subscriptions {
+ actuation_subscriptions: Vec,
query_subscriptions: Vec,
change_subscriptions: Vec,
}
@@ -118,6 +137,7 @@ pub struct QueryField {
#[derive(Debug)]
pub struct ChangeNotification {
+ pub id: i32,
pub update: EntryUpdate,
pub fields: HashSet,
}
@@ -145,9 +165,31 @@ pub struct DataBroker {
database: Arc>,
subscriptions: Arc>,
version: String,
+ commit_sha: String,
shutdown_trigger: broadcast::Sender<()>,
}
+#[async_trait::async_trait]
+pub trait ActuationProvider {
+ async fn actuate(
+ &self,
+ actuation_changes: Vec,
+ ) -> Result<(), (ActuationError, String)>;
+ fn is_available(&self) -> bool;
+}
+
+#[derive(Clone)]
+pub struct ActuationChange {
+ pub id: i32,
+ pub data_value: DataValue,
+}
+
+pub struct ActuationSubscription {
+ vss_ids: Vec,
+ actuation_provider: Box,
+ permissions: Permissions,
+}
+
pub struct QuerySubscription {
query: query::CompiledQuery,
sender: mpsc::Sender,
@@ -182,6 +224,8 @@ pub struct EntryUpdate {
// order to be able to convey "update it to None" which would
// mean setting it to `Some(None)`.
pub allowed: Option>,
+ pub min: Option >,
+ pub max: Option >,
pub unit: Option,
}
@@ -205,6 +249,12 @@ impl Entry {
update
}
+ pub fn validate_actuator_value(&self, data_value: &DataValue) -> Result<(), UpdateError> {
+ self.validate_value(data_value)?;
+ self.validate_allowed(data_value)?;
+ Ok(())
+ }
+
pub fn validate(&self, update: &EntryUpdate) -> Result<(), UpdateError> {
if let Some(datapoint) = &update.datapoint {
self.validate_value(&datapoint.value)?;
@@ -222,26 +272,42 @@ impl Entry {
Ok(())
}
+ /**
+ * DataType is VSS type, where we have also smaller type based on 8/16 bits
+ * That we do not have for DataValue
+ */
pub fn validate_allowed_type(&self, allowed: &Option) -> Result<(), UpdateError> {
if let Some(allowed_values) = allowed {
match (allowed_values, &self.metadata.data_type) {
(DataValue::BoolArray(_allowed_values), DataType::Bool) => Ok(()),
(DataValue::StringArray(_allowed_values), DataType::String) => Ok(()),
+ (DataValue::Int32Array(_allowed_values), DataType::Int8) => Ok(()),
+ (DataValue::Int32Array(_allowed_values), DataType::Int16) => Ok(()),
(DataValue::Int32Array(_allowed_values), DataType::Int32) => Ok(()),
(DataValue::Int64Array(_allowed_values), DataType::Int64) => Ok(()),
+ (DataValue::Uint32Array(_allowed_values), DataType::Uint8) => Ok(()),
+ (DataValue::Uint32Array(_allowed_values), DataType::Uint16) => Ok(()),
(DataValue::Uint32Array(_allowed_values), DataType::Uint32) => Ok(()),
(DataValue::Uint64Array(_allowed_values), DataType::Uint64) => Ok(()),
(DataValue::FloatArray(_allowed_values), DataType::Float) => Ok(()),
(DataValue::DoubleArray(_allowed_values), DataType::Double) => Ok(()),
(DataValue::BoolArray(_allowed_values), DataType::BoolArray) => Ok(()),
(DataValue::StringArray(_allowed_values), DataType::StringArray) => Ok(()),
+ (DataValue::Int32Array(_allowed_values), DataType::Int8Array) => Ok(()),
+ (DataValue::Int32Array(_allowed_values), DataType::Int16Array) => Ok(()),
(DataValue::Int32Array(_allowed_values), DataType::Int32Array) => Ok(()),
(DataValue::Int64Array(_allowed_values), DataType::Int64Array) => Ok(()),
+ (DataValue::Uint32Array(_allowed_values), DataType::Uint8Array) => Ok(()),
+ (DataValue::Uint32Array(_allowed_values), DataType::Uint16Array) => Ok(()),
(DataValue::Uint32Array(_allowed_values), DataType::Uint32Array) => Ok(()),
(DataValue::Uint64Array(_allowed_values), DataType::Uint64Array) => Ok(()),
(DataValue::FloatArray(_allowed_values), DataType::FloatArray) => Ok(()),
(DataValue::DoubleArray(_allowed_values), DataType::DoubleArray) => Ok(()),
- _ => Err(UpdateError::WrongType {}),
+ _ => {
+ debug!("Unexpected combination - VSS datatype is {:?}, but list of allowed value use {:?}",
+ &self.metadata.data_type, allowed_values);
+ Err(UpdateError::WrongType {})
+ }
}
} else {
// it is allowed to set allowed to None
@@ -256,56 +322,56 @@ impl Entry {
(DataValue::BoolArray(allowed_values), DataValue::Bool(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::DoubleArray(allowed_values), DataValue::Double(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::FloatArray(allowed_values), DataValue::Float(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::Int32Array(allowed_values), DataValue::Int32(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::Int64Array(allowed_values), DataValue::Int64(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::StringArray(allowed_values), DataValue::String(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::Uint32Array(allowed_values), DataValue::Uint32(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::Uint64Array(allowed_values), DataValue::Uint64(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::BoolArray(allowed_values), DataValue::BoolArray(value)) => {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -314,7 +380,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -323,7 +389,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -332,7 +398,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -341,7 +407,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -350,7 +416,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -359,7 +425,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -368,7 +434,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -380,12 +446,52 @@ impl Entry {
}
}
+ /// Checks if value fulfils min/max condition
+ /// Returns OutOfBounds if not fulfilled
+ fn validate_value_min_max(&self, value: &DataValue) -> Result<(), UpdateError> {
+ // Validate Min/Max
+ if let Some(min) = &self.metadata.min {
+ debug!("Checking min, comparing value {:?} and {:?}", value, min);
+ match value.greater_than_equal(min) {
+ Ok(true) => {}
+ _ => return Err(UpdateError::OutOfBoundsMinMax),
+ };
+ }
+ if let Some(max) = &self.metadata.max {
+ debug!("Checking max, comparing value {:?} and {:?}", value, max);
+ match value.less_than_equal(max) {
+ Ok(true) => {}
+ _ => return Err(UpdateError::OutOfBoundsMinMax),
+ };
+ }
+ Ok(())
+ }
+
fn validate_value(&self, value: &DataValue) -> Result<(), UpdateError> {
// Not available is always valid
if value == &DataValue::NotAvailable {
return Ok(());
}
+ // For numeric non-arrays check min/max
+ // For arrays we check later on value
+ match self.metadata.data_type {
+ DataType::Int8
+ | DataType::Int16
+ | DataType::Int32
+ | DataType::Int64
+ | DataType::Uint8
+ | DataType::Uint16
+ | DataType::Uint32
+ | DataType::Uint64
+ | DataType::Float
+ | DataType::Double => match self.validate_value_min_max(value) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ },
+ _ => {}
+ }
+
// Validate value
match self.metadata.data_type {
DataType::Bool => match value {
@@ -399,14 +505,14 @@ impl Entry {
DataType::Int8 => match value {
DataValue::Int32(value) => match i8::try_from(*value) {
Ok(_) => Ok(()),
- Err(_) => Err(UpdateError::OutOfBounds),
+ Err(_) => Err(UpdateError::OutOfBoundsType),
},
_ => Err(UpdateError::WrongType),
},
DataType::Int16 => match value {
DataValue::Int32(value) => match i16::try_from(*value) {
Ok(_) => Ok(()),
- Err(_) => Err(UpdateError::OutOfBounds),
+ Err(_) => Err(UpdateError::OutOfBoundsType),
},
_ => Err(UpdateError::WrongType),
},
@@ -422,14 +528,14 @@ impl Entry {
DataType::Uint8 => match value {
DataValue::Uint32(value) => match u8::try_from(*value) {
Ok(_) => Ok(()),
- Err(_) => Err(UpdateError::OutOfBounds),
+ Err(_) => Err(UpdateError::OutOfBoundsType),
},
_ => Err(UpdateError::WrongType),
},
DataType::Uint16 => match value {
DataValue::Uint32(value) => match u16::try_from(*value) {
Ok(_) => Ok(()),
- Err(_) => Err(UpdateError::OutOfBounds),
+ Err(_) => Err(UpdateError::OutOfBoundsType),
},
_ => Err(UpdateError::WrongType),
},
@@ -459,106 +565,138 @@ impl Entry {
},
DataType::Int8Array => match &value {
DataValue::Int32Array(array) => {
- let mut out_of_bounds = false;
for value in array {
match i8::try_from(*value) {
- Ok(_) => {}
- Err(_) => {
- out_of_bounds = true;
- break;
- }
+ Ok(_) => match self.validate_value_min_max(&DataValue::Int32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ },
+ Err(_) => return Err(UpdateError::OutOfBoundsType),
}
}
- if out_of_bounds {
- Err(UpdateError::OutOfBounds)
- } else {
- Ok(())
- }
+ Ok(())
}
_ => Err(UpdateError::WrongType),
},
DataType::Int16Array => match &value {
DataValue::Int32Array(array) => {
- let mut out_of_bounds = false;
for value in array {
match i16::try_from(*value) {
- Ok(_) => {}
- Err(_) => {
- out_of_bounds = true;
- break;
- }
+ Ok(_) => match self.validate_value_min_max(&DataValue::Int32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ },
+ Err(_) => return Err(UpdateError::OutOfBoundsType),
}
}
- if out_of_bounds {
- Err(UpdateError::OutOfBounds)
- } else {
- Ok(())
- }
+ Ok(())
}
_ => Err(UpdateError::WrongType),
},
DataType::Int32Array => match value {
- DataValue::Int32Array(_) => Ok(()),
+ DataValue::Int32Array(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Int32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::Int64Array => match value {
- DataValue::Int64Array(_) => Ok(()),
+ DataValue::Int64Array(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Int64(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::Uint8Array => match &value {
DataValue::Uint32Array(array) => {
- let mut out_of_bounds = false;
for value in array {
match u8::try_from(*value) {
- Ok(_) => {}
- Err(_) => {
- out_of_bounds = true;
- break;
+ Ok(_) => {
+ match self.validate_value_min_max(&DataValue::Uint32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
}
+ Err(_) => return Err(UpdateError::OutOfBoundsType),
}
}
- if out_of_bounds {
- Err(UpdateError::OutOfBounds)
- } else {
- Ok(())
- }
+ Ok(())
}
_ => Err(UpdateError::WrongType),
},
DataType::Uint16Array => match &value {
DataValue::Uint32Array(array) => {
- let mut out_of_bounds = false;
for value in array {
match u16::try_from(*value) {
- Ok(_) => {}
- Err(_) => {
- out_of_bounds = true;
- break;
+ Ok(_) => {
+ match self.validate_value_min_max(&DataValue::Uint32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
}
+ Err(_) => return Err(UpdateError::OutOfBoundsType),
}
}
- if out_of_bounds {
- Err(UpdateError::OutOfBounds)
- } else {
- Ok(())
- }
+ Ok(())
}
_ => Err(UpdateError::WrongType),
},
DataType::Uint32Array => match value {
- DataValue::Uint32Array(_) => Ok(()),
+ DataValue::Uint32Array(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Uint32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::Uint64Array => match value {
- DataValue::Uint64Array(_) => Ok(()),
+ DataValue::Uint64Array(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Uint64(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::FloatArray => match value {
- DataValue::FloatArray(_) => Ok(()),
+ DataValue::FloatArray(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Float(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::DoubleArray => match value {
- DataValue::DoubleArray(_) => Ok(()),
+ DataValue::DoubleArray(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Double(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
}
@@ -599,6 +737,10 @@ pub enum SuccessfulUpdate {
}
impl Subscriptions {
+ pub fn add_actuation_subscription(&mut self, subscription: ActuationSubscription) {
+ self.actuation_subscriptions.push(subscription);
+ }
+
pub fn add_query_subscription(&mut self, subscription: QuerySubscription) {
self.query_subscriptions.push(subscription)
}
@@ -648,6 +790,7 @@ impl Subscriptions {
}
pub fn clear(&mut self) {
+ self.actuation_subscriptions.clear();
self.query_subscriptions.clear();
self.change_subscriptions.clear();
}
@@ -665,18 +808,23 @@ impl Subscriptions {
if sub.sender.is_closed() {
info!("Subscriber gone: removing subscription");
false
+ } else if sub.permissions.is_expired() {
+ info!("Permissions of Subscriber expired: removing subscription");
+ false
} else {
- match &sub.permissions.expired() {
- Ok(()) => true,
- Err(PermissionError::Expired) => {
- info!("Token expired: removing subscription");
- false
- }
- Err(err) => {
- info!("Error: {:?} -> removing subscription", err);
- false
- }
- }
+ true
+ }
+ });
+
+ self.actuation_subscriptions.retain(|sub| {
+ if !sub.actuation_provider.is_available() {
+ info!("Provider gone: removing subscription");
+ false
+ } else if sub.permissions.is_expired() {
+ info!("Permissions of Provider expired: removing subscription");
+ false
+ } else {
+ true
}
});
}
@@ -729,6 +877,7 @@ impl ChangeSubscription {
// fill unit field always
update.unit.clone_from(&entry.metadata.unit);
notifications.updates.push(ChangeNotification {
+ id: *id,
update,
fields: notify_fields,
});
@@ -778,6 +927,7 @@ impl ChangeSubscription {
notify_fields.insert(Field::ActuatorTarget);
}
notifications.updates.push(ChangeNotification {
+ id: *id,
update,
fields: notify_fields,
});
@@ -1130,6 +1280,8 @@ impl<'a, 'b> DatabaseWriteAccess<'a, 'b> {
change_type: ChangeType,
entry_type: EntryType,
description: String,
+ min: Option,
+ max: Option,
allowed: Option,
datapoint: Option,
unit: Option,
@@ -1162,6 +1314,8 @@ impl<'a, 'b> DatabaseWriteAccess<'a, 'b> {
entry_type,
description,
allowed,
+ min,
+ max,
unit,
},
datapoint: match datapoint.clone() {
@@ -1256,6 +1410,8 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> {
change_type: ChangeType,
entry_type: EntryType,
description: String,
+ min: Option,
+ max: Option,
allowed: Option,
unit: Option,
) -> Result {
@@ -1270,6 +1426,8 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> {
change_type,
entry_type,
description,
+ min,
+ max,
allowed,
None,
unit,
@@ -1530,16 +1688,309 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> {
Err(e) => Err(QueryError::CompilationError(format!("{e:?}"))),
}
}
+
+ pub async fn provide_actuation(
+ &self,
+ vss_ids: Vec,
+ actuation_provider: Box,
+ ) -> Result<(), (ActuationError, String)> {
+ for vss_id in vss_ids.clone() {
+ self.can_write_actuator_target(&vss_id).await?;
+ }
+
+ let provided_vss_ids: Vec = self
+ .broker
+ .subscriptions
+ .read()
+ .await
+ .actuation_subscriptions
+ .iter()
+ .flat_map(|subscription| subscription.vss_ids.clone())
+ .collect();
+ let intersection: Vec<&i32> = vss_ids
+ .iter()
+ .filter(|&x| provided_vss_ids.contains(x))
+ .collect();
+ if !intersection.is_empty() {
+ let message = format!(
+ "Providers for the following vss_ids already registered: {:?}",
+ intersection
+ );
+ return Err((ActuationError::ProviderAlreadyExists, message));
+ }
+
+ let actuation_subscription: ActuationSubscription = ActuationSubscription {
+ vss_ids,
+ actuation_provider,
+ permissions: self.permissions.clone(),
+ };
+ self.broker
+ .subscriptions
+ .write()
+ .await
+ .add_actuation_subscription(actuation_subscription);
+
+ Ok(())
+ }
+
+ async fn map_actuation_changes_by_vss_id(
+ &self,
+ actuation_changes: Vec,
+ ) -> HashMap> {
+ let mut actuation_changes_per_vss_id: HashMap> =
+ HashMap::with_capacity(actuation_changes.len());
+ for actuation_change in actuation_changes {
+ let vss_id = actuation_change.id;
+
+ let opt_vss_ids = actuation_changes_per_vss_id.get_mut(&vss_id);
+ match opt_vss_ids {
+ Some(vss_ids) => {
+ vss_ids.push(actuation_change.clone());
+ }
+ None => {
+ let vec = vec![actuation_change.clone()];
+ actuation_changes_per_vss_id.insert(vss_id, vec);
+ }
+ }
+ }
+ actuation_changes_per_vss_id
+ }
+
+ pub async fn batch_actuate(
+ &self,
+ actuation_changes: Vec,
+ ) -> Result<(), (ActuationError, String)> {
+ let read_subscription_guard = self.broker.subscriptions.read().await;
+ let actuation_subscriptions = &read_subscription_guard.actuation_subscriptions;
+
+ for actuation_change in &actuation_changes {
+ let vss_id = actuation_change.id;
+ self.can_write_actuator_target(&vss_id).await?;
+ self.validate_actuator_update(&vss_id, &actuation_change.data_value)
+ .await?;
+ }
+
+ let actuation_changes_per_vss_id = &self
+ .map_actuation_changes_by_vss_id(actuation_changes)
+ .await;
+ for actuation_change_per_vss_id in actuation_changes_per_vss_id {
+ let vss_id = *actuation_change_per_vss_id.0;
+ let actuation_changes = actuation_change_per_vss_id.1.clone();
+
+ let opt_actuation_subscription = actuation_subscriptions
+ .iter()
+ .find(|subscription| subscription.vss_ids.contains(&vss_id));
+ match opt_actuation_subscription {
+ Some(actuation_subscription) => {
+ let is_expired = actuation_subscription.permissions.is_expired();
+ if is_expired {
+ let message = format!(
+ "Permission for vss_ids {:?} expired",
+ actuation_subscription.vss_ids
+ );
+ return Err((ActuationError::PermissionExpired, message));
+ }
+
+ if !actuation_subscription.actuation_provider.is_available() {
+ let message = format!("Provider for vss_id {} does not exist", vss_id);
+ return Err((ActuationError::ProviderNotAvailable, message));
+ }
+
+ actuation_subscription
+ .actuation_provider
+ .actuate(actuation_changes)
+ .await?
+ }
+ None => {
+ let message = format!("Provider for vss_id {} not available", vss_id);
+ return Err((ActuationError::ProviderNotAvailable, message));
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ pub async fn actuate(
+ &self,
+ vss_id: &i32,
+ data_value: &DataValue,
+ ) -> Result<(), (ActuationError, String)> {
+ let vss_id = *vss_id;
+
+ self.can_write_actuator_target(&vss_id).await?;
+ self.validate_actuator_update(&vss_id, data_value).await?;
+
+ let read_subscription_guard = self.broker.subscriptions.read().await;
+ let opt_actuation_subscription = &read_subscription_guard
+ .actuation_subscriptions
+ .iter()
+ .find(|subscription| subscription.vss_ids.contains(&vss_id));
+ match opt_actuation_subscription {
+ Some(actuation_subscription) => {
+ let is_expired = actuation_subscription.permissions.is_expired();
+ if is_expired {
+ let message = format!(
+ "Permission for vss_ids {:?} expired",
+ actuation_subscription.vss_ids
+ );
+ return Err((ActuationError::PermissionExpired, message));
+ }
+
+ if !actuation_subscription.actuation_provider.is_available() {
+ let message = format!("Provider for vss_id {} does not exist", vss_id);
+ return Err((ActuationError::ProviderNotAvailable, message));
+ }
+
+ actuation_subscription
+ .actuation_provider
+ .actuate(vec![ActuationChange {
+ id: vss_id,
+ data_value: data_value.clone(),
+ }])
+ .await
+ }
+ None => {
+ let message = format!("Provider for vss_id {} does not exist", vss_id);
+ Err((ActuationError::ProviderNotAvailable, message))
+ }
+ }
+ }
+
+ async fn can_write_actuator_target(
+ &self,
+ vss_id: &i32,
+ ) -> Result<(), (ActuationError, String)> {
+ let result_entry = self.get_entry_by_id(*vss_id).await;
+ match result_entry {
+ Ok(entry) => {
+ let vss_path = entry.metadata.path;
+ let result_can_write_actuator =
+ self.permissions.can_write_actuator_target(&vss_path);
+ match result_can_write_actuator {
+ Ok(_) => Ok(()),
+ Err(PermissionError::Denied) => {
+ let message = format!("Permission denied for vss_path {}", vss_path);
+ Err((ActuationError::PermissionDenied, message))
+ }
+ Err(PermissionError::Expired) => Err((
+ ActuationError::PermissionExpired,
+ "Permission expired".to_string(),
+ )),
+ }
+ }
+ Err(ReadError::NotFound) => {
+ let message = format!("Could not resolve vss_path of vss_id {}", vss_id);
+ Err((ActuationError::NotFound, message))
+ }
+ Err(ReadError::PermissionDenied) => {
+ let message = format!("Permission denied for vss_id {}", vss_id);
+ Err((ActuationError::PermissionDenied, message))
+ }
+ Err(ReadError::PermissionExpired) => Err((
+ ActuationError::PermissionExpired,
+ "Permission expired".to_string(),
+ )),
+ }
+ }
+
+ async fn validate_actuator_update(
+ &self,
+ vss_id: &i32,
+ data_value: &DataValue,
+ ) -> Result<(), (ActuationError, String)> {
+ let result_entry = self.get_entry_by_id(*vss_id).await;
+ match result_entry {
+ Ok(entry) => {
+ let metadata = entry.metadata.clone();
+ let vss_path = metadata.path;
+ if metadata.entry_type != EntryType::Actuator {
+ let message = format!("Tried to set a value for a non-actuator: {}", vss_path);
+ return Err((ActuationError::WrongType, message));
+ }
+ let validation = entry.validate_actuator_value(data_value);
+ match validation {
+ Ok(_) => Ok(()),
+ Err(UpdateError::OutOfBoundsMinMax) => {
+ let message = format!(
+ "Out of bounds min/max value provided for {}: {} | Expected range [min: {}, max: {}]",
+ vss_path,
+ data_value,
+ metadata.min.map_or("None".to_string(), |value| value.to_string()),
+ metadata.max.map_or("None".to_string(), |value| value.to_string()),
+ );
+ Err((ActuationError::OutOfBounds, message.to_string()))
+ }
+ Err(UpdateError::OutOfBoundsAllowed) => {
+ let message = format!(
+ "Out of bounds allowed value provided for {}: {} | Expected values [{}]",
+ vss_path,
+ data_value,
+ metadata.allowed.map_or("None".to_string(), |value| value.to_string())
+ );
+ Err((ActuationError::OutOfBounds, message.to_string()))
+ }
+ Err(UpdateError::OutOfBoundsType) => {
+ let message = format!(
+ "Out of bounds type value provided for {}: {} | overflow for {}",
+ vss_path, data_value, metadata.data_type,
+ );
+ Err((ActuationError::OutOfBounds, message.to_string()))
+ }
+ Err(UpdateError::UnsupportedType) => {
+ let message = format!(
+ "Unsupported type for vss_path {}. Expected type: {}",
+ vss_path, metadata.data_type
+ );
+ Err((ActuationError::UnsupportedType, message))
+ }
+ Err(UpdateError::WrongType) => {
+ let message = format!(
+ "Wrong type for vss_path {}. Expected type: {}",
+ vss_path, metadata.data_type
+ );
+ Err((ActuationError::WrongType, message))
+ }
+ // Redundant errors in case UpdateError includes new errors in the future
+ Err(UpdateError::NotFound) => {
+ let message = format!("Could not resolve vss_path {}", vss_path);
+ Err((ActuationError::NotFound, message))
+ }
+ Err(UpdateError::PermissionDenied) => {
+ let message = format!("Permission denied for vss_path {}", vss_path);
+ Err((ActuationError::PermissionDenied, message))
+ }
+ Err(UpdateError::PermissionExpired) => Err((
+ ActuationError::PermissionExpired,
+ "Permission expired".to_string(),
+ )),
+ }
+ }
+ Err(ReadError::NotFound) => {
+ let message = format!("Could not resolve vss_path of vss_id {}", vss_id);
+ Err((ActuationError::NotFound, message))
+ }
+ Err(ReadError::PermissionDenied) => {
+ let message = format!("Permission denied for vss_id {}", vss_id);
+ Err((ActuationError::PermissionDenied, message))
+ }
+ Err(ReadError::PermissionExpired) => Err((
+ ActuationError::PermissionExpired,
+ "Permission expired".to_string(),
+ )),
+ }
+ }
}
impl DataBroker {
- pub fn new(version: impl Into) -> Self {
+ pub fn new(version: impl Into, commit_sha: impl Into) -> Self {
let (shutdown_trigger, _) = broadcast::channel::<()>(1);
DataBroker {
database: Default::default(),
subscriptions: Default::default(),
version: version.into(),
+ commit_sha: commit_sha.into(),
shutdown_trigger,
}
}
@@ -1557,13 +2008,14 @@ impl DataBroker {
pub fn start_housekeeping_task(&self) {
info!("Starting housekeeping task");
let subscriptions = self.subscriptions.clone();
+
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(1));
loop {
interval.tick().await;
- // Cleanup dropped subscriptions
- subscriptions.write().await.cleanup();
+
+ subscriptions.write().await.cleanup(); // Cleanup dropped subscriptions
}
});
}
@@ -1584,21 +2036,35 @@ impl DataBroker {
pub fn get_version(&self) -> &str {
&self.version
}
+
+ pub fn get_commit_sha(&self) -> &str {
+ &self.commit_sha
+ }
}
impl Default for DataBroker {
fn default() -> Self {
- Self::new("")
+ Self::new("", "")
}
}
#[cfg(test)]
-mod tests {
+/// Public test module to allow other files to reuse helper functions
+pub mod tests {
use crate::permissions;
use super::*;
use tokio_stream::StreamExt;
+ #[tokio::test]
+ async fn test_databroker_version_and_commit_sha() {
+ let version = "1.1.1";
+ let commit_sha = "3a3c332f5427f2db7a0b8582262c9f5089036c23";
+ let databroker = DataBroker::new(version, commit_sha);
+ assert_eq!(databroker.get_version(), version);
+ assert_eq!(databroker.get_commit_sha(), commit_sha);
+ }
+
#[tokio::test]
async fn test_register_datapoint() {
let broker = DataBroker::default();
@@ -1611,6 +2077,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::BoolArray(Vec::from([true]))),
Some("kg".to_string()),
)
@@ -1643,6 +2111,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 2".to_owned(),
+ None, // min
+ None, // max
None,
Some("km".to_string()),
)
@@ -1672,6 +2142,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1 (modified)".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1693,6 +2165,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test signal 3".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::Int32Array(Vec::from([1, 2, 3, 4]))),
None,
)
@@ -1717,6 +2191,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1730,6 +2206,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Actuator,
"Test datapoint 2".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1773,6 +2251,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1805,6 +2285,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1827,6 +2309,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1878,6 +2362,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::Int32Array(vec![100])),
None,
)
@@ -1899,6 +2385,8 @@ mod tests {
data_type: None,
description: None,
allowed: Some(Some(DataValue::Int32Array(vec![100]))),
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1921,6 +2409,8 @@ mod tests {
data_type: None,
description: None,
allowed: Some(Some(DataValue::BoolArray(vec![true]))),
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1943,6 +2433,8 @@ mod tests {
data_type: None,
description: None,
allowed: Some(None),
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1966,6 +2458,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1986,6 +2480,696 @@ mod tests {
}
}
+ // Helper for adding an int8 signal and adding value
+ async fn helper_add_int8(
+ broker: &DataBroker,
+ name: &str,
+ value: i32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Int8,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int32(-5)), // min
+ Some(types::DataValue::Int32(10)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int32(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_int8() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int8(&broker, "test.datapoint1", -6, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int8(&broker, "test.datapoint2", -5, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+ match helper_add_int8(&broker, "test.datapoint3", 11, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int8(&broker, "test.datapoint4", 10, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+    // Helper for adding an int16 signal and adding value
+ async fn helper_add_int16(
+ broker: &DataBroker,
+ name: &str,
+ value: i32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Int16,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int32(-5)), // min
+ Some(types::DataValue::Int32(10)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int32(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_int16() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int16(&broker, "test.datapoint1", -6, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int16(&broker, "test.datapoint2", -5, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+ match helper_add_int16(&broker, "test.datapoint3", 11, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int16(&broker, "test.datapoint4", 10, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ // Helper for adding an int32 signal and adding value
+ pub async fn helper_add_int32(
+ broker: &DataBroker,
+ name: &str,
+ value: i32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Int32,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int32(-500)), // min
+ Some(types::DataValue::Int32(1000)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int32(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_exceeded() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int32(&broker, "test.datapoint1", -501, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_equal() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ if helper_add_int32(&broker, "test.datapoint1", -500, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_max_exceeded() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int32(&broker, "test.datapoint1", 1001, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_max_equal() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ if helper_add_int32(&broker, "test.datapoint1", 1000, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ /// Helper for adding an int64 signal and adding value
+ async fn helper_add_int64(
+ broker: &DataBroker,
+ name: &str,
+ value: i64,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Int64,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int64(-500000)), // min
+ Some(types::DataValue::Int64(10000000)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int64(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_int64() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int64(&broker, "test.datapoint1", -500001, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int64(&broker, "test.datapoint2", -500000, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+ match helper_add_int64(&broker, "test.datapoint3", 10000001, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int64(&broker, "test.datapoint4", 10000000, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+    /// Helper for adding a uint8 signal and adding value
+ async fn helper_add_uint8(
+ broker: &DataBroker,
+ name: &str,
+ value: u32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Uint8,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Uint32(3)), // min
+ Some(types::DataValue::Uint32(26)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Uint32(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_uint8() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_uint8(&broker, "test.datapoint1", 2, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_uint8(&broker, "test.datapoint2", 3, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+ match helper_add_uint8(&broker, "test.datapoint3", 27, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_uint8(&broker, "test.datapoint4", 26, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+    // Helper for adding an int32 array signal and adding value
+ async fn helper_add_int32array(
+ broker: &DataBroker,
+ name: &str,
+ value1: i32,
+ value2: i32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Int32Array,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int32(-500)), // min
+ Some(types::DataValue::Int32(1000)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int32Array(Vec::from([value1, value2])),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_exceeded_int32array() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ // First item out of bound
+ match helper_add_int32array(&broker, "test.datapoint1", -501, -500, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ // Second item out of bound
+ match helper_add_int32array(&broker, "test.datapoint2", -500, -501, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_equal_int32array() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ if helper_add_int32array(&broker, "test.datapoint1", -500, -500, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_max_exceeded_int32array() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int32array(&broker, "test.datapoint1", 1001, 1000, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ match helper_add_int32array(&broker, "test.datapoint2", 100, 1001, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_max_equal_int32array() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ if helper_add_int32array(&broker, "test.datapoint1", 1000, 1000, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+    // Helper for adding a double array signal and adding value
+ async fn helper_add_doublearray(
+ broker: &DataBroker,
+ name: &str,
+ value1: f64,
+ value2: f64,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::DoubleArray,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Double(-500.2)), // min
+ Some(types::DataValue::Double(1000.2)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::DoubleArray(Vec::from([value1, value2])),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_doublearray() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ // First item out of bound
+ match helper_add_doublearray(&broker, "test.datapoint1", -500.3, -500.0, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ // Second item out of bound
+ match helper_add_doublearray(&broker, "test.datapoint2", -500.0, -500.3, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ // Both on min
+ if helper_add_doublearray(&broker, "test.datapoint3", -500.2, -500.2, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+        // First too large
+ match helper_add_doublearray(&broker, "test.datapoint4", 1000.3, 1000.0, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ // Second too large
+ match helper_add_doublearray(&broker, "test.datapoint5", 1000.0, 1000.3, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ // Both on max
+ if helper_add_doublearray(&broker, "test.datapoint6", 1000.2, 1000.2, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
#[tokio::test]
async fn test_subscribe_query_and_get() {
let broker = DataBroker::default();
@@ -1998,6 +3182,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2037,6 +3223,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2081,6 +3269,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2139,6 +3329,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2183,6 +3375,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2222,6 +3416,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2258,6 +3454,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1 (new description)".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2281,6 +3479,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2323,6 +3523,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2336,6 +3538,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 2".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2379,6 +3583,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
),
@@ -2396,6 +3602,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
),
@@ -2437,6 +3645,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2459,6 +3669,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2502,6 +3714,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2529,6 +3743,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2577,6 +3793,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::StringArray(vec![
String::from("yes"),
String::from("no"),
@@ -2609,6 +3827,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2666,6 +3886,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2721,6 +3943,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2745,6 +3969,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2767,6 +3993,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2796,6 +4024,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2831,6 +4061,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2853,6 +4085,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2882,6 +4116,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2920,6 +4156,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2942,6 +4180,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2985,6 +4225,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -3029,6 +4271,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -3079,6 +4323,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test signal".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -3091,6 +4337,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test signal".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -3122,6 +4370,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test signal 3".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::Int32Array(Vec::from([1, 2, 3, 4]))),
None,
)
@@ -3136,6 +4386,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::BoolArray(Vec::from([true]))),
None,
)
diff --git a/databroker/src/grpc/kuksa_val_v1/val.rs b/databroker/src/grpc/kuksa_val_v1/val.rs
index 9cbbfc8d..02946357 100644
--- a/databroker/src/grpc/kuksa_val_v1/val.rs
+++ b/databroker/src/grpc/kuksa_val_v1/val.rs
@@ -33,6 +33,7 @@ use crate::broker::SubscriptionError;
use crate::broker::{AuthorizedAccess, EntryReadAccess};
use crate::glob::Matcher;
use crate::permissions::Permissions;
+use crate::types::{DataType, DataValue};
const MAX_REQUEST_PATH_LENGTH: usize = 1000;
@@ -687,11 +688,27 @@ fn convert_to_data_entry_error(path: &String, error: &broker::UpdateError) -> Da
message: "cannot set datapoint to value of unsupported type".to_string(),
}),
},
- broker::UpdateError::OutOfBounds => DataEntryError {
+ broker::UpdateError::OutOfBoundsAllowed => DataEntryError {
path: path.clone(),
error: Some(proto::Error {
code: 400,
- reason: String::from("value out of bounds"),
+ reason: String::from("value out of allowed bounds"),
+ message: String::from("given value exceeds type's boundaries"),
+ }),
+ },
+ broker::UpdateError::OutOfBoundsMinMax => DataEntryError {
+ path: path.clone(),
+ error: Some(proto::Error {
+ code: 400,
+ reason: String::from("value out of min/max bounds"),
+ message: String::from("given value exceeds type's boundaries"),
+ }),
+ },
+ broker::UpdateError::OutOfBoundsType => DataEntryError {
+ path: path.clone(),
+ error: Some(proto::Error {
+ code: 400,
+ reason: String::from("value out of type bounds"),
message: String::from("given value exceeds type's boundaries"),
}),
},
@@ -792,72 +809,147 @@ fn proto_entry_from_entry_and_fields(
}
if all || fields.contains(&proto::Field::MetadataValueRestriction) {
metadata_is_set = true;
- metadata.value_restriction = match entry.metadata().allowed.as_ref() {
- Some(allowed) => match allowed {
- broker::DataValue::StringArray(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::String(
- proto::ValueRestrictionString {
- allowed_values: vec.clone(),
- },
- )),
- }),
- broker::DataValue::Int32Array(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::Signed(
- proto::ValueRestrictionInt {
- allowed_values: vec.iter().cloned().map(i64::from).collect(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::Int64Array(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::Signed(
- proto::ValueRestrictionInt {
- allowed_values: vec.clone(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::Uint32Array(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::Unsigned(
- proto::ValueRestrictionUint {
- allowed_values: vec.iter().cloned().map(u64::from).collect(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::Uint64Array(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::Unsigned(
- proto::ValueRestrictionUint {
- allowed_values: vec.clone(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::FloatArray(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::FloatingPoint(
- proto::ValueRestrictionFloat {
- allowed_values: vec.iter().cloned().map(f64::from).collect(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::DoubleArray(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::FloatingPoint(
- proto::ValueRestrictionFloat {
- allowed_values: vec.clone(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- _ => None,
- },
- None => None,
+ debug!("Datatype {:?} to be handled", entry.metadata().data_type);
+ match entry.metadata().data_type {
+ DataType::String | DataType::StringArray => {
+ let allowed = match entry.metadata().allowed.as_ref() {
+ Some(broker::DataValue::StringArray(vec)) => vec.clone(),
+ _ => Vec::new(),
+ };
+
+ if !allowed.is_empty() {
+ metadata.value_restriction = Some(proto::ValueRestriction {
+ r#type: Some(proto::value_restriction::Type::String(
+ proto::ValueRestrictionString {
+ allowed_values: allowed,
+ },
+ )),
+ });
+ };
+ }
+ DataType::Int8
+ | DataType::Int16
+ | DataType::Int32
+ | DataType::Int64
+ | DataType::Int8Array
+ | DataType::Int16Array
+ | DataType::Int32Array
+ | DataType::Int64Array => {
+ let min_value = match entry.metadata().min {
+ Some(DataValue::Int32(value)) => Some(i64::from(value)),
+ Some(DataValue::Int64(value)) => Some(value),
+ _ => None,
+ };
+ let max_value = match entry.metadata().max {
+ Some(DataValue::Int32(value)) => Some(i64::from(value)),
+ Some(DataValue::Int64(value)) => Some(value),
+ _ => None,
+ };
+ let allowed = match entry.metadata().allowed.as_ref() {
+ Some(allowed) => match allowed {
+ broker::DataValue::Int32Array(vec) => {
+ vec.iter().cloned().map(i64::from).collect()
+ }
+ broker::DataValue::Int64Array(vec) => vec.to_vec(),
+ _ => Vec::new(),
+ },
+ _ => Vec::new(),
+ };
+
+ if min_value.is_some() | max_value.is_some() | !allowed.is_empty() {
+ metadata.value_restriction = Some(proto::ValueRestriction {
+ r#type: Some(proto::value_restriction::Type::Signed(
+ proto::ValueRestrictionInt {
+ allowed_values: allowed,
+ min: min_value,
+ max: max_value,
+ },
+ )),
+ });
+ };
+ }
+ DataType::Uint8
+ | DataType::Uint16
+ | DataType::Uint32
+ | DataType::Uint64
+ | DataType::Uint8Array
+ | DataType::Uint16Array
+ | DataType::Uint32Array
+ | DataType::Uint64Array => {
+ let min_value = match entry.metadata().min {
+ Some(DataValue::Uint32(value)) => Some(u64::from(value)),
+ Some(DataValue::Uint64(value)) => Some(value),
+ _ => None,
+ };
+ let max_value = match entry.metadata().max {
+ Some(DataValue::Uint32(value)) => Some(u64::from(value)),
+ Some(DataValue::Uint64(value)) => Some(value),
+ _ => None,
+ };
+ let allowed = match entry.metadata().allowed.as_ref() {
+ Some(allowed) => match allowed {
+ broker::DataValue::Uint32Array(vec) => {
+ vec.iter().cloned().map(u64::from).collect()
+ }
+ broker::DataValue::Uint64Array(vec) => vec.to_vec(),
+ _ => Vec::new(),
+ },
+ _ => Vec::new(),
+ };
+
+ if min_value.is_some() | max_value.is_some() | !allowed.is_empty() {
+ metadata.value_restriction = Some(proto::ValueRestriction {
+ r#type: Some(proto::value_restriction::Type::Unsigned(
+ proto::ValueRestrictionUint {
+ allowed_values: allowed,
+ min: min_value,
+ max: max_value,
+ },
+ )),
+ });
+ };
+ }
+ DataType::Float
+ | DataType::Double
+ | DataType::FloatArray
+ | DataType::DoubleArray => {
+ let min_value = match entry.metadata().min {
+ Some(DataValue::Float(value)) => Some(f64::from(value)),
+ Some(DataValue::Double(value)) => Some(value),
+ _ => None,
+ };
+ let max_value = match entry.metadata().max {
+ Some(DataValue::Float(value)) => Some(f64::from(value)),
+ Some(DataValue::Double(value)) => Some(value),
+ _ => None,
+ };
+ let allowed = match entry.metadata().allowed.as_ref() {
+ Some(allowed) => match allowed {
+ broker::DataValue::FloatArray(vec) => {
+ vec.iter().cloned().map(f64::from).collect()
+ }
+ broker::DataValue::DoubleArray(vec) => vec.to_vec(),
+ _ => Vec::new(),
+ },
+ _ => Vec::new(),
+ };
+
+ if min_value.is_some() | max_value.is_some() | !allowed.is_empty() {
+ metadata.value_restriction = Some(proto::ValueRestriction {
+ r#type: Some(proto::value_restriction::Type::FloatingPoint(
+ proto::ValueRestrictionFloat {
+ allowed_values: allowed,
+ min: min_value,
+ max: max_value,
+ },
+ )),
+ });
+ };
+ }
+
+ _ => {
+ debug!("Datatype {:?} not yet handled", entry.metadata().data_type);
+ }
}
}
if all || fields.contains(&proto::Field::MetadataActuator) {
@@ -984,6 +1076,8 @@ impl broker::EntryUpdate {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
}
}
@@ -1007,6 +1101,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1062,6 +1158,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
Some("km/h".to_owned()),
)
@@ -1158,6 +1256,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1171,6 +1271,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test branch datapoint 2".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1219,6 +1321,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
diff --git a/databroker/src/grpc/kuksa_val_v2/conversions.rs b/databroker/src/grpc/kuksa_val_v2/conversions.rs
new file mode 100644
index 00000000..e632e4f7
--- /dev/null
+++ b/databroker/src/grpc/kuksa_val_v2/conversions.rs
@@ -0,0 +1,538 @@
+// /********************************************************************************
+// * Copyright (c) 2024 Contributors to the Eclipse Foundation
+// *
+// * See the NOTICE file(s) distributed with this work for additional
+// * information regarding copyright ownership.
+// *
+// * This program and the accompanying materials are made available under the
+// * terms of the Apache License 2.0 which is available at
+// * http://www.apache.org/licenses/LICENSE-2.0
+// *
+// * SPDX-License-Identifier: Apache-2.0
+// ********************************************************************************/
+use crate::broker;
+use crate::types::DataValue;
+use databroker_proto::kuksa::val::v2 as proto;
+use kuksa::proto::v2::{
+ BoolArray, DoubleArray, FloatArray, Int32Array, Int64Array, StringArray, Uint32Array,
+ Uint64Array,
+};
+
+use std::time::SystemTime;
+use tracing::debug;
+
+impl From<&proto::Datapoint> for broker::Datapoint {
+ fn from(datapoint: &proto::Datapoint) -> Self {
+ let value = broker::DataValue::from(datapoint);
+ let ts = SystemTime::now();
+
+ match &datapoint.timestamp {
+ Some(source_timestamp) => {
+ let source: Option = match source_timestamp.clone().try_into() {
+ Ok(source) => Some(source),
+ Err(_) => None,
+ };
+ broker::Datapoint {
+ ts,
+ source_ts: source,
+ value,
+ }
+ }
+ None => broker::Datapoint {
+ ts,
+ source_ts: None,
+ value,
+ },
+ }
+ }
+}
+
+impl From for Option {
+ fn from(from: broker::Datapoint) -> Self {
+ match from.value {
+ broker::DataValue::NotAvailable => Some(proto::Datapoint {
+ value: None,
+ timestamp: Some(from.ts.into()),
+ }),
+ broker::DataValue::Bool(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(value)),
+ }),
+ }),
+ broker::DataValue::String(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::String(value)),
+ }),
+ }),
+ broker::DataValue::Int32(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32(value)),
+ }),
+ }),
+ broker::DataValue::Int64(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int64(value)),
+ }),
+ }),
+ broker::DataValue::Uint32(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint32(value)),
+ }),
+ }),
+ broker::DataValue::Uint64(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint64(value)),
+ }),
+ }),
+ broker::DataValue::Float(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Float(value)),
+ }),
+ }),
+ broker::DataValue::Double(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Double(value)),
+ }),
+ }),
+ broker::DataValue::BoolArray(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::BoolArray(proto::BoolArray {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::StringArray(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::StringArray(proto::StringArray {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::Int32Array(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32Array(proto::Int32Array {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::Int64Array(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int64Array(proto::Int64Array {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::Uint32Array(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint32Array(proto::Uint32Array {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::Uint64Array(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint64Array(proto::Uint64Array {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::FloatArray(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::FloatArray(proto::FloatArray {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::DoubleArray(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::DoubleArray(proto::DoubleArray {
+ values,
+ })),
+ }),
+ }),
+ }
+ }
+}
+
+impl From<&proto::Datapoint> for broker::DataValue {
+ fn from(datapoint: &proto::Datapoint) -> Self {
+ match &datapoint.value {
+ Some(value) => match &value.typed_value {
+ Some(proto::value::TypedValue::String(value)) => {
+ broker::DataValue::String(value.to_owned())
+ }
+ Some(proto::value::TypedValue::Bool(value)) => broker::DataValue::Bool(*value),
+ Some(proto::value::TypedValue::Int32(value)) => broker::DataValue::Int32(*value),
+ Some(proto::value::TypedValue::Int64(value)) => broker::DataValue::Int64(*value),
+ Some(proto::value::TypedValue::Uint32(value)) => broker::DataValue::Uint32(*value),
+ Some(proto::value::TypedValue::Uint64(value)) => broker::DataValue::Uint64(*value),
+ Some(proto::value::TypedValue::Float(value)) => broker::DataValue::Float(*value),
+ Some(proto::value::TypedValue::Double(value)) => broker::DataValue::Double(*value),
+ Some(proto::value::TypedValue::StringArray(array)) => {
+ broker::DataValue::StringArray(array.values.clone())
+ }
+ Some(proto::value::TypedValue::BoolArray(array)) => {
+ broker::DataValue::BoolArray(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Int32Array(array)) => {
+ broker::DataValue::Int32Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Int64Array(array)) => {
+ broker::DataValue::Int64Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Uint32Array(array)) => {
+ broker::DataValue::Uint32Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Uint64Array(array)) => {
+ broker::DataValue::Uint64Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::FloatArray(array)) => {
+ broker::DataValue::FloatArray(array.values.clone())
+ }
+ Some(proto::value::TypedValue::DoubleArray(array)) => {
+ broker::DataValue::DoubleArray(array.values.clone())
+ }
+ None => broker::DataValue::NotAvailable,
+ },
+ None => broker::DataValue::NotAvailable,
+ }
+ }
+}
+
+impl From<&broker::Metadata> for proto::Metadata {
+ fn from(metadata: &broker::Metadata) -> Self {
+ proto::Metadata {
+ id: metadata.id,
+ data_type: proto::DataType::from(metadata.data_type.clone()) as i32,
+ entry_type: proto::EntryType::from(metadata.entry_type.clone()) as i32,
+ description: metadata.description.clone(),
+ comment: String::new(),
+ deprecation: String::new(),
+ unit: metadata.unit.clone().unwrap_or_default(),
+ allowed_values: transform_allowed(&metadata.allowed),
+ min: transform_min_max(&metadata.min),
+ max: transform_min_max(&metadata.max),
+ }
+ }
+}
+
+fn transform_allowed(value: &Option) -> Option {
+ match value {
+ Some(value) => match value {
+ DataValue::BoolArray(_) => Some(proto::Value::from(value.clone())),
+ DataValue::StringArray(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Int32Array(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Int64Array(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Uint32Array(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Uint64Array(_) => Some(proto::Value::from(value.clone())),
+ DataValue::FloatArray(_) => Some(proto::Value::from(value.clone())),
+ DataValue::DoubleArray(_) => Some(proto::Value::from(value.clone())),
+ _ => {
+ debug!("Wrong datatype used for allowed values");
+ None
+ }
+ },
+ None => None,
+ }
+}
+
+fn transform_min_max(value: &Option) -> Option {
+ match value {
+ Some(value) => match value {
+ DataValue::Bool(_) => Some(proto::Value::from(value.clone())),
+ DataValue::String(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Int32(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Int64(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Uint32(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Uint64(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Float(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Double(_) => Some(proto::Value::from(value.clone())),
+ _ => {
+ debug!("Wrong datatype used for min/max values");
+ None
+ }
+ },
+ None => None,
+ }
+}
+
+impl From<&broker::UpdateError> for proto::Error {
+ fn from(update_error: &broker::UpdateError) -> Self {
+ match update_error {
+ broker::UpdateError::NotFound => proto::Error {
+ code: proto::ErrorCode::NotFound.into(),
+ message: "Not Found".to_string(),
+ },
+ broker::UpdateError::WrongType => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Wrong Type".to_string(),
+ },
+ broker::UpdateError::OutOfBoundsAllowed => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Out of Bounds Allowed".to_string(),
+ },
+ broker::UpdateError::OutOfBoundsMinMax => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Out of Bounds MinMax".to_string(),
+ },
+ broker::UpdateError::OutOfBoundsType => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Out of Bounds Type".to_string(),
+ },
+ broker::UpdateError::UnsupportedType => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Unsupported Type".to_string(),
+ },
+ broker::UpdateError::PermissionDenied => proto::Error {
+ code: proto::ErrorCode::PermissionDenied.into(),
+ message: "Permission Denied".to_string(),
+ },
+ broker::UpdateError::PermissionExpired => proto::Error {
+ code: proto::ErrorCode::PermissionDenied.into(),
+ message: "Permission Expired".to_string(),
+ },
+ }
+ }
+}
+
+impl From for proto::DataType {
+ fn from(from: broker::DataType) -> Self {
+ match from {
+ broker::DataType::String => proto::DataType::String,
+ broker::DataType::Bool => proto::DataType::Boolean,
+ broker::DataType::Int8 => proto::DataType::Int8,
+ broker::DataType::Int16 => proto::DataType::Int16,
+ broker::DataType::Int32 => proto::DataType::Int32,
+ broker::DataType::Int64 => proto::DataType::Int64,
+ broker::DataType::Uint8 => proto::DataType::Uint8,
+ broker::DataType::Uint16 => proto::DataType::Uint16,
+ broker::DataType::Uint32 => proto::DataType::Uint32,
+ broker::DataType::Uint64 => proto::DataType::Uint64,
+ broker::DataType::Float => proto::DataType::Float,
+ broker::DataType::Double => proto::DataType::Double,
+ broker::DataType::StringArray => proto::DataType::StringArray,
+ broker::DataType::BoolArray => proto::DataType::BooleanArray,
+ broker::DataType::Int8Array => proto::DataType::Int8Array,
+ broker::DataType::Int16Array => proto::DataType::Int16Array,
+ broker::DataType::Int32Array => proto::DataType::Int32Array,
+ broker::DataType::Int64Array => proto::DataType::Int64Array,
+ broker::DataType::Uint8Array => proto::DataType::Uint8Array,
+ broker::DataType::Uint16Array => proto::DataType::Uint16Array,
+ broker::DataType::Uint32Array => proto::DataType::Uint32Array,
+ broker::DataType::Uint64Array => proto::DataType::Uint64Array,
+ broker::DataType::FloatArray => proto::DataType::FloatArray,
+ broker::DataType::DoubleArray => proto::DataType::DoubleArray,
+ }
+ }
+}
+
+impl From for proto::EntryType {
+ fn from(from: broker::EntryType) -> Self {
+ match from {
+ broker::EntryType::Sensor => proto::EntryType::Sensor,
+ broker::EntryType::Attribute => proto::EntryType::Attribute,
+ broker::EntryType::Actuator => proto::EntryType::Actuator,
+ }
+ }
+}
+
impl broker::UpdateError {
    /// Renders this update error as a gRPC status, embedding the affected
    /// signal id in the status message.
    pub fn to_status_with_code(&self, id: &i32) -> tonic::Status {
        let (code, text) = match self {
            broker::UpdateError::NotFound => (tonic::Code::NotFound, "Signal not found"),
            broker::UpdateError::WrongType => (tonic::Code::InvalidArgument, "Wrong type provided"),
            broker::UpdateError::OutOfBoundsAllowed => {
                (tonic::Code::InvalidArgument, "Value out of allowed bounds")
            }
            broker::UpdateError::OutOfBoundsMinMax => {
                (tonic::Code::InvalidArgument, "Value out of min/max bounds")
            }
            broker::UpdateError::OutOfBoundsType => {
                (tonic::Code::InvalidArgument, "Value out of type bounds")
            }
            broker::UpdateError::UnsupportedType => {
                (tonic::Code::InvalidArgument, "Unsupported type")
            }
            broker::UpdateError::PermissionDenied => {
                (tonic::Code::PermissionDenied, "Permission denied")
            }
            broker::UpdateError::PermissionExpired => {
                (tonic::Code::Unauthenticated, "Permission expired")
            }
        };
        tonic::Status::new(code, format!("{} (id: {})", text, id))
    }
}
+
+impl From for broker::DataValue {
+ fn from(value: proto::Value) -> Self {
+ match &value.typed_value {
+ Some(proto::value::TypedValue::String(value)) => {
+ broker::DataValue::String(value.to_owned())
+ }
+ Some(proto::value::TypedValue::Bool(value)) => broker::DataValue::Bool(*value),
+ Some(proto::value::TypedValue::Int32(value)) => broker::DataValue::Int32(*value),
+ Some(proto::value::TypedValue::Int64(value)) => broker::DataValue::Int64(*value),
+ Some(proto::value::TypedValue::Uint32(value)) => broker::DataValue::Uint32(*value),
+ Some(proto::value::TypedValue::Uint64(value)) => broker::DataValue::Uint64(*value),
+ Some(proto::value::TypedValue::Float(value)) => broker::DataValue::Float(*value),
+ Some(proto::value::TypedValue::Double(value)) => broker::DataValue::Double(*value),
+ Some(proto::value::TypedValue::StringArray(array)) => {
+ broker::DataValue::StringArray(array.values.clone())
+ }
+ Some(proto::value::TypedValue::BoolArray(array)) => {
+ broker::DataValue::BoolArray(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Int32Array(array)) => {
+ broker::DataValue::Int32Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Int64Array(array)) => {
+ broker::DataValue::Int64Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Uint32Array(array)) => {
+ broker::DataValue::Uint32Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Uint64Array(array)) => {
+ broker::DataValue::Uint64Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::FloatArray(array)) => {
+ broker::DataValue::FloatArray(array.values.clone())
+ }
+ Some(proto::value::TypedValue::DoubleArray(array)) => {
+ broker::DataValue::DoubleArray(array.values.clone())
+ }
+ None => todo!(),
+ }
+ }
+}
+
+impl From for proto::Value {
+ fn from(value: broker::DataValue) -> Self {
+ match &value {
+ broker::DataValue::String(value) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::String(value.to_owned())),
+ },
+
+ broker::DataValue::Bool(value) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(*value)),
+ },
+
+ broker::DataValue::Int32(value) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32(*value)),
+ },
+
+ broker::DataValue::Int64(value) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int64(*value)),
+ },
+
+ broker::DataValue::Uint32(value) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint32(*value)),
+ },
+
+ broker::DataValue::Uint64(value) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint64(*value)),
+ },
+
+ broker::DataValue::Float(value) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Float(*value)),
+ },
+
+ broker::DataValue::Double(value) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Double(*value)),
+ },
+
+ broker::DataValue::StringArray(array) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::StringArray(StringArray {
+ values: array.clone(),
+ })),
+ },
+
+ broker::DataValue::BoolArray(array) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::BoolArray(BoolArray {
+ values: array.clone(),
+ })),
+ },
+
+ broker::DataValue::Int32Array(array) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32Array(Int32Array {
+ values: array.clone(),
+ })),
+ },
+
+ broker::DataValue::Int64Array(array) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int64Array(Int64Array {
+ values: array.clone(),
+ })),
+ },
+
+ broker::DataValue::Uint32Array(array) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint32Array(Uint32Array {
+ values: array.clone(),
+ })),
+ },
+
+ broker::DataValue::Uint64Array(array) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint64Array(Uint64Array {
+ values: array.clone(),
+ })),
+ },
+
+ broker::DataValue::FloatArray(array) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::FloatArray(FloatArray {
+ values: array.clone(),
+ })),
+ },
+
+ broker::DataValue::DoubleArray(array) => proto::Value {
+ typed_value: Some(proto::value::TypedValue::DoubleArray(DoubleArray {
+ values: array.clone(),
+ })),
+ },
+
+ broker::DataValue::NotAvailable => proto::Value { typed_value: None },
+ }
+ }
+}
+
+impl broker::ActuationError {
+ pub fn to_tonic_status(&self, message: String) -> tonic::Status {
+ match self {
+ broker::ActuationError::NotFound => tonic::Status::not_found(message),
+ broker::ActuationError::WrongType => tonic::Status::invalid_argument(message),
+ broker::ActuationError::OutOfBounds => tonic::Status::invalid_argument(message),
+ broker::ActuationError::UnsupportedType => tonic::Status::invalid_argument(message),
+ broker::ActuationError::PermissionDenied => tonic::Status::permission_denied(message),
+ broker::ActuationError::PermissionExpired => tonic::Status::unauthenticated(message),
+ broker::ActuationError::ProviderNotAvailable => tonic::Status::unavailable(message),
+ broker::ActuationError::ProviderAlreadyExists => tonic::Status::already_exists(message),
+ broker::ActuationError::TransmissionFailure => tonic::Status::data_loss(message),
+ }
+ }
+}
diff --git a/databroker/src/grpc/kuksa_val_v2/mod.rs b/databroker/src/grpc/kuksa_val_v2/mod.rs
new file mode 100644
index 00000000..88302b19
--- /dev/null
+++ b/databroker/src/grpc/kuksa_val_v2/mod.rs
@@ -0,0 +1,15 @@
+/********************************************************************************
+* Copyright (c) 2024 Contributors to the Eclipse Foundation
+*
+* See the NOTICE file(s) distributed with this work for additional
+* information regarding copyright ownership.
+*
+* This program and the accompanying materials are made available under the
+* terms of the Apache License 2.0 which is available at
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* SPDX-License-Identifier: Apache-2.0
+********************************************************************************/
+
+mod conversions;
+mod val;
diff --git a/databroker/src/grpc/kuksa_val_v2/val.rs b/databroker/src/grpc/kuksa_val_v2/val.rs
new file mode 100644
index 00000000..ffe1c6a4
--- /dev/null
+++ b/databroker/src/grpc/kuksa_val_v2/val.rs
@@ -0,0 +1,3022 @@
+/********************************************************************************
+* Copyright (c) 2024 Contributors to the Eclipse Foundation
+*
+* See the NOTICE file(s) distributed with this work for additional
+* information regarding copyright ownership.
+*
+* This program and the accompanying materials are made available under the
+* terms of the Apache License 2.0 which is available at
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* SPDX-License-Identifier: Apache-2.0
+********************************************************************************/
+
+use std::{collections::HashMap, pin::Pin};
+
+use crate::{
+ broker::{
+ self, ActuationChange, ActuationProvider, AuthorizedAccess, ReadError, SubscriptionError,
+ },
+ glob::Matcher,
+ permissions::Permissions,
+ types::DataValue,
+};
+
+use databroker_proto::kuksa::val::v2::{
+ self as proto,
+ open_provider_stream_request::Action::{
+ BatchActuateStreamResponse, ProvideActuationRequest, PublishValuesRequest,
+ },
+ open_provider_stream_response, OpenProviderStreamResponse, PublishValuesResponse,
+};
+
+use kuksa::proto::v2::{
+ signal_id, ActuateRequest, ActuateResponse, BatchActuateStreamRequest, ListMetadataResponse,
+ ProvideActuationResponse,
+};
+use std::collections::HashSet;
+use tokio::{select, sync::mpsc};
+use tokio_stream::{wrappers::ReceiverStream, Stream, StreamExt};
+use tracing::debug;
+
+const MAX_REQUEST_PATH_LENGTH: usize = 1000;
+
+pub struct Provider {
+ sender: mpsc::Sender>,
+}
+
+#[async_trait::async_trait]
+impl ActuationProvider for Provider {
+ async fn actuate(
+ &self,
+ actuation_changes: Vec,
+ ) -> Result<(), (broker::ActuationError, String)> {
+ let mut actuation_requests: Vec = vec![];
+ for actuation_change in actuation_changes {
+ let data_value = actuation_change.data_value;
+ actuation_requests.push(ActuateRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(signal_id::Signal::Id(actuation_change.id)),
+ }),
+ value: Some(proto::Value::from(data_value)),
+ });
+ }
+
+ let batch_actuate_stream_request =
+ open_provider_stream_response::Action::BatchActuateStreamRequest(
+ BatchActuateStreamRequest {
+ actuate_requests: actuation_requests,
+ },
+ );
+
+ let response = OpenProviderStreamResponse {
+ action: Some(batch_actuate_stream_request),
+ };
+
+ let result = self.sender.send(Ok(response)).await;
+ if result.is_err() {
+ return Err((
+ broker::ActuationError::TransmissionFailure,
+ "An error occured while sending the data".to_string(),
+ ));
+ }
+ return Ok(());
+ }
+
+ fn is_available(&self) -> bool {
+ !self.sender.is_closed()
+ }
+}
+
+#[tonic::async_trait]
+impl proto::val_server::Val for broker::DataBroker {
+ // Returns (GRPC error code):
+ // NOT_FOUND if the requested signal doesn't exist
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied
+ //
+ async fn get_value(
+ &self,
+ request: tonic::Request,
+ ) -> Result, tonic::Status> {
+ debug!(?request);
+ let permissions = match request.extensions().get::() {
+ Some(permissions) => {
+ debug!(?permissions);
+ permissions.clone()
+ }
+ None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+ };
+
+ let broker = self.authorized_access(&permissions);
+
+ let request = request.into_inner();
+
+ let signal_id = match get_signal(request.signal_id, &broker).await {
+ Ok(signal_id) => signal_id,
+ Err(err) => return Err(err),
+ };
+
+ let datapoint = match broker.get_datapoint(signal_id).await {
+ Ok(datapoint) => datapoint,
+ Err(ReadError::NotFound) => return Err(tonic::Status::not_found("Path not found")),
+ Err(ReadError::PermissionDenied) => {
+ return Err(tonic::Status::permission_denied("Permission denied"))
+ }
+ Err(ReadError::PermissionExpired) => {
+ return Err(tonic::Status::unauthenticated("Permission expired"))
+ }
+ };
+
+ Ok(tonic::Response::new(proto::GetValueResponse {
+ data_point: datapoint.into(),
+ }))
+ }
+
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the requested signals doesn't exist.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied for any of the requested signals.
+ //
+ async fn get_values(
+ &self,
+ request: tonic::Request,
+ ) -> Result, tonic::Status> {
+ debug!(?request);
+ let permissions = match request.extensions().get::() {
+ Some(permissions) => {
+ debug!(?permissions);
+ permissions.clone()
+ }
+ None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+ };
+
+ let broker = self.authorized_access(&permissions);
+
+ let requested = request.into_inner().signal_ids;
+ let mut response_datapoints = Vec::new();
+
+ for request in requested {
+ let signal_id = match get_signal(Some(request), &broker).await {
+ Ok(signal_id) => signal_id,
+ Err(err) => return Err(err),
+ };
+
+ match broker.get_datapoint(signal_id).await {
+ Ok(datapoint) => {
+ let proto_datapoint_opt: Option = datapoint.into();
+ //let proto_datapoint: proto::Datapoint = proto_datapoint_opt.into();
+ response_datapoints.push(proto_datapoint_opt.unwrap());
+ }
+ Err(ReadError::NotFound) => {
+ return Err(tonic::Status::not_found(format!(
+ "Path not found (id: {})",
+ signal_id
+ )));
+ }
+ Err(ReadError::PermissionDenied) => {
+ return Err(tonic::Status::permission_denied(format!(
+ "Permission denied(id: {})",
+ signal_id
+ )))
+ }
+ Err(ReadError::PermissionExpired) => {
+ return Err(tonic::Status::unauthenticated(format!(
+ "Permission expired (id: {})",
+ signal_id
+ )))
+ }
+ };
+ }
+
+ Ok(tonic::Response::new(proto::GetValuesResponse {
+ data_points: response_datapoints,
+ }))
+ }
+
+ type SubscribeStream = Pin<
+ Box<
+ dyn Stream- >
+ + Send
+ + Sync
+ + 'static,
+ >,
+ >;
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the signals are non-existant.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied for any of the signals.
+ // INVALID_ARGUMENT if the request is empty or provided path is too long
+ //
+ async fn subscribe(
+ &self,
+ request: tonic::Request
,
+ ) -> Result, tonic::Status> {
+ debug!(?request);
+ let permissions = match request.extensions().get::() {
+ Some(permissions) => {
+ debug!(?permissions);
+ permissions.clone()
+ }
+ None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+ };
+
+ let broker = self.authorized_access(&permissions);
+
+ let request = request.into_inner();
+
+ let signal_paths = request.signal_paths;
+ let size = signal_paths.len();
+
+ let mut valid_requests: HashMap> = HashMap::with_capacity(size);
+
+ for path in signal_paths {
+ valid_requests.insert(
+ match get_signal(
+ Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Path(path)),
+ }),
+ &broker,
+ )
+ .await
+ {
+ Ok(signal_id) => signal_id,
+ Err(err) => return Err(err),
+ },
+ vec![broker::Field::Datapoint].into_iter().collect(),
+ );
+ }
+
+ match broker.subscribe(valid_requests).await {
+ Ok(stream) => {
+ let stream = convert_to_proto_stream(stream, size);
+ Ok(tonic::Response::new(Box::pin(stream)))
+ }
+ Err(SubscriptionError::NotFound) => Err(tonic::Status::not_found("Path not found")),
+ Err(SubscriptionError::InvalidInput) => {
+ Err(tonic::Status::invalid_argument("Invalid Argument"))
+ }
+ Err(SubscriptionError::InternalError) => Err(tonic::Status::internal("Internal Error")),
+ }
+ }
+
+ type SubscribeByIdStream = Pin<
+ Box<
+ dyn Stream- >
+ + Send
+ + Sync
+ + 'static,
+ >,
+ >;
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the signals are non-existant.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied for any of the signals.
+ // INVALID_ARGUMENT if the request is empty
+ //
+ async fn subscribe_by_id(
+ &self,
+ request: tonic::Request
,
+ ) -> Result, tonic::Status> {
+ debug!(?request);
+ let permissions = match request.extensions().get::() {
+ Some(permissions) => {
+ debug!(?permissions);
+ permissions.clone()
+ }
+ None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+ };
+
+ let broker = self.authorized_access(&permissions);
+
+ let request = request.into_inner();
+
+ let signal_ids = request.signal_ids;
+ let size = signal_ids.len();
+
+ let mut valid_requests: HashMap> = HashMap::with_capacity(size);
+
+ for id in signal_ids {
+ valid_requests.insert(
+ match get_signal(
+ Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(id)),
+ }),
+ &broker,
+ )
+ .await
+ {
+ Ok(signal_id) => signal_id,
+ Err(err) => return Err(err),
+ },
+ vec![broker::Field::Datapoint].into_iter().collect(),
+ );
+ }
+
+ match broker.subscribe(valid_requests).await {
+ Ok(stream) => {
+ let stream = convert_to_proto_stream_id(stream, size);
+ Ok(tonic::Response::new(Box::pin(stream)))
+ }
+ Err(SubscriptionError::NotFound) => {
+ Err(tonic::Status::new(tonic::Code::NotFound, "Path not found"))
+ }
+ Err(SubscriptionError::InvalidInput) => Err(tonic::Status::new(
+ tonic::Code::InvalidArgument,
+ "Invalid Argument",
+ )),
+ Err(SubscriptionError::InternalError) => {
+ Err(tonic::Status::new(tonic::Code::Internal, "Internal Error"))
+ }
+ }
+ }
+
+ // Actuate a single actuator.
+ //
+ // Returns (GRPC error code):
+ //   NOT_FOUND if the actuator does not exist.
+ //   PERMISSION_DENIED if access is denied for the actuator.
+ //   UNAUTHENTICATED if no credentials provided or credentials has expired
+ //   UNAVAILABLE if there is no provider currently providing the actuator
+ //   DATA_LOSS if there is an internal TransmissionFailure
+ //   INVALID_ARGUMENT
+ //     - if the provided path is not an actuator.
+ //     - if the data type used in the request does not match
+ //       the data type of the addressed signal
+ //     - if the requested value is not accepted,
+ //       e.g. if sending an unsupported enum value
+ //     - if the provided value is out of the min/max range specified
+ //
+ async fn actuate(
+     &self,
+     request: tonic::Request<proto::ActuateRequest>,
+ ) -> Result<tonic::Response<ActuateResponse>, tonic::Status> {
+     debug!(?request);
+     // The auth interceptor stores the caller's permissions as a request
+     // extension; a missing extension means the call is unauthenticated.
+     let permissions = request
+         .extensions()
+         .get::<Permissions>()
+         .ok_or(tonic::Status::unauthenticated("Unauthenticated"))?
+         .clone();
+     let broker = self.authorized_access(&permissions);
+
+     let actuator_request = request.into_inner();
+     let value = actuator_request
+         .value
+         .ok_or(tonic::Status::invalid_argument("No value provided"))?;
+
+     let signal = actuator_request
+         .signal_id
+         .ok_or(tonic::Status::invalid_argument("No signal_id provided"))?
+         .signal;
+
+     // Resolve the signal reference (path or numeric id) to an internal id
+     // up front so the actuation call below exists only once instead of
+     // being duplicated per match arm.
+     let id = match &signal {
+         Some(proto::signal_id::Signal::Path(path)) => broker
+             .get_id_by_path(path)
+             .await
+             .ok_or(tonic::Status::not_found(format!(
+                 "Invalid path in signal_id provided {}",
+                 path
+             )))?,
+         Some(proto::signal_id::Signal::Id(id)) => *id,
+         None => {
+             return Err(tonic::Status::invalid_argument(
+                 "SignalID contains neither path or id",
+             ))
+         }
+     };
+
+     match broker.actuate(&id, &DataValue::from(value)).await {
+         Ok(()) => Ok(tonic::Response::new(ActuateResponse {})),
+         Err(error) => Err(error.0.to_tonic_status(error.1)),
+     }
+ }
+
+ // Actuate simultaneously multiple actuators.
+ // If any error occurs, the entire operation will be aborted
+ // and no single actuator value will be forwarded to the provider.
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the actuators are non-existent.
+ // PERMISSION_DENIED if access is denied for any of the actuators.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // UNAVAILABLE if there is no provider currently providing an actuator
+ // DATA_LOSS if there is an internal TransmissionFailure
+ // INVALID_ARGUMENT
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the requested value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if any of the provided actuators values are out of the min/max range specified
+ //
+ // NOTE(review): generic type parameters appear to have been lost in this
+ // patch text (e.g. `tonic::Request<proto::BatchActuateRequest>`,
+ // `Vec<ActuationChange>`); verify against the original source file.
+ async fn batch_actuate(
+ &self,
+ request: tonic::Request,
+ ) -> Result, tonic::Status> {
+ debug!(?request);
+ // Reject unauthenticated calls: permissions are injected as a request
+ // extension by the auth layer.
+ let permissions = match request.extensions().get::() {
+ Some(permissions) => {
+ debug!(?permissions);
+ permissions.clone()
+ }
+ None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+ };
+ let broker = self.authorized_access(&permissions);
+ let actuate_requests = request.into_inner().actuate_requests;
+
+ // First validate and resolve every request; any failure aborts the
+ // whole batch before anything is forwarded to a provider.
+ let mut actuation_changes: Vec = vec![];
+ for actuate_request in actuate_requests {
+ let vss_id = match actuate_request.signal_id {
+ Some(signal_id) => match signal_id.signal {
+ Some(proto::signal_id::Signal::Id(vss_id)) => vss_id,
+ Some(proto::signal_id::Signal::Path(vss_path)) => {
+ let result = broker.get_id_by_path(&vss_path).await;
+ match result {
+ Some(vss_id) => vss_id,
+ None => {
+ let message =
+ format!("Could not resolve vss_id for path: {}", vss_path);
+ return Err(tonic::Status::not_found(message));
+ }
+ }
+ }
+ None => return Err(tonic::Status::invalid_argument("Signal not provided")),
+ },
+ None => return Err(tonic::Status::invalid_argument("Signal_Id not provided")),
+ };
+ let data_value = match actuate_request.value {
+ Some(data_value) => DataValue::from(data_value),
+ None => return Err(tonic::Status::invalid_argument("")),
+ };
+ let actuation_change = ActuationChange {
+ id: vss_id,
+ data_value,
+ };
+ actuation_changes.push(actuation_change);
+ }
+
+ // All entries resolved; hand the whole batch to the broker atomically.
+ let result = broker.batch_actuate(actuation_changes).await;
+ match result {
+ Ok(_) => Ok(tonic::Response::new(proto::BatchActuateResponse {})),
+ Err(error) => return Err(error.0.to_tonic_status(error.1)),
+ }
+ }
+
+ // List metadata of all signals whose path matches the requested root /
+ // wildcard pattern.
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if the specified root branch does not exist.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // INVALID_ARGUMENT if the provided path or wildcard is wrong.
+ //
+ // NOTE(review): generic parameters (e.g. `tonic::Request<proto::ListMetadataRequest>`)
+ // appear stripped in this patch text; verify against the original source.
+ async fn list_metadata(
+ &self,
+ request: tonic::Request,
+ ) -> Result, tonic::Status> {
+ debug!(?request);
+ let permissions = match request.extensions().get::() {
+ Some(permissions) => {
+ debug!(?permissions);
+ permissions.clone()
+ }
+ None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+ };
+ let broker = self.authorized_access(&permissions);
+
+ let metadata_request = request.into_inner();
+
+ // Matcher::new validates the glob pattern; a bad pattern maps to
+ // INVALID_ARGUMENT below.
+ match Matcher::new(&metadata_request.root) {
+ Ok(matcher) => {
+ let mut metadata_response = Vec::new();
+ broker
+ .for_each_entry(|entry| {
+ let entry_metadata = &entry.metadata();
+ if matcher.is_match(&entry_metadata.glob_path) {
+ metadata_response.push(proto::Metadata::from(*entry_metadata));
+ }
+ })
+ .await;
+ // An empty result is treated as "root branch does not exist"
+ // rather than an empty OK response.
+ if metadata_response.is_empty() {
+ Err(tonic::Status::not_found(
+ "Specified root branch does not exist",
+ ))
+ } else {
+ Ok(tonic::Response::new(ListMetadataResponse {
+ metadata: metadata_response,
+ }))
+ }
+ }
+ Err(_) => Err(tonic::Status::invalid_argument("Invalid Pattern Argument")),
+ }
+ }
+
+ // Publish a new current value for a single signal.
+ //
+ // Returns (GRPC error code):
+ //   NOT_FOUND if any of the signals are non-existent.
+ //   PERMISSION_DENIED
+ //     - if access is denied for any of the signals.
+ //   UNAUTHENTICATED if no credentials provided or credentials has expired
+ //   INVALID_ARGUMENT
+ //     - if no datapoint is provided
+ //     - if the data type used in the request does not match
+ //       the data type of the addressed signal
+ //     - if the published value is not accepted,
+ //       e.g. if sending an unsupported enum value
+ //     - if the published value is out of the min/max range specified
+ //
+ async fn publish_value(
+     &self,
+     request: tonic::Request<proto::PublishValueRequest>,
+ ) -> Result<tonic::Response<proto::PublishValueResponse>, tonic::Status> {
+     debug!(?request);
+     let permissions = request
+         .extensions()
+         .get::<Permissions>()
+         .ok_or(tonic::Status::unauthenticated("Unauthenticated"))?
+         .clone();
+
+     let broker = self.authorized_access(&permissions);
+
+     let request = request.into_inner();
+
+     // Fix: previously `request.data_point.unwrap()` panicked (crashing the
+     // server task) when a client omitted the datapoint; a malformed request
+     // must instead be rejected with INVALID_ARGUMENT.
+     let data_point = request
+         .data_point
+         .ok_or(tonic::Status::invalid_argument("No datapoint provided"))?;
+
+     // Resolve path/id to the internal signal id (NOT_FOUND on failure).
+     let id = get_signal(request.signal_id, &broker).await?;
+
+     let mut updates: HashMap<i32, broker::EntryUpdate> = HashMap::with_capacity(1);
+     updates.insert(
+         id,
+         broker::EntryUpdate {
+             path: None,
+             datapoint: Some(broker::Datapoint::from(&data_point)),
+             actuator_target: None,
+             entry_type: None,
+             data_type: None,
+             description: None,
+             allowed: None,
+             max: None,
+             min: None,
+             unit: None,
+         },
+     );
+
+     match broker.update_entries(updates).await {
+         Ok(()) => Ok(tonic::Response::new(proto::PublishValueResponse {})),
+         Err(errors) => {
+             if errors.is_empty() {
+                 Ok(tonic::Response::new(proto::PublishValueResponse {}))
+             } else if let Some((id, err)) = errors.first() {
+                 // Report the first per-entry error with its signal id.
+                 Err(err.to_status_with_code(id))
+             } else {
+                 Err(tonic::Status::internal(
+                     "There is no error provided for the entry",
+                 ))
+             }
+         }
+     }
+ }
+
+ // Response-stream type returned to the provider; the spawned task below
+ // feeds it through an mpsc channel.
+ // NOTE(review): generic parameters appear stripped in this patch text
+ // (likely `ReceiverStream<Result<OpenProviderStreamResponse, tonic::Status>>`);
+ // verify against the original source.
+ type OpenProviderStreamStream =
+ ReceiverStream>;
+
+ // Errors:
+ // - Provider sends ProvideActuationRequest -> Databroker returns ProvideActuationResponse
+ // Returns (GRPC error code) and closes the stream call (strict case).
+ // NOT_FOUND if any of the signals are non-existent.
+ // PERMISSION_DENIED if access is denied for any of the signals.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // ALREADY_EXISTS if a provider already claimed the ownership of an actuator
+ //
+ // - Provider sends PublishValuesRequest -> Databroker returns PublishValuesResponse
+ // GRPC errors are returned as messages in the stream
+ // response with the signal id `map status = 2;` (permissive case)
+ // NOT_FOUND if a signal is non-existent.
+ // PERMISSION_DENIED
+ // - if access is denied for a signal.
+ // INVALID_ARGUMENT
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the published value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if the published value is out of the min/max range specified
+ //
+ // - Provider returns BatchActuateStreamResponse <- Databroker sends BatchActuateStreamRequest
+ // No error definition, a BatchActuateStreamResponse is expected from provider.
+ //
+ async fn open_provider_stream(
+ &self,
+ request: tonic::Request>,
+ ) -> Result, tonic::Status> {
+ debug!(?request);
+ let permissions = match request.extensions().get::() {
+ Some(permissions) => {
+ debug!(?permissions);
+ permissions.clone()
+ }
+ None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+ };
+
+ let mut stream = request.into_inner();
+
+ // Used to terminate the reader task when the databroker shuts down.
+ let mut shutdown_trigger = self.get_shutdown_trigger();
+
+ // Copy (to move into task below)
+ let broker = self.clone();
+ // Create stream (to be returned)
+ let (response_stream_sender, response_stream_receiver) = mpsc::channel(10);
+
+ // Listening on stream
+ // The task owns the inbound half of the stream and pushes responses
+ // into the channel backing the returned ReceiverStream.
+ tokio::spawn(async move {
+ let permissions = permissions;
+ let broker = broker.authorized_access(&permissions);
+ loop {
+ select! {
+ message = stream.message() => {
+ match message {
+ Ok(request) => {
+ match request {
+ Some(req) => {
+ match req.action {
+ Some(ProvideActuationRequest(provided_actuation)) => {
+ let response = provide_actuation(&broker, &provided_actuation, response_stream_sender.clone()).await;
+ if let Err(err) = response_stream_sender.send(response).await
+ {
+ debug!("Failed to send response: {}", err)
+ }
+ },
+ Some(PublishValuesRequest(publish_values_request)) => {
+ // Permissive case: errors are reported per-signal
+ // inside the response message, not as a stream error.
+ let response = publish_values(&broker, &publish_values_request).await;
+ if let Some(value) = response {
+ if let Err(err) = response_stream_sender.send(Ok(value)).await {
+ debug!("Failed to send error response: {}", err);
+ }
+ }
+ },
+ Some(BatchActuateStreamResponse(_batch_actuate_stream_response)) => {
+ // TODO discuss and implement
+ },
+ None => {
+
+ },
+ }
+ },
+ None => {
+ debug!("provider: no more messages");
+ break;
+ }
+ }
+ },
+ Err(err) => {
+ debug!("provider: connection broken: {:?}", err);
+ break;
+ },
+ }
+ },
+ _ = shutdown_trigger.recv() => {
+ debug!("provider: shutdown received");
+ break;
+ }
+ }
+ }
+ });
+
+ Ok(tonic::Response::new(ReceiverStream::new(
+ response_stream_receiver,
+ )))
+ }
+
+ // Report static server identification: name plus the version and commit
+ // hash baked in at build time. Requires no authentication.
+ async fn get_server_info(
+     &self,
+     _request: tonic::Request<proto::GetServerInfoRequest>,
+ ) -> Result<tonic::Response<proto::GetServerInfoResponse>, tonic::Status> {
+     Ok(tonic::Response::new(proto::GetServerInfoResponse {
+         name: String::from("databroker"),
+         version: self.get_version().to_owned(),
+         commit_hash: self.get_commit_sha().to_owned(),
+     }))
+ }
+}
+
+// Register the calling provider as the owner of the actuators listed in the
+// request. Signal references given as paths are resolved to ids first; if any
+// path cannot be resolved the whole registration fails with NOT_FOUND.
+// On success the provider (wrapping the response channel) is stored so the
+// databroker can forward actuation requests to it later.
+async fn provide_actuation(
+ broker: &AuthorizedAccess<'_, '_>,
+ request: &databroker_proto::kuksa::val::v2::ProvideActuationRequest,
+ sender: mpsc::Sender>,
+) -> Result {
+ // Collect the path-based references...
+ let vss_paths: Vec<_> = request
+ .actuator_identifiers
+ .iter()
+ .filter_map(|signal_id| match &signal_id.signal {
+ Some(proto::signal_id::Signal::Path(path)) => Some(path.clone()),
+ _ => None,
+ })
+ .collect();
+
+ // ...and resolve them all concurrently.
+ let future_vss_ids = vss_paths
+ .iter()
+ .map(|vss_path| broker.get_id_by_path(vss_path));
+ let resolved_opt_vss_ids = futures::future::join_all(future_vss_ids).await;
+
+ // Fail the whole request on the first unresolvable path (strict case).
+ for (index, opt_vss_id) in resolved_opt_vss_ids.iter().enumerate() {
+ if opt_vss_id.is_none() {
+ let message = format!(
+ "Could not resolve id of vss_path: {}",
+ vss_paths.get(index).unwrap()
+ );
+ return Err(tonic::Status::not_found(message));
+ }
+ }
+
+ let resolved_vss_ids: Vec = resolved_opt_vss_ids.iter().filter_map(|&opt| opt).collect();
+
+ // Id-based references are taken as-is.
+ let vss_ids: Vec<_> = request
+ .actuator_identifiers
+ .iter()
+ .filter_map(|signal_id| match &signal_id.signal {
+ Some(proto::signal_id::Signal::Id(id)) => Some(*id),
+ _ => None,
+ })
+ .collect();
+
+ let mut all_vss_ids = vec![];
+ all_vss_ids.extend(vss_ids);
+ all_vss_ids.extend(resolved_vss_ids);
+
+ let provider = Provider { sender };
+
+ match broker
+ .provide_actuation(all_vss_ids, Box::new(provider))
+ .await
+ {
+ Ok(_) => {
+ let provide_actuation_response = ProvideActuationResponse {};
+
+ let response = OpenProviderStreamResponse {
+ action: Some(
+ open_provider_stream_response::Action::ProvideActuationResponse(
+ provide_actuation_response,
+ ),
+ ),
+ };
+
+ Ok(response)
+ }
+
+ Err(error) => Err(error.0.to_tonic_status(error.1)),
+ }
+}
+
+// Apply a provider's batched datapoint updates. Returns None on full success
+// (no stream message needed); on failure returns a PublishValuesResponse
+// carrying a per-signal error map, matched to the request via request_id
+// (permissive case: errors do not terminate the provider stream).
+async fn publish_values(
+ broker: &AuthorizedAccess<'_, '_>,
+ request: &databroker_proto::kuksa::val::v2::PublishValuesRequest,
+) -> Option {
+ // Map each (signal id -> datapoint) pair to an EntryUpdate that only
+ // touches the datapoint field.
+ let ids: Vec<(i32, broker::EntryUpdate)> = request
+ .datapoints
+ .iter()
+ .map(|(id, datapoint)| {
+ (
+ *id,
+ broker::EntryUpdate {
+ path: None,
+ datapoint: Some(broker::Datapoint::from(datapoint)),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )
+ })
+ .collect();
+
+ // TODO check if provider is allowed to update the entries for the provided signals?
+ match broker.update_entries(ids).await {
+ Ok(_) => None,
+ Err(err) => Some(OpenProviderStreamResponse {
+ action: Some(
+ open_provider_stream_response::Action::PublishValuesResponse(
+ PublishValuesResponse {
+ request_id: request.request_id,
+ status: err
+ .iter()
+ .map(|(id, error)| (*id, proto::Error::from(error)))
+ .collect(),
+ },
+ ),
+ ),
+ }),
+ }
+}
+
+/// Resolve an optional `SignalId` (path or numeric id) to the internal signal
+/// id, checking that the signal exists.
+///
+/// Errors (gRPC):
+/// - INVALID_ARGUMENT if no SignalId was provided or the path exceeds
+///   `MAX_REQUEST_PATH_LENGTH`.
+/// - NOT_FOUND if the path/id does not name an existing signal.
+async fn get_signal(
+    signal_id: Option<proto::SignalId>,
+    broker: &AuthorizedAccess<'_, '_>,
+) -> Result<i32, tonic::Status> {
+    // Fix: previously this did `signal_id.unwrap()`, panicking (instead of
+    // returning INVALID_ARGUMENT) when the client omitted the SignalId.
+    let Some(signal) = signal_id.and_then(|signal_id| signal_id.signal) else {
+        return Err(tonic::Status::invalid_argument("No SignalId provided"));
+    };
+    match signal {
+        proto::signal_id::Signal::Path(path) => {
+            // Bound the path length to avoid unbounded lookups on abusive input.
+            if path.len() > MAX_REQUEST_PATH_LENGTH {
+                return Err(tonic::Status::invalid_argument(
+                    "The provided path is too long",
+                ));
+            }
+            match broker.get_id_by_path(&path).await {
+                Some(id) => Ok(id),
+                None => Err(tonic::Status::not_found("Path not found")),
+            }
+        }
+        // Numeric ids are validated by checking that metadata exists for them.
+        proto::signal_id::Signal::Id(id) => match broker.get_metadata(id).await {
+            Some(_metadata) => Ok(id),
+            None => Err(tonic::Status::not_found("Path not found")),
+        },
+    }
+}
+
+// Map the broker's internal subscription stream into path-keyed
+// SubscribeResponse messages, dropping updates without a datapoint.
+// NOTE(review): this signature is garbled in the patch text — generic
+// parameters (likely `impl Stream<Item = EntryUpdates>` and
+// `impl Stream<Item = Result<proto::SubscribeResponse, tonic::Status>>`)
+// appear stripped; verify against the original source.
+fn convert_to_proto_stream(
+ input: impl Stream- ,
+ size: usize,
+) -> impl Stream
+- > {
+ input.map(move |item| {
+ // `size` pre-sizes the map to the number of subscribed signals.
+ let mut entries: HashMap
+ = HashMap::with_capacity(size);
+ for update in item.updates {
+ let update_datapoint: Option = match update.update.datapoint {
+ Some(datapoint) => datapoint.into(),
+ None => None,
+ };
+ if let Some(dp) = update_datapoint {
+ entries.insert(
+ update
+ .update
+ .path
+ // Internal invariant: path-based subscriptions always carry a path.
+ .expect("Something wrong with update path of subscriptions!"),
+ dp,
+ );
+ }
+ }
+ let response = proto::SubscribeResponse { entries };
+ Ok(response)
+ })
+}
+
+// Id-keyed variant of convert_to_proto_stream: emits SubscribeByIdResponse
+// messages keyed by numeric signal id instead of path.
+// NOTE(review): generic parameters appear stripped in this patch text;
+// verify against the original source.
+fn convert_to_proto_stream_id(
+ input: impl Stream- ,
+ size: usize,
+) -> impl Stream
+- > {
+ input.map(move |item| {
+ let mut entries: HashMap
+ = HashMap::with_capacity(size);
+ for update in item.updates {
+ let update_datapoint: Option = match update.update.datapoint {
+ Some(datapoint) => datapoint.into(),
+ None => None,
+ };
+ if let Some(dp) = update_datapoint {
+ entries.insert(update.id, dp);
+ }
+ }
+ let response = proto::SubscribeByIdResponse { entries };
+ Ok(response)
+ })
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{broker::DataBroker, permissions};
+ use databroker_proto::kuksa::val::v2::val_server::Val;
+ use proto::open_provider_stream_response::Action::{
+ BatchActuateStreamRequest, ProvideActuationResponse, PublishValuesResponse,
+ };
+ use proto::{
+ open_provider_stream_request, BatchActuateRequest, OpenProviderStreamRequest,
+ PublishValuesRequest, SignalId, Value,
+ };
+
+ // GetValue happy path: look up an int32 signal by numeric id.
+ #[tokio::test]
+ async fn test_get_value_id_ok() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ let entry_id = broker::tests::helper_add_int32(&broker, "test.datapoint1", -64, timestamp)
+ .await
+ .expect("Shall succeed");
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ };
+
+ // Manually insert permissions
+ let mut get_value_request = tonic::Request::new(request);
+ get_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.get_value(get_value_request).await {
+ Ok(response) => {
+ // Handle the successful response
+ let get_response = response.into_inner();
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32(-64)),
+ };
+ assert_eq!(
+ get_response,
+ proto::GetValueResponse {
+ data_point: {
+ Some(proto::Datapoint {
+ timestamp: Some(timestamp.into()),
+ value: Some(value),
+ })
+ },
+ }
+ );
+ }
+ Err(status) => {
+ panic!("Get failed with status: {:?}", status);
+ }
+ }
+ }
+
+ // GetValue happy path: look up the same signal by VSS path.
+ #[tokio::test]
+ async fn test_get_value_name_ok() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ let _entry_id = broker::tests::helper_add_int32(&broker, "test.datapoint1", -64, timestamp)
+ .await
+ .expect("Shall succeed");
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "test.datapoint1".to_string(),
+ )),
+ }),
+ };
+
+ // Manually insert permissions
+ let mut get_value_request = tonic::Request::new(request);
+ get_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.get_value(get_value_request).await {
+ Ok(response) => {
+ // Handle the successful response
+ let get_response = response.into_inner();
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32(-64)),
+ };
+ assert_eq!(
+ get_response,
+ proto::GetValueResponse {
+ data_point: {
+ Some(proto::Datapoint {
+ timestamp: Some(timestamp.into()),
+ value: Some(value),
+ })
+ },
+ }
+ );
+ }
+ Err(status) => {
+ panic!("Get failed with status: {:?}", status);
+ }
+ }
+ }
+
+ // Missing permissions extension must yield UNAUTHENTICATED.
+ #[tokio::test]
+ async fn test_get_value_id_not_authorized() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ let entry_id = broker::tests::helper_add_int32(&broker, "test.datapoint1", -64, timestamp)
+ .await
+ .expect("Shall succeed");
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ };
+
+ // Do not insert permissions
+ let get_value_request = tonic::Request::new(request);
+
+ match broker.get_value(get_value_request).await {
+ Ok(_response) => {
+ panic!("Did not expect success");
+ }
+ Err(status) => {
+ assert_eq!(status.code(), tonic::Code::Unauthenticated)
+ }
+ }
+ }
+
+ // A registered signal with no published value returns an empty datapoint
+ // (value None) but still carries a registration timestamp.
+ #[tokio::test]
+ async fn test_get_value_id_no_value() {
+ // Define signal but do not assign any value
+
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_string(),
+ broker::DataType::Int32,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Some Description hat Does Not Matter".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ // Now try to get it
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ };
+
+ // Manually insert permissions
+ let mut get_value_request = tonic::Request::new(request);
+ get_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.get_value(get_value_request).await {
+ Ok(response) => {
+ // Handle the successful response
+ let get_response = response.into_inner();
+
+ // As of today Databroker assigns "Now" when registering a Datapoint so if there is no value
+ // we do not know exact time. For now just checking that it is not None
+ assert_eq!(get_response.data_point.clone().unwrap().value, None);
+ assert_ne!(get_response.data_point.unwrap().timestamp, None);
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ panic!("Get failed with status: {:?}", status);
+ }
+ }
+ }
+
+ // Unknown numeric id must yield NOT_FOUND.
+ #[tokio::test]
+ async fn test_get_value_id_not_defined() {
+ let broker = DataBroker::default();
+ // Just use some arbitrary number
+ let entry_id: i32 = 12345;
+
+ // Now try to get it
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ };
+
+ // Manually insert permissions
+ let mut get_value_request = tonic::Request::new(request);
+ get_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.get_value(get_value_request).await {
+ Ok(_response) => {
+ panic!("Did not expect success");
+ }
+ Err(status) => {
+ assert_eq!(status.code(), tonic::Code::NotFound)
+ }
+ }
+ }
+
+ // Unknown path must yield NOT_FOUND.
+ #[tokio::test]
+ async fn test_get_value_name_not_defined() {
+ let broker = DataBroker::default();
+
+ // Now try to get it
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "test.datapoint1".to_string(),
+ )),
+ }),
+ };
+
+ // Manually insert permissions
+ let mut get_value_request = tonic::Request::new(request);
+ get_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.get_value(get_value_request).await {
+ Ok(_response) => {
+ panic!("Did not expect success");
+ }
+ Err(status) => {
+ assert_eq!(status.code(), tonic::Code::NotFound)
+ }
+ }
+ }
+
+ // Parameter matrix for test_get_values_combo: which of the two test
+ // signals exist, are requested (by id or path), and are authorized.
+ struct GetValuesConfig {
+ send_auth: bool,
+ request_first: bool,
+ use_name_for_first: bool,
+ first_exist: bool,
+ auth_first: bool,
+ request_second: bool,
+ use_name_for_second: bool,
+ second_exist: bool,
+ auth_second: bool,
+ }
+
+ // Fluent builder for GetValuesConfig; all flags default to false.
+ struct GetValuesConfigBuilder {
+ send_auth: bool,
+ request_first: bool,
+ use_name_for_first: bool,
+ first_exist: bool,
+ auth_first: bool,
+ request_second: bool,
+ use_name_for_second: bool,
+ second_exist: bool,
+ auth_second: bool,
+ }
+
+ impl GetValuesConfigBuilder {
+ fn new() -> GetValuesConfigBuilder {
+ GetValuesConfigBuilder {
+ send_auth: false,
+ request_first: false,
+ use_name_for_first: false,
+ first_exist: false,
+ auth_first: false,
+ request_second: false,
+ use_name_for_second: false,
+ second_exist: false,
+ auth_second: false,
+ }
+ }
+
+ // Request credentials to be sent.
+ // Does not need to be explicitly requested if auth_first/auth_second is used
+ fn send_auth(&mut self) -> &mut Self {
+ self.send_auth = true;
+ self
+ }
+
+ fn request_first(&mut self) -> &mut Self {
+ self.request_first = true;
+ self
+ }
+
+ fn use_name_for_first(&mut self) -> &mut Self {
+ self.use_name_for_first = true;
+ self
+ }
+
+ fn first_exist(&mut self) -> &mut Self {
+ self.first_exist = true;
+ self
+ }
+
+ // Request credentials and include credentials for signal 1
+ fn auth_first(&mut self) -> &mut Self {
+ self.auth_first = true;
+ self.send_auth = true;
+ self
+ }
+
+ fn request_second(&mut self) -> &mut Self {
+ self.request_second = true;
+ self
+ }
+
+ fn use_name_for_second(&mut self) -> &mut Self {
+ self.use_name_for_second = true;
+ self
+ }
+
+ fn second_exist(&mut self) -> &mut Self {
+ self.second_exist = true;
+ self
+ }
+
+ // Request credentials and include credentials for signal 2
+ fn auth_second(&mut self) -> &mut Self {
+ self.send_auth = true;
+ self.auth_second = true;
+ self
+ }
+
+ fn build(&self) -> GetValuesConfig {
+ GetValuesConfig {
+ send_auth: self.send_auth,
+ request_first: self.request_first,
+ use_name_for_first: self.use_name_for_first,
+ first_exist: self.first_exist,
+ auth_first: self.auth_first,
+ request_second: self.request_second,
+ use_name_for_second: self.use_name_for_second,
+ second_exist: self.second_exist,
+ auth_second: self.auth_second,
+ }
+ }
+ }
+
+    /// Drives GetValues for an arbitrary combination of requested / existing /
+    /// authorized signals (see GetValuesConfig) and asserts the expected
+    /// response or gRPC status code.
+    async fn test_get_values_combo(config: GetValuesConfig) {
+        static SIGNAL1: &str = "test.datapoint1";
+        static SIGNAL2: &str = "test.datapoint2";
+
+        let broker = DataBroker::default();
+
+        let timestamp = std::time::SystemTime::now();
+
+        let mut entry_id = -1;
+        if config.first_exist {
+            entry_id = broker::tests::helper_add_int32(&broker, SIGNAL1, -64, timestamp)
+                .await
+                .expect("Shall succeed");
+        }
+
+        let mut entry_id2 = -1;
+        if config.second_exist {
+            entry_id2 = broker::tests::helper_add_int32(&broker, SIGNAL2, -13, timestamp)
+                .await
+                .expect("Shall succeed");
+        }
+
+        let mut permission_builder = permissions::PermissionBuilder::new();
+
+        if config.auth_first {
+            permission_builder = permission_builder
+                .add_read_permission(permissions::Permission::Glob(SIGNAL1.to_string()));
+        }
+        if config.auth_second {
+            permission_builder = permission_builder
+                .add_read_permission(permissions::Permission::Glob(SIGNAL2.to_string()));
+        }
+        let permissions = permission_builder.build().expect("Oops!");
+
+        // Build the request
+
+        let mut request_signals = Vec::new();
+        if config.request_first {
+            if !config.use_name_for_first {
+                request_signals.push(proto::SignalId {
+                    signal: Some(proto::signal_id::Signal::Id(entry_id)),
+                });
+            } else {
+                request_signals.push(proto::SignalId {
+                    signal: Some(proto::signal_id::Signal::Path(SIGNAL1.to_string())),
+                });
+            }
+        }
+        if config.request_second {
+            if !config.use_name_for_second {
+                request_signals.push(proto::SignalId {
+                    signal: Some(proto::signal_id::Signal::Id(entry_id2)),
+                });
+            } else {
+                request_signals.push(proto::SignalId {
+                    signal: Some(proto::signal_id::Signal::Path(SIGNAL2.to_string())),
+                });
+            }
+        }
+
+        let request = proto::GetValuesRequest {
+            signal_ids: request_signals,
+        };
+
+        let mut tonic_request = tonic::Request::new(request);
+
+        if config.send_auth {
+            tonic_request.extensions_mut().insert(permissions);
+        }
+
+        match broker.get_values(tonic_request).await {
+            Ok(response) => {
+                // Check that we actually expect an Ok answer
+
+                if config.request_first & !config.first_exist {
+                    panic!("Should not get Ok as signal test.datapoint1 should not exist")
+                }
+                // Fixed: these two messages previously referenced the wrong
+                // signal (copy-paste from the neighbouring cases).
+                if config.request_first & !config.auth_first {
+                    panic!("Should not get Ok as we do not have permission for signal test.datapoint1")
+                }
+                if config.request_second & !config.second_exist {
+                    panic!("Should not get Ok as signal test.datapoint2 should not exist")
+                }
+                if config.request_second & !config.auth_second {
+                    panic!("Should not get Ok as we do not have permission for signal test.datapoint2")
+                }
+
+                let get_response = response.into_inner();
+
+                let mut response_signals = Vec::new();
+
+                if config.request_first {
+                    let value = proto::Value {
+                        typed_value: Some(proto::value::TypedValue::Int32(-64)),
+                    };
+                    let datapoint = proto::Datapoint {
+                        timestamp: Some(timestamp.into()),
+                        value: Some(value),
+                    };
+                    response_signals.push(datapoint);
+                }
+                if config.request_second {
+                    let value = proto::Value {
+                        typed_value: Some(proto::value::TypedValue::Int32(-13)),
+                    };
+                    let datapoint = proto::Datapoint {
+                        timestamp: Some(timestamp.into()),
+                        value: Some(value),
+                    };
+                    response_signals.push(datapoint);
+                }
+
+                assert_eq!(
+                    get_response,
+                    proto::GetValuesResponse {
+                        data_points: response_signals,
+                    }
+                );
+            }
+            Err(status) => {
+                // It can be discussed what has precedence NotFound or Unauthenticated, does not really matter
+                // For now assuming that NotFound has precedence, at least if we have a valid token
+                if !config.send_auth {
+                    assert_eq!(status.code(), tonic::Code::Unauthenticated)
+                } else if config.request_first & !config.first_exist {
+                    assert_eq!(status.code(), tonic::Code::NotFound)
+                } else if config.request_first & !config.auth_first {
+                    assert_eq!(status.code(), tonic::Code::PermissionDenied)
+                } else if config.request_second & !config.second_exist {
+                    assert_eq!(status.code(), tonic::Code::NotFound)
+                } else if config.request_second & !config.auth_second {
+                    assert_eq!(status.code(), tonic::Code::PermissionDenied)
+                } else {
+                    panic!("GetValues failed with status: {:?}", status);
+                }
+            }
+        }
+    }
+
+ // Thin wrappers driving test_get_values_combo through each configuration
+ // of the GetValues parameter matrix.
+ #[tokio::test]
+ async fn test_get_values_id_one_signal_ok() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .request_first()
+ .auth_first()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_ok() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_path_one_signal_ok() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .request_first()
+ .use_name_for_first()
+ .auth_first()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_path_two_signals_ok() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .use_name_for_first()
+ .request_second()
+ .use_name_for_second()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_no_signals_ok() {
+ // Expecting an empty list back
+
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_first_missing() {
+ let config = GetValuesConfigBuilder::new()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_second_missing() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_first_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_second_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_both_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .send_auth()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_first_missing_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_second_missing_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_not_send_auth() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ // PublishValue happy path: publish a bool value to an existing sensor.
+ #[tokio::test]
+ async fn test_publish_value() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let request = proto::PublishValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ data_point: {
+ let timestamp = Some(std::time::SystemTime::now().into());
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ };
+
+ Some(proto::Datapoint {
+ timestamp,
+ value: Some(value),
+ })
+ },
+ };
+
+ // Manually insert permissions
+ let mut publish_value_request = tonic::Request::new(request);
+ publish_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.publish_value(publish_value_request).await {
+ Ok(response) => {
+ // Handle the successful response
+ let publish_response = response.into_inner();
+ assert_eq!(publish_response, proto::PublishValueResponse {})
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ panic!("Publish failed with status: {:?}", status);
+ }
+ }
+ }
+
+ // Publishing to an unknown numeric id must yield NOT_FOUND.
+ #[tokio::test]
+ async fn test_publish_value_signal_id_not_found() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let _entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let request = proto::PublishValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(1234)),
+ }),
+ data_point: {
+ let timestamp = Some(std::time::SystemTime::now().into());
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ };
+
+ Some(proto::Datapoint {
+ timestamp,
+ value: Some(value),
+ })
+ },
+ };
+
+ // Manually insert permissions
+ let mut publish_value_request = tonic::Request::new(request);
+ publish_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.publish_value(publish_value_request).await {
+ Ok(_) => {
+ // Handle the successful response
+ panic!("Should not happen!");
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ assert_eq!(status.code(), tonic::Code::NotFound);
+ assert_eq!(status.message(), "Path not found");
+ }
+ }
+ }
+
+ #[tokio::test]
+ /// For kuksa_val_v2 we only have a single test to test min/max violations
+ /// More detailed test cases for different cases/datatypes in broker.rs
+ async fn test_publish_value_min_max_not_fulfilled() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Uint8,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ Some(broker::types::DataValue::Uint32(3)), // min
+ Some(broker::types::DataValue::Uint32(26)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let request = proto::PublishValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ data_point: {
+ let timestamp = Some(std::time::SystemTime::now().into());
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint32(27)),
+ };
+
+ Some(proto::Datapoint {
+ timestamp,
+ value: Some(value),
+ })
+ },
+ };
+
+ // Manually insert permissions
+ let mut publish_value_request = tonic::Request::new(request);
+ publish_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.publish_value(publish_value_request).await {
+ Ok(_) => {
+ // Handle the successful response
+ panic!("Should not happen!");
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ assert_eq!(status.code(), tonic::Code::InvalidArgument);
+ // As of today the first added datapoint gets id 0 by default.
+ assert_eq!(status.message(), "Value out of min/max bounds (id: 0)");
+ }
+ }
+ }
+
+ async fn publish_value(
+ broker: &DataBroker,
+ entry_id: i32,
+ input_value: Option<bool>,
+ input_timestamp: Option<std::time::SystemTime>,
+ ) {
+ let timestamp = input_timestamp.map(|input_timestamp| input_timestamp.into());
+
+ let mut request = tonic::Request::new(proto::PublishValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ data_point: Some(proto::Datapoint {
+ timestamp,
+
+ value: match input_value {
+ Some(true) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ Some(false) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(false)),
+ }),
+ None => None,
+ },
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+ match broker.publish_value(request).await {
+ Ok(response) => {
+ // Handle the successful response
+ let publish_response = response.into_inner();
+
+ // Check if there is an error in the response
+ assert_eq!(publish_response, proto::PublishValueResponse {});
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ panic!("Publish failed with status: {:?}", status);
+ }
+ }
+ }
+
+ /*
+ Test subscribe service method
+ */
+ async fn test_subscribe_case(has_value: bool) {
+ async fn check_stream_next(
+ item: &Result<proto::SubscribeResponse, tonic::Status>,
+ input_value: Option<bool>,
+ ) {
+ // Create Datapoint
+ let mut expected_response: HashMap<String, proto::Datapoint> = HashMap::new();
+ // Build the expected response entry; only the value is compared below
+ expected_response.insert(
+ "test.datapoint1".to_string(),
+ proto::Datapoint {
+ timestamp: None,
+ value: match input_value {
+ Some(true) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ Some(false) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(false)),
+ }),
+ None => None,
+ },
+ },
+ );
+
+ match item {
+ Ok(subscribe_response) => {
+ // Process the SubscribeResponse
+ let response = &subscribe_response.entries;
+ assert_eq!(response.len(), expected_response.len());
+ for key in response
+ .keys()
+ .chain(expected_response.keys())
+ .collect::<HashSet<_>>()
+ {
+ match (response.get(key), expected_response.get(key)) {
+ (Some(entry1), Some(entry2)) => {
+ assert_eq!(entry1.value, entry2.value);
+ }
+ (Some(entry1), None) => {
+ panic!("Key '{}' is only in response: {:?}", key, entry1)
+ }
+ (None, Some(entry2)) => {
+ panic!("Key '{}' is only in expected_response: {:?}", key, entry2)
+ }
+ (None, None) => unreachable!(),
+ }
+ }
+ }
+ Err(err) => {
+ panic!("Error {:?}", err)
+ }
+ }
+ }
+
+ let broker = DataBroker::default();
+
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_string(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Some Description that Does Not Matter".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ if has_value {
+ publish_value(&broker, entry_id, Some(false), None).await
+ }
+
+ let mut request = tonic::Request::new(proto::SubscribeRequest {
+ signal_paths: vec!["test.datapoint1".to_string()],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result = tokio::task::block_in_place(|| {
+ // Blocking operation here
+ // Since broker.subscribe is async, you need to run it in an executor
+ let rt = tokio::runtime::Runtime::new().unwrap();
+ rt.block_on(broker.subscribe(request))
+ });
+
+ // Publish "true" as value
+ publish_value(&broker, entry_id, Some(true), None).await;
+
+ // Publish "false" as value
+ publish_value(&broker, entry_id, Some(false), None).await;
+
+ // Publish "false" again but with a new timestamp - as it is not an update we shall not get anything
+
+ let timestamp = std::time::SystemTime::now();
+ publish_value(&broker, entry_id, Some(false), timestamp.into()).await;
+
+ // Publish None as value, equals reset
+ publish_value(&broker, entry_id, None, None).await;
+
+ // Publish "true" as value
+
+ publish_value(&broker, entry_id, Some(true), None).await;
+
+ if let Ok(stream) = result {
+ // Process the stream by iterating over the items
+ let mut stream = stream.into_inner();
+
+ let mut item_count = 0;
+ while let Some(item) = stream.next().await {
+ match item_count {
+ 0 => {
+ check_stream_next(&item, if has_value { Some(false) } else { None }).await;
+ }
+ 1 => {
+ check_stream_next(&item, Some(true)).await;
+ }
+ 2 => {
+ // As long as value stays as false we do not get anything new, so prepare for None
+ check_stream_next(&item, Some(false)).await;
+ }
+ 3 => {
+ check_stream_next(&item, None).await;
+ }
+ 4 => {
+ check_stream_next(&item, Some(true)).await;
+ // And we do not expect more
+ break;
+ }
+ _ => panic!(
+ "You shouldn't land here too many items reported back to the stream."
+ ),
+ }
+ item_count += 1;
+ }
+ // Make sure stream is not closed in advance
+ assert_eq!(item_count, 4);
+ } else {
+ panic!("Something went wrong while getting the stream.")
+ }
+ }
+
+ /*
+ Test subscribe service method by id
+ */
+ async fn test_subscribe_case_by_id(has_value: bool) {
+ async fn check_stream_next_by_id(
+ item: &Result<proto::SubscribeByIdResponse, tonic::Status>,
+ input_value: Option<bool>,
+ signal_id: i32,
+ ) {
+ // Create Datapoint
+ let mut expected_response: HashMap<i32, proto::Datapoint> = HashMap::new();
+ // Build the expected response entry; only the value is compared below
+ expected_response.insert(
+ signal_id,
+ proto::Datapoint {
+ timestamp: None,
+ value: match input_value {
+ Some(true) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ Some(false) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(false)),
+ }),
+ None => None,
+ },
+ },
+ );
+
+ match item {
+ Ok(subscribe_response) => {
+ // Process the SubscribeResponse
+ let response = &subscribe_response.entries;
+ assert_eq!(response.len(), expected_response.len());
+ for key in response.keys() {
+ match (response.get(key), expected_response.get(key)) {
+ (Some(entry1), Some(entry2)) => {
+ assert_eq!(entry1.value, entry2.value);
+ }
+ (Some(entry1), None) => {
+ panic!("Key '{}' is only in response: {:?}", key, entry1)
+ }
+ (None, Some(entry2)) => {
+ panic!("Key '{}' is only in expected_response: {:?}", key, entry2)
+ }
+ (None, None) => unreachable!(),
+ }
+ }
+ }
+ Err(err) => {
+ panic!("Error {:?}", err)
+ }
+ }
+ }
+ let broker = DataBroker::default();
+
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_string(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Some Description that Does Not Matter".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ if has_value {
+ publish_value(&broker, entry_id, Some(false), None).await
+ }
+
+ let mut request = tonic::Request::new(proto::SubscribeByIdRequest {
+ signal_ids: vec![entry_id],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result = tokio::task::block_in_place(|| {
+ // Blocking operation here
+ // Since broker.subscribe is async, you need to run it in an executor
+ let rt = tokio::runtime::Runtime::new().unwrap();
+ rt.block_on(broker.subscribe_by_id(request))
+ });
+
+ // Publish "true" as value
+ publish_value(&broker, entry_id, Some(true), None).await;
+
+ // Publish "false" as value
+ publish_value(&broker, entry_id, Some(false), None).await;
+
+ // Publish "false" again but with a new timestamp - as it is not an update we shall not get anything
+
+ let timestamp = std::time::SystemTime::now();
+ publish_value(&broker, entry_id, Some(false), timestamp.into()).await;
+
+ // Publish None as value, equals reset
+ publish_value(&broker, entry_id, None, None).await;
+
+ // Publish "true" as value
+
+ publish_value(&broker, entry_id, Some(true), None).await;
+
+ if let Ok(stream) = result {
+ // Process the stream by iterating over the items
+ let mut stream = stream.into_inner();
+
+ let mut item_count = 0;
+ while let Some(item) = stream.next().await {
+ match item_count {
+ 0 => {
+ check_stream_next_by_id(
+ &item,
+ if has_value { Some(false) } else { None },
+ entry_id,
+ )
+ .await;
+ }
+ 1 => {
+ check_stream_next_by_id(&item, Some(true), entry_id).await;
+ }
+ 2 => {
+ // As long as value stays as false we do not get anything new, so prepare for None
+ check_stream_next_by_id(&item, Some(false), entry_id).await;
+ }
+ 3 => {
+ check_stream_next_by_id(&item, None, entry_id).await;
+ }
+ 4 => {
+ check_stream_next_by_id(&item, Some(true), entry_id).await;
+ // And we do not expect more
+ break;
+ }
+ _ => panic!(
+ "You shouldn't land here too many items reported back to the stream."
+ ),
+ }
+ item_count += 1;
+ }
+ // Make sure stream is not closed in advance
+ assert_eq!(item_count, 4);
+ } else {
+ panic!("Something went wrong while getting the stream.")
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_subscribe() {
+ test_subscribe_case(false).await;
+ test_subscribe_case(true).await;
+ test_subscribe_case_by_id(false).await;
+ test_subscribe_case_by_id(true).await;
+ }
+
+ /*
+ Test open_provider_stream service method
+ */
+ #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+ async fn test_open_provider_stream() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let request_id = 1;
+
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let request = OpenProviderStreamRequest {
+ action: Some(open_provider_stream_request::Action::PublishValuesRequest(
+ PublishValuesRequest {
+ request_id,
+ datapoints: {
+ let timestamp = Some(std::time::SystemTime::now().into());
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::String(
+ "example_value".to_string(),
+ )),
+ };
+
+ let datapoint = proto::Datapoint {
+ timestamp,
+ value: Some(value),
+ };
+
+ let mut map = HashMap::new();
+ map.insert(entry_id, datapoint);
+ map
+ },
+ },
+ )),
+ };
+
+ // Manually insert permissions
+ let mut streaming_request = tonic_mock::streaming_request(vec![request]);
+ streaming_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.open_provider_stream(streaming_request).await {
+ Ok(response) => {
+ std::thread::sleep(std::time::Duration::from_secs(3));
+ tokio::spawn(async move {
+ std::thread::sleep(std::time::Duration::from_secs(3));
+ let stream = response.into_inner();
+ let mut receiver = stream.into_inner();
+ while let Some(value) = receiver.recv().await {
+ match value {
+ Ok(value) => match value.action {
+ Some(ProvideActuationResponse(_)) => {
+ panic!("Should not happen")
+ }
+ Some(PublishValuesResponse(publish_values_response)) => {
+ assert_eq!(publish_values_response.request_id, request_id);
+ assert_eq!(publish_values_response.status.len(), 1);
+ match publish_values_response.status.get(&entry_id) {
+ Some(value) => {
+ assert_eq!(value.code, 1);
+ assert_eq!(value.message, "Wrong Type");
+ }
+ None => {
+ panic!("Should not happen")
+ }
+ }
+ }
+ Some(BatchActuateStreamRequest(_)) => {
+ panic!("Should not happen")
+ }
+ None => {
+ panic!("Should not happen")
+ }
+ },
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+ });
+ }
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_list_metadata_min_max() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Int32,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ Some(broker::types::DataValue::Int32(-7)), // min
+ Some(broker::types::DataValue::Int32(19)), // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut data_req = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test.datapoint1".to_owned(),
+ filter: "".to_owned(),
+ });
+
+ // Manually insert permissions
+ data_req
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::list_metadata(&broker, data_req)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(list_response) => {
+ let entries_size = list_response.metadata.len();
+ assert_eq!(entries_size, 1);
+
+ let min: Option = Some(Value {
+ typed_value: Some(proto::value::TypedValue::Int32(-7)),
+ });
+ let max = Some(Value {
+ typed_value: Some(proto::value::TypedValue::Int32(19)),
+ });
+
+ assert_eq!(list_response.metadata.first().unwrap().min, min);
+ assert_eq!(list_response.metadata.first().unwrap().max, max);
+ }
+ Err(_status) => panic!("failed to execute get request"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_list_metadata_using_wildcard() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ authorized_access
+ .add_entry(
+ "test.branch.datapoint2".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test branch datapoint 2".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut wildcard_req_two_asteriks = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test.**".to_owned(),
+ filter: "".to_owned(),
+ });
+
+ let mut wildcard_req_one_asterik = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test.*".to_owned(),
+ filter: "".to_owned(),
+ });
+ // Manually insert permissions
+ wildcard_req_two_asteriks
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ wildcard_req_one_asterik
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::list_metadata(&broker, wildcard_req_two_asteriks)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(list_response) => {
+ let entries_size = list_response.metadata.len();
+ assert_eq!(entries_size, 2);
+ }
+ Err(_status) => panic!("failed to execute get request"),
+ }
+
+ match proto::val_server::Val::list_metadata(&broker, wildcard_req_one_asterik)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(list_response) => {
+ let entries_size = list_response.metadata.len();
+ assert_eq!(entries_size, 1);
+ }
+ Err(_status) => panic!("failed to execute get request"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_list_metadata_bad_request_pattern_or_not_found() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut wildcard_req = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test. **".to_owned(),
+ filter: "".to_owned(),
+ });
+
+ // Manually insert permissions
+ wildcard_req
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::list_metadata(&broker, wildcard_req)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(_) => {}
+ Err(error) => {
+ assert_eq!(
+ error.code(),
+ tonic::Code::InvalidArgument,
+ "unexpected error code"
+ );
+ assert_eq!(
+ error.message(),
+ "Invalid Pattern Argument",
+ "unexpected error reason"
+ );
+ }
+ }
+
+ let mut not_found_req = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test.notfound".to_owned(),
+ filter: "".to_owned(),
+ });
+
+ // Manually insert permissions
+ not_found_req
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::list_metadata(&broker, not_found_req)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(_) => {}
+ Err(error) => {
+ assert_eq!(error.code(), tonic::Code::NotFound, "unexpected error code");
+ assert_eq!(
+ error.message(),
+ "Specified root branch does not exist",
+ "unexpected error reason"
+ );
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_actuate_out_of_range() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.Cabin.Infotainment.Navigation.Volume".to_owned(),
+ broker::DataType::Uint8,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ Some(broker::types::DataValue::Uint32(0)), // min
+ Some(broker::types::DataValue::Uint32(100)), // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let vss_id = authorized_access
+ .get_id_by_path("Vehicle.Cabin.Infotainment.Navigation.Volume")
+ .await
+ .expect(
+ "Resolving the id of Vehicle.Cabin.Infotainment.Navigation.Volume should succeed",
+ );
+ let vss_ids = vec![vss_id];
+
+ let (sender, _) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Infotainment.Navigation.Volume".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Uint32(200)),
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(
+ result_response.unwrap_err().code(),
+ tonic::Code::InvalidArgument
+ )
+ }
+
+ #[tokio::test]
+ async fn test_actuate_signal_not_found() {
+ let broker = DataBroker::default();
+
+ let mut request = tonic::Request::new(ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Non.Existing".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(result_response.unwrap_err().code(), tonic::Code::NotFound)
+ }
+
+ #[tokio::test]
+ async fn test_actuate_can_provider_unavailable() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut request = tonic::Request::new(ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(
+ result_response.unwrap_err().code(),
+ tonic::Code::Unavailable
+ )
+ }
+
+ #[tokio::test]
+ async fn test_actuate_success() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let vss_id = authorized_access
+ .get_id_by_path("Vehicle.ADAS.ABS.IsEnabled")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.ABS.IsEnabled should succeed");
+ let vss_ids = vec![vss_id];
+
+ let (sender, mut receiver) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::actuate(&broker, request).await;
+ assert!(result_response.is_ok());
+
+ let result_response = receiver.recv().await.expect("Option should be Some");
+ result_response.expect("Result should be Ok");
+ }
+
+ #[tokio::test]
+ async fn test_batch_actuate_out_of_range() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint 'Vehicle.ADAS.ABS.IsEnabled' should succeed");
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register 'Vehicle.ADAS.CruiseControl.IsActive' datapoint should succeed");
+
+ authorized_access
+ .add_entry(
+ "Vehicle.Cabin.Infotainment.Navigation.Volume".to_owned(),
+ broker::DataType::Uint8,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ Some(broker::types::DataValue::Uint32(0)), // min
+ Some(broker::types::DataValue::Uint32(100)), // max
+ None,
+ None,
+ )
+ .await
+ .expect(
+ "Register datapoint 'Vehicle.Cabin.Infotainment.Navigation.Volume' should succeed",
+ );
+
+ let vss_id_abs = authorized_access
+ .get_id_by_path("Vehicle.ADAS.ABS.IsEnabled")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.ABS.IsEnabled should succeed");
+ let vss_id_cruise_control = authorized_access
+ .get_id_by_path("Vehicle.ADAS.CruiseControl.IsActive")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.CruiseControl.IsActive should succeed");
+ let vss_id_navigation_volume = authorized_access
+ .get_id_by_path("Vehicle.Cabin.Infotainment.Navigation.Volume")
+ .await
+ .expect(
+ "Resolving the id of Vehicle.Cabin.Infotainment.Navigation.Volume should succeed",
+ );
+
+ let vss_ids = vec![vss_id_abs, vss_id_cruise_control, vss_id_navigation_volume];
+
+ let (sender, _receiver) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(BatchActuateRequest {
+ actuate_requests: vec![
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Infotainment.Navigation.Volume".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Uint32(200)),
+ }),
+ },
+ ],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::batch_actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(
+ result_response.unwrap_err().code(),
+ tonic::Code::InvalidArgument
+ )
+ }
+
+ #[tokio::test]
+ async fn test_batch_actuate_signal_not_found() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut request = tonic::Request::new(BatchActuateRequest {
+ actuate_requests: vec![
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Non.Existing".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::batch_actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(result_response.unwrap_err().code(), tonic::Code::NotFound)
+ }
+
+ #[tokio::test]
+ async fn test_batch_actuate_provider_unavailable() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let vss_id_abs = authorized_access
+ .get_id_by_path("Vehicle.ADAS.ABS.IsEnabled")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.ABS.IsEnabled should succeed");
+
+ let vss_ids = vec![vss_id_abs];
+
+ let (sender, _receiver) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(BatchActuateRequest {
+ actuate_requests: vec![
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::batch_actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(
+ result_response.unwrap_err().code(),
+ tonic::Code::Unavailable
+ )
+ }
+
+ #[tokio::test]
+ async fn test_batch_actuate_success() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let vss_id_abs = authorized_access
+ .get_id_by_path("Vehicle.ADAS.ABS.IsEnabled")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.ABS.IsEnabled should succeed");
+ let vss_id_cruise_control = authorized_access
+ .get_id_by_path("Vehicle.ADAS.CruiseControl.IsActive")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.CruiseControl.IsActive should succeed");
+
+ let vss_ids = vec![vss_id_abs, vss_id_cruise_control];
+
+ let (sender, mut receiver) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(BatchActuateRequest {
+ actuate_requests: vec![
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::batch_actuate(&broker, request).await;
+ assert!(result_response.is_ok());
+
+ let result_response = receiver.recv().await.expect("Option should be Some");
+ result_response.expect("Result should be Ok");
+ }
+
+ #[tokio::test]
+ async fn test_provide_actuation_signal_not_found() {
+ let broker = DataBroker::default();
+
+ let request = OpenProviderStreamRequest {
+ action: Some(
+ open_provider_stream_request::Action::ProvideActuationRequest(
+ proto::ProvideActuationRequest {
+ actuator_identifiers: vec![SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Non.Existing".to_string(),
+ )),
+ }],
+ },
+ ),
+ ),
+ };
+
+ let mut streaming_request = tonic_mock::streaming_request(vec![request]);
+ streaming_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::open_provider_stream(&broker, streaming_request).await {
+ Ok(response) => {
+ let stream = response.into_inner();
+ let mut receiver = stream.into_inner();
+ let result_response = receiver
+ .recv()
+ .await
+ .expect("result_response should be Some");
+ assert!(result_response.is_err());
+ assert_eq!(result_response.unwrap_err().code(), tonic::Code::NotFound)
+ }
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_provide_actuation_success() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let request = OpenProviderStreamRequest {
+ action: Some(
+ open_provider_stream_request::Action::ProvideActuationRequest(
+ proto::ProvideActuationRequest {
+ actuator_identifiers: vec![SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }],
+ },
+ ),
+ ),
+ };
+
+ let mut streaming_request = tonic_mock::streaming_request(vec![request]);
+ streaming_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::open_provider_stream(&broker, streaming_request).await {
+ Ok(response) => {
+ let stream = response.into_inner();
+ let mut receiver = stream.into_inner();
+ let result_response = receiver
+ .recv()
+ .await
+ .expect("result_response should be Some");
+
+ assert!(result_response.is_ok())
+ }
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_get_server_info() {
+ let version = "1.1.1";
+ let commit_hash = "3a3c332f5427f2db7a0b8582262c9f5089036c23";
+ let broker = DataBroker::new(version, commit_hash);
+
+ let request = tonic::Request::new(proto::GetServerInfoRequest {});
+
+ match proto::val_server::Val::get_server_info(&broker, request)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(response) => {
+ assert_eq!(response.name, "databroker");
+ assert_eq!(response.version, version);
+ assert_eq!(response.commit_hash, commit_hash);
+ }
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+}
diff --git a/databroker/src/grpc/mod.rs b/databroker/src/grpc/mod.rs
index c4c86d4a..a7a15a2b 100644
--- a/databroker/src/grpc/mod.rs
+++ b/databroker/src/grpc/mod.rs
@@ -14,4 +14,5 @@
pub mod server;
mod kuksa_val_v1;
+mod kuksa_val_v2;
mod sdv_databroker_v1;
diff --git a/databroker/src/grpc/sdv_databroker_v1/broker.rs b/databroker/src/grpc/sdv_databroker_v1/broker.rs
index 532ae1f1..32669fd0 100644
--- a/databroker/src/grpc/sdv_databroker_v1/broker.rs
+++ b/databroker/src/grpc/sdv_databroker_v1/broker.rs
@@ -131,6 +131,8 @@ impl proto::broker_server::Broker for broker::DataBroker {
data_type: None,
description: None,
allowed: None,
+ max: None,
+ min: None,
unit: None,
},
));
diff --git a/databroker/src/grpc/sdv_databroker_v1/collector.rs b/databroker/src/grpc/sdv_databroker_v1/collector.rs
index 4bec1701..963ab632 100644
--- a/databroker/src/grpc/sdv_databroker_v1/collector.rs
+++ b/databroker/src/grpc/sdv_databroker_v1/collector.rs
@@ -60,6 +60,8 @@ impl proto::collector_server::Collector for broker::DataBroker {
data_type: None,
description: None,
allowed: None,
+ max: None,
+ min: None,
unit: None,
},
)
@@ -129,6 +131,8 @@ impl proto::collector_server::Collector for broker::DataBroker {
data_type: None,
description: None,
allowed: None,
+ max: None,
+ min: None,
unit: None,
}
)
@@ -207,6 +211,8 @@ impl proto::collector_server::Collector for broker::DataBroker {
broker::ChangeType::from(&change_type),
broker::types::EntryType::Sensor,
metadata.description,
+ None, // min
+ None, // max
None,
None,
)
@@ -264,3 +270,85 @@ impl proto::collector_server::Collector for broker::DataBroker {
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{broker::DataBroker, permissions};
+ use proto::collector_server::Collector;
+
+ #[tokio::test]
+ async fn test_publish_value_min_max_not_fulfilled() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let entry_id_1 = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Uint8,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ Some(broker::types::DataValue::Uint32(3)), // min
+ Some(broker::types::DataValue::Uint32(26)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let entry_id_2 = authorized_access
+ .add_entry(
+ "test.datapoint1.Speed".to_owned(),
+ broker::DataType::Float,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ Some(broker::types::DataValue::Float(1.0)), // min
+ Some(broker::types::DataValue::Float(100.0)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let datapoint: proto::Datapoint = proto::Datapoint {
+ timestamp: None,
+ value: Some(proto::datapoint::Value::Int32Value(50)),
+ };
+
+ let mut datapoints = HashMap::new();
+ datapoints.insert(entry_id_1, datapoint.clone());
+ datapoints.insert(entry_id_2, datapoint);
+
+ let request = proto::UpdateDatapointsRequest { datapoints };
+
+ // Manually insert permissions
+ let mut publish_value_request = tonic::Request::new(request);
+ publish_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.update_datapoints(publish_value_request).await {
+ Ok(response) => {
+ let response = response.into_inner();
+ assert_eq!(response.errors.len(), 2);
+
+ let error_entry_1 = response.errors.get(&entry_id_1);
+ assert_eq!(
+ error_entry_1.unwrap().clone(),
+ proto::DatapointError::OutOfBounds as i32
+ );
+
+ let error_entry_2 = response.errors.get(&entry_id_2);
+ assert_eq!(
+ error_entry_2.unwrap().clone(),
+ proto::DatapointError::InvalidType as i32
+ );
+ }
+ Err(_) => {
+ panic!("Should not happen!");
+ }
+ }
+ }
+}
diff --git a/databroker/src/grpc/sdv_databroker_v1/conversions.rs b/databroker/src/grpc/sdv_databroker_v1/conversions.rs
index d52600e6..2262b0d0 100644
--- a/databroker/src/grpc/sdv_databroker_v1/conversions.rs
+++ b/databroker/src/grpc/sdv_databroker_v1/conversions.rs
@@ -16,6 +16,7 @@ use databroker_proto::sdv::databroker::v1 as proto;
use prost_types::Timestamp;
use std::convert::TryInto;
use std::time::SystemTime;
+use tracing::debug;
use crate::broker;
@@ -302,6 +303,91 @@ impl From<&proto::ChangeType> for broker::ChangeType {
}
}
+fn transform_allowed(value: &Option<broker::DataValue>) -> Option<proto::Allowed> {
+ match value {
+ Some(value) => match value {
+ broker::DataValue::StringArray(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::StringValues(proto::StringArray {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::Int32Array(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::Int32Values(proto::Int32Array {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::Int64Array(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::Int64Values(proto::Int64Array {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::Uint32Array(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::Uint32Values(proto::Uint32Array {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::Uint64Array(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::Uint64Values(proto::Uint64Array {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::FloatArray(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::FloatValues(proto::FloatArray {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::DoubleArray(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::DoubleValues(proto::DoubleArray {
+ values: array.clone(),
+ })),
+ }),
+ _ => {
+ debug!("Wrong datatype used for allowed values");
+ None
+ }
+ },
+ None => None,
+ }
+}
+
+fn transform_min_max(value: &Option<broker::DataValue>) -> Option<proto::ValueRestriction> {
+ match value {
+ Some(value) => match value {
+ broker::DataValue::String(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::String(
+ value.to_owned(),
+ )),
+ }),
+ broker::DataValue::Bool(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Bool(*value)),
+ }),
+ broker::DataValue::Int32(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Int32(*value)),
+ }),
+ broker::DataValue::Int64(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Int64(*value)),
+ }),
+ broker::DataValue::Uint32(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Uint32(*value)),
+ }),
+ broker::DataValue::Uint64(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Uint64(*value)),
+ }),
+ broker::DataValue::Float(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Float(*value)),
+ }),
+ broker::DataValue::Double(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Double(*value)),
+ }),
+ _ => {
+ debug!("Wrong datatype used for min/max values");
+ None
+ }
+ },
+ None => None,
+ }
+}
+
impl From<&broker::Metadata> for proto::Metadata {
fn from(metadata: &broker::Metadata) -> Self {
proto::Metadata {
@@ -311,54 +397,9 @@ impl From<&broker::Metadata> for proto::Metadata {
data_type: proto::DataType::from(&metadata.data_type) as i32,
change_type: proto::ChangeType::Continuous as i32, // TODO: Add to metadata
description: metadata.description.to_owned(),
- allowed: match metadata.allowed.as_ref() {
- Some(broker::DataValue::StringArray(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::StringValues(proto::StringArray {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::Int32Array(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::Int32Values(proto::Int32Array {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::Int64Array(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::Int64Values(proto::Int64Array {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::Uint32Array(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::Uint32Values(proto::Uint32Array {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::Uint64Array(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::Uint64Values(proto::Uint64Array {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::FloatArray(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::FloatValues(proto::FloatArray {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::DoubleArray(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::DoubleValues(proto::DoubleArray {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::BoolArray(_))
- | Some(broker::DataValue::NotAvailable)
- | Some(broker::DataValue::Bool(_))
- | Some(broker::DataValue::String(_))
- | Some(broker::DataValue::Int32(_))
- | Some(broker::DataValue::Int64(_))
- | Some(broker::DataValue::Uint32(_))
- | Some(broker::DataValue::Uint64(_))
- | Some(broker::DataValue::Float(_))
- | Some(broker::DataValue::Double(_))
- | None => None,
- },
+ allowed: transform_allowed(&metadata.allowed),
+ min: transform_min_max(&metadata.min),
+ max: transform_min_max(&metadata.max),
}
}
}
@@ -370,7 +411,9 @@ impl From<&broker::UpdateError> for proto::DatapointError {
broker::UpdateError::WrongType | broker::UpdateError::UnsupportedType => {
proto::DatapointError::InvalidType
}
- broker::UpdateError::OutOfBounds => proto::DatapointError::OutOfBounds,
+ broker::UpdateError::OutOfBoundsAllowed => proto::DatapointError::OutOfBounds,
+ broker::UpdateError::OutOfBoundsMinMax => proto::DatapointError::OutOfBounds,
+ broker::UpdateError::OutOfBoundsType => proto::DatapointError::OutOfBounds,
broker::UpdateError::PermissionDenied => proto::DatapointError::AccessDenied,
broker::UpdateError::PermissionExpired => proto::DatapointError::AccessDenied,
}
diff --git a/databroker/src/grpc/server.rs b/databroker/src/grpc/server.rs
index 8bc282ca..58222bb5 100644
--- a/databroker/src/grpc/server.rs
+++ b/databroker/src/grpc/server.rs
@@ -37,6 +37,7 @@ pub enum ServerTLS {
#[derive(PartialEq)]
pub enum Api {
KuksaValV1,
+ KuksaValV2,
SdvDatabrokerV1,
}
@@ -187,6 +188,20 @@ where
let mut router = server.add_optional_service(kuksa_val_v1);
+ if apis.contains(&Api::KuksaValV2) {
+ let service = tonic_reflection::server::Builder::configure()
+ .register_encoded_file_descriptor_set(kuksa::val::v2::FILE_DESCRIPTOR_SET)
+ .build()
+ .unwrap();
+
+ router = router.add_service(service).add_optional_service(Some(
+ kuksa::val::v2::val_server::ValServer::with_interceptor(
+ broker.clone(),
+ authorization.clone(),
+ ),
+ ));
+ }
+
if apis.contains(&Api::SdvDatabrokerV1) {
router = router.add_optional_service(Some(
sdv::databroker::v1::broker_server::BrokerServer::with_interceptor(
diff --git a/databroker/src/main.rs b/databroker/src/main.rs
index c1d8aaa0..aa1ea985 100644
--- a/databroker/src/main.rs
+++ b/databroker/src/main.rs
@@ -65,6 +65,8 @@ async fn add_kuksa_attribute(
description,
None,
None,
+ None,
+ None,
)
.await
{
@@ -83,6 +85,8 @@ async fn add_kuksa_attribute(
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)];
@@ -116,7 +120,7 @@ async fn read_metadata_file<'a, 'b>(
let entries = vss::parse_vss_from_reader(buffered)?;
for (path, entry) in entries {
- debug!("Adding VSS datapoint type {}", path);
+ debug!("Adding VSS datapoint {}", path);
match database
.add_entry(
@@ -125,6 +129,8 @@ async fn read_metadata_file<'a, 'b>(
entry.change_type,
entry.entry_type,
entry.description,
+ entry.min,
+ entry.max,
entry.allowed,
entry.unit,
)
@@ -146,6 +152,8 @@ async fn read_metadata_file<'a, 'b>(
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)];
@@ -173,6 +181,7 @@ async fn read_metadata_file<'a, 'b>(
fn main() -> Result<(), Box<dyn std::error::Error>> {
let version = option_env!("CARGO_PKG_VERSION").unwrap_or_default();
+ let commit_sha = option_env!("VERGEN_GIT_SHA").unwrap_or_default();
let about = format!(
concat!(
@@ -356,7 +365,7 @@ fn main() -> Result<(), Box> {
.expect("port should be a number");
let addr = std::net::SocketAddr::new(ip_addr, *port);
- let broker = broker::DataBroker::new(version);
+ let broker = broker::DataBroker::new(version, commit_sha);
let database = broker.authorized_access(&permissions::ALLOW_ALL);
add_kuksa_attribute(
@@ -475,7 +484,7 @@ fn main() -> Result<(), Box> {
}
}
- let mut apis = vec![grpc::server::Api::KuksaValV1];
+ let mut apis = vec![grpc::server::Api::KuksaValV1, grpc::server::Api::KuksaValV2];
if args.get_flag("enable-databroker-v1") {
apis.push(grpc::server::Api::SdvDatabrokerV1);
diff --git a/databroker/src/permissions.rs b/databroker/src/permissions.rs
index 7da1eae1..8157b811 100644
--- a/databroker/src/permissions.rs
+++ b/databroker/src/permissions.rs
@@ -165,7 +165,9 @@ impl Permissions {
}
pub fn can_read(&self, path: &str) -> Result<(), PermissionError> {
- self.expired()?;
+ if self.is_expired() {
+ return Err(PermissionError::Expired);
+ }
if self.read.is_match(path) {
return Ok(());
@@ -187,7 +189,9 @@ impl Permissions {
}
pub fn can_write_actuator_target(&self, path: &str) -> Result<(), PermissionError> {
- self.expired()?;
+ if self.is_expired() {
+ return Err(PermissionError::Expired);
+ }
if self.actuate.is_match(path) {
return Ok(());
@@ -196,7 +200,9 @@ impl Permissions {
}
pub fn can_write_datapoint(&self, path: &str) -> Result<(), PermissionError> {
- self.expired()?;
+ if self.is_expired() {
+ return Err(PermissionError::Expired);
+ }
if self.provide.is_match(path) {
return Ok(());
@@ -205,7 +211,9 @@ impl Permissions {
}
pub fn can_create(&self, path: &str) -> Result<(), PermissionError> {
- self.expired()?;
+ if self.is_expired() {
+ return Err(PermissionError::Expired);
+ }
if self.create.is_match(path) {
return Ok(());
@@ -214,13 +222,13 @@ impl Permissions {
}
#[inline]
- pub fn expired(&self) -> Result<(), PermissionError> {
+ pub fn is_expired(&self) -> bool {
if let Some(expires_at) = self.expires_at {
if expires_at < SystemTime::now() {
- return Err(PermissionError::Expired);
+ return true;
}
}
- Ok(())
+ false
}
}
diff --git a/databroker/src/types.rs b/databroker/src/types.rs
index 6d9241fd..c176d410 100644
--- a/databroker/src/types.rs
+++ b/databroker/src/types.rs
@@ -11,7 +11,7 @@
* SPDX-License-Identifier: Apache-2.0
********************************************************************************/
-use std::convert::TryFrom;
+use std::{convert::TryFrom, fmt};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DataType {
@@ -41,6 +41,37 @@ pub enum DataType {
DoubleArray,
}
+impl fmt::Display for DataType {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ DataType::String => write!(f, "String"),
+ DataType::Bool => write!(f, "Bool"),
+ DataType::Int8 => write!(f, "Int8"),
+ DataType::Int16 => write!(f, "Int16"),
+ DataType::Int32 => write!(f, "Int32"),
+ DataType::Int64 => write!(f, "Int64"),
+ DataType::Uint8 => write!(f, "Uint8"),
+ DataType::Uint16 => write!(f, "Uint16"),
+ DataType::Uint32 => write!(f, "Uint32"),
+ DataType::Uint64 => write!(f, "Uint64"),
+ DataType::Float => write!(f, "Float"),
+ DataType::Double => write!(f, "Double"),
+ DataType::StringArray => write!(f, "StringArray"),
+ DataType::BoolArray => write!(f, "BoolArray"),
+ DataType::Int8Array => write!(f, "Int8Array"),
+ DataType::Int16Array => write!(f, "Int16Array"),
+ DataType::Int32Array => write!(f, "Int32Array"),
+ DataType::Int64Array => write!(f, "Int64Array"),
+ DataType::Uint8Array => write!(f, "Uint8Array"),
+ DataType::Uint16Array => write!(f, "Uint16Array"),
+ DataType::Uint32Array => write!(f, "Uint32Array"),
+ DataType::Uint64Array => write!(f, "Uint64Array"),
+ DataType::FloatArray => write!(f, "FloatArray"),
+ DataType::DoubleArray => write!(f, "DoubleArray"),
+ }
+ }
+}
+
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum EntryType {
Sensor,
@@ -78,6 +109,29 @@ pub enum DataValue {
#[derive(Debug)]
pub struct CastError {}
+impl fmt::Display for DataValue {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ DataValue::NotAvailable => write!(f, "Not Available"),
+ DataValue::Bool(value) => write!(f, "{}", value),
+ DataValue::String(value) => write!(f, "{}", value),
+ DataValue::Int32(value) => write!(f, "{}", value),
+ DataValue::Int64(value) => write!(f, "{}", value),
+ DataValue::Uint32(value) => write!(f, "{}", value),
+ DataValue::Uint64(value) => write!(f, "{}", value),
+ DataValue::Float(value) => write!(f, "{}", value),
+ DataValue::Double(value) => write!(f, "{}", value),
+ DataValue::BoolArray(values) => write!(f, "{:?}", values),
+ DataValue::StringArray(values) => write!(f, "{:?}", values),
+ DataValue::Int32Array(values) => write!(f, "{:?}", values),
+ DataValue::Int64Array(values) => write!(f, "{:?}", values),
+ DataValue::Uint32Array(values) => write!(f, "{:?}", values),
+ DataValue::Uint64Array(values) => write!(f, "{:?}", values),
+ DataValue::FloatArray(values) => write!(f, "{:?}", values),
+ DataValue::DoubleArray(values) => write!(f, "{:?}", values),
+ }
+ }
+}
impl DataValue {
pub fn greater_than(&self, other: &DataValue) -> Result<bool, CastError> {
@@ -233,158 +287,21 @@ impl DataValue {
}
}
- pub fn less_than(&self, other: &DataValue) -> Result {
- match (&self, other) {
- (DataValue::Int32(value), DataValue::Int32(other_value)) => Ok(value < other_value),
- (DataValue::Int32(value), DataValue::Int64(other_value)) => {
- Ok(i64::from(*value) < *other_value)
- }
- (DataValue::Int32(value), DataValue::Uint32(other_value)) => {
- Ok(i64::from(*value) < i64::from(*other_value))
- }
- (DataValue::Int32(value), DataValue::Uint64(other_value)) => {
- if *value < 0 {
- Ok(true) // Negative value must be less than unsigned
- } else {
- match u64::try_from(*value) {
- Ok(value) => Ok(value < *other_value),
- Err(_) => Err(CastError {}),
- }
- }
- }
- (DataValue::Int32(value), DataValue::Float(other_value)) => {
- Ok(f64::from(*value) < f64::from(*other_value))
- }
- (DataValue::Int32(value), DataValue::Double(other_value)) => {
- Ok(f64::from(*value) < *other_value)
- }
+ pub fn greater_than_equal(&self, other: &DataValue) -> Result<bool, CastError> {
+ match self.greater_than(other) {
+ Ok(true) => Ok(true),
+ _ => self.equals(other),
+ }
+ }
- (DataValue::Int64(value), DataValue::Int32(other_value)) => {
- Ok(*value < i64::from(*other_value))
- }
- (DataValue::Int64(value), DataValue::Int64(other_value)) => Ok(value < other_value),
- (DataValue::Int64(value), DataValue::Uint32(other_value)) => {
- Ok(*value < i64::from(*other_value))
- }
- (DataValue::Int64(value), DataValue::Uint64(other_value)) => {
- if *value < 0 {
- Ok(true) // Negative value must be less than unsigned
- } else {
- match u64::try_from(*value) {
- Ok(value) => Ok(value < *other_value),
- Err(_) => Err(CastError {}),
- }
- }
- }
- (DataValue::Int64(value), DataValue::Float(other_value)) => match i32::try_from(*value)
- {
- Ok(value) => Ok(f64::from(value) < f64::from(*other_value)),
- Err(_) => Err(CastError {}),
- },
- (DataValue::Int64(value), DataValue::Double(other_value)) => {
- match i32::try_from(*value) {
- Ok(value) => Ok(f64::from(value) < *other_value),
- Err(_) => Err(CastError {}),
- }
- }
+ pub fn less_than(&self, other: &DataValue) -> Result<bool, CastError> {
+ other.greater_than(self)
+ }
- (DataValue::Uint32(value), DataValue::Int32(other_value)) => {
- Ok(i64::from(*value) < i64::from(*other_value))
- }
- (DataValue::Uint32(value), DataValue::Int64(other_value)) => {
- Ok(i64::from(*value) < *other_value)
- }
- (DataValue::Uint32(value), DataValue::Uint32(other_value)) => Ok(value < other_value),
- (DataValue::Uint32(value), DataValue::Uint64(other_value)) => {
- Ok(u64::from(*value) < *other_value)
- }
- (DataValue::Uint32(value), DataValue::Float(other_value)) => {
- Ok(f64::from(*value) < f64::from(*other_value))
- }
- (DataValue::Uint32(value), DataValue::Double(other_value)) => {
- Ok(f64::from(*value) < *other_value)
- }
- (DataValue::Uint64(value), DataValue::Int32(other_value)) => {
- if *other_value < 0 {
- Ok(false) // Unsigned cannot be less than a negative value
- } else {
- match u64::try_from(*other_value) {
- Ok(other_value) => Ok(*value < other_value),
- Err(_) => Err(CastError {}),
- }
- }
- }
- (DataValue::Uint64(value), DataValue::Int64(other_value)) => {
- if *other_value < 0 {
- Ok(false) // Unsigned cannot be less than a negative value
- } else {
- match u64::try_from(*other_value) {
- Ok(other_value) => Ok(*value < other_value),
- Err(_) => Err(CastError {}),
- }
- }
- }
- (DataValue::Uint64(value), DataValue::Uint32(other_value)) => {
- Ok(*value < u64::from(*other_value))
- }
- (DataValue::Uint64(value), DataValue::Uint64(other_value)) => Ok(value < other_value),
- (DataValue::Uint64(value), DataValue::Float(other_value)) => {
- match u32::try_from(*value) {
- Ok(value) => Ok(f64::from(value) < f64::from(*other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Uint64(value), DataValue::Double(other_value)) => {
- match u32::try_from(*value) {
- Ok(value) => Ok(f64::from(value) < *other_value),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Float(value), DataValue::Int32(other_value)) => {
- Ok(f64::from(*value) < f64::from(*other_value))
- }
- (DataValue::Float(value), DataValue::Int64(other_value)) => {
- match i32::try_from(*other_value) {
- Ok(other_value) => Ok(f64::from(*value) < f64::from(other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Float(value), DataValue::Uint32(other_value)) => {
- Ok(f64::from(*value) < f64::from(*other_value))
- }
- (DataValue::Float(value), DataValue::Uint64(other_value)) => {
- match u32::try_from(*other_value) {
- Ok(other_value) => Ok(f64::from(*value) < f64::from(other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Float(value), DataValue::Float(other_value)) => Ok(value < other_value),
- (DataValue::Float(value), DataValue::Double(other_value)) => {
- Ok(f64::from(*value) < *other_value)
- }
- (DataValue::Double(value), DataValue::Int32(other_value)) => {
- Ok(*value < f64::from(*other_value))
- }
- (DataValue::Double(value), DataValue::Int64(other_value)) => {
- match i32::try_from(*other_value) {
- Ok(other_value) => Ok(*value < f64::from(other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Double(value), DataValue::Uint32(other_value)) => {
- Ok(*value < f64::from(*other_value))
- }
- (DataValue::Double(value), DataValue::Uint64(other_value)) => {
- match u32::try_from(*other_value) {
- Ok(other_value) => Ok(*value < f64::from(other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Double(value), DataValue::Float(other_value)) => {
- Ok(*value < f64::from(*other_value))
- }
- (DataValue::Double(value), DataValue::Double(other_value)) => Ok(value < other_value),
- _ => Err(CastError {}),
+ pub fn less_than_equal(&self, other: &DataValue) -> Result<bool, CastError> {
+ match self.less_than(other) {
+ Ok(true) => Ok(true),
+ _ => self.equals(other),
}
}
diff --git a/databroker/src/viss/v2/server.rs b/databroker/src/viss/v2/server.rs
index 91a69f62..e22923b4 100644
--- a/databroker/src/viss/v2/server.rs
+++ b/databroker/src/viss/v2/server.rs
@@ -168,6 +168,8 @@ impl Viss for Server {
entry_type: None,
data_type: None,
description: None,
+ min: None,
+ max: None,
allowed: None,
unit: None,
})
@@ -197,8 +199,14 @@ impl Viss for Server {
UpdateError::WrongType => Error::BadRequest {
msg: Some("Wrong data type.".into()),
},
- UpdateError::OutOfBounds => Error::BadRequest {
- msg: Some("Value out of bounds.".into()),
+ UpdateError::OutOfBoundsAllowed => Error::BadRequest {
+ msg: Some("Value out of allowed bounds.".into()),
+ },
+ UpdateError::OutOfBoundsMinMax => Error::BadRequest {
+ msg: Some("Value out of min/max bounds.".into()),
+ },
+ UpdateError::OutOfBoundsType => Error::BadRequest {
+ msg: Some("Value out of type bounds.".into()),
},
UpdateError::UnsupportedType => Error::BadRequest {
msg: Some("Unsupported data type.".into()),
diff --git a/databroker/src/vss.rs b/databroker/src/vss.rs
index a79b0d54..f5d1a8b0 100644
--- a/databroker/src/vss.rs
+++ b/databroker/src/vss.rs
@@ -203,6 +203,10 @@ impl From for types::DataType {
}
}
+/// Try to extract an array matching the given DataType.
+/// Will succeed if the value is None or an array of matching type
+/// Will fail if the value is a "single" value, i.e. not an array
+/// This method is useful for instance when extracting the "allowed" field
fn try_from_json_array(
array: Option<Vec<serde_json::Value>>,
data_type: &types::DataType,
@@ -251,6 +255,11 @@ fn try_from_json_array(
}
}
+/// Try to extract a value matching the given DataType.
+/// Will succeed if the value is None or a value of matching type
+/// Will fail if the value does not match the given type,
+/// for example if a single value is given for an array type or vice versa
+/// This method is useful for instance when extracting the "default" value
fn try_from_json_value(
value: Option<serde_json::Value>,
data_type: &types::DataType,
@@ -350,6 +359,44 @@ fn try_from_json_value(
}
}
+/// Try to extract a single value matching the given DataType,
+/// i.e. if an array type is given it will try to find a single value of the base type
+/// For example Int32 if the type is Int32 or Int32Array
+/// Will succeed if the value is of matching base type
+/// Will fail otherwise
+/// This method is useful for instance when extracting the "min"/"max" field
+fn try_from_json_single_value(
+ value: Option<serde_json::Value>,
+ data_type: &types::DataType,
+) -> Result<Option<types::DataValue>, Error> {
+ match data_type {
+ types::DataType::StringArray => try_from_json_value(value, &types::DataType::String),
+ types::DataType::BoolArray => try_from_json_value(value, &types::DataType::Bool),
+ types::DataType::Int8Array => try_from_json_value(value, &types::DataType::Int8),
+ types::DataType::Int16Array => try_from_json_value(value, &types::DataType::Int16),
+ types::DataType::Int32Array => try_from_json_value(value, &types::DataType::Int32),
+ types::DataType::Int64Array => try_from_json_value(value, &types::DataType::Int64),
+ types::DataType::Uint8Array => try_from_json_value(value, &types::DataType::Uint8),
+ types::DataType::Uint16Array => try_from_json_value(value, &types::DataType::Uint16),
+ types::DataType::Uint32Array => try_from_json_value(value, &types::DataType::Uint32),
+ types::DataType::Uint64Array => try_from_json_value(value, &types::DataType::Uint64),
+ types::DataType::FloatArray => try_from_json_value(value, &types::DataType::Float),
+ types::DataType::DoubleArray => try_from_json_value(value, &types::DataType::Double),
+ types::DataType::String
+ | types::DataType::Bool
+ | types::DataType::Int8
+ | types::DataType::Int16
+ | types::DataType::Int32
+ | types::DataType::Int64
+ | types::DataType::Uint8
+ | types::DataType::Uint16
+ | types::DataType::Uint32
+ | types::DataType::Uint64
+ | types::DataType::Float
+ | types::DataType::Double => try_from_json_value(value, data_type),
+ }
+}
+
fn flatten_vss_tree(root: RootEntry) -> Result, Error> {
let mut entries = BTreeMap::new();
@@ -396,8 +443,8 @@ fn add_entry(
description: entry.description,
comment: entry.comment,
unit: entry.unit,
- min: try_from_json_value(entry.min, &data_type)?,
- max: try_from_json_value(entry.max, &data_type)?,
+ min: try_from_json_single_value(entry.min, &data_type)?,
+ max: try_from_json_single_value(entry.max, &data_type)?,
allowed: try_from_json_array(entry.allowed, &data_type)?,
default: None, // isn't used by actuators
data_type,
@@ -421,8 +468,8 @@ fn add_entry(
description: entry.description,
comment: entry.comment,
unit: entry.unit,
- min: try_from_json_value(entry.min, &data_type)?,
- max: try_from_json_value(entry.max, &data_type)?,
+ min: try_from_json_single_value(entry.min, &data_type)?,
+ max: try_from_json_single_value(entry.max, &data_type)?,
allowed: try_from_json_array(entry.allowed, &data_type)?,
default: try_from_json_value(entry.default, &data_type)?,
change_type: determine_change_type(
@@ -450,8 +497,8 @@ fn add_entry(
description: entry.description,
comment: entry.comment,
unit: entry.unit,
- min: try_from_json_value(entry.min, &data_type)?,
- max: try_from_json_value(entry.max, &data_type)?,
+ min: try_from_json_single_value(entry.min, &data_type)?,
+ max: try_from_json_single_value(entry.max, &data_type)?,
allowed: try_from_json_array(entry.allowed, &data_type)?,
change_type: determine_change_type(entry.change_type, types::EntryType::Sensor),
default: None, // isn't used by sensors
diff --git a/databroker/tests/world/mod.rs b/databroker/tests/world/mod.rs
index e3e6a7c6..e33feb72 100644
--- a/databroker/tests/world/mod.rs
+++ b/databroker/tests/world/mod.rs
@@ -190,9 +190,9 @@ impl DataBrokerWorld {
.expect("failed to determine listener's port");
tokio::spawn(async move {
- let version = option_env!("VERGEN_GIT_SEMVER_LIGHTWEIGHT")
- .unwrap_or(option_env!("VERGEN_GIT_SHA").unwrap_or("unknown"));
- let data_broker = broker::DataBroker::new(version);
+ let commit_sha = option_env!("VERGEN_GIT_SHA").unwrap_or("unknown");
+ let version = option_env!("VERGEN_GIT_SEMVER_LIGHTWEIGHT").unwrap_or(commit_sha);
+ let data_broker = broker::DataBroker::new(version, commit_sha);
let database = data_broker.authorized_access(&permissions::ALLOW_ALL);
for (name, data_type, change_type, entry_type) in data_entries {
if let Err(_error) = database
@@ -202,6 +202,8 @@ impl DataBrokerWorld {
change_type,
entry_type,
"N/A".to_string(),
+ None, // min
+ None, // max
None,
None,
)
diff --git a/doc/diagrams/consumer_actuate.drawio b/doc/diagrams/consumer_actuate.drawio
new file mode 100644
index 00000000..806e9f34
--- /dev/null
+++ b/doc/diagrams/consumer_actuate.drawio
@@ -0,0 +1,131 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_actuate.svg b/doc/diagrams/consumer_actuate.svg
new file mode 100644
index 00000000..d684b0ef
--- /dev/null
+++ b/doc/diagrams/consumer_actuate.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer :Provider :Vehicle Network ActuateResponse Actuate(ActuateRequest=actuator_path)
Actuate(ActuateRequest=actuator_path) OpenProviderStream(stream Β OpenProviderStreamRequest=ProvideActuationRequest(actutators))
OpenProviderStream(streamΒ OpenProviderStreamRequest=ProvideActuationRequest(actutators)) stream Β OpenProviderStreamResponse=ProvideActuationResponse
streamΒ OpenProviderStreamResponse=ProvideActuationResponse stream Β OpenProviderStreamRequest=BatchActuateStreamResponse
streamΒ OpenProviderStreamRequest=BatchActuateStreamResponse stream Β OpenProviderStreamResponse=BatchActuateStreamRequest(actuator_path, value)
streamΒ OpenProviderStreamResponse=BatchActuateStreamRequest(actuator_path, value) write_data_frame(data) ack_write_operation Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_actuate_multiple_providers.drawio b/doc/diagrams/consumer_actuate_multiple_providers.drawio
new file mode 100644
index 00000000..57ee71a7
--- /dev/null
+++ b/doc/diagrams/consumer_actuate_multiple_providers.drawio
@@ -0,0 +1,212 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_actuate_multiple_providers.svg b/doc/diagrams/consumer_actuate_multiple_providers.svg
new file mode 100644
index 00000000..24c5c6da
--- /dev/null
+++ b/doc/diagrams/consumer_actuate_multiple_providers.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer :Provider Door :Vehicle Network ack_write_operation BatchActuateResponse BatchActuate(BatchActuateRequest={Door, Window})
BatchActuate(BatchActuateRequest={Door, Window}) OpenProviderStream(stream Β OpenProviderStreamRequest=ProvideActuationRequest(Door))
OpenProviderStream(streamΒ OpenProviderStreamRequest=ProvideActuationRequest(Door)) stream Β OpenProviderStreamResponse=ProvideActuationResponse
streamΒ OpenProviderStreamResponse=ProvideActuationResponse stream Β OpenProviderStreamRequest=BatchActuateStreamResponse(Door)
streamΒ OpenProviderStreamRequest=BatchActuateStreamResponse(Door) stream Β OpenProviderStreamResponse=BatchActuateStreamRequest(Door)
streamΒ OpenProviderStreamResponse=BatchActuateStreamRequest(Door) write_data_frame(data) ack_write_operation :Provider Window stream Β OpenProviderStreamRequest=BatchActuateStreamResponse(Window)
streamΒ OpenProviderStreamRequest=BatchActuateStreamResponse(Window) OpenProviderStream(stream Β OpenProviderStreamRequest=ProvideActuationRequest(Window))
OpenProviderStream(streamΒ OpenProviderStreamRequest=ProvideActuationRequest(Window)) stream Β OpenProviderStreamResponse=ProvideActuationResponse
streamΒ OpenProviderStreamResponse=ProvideActuationResponse stream Β OpenProviderStreamResponse=BatchActuateStreamRequest(Window)
streamΒ OpenProviderStreamResponse=BatchActuateStreamRequest(Window) write_data_frame(data) Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_get_values.drawio b/doc/diagrams/consumer_get_values.drawio
new file mode 100644
index 00000000..ad0a38f4
--- /dev/null
+++ b/doc/diagrams/consumer_get_values.drawio
@@ -0,0 +1,169 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_get_values.svg b/doc/diagrams/consumer_get_values.svg
new file mode 100644
index 00000000..7fe2780c
--- /dev/null
+++ b/doc/diagrams/consumer_get_values.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer :Provider :Vehicle Network GetValuesResponse GetValues(GetValuesRequest=signal_paths)
GetValues(GetValuesRequest=signal_paths) OpenProviderStream(stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values))
OpenProviderStream(streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values)) stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values)
streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() recv_data_frame() OEM cycle_time... OEM protocol design Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_provider_list_metadata.drawio b/doc/diagrams/consumer_provider_list_metadata.drawio
new file mode 100644
index 00000000..fd39f6ab
--- /dev/null
+++ b/doc/diagrams/consumer_provider_list_metadata.drawio
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_provider_list_metadata.svg b/doc/diagrams/consumer_provider_list_metadata.svg
new file mode 100644
index 00000000..f83682bf
--- /dev/null
+++ b/doc/diagrams/consumer_provider_list_metadata.svg
@@ -0,0 +1 @@
+:DataBroker ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...)
ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...) :Signal Consumer ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...)
ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...) ListMetadata(ListMetadataRequest(root="Vehicle.**"))
ListMetadata(ListMetadataRequest(root="Vehicle.**")) :Provider ListMetadata(ListMetadataRequest(root="Vehicle.**"))
ListMetadata(ListMetadataRequest(root="Vehicle.**")) Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_provider_server_info.drawio b/doc/diagrams/consumer_provider_server_info.drawio
new file mode 100644
index 00000000..840695d6
--- /dev/null
+++ b/doc/diagrams/consumer_provider_server_info.drawio
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_provider_server_info.svg b/doc/diagrams/consumer_provider_server_info.svg
new file mode 100644
index 00000000..550f08f4
--- /dev/null
+++ b/doc/diagrams/consumer_provider_server_info.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer GetServerInfoResponse(name, version, commit_hash)
GetServerInfoResponse(name, version, commit_hash) GetServerInfo(GetServerInfoRequest)
GetServerInfo(GetServerInfoRequest) :Provider GetServerInfo(GetServerInfoRequest)
GetServerInfo(GetServerInfoRequest) GetServerInfoResponse(name, version, commit_hash)
GetServerInfoResponse(name, version, commit_hash) Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_subscribes.drawio b/doc/diagrams/consumer_subscribes.drawio
new file mode 100644
index 00000000..37e4aa32
--- /dev/null
+++ b/doc/diagrams/consumer_subscribes.drawio
@@ -0,0 +1,303 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_subscribes.svg b/doc/diagrams/consumer_subscribes.svg
new file mode 100644
index 00000000..e8fa7da6
--- /dev/null
+++ b/doc/diagrams/consumer_subscribes.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer SubscribeById(SubscribeByIdRequest=signals_ids(1, 2, ..))
SubscribeById(SubscribeByIdRequest=signals_ids(1, 2, ..)) :Provider recv_data_frame() recv_data_frame() :Vehicle Network ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...)
ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...) ListMetadata(ListMetadataRequest(root="Vehicle.**"))
ListMetadata(ListMetadataRequest(root="Vehicle.**")) OpenProviderStream(stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values))
OpenProviderStream(streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values)) stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse stream SubscribeByIdResponse(signal_values)
stream SubscribeByIdResponse(signal_values) Close Subscription recv_data_frame() stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values)
streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values)
streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values)
streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() recv_data_frame() OEM... stream SubscribeByIdResponse(current_signal_values)
stream SubscribeByIdResponse(current_signal_values) Text is not SVG - cannot display
diff --git a/doc/diagrams/provider_publish.drawio b/doc/diagrams/provider_publish.drawio
new file mode 100644
index 00000000..6fa4b559
--- /dev/null
+++ b/doc/diagrams/provider_publish.drawio
@@ -0,0 +1,216 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/provider_publish.svg b/doc/diagrams/provider_publish.svg
new file mode 100644
index 00000000..4988dddd
--- /dev/null
+++ b/doc/diagrams/provider_publish.svg
@@ -0,0 +1 @@
+:DataBroker :Provider recv_data_frame() recv_data_frame() :Vehicle Network OpenProviderStream(stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values))
OpenProviderStream(streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values)) stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values)
streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values)
streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream Β OpenProviderStreamRequest=PublishValuesRequest(signal_values)
streamΒ OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream Β OpenProviderStreamResponse=PublishValuesResponse
streamΒ OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() recv_data_frame() cycle_time... Text is not SVG - cannot display
diff --git a/doc/diagrams/provider_recv_actuation.drawio b/doc/diagrams/provider_recv_actuation.drawio
new file mode 100644
index 00000000..3dd97391
--- /dev/null
+++ b/doc/diagrams/provider_recv_actuation.drawio
@@ -0,0 +1,97 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/provider_recv_actuation.svg b/doc/diagrams/provider_recv_actuation.svg
new file mode 100644
index 00000000..ec95e79f
--- /dev/null
+++ b/doc/diagrams/provider_recv_actuation.svg
@@ -0,0 +1 @@
+:DataBroker :Provider :Vehicle Network OpenProviderStream(stream Β OpenProviderStreamRequest=ProvideActuationRequest(actutators))
OpenProviderStream(streamΒ OpenProviderStreamRequest=ProvideActuationRequest(actutators)) stream Β OpenProviderStreamResponse=ProvideActuationResponse
streamΒ OpenProviderStreamResponse=ProvideActuationResponse stream Β OpenProviderStreamRequest=BatchActuateStreamResponse
streamΒ OpenProviderStreamRequest=BatchActuateStreamResponse stream Β OpenProviderStreamResponse=BatchActuateStreamRequest(actuator_path, value)
streamΒ OpenProviderStreamResponse=BatchActuateStreamRequest(actuator_path, value) write_data_frame(data) ack_write_operation Text is not SVG - cannot display
diff --git a/doc/kuksa_analysis.md b/doc/kuksa_analysis.md
new file mode 100644
index 00000000..9b075abd
--- /dev/null
+++ b/doc/kuksa_analysis.md
@@ -0,0 +1,24 @@
+# KUKSA Analysis
+
+This documentation provides a comprehensive analysis of the KUKSA project, detailing essential aspects to ensure a clear understanding and effective implementation. The sections covered include:
+
+### 1. Requirements
+The requirements for Kuksa Databroker are available in the [Kuksa requirements](./kuksa_analysis/kuksa_requirements.md)
+
+### 2. Design Topics
+The extended list of design topics for Kuksa Databroker is available in the [Kuksa design topics](./kuksa_analysis/kuksa_design_topics.md)
+
+### 3. Use Cases
+The use cases for Kuksa Databroker are available in the [Kuksa use cases](./kuksa_analysis/kuksa_use_cases.md)
+
+### 4. New API Definition
+The latest and new API `kuksa.val.v2` for Kuksa Databroker is available in [kuksa.val.v2](../proto/kuksa/val/v2/val.proto)
+
+This API is under development and will eventually replace:
+[kuksa.val.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/kuksa/val/v1) API.
+[sdv.databroker.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/sdv/databroker/v1) API.
+
+
+### Documentation
+ #### [Terminology](./terminology.md)
+ #### [System Architecture](./system-architecture.md)
diff --git a/doc/kuksa_analysis/kuksa_design_topics.md b/doc/kuksa_analysis/kuksa_design_topics.md
new file mode 100644
index 00000000..971f2384
--- /dev/null
+++ b/doc/kuksa_analysis/kuksa_design_topics.md
@@ -0,0 +1,726 @@
+# Design Topics
+The document aims to gather current design decisions, sketches, and incomplete ideas regarding various design topics. This includes areas where no decisions have been made yet, as well as topics where decisions may still be pending.
+
+# Design topics status
+| Description | Status |
+|-------------------------------------------|------------------------------|
+| Implemented and Verified | π’ |
+| Approved, Not Yet Implemented | π‘ |
+| Long Term Goal | π΄ |
+
+# Content
+- [Design Topics](#design-topics)
+- [Design topics status](#design-topics-status)
+- [Content](#content)
+- [Data availability/persistence according to lifecycle of Client, Databroker and Provider](#data-availabilitypersistence-according-to-lifecycle-of-client-databroker-and-provider)
+- [Wildcard support](#wildcard-support)
+- [Registration of Datapoints](#registration-of-datapoints)
+- [Availability of Datapoints](#availability-of-datapoints)
+- [Lifecycle of components](#lifecycle-of-components)
+- [Path requests](#path-requests)
+- [Errors](#errors)
+- [Setting Values](#setting-values)
+- [Atomic operations](#atomic-operations)
+- [Update notifications](#update-notifications)
+- [Access rights](#access-rights)
+- [VSS signals - Users vs providers](#vss-signals---users-vs-providers)
+- [Enable "easy to use" user facing API](#enable-easy-to-use-user-facing-api)
+- [Performance / runtime footprint](#performance--runtime-footprint)
+- [Throttling-mode when system is overloaded.](#throttling-mode-when-system-is-overloaded)
+- [Considerations regarding shared and zero-copy memory approaches](#considerations-regarding-shared-and-zero-copy-memory-approaches)
+- [Provider control and provider capabilities](#provider-control-and-provider-capabilities)
+- [Control the rate of updates](#control-the-rate-of-updates)
+- [Differentiate between different providers of the same VSS data](#differentiate-between-different-providers-of-the-same-vss-data)
+- [Data Aliveness/Availability](#data-alivenessavailability)
+- [Missing features from `sdv.databroker.v1` in `kuksa.val.v1`](#missing-features-from-sdvdatabrokerv1-in-kuksavalv1)
+- [Exploring a design of a bidirectional streaming API](#exploring-a-design-of-a-bidirectional-streaming-api)
+ - [Design choices](#design-choices)
+ - [Stream](#stream)
+ - [Bidirectional stream](#bidirectional-stream)
+ - [Actuators](#actuators)
+ - [Overview](#overview)
+- [message Datapoint](#message-datapoint)
+ - [Alternative 1](#alternative-1)
+ - [Alternative 2](#alternative-2)
+ - [Alternative 3](#alternative-3)
+- [Split subscribe method due to performance reasons](#split-subscribe-method-due-to-performance-reasons)
+- [Service VAL better naming](#service-val-better-naming)
+- [Extend and split service definition for kuksa.val.v3 (current latest version kuksa.val.v2)?](#extend-and-split-service-definition-for-kuksavalv3-current-latest-version-kuksavalv2)
+- [COVESA topics](#covesa-topics)
+
+# Data availability/persistence according to lifecycle of Client, Databroker and Provider
+ Status: π΄
+ Current decisions:
+ 1. Databroker stores last signal values during its own lifecycle.
+ 2. It is not possible to reset values.
+ 3. Signal Consumer and Provider are not aware of each other.
+
+ Description:
+ 1. The data broker either ...
+ * Stores last set values during its own lifecycle,
+ * Stores values during the system's power cycle (i.e., "persists" values over own restarts, or
+ * store values over system's power cycles.
+
+ 2. How to "reset" value availability if its provider got inactive (without resetting the value)?
+ * -> Client's job (e.g. using timestamp)?
+ * -> Broker's job (e.g. using timestamp + minimal update cycle)?
+
+ 3. Provider and client aliveness
+ * If there is no active client subscription should the provider stop sending values to Databroker?
+ * If there is no active provider setting values while client subscription? Should Databroker or Client be aware of it?
+
+# Wildcard support
+ Status: π’
+ Current decisions:
+ Only `ListMetadata` supports wildcards, due to complex error handling in the implementation and usability.
+ Also due to performance issues, e.g. when calling `subscribe`, since the wildcard was checked for each path and each response.
+ Description:
+ * Do we want it only for `GetMetadata`?
+
+Reference -> [Wildcard](../wildcard_matching.md)
+
+# Registration of Datapoints
+ Status: π’
+ Current decisions:
+ Provider can register and claim only actuators.
+ An actuator can only be claimed by a Provider.
+ Description:
+Do we need a method for providers to register data points at runtime? **Implemented in old API?**:
+
+Its purpose would be:
+1. Publishing the availability of new data points during runtime
+2. Claiming providership of those data points
+
+
+In a mature system some "central instance", e.g., the system integrator must be responsible to ensure that no two components can register/claim providership for any data point.
+In case of a registration method the central instance would either have to
+ * make sure there is single provider per data point via configuration, or
+ * use access rights management to enforce that only the right component can register a certain data point.
+
+
+# Availability of Datapoints
+ Status: π΄
+ Current decisions:
+ Description:
+1. The system must be capable of individually checking the availability of each data point on a specific instance of the data broker. This means verifying whether there is an active provider instance installed on the system where the data broker instance is running, which is capable of supplying the data point during the current update cycle.
+
+2. It shall be possible to determine the availability of the actual value of each data point separately on a certain instance of the data broker.
+This represents the information if the provider of that data point is up and running on the system and already provided a value of that data point.
+
+
+# Lifecycle of components
+ Status: π’
+ Current decisions: Kuksa.val.v2 API as well as Databroker implementation does not depend on any certain order of starting for components.
+ Important point -> Signal Consumer and Provider should implement a retry policy in case the connection gets lost.
+ Description:
+The proper function of the overall system of components "around" the data broker, i.e., applications, providers, and the broker itself, shall not depend on a certain order of starting the components. This means:
+1. Any clients of the data broker (applications, providers) shall not assume the availability of the data broker service when they startup.
+2. Any clients of the data broker (applications, providers) shall not assume the permanent availability of the data broker service during their runtime.
+3. Any applications/clients shall not assume the availability of a value for any data point at their startup.
+4. Any applications/clients shall not assume the permanent presence of a value for any data point during their runtime.
+Explanation: Any component of the system can come and go - components could stop working due to a crash (what should not but will happen) or because of an update (which is a regular use case). Furthermore, components could reside on different execution environments which need restarts at different points of time. This shall not result in stopping and restarting the overall system. Instead, each and every component shall react in an appropriate way on changing availability of its dependencies.
+
+# Path requests
+ Status: π’
+ Current decisions: Databroker fully supports VSS
+ Description:
+The Databroker shall support at least those metadata elements as defined by the VSS rule set. Data points/nodes are (primarily) identified ("addressed") by their name/path which is a string.
+VSS arranges nodes in a tree structure, separating elements via a single dot ("."). This shall be supported but must not be a mandatory requirement.
+```console
+ Vehicle.Speed;
+ Vehicle.Seats.Row1.Position;
+ ...
+```
+
+# Errors
+ Status: π’
+ Current decisions: Kuksa.val.v2 API as well as Databroker implementation is consistent and it is aligned by all service calls
+ returning [gRPC Error](https://grpc.github.io/grpc/core/md_doc_statuscodes.html).
+ Description:
+Error responses returned by **all gRPC service calls** must be aligned with [gRPC Error](https://grpc.github.io/grpc/core/md_doc_statuscodes.html).
+```protobuf
+message Status {
+ // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+ int32 code = 1;
+
+ // A developer-facing error message, which should be in English. Any
+ // user-facing error message should be localized and sent in the
+ // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+ string message = 2;
+
+ // A list of messages that carry the error details. There will be a
+ // common set of message types for APIs to use.
+ repeated google.protobuf.Any details = 3;
+}
+```
+Field `details` of type `Any` will be a serialized message as `bytes` containing an internal Databroker [Error](https://github.com/eclipse-kuksa/kuksa-databroker/blob/main/proto/kuksa/val/v1/types.proto#L246):
+```protobuf
+message Error {
+ uint32 code = 1;
+ string reason = 2;
+ string message = 3;
+}
+```
+
+# Setting Values
+ Status: π’
+ Current decisions:
+ Description:
+1. Attributes:
+ * It shall not be possible to set attribute values, except once at startup time by its respective responsible provider.
+2. Sensors:
+ * There shall be only one client able to set the current sensor value during each arbitrary span of time.
+3. Actuators:
+ * ? Actuator data points have a current and a target value. The current value represents the actual state of the actuator, whereas the target value represents a state desired by that client, who most recently set that target value.
+ * Only one client shall be able to set the current actuator value during each arbitrary span of time. This client is the provider of the data point.
+ * Multiple clients may be able to set the target value of an actuator.
+ * Only the current provider client shall react on setting a new target value. It is expected that the provider tries to bring the current state of an actuator into the state requested by the target value. If this is not possible (for some reason), the provider is responsible to reset the state of the target value to that of the current value of the same data point.
+ -> This actually not a requirement to the data broker, but to the overall "usage concept" around the broker.
+ *? If no (active) provider is available for an actuator data point, its current and target value shall be "unavailable". A set request to a target value shall be "ignored" and the resulting target and current value shall stay as "unavailable".
+
+# Atomic operations
+ Status: π’
+ Current decisions: Kuksa.val.v2 API as well as Databroker implementation supports atomic operations handling sequentially request and responses for all the service methods.
+ Description:
+All data point values set by a single request must be updated in an atomic manner. This means:
+1. Set requests must be handled strongly sequentially in the order of reception.
+
+2. Responses to get requests and notifications on behalf of active subscriptions must represent the state of data points in-between finished set requests, i.e., a single set request (updating multiple data points) must not be interfered with get requests or update notifications.
+
+# Update notifications
+ Status: π’
+ Current decisions: Databroker implementation will only receive datapoints when their values changed.
+ Description:
+1. Update notifications for active subscriptions of multiple data points shall always contain the state of all data points of the subscription even if just one value has changed.
+
+2. If this behavior is not wanted by a client, it must subscribe data points separately.
+
+# Access rights
+ Status: π’
+ Current decisions:
+ 1. Many Providers can update a sensor at same time, just the last value will remain on Databroker database.
+ 2. Many Signal Consumers can change the value of an actuator, but only a Provider will forward its value to the Vehicle Network.
+ 3. New actuators values will be forwarded from Signal Consumer to Databroker to Provider to Vehicle Network, but Databroker will not be responsible for resetting any value on its Database, just fire and forget.
+ Description:
+1. Sensor (-like) data points: Its value shall be set by a single provider only (at least at a time)
+
+2. Actuator (-like) data points: Multiple clients may set its (target) value, a single client may act on its last set (target) value and "reset" it. Only a single client must set its current value (if there is a distinguishing).
+Hint: This does not necessarily need to be ensured via the "API design" - it could also be ensured via access rights management configuration.
+
+
+# VSS signals - Users vs providers
+ Status: π’/π‘
+ Current decisions: New Databroker API kuksa.val.v2 will only have one service which will be used as entry point for Signal Consumer and Provider, it does not mean that it can not change in the future.
+ Description:
+The Vehicle Signals Specification (VSS) and Vehicle Information Service Specification (VISS) describes the standardized signals available (or not) in a vehicle. Both standards also describe how users interact with these signals.
+
+* They can read and subscribe to [actuators, sensors](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/sensor_actuator/) and [attributes](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/attributes/).
+* They can set [actuators](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/sensor_actuator/) (and only actuators).
+  For VISSv2 specifically, see [read](https://w3c.github.io/automotive/spec/VISSv2_Core.html#read), [subscribe](https://w3c.github.io/automotive/spec/VISSv2_Core.html#subscribe), [update](https://w3c.github.io/automotive/spec/VISSv2_Core.html#update).
+
+VSS and VISS do _not_ specify how signals are provided _into_ the VSS server / model.
+
+These two aspects of interacting with the signals can thus be divided into:
+* **Provider**
+ _A provider is providing a signal (sensor, attribute or actuator) to the VSS tree._
+ _A provider can also use signals as any other user._
+
+* **User**
+ _A user (or client) is using the signals in the VSS tree (without providing any of them itself)._
+
+where the VSS and VISS* specifications only specify the **User** part.
+
+When designing the databroker API, the method kuksa.val uses for providing signals was investigated. In short, kuksa-val-server takes the approach of trying to shoehorn the need of signal providers into the existing VISSv2 protocol. This is problematic for several reasons:
+
+* By reusing, **but changing the meaning** of the terminology used in VSS, there is a constant dissonance between the terms used. This is a recipe for confusion.
+* By deviating from the standard in _this particular way_, a standards compliant VISSv2 client cannot use it in any meaningful way.
+* It makes it harder to actually provide a standards compliant VISSv2 in the future.
+* By using the same methods for both signal providers _and_ users of signals it's harder (or impossible) to customize them for their different requirements.
+
+With this in mind, databroker chose to make a clear distinction between signal providers and signal users. It doesn't use this terminology though. It does this by splitting the interface into two separate services, which are customized for their different requirements / use cases. It doesn't need to be implemented in this way in order to achieve the same design goal, though.
+
+# Enable "easy to use" user facing API
+ Status: π’
+ Current decisions:
 With the new design of kuksa.val.v2 this use case gets solved by just "fire and forget" new actuator values.
+ Use case:
+ The user wants to lock a door and know when it's done / whether it worked.
+ 1. User calls `subscribe(Vehicle.Door.Locked)`
+ 2. User calls `actuate(Vehicle.Door.Locked, true)`
 3. Provider receives the request and forwards it to the Vehicle Network.
+ 4. Provider at some point in time receives a new value for Vehicle.Door.Locked signal from the Vehicle Network.
+ 5. Provider publishes the new value to Databroker.
+ 6. User receives a new changed value for Vehicle.Door.Locked and concludes that the door has now been locked.
+
+ Note: User should define its own timeout for each application in case the actuate value expected is not received.
+
+ Or if there are no providers.
+ 1. User calls `subscribe(Vehicle.Door.Locked)`
+ 2. User calls `actuate(Vehicle.Door.Locked, true)`
+ 3. Databroker returns an error when no available provider has claimed the actuator signal.
+ Description:
+This is meant to illustrate what type of user APIs that can be created depending on what the provider API looks like (assuming we have one).
+
+Use case:
+
+The user wants to lock a door and know when it's done / whether it worked.
+
+**The kuksa.val.v1 or "key-value store" way.**
+
+Something conceptually like this:
+1. User calls `set(Vehicle.Door.Locked, field=TARGET_VALUE, true)`
+2. User calls `subscribe(Vehicle.Door.Locked, field=VALUE)`
+3. Provider (subscribing to TARGET_VALUE) receives the request and starts actuating, providing VALUE when it changes.
+4. User is notified when VALUE turns to true, and concludes that the door has now been locked.
+
+But what happens if the door fails to lock, e.g. the door is not closed or the actuator is broken?
+* What should the user subscribe to for this information?
+* And how long should it wait before concluding that it failed?
+* And what happens if there is no provider of this actuator?
+
+Another question, a bit convoluted for a quick actuator like this (but applicable for slower moving things), is what happens if another user calls set(..., false) before the actuator change has taken place?
+
+This can be solved by subscribing to both VALUE and TARGET_VALUE.
+
+1. User calls `set(Vehicle.Door.Locked, field=TARGET_VALUE, true)`
+2. User calls `subscribe(Vehicle.Door.Locked, fields=[VALUE, TARGET_VALUE])`
+3. Provider (subscribing to TARGET_VALUE) receives the request and starts actuating, providing VALUE when it changes.
+4. User is notified when VALUE turns true, and concludes that the door has now been locked, or the user is notified when TARGET_VALUE turns false, and knows that the operation was cancelled.
+
+
+**The user API + provider API way.**
+
+So what could this look like if we instead had an "easy to use" user API + a provider API and the server in between.
+
+Something like this:
+1. User calls `set(Vehicle.Door.Locked, true)`
+2. Server receives the request and sends an ACTUATOR_TARGET value to the provider of this signal.
+3. The provider receives it and starts actuating and provides VALUE back to the server when it changes.
+4. The provider sends "actuator target reached" back to the server.
+5. The server sends a (success) response back to the client.
+
+Or in case of failure:
+
+User calls `set(Vehicle.Door.Locked, true)`
+1. Server receives the request and sends an ACTUATOR_TARGET value to the provider of this signal.
+2. The provider receives it and starts actuating but notice that it fails, or that it's not allowed at the moment.
+3. The provider sends "actuator failed" or something back.
+4. The server sends a response "actuator failed" back to the client.
+
+Or if there are no providers.
+1. User calls `set(Vehicle.Door.Locked, true)`.
+2. The server knows that there are no providers providing that signal.
+3. The server sends a response "actuator failed" or "not available" back to the client
+
+This latter approach would seem to represent an easier to use API for the user/library.
+
+**Note**
+Doing it like this puts the requirement to know the details of the actuator on the actuator provider.
+
+The actuator provider is better suited to know of reasonable timeouts etc in comparison to the users of signals (or the server). The user doesn't need to know how long to wait for something or to which error to subscribe. The server would only have to handle the presence detection which is a generic feature that doesn't require knowledge of sensor specifics.
+
+
+
+# Performance / runtime footprint
+ Status: π’
+ Current decisions: A detailed performance report will be provided after release of kuksa.val.v2. So far kuksa.val.v2 shows better performance than kuksa.val.v1 and sdv.databroker.v1
+ Description:
+Providers, especially of sensor data, are often setting values in rapid succession over long periods of time. Using unary GRPC calls for `Set` operations, is less efficient in terms of throughput when compared to GRPC streams. It's also more CPU intensive.
+
+The current design of `kuksa.val.v1` only provides unary call to set values. This represents a pure regression when compared to the databroker API.
+
+It's not a huge issue (in practice) if users avoid using `kuksa_client.KuksaClientThread()`. If they use that, I would say it's barely usable for e.g. CAN in its current form.
+
+**Python performance setting signal values**
+
+| Set only | type | throughput |
+|-----------------------------|-------|---------------:|
+| kuksa_client (1) | | ~ 80 / s |
+| kuksa.grpc (2) | async | ~ 2 500 / s |
+| kuksa.val.v1 (3) | async | ~ 6 500 / s |
+| kuksa.val.v1 (3) | sync | ~ 9 000 / s |
+| databroker (4) | sync | ~ 26 000 / s |
+
+1 kuksa_client is using `kuksa_client.KuksaClientThread()`
+
+2 kuksa.grpc is using `kuksa_client.grpc` without the legacy `kuksa_client.KuksaClientThread()` wrapping it
+
+3 uses the generated `val_pb2*.py` python lib directly
+
+4 uses the generated `collector_pb2*.py` python lib directly
+
+
+Improvements:
+* Higher throughput.
+* Reduced CPU load.
+* Lower latency (probably, hasn't been measured)
+
+What's needed:
+* Introduce a streaming (GRPC) interface for providing sensor data.
+
+# Throttling-mode when system is overloaded.
+ Status: π΄
+ Current decisions:
+ Description:
+Is it worth to consider some throttling mode to be activated by the user in case system or any component is overloaded?
+Throttling modes to think about:
+ * Rate Limiting
+ * Bandwidth Throttling
+ * CPU Throttling
+
+# Considerations regarding shared and zero-copy memory approaches
+ Status: π΄
+ Current decisions:
+ Description:
+Pros:
+Cons:
+
+# Provider control and provider capabilities
+ Status: π΄
+ Current decisions:
+ Description:
+Open questions:
+
+Should the "change type" of a sensor (i.e. CONTINUOUS vs ON_CHANGE) be decided by providers
+or in the VSS metadata?
+It only makes sense for consumers to request their preferred rate of updates when they are
+subscribing to a sensor of type CONTINUOUS. That would be an argument for providing this
+information as part of the VSS metadata, so that it doesn't vary between vehicles.
+
+# Control the rate of updates
+ Status: π΄
+ Current decisions:
+ Description:
+Users of (continuous) sensor data can have different preferences with regard to how often
+they would like to receive updates. E.g. Vehicle.Speed is updated 100 times per second, but
+a consumer would only need it 1 time per second. This would introduce unnecessary processing
+requirements on the consumer (and provider).
+
+Currently there is no way for databroker to know how often a provider should provide updates.
+There is also no way for clients to instruct databroker of what rate they want.
+Sensor data is just sent at the rate it is received and providers are just sending sensor data at the rate
+they themselves decide.
+
+If a consumer can communicate this wish, there are several options for improvement.
+
+Improvements:
+* Reduction in load for consumers by adapting the update rate based on their preferences.
+* Reduction in load for providers by propagating needed update rate.
+* Reduction in load for databroker by disabling unneeded providers.
+
+What's needed:
+* Introduce a way for clients to tell databroker of their preferred rate of updates.
+* Introduce a way for databroker to tell providers of highest needed frequency of sensor
+ data to which they can then adapt.
+ [probably needs] open stream `databroker` -> `provider`
+
+Other considerations:
+
+Setting the desired rate of update would only make sense for sensors of type CONTINUOUS.
+Sensors of type ON_CHANGE would always provide updates when the value changes.
+It could also make sense to introduce a way to request a momentary value from a provider,
+which would be used if a consumer only requests a momentary value (and doesn't subscribe).
+
+
+# Differentiate between different providers of the same VSS data
+ Status: π΄
+ Current decisions:
+ Description:
+Different sensors can provide data that is mapped to the same VSS signal / entry.
+This data can be of different resolution and / or quality. For example, an accelerometer
+can be used to infer the current speed of a vehicle, but a speedometer would probably
+provide a higher quality measurement. In the same way that a consumer could instruct
+databroker of a preferred update rate, it could also instruct the databroker of what
+accuracy of a sensor it needs.
+
+It's currently possible for multiple providers to set the same VSS entry, but there is no
+way for databroker to differentiate between them in any way.
+
+It could make sense to introduce a way for providers to describe themselves in order to
+make it possible to differentiate between them with regard to update rates, power consumption, accuracy or quality of their sensor data.
+
+This would give the databroker clients a way to differentiate (and choose) different
+sources of data and make informed decisions based on that.
+
+Improvements:
+* Choose between providers based on available update frequency.
+* Fallback when sensor information from one sensor isn't available.
+
+What's needed:
+* Introduce a way for providers to describe their capabilities and properties of their
+ provided sensor data.
+
+Optional improvements:
+* Choose between providers based on needed quality / accuracy of sensor.
+ [needs] control plane, i.e. an open stream `databroker` -> `provider`
+* Consumers can get extended sensor data information.
+
+Optionally needed:
+* Introduce a way for consumers to tell databroker of their needed quality / accuracy
+ of VSS signal.
+
+
+# Data Aliveness/Availability
+ Status: π΄
+ Current decisions:
+ Description:
+The VSS signals / datapoints that are accessed through databroker can have a value and a
+timestamp. If they have never been set, they will have neither.
+
+There is no way for databroker to know if a value is up to date or not, since it doesn't
+have any information with regard to how often it should be updated or a way to determine
+if a provider has stopped providing data.
+
+For signals with a fixed update rate (sensors of type CONTINUOUS), it would theoretically
+be possible for either clients or the databroker to determine if a signal is up to date,
+by keeping track of the time since the last update.
+
+The providers of sensor data would be better suited to know the rate of update, and if
+the databroker were provided this information, it could automatically determine if a
+sensor hasn't been updated within its expected time window.
+
+For signals that only update based on events (i.e. a door opens), this isn't possible.
+Tracking the liveness of these signals would either require the providers to continuously
+send the same value even though it hasn't changed, or to have an open connection or
+another heartbeat mechanism between databroker and the provider to detect a missing provider.
+
+If there was a way to determine the availability of providers, the databroker could
+automatically determine that a signal was stale if its provider is no longer available.
+
+Improvements:
+* Track availability / liveness of a VSS signals.
+
+What's needed:
+* Introduce a way to signal that a signal is available / not available (in `kuksa.val.v1`).
+* Introduce a way for providers to tell databroker of a signals time to live (TTL).
+* Introduce a way for databroker to track availability of providers (and which VSS signals
+ they are providing).
+ [needs] an open stream `provider` -> `databroker` or `databroker` -> `provider`
+* Implement tracking of TTL in databroker to automatically set unavailable status to signals
+ that have not been updated in time.
+
+
+Other considerations:
+Attributes probably don't need to have aliveness functionality. They would be
+unavailable if they have never been set, but since they shouldn't update at runtime,
+once set they should be valid indefinitely.
+
+
+# Missing features from `sdv.databroker.v1` in `kuksa.val.v1`
+ Status: π’
+ Current decisions: New API kuksa.val.v2 will cover and combine feature from both APIs.
+ Description:
+Sort list: What features would be lost if removing sdv.databroker.v1 today
+ * Registration of new datapoints
+ * SQL queries
+ * Streaming updates (i.e. worse performance)
+ * Connectivity check (no streaming interface)
+
+
+# Exploring a design of a bidirectional streaming API
+ Status: π’
+ Current decisions: New bidirectional streaming service method was added to `kuksa.val.v2`
+ SensorCommand (start/stop) should be implemented at some point.π‘
+ Description:
+This represent one way to design an interface that would enable most of the improvements
+listed above and provide a clear path forward for introducing them.
+
+
+## Design choices
+
+In this design, a single bidirectional stream is used to provide everything needed by
+providers:
+`rpc Provide(stream ProviderRequest) returns (stream ProviderResponse);`
+
+This combines the control channel and the data channel into one. An alternative
+would be to split it into two bidirectional streams, one for control and the other
+for data. I'm not sure which makes the most sense.
+
+#### Stream
+By having an open stream (at all) between databroker and the provider, both ends
+can detect if the other goes away.
+
+Furthermore:
+
+A stream from provider -> databroker:
+- Enables higher throughput for sensor data updates and lower CPU usage.
+
+A stream from databroker -> provider:
+- Provides a way for databroker to send control commands to this provider.
+- Provides a way for databroker to send errors to this provider.
+
+#### Bidirectional stream
+
+A bidirectional stream between Provider <-> databroker
+- Provides databroker with a way to associate information sent from the provider
+ (e.g. capabilities, which actuators it provides, which sensors etc) with the stream
+ it uses to control it.
+
+#### Actuators
+VSS defines three types of signals:
+* Attribute
+* Sensor
+* Actuator
+
+An actuator acts as both something you can actuate and something providing values (a sensor).
+It's not even necessarily the same component providing the implementation of these separate concerns. With this in mind, a provider providing a VSS actuator would in this design provide
+an `Actuator(path)` to the server in order to receive `ActuateCommand`s, and provide
+`Sensor(path)` and send `SensorData` when they are providing sensor data.
+
+The alternative would be to duplicate everything in `Sensor` for `Actuator`s.
+
+
+## Overview
+The stream in each direction would consist of different "messages" implemented with `oneof {...}`.
+
+In the direction of *Provider -> Server*, at least four types of "messages" would flow(2) :
+* `SensorData` containing data streamed by the provider.
+* `Sensor` containing sensor information from the provider.
+* `Actuator` containing actuator information from the provider.
+* `Attribute` containing attribute including the value. (1)
+
+In the direction of *Server -> Provider*, at least three types of "messages" would flow:
+* `ActuateCommand`, tells actuator to actuate (triggered by someone setting actuator targets).
+* `SensorCommand`, controls behaviour of a sensor, i.e.
+ "start", "stop", "try to use this update frequency" etc..
+* `Error`, an error occurred. One type at the moment. It would probably make sense to
+ split it into errors that can occur at any time & errors that are directly caused by
+ things the provider provided.
+
+(1) It would probably make sense to introduce a separate `Error` in the direction of
+    *Provider -> Server*. Currently, the only error in that direction is `ReadError` as part of the
+    sensor data.
+
+(2) It's possible that it makes more sense to provide a separate unary RPC call for setting attributes,
+ since attributes (probably) don't update frequently and (probably) wont need availability status etc..
+
+```proto
+service VAL {
+ ...
+ rpc Provide(stream ProviderRequest) returns (stream ProviderResponse);
+}
+
+message ProviderRequest {
+ oneof provide {
+ Sensor sensor = 1;
+ Actuator actuator = 2;
+ Attribute attribute = 3;
+ SensorData sensor_data = 4;
+ }
+}
+
+message ProviderResponse {
+ oneof command {
+ ActuateCommand actuator_command = 1;
+ SensorCommand sensor_command = 2;
+ Error error = 3;
+ }
+}
+
+...
+
+```
+
+
+# message Datapoint
+ Status: π’
+ Current decisions: Alternative 1 was selected for development since `value` it is easy to access and in Protobuf could be `None`, meaning signal exists but does not have a value at the moment.
+ Description:
+Suggestion -> https://github.com/boschglobal/kuksa-databroker/pull/4#discussion_r1766459917
+
+Discussion -> https://github.com/eclipse-kuksa/kuksa-databroker/pull/33#discussion_r1776993874
+
+### Alternative 1
+```proto
+message Datapoint {
+ google.protobuf.Timestamp timestamp = 1;
+  Value value = 2;
+}
+```
+pros : easy to access value
+
+cons : no possible value status/state
+
+### Alternative 2
+```proto
+message Datapoint {
+ google.protobuf.Timestamp timestamp = 1;
+
+ oneof value_state {
+ State state = 2;
+ Value value = 3;
+ }
+}
+
+enum State {
+ // Unspecified value failure, reserved for gRPC backwards compatibility
+ // (see https://protobuf.dev/programming-guides/dos-donts/#unspecified-enum)
+ UNSPECIFIED = 0;
+ // The signal is known and provided, but doesn't have a valid value
+ INVALID_VALUE = 1;
+ // The signal is known, but no value is provided currently
+ NOT_PROVIDED = 2;
+}
+```
+
+pros : easy to understand
+
+cons : "more difficult to implement" depend on programming language
+
+### Alternative 3
+```proto
+message Datapoint {
+ google.protobuf.Timestamp timestamp = 1;
+
+ Status status = 2;
+ Value value = 3;
+}
+
+enum Status {
+ STATUS_UNSPECIFIED = 0;
+ STATUS_OK = 1;
+  STATUS_VALUE_NOT_AVAILABLE = 2;
+}
+```
+
+pros : easy to understand and access
+
+cons : Difficult to keep consistency between `status` and `value` values.
+
+# Split subscribe method due to performance reasons
+ Status: π’
+ Current decisions: Implemented
+ Description:
+ Before:
+ rpc Subscribe(SubscribeRequest) returns (stream SubscribeResponse); -> Signal_ID (path, id)
+
+
+ Now:
+ rpc Subscribe(SubscribeRequest) returns (stream SubscribeResponse); -> strings path
+
+ rpc SubscribeId(SubscribeRequestId) returns (stream SubscribeResponseId); -> int32 path -> faster
+
+
+message SubscribeByIdResponse {
+  map<int32, Datapoint> entries = 1;
+  //map<int32, Datapoint> entries = 1; // add to discussion PR
+}
+
+# Service VAL better naming
+ Status: π΄
+ Current decisions:
+ Description:
+
+# Extend and split service definition for kuksa.val.v3 (current latest version kuksa.val.v2)?
+ Status: π΄
+ Current decisions:
+ Description:
+* split up signal service (consumer, provider)
+
+* split services (signals -> VAL, metadata -> server_info, health_check -> metrics, num providers, vss_validation, ...)
+
+
+# COVESA topics
+ Status: π΄
+ Current decisions:
+ Description:
diff --git a/doc/kuksa_analysis/kuksa_requirements.md b/doc/kuksa_analysis/kuksa_requirements.md
new file mode 100644
index 00000000..ceef743f
--- /dev/null
+++ b/doc/kuksa_analysis/kuksa_requirements.md
@@ -0,0 +1,241 @@
+# Requirements
+
+# Content
+- [Requirements](#requirements)
+- [Content](#content)
+- [Requirements status](#requirements-status)
+- [Functional requirements](#functional-requirements)
+ - [As Signal Consumer](#as-signal-consumer)
+ - [FR1-ASC](#fr1-asc)
+ - [FR2-ASC](#fr2-asc)
+ - [FR3-ASC](#fr3-asc)
+ - [FR4-ASC](#fr4-asc)
+ - [FR5-ASC](#fr5-asc)
+ - [FR6-ASC](#fr6-asc)
+ - [FR7-ASC](#fr7-asc)
+ - [FR8-ASC](#fr8-asc)
+ - [As Databroker](#as-databroker)
+ - [FR1-AD](#fr1-ad)
+ - [FR2-AD](#fr2-ad)
+ - [FR3-AD](#fr3-ad)
+ - [FR4-AD](#fr4-ad)
+ - [FR5-AD](#fr5-ad)
+ - [As Provider](#as-provider)
+ - [FR1-AP](#fr1-ap)
+ - [FR2-AP](#fr2-ap)
+ - [FR3-AP](#fr3-ap)
+ - [FR4-AP](#fr4-ap)
+ - [FR5-AP](#fr5-ap)
+- [Non-Functional Requirements](#non-functional-requirements)
+- [Domain requirements](#domain-requirements)
+
+# Requirements status
+| Description | Status |
+|-------------------------------------------|------------------------------|
+| Implemented and Verified | π’ |
+| Approved, Not Yet Implemented | π‘ |
+| Long Term Goal | π΄ |
+
+# Functional requirements
+
+## As Signal Consumer
+### FR1-ASC
+ Title: Single access service Point for Signal Consumer
+
+ Status: π’
+
+ Description: The Signal Consumer shall have a single service point accessible.
+
+### FR2-ASC
+ Title: Uniform retrieval of vehicle and environmental Signal Values
+
+ Status: π’
+
 Description: The Signal Consumer shall get sensor, actuator and attributes values of the vehicle signals and its environment (air temperature, etc) in a uniform manner.
+ 1. IF there is an error
+ * THEN the Signal Consumer shall not receive any signal value, just one single error with detailed error information
+ * ELSE the Signal Consumer shall receive exactly all the signalsβ values requested.
+
+ n. A signal can have value out of the set of the defined value restriction/data type and its 'value' can be explicitly 'None', meaning the Signal exists but no value is present.
+
+ 2. The Signal Consumer only shall get values for signal to which it has permission to.
+ 3. The Signal Consumer shall provide the paths or ids(int32) of the requested signals.
+
+### FR3-ASC
+ Title: Subscription and high frequency notification for Signal Value changes
+
+ Status: π’
+
+ Description: The Signal Consumer shall be able to subscribe to sensor or actuator values of the vehicle signals and get immediate notifications when any of the signal values change.
+ * IF there is an error
+ THEN the Signal Consumer shall not receive any signal value, just one single error with detailed error information
   ELSE the Signal Consumer shall receive exactly all the signalsβ values requested that have changed.
+ * The Signal Consumer only shall get values for signal to which it has permission to.
+ * The Signal Consumer shall provide the paths or the ids(int32) of the requested signals.
+
+### FR4-ASC
+ Title: Filtered interval subscription for Signal Values
+
+ Status: π΄
+
+ Description: The Signal Consumer shall subscribe and be able to apply a filter to receive a signal values with an interval of x milliseconds.
+ * IF there is an error
+ THEN the Signal Consumer shall not receive any signal value, just one single error with detailed error information
+ ELSE the Signal Consumer shall receive exactly all the signalsβ values requested.
+ * The Signal Consumer only shall get values for signal to which it has permission to.
+ * The Signal Consumer shall provide the paths or the ids(int32) of the requested signals.
+
+### FR5-ASC
+ Title: Accessing static metadata of signals
+
+ Status: π’
+
+ Description: A Signal Consumer shall be able to get static metadata from signals.
+ * IF there is an error
+ THEN the Signal Consumer shall not receive any signal metadata, just one single error with detailed error information
+ ELSE the Signal Consumer shall receive exactly all the signalsβ metadata requested.
+
+ * All sensors, actuators, and attributes values for which a Signal Consumer has permission to.
+ * The Signal Consumer shall provide the path, paths or wildcard of the signals.
+
+### FR6-ASC
+ Title: Actuation of actuator value with Databroker forwarded acknowledgment.
+
+ Status: π’
+
 Description: The Signal Consumer shall be able to actuate the value of an actuator. This value should be forwarded to the actuator's provider (if the provider is available), and from the provider to the Vehicle Network, and an acknowledgment response should be returned.
+ * Databroker should not store the provided value but only forward it to the provider.
+
+ * IF no provider is connected
+ THEN Signal Consumer shall receive an error that no provider is available.
+ ELSE IF databroker successfully forwarded the value to the provider
+ THEN Signal Consumer shall receive an acknowledgement of receipt.
+ * IF provided signal path is not an actuator Signal Consumer should receive an error.
+
+### FR7-ASC
+ Title: Actuate multiple actuators simultaneously with Databroker forwarded acknowledgment.
+
+ Status: π’
+
+ Description: The Signal Consumer shall be able to actuate the values of multiple actuators simultaneously. These values should be forwarded to the corresponding actuators' providers only if all providers are available.
+ * Databroker should not store the provided value but only forward them to the providers.
+
+ * IF any provider is not connected
+ THEN Signal Consumer shall receive an error that no provider is available.
+ ELSE IF databroker successfully forwarded the values to all providers
+ THEN Signal Consumer shall receive an acknowledgement of receipt.
+ * IF provided signal path is not an actuator Signal Consumer should receive an error.
+
+### FR8-ASC
+ Title: Provider availability detection for Signal Consumer
+
+ Status: π΄
+
+ Description: The Signal Consumer shall be able to know if there is a provider up and running.
+
+## As Databroker
+### FR1-AD
+ Title: Handling of COVESA Vehicle Signal Specification (VSS) syntax by Databroker
+
+ Status: π’
+
+ Description: The Databroker shall handle catalogs of signals described by the syntax as defined by the COVESA Vehicle Signal Specification (VSS). This relates to all aspects of the VSS syntax definition, which is also called VSS rule set. This implies that the Databroker can handle the signal catalog as defined by the COVESA VSS.
+
+### FR2-AD
+ Title: Support for VSS metadata elements by Databroker
+
+ Status: π’
+
+ Description: The Databroker shall support at least those metadata elements as defined by the VSS rule set.
+
+### FR3-AD
+ Title: Consumer subscription management by Databroker
+
+ Status: π’
+
+ Description: The Databroker shall keep a local record of all subscriptions of signal consumers.
+ * The Databroker shall add or remove subscriptions to a subscription pool according to the subscription requests of the Signal Consumer.
+
+### FR4-AD
+ Title: Actuator ownership claim management by Databroker
+
+ Status: π’
+
+ Description: The Databroker shall maintain a local record of all actuator ownership claims made by signal providers.
+ * The Databroker shall manage an "ownership claim actuation pool," adding or removing claims based on the requests from signal providers.
+ * Each actuator can be claimed by only one provider at any given time.
+
+
+### FR5-AD
+ Title: Command transmission capabilities of Databroker to Provider
+
+ Status: π’/π‘
+
+ Description: The Databroker shall be able to send to the Provider the following commands
+ * Actuate on actuator. π’
+ * Start receiving signal values from the Provider. π‘
+ * Stop receiving signal values from the Provider. π‘
+
+## As Provider
+### FR1-AP
+ Title: Provider actuator ownership claim management by Databroker
+
+ Status: π’
+
+ Description: The Databroker shall offer a method to Providers allowing to claim providership of a set of actuators.
+ * IF all claimed actuators are known AND
+ the provider has providing rights for all claimed actuators AND
+ all claimed actuators are NOT yet claimed by another provider
+ THEN the Databroker shall accept and store the claim
+ ELSE the Databroker shall reject the overall claim and return an error containing the reason.
+
+ * The Databroker shall remember accepted claims of a provider if the connection to the provider is lost.
+ * The Databroker shall allow providers to re-claim previously successfully claimed actuators.
+
+### FR2-AP
+ Title: High-Frequency publishing of Signal Values by Provider
+
+ Status: π’
+
+ Description: The Databroker shall be capable of publishing signal values at the cycle time received from the Vehicle Network.
 * IF all published signals are known AND
+ the provider has providing rights for all signals
+ THEN the Databroker shall accept and store the values
+ ELSE the Databroker shall reject the overall request and return an error response containing the reason.
+
+### FR3-AP
+ Title: Publishing of Signal Values by Provider
+
+ Status: π’
+
+ Description: The Databroker shall be able to publish signal values.
 * IF all published signals are known AND
+ the provider has providing rights for all signals
+ THEN the Databroker shall accept and store the values
+ ELSE the Databroker shall reject the overall request and return an error response containing the reason.
+
+### FR4-AP
+ Title: Actuation notification handling
+
+ Status: π’
+
+ Description: The Databroker shall offer a method to providers to subscribe for actuation notifications on a (sub-) set of claimed actuators.
+ * Provider shall only receive actuation requests for actuators that it owns or manages.
+ * Provider shall process actuation requests and forward them to the Vehicle Network.
+ * Provider shall notify the Databroker back right after the actuation request was forwarded.
+
+### FR5-AP
+ Title: Signal state update mechanism for providers by Databroker
+
+ Status: π΄
+
+ Description: The Databroker shall offer a method to providers to update the current state of a set of signals.
 a. The current state consists of a timestamp and either a valid value or a failure state.
+ b. The Databroker shall offer a method optimized for frequent updates.
+ c. The Databroker should offer a second method for non-frequent updates that is easy to use in a provider's implementation.
+ d. The Databroker shall reject updating the current state of signals where the client is not the provider of.
+ e. The Databroker shall store the updated value or failure state of a signal.
+
+# Non-Functional Requirements
+
+# Domain requirements
diff --git a/doc/kuksa_analysis/kuksa_use_cases.md b/doc/kuksa_analysis/kuksa_use_cases.md
new file mode 100644
index 00000000..5bb15296
--- /dev/null
+++ b/doc/kuksa_analysis/kuksa_use_cases.md
@@ -0,0 +1,332 @@
+# KUKSA Use Cases
+
+
+# Content
+- [KUKSA Use Cases](#kuksa-use-cases)
+- [Content](#content)
+- [Use Cases status](#use-cases-status)
+- [Use cases:](#use-cases)
+ - [UC1](#uc1)
+ - [UC2](#uc2)
+ - [UC3](#uc3)
+ - [UC4](#uc4)
+ - [UC5](#uc5)
+ - [UC6](#uc6)
+ - [UC7](#uc7)
+ - [UC8](#uc8)
+
+# Use Cases status
+| Description | Status |
+|-------------------------------------------|------------------------------|
+| Implemented and Verified | 🟢 |
+| Approved, Not Yet Implemented | 🟡 |
+| Long Term Goal | 🔴 |
+
+# Use cases:
+
+## UC1
+ Title: Get sensor values.
+
+ Status: 🟢
+
+ Description: Signal consumer gets sensor and actuator values of signals.
+
+**Primary Actor:** Signal consumer
+
+**Secondary Actors:** Databroker, Provider, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+ * Signal Consumer and Provider contain a valid authentication token to connect and perform calls to Databroker.
+ * Provider can read from Vehicle Network.
+ * Provider can publish signal values to Databroker.
+
+**Special requirements:**
+
+**Assumptions:**
+* Signal Consumer should get exactly all signal values requested.
+
+**Postconditions:**
+* Signal Consumer receives a list with all signals requested in same order as requested.
+
+**Sequence diagram:**
+
+
+**Basic Flow:**
+1. Provider opens bidirectional stream and starts publishing sensor and actuator values at the cycle time received from Vehicle network.
+2. Signal Consumer calls GetValues with signal paths/ids.
+3. Databroker sends the response with signal values back to the Signal Consumer.
+4. Use case finishes.
+
+**Exceptions:**
+
+## UC2
+ Title: Signal consumer subscribes by id(int32) to all signal values.
+
+ Status: 🟢
+
+ Description: Signal Consumer subscribes to all sensor and actuator values by ids and gets notifications when values changed.
+
+**Primary Actor:** Signal Consumer
+
+**Secondary Actors:** Databroker, Provider, Vehicle Network
+
+**Priority**: High
+
+**Preconditions:**
+ * Signal Consumer and Provider contain a valid authentication token to connect and perform calls to Databroker.
+ * Provider can read from Vehicle Network.
+ * Provider can constantly publish signal values to Databroker at a high frequency.
+
+**Special requirements:**
+ * The use case must meet a high frequency notification rate.
+
+**Assumptions:**
+ * Signal Consumer subscribes to all possible sensor and actuator values.
+ * Provider provides signal values to Databroker as fast as it receives data from the Vehicle Network.
+
+**Postconditions:**
+ * A subscription is created, and the consumer receives updates for those signals where the state changes.
+
+**Sequence diagram:**
+
+
+
+**Basic Flow:**
+1. Provider opens bidirectional stream and starts publishing sensor and actuator values at the cycle time received from Vehicle network.
+2. Signal Consumer calls list metadata to get all signals ids:
+3. Signal Consumer subscribes to all the sensor and actuator values by their ids.
+4. Databroker sends the current values stream back to the Signal Consumer.
+5. Databroker receives from Provider the new signal values and update its database.
+6. Databroker sends the changed values stream back to the Signal Consumer.
+7. Signal Consumer closes subscription to Databroker.
+8. Use case finishes.
+
+**Exceptions:**
+
+
+## UC3
+ Title: The consumer wants to set the value of an actuator signal.
+
+ Status: 🟢
+
+ Description: Signal Consumer actuates on an actuator.
+
+**Primary Actor:** Signal Consumer
+
+**Secondary Actors:** Databroker, Provider, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+ * Signal Consumer and Provider contain a valid authentication token to connect and perform calls to Databroker.
+ * Provider can write to Vehicle Network.
+ * No other Provider has claimed the ownership of the actuator to be actuated.
+
+**Special requirements:**
+
+**Assumptions:**
+ * It does not necessarily mean that the actuator successfully updated its value to the desired new value. The entire chain is only responsible for forwarding the actuation request from Signal Consumer to the Vehicle Network.
+
+**Postconditions:**
+ * Provider forwards an ack of receipt back to the Databroker immediately after the actuation request is forwarded.
+ * Signal Consumer receives a response which indicates the operation was successfully forwarded.
+
+**Sequence diagram:**
+
+
+**Basic Flow:**
+1. Provider opens bidirectional stream and sends a request to claim ownership of specific actuators.
+2. Databroker stores the claim request.
+3. Signal Consumer calls actuate with new actuator value.
+4. Databroker forwards the request to the corresponding provider.
+5. Provider receives the request and sends ack response back to Databroker.
+6. Databroker sends ack response back to the Signal Consumer.
+7. Provider sends the actuation request to the Vehicle Network.
+8. Use case finishes.
+
+**Exceptions:**
+
+
+## UC4
+ Title: Signal Consumer actuates on multiple actuators.
+
+ Status: 🟢
+
+ Description: Signal Consumer actuates on multiple actuators.
+
+**Primary Actor:** Signal Consumer
+
+**Secondary Actors:** Databroker, Provider, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+ * Signal Consumer and Providers contain a valid authentication token to connect and perform calls to Databroker.
+ * Providers can write to Vehicle Network.
+ * No other Provider has claimed the ownership of the actuator to be actuated.
+
+**Special requirements:**
+
+**Assumptions:**
+
+**Postconditions:**
+ * Providers forward an ack of receipt back to the Databroker immediately after the actuation request is forwarded.
+ * Signal Consumer receives a response which indicates the operations were successfully forwarded.
+
+**Sequence diagram:**
+
+
+**Basic Flow:**
+1. Door Provider opens bidirectional stream and sends an ownership claim request of the Door actuator.
+2. Window Provider opens bidirectional stream and sends an ownership claim request of the Window actuator.
+3. Databroker stores the claims requests.
+4. Signal Consumer calls actuate with new Door and Window values.
+5. Databroker forwards the actuation requests to the corresponding providers.
+6. Door Provider receives the Door actuation request and sends ack response back to Databroker.
+7. Window Provider receives the Window actuation request and sends ack response back to Databroker.
+8. Databroker sends ack response back to the Signal Consumer.
+9. Door Provider sends the Door actuation request to the Vehicle Network.
+10. Window Provider sends the Window actuation request to the Vehicle Network.
+11. Use case finishes.
+
+**Exceptions:**
+
+## UC5
+ Title: Signal Consumer and Provider get metadata of signals.
+
+ Status: 🟢
+
+ Description: Signal Consumer and Provider receives a list with metadata of VSS signals present in Databroker.
+
+**Primary Actor:** Signal Consumer, Provider
+
+**Secondary Actors:** Databroker
+
+**Priority:** High
+
+**Preconditions:**
+
+**Special requirements:**
+
+**Assumptions:**
+
+**Postconditions:**
+
+**Sequence diagram:**
+
+
+**Basic Flow:**
+1. Signal Consumer calls list metadata to get all signals metadata.
+2. Provider calls list metadata to get all signals metadata.
+3. Use case finishes.
+
+**Exceptions:**
+
+## UC6
+ Title: Signal Consumer and Provider get server info
+
+ Status: 🟢
+
+ Description: Signal Consumer and Provider get server info
+
+**Primary Actor:** Signal Consumer, Provider
+
+**Secondary Actors:** Databroker
+
+**Priority:** High
+
+**Preconditions:**
+
+**Special requirements:**
+
+**Assumptions:**
+
+**Postconditions:**
+
+**Sequence diagram:**
+
+
+**Basic Flow:**
+1. Signal Consumer calls get server info.
+2. Provider calls get server info.
+3. Use case finishes.
+
+**Exceptions:**
+
+
+## UC7
+ Title: Provider publishes signal values at high frequency.
+
+ Status: 🟢
+
+ Description: Provider publishes signal values to Databroker at a high frequency according to the cycle time from the Vehicle Network.
+
+**Primary Actor:** Provider
+
+**Secondary Actors:** Databroker, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+* Provider can read from Vehicle Network.
+
+**Special requirements:**
+* Provider publishes signal values to Databroker atomically.
+
+**Assumptions:**
+* Provider has a list of valid signals with their ids(int32) that are present on Databroker.
+
+**Postconditions:**
+* Databroker stores on database all the sensor values.
+
+**Sequence diagram:**
+
+
+**Basic Flow:**
+1. Provider starts publishing at high frequency sensor and actuator values (by their ids(int32)) received from Vehicle network.
+2. Databroker sends publish response back to provider.
+3. Use case finishes.
+
+**Alternative Flows:**
+
+**Exceptions:**
+
+## UC8
+ Title: Forward actuation request to Vehicle Network
+
+ Status: 🟢
+
+ Description: Provider receives an actuator request to change an actuator value on the Vehicle Network.
+
+**Primary Actor:** Provider
+
+**Secondary Actors:** Databroker, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+
+**Special requirements:**
+
+**Assumptions:**
+* Provider can establish a connection with the Vehicle Network.
+* There is an instance of Databroker up and running.
+* Signal Consumer calls actuate with new actuator value.
+
+**Postconditions:**
+
+**Sequence diagram:**
+
+
+
+**Basic Flow:**
+1. Provider opens bidirectional stream and sends a claim actuators request.
+2. Databroker stores the claim request.
+3. Databroker forwards the actuation request to the corresponding provider.
+4. Provider sends the actuation request to the Vehicle Network.
+5. Provider sends ack response back to Databroker.
+6. Use case finishes.
+
+**Exceptions:**
diff --git a/doc/protocol/README.md b/doc/protocol/README.md
index e746f206..8090b933 100644
--- a/doc/protocol/README.md
+++ b/doc/protocol/README.md
@@ -7,6 +7,7 @@ This file contains an overview what the KUKSA Server and databroker each support
| VISS V1 | - | - |
| VISS V2 | x/- | x/- |
| gRPC (kuksa) | x | - |
+| gRPC (kuksa.val.v2) | - | x |
| gRPC (kuksa.val.v1) | - | x |
| gRPC (sdv.databroker.v1) | - | x |
@@ -80,12 +81,12 @@ TLS is currently not supported.
The VISS Standard is not applicable for gRPC protocols. Here is an overview what the gRPC API in KUKSA databroker is capable of:
- Read: Reading VSS datapoints
- - Reading current or target value for actuators
+ - Reading value for actuators (for kuksa.val.v1 current or target values)
- Reading some metadata information from VSS datapoints
- Write: Writing VSS datapoints
- Writing sensor values
- - Writing current or target value for actuators
+ - Writing value for actuators (for kuksa.val.v1 current or target value)
- Soon: Writing some metadata information from VSS datapoints
- Subscription: Subscribing VSS datapoints
- Subscribing sensor values
- - Subscribing current or target value for actuators
+ - Subscribing value for actuators (for kuksa.val.v1 current or target value)
diff --git a/doc/quickstart.md b/doc/quickstart.md
index 66e403e4..f7995e06 100644
--- a/doc/quickstart.md
+++ b/doc/quickstart.md
@@ -1,4 +1,4 @@
-# KUKSA.val Quickstart
+# KUKSA.val Quickstart using kuksa.val.v1 API
The quickest possible way to get KUKSA.val up and running
diff --git a/doc/terminology.md b/doc/terminology.md
index 6ff94080..980c67b9 100644
--- a/doc/terminology.md
+++ b/doc/terminology.md
@@ -2,23 +2,25 @@
This pages gives an overview about the terms we use, when talking about KUKSA components or systems built with KUKSA.
-* [Terminology](#terminology)
-* [KUKSA.val components](#kuksaval-components)
- * [VSS Server](#vss-server)
- * [Client](#client)
- * [Clients: VSS Consumers](#clients-vss-consumers)
- * [Clients: VSS Providers](#clients-vss-providers)
- * [data-provider](#data-provider)
- * [actuation-provider](#actuation-provider)
-* [Vehicle Signal Specification (VSS)](#vehicle-signal-specification-vss)
- * [Signal](#signal)
- * [Sensor](#sensor)
- * [Actuator](#actuator)
- * [Attribute](#attribute)
- * [Value](#value)
- * [Metadata](#metadata)
- * [Overlay](#overlay)
- * [Datapoint](#datapoint)
+- [Terminology](#terminology)
+- [KUKSA.val components](#kuksaval-components)
+ - [VSS Server](#vss-server)
+ - [Client](#client)
+ - [Clients: VSS Consumers](#clients-vss-consumers)
+ - [Clients: VSS Providers](#clients-vss-providers)
+ - [data-provider](#data-provider)
+ - [actuation-provider](#actuation-provider)
+- [Vehicle Signal Specification (VSS)](#vehicle-signal-specification-vss)
+ - [Signal](#signal)
+ - [Sensor](#sensor)
+ - [Actuator](#actuator)
+ - [Attribute](#attribute)
+ - [Value](#value)
+ - [for kuksa.val.v1](#for-kuksavalv1)
+ - [for kuksa.val.v2:](#for-kuksavalv2)
+ - [Metadata](#metadata)
+ - [Overlay](#overlay)
+ - [Datapoint](#datapoint)
# KUKSA.val components
@@ -64,9 +66,9 @@ A data-provider intends to make sure that the actual state of a vehicle is curre
Historically you also may still find the term "feeder", when referring to a data-provider.
### actuation-provider
-An actuation-provider is trying to ensure that the target value of a VSS actuator is reflected by the actual state of a vehicle.
+An actuation-provider is trying to ensure that the value (called `target_value` for `kuksa.val.v1`) of a VSS actuator is reflected by the actual state of a vehicle.
-To this end, an actuation-provider can subscribe to the target value of a VSS actuator in the server.
+To this end, an actuation-provider can subscribe to the value of a VSS actuator in the server.
If a VSS consumer requests the _desired_ state of the VSS actuator `Vehicle.Body.Trunk.Rear.IsOpen` to be `true`, the actuation-provider for `Vehicle.Body.Trunk.Rear.IsOpen` would try to interact with a vehicle's system trying to unlock and open the trunk.
While from the server's point of view, an actuation provider is just a client, actuation-providers can not be passive towards other in-vehicle systems. Therefore, considering safety in an actuation-provider or underlying systems is very important.
@@ -97,7 +99,13 @@ Actuators are signals that are used to control the desired value of a property.
Attributes are signals that have a default value, specified by its default member in VSS. Like sensors, attribute values can also change, similar to sensor values. The latter can be useful for attribute values that are likely to change during the lifetime of the vehicle. However, attribute values should typically not change more than once per ignition cycle. Example: `Vehicle.VehicleIdentification.VIN`. [[Source]](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/attributes/)
## Value
-The value of a signal. The data type of the value must match the data type specified in the VSS entry for the signal. Currently KUKSA.val supports the _current_value_ for sensors, actuators and attributes as well as _target_value_ for actuators
+The value of a signal. The data type of the value must match the data type specified in the VSS entry for the signal.
+
+#### for kuksa.val.v1
+kuksa.val.v1 supports the _current_value_ for sensors, actuators and attributes as well as _target_value_ for actuators
+
+#### for kuksa.val.v2:
+There are no _current value_ or _target value_ concepts; there is simply a _data value_ for sensors, actuators and attributes.
## Metadata
Metadata of a VSS signal is data belonging to a signal, that is not the value. Standard VSS metadata are [unit](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/data_units/) and [datatype](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/data_types/) as well as some human readable description or comments. Custom metadata entries may be defined in [VSS overlays](https://covesa.github.io/vehicle_signal_specification/rule_set/overlay/). Currently KUKSA.val does not support custom metadata.
diff --git a/doc/user_guide.md b/doc/user_guide.md
index d39b333e..783ec34e 100644
--- a/doc/user_guide.md
+++ b/doc/user_guide.md
@@ -145,7 +145,46 @@ docker run --rm -it --network kuksa -v ./certificates:/opt/kuksa ghcr.io/eclipse
(back to top )
-## Query Syntax
+## APIs supported by Databroker
+
+Kuksa Databroker provides [gRPC](https://grpc.io/) based API endpoints which can be used by
+clients to interact with the server.
+
+gRPC services are specified by means of `.proto` files which define the services and the data
+exchanged between server and client.
+
+[Tooling](https://grpc.io/docs/languages/) is available for most popular programming languages to create
+client stubs for invoking the services.
+
+The Databroker uses gRPC's default HTTP/2 transport and [protocol buffers](https://developers.google.com/protocol-buffers) for message serialization.
+The same `.proto` file can be used to generate server skeleton and client stubs for other transports and serialization formats as well.
+
+HTTP/2 is a binary replacement for HTTP/1.1 used for handling connections, multiplexing (channels) and providing a standardized way to add headers for authorization and TLS for encryption/authentication.
+It also supports bi-directional streaming between client and server.
+
+Kuksa Databroker implements the following service interfaces:
+
+- Enabled on Databroker by default [kuksa.val.v2.VAL](../proto/kuksa/val/v2/val.proto) (recommended to use but still not supported by databroker-cli)
+- Enabled on Databroker by default [kuksa.val.v1.VAL](../proto/kuksa/val/v1/val.proto)
+- Disabled on Databroker by default, use `--enable-databroker-v1` to enable [sdv.databroker.v1.Broker](../proto/sdv/databroker/v1/broker.proto)
+- Disabled on Databroker by default, use `--enable-databroker-v1` to enable [sdv.databroker.v1.Collector](../proto/sdv/databroker/v1/collector.proto)
+
+(back to top )
+
+
+## Current and target value concept vs data value concept.
+For some of the APIs (`sdv.databroker.v1` and `kuksa.val.v1`), the concepts of `current_value` and `target_value` were introduced to differentiate between the expected or desired value for an actuator and the current value published by the provider (both stored in the Databroker's database).
+
+This concept has been removed in `kuksa.val.v2`. Now, there is only a single `data_value` for sensors and actuators, meaning that desired actuator values are simply forwarded from the Signal Consumer to the Databroker and then to the Provider. The Provider is responsible for updating the `data_value` on the Databroker with the value received from the vehicle network.
+
+**Kuksa does not guarantee that the desired actuator value will be fully updated on the vehicle network; it only forwards actuator values from the Signal Consumer to the vehicle network.**
+
+**Do not mix different versions of APIs for providers and clients, as this will cause issues; kuksa.val.v2 is not backward compatible with sdv.databroker.v1 and kuksa.val.v1**
+
+(back to top )
+
+
+## sdv.databroker.v1 Query Syntax, disabled by default, use `--enable-databroker-v1` to enable it
Clients can subscribe to updates of data entries of interest using an SQL-based [query syntax](./QUERY.md).
@@ -229,8 +268,17 @@ Vehicle.Cabin.Door.Row1.Left.IsOpen:
description: Is door open or closed
```
+#### For kuksa.val.v1:
+
The change types currently apply on _current_ values, when subscribing to a _target value_, as an actuation provider would do, any set on the target value is propagated just like in `continuous` mode, even if a datapoint (and thus its current value behavior) is set to `onchange` or `static`. The idea here is, that a "set" by an application is the intent to actuate something (maybe a retry even), and should thus always be forwarded to the provider.
+#### For kuksa.val.v2:
+The concept of _current value_ and _target value_ does not exist in `kuksa.val.v2`; there is simply a _data value_ for `sensor` and `actuator` signals, which are registered by default as `continuous`.
+The change types apply to the _data value_, meaning that if `x-kuksa-changetype` is not specified (`continuous` by default), subscribers will be notified whenever the provider publishes a new value, whether there has been a change or not. Notifications for changes will only occur if the type is set to `onchange`.
+
+(back to top )
+
+
## Configuration Reference
The default configuration can be overridden by means of setting the corresponding environment variables and/or providing options on the command line as illustrated in the previous sections.
@@ -250,30 +298,6 @@ The default configuration can be overridden by means of setting the correspondin
(back to top )
-## API
-
-Kuksa Databroker provides [gRPC](https://grpc.io/) based API endpoints which can be used by
-clients to interact with the server.
-
-gRPC services are specified by means of `.proto` files which define the services and the data
-exchanged between server and client.
-
-[Tooling](https://grpc.io/docs/languages/) is available for most popular programming languages to create
-client stubs for invoking the services.
-
-The Databroker uses gRPC's default HTTP/2 transport and [protocol buffers](https://developers.google.com/protocol-buffers) for message serialization.
-The same `.proto` file can be used to generate server skeleton and client stubs for other transports and serialization formats as well.
-
-HTTP/2 is a binary replacement for HTTP/1.1 used for handling connections, multiplexing (channels) and providing a standardized way to add headers for authorization and TLS for encryption/authentication.
-It also supports bi-directional streaming between client and server.
-
-Kuksa Databroker implements the following service interfaces:
-
-- [kuksa.val.v1.VAL](../proto/kuksa/val/v1/val.proto)
-- [sdv.databroker.v1.Broker](../proto/sdv/databroker/v1/broker.proto)
-- [sdv.databroker.v1.Collector](../proto/sdv/databroker/v1/collector.proto)
-
-(back to top )
## Troubleshooting
diff --git a/jwt/README.md b/jwt/README.md
index 13ba5eb1..f7e29d70 100644
--- a/jwt/README.md
+++ b/jwt/README.md
@@ -9,8 +9,8 @@ For more information on token format see [documentation](../doc/authorization.md
## Available tokens
-* `actuate-provide-all.token` - gives access to set target value and actual value for all signals
-* `provide-all.token` - gives access to set actual value for all signals, but not target value
+* `actuate-provide-all.token` - gives access to set value and actual value for all signals
+* `provide-all.token` - gives access to set actual value for all signals, but not value
* `read-all.token` - gives access to read actual and current value for all signals
* `provide-vehicle-speed.token` - gives access to write and read actual value for Vehicle.Speed. Does not give access to other signals
* `read-vehicle-speed.token` - gives access to read actual value for Vehicle.Speed. Does not give access to other signals
diff --git a/proto/kuksa/val/v1/README.md b/proto/kuksa/val/v1/README.md
index 9baba4b0..777187d2 100644
--- a/proto/kuksa/val/v1/README.md
+++ b/proto/kuksa/val/v1/README.md
@@ -2,5 +2,6 @@
This directory contain a Protobuf API supported by KUKSA.val Databroker, KUKSA.val Python Client and KUKSA.val Go Client.
-This API is under development and will eventually replace the
-[sdv.databroker.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/sdv/databroker/v1) API.
+This API may in the future be deprecated. It is recommended to use
+the [kuksa.val.v2](../v2/val.proto) API, unless you need
+functionality currently only provided by this API.
diff --git a/proto/kuksa/val/v2/README.md b/proto/kuksa/val/v2/README.md
new file mode 100644
index 00000000..10e87723
--- /dev/null
+++ b/proto/kuksa/val/v2/README.md
@@ -0,0 +1,7 @@
+# kuksa.val.v2 protobuf API
+
+This directory contains a Protobuf API supported by KUKSA.val Databroker and KUKSA.val Python Client.
+
+This API is under development and will eventually replace:
+[kuksa.val.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/kuksa/val/v1) API.
+[sdv.databroker.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/sdv/databroker/v1) API.
diff --git a/proto/kuksa/val/v2/types.proto b/proto/kuksa/val/v2/types.proto
new file mode 100644
index 00000000..911e611d
--- /dev/null
+++ b/proto/kuksa/val/v2/types.proto
@@ -0,0 +1,187 @@
+/********************************************************************************
+ * Copyright (c) 2024 Contributors to the Eclipse Foundation
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Apache License 2.0 which is available at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ ********************************************************************************/
+
+syntax = "proto3";
+// Please do not add optional fields due to older proto3 versions limitations
+
+package kuksa.val.v2;
+import "google/protobuf/timestamp.proto";
+
+option go_package = "kuksa/val/v2";
+
+// A Datapoint represents a timestamped value.
+// The 'value' field can be explicitly 'None', meaning the Datapoint exists but no value is present.
+message Datapoint {
+ google.protobuf.Timestamp timestamp = 1; // The timestamp of the datapoint.
+ Value value = 2; // The value associated with the timestamp. If no value is present, this field can be 'None'.
+}
+
+message Value {
+ oneof typed_value {
+ string string = 11;
+ bool bool = 12;
+ sint32 int32 = 13;
+ sint64 int64 = 14;
+ uint32 uint32 = 15;
+ uint64 uint64 = 16;
+ float float = 17;
+ double double = 18;
+ StringArray string_array = 21;
+ BoolArray bool_array = 22;
+ Int32Array int32_array = 23;
+ Int64Array int64_array = 24;
+ Uint32Array uint32_array = 25;
+ Uint64Array uint64_array = 26;
+ FloatArray float_array = 27;
+ DoubleArray double_array = 28;
+ }
+}
+
+message SignalID {
+ oneof signal {
+ // Numeric identifier to the signal
+ // As of today Databroker assigns arbitrary unique numbers to each registered signal
+ // at startup, meaning that identifiers may change after restarting Databroker.
+ // A mechanism for static identifiers may be introduced in the future.
+ int32 id = 1;
+ // Full VSS-style path to a specific signal, like "Vehicle.Speed"
+ // Wildcards and paths to branches are not supported.
+ // The given path must be known by the Databroker.
+ string path = 2;
+ }
+}
+
+message Error {
+ ErrorCode code = 1;
+ string message = 2;
+}
+
+enum ErrorCode {
+ OK = 0;
+ INVALID_ARGUMENT = 1;
+ NOT_FOUND = 2;
+ PERMISSION_DENIED = 3;
+}
+
+message Metadata {
+ // ID field
+ int32 id = 10;
+
+ // Data type
+ // The VSS data type of the entry (i.e. the value, min, max etc).
+ //
+ // NOTE: protobuf doesn't have int8, int16, uint8 or uint16 which means
+ // that these values must be serialized as int32 and uint32 respectively.
+ DataType data_type = 11;
+
+ // Entry type
+ EntryType entry_type = 12;
+
+ // Description
+ // Describes the meaning and content of the entry.
+ string description = 13;
+
+ // Comment
+ // A comment can be used to provide additional informal information
+ // on a entry.
+ string comment = 14;
+
+ // Deprecation
+ // Whether this entry is deprecated. Can contain recommendations of what
+ // to use instead.
+ string deprecation = 15;
+
+ // Unit
+ // The unit of measurement
+ string unit = 16;
+
+ // Value restrictions checked/enforced by Databroker
+ Value allowed_values = 17; // Must be of array type
+ Value min = 18;
+ Value max = 19;
+}
+
+// VSS Data type of a signal
+//
+// Protobuf doesn't support int8, int16, uint8 or uint16.
+// These are mapped to int32 and uint32 respectively.
+//
+enum DataType {
+ DATA_TYPE_UNSPECIFIED = 0;
+ DATA_TYPE_STRING = 1;
+ DATA_TYPE_BOOLEAN = 2;
+ DATA_TYPE_INT8 = 3;
+ DATA_TYPE_INT16 = 4;
+ DATA_TYPE_INT32 = 5;
+ DATA_TYPE_INT64 = 6;
+ DATA_TYPE_UINT8 = 7;
+ DATA_TYPE_UINT16 = 8;
+ DATA_TYPE_UINT32 = 9;
+ DATA_TYPE_UINT64 = 10;
+ DATA_TYPE_FLOAT = 11;
+ DATA_TYPE_DOUBLE = 12;
+ DATA_TYPE_TIMESTAMP = 13;
+ DATA_TYPE_STRING_ARRAY = 20;
+ DATA_TYPE_BOOLEAN_ARRAY = 21;
+ DATA_TYPE_INT8_ARRAY = 22;
+ DATA_TYPE_INT16_ARRAY = 23;
+ DATA_TYPE_INT32_ARRAY = 24;
+ DATA_TYPE_INT64_ARRAY = 25;
+ DATA_TYPE_UINT8_ARRAY = 26;
+ DATA_TYPE_UINT16_ARRAY = 27;
+ DATA_TYPE_UINT32_ARRAY = 28;
+ DATA_TYPE_UINT64_ARRAY = 29;
+ DATA_TYPE_FLOAT_ARRAY = 30;
+ DATA_TYPE_DOUBLE_ARRAY = 31;
+ DATA_TYPE_TIMESTAMP_ARRAY = 32;
+}
+
+// Entry type
+enum EntryType {
+ ENTRY_TYPE_UNSPECIFIED = 0;
+ ENTRY_TYPE_ATTRIBUTE = 1;
+ ENTRY_TYPE_SENSOR = 2;
+ ENTRY_TYPE_ACTUATOR = 3;
+}
+
+message StringArray {
+ repeated string values = 1;
+}
+
+message BoolArray {
+ repeated bool values = 1;
+}
+
+message Int32Array {
+ repeated sint32 values = 1;
+}
+
+message Int64Array {
+ repeated sint64 values = 1;
+}
+
+message Uint32Array {
+ repeated uint32 values = 1;
+}
+
+message Uint64Array {
+ repeated uint64 values = 1;
+}
+
+message FloatArray {
+ repeated float values = 1;
+}
+
+message DoubleArray {
+ repeated double values = 1;
+}
diff --git a/proto/kuksa/val/v2/val.proto b/proto/kuksa/val/v2/val.proto
new file mode 100644
index 00000000..b880b847
--- /dev/null
+++ b/proto/kuksa/val/v2/val.proto
@@ -0,0 +1,296 @@
+/********************************************************************************
+ * Copyright (c) 2024 Contributors to the Eclipse Foundation
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Apache License 2.0 which is available at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ ********************************************************************************/
+
+syntax = "proto3";
+// Please do not add optional fields due to older proto3 versions limitations
+
+package kuksa.val.v2;
+
+option go_package = "kuksa/val/v2";
+
+import "kuksa/val/v2/types.proto";
+
+service VAL {
+ // Get the latest value of a signal
+  // If the signal exists but does not have a valid value
+ // a DataPoint where value is None shall be returned.
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if the requested signal doesn't exist
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied
+ // INVALID_ARGUMENT if the request is empty or provided path is too long
+ // - MAX_REQUEST_PATH_LENGTH: usize = 1000;
+ //
+ rpc GetValue(GetValueRequest) returns (GetValueResponse);
+
+ // Get the latest values of a set of signals.
+ // The returned list of data points has the same order as the list of the request.
+ // If a requested signal has no value a DataPoint where value is None will be returned.
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the requested signals doesn't exist.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied for any of the requested signals.
+ // INVALID_ARGUMENT if the request is empty or provided path is too long
+ // - MAX_REQUEST_PATH_LENGTH: usize = 1000;
+ //
+ rpc GetValues(GetValuesRequest) returns (GetValuesResponse);
+
+ // Subscribe to a set of signals using string path parameters
+ // Returns (GRPC error code):
+  //   NOT_FOUND if any of the signals are non-existent.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied for any of the signals.
+ // INVALID_ARGUMENT if the request is empty or provided path is too long
+ // - MAX_REQUEST_PATH_LENGTH: usize = 1000;
+ //
+ // When subscribing, Databroker shall immediately return the value for all
+ // subscribed entries.
+  // If a value isn't available when subscribing to it, it should return None
+ //
+ rpc Subscribe(SubscribeRequest) returns (stream SubscribeResponse);
+
+ // Subscribe to a set of signals using i32 id parameters
+ // Returns (GRPC error code):
+  //   NOT_FOUND if any of the signals are non-existent.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied for any of the signals.
+ // INVALID_ARGUMENT if the request is empty or provided path is too long
+ //
+ // When subscribing, Databroker shall immediately return the value for all
+ // subscribed entries.
+  // If a value isn't available when subscribing to it, it should return None
+ //
+ rpc SubscribeById(SubscribeByIdRequest) returns (stream SubscribeByIdResponse);
+
+ // Actuate a single actuator
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if the actuator does not exist.
+ // PERMISSION_DENIED if access is denied for the actuator.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // UNAVAILABLE if there is no provider currently providing the actuator
+  //   DATA_LOSS if there is an internal TransmissionFailure
+ // INVALID_ARGUMENT
+ // - if the provided path is not an actuator.
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the requested value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if the provided value is out of the min/max range specified
+ //
+ rpc Actuate(ActuateRequest) returns (ActuateResponse);
+
+ // Actuate simultaneously multiple actuators.
+ // If any error occurs, the entire operation will be aborted
+ // and no single actuator value will be forwarded to the provider.
+ //
+ // Returns (GRPC error code):
+  //   NOT_FOUND if any of the actuators are non-existent.
+ // PERMISSION_DENIED if access is denied for any of the actuators.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // UNAVAILABLE if there is no provider currently providing an actuator
+  //   DATA_LOSS if there is an internal TransmissionFailure
+ // INVALID_ARGUMENT
+ // - if any of the provided path is not an actuator.
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the requested value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if any of the provided actuators values are out of the min/max range specified
+ //
+ rpc BatchActuate(BatchActuateRequest) returns (BatchActuateResponse);
+
+ // List metadata of signals matching the request.
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if the specified root branch does not exist.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // INVALID_ARGUMENT if the provided path or wildcard is wrong.
+ //
+ rpc ListMetadata(ListMetadataRequest) returns (ListMetadataResponse);
+
+ // Publish a signal value. Used for low frequency signals (e.g. attributes).
+ //
+ // Returns (GRPC error code):
+  //   NOT_FOUND if any of the signals are non-existent.
+ // PERMISSION_DENIED
+ // - if access is denied for any of the signals.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // INVALID_ARGUMENT
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the published value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if the published value is out of the min/max range specified
+ //
+ rpc PublishValue(PublishValueRequest) returns (PublishValueResponse);
+
+ // Open a stream used to provide actuation and/or publishing values using
+ // a streaming interface. Used to provide actuators and to enable high frequency
+ // updates of values.
+ //
+ // The open stream is used for request / response type communication between the
+ // provider and server (where the initiator of a request can vary).
+ //
+ // Errors:
+ // - Provider sends ProvideActuationRequest -> Databroker returns ProvideActuationResponse
+ // Returns (GRPC error code) and closes the stream call (strict case).
+  //       NOT_FOUND if any of the signals are non-existent.
+ // PERMISSION_DENIED if access is denied for any of the signals.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // ALREADY_EXISTS if a provider already claimed the ownership of an actuator
+ //
+ // - Provider sends PublishValuesRequest -> Databroker returns PublishValuesResponse upon error, and nothing upon success
+ // GRPC errors are returned as messages in the stream
+  //       response with the signal id `map<int32, Error> status = 2;` (permissive case)
+  //       NOT_FOUND if a signal is non-existent.
+ // PERMISSION_DENIED
+ // - if access is denied for a signal.
+ // INVALID_ARGUMENT
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the published value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if the published value is out of the min/max range specified
+ //
+ // - Provider returns BatchActuateStreamResponse <- Databroker sends BatchActuateStreamRequest
+ // No error definition, a BatchActuateStreamResponse is expected from provider.
+ //
+ rpc OpenProviderStream(stream OpenProviderStreamRequest) returns (stream OpenProviderStreamResponse);
+
+ // Get server information
+ rpc GetServerInfo(GetServerInfoRequest) returns (GetServerInfoResponse);
+}
+
+message GetValueRequest {
+ SignalID signal_id = 1;
+}
+
+message GetValueResponse {
+ Datapoint data_point = 1;
+}
+
+message GetValuesRequest {
+ repeated SignalID signal_ids = 1;
+}
+
+message GetValuesResponse {
+ repeated Datapoint data_points = 1;
+}
+
+message SubscribeRequest {
+ repeated string signal_paths = 1;
+}
+
+message SubscribeResponse {
+  map<string, Datapoint> entries = 1;
+}
+
+message SubscribeByIdRequest {
+ repeated int32 signal_ids = 1;
+}
+
+message SubscribeByIdResponse {
+  map<int32, Datapoint> entries = 1;
+}
+
+message ActuateRequest {
+ SignalID signal_id = 1;
+ Value value = 2;
+}
+
+message ActuateResponse {
+}
+
+message BatchActuateRequest {
+ repeated ActuateRequest actuate_requests = 1;
+}
+
+message BatchActuateResponse {
+}
+
+message ListMetadataRequest {
+ string root = 1;
+ string filter = 2;
+}
+
+message ListMetadataResponse {
+ repeated Metadata metadata = 1;
+}
+
+message PublishValueRequest {
+ SignalID signal_id = 1;
+ Datapoint data_point = 2;
+}
+
+message PublishValueResponse {
+}
+
+message PublishValuesRequest {
+ int32 request_id = 1; /// Unique request id for the stream that can be used to identify the response.
+  map<int32, Datapoint> datapoints = 2;
+}
+
+message PublishValuesResponse {
+ int32 request_id = 1;
+  map<int32, Error> status = 2;
+}
+
+message ProvideActuationRequest {
+ repeated SignalID actuator_identifiers = 1;
+}
+
+message ProvideActuationResponse {
+}
+
+message BatchActuateStreamRequest {
+ repeated ActuateRequest actuate_requests = 1;
+}
+
+message BatchActuateStreamResponse {
+}
+
+message OpenProviderStreamRequest {
+ oneof action {
+ // Inform server of an actuator this provider provides.
+ ProvideActuationRequest provide_actuation_request = 1;
+ // Publish a value.
+ PublishValuesRequest publish_values_request = 2;
+ // Sent to acknowledge the acceptance of a batch actuate
+ // request.
+ BatchActuateStreamResponse batch_actuate_stream_response = 3;
+ }
+}
+
+message OpenProviderStreamResponse {
+ oneof action {
+ // Response to a provide actuator request.
+ ProvideActuationResponse provide_actuation_response = 1;
+ // Acknowledgement that a published value was received.
+ PublishValuesResponse publish_values_response = 2;
+ // Send a batch actuate request to a provider.
+ BatchActuateStreamRequest batch_actuate_stream_request = 3;
+ }
+}
+
+message GetServerInfoRequest {
+ // Nothing yet
+}
+
+message GetServerInfoResponse {
+ string name = 1;
+ string version = 2;
+ string commit_hash = 3;
+}
diff --git a/proto/sdv/databroker/v1/README.md b/proto/sdv/databroker/v1/README.md
index a669e5a3..440b77bb 100644
--- a/proto/sdv/databroker/v1/README.md
+++ b/proto/sdv/databroker/v1/README.md
@@ -2,11 +2,6 @@
This directory contain a Protobuf API supported by KUKSA.val Databroker.
-As of today KUKSA.val Databroker supports both this API and the
-[kuksa.val.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/kuksa/val/v1) API.
-The [kuksa.val.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/kuksa/val/v1) API is the newer API and is still
-in development. It does not yet support all features supported by this API.
-
This API may in the future be deprecated. It is recommended to use
-the [kuksa.val.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/kuksa/val/v1) API, unless you need
+the [kuksa.val.v2](../../../kuksa/val/v2/val.proto) API, unless you need
functionality currently only provided by this API.
diff --git a/proto/sdv/databroker/v1/types.proto b/proto/sdv/databroker/v1/types.proto
index 44988098..4c002192 100644
--- a/proto/sdv/databroker/v1/types.proto
+++ b/proto/sdv/databroker/v1/types.proto
@@ -12,6 +12,7 @@
********************************************************************************/
syntax = "proto3";
+// Please do not add optional fields due to older proto3 versions limitations
import "google/protobuf/timestamp.proto";
@@ -143,6 +144,20 @@ message Datapoint {
}
}
+message Metadata {
+ int32 id = 1;
+ EntryType entry_type = 2;
+ string name = 4;
+ DataType data_type = 5;
+ ChangeType change_type = 6; // CONTINUOUS or STATIC or ON_CHANGE
+ string description = 7;
+
+ // Value restrictions checked/enforced by Databroker.
+ Allowed allowed = 10;
+ ValueRestriction min = 11;
+ ValueRestriction max = 12;
+}
+
message Allowed {
oneof values {
StringArray string_values = 1;
@@ -155,15 +170,15 @@ message Allowed {
}
}
-message Metadata {
- int32 id = 1;
- EntryType entry_type = 2;
- string name = 4;
- DataType data_type = 5;
- ChangeType change_type = 6; // CONTINUOUS or STATIC or ON_CHANGE
- string description = 7;
-
- Allowed allowed = 10;
- // int32 min_update_hz = 10; // Only for CONTINUOUS
- // int32 max_update_hz = 11; // Only for CONTINUOUS
-};
+message ValueRestriction {
+ oneof typed_value {
+ string string = 1;
+ bool bool = 2;
+ sint32 int32 = 3;
+ sint64 int64 = 4;
+ uint32 uint32 = 5;
+ uint64 uint64 = 6;
+ float float = 7;
+ double double = 8;
+ }
+}