diff --git a/.github/workflows/kuksa_databroker_build.yml b/.github/workflows/kuksa_databroker_build.yml
index 9d204866..7765acab 100644
--- a/.github/workflows/kuksa_databroker_build.yml
+++ b/.github/workflows/kuksa_databroker_build.yml
@@ -24,6 +24,9 @@ on:
QUAY_IO_USERNAME:
required: true
workflow_dispatch:
+      # Run every Sunday at 04:00 UTC to check for regressions, for example from clippy
+ schedule:
+ - cron: "0 4 * * 0"
# suffix to avoid cancellation when running from release workflow
concurrency:
diff --git a/Cargo.lock b/Cargo.lock
index 6a18de88..8191f980 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4,9 +4,9 @@ version = 3
[[package]]
name = "addr2line"
-version = "0.24.1"
+version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375"
+checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
dependencies = [
"gimli",
]
@@ -40,9 +40,9 @@ dependencies = [
[[package]]
name = "allocator-api2"
-version = "0.2.18"
+version = "0.2.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
+checksum = "45862d1c77f2228b9e10bc609d5bc203d86ebc9b87ad8d5d5167a6c9abf739d9"
[[package]]
name = "android-tzdata"
@@ -70,9 +70,9 @@ dependencies = [
[[package]]
name = "anstream"
-version = "0.6.15"
+version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526"
+checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b"
dependencies = [
"anstyle",
"anstyle-parse",
@@ -85,43 +85,43 @@ dependencies = [
[[package]]
name = "anstyle"
-version = "1.0.8"
+version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1"
+checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9"
[[package]]
name = "anstyle-parse"
-version = "0.2.5"
+version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb"
+checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
-version = "1.1.1"
+version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a"
+checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c"
dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
name = "anstyle-wincon"
-version = "3.0.4"
+version = "3.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8"
+checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125"
dependencies = [
"anstyle",
- "windows-sys 0.52.0",
+ "windows-sys 0.59.0",
]
[[package]]
name = "anyhow"
-version = "1.0.87"
+version = "1.0.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8"
+checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"
[[package]]
name = "arc-swap"
@@ -131,9 +131,9 @@ checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
[[package]]
name = "arrayref"
-version = "0.3.8"
+version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a"
+checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb"
[[package]]
name = "arrayvec"
@@ -143,9 +143,9 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "async-stream"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
dependencies = [
"async-stream-impl",
"futures-core",
@@ -154,31 +154,31 @@ dependencies = [
[[package]]
name = "async-stream-impl"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
name = "async-trait"
-version = "0.1.82"
+version = "0.1.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1"
+checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
name = "autocfg"
-version = "1.3.0"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "autotools"
@@ -253,7 +253,7 @@ dependencies = [
"miniz_oxide",
"object",
"rustc-demangle",
- "windows-targets 0.52.6",
+ "windows-targets",
]
[[package]]
@@ -308,12 +308,12 @@ dependencies = [
[[package]]
name = "bstr"
-version = "1.10.0"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "40723b8fb387abc38f4f4a37c09073622e41dd12327033091ef8950659e6dc0c"
+checksum = "1a68f1f47cdf0ec8ee4b941b2eee2a80cb796db73118c0dd09ac63fbe405be22"
dependencies = [
"memchr",
- "regex-automata 0.4.7",
+ "regex-automata 0.4.9",
"serde",
]
@@ -337,9 +337,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
-version = "1.7.1"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50"
+checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da"
[[package]]
name = "camino"
@@ -370,14 +370,14 @@ dependencies = [
"semver",
"serde",
"serde_json",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
name = "cc"
-version = "1.1.18"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476"
+checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47"
dependencies = [
"shlex",
]
@@ -399,14 +399,14 @@ dependencies = [
"js-sys",
"num-traits",
"wasm-bindgen",
- "windows-targets 0.52.6",
+ "windows-targets",
]
[[package]]
name = "clap"
-version = "4.5.17"
+version = "4.5.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e5a21b8495e732f1b3c364c9949b201ca7bae518c502c80256c96ad79eaf6ac"
+checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f"
dependencies = [
"clap_builder",
"clap_derive",
@@ -414,9 +414,9 @@ dependencies = [
[[package]]
name = "clap_builder"
-version = "4.5.17"
+version = "4.5.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cf2dd12af7a047ad9d6da2b6b249759a22a7abc0f474c1dae1777afa4b21a73"
+checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec"
dependencies = [
"anstream",
"anstyle",
@@ -427,21 +427,21 @@ dependencies = [
[[package]]
name = "clap_derive"
-version = "4.5.13"
+version = "4.5.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0"
+checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab"
dependencies = [
"heck 0.5.0",
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
name = "clap_lex"
-version = "0.7.2"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97"
+checksum = "afb84c814227b90d6895e01398aee0d8033c00e7466aca416fb6a8e0eb19d8a7"
[[package]]
name = "clru"
@@ -451,9 +451,9 @@ checksum = "cbd0f76e066e64fdc5631e3bb46381254deab9ef1158292f27c8c57e3bf3fe59"
[[package]]
name = "colorchoice"
-version = "1.0.2"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0"
+checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990"
[[package]]
name = "console"
@@ -482,9 +482,9 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "cpufeatures"
-version = "0.2.14"
+version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0"
+checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3"
dependencies = [
"libc",
]
@@ -586,7 +586,7 @@ dependencies = [
"proc-macro2",
"quote",
"regex",
- "syn 2.0.77",
+ "syn 2.0.89",
"synthez",
]
@@ -625,9 +625,10 @@ checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2"
[[package]]
name = "databroker"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"anyhow",
+ "async-trait",
"axum",
"chrono",
"clap",
@@ -650,11 +651,12 @@ dependencies = [
"serde",
"serde_json",
"sqlparser",
- "thiserror",
+ "thiserror 1.0.69",
"tokio",
"tokio-stream",
"tonic 0.11.0",
"tonic-mock",
+ "tonic-reflection",
"tracing",
"tracing-opentelemetry",
"tracing-subscriber",
@@ -664,7 +666,7 @@ dependencies = [
[[package]]
name = "databroker-cli"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"ansi_term",
"clap",
@@ -684,7 +686,7 @@ dependencies = [
[[package]]
name = "databroker-proto"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"prost 0.12.6",
"prost-types",
@@ -710,7 +712,7 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -754,6 +756,17 @@ dependencies = [
"winapi",
]
+[[package]]
+name = "displaydoc"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.89",
+]
+
[[package]]
name = "drain_filter_polyfill"
version = "0.1.3"
@@ -802,9 +815,9 @@ checksum = "a2a2b11eda1d40935b26cf18f6833c526845ae8c41e58d09af6adeb6f0269183"
[[package]]
name = "fastrand"
-version = "2.1.1"
+version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
+checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4"
[[package]]
name = "filetime"
@@ -826,9 +839,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flate2"
-version = "1.0.33"
+version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253"
+checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c"
dependencies = [
"crc32fast",
"miniz_oxide",
@@ -851,9 +864,9 @@ dependencies = [
[[package]]
name = "futures"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0"
+checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
@@ -866,9 +879,9 @@ dependencies = [
[[package]]
name = "futures-channel"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
+checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
"futures-core",
"futures-sink",
@@ -876,15 +889,15 @@ dependencies = [
[[package]]
name = "futures-core"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
+checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-executor"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d"
+checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
dependencies = [
"futures-core",
"futures-task",
@@ -893,38 +906,38 @@ dependencies = [
[[package]]
name = "futures-io"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
+checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
[[package]]
name = "futures-macro"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
+checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
name = "futures-sink"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"
+checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
[[package]]
name = "futures-task"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
+checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
[[package]]
name = "futures-util"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
+checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
@@ -983,17 +996,17 @@ dependencies = [
"quote",
"serde",
"serde_json",
- "syn 2.0.77",
+ "syn 2.0.89",
"textwrap",
- "thiserror",
+ "thiserror 1.0.69",
"typed-builder",
]
[[package]]
name = "gimli"
-version = "0.31.0"
+version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64"
+checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "gix"
@@ -1034,7 +1047,7 @@ dependencies = [
"parking_lot",
"signal-hook",
"smallvec",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1047,26 +1060,26 @@ dependencies = [
"gix-date",
"gix-utils",
"itoa",
- "thiserror",
+ "thiserror 1.0.69",
"winnow",
]
[[package]]
name = "gix-bitmap"
-version = "0.2.11"
+version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a371db66cbd4e13f0ed9dc4c0fea712d7276805fccc877f77e96374d317e87ae"
+checksum = "d48b897b4bbc881aea994b4a5bbb340a04979d7be9089791304e04a9fbc66b53"
dependencies = [
- "thiserror",
+ "thiserror 2.0.3",
]
[[package]]
name = "gix-chunk"
-version = "0.4.8"
+version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "45c8751169961ba7640b513c3b24af61aa962c967aaf04116734975cd5af0c52"
+checksum = "c6ffbeb3a5c0b8b84c3fe4133a6f8c82fa962f4caefe8d0762eced025d3eb4f7"
dependencies = [
- "thiserror",
+ "thiserror 2.0.3",
]
[[package]]
@@ -1080,7 +1093,7 @@ dependencies = [
"gix-features",
"gix-hash",
"memmap2",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1099,22 +1112,22 @@ dependencies = [
"memchr",
"once_cell",
"smallvec",
- "thiserror",
+ "thiserror 1.0.69",
"unicode-bom",
"winnow",
]
[[package]]
name = "gix-config-value"
-version = "0.14.8"
+version = "0.14.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03f76169faa0dec598eac60f83d7fcdd739ec16596eca8fb144c88973dbe6f8c"
+checksum = "49aaeef5d98390a3bcf9dbc6440b520b793d1bf3ed99317dc407b02be995b28e"
dependencies = [
"bitflags 2.6.0",
"bstr",
"gix-path",
"libc",
- "thiserror",
+ "thiserror 2.0.3",
]
[[package]]
@@ -1125,7 +1138,7 @@ checksum = "9eed6931f21491ee0aeb922751bd7ec97b4b2fe8fbfedcb678e2a2dce5f3b8c0"
dependencies = [
"bstr",
"itoa",
- "thiserror",
+ "thiserror 1.0.69",
"time",
]
@@ -1138,7 +1151,7 @@ dependencies = [
"bstr",
"gix-hash",
"gix-object",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1154,7 +1167,7 @@ dependencies = [
"gix-path",
"gix-ref",
"gix-sec",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1172,7 +1185,7 @@ dependencies = [
"once_cell",
"prodash",
"sha1_smol",
- "thiserror",
+ "thiserror 1.0.69",
"walkdir",
]
@@ -1206,7 +1219,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f93d7df7366121b5018f947a04d37f034717e113dcf9ccd85c34b58e57a74d5e"
dependencies = [
"faster-hex",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1245,7 +1258,7 @@ dependencies = [
"memmap2",
"rustix",
"smallvec",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1256,7 +1269,7 @@ checksum = "e3bc7fe297f1f4614774989c00ec8b1add59571dc9b024b4c00acb7dedd4e19d"
dependencies = [
"gix-tempfile",
"gix-utils",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1267,7 +1280,7 @@ checksum = "999ce923619f88194171a67fb3e6d613653b8d4d6078b529b15a765da0edcc17"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -1285,7 +1298,7 @@ dependencies = [
"gix-validate",
"itoa",
"smallvec",
- "thiserror",
+ "thiserror 1.0.69",
"winnow",
]
@@ -1306,7 +1319,7 @@ dependencies = [
"gix-quote",
"parking_lot",
"tempfile",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1324,31 +1337,31 @@ dependencies = [
"gix-path",
"memmap2",
"smallvec",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
name = "gix-path"
-version = "0.10.11"
+version = "0.10.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebfc4febd088abdcbc9f1246896e57e37b7a34f6909840045a1767c6dafac7af"
+checksum = "afc292ef1a51e340aeb0e720800338c805975724c1dfbd243185452efd8645b7"
dependencies = [
"bstr",
"gix-trace",
"home",
"once_cell",
- "thiserror",
+ "thiserror 2.0.3",
]
[[package]]
name = "gix-quote"
-version = "0.4.12"
+version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cbff4f9b9ea3fa7a25a70ee62f545143abef624ac6aa5884344e70c8b0a1d9ff"
+checksum = "64a1e282216ec2ab2816cd57e6ed88f8009e634aec47562883c05ac8a7009a63"
dependencies = [
"bstr",
"gix-utils",
- "thiserror",
+ "thiserror 2.0.3",
]
[[package]]
@@ -1369,7 +1382,7 @@ dependencies = [
"gix-utils",
"gix-validate",
"memmap2",
- "thiserror",
+ "thiserror 1.0.69",
"winnow",
]
@@ -1384,7 +1397,7 @@ dependencies = [
"gix-revision",
"gix-validate",
"smallvec",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1400,7 +1413,7 @@ dependencies = [
"gix-object",
"gix-revwalk",
"gix-trace",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1415,14 +1428,14 @@ dependencies = [
"gix-hashtable",
"gix-object",
"smallvec",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
name = "gix-sec"
-version = "0.10.8"
+version = "0.10.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fe4d52f30a737bbece5276fab5d3a8b276dc2650df963e293d0673be34e7a5f"
+checksum = "a8b876ef997a955397809a2ec398d6a45b7a55b4918f2446344330f778d14fd6"
dependencies = [
"bitflags 2.6.0",
"gix-path",
@@ -1447,9 +1460,9 @@ dependencies = [
[[package]]
name = "gix-trace"
-version = "0.1.10"
+version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6cae0e8661c3ff92688ce1c8b8058b3efb312aba9492bbe93661a21705ab431b"
+checksum = "04bdde120c29f1fc23a24d3e115aeeea3d60d8e65bab92cc5f9d90d9302eb952"
[[package]]
name = "gix-traverse"
@@ -1465,7 +1478,7 @@ dependencies = [
"gix-object",
"gix-revwalk",
"smallvec",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1478,15 +1491,15 @@ dependencies = [
"gix-features",
"gix-path",
"home",
- "thiserror",
+ "thiserror 1.0.69",
"url",
]
[[package]]
name = "gix-utils"
-version = "0.1.12"
+version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "35192df7fd0fa112263bad8021e2df7167df4cc2a6e6d15892e1e55621d3d4dc"
+checksum = "ba427e3e9599508ed98a6ddf8ed05493db114564e338e41f6a996d2e4790335f"
dependencies = [
"fastrand",
"unicode-normalization",
@@ -1499,7 +1512,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82c27dd34a49b1addf193c92070bcbf3beaf6e10f16a78544de6372e146a0acf"
dependencies = [
"bstr",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
@@ -1517,8 +1530,8 @@ dependencies = [
"aho-corasick",
"bstr",
"log",
- "regex-automata 0.4.7",
- "regex-syntax 0.8.4",
+ "regex-automata 0.4.9",
+ "regex-syntax 0.8.5",
]
[[package]]
@@ -1544,7 +1557,7 @@ dependencies = [
"futures-sink",
"futures-util",
"http",
- "indexmap 2.5.0",
+ "indexmap 2.6.0",
"slab",
"tokio",
"tokio-util",
@@ -1567,6 +1580,12 @@ dependencies = [
"allocator-api2",
]
+[[package]]
+name = "hashbrown"
+version = "0.15.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
+
[[package]]
name = "heck"
version = "0.4.1"
@@ -1618,9 +1637,9 @@ dependencies = [
[[package]]
name = "httparse"
-version = "1.9.4"
+version = "1.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9"
+checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946"
[[package]]
name = "httpdate"
@@ -1636,9 +1655,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "hyper"
-version = "0.14.30"
+version = "0.14.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9"
+checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85"
dependencies = [
"bytes",
"futures-channel",
@@ -1672,9 +1691,9 @@ dependencies = [
[[package]]
name = "iana-time-zone"
-version = "0.1.60"
+version = "0.1.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141"
+checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220"
dependencies = [
"android_system_properties",
"core-foundation-sys",
@@ -1693,14 +1712,143 @@ dependencies = [
"cc",
]
+[[package]]
+name = "icu_collections"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526"
+dependencies = [
+ "displaydoc",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637"
+dependencies = [
+ "displaydoc",
+ "litemap",
+ "tinystr",
+ "writeable",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_locid_transform_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locid_transform_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e"
+
+[[package]]
+name = "icu_normalizer"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_normalizer_data",
+ "icu_properties",
+ "icu_provider",
+ "smallvec",
+ "utf16_iter",
+ "utf8_iter",
+ "write16",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_normalizer_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516"
+
+[[package]]
+name = "icu_properties"
+version = "1.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_locid_transform",
+ "icu_properties_data",
+ "icu_provider",
+ "tinystr",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_properties_data"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569"
+
+[[package]]
+name = "icu_provider"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9"
+dependencies = [
+ "displaydoc",
+ "icu_locid",
+ "icu_provider_macros",
+ "stable_deref_trait",
+ "tinystr",
+ "writeable",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_provider_macros"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.89",
+]
+
[[package]]
name = "idna"
-version = "0.5.0"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6"
+checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e"
dependencies = [
- "unicode-bidi",
- "unicode-normalization",
+ "idna_adapter",
+ "smallvec",
+ "utf8_iter",
+]
+
+[[package]]
+name = "idna_adapter"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71"
+dependencies = [
+ "icu_normalizer",
+ "icu_properties",
]
[[package]]
@@ -1713,7 +1861,7 @@ dependencies = [
"globset",
"log",
"memchr",
- "regex-automata 0.4.7",
+ "regex-automata 0.4.9",
"same-file",
"walkdir",
"winapi-util",
@@ -1731,12 +1879,12 @@ dependencies = [
[[package]]
name = "indexmap"
-version = "2.5.0"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5"
+checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
dependencies = [
"equivalent",
- "hashbrown 0.14.5",
+ "hashbrown 0.15.2",
]
[[package]]
@@ -1777,9 +1925,9 @@ dependencies = [
[[package]]
name = "itoa"
-version = "1.0.11"
+version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
[[package]]
name = "jemalloc-sys"
@@ -1803,9 +1951,9 @@ dependencies = [
[[package]]
name = "js-sys"
-version = "0.3.70"
+version = "0.3.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a"
+checksum = "6a88f1bda2bd75b0452a14784937d796722fdebfe50df998aeb3f0b7603019a9"
dependencies = [
"wasm-bindgen",
]
@@ -1827,7 +1975,7 @@ dependencies = [
[[package]]
name = "kuksa"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"databroker-proto",
"http",
@@ -1839,7 +1987,7 @@ dependencies = [
[[package]]
name = "kuksa-common"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"databroker-proto",
"http",
@@ -1850,7 +1998,7 @@ dependencies = [
[[package]]
name = "kuksa-sdv"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"databroker-proto",
"http",
@@ -1880,7 +2028,7 @@ dependencies = [
"proc-macro2",
"quote",
"regex",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -1891,9 +2039,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
-version = "0.2.158"
+version = "0.2.165"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+checksum = "fcb4d3d38eab6c5239a362fa8bae48c03baf980a6e7079f063942d563ef3533e"
[[package]]
name = "libredox"
@@ -1903,7 +2051,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d"
dependencies = [
"bitflags 2.6.0",
"libc",
- "redox_syscall 0.5.3",
+ "redox_syscall 0.5.7",
]
[[package]]
@@ -1929,6 +2077,12 @@ version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
+[[package]]
+name = "litemap"
+version = "0.7.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104"
+
[[package]]
name = "lock_api"
version = "0.4.12"
@@ -1968,9 +2122,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "memmap2"
-version = "0.9.4"
+version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe751422e4a8caa417e13c3ea66452215d7d63e19e604f4980461212f3ae1322"
+checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f"
dependencies = [
"libc",
]
@@ -2117,18 +2271,18 @@ dependencies = [
[[package]]
name = "object"
-version = "0.36.4"
+version = "0.36.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a"
+checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
-version = "1.19.0"
+version = "1.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
[[package]]
name = "opentelemetry"
@@ -2153,7 +2307,7 @@ dependencies = [
"opentelemetry",
"opentelemetry-proto",
"prost 0.11.9",
- "thiserror",
+ "thiserror 1.0.69",
"tokio",
"tonic 0.8.3",
]
@@ -2192,7 +2346,7 @@ dependencies = [
"indexmap 1.9.3",
"once_cell",
"pin-project-lite",
- "thiserror",
+ "thiserror 1.0.69",
"urlencoding",
]
@@ -2213,7 +2367,7 @@ dependencies = [
"opentelemetry_api",
"percent-encoding",
"rand",
- "thiserror",
+ "thiserror 1.0.69",
"tokio",
"tokio-stream",
]
@@ -2242,9 +2396,9 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8"
dependencies = [
"cfg-if",
"libc",
- "redox_syscall 0.5.3",
+ "redox_syscall 0.5.7",
"smallvec",
- "windows-targets 0.52.6",
+ "windows-targets",
]
[[package]]
@@ -2297,7 +2451,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
dependencies = [
"fixedbitset",
- "indexmap 2.5.0",
+ "indexmap 2.6.0",
]
[[package]]
@@ -2340,29 +2494,29 @@ dependencies = [
[[package]]
name = "pin-project"
-version = "1.1.5"
+version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3"
+checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
-version = "1.1.5"
+version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
+checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
name = "pin-project-lite"
-version = "0.2.14"
+version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
+checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff"
[[package]]
name = "pin-utils"
@@ -2387,19 +2541,19 @@ dependencies = [
[[package]]
name = "prettyplease"
-version = "0.2.22"
+version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba"
+checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033"
dependencies = [
"proc-macro2",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
name = "proc-macro2"
-version = "1.0.86"
+version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
dependencies = [
"unicode-ident",
]
@@ -2447,7 +2601,7 @@ dependencies = [
"prost 0.12.6",
"prost-types",
"regex",
- "syn 2.0.77",
+ "syn 2.0.89",
"tempfile",
]
@@ -2474,7 +2628,7 @@ dependencies = [
"itertools 0.12.1",
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -2542,9 +2696,9 @@ checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
[[package]]
name = "redox_syscall"
-version = "0.5.3"
+version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4"
+checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f"
dependencies = [
"bitflags 2.6.0",
]
@@ -2568,19 +2722,19 @@ checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43"
dependencies = [
"getrandom 0.2.15",
"libredox",
- "thiserror",
+ "thiserror 1.0.69",
]
[[package]]
name = "regex"
-version = "1.10.6"
+version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
- "regex-automata 0.4.7",
- "regex-syntax 0.8.4",
+ "regex-automata 0.4.9",
+ "regex-syntax 0.8.5",
]
[[package]]
@@ -2594,13 +2748,13 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.4.7"
+version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
- "regex-syntax 0.8.4",
+ "regex-syntax 0.8.5",
]
[[package]]
@@ -2617,9 +2771,9 @@ checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"
[[package]]
name = "regex-syntax"
-version = "0.8.4"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "ring"
@@ -2656,9 +2810,9 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
[[package]]
name = "rustix"
-version = "0.38.36"
+version = "0.38.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36"
+checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6"
dependencies = [
"bitflags 2.6.0",
"errno",
@@ -2683,25 +2837,24 @@ dependencies = [
[[package]]
name = "rustls-pemfile"
-version = "2.1.3"
+version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425"
+checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50"
dependencies = [
- "base64 0.22.1",
"rustls-pki-types",
]
[[package]]
name = "rustls-pki-types"
-version = "1.8.0"
+version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0"
+checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b"
[[package]]
name = "rustls-webpki"
-version = "0.102.7"
+version = "0.102.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56"
+checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9"
dependencies = [
"ring",
"rustls-pki-types",
@@ -2710,9 +2863,9 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.17"
+version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
+checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248"
[[package]]
name = "ryu"
@@ -2737,9 +2890,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "sd-notify"
-version = "0.4.2"
+version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4646d6f919800cd25c50edb49438a1381e2cd4833c027e75e8897981c50b8b5e"
+checksum = "1be20c5f7f393ee700f8b2f28ea35812e4e212f40774b550cd2a93ea91684451"
[[package]]
name = "sealed"
@@ -2750,7 +2903,7 @@ dependencies = [
"heck 0.4.1",
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -2764,29 +2917,29 @@ dependencies = [
[[package]]
name = "serde"
-version = "1.0.210"
+version = "1.0.215"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
+checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.210"
+version = "1.0.215"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
+checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
name = "serde_json"
-version = "1.0.128"
+version = "1.0.133"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
+checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377"
dependencies = [
"itoa",
"memchr",
@@ -2875,7 +3028,7 @@ checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085"
dependencies = [
"num-bigint",
"num-traits",
- "thiserror",
+ "thiserror 1.0.69",
"time",
]
@@ -2917,7 +3070,7 @@ checksum = "0eb01866308440fc64d6c44d9e86c5cc17adfe33c4d6eed55da9145044d0ffc1"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -2951,6 +3104,12 @@ dependencies = [
"log",
]
+[[package]]
+name = "stable_deref_trait"
+version = "1.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
+
[[package]]
name = "strsim"
version = "0.11.1"
@@ -2976,9 +3135,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "2.0.77"
+version = "2.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed"
+checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e"
dependencies = [
"proc-macro2",
"quote",
@@ -2991,13 +3150,24 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
+[[package]]
+name = "synstructure"
+version = "0.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.89",
+]
+
[[package]]
name = "synthez"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3d2c2202510a1e186e63e596d9318c91a8cbe85cd1a56a7be0c333e5f59ec8d"
dependencies = [
- "syn 2.0.77",
+ "syn 2.0.89",
"synthez-codegen",
"synthez-core",
]
@@ -3008,7 +3178,7 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f724aa6d44b7162f3158a57bccd871a77b39a4aef737e01bcdff41f4772c7746"
dependencies = [
- "syn 2.0.77",
+ "syn 2.0.89",
"synthez-core",
]
@@ -3021,14 +3191,14 @@ dependencies = [
"proc-macro2",
"quote",
"sealed",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
name = "tempfile"
-version = "3.12.0"
+version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64"
+checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c"
dependencies = [
"cfg-if",
"fastrand",
@@ -3039,12 +3209,12 @@ dependencies = [
[[package]]
name = "terminal_size"
-version = "0.3.0"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7"
+checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef"
dependencies = [
"rustix",
- "windows-sys 0.48.0",
+ "windows-sys 0.59.0",
]
[[package]]
@@ -3073,22 +3243,42 @@ dependencies = [
[[package]]
name = "thiserror"
-version = "1.0.63"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
+dependencies = [
+ "thiserror-impl 1.0.69",
+]
+
+[[package]]
+name = "thiserror"
+version = "2.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724"
+checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa"
dependencies = [
- "thiserror-impl",
+ "thiserror-impl 2.0.3",
]
[[package]]
name = "thiserror-impl"
-version = "1.0.63"
+version = "1.0.69"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.89",
+]
+
+[[package]]
+name = "thiserror-impl"
+version = "2.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261"
+checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -3134,6 +3324,16 @@ dependencies = [
"time-core",
]
+[[package]]
+name = "tinystr"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f"
+dependencies = [
+ "displaydoc",
+ "zerovec",
+]
+
[[package]]
name = "tinyvec"
version = "1.8.0"
@@ -3151,9 +3351,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
-version = "1.40.0"
+version = "1.41.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998"
+checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33"
dependencies = [
"backtrace",
"bytes",
@@ -3184,7 +3384,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -3307,7 +3507,7 @@ dependencies = [
"proc-macro2",
"prost-build",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -3324,6 +3524,19 @@ dependencies = [
"tonic 0.11.0",
]
+[[package]]
+name = "tonic-reflection"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7"
+dependencies = [
+ "prost 0.12.6",
+ "prost-types",
+ "tokio",
+ "tokio-stream",
+ "tonic 0.11.0",
+]
+
[[package]]
name = "tower"
version = "0.4.13"
@@ -3376,14 +3589,14 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
name = "tracing-core"
-version = "0.1.32"
+version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
+checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
dependencies = [
"once_cell",
"valuable",
@@ -3460,7 +3673,7 @@ dependencies = [
"log",
"rand",
"sha1",
- "thiserror",
+ "thiserror 1.0.69",
"url",
"utf-8",
]
@@ -3482,7 +3695,7 @@ checksum = "29a3151c41d0b13e3d011f98adc24434560ef06673a155a6c7f66b9879eecce2"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
]
[[package]]
@@ -3491,12 +3704,6 @@ version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
-[[package]]
-name = "unicode-bidi"
-version = "0.3.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75"
-
[[package]]
name = "unicode-bom"
version = "2.0.3"
@@ -3505,9 +3712,9 @@ checksum = "7eec5d1121208364f6793f7d2e222bf75a915c19557537745b195b253dd64217"
[[package]]
name = "unicode-ident"
-version = "1.0.12"
+version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
[[package]]
name = "unicode-linebreak"
@@ -3517,18 +3724,18 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f"
[[package]]
name = "unicode-normalization"
-version = "0.1.23"
+version = "0.1.24"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5"
+checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-width"
-version = "0.1.13"
+version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d"
+checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
[[package]]
name = "untrusted"
@@ -3538,9 +3745,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1"
[[package]]
name = "url"
-version = "2.5.2"
+version = "2.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c"
+checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60"
dependencies = [
"form_urlencoded",
"idna",
@@ -3559,6 +3766,18 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9"
+[[package]]
+name = "utf16_iter"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246"
+
+[[package]]
+name = "utf8_iter"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
+
[[package]]
name = "utf8parse"
version = "0.2.2"
@@ -3567,9 +3786,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
[[package]]
name = "uuid"
-version = "1.10.0"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314"
+checksum = "f8c5f0a0af699448548ad1a2fbf920fb4bee257eae39953ba95cb84891a0446a"
dependencies = [
"getrandom 0.2.15",
]
@@ -3634,9 +3853,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "wasm-bindgen"
-version = "0.2.93"
+version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5"
+checksum = "128d1e363af62632b8eb57219c8fd7877144af57558fb2ef0368d0087bddeb2e"
dependencies = [
"cfg-if",
"once_cell",
@@ -3645,24 +3864,24 @@ dependencies = [
[[package]]
name = "wasm-bindgen-backend"
-version = "0.2.93"
+version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b"
+checksum = "cb6dd4d3ca0ddffd1dd1c9c04f94b868c37ff5fac97c30b97cff2d74fce3a358"
dependencies = [
"bumpalo",
"log",
"once_cell",
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-macro"
-version = "0.2.93"
+version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf"
+checksum = "e79384be7f8f5a9dd5d7167216f022090cf1f9ec128e6e6a482a2cb5c5422c56"
dependencies = [
"quote",
"wasm-bindgen-macro-support",
@@ -3670,22 +3889,22 @@ dependencies = [
[[package]]
name = "wasm-bindgen-macro-support"
-version = "0.2.93"
+version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
+checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
"wasm-bindgen-backend",
"wasm-bindgen-shared",
]
[[package]]
name = "wasm-bindgen-shared"
-version = "0.2.93"
+version = "0.2.95"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484"
+checksum = "65fc09f10666a9f147042251e0dda9c18f166ff7de300607007e96bdebc1068d"
[[package]]
name = "winapi"
@@ -3724,16 +3943,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
dependencies = [
- "windows-targets 0.52.6",
-]
-
-[[package]]
-name = "windows-sys"
-version = "0.48.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
-dependencies = [
- "windows-targets 0.48.5",
+ "windows-targets",
]
[[package]]
@@ -3742,7 +3952,7 @@ version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
- "windows-targets 0.52.6",
+ "windows-targets",
]
[[package]]
@@ -3751,22 +3961,7 @@ version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
- "windows-targets 0.52.6",
-]
-
-[[package]]
-name = "windows-targets"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
-dependencies = [
- "windows_aarch64_gnullvm 0.48.5",
- "windows_aarch64_msvc 0.48.5",
- "windows_i686_gnu 0.48.5",
- "windows_i686_msvc 0.48.5",
- "windows_x86_64_gnu 0.48.5",
- "windows_x86_64_gnullvm 0.48.5",
- "windows_x86_64_msvc 0.48.5",
+ "windows-targets",
]
[[package]]
@@ -3775,46 +3970,28 @@ version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
- "windows_aarch64_gnullvm 0.52.6",
- "windows_aarch64_msvc 0.52.6",
- "windows_i686_gnu 0.52.6",
+ "windows_aarch64_gnullvm",
+ "windows_aarch64_msvc",
+ "windows_i686_gnu",
"windows_i686_gnullvm",
- "windows_i686_msvc 0.52.6",
- "windows_x86_64_gnu 0.52.6",
- "windows_x86_64_gnullvm 0.52.6",
- "windows_x86_64_msvc 0.52.6",
+ "windows_i686_msvc",
+ "windows_x86_64_gnu",
+ "windows_x86_64_gnullvm",
+ "windows_x86_64_msvc",
]
-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
-
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
-
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
-[[package]]
-name = "windows_i686_gnu"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
-
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
@@ -3829,59 +4006,71 @@ checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
-version = "0.48.5"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
+checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
-name = "windows_i686_msvc"
+name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
-name = "windows_x86_64_gnu"
-version = "0.48.5"
+name = "windows_x86_64_gnullvm"
+version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
+checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
-name = "windows_x86_64_gnu"
+name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.48.5"
+name = "winnow"
+version = "0.6.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
+checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b"
+dependencies = [
+ "memchr",
+]
[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.52.6"
+name = "write16"
+version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936"
[[package]]
-name = "windows_x86_64_msvc"
-version = "0.48.5"
+name = "writeable"
+version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
+checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51"
[[package]]
-name = "windows_x86_64_msvc"
-version = "0.52.6"
+name = "yoke"
+version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40"
+dependencies = [
+ "serde",
+ "stable_deref_trait",
+ "yoke-derive",
+ "zerofrom",
+]
[[package]]
-name = "winnow"
-version = "0.6.18"
+name = "yoke-derive"
+version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f"
+checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154"
dependencies = [
- "memchr",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.89",
+ "synstructure",
]
[[package]]
@@ -3902,7 +4091,28 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.77",
+ "syn 2.0.89",
+]
+
+[[package]]
+name = "zerofrom"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e"
+dependencies = [
+ "zerofrom-derive",
+]
+
+[[package]]
+name = "zerofrom-derive"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.89",
+ "synstructure",
]
[[package]]
@@ -3910,3 +4120,25 @@ name = "zeroize"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
+
+[[package]]
+name = "zerovec"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079"
+dependencies = [
+ "yoke",
+ "zerofrom",
+ "zerovec-derive",
+]
+
+[[package]]
+name = "zerovec-derive"
+version = "0.10.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.89",
+]
diff --git a/README.md b/README.md
index f7696c3f..ca18a291 100644
--- a/README.md
+++ b/README.md
@@ -46,6 +46,8 @@
Usage
Building
+ Performance
+ Additional Documentation
Contributing
License
Contact
@@ -60,7 +62,7 @@ The [COVESA Vehicle Signal Specification](https://covesa.github.io/vehicle_signa
However, VSS does not define how these signals are to be collected and managed within a vehicle, nor does it prescribe how other components in the vehicle can read or write signal values from and to the tree.
-**Kuksa Databroker** is a resource efficient implementation of the VSS signal tree and is intended to be run within a vehicle on a microprocessor based platform. It allows applications in the vehicle to interact with the vehicle's sensors and actuators using a uniform, high level gRPC API for querying signals, updating current and target values of sensors and actuators and getting notified about changes to signals of interest.
+**Kuksa Databroker** is a resource efficient implementation of the VSS signal tree and is intended to be run within a vehicle on a microprocessor based platform. It allows applications in the vehicle to interact with the vehicle's sensors and actuators using a uniform, high level gRPC API for querying signals, updating values of sensors and actuators and getting notified about changes to signals of interest.
@@ -88,6 +90,33 @@ Data is usually exchanged with ECUs by means of a CAN bus or Ethernet based prot
(back to top )
+### APIs supported by Databroker
+
+Kuksa Databroker provides [gRPC](https://grpc.io/) based API endpoints which can be used by
+clients to interact with the server. gRPC services are specified by means of `.proto` files which define the services and the data
+exchanged between server and client.
+[Tooling](https://grpc.io/docs/languages/) is available for most popular programming languages to create
+client stubs for invoking the services.
+The Databroker uses gRPC's default HTTP/2 transport and [protocol buffers](https://developers.google.com/protocol-buffers) for message serialization.
+The same `.proto` file can be used to generate server skeleton and client stubs for other transports and serialization formats as well.
+
+HTTP/2 is a binary replacement for HTTP/1.1 used for handling connections, multiplexing (channels) and providing a standardized way to add headers for authorization and TLS for encryption/authentication.
+It also supports bi-directional streaming between client and server.
+
+Kuksa Databroker implements the following gRPC service interfaces:
+
+- Enabled on Databroker by default [kuksa.val.v2.VAL](proto/kuksa/val/v2/val.proto)
+- Enabled on Databroker by default [kuksa.val.v1.VAL](proto/kuksa/val/v1/val.proto) (Deprecated!)
+- Disabled on Databroker by default [sdv.databroker.v1.Broker](proto/sdv/databroker/v1/broker.proto) (Deprecated!)
+- Disabled on Databroker by default [sdv.databroker.v1.Collector](proto/sdv/databroker/v1/collector.proto) (Deprecated!)
+
+In addition to the gRPC interfaces the Kuksa Databroker also supports a subset of the [COVESA VISS v2 Protocol](https://github.com/COVESA/vehicle-information-service-specification)
+using WebSocket.
+Please visit the [user guide](doc/user_guide.md) for more information on how the interfaces can be enabled and configured in the Databroker.
+Please visit the [protocol documentation](doc/protocol.md) for more information on the APIs.
+
+(back to top )
+
## Getting started
@@ -114,10 +143,14 @@ The quickest possible way to get Kuksa Databroker up and running.
> :bulb: **Tip:** You can stop the container using `ctrl-c`.
+*Note that not all APIs are enabled by default, see [user guide](doc/user_guide.md) and*
+*[protocols](doc/protocol.md) for more information!*
+
### Reading and writing VSS data using the CLI
1. Start the CLI in a container attached to the _kuksa_ bridge network and connect to the Databroker container:
- The databroker supports both of `sdv.databroker.v1` and `kuksa.val.v1` as an API. Per default the databroker-cli uses the `sdv.databroker.v1` interface. To change it use `--protocol` option when starting. Chosse eihter one of `kuksa-val-v1` and `sdv-databroker-v1`.
+
+ The databroker supports the latest API `kuksa.val.v2` and `kuksa.val.v1` by default; `sdv.databroker.v1` must be enabled using `--enable-databroker-v1`. By default the databroker-cli uses the `kuksa.val.v1` interface, which can be changed by supplying the `--protocol` option when starting. Choose either `kuksa.val.v1` or `sdv.databroker.v1`, as databroker-cli does not yet support `kuksa.val.v2`.
```sh
# in a new terminal
@@ -249,12 +282,32 @@ cargo test --all-targets
(back to top )
+## Performance
+The Kuksa team has released an official tool to measure the latency and throughput of the Databroker for all supported APIs:
+[kuksa-perf](https://github.com/eclipse-kuksa/kuksa-perf)
+
+The use case measures the time it takes for a signal to be transferred from the Provider to the Signal Consumer:
+Signal Consumer (stream subscribe) <- Databroker <- Provider (stream publish)
+
+Feel free to use it and share your results with us!
+
+## Additional Documentation
+
+Additional documentation is available in the [repository documentation folder](doc).
+
+(back to top )
+
## Contributing
Please refer to the [Kuksa Contributing Guide](CONTRIBUTING.md).
(back to top )
+
+
+## Kuksa analysis
+Extended [Kuksa analysis](./doc/kuksa_analysis.md) containing functional requirements, use cases diagrams, latest and new API definition `kuksa.val.v2` as well as new design discussions for future developments and improvements.
+
## License
Kuksa Databroker is provided under the terms of the [Apache Software License 2.0](LICENSE).
diff --git a/data/vss-core/README.md b/data/vss-core/README.md
index dbba2cff..73289905 100644
--- a/data/vss-core/README.md
+++ b/data/vss-core/README.md
@@ -60,7 +60,7 @@ use the full name. When official release is created replace the copied *.json-fi
Build and run kuksa_databroker using the new VSS file according to [documentation](../../README.md), e.g.
```sh
-$cargo run --bin databroker -- --metadata ../data/vss-core/vss_release_4.0.json
+$cargo run --bin databroker -- --metadata ./data/vss-core/vss_release_4.0.json
```
Use the client to verify that changes in VSS are reflected, by doing e.g. set/get on some new or renamed signals.
diff --git a/databroker-cli/Cargo.toml b/databroker-cli/Cargo.toml
index ea8363e4..59f146c7 100644
--- a/databroker-cli/Cargo.toml
+++ b/databroker-cli/Cargo.toml
@@ -13,7 +13,7 @@
[package]
name = "databroker-cli"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
authors = ["Eclipse KUKSA Project"]
edition = "2021"
license = "Apache-2.0"
diff --git a/databroker-cli/src/sdv_cli.rs b/databroker-cli/src/sdv_cli.rs
index e52dd642..0010c2a7 100644
--- a/databroker-cli/src/sdv_cli.rs
+++ b/databroker-cli/src/sdv_cli.rs
@@ -1263,6 +1263,8 @@ mod test {
change_type: proto::v1::ChangeType::OnChange.into(),
description: "".into(),
allowed: None,
+ min: None,
+ max: None,
},
proto::v1::Metadata {
id: 2,
@@ -1272,6 +1274,8 @@ mod test {
change_type: proto::v1::ChangeType::OnChange.into(),
description: "".into(),
allowed: None,
+ min: None,
+ max: None,
},
proto::v1::Metadata {
id: 3,
@@ -1281,6 +1285,8 @@ mod test {
change_type: proto::v1::ChangeType::OnChange.into(),
description: "".into(),
allowed: None,
+ min: None,
+ max: None,
},
]
.to_vec();
diff --git a/databroker-proto/Cargo.toml b/databroker-proto/Cargo.toml
index 22ad6791..a038556e 100644
--- a/databroker-proto/Cargo.toml
+++ b/databroker-proto/Cargo.toml
@@ -13,7 +13,7 @@
[package]
name = "databroker-proto"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
authors = ["Eclipse KUKSA Project"]
edition = "2021"
license = "Apache-2.0"
diff --git a/databroker-proto/build.rs b/databroker-proto/build.rs
index d02a006d..1a832c9f 100644
--- a/databroker-proto/build.rs
+++ b/databroker-proto/build.rs
@@ -11,6 +11,8 @@
* SPDX-License-Identifier: Apache-2.0
********************************************************************************/
+use std::{env, path::PathBuf};
+
fn main() -> Result<(), Box> {
std::env::set_var("PROTOC", protobuf_src::protoc());
tonic_build::configure()
@@ -23,8 +25,23 @@ fn main() -> Result<(), Box> {
"proto/sdv/databroker/v1/collector.proto",
"proto/kuksa/val/v1/val.proto",
"proto/kuksa/val/v1/types.proto",
+ "proto/kuksa/val/v2/val.proto",
+ "proto/kuksa/val/v2/types.proto",
],
&["proto"],
)?;
+
+ let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
+ tonic_build::configure()
+ .file_descriptor_set_path(out_dir.join("kuksa.val.v2_descriptor.bin"))
+ .compile(
+ &[
+ "proto/kuksa/val/v2/val.proto",
+ "proto/kuksa/val/v2/types.proto",
+ ],
+ &["proto"],
+ )
+ .unwrap();
+
Ok(())
}
diff --git a/databroker-proto/src/lib.rs b/databroker-proto/src/lib.rs
index 83fe2005..9bc5552c 100644
--- a/databroker-proto/src/lib.rs
+++ b/databroker-proto/src/lib.rs
@@ -143,5 +143,11 @@ pub mod kuksa {
}
}
}
+ pub mod v2 {
+ tonic::include_proto!("kuksa.val.v2");
+
+ pub const FILE_DESCRIPTOR_SET: &[u8] =
+ tonic::include_file_descriptor_set!("kuksa.val.v2_descriptor");
+ }
}
}
diff --git a/databroker/Cargo.toml b/databroker/Cargo.toml
index 688afb74..a4d0b514 100644
--- a/databroker/Cargo.toml
+++ b/databroker/Cargo.toml
@@ -13,7 +13,7 @@
[package]
name = "databroker"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
authors = ["Eclipse KUKSA Project"]
edition = "2021"
license = "Apache-2.0"
@@ -27,6 +27,7 @@ kuksa-common = { path = "../lib/common"}
kuksa = { path = "../lib/kuksa"}
databroker-proto = { workspace = true }
tonic = { workspace = true, features = ["transport", "channel", "prost"] }
+tonic-reflection = "0.11.0"
prost = { workspace = true }
prost-types = { workspace = true }
tokio = { workspace = true, features = [
@@ -60,10 +61,11 @@ glob-match = "0.2.1"
jemallocator = { version = "0.5.0", optional = true }
lazy_static = "1.4.0"
thiserror = "1.0.47"
+futures = { version = "0.3.28" }
+async-trait = "0.1.82"
# VISS
axum = { version = "0.6.20", optional = true, features = ["ws"] }
-futures = { version = "0.3.28", optional = true }
chrono = { version = "0.4.31", optional = true, features = ["std"] }
uuid = { version = "1.4.1", optional = true, features = ["v4"] }
@@ -81,7 +83,7 @@ sd-notify = "0.4.1"
default = ["tls"]
tls = ["tonic/tls"]
jemalloc = ["dep:jemallocator"]
-viss = ["dep:axum", "dep:chrono", "dep:futures", "dep:uuid"]
+viss = ["dep:axum", "dep:chrono", "dep:uuid"]
libtest = []
otel = ["dep:chrono", "dep:opentelemetry", "dep:opentelemetry-otlp", "dep:opentelemetry-semantic-conventions", "dep:tracing-opentelemetry"]
diff --git a/databroker/src/broker.rs b/databroker/src/broker.rs
index ea631358..95fc215c 100644
--- a/databroker/src/broker.rs
+++ b/databroker/src/broker.rs
@@ -18,8 +18,9 @@ use crate::query;
pub use crate::types::{ChangeType, DataType, DataValue, EntryType};
use tokio::sync::{broadcast, mpsc, RwLock};
+use tokio_stream::wrappers::BroadcastStream;
use tokio_stream::wrappers::ReceiverStream;
-use tokio_stream::Stream;
+use tokio_stream::{Stream, StreamExt};
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
@@ -40,14 +41,31 @@ tracing_opentelemetry::OpenTelemetrySpanExt,
use crate::glob;
+const MAX_SUBSCRIBE_BUFFER_SIZE: usize = 1000;
+
#[derive(Debug)]
-pub enum UpdateError {
+pub enum ActuationError {
NotFound,
WrongType,
OutOfBounds,
UnsupportedType,
PermissionDenied,
PermissionExpired,
+ ProviderNotAvailable,
+ ProviderAlreadyExists,
+ TransmissionFailure,
+}
+
+#[derive(Debug, PartialEq)]
+pub enum UpdateError {
+ NotFound,
+ WrongType,
+ OutOfBoundsAllowed,
+ OutOfBoundsMinMax,
+ OutOfBoundsType,
+ UnsupportedType,
+ PermissionDenied,
+ PermissionExpired,
}
#[derive(Debug, Clone)]
@@ -73,6 +91,9 @@ pub struct Metadata {
pub entry_type: EntryType,
pub change_type: ChangeType,
pub description: String,
+ // Min and Max are typically never arrays
+ pub min: Option,
+ pub max: Option,
pub allowed: Option,
pub unit: Option,
}
@@ -108,6 +129,7 @@ pub struct Database {
#[derive(Default)]
pub struct Subscriptions {
+ actuation_subscriptions: Vec,
query_subscriptions: Vec,
change_subscriptions: Vec,
}
@@ -123,13 +145,14 @@ pub struct QueryField {
pub value: DataValue,
}
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub struct ChangeNotification {
+ pub id: i32,
pub update: EntryUpdate,
pub fields: HashSet,
}
-#[derive(Debug, Default)]
+#[derive(Debug, Default, Clone)]
pub struct EntryUpdates {
pub updates: Vec,
}
@@ -144,6 +167,7 @@ pub enum QueryError {
pub enum SubscriptionError {
NotFound,
InvalidInput,
+ InvalidBufferSize,
InternalError,
}
@@ -152,9 +176,31 @@ pub struct DataBroker {
database: Arc>,
subscriptions: Arc>,
version: String,
+ commit_sha: String,
shutdown_trigger: broadcast::Sender<()>,
}
+#[async_trait::async_trait]
+pub trait ActuationProvider {
+ async fn actuate(
+ &self,
+ actuation_changes: Vec,
+ ) -> Result<(), (ActuationError, String)>;
+ fn is_available(&self) -> bool;
+}
+
+#[derive(Clone)]
+pub struct ActuationChange {
+ pub id: i32,
+ pub data_value: DataValue,
+}
+
+pub struct ActuationSubscription {
+ vss_ids: Vec,
+ actuation_provider: Box,
+ permissions: Permissions,
+}
+
pub struct QuerySubscription {
query: query::CompiledQuery,
sender: mpsc::Sender,
@@ -163,7 +209,7 @@ pub struct QuerySubscription {
pub struct ChangeSubscription {
entries: HashMap>,
- sender: mpsc::Sender,
+ sender: broadcast::Sender,
permissions: Permissions,
}
@@ -189,6 +235,8 @@ pub struct EntryUpdate {
// order to be able to convey "update it to None" which would
// mean setting it to `Some(None)`.
pub allowed: Option>,
+ pub min: Option >,
+ pub max: Option >,
pub unit: Option,
}
@@ -213,6 +261,11 @@ impl Entry {
update
}
+ pub fn validate_actuator_value(&self, data_value: &DataValue) -> Result<(), UpdateError> {
+ self.validate_value(data_value)?;
+ self.validate_allowed(data_value)?;
+ Ok(())
+ }
#[cfg_attr(feature="otel", tracing::instrument(name="broker_validate", skip(self, update), fields(timestamp=chrono::Utc::now().to_string())))]
pub fn validate(&self, update: &EntryUpdate) -> Result<(), UpdateError> {
if let Some(datapoint) = &update.datapoint {
@@ -232,26 +285,42 @@ impl Entry {
}
#[cfg_attr(feature="otel", tracing::instrument(name="broker_validate_allowed_type", skip(self, allowed), fields(timestamp=chrono::Utc::now().to_string())))]
+ /**
+ * DataType is VSS type, where we have also smaller type based on 8/16 bits
+ * That we do not have for DataValue
+ */
pub fn validate_allowed_type(&self, allowed: &Option) -> Result<(), UpdateError> {
if let Some(allowed_values) = allowed {
match (allowed_values, &self.metadata.data_type) {
(DataValue::BoolArray(_allowed_values), DataType::Bool) => Ok(()),
(DataValue::StringArray(_allowed_values), DataType::String) => Ok(()),
+ (DataValue::Int32Array(_allowed_values), DataType::Int8) => Ok(()),
+ (DataValue::Int32Array(_allowed_values), DataType::Int16) => Ok(()),
(DataValue::Int32Array(_allowed_values), DataType::Int32) => Ok(()),
(DataValue::Int64Array(_allowed_values), DataType::Int64) => Ok(()),
+ (DataValue::Uint32Array(_allowed_values), DataType::Uint8) => Ok(()),
+ (DataValue::Uint32Array(_allowed_values), DataType::Uint16) => Ok(()),
(DataValue::Uint32Array(_allowed_values), DataType::Uint32) => Ok(()),
(DataValue::Uint64Array(_allowed_values), DataType::Uint64) => Ok(()),
(DataValue::FloatArray(_allowed_values), DataType::Float) => Ok(()),
(DataValue::DoubleArray(_allowed_values), DataType::Double) => Ok(()),
(DataValue::BoolArray(_allowed_values), DataType::BoolArray) => Ok(()),
(DataValue::StringArray(_allowed_values), DataType::StringArray) => Ok(()),
+ (DataValue::Int32Array(_allowed_values), DataType::Int8Array) => Ok(()),
+ (DataValue::Int32Array(_allowed_values), DataType::Int16Array) => Ok(()),
(DataValue::Int32Array(_allowed_values), DataType::Int32Array) => Ok(()),
(DataValue::Int64Array(_allowed_values), DataType::Int64Array) => Ok(()),
+ (DataValue::Uint32Array(_allowed_values), DataType::Uint8Array) => Ok(()),
+ (DataValue::Uint32Array(_allowed_values), DataType::Uint16Array) => Ok(()),
(DataValue::Uint32Array(_allowed_values), DataType::Uint32Array) => Ok(()),
(DataValue::Uint64Array(_allowed_values), DataType::Uint64Array) => Ok(()),
(DataValue::FloatArray(_allowed_values), DataType::FloatArray) => Ok(()),
(DataValue::DoubleArray(_allowed_values), DataType::DoubleArray) => Ok(()),
- _ => Err(UpdateError::WrongType {}),
+ _ => {
+ debug!("Unexpected combination - VSS datatype is {:?}, but list of allowed value use {:?}",
+ &self.metadata.data_type, allowed_values);
+ Err(UpdateError::WrongType {})
+ }
}
} else {
// it is allowed to set allowed to None
@@ -267,56 +336,56 @@ impl Entry {
(DataValue::BoolArray(allowed_values), DataValue::Bool(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::DoubleArray(allowed_values), DataValue::Double(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::FloatArray(allowed_values), DataValue::Float(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::Int32Array(allowed_values), DataValue::Int32(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::Int64Array(allowed_values), DataValue::Int64(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::StringArray(allowed_values), DataValue::String(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::Uint32Array(allowed_values), DataValue::Uint32(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::Uint64Array(allowed_values), DataValue::Uint64(value)) => {
match allowed_values.contains(value) {
true => Ok(()),
- false => Err(UpdateError::OutOfBounds),
+ false => Err(UpdateError::OutOfBoundsAllowed),
}
}
(DataValue::BoolArray(allowed_values), DataValue::BoolArray(value)) => {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -325,7 +394,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -334,7 +403,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -343,7 +412,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -352,7 +421,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -361,7 +430,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -370,7 +439,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -379,7 +448,7 @@ impl Entry {
for item in value {
match allowed_values.contains(item) {
true => (),
- false => return Err(UpdateError::OutOfBounds),
+ false => return Err(UpdateError::OutOfBoundsAllowed),
}
}
Ok(())
@@ -391,6 +460,27 @@ impl Entry {
}
}
+ /// Checks if value fulfils min/max condition
+ /// Returns OutOfBounds if not fulfilled
+ fn validate_value_min_max(&self, value: &DataValue) -> Result<(), UpdateError> {
+ // Validate Min/Max
+ if let Some(min) = &self.metadata.min {
+ debug!("Checking min, comparing value {:?} and {:?}", value, min);
+ match value.greater_than_equal(min) {
+ Ok(true) => {}
+ _ => return Err(UpdateError::OutOfBoundsMinMax),
+ };
+ }
+ if let Some(max) = &self.metadata.max {
+ debug!("Checking max, comparing value {:?} and {:?}", value, max);
+ match value.less_than_equal(max) {
+ Ok(true) => {}
+ _ => return Err(UpdateError::OutOfBoundsMinMax),
+ };
+ }
+ Ok(())
+ }
+
#[cfg_attr(feature="otel", tracing::instrument(name="broker_validate_value", skip(self, value), fields(timestamp=chrono::Utc::now().to_string())))]
fn validate_value(&self, value: &DataValue) -> Result<(), UpdateError> {
// Not available is always valid
@@ -398,6 +488,25 @@ impl Entry {
return Ok(());
}
+ // For numeric non-arrays check min/max
+ // For arrays we check later on value
+ match self.metadata.data_type {
+ DataType::Int8
+ | DataType::Int16
+ | DataType::Int32
+ | DataType::Int64
+ | DataType::Uint8
+ | DataType::Uint16
+ | DataType::Uint32
+ | DataType::Uint64
+ | DataType::Float
+ | DataType::Double => match self.validate_value_min_max(value) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ },
+ _ => {}
+ }
+
// Validate value
match self.metadata.data_type {
DataType::Bool => match value {
@@ -411,14 +520,14 @@ impl Entry {
DataType::Int8 => match value {
DataValue::Int32(value) => match i8::try_from(*value) {
Ok(_) => Ok(()),
- Err(_) => Err(UpdateError::OutOfBounds),
+ Err(_) => Err(UpdateError::OutOfBoundsType),
},
_ => Err(UpdateError::WrongType),
},
DataType::Int16 => match value {
DataValue::Int32(value) => match i16::try_from(*value) {
Ok(_) => Ok(()),
- Err(_) => Err(UpdateError::OutOfBounds),
+ Err(_) => Err(UpdateError::OutOfBoundsType),
},
_ => Err(UpdateError::WrongType),
},
@@ -434,14 +543,14 @@ impl Entry {
DataType::Uint8 => match value {
DataValue::Uint32(value) => match u8::try_from(*value) {
Ok(_) => Ok(()),
- Err(_) => Err(UpdateError::OutOfBounds),
+ Err(_) => Err(UpdateError::OutOfBoundsType),
},
_ => Err(UpdateError::WrongType),
},
DataType::Uint16 => match value {
DataValue::Uint32(value) => match u16::try_from(*value) {
Ok(_) => Ok(()),
- Err(_) => Err(UpdateError::OutOfBounds),
+ Err(_) => Err(UpdateError::OutOfBoundsType),
},
_ => Err(UpdateError::WrongType),
},
@@ -471,106 +580,138 @@ impl Entry {
},
DataType::Int8Array => match &value {
DataValue::Int32Array(array) => {
- let mut out_of_bounds = false;
for value in array {
match i8::try_from(*value) {
- Ok(_) => {}
- Err(_) => {
- out_of_bounds = true;
- break;
- }
+ Ok(_) => match self.validate_value_min_max(&DataValue::Int32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ },
+ Err(_) => return Err(UpdateError::OutOfBoundsType),
}
}
- if out_of_bounds {
- Err(UpdateError::OutOfBounds)
- } else {
- Ok(())
- }
+ Ok(())
}
_ => Err(UpdateError::WrongType),
},
DataType::Int16Array => match &value {
DataValue::Int32Array(array) => {
- let mut out_of_bounds = false;
for value in array {
match i16::try_from(*value) {
- Ok(_) => {}
- Err(_) => {
- out_of_bounds = true;
- break;
- }
+ Ok(_) => match self.validate_value_min_max(&DataValue::Int32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ },
+ Err(_) => return Err(UpdateError::OutOfBoundsType),
}
}
- if out_of_bounds {
- Err(UpdateError::OutOfBounds)
- } else {
- Ok(())
- }
+ Ok(())
}
_ => Err(UpdateError::WrongType),
},
DataType::Int32Array => match value {
- DataValue::Int32Array(_) => Ok(()),
+ DataValue::Int32Array(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Int32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::Int64Array => match value {
- DataValue::Int64Array(_) => Ok(()),
+ DataValue::Int64Array(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Int64(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::Uint8Array => match &value {
DataValue::Uint32Array(array) => {
- let mut out_of_bounds = false;
for value in array {
match u8::try_from(*value) {
- Ok(_) => {}
- Err(_) => {
- out_of_bounds = true;
- break;
+ Ok(_) => {
+ match self.validate_value_min_max(&DataValue::Uint32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
}
+ Err(_) => return Err(UpdateError::OutOfBoundsType),
}
}
- if out_of_bounds {
- Err(UpdateError::OutOfBounds)
- } else {
- Ok(())
- }
+ Ok(())
}
_ => Err(UpdateError::WrongType),
},
DataType::Uint16Array => match &value {
DataValue::Uint32Array(array) => {
- let mut out_of_bounds = false;
for value in array {
match u16::try_from(*value) {
- Ok(_) => {}
- Err(_) => {
- out_of_bounds = true;
- break;
+ Ok(_) => {
+ match self.validate_value_min_max(&DataValue::Uint32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
}
+ Err(_) => return Err(UpdateError::OutOfBoundsType),
}
}
- if out_of_bounds {
- Err(UpdateError::OutOfBounds)
- } else {
- Ok(())
- }
+ Ok(())
}
_ => Err(UpdateError::WrongType),
},
DataType::Uint32Array => match value {
- DataValue::Uint32Array(_) => Ok(()),
+ DataValue::Uint32Array(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Uint32(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::Uint64Array => match value {
- DataValue::Uint64Array(_) => Ok(()),
+ DataValue::Uint64Array(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Uint64(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::FloatArray => match value {
- DataValue::FloatArray(_) => Ok(()),
+ DataValue::FloatArray(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Float(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
DataType::DoubleArray => match value {
- DataValue::DoubleArray(_) => Ok(()),
+ DataValue::DoubleArray(array) => {
+ for value in array {
+ match self.validate_value_min_max(&DataValue::Double(*value)) {
+ Ok(_) => {}
+ Err(err) => return Err(err),
+ }
+ }
+ Ok(())
+ }
_ => Err(UpdateError::WrongType),
},
}
@@ -593,10 +734,6 @@ impl Entry {
self.actuator_target = actuator_target;
changed.insert(Field::ActuatorTarget);
}
- if let Some(metadata_description) = update.description {
- self.metadata.description = metadata_description;
- // changed.insert(Field::ActuatorTarget);
- }
if let Some(updated_allowed) = update.allowed {
if updated_allowed != self.metadata.allowed {
self.metadata.allowed = updated_allowed;
@@ -616,6 +753,10 @@ pub enum SuccessfulUpdate {
}
impl Subscriptions {
+ pub fn add_actuation_subscription(&mut self, subscription: ActuationSubscription) {
+ self.actuation_subscriptions.push(subscription);
+ }
+
pub fn add_query_subscription(&mut self, subscription: QuerySubscription) {
self.query_subscriptions.push(subscription)
}
@@ -667,6 +808,7 @@ impl Subscriptions {
}
pub fn clear(&mut self) {
+ self.actuation_subscriptions.clear();
self.query_subscriptions.clear();
self.change_subscriptions.clear();
}
@@ -682,21 +824,26 @@ impl Subscriptions {
}
});
self.change_subscriptions.retain(|sub| {
- if sub.sender.is_closed() {
+ if sub.sender.receiver_count() == 0 {
info!("Subscriber gone: removing subscription");
false
+ } else if sub.permissions.is_expired() {
+ info!("Permissions of Subscriber expired: removing subscription");
+ false
} else {
- match &sub.permissions.expired() {
- Ok(()) => true,
- Err(PermissionError::Expired) => {
- info!("Token expired: removing subscription");
- false
- }
- Err(err) => {
- info!("Error: {:?} -> removing subscription", err);
- false
- }
- }
+ true
+ }
+ });
+
+ self.actuation_subscriptions.retain(|sub| {
+ if !sub.actuation_provider.is_available() {
+ info!("Provider gone: removing subscription");
+ false
+ } else if sub.permissions.is_expired() {
+ info!("Permissions of Provider expired: removing subscription");
+ false
+ } else {
+ true
}
});
}
@@ -787,20 +934,8 @@ impl ChangeSubscription {
}
// fill unit field always
update.unit.clone_from(&entry.metadata.unit);
- update.description = Some(entry.metadata.description.clone());
- #[cfg(feature = "otel")] // This block will only compile if the "otel" feature is enabled
- {
- let mut metadata = MetadataMap::new();
- // @TODO: Speak to Kuksa team regarding MetadataMap in proto file
- let mut injector = MetadataMapInjector(&mut metadata);
- opentelemetry::global::get_text_map_propagator(|propagator| {
- propagator.inject_context(¤t_span.context(), &mut injector);
- });
- let description = metadatamap_to_string(&metadata);
-
- update.description = Some(description);
- }
notifications.updates.push(ChangeNotification {
+ id: *id,
update,
fields: notify_fields,
});
@@ -821,9 +956,12 @@ impl ChangeSubscription {
if notifications.updates.is_empty() {
Ok(())
} else {
- match self.sender.send(notifications).await {
- Ok(()) => Ok(()),
- Err(_) => Err(NotificationError {}),
+ match self.sender.send(notifications) {
+ Ok(_number_of_receivers) => Ok(()),
+ Err(err) => {
+ debug!("Send error for entry{}: ", err);
+ Err(NotificationError {})
+ }
}
}
} else {
@@ -841,7 +979,6 @@ impl ChangeSubscription {
let mut notify_fields = HashSet::new();
// TODO: Perhaps make path optional
update.path = Some(entry.metadata.path.clone());
- update.description = Some(entry.metadata.description.clone());
if fields.contains(&Field::Datapoint) {
update.datapoint = Some(entry.datapoint.clone());
notify_fields.insert(Field::Datapoint);
@@ -851,6 +988,7 @@ impl ChangeSubscription {
notify_fields.insert(Field::ActuatorTarget);
}
notifications.updates.push(ChangeNotification {
+ id: *id,
update,
fields: notify_fields,
});
@@ -862,9 +1000,12 @@ impl ChangeSubscription {
}
notifications
};
- match self.sender.send(notifications).await {
- Ok(()) => Ok(()),
- Err(_) => Err(NotificationError {}),
+ match self.sender.send(notifications) {
+ Ok(_number_of_receivers) => Ok(()),
+ Err(err) => {
+ debug!("Send error for entry{}: ", err);
+ Err(NotificationError {})
+ }
}
}
}
@@ -1028,8 +1169,8 @@ pub enum EntryReadAccess<'a> {
Entry(&'a Entry),
Err(&'a Metadata, ReadError),
}
-
-impl<'a> EntryReadAccess<'a> {
+
+impl EntryReadAccess<'_> {
#[cfg_attr(feature="otel", tracing::instrument(name="broker_datapoint", skip(self), fields(timestamp=chrono::Utc::now().to_string())))]
pub fn datapoint(&self) -> Result<&Datapoint, ReadError> {
match self {
@@ -1071,7 +1212,7 @@ pub struct EntryReadIterator<'a, 'b> {
permissions: &'b Permissions,
}
-impl<'a, 'b> Iterator for EntryReadIterator<'a, 'b> {
+impl<'a> Iterator for EntryReadIterator<'a, '_> {
type Item = EntryReadAccess<'a>;
#[inline]
@@ -1087,7 +1228,7 @@ impl<'a, 'b> Iterator for EntryReadIterator<'a, 'b> {
}
}
-impl<'a, 'b> DatabaseReadAccess<'a, 'b> {
+impl DatabaseReadAccess<'_, '_> {
#[cfg_attr(feature="otel", tracing::instrument(name="get_entry_by_id", skip(self, id), fields(timestamp=chrono::Utc::now().to_string())))]
pub fn get_entry_by_id(&self, id: i32) -> Result<&Entry, ReadError> {
match self.db.entries.get(&id) {
@@ -1127,7 +1268,7 @@ impl<'a, 'b> DatabaseReadAccess<'a, 'b> {
}
}
-impl<'a, 'b> DatabaseWriteAccess<'a, 'b> {
+impl DatabaseWriteAccess<'_, '_> {
pub fn update_by_path(
&mut self,
path: &str,
@@ -1160,7 +1301,6 @@ impl<'a, 'b> DatabaseWriteAccess<'a, 'b> {
if update.path.is_some()
|| update.entry_type.is_some()
|| update.data_type.is_some()
- // || update.description.is_some()
{
return Err(UpdateError::PermissionDenied);
}
@@ -1216,6 +1356,8 @@ impl<'a, 'b> DatabaseWriteAccess<'a, 'b> {
change_type: ChangeType,
entry_type: EntryType,
description: String,
+ min: Option,
+ max: Option,
allowed: Option,
datapoint: Option,
unit: Option,
@@ -1248,6 +1390,8 @@ impl<'a, 'b> DatabaseWriteAccess<'a, 'b> {
entry_type,
description,
allowed,
+ min,
+ max,
unit,
},
datapoint: match datapoint.clone() {
@@ -1321,7 +1465,7 @@ impl Database {
}
}
-impl<'a, 'b> query::CompilationInput for DatabaseReadAccess<'a, 'b> {
+impl query::CompilationInput for DatabaseReadAccess<'_, '_> {
fn get_datapoint_type(&self, path: &str) -> Result {
match self.get_metadata_by_path(path) {
Some(metadata) => Ok(metadata.data_type.to_owned()),
@@ -1344,6 +1488,8 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> {
change_type: ChangeType,
entry_type: EntryType,
description: String,
+ min: Option,
+ max: Option,
allowed: Option,
unit: Option,
) -> Result {
@@ -1358,6 +1504,8 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> {
change_type,
entry_type,
description,
+ min,
+ max,
allowed,
None,
unit,
@@ -1557,12 +1705,24 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> {
pub async fn subscribe(
&self,
valid_entries: HashMap>,
+ buffer_size: Option,
) -> Result, SubscriptionError> {
if valid_entries.is_empty() {
return Err(SubscriptionError::InvalidInput);
}
- let (sender, receiver) = mpsc::channel(10);
+ let channel_capacity = if let Some(cap) = buffer_size {
+ if cap > MAX_SUBSCRIBE_BUFFER_SIZE {
+ return Err(SubscriptionError::InvalidBufferSize);
+ }
+ // Requested capacity for old messages plus 1 for latest
+ cap + 1
+ } else {
+ // Just latest message
+ 1
+ };
+
+ let (sender, receiver) = broadcast::channel(channel_capacity);
let subscription = ChangeSubscription {
entries: valid_entries,
sender,
@@ -1583,7 +1743,13 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> {
.await
.add_change_subscription(subscription);
- let stream = ReceiverStream::new(receiver);
+ let stream = BroadcastStream::new(receiver).filter_map(|result| match result {
+ Ok(message) => Some(message),
+ Err(err) => {
+ debug!("Lagged entries: {}", err);
+ None
+ }
+ });
Ok(stream)
}
@@ -1623,16 +1789,309 @@ impl<'a, 'b> AuthorizedAccess<'a, 'b> {
Err(e) => Err(QueryError::CompilationError(format!("{e:?}"))),
}
}
+
+ pub async fn provide_actuation(
+ &self,
+ vss_ids: Vec,
+ actuation_provider: Box,
+ ) -> Result<(), (ActuationError, String)> {
+ for vss_id in vss_ids.clone() {
+ self.can_write_actuator_target(&vss_id).await?;
+ }
+
+ let provided_vss_ids: Vec = self
+ .broker
+ .subscriptions
+ .read()
+ .await
+ .actuation_subscriptions
+ .iter()
+ .flat_map(|subscription| subscription.vss_ids.clone())
+ .collect();
+ let intersection: Vec<&i32> = vss_ids
+ .iter()
+ .filter(|&x| provided_vss_ids.contains(x))
+ .collect();
+ if !intersection.is_empty() {
+ let message = format!(
+ "Providers for the following vss_ids already registered: {:?}",
+ intersection
+ );
+ return Err((ActuationError::ProviderAlreadyExists, message));
+ }
+
+ let actuation_subscription: ActuationSubscription = ActuationSubscription {
+ vss_ids,
+ actuation_provider,
+ permissions: self.permissions.clone(),
+ };
+ self.broker
+ .subscriptions
+ .write()
+ .await
+ .add_actuation_subscription(actuation_subscription);
+
+ Ok(())
+ }
+
+ async fn map_actuation_changes_by_vss_id(
+ &self,
+ actuation_changes: Vec,
+ ) -> HashMap> {
+ let mut actuation_changes_per_vss_id: HashMap> =
+ HashMap::with_capacity(actuation_changes.len());
+ for actuation_change in actuation_changes {
+ let vss_id = actuation_change.id;
+
+ let opt_vss_ids = actuation_changes_per_vss_id.get_mut(&vss_id);
+ match opt_vss_ids {
+ Some(vss_ids) => {
+ vss_ids.push(actuation_change.clone());
+ }
+ None => {
+ let vec = vec![actuation_change.clone()];
+ actuation_changes_per_vss_id.insert(vss_id, vec);
+ }
+ }
+ }
+ actuation_changes_per_vss_id
+ }
+
+ pub async fn batch_actuate(
+ &self,
+ actuation_changes: Vec,
+ ) -> Result<(), (ActuationError, String)> {
+ let read_subscription_guard = self.broker.subscriptions.read().await;
+ let actuation_subscriptions = &read_subscription_guard.actuation_subscriptions;
+
+ for actuation_change in &actuation_changes {
+ let vss_id = actuation_change.id;
+ self.can_write_actuator_target(&vss_id).await?;
+ self.validate_actuator_update(&vss_id, &actuation_change.data_value)
+ .await?;
+ }
+
+ let actuation_changes_per_vss_id = &self
+ .map_actuation_changes_by_vss_id(actuation_changes)
+ .await;
+ for actuation_change_per_vss_id in actuation_changes_per_vss_id {
+ let vss_id = *actuation_change_per_vss_id.0;
+ let actuation_changes = actuation_change_per_vss_id.1.clone();
+
+ let opt_actuation_subscription = actuation_subscriptions
+ .iter()
+ .find(|subscription| subscription.vss_ids.contains(&vss_id));
+ match opt_actuation_subscription {
+ Some(actuation_subscription) => {
+ let is_expired = actuation_subscription.permissions.is_expired();
+ if is_expired {
+ let message = format!(
+ "Permission for vss_ids {:?} expired",
+ actuation_subscription.vss_ids
+ );
+ return Err((ActuationError::PermissionExpired, message));
+ }
+
+ if !actuation_subscription.actuation_provider.is_available() {
+ let message = format!("Provider for vss_id {} does not exist", vss_id);
+ return Err((ActuationError::ProviderNotAvailable, message));
+ }
+
+ actuation_subscription
+ .actuation_provider
+ .actuate(actuation_changes)
+ .await?
+ }
+ None => {
+ let message = format!("Provider for vss_id {} not available", vss_id);
+ return Err((ActuationError::ProviderNotAvailable, message));
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ pub async fn actuate(
+ &self,
+ vss_id: &i32,
+ data_value: &DataValue,
+ ) -> Result<(), (ActuationError, String)> {
+ let vss_id = *vss_id;
+
+ self.can_write_actuator_target(&vss_id).await?;
+ self.validate_actuator_update(&vss_id, data_value).await?;
+
+ let read_subscription_guard = self.broker.subscriptions.read().await;
+ let opt_actuation_subscription = &read_subscription_guard
+ .actuation_subscriptions
+ .iter()
+ .find(|subscription| subscription.vss_ids.contains(&vss_id));
+ match opt_actuation_subscription {
+ Some(actuation_subscription) => {
+ let is_expired = actuation_subscription.permissions.is_expired();
+ if is_expired {
+ let message = format!(
+ "Permission for vss_ids {:?} expired",
+ actuation_subscription.vss_ids
+ );
+ return Err((ActuationError::PermissionExpired, message));
+ }
+
+ if !actuation_subscription.actuation_provider.is_available() {
+ let message = format!("Provider for vss_id {} does not exist", vss_id);
+ return Err((ActuationError::ProviderNotAvailable, message));
+ }
+
+ actuation_subscription
+ .actuation_provider
+ .actuate(vec![ActuationChange {
+ id: vss_id,
+ data_value: data_value.clone(),
+ }])
+ .await
+ }
+ None => {
+ let message = format!("Provider for vss_id {} does not exist", vss_id);
+ Err((ActuationError::ProviderNotAvailable, message))
+ }
+ }
+ }
+
+ async fn can_write_actuator_target(
+ &self,
+ vss_id: &i32,
+ ) -> Result<(), (ActuationError, String)> {
+ let result_entry = self.get_entry_by_id(*vss_id).await;
+ match result_entry {
+ Ok(entry) => {
+ let vss_path = entry.metadata.path;
+ let result_can_write_actuator =
+ self.permissions.can_write_actuator_target(&vss_path);
+ match result_can_write_actuator {
+ Ok(_) => Ok(()),
+ Err(PermissionError::Denied) => {
+ let message = format!("Permission denied for vss_path {}", vss_path);
+ Err((ActuationError::PermissionDenied, message))
+ }
+ Err(PermissionError::Expired) => Err((
+ ActuationError::PermissionExpired,
+ "Permission expired".to_string(),
+ )),
+ }
+ }
+ Err(ReadError::NotFound) => {
+ let message = format!("Could not resolve vss_path of vss_id {}", vss_id);
+ Err((ActuationError::NotFound, message))
+ }
+ Err(ReadError::PermissionDenied) => {
+ let message = format!("Permission denied for vss_id {}", vss_id);
+ Err((ActuationError::PermissionDenied, message))
+ }
+ Err(ReadError::PermissionExpired) => Err((
+ ActuationError::PermissionExpired,
+ "Permission expired".to_string(),
+ )),
+ }
+ }
+
+ async fn validate_actuator_update(
+ &self,
+ vss_id: &i32,
+ data_value: &DataValue,
+ ) -> Result<(), (ActuationError, String)> {
+ let result_entry = self.get_entry_by_id(*vss_id).await;
+ match result_entry {
+ Ok(entry) => {
+ let metadata = entry.metadata.clone();
+ let vss_path = metadata.path;
+ if metadata.entry_type != EntryType::Actuator {
+ let message = format!("Tried to set a value for a non-actuator: {}", vss_path);
+ return Err((ActuationError::WrongType, message));
+ }
+ let validation = entry.validate_actuator_value(data_value);
+ match validation {
+ Ok(_) => Ok(()),
+ Err(UpdateError::OutOfBoundsMinMax) => {
+ let message = format!(
+ "Out of bounds min/max value provided for {}: {} | Expected range [min: {}, max: {}]",
+ vss_path,
+ data_value,
+ metadata.min.map_or("None".to_string(), |value| value.to_string()),
+ metadata.max.map_or("None".to_string(), |value| value.to_string()),
+ );
+ Err((ActuationError::OutOfBounds, message.to_string()))
+ }
+ Err(UpdateError::OutOfBoundsAllowed) => {
+ let message = format!(
+ "Out of bounds allowed value provided for {}: {} | Expected values [{}]",
+ vss_path,
+ data_value,
+ metadata.allowed.map_or("None".to_string(), |value| value.to_string())
+ );
+ Err((ActuationError::OutOfBounds, message.to_string()))
+ }
+ Err(UpdateError::OutOfBoundsType) => {
+ let message = format!(
+ "Out of bounds type value provided for {}: {} | overflow for {}",
+ vss_path, data_value, metadata.data_type,
+ );
+ Err((ActuationError::OutOfBounds, message.to_string()))
+ }
+ Err(UpdateError::UnsupportedType) => {
+ let message = format!(
+ "Unsupported type for vss_path {}. Expected type: {}",
+ vss_path, metadata.data_type
+ );
+ Err((ActuationError::UnsupportedType, message))
+ }
+ Err(UpdateError::WrongType) => {
+ let message = format!(
+ "Wrong type for vss_path {}. Expected type: {}",
+ vss_path, metadata.data_type
+ );
+ Err((ActuationError::WrongType, message))
+ }
+ // Redundant errors in case UpdateError includes new errors in the future
+ Err(UpdateError::NotFound) => {
+ let message = format!("Could not resolve vss_path {}", vss_path);
+ Err((ActuationError::NotFound, message))
+ }
+ Err(UpdateError::PermissionDenied) => {
+ let message = format!("Permission denied for vss_path {}", vss_path);
+ Err((ActuationError::PermissionDenied, message))
+ }
+ Err(UpdateError::PermissionExpired) => Err((
+ ActuationError::PermissionExpired,
+ "Permission expired".to_string(),
+ )),
+ }
+ }
+ Err(ReadError::NotFound) => {
+ let message = format!("Could not resolve vss_path of vss_id {}", vss_id);
+ Err((ActuationError::NotFound, message))
+ }
+ Err(ReadError::PermissionDenied) => {
+ let message = format!("Permission denied for vss_id {}", vss_id);
+ Err((ActuationError::PermissionDenied, message))
+ }
+ Err(ReadError::PermissionExpired) => Err((
+ ActuationError::PermissionExpired,
+ "Permission expired".to_string(),
+ )),
+ }
+ }
}
impl DataBroker {
- pub fn new(version: impl Into) -> Self {
+ pub fn new(version: impl Into, commit_sha: impl Into) -> Self {
let (shutdown_trigger, _) = broadcast::channel::<()>(1);
DataBroker {
database: Default::default(),
subscriptions: Default::default(),
version: version.into(),
+ commit_sha: commit_sha.into(),
shutdown_trigger,
}
}
@@ -1651,13 +2110,14 @@ impl DataBroker {
pub fn start_housekeeping_task(&self) {
info!("Starting housekeeping task");
let subscriptions = self.subscriptions.clone();
+
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(1));
loop {
interval.tick().await;
- // Cleanup dropped subscriptions
- subscriptions.write().await.cleanup();
+
+ subscriptions.write().await.cleanup(); // Cleanup dropped subscriptions
}
});
}
@@ -1678,21 +2138,35 @@ impl DataBroker {
pub fn get_version(&self) -> &str {
&self.version
}
+
+ pub fn get_commit_sha(&self) -> &str {
+ &self.commit_sha
+ }
}
impl Default for DataBroker {
fn default() -> Self {
- Self::new("")
+ Self::new("", "")
}
}
#[cfg(test)]
-mod tests {
+/// Public test module to allow other files to reuse helper functions
+pub mod tests {
use crate::permissions;
use super::*;
use tokio_stream::StreamExt;
+ #[tokio::test]
+ async fn test_databroker_version_and_commit_sha() {
+ let version = "1.1.1";
+ let commit_sha = "3a3c332f5427f2db7a0b8582262c9f5089036c23";
+ let databroker = DataBroker::new(version, commit_sha);
+ assert_eq!(databroker.get_version(), version);
+ assert_eq!(databroker.get_commit_sha(), commit_sha);
+ }
+
#[tokio::test]
async fn test_register_datapoint() {
let broker = DataBroker::default();
@@ -1705,6 +2179,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::BoolArray(Vec::from([true]))),
Some("kg".to_string()),
)
@@ -1737,6 +2213,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 2".to_owned(),
+ None, // min
+ None, // max
None,
Some("km".to_string()),
)
@@ -1766,6 +2244,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1 (modified)".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1787,6 +2267,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test signal 3".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::Int32Array(Vec::from([1, 2, 3, 4]))),
None,
)
@@ -1811,6 +2293,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1824,6 +2308,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Actuator,
"Test datapoint 2".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1867,6 +2353,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1899,6 +2387,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1921,6 +2411,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -1972,6 +2464,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::Int32Array(vec![100])),
None,
)
@@ -1993,6 +2487,8 @@ mod tests {
data_type: None,
description: None,
allowed: Some(Some(DataValue::Int32Array(vec![100]))),
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2015,6 +2511,8 @@ mod tests {
data_type: None,
description: None,
allowed: Some(Some(DataValue::BoolArray(vec![true]))),
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2037,6 +2535,8 @@ mod tests {
data_type: None,
description: None,
allowed: Some(None),
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2060,6 +2560,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2080,39 +2582,731 @@ mod tests {
}
}
- #[tokio::test]
- async fn test_subscribe_query_and_get() {
- let broker = DataBroker::default();
- let broker = broker.authorized_access(&permissions::ALLOW_ALL);
-
- let id1 = broker
+ // Helper for adding an int8 signal and adding value
+ async fn helper_add_int8(
+ broker: &DataBroker,
+ name: &str,
+ value: i32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
.add_entry(
- "test.datapoint1".to_owned(),
- DataType::Int32,
+ name.to_owned(),
+ DataType::Int8,
ChangeType::OnChange,
EntryType::Sensor,
- "Test datapoint 1".to_owned(),
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int32(-5)), // min
+ Some(types::DataValue::Int32(10)), // max
None,
None,
)
.await
- .expect("Register datapoint should succeed");
-
- let mut stream = broker
- .subscribe_query("SELECT test.datapoint1")
- .await
- .expect("Setup subscription");
+ .unwrap();
- // Expect an initial query response
- // No value has been set yet, so value should be NotAvailable
- match stream.next().await {
- Some(query_resp) => {
- assert_eq!(query_resp.fields.len(), 1);
- assert_eq!(query_resp.fields[0].name, "test.datapoint1");
- assert_eq!(query_resp.fields[0].value, DataValue::NotAvailable);
- }
- None => {
- panic!("did not expect stream end")
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int32(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_int8() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int8(&broker, "test.datapoint1", -6, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int8(&broker, "test.datapoint2", -5, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+ match helper_add_int8(&broker, "test.datapoint3", 11, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int8(&broker, "test.datapoint4", 10, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ // Helper for adding an int16 signal and adding value
+ async fn helper_add_int16(
+ broker: &DataBroker,
+ name: &str,
+ value: i32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Int16,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int32(-5)), // min
+ Some(types::DataValue::Int32(10)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int32(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_int16() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int16(&broker, "test.datapoint1", -6, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int16(&broker, "test.datapoint2", -5, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+ match helper_add_int16(&broker, "test.datapoint3", 11, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int16(&broker, "test.datapoint4", 10, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ // Helper for adding an int32 signal and adding value
+ pub async fn helper_add_int32(
+ broker: &DataBroker,
+ name: &str,
+ value: i32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Int32,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int32(-500)), // min
+ Some(types::DataValue::Int32(1000)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int32(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_exceeded() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int32(&broker, "test.datapoint1", -501, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_equal() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ if helper_add_int32(&broker, "test.datapoint1", -500, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_max_exceeded() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int32(&broker, "test.datapoint1", 1001, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_max_equal() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ if helper_add_int32(&broker, "test.datapoint1", 1000, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ /// Helper for adding an int64 signal and adding value
+ async fn helper_add_int64(
+ broker: &DataBroker,
+ name: &str,
+ value: i64,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Int64,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int64(-500000)), // min
+ Some(types::DataValue::Int64(10000000)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int64(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_int64() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int64(&broker, "test.datapoint1", -500001, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int64(&broker, "test.datapoint2", -500000, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+ match helper_add_int64(&broker, "test.datapoint3", 10000001, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_int64(&broker, "test.datapoint4", 10000000, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ /// Helper for adding a uint8 signal and adding value
+ async fn helper_add_uint8(
+ broker: &DataBroker,
+ name: &str,
+ value: u32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Uint8,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Uint32(3)), // min
+ Some(types::DataValue::Uint32(26)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Uint32(value),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_uint8() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_uint8(&broker, "test.datapoint1", 2, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_uint8(&broker, "test.datapoint2", 3, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+ match helper_add_uint8(&broker, "test.datapoint3", 27, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ if helper_add_uint8(&broker, "test.datapoint4", 26, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ // Helper for adding an int32 array signal and adding value
+ async fn helper_add_int32array(
+ broker: &DataBroker,
+ name: &str,
+ value1: i32,
+ value2: i32,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::Int32Array,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Int32(-500)), // min
+ Some(types::DataValue::Int32(1000)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::Int32Array(Vec::from([value1, value2])),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_exceeded_int32array() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ // First item out of bound
+ match helper_add_int32array(&broker, "test.datapoint1", -501, -500, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ // Second item out of bound
+ match helper_add_int32array(&broker, "test.datapoint2", -500, -501, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_equal_int32array() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ if helper_add_int32array(&broker, "test.datapoint1", -500, -500, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_max_exceeded_int32array() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ match helper_add_int32array(&broker, "test.datapoint1", 1001, 1000, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ match helper_add_int32array(&broker, "test.datapoint2", 100, 1001, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_max_equal_int32array() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ if helper_add_int32array(&broker, "test.datapoint1", 1000, 1000, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ // Helper for adding a double array signal and adding value
+ async fn helper_add_doublearray(
+ broker: &DataBroker,
+ name: &str,
+ value1: f64,
+ value2: f64,
+ timestamp: std::time::SystemTime,
+ ) -> Result> {
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ name.to_owned(),
+ DataType::DoubleArray,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Some Description That Does Not Matter".to_owned(),
+ Some(types::DataValue::Double(-500.2)), // min
+ Some(types::DataValue::Double(1000.2)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ match authorized_access
+ .update_entries([(
+ entry_id,
+ EntryUpdate {
+ path: None,
+ datapoint: Some(Datapoint {
+ ts: timestamp,
+ source_ts: None,
+ value: types::DataValue::DoubleArray(Vec::from([value1, value2])),
+ }),
+ actuator_target: None,
+ entry_type: None,
+ data_type: None,
+ description: None,
+ allowed: None,
+ min: None,
+ max: None,
+ unit: None,
+ },
+ )])
+ .await
+ {
+ Ok(_) => Ok(entry_id),
+ Err(details) => Err(details),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_update_entries_min_max_doublearray() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ // First item out of bound
+ match helper_add_doublearray(&broker, "test.datapoint1", -500.3, -500.0, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+ // Second item out of bound
+ match helper_add_doublearray(&broker, "test.datapoint2", -500.0, -500.3, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ // Both on min
+ if helper_add_doublearray(&broker, "test.datapoint3", -500.2, -500.2, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+
+ // First too large
+ match helper_add_doublearray(&broker, "test.datapoint4", 1000.3, 1000.0, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ // Second too large
+ match helper_add_doublearray(&broker, "test.datapoint5", 1000.0, 1000.3, timestamp).await {
+ Err(err_vec) => {
+ assert_eq!(err_vec.len(), 1);
+ assert_eq!(err_vec.first().expect("").1, UpdateError::OutOfBoundsMinMax)
+ }
+ _ => panic!("Failure expected"),
+ }
+
+ // Both on max
+ if helper_add_doublearray(&broker, "test.datapoint6", 1000.2, 1000.2, timestamp)
+ .await
+ .is_err()
+ {
+ panic!("Success expected")
+ }
+ }
+
+ #[tokio::test]
+ async fn test_subscribe_query_and_get() {
+ let broker = DataBroker::default();
+ let broker = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let id1 = broker
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ DataType::Int32,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut stream = broker
+ .subscribe_query("SELECT test.datapoint1")
+ .await
+ .expect("Setup subscription");
+
+ // Expect an initial query response
+ // No value has been set yet, so value should be NotAvailable
+ match stream.next().await {
+ Some(query_resp) => {
+ assert_eq!(query_resp.fields.len(), 1);
+ assert_eq!(query_resp.fields[0].name, "test.datapoint1");
+ assert_eq!(query_resp.fields[0].value, DataValue::NotAvailable);
+ }
+ None => {
+ panic!("did not expect stream end")
}
}
@@ -2131,6 +3325,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2175,6 +3371,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2233,6 +3431,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2277,6 +3477,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2316,6 +3518,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2352,6 +3556,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1 (new description)".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2375,6 +3581,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2417,6 +3625,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2430,6 +3640,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 2".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2473,6 +3685,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
),
@@ -2490,6 +3704,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
),
@@ -2531,6 +3747,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2553,6 +3771,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2596,6 +3816,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2623,6 +3845,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2671,6 +3895,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::StringArray(vec![
String::from("yes"),
String::from("no"),
@@ -2703,6 +3929,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2760,6 +3988,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2815,6 +4045,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2839,6 +4071,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2861,6 +4095,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2890,6 +4126,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2925,6 +4163,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -2947,6 +4187,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -2976,6 +4218,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -3014,6 +4258,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test array".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -3036,6 +4282,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -3067,8 +4315,7 @@ mod tests {
}
}
- #[tokio::test]
- async fn test_subscribe_and_get() {
+ async fn test_subscribe_and_get_buffer_size(buffer_size: Option) {
let broker = DataBroker::default();
let broker = broker.authorized_access(&permissions::ALLOW_ALL);
@@ -3079,6 +4326,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -3086,20 +4335,23 @@ mod tests {
.expect("Register datapoint should succeed");
let mut stream = broker
- .subscribe(HashMap::from([(id1, HashSet::from([Field::Datapoint]))]))
+ .subscribe(
+ HashMap::from([(id1, HashSet::from([Field::Datapoint]))]),
+ buffer_size,
+ )
.await
.expect("subscription should succeed");
// Stream should yield initial notification with current values i.e. NotAvailable
match stream.next().await {
- Some(next) => {
- assert_eq!(next.updates.len(), 1);
+ Some(entry) => {
+ assert_eq!(entry.updates.len(), 1);
assert_eq!(
- next.updates[0].update.path,
+ entry.updates[0].update.path,
Some("test.datapoint1".to_string())
);
assert_eq!(
- next.updates[0].update.datapoint.as_ref().unwrap().value,
+ entry.updates[0].update.datapoint.as_ref().unwrap().value,
DataValue::NotAvailable
);
}
@@ -3123,6 +4375,8 @@ mod tests {
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)])
@@ -3131,14 +4385,14 @@ mod tests {
// Value has been set, expect the next item in stream to match.
match stream.next().await {
- Some(next) => {
- assert_eq!(next.updates.len(), 1);
+ Some(entry) => {
+ assert_eq!(entry.updates.len(), 1);
assert_eq!(
- next.updates[0].update.path,
+ entry.updates[0].update.path,
Some("test.datapoint1".to_string())
);
assert_eq!(
- next.updates[0].update.datapoint.as_ref().unwrap().value,
+ entry.updates[0].update.datapoint.as_ref().unwrap().value,
DataValue::Int32(101)
);
}
@@ -3161,6 +4415,49 @@ mod tests {
}
}
+ #[tokio::test]
+ async fn test_subscribe_and_get() {
+ // None and 0-1000 is valid range
+ test_subscribe_and_get_buffer_size(None).await;
+ test_subscribe_and_get_buffer_size(Some(0)).await;
+ test_subscribe_and_get_buffer_size(Some(1000)).await;
+ }
+
+ #[tokio::test]
+ async fn test_subscribe_buffersize_out_of_range() {
+ let broker = DataBroker::default();
+ let broker = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let id1 = broker
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ DataType::Int32,
+ ChangeType::OnChange,
+ EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ match broker
+ .subscribe(
+ HashMap::from([(id1, HashSet::from([Field::Datapoint]))]),
+ // 1001 is just outside valid range 0-1000
+ Some(1001),
+ )
+ .await
+ {
+ Err(SubscriptionError::InvalidBufferSize) => {}
+ _ => {
+ panic!("expected it to fail with InvalidBufferSize");
+ }
+ }
+ }
+
#[tokio::test]
async fn test_metadata_for_each() {
let db = DataBroker::default();
@@ -3173,6 +4470,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test signal".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -3185,6 +4484,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Run of the mill test signal".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -3216,6 +4517,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test signal 3".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::Int32Array(Vec::from([1, 2, 3, 4]))),
None,
)
@@ -3230,6 +4533,8 @@ mod tests {
ChangeType::OnChange,
EntryType::Sensor,
"Test datapoint".to_owned(),
+ None, // min
+ None, // max
Some(DataValue::BoolArray(Vec::from([true]))),
None,
)
diff --git a/databroker/src/grpc/kuksa_val_v1/conversions.rs b/databroker/src/grpc/kuksa_val_v1/conversions.rs
index 6dc5d197..59d6472f 100644
--- a/databroker/src/grpc/kuksa_val_v1/conversions.rs
+++ b/databroker/src/grpc/kuksa_val_v1/conversions.rs
@@ -333,7 +333,6 @@ impl From for proto::DataEntry {
metadata: {
let metadata = proto::Metadata {
unit: from.unit,
- description: from.description,
..Default::default()
};
Some(metadata)
diff --git a/databroker/src/grpc/kuksa_val_v1/val.rs b/databroker/src/grpc/kuksa_val_v1/val.rs
index 8d6ddb9c..8f5b6ca4 100644
--- a/databroker/src/grpc/kuksa_val_v1/val.rs
+++ b/databroker/src/grpc/kuksa_val_v1/val.rs
@@ -43,6 +43,7 @@ use crate::broker::SubscriptionError;
use crate::broker::{AuthorizedAccess, EntryReadAccess};
use crate::glob::Matcher;
use crate::permissions::Permissions;
+use crate::types::{DataType, DataValue};
const MAX_REQUEST_PATH_LENGTH: usize = 1000;
@@ -623,7 +624,7 @@ impl proto::val_server::Val for broker::DataBroker {
}
}
- match broker.subscribe(entries).await {
+ match broker.subscribe(entries, None).await {
Ok(stream) => {
let stream = convert_to_proto_stream(stream);
Ok(tonic::Response::new(Box::pin(stream)))
@@ -638,6 +639,10 @@ impl proto::val_server::Val for broker::DataBroker {
Err(SubscriptionError::InternalError) => {
Err(tonic::Status::new(tonic::Code::Internal, "Internal Error"))
}
+ Err(SubscriptionError::InvalidBufferSize) => Err(tonic::Status::new(
+ tonic::Code::InvalidArgument,
+ "Subscription buffer_size max allowed value is 1000",
+ )),
}
}
@@ -712,11 +717,27 @@ fn convert_to_data_entry_error(path: &String, error: &broker::UpdateError) -> Da
message: "cannot set datapoint to value of unsupported type".to_string(),
}),
},
- broker::UpdateError::OutOfBounds => DataEntryError {
+ broker::UpdateError::OutOfBoundsAllowed => DataEntryError {
+ path: path.clone(),
+ error: Some(proto::Error {
+ code: 400,
+ reason: String::from("value out of allowed bounds"),
+ message: String::from("given value exceeds type's boundaries"),
+ }),
+ },
+ broker::UpdateError::OutOfBoundsMinMax => DataEntryError {
+ path: path.clone(),
+ error: Some(proto::Error {
+ code: 400,
+ reason: String::from("value out of min/max bounds"),
+ message: String::from("given value exceeds type's boundaries"),
+ }),
+ },
+ broker::UpdateError::OutOfBoundsType => DataEntryError {
path: path.clone(),
error: Some(proto::Error {
code: 400,
- reason: String::from("value out of bounds"),
+ reason: String::from("value out of type bounds"),
message: String::from("given value exceeds type's boundaries"),
}),
},
@@ -818,72 +839,147 @@ fn proto_entry_from_entry_and_fields(
}
if all || fields.contains(&proto::Field::MetadataValueRestriction) {
metadata_is_set = true;
- metadata.value_restriction = match entry.metadata().allowed.as_ref() {
- Some(allowed) => match allowed {
- broker::DataValue::StringArray(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::String(
- proto::ValueRestrictionString {
- allowed_values: vec.clone(),
- },
- )),
- }),
- broker::DataValue::Int32Array(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::Signed(
- proto::ValueRestrictionInt {
- allowed_values: vec.iter().cloned().map(i64::from).collect(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::Int64Array(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::Signed(
- proto::ValueRestrictionInt {
- allowed_values: vec.clone(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::Uint32Array(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::Unsigned(
- proto::ValueRestrictionUint {
- allowed_values: vec.iter().cloned().map(u64::from).collect(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::Uint64Array(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::Unsigned(
- proto::ValueRestrictionUint {
- allowed_values: vec.clone(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::FloatArray(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::FloatingPoint(
- proto::ValueRestrictionFloat {
- allowed_values: vec.iter().cloned().map(f64::from).collect(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- broker::DataValue::DoubleArray(vec) => Some(proto::ValueRestriction {
- r#type: Some(proto::value_restriction::Type::FloatingPoint(
- proto::ValueRestrictionFloat {
- allowed_values: vec.clone(),
- min: None, // TODO: Implement
- max: None, // TODO: Implement
- },
- )),
- }),
- _ => None,
- },
- None => None,
+ debug!("Datatype {:?} to be handled", entry.metadata().data_type);
+ match entry.metadata().data_type {
+ DataType::String | DataType::StringArray => {
+ let allowed = match entry.metadata().allowed.as_ref() {
+ Some(broker::DataValue::StringArray(vec)) => vec.clone(),
+ _ => Vec::new(),
+ };
+
+ if !allowed.is_empty() {
+ metadata.value_restriction = Some(proto::ValueRestriction {
+ r#type: Some(proto::value_restriction::Type::String(
+ proto::ValueRestrictionString {
+ allowed_values: allowed,
+ },
+ )),
+ });
+ };
+ }
+ DataType::Int8
+ | DataType::Int16
+ | DataType::Int32
+ | DataType::Int64
+ | DataType::Int8Array
+ | DataType::Int16Array
+ | DataType::Int32Array
+ | DataType::Int64Array => {
+ let min_value = match entry.metadata().min {
+ Some(DataValue::Int32(value)) => Some(i64::from(value)),
+ Some(DataValue::Int64(value)) => Some(value),
+ _ => None,
+ };
+ let max_value = match entry.metadata().max {
+ Some(DataValue::Int32(value)) => Some(i64::from(value)),
+ Some(DataValue::Int64(value)) => Some(value),
+ _ => None,
+ };
+ let allowed = match entry.metadata().allowed.as_ref() {
+ Some(allowed) => match allowed {
+ broker::DataValue::Int32Array(vec) => {
+ vec.iter().cloned().map(i64::from).collect()
+ }
+ broker::DataValue::Int64Array(vec) => vec.to_vec(),
+ _ => Vec::new(),
+ },
+ _ => Vec::new(),
+ };
+
+ if min_value.is_some() | max_value.is_some() | !allowed.is_empty() {
+ metadata.value_restriction = Some(proto::ValueRestriction {
+ r#type: Some(proto::value_restriction::Type::Signed(
+ proto::ValueRestrictionInt {
+ allowed_values: allowed,
+ min: min_value,
+ max: max_value,
+ },
+ )),
+ });
+ };
+ }
+ DataType::Uint8
+ | DataType::Uint16
+ | DataType::Uint32
+ | DataType::Uint64
+ | DataType::Uint8Array
+ | DataType::Uint16Array
+ | DataType::Uint32Array
+ | DataType::Uint64Array => {
+ let min_value = match entry.metadata().min {
+ Some(DataValue::Uint32(value)) => Some(u64::from(value)),
+ Some(DataValue::Uint64(value)) => Some(value),
+ _ => None,
+ };
+ let max_value = match entry.metadata().max {
+ Some(DataValue::Uint32(value)) => Some(u64::from(value)),
+ Some(DataValue::Uint64(value)) => Some(value),
+ _ => None,
+ };
+ let allowed = match entry.metadata().allowed.as_ref() {
+ Some(allowed) => match allowed {
+ broker::DataValue::Uint32Array(vec) => {
+ vec.iter().cloned().map(u64::from).collect()
+ }
+ broker::DataValue::Uint64Array(vec) => vec.to_vec(),
+ _ => Vec::new(),
+ },
+ _ => Vec::new(),
+ };
+
+ if min_value.is_some() | max_value.is_some() | !allowed.is_empty() {
+ metadata.value_restriction = Some(proto::ValueRestriction {
+ r#type: Some(proto::value_restriction::Type::Unsigned(
+ proto::ValueRestrictionUint {
+ allowed_values: allowed,
+ min: min_value,
+ max: max_value,
+ },
+ )),
+ });
+ };
+ }
+ DataType::Float
+ | DataType::Double
+ | DataType::FloatArray
+ | DataType::DoubleArray => {
+ let min_value = match entry.metadata().min {
+ Some(DataValue::Float(value)) => Some(f64::from(value)),
+ Some(DataValue::Double(value)) => Some(value),
+ _ => None,
+ };
+ let max_value = match entry.metadata().max {
+ Some(DataValue::Float(value)) => Some(f64::from(value)),
+ Some(DataValue::Double(value)) => Some(value),
+ _ => None,
+ };
+ let allowed = match entry.metadata().allowed.as_ref() {
+ Some(allowed) => match allowed {
+ broker::DataValue::FloatArray(vec) => {
+ vec.iter().cloned().map(f64::from).collect()
+ }
+ broker::DataValue::DoubleArray(vec) => vec.to_vec(),
+ _ => Vec::new(),
+ },
+ _ => Vec::new(),
+ };
+
+ if min_value.is_some() | max_value.is_some() | !allowed.is_empty() {
+ metadata.value_restriction = Some(proto::ValueRestriction {
+ r#type: Some(proto::value_restriction::Type::FloatingPoint(
+ proto::ValueRestrictionFloat {
+ allowed_values: allowed,
+ min: min_value,
+ max: max_value,
+ },
+ )),
+ });
+ };
+ }
+
+ _ => {
+ debug!("Datatype {:?} not yet handled", entry.metadata().data_type);
+ }
}
}
if all || fields.contains(&proto::Field::MetadataActuator) {
@@ -1005,28 +1101,6 @@ impl<'a> opentelemetry::propagation::Extractor for MetadataMapExtractor<'a> {
}
}
-#[cfg(feature="otel")]
-#[cfg_attr(feature="otel", tracing::instrument(name="val_read_incoming_trace_id", skip(request), fields(timestamp=chrono::Utc::now().to_string())))]
-fn read_incoming_trace_id(request: tonic::Request) -> (String, tonic::Request){
- let mut trace_id: String = String::from("");
- let request_copy = tonic::Request::new(request.get_ref().clone());
- for request in request_copy.into_inner().updates {
- match &request.entry {
- Some(entry) => match &entry.metadata {
- Some(metadata) => match &metadata.description{
- Some(description)=> {
- trace_id = String::from(description);
- }
- None => trace_id = String::from("")
- }
- None => trace_id = String::from("")
- }
- None => trace_id = String::from("")
- }
- }
- return(trace_id, request);
-}
-
impl broker::EntryUpdate {
#[cfg_attr(feature="otel", tracing::instrument(name = "val_from_proto_entry_and_fields",skip(entry,fields), fields(timestamp=chrono::Utc::now().to_string())))]
fn from_proto_entry_and_fields(
@@ -1066,8 +1140,10 @@ impl broker::EntryUpdate {
actuator_target,
entry_type: None,
data_type: None,
- description: metadata_description.cloned(),
+ description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
}
}
@@ -1091,6 +1167,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1146,6 +1224,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
Some("km/h".to_owned()),
)
@@ -1242,6 +1322,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1255,6 +1337,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test branch datapoint 2".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
@@ -1303,6 +1387,8 @@ mod tests {
broker::ChangeType::OnChange,
broker::EntryType::Sensor,
"Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
None,
None,
)
diff --git a/databroker/src/grpc/kuksa_val_v2/conversions.rs b/databroker/src/grpc/kuksa_val_v2/conversions.rs
new file mode 100644
index 00000000..e632e4f7
--- /dev/null
+++ b/databroker/src/grpc/kuksa_val_v2/conversions.rs
@@ -0,0 +1,538 @@
+// /********************************************************************************
+// * Copyright (c) 2024 Contributors to the Eclipse Foundation
+// *
+// * See the NOTICE file(s) distributed with this work for additional
+// * information regarding copyright ownership.
+// *
+// * This program and the accompanying materials are made available under the
+// * terms of the Apache License 2.0 which is available at
+// * http://www.apache.org/licenses/LICENSE-2.0
+// *
+// * SPDX-License-Identifier: Apache-2.0
+// ********************************************************************************/
+use crate::broker;
+use crate::types::DataValue;
+use databroker_proto::kuksa::val::v2 as proto;
+use kuksa::proto::v2::{
+ BoolArray, DoubleArray, FloatArray, Int32Array, Int64Array, StringArray, Uint32Array,
+ Uint64Array,
+};
+
+use std::time::SystemTime;
+use tracing::debug;
+
+impl From<&proto::Datapoint> for broker::Datapoint {
+ fn from(datapoint: &proto::Datapoint) -> Self {
+ let value = broker::DataValue::from(datapoint);
+ let ts = SystemTime::now();
+
+ match &datapoint.timestamp {
+ Some(source_timestamp) => {
+ let source: Option = match source_timestamp.clone().try_into() {
+ Ok(source) => Some(source),
+ Err(_) => None,
+ };
+ broker::Datapoint {
+ ts,
+ source_ts: source,
+ value,
+ }
+ }
+ None => broker::Datapoint {
+ ts,
+ source_ts: None,
+ value,
+ },
+ }
+ }
+}
+
+impl From for Option {
+ fn from(from: broker::Datapoint) -> Self {
+ match from.value {
+ broker::DataValue::NotAvailable => Some(proto::Datapoint {
+ value: None,
+ timestamp: Some(from.ts.into()),
+ }),
+ broker::DataValue::Bool(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(value)),
+ }),
+ }),
+ broker::DataValue::String(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::String(value)),
+ }),
+ }),
+ broker::DataValue::Int32(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32(value)),
+ }),
+ }),
+ broker::DataValue::Int64(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int64(value)),
+ }),
+ }),
+ broker::DataValue::Uint32(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint32(value)),
+ }),
+ }),
+ broker::DataValue::Uint64(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint64(value)),
+ }),
+ }),
+ broker::DataValue::Float(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Float(value)),
+ }),
+ }),
+ broker::DataValue::Double(value) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Double(value)),
+ }),
+ }),
+ broker::DataValue::BoolArray(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::BoolArray(proto::BoolArray {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::StringArray(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::StringArray(proto::StringArray {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::Int32Array(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32Array(proto::Int32Array {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::Int64Array(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int64Array(proto::Int64Array {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::Uint32Array(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint32Array(proto::Uint32Array {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::Uint64Array(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint64Array(proto::Uint64Array {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::FloatArray(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::FloatArray(proto::FloatArray {
+ values,
+ })),
+ }),
+ }),
+ broker::DataValue::DoubleArray(values) => Some(proto::Datapoint {
+ timestamp: Some(from.ts.into()),
+ value: Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::DoubleArray(proto::DoubleArray {
+ values,
+ })),
+ }),
+ }),
+ }
+ }
+}
+
+impl From<&proto::Datapoint> for broker::DataValue {
+ fn from(datapoint: &proto::Datapoint) -> Self {
+ match &datapoint.value {
+ Some(value) => match &value.typed_value {
+ Some(proto::value::TypedValue::String(value)) => {
+ broker::DataValue::String(value.to_owned())
+ }
+ Some(proto::value::TypedValue::Bool(value)) => broker::DataValue::Bool(*value),
+ Some(proto::value::TypedValue::Int32(value)) => broker::DataValue::Int32(*value),
+ Some(proto::value::TypedValue::Int64(value)) => broker::DataValue::Int64(*value),
+ Some(proto::value::TypedValue::Uint32(value)) => broker::DataValue::Uint32(*value),
+ Some(proto::value::TypedValue::Uint64(value)) => broker::DataValue::Uint64(*value),
+ Some(proto::value::TypedValue::Float(value)) => broker::DataValue::Float(*value),
+ Some(proto::value::TypedValue::Double(value)) => broker::DataValue::Double(*value),
+ Some(proto::value::TypedValue::StringArray(array)) => {
+ broker::DataValue::StringArray(array.values.clone())
+ }
+ Some(proto::value::TypedValue::BoolArray(array)) => {
+ broker::DataValue::BoolArray(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Int32Array(array)) => {
+ broker::DataValue::Int32Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Int64Array(array)) => {
+ broker::DataValue::Int64Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Uint32Array(array)) => {
+ broker::DataValue::Uint32Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::Uint64Array(array)) => {
+ broker::DataValue::Uint64Array(array.values.clone())
+ }
+ Some(proto::value::TypedValue::FloatArray(array)) => {
+ broker::DataValue::FloatArray(array.values.clone())
+ }
+ Some(proto::value::TypedValue::DoubleArray(array)) => {
+ broker::DataValue::DoubleArray(array.values.clone())
+ }
+ None => broker::DataValue::NotAvailable,
+ },
+ None => broker::DataValue::NotAvailable,
+ }
+ }
+}
+
+impl From<&broker::Metadata> for proto::Metadata {
+ fn from(metadata: &broker::Metadata) -> Self {
+ proto::Metadata {
+ id: metadata.id,
+ data_type: proto::DataType::from(metadata.data_type.clone()) as i32,
+ entry_type: proto::EntryType::from(metadata.entry_type.clone()) as i32,
+ description: metadata.description.clone(),
+ comment: String::new(),
+ deprecation: String::new(),
+ unit: metadata.unit.clone().unwrap_or_default(),
+ allowed_values: transform_allowed(&metadata.allowed),
+ min: transform_min_max(&metadata.min),
+ max: transform_min_max(&metadata.max),
+ }
+ }
+}
+
+fn transform_allowed(value: &Option) -> Option {
+ match value {
+ Some(value) => match value {
+ DataValue::BoolArray(_) => Some(proto::Value::from(value.clone())),
+ DataValue::StringArray(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Int32Array(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Int64Array(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Uint32Array(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Uint64Array(_) => Some(proto::Value::from(value.clone())),
+ DataValue::FloatArray(_) => Some(proto::Value::from(value.clone())),
+ DataValue::DoubleArray(_) => Some(proto::Value::from(value.clone())),
+ _ => {
+ debug!("Wrong datatype used for allowed values");
+ None
+ }
+ },
+ None => None,
+ }
+}
+
+fn transform_min_max(value: &Option) -> Option {
+ match value {
+ Some(value) => match value {
+ DataValue::Bool(_) => Some(proto::Value::from(value.clone())),
+ DataValue::String(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Int32(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Int64(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Uint32(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Uint64(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Float(_) => Some(proto::Value::from(value.clone())),
+ DataValue::Double(_) => Some(proto::Value::from(value.clone())),
+ _ => {
+ debug!("Wrong datatype used for min/max values");
+ None
+ }
+ },
+ None => None,
+ }
+}
+
+impl From<&broker::UpdateError> for proto::Error {
+ fn from(update_error: &broker::UpdateError) -> Self {
+ match update_error {
+ broker::UpdateError::NotFound => proto::Error {
+ code: proto::ErrorCode::NotFound.into(),
+ message: "Not Found".to_string(),
+ },
+ broker::UpdateError::WrongType => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Wrong Type".to_string(),
+ },
+ broker::UpdateError::OutOfBoundsAllowed => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Out of Bounds Allowed".to_string(),
+ },
+ broker::UpdateError::OutOfBoundsMinMax => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Out of Bounds MinMax".to_string(),
+ },
+ broker::UpdateError::OutOfBoundsType => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Out of Bounds Type".to_string(),
+ },
+ broker::UpdateError::UnsupportedType => proto::Error {
+ code: proto::ErrorCode::InvalidArgument.into(),
+ message: "Unsupported Type".to_string(),
+ },
+ broker::UpdateError::PermissionDenied => proto::Error {
+ code: proto::ErrorCode::PermissionDenied.into(),
+ message: "Permission Denied".to_string(),
+ },
+ broker::UpdateError::PermissionExpired => proto::Error {
+ code: proto::ErrorCode::PermissionDenied.into(),
+ message: "Permission Expired".to_string(),
+ },
+ }
+ }
+}
+
+impl From for proto::DataType {
+ fn from(from: broker::DataType) -> Self {
+ match from {
+ broker::DataType::String => proto::DataType::String,
+ broker::DataType::Bool => proto::DataType::Boolean,
+ broker::DataType::Int8 => proto::DataType::Int8,
+ broker::DataType::Int16 => proto::DataType::Int16,
+ broker::DataType::Int32 => proto::DataType::Int32,
+ broker::DataType::Int64 => proto::DataType::Int64,
+ broker::DataType::Uint8 => proto::DataType::Uint8,
+ broker::DataType::Uint16 => proto::DataType::Uint16,
+ broker::DataType::Uint32 => proto::DataType::Uint32,
+ broker::DataType::Uint64 => proto::DataType::Uint64,
+ broker::DataType::Float => proto::DataType::Float,
+ broker::DataType::Double => proto::DataType::Double,
+ broker::DataType::StringArray => proto::DataType::StringArray,
+ broker::DataType::BoolArray => proto::DataType::BooleanArray,
+ broker::DataType::Int8Array => proto::DataType::Int8Array,
+ broker::DataType::Int16Array => proto::DataType::Int16Array,
+ broker::DataType::Int32Array => proto::DataType::Int32Array,
+ broker::DataType::Int64Array => proto::DataType::Int64Array,
+ broker::DataType::Uint8Array => proto::DataType::Uint8Array,
+ broker::DataType::Uint16Array => proto::DataType::Uint16Array,
+ broker::DataType::Uint32Array => proto::DataType::Uint32Array,
+ broker::DataType::Uint64Array => proto::DataType::Uint64Array,
+ broker::DataType::FloatArray => proto::DataType::FloatArray,
+ broker::DataType::DoubleArray => proto::DataType::DoubleArray,
+ }
+ }
+}
+
+impl From for proto::EntryType {
+ fn from(from: broker::EntryType) -> Self {
+ match from {
+ broker::EntryType::Sensor => proto::EntryType::Sensor,
+ broker::EntryType::Attribute => proto::EntryType::Attribute,
+ broker::EntryType::Actuator => proto::EntryType::Actuator,
+ }
+ }
+}
+
+impl broker::UpdateError {
+ pub fn to_status_with_code(&self, id: &i32) -> tonic::Status {
+ match self {
+ broker::UpdateError::NotFound => tonic::Status::new(
+ tonic::Code::NotFound,
+ format!("Signal not found (id: {})", id),
+ ),
+ broker::UpdateError::WrongType => tonic::Status::new(
+ tonic::Code::InvalidArgument,
+ format!("Wrong type provided (id: {})", id),
+ ),
+ broker::UpdateError::OutOfBoundsAllowed => tonic::Status::new(
+ tonic::Code::InvalidArgument,
+ format!("Value out of allowed bounds (id: {})", id),
+ ),
+ broker::UpdateError::OutOfBoundsMinMax => tonic::Status::new(
+ tonic::Code::InvalidArgument,
+ format!("Value out of min/max bounds (id: {})", id),
+ ),
+ broker::UpdateError::OutOfBoundsType => tonic::Status::new(
+ tonic::Code::InvalidArgument,
+ format!("Value out of type bounds (id: {})", id),
+ ),
+ broker::UpdateError::UnsupportedType => tonic::Status::new(
+ tonic::Code::InvalidArgument,
+ format!("Unsupported type (id: {})", id),
+ ),
+ broker::UpdateError::PermissionDenied => tonic::Status::new(
+ tonic::Code::PermissionDenied,
+ format!("Permission denied (id: {})", id),
+ ),
+ broker::UpdateError::PermissionExpired => tonic::Status::new(
+ tonic::Code::Unauthenticated,
+ format!("Permission expired (id: {})", id),
+ ),
+ }
+ }
+}
+
+/// Converts a protobuf `Value` into the broker's internal `DataValue`.
+///
+/// An absent `typed_value` oneof maps to `DataValue::NotAvailable`, mirroring
+/// the inverse conversion below (which encodes `NotAvailable` as `typed_value: None`).
+impl From<proto::Value> for broker::DataValue {
+    fn from(value: proto::Value) -> Self {
+        match &value.typed_value {
+            Some(proto::value::TypedValue::String(value)) => {
+                broker::DataValue::String(value.to_owned())
+            }
+            Some(proto::value::TypedValue::Bool(value)) => broker::DataValue::Bool(*value),
+            Some(proto::value::TypedValue::Int32(value)) => broker::DataValue::Int32(*value),
+            Some(proto::value::TypedValue::Int64(value)) => broker::DataValue::Int64(*value),
+            Some(proto::value::TypedValue::Uint32(value)) => broker::DataValue::Uint32(*value),
+            Some(proto::value::TypedValue::Uint64(value)) => broker::DataValue::Uint64(*value),
+            Some(proto::value::TypedValue::Float(value)) => broker::DataValue::Float(*value),
+            Some(proto::value::TypedValue::Double(value)) => broker::DataValue::Double(*value),
+            Some(proto::value::TypedValue::StringArray(array)) => {
+                broker::DataValue::StringArray(array.values.clone())
+            }
+            Some(proto::value::TypedValue::BoolArray(array)) => {
+                broker::DataValue::BoolArray(array.values.clone())
+            }
+            Some(proto::value::TypedValue::Int32Array(array)) => {
+                broker::DataValue::Int32Array(array.values.clone())
+            }
+            Some(proto::value::TypedValue::Int64Array(array)) => {
+                broker::DataValue::Int64Array(array.values.clone())
+            }
+            Some(proto::value::TypedValue::Uint32Array(array)) => {
+                broker::DataValue::Uint32Array(array.values.clone())
+            }
+            Some(proto::value::TypedValue::Uint64Array(array)) => {
+                broker::DataValue::Uint64Array(array.values.clone())
+            }
+            Some(proto::value::TypedValue::FloatArray(array)) => {
+                broker::DataValue::FloatArray(array.values.clone())
+            }
+            Some(proto::value::TypedValue::DoubleArray(array)) => {
+                broker::DataValue::DoubleArray(array.values.clone())
+            }
+            // Was `todo!()`: a message with no typed_value would have panicked the
+            // server on malformed client input. Treat it as "no value available",
+            // the exact inverse of the DataValue -> proto::Value conversion.
+            None => broker::DataValue::NotAvailable,
+        }
+    }
+}
+
+/// Converts the broker's internal `DataValue` into a protobuf `Value`.
+///
+/// `NotAvailable` is encoded as an absent `typed_value` oneof; array variants
+/// are wrapped into their dedicated protobuf array message types.
+impl From<broker::DataValue> for proto::Value {
+    fn from(value: broker::DataValue) -> Self {
+        match &value {
+            broker::DataValue::String(value) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::String(value.to_owned())),
+            },
+
+            broker::DataValue::Bool(value) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Bool(*value)),
+            },
+
+            broker::DataValue::Int32(value) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Int32(*value)),
+            },
+
+            broker::DataValue::Int64(value) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Int64(*value)),
+            },
+
+            broker::DataValue::Uint32(value) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Uint32(*value)),
+            },
+
+            broker::DataValue::Uint64(value) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Uint64(*value)),
+            },
+
+            broker::DataValue::Float(value) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Float(*value)),
+            },
+
+            broker::DataValue::Double(value) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Double(*value)),
+            },
+
+            broker::DataValue::StringArray(array) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::StringArray(StringArray {
+                    values: array.clone(),
+                })),
+            },
+
+            broker::DataValue::BoolArray(array) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::BoolArray(BoolArray {
+                    values: array.clone(),
+                })),
+            },
+
+            broker::DataValue::Int32Array(array) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Int32Array(Int32Array {
+                    values: array.clone(),
+                })),
+            },
+
+            broker::DataValue::Int64Array(array) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Int64Array(Int64Array {
+                    values: array.clone(),
+                })),
+            },
+
+            broker::DataValue::Uint32Array(array) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Uint32Array(Uint32Array {
+                    values: array.clone(),
+                })),
+            },
+
+            broker::DataValue::Uint64Array(array) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::Uint64Array(Uint64Array {
+                    values: array.clone(),
+                })),
+            },
+
+            broker::DataValue::FloatArray(array) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::FloatArray(FloatArray {
+                    values: array.clone(),
+                })),
+            },
+
+            broker::DataValue::DoubleArray(array) => proto::Value {
+                typed_value: Some(proto::value::TypedValue::DoubleArray(DoubleArray {
+                    values: array.clone(),
+                })),
+            },
+
+            broker::DataValue::NotAvailable => proto::Value { typed_value: None },
+        }
+    }
+}
+
+impl broker::ActuationError {
+    /// Maps a broker-level actuation error onto a gRPC `Status`, carrying
+    /// `message` as the human-readable status message. Used by the
+    /// `actuate`/`batch_actuate` handlers and `provide_actuation` below.
+    pub fn to_tonic_status(&self, message: String) -> tonic::Status {
+        match self {
+            broker::ActuationError::NotFound => tonic::Status::not_found(message),
+            broker::ActuationError::WrongType => tonic::Status::invalid_argument(message),
+            broker::ActuationError::OutOfBounds => tonic::Status::invalid_argument(message),
+            broker::ActuationError::UnsupportedType => tonic::Status::invalid_argument(message),
+            broker::ActuationError::PermissionDenied => tonic::Status::permission_denied(message),
+            broker::ActuationError::PermissionExpired => tonic::Status::unauthenticated(message),
+            broker::ActuationError::ProviderNotAvailable => tonic::Status::unavailable(message),
+            broker::ActuationError::ProviderAlreadyExists => tonic::Status::already_exists(message),
+            broker::ActuationError::TransmissionFailure => tonic::Status::data_loss(message),
+        }
+    }
+}
diff --git a/databroker/src/grpc/kuksa_val_v2/mod.rs b/databroker/src/grpc/kuksa_val_v2/mod.rs
new file mode 100644
index 00000000..88302b19
--- /dev/null
+++ b/databroker/src/grpc/kuksa_val_v2/mod.rs
@@ -0,0 +1,15 @@
+/********************************************************************************
+* Copyright (c) 2024 Contributors to the Eclipse Foundation
+*
+* See the NOTICE file(s) distributed with this work for additional
+* information regarding copyright ownership.
+*
+* This program and the accompanying materials are made available under the
+* terms of the Apache License 2.0 which is available at
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* SPDX-License-Identifier: Apache-2.0
+********************************************************************************/
+
+mod conversions;
+mod val;
diff --git a/databroker/src/grpc/kuksa_val_v2/val.rs b/databroker/src/grpc/kuksa_val_v2/val.rs
new file mode 100644
index 00000000..809fae4f
--- /dev/null
+++ b/databroker/src/grpc/kuksa_val_v2/val.rs
@@ -0,0 +1,3066 @@
+/********************************************************************************
+* Copyright (c) 2024 Contributors to the Eclipse Foundation
+*
+* See the NOTICE file(s) distributed with this work for additional
+* information regarding copyright ownership.
+*
+* This program and the accompanying materials are made available under the
+* terms of the Apache License 2.0 which is available at
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* SPDX-License-Identifier: Apache-2.0
+********************************************************************************/
+
+use std::{collections::HashMap, pin::Pin};
+
+use crate::{
+ broker::{
+ self, ActuationChange, ActuationProvider, AuthorizedAccess, ReadError, SubscriptionError,
+ },
+ glob::Matcher,
+ permissions::Permissions,
+ types::DataValue,
+};
+
+use databroker_proto::kuksa::val::v2::{
+ self as proto,
+ open_provider_stream_request::Action::{
+ BatchActuateStreamResponse, ProvideActuationRequest, PublishValuesRequest,
+ },
+ open_provider_stream_response, OpenProviderStreamResponse, PublishValuesResponse,
+};
+
+use kuksa::proto::v2::{
+ signal_id, ActuateRequest, ActuateResponse, BatchActuateStreamRequest, ErrorCode,
+ ListMetadataResponse, ProvideActuationResponse,
+};
+use std::collections::HashSet;
+use tokio::{select, sync::mpsc};
+use tokio_stream::{wrappers::ReceiverStream, Stream, StreamExt};
+use tracing::debug;
+
+const MAX_REQUEST_PATH_LENGTH: usize = 1000;
+
+/// An actuation provider connected via the `OpenProviderStream` bidirectional
+/// stream; `sender` feeds responses/requests back to that provider's stream.
+pub struct Provider {
+    sender: mpsc::Sender<Result<OpenProviderStreamResponse, tonic::Status>>,
+}
+
+#[async_trait::async_trait]
+impl ActuationProvider for Provider {
+    /// Forwards the requested actuation changes to the connected provider as a
+    /// single `BatchActuateStreamRequest` on its open stream.
+    ///
+    /// Returns `TransmissionFailure` if the stream's channel is closed.
+    async fn actuate(
+        &self,
+        actuation_changes: Vec<ActuationChange>,
+    ) -> Result<(), (broker::ActuationError, String)> {
+        let mut actuation_requests: Vec<ActuateRequest> = vec![];
+        for actuation_change in actuation_changes {
+            let data_value = actuation_change.data_value;
+            actuation_requests.push(ActuateRequest {
+                signal_id: Some(proto::SignalId {
+                    signal: Some(signal_id::Signal::Id(actuation_change.id)),
+                }),
+                value: Some(proto::Value::from(data_value)),
+            });
+        }
+
+        let batch_actuate_stream_request =
+            open_provider_stream_response::Action::BatchActuateStreamRequest(
+                BatchActuateStreamRequest {
+                    actuate_requests: actuation_requests,
+                },
+            );
+
+        let response = OpenProviderStreamResponse {
+            action: Some(batch_actuate_stream_request),
+        };
+
+        let result = self.sender.send(Ok(response)).await;
+        if result.is_err() {
+            return Err((
+                broker::ActuationError::TransmissionFailure,
+                // Typo fix: "occured" -> "occurred"
+                "An error occurred while sending the data".to_string(),
+            ));
+        }
+        Ok(())
+    }
+
+    /// A provider is available as long as its stream channel is open.
+    fn is_available(&self) -> bool {
+        !self.sender.is_closed()
+    }
+}
+
+#[tonic::async_trait]
+impl proto::val_server::Val for broker::DataBroker {
+    // Returns (GRPC error code):
+    //   NOT_FOUND if the requested signal doesn't exist
+    //   UNAUTHENTICATED if no credentials provided or credentials has expired
+    //   PERMISSION_DENIED if access is denied
+    //
+    async fn get_value(
+        &self,
+        request: tonic::Request<proto::GetValueRequest>,
+    ) -> Result<tonic::Response<proto::GetValueResponse>, tonic::Status> {
+        debug!(?request);
+        // Permissions are injected into request extensions by the auth layer.
+        let permissions = match request.extensions().get::<Permissions>() {
+            Some(permissions) => {
+                debug!(?permissions);
+                permissions.clone()
+            }
+            None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+        };
+
+        let broker = self.authorized_access(&permissions);
+
+        let request = request.into_inner();
+
+        // Resolve the SignalId (path or numeric id) to an internal id.
+        let signal_id = match get_signal(request.signal_id, &broker).await {
+            Ok(signal_id) => signal_id,
+            Err(err) => return Err(err),
+        };
+
+        let datapoint = match broker.get_datapoint(signal_id).await {
+            Ok(datapoint) => datapoint,
+            Err(ReadError::NotFound) => return Err(tonic::Status::not_found("Path not found")),
+            Err(ReadError::PermissionDenied) => {
+                return Err(tonic::Status::permission_denied("Permission denied"))
+            }
+            Err(ReadError::PermissionExpired) => {
+                return Err(tonic::Status::unauthenticated("Permission expired"))
+            }
+        };
+
+        Ok(tonic::Response::new(proto::GetValueResponse {
+            data_point: datapoint.into(),
+        }))
+    }
+
+    // Returns (GRPC error code):
+    //   NOT_FOUND if any of the requested signals doesn't exist.
+    //   UNAUTHENTICATED if no credentials provided or credentials has expired
+    //   PERMISSION_DENIED if access is denied for any of the requested signals.
+    //
+    async fn get_values(
+        &self,
+        request: tonic::Request<proto::GetValuesRequest>,
+    ) -> Result<tonic::Response<proto::GetValuesResponse>, tonic::Status> {
+        debug!(?request);
+        let permissions = match request.extensions().get::<Permissions>() {
+            Some(permissions) => {
+                debug!(?permissions);
+                permissions.clone()
+            }
+            None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+        };
+
+        let broker = self.authorized_access(&permissions);
+
+        let requested = request.into_inner().signal_ids;
+        let mut response_datapoints = Vec::new();
+
+        // Fail the whole call on the first signal that errors (all-or-nothing).
+        for request in requested {
+            let signal_id = match get_signal(Some(request), &broker).await {
+                Ok(signal_id) => signal_id,
+                Err(err) => return Err(err),
+            };
+
+            match broker.get_datapoint(signal_id).await {
+                Ok(datapoint) => {
+                    let proto_datapoint_opt: Option<proto::Datapoint> = datapoint.into();
+                    // NOTE(review): unwrap assumes the conversion never yields None
+                    // for an existing datapoint — confirm against the From impl.
+                    response_datapoints.push(proto_datapoint_opt.unwrap());
+                }
+                Err(ReadError::NotFound) => {
+                    return Err(tonic::Status::not_found(format!(
+                        "Path not found (id: {})",
+                        signal_id
+                    )));
+                }
+                Err(ReadError::PermissionDenied) => {
+                    return Err(tonic::Status::permission_denied(format!(
+                        "Permission denied (id: {})",
+                        signal_id
+                    )))
+                }
+                Err(ReadError::PermissionExpired) => {
+                    return Err(tonic::Status::unauthenticated(format!(
+                        "Permission expired (id: {})",
+                        signal_id
+                    )))
+                }
+            };
+        }
+
+        Ok(tonic::Response::new(proto::GetValuesResponse {
+            data_points: response_datapoints,
+        }))
+    }
+
+    type SubscribeStream = Pin<
+        Box<
+            dyn Stream<Item = Result<proto::SubscribeResponse, tonic::Status>>
+                + Send
+                + Sync
+                + 'static,
+        >,
+    >;
+    // Returns (GRPC error code):
+    //   NOT_FOUND if any of the signals are non-existent.
+    //   UNAUTHENTICATED if no credentials provided or credentials has expired
+    //   PERMISSION_DENIED if access is denied for any of the signals.
+    //   INVALID_ARGUMENT if the request is empty or provided path is too long
+    //
+    async fn subscribe(
+        &self,
+        request: tonic::Request<proto::SubscribeRequest>,
+    ) -> Result<tonic::Response<Self::SubscribeStream>, tonic::Status> {
+        debug!(?request);
+        let permissions = match request.extensions().get::<Permissions>() {
+            Some(permissions) => {
+                debug!(?permissions);
+                permissions.clone()
+            }
+            None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+        };
+
+        let request = request.into_inner();
+
+        let broker = self.authorized_access(&permissions);
+
+        let signal_paths = request.signal_paths;
+        let size = signal_paths.len();
+
+        // Validate every requested path up front; subscribe only to valid ids.
+        let mut valid_requests: HashMap<i32, HashSet<broker::Field>> =
+            HashMap::with_capacity(size);
+
+        for path in signal_paths {
+            valid_requests.insert(
+                match get_signal(
+                    Some(proto::SignalId {
+                        signal: Some(proto::signal_id::Signal::Path(path)),
+                    }),
+                    &broker,
+                )
+                .await
+                {
+                    Ok(signal_id) => signal_id,
+                    Err(err) => return Err(err),
+                },
+                vec![broker::Field::Datapoint].into_iter().collect(),
+            );
+        }
+
+        match broker
+            .subscribe(valid_requests, Some(request.buffer_size as usize))
+            .await
+        {
+            Ok(stream) => {
+                let stream = convert_to_proto_stream(stream, size);
+                Ok(tonic::Response::new(Box::pin(stream)))
+            }
+            Err(SubscriptionError::NotFound) => Err(tonic::Status::not_found("Path not found")),
+            Err(SubscriptionError::InvalidInput) => {
+                Err(tonic::Status::invalid_argument("Invalid Argument"))
+            }
+            Err(SubscriptionError::InternalError) => Err(tonic::Status::internal("Internal Error")),
+            Err(SubscriptionError::InvalidBufferSize) => Err(tonic::Status::new(
+                tonic::Code::InvalidArgument,
+                "Subscription buffer_size max allowed value is 1000",
+            )),
+        }
+    }
+
+    type SubscribeByIdStream = Pin<
+        Box<
+            dyn Stream<Item = Result<proto::SubscribeByIdResponse, tonic::Status>>
+                + Send
+                + Sync
+                + 'static,
+        >,
+    >;
+    // Returns (GRPC error code):
+    //   NOT_FOUND if any of the signals are non-existent.
+    //   UNAUTHENTICATED if no credentials provided or credentials has expired
+    //   PERMISSION_DENIED if access is denied for any of the signals.
+    //   INVALID_ARGUMENT if the request is empty
+    //
+    async fn subscribe_by_id(
+        &self,
+        request: tonic::Request<proto::SubscribeByIdRequest>,
+    ) -> Result<tonic::Response<Self::SubscribeByIdStream>, tonic::Status> {
+        debug!(?request);
+        let permissions = match request.extensions().get::<Permissions>() {
+            Some(permissions) => {
+                debug!(?permissions);
+                permissions.clone()
+            }
+            None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+        };
+
+        let request = request.into_inner();
+
+        let broker = self.authorized_access(&permissions);
+
+        let signal_ids = request.signal_ids;
+        let size = signal_ids.len();
+
+        // Validate every requested id up front; subscribe only to valid ids.
+        let mut valid_requests: HashMap<i32, HashSet<broker::Field>> =
+            HashMap::with_capacity(size);
+
+        for id in signal_ids {
+            valid_requests.insert(
+                match get_signal(
+                    Some(proto::SignalId {
+                        signal: Some(proto::signal_id::Signal::Id(id)),
+                    }),
+                    &broker,
+                )
+                .await
+                {
+                    Ok(signal_id) => signal_id,
+                    Err(err) => return Err(err),
+                },
+                vec![broker::Field::Datapoint].into_iter().collect(),
+            );
+        }
+
+        match broker
+            .subscribe(valid_requests, Some(request.buffer_size as usize))
+            .await
+        {
+            Ok(stream) => {
+                let stream = convert_to_proto_stream_id(stream, size);
+                Ok(tonic::Response::new(Box::pin(stream)))
+            }
+            Err(SubscriptionError::NotFound) => {
+                Err(tonic::Status::new(tonic::Code::NotFound, "Path not found"))
+            }
+            Err(SubscriptionError::InvalidInput) => Err(tonic::Status::new(
+                tonic::Code::InvalidArgument,
+                "Invalid Argument",
+            )),
+            Err(SubscriptionError::InternalError) => {
+                Err(tonic::Status::new(tonic::Code::Internal, "Internal Error"))
+            }
+            Err(SubscriptionError::InvalidBufferSize) => Err(tonic::Status::new(
+                tonic::Code::InvalidArgument,
+                "Subscription buffer_size max allowed value is 1000",
+            )),
+        }
+    }
+
+    // Returns (GRPC error code):
+    //   NOT_FOUND if the actuator does not exist.
+    //   PERMISSION_DENIED if access is denied for the actuator.
+    //   UNAUTHENTICATED if no credentials provided or credentials has expired
+    //   UNAVAILABLE if there is no provider currently providing the actuator
+    //   DATA_LOSS is there is a internal TransmissionFailure
+    //   INVALID_ARGUMENT
+    //     - if the provided path is not an actuator.
+    //     - if the data type used in the request does not match
+    //       the data type of the addressed signal
+    //     - if the requested value is not accepted,
+    //       e.g. if sending an unsupported enum value
+    //     - if the provided value is out of the min/max range specified
+    //
+    async fn actuate(
+        &self,
+        request: tonic::Request<proto::ActuateRequest>,
+    ) -> Result<tonic::Response<ActuateResponse>, tonic::Status> {
+        debug!(?request);
+        let permissions = request
+            .extensions()
+            .get::<Permissions>()
+            .ok_or(tonic::Status::unauthenticated("Unauthenticated"))?
+            .clone();
+        let broker = self.authorized_access(&permissions);
+
+        let actuator_request = request.into_inner();
+        let value = actuator_request
+            .value
+            .ok_or(tonic::Status::invalid_argument("No value provided"))?;
+
+        let signal = actuator_request
+            .signal_id
+            .ok_or(tonic::Status::invalid_argument("No signal_id provided"))?
+            .signal;
+
+        match &signal {
+            // A path must first be resolved to the internal numeric id.
+            Some(proto::signal_id::Signal::Path(path)) => {
+                let id = broker
+                    .get_id_by_path(path)
+                    .await
+                    .ok_or(tonic::Status::not_found(format!(
+                        "Invalid path in signal_id provided {}",
+                        path
+                    )))?;
+
+                match broker.actuate(&id, &DataValue::from(value)).await {
+                    Ok(()) => Ok(tonic::Response::new(ActuateResponse {})),
+                    Err(error) => Err(error.0.to_tonic_status(error.1)),
+                }
+            }
+            Some(proto::signal_id::Signal::Id(id)) => {
+                match broker.actuate(id, &DataValue::from(value)).await {
+                    Ok(()) => Ok(tonic::Response::new(ActuateResponse {})),
+                    Err(error) => Err(error.0.to_tonic_status(error.1)),
+                }
+            }
+            None => Err(tonic::Status::invalid_argument(
+                "SignalID contains neither path or id",
+            )),
+        }
+    }
+
+    // Actuate simultaneously multiple actuators.
+    // If any error occurs, the entire operation will be aborted
+    // and no single actuator value will be forwarded to the provider.
+    //
+    // Returns (GRPC error code):
+    //   NOT_FOUND if any of the actuators are non-existent.
+    //   PERMISSION_DENIED if access is denied for any of the actuators.
+    //   UNAUTHENTICATED if no credentials provided or credentials has expired
+    //   UNAVAILABLE if there is no provider currently providing an actuator
+    //   DATA_LOSS is there is a internal TransmissionFailure
+    //   INVALID_ARGUMENT
+    //     - if the data type used in the request does not match
+    //       the data type of the addressed signal
+    //     - if the requested value is not accepted,
+    //       e.g. if sending an unsupported enum value
+    //     - if any of the provided actuators values are out of the min/max range specified
+    //
+    async fn batch_actuate(
+        &self,
+        request: tonic::Request<proto::BatchActuateRequest>,
+    ) -> Result<tonic::Response<proto::BatchActuateResponse>, tonic::Status> {
+        debug!(?request);
+        let permissions = match request.extensions().get::<Permissions>() {
+            Some(permissions) => {
+                debug!(?permissions);
+                permissions.clone()
+            }
+            None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+        };
+        let broker = self.authorized_access(&permissions);
+        let actuate_requests = request.into_inner().actuate_requests;
+
+        // Validate and resolve every request before actuating anything,
+        // so a single bad entry aborts the whole batch.
+        let mut actuation_changes: Vec<ActuationChange> = vec![];
+        for actuate_request in actuate_requests {
+            let vss_id = match actuate_request.signal_id {
+                Some(signal_id) => match signal_id.signal {
+                    Some(proto::signal_id::Signal::Id(vss_id)) => vss_id,
+                    Some(proto::signal_id::Signal::Path(vss_path)) => {
+                        let result = broker.get_id_by_path(&vss_path).await;
+                        match result {
+                            Some(vss_id) => vss_id,
+                            None => {
+                                let message =
+                                    format!("Could not resolve vss_id for path: {}", vss_path);
+                                return Err(tonic::Status::not_found(message));
+                            }
+                        }
+                    }
+                    None => return Err(tonic::Status::invalid_argument("Signal not provided")),
+                },
+                None => return Err(tonic::Status::invalid_argument("Signal_Id not provided")),
+            };
+            let data_value = match actuate_request.value {
+                Some(data_value) => DataValue::from(data_value),
+                // Was an empty message — give the client something actionable.
+                None => return Err(tonic::Status::invalid_argument("No value provided")),
+            };
+            let actuation_change = ActuationChange {
+                id: vss_id,
+                data_value,
+            };
+            actuation_changes.push(actuation_change);
+        }
+
+        let result = broker.batch_actuate(actuation_changes).await;
+        match result {
+            Ok(_) => Ok(tonic::Response::new(proto::BatchActuateResponse {})),
+            Err(error) => Err(error.0.to_tonic_status(error.1)),
+        }
+    }
+
+    // Returns (GRPC error code):
+    //   NOT_FOUND if the specified root branch does not exist.
+    //   UNAUTHENTICATED if no credentials provided or credentials has expired
+    //   INVALID_ARGUMENT if the provided path or wildcard is wrong.
+    //
+    async fn list_metadata(
+        &self,
+        request: tonic::Request<proto::ListMetadataRequest>,
+    ) -> Result<tonic::Response<ListMetadataResponse>, tonic::Status> {
+        debug!(?request);
+        let permissions = match request.extensions().get::<Permissions>() {
+            Some(permissions) => {
+                debug!(?permissions);
+                permissions.clone()
+            }
+            None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+        };
+        let broker = self.authorized_access(&permissions);
+
+        let metadata_request = request.into_inner();
+
+        match Matcher::new(&metadata_request.root) {
+            Ok(matcher) => {
+                // Collect metadata for every entry whose glob path matches.
+                let mut metadata_response = Vec::new();
+                broker
+                    .for_each_entry(|entry| {
+                        let entry_metadata = &entry.metadata();
+                        if matcher.is_match(&entry_metadata.glob_path) {
+                            metadata_response.push(proto::Metadata::from(*entry_metadata));
+                        }
+                    })
+                    .await;
+                if metadata_response.is_empty() {
+                    Err(tonic::Status::not_found(
+                        "Specified root branch does not exist",
+                    ))
+                } else {
+                    Ok(tonic::Response::new(ListMetadataResponse {
+                        metadata: metadata_response,
+                    }))
+                }
+            }
+            Err(_) => Err(tonic::Status::invalid_argument("Invalid Pattern Argument")),
+        }
+    }
+
+    // Returns (GRPC error code):
+    //   NOT_FOUND if any of the signals are non-existent.
+    //   PERMISSION_DENIED
+    //     - if access is denied for any of the signals.
+    //   UNAUTHENTICATED if no credentials provided or credentials has expired
+    //   INVALID_ARGUMENT
+    //     - if the data type used in the request does not match
+    //       the data type of the addressed signal
+    //     - if the published value is not accepted,
+    //       e.g. if sending an unsupported enum value
+    //     - if the published value is out of the min/max range specified
+    //
+    async fn publish_value(
+        &self,
+        request: tonic::Request<proto::PublishValueRequest>,
+    ) -> Result<tonic::Response<proto::PublishValueResponse>, tonic::Status> {
+        debug!(?request);
+        let permissions = match request.extensions().get::<Permissions>() {
+            Some(permissions) => {
+                debug!(?permissions);
+                permissions.clone()
+            }
+            None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+        };
+
+        let broker = self.authorized_access(&permissions);
+
+        let request = request.into_inner();
+
+        // Was `request.data_point.unwrap()`: a missing datapoint would have
+        // panicked the server; reject it as a client error instead.
+        let datapoint = request
+            .data_point
+            .ok_or(tonic::Status::invalid_argument("No datapoint provided"))?;
+
+        let mut updates: HashMap<i32, broker::EntryUpdate> = HashMap::with_capacity(1);
+
+        updates.insert(
+            match get_signal(request.signal_id, &broker).await {
+                Ok(signal_id) => signal_id,
+                Err(err) => return Err(err),
+            },
+            broker::EntryUpdate {
+                path: None,
+                datapoint: Some(broker::Datapoint::from(&datapoint)),
+                actuator_target: None,
+                entry_type: None,
+                data_type: None,
+                description: None,
+                allowed: None,
+                max: None,
+                min: None,
+                unit: None,
+            },
+        );
+
+        match broker.update_entries(updates).await {
+            Ok(()) => Ok(tonic::Response::new(proto::PublishValueResponse {})),
+            Err(errors) => {
+                if errors.is_empty() {
+                    Ok(tonic::Response::new(proto::PublishValueResponse {}))
+                } else if let Some((id, err)) = errors.first() {
+                    Err(err.to_status_with_code(id))
+                } else {
+                    Err(tonic::Status::internal(
+                        "There is no error provided for the entry",
+                    ))
+                }
+            }
+        }
+    }
+
+    type OpenProviderStreamStream =
+        ReceiverStream<Result<OpenProviderStreamResponse, tonic::Status>>;
+
+    // Errors:
+    // - Provider sends ProvideActuationRequest -> Databroker returns ProvideActuationResponse
+    //   Returns (GRPC error code) and closes the stream call (strict case).
+    //     NOT_FOUND if any of the signals are non-existent.
+    //     PERMISSION_DENIED if access is denied for any of the signals.
+    //     UNAUTHENTICATED if no credentials provided or credentials has expired
+    //     ALREADY_EXISTS if a provider already claimed the ownership of an actuator
+    //
+    // - Provider sends PublishValuesRequest -> Databroker returns PublishValuesResponse
+    //   GRPC errors are returned as messages in the stream
+    //   response with the signal id `map status = 2;` (permissive case)
+    //     NOT_FOUND if a signal is non-existent.
+    //     PERMISSION_DENIED
+    //       - if access is denied for a signal.
+    //     INVALID_ARGUMENT
+    //       - if the data type used in the request does not match
+    //         the data type of the addressed signal
+    //       - if the published value is not accepted,
+    //         e.g. if sending an unsupported enum value
+    //       - if the published value is out of the min/max range specified
+    //
+    // - Databroker sends BatchActuateStreamRequest -> Provider shall return a BatchActuateStreamResponse,
+    //   for every signal requested to indicate if the request was accepted or not.
+    //   It is up to the provider to decide if the stream shall be closed,
+    //   as of today Databroker will not react on the received error message.
+    //
+    async fn open_provider_stream(
+        &self,
+        request: tonic::Request<tonic::Streaming<proto::OpenProviderStreamRequest>>,
+    ) -> Result<tonic::Response<Self::OpenProviderStreamStream>, tonic::Status> {
+        debug!(?request);
+        let permissions = match request.extensions().get::<Permissions>() {
+            Some(permissions) => {
+                debug!(?permissions);
+                permissions.clone()
+            }
+            None => return Err(tonic::Status::unauthenticated("Unauthenticated")),
+        };
+
+        let mut stream = request.into_inner();
+
+        let mut shutdown_trigger = self.get_shutdown_trigger();
+
+        // Copy (to move into task below)
+        let broker = self.clone();
+        // Create stream (to be returned)
+        let (response_stream_sender, response_stream_receiver) = mpsc::channel(10);
+
+        // Listening on stream
+        tokio::spawn(async move {
+            let permissions = permissions;
+            let broker = broker.authorized_access(&permissions);
+            loop {
+                select! {
+                    message = stream.message() => {
+                        match message {
+                            Ok(request) => {
+                                match request {
+                                    Some(req) => {
+                                        match req.action {
+                                            Some(ProvideActuationRequest(provided_actuation)) => {
+                                                let response = provide_actuation(&broker, &provided_actuation, response_stream_sender.clone()).await;
+                                                if let Err(err) = response_stream_sender.send(response).await
+                                                {
+                                                    debug!("Failed to send response: {}", err)
+                                                }
+                                            },
+                                            Some(PublishValuesRequest(publish_values_request)) => {
+                                                let response = publish_values(&broker, &publish_values_request).await;
+                                                if let Some(value) = response {
+                                                    if let Err(err) = response_stream_sender.send(Ok(value)).await {
+                                                        debug!("Failed to send error response: {}", err);
+                                                    }
+                                                }
+                                            },
+                                            Some(BatchActuateStreamResponse(batch_actuate_stream_response)) => {
+                                                // Errors from the provider are only logged (see contract above).
+                                                if let Some(error) = batch_actuate_stream_response.error {
+                                                    match error.code() {
+                                                        ErrorCode::Ok => {},
+                                                        _ => {
+                                                            let mut msg: String = "Batch actuate stream response error".to_string();
+                                                            if let Some(signal_id) = batch_actuate_stream_response.signal_id {
+                                                                match signal_id.signal {
+                                                                    Some(proto::signal_id::Signal::Path(path)) => {
+                                                                        msg = format!("{}, path: {}", msg, &path);
+                                                                    }
+                                                                    Some(proto::signal_id::Signal::Id(id)) => {
+                                                                        msg = format!("{}, id: {}", msg, &id.to_string());
+                                                                    }
+                                                                    None => {}
+                                                                }
+                                                            }
+                                                            msg = format!("{}, error code: {}, error message: {}", msg, &error.code.to_string(), &error.message);
+                                                            debug!(msg)
+                                                        }
+                                                    }
+                                                }
+                                            },
+                                            None => {
+                                                // Empty action: ignore and keep the stream open.
+                                            },
+                                        }
+                                    },
+                                    None => {
+                                        debug!("provider: no more messages");
+                                        break;
+                                    }
+                                }
+                            },
+                            Err(err) => {
+                                debug!("provider: connection broken: {:?}", err);
+                                break;
+                            },
+                        }
+                    },
+                    _ = shutdown_trigger.recv() => {
+                        debug!("provider: shutdown received");
+                        break;
+                    }
+                }
+            }
+        });
+
+        Ok(tonic::Response::new(ReceiverStream::new(
+            response_stream_receiver,
+        )))
+    }
+
+    /// Reports the databroker's name, version and build commit hash.
+    async fn get_server_info(
+        &self,
+        _request: tonic::Request<proto::GetServerInfoRequest>,
+    ) -> Result<tonic::Response<proto::GetServerInfoResponse>, tonic::Status> {
+        let server_info = proto::GetServerInfoResponse {
+            name: "databroker".to_owned(),
+            version: self.get_version().to_owned(),
+            commit_hash: self.get_commit_sha().to_owned(),
+        };
+        Ok(tonic::Response::new(server_info))
+    }
+}
+
+/// Registers the stream's peer as actuation provider for all signals named in
+/// `request` (given by path or numeric id).
+///
+/// Returns NOT_FOUND if any path cannot be resolved; otherwise registers a
+/// `Provider` wrapping `sender` and returns the `ProvideActuationResponse`
+/// to be sent back on the stream.
+async fn provide_actuation(
+    broker: &AuthorizedAccess<'_, '_>,
+    request: &databroker_proto::kuksa::val::v2::ProvideActuationRequest,
+    sender: mpsc::Sender<Result<OpenProviderStreamResponse, tonic::Status>>,
+) -> Result<OpenProviderStreamResponse, tonic::Status> {
+    let vss_paths: Vec<_> = request
+        .actuator_identifiers
+        .iter()
+        .filter_map(|signal_id| match &signal_id.signal {
+            Some(proto::signal_id::Signal::Path(path)) => Some(path.clone()),
+            _ => None,
+        })
+        .collect();
+
+    // Resolve all paths concurrently.
+    let future_vss_ids = vss_paths
+        .iter()
+        .map(|vss_path| broker.get_id_by_path(vss_path));
+    let resolved_opt_vss_ids = futures::future::join_all(future_vss_ids).await;
+
+    // Strict case: any unresolvable path fails the whole registration.
+    for (index, opt_vss_id) in resolved_opt_vss_ids.iter().enumerate() {
+        if opt_vss_id.is_none() {
+            let message = format!(
+                "Could not resolve id of vss_path: {}",
+                vss_paths.get(index).unwrap()
+            );
+            return Err(tonic::Status::not_found(message));
+        }
+    }
+
+    let resolved_vss_ids: Vec<i32> = resolved_opt_vss_ids.iter().filter_map(|&opt| opt).collect();
+
+    let vss_ids: Vec<_> = request
+        .actuator_identifiers
+        .iter()
+        .filter_map(|signal_id| match &signal_id.signal {
+            Some(proto::signal_id::Signal::Id(id)) => Some(*id),
+            _ => None,
+        })
+        .collect();
+
+    let mut all_vss_ids = vec![];
+    all_vss_ids.extend(vss_ids);
+    all_vss_ids.extend(resolved_vss_ids);
+
+    let provider = Provider { sender };
+
+    match broker
+        .provide_actuation(all_vss_ids, Box::new(provider))
+        .await
+    {
+        Ok(_) => {
+            let provide_actuation_response = ProvideActuationResponse {};
+
+            let response = OpenProviderStreamResponse {
+                action: Some(
+                    open_provider_stream_response::Action::ProvideActuationResponse(
+                        provide_actuation_response,
+                    ),
+                ),
+            };
+
+            Ok(response)
+        }
+
+        Err(error) => Err(error.0.to_tonic_status(error.1)),
+    }
+}
+
+/// Applies a provider's `PublishValuesRequest` to the broker.
+///
+/// Returns `None` on full success (permissive case: nothing is sent back), or
+/// a `PublishValuesResponse` carrying the per-signal error map otherwise.
+async fn publish_values(
+    broker: &AuthorizedAccess<'_, '_>,
+    request: &databroker_proto::kuksa::val::v2::PublishValuesRequest,
+) -> Option<OpenProviderStreamResponse> {
+    let ids: Vec<(i32, broker::EntryUpdate)> = request
+        .data_points
+        .iter()
+        .map(|(id, datapoint)| {
+            (
+                *id,
+                broker::EntryUpdate {
+                    path: None,
+                    datapoint: Some(broker::Datapoint::from(datapoint)),
+                    actuator_target: None,
+                    entry_type: None,
+                    data_type: None,
+                    description: None,
+                    allowed: None,
+                    min: None,
+                    max: None,
+                    unit: None,
+                },
+            )
+        })
+        .collect();
+
+    // TODO check if provider is allowed to update the entries for the provided signals?
+    match broker.update_entries(ids).await {
+        Ok(_) => None,
+        Err(err) => Some(OpenProviderStreamResponse {
+            action: Some(
+                open_provider_stream_response::Action::PublishValuesResponse(
+                    PublishValuesResponse {
+                        request_id: request.request_id,
+                        status: err
+                            .iter()
+                            .map(|(id, error)| (*id, proto::Error::from(error)))
+                            .collect(),
+                    },
+                ),
+            ),
+        }),
+    }
+}
+
+/// Resolves an optional protobuf `SignalId` (path or numeric id) to the
+/// broker's internal id.
+///
+/// Returns INVALID_ARGUMENT when the `SignalId` or its oneof is absent, or the
+/// path exceeds `MAX_REQUEST_PATH_LENGTH`; NOT_FOUND when it does not resolve.
+async fn get_signal(
+    signal_id: Option<proto::SignalId>,
+    broker: &AuthorizedAccess<'_, '_>,
+) -> Result<i32, tonic::Status> {
+    // Was `signal_id.unwrap().signal`: an absent SignalId would have panicked
+    // the server; both "no SignalId" and "empty oneof" are client errors.
+    if let Some(signal) = signal_id.and_then(|signal_id| signal_id.signal) {
+        match signal {
+            proto::signal_id::Signal::Path(path) => {
+                if path.len() > MAX_REQUEST_PATH_LENGTH {
+                    return Err(tonic::Status::invalid_argument(
+                        "The provided path is too long",
+                    ));
+                }
+                match broker.get_id_by_path(&path).await {
+                    Some(id) => Ok(id),
+                    None => Err(tonic::Status::not_found("Path not found")),
+                }
+            }
+            // A numeric id only needs an existence check.
+            proto::signal_id::Signal::Id(id) => match broker.get_metadata(id).await {
+                Some(_metadata) => Ok(id),
+                None => Err(tonic::Status::not_found("Path not found")),
+            },
+        }
+    } else {
+        Err(tonic::Status::invalid_argument("No SignalId provided"))
+    }
+}
+
+/// Maps the broker's subscription stream onto path-keyed `SubscribeResponse`s.
+/// `size` pre-sizes each response's entry map (number of subscribed signals).
+fn convert_to_proto_stream(
+    input: impl Stream<Item = broker::EntryUpdates>,
+    size: usize,
+) -> impl Stream<Item = Result<proto::SubscribeResponse, tonic::Status>> {
+    input.map(move |item| {
+        let mut entries: HashMap<String, proto::Datapoint> = HashMap::with_capacity(size);
+        for update in item.updates {
+            let update_datapoint: Option<proto::Datapoint> = match update.update.datapoint {
+                Some(datapoint) => datapoint.into(),
+                None => None,
+            };
+            if let Some(dp) = update_datapoint {
+                entries.insert(
+                    update
+                        .update
+                        .path
+                        .expect("Something wrong with update path of subscriptions!"),
+                    dp,
+                );
+            }
+        }
+        let response = proto::SubscribeResponse { entries };
+        Ok(response)
+    })
+}
+
+/// Maps the broker's subscription stream onto id-keyed `SubscribeByIdResponse`s.
+/// `size` pre-sizes each response's entry map (number of subscribed signals).
+fn convert_to_proto_stream_id(
+    input: impl Stream<Item = broker::EntryUpdates>,
+    size: usize,
+) -> impl Stream<Item = Result<proto::SubscribeByIdResponse, tonic::Status>> {
+    input.map(move |item| {
+        let mut entries: HashMap<i32, proto::Datapoint> = HashMap::with_capacity(size);
+        for update in item.updates {
+            let update_datapoint: Option<proto::Datapoint> = match update.update.datapoint {
+                Some(datapoint) => datapoint.into(),
+                None => None,
+            };
+            if let Some(dp) = update_datapoint {
+                entries.insert(update.id, dp);
+            }
+        }
+        let response = proto::SubscribeByIdResponse { entries };
+        Ok(response)
+    })
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{broker::DataBroker, permissions};
+ use databroker_proto::kuksa::val::v2::val_server::Val;
+ use proto::open_provider_stream_response::Action::{
+ BatchActuateStreamRequest, ProvideActuationResponse, PublishValuesResponse,
+ };
+ use proto::{
+ open_provider_stream_request, BatchActuateRequest, OpenProviderStreamRequest,
+ PublishValuesRequest, SignalId, Value,
+ };
+
+    #[tokio::test]
+    async fn test_get_value_id_ok() {
+        // Arrange: a broker holding one int32 datapoint, addressed by its numeric id.
+        let broker = DataBroker::default();
+
+        let timestamp = std::time::SystemTime::now();
+
+        let entry_id = broker::tests::helper_add_int32(&broker, "test.datapoint1", -64, timestamp)
+            .await
+            .expect("Shall succeed");
+
+        let request = proto::GetValueRequest {
+            signal_id: Some(proto::SignalId {
+                signal: Some(proto::signal_id::Signal::Id(entry_id)),
+            }),
+        };
+
+        // Manually insert permissions
+        let mut get_value_request = tonic::Request::new(request);
+        get_value_request
+            .extensions_mut()
+            .insert(permissions::ALLOW_ALL.clone());
+
+        // Act + assert: the returned datapoint carries the stored value and timestamp.
+        match broker.get_value(get_value_request).await {
+            Ok(response) => {
+                // Handle the successful response
+                let get_response = response.into_inner();
+
+                let value = proto::Value {
+                    typed_value: Some(proto::value::TypedValue::Int32(-64)),
+                };
+                assert_eq!(
+                    get_response,
+                    proto::GetValueResponse {
+                        data_point: {
+                            Some(proto::Datapoint {
+                                timestamp: Some(timestamp.into()),
+                                value: Some(value),
+                            })
+                        },
+                    }
+                );
+            }
+            Err(status) => {
+                panic!("Get failed with status: {:?}", status);
+            }
+        }
+    }
+
+ #[tokio::test]
+ async fn test_get_value_name_ok() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ let _entry_id = broker::tests::helper_add_int32(&broker, "test.datapoint1", -64, timestamp)
+ .await
+ .expect("Shall succeed");
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "test.datapoint1".to_string(),
+ )),
+ }),
+ };
+
+ // Manually insert permissions
+ let mut get_value_request = tonic::Request::new(request);
+ get_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.get_value(get_value_request).await {
+ Ok(response) => {
+ // Handle the successful response
+ let get_response = response.into_inner();
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32(-64)),
+ };
+ assert_eq!(
+ get_response,
+ proto::GetValueResponse {
+ data_point: {
+ Some(proto::Datapoint {
+ timestamp: Some(timestamp.into()),
+ value: Some(value),
+ })
+ },
+ }
+ );
+ }
+ Err(status) => {
+ panic!("Get failed with status: {:?}", status);
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_get_value_id_not_authorized() {
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ let entry_id = broker::tests::helper_add_int32(&broker, "test.datapoint1", -64, timestamp)
+ .await
+ .expect("Shall succeed");
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ };
+
+ // Do not insert permissions
+ let get_value_request = tonic::Request::new(request);
+
+ match broker.get_value(get_value_request).await {
+ Ok(_response) => {
+ panic!("Did not expect success");
+ }
+ Err(status) => {
+ assert_eq!(status.code(), tonic::Code::Unauthenticated)
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_get_value_id_no_value() {
+ // Define signal but do not assign any value
+
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_string(),
+ broker::DataType::Int32,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Some Description hat Does Not Matter".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ // Now try to get it
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ };
+
+ // Manually insert permissions
+ let mut get_value_request = tonic::Request::new(request);
+ get_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.get_value(get_value_request).await {
+ Ok(response) => {
+ // Handle the successful response
+ let get_response = response.into_inner();
+
+ // As of today Databroker assigns "Now" when registering a Datapoint so if there is no value
+ // we do not know exact time. For now just checking that it is not None
+ assert_eq!(get_response.data_point.clone().unwrap().value, None);
+ assert_ne!(get_response.data_point.unwrap().timestamp, None);
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ panic!("Get failed with status: {:?}", status);
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_get_value_id_not_defined() {
+ let broker = DataBroker::default();
+ // Just use some arbitrary number
+ let entry_id: i32 = 12345;
+
+ // Now try to get it
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ };
+
+ // Manually insert permissions
+ let mut get_value_request = tonic::Request::new(request);
+ get_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.get_value(get_value_request).await {
+ Ok(_response) => {
+ panic!("Did not expect success");
+ }
+ Err(status) => {
+ assert_eq!(status.code(), tonic::Code::NotFound)
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_get_value_name_not_defined() {
+ let broker = DataBroker::default();
+
+ // Now try to get it
+
+ let request = proto::GetValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "test.datapoint1".to_string(),
+ )),
+ }),
+ };
+
+ // Manually insert permissions
+ let mut get_value_request = tonic::Request::new(request);
+ get_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.get_value(get_value_request).await {
+ Ok(_response) => {
+ panic!("Did not expect success");
+ }
+ Err(status) => {
+ assert_eq!(status.code(), tonic::Code::NotFound)
+ }
+ }
+ }
+
+ struct GetValuesConfig {
+ send_auth: bool,
+ request_first: bool,
+ use_name_for_first: bool,
+ first_exist: bool,
+ auth_first: bool,
+ request_second: bool,
+ use_name_for_second: bool,
+ second_exist: bool,
+ auth_second: bool,
+ }
+
+ struct GetValuesConfigBuilder {
+ send_auth: bool,
+ request_first: bool,
+ use_name_for_first: bool,
+ first_exist: bool,
+ auth_first: bool,
+ request_second: bool,
+ use_name_for_second: bool,
+ second_exist: bool,
+ auth_second: bool,
+ }
+
+ impl GetValuesConfigBuilder {
+ fn new() -> GetValuesConfigBuilder {
+ GetValuesConfigBuilder {
+ send_auth: false,
+ request_first: false,
+ use_name_for_first: false,
+ first_exist: false,
+ auth_first: false,
+ request_second: false,
+ use_name_for_second: false,
+ second_exist: false,
+ auth_second: false,
+ }
+ }
+
+ // Request credentials to be sent.
+        // Do not need to be explicitly requested if auth_first/auth_second is used
+ fn send_auth(&mut self) -> &mut Self {
+ self.send_auth = true;
+ self
+ }
+
+ fn request_first(&mut self) -> &mut Self {
+ self.request_first = true;
+ self
+ }
+
+ fn use_name_for_first(&mut self) -> &mut Self {
+ self.use_name_for_first = true;
+ self
+ }
+
+ fn first_exist(&mut self) -> &mut Self {
+ self.first_exist = true;
+ self
+ }
+
+ // Request credentials and include credentials for signal 1
+ fn auth_first(&mut self) -> &mut Self {
+ self.auth_first = true;
+ self.send_auth = true;
+ self
+ }
+
+ fn request_second(&mut self) -> &mut Self {
+ self.request_second = true;
+ self
+ }
+
+ fn use_name_for_second(&mut self) -> &mut Self {
+ self.use_name_for_second = true;
+ self
+ }
+
+ fn second_exist(&mut self) -> &mut Self {
+ self.second_exist = true;
+ self
+ }
+
+ // Request credentials and include credentials for signal 2
+ fn auth_second(&mut self) -> &mut Self {
+ self.send_auth = true;
+ self.auth_second = true;
+ self
+ }
+
+ fn build(&self) -> GetValuesConfig {
+ GetValuesConfig {
+ send_auth: self.send_auth,
+ request_first: self.request_first,
+ use_name_for_first: self.use_name_for_first,
+ first_exist: self.first_exist,
+ auth_first: self.auth_first,
+ request_second: self.request_second,
+ use_name_for_second: self.use_name_for_second,
+ second_exist: self.second_exist,
+ auth_second: self.auth_second,
+ }
+ }
+ }
+
+ async fn test_get_values_combo(config: GetValuesConfig) {
+ static SIGNAL1: &str = "test.datapoint1";
+ static SIGNAL2: &str = "test.datapoint2";
+
+ let broker = DataBroker::default();
+
+ let timestamp = std::time::SystemTime::now();
+
+ let mut entry_id = -1;
+ if config.first_exist {
+ entry_id = broker::tests::helper_add_int32(&broker, SIGNAL1, -64, timestamp)
+ .await
+ .expect("Shall succeed");
+ }
+
+ let mut entry_id2 = -1;
+ if config.second_exist {
+ entry_id2 = broker::tests::helper_add_int32(&broker, SIGNAL2, -13, timestamp)
+ .await
+ .expect("Shall succeed");
+ }
+
+ let mut permission_builder = permissions::PermissionBuilder::new();
+
+ if config.auth_first {
+ permission_builder = permission_builder
+ .add_read_permission(permissions::Permission::Glob(SIGNAL1.to_string()));
+ }
+ if config.auth_second {
+ permission_builder = permission_builder
+ .add_read_permission(permissions::Permission::Glob(SIGNAL2.to_string()));
+ }
+ let permissions = permission_builder.build().expect("Oops!");
+
+ // Build the request
+
+ let mut request_signals = Vec::new();
+ if config.request_first {
+ if !config.use_name_for_first {
+ request_signals.push(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ });
+ } else {
+ request_signals.push(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Path(SIGNAL1.to_string())),
+ });
+ }
+ }
+ if config.request_second {
+ if !config.use_name_for_second {
+ request_signals.push(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id2)),
+ });
+ } else {
+ request_signals.push(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Path(SIGNAL2.to_string())),
+ });
+ }
+ }
+
+ let request = proto::GetValuesRequest {
+ signal_ids: request_signals,
+ };
+
+ let mut tonic_request = tonic::Request::new(request);
+
+ if config.send_auth {
+ tonic_request.extensions_mut().insert(permissions);
+ }
+
+ match broker.get_values(tonic_request).await {
+ Ok(response) => {
+ // Check that we actually expect an Ok answer
+
+ if config.request_first & !config.first_exist {
+ panic!("Should not get Ok as signal test.datapoint1 should not exist")
+ }
+ if config.request_first & !config.auth_first {
+ panic!("Should not get Ok as we do not have permission for signal test.datapoint2 ")
+ }
+ if config.request_second & !config.second_exist {
+ panic!("Should not get Ok as signal test.datapoint1 should not exist")
+ }
+ if config.request_second & !config.auth_second {
+ panic!("Should not get Ok as we do not have permission for signal test.datapoint2 ")
+ }
+
+ let get_response = response.into_inner();
+
+ let mut response_signals = Vec::new();
+
+ if config.request_first {
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32(-64)),
+ };
+ let datapoint = proto::Datapoint {
+ timestamp: Some(timestamp.into()),
+ value: Some(value),
+ };
+ response_signals.push(datapoint);
+ }
+ if config.request_second {
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Int32(-13)),
+ };
+ let datapoint = proto::Datapoint {
+ timestamp: Some(timestamp.into()),
+ value: Some(value),
+ };
+ response_signals.push(datapoint);
+ }
+
+ assert_eq!(
+ get_response,
+ proto::GetValuesResponse {
+ data_points: response_signals,
+ }
+ );
+ }
+ Err(status) => {
+                // It can be discussed what has precedence NotFound or Unauthenticated, does not really matter
+ // For now assuming that NotFound has precedence, at least if we have a valid token
+ if !config.send_auth {
+ assert_eq!(status.code(), tonic::Code::Unauthenticated)
+ } else if config.request_first & !config.first_exist {
+ assert_eq!(status.code(), tonic::Code::NotFound)
+ } else if config.request_first & !config.auth_first {
+ assert_eq!(status.code(), tonic::Code::PermissionDenied)
+ } else if config.request_second & !config.second_exist {
+ assert_eq!(status.code(), tonic::Code::NotFound)
+ } else if config.request_second & !config.auth_second {
+ assert_eq!(status.code(), tonic::Code::PermissionDenied)
+ } else {
+ panic!("GetValues failed with status: {:?}", status);
+ }
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_one_signal_ok() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .request_first()
+ .auth_first()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_ok() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_path_one_signal_ok() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .request_first()
+ .use_name_for_first()
+ .auth_first()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_path_two_signals_ok() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .use_name_for_first()
+ .request_second()
+ .use_name_for_second()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_no_signals_ok() {
+ // Expecting an empty list back
+
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_first_missing() {
+ let config = GetValuesConfigBuilder::new()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_second_missing() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_first_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_second_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_both_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .send_auth()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_first_missing_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .auth_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_second_missing_unauthorized() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .request_first()
+ .request_second()
+ .auth_first()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_get_values_id_two_signals_not_send_auth() {
+ let config = GetValuesConfigBuilder::new()
+ .first_exist()
+ .second_exist()
+ .request_first()
+ .request_second()
+ .build();
+ test_get_values_combo(config).await;
+ }
+
+ #[tokio::test]
+ async fn test_publish_value() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let request = proto::PublishValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ data_point: {
+ let timestamp = Some(std::time::SystemTime::now().into());
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ };
+
+ Some(proto::Datapoint {
+ timestamp,
+ value: Some(value),
+ })
+ },
+ };
+
+ // Manually insert permissions
+ let mut publish_value_request = tonic::Request::new(request);
+ publish_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.publish_value(publish_value_request).await {
+ Ok(response) => {
+ // Handle the successful response
+ let publish_response = response.into_inner();
+ assert_eq!(publish_response, proto::PublishValueResponse {})
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ panic!("Publish failed with status: {:?}", status);
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_publish_value_signal_id_not_found() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let _entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let request = proto::PublishValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(1234)),
+ }),
+ data_point: {
+ let timestamp = Some(std::time::SystemTime::now().into());
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ };
+
+ Some(proto::Datapoint {
+ timestamp,
+ value: Some(value),
+ })
+ },
+ };
+
+ // Manually insert permissions
+ let mut publish_value_request = tonic::Request::new(request);
+ publish_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.publish_value(publish_value_request).await {
+ Ok(_) => {
+ // Handle the successful response
+ panic!("Should not happen!");
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ assert_eq!(status.code(), tonic::Code::NotFound);
+ assert_eq!(status.message(), "Path not found");
+ }
+ }
+ }
+
+ #[tokio::test]
+ /// For kuksa_val_v2 we only have a single test to test min/max violations
+ /// More detailed test cases for different cases/datatypes in broker.rs
+ async fn test_publish_value_min_max_not_fulfilled() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Uint8,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ Some(broker::types::DataValue::Uint32(3)), // min
+ Some(broker::types::DataValue::Uint32(26)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let request = proto::PublishValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ data_point: {
+ let timestamp = Some(std::time::SystemTime::now().into());
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::Uint32(27)),
+ };
+
+ Some(proto::Datapoint {
+ timestamp,
+ value: Some(value),
+ })
+ },
+ };
+
+ // Manually insert permissions
+ let mut publish_value_request = tonic::Request::new(request);
+ publish_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.publish_value(publish_value_request).await {
+ Ok(_) => {
+ // Handle the successful response
+ panic!("Should not happen!");
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ assert_eq!(status.code(), tonic::Code::InvalidArgument);
+ // As of the today the first added datapoint get value 0 by default.
+ assert_eq!(status.message(), "Value out of min/max bounds (id: 0)");
+ }
+ }
+ }
+
+ async fn publish_value(
+ broker: &DataBroker,
+ entry_id: i32,
+        input_value: Option<bool>,
+        input_timestamp: Option<std::time::SystemTime>,
+ ) {
+ let timestamp = input_timestamp.map(|input_timestamp| input_timestamp.into());
+
+ let mut request = tonic::Request::new(proto::PublishValueRequest {
+ signal_id: Some(proto::SignalId {
+ signal: Some(proto::signal_id::Signal::Id(entry_id)),
+ }),
+ data_point: Some(proto::Datapoint {
+ timestamp,
+
+ value: match input_value {
+ Some(true) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ Some(false) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(false)),
+ }),
+ None => None,
+ },
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+ match broker.publish_value(request).await {
+ Ok(response) => {
+ // Handle the successful response
+ let publish_response = response.into_inner();
+
+ // Check if there is an error in the response
+ assert_eq!(publish_response, proto::PublishValueResponse {});
+ }
+ Err(status) => {
+ // Handle the error from the publish_value function
+ panic!("Publish failed with status: {:?}", status);
+ }
+ }
+ }
+
+ /*
+ Test subscribe service method
+ */
+ async fn test_subscribe_case(has_value: bool) {
+ async fn check_stream_next(
+            item: &Result<proto::SubscribeResponse, tonic::Status>,
+            input_value: Option<bool>,
+ ) {
+ // Create Datapoint
+            let mut expected_response: HashMap<String, proto::Datapoint> = HashMap::new();
+ // We expect to get an empty response first
+ expected_response.insert(
+ "test.datapoint1".to_string(),
+ proto::Datapoint {
+ timestamp: None,
+ value: match input_value {
+ Some(true) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ Some(false) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(false)),
+ }),
+ None => None,
+ },
+ },
+ );
+
+ match item {
+ Ok(subscribe_response) => {
+ // Process the SubscribeResponse
+ let response = &subscribe_response.entries;
+ assert_eq!(response.len(), expected_response.len());
+ for key in response
+ .keys()
+ .chain(expected_response.keys())
+                        .collect::<std::collections::HashSet<_>>()
+ {
+ match (response.get(key), expected_response.get(key)) {
+ (Some(entry1), Some(entry2)) => {
+ assert_eq!(entry1.value, entry2.value);
+ }
+ (Some(entry1), None) => {
+ panic!("Key '{}' is only in response: {:?}", key, entry1)
+ }
+ (None, Some(entry2)) => {
+ panic!("Key '{}' is only in expected_response: {:?}", key, entry2)
+ }
+ (None, None) => unreachable!(),
+ }
+ }
+ }
+ Err(err) => {
+ panic!("Error {:?}", err)
+ }
+ }
+ }
+
+ let broker = DataBroker::default();
+
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_string(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Some Description that Does Not Matter".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ if has_value {
+ publish_value(&broker, entry_id, Some(false), None).await
+ }
+
+ let mut request = tonic::Request::new(proto::SubscribeRequest {
+ signal_paths: vec!["test.datapoint1".to_string()],
+ buffer_size: 5,
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result = tokio::task::block_in_place(|| {
+ // Blocking operation here
+ // Since broker.subscribe is async, you need to run it in an executor
+ let rt = tokio::runtime::Runtime::new().unwrap();
+ rt.block_on(broker.subscribe(request))
+ });
+
+ // Publish "true" as value
+ publish_value(&broker, entry_id, Some(true), None).await;
+
+ // Publish "false" as value
+ publish_value(&broker, entry_id, Some(false), None).await;
+
+ // Publish "false" again but with new timestamp - as it is not an update we shall not get anything
+
+ let timestamp = std::time::SystemTime::now();
+ publish_value(&broker, entry_id, Some(false), timestamp.into()).await;
+
+ // Publish None as value, equals reset
+ publish_value(&broker, entry_id, None, None).await;
+
+ // Publish "true" as value
+
+ publish_value(&broker, entry_id, Some(true), None).await;
+
+ if let Ok(stream) = result {
+ // Process the stream by iterating over the items
+ let mut stream = stream.into_inner();
+
+ let mut item_count = 0;
+ while let Some(item) = stream.next().await {
+ match item_count {
+ 0 => {
+ check_stream_next(&item, if has_value { Some(false) } else { None }).await;
+ }
+ 1 => {
+ check_stream_next(&item, Some(true)).await;
+ }
+ 2 => {
+ // As long as value stays as false we do not get anything new, so prepare for None
+ check_stream_next(&item, Some(false)).await;
+ }
+ 3 => {
+ check_stream_next(&item, None).await;
+ }
+ 4 => {
+ check_stream_next(&item, Some(true)).await;
+ // And we do not expect more
+ break;
+ }
+ _ => panic!(
+ "You shouldn't land here too many items reported back to the stream."
+ ),
+ }
+ item_count += 1;
+ }
+ // Make sure stream is not closed in advance
+ assert_eq!(item_count, 4);
+ } else {
+ panic!("Something went wrong while getting the stream.")
+ }
+ }
+
+ /*
+ Test subscribe service method by id
+ */
+ async fn test_subscribe_case_by_id(has_value: bool) {
+ async fn check_stream_next_by_id(
+            item: &Result<proto::SubscribeByIdResponse, tonic::Status>,
+            input_value: Option<bool>,
+ signal_id: i32,
+ ) {
+ // Create Datapoint
+            let mut expected_response: HashMap<i32, proto::Datapoint> = HashMap::new();
+ // We expect to get an empty response first
+ expected_response.insert(
+ signal_id,
+ proto::Datapoint {
+ timestamp: None,
+ value: match input_value {
+ Some(true) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ Some(false) => Some(proto::Value {
+ typed_value: Some(proto::value::TypedValue::Bool(false)),
+ }),
+ None => None,
+ },
+ },
+ );
+
+ match item {
+ Ok(subscribe_response) => {
+ // Process the SubscribeResponse
+ let response = &subscribe_response.entries;
+ assert_eq!(response.len(), expected_response.len());
+ for key in response.keys() {
+ match (response.get(key), expected_response.get(key)) {
+ (Some(entry1), Some(entry2)) => {
+ assert_eq!(entry1.value, entry2.value);
+ }
+ (Some(entry1), None) => {
+ panic!("Key '{}' is only in response: {:?}", key, entry1)
+ }
+ (None, Some(entry2)) => {
+ panic!("Key '{}' is only in expected_response: {:?}", key, entry2)
+ }
+ (None, None) => unreachable!(),
+ }
+ }
+ }
+ Err(err) => {
+ panic!("Error {:?}", err)
+ }
+ }
+ }
+ let broker = DataBroker::default();
+
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_string(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Some Description that Does Not Matter".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ if has_value {
+ publish_value(&broker, entry_id, Some(false), None).await
+ }
+
+ let mut request = tonic::Request::new(proto::SubscribeByIdRequest {
+ signal_ids: vec![entry_id],
+ buffer_size: 5,
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result = tokio::task::block_in_place(|| {
+ // Blocking operation here
+ // Since broker.subscribe is async, you need to run it in an executor
+ let rt = tokio::runtime::Runtime::new().unwrap();
+ rt.block_on(broker.subscribe_by_id(request))
+ });
+
+ // Publish "true" as value
+ publish_value(&broker, entry_id, Some(true), None).await;
+
+ // Publish "false" as value
+ publish_value(&broker, entry_id, Some(false), None).await;
+
+ // Publish "false" again but with new timestamp - as it is not an update we shall not get anything
+
+ let timestamp = std::time::SystemTime::now();
+ publish_value(&broker, entry_id, Some(false), timestamp.into()).await;
+
+ // Publish None as value, equals reset
+ publish_value(&broker, entry_id, None, None).await;
+
+ // Publish "true" as value
+
+ publish_value(&broker, entry_id, Some(true), None).await;
+
+ if let Ok(stream) = result {
+ // Process the stream by iterating over the items
+ let mut stream = stream.into_inner();
+
+ let mut item_count = 0;
+ while let Some(item) = stream.next().await {
+ match item_count {
+ 0 => {
+ check_stream_next_by_id(
+ &item,
+ if has_value { Some(false) } else { None },
+ entry_id,
+ )
+ .await;
+ }
+ 1 => {
+ check_stream_next_by_id(&item, Some(true), entry_id).await;
+ }
+ 2 => {
+ // As long as value stays as false we do not get anything new, so prepare for None
+ check_stream_next_by_id(&item, Some(false), entry_id).await;
+ }
+ 3 => {
+ check_stream_next_by_id(&item, None, entry_id).await;
+ }
+ 4 => {
+ check_stream_next_by_id(&item, Some(true), entry_id).await;
+ // And we do not expect more
+ break;
+ }
+ _ => panic!(
+ "You shouldn't land here too many items reported back to the stream."
+ ),
+ }
+ item_count += 1;
+ }
+ // Make sure stream is not closed in advance
+ assert_eq!(item_count, 4);
+ } else {
+ panic!("Something went wrong while getting the stream.")
+ }
+ }
+
+ #[tokio::test(flavor = "multi_thread")]
+ async fn test_subscribe() {
+ test_subscribe_case(false).await;
+ test_subscribe_case(true).await;
+ test_subscribe_case_by_id(false).await;
+ test_subscribe_case_by_id(true).await;
+ }
+
+ /*
+ Test open_provider_stream service method
+ */
+ #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
+ async fn test_open_provider_stream() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+ let request_id = 1;
+
+ let entry_id = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let request = OpenProviderStreamRequest {
+ action: Some(open_provider_stream_request::Action::PublishValuesRequest(
+ PublishValuesRequest {
+ request_id,
+ data_points: {
+ let timestamp = Some(std::time::SystemTime::now().into());
+
+ let value = proto::Value {
+ typed_value: Some(proto::value::TypedValue::String(
+ "example_value".to_string(),
+ )),
+ };
+
+ let datapoint = proto::Datapoint {
+ timestamp,
+ value: Some(value),
+ };
+
+ let mut map = HashMap::new();
+ map.insert(entry_id, datapoint);
+ map
+ },
+ },
+ )),
+ };
+
+ // Manually insert permissions
+ let mut streaming_request = tonic_mock::streaming_request(vec![request]);
+ streaming_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.open_provider_stream(streaming_request).await {
+ Ok(response) => {
+ std::thread::sleep(std::time::Duration::from_secs(3));
+ tokio::spawn(async move {
+ std::thread::sleep(std::time::Duration::from_secs(3));
+ let stream = response.into_inner();
+ let mut receiver = stream.into_inner();
+ while let Some(value) = receiver.recv().await {
+ match value {
+ Ok(value) => match value.action {
+ Some(ProvideActuationResponse(_)) => {
+ panic!("Should not happen")
+ }
+ Some(PublishValuesResponse(publish_values_response)) => {
+ assert_eq!(publish_values_response.request_id, request_id);
+ assert_eq!(publish_values_response.status.len(), 1);
+ match publish_values_response.status.get(&entry_id) {
+ Some(value) => {
+ assert_eq!(value.code, 1);
+ assert_eq!(value.message, "Wrong Type");
+ }
+ None => {
+ panic!("Should not happen")
+ }
+ }
+ }
+ Some(BatchActuateStreamRequest(_)) => {
+ panic!("Should not happen")
+ }
+ None => {
+ panic!("Should not happen")
+ }
+ },
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+ });
+ }
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_list_metadata_min_max() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Int32,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ Some(broker::types::DataValue::Int32(-7)), // min
+ Some(broker::types::DataValue::Int32(19)), // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut data_req = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test.datapoint1".to_owned(),
+ filter: "".to_owned(),
+ });
+
+ // Manually insert permissions
+ data_req
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::list_metadata(&broker, data_req)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(list_response) => {
+ let entries_size = list_response.metadata.len();
+ assert_eq!(entries_size, 1);
+
+ let min: Option = Some(Value {
+ typed_value: Some(proto::value::TypedValue::Int32(-7)),
+ });
+ let max = Some(Value {
+ typed_value: Some(proto::value::TypedValue::Int32(19)),
+ });
+
+ assert_eq!(list_response.metadata.first().unwrap().min, min);
+ assert_eq!(list_response.metadata.first().unwrap().max, max);
+ }
+ Err(_status) => panic!("failed to execute get request"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_list_metadata_using_wildcard() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ authorized_access
+ .add_entry(
+ "test.branch.datapoint2".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test branch datapoint 2".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut wildcard_req_two_asteriks = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test.**".to_owned(),
+ filter: "".to_owned(),
+ });
+
+ let mut wildcard_req_one_asterik = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test.*".to_owned(),
+ filter: "".to_owned(),
+ });
+ // Manually insert permissions
+ wildcard_req_two_asteriks
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ wildcard_req_one_asterik
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::list_metadata(&broker, wildcard_req_two_asteriks)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(list_response) => {
+ let entries_size = list_response.metadata.len();
+ assert_eq!(entries_size, 2);
+ }
+ Err(_status) => panic!("failed to execute get request"),
+ }
+
+ match proto::val_server::Val::list_metadata(&broker, wildcard_req_one_asterik)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(list_response) => {
+ let entries_size = list_response.metadata.len();
+ assert_eq!(entries_size, 1);
+ }
+ Err(_status) => panic!("failed to execute get request"),
+ }
+ }
+
+ #[tokio::test]
+ async fn test_list_metadata_bad_request_pattern_or_not_found() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut wildcard_req = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test. **".to_owned(),
+ filter: "".to_owned(),
+ });
+
+ // Manually insert permissions
+ wildcard_req
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::list_metadata(&broker, wildcard_req)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(_) => {
+ panic!("We shall not succeed with a blank before *")
+ }
+ Err(error) => {
+ assert_eq!(
+ error.code(),
+ tonic::Code::InvalidArgument,
+ "unexpected error code"
+ );
+ assert_eq!(
+ error.message(),
+ "Invalid Pattern Argument",
+ "unexpected error reason"
+ );
+ }
+ }
+
+ let mut not_found_req = tonic::Request::new(proto::ListMetadataRequest {
+ root: "test.notfound".to_owned(),
+ filter: "".to_owned(),
+ });
+
+ // Manually insert permissions
+ not_found_req
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::list_metadata(&broker, not_found_req)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(_) => {
+ panic!("Success not expected!")
+ }
+ Err(error) => {
+ assert_eq!(error.code(), tonic::Code::NotFound, "unexpected error code");
+ assert_eq!(
+ error.message(),
+ "Specified root branch does not exist",
+ "unexpected error reason"
+ );
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_actuate_out_of_range() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.Cabin.Infotainment.Navigation.Volume".to_owned(),
+ broker::DataType::Uint8,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ Some(broker::types::DataValue::Uint32(0)), // min
+ Some(broker::types::DataValue::Uint32(100)), // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let vss_id = authorized_access
+ .get_id_by_path("Vehicle.Cabin.Infotainment.Navigation.Volume")
+ .await
+ .expect(
+ "Resolving the id of Vehicle.Cabin.Infotainment.Navigation.Volume should succeed",
+ );
+ let vss_ids = vec![vss_id];
+
+ let (sender, _) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Infotainment.Navigation.Volume".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Uint32(200)),
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(
+ result_response.unwrap_err().code(),
+ tonic::Code::InvalidArgument
+ )
+ }
+
+ #[tokio::test]
+ async fn test_actuate_signal_not_found() {
+ let broker = DataBroker::default();
+
+ let mut request = tonic::Request::new(ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Non.Existing".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(result_response.unwrap_err().code(), tonic::Code::NotFound)
+ }
+
+ #[tokio::test]
+ async fn test_actuate_can_provider_unavailable() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut request = tonic::Request::new(ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(
+ result_response.unwrap_err().code(),
+ tonic::Code::Unavailable
+ )
+ }
+
+ #[tokio::test]
+ async fn test_actuate_success() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let vss_id = authorized_access
+ .get_id_by_path("Vehicle.ADAS.ABS.IsEnabled")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.ABS.IsEnabled should succeed");
+ let vss_ids = vec![vss_id];
+
+ let (sender, mut receiver) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::actuate(&broker, request).await;
+ assert!(result_response.is_ok());
+
+ let result_response = receiver.recv().await.expect("Option should be Some");
+ result_response.expect("Result should be Ok");
+ }
+
+ #[tokio::test]
+ async fn test_batch_actuate_out_of_range() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint 'Vehicle.ADAS.ABS.IsEnabled' should succeed");
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register 'Vehicle.ADAS.CruiseControl.IsActive' datapoint should succeed");
+
+ authorized_access
+ .add_entry(
+ "Vehicle.Cabin.Infotainment.Navigation.Volume".to_owned(),
+ broker::DataType::Uint8,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ Some(broker::types::DataValue::Uint32(0)), // min
+ Some(broker::types::DataValue::Uint32(100)), // max
+ None,
+ None,
+ )
+ .await
+ .expect(
+ "Register datapoint 'Vehicle.Cabin.Infotainment.Navigation.Volume' should succeed",
+ );
+
+ let vss_id_abs = authorized_access
+ .get_id_by_path("Vehicle.ADAS.ABS.IsEnabled")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.ABS.IsEnabled should succeed");
+ let vss_id_cruise_control = authorized_access
+ .get_id_by_path("Vehicle.ADAS.CruiseControl.IsActive")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.CruiseControl.IsActive should succeed");
+ let vss_id_navigation_volume = authorized_access
+ .get_id_by_path("Vehicle.Cabin.Infotainment.Navigation.Volume")
+ .await
+ .expect(
+ "Resolving the id of Vehicle.Cabin.Infotainment.Navigation.Volume should succeed",
+ );
+
+ let vss_ids = vec![vss_id_abs, vss_id_cruise_control, vss_id_navigation_volume];
+
+ let (sender, _receiver) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(BatchActuateRequest {
+ actuate_requests: vec![
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Infotainment.Navigation.Volume".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Uint32(200)),
+ }),
+ },
+ ],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::batch_actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(
+ result_response.unwrap_err().code(),
+ tonic::Code::InvalidArgument
+ )
+ }
+
+ #[tokio::test]
+ async fn test_batch_actuate_signal_not_found() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let mut request = tonic::Request::new(BatchActuateRequest {
+ actuate_requests: vec![
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Non.Existing".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::batch_actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(result_response.unwrap_err().code(), tonic::Code::NotFound)
+ }
+
+ #[tokio::test]
+ async fn test_batch_actuate_provider_unavailable() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let vss_id_abs = authorized_access
+ .get_id_by_path("Vehicle.ADAS.ABS.IsEnabled")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.ABS.IsEnabled should succeed");
+
+ let vss_ids = vec![vss_id_abs];
+
+ let (sender, _receiver) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(BatchActuateRequest {
+ actuate_requests: vec![
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::batch_actuate(&broker, request).await;
+ assert!(result_response.is_err());
+ assert_eq!(
+ result_response.unwrap_err().code(),
+ tonic::Code::Unavailable
+ )
+ }
+
+ #[tokio::test]
+ async fn test_batch_actuate_success() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let vss_id_abs = authorized_access
+ .get_id_by_path("Vehicle.ADAS.ABS.IsEnabled")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.ABS.IsEnabled should succeed");
+ let vss_id_cruise_control = authorized_access
+ .get_id_by_path("Vehicle.ADAS.CruiseControl.IsActive")
+ .await
+ .expect("Resolving the id of Vehicle.ADAS.CruiseControl.IsActive should succeed");
+
+ let vss_ids = vec![vss_id_abs, vss_id_cruise_control];
+
+ let (sender, mut receiver) = mpsc::channel(10);
+ let actuation_provider = Provider { sender };
+ authorized_access
+ .provide_actuation(vss_ids, Box::new(actuation_provider))
+ .await
+ .expect("Registering a new Actuation Provider should succeed");
+
+ let mut request = tonic::Request::new(BatchActuateRequest {
+ actuate_requests: vec![
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ActuateRequest {
+ signal_id: Some(SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.CruiseControl.IsActive".to_string(),
+ )),
+ }),
+ value: Some(Value {
+ typed_value: Some(proto::value::TypedValue::Bool(true)),
+ }),
+ },
+ ],
+ });
+
+ request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ let result_response = proto::val_server::Val::batch_actuate(&broker, request).await;
+ assert!(result_response.is_ok());
+
+ let result_response = receiver.recv().await.expect("Option should be Some");
+ result_response.expect("Result should be Ok");
+ }
+
+ #[tokio::test]
+ async fn test_provide_actuation_signal_not_found() {
+ let broker = DataBroker::default();
+
+ let request = OpenProviderStreamRequest {
+ action: Some(
+ open_provider_stream_request::Action::ProvideActuationRequest(
+ proto::ProvideActuationRequest {
+ actuator_identifiers: vec![SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.Cabin.Non.Existing".to_string(),
+ )),
+ }],
+ },
+ ),
+ ),
+ };
+
+ let mut streaming_request = tonic_mock::streaming_request(vec![request]);
+ streaming_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::open_provider_stream(&broker, streaming_request).await {
+ Ok(response) => {
+ let stream = response.into_inner();
+ let mut receiver = stream.into_inner();
+ let result_response = receiver
+ .recv()
+ .await
+ .expect("result_response should be Some");
+ assert!(result_response.is_err());
+ assert_eq!(result_response.unwrap_err().code(), tonic::Code::NotFound)
+ }
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_provide_actuation_success() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ authorized_access
+ .add_entry(
+ "Vehicle.ADAS.ABS.IsEnabled".to_owned(),
+ broker::DataType::Bool,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Actuator,
+ "Some funny description".to_owned(),
+ None, // min
+ None, // max
+ None,
+ None,
+ )
+ .await
+ .expect("Register datapoint should succeed");
+
+ let request = OpenProviderStreamRequest {
+ action: Some(
+ open_provider_stream_request::Action::ProvideActuationRequest(
+ proto::ProvideActuationRequest {
+ actuator_identifiers: vec![SignalId {
+ signal: Some(proto::signal_id::Signal::Path(
+ "Vehicle.ADAS.ABS.IsEnabled".to_string(),
+ )),
+ }],
+ },
+ ),
+ ),
+ };
+
+ let mut streaming_request = tonic_mock::streaming_request(vec![request]);
+ streaming_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match proto::val_server::Val::open_provider_stream(&broker, streaming_request).await {
+ Ok(response) => {
+ let stream = response.into_inner();
+ let mut receiver = stream.into_inner();
+ let result_response = receiver
+ .recv()
+ .await
+ .expect("result_response should be Some");
+
+ assert!(result_response.is_ok())
+ }
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+
+ #[tokio::test]
+ async fn test_get_server_info() {
+ let version = "1.1.1";
+ let commit_hash = "3a3c332f5427f2db7a0b8582262c9f5089036c23";
+ let broker = DataBroker::new(version, commit_hash);
+
+ let request = tonic::Request::new(proto::GetServerInfoRequest {});
+
+ match proto::val_server::Val::get_server_info(&broker, request)
+ .await
+ .map(|res| res.into_inner())
+ {
+ Ok(response) => {
+ assert_eq!(response.name, "databroker");
+ assert_eq!(response.version, version);
+ assert_eq!(response.commit_hash, commit_hash);
+ }
+ Err(_) => {
+ panic!("Should not happen")
+ }
+ }
+ }
+}
diff --git a/databroker/src/grpc/mod.rs b/databroker/src/grpc/mod.rs
index c4c86d4a..a7a15a2b 100644
--- a/databroker/src/grpc/mod.rs
+++ b/databroker/src/grpc/mod.rs
@@ -14,4 +14,5 @@
pub mod server;
mod kuksa_val_v1;
+mod kuksa_val_v2;
mod sdv_databroker_v1;
diff --git a/databroker/src/grpc/sdv_databroker_v1/broker.rs b/databroker/src/grpc/sdv_databroker_v1/broker.rs
index 532ae1f1..32669fd0 100644
--- a/databroker/src/grpc/sdv_databroker_v1/broker.rs
+++ b/databroker/src/grpc/sdv_databroker_v1/broker.rs
@@ -131,6 +131,8 @@ impl proto::broker_server::Broker for broker::DataBroker {
data_type: None,
description: None,
allowed: None,
+ max: None,
+ min: None,
unit: None,
},
));
diff --git a/databroker/src/grpc/sdv_databroker_v1/collector.rs b/databroker/src/grpc/sdv_databroker_v1/collector.rs
index 4bec1701..963ab632 100644
--- a/databroker/src/grpc/sdv_databroker_v1/collector.rs
+++ b/databroker/src/grpc/sdv_databroker_v1/collector.rs
@@ -60,6 +60,8 @@ impl proto::collector_server::Collector for broker::DataBroker {
data_type: None,
description: None,
allowed: None,
+ max: None,
+ min: None,
unit: None,
},
)
@@ -129,6 +131,8 @@ impl proto::collector_server::Collector for broker::DataBroker {
data_type: None,
description: None,
allowed: None,
+ max: None,
+ min: None,
unit: None,
}
)
@@ -207,6 +211,8 @@ impl proto::collector_server::Collector for broker::DataBroker {
broker::ChangeType::from(&change_type),
broker::types::EntryType::Sensor,
metadata.description,
+ None, // min
+ None, // max
None,
None,
)
@@ -264,3 +270,85 @@ impl proto::collector_server::Collector for broker::DataBroker {
}
}
}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{broker::DataBroker, permissions};
+ use proto::collector_server::Collector;
+
+ #[tokio::test]
+ async fn test_publish_value_min_max_not_fulfilled() {
+ let broker = DataBroker::default();
+ let authorized_access = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ let entry_id_1 = authorized_access
+ .add_entry(
+ "test.datapoint1".to_owned(),
+ broker::DataType::Uint8,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ Some(broker::types::DataValue::Uint32(3)), // min
+ Some(broker::types::DataValue::Uint32(26)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let entry_id_2 = authorized_access
+ .add_entry(
+ "test.datapoint1.Speed".to_owned(),
+ broker::DataType::Float,
+ broker::ChangeType::OnChange,
+ broker::EntryType::Sensor,
+ "Test datapoint 1".to_owned(),
+ Some(broker::types::DataValue::Float(1.0)), // min
+ Some(broker::types::DataValue::Float(100.0)), // max
+ None,
+ None,
+ )
+ .await
+ .unwrap();
+
+ let datapoint: proto::Datapoint = proto::Datapoint {
+ timestamp: None,
+ value: Some(proto::datapoint::Value::Int32Value(50)),
+ };
+
+ let mut datapoints = HashMap::new();
+ datapoints.insert(entry_id_1, datapoint.clone());
+ datapoints.insert(entry_id_2, datapoint);
+
+ let request = proto::UpdateDatapointsRequest { datapoints };
+
+ // Manually insert permissions
+ let mut publish_value_request = tonic::Request::new(request);
+ publish_value_request
+ .extensions_mut()
+ .insert(permissions::ALLOW_ALL.clone());
+
+ match broker.update_datapoints(publish_value_request).await {
+ Ok(response) => {
+ let response = response.into_inner();
+ assert_eq!(response.errors.len(), 2);
+
+ let error_entry_1 = response.errors.get(&entry_id_1);
+ assert_eq!(
+ error_entry_1.unwrap().clone(),
+ proto::DatapointError::OutOfBounds as i32
+ );
+
+ let error_entry_2 = response.errors.get(&entry_id_2);
+ assert_eq!(
+ error_entry_2.unwrap().clone(),
+ proto::DatapointError::InvalidType as i32
+ );
+ }
+ Err(_) => {
+ panic!("Should not happen!");
+ }
+ }
+ }
+}
diff --git a/databroker/src/grpc/sdv_databroker_v1/conversions.rs b/databroker/src/grpc/sdv_databroker_v1/conversions.rs
index d52600e6..2262b0d0 100644
--- a/databroker/src/grpc/sdv_databroker_v1/conversions.rs
+++ b/databroker/src/grpc/sdv_databroker_v1/conversions.rs
@@ -16,6 +16,7 @@ use databroker_proto::sdv::databroker::v1 as proto;
use prost_types::Timestamp;
use std::convert::TryInto;
use std::time::SystemTime;
+use tracing::debug;
use crate::broker;
@@ -302,6 +303,91 @@ impl From<&proto::ChangeType> for broker::ChangeType {
}
}
+fn transform_allowed(value: &Option) -> Option {
+ match value {
+ Some(value) => match value {
+ broker::DataValue::StringArray(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::StringValues(proto::StringArray {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::Int32Array(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::Int32Values(proto::Int32Array {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::Int64Array(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::Int64Values(proto::Int64Array {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::Uint32Array(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::Uint32Values(proto::Uint32Array {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::Uint64Array(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::Uint64Values(proto::Uint64Array {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::FloatArray(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::FloatValues(proto::FloatArray {
+ values: array.clone(),
+ })),
+ }),
+ broker::DataValue::DoubleArray(array) => Some(proto::Allowed {
+ values: Some(proto::allowed::Values::DoubleValues(proto::DoubleArray {
+ values: array.clone(),
+ })),
+ }),
+ _ => {
+ debug!("Wrong datatype used for allowed values");
+ None
+ }
+ },
+ None => None,
+ }
+}
+
+fn transform_min_max(value: &Option) -> Option {
+ match value {
+ Some(value) => match value {
+ broker::DataValue::String(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::String(
+ value.to_owned(),
+ )),
+ }),
+ broker::DataValue::Bool(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Bool(*value)),
+ }),
+ broker::DataValue::Int32(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Int32(*value)),
+ }),
+ broker::DataValue::Int64(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Int64(*value)),
+ }),
+ broker::DataValue::Uint32(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Uint32(*value)),
+ }),
+ broker::DataValue::Uint64(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Uint64(*value)),
+ }),
+ broker::DataValue::Float(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Float(*value)),
+ }),
+ broker::DataValue::Double(value) => Some(proto::ValueRestriction {
+ typed_value: Some(proto::value_restriction::TypedValue::Double(*value)),
+ }),
+ _ => {
+ debug!("Wrong datatype used for min/max values");
+ None
+ }
+ },
+ None => None,
+ }
+}
+
impl From<&broker::Metadata> for proto::Metadata {
fn from(metadata: &broker::Metadata) -> Self {
proto::Metadata {
@@ -311,54 +397,9 @@ impl From<&broker::Metadata> for proto::Metadata {
data_type: proto::DataType::from(&metadata.data_type) as i32,
change_type: proto::ChangeType::Continuous as i32, // TODO: Add to metadata
description: metadata.description.to_owned(),
- allowed: match metadata.allowed.as_ref() {
- Some(broker::DataValue::StringArray(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::StringValues(proto::StringArray {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::Int32Array(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::Int32Values(proto::Int32Array {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::Int64Array(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::Int64Values(proto::Int64Array {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::Uint32Array(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::Uint32Values(proto::Uint32Array {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::Uint64Array(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::Uint64Values(proto::Uint64Array {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::FloatArray(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::FloatValues(proto::FloatArray {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::DoubleArray(vec)) => Some(proto::Allowed {
- values: Some(proto::allowed::Values::DoubleValues(proto::DoubleArray {
- values: vec.clone(),
- })),
- }),
- Some(broker::DataValue::BoolArray(_))
- | Some(broker::DataValue::NotAvailable)
- | Some(broker::DataValue::Bool(_))
- | Some(broker::DataValue::String(_))
- | Some(broker::DataValue::Int32(_))
- | Some(broker::DataValue::Int64(_))
- | Some(broker::DataValue::Uint32(_))
- | Some(broker::DataValue::Uint64(_))
- | Some(broker::DataValue::Float(_))
- | Some(broker::DataValue::Double(_))
- | None => None,
- },
+ allowed: transform_allowed(&metadata.allowed),
+ min: transform_min_max(&metadata.min),
+ max: transform_min_max(&metadata.max),
}
}
}
@@ -370,7 +411,9 @@ impl From<&broker::UpdateError> for proto::DatapointError {
broker::UpdateError::WrongType | broker::UpdateError::UnsupportedType => {
proto::DatapointError::InvalidType
}
- broker::UpdateError::OutOfBounds => proto::DatapointError::OutOfBounds,
+ broker::UpdateError::OutOfBoundsAllowed => proto::DatapointError::OutOfBounds,
+ broker::UpdateError::OutOfBoundsMinMax => proto::DatapointError::OutOfBounds,
+ broker::UpdateError::OutOfBoundsType => proto::DatapointError::OutOfBounds,
broker::UpdateError::PermissionDenied => proto::DatapointError::AccessDenied,
broker::UpdateError::PermissionExpired => proto::DatapointError::AccessDenied,
}
diff --git a/databroker/src/grpc/server.rs b/databroker/src/grpc/server.rs
index 8bc282ca..c4b95753 100644
--- a/databroker/src/grpc/server.rs
+++ b/databroker/src/grpc/server.rs
@@ -13,11 +13,15 @@
use std::{convert::TryFrom, future::Future, time::Duration};
-use tokio::net::TcpListener;
-use tokio_stream::wrappers::TcpListenerStream;
-use tonic::transport::Server;
+use futures::Stream;
+use tokio::{
+ io::{AsyncRead, AsyncWrite},
+ net::{TcpListener, UnixListener},
+};
+use tokio_stream::wrappers::{TcpListenerStream, UnixListenerStream};
#[cfg(feature = "tls")]
use tonic::transport::ServerTlsConfig;
+use tonic::transport::{server::Connected, Server};
use tracing::{debug, info};
use databroker_proto::{kuksa, sdv};
@@ -34,9 +38,10 @@ pub enum ServerTLS {
Enabled { tls_config: ServerTlsConfig },
}
-#[derive(PartialEq)]
+#[derive(PartialEq, Clone)]
pub enum Api {
KuksaValV1,
+ KuksaValV2,
SdvDatabrokerV1,
}
@@ -95,7 +100,7 @@ where
databroker.shutdown().await;
}
-pub async fn serve(
+pub async fn serve_tcp(
addr: impl Into,
broker: broker::DataBroker,
#[cfg(feature = "tls")] server_tls: ServerTLS,
@@ -109,25 +114,14 @@ where
let socket_addr = addr.into();
let listener = TcpListener::bind(socket_addr).await?;
- /* On Linux systems try to notify daemon readiness to systemd.
- * This function determines whether the a system is using systemd
- * or not, so it is safe to use on non-systemd systems as well.
- */
- #[cfg(target_os = "linux")]
- {
- match sd_notify::booted() {
- Ok(true) => {
- info!("Notifying systemd that the service is ready");
- sd_notify::notify(false, &[sd_notify::NotifyState::Ready])?;
- }
- _ => {
- debug!("System is not using systemd, will not try to notify");
- }
- }
+ if let Ok(addr) = listener.local_addr() {
+ info!("Listening on {}", addr);
}
+ let incoming = TcpListenerStream::new(listener);
+
serve_with_incoming_shutdown(
- listener,
+ incoming,
broker,
#[cfg(feature = "tls")]
server_tls,
@@ -138,10 +132,9 @@ where
.await
}
-pub async fn serve_with_incoming_shutdown(
- listener: TcpListener,
+pub async fn serve_uds(
+ path: impl AsRef,
broker: broker::DataBroker,
- #[cfg(feature = "tls")] server_tls: ServerTLS,
apis: &[Api],
authorization: Authorization,
signal: F,
@@ -149,12 +142,45 @@ pub async fn serve_with_incoming_shutdown(
where
F: Future,
{
- broker.start_housekeeping_task();
+ let listener = UnixListener::bind(path)?;
+
if let Ok(addr) = listener.local_addr() {
- info!("Listening on {}", addr);
+ match addr.as_pathname() {
+ Some(pathname) => info!("Listening on unix socket at {}", pathname.display()),
+ None => info!("Listening on unix socket (unknown path)"),
+ }
}
- let incoming = TcpListenerStream::new(listener);
+ let incoming = UnixListenerStream::new(listener);
+
+ serve_with_incoming_shutdown(
+ incoming,
+ broker,
+ ServerTLS::Disabled,
+ apis,
+ authorization,
+ signal,
+ )
+ .await
+}
+
+pub async fn serve_with_incoming_shutdown(
+ incoming: I,
+ broker: broker::DataBroker,
+ #[cfg(feature = "tls")] server_tls: ServerTLS,
+ apis: &[Api],
+ authorization: Authorization,
+ signal: F,
+) -> Result<(), Box>
+where
+ F: Future,
+ I: Stream- >,
+ IO: AsyncRead + AsyncWrite + Connected + Unpin + Send + 'static,
+ IO::ConnectInfo: Clone + Send + Sync + 'static,
+ IE: Into
>,
+{
+ broker.start_housekeeping_task();
+
let mut server = Server::builder()
.http2_keepalive_interval(Some(Duration::from_secs(10)))
.http2_keepalive_timeout(Some(Duration::from_secs(20)));
@@ -187,6 +213,20 @@ where
let mut router = server.add_optional_service(kuksa_val_v1);
+ if apis.contains(&Api::KuksaValV2) {
+ let service = tonic_reflection::server::Builder::configure()
+ .register_encoded_file_descriptor_set(kuksa::val::v2::FILE_DESCRIPTOR_SET)
+ .build()
+ .unwrap();
+
+ router = router.add_service(service).add_optional_service(Some(
+ kuksa::val::v2::val_server::ValServer::with_interceptor(
+ broker.clone(),
+ authorization.clone(),
+ ),
+ ));
+ }
+
if apis.contains(&Api::SdvDatabrokerV1) {
router = router.add_optional_service(Some(
sdv::databroker::v1::broker_server::BrokerServer::with_interceptor(
diff --git a/databroker/src/main.rs b/databroker/src/main.rs
index 1281241d..bc6d5007 100644
--- a/databroker/src/main.rs
+++ b/databroker/src/main.rs
@@ -15,12 +15,19 @@
#[global_allocator]
static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
+static DEFAULT_UNIX_SOCKET_PATH: &str = "/run/kuksa/databroker.sock";
+
+use std::io;
+use std::os::unix::fs::FileTypeExt;
+use std::path::Path;
+
use databroker::authorization::Authorization;
use databroker::broker::RegistrationError;
#[cfg(feature = "tls")]
use databroker::grpc::server::ServerTLS;
+use std::thread::available_parallelism;
use tokio::select;
use tokio::signal::unix::{signal, SignalKind};
#[cfg(feature = "tls")]
@@ -64,6 +71,8 @@ async fn add_kuksa_attribute(
description,
None,
None,
+ None,
+ None,
)
.await
{
@@ -82,6 +91,8 @@ async fn add_kuksa_attribute(
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)];
@@ -111,10 +122,11 @@ async fn read_metadata_file<'a, 'b>(
let path = filename.trim();
info!("Populating metadata from file '{}'", path);
let metadata_file = std::fs::OpenOptions::new().read(true).open(filename)?;
- let entries = vss::parse_vss_from_reader(&metadata_file)?;
+ let buffered = std::io::BufReader::new(metadata_file);
+ let entries = vss::parse_vss_from_reader(buffered)?;
for (path, entry) in entries {
- debug!("Adding VSS datapoint type {}", path);
+ debug!("Adding VSS datapoint {}", path);
match database
.add_entry(
@@ -123,6 +135,8 @@ async fn read_metadata_file<'a, 'b>(
entry.change_type,
entry.entry_type,
entry.description,
+ entry.min,
+ entry.max,
entry.allowed,
entry.unit,
)
@@ -144,6 +158,8 @@ async fn read_metadata_file<'a, 'b>(
data_type: None,
description: None,
allowed: None,
+ min: None,
+ max: None,
unit: None,
},
)];
@@ -169,9 +185,18 @@ async fn read_metadata_file<'a, 'b>(
Ok(())
}
-#[tokio::main]
-async fn main() -> Result<(), Box> {
+fn unlink_unix_domain_socket(path: impl AsRef) -> Result<(), io::Error> {
+ if let Ok(metadata) = std::fs::metadata(&path) {
+ if metadata.file_type().is_socket() {
+ std::fs::remove_file(&path)?;
+ }
+ };
+ Ok(())
+}
+
+fn main() -> Result<(), Box> {
let version = option_env!("CARGO_PKG_VERSION").unwrap_or_default();
+ let commit_sha = option_env!("VERGEN_GIT_SHA").unwrap_or_default();
let about = format!(
concat!(
@@ -218,8 +243,26 @@ async fn main() -> Result<(), Box> {
.default_value("55555"),
)
.arg(
- Arg::new("vss-file")
+ Arg::new("enable-unix-socket")
+ .display_order(3)
+ .long("enable-unix-socket")
+ .help("Listen on unix socket, default /run/kuksa/databroker.sock")
+ .action(ArgAction::SetTrue)
+ .env("KUKSA_DATABROKER_ENABLE_UNIX_SOCKET")
+ )
+ .arg(
+ Arg::new("unix-socket")
.display_order(4)
+ .long("unix-socket")
+ .help("Listen on unix socket, e.g. /tmp/kuksa/databroker.sock")
+ .action(ArgAction::Set)
+ .value_name("PATH")
+ .required(false)
+ .env("KUKSA_DATABROKER_UNIX_SOCKET"),
+ )
+ .arg(
+ Arg::new("vss-file")
+ .display_order(5)
.alias("metadata")
.long("vss")
.help("Populate data broker with VSS metadata from (comma-separated) list of files")
@@ -232,7 +275,7 @@ async fn main() -> Result<(), Box> {
)
.arg(
Arg::new("jwt-public-key")
- .display_order(5)
+ .display_order(6)
.long("jwt-public-key")
.help("Public key used to verify JWT access tokens")
.action(ArgAction::Set)
@@ -241,17 +284,27 @@ async fn main() -> Result<(), Box> {
)
.arg(
Arg::new("disable-authorization")
- .display_order(6)
+ .display_order(7)
.long("disable-authorization")
.help("Disable authorization")
.action(ArgAction::SetTrue),
)
.arg(
Arg::new("enable-databroker-v1")
- .display_order(30)
+ .display_order(33)
.long("enable-databroker-v1")
.help("Enable sdv.databroker.v1 (GRPC) service")
.action(ArgAction::SetTrue),
+ )
+ .arg(
+ Arg::new("worker-threads")
+ .display_order(34)
+ .long("worker-threads")
+ .help("How many worker threads will be spawned by the tokio runtime. Default is as many cores are detected on the system")
+ .value_name("WORKER_THREADS")
+ .required(false)
+ .env("KUKSA_WORKER_THREADS")
+ .value_parser(clap::value_parser!(usize))
);
#[cfg(feature = "tls")]
@@ -320,152 +373,215 @@ async fn main() -> Result<(), Box> {
let args = parser.get_matches();
- // install global collector configured based on RUST_LOG env var.
- databroker::init_logging();
-
- info!("Starting Kuksa Databroker {}", version);
-
- let ip_addr = args.get_one::("address").unwrap().parse()?;
- let port = args
- .get_one::("port")
- .expect("port should be a number");
- let addr = std::net::SocketAddr::new(ip_addr, *port);
-
- let broker = broker::DataBroker::new(version);
- let database = broker.authorized_access(&permissions::ALLOW_ALL);
-
- add_kuksa_attribute(
- &database,
- "Kuksa.Databroker.GitVersion".to_owned(),
- option_env!("VERGEN_GIT_SEMVER_LIGHTWEIGHT")
- .unwrap_or("N/A")
- .to_owned(),
- "Databroker version as reported by GIT".to_owned(),
- )
- .await;
-
- add_kuksa_attribute(
- &database,
- "Kuksa.Databroker.CargoVersion".to_owned(),
- option_env!("CARGO_PKG_VERSION").unwrap_or("N/A").to_owned(),
- "Databroker version as reported by GIT".to_owned(),
- )
- .await;
-
- add_kuksa_attribute(
- &database,
- "Kuksa.Databroker.CommitSha".to_owned(),
- option_env!("VERGEN_GIT_SHA").unwrap_or("N/A").to_owned(),
- "Commit SHA of current version".to_owned(),
- )
- .await;
-
- if let Some(metadata_filenames) = args.get_many::("vss-file") {
- for filename in metadata_filenames {
- read_metadata_file(&database, filename).await?;
+ let cores = available_parallelism().unwrap().get();
+ let worker_threads: &usize = args.get_one::("worker-threads").unwrap_or(&cores);
+
+ let runtime = tokio::runtime::Builder::new_multi_thread()
+ .worker_threads(*worker_threads)
+ .enable_all()
+ .build()
+ .unwrap();
+
+ runtime.block_on(async {
+ // install global collector configured based on RUST_LOG env var.
+ databroker::init_logging();
+
+ info!("Starting Kuksa Databroker {}", version);
+ info!(
+ "Using {} threads with {} cores available on the system",
+ worker_threads, cores
+ );
+
+ let ip_addr = args.get_one::("address").unwrap().parse()?;
+ let port = args
+ .get_one::("port")
+ .expect("port should be a number");
+ let addr = std::net::SocketAddr::new(ip_addr, *port);
+
+ let broker = broker::DataBroker::new(version, commit_sha);
+ let database = broker.authorized_access(&permissions::ALLOW_ALL);
+
+ add_kuksa_attribute(
+ &database,
+ "Kuksa.Databroker.GitVersion".to_owned(),
+ option_env!("VERGEN_GIT_SEMVER_LIGHTWEIGHT")
+ .unwrap_or("N/A")
+ .to_owned(),
+ "Databroker version as reported by GIT".to_owned(),
+ )
+ .await;
+
+ add_kuksa_attribute(
+ &database,
+ "Kuksa.Databroker.CargoVersion".to_owned(),
+ option_env!("CARGO_PKG_VERSION").unwrap_or("N/A").to_owned(),
+ "Databroker version as reported by GIT".to_owned(),
+ )
+ .await;
+
+ add_kuksa_attribute(
+ &database,
+ "Kuksa.Databroker.CommitSha".to_owned(),
+ option_env!("VERGEN_GIT_SHA").unwrap_or("N/A").to_owned(),
+ "Commit SHA of current version".to_owned(),
+ )
+ .await;
+
+ if let Some(metadata_filenames) = args.get_many::("vss-file") {
+ for filename in metadata_filenames {
+ read_metadata_file(&database, filename).await?;
+ }
}
- }
- #[cfg(feature = "tls")]
- let tls_config = if args.get_flag("insecure") {
- ServerTLS::Disabled
- } else {
- let cert_file = args.get_one::("tls-cert");
- let key_file = args.get_one::("tls-private-key");
- match (cert_file, key_file) {
- (Some(cert_file), Some(key_file)) => {
- let cert = std::fs::read(cert_file)?;
- let key = std::fs::read(key_file)?;
- let identity = tonic::transport::Identity::from_pem(cert, key);
- ServerTLS::Enabled {
- tls_config: tonic::transport::ServerTlsConfig::new().identity(identity),
+ #[cfg(feature = "tls")]
+ let tls_config = if args.get_flag("insecure") {
+ ServerTLS::Disabled
+ } else {
+ let cert_file = args.get_one::("tls-cert");
+ let key_file = args.get_one::("tls-private-key");
+ match (cert_file, key_file) {
+ (Some(cert_file), Some(key_file)) => {
+ let cert = std::fs::read(cert_file)?;
+ let key = std::fs::read(key_file)?;
+ let identity = tonic::transport::Identity::from_pem(cert, key);
+ ServerTLS::Enabled {
+ tls_config: tonic::transport::ServerTlsConfig::new().identity(identity),
+ }
+ }
+ (Some(_), None) => {
+ return Err(
+ "TLS private key (--tls-private-key) must be set if --tls-cert is.".into(),
+ );
+ }
+ (None, Some(_)) => {
+ return Err(
+ "TLS certificate (--tls-cert) must be set if --tls-private-key is.".into(),
+ );
+ }
+ (None, None) => {
+ warn!(
+ "TLS is not enabled. Default behavior of accepting insecure connections \
+ when TLS is not configured may change in the future! \
+ Please use --insecure to explicitly enable this behavior."
+ );
+ ServerTLS::Disabled
}
}
- (Some(_), None) => {
- return Err(
- "TLS private key (--tls-private-key) must be set if --tls-cert is.".into(),
- );
- }
- (None, Some(_)) => {
- return Err(
- "TLS certificate (--tls-cert) must be set if --tls-private-key is.".into(),
- );
- }
- (None, None) => {
- warn!(
- "TLS is not enabled. Default behavior of accepting insecure connections \
- when TLS is not configured may change in the future! \
- Please use --insecure to explicitly enable this behavior."
- );
- ServerTLS::Disabled
- }
- }
- };
+ };
- let enable_authorization = !args.get_flag("disable-authorization");
- let jwt_public_key = match args.get_one::("jwt-public-key") {
- Some(pub_key_filename) => match std::fs::read_to_string(pub_key_filename) {
- Ok(pub_key) => {
- info!("Using '{pub_key_filename}' to authenticate access tokens");
- Ok(Some(pub_key))
+ let enable_authorization = !args.get_flag("disable-authorization");
+ let jwt_public_key = match args.get_one::("jwt-public-key") {
+ Some(pub_key_filename) => match std::fs::read_to_string(pub_key_filename) {
+ Ok(pub_key) => {
+ info!("Using '{pub_key_filename}' to authenticate access tokens");
+ Ok(Some(pub_key))
+ }
+ Err(err) => {
+ error!("Failed to open file {:?}: {}", pub_key_filename, err);
+ Err(err)
+ }
+ },
+ None => Ok(None),
+ }?;
+
+ let authorization = match (enable_authorization, jwt_public_key) {
+ (true, Some(pub_key)) => Authorization::new(pub_key)?,
+ (true, None) => {
+ warn!("Authorization is not enabled.");
+ Authorization::Disabled
}
- Err(err) => {
- error!("Failed to open file {:?}: {}", pub_key_filename, err);
- Err(err)
+ (false, _) => Authorization::Disabled,
+ };
+
+ #[cfg(feature = "viss")]
+ {
+ let viss_bind_addr = if args.contains_id("viss-address") {
+ args.get_one::("viss-address").unwrap().parse()?
+ } else {
+ args.get_one::("address").unwrap().parse()?
+ };
+
+ let viss_port = args
+ .get_one::("viss-port")
+ .expect("port should be a number");
+ let viss_addr = std::net::SocketAddr::new(viss_bind_addr, *viss_port);
+
+ if args.get_flag("enable-viss") {
+ let broker = broker.clone();
+ let authorization = authorization.clone();
+ tokio::spawn(async move {
+ if let Err(err) = viss::server::serve(viss_addr, broker, authorization).await {
+ error!("{err}");
+ }
+ });
}
- },
- None => Ok(None),
- }?;
-
- let authorization = match (enable_authorization, jwt_public_key) {
- (true, Some(pub_key)) => Authorization::new(pub_key)?,
- (true, None) => {
- warn!("Authorization is not enabled.");
- Authorization::Disabled
}
- (false, _) => Authorization::Disabled,
- };
- #[cfg(feature = "viss")]
- {
- let viss_bind_addr = if args.contains_id("viss-address") {
- args.get_one::("viss-address").unwrap().parse()?
- } else {
- args.get_one::("address").unwrap().parse()?
- };
+ let mut apis = vec![grpc::server::Api::KuksaValV1, grpc::server::Api::KuksaValV2];
- let viss_port = args
- .get_one::("viss-port")
- .expect("port should be a number");
- let viss_addr = std::net::SocketAddr::new(viss_bind_addr, *viss_port);
+ if args.get_flag("enable-databroker-v1") {
+ apis.push(grpc::server::Api::SdvDatabrokerV1);
+ }
- if args.get_flag("enable-viss") {
+ let unix_socket_path = args.get_one::("unix-socket").cloned().or_else(|| {
+ // If the --unix-socket PATH is not explicitly set, check whether it
+ // should be enabled using the default path
+ if args.get_flag("enable-unix-socket") {
+ Some(DEFAULT_UNIX_SOCKET_PATH.into())
+ } else {
+ None
+ }
+ });
+
+ if let Some(path) = unix_socket_path {
+ // We cannot assume that the socket was closed down properly
+ // so unlink before we recreate it.
+ unlink_unix_domain_socket(&path)?;
+ std::fs::create_dir_all(Path::new(&path).parent().unwrap())?;
let broker = broker.clone();
let authorization = authorization.clone();
+ let apis = apis.clone();
tokio::spawn(async move {
- if let Err(err) = viss::server::serve(viss_addr, broker, authorization).await {
+ if let Err(err) =
+ grpc::server::serve_uds(&path, broker, &apis, authorization, shutdown_handler())
+ .await
+ {
error!("{err}");
}
+
+ info!("Unlinking unix domain socket at {}", path);
+ unlink_unix_domain_socket(path)
+ .unwrap_or_else(|_| error!("Failed to unlink unix domain socket"));
});
}
- }
-
- let mut apis = vec![grpc::server::Api::KuksaValV1];
- if args.get_flag("enable-databroker-v1") {
- apis.push(grpc::server::Api::SdvDatabrokerV1);
- }
+ // On Linux systems try to notify daemon readiness to systemd.
+ // This function determines whether a system is using systemd
+ // or not, so it is safe to use on non-systemd systems as well.
+ #[cfg(target_os = "linux")]
+ {
+ match sd_notify::booted() {
+ Ok(true) => {
+ info!("Notifying systemd that the service is ready");
+ sd_notify::notify(false, &[sd_notify::NotifyState::Ready])?;
+ }
+ _ => {
+ debug!("System is not using systemd, will not try to notify");
+ }
+ }
+ }
- grpc::server::serve(
- addr,
- broker,
- #[cfg(feature = "tls")]
- tls_config,
- &apis,
- authorization,
- shutdown_handler(),
- )
- .await?;
+ grpc::server::serve_tcp(
+ addr,
+ broker,
+ #[cfg(feature = "tls")]
+ tls_config,
+ &apis,
+ authorization,
+ shutdown_handler(),
+ )
+ .await
+ })?;
Ok(())
}
diff --git a/databroker/src/permissions.rs b/databroker/src/permissions.rs
index f301c1af..f9b0f5d3 100644
--- a/databroker/src/permissions.rs
+++ b/databroker/src/permissions.rs
@@ -165,7 +165,9 @@ impl Permissions {
}
pub fn can_read(&self, path: &str) -> Result<(), PermissionError> {
- self.expired()?;
+ if self.is_expired() {
+ return Err(PermissionError::Expired);
+ }
if self.read.is_match(path) {
return Ok(());
@@ -188,7 +190,9 @@ impl Permissions {
#[cfg_attr(feature="otel", tracing::instrument(name="permissions_can_write_actuator_target", skip(self, path), fields(timestamp=chrono::Utc::now().to_string())))]
pub fn can_write_actuator_target(&self, path: &str) -> Result<(), PermissionError> {
- self.expired()?;
+ if self.is_expired() {
+ return Err(PermissionError::Expired);
+ }
if self.actuate.is_match(path) {
return Ok(());
@@ -198,7 +202,9 @@ impl Permissions {
#[cfg_attr(feature="otel", tracing::instrument(name="permissions_can_write_datapoint", skip(self, path), fields(timestamp=chrono::Utc::now().to_string())))]
pub fn can_write_datapoint(&self, path: &str) -> Result<(), PermissionError> {
- self.expired()?;
+ if self.is_expired() {
+ return Err(PermissionError::Expired);
+ }
if self.provide.is_match(path) {
return Ok(());
@@ -207,7 +213,9 @@ impl Permissions {
}
pub fn can_create(&self, path: &str) -> Result<(), PermissionError> {
- self.expired()?;
+ if self.is_expired() {
+ return Err(PermissionError::Expired);
+ }
if self.create.is_match(path) {
return Ok(());
@@ -217,13 +225,13 @@ impl Permissions {
#[cfg_attr(feature="otel", tracing::instrument(name="permissions_expired", skip(self), fields(timestamp=chrono::Utc::now().to_string())))]
#[inline]
- pub fn expired(&self) -> Result<(), PermissionError> {
+ pub fn is_expired(&self) -> bool {
if let Some(expires_at) = self.expires_at {
if expires_at < SystemTime::now() {
- return Err(PermissionError::Expired);
+ return true;
}
}
- Ok(())
+ false
}
}
diff --git a/databroker/src/types.rs b/databroker/src/types.rs
index 6d9241fd..c176d410 100644
--- a/databroker/src/types.rs
+++ b/databroker/src/types.rs
@@ -11,7 +11,7 @@
* SPDX-License-Identifier: Apache-2.0
********************************************************************************/
-use std::convert::TryFrom;
+use std::{convert::TryFrom, fmt};
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DataType {
@@ -41,6 +41,37 @@ pub enum DataType {
DoubleArray,
}
+impl fmt::Display for DataType {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ DataType::String => write!(f, "String"),
+ DataType::Bool => write!(f, "Bool"),
+ DataType::Int8 => write!(f, "Int8"),
+ DataType::Int16 => write!(f, "Int16"),
+ DataType::Int32 => write!(f, "Int32"),
+ DataType::Int64 => write!(f, "Int64"),
+ DataType::Uint8 => write!(f, "Uint8"),
+ DataType::Uint16 => write!(f, "Uint16"),
+ DataType::Uint32 => write!(f, "Uint32"),
+ DataType::Uint64 => write!(f, "Uint64"),
+ DataType::Float => write!(f, "Float"),
+ DataType::Double => write!(f, "Double"),
+ DataType::StringArray => write!(f, "StringArray"),
+ DataType::BoolArray => write!(f, "BoolArray"),
+ DataType::Int8Array => write!(f, "Int8Array"),
+ DataType::Int16Array => write!(f, "Int16Array"),
+ DataType::Int32Array => write!(f, "Int32Array"),
+ DataType::Int64Array => write!(f, "Int64Array"),
+ DataType::Uint8Array => write!(f, "Uint8Array"),
+ DataType::Uint16Array => write!(f, "Uint16Array"),
+ DataType::Uint32Array => write!(f, "Uint32Array"),
+ DataType::Uint64Array => write!(f, "Uint64Array"),
+ DataType::FloatArray => write!(f, "FloatArray"),
+ DataType::DoubleArray => write!(f, "DoubleArray"),
+ }
+ }
+}
+
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum EntryType {
Sensor,
@@ -78,6 +109,29 @@ pub enum DataValue {
#[derive(Debug)]
pub struct CastError {}
+impl fmt::Display for DataValue {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ DataValue::NotAvailable => write!(f, "Not Available"),
+ DataValue::Bool(value) => write!(f, "{}", value),
+ DataValue::String(value) => write!(f, "{}", value),
+ DataValue::Int32(value) => write!(f, "{}", value),
+ DataValue::Int64(value) => write!(f, "{}", value),
+ DataValue::Uint32(value) => write!(f, "{}", value),
+ DataValue::Uint64(value) => write!(f, "{}", value),
+ DataValue::Float(value) => write!(f, "{}", value),
+ DataValue::Double(value) => write!(f, "{}", value),
+ DataValue::BoolArray(values) => write!(f, "{:?}", values),
+ DataValue::StringArray(values) => write!(f, "{:?}", values),
+ DataValue::Int32Array(values) => write!(f, "{:?}", values),
+ DataValue::Int64Array(values) => write!(f, "{:?}", values),
+ DataValue::Uint32Array(values) => write!(f, "{:?}", values),
+ DataValue::Uint64Array(values) => write!(f, "{:?}", values),
+ DataValue::FloatArray(values) => write!(f, "{:?}", values),
+ DataValue::DoubleArray(values) => write!(f, "{:?}", values),
+ }
+ }
+}
impl DataValue {
pub fn greater_than(&self, other: &DataValue) -> Result {
@@ -233,158 +287,21 @@ impl DataValue {
}
}
- pub fn less_than(&self, other: &DataValue) -> Result {
- match (&self, other) {
- (DataValue::Int32(value), DataValue::Int32(other_value)) => Ok(value < other_value),
- (DataValue::Int32(value), DataValue::Int64(other_value)) => {
- Ok(i64::from(*value) < *other_value)
- }
- (DataValue::Int32(value), DataValue::Uint32(other_value)) => {
- Ok(i64::from(*value) < i64::from(*other_value))
- }
- (DataValue::Int32(value), DataValue::Uint64(other_value)) => {
- if *value < 0 {
- Ok(true) // Negative value must be less than unsigned
- } else {
- match u64::try_from(*value) {
- Ok(value) => Ok(value < *other_value),
- Err(_) => Err(CastError {}),
- }
- }
- }
- (DataValue::Int32(value), DataValue::Float(other_value)) => {
- Ok(f64::from(*value) < f64::from(*other_value))
- }
- (DataValue::Int32(value), DataValue::Double(other_value)) => {
- Ok(f64::from(*value) < *other_value)
- }
+ pub fn greater_than_equal(&self, other: &DataValue) -> Result {
+ match self.greater_than(other) {
+ Ok(true) => Ok(true),
+ _ => self.equals(other),
+ }
+ }
- (DataValue::Int64(value), DataValue::Int32(other_value)) => {
- Ok(*value < i64::from(*other_value))
- }
- (DataValue::Int64(value), DataValue::Int64(other_value)) => Ok(value < other_value),
- (DataValue::Int64(value), DataValue::Uint32(other_value)) => {
- Ok(*value < i64::from(*other_value))
- }
- (DataValue::Int64(value), DataValue::Uint64(other_value)) => {
- if *value < 0 {
- Ok(true) // Negative value must be less than unsigned
- } else {
- match u64::try_from(*value) {
- Ok(value) => Ok(value < *other_value),
- Err(_) => Err(CastError {}),
- }
- }
- }
- (DataValue::Int64(value), DataValue::Float(other_value)) => match i32::try_from(*value)
- {
- Ok(value) => Ok(f64::from(value) < f64::from(*other_value)),
- Err(_) => Err(CastError {}),
- },
- (DataValue::Int64(value), DataValue::Double(other_value)) => {
- match i32::try_from(*value) {
- Ok(value) => Ok(f64::from(value) < *other_value),
- Err(_) => Err(CastError {}),
- }
- }
+ pub fn less_than(&self, other: &DataValue) -> Result {
+ other.greater_than(self)
+ }
- (DataValue::Uint32(value), DataValue::Int32(other_value)) => {
- Ok(i64::from(*value) < i64::from(*other_value))
- }
- (DataValue::Uint32(value), DataValue::Int64(other_value)) => {
- Ok(i64::from(*value) < *other_value)
- }
- (DataValue::Uint32(value), DataValue::Uint32(other_value)) => Ok(value < other_value),
- (DataValue::Uint32(value), DataValue::Uint64(other_value)) => {
- Ok(u64::from(*value) < *other_value)
- }
- (DataValue::Uint32(value), DataValue::Float(other_value)) => {
- Ok(f64::from(*value) < f64::from(*other_value))
- }
- (DataValue::Uint32(value), DataValue::Double(other_value)) => {
- Ok(f64::from(*value) < *other_value)
- }
- (DataValue::Uint64(value), DataValue::Int32(other_value)) => {
- if *other_value < 0 {
- Ok(false) // Unsigned cannot be less than a negative value
- } else {
- match u64::try_from(*other_value) {
- Ok(other_value) => Ok(*value < other_value),
- Err(_) => Err(CastError {}),
- }
- }
- }
- (DataValue::Uint64(value), DataValue::Int64(other_value)) => {
- if *other_value < 0 {
- Ok(false) // Unsigned cannot be less than a negative value
- } else {
- match u64::try_from(*other_value) {
- Ok(other_value) => Ok(*value < other_value),
- Err(_) => Err(CastError {}),
- }
- }
- }
- (DataValue::Uint64(value), DataValue::Uint32(other_value)) => {
- Ok(*value < u64::from(*other_value))
- }
- (DataValue::Uint64(value), DataValue::Uint64(other_value)) => Ok(value < other_value),
- (DataValue::Uint64(value), DataValue::Float(other_value)) => {
- match u32::try_from(*value) {
- Ok(value) => Ok(f64::from(value) < f64::from(*other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Uint64(value), DataValue::Double(other_value)) => {
- match u32::try_from(*value) {
- Ok(value) => Ok(f64::from(value) < *other_value),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Float(value), DataValue::Int32(other_value)) => {
- Ok(f64::from(*value) < f64::from(*other_value))
- }
- (DataValue::Float(value), DataValue::Int64(other_value)) => {
- match i32::try_from(*other_value) {
- Ok(other_value) => Ok(f64::from(*value) < f64::from(other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Float(value), DataValue::Uint32(other_value)) => {
- Ok(f64::from(*value) < f64::from(*other_value))
- }
- (DataValue::Float(value), DataValue::Uint64(other_value)) => {
- match u32::try_from(*other_value) {
- Ok(other_value) => Ok(f64::from(*value) < f64::from(other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Float(value), DataValue::Float(other_value)) => Ok(value < other_value),
- (DataValue::Float(value), DataValue::Double(other_value)) => {
- Ok(f64::from(*value) < *other_value)
- }
- (DataValue::Double(value), DataValue::Int32(other_value)) => {
- Ok(*value < f64::from(*other_value))
- }
- (DataValue::Double(value), DataValue::Int64(other_value)) => {
- match i32::try_from(*other_value) {
- Ok(other_value) => Ok(*value < f64::from(other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Double(value), DataValue::Uint32(other_value)) => {
- Ok(*value < f64::from(*other_value))
- }
- (DataValue::Double(value), DataValue::Uint64(other_value)) => {
- match u32::try_from(*other_value) {
- Ok(other_value) => Ok(*value < f64::from(other_value)),
- Err(_) => Err(CastError {}),
- }
- }
- (DataValue::Double(value), DataValue::Float(other_value)) => {
- Ok(*value < f64::from(*other_value))
- }
- (DataValue::Double(value), DataValue::Double(other_value)) => Ok(value < other_value),
- _ => Err(CastError {}),
+ pub fn less_than_equal(&self, other: &DataValue) -> Result {
+ match self.less_than(other) {
+ Ok(true) => Ok(true),
+ _ => self.equals(other),
}
}
diff --git a/databroker/src/viss/v2/server.rs b/databroker/src/viss/v2/server.rs
index 91a69f62..476670c5 100644
--- a/databroker/src/viss/v2/server.rs
+++ b/databroker/src/viss/v2/server.rs
@@ -168,6 +168,8 @@ impl Viss for Server {
entry_type: None,
data_type: None,
description: None,
+ min: None,
+ max: None,
allowed: None,
unit: None,
})
@@ -197,8 +199,14 @@ impl Viss for Server {
UpdateError::WrongType => Error::BadRequest {
msg: Some("Wrong data type.".into()),
},
- UpdateError::OutOfBounds => Error::BadRequest {
- msg: Some("Value out of bounds.".into()),
+ UpdateError::OutOfBoundsAllowed => Error::BadRequest {
+ msg: Some("Value out of allowed bounds.".into()),
+ },
+ UpdateError::OutOfBoundsMinMax => Error::BadRequest {
+ msg: Some("Value out of min/max bounds.".into()),
+ },
+ UpdateError::OutOfBoundsType => Error::BadRequest {
+ msg: Some("Value out of type bounds.".into()),
},
UpdateError::UnsupportedType => Error::BadRequest {
msg: Some("Unsupported data type.".into()),
@@ -263,7 +271,7 @@ impl Viss for Server {
});
};
- match broker.subscribe(entries).await {
+ match broker.subscribe(entries, None).await {
Ok(stream) => {
let subscription_id = SubscriptionId::new();
@@ -295,6 +303,7 @@ impl Viss for Server {
broker::SubscriptionError::NotFound => Error::NotFoundInvalidPath,
broker::SubscriptionError::InvalidInput => Error::NotFoundInvalidPath,
broker::SubscriptionError::InternalError => Error::InternalServerError,
+ broker::SubscriptionError::InvalidBufferSize => Error::InternalServerError,
},
ts: SystemTime::now().into(),
}),
diff --git a/databroker/src/vss.rs b/databroker/src/vss.rs
index a79b0d54..f5d1a8b0 100644
--- a/databroker/src/vss.rs
+++ b/databroker/src/vss.rs
@@ -203,6 +203,10 @@ impl From for types::DataType {
}
}
+/// Try to extract an array matching the given DataType.
+/// Will succeed if the value is None or an array of matching type
+/// Will fail if the value is a "single" value, i.e. not an array
+/// This method is useful for instance when extracting the "allowed" field
fn try_from_json_array(
array: Option>,
data_type: &types::DataType,
@@ -251,6 +255,11 @@ fn try_from_json_array(
}
}
+/// Try to extract a value matching the given DataType.
+/// Will succeed if the value is None or a value of matching type
+/// Will fail if the value does not match the given type,
+/// for example if a single value is given for an array type or vice versa
+/// This method is useful for instance when extracting the "default" value
fn try_from_json_value(
value: Option,
data_type: &types::DataType,
@@ -350,6 +359,44 @@ fn try_from_json_value(
}
}
+/// Try to extract a single value matching the given DataType,
+/// i.e. if an array type is given it will try to find a single value of the base type
+/// For example Int32 if the type is Int32 or Int32Array
+/// Will succeed if the value is of matching base type
+/// Will fail otherwise
+/// This method is useful for instance when extracting the "min"/"max" field
+fn try_from_json_single_value(
+ value: Option,
+ data_type: &types::DataType,
+) -> Result, Error> {
+ match data_type {
+ types::DataType::StringArray => try_from_json_value(value, &types::DataType::String),
+ types::DataType::BoolArray => try_from_json_value(value, &types::DataType::Bool),
+ types::DataType::Int8Array => try_from_json_value(value, &types::DataType::Int8),
+ types::DataType::Int16Array => try_from_json_value(value, &types::DataType::Int16),
+ types::DataType::Int32Array => try_from_json_value(value, &types::DataType::Int32),
+ types::DataType::Int64Array => try_from_json_value(value, &types::DataType::Int64),
+ types::DataType::Uint8Array => try_from_json_value(value, &types::DataType::Uint8),
+ types::DataType::Uint16Array => try_from_json_value(value, &types::DataType::Uint16),
+ types::DataType::Uint32Array => try_from_json_value(value, &types::DataType::Uint32),
+ types::DataType::Uint64Array => try_from_json_value(value, &types::DataType::Uint64),
+ types::DataType::FloatArray => try_from_json_value(value, &types::DataType::Float),
+ types::DataType::DoubleArray => try_from_json_value(value, &types::DataType::Double),
+ types::DataType::String
+ | types::DataType::Bool
+ | types::DataType::Int8
+ | types::DataType::Int16
+ | types::DataType::Int32
+ | types::DataType::Int64
+ | types::DataType::Uint8
+ | types::DataType::Uint16
+ | types::DataType::Uint32
+ | types::DataType::Uint64
+ | types::DataType::Float
+ | types::DataType::Double => try_from_json_value(value, data_type),
+ }
+}
+
fn flatten_vss_tree(root: RootEntry) -> Result, Error> {
let mut entries = BTreeMap::new();
@@ -396,8 +443,8 @@ fn add_entry(
description: entry.description,
comment: entry.comment,
unit: entry.unit,
- min: try_from_json_value(entry.min, &data_type)?,
- max: try_from_json_value(entry.max, &data_type)?,
+ min: try_from_json_single_value(entry.min, &data_type)?,
+ max: try_from_json_single_value(entry.max, &data_type)?,
allowed: try_from_json_array(entry.allowed, &data_type)?,
default: None, // isn't used by actuators
data_type,
@@ -421,8 +468,8 @@ fn add_entry(
description: entry.description,
comment: entry.comment,
unit: entry.unit,
- min: try_from_json_value(entry.min, &data_type)?,
- max: try_from_json_value(entry.max, &data_type)?,
+ min: try_from_json_single_value(entry.min, &data_type)?,
+ max: try_from_json_single_value(entry.max, &data_type)?,
allowed: try_from_json_array(entry.allowed, &data_type)?,
default: try_from_json_value(entry.default, &data_type)?,
change_type: determine_change_type(
@@ -450,8 +497,8 @@ fn add_entry(
description: entry.description,
comment: entry.comment,
unit: entry.unit,
- min: try_from_json_value(entry.min, &data_type)?,
- max: try_from_json_value(entry.max, &data_type)?,
+ min: try_from_json_single_value(entry.min, &data_type)?,
+ max: try_from_json_single_value(entry.max, &data_type)?,
allowed: try_from_json_array(entry.allowed, &data_type)?,
change_type: determine_change_type(entry.change_type, types::EntryType::Sensor),
default: None, // isn't used by sensors
diff --git a/databroker/tests/world/mod.rs b/databroker/tests/world/mod.rs
index e3e6a7c6..3630a992 100644
--- a/databroker/tests/world/mod.rs
+++ b/databroker/tests/world/mod.rs
@@ -32,6 +32,7 @@ use databroker::{
};
use tokio::net::TcpListener;
+use tokio_stream::wrappers::TcpListenerStream;
use tracing::debug;
use lazy_static::lazy_static;
@@ -188,11 +189,12 @@ impl DataBrokerWorld {
let addr = listener
.local_addr()
.expect("failed to determine listener's port");
+ let incoming = TcpListenerStream::new(listener);
tokio::spawn(async move {
- let version = option_env!("VERGEN_GIT_SEMVER_LIGHTWEIGHT")
- .unwrap_or(option_env!("VERGEN_GIT_SHA").unwrap_or("unknown"));
- let data_broker = broker::DataBroker::new(version);
+ let commit_sha = option_env!("VERGEN_GIT_SHA").unwrap_or("unknown");
+ let version = option_env!("VERGEN_GIT_SEMVER_LIGHTWEIGHT").unwrap_or(commit_sha);
+ let data_broker = broker::DataBroker::new(version, commit_sha);
let database = data_broker.authorized_access(&permissions::ALLOW_ALL);
for (name, data_type, change_type, entry_type) in data_entries {
if let Err(_error) = database
@@ -202,6 +204,8 @@ impl DataBrokerWorld {
change_type,
entry_type,
"N/A".to_string(),
+ None, // min
+ None, // max
None,
None,
)
@@ -228,7 +232,7 @@ impl DataBrokerWorld {
}
grpc::server::serve_with_incoming_shutdown(
- listener,
+ incoming,
data_broker,
#[cfg(feature = "tls")]
CERTS.server_tls_config(),
diff --git a/doc/QUERY.md b/doc/QUERY.md
index c2a9c3a0..a64ae41c 100644
--- a/doc/QUERY.md
+++ b/doc/QUERY.md
@@ -1,4 +1,7 @@
-# 1. Data Broker Query Syntax
+# 1. Databroker sdv.databroker.v1 Query Syntax
+
+*Note! This document is only relevant for the [sdv.databroker.v1](../proto/sdv/databroker/v1) API!*
+
- [1. Data Broker Query Syntax](#1-data-broker-query-syntax)
- [1.1. Intro](#11-intro)
diff --git a/doc/README.md b/doc/README.md
new file mode 100644
index 00000000..490f7ca4
--- /dev/null
+++ b/doc/README.md
@@ -0,0 +1,19 @@
+# KUKSA Documentation
+
+This folder contains various documents related to KUKSA
+
+Document | Content/Comment
+----------|--------------------
+[Eclipse Kuksaâ„¢ Databroker User Guide](user_guide.md) | Guide on how to start and configure Kuksa Databroker
+[Kuksa Quickstart](quickstart.md) | Introduction on how to interact with Kuksa Databroker
+[Kuksa System Components and Deployment](system-architecture.md) | Description of the Kuksa Architecture
+[Supported Protocols](protocol.md) | Protocols supported by Kuksa Databroker
+[Databroker sdv.databroker.v1 Query Syntax](QUERY.md) | Query syntax available in `sdv.databroker.v1`
+[Kuksa TLS concept](tls.md) | Introduction to the Kuksa TLS Concept
+[Kuksa Authorization Concept](authorization.md) | Introduction to the Kuksa Token Concept
+[Runtime behavior and potential attacks](behavior.md) | Description of the runtime behavior of Kuksa Databroker
+[Deployment Blueprints](deployment.md) | Logical description of possible deployments of Kuksa
+[Kuksa Analysis](kuksa_analysis.md) | Functional requirements, design topics and use case diagrams
+[Mapping VSS data types to protobuf Types](TYPES.md) | How VSS datatypes are represented internally in Kuksa Databroker
+[Terminology](terminology.md) | Description of terms commonly used in Kuksa Documentation
+[Wildcard Matching Rules](wildcard_matching.md) | Wildcard matching rules for `sdv.databroker.v1` and `kuksa.val.v1`
diff --git a/doc/TYPES.md b/doc/TYPES.md
index 2e25de4c..1e99f26e 100644
--- a/doc/TYPES.md
+++ b/doc/TYPES.md
@@ -1,4 +1,4 @@
-# Mapping data types
+# Mapping VSS data types to Protobuf Types
This is how [VSS data types](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/data_types/)
defined by [COVESA VSS](https://covesa.github.io/vehicle_signal_specification/) are mapped to the data types used by
@@ -37,4 +37,4 @@ See `enum DataType` in [types.proto](../proto/kuksa/val/v1/types.proto) for the
| uint64[] | UINT64_ARRAY | - |
| float[] | FLOAT_ARRAY | - |
| double[] | DOUBLE_ARRAY | - |
-| timestamp[] | TIMESTAMP_ARRAY | - |
\ No newline at end of file
+| timestamp[] | TIMESTAMP_ARRAY | - |
diff --git a/doc/authorization.md b/doc/authorization.md
index 46b8eb08..e9f9bc8e 100644
--- a/doc/authorization.md
+++ b/doc/authorization.md
@@ -1,4 +1,4 @@
-# Authorization in KUKSA.VAL
+# Kuksa Authorization Concept
* [Background](#background)
* [Introduction to OAuth2](#introduction)
@@ -64,7 +64,7 @@ orientation around how it fits into the overall OAuth 2.0 Authorization Framewor
# Introduction to OAuth2
Using an authorization framework like OAuth2 is well suited for an environment where third party
applications need delegated access to a resource, while at the same time restricting the scope
-of this access to minimum.
+of this access to minimum.
See [The OAuth 2.0 Authorization Framework](#the-oauth-20-authorization-framework) for a more
detailed description of the benefits, and the problems the framework aims to solve.
@@ -75,7 +75,7 @@ OAuth 2.0 defines four roles:
> An entity capable of granting access to a protected resource.
> When the resource owner is a person, it is referred to as an
> end-user.
-
+
The resource owner in this context could be the OEM or the owner of a vehicle.
* **Resource server**
@@ -421,19 +421,19 @@ Example:
> * Third-party applications are required to store the resource
> owner's credentials for future use, typically a password in
> clear-text.
->
+>
> * Servers are required to support password authentication, despite
> the security weaknesses inherent in passwords.
->
+>
> * Third-party applications gain overly broad access to the resource
> owner's protected resources, leaving resource owners without any
> ability to restrict duration or access to a limited subset of
> resources.
->
+>
> * Resource owners cannot revoke access to an individual third party
> without revoking access to all third parties, and must do so by
> changing the third party's password.
->
+>
> * Compromise of any third-party application results in compromise of
> the end-user's password and all of the data protected by that
> password.
@@ -462,47 +462,47 @@ Example:
> represent specific scopes and durations of access, granted by the
> resource owner, and enforced by the resource server and authorization
> server.
->
+>
> The token may denote an identifier used to retrieve the authorization
> information or may self-contain the authorization information in a
> verifiable manner (i.e., a token string consisting of some data and a
> signature). Additional authentication credentials, which are beyond
> the scope of this specification, may be required in order for the
> client to use a token.
->
+>
> The access token provides an abstraction layer, replacing different
> authorization constructs (e.g., username and password) with a single
> token understood by the resource server. This abstraction enables
> issuing access tokens more restrictive than the authorization grant
> used to obtain them, as well as removing the resource server's need
> to understand a wide range of authentication methods.
-
+
[[RFC 6749](https://www.rfc-editor.org/rfc/rfc6749#section-1.4)]
#### Roles
> OAuth defines four roles:
->
+>
> * **resource owner**
->
+>
> An entity capable of granting access to a protected resource.
> When the resource owner is a person, it is referred to as an
> end-user.
->
+>
> * **resource server**
->
+>
> The server hosting the protected resources, capable of accepting
> and responding to protected resource requests using access tokens.
->
+>
> * **client**
->
+>
> An application making protected resource requests on behalf of the
> resource owner and with its authorization. The term "client" does
> not imply any particular implementation characteristics (e.g.,
> whether the application executes on a server, a desktop, or other
> devices).
->
+>
> * **authorization server**
->
+>
> The server issuing access tokens to the client after successfully
> authenticating the resource owner and obtaining authorization.
@@ -515,13 +515,13 @@ Example:
> This specification defines a profile for issuing OAuth 2.0 access tokens in JSON Web Token (JWT)
format. Authorization servers and resource servers from different vendors can leverage this
profile to issue and consume access tokens in an interoperable manner.
->
+>
> The original OAuth 2.0 Authorization Framework [RFC6749] specification does not mandate any
specific format for access tokens. While that remains perfectly appropriate for many important
scenarios, in-market use has shown that many commercial OAuth 2.0 implementations elected to
issue access tokens using a format that can be parsed and validated by resource servers directly,
without further authorization server involvement.
->
+>
> This specification aims to provide a standardized and interoperable profile as an alternative to
the proprietary JWT access token layouts going forward. Besides defining a common set of
mandatory and optional claims, the profile provides clear indications on how authorization
@@ -557,4 +557,3 @@ Example:
* [The OAuth 2.0 Authorization Framework [RFC 6749]](https://www.rfc-editor.org/rfc/rfc6749)
* [JSON Web Token (JWT) Profile for OAuth 2.0 Access Tokens [RFC 9068]](https://datatracker.ietf.org/doc/html/rfc9068)
-
diff --git a/doc/behavior.md b/doc/behavior.md
index bee5441f..5c6f4f0a 100644
--- a/doc/behavior.md
+++ b/doc/behavior.md
@@ -2,3 +2,6 @@
The implementation of KUKSA databroker shall represent the latest value of a ```Datapoint```. Therefore the databroker always sets a ```timestamp``` for a ```Datapoint```. This means if a new value comes in it overwrites the older value. We opted for this behavior because a actuator/provider/application can have no access to a system time. For some use cases it could be interesting to provide a timestamp set by the actuator/provider/application. For this we added a so called source timestamp (short ```source_ts```) to the ```Datapoint``` class. This source timestamp is optional and per default set to None.
If an attacker gets an authorized connection to the databroker he can set the source_timestamp and overwrite the value with a new one. But for this he/she needs read and write access through JWT tokens. If a provider decides to work with ```source_ts``` of a ```Datapoint``` than it should be clear that they can be false/outdated.
+
+# Tokio runtime behavior
+If you do not specify anything, tokio will spawn as many threads as cores (virtual and physical) are detected on the system. If you want to optimize CPU load you can specify the number of threads spawned as workers by the tokio runtime. To do so, use the runtime option `--worker-threads` and specify how many threads you want to be spawned.
diff --git a/doc/deployment.md b/doc/deployment.md
index e3b32f33..4b35a278 100644
--- a/doc/deployment.md
+++ b/doc/deployment.md
@@ -1,6 +1,5 @@
# Deployment Blueprints
-
- [Deployment Blueprints](#deployment-blueprints)
- [Deployed Elements](#deployed-elements)
- [KUKSA.val databroker (or server)](#kuksaval-databroker-or-server)
@@ -45,37 +44,37 @@ Intuitively, it can be seen that the security and safety requirements in an end-
# Deployment Blueprint 1: Internal API
-You are using VSS and KUKSA.val as an internal API in your system to ease system development. You control the whole system/system integration.
+You are using VSS and KUKSA.val as an internal API in your system to ease system development. You control the whole system/system integration.
![Deployment Blueprint 1: Internal API](./pictures/deployment_blueprint1.svg)
| Aspect | Design Choice |
| ---------------- | ------------- |
-| Users | You |
-| System Updates | Complete |
-| Security | None/Fixed |
-| VSS model | Static |
-| KUKSA deployment | Firmware |
+| Users | You |
+| System Updates | Complete |
+| Security | None/Fixed |
+| VSS model | Static |
+| KUKSA deployment | Firmware |
You are not exposing any VSS API to external parties, you control all components interacting with VSS, the system state/composition is under your control. In this case you make KUKSA APIs available only within your system. You might even opt to disabling encryption for higher performance and not using any tokens for authentication. We would still recommend leaving basic security measures intact, but this deployment does not need fine-grained control of permission rights or fast rotation/revocation of tokens.
# Deployment Blueprint 2: Exposing a subset of VSS to another system
-You control the system (e.g. Vehicle Computer), that has KUKSA.val deployed. You want to make a subset of capabilities available to another system or middleware, that may have its own API/security mechanisms. An example would be an Android Automotive based IVI system,
+You control the system (e.g. Vehicle Computer), that has KUKSA.val deployed. You want to make a subset of capabilities available to another system or middleware, that may have its own API/security mechanisms. An example would be an Android Automotive based IVI system,
![Deployment Blueprint 2: Exposing a subset of VSS to another system](./pictures/deployment_blueprint2.svg)
| Aspect | Design Choice |
| ----------------- | ------------- |
| Users | Other trusted platforms |
-| System Updates | Firmware on controlled system, unknown on third party system |
-| Security | TLS+Foreign platform token |
-| VSS model | Static |
-| KUKSA deployment | Firmware or Software package |
+| System Updates | Firmware on controlled system, unknown on third party system |
+| Security | TLS+Foreign platform token |
+| VSS model | Static |
+| KUKSA deployment | Firmware or Software package |
In this deployment the foreign system is treated as a single client, which has a certain level of access and trust. Whether that system restricts access further for certain hosted apps is opaque to KUKSA.val. In this deployment you need to enable and configure TLS to provide confidentiality, as well as providing a single token limiting the access of the foreign platform.
-If the token is time limited, the foreign platform needs to be provided with a new token in time. In case certificates on the KUKSA.val side are changed, the foreign system needs to be updated accordingly.
+If the token is time limited, the foreign platform needs to be provided with a new token in time. In case certificates on the KUKSA.val side are changed, the foreign system needs to be updated accordingly.
Doing so, you are using KUKSA to enable other ecosystems, while making the usage of KUKSA transparent to application in that ecosystem.
@@ -87,11 +86,11 @@ You control the system running KUKSA.val. You intend to integrate applications f
| Aspect | Design Choice |
| ---------------- | ------------- |
-| Users | 3rd parties | |
-| System Updates | App-level |
-| Security | TLS+Individual Consumer tokens |
-| VSS | Static |
-| KUKSA deployment | Software package |
+| Users | 3rd parties | |
+| System Updates | App-level |
+| Security | TLS+Individual Consumer tokens |
+| VSS | Static |
+| KUKSA deployment | Software package |
In this deployment you need to enable and configure TLS to provide confidentiality, as well as providing an individual security token to each VSS consumer, limiting the access of each app.
@@ -99,7 +98,7 @@ The scope (and longevity) of those tokens will likely depend on the relationship
When you update/rotate the keys for the VSS server you need a process to make sure to update customers at the same time.
-In this blueprint you expose an attack surface to a larger group of potential adversaries. To be able to react to security issues faster, you might want to deploy KUKSA.val in a way that allows you to update it individually (i.e. deploy it as a container), instead of just baking it into firmware.
+In this blueprint you expose an attack surface to a larger group of potential adversaries. To be able to react to security issues faster, you might want to deploy KUKSA.val in a way that allows you to update it individually (i.e. deploy it as a container), instead of just baking it into firmware.
# Deployment Blueprint 4: Dynamic Applications and VSS extensions
@@ -112,14 +111,14 @@ This is similar to the *Individual Applications* Use Case with the added capabil
| Aspect | Design Choice |
| -------------- | ------------- |
| Users | 3rd parties |
-| System Updates | App-level |
-| Security | Individual App tokens |
-| VSS | Dynamic |
-| KUKSA deployment | Software package |
+| System Updates | App-level |
+| Security | Individual App tokens |
+| VSS | Dynamic |
+| KUKSA deployment | Software package |
The main difference to the previous use case is allowing providers to add datapoints to the VSS tree managed by KUKSA.val.
-This could be a very elegant setup, even in a more static deployment, where KUKSA.val starts with an empty tree, and once the relevant software components come up, it is extended step by step.
+This could be a very elegant setup, even in a more static deployment, where KUKSA.val starts with an empty tree, and once the relevant software components come up, it is extended step by step.
However, the security and safety implications in such a scenario are the highest: While the security aspect can be handled by KUKSA.val, giving specific applications the right to extend the tree, the overall requirements on system design get harder:
@@ -130,5 +129,5 @@ However, the security and safety implications in such a scenario are the highest
# Mixing
The aforementioned blueprints are examples and any specific deployment might combine aspects of several of them.
-
- There could be many domains in a vehicle, where a fully static "walled" deployment as described in Blueprint 1 is the right thing to do, while other use cases in the same vehicle require the capabilities found in Blueprint 4. In that case, the right design choice could be to deploy several KUKSA.val instances in a car as sketched in [System Architecture -> (Distributed) KUKSA.val deployment](./system-architecture.md#distributed-kuksaval-deployment).
\ No newline at end of file
+
+ There could be many domains in a vehicle, where a fully static "walled" deployment as described in Blueprint 1 is the right thing to do, while other use cases in the same vehicle require the capabilities found in Blueprint 4. In that case, the right design choice could be to deploy several KUKSA.val instances in a car as sketched in [System Architecture -> (Distributed) KUKSA.val deployment](./system-architecture.md#distributed-kuksaval-deployment).
diff --git a/doc/diagrams/consumer_actuate.drawio b/doc/diagrams/consumer_actuate.drawio
new file mode 100644
index 00000000..806e9f34
--- /dev/null
+++ b/doc/diagrams/consumer_actuate.drawio
@@ -0,0 +1,131 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_actuate.svg b/doc/diagrams/consumer_actuate.svg
new file mode 100644
index 00000000..d684b0ef
--- /dev/null
+++ b/doc/diagrams/consumer_actuate.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer :Provider :Vehicle Network ActuateResponse Actuate(ActuateRequest=actuator_path)
Actuate(ActuateRequest=actuator_path) OpenProviderStream(stream  OpenProviderStreamRequest=ProvideActuationRequest(actutators))
OpenProviderStream(stream OpenProviderStreamRequest=ProvideActuationRequest(actutators)) stream  OpenProviderStreamResponse=ProvideActuationResponse
stream OpenProviderStreamResponse=ProvideActuationResponse stream  OpenProviderStreamRequest=BatchActuateStreamResponse
stream OpenProviderStreamRequest=BatchActuateStreamResponse stream  OpenProviderStreamResponse=BatchActuateStreamRequest(actuator_path, value)
stream OpenProviderStreamResponse=BatchActuateStreamRequest(actuator_path, value) write_data_frame(data) ack_write_operation Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_actuate_multiple_providers.drawio b/doc/diagrams/consumer_actuate_multiple_providers.drawio
new file mode 100644
index 00000000..57ee71a7
--- /dev/null
+++ b/doc/diagrams/consumer_actuate_multiple_providers.drawio
@@ -0,0 +1,212 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_actuate_multiple_providers.svg b/doc/diagrams/consumer_actuate_multiple_providers.svg
new file mode 100644
index 00000000..24c5c6da
--- /dev/null
+++ b/doc/diagrams/consumer_actuate_multiple_providers.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer :Provider Door :Vehicle Network ack_write_operation BatchActuateResponse BatchActuate(BatchActuateRequest={Door, Window})
BatchActuate(BatchActuateRequest={Door, Window}) OpenProviderStream(stream  OpenProviderStreamRequest=ProvideActuationRequest(Door))
OpenProviderStream(stream OpenProviderStreamRequest=ProvideActuationRequest(Door)) stream  OpenProviderStreamResponse=ProvideActuationResponse
stream OpenProviderStreamResponse=ProvideActuationResponse stream  OpenProviderStreamRequest=BatchActuateStreamResponse(Door)
stream OpenProviderStreamRequest=BatchActuateStreamResponse(Door) stream  OpenProviderStreamResponse=BatchActuateStreamRequest(Door)
stream OpenProviderStreamResponse=BatchActuateStreamRequest(Door) write_data_frame(data) ack_write_operation :Provider Window stream  OpenProviderStreamRequest=BatchActuateStreamResponse(Window)
stream OpenProviderStreamRequest=BatchActuateStreamResponse(Window) OpenProviderStream(stream  OpenProviderStreamRequest=ProvideActuationRequest(Window))
OpenProviderStream(stream OpenProviderStreamRequest=ProvideActuationRequest(Window)) stream  OpenProviderStreamResponse=ProvideActuationResponse
stream OpenProviderStreamResponse=ProvideActuationResponse stream  OpenProviderStreamResponse=BatchActuateStreamRequest(Window)
stream OpenProviderStreamResponse=BatchActuateStreamRequest(Window) write_data_frame(data) Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_get_values.drawio b/doc/diagrams/consumer_get_values.drawio
new file mode 100644
index 00000000..ad0a38f4
--- /dev/null
+++ b/doc/diagrams/consumer_get_values.drawio
@@ -0,0 +1,169 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_get_values.svg b/doc/diagrams/consumer_get_values.svg
new file mode 100644
index 00000000..7fe2780c
--- /dev/null
+++ b/doc/diagrams/consumer_get_values.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer :Provider :Vehicle Network GetValuesResponse GetValues(GetValuesRequest=signal_paths)
GetValues(GetValuesRequest=signal_paths) OpenProviderStream(stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values))
OpenProviderStream(stream OpenProviderStreamRequest=PublishValuesRequest(signal_values)) stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values)
stream OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() recv_data_frame() OEM cycle_time... OEM protocol design Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_provider_list_metadata.drawio b/doc/diagrams/consumer_provider_list_metadata.drawio
new file mode 100644
index 00000000..fd39f6ab
--- /dev/null
+++ b/doc/diagrams/consumer_provider_list_metadata.drawio
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_provider_list_metadata.svg b/doc/diagrams/consumer_provider_list_metadata.svg
new file mode 100644
index 00000000..f83682bf
--- /dev/null
+++ b/doc/diagrams/consumer_provider_list_metadata.svg
@@ -0,0 +1 @@
+:DataBroker ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...)
ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...) :Signal Consumer ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...)
ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...) ListMetadata(ListMetadataRequest(root="Vehicle.**"))
ListMetadata(ListMetadataRequest(root="Vehicle.**")) :Provider ListMetadata(ListMetadataRequest(root="Vehicle.**"))
ListMetadata(ListMetadataRequest(root="Vehicle.**")) Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_provider_server_info.drawio b/doc/diagrams/consumer_provider_server_info.drawio
new file mode 100644
index 00000000..840695d6
--- /dev/null
+++ b/doc/diagrams/consumer_provider_server_info.drawio
@@ -0,0 +1,75 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_provider_server_info.svg b/doc/diagrams/consumer_provider_server_info.svg
new file mode 100644
index 00000000..550f08f4
--- /dev/null
+++ b/doc/diagrams/consumer_provider_server_info.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer GetServerInfoResponse(name, version, commit_hash)
GetServerInfoResponse(name, version, commit_hash) GetServerInfo(GetServerInfoRequest)
GetServerInfo(GetServerInfoRequest) :Provider GetServerInfo(GetServerInfoRequest)
GetServerInfo(GetServerInfoRequest) GetServerInfoResponse(name, version, commit_hash)
GetServerInfoResponse(name, version, commit_hash) Text is not SVG - cannot display
diff --git a/doc/diagrams/consumer_subscribes.drawio b/doc/diagrams/consumer_subscribes.drawio
new file mode 100644
index 00000000..37e4aa32
--- /dev/null
+++ b/doc/diagrams/consumer_subscribes.drawio
@@ -0,0 +1,303 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/consumer_subscribes.svg b/doc/diagrams/consumer_subscribes.svg
new file mode 100644
index 00000000..e8fa7da6
--- /dev/null
+++ b/doc/diagrams/consumer_subscribes.svg
@@ -0,0 +1 @@
+:DataBroker :Signal Consumer SubscribeById(SubscribeByIdRequest=signals_ids(1, 2, ..))
SubscribeById(SubscribeByIdRequest=signals_ids(1, 2, ..)) :Provider recv_data_frame() recv_data_frame() :Vehicle Network ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...)
ListMetadataResponse(metadata=[{Vehicle.Speed, id: 1]},...) ListMetadata(ListMetadataRequest(root="Vehicle.**"))
ListMetadata(ListMetadataRequest(root="Vehicle.**")) OpenProviderStream(stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values))
OpenProviderStream(stream OpenProviderStreamRequest=PublishValuesRequest(signal_values)) stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse stream SubscribeByIdResponse(signal_values)
stream SubscribeByIdResponse(signal_values) Close Subscription recv_data_frame() stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values)
stream OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values)
stream OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values)
stream OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() recv_data_frame() OEM... stream SubscribeByIdResponse(current_signal_values)
stream SubscribeByIdResponse(current_signal_values) Text is not SVG - cannot display
diff --git a/doc/diagrams/provider_publish.drawio b/doc/diagrams/provider_publish.drawio
new file mode 100644
index 00000000..6fa4b559
--- /dev/null
+++ b/doc/diagrams/provider_publish.drawio
@@ -0,0 +1,216 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/provider_publish.svg b/doc/diagrams/provider_publish.svg
new file mode 100644
index 00000000..4988dddd
--- /dev/null
+++ b/doc/diagrams/provider_publish.svg
@@ -0,0 +1 @@
+:DataBroker :Provider recv_data_frame() recv_data_frame() :Vehicle Network OpenProviderStream(stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values))
OpenProviderStream(stream OpenProviderStreamRequest=PublishValuesRequest(signal_values)) stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values)
stream OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values)
stream OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream  OpenProviderStreamRequest=PublishValuesRequest(signal_values)
stream OpenProviderStreamRequest=PublishValuesRequest(signal_values) stream  OpenProviderStreamResponse=PublishValuesResponse
stream OpenProviderStreamResponse=PublishValuesResponse recv_data_frame() recv_data_frame() cycle_time... Text is not SVG - cannot display
diff --git a/doc/diagrams/provider_recv_actuation.drawio b/doc/diagrams/provider_recv_actuation.drawio
new file mode 100644
index 00000000..3dd97391
--- /dev/null
+++ b/doc/diagrams/provider_recv_actuation.drawio
@@ -0,0 +1,97 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/diagrams/provider_recv_actuation.svg b/doc/diagrams/provider_recv_actuation.svg
new file mode 100644
index 00000000..ec95e79f
--- /dev/null
+++ b/doc/diagrams/provider_recv_actuation.svg
@@ -0,0 +1 @@
+:DataBroker :Provider :Vehicle Network OpenProviderStream(stream  OpenProviderStreamRequest=ProvideActuationRequest(actutators))
OpenProviderStream(stream OpenProviderStreamRequest=ProvideActuationRequest(actutators)) stream  OpenProviderStreamResponse=ProvideActuationResponse
stream OpenProviderStreamResponse=ProvideActuationResponse stream  OpenProviderStreamRequest=BatchActuateStreamResponse
stream OpenProviderStreamRequest=BatchActuateStreamResponse stream  OpenProviderStreamResponse=BatchActuateStreamRequest(actuator_path, value)
stream OpenProviderStreamResponse=BatchActuateStreamRequest(actuator_path, value) write_data_frame(data) ack_write_operation Text is not SVG - cannot display
diff --git a/doc/kuksa_analysis.md b/doc/kuksa_analysis.md
new file mode 100644
index 00000000..af6534d8
--- /dev/null
+++ b/doc/kuksa_analysis.md
@@ -0,0 +1,16 @@
+# KUKSA Analysis
+
+This documentation provides a comprehensive analysis of the KUKSA project, detailing essential aspects to ensure a clear understanding and effective implementation. The sections covered include:
+
+### 1. Requirements
+The requirements for Kuksa Databroker are available in the [Kuksa requirements](./kuksa_analysis/kuksa_requirements.md)
+
+### 2. Design Topics
+The extended list of design topics for Kuksa Databroker is available in the [Kuksa design topics](./kuksa_analysis/kuksa_design_topics.md)
+
+### 3. Use Cases
+The use cases for Kuksa Databroker are available in the [Kuksa use cases](./kuksa_analysis/kuksa_use_cases.md)
+
+### Documentation
+ #### [Terminology](./terminology.md)
+ #### [System Architecture](./system-architecture.md)
diff --git a/doc/kuksa_analysis/kuksa_design_topics.md b/doc/kuksa_analysis/kuksa_design_topics.md
new file mode 100644
index 00000000..b122358c
--- /dev/null
+++ b/doc/kuksa_analysis/kuksa_design_topics.md
@@ -0,0 +1,726 @@
+# Design Topics
+The document aims to gather current design decisions, sketches, and incomplete ideas regarding various design topics. This includes areas where no decisions have been made yet, as well as topics where decisions may still be pending.
+
+# Design topics status
+| Description | Status |
+|-------------------------------------------|------------------------------|
+| Implemented and Verified | 🟢 |
+| Approved, Not Yet Implemented | 🟡 |
+| Long Term Goal | 🔴 |
+
+# Content
+- [Design Topics](#design-topics)
+- [Design topics status](#design-topics-status)
+- [Content](#content)
+- [Data availability/persistence according to lifecycle of Client, Databroker and Provider](#data-availabilitypersistence-according-to-lifecycle-of-client-databroker-and-provider)
+- [Wildcard support](#wildcard-support)
+- [Registration of Datapoints](#registration-of-datapoints)
+- [Availability of Datapoints](#availability-of-datapoints)
+- [Lifecycle of components](#lifecycle-of-components)
+- [Path requests](#path-requests)
+- [Errors](#errors)
+- [Setting Values](#setting-values)
+- [Atomic operations](#atomic-operatoins)
+- [Update notifications](#update-notifications)
+- [Access rights](#access-rights)
+- [VSS signals - Users vs providers](#vss-signals---users-vs-providers)
+- [Enable "easy to use" user facing API](#enable-easy-to-use-user-facing-api)
+- [Performance / runtime footprint](#performance--runtime-footprint)
+- [Throttling-mode when system is overloaded.](#throttling-mode-when-system-is-overloaded)
+- [Considerations regarding shared and zero-copy memory approaches](#considerations-regarding-shared-and-zero-copy-memory-approaches)
+- [Provider control and provider capabilities](#provider-control-and-provider-capabilities)
+- [Control the rate of updates](#control-the-rate-of-updates)
+- [Differentiate between different providers of the same VSS data](#differentiate-between-different-providers-of-the-same-vss-data)
+- [Data Aliveness/Availability](#data-alivenessavailability)
+- [Missing features from `sdv.databroker.v1` in `kuksa.val.v1`](#missing-features-from-sdvdatabrokerv1-in-kuksavalv1)
+- [Exploring a design of a bidirectional streaming API](#exploring-a-design-of-a-bidirectional-streaming-api)
+ - [Design choices](#design-choices)
+ - [Stream](#stream)
+ - [Bidirectional stream](#bidirectional-stream)
+ - [Actuators](#actuators)
+ - [Overview](#overview)
+- [message Datapoint](#message-datapoint)
+ - [Alternative 1](#alternative-1)
+ - [Alternative 2](#alternative-2)
+ - [Alternative 3](#alternative-3)
+- [Split subscribe method due to performance reasons](#split-subscribe-method-due-to-performance-reasons)
+- [Service VAL better naming](#service-val-better-naming)
+- [Extend and split service definition for kuksa.val.v3 (current latest version kuksa.val.v2)?](#extend-and-split-service-definition-for-kuksavalv3-current-latest-version-kuksavalv2)
+- [COVESA topics](#covesa-topics)
+
+# Data availability/persistence according to lifecycle of Client, Databroker and Provider
+ Status: 🔴
+ Current decisions:
+ 1. Databroker stores last signal values during its own lifecycle.
+ 2. It is not possible to reset values.
+ 3. Signal Consumer and Provider are not aware of each other.
+
+ Description:
+ 1. The data broker either ...
+ * Stores last set values during its own lifecycle,
+ * Stores values during the system's power cycle (i.e., "persists" values over own restarts, or
+ * store values over system's power cycles.
+
+ 2. How to "reset" values availability if its provider got inactive (without resetting the value)?
+ * -> Client's job (e.g. using timestamp)?
+ * -> Broker's job (e.g. using timestamp + minimal update cycle)?
+
+ 3. Provider and client aliveness
+ * If there is no active client subscription should the provider stop sending values to Databroker?
+ * If there is no active provider setting values while client subscription? Should Databroker or Client be aware of it?
+
+# Wildcard support
+ Status: 🟢
+ Current decisions:
+ Only `ListMetadata` support wildcard due to complex error handling in the implementation and usability.
+ Also due to performance issues, i.e when calling `subscribe` since the wildcard was checked for each path and each response.
+ Description:
+ * Do we want it only for `GetMetadata`?
+
+Reference -> [Wildcard](../wildcard_matching.md)
+
+# Registration of Datapoints
+ Status: 🟢
+ Current decisions:
+ Provider can register and claim only actuators.
+ An actuator can only be claimed by a Provider.
+ Description:
+Do we need a method for providers to register data points at runtime? **Implemented in old API?**:
+
+Its purpose would be:
+1. Publishing the availability of new data points during runtime
+2. Claiming providership of those data points
+
+
+In a mature system some "central instance", e.g., the system integrator must be responsible to ensure that no two components can register/claim providership for any data point.
+In case of a registration method the central instance would either have to
+ * make sure there is single provider per data point via configuration, or
+ * use access rights management to enforce that only the right component can register a certain data point.
+
+
+# Availability of Datapoints
+ Status: 🔴
+ Current decisions:
+ Description:
+1. The system must be capable of individually checking the availability of each data point on a specific instance of the data broker. This means verifying whether there is an active provider instance installed on the system where the data broker instance is running, which is capable of supplying the data point during the current update cycle.
+
+2. It shall be possible to determine the availability of the actual value of each data point separately on a certain instance of the data broker.
+This represents the information if the provider of that data point is up and running on the system and already provided a value of that data point.
+
+
+# Lifecycle of components
+ Status: 🟢
+ Current decisions: Kuksa.val.v2 API as well as Databroker implementation does not depend on any certain order of starting for components.
+ Important point -> Signal Consumer and Provider should implement a retry policy in case the connection gets lost.
+ Description:
+The proper function of the overall system of components "around" the data broker, i.e., applications, providers, and the broker itself, shall not depend on a certain order of starting the components. This means:
+1. Any clients of the data broker (applications, providers) shall not assume the availability of the data broker service when they startup.
+2. Any clients of the data broker (applications, providers) shall not assume the permanent availability of the data broker service during their runtime.
+3. Any applications/clients shall not assume the availability of a value for any data point at their startup.
+4. Any applications/clients shall not assume the permanent presence of a value for any data point during their runtime.
+Explanation: Any component of the system can come and go - components could stop working due to a crash (what should not but will happen) or because of an update (which is a regular use case). Furthermore, components could reside on different execution environments which need restarts at different points of time. This shall not result in stopping and restarting the overall system. Instead, each and every component shall react in an appropriate way on changing availability of its dependencies.
+
+# Path requests
+ Status: 🟢
+ Current decisions: Databroker fully supports VSS
+ Description:
+The Databroker shall support at least those metadata elements as defined by the VSS rule set. Data points/nodes are (primarily) identified ("addressed") by their name/path which is a string.
+VSS arranges nodes in a tree structure, separating elements via a single dot ("."). This shall be supported but must not be a mandatory requirement.
+```console
+ Vehicle.Speed;
+ Vehicle.Seats.Row1.Position;
+ ...
+```
+
+# Errors
+ Status: 🟢
+ Current decisions: Kuksa.val.v2 API as well as Databroker implementation is consistent and it is aligned by all service calls
+ returning [gRPC Error](https://grpc.github.io/grpc/core/md_doc_statuscodes.html).
+ Description:
+Error response returned by **all gRPC service calls** must be aligned with [gRPC Error](https://grpc.github.io/grpc/core/md_doc_statuscodes.html).
+```protobuf
+message Status {
+ // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+ int32 code = 1;
+
+ // A developer-facing error message, which should be in English. Any
+ // user-facing error message should be localized and sent in the
+ // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+ string message = 2;
+
+ // A list of messages that carry the error details. There will be a
+ // common set of message types for APIs to use.
+ repeated google.protobuf.Any details = 3;
+}
+```
+Field `details` of type `Any` will be a serialized message as `bytes` containing an internal Databroker [Error](https://github.com/eclipse-kuksa/kuksa-databroker/blob/main/proto/kuksa/val/v1/types.proto#L246):
+```protobuf
+message Error {
+ uint32 code = 1;
+ string reason = 2;
+ string message = 3;
+}
+```
+
+# Setting Values
+ Status: 🟢
+ Current decisions:
+ Description:
+1. Attributes:
+ * It shall not be possible to set attribute values, except once at startup time by its respective responsible provider.
+2. Sensors:
+ * There shall be only one client able to set the current sensor value during each arbitrary span of time.
+3. Actuators:
+ * ? Actuator data points have a current and a target value. The current value represents the actual state of the actuator, whereas the target value represents a state desired by that client, who most recently set that target value.
+ * Only one client shall be able to set the current actuator value during each arbitrary span of time. This client is the provider of the data point.
+ * Multiple clients may be able to set the target value of an actuator.
+ * Only the current provider client shall react on setting a new target value. It is expected that the provider tries to bring the current state of an actuator into the state requested by the target value. If this is not possible (for some reason), the provider is responsible to reset the state of the target value to that of the current value of the same data point.
+ -> This is actually not a requirement to the data broker, but to the overall "usage concept" around the broker.
+ *? If no (active) provider is available for an actuator data point, its current and target value shall be "unavailable". A set request to a target value shall be "ignored" and the resulting target and current value shall stay as "unavailable".
+
+# Atomic operatoins
+ Status: 🟢
+ Current decisions: Kuksa.val.v2 API as well as Databroker implementation supports atomic operations handling sequentially request and responses for all the service methods.
+ Description:
+All data point values set by a single request must be updated in an atomic manner. This means:
+1. Set requests must be handled strongly sequentially in the order of reception.
+
+2. Responses to get requests and notifications on behalf of active subscriptions must represent the state of data points in-between finished set requests, i.e., a single set request (updating multiple data points) must not be interfered with get requests or update notifications.
+
+# Update notifications
+ Status: 🟢
+ Current decisions: Databroker implementation will only receive datapoints when their values changed.
+ Description:
+1. Update notifications for active subscriptions of multiple data points shall always contain the state of all data points of the subscription even if just one value has changed.
+
+2. If this behavior is not wanted by a client, it must subscribe data points separately.
+
+# Access rights
+ Status: 🟢
+ Current decisions:
+ 1. Many Providers can update a sensor at same time, just the last value will remain on Databroker database.
+ 2. Many Signal Consumers can change the value of an actuator, but only a Provider will forward its value to the Vehicle Network.
+ 3. New actuators values will be forwarded from Signal Consumer to Databroker to Provider to Vehicle Network, but Databroker will not be responsible for resetting any value on its Database, just fire and forget.
+ Description:
+1. Sensor (-like) data points: Its value shall be set by a single provider only (at least at a time)
+
+2. Actuator (-like) data points: Multiple clients may set its (target) value, a single client may act on its last set (target) value and "reset" it. Only a single client must set its current value (if there is a distinguishing).
+Hint: This does not necessarily need to be ensured via the "API design" - it could also be ensured via access rights management configuration.
+
+
+# VSS signals - Users vs providers
+ Status: 🟢/🟡
+ Current decisions: New Databroker API kuksa.val.v2 will only have one service which will be used as entry point for Signal Consumer and Provider, it does not mean that it can not change in the future.
+ Description:
+The Vehicle Signals Specification (VSS) and Vehicle Information Service Specification (VISS) describes the standardized signals available (or not) in a vehicle. Both standards also describe how users interact with these signals.
+
+* They can read and subscribe to [actuators, sensors](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/sensor_actuator/) and [attributes](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/sensor_actuator/).
+* They can set [actuators](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/sensor_actuator/) (and only actuators).
+ For VISSv2 specifically, see [read](https://w3c.github.io/automotive/spec/VISSv2_Core.html#read), [subscribe](https://w3c.github.io/automotive/spec/VISSv2_Core.html#subscribe), [update](https://w3c.github.io/automotive/spec/VISSv2_Core.html#update).
+
+VSS and VISS do _not_ specify how signals are provided _into_ the VSS server / model.
+
+These two aspects of interacting with the signals can thus be divided into:
+* **Provider**
+ _A provider is providing a signal (sensor, attribute or actuator) to the VSS tree._
+ _A provider can also use signals as any other user._
+
+* **User**
+ _A user (or client) is using the signals in the VSS tree (without providing any of them itself)._
+
+where the VSS and VISS* specifications only specify the **User** part.
+
+When designing the databroker API, the method kuksa.val uses for providing signals was investigated. In short, kuksa-val-server takes the approach of trying to shoehorn the need of signal providers into the existing VISSv2 protocol. This is problematic for several reasons:
+
+* By reusing, **but changing the meaning** of the terminology used in VSS, there is a constant dissonance between the terms used. This is a recipe for confusion.
+* By deviating from the standard in _this particular way_, a standards compliant VISSv2 client cannot use it in any meaningful way.
+* It makes it harder to actually provide a standards compliant VISSv2 in the future.
+* By using the same methods for both signal providers _and_ users of signals it's harder (or impossible) to customize them for their different requirements.
+
+With this in mind, databroker chose to make a clear distinction between signal providers and signal users. It doesn't use this terminology though. It does this by splitting the interface into two separate services, which are customized for their different requirements / use cases. It doesn't need to be implemented in this way in order to achieve the same design goal, though.
+
+# Enable "easy to use" user facing API
+ Status: 🟢
+ Current decisions:
+ With the new design of kuksa.val.v2 this use case get solved by just "fire and forget" new actuator values.
+ Use case:
+ The user wants to lock a door and know when it's done / whether it worked.
+ 1. User calls `subscribe(Vehicle.Door.Locked)`
+ 2. User calls `actuate(Vehicle.Door.Locked, true)`
+ 3. Provider receives the request and starts forwarding the request to the Vehicle Network.
+ 4. Provider at some point in time receives a new value for Vehicle.Door.Locked signal from the Vehicle Network.
+ 5. Provider publishes the new value to Databroker.
+ 6. User receives a new changed value for Vehicle.Door.Locked and concludes that the door has now been locked.
+
+ Note: User should define its own timeout for each application in case the actuate value expected is not received.
+
+ Or if there are no providers.
+ 1. User calls `subscribe(Vehicle.Door.Locked)`
+ 2. User calls `actuate(Vehicle.Door.Locked, true)`
+ 3. Databroker returns an error when no available provider has claimed the actuator signal.
+ Description:
+This is meant to illustrate what type of user APIs that can be created depending on what the provider API looks like (assuming we have one).
+
+Use case:
+
+The user wants to lock a door and know when it's done / whether it worked.
+
+**The kuksa.val.v1 or "key-value store" way.**
+
+Something conceptually like this:
+1. User calls `set(Vehicle.Door.Locked, field=TARGET_VALUE, true)`
+2. User calls `subscribe(Vehicle.Door.Locked, field=VALUE)`
+3. Provider (subscribing to TARGET_VALUE) receives the request and starts actuating, providing VALUE when it changes.
+4. User is notified when VALUE turns to true, and concludes that the door has now been locked.
+
+But what happens if the door fails to lock, e.g. the door is not closed or the actuator is broken?
+* What should the user subscribe to for this information?
+* And how long should it wait before concluding that it failed?
+* And what happens if there is no provider of this actuator?
+
+Another question, a bit convoluted for a quick actuator like this (but applicable for slower moving things), is what happens if another user calls set(..., false) before the actuator change has taken place?
+
+This can be solved by subscribing to both VALUE and TARGET_VALUE.
+
+1. User calls `set(Vehicle.Door.Locked, field=TARGET_VALUE, true)`
+2. User calls `subscribe(Vehicle.Door.Locked, fields=[VALUE, TARGET_VALUE])`
+3. Provider (subscribing to TARGET_VALUE) receives the request and starts actuating, providing VALUE when it changes.
+4. User is notified when VALUE turns true, and concludes that the door has now been locked, or the user is notified when TARGET_VALUE turns false, and knows that the operation was cancelled.
+
+
+**The user API + provider API way.**
+
+So what could this look like if we instead had an "easy to use" user API + a provider API and the server in between.
+
+Something like this:
+1. User calls `set(Vehicle.Door.Locked, true)`
+2. Server receives the request and sends an ACTUATOR_TARGET value to the provider of this signal.
+3. The provider receives it and starts actuating and provides VALUE back to the server when it changes.
+4. The provider sends "actuator target reached" back to the server.
+5. The server sends a (success) response back to the client.
+
+Or in case of failure:
+
+User calls `set(Vehicle.Door.Locked, true)`
+1. Server receives the request and sends an ACTUATOR_TARGET value to the provider of this signal.
+2. The provider receives it and starts actuating but notice that it fails, or that it's not allowed at the moment.
+3. The provider sends "actuator failed" or something back.
+4. The server sends a response "actuator failed" back to the client.
+
+Or if there are no providers.
+1. User calls `set(Vehicle.Door.Locked, true)`.
+2. The server knows that there are no providers, providing that signal.
+3. The server sends a response "actuator failed" or "not available" back to the client
+
+This latter approach would seem to represent an easier to use API for the user/library.
+
+**Note**
+Doing it like this puts the requirement to know the details of the actuator on the actuator provider.
+
+The actuator provider is better suited to know of reasonable timeouts etc in comparison to the users of signals (or the server). The user doesn't need to know how long to wait for something or to which error to subscribe. The server would only have to handle the presence detection which is a generic feature that doesn't require knowledge of sensor specifics.
+
+
+
+# Performance / runtime footprint
+ Status: 🟢
+ Current decisions: A detailed performance report will be provided after release of kuksa.val.v2. So far kuksa.val.v2 shows better performance than kuksa.val.v1 and sdv.databroker.v1
+ Description:
+Providers, especially of sensor data, are often setting values in rapid succession over long periods of time. Using unary GRPC calls for `Set` operations, is less efficient in terms of throughput when compared to GRPC streams. It's also more CPU intensive.
+
+The current design of `kuksa.val.v1` only provides unary call to set values. This represents a pure regression when compared to the databroker API.
+
+It's not a huge issue (in practice) if users avoid using `kuksa_client.KuksaClientThread()`. If they use that, I would say it's barely usable for e.g. CAN in its current form.
+
+**Python performance setting signal values**
+
+| Set only | type | throughput |
+|-----------------------------|-------|---------------:|
+| kuksa_client (1) | | ~ 80 / s |
+| kuksa.grpc (2) | async | ~ 2 500 / s |
+| kuksa.val.v1 (3) | async | ~ 6 500 / s |
+| kuksa.val.v1 (3) | sync | ~ 9 000 / s |
+| databroker (4) | sync | ~ 26 000 / s |
+
+1 kuksa_client is using `kuksa_client.KuksaClientThread()`
+
+2 kuksa.grpc is using `kuksa_client.grpc` without the legacy `kuksa_client.KuksaClientThread()` wrapping it
+
+3 uses the generated `val_pb2*.py` python lib directly
+
+4 uses the generated `collector_pb2*.py` python lib directly
+
+
+Improvements:
+* Higher throughput.
+* Reduced CPU load.
+* Lower latency (probably, hasn't been measured)
+
+What's needed:
+* Introduce a streaming (GRPC) interface for providing sensor data.
+
+# Throttling-mode when system is overloaded.
+ Status: 🔴
+ Current decisions:
+ Description:
+Is it worth to consider some throttling mode to be activated by the user in case system or any component is overloaded?
+Throttling modes to think about:
+ * Rate Limiting
+ * Bandwidth Throttling
+ * CPU Throttling
+
+# Considerations regarding shared and zero-copy memory approaches
+ Status: 🔴
+ Current decisions:
+ Description:
+Pros:
+Cons:
+
+# Provider control and provider capabilities
+ Status: 🔴
+ Current decisions:
+ Description:
+Open questions:
+
+Should the "change type" of a sensor (i.e. CONTINUOUS vs ON_CHANGE) be decided by providers
+or in the VSS metadata?
+It only makes sense for consumers to request their preferred rate of updates when they are
+subscribing to a sensor of type CONTINUOUS. That would be an argument for providing this
+information as part of the VSS metadata, so that it doesn't vary between vehicles.
+
+# Control the rate of updates
+ Status: 🔴
+ Current decisions:
+ Description:
+Users of (continuous) sensor data can have different preferences with regard to how often
+they would like to receive updates. E.g. Vehicle.Speed is updated 100 times per second, but
+a consumer would only need it 1 time per second. This would introduce unnecessary processing
+requirements on the consumer (and provider).
+
+Currently there is no way for databroker to know how often a provider should provide updates.
+There is also no way for clients to instruct databroker of what rate they want.
+Sensor data is just sent at the rate it is received and providers are just sending sensor data at the rate
+they themselves decide.
+
+If a consumer can communicate this wish, there are several options for improvement.
+
+Improvements:
+* Reduction in load for consumers by adapting the update rate based on their preferences.
+* Reduction in load for providers by propagating needed update rate.
+* Reduction in load for databroker by disabling unneeded providers.
+
+What's needed:
+* Introduce a way for clients to tell databroker of their preferred rate of updates.
+* Introduce a way for databroker to tell providers of highest needed frequency of sensor
+ data to which they can then adapt.
+ [probably needs] open stream `databroker` -> `provider`
+
+Other considerations:
+
+Setting the desired rate of update would only make sense for sensors of type CONTINUOUS.
+Sensors of type ON_CHANGE would always provide updates when the value changes.
+It could also make sense to introduce a way to request a momentary value from a provider,
+which would be used if a consumer only requests a momentary value (and doesn't subscribe).
+
+
+# Differentiate between different providers of the same VSS data
+ Status: 🔴
+ Current decisions:
+ Description:
+Different sensors can provide data that is mapped to the same VSS signal / entry.
+This data can be of different resolution and / or quality. For example, an accelerometer
+can be used to infer the current speed of a vehicle, but a speedometer would probably
+provide a higher quality measurement. In the same way that a consumer could instruct
+databroker of a preferred update rate, it could also instruct the databroker of what
+accuracy of a sensor it needs.
+
+It's currently possible for multiple providers to set the same VSS entry, but there is no
+way for databroker to differentiate between them in any way.
+
+It could make sense to introduce a way for providers to describe themselves in order to
+make it possible to differentiate between them with regard to update rates, power consumption, accuracy or quality of their sensor data.
+
+This would give databroker clients a way to differentiate (and choose) different
+sources of data and make informed decisions based on that.
+
+Improvements:
+* Choose between providers based on available update frequency.
+* Fallback when sensor information from one sensor isn't available.
+
+What's needed:
+* Introduce a way for providers to describe their capabilities and properties of their
+ provided sensor data.
+
+Optional improvements:
+* Choose between providers based on needed quality / accuracy of sensor.
+ [needs] control plane, i.e. an open stream `databroker` -> `provider`
+* Consumers can get extended sensor data information.
+
+Optionally needed:
+* Introduce a way for consumers to tell databroker of their needed quality / accuracy
+ of VSS signal.
+
+
+# Data Aliveness/Availability
+ Status: 🔴
+ Current decisions:
+ Description:
+The VSS signals / datapoints that are accessed through databroker can have a value and a
+timestamp. If they have never been set, they will have neither.
+
+There is no way for databroker to know if a value is up to date or not, since it doesn't
+have any information with regard to how often it should be updated or a way to determine
+if a provider has stopped providing data.
+
+For signals with a fixed update rate (sensors of type CONTINUOUS), it would theoretically
+be possible for either clients or the databroker to determine if a signal is up to date,
+by keeping track of the time since the last update.
+
+The providers of sensor data would be better suited to know the rate of update, and if
+the databroker were provided this information, it could automatically determine if a
+sensor hasn't been updated within its expected time window.
+
+For signals that only update based on events (i.e. a door opens), this isn't possible.
+Tracking the liveness of these signals would either require the providers to continuously
+send the same value even though it hasn't changed, or to have an open connection or
+another heartbeat mechanism between databroker and the provider to detect a missing provider.
+
+If there was a way to determine the availability of providers, the databroker could
+automatically determine that a signal was stale if its provider is no longer available.
+
+Improvements:
+* Track availability / liveness of VSS signals.
+
+What's needed:
+* Introduce a way to signal that a signal is available / not available (in `kuksa.val.v1`).
+* Introduce a way for providers to tell databroker of a signals time to live (TTL).
+* Introduce a way for databroker to track availability of providers (and which VSS signals
+ they are providing).
+ [needs] an open stream `provider` -> `databroker` or `databroker` -> `provider`
+* Implement tracking of TTL in databroker to automatically set unavailable status to signals
+ that have not been updated in time.
+
+
+Other considerations:
+Attributes probably don't need to have aliveness functionality. They would be
+unavailable if they have never been set, but since they shouldn't update at runtime,
+once set they should be valid indefinitely.
+
+
+# Missing features from `sdv.databroker.v1` in `kuksa.val.v1`
+ Status: 🟢
+ Current decisions: New API kuksa.val.v2 will cover and combine feature from both APIs.
+ Description:
+Sort list: What features would be lost if removing sdv.databroker.v1 today
+ * Registration of new datapoints
+ * SQL queries
+ * Streaming updates (i.e. worse performance)
+ * Connectivity check (no streaming interface)
+
+
+# Exploring a design of a bidirectional streaming API
+ Status: 🟢
+ Current decisions: New bidirectional streaming service method was added to `kuksa.val.v2`
+ SensorCommand (start/stop) should be implemented at some point.🟡
+ Description:
+This represent one way to design an interface that would enable most of the improvements
+listed above and provide a clear path forward for introducing them.
+
+
+## Design choices
+
+In this design, a single bidirectional stream is used to provide everything needed by
+providers:
+`rpc Provide(stream ProviderRequest) returns (stream ProviderResponse);`
+
+This combines the control channel and the data channel into one. An alternative
+would be to split it into two bidirectional streams, one for control and the other
+for data. I'm not sure which makes the most sense.
+
+#### Stream
+By having an open stream (at all) between databroker and the provider, both ends
+can detect if the other goes away.
+
+Furthermore:
+
+A stream from provider -> databroker:
+- Enables higher throughput for sensor data updates and lower CPU usage.
+
+A stream from databroker -> provider:
+- Provides a way for databroker to send control commands to this provider.
+- Provides a way for databroker to send errors to this provider.
+
+#### Bidirectional stream
+
+A bidirectional stream between Provider <-> databroker
+- Provides databroker with a way to associate information sent from the provider
+ (e.g. capabilities, which actuators it provides, which sensors etc) with the stream
+ it uses to control it.
+
+#### Actuators
+VSS defines three types of signals:
+* Attribute
+* Sensor
+* Actuator
+
+An actuator acts as both something you can actuate and something providing values (a sensor).
+It's not even necessarily the same component providing the implementation of these separate concerns. With this in mind, a provider providing a VSS actuator would in this design provide
+an `Actuator(path)` to the server in order to receive `ActuateCommand`s, and provide
+`Sensor(path)` and send `SensorData` when they are providing sensor data.
+
+The alternative would be to duplicate everything in `Sensor` for `Actuator`s.
+
+
+## Overview
+The stream in each direction would consist of different "messages" implemented with `oneof {...}`.
+
+In the direction of *Provider -> Server*, at least four types of "messages" would flow (2):
+* `SensorData` containing data streamed by the provider.
+* `Sensor` containing sensor information from the provider.
+* `Actuator` containing actuator information from the provider.
+* `Attribute` containing attribute including the value. (1)
+
+In the direction of *Server -> Provider*, at least three types of "messages" would flow:
+* `ActuateCommand`, tells actuator to actuate (triggered by someone setting actuator targets).
+* `SensorCommand`, controls behaviour of a sensor, i.e.
+ "start", "stop", "try to use this update frequency" etc..
+* `Error`, an error occurred. One type at the moment. It would probably make sense to
+ split it into errors that can occur at any time & errors that are directly caused by
+ things the provider provided.
+
+(1) It would probably make sense to introduce a separate `Error` in the direction of
+ *Provider -> Server*. Currently, the only error in that direction is `ReadError` as part of the
+ sensor data.
+
+(2) It's possible that it makes more sense to provide a separate unary RPC call for setting attributes,
+ since attributes (probably) don't update frequently and (probably) won't need availability status etc..
+
+```proto
+service VAL {
+ ...
+ rpc Provide(stream ProviderRequest) returns (stream ProviderResponse);
+}
+
+message ProviderRequest {
+ oneof provide {
+ Sensor sensor = 1;
+ Actuator actuator = 2;
+ Attribute attribute = 3;
+ SensorData sensor_data = 4;
+ }
+}
+
+message ProviderResponse {
+ oneof command {
+ ActuateCommand actuator_command = 1;
+ SensorCommand sensor_command = 2;
+ Error error = 3;
+ }
+}
+
+...
+
+```
+
+
+# message Datapoint
+ Status: 🟢
+ Current decisions: Alternative 1 was selected for development since `value` it is easy to access and in Protobuf could be `None`, meaning signal exists but does not have a value at the moment.
+ Description:
+Suggestion -> https://github.com/boschglobal/kuksa-databroker/pull/4#discussion_r1766459917
+
+Discussion -> https://github.com/eclipse-kuksa/kuksa-databroker/pull/33#discussion_r1776993874
+
+### Alternative 1
+```proto
+message Datapoint {
+ google.protobuf.Timestamp timestamp = 1;
+ Value value = 2;
+}
+```
+pros : easy to access value
+
+cons : no possible value status/state
+
+### Alternative 2
+```proto
+message Datapoint {
+ google.protobuf.Timestamp timestamp = 1;
+
+ oneof value_state {
+ State state = 2;
+ Value value = 3;
+ }
+}
+
+enum State {
+ // Unspecified value failure, reserved for gRPC backwards compatibility
+ // (see https://protobuf.dev/programming-guides/dos-donts/#unspecified-enum)
+ UNSPECIFIED = 0;
+ // The signal is known and provided, but doesn't have a valid value
+ INVALID_VALUE = 1;
+ // The signal is known, but no value is provided currently
+ NOT_PROVIDED = 2;
+}
+```
+
+pros : easy to understand
+
+cons : "more difficult to implement" depend on programming language
+
+### Alternative 3
+```proto
+message Datapoint {
+ google.protobuf.Timestamp timestamp = 1;
+
+ Status status = 2;
+ Value value = 3;
+}
+
+enum Status {
+ STATUS_UNSPECIFIED = 0;
+ STATUS_OK = 1;
+ STATUS_VALUE_NOT_AVAILABLE = 2;
+}
+```
+
+pros : easy to understand and access
+
+cons : Difficult to keep consistency between `status` and `value` values.
+
+# Split subscribe method due to performance reasons
+ Status: 🟢
+ Current decisions: Implemented
+ Description:
+ Before:
+ rpc Subscribe(SubscribeRequest) returns (stream SubscribeResponse); -> Signal_ID (path, id)
+
+
+ Now:
+ rpc Subscribe(SubscribeRequest) returns (stream SubscribeResponse); -> strings path
+
+ rpc SubscribeId(SubscribeRequestId) returns (stream SubscribeResponseId); -> int32 path -> faster
+
+
+message SubscribeByIdResponse {
+ map entries = 1;
+ //map entries = 1; // add to discussion PR
+}
+
+# Service VAL better naming
+ Status: 🔴
+ Current decisions:
+ Description:
+
+# Extend and split service definition for kuksa.val.v3 (current latest version kuksa.val.v2)?
+ Status: 🔴
+ Current decisions:
+ Description:
+* split up signal service (consumer, provider)
+
+* split services (signals -> VAL, metadata -> server_info, health_check -> metrics, num providers, vss_validation, ...)
+
+
+# COVESA topics
+ Status: 🔴
+ Current decisions:
+ Description:
diff --git a/doc/kuksa_analysis/kuksa_requirements.md b/doc/kuksa_analysis/kuksa_requirements.md
new file mode 100644
index 00000000..ceef743f
--- /dev/null
+++ b/doc/kuksa_analysis/kuksa_requirements.md
@@ -0,0 +1,241 @@
+# Requirements
+
+# Content
+- [Requirements](#requirements)
+- [Content](#content)
+- [Requirements status](#requirements-status)
+- [Functional requirements](#functional-requirements)
+ - [As Signal Consumer](#as-signal-consumer)
+ - [FR1-ASC](#fr1-asc)
+ - [FR2-ASC](#fr2-asc)
+ - [FR3-ASC](#fr3-asc)
+ - [FR4-ASC](#fr4-asc)
+ - [FR5-ASC](#fr5-asc)
+ - [FR6-ASC](#fr6-asc)
+ - [FR7-ASC](#fr7-asc)
+ - [FR8-ASC](#fr8-asc)
+ - [As Databroker](#as-databroker)
+ - [FR1-AD](#fr1-ad)
+ - [FR2-AD](#fr2-ad)
+ - [FR3-AD](#fr3-ad)
+ - [FR4-AD](#fr4-ad)
+ - [FR5-AD](#fr5-ad)
+ - [As Provider](#as-provider)
+ - [FR1-AP](#fr1-ap)
+ - [FR2-AP](#fr2-ap)
+ - [FR3-AP](#fr3-ap)
+ - [FR4-AP](#fr4-ap)
+ - [FR5-AP](#fr5-ap)
+- [Non-Functional Requirements](#non-functional-requirements)
+- [Domain requirements](#domain-requirements)
+
+# Requirements status
+| Description | Status |
+|-------------------------------------------|------------------------------|
+| Implemented and Verified | 🟢 |
+| Approved, Not Yet Implemented | 🟡 |
+| Long Term Goal | 🔴 |
+
+# Functional requirements
+
+## As Signal Consumer
+### FR1-ASC
+ Title: Single access service Point for Signal Consumer
+
+ Status: 🟢
+
+ Description: The Signal Consumer shall have a single service point accessible.
+
+### FR2-ASC
+ Title: Uniform retrieval of vehicle and environmental Signal Values
+
+ Status: 🟢
+
+ Description: The Signal Consumer shall get sensor, actuator and attributes values of the vehicle signals and its environment(air temperature, etc) in a uniform manner.
+ 1. IF there is an error
+ * THEN the Signal Consumer shall not receive any signal value, just one single error with detailed error information
+ * ELSE the Signal Consumer shall receive exactly all the signals’ values requested.
+
+ n. A signal can have value out of the set of the defined value restriction/data type and its 'value' can be explicitly 'None', meaning the Signal exists but no value is present.
+
+ 2. The Signal Consumer only shall get values for signal to which it has permission to.
+ 3. The Signal Consumer shall provide the paths or ids(int32) of the requested signals.
+
+### FR3-ASC
+ Title: Subscription and high frequency notification for Signal Value changes
+
+ Status: 🟢
+
+ Description: The Signal Consumer shall be able to subscribe to sensor or actuator values of the vehicle signals and get immediate notifications when any of the signal values change.
+ * IF there is an error
+ THEN the Signal Consumer shall not receive any signal value, just one single error with detailed error information
+ ELSE the Signal Consumer shall receive exactly all the signals’ values requested that have changed.
+ * The Signal Consumer only shall get values for signal to which it has permission to.
+ * The Signal Consumer shall provide the paths or the ids(int32) of the requested signals.
+
+### FR4-ASC
+ Title: Filtered interval subscription for Signal Values
+
+ Status: 🔴
+
+ Description: The Signal Consumer shall subscribe and be able to apply a filter to receive a signal values with an interval of x milliseconds.
+ * IF there is an error
+ THEN the Signal Consumer shall not receive any signal value, just one single error with detailed error information
+ ELSE the Signal Consumer shall receive exactly all the signals’ values requested.
+ * The Signal Consumer only shall get values for signal to which it has permission to.
+ * The Signal Consumer shall provide the paths or the ids(int32) of the requested signals.
+
+### FR5-ASC
+ Title: Accessing static metadata of signals
+
+ Status: 🟢
+
+ Description: A Signal Consumer shall be able to get static metadata from signals.
+ * IF there is an error
+ THEN the Signal Consumer shall not receive any signal metadata, just one single error with detailed error information
+ ELSE the Signal Consumer shall receive exactly all the signals’ metadata requested.
+
+ * All sensors, actuators, and attributes values for which a Signal Consumer has permission to.
+ * The Signal Consumer shall provide the path, paths or wildcard of the signals.
+
+### FR6-ASC
+ Title: Actuation of actuator value with Databroker forwarded acknowledgment.
+
+ Status: 🟢
+
+ Description: The Signal Consumer shall be able to actuate the value of an actuator. This value should be forwarded to the actuator's provider if the provider is available, provider to Vehicle Network and get an acknowledgment response back.
+ * Databroker should not store the provided value but only forward it to the provider.
+
+ * IF no provider is connected
+ THEN Signal Consumer shall receive an error that no provider is available.
+ ELSE IF databroker successfully forwarded the value to the provider
+ THEN Signal Consumer shall receive an acknowledgement of receipt.
+ * IF provided signal path is not an actuator Signal Consumer should receive an error.
+
+### FR7-ASC
+ Title: Actuate multiple actuators simultaneously with Databroker forwarded acknowledgment.
+
+ Status: 🟢
+
+ Description: The Signal Consumer shall be able to actuate the values of multiple actuators simultaneously. These values should be forwarded to the corresponding actuators' providers only if all providers are available.
+ * Databroker should not store the provided value but only forward them to the providers.
+
+ * IF any provider is not connected
+ THEN Signal Consumer shall receive an error that no provider is available.
+ ELSE IF databroker successfully forwarded the values to all providers
+ THEN Signal Consumer shall receive an acknowledgement of receipt.
+ * IF provided signal path is not an actuator Signal Consumer should receive an error.
+
+### FR8-ASC
+ Title: Provider availability detection for Signal Consumer
+
+ Status: 🔴
+
+ Description: The Signal Consumer shall be able to know if there is a provider up and running.
+
+## As Databroker
+### FR1-AD
+ Title: Handling of COVESA Vehicle Signal Specification (VSS) syntax by Databroker
+
+ Status: 🟢
+
+ Description: The Databroker shall handle catalogs of signals described by the syntax as defined by the COVESA Vehicle Signal Specification (VSS). This relates to all aspects of the VSS syntax definition, which is also called VSS rule set. This implies that the Databroker can handle the signal catalog as defined by the COVESA VSS.
+
+### FR2-AD
+ Title: Support for VSS metadata elements by Databroker
+
+ Status: 🟢
+
+ Description: The Databroker shall support at least those metadata elements as defined by the VSS rule set.
+
+### FR3-AD
+ Title: Consumer subscription management by Databroker
+
+ Status: 🟢
+
+ Description: The Databroker shall keep a local record of all subscriptions of signal consumers.
+ * The Databroker shall add or remove subscriptions to a subscription pool according to the subscription requests of the Signal Consumer.
+
+### FR4-AD
+ Title: Actuator ownership claim management by Databroker
+
+ Status: 🟢
+
+ Description: The Databroker shall maintain a local record of all actuator ownership claims made by signal providers.
+ * The Databroker shall manage an "ownership claim actuation pool," adding or removing claims based on the requests from signal providers.
+ * Each actuator can be claimed by only one provider at any given time.
+
+
+### FR5-AD
+ Title: Command transmission capabilities of Databroker to Provider
+
+ Status: 🟢/🟡
+
+ Description: The Databroker shall be able to send to the Provider the following commands
+ * Actuate on actuator. 🟢
+ * Start receiving signal values from the Provider. 🟡
+ * Stop receiving signal values from the Provider. 🟡
+
+## As Provider
+### FR1-AP
+ Title: Provider actuator ownership claim management by Databroker
+
+ Status: 🟢
+
+ Description: The Databroker shall offer a method to Providers allowing to claim providership of a set of actuators.
+ * IF all claimed actuators are known AND
+ the provider has providing rights for all claimed actuators AND
+ all claimed actuators are NOT yet claimed by another provider
+ THEN the Databroker shall accept and store the claim
+ ELSE the Databroker shall reject the overall claim and return an error containing the reason.
+
+ * The Databroker shall remember accepted claims of a provider if the connection to the provider is lost.
+ * The Databroker shall allow providers to re-claim previously successfully claimed actuators.
+
+### FR2-AP
+ Title: High-Frequency publishing of Signal Values by Provider
+
+ Status: 🟢
+
+ Description: The Databroker shall be capable of publishing signal values at the cycle time received from the Vehicle Network.
+ * IF all published signal are known AND
+ the provider has providing rights for all signals
+ THEN the Databroker shall accept and store the values
+ ELSE the Databroker shall reject the overall request and return an error response containing the reason.
+
+### FR3-AP
+ Title: Publishing of Signal Values by Provider
+
+ Status: 🟢
+
+ Description: The Databroker shall be able to publish signal values.
+ * IF all published signal are known AND
+ the provider has providing rights for all signals
+ THEN the Databroker shall accept and store the values
+ ELSE the Databroker shall reject the overall request and return an error response containing the reason.
+
+### FR4-AP
+ Title: Actuation notification handling
+
+ Status: 🟢
+
+ Description: The Databroker shall offer a method to providers to subscribe for actuation notifications on a (sub-) set of claimed actuators.
+ * Provider shall only receive actuation requests for actuators that it owns or manages.
+ * Provider shall process actuation requests and forward them to the Vehicle Network.
+ * Provider shall notify the Databroker back right after the actuation request was forwarded.
+
+### FR5-AP
+ Title: Signal state update mechanism for providers by Databroker
+
+ Status: 🔴
+
+ Description: The Databroker shall offer a method to providers to update the current state of a set of signals.
+ a. The current state consists of a timestamp and either a valid value or a failure state.
+ b. The Databroker shall offer a method optimized for frequent updates.
+ c. The Databroker should offer a second method for non-frequent updates that is easy to use in a provider's implementation.
+ d. The Databroker shall reject updating the current state of signals where the client is not the provider of.
+ e. The Databroker shall store the updated value or failure state of a signal.
+
+# Non-Functional Requirements
+
+# Domain requirements
diff --git a/doc/kuksa_analysis/kuksa_use_cases.md b/doc/kuksa_analysis/kuksa_use_cases.md
new file mode 100644
index 00000000..5bb15296
--- /dev/null
+++ b/doc/kuksa_analysis/kuksa_use_cases.md
@@ -0,0 +1,332 @@
+# KUKSA Use Cases
+
+
+# Content
+- [KUKSA Use Cases](#kuksa-use-cases)
+- [Content](#content)
+- [Use Cases status](#use-cases-status)
+- [Use cases:](#use-cases)
+ - [UC1](#uc1)
+ - [UC2](#uc2)
+ - [UC3](#uc3)
+ - [UC4](#uc4)
+ - [UC5](#uc5)
+ - [UC6](#uc6)
+ - [UC7](#uc7)
+ - [UC8](#uc8)
+
+# Use Cases status
+| Description | Status |
+|-------------------------------------------|------------------------------|
+| Implemented and Verified | 🟢 |
+| Approved, Not Yet Implemented | 🟡 |
+| Long Term Goal | 🔴 |
+
+# Use cases:
+
+## UC1
+ Title: Get sensor values.
+
+ Status: 🟢
+
+ Description: Signal consumer gets sensor and actuator values of signals.
+
+**Primary Actor:** Signal consumer
+
+**Secondary Actors:** Databroker, Provider, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+ * Signal Consumer and Provider contain a valid authentication token to connect and perform calls to Databroker.
+ * Provider can read from Vehicle Network.
+ * Provider can publish signal values to Databroker.
+
+**Special requirements:**
+
+**Assumptions:**
+* Signal Consumer should get exactly all signal values requested.
+
+**Postconditions:**
+* Signal Consumer receives a list with all signals requested in same order as requested.
+
+**Sequence diagram:**
+![Signal Consumer](../diagrams/consumer_get_values.svg)
+
+**Basic Flow:**
+1. Provider opens bidirectional stream and starts publishing sensor and actuator values at the cycle time received from Vehicle network.
+2. Signal Consumer calls GetValues with signal paths/ids.
+3. Databroker sends the response with signal values back to the Signal Consumer.
+4. Use case finishes.
+
+**Exceptions:**
+
+## UC2
+ Title: Signal consumer subscribes by id(int32) to all signal values.
+
+ Status: 🟢
+
+ Description: Signal Consumer subscribes to all sensor and actuator values by ids and gets notifications when values changed.
+
+**Primary Actor:** Signal Consumer
+
+**Secondary Actors:** Databroker, Provider, Vehicle Network
+
+**Priority**: High
+
+**Preconditions:**
+ * Signal Consumer and Provider contain a valid authentication token to connect and perform calls to Databroker.
+ * Provider can read from Vehicle Network.
+ * Provider can constantly publish signal values to Databroker at a high frequency.
+
+**Special requirements:**
+ * The use case must meet a high frequency notification rate.
+
+**Assumptions:**
+ * Signal Consumer subscribes to all possible sensor and actuator values.
+ * Provider provides signal values to Databroker as fast as it receives data from the Vehicle Network.
+
+**Postconditions:**
+ * A subscription is created, and the consumer receives updates for those signals where the state changes.
+
+**Sequence diagram:**
+
+![Signal Consumer Subscribes](../diagrams/consumer_subscribes.svg)
+
+**Basic Flow:**
+1. Provider opens bidirectional stream and starts publishing sensor and actuator values at the cycle time received from Vehicle network.
+2. Signal Consumer calls list metadata to get all signals ids:
+3. Signal Consumer subscribes to all the sensor and actuator values by their ids.
+4. Databroker sends the current values stream back to the Signal Consumer.
+5. Databroker receives from Provider the new signal values and update its database.
+6. Databroker sends the changed values stream back to the Signal Consumer.
+7. Signal Consumer closes subscription to Databroker.
+8. Use case finishes.
+
+**Exceptions:**
+
+
+## UC3
+ Title: The consumer wants to set the value of an actuator signal.
+
+ Status: 🟢
+
+ Description: Signal Consumer actuates on an actuator.
+
+**Primary Actor:** Signal Consumer
+
+**Secondary Actors:** Databroker, Provider, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+ * Signal Consumer and Provider contain a valid authentication token to connect and perform calls to Databroker.
+ * Provider can write to Vehicle Network.
+ * No other Provider has claimed the ownership of the actuator to be actuated.
+
+**Special requirements:**
+
+**Assumptions:**
+ * It does not necessarily mean that the actuator successfully updated its value to the desired new value. The entire chain is only responsible for forwarding the actuation request from Signal Consumer to the Vehicle Network.
+
+**Postconditions:**
+ * Provider forward an ack of receipt back to the Databroker immediately after the actuation request is forwarded.
+ * Signal Consumer receives a response which indicates the operation was successfully forwarded.
+
+**Sequence diagram:**
+![Signal Consumer Actuate](../diagrams/consumer_actuate.svg)
+
+**Basic Flow:**
+1. Provider opens bidirectional stream and sends a request to claim ownership of specific actuators.
+2. Databroker stores the claim request.
+3. Signal Consumer calls actuate with new actuator value.
+4. Databroker forwards the request to the corresponding provider.
+5. Provider receives the request and sends ack response back to Databroker.
+6. Databroker sends ack response back to the Signal Consumer.
+7. Provider sends the actuation request to the Vehicle Network.
+8. Use case finishes.
+
+**Exceptions:**
+
+
+## UC4
+ Title: Signal Consumer actuates on multiple actuator.
+
+ Status: 🟢
+
+ Description: Signal Consumer actuates on multiple actuator.
+
+**Primary Actor:** Signal Consumer
+
+**Secondary Actors:** Databroker, Provider, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+ * Signal Consumer and Providers contain a valid authentication token to connect and perform calls to Databroker.
+ * Providers can write to Vehicle Network.
+ * No other Provider has claimed the ownership of the actuator to be actuated.
+
+**Special requirements:**
+
+**Assumptions:**
+
+**Postconditions:**
+ * Providers forward an ack of receipt back to the Databroker immediately after the actuation request is forwarded.
+ * Signal Consumer receives a response which indicates the operations were successfully forwarded.
+
+**Sequence diagram:**
+![Signal Consumer Actuate](../diagrams/consumer_actuate_multiple_providers.svg)
+
+**Basic Flow:**
+1. Door Provider opens bidirectional stream and sends an ownership claim request for the Door actuator.
+2. Window Provider opens bidirectional stream and sends an ownership claim request for the Window actuator.
+3. Databroker stores the claims requests.
+4. Signal Consumer calls actuate with new Door and Window values.
+5. Databroker forwards the actuation request to the corresponding provider.
+6. Door Provider receives the Door actuation request and sends ack response back to Databroker.
+7. Window Provider receives the Window actuation request and sends ack response back to Databroker.
+8. Databroker sends ack response back to the Signal Consumer.
+9. Door Provider sends the Door actuation request to the Vehicle Network.
+10. Window Provider sends the Window actuation request to the Vehicle Network.
+11. Use case finishes.
+
+**Exceptions:**
+
+## UC5
+ Title: Signal Consumer and Provider get metadata of signals.
+
+ Status: 🟢
+
+ Description: Signal Consumer and Provider receives a list with metadata of VSS signals present in Databroker.
+
+**Primary Actor:** Signal Consumer, Provider
+
+**Secondary Actors:** Databroker
+
+**Priority:** High
+
+**Preconditions:**
+
+**Special requirements:**
+
+**Assumptions:**
+
+**Postconditions:**
+
+**Sequence diagram:**
+![Signal Consumer Actuate](../diagrams/consumer_provider_list_metadata.svg)
+
+**Basic Flow:**
+1. Signal Consumer calls list metadata to get all signals metadata.
+2. Provider calls list metadata to get all signals metadata.
+3. Use case finishes.
+
+**Exceptions:**
+
+## UC6
+ Title: Signal Consumer and Provider get server info
+
+ Status: 🟢
+
+ Description: Signal Consumer and Provider get server info
+
+**Primary Actor:** Signal Consumer, Provider
+
+**Secondary Actors:** Databroker
+
+**Priority:** High
+
+**Preconditions:**
+
+**Special requirements:**
+
+**Assumptions:**
+
+**Postconditions:**
+
+**Sequence diagram:**
+![Signal Consumer Actuate](../diagrams/consumer_provider_server_info.svg)
+
+**Basic Flow:**
+1. Signal Consumer calls get server info.
+2. Provider calls get server info.
+3. Use case finishes.
+
+**Exceptions:**
+
+
+## UC7
+ Title: Provider publishes signal values at high frequency.
+
+ Status: 🟢
+
+ Description: Provider publishes signals values to Databroker at a high frequency according to the cycle time from the Vehicle Network.
+
+**Primary Actor:** Provider
+
+**Secondary Actors:** Databroker, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+* Provider can read from Vehicle Network.
+
+**Special requirements:**
+* Provider publishes signal values to Databroker atomically.
+
+**Assumptions:**
+* Provider has a list of valid signals with their ids(int32) that are present on Databroker.
+
+**Postconditions:**
+* Databroker stores on database all the sensor values.
+
+**Sequence diagram:**
+![Provider publish signals](../diagrams/provider_publish.svg)
+
+**Basic Flow:**
+1. Provider starts publishing at high frequency sensor and actuator values (by their ids(int32)) received from Vehicle network.
+2. Databroker sends publish response back to the provider.
+3. Use case finishes.
+
+**Alternative Flows:**
+
+**Exceptions:**
+
+## UC8
+ Title: Forward actuation request to Vehicle Network
+
+ Status: 🟢
+
+ Description: Provider receives an actuator request to change an actuator value on the Vehicle Network.
+
+**Primary Actor:** Provider
+
+**Secondary Actors:** Databroker, Vehicle Network
+
+**Priority:** High
+
+**Preconditions:**
+
+**Special requirements:**
+
+**Assumptions:**
+* Provider can establish a connection with the Vehicle Network.
+* There is an instance of Databroker up and running.
+* Signal Consumer calls actuate with new actuator value.
+
+**Postconditions:**
+
+**Sequence diagram:**
+![Provider received actuation](../diagrams/provider_recv_actuation.svg)
+
+
+**Basic Flow:**
+1. Provider opens bidirectional stream and sends a claim actuators request.
+2. Databroker stores the claim request.
+3. Databroker forwards the actuation request to the corresponding provider.
+4. Provider sends the actuation request to the Vehicle Network.
+5. Provider sends ack response back to Databroker.
+6. Use case finishes.
+
+**Exceptions:**
diff --git a/doc/protocol.md b/doc/protocol.md
new file mode 100644
index 00000000..eafe4b1f
--- /dev/null
+++ b/doc/protocol.md
@@ -0,0 +1,77 @@
+# Supported protocols in Kuksa Databroker
+
+This file contains an overview of the protocols supported by Kuksa Databroker.
+To be able to understand the differences between protocols it is required to understand the data handling in Kuksa Databroker. The Databroker uses datapoints defined by the [COVESA VSS syntax](https://github.com/COVESA/vehicle_signal_specification), and handles them from three perspectives:
+
+Perspective | Meaning | Set Supported | Get Supported | Subscribe Supported
+-------------------|---------|---------------|---------------|--------------------
+Current Value | Current value of a property, for example current window position | Yes | Yes, latest value is stored but not persisted in Databroker | Yes
+Target Value | Wanted value of a property, for example wanted window position | Yes (for VSS actuator)| Yes, latest value is stored but not persisted in Databroker | Yes
+Actuation Value | Wanted value of a property, for example wanted window position | Yes (for VSS actuator)| No, value is not stored in Databroker | Yes
+
+*Target Value* and *Actuation Value* are quite similar. They both represent the wanted value of a property, but they are handled as separate "channels" in the Databroker.
+This means that if someone provides a wanted value to Databroker using an Actuation method, only subscribers for Actuation will be notified.
+An API typically supports either *Target Value* or *Actuation Value*, not both!
+
+Use of the *Target Value* perspective is deprecated!
+
+## Overview
+
+This is an overview of the supported APIs, described using the perspectives above:
+
+| Protocol | Current Value - Set | Current Value - Get | Current Value - Subscribe | Target Value - Set | Target Value - Get | Target Value - Subscribe | Actuation Value - Set | Actuation Value - Get | Actuation Value - Subscribe
+| ------------------------ |-----|-----|-----|-----|-----|-----|-----|-----|-----|
+| gRPC (kuksa.val.v2) | Yes | Yes | Yes | No | No | No | Yes | No | Yes
+| gRPC (kuksa.val.v1) *Deprecated!* | Yes | Yes | Yes | Yes | Yes | Yes | No | No | No
+| gRPC (sdv.databroker.v1) *Deprecated!* | Yes | Yes | Yes | Yes | No | No | No | No | No
+| VISS v2 | No | Yes | Yes | Yes | No | No | No | No | No
+
+In general it is possible to mix protocols in a deployment, as long as the difference concerning Target/Actuation values are observed.
+That means, if you want to manage the wanted value of a Datapoint in the system, you must decide if you should use protocols that support the *Target Value* perspective or protocols that support the *Actuation Value* perspective for those Datapoints.
+
+## `kuksa.val.v2` gRPC Protocol
+
+The `kuksa.val.v2` is the newest protocol supported by Kuksa Databroker, and the only protocol which may be further developed.
+It was created as the `kuksa.val.v1` protocol was not optimal from performance perspective.
+For more information see the `kuksa.val.v2` [documentation](../proto/kuksa/val/v2/README.md).
+
+## `kuksa.val.v1` gRPC Protocol
+
+This is the predecessor to `kuksa.val.v2`.
+For more information see the `kuksa.val.v1` [documentation](../proto/kuksa/val/v1/README.md).
+
+## `sdv.databroker.v1` gRPC Protocol
+
+This is the predecessor to `kuksa.val.v1`.
+For more information see the `sdv.databroker.v1` [documentation](../proto/sdv/databroker/v1/README.md).
+
+To enable the legacy `sdv.databroker.v1` API you must start Databroker with the `--enable-databroker-v1` argument.
+
+## VISS v2
+
+KUKSA databroker aims to provide a standards compliant implementation of [VISS](https://github.com/COVESA/vehicle-information-service-specification) v2 (using the websocket transport).
+
+It supports authorization using the access token format specified in [authorization.md](authorization.md).
+
+VISSv2 support in databroker is included by building it with the `viss` feature flag.
+
+```shell
+$ cargo build --features viss
+```
+
+The `enable-viss` flag must be provided at startup in order to enable the VISSv2 websocket interface.
+
+```shell
+$ databroker --enable-viss
+```
+
+The arguments `--viss-address` and `--viss-port` can be used if you want to use a different address or port than default for VISS.
+If not specified, the address `127.0.0.1` will be used unless otherwise specified with `--address`, and the port 8090 will be used.
+
+Using kuksa-client, the VISSv2 interface of databroker is available using the `ws` protocol in the uri, i.e.:
+
+```shell
+$ kuksa-client ws://127.0.0.1:8090
+```
+
+TLS is currently not supported.
diff --git a/doc/protocol/README.md b/doc/protocol/README.md
deleted file mode 100644
index e2c7fe1b..00000000
--- a/doc/protocol/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-## Supported protocols
-
-This file contains an overview what the KUKSA Server and databroker each supports. It focuses on gRPC and VISS support and also what feeders are supported.
-
-| Protocol | KUKSA server | KUKSA databroker |
-| ------------------------ | :----------: | :--------------: |
-| VISS V1 | - | - |
-| VISS V2 | x/- | x/- |
-| gRPC (kuksa) | x | - |
-| gRPC (kuksa.val.v1) | - | x |
-| gRPC (sdv.databroker.v1) | - | x |
-
-x = supported; x/- = partially supported; - = not supported
-
-### VISSv2 support (websocket transport)
-
-| Feature | KUKSA server | KUKSA databroker |
-| ------------------------- | :-------------: | :--------------: |
-| Read | | |
-| - Authorized Read | x1,2 | x |
-| - Search Read | - | - |
-| - History Read | - | - |
-| - Static Metadata Read | - | x |
-| - Dynamic Metadata Read | - | - |
-| Update | | |
-| - Authorized Update | x1,2 | x |
-| Subscribe | | |
-| - Authorized Subscribe | x1,2 | x |
-| - Curve Logging Subscribe | - | - |
-| - Range Subscribe | - | - |
-| - Change Subscribe | - | - |
-| Unsubscribe | x | x |
-| Subscription | x | x |
-| Error messages | x | x |
-| Timestamps | x | x |
-
-x = supported
-
-x1 Authorization is done using a non-standard standalone call which is incompatible with standards compliant clients.
-
-x2 Relies on the non-standard `attribute` values which doesn't work with standards compliant clients.
-
-For a more detailed view of the supported JSON-schemas [click here](https://github.com/eclipse/kuksa.val/blob/master/kuksa-val-server/include/VSSRequestJsonSchema.hpp)
-
-### VISSv2 in KUKSA Databroker
-
-KUKSA databroker aims to provide a standards compliant implementation of VISSv2 (using the websocket transport).
-
-It supports authorization using the access token format specified in [authorization.md](../authorization.md).
-
-VISSv2 support in databroker is included by building it with the `viss` feature flag.
-
-```shell
-$ cargo build --features viss
-```
-
-The `enable-viss` flag must be provided at startup in order to enable the VISSv2 websocket interface.
-
-```shell
-$ databroker --enable-viss
-```
-
-Using kuksa-client, the VISSv2 interface of databroker is available using the `ws` protocol in the uri, i.e.:
-
-```shell
-$ kuksa-client ws://127.0.0.1:8090
-```
-
-TLS is currently not supported.
-
-### KUKSA databroker gRPC API
-
-The VISS Standard is not applicable for gRPC protocols. Here is an overview what the gRPC API in KUKSA databroker is capable of:
-
-- Read: Reading VSS datapoints
- - Reading current or target value for actuators
- - Reading some metadata information from VSS datapoints
-- Write: Writing VSS datapoints
- - Writing sensor values
- - Writing current or target value for actuators
- - Soon: Writing some metadata information from VSS datapoints
-- Subscription: Subscribing VSS datapoints
- - Subscribing sensor values
- - Subscribing current or target value for actuators
diff --git a/doc/quickstart.md b/doc/quickstart.md
index 66e403e4..40477391 100644
--- a/doc/quickstart.md
+++ b/doc/quickstart.md
@@ -1,4 +1,4 @@
-# KUKSA.val Quickstart
+# KUKSA quickstart using kuksa.val.v1 API
The quickest possible way to get KUKSA.val up and running
diff --git a/doc/system-architecture.md b/doc/system-architecture.md
index caca40cb..f05a2d52 100644
--- a/doc/system-architecture.md
+++ b/doc/system-architecture.md
@@ -1,4 +1,4 @@
-# KUKSA System Components and Deployment
+# Kuksa System Components and Deployment
This document shows basic KUKSA deployments and gives examples for provider components.
diff --git a/doc/terminology.md b/doc/terminology.md
index 6ff94080..b515231e 100644
--- a/doc/terminology.md
+++ b/doc/terminology.md
@@ -2,23 +2,25 @@
This pages gives an overview about the terms we use, when talking about KUKSA components or systems built with KUKSA.
-* [Terminology](#terminology)
-* [KUKSA.val components](#kuksaval-components)
- * [VSS Server](#vss-server)
- * [Client](#client)
- * [Clients: VSS Consumers](#clients-vss-consumers)
- * [Clients: VSS Providers](#clients-vss-providers)
- * [data-provider](#data-provider)
- * [actuation-provider](#actuation-provider)
-* [Vehicle Signal Specification (VSS)](#vehicle-signal-specification-vss)
- * [Signal](#signal)
- * [Sensor](#sensor)
- * [Actuator](#actuator)
- * [Attribute](#attribute)
- * [Value](#value)
- * [Metadata](#metadata)
- * [Overlay](#overlay)
- * [Datapoint](#datapoint)
+- [Terminology](#terminology)
+- [KUKSA.val components](#kuksaval-components)
+ - [VSS Server](#vss-server)
+ - [Client](#client)
+ - [Clients: VSS Consumers](#clients-vss-consumers)
+ - [Clients: VSS Providers](#clients-vss-providers)
+ - [data-provider](#data-provider)
+ - [actuation-provider](#actuation-provider)
+- [Vehicle Signal Specification (VSS)](#vehicle-signal-specification-vss)
+ - [Signal](#signal)
+ - [Sensor](#sensor)
+ - [Actuator](#actuator)
+ - [Attribute](#attribute)
+ - [Value](#value)
+ - [for kuksa.val.v1](#for-kuksavalv1)
+ - [for kuksa.val.v2:](#for-kuksavalv2)
+ - [Metadata](#metadata)
+ - [Overlay](#overlay)
+ - [Datapoint](#datapoint)
# KUKSA.val components
@@ -64,9 +66,9 @@ A data-provider intends to make sure that the actual state of a vehicle is curre
Historically you also may still find the term "feeder", when referring to a data-provider.
### actuation-provider
-An actuation-provider is trying to ensure that the target value of a VSS actuator is reflected by the actual state of a vehicle.
+An actuation-provider is trying to ensure that the value (called `target_value` for `kuksa.val.v1`) of a VSS actuator is reflected by the actual state of a vehicle.
-To this end, an actuation-provider can subscribe to the target value of a VSS actuator in the server.
+To this end, an actuation-provider can subscribe to the value of a VSS actuator in the server.
If a VSS consumer requests the _desired_ state of the VSS actuator `Vehicle.Body.Trunk.Rear.IsOpen` to be `true`, the actuation-provider for `Vehicle.Body.Trunk.Rear.IsOpen` would try to interact with a vehicle's system trying to unlock and open the trunk.
While from the server's point of view, an actuation provider is just a client, actuation-providers can not be passive towards other in-vehicle systems. Therefore, considering safety in an actuation-provider or underlying systems is very important.
@@ -94,10 +96,16 @@ Signals of this type represent sensors in the vehicle. The value of a sensor typ
Actuators are signals that are used to control the desired value of a property. _Every Actuator in VSS is also a Sensor_. Some properties in a vehicle cannot change instantly. A typical example is position of a seat. Reading a value of an actuator shall return the current actual value (i.e. the sensor trait of that actuator signal), e.g. the current position of the seat, rather than the wanted/desired position. Example: `set Vehicle.Cabin.Seat.Row1.Pos1.Position`. [[Source]](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/sensor_actuator/)
### Attribute
-Attributes are signals that have a default value, specified by its default member in VSS. Like sensors, attribute values can also change, similar to sensor values. The latter can be useful for attribute values that are likely to change during the lifetime of the vehicle. However, attribute values should typically not change more than once per ignition cycle. Example: `Vehicle.VehicleIdentification.VIN`. [[Source]](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/attributes/)
+Attributes are signals that have a default value, specified by its default member in VSS. Like sensors, attribute values can also change, similar to sensor values. The latter can be useful for attribute values that are likely to change during the lifetime of the vehicle. However, attribute values should typically not change more than once per ignition cycle. Example: `Vehicle.VehicleIdentification.VIN`. [[Source]](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/attributes/)
## Value
-The value of a signal. The data type of the value must match the data type specified in the VSS entry for the signal. Currently KUKSA.val supports the _current_value_ for sensors, actuators and attributes as well as _target_value_ for actuators
+The value of a signal. The data type of the value must match the data type specified in the VSS entry for the signal.
+
+#### for kuksa.val.v1
+kuksa.val.v1 supports the _current_value_ for sensors, actuators and attributes as well as _target_value_ for actuators
+
+#### for kuksa.val.v2:
+There are no _current value_ or _target value_ concepts; there is simply a _data value_ for sensors, actuators and attributes.
## Metadata
Metadata of a VSS signal is data belonging to a signal, that is not the value. Standard VSS metadata are [unit](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/data_units/) and [datatype](https://covesa.github.io/vehicle_signal_specification/rule_set/data_entry/data_types/) as well as some human readable description or comments. Custom metadata entries may be defined in [VSS overlays](https://covesa.github.io/vehicle_signal_specification/rule_set/overlay/). Currently KUKSA.val does not support custom metadata.
diff --git a/doc/tls.md b/doc/tls.md
index e176f89e..dca1411b 100644
--- a/doc/tls.md
+++ b/doc/tls.md
@@ -1,4 +1,4 @@
-# KUKSA TLS concept
+# Kuksa TLS concept
This page describes the TLS support in KUKSA
diff --git a/doc/user_guide.md b/doc/user_guide.md
index 34cd877d..cc8fd644 100644
--- a/doc/user_guide.md
+++ b/doc/user_guide.md
@@ -14,11 +14,12 @@ The following sections provide information for running and configuring Databroke
Running Databroker
Enabling Authorization
Enabling TLS
- Query Syntax
+ APIs supported by Databroker
+ Current and target value concept vs data value concept
Using Custom VSS Data Entries
- Configuration Reference
Signal Change Types
- API
+ Configuration Reference
+ Troubleshooting
Known Limitations
@@ -37,12 +38,18 @@ Usage: databroker [OPTIONS]
Options:
--address Bind address [env: KUKSA_DATABROKER_ADDR=] [default: 127.0.0.1]
--port Bind port [env: KUKSA_DATABROKER_PORT=] [default: 55555]
+ --enable-unix-socket Listen on unix socket, default /run/kuksa/databroker.sock [env: KUKSA_DATABROKER_ENABLE_UNIX_SOCKET=]
+ --unix-socket Listen on unix socket, e.g. /tmp/kuksa/databroker.sock [env: KUKSA_DATABROKER_UNIX_SOCKET=]
--vss Populate data broker with VSS metadata from (comma-separated) list of files [env: KUKSA_DATABROKER_METADATA_FILE=]
--jwt-public-key Public key used to verify JWT access tokens
--disable-authorization Disable authorization
--insecure Allow insecure connections
--tls-cert TLS certificate file (.pem)
--tls-private-key TLS private key file (.key)
+ --enable-databroker-v1 Enable sdv.databroker.v1 (GRPC) service
+ --enable-viss Enable VISSv2 (websocket) service
+ --viss-address Bind address for VISS server, if argument is not provided, the value of --address is used [env: KUKSA_DATABROKER_VISS_ADDR=]
+ --viss-port VISS port [env: KUKSA_DATABROKER_VISS_PORT=] [default: 8090]
-h, --help Print help
-V, --version Print version
```
@@ -141,24 +148,30 @@ docker run --rm -it --network kuksa -v ./certificates:/opt/kuksa ghcr.io/eclipse
(back to top )
-## Query Syntax
+## APIs supported by Databroker
-Clients can subscribe to updates of data entries of interest using an SQL-based [query syntax](./QUERY.md).
+Kuksa Databroker provides [gRPC](https://grpc.io/) based API endpoints which can be used by
+clients to interact with the server.
-You can try it out using the `subscribe` command in the client:
+Kuksa Databroker implements the following service interfaces:
-```shell
-subscribe
-SELECT
- Vehicle.ADAS.ABS.IsError
-WHERE
- Vehicle.ADAS.ABS.IsEngaged
-```
+- Enabled on Databroker by default [kuksa.val.v2.VAL](../proto/kuksa/val/v2/val.proto) (recommended to use but still not supported by databroker-cli)
+- Enabled on Databroker by default [kuksa.val.v1.VAL](../proto/kuksa/val/v1/val.proto)
+- Disabled on Databroker by default, use `--enable-databroker-v1` to enable [sdv.databroker.v1.Broker](../proto/sdv/databroker/v1/broker.proto)
+- Disabled on Databroker by default, use `--enable-databroker-v1` to enable [sdv.databroker.v1.Collector](../proto/sdv/databroker/v1/collector.proto)
-```console
-[subscribe] OK
-Subscription is now running in the background. Received data is identified by [1].
-```
+Please visit [protocol documentation](protocol.md) for more information on the APIs.
+
+(back to top )
+
+## Current and target value concept vs data value concept.
+For some of the APIs (`sdv.databroker.v1` and `kuksa.val.v1`), the concepts of `current_value` and `target_value` were introduced to differentiate between the expected or desired value for an actuator and the current value published by the provider (both stored in the Databroker’s database).
+
+This concept has been removed in `kuksa.val.v2`. Now, there is only a single `data_value` for sensors and actuators, meaning that desired actuator values are simply forwarded from the Signal Consumer to the Databroker and then to the Provider. The Provider is responsible for updating the `data_value` on Databroker with the value received from the vehicle network.
+
+**Kuksa does not guarantee that the desired actuator value will be fully updated on the vehicle network; it only forwards actuator values from the Signal Consumer to the vehicle network.**
+
+**Do not mix different versions of APIs for providers and clients, as this will cause issues; kuksa.val.v2 is not backward compatible with sdv.databroker.v1 and kuksa.val.v1**
(back to top )
@@ -231,40 +244,20 @@ The change types currently apply on _current_ values, when subscribing to a _tar
The default configuration can be overridden by means of setting the corresponding environment variables and/or providing options on the command line as illustrated in the previous sections.
-| CLI option | Environment Variable | Default Value | Description |
-| ------------------------ | -------------------------------- | ------------- | ----------------------------------------------------------------------------------------------------- |
-| `--vss`, `--metadata` | `KUKSA_DATABROKER_METADATA_FILE` | | Populate data broker with metadata from file |
-| `--address` | `KUKSA_DATABROKER_ADDR` | `127.0.0.1` | Listen for rpc calls |
-| `--port` | `KUKSA_DATABROKER_PORT` | `55555` | Listen for rpc calls |
-| `--jwt-public-key` | | | Public key used to verify JWT access tokens |
-| `--tls-cert` | | | TLS certificate file (.pem) |
-| `--tls-private-key` | | | TLS private key file (.key) |
-| `--insecure` | | | Allow insecure connections (default unless `--tls-cert` and `--tls-private-key` options are provided) |
-
-(back to top )
-
-## API
-
-Kuksa Databroker provides [gRPC](https://grpc.io/) based API endpoints which can be used by
-clients to interact with the server.
-
-gRPC services are specified by means of `.proto` files which define the services and the data
-exchanged between server and client.
-
-[Tooling](https://grpc.io/docs/languages/) is available for most popular programming languages to create
-client stubs for invoking the services.
-
-The Databroker uses gRPC's default HTTP/2 transport and [protocol buffers](https://developers.google.com/protocol-buffers) for message serialization.
-The same `.proto` file can be used to generate server skeleton and client stubs for other transports and serialization formats as well.
-
-HTTP/2 is a binary replacement for HTTP/1.1 used for handling connections, multiplexing (channels) and providing a standardized way to add headers for authorization and TLS for encryption/authentication.
-It also supports bi-directional streaming between client and server.
-
-Kuksa Databroker implements the following service interfaces:
-
-- [kuksa.val.v1.VAL](../proto/kuksa/val/v1/val.proto)
-- [sdv.databroker.v1.Broker](../proto/sdv/databroker/v1/broker.proto)
-- [sdv.databroker.v1.Collector](../proto/sdv/databroker/v1/collector.proto)
+| CLI option | Environment Variable | Default Value | Description |
+| ------------------------- | -------------------------------- | --------------------------------------------------- | ----------------------------------------------------------------------------------------------------- |
+| `--vss`, `--metadata` | `KUKSA_DATABROKER_METADATA_FILE` | | Populate data broker with metadata from file |
+| `--address` | `KUKSA_DATABROKER_ADDR` | `127.0.0.1` | Listen for rpc calls |
+| `--port` | `KUKSA_DATABROKER_PORT` | `55555` | Listen for rpc calls |
+| `--enable-unix-socket` | `KUKSA_DATABROKER_ENABLE_UNIX_SOCKET` | | Listen on unix socket, default `/run/kuksa/databroker.sock` |
+| `--unix-socket` | `KUKSA_DATABROKER_UNIX_SOCKET` | | Listen on unix socket, e.g. `/tmp/kuksa/databroker.sock` |
+| `--jwt-public-key` | | | Public key used to verify JWT access tokens |
+| `--tls-cert` | | | TLS certificate file (.pem) |
+| `--tls-private-key` | | | TLS private key file (.key) |
+| `--disable-authorization` | | `true` | Disable authorization |
+| `--insecure` | | | Allow insecure connections (default unless `--tls-cert` and `--tls-private-key` options are provided) |
+| `--worker-threads` | `KUKSA_WORKER_THREADS` | as many threads as cores are detected on the system | How many worker threads will be spawned by the tokio runtime. |
+| `--enable-databroker-v1` | | `false` | Enable sdv.databroker.v1 (GRPC) service |
(back to top )
diff --git a/doc/wildcard_matching.md b/doc/wildcard_matching.md
index 640cfb08..15b1fa1c 100644
--- a/doc/wildcard_matching.md
+++ b/doc/wildcard_matching.md
@@ -1,4 +1,8 @@
-### Matching rules
+# Wildcard Matching rules
+
+*Note! This document applies to `sdv.databroker.v1` and `kuksa.val.v1`!*
+*It also currently applies to `root` in `ListMetadata` in `kuksa.val.v2` but that may change in the future.*
+
* An empty pattern "" will match any signal.
* A pattern without any asterisk - a path in other words - matches either a signal directly or any signal that is a direct or indirect child of the branch with that path.
diff --git a/jwt/README.md b/jwt/README.md
index 13ba5eb1..f7e29d70 100644
--- a/jwt/README.md
+++ b/jwt/README.md
@@ -9,8 +9,8 @@ For more information on token format see [documentation](../doc/authorization.md
## Available tokens
-* `actuate-provide-all.token` - gives access to set target value and actual value for all signals
-* `provide-all.token` - gives access to set actual value for all signals, but not target value
+* `actuate-provide-all.token` - gives access to set value and actual value for all signals
+* `provide-all.token` - gives access to set actual value for all signals, but not value
* `read-all.token` - gives access to read actual and current value for all signals
* `provide-vehicle-speed.token` - gives access to write and read actual value for Vehicle.Speed. Does not give access to other signals
* `read-vehicle-speed.token` - gives access to read actual value for Vehicle.Speed. Does not give access to other signals
diff --git a/lib/Cargo.lock b/lib/Cargo.lock
index e688f626..e56be362 100644
--- a/lib/Cargo.lock
+++ b/lib/Cargo.lock
@@ -4,9 +4,9 @@ version = 3
[[package]]
name = "addr2line"
-version = "0.24.1"
+version = "0.24.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375"
+checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
dependencies = [
"gimli",
]
@@ -28,15 +28,15 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.87"
+version = "1.0.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8"
+checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"
[[package]]
name = "async-stream"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
dependencies = [
"async-stream-impl",
"futures-core",
@@ -45,9 +45,9 @@ dependencies = [
[[package]]
name = "async-stream-impl"
-version = "0.3.5"
+version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
@@ -56,9 +56,9 @@ dependencies = [
[[package]]
name = "async-trait"
-version = "0.1.82"
+version = "0.1.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1"
+checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
dependencies = [
"proc-macro2",
"quote",
@@ -67,9 +67,9 @@ dependencies = [
[[package]]
name = "autocfg"
-version = "1.3.0"
+version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
+checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
[[package]]
name = "autotools"
@@ -146,12 +146,6 @@ version = "0.21.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
-[[package]]
-name = "base64"
-version = "0.22.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
-
[[package]]
name = "bitflags"
version = "1.3.2"
@@ -172,15 +166,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
-version = "1.7.1"
+version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50"
+checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da"
[[package]]
name = "cc"
-version = "1.1.18"
+version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b62ac837cdb5cb22e10a256099b4fc502b1dfe560cb282963a974d7abd80e476"
+checksum = "fd9de9f2205d5ef3fd67e685b0df337994ddd4495e2a28d185500d0e1edfea47"
dependencies = [
"shlex",
]
@@ -193,7 +187,7 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "databroker-proto"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"prost",
"prost-types",
@@ -226,9 +220,9 @@ dependencies = [
[[package]]
name = "fastrand"
-version = "2.1.1"
+version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6"
+checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4"
[[package]]
name = "fixedbitset"
@@ -244,36 +238,36 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "futures-channel"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
+checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
"futures-core",
]
[[package]]
name = "futures-core"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
+checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-sink"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"
+checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
[[package]]
name = "futures-task"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
+checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
[[package]]
name = "futures-util"
-version = "0.3.30"
+version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
+checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-core",
"futures-task",
@@ -294,9 +288,9 @@ dependencies = [
[[package]]
name = "gimli"
-version = "0.31.0"
+version = "0.31.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64"
+checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "h2"
@@ -310,7 +304,7 @@ dependencies = [
"futures-sink",
"futures-util",
"http",
- "indexmap 2.5.0",
+ "indexmap 2.6.0",
"slab",
"tokio",
"tokio-util",
@@ -325,9 +319,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
[[package]]
name = "hashbrown"
-version = "0.14.5"
+version = "0.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
+checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289"
[[package]]
name = "heck"
@@ -365,9 +359,9 @@ dependencies = [
[[package]]
name = "httparse"
-version = "1.9.4"
+version = "1.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9"
+checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946"
[[package]]
name = "httpdate"
@@ -377,9 +371,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "hyper"
-version = "0.14.30"
+version = "0.14.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9"
+checksum = "8c08302e8fa335b151b788c775ff56e7a03ae64ff85c548ee820fecb70356e85"
dependencies = [
"bytes",
"futures-channel",
@@ -423,12 +417,12 @@ dependencies = [
[[package]]
name = "indexmap"
-version = "2.5.0"
+version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5"
+checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da"
dependencies = [
"equivalent",
- "hashbrown 0.14.5",
+ "hashbrown 0.15.2",
]
[[package]]
@@ -442,13 +436,13 @@ dependencies = [
[[package]]
name = "itoa"
-version = "1.0.11"
+version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674"
[[package]]
name = "kuksa"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"databroker-proto",
"http",
@@ -460,7 +454,7 @@ dependencies = [
[[package]]
name = "kuksa-common"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"databroker-proto",
"http",
@@ -471,7 +465,7 @@ dependencies = [
[[package]]
name = "kuksa-sdv"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
dependencies = [
"databroker-proto",
"http",
@@ -483,9 +477,9 @@ dependencies = [
[[package]]
name = "libc"
-version = "0.2.158"
+version = "0.2.165"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+checksum = "fcb4d3d38eab6c5239a362fa8bae48c03baf980a6e7079f063942d563ef3533e"
[[package]]
name = "linux-raw-sys"
@@ -546,18 +540,18 @@ checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03"
[[package]]
name = "object"
-version = "0.36.4"
+version = "0.36.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a"
+checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e"
dependencies = [
"memchr",
]
[[package]]
name = "once_cell"
-version = "1.19.0"
+version = "1.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
+checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775"
[[package]]
name = "percent-encoding"
@@ -572,23 +566,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
dependencies = [
"fixedbitset",
- "indexmap 2.5.0",
+ "indexmap 2.6.0",
]
[[package]]
name = "pin-project"
-version = "1.1.5"
+version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3"
+checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
-version = "1.1.5"
+version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965"
+checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c"
dependencies = [
"proc-macro2",
"quote",
@@ -597,9 +591,9 @@ dependencies = [
[[package]]
name = "pin-project-lite"
-version = "0.2.14"
+version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
+checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff"
[[package]]
name = "pin-utils"
@@ -618,9 +612,9 @@ dependencies = [
[[package]]
name = "prettyplease"
-version = "0.2.22"
+version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba"
+checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033"
dependencies = [
"proc-macro2",
"syn",
@@ -628,9 +622,9 @@ dependencies = [
[[package]]
name = "proc-macro2"
-version = "1.0.86"
+version = "1.0.92"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0"
dependencies = [
"unicode-ident",
]
@@ -738,9 +732,9 @@ dependencies = [
[[package]]
name = "regex"
-version = "1.10.6"
+version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
@@ -750,9 +744,9 @@ dependencies = [
[[package]]
name = "regex-automata"
-version = "0.4.7"
+version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908"
dependencies = [
"aho-corasick",
"memchr",
@@ -761,9 +755,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
-version = "0.8.4"
+version = "0.8.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c"
[[package]]
name = "ring"
@@ -788,9 +782,9 @@ checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f"
[[package]]
name = "rustix"
-version = "0.38.36"
+version = "0.38.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36"
+checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6"
dependencies = [
"bitflags 2.6.0",
"errno",
@@ -815,25 +809,24 @@ dependencies = [
[[package]]
name = "rustls-pemfile"
-version = "2.1.3"
+version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425"
+checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50"
dependencies = [
- "base64 0.22.1",
"rustls-pki-types",
]
[[package]]
name = "rustls-pki-types"
-version = "1.8.0"
+version = "1.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0"
+checksum = "16f1201b3c9a7ee8039bcadc17b7e605e2945b27eee7631788c1bd2b0643674b"
[[package]]
name = "rustls-webpki"
-version = "0.102.7"
+version = "0.102.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56"
+checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9"
dependencies = [
"ring",
"rustls-pki-types",
@@ -842,24 +835,24 @@ dependencies = [
[[package]]
name = "rustversion"
-version = "1.0.17"
+version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
+checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248"
[[package]]
name = "serde"
-version = "1.0.210"
+version = "1.0.215"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
+checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
-version = "1.0.210"
+version = "1.0.215"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
+checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0"
dependencies = [
"proc-macro2",
"quote",
@@ -905,9 +898,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
[[package]]
name = "syn"
-version = "2.0.77"
+version = "2.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed"
+checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e"
dependencies = [
"proc-macro2",
"quote",
@@ -922,9 +915,9 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
[[package]]
name = "tempfile"
-version = "3.12.0"
+version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64"
+checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c"
dependencies = [
"cfg-if",
"fastrand",
@@ -935,9 +928,9 @@ dependencies = [
[[package]]
name = "tokio"
-version = "1.40.0"
+version = "1.41.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998"
+checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33"
dependencies = [
"backtrace",
"bytes",
@@ -1015,7 +1008,7 @@ dependencies = [
"async-stream",
"async-trait",
"axum",
- "base64 0.21.7",
+ "base64",
"bytes",
"h2",
"http",
@@ -1105,9 +1098,9 @@ dependencies = [
[[package]]
name = "tracing-core"
-version = "0.1.32"
+version = "0.1.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
+checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c"
dependencies = [
"once_cell",
]
@@ -1120,9 +1113,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "unicode-ident"
-version = "1.0.12"
+version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83"
[[package]]
name = "untrusted"
diff --git a/lib/common/Cargo.toml b/lib/common/Cargo.toml
index 132a1a37..cdb532bc 100644
--- a/lib/common/Cargo.toml
+++ b/lib/common/Cargo.toml
@@ -13,7 +13,7 @@
[package]
name = "kuksa-common"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
authors = ["Eclipse KUKSA Project"]
edition = "2021"
license = "Apache-2.0"
diff --git a/lib/kuksa/Cargo.toml b/lib/kuksa/Cargo.toml
index acad09b6..c1796cb8 100644
--- a/lib/kuksa/Cargo.toml
+++ b/lib/kuksa/Cargo.toml
@@ -13,7 +13,7 @@
[package]
name = "kuksa"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
authors = ["Eclipse KUKSA Project"]
edition = "2021"
license = "Apache-2.0"
diff --git a/lib/sdv/Cargo.toml b/lib/sdv/Cargo.toml
index f2fe2e85..a8a3b702 100644
--- a/lib/sdv/Cargo.toml
+++ b/lib/sdv/Cargo.toml
@@ -13,7 +13,7 @@
[package]
name = "kuksa-sdv"
-version = "0.4.7-dev.0"
+version = "0.6.0-dev.0"
authors = ["Eclipse KUKSA Project"]
edition = "2021"
license = "Apache-2.0"
diff --git a/proto/kuksa/val/v1/README.md b/proto/kuksa/val/v1/README.md
index 9baba4b0..a93e2e42 100644
--- a/proto/kuksa/val/v1/README.md
+++ b/proto/kuksa/val/v1/README.md
@@ -1,6 +1,6 @@
# kuksa.val.v1 protobuf API
-This directory contain a Protobuf API supported by KUKSA.val Databroker, KUKSA.val Python Client and KUKSA.val Go Client.
+This directory contain a Protobuf API supported by KUKSA Databroker.
-This API is under development and will eventually replace the
-[sdv.databroker.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/sdv/databroker/v1) API.
+This API is deprecated. It is recommended to use
+the [kuksa.val.v2](../v2/val.proto).
diff --git a/proto/kuksa/val/v2/README.md b/proto/kuksa/val/v2/README.md
new file mode 100644
index 00000000..23f57247
--- /dev/null
+++ b/proto/kuksa/val/v2/README.md
@@ -0,0 +1,7 @@
+# kuksa.val.v2 protobuf API
+
+This directory contain a Protobuf API supported by KUKSA Databroker.
+
+This API replaces:
+[kuksa.val.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/kuksa/val/v1) API and
+[sdv.databroker.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/sdv/databroker/v1) API.
diff --git a/proto/kuksa/val/v2/types.proto b/proto/kuksa/val/v2/types.proto
new file mode 100644
index 00000000..8955f6de
--- /dev/null
+++ b/proto/kuksa/val/v2/types.proto
@@ -0,0 +1,188 @@
+/********************************************************************************
+ * Copyright (c) 2024 Contributors to the Eclipse Foundation
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Apache License 2.0 which is available at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ ********************************************************************************/
+
+syntax = "proto3";
+// Please do not add optional fields due to older proto3 versions limitations
+
+package kuksa.val.v2;
+import "google/protobuf/timestamp.proto";
+
+option go_package = "kuksa/val/v2";
+
+// A Datapoint represents a timestamped value.
+// The 'value' field can be explicitly 'None', meaning the Datapoint exists but no value is present.
+message Datapoint {
+ google.protobuf.Timestamp timestamp = 1; // The timestamp of the datapoint.
+ Value value = 2; // The value associated with the timestamp. If no value is present, this field can be 'None'.
+}
+
+message Value {
+ oneof typed_value {
+ string string = 11;
+ bool bool = 12;
+ sint32 int32 = 13;
+ sint64 int64 = 14;
+ uint32 uint32 = 15;
+ uint64 uint64 = 16;
+ float float = 17;
+ double double = 18;
+ StringArray string_array = 21;
+ BoolArray bool_array = 22;
+ Int32Array int32_array = 23;
+ Int64Array int64_array = 24;
+ Uint32Array uint32_array = 25;
+ Uint64Array uint64_array = 26;
+ FloatArray float_array = 27;
+ DoubleArray double_array = 28;
+ }
+}
+
+message SignalID {
+ oneof signal {
+ // Numeric identifier to the signal
+ // As of today Databroker assigns arbitrary unique numbers to each registered signal
+ // at startup, meaning that identifiers may change after restarting Databroker.
+ // A mechanism for static identifiers may be introduced in the future.
+ int32 id = 1;
+ // Full VSS-style path to a specific signal, like "Vehicle.Speed"
+ // Wildcards and paths to branches are not supported.
+ // The given path must be known by the Databroker.
+ string path = 2;
+ }
+}
+
+message Error {
+ ErrorCode code = 1;
+ string message = 2;
+}
+
+enum ErrorCode {
+ ERROR_CODE_UNSPECIFIED = 0; // Default value, never to be explicitly set,
+ ERROR_CODE_OK = 1;
+ ERROR_CODE_INVALID_ARGUMENT = 2;
+ ERROR_CODE_NOT_FOUND = 3;
+ ERROR_CODE_PERMISSION_DENIED = 4;
+}
+
+message Metadata {
+ // ID field
+ int32 id = 10;
+
+ // Data type
+ // The VSS data type of the entry (i.e. the value, min, max etc).
+ //
+ // NOTE: protobuf doesn't have int8, int16, uint8 or uint16 which means
+ // that these values must be serialized as int32 and uint32 respectively.
+ DataType data_type = 11;
+
+ // Entry type
+ EntryType entry_type = 12;
+
+ // Description
+ // Describes the meaning and content of the entry.
+ string description = 13;
+
+ // Comment
+ // A comment can be used to provide additional informal information
+ // on a entry.
+ string comment = 14;
+
+ // Deprecation
+ // Whether this entry is deprecated. Can contain recommendations of what
+ // to use instead.
+ string deprecation = 15;
+
+ // Unit
+ // The unit of measurement
+ string unit = 16;
+
+ // Value restrictions checked/enforced by Databroker
+ Value allowed_values = 17; // Must be of array type
+ Value min = 18;
+ Value max = 19;
+}
+
+// VSS Data type of a signal
+//
+// Protobuf doesn't support int8, int16, uint8 or uint16.
+// These are mapped to int32 and uint32 respectively.
+//
+enum DataType {
+ DATA_TYPE_UNSPECIFIED = 0;
+ DATA_TYPE_STRING = 1;
+ DATA_TYPE_BOOLEAN = 2;
+ DATA_TYPE_INT8 = 3;
+ DATA_TYPE_INT16 = 4;
+ DATA_TYPE_INT32 = 5;
+ DATA_TYPE_INT64 = 6;
+ DATA_TYPE_UINT8 = 7;
+ DATA_TYPE_UINT16 = 8;
+ DATA_TYPE_UINT32 = 9;
+ DATA_TYPE_UINT64 = 10;
+ DATA_TYPE_FLOAT = 11;
+ DATA_TYPE_DOUBLE = 12;
+ DATA_TYPE_TIMESTAMP = 13;
+ DATA_TYPE_STRING_ARRAY = 20;
+ DATA_TYPE_BOOLEAN_ARRAY = 21;
+ DATA_TYPE_INT8_ARRAY = 22;
+ DATA_TYPE_INT16_ARRAY = 23;
+ DATA_TYPE_INT32_ARRAY = 24;
+ DATA_TYPE_INT64_ARRAY = 25;
+ DATA_TYPE_UINT8_ARRAY = 26;
+ DATA_TYPE_UINT16_ARRAY = 27;
+ DATA_TYPE_UINT32_ARRAY = 28;
+ DATA_TYPE_UINT64_ARRAY = 29;
+ DATA_TYPE_FLOAT_ARRAY = 30;
+ DATA_TYPE_DOUBLE_ARRAY = 31;
+ DATA_TYPE_TIMESTAMP_ARRAY = 32;
+}
+
+// Entry type
+enum EntryType {
+ ENTRY_TYPE_UNSPECIFIED = 0;
+ ENTRY_TYPE_ATTRIBUTE = 1;
+ ENTRY_TYPE_SENSOR = 2;
+ ENTRY_TYPE_ACTUATOR = 3;
+}
+
+message StringArray {
+ repeated string values = 1;
+}
+
+message BoolArray {
+ repeated bool values = 1;
+}
+
+message Int32Array {
+ repeated sint32 values = 1;
+}
+
+message Int64Array {
+ repeated sint64 values = 1;
+}
+
+message Uint32Array {
+ repeated uint32 values = 1;
+}
+
+message Uint64Array {
+ repeated uint64 values = 1;
+}
+
+message FloatArray {
+ repeated float values = 1;
+}
+
+message DoubleArray {
+ repeated double values = 1;
+}
diff --git a/proto/kuksa/val/v2/val.proto b/proto/kuksa/val/v2/val.proto
new file mode 100644
index 00000000..2756ac19
--- /dev/null
+++ b/proto/kuksa/val/v2/val.proto
@@ -0,0 +1,332 @@
+/********************************************************************************
+ * Copyright (c) 2024 Contributors to the Eclipse Foundation
+ *
+ * See the NOTICE file(s) distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * This program and the accompanying materials are made available under the
+ * terms of the Apache License 2.0 which is available at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ ********************************************************************************/
+
+syntax = "proto3";
+// Please do not add optional fields due to older proto3 versions limitations
+
+package kuksa.val.v2;
+
+option go_package = "kuksa/val/v2";
+
+import "kuksa/val/v2/types.proto";
+
+service VAL {
+ // Get the latest value of a signal
+ // If the signal exist but does not have a valid value
+ // a DataPoint where value is None shall be returned.
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if the requested signal doesn't exist
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied
+ // INVALID_ARGUMENT if the request is empty or provided path is too long
+ // - MAX_REQUEST_PATH_LENGTH: usize = 1000;
+ //
+ rpc GetValue(GetValueRequest) returns (GetValueResponse);
+
+ // Get the latest values of a set of signals.
+ // The returned list of data points has the same order as the list of the request.
+ // If a requested signal has no value a DataPoint where value is None will be returned.
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the requested signals doesn't exist.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied for any of the requested signals.
+ // INVALID_ARGUMENT if the request is empty or provided path is too long
+ // - MAX_REQUEST_PATH_LENGTH: usize = 1000;
+ //
+ rpc GetValues(GetValuesRequest) returns (GetValuesResponse);
+
+ // Subscribe to a set of signals using string path parameters
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the signals are non-existant.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied for any of the signals.
+ // INVALID_ARGUMENT
+ // - if the request is empty or provided path is too long
+ // MAX_REQUEST_PATH_LENGTH: usize = 1000;
+ // - if buffer_size exceeds the maximum permitted
+ // MAX_BUFFER_SIZE: usize = 1000;
+ //
+ // When subscribing, Databroker shall immediately return the value for all
+ // subscribed entries.
+ // If a value isn't available when subscribing to a it, it should return None
+ //
+ // If a subscriber is slow to consume signals, messages will be buffered up
+ // to the specified buffer_size before the oldest messages are dropped.
+ //
+ rpc Subscribe(SubscribeRequest) returns (stream SubscribeResponse);
+
+ // Subscribe to a set of signals using i32 id parameters
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the signals are non-existant.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // PERMISSION_DENIED if access is denied for any of the signals.
+ // INVALID_ARGUMENT
+ // - if the request is empty or provided path is too long
+ // MAX_REQUEST_PATH_LENGTH: usize = 1000;
+ // - if buffer_size exceeds the maximum permitted
+ // MAX_BUFFER_SIZE: usize = 1000;
+ //
+ // When subscribing, Databroker shall immediately return the value for all
+ // subscribed entries.
+ // If a value isn't available when subscribing to a it, it should return None
+ //
+ // If a subscriber is slow to consume signals, messages will be buffered up
+ // to the specified buffer_size before the oldest messages are dropped.
+ //
+ rpc SubscribeById(SubscribeByIdRequest) returns (stream SubscribeByIdResponse);
+
+ // Actuate a single actuator
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if the actuator does not exist.
+ // PERMISSION_DENIED if access is denied for the actuator.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // UNAVAILABLE if there is no provider currently providing the actuator
+ // DATA_LOSS is there is a internal TransmissionFailure
+ // INVALID_ARGUMENT
+ // - if the provided path is not an actuator.
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the requested value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if the provided value is out of the min/max range specified
+ //
+ rpc Actuate(ActuateRequest) returns (ActuateResponse);
+
+ // Actuate simultaneously multiple actuators.
+ // If any error occurs, the entire operation will be aborted
+ // and no single actuator value will be forwarded to the provider.
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the actuators are non-existant.
+ // PERMISSION_DENIED if access is denied for any of the actuators.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // UNAVAILABLE if there is no provider currently providing an actuator
+ // DATA_LOSS is there is a internal TransmissionFailure
+ // INVALID_ARGUMENT
+ // - if any of the provided path is not an actuator.
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the requested value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if any of the provided actuators values are out of the min/max range specified
+ //
+ rpc BatchActuate(BatchActuateRequest) returns (BatchActuateResponse);
+
+ // List metadata of signals matching the request.
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if the specified root branch does not exist.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // INVALID_ARGUMENT if the provided path or wildcard is wrong.
+ //
+ rpc ListMetadata(ListMetadataRequest) returns (ListMetadataResponse);
+
+ // Publish a signal value. Used for low frequency signals (e.g. attributes).
+ //
+ // Returns (GRPC error code):
+ // NOT_FOUND if any of the signals are non-existant.
+ // PERMISSION_DENIED
+ // - if access is denied for any of the signals.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // INVALID_ARGUMENT
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the published value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if the published value is out of the min/max range specified
+ //
+ rpc PublishValue(PublishValueRequest) returns (PublishValueResponse);
+
+ // Open a stream used to provide actuation and/or publishing values using
+ // a streaming interface. Used to provide actuators and to enable high frequency
+ // updates of values.
+ //
+ // The open stream is used for request / response type communication between the
+ // provider and server (where the initiator of a request can vary).
+ //
+ // Errors:
+ // - Provider sends ProvideActuationRequest -> Databroker returns ProvideActuationResponse
+ // Returns (GRPC error code) and closes the stream call (strict case).
+ // NOT_FOUND if any of the signals are non-existant.
+ // PERMISSION_DENIED if access is denied for any of the signals.
+ // UNAUTHENTICATED if no credentials provided or credentials has expired
+ // ALREADY_EXISTS if a provider already claimed the ownership of an actuator
+ //
+ // - Provider sends PublishValuesRequest -> Databroker returns PublishValuesResponse upon error, and nothing upon success
+ // GRPC errors are returned as messages in the stream
+ // response with the signal id `map status = 2;` (permissive case)
+ // NOT_FOUND if a signal is non-existant.
+ // PERMISSION_DENIED
+ // - if access is denied for a signal.
+ // INVALID_ARGUMENT
+ // - if the data type used in the request does not match
+ // the data type of the addressed signal
+ // - if the published value is not accepted,
+ // e.g. if sending an unsupported enum value
+ // - if the published value is out of the min/max range specified
+ //
+ // - Databroker sends BatchActuateStreamRequest -> Provider shall return a BatchActuateStreamResponse,
+ // for every signal requested to indicate if the request was accepted or not.
+ // It is up to the provider to decide if the stream shall be closed,
+ // as of today Databroker will not react on the received error message.
+ //
+ rpc OpenProviderStream(stream OpenProviderStreamRequest) returns (stream OpenProviderStreamResponse);
+
+ // Get server information
+ rpc GetServerInfo(GetServerInfoRequest) returns (GetServerInfoResponse);
+}
+
+message GetValueRequest {
+ SignalID signal_id = 1;
+}
+
+message GetValueResponse {
+ Datapoint data_point = 1;
+}
+
+message GetValuesRequest {
+ repeated SignalID signal_ids = 1;
+}
+
+message GetValuesResponse {
+ repeated Datapoint data_points = 1;
+}
+
+message SubscribeRequest {
+ repeated string signal_paths = 1;
+
+ // Specifies the number of messages that can be buffered for
+ // slow subscribers before the oldest messages are dropped.
+ // Default (0) results in that only latest message is kept.
+ // Maximum value supported is implementation dependent.
+ uint32 buffer_size = 2;
+}
+
+message SubscribeResponse {
+ map entries = 1;
+}
+
+message SubscribeByIdRequest {
+ repeated int32 signal_ids = 1;
+
+ // Specifies the number of messages that can be buffered for
+ // slow subscribers before the oldest messages are dropped.
+ // Default (0) results in that only latest message is kept.
+ // Maximum value supported is implementation dependent.
+ uint32 buffer_size = 2;
+}
+
+message SubscribeByIdResponse {
+ map entries = 1;
+}
+
+message ActuateRequest {
+ SignalID signal_id = 1;
+ Value value = 2;
+}
+
+message ActuateResponse {
+}
+
+message BatchActuateRequest {
+ repeated ActuateRequest actuate_requests = 1;
+}
+
+message BatchActuateResponse {
+}
+
+message ListMetadataRequest {
+ // Root path to be used when listing metadata
+ // Shall correspond to a VSS branch, e.g. "Vehicle", "Vehicle.Cabin"
+ // Metadata for all signals under that branch will be returned unless filtered by filter.
+ // NOTE: Currently Databroker supports also signals and wildcards in root but that may
+ // be removed in a future release!
+ string root = 1;
+ // NOTE : Currently not considered by Databroker, all signals matching root are returned
+ string filter = 2;
+}
+
+message ListMetadataResponse {
+ repeated Metadata metadata = 1;
+}
+
+message PublishValueRequest {
+ SignalID signal_id = 1;
+ Datapoint data_point = 2;
+}
+
+message PublishValueResponse {
+}
+
+message PublishValuesRequest {
+ int32 request_id = 1; /// Unique request id for the stream that can be used to identify the response.
+ map data_points = 2;
+}
+
+message PublishValuesResponse {
+ int32 request_id = 1;
+ map status = 2;
+}
+
+message ProvideActuationRequest {
+ repeated SignalID actuator_identifiers = 1;
+}
+
+message ProvideActuationResponse {
+}
+
+message BatchActuateStreamRequest {
+ repeated ActuateRequest actuate_requests = 1;
+}
+
+// Message that shall be used by provider to indicate if an actuation request was accepted.
+message BatchActuateStreamResponse {
+ SignalID signal_id = 1;
+ Error error = 2;
+}
+
+message OpenProviderStreamRequest {
+ oneof action {
+ // Inform server of an actuator this provider provides.
+ ProvideActuationRequest provide_actuation_request = 1;
+ // Publish a value.
+ PublishValuesRequest publish_values_request = 2;
+ // Sent to acknowledge the acceptance of a batch actuate
+ // request.
+ BatchActuateStreamResponse batch_actuate_stream_response = 3;
+ }
+}
+
+message OpenProviderStreamResponse {
+ oneof action {
+ // Response to a provide actuator request.
+ ProvideActuationResponse provide_actuation_response = 1;
+ // Acknowledgement that a published value was received.
+ PublishValuesResponse publish_values_response = 2;
+ // Send a batch actuate request to a provider.
+ BatchActuateStreamRequest batch_actuate_stream_request = 3;
+ }
+}
+
+message GetServerInfoRequest {
+ // Nothing yet
+}
+
+message GetServerInfoResponse {
+ string name = 1;
+ string version = 2;
+ string commit_hash = 3;
+}
diff --git a/proto/sdv/databroker/v1/README.md b/proto/sdv/databroker/v1/README.md
index a669e5a3..2fd44c87 100644
--- a/proto/sdv/databroker/v1/README.md
+++ b/proto/sdv/databroker/v1/README.md
@@ -1,12 +1,8 @@
# sdv.databroker.v1 protobuf API
-This directory contain a Protobuf API supported by KUKSA.val Databroker.
+This directory contain a Protobuf API supported by KUKSA Databroker.
-As of today KUKSA.val Databroker supports both this API and the
-[kuksa.val.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/kuksa/val/v1) API.
-The [kuksa.val.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/kuksa/val/v1) API is the newer API and is still
-in development. It does not yet support all features supported by this API.
+To enable the legacy `sdv.databroker.v1` API you must start Databroker with the `--enable-databroker-v1` argument.
-This API may in the future be deprecated. It is recommended to use
-the [kuksa.val.v1](https://github.com/eclipse-kuksa/kuksa-databroker/tree/main/proto/kuksa/val/v1) API, unless you need
-functionality currently only provided by this API.
+This API is deprecated. It is recommended to use
+the [kuksa.val.v2](../../../kuksa/val/v2/val.proto).
diff --git a/proto/sdv/databroker/v1/types.proto b/proto/sdv/databroker/v1/types.proto
index 44988098..4c002192 100644
--- a/proto/sdv/databroker/v1/types.proto
+++ b/proto/sdv/databroker/v1/types.proto
@@ -12,6 +12,7 @@
********************************************************************************/
syntax = "proto3";
+// Please do not add optional fields due to older proto3 versions limitations
import "google/protobuf/timestamp.proto";
@@ -143,6 +144,20 @@ message Datapoint {
}
}
+message Metadata {
+ int32 id = 1;
+ EntryType entry_type = 2;
+ string name = 4;
+ DataType data_type = 5;
+ ChangeType change_type = 6; // CONTINUOUS or STATIC or ON_CHANGE
+ string description = 7;
+
+ // Value restrictions checked/enforced by Databroker.
+ Allowed allowed = 10;
+ ValueRestriction min = 11;
+ ValueRestriction max = 12;
+}
+
message Allowed {
oneof values {
StringArray string_values = 1;
@@ -155,15 +170,15 @@ message Allowed {
}
}
-message Metadata {
- int32 id = 1;
- EntryType entry_type = 2;
- string name = 4;
- DataType data_type = 5;
- ChangeType change_type = 6; // CONTINUOUS or STATIC or ON_CHANGE
- string description = 7;
-
- Allowed allowed = 10;
- // int32 min_update_hz = 10; // Only for CONTINUOUS
- // int32 max_update_hz = 11; // Only for CONTINUOUS
-};
+message ValueRestriction {
+ oneof typed_value {
+ string string = 1;
+ bool bool = 2;
+ sint32 int32 = 3;
+ sint64 int64 = 4;
+ uint32 uint32 = 5;
+ uint64 uint64 = 6;
+ float float = 7;
+ double double = 8;
+ }
+}