diff --git a/.github/workflows/build-dependencies.yml b/.github/workflows/build-dependencies.yml
index 0bed67de09..aa5910a039 100644
--- a/.github/workflows/build-dependencies.yml
+++ b/.github/workflows/build-dependencies.yml
@@ -32,13 +32,14 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Setup Go environment explicitly
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: "1.21"
+ go-version-file: 'go.mod'
check-latest: true
+ cache: false
- name: Build all dependencies
run: make deps
diff --git a/.github/workflows/build-then-deploy-ami.yml b/.github/workflows/build-then-deploy-ami.yml
index 4423d70659..3051380e1f 100644
--- a/.github/workflows/build-then-deploy-ami.yml
+++ b/.github/workflows/build-then-deploy-ami.yml
@@ -48,7 +48,7 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Environment version target
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> ${GITHUB_ENV}
@@ -89,10 +89,10 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Terraform action setup
- uses: hashicorp/setup-terraform@v2
+ uses: hashicorp/setup-terraform@v3
with:
terraform_version: 1.3.7
diff --git a/.github/workflows/check-data-format-changes.yml b/.github/workflows/check-data-format-changes.yml
new file mode 100644
index 0000000000..b9b2406c89
--- /dev/null
+++ b/.github/workflows/check-data-format-changes.yml
@@ -0,0 +1,50 @@
+# Copyright 2024 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+name: Check Data Format Changes Workflow
+
+on:
+ pull_request:
+ branches:
+ - master
+ - develop
+
+ push:
+ tags:
+ - 'v[0-9]+.[0-9]+.[0-9]+'
+ branches:
+ - master
+ - develop
+
+jobs:
+ check-data-format-changes:
+ name: Check data format changes job
+
+ runs-on: ubuntu-latest
+
+ steps:
+
+ - name: Checkout code into the directory
+ uses: actions/checkout@v4
+
+ - name: Setup Go environment explicitly
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: 'go.mod'
+ check-latest: true
+ cache: false
+
+ - name: Build dependencies
+ run: |
+ make deps:modules
+ make deps:test
+
+ - name: Run data format change detection tests
+ run: make test:changes
diff --git a/.github/workflows/check-documentation.yml b/.github/workflows/check-documentation.yml
new file mode 100644
index 0000000000..c4cf4ca792
--- /dev/null
+++ b/.github/workflows/check-documentation.yml
@@ -0,0 +1,105 @@
+# Copyright 2024 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+# This workflow checks that all documentation is up to date.
+# If any documentation is not up to date then this action will fail.
+name: Check Documentation Workflow
+
+on:
+ pull_request:
+ branches:
+ - master
+ - develop
+
+ push:
+ tags:
+ - 'v[0-9]+.[0-9]+.[0-9]+'
+ branches:
+ - master
+ - develop
+
+jobs:
+ check-cli-documentation:
+ name: Check cli documentation job
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code into the directory
+ uses: actions/checkout@v4
+
+ - name: Setup Go environment explicitly
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: 'go.mod'
+ check-latest: true
+ cache: false
+
+ - name: Try generating cli documentation
+ run: make docs:cli
+
+ - name: Check no new changes exist
+ uses: tj-actions/verify-changed-files@v20
+ with:
+ fail-if-changed: true
+ files: |
+ docs/website/references/cli
+
+ check-http-documentation:
+ name: Check http documentation job
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code into the directory
+ uses: actions/checkout@v4
+
+ - name: Setup Go environment explicitly
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: 'go.mod'
+ check-latest: true
+ cache: false
+
+ - name: Try generating http documentation
+ run: make docs:http
+
+ - name: Check no new changes exist
+ uses: tj-actions/verify-changed-files@v20
+ with:
+ fail-if-changed: true
+ files: |
+ docs/website/references/http
+
+ check-readme-toc:
+ name: Check readme toc job
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code into the directory
+ uses: actions/checkout@v4
+
+ - name: Setup Go environment explicitly
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: 'go.mod'
+ check-latest: true
+ cache: false
+
+ - name: Try generating readme toc
+ run: make toc
+
+ - name: Check no new changes exist
+ uses: tj-actions/verify-changed-files@v20
+ with:
+ fail-if-changed: true
+ files: |
+ README.md
diff --git a/.github/workflows/check-mocks.yml b/.github/workflows/check-mocks.yml
new file mode 100644
index 0000000000..5866ea699b
--- /dev/null
+++ b/.github/workflows/check-mocks.yml
@@ -0,0 +1,51 @@
+# Copyright 2024 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+# This workflow checks that all mocks are up to date.
+# If the mocks are not up to date then this action will fail.
+name: Check Mocks Workflow
+
+on:
+ pull_request:
+ branches:
+ - master
+ - develop
+
+ push:
+ tags:
+ - 'v[0-9]+.[0-9]+.[0-9]+'
+ branches:
+ - master
+ - develop
+
+jobs:
+ check-mocks:
+ name: Check mocks job
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code into the directory
+ uses: actions/checkout@v4
+
+ - name: Setup Go environment explicitly
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: 'go.mod'
+ check-latest: true
+ cache: false
+
+ - name: Try generating mocks
+ run: make mocks
+
+ - name: Check no new changes exist
+ uses: tj-actions/verify-changed-files@v20
+ with:
+ fail-if-changed: true
diff --git a/.github/workflows/check-tidy.yml b/.github/workflows/check-tidy.yml
new file mode 100644
index 0000000000..bbcf2d620b
--- /dev/null
+++ b/.github/workflows/check-tidy.yml
@@ -0,0 +1,60 @@
+# Copyright 2024 Democratized Data Foundation
+#
+# Use of this software is governed by the Business Source License
+# included in the file licenses/BSL.txt.
+#
+# As of the Change Date specified in that file, in accordance with
+# the Business Source License, use of this software will be governed
+# by the Apache License, Version 2.0, included in the file
+# licenses/APL.txt.
+
+# This workflow checks that go mod tidy command we have set for the specific
+# go version is not broken, for example `go mod tidy -go=1.21.3`. This
+# can cause some head scratching at times, so better catch this in the PR.
+#
+# In addition to that, this also checks that we are currently in a `tidy` state.
+name: Check Tidy Workflow
+
+on:
+ pull_request:
+ branches:
+ - master
+ - develop
+
+ push:
+ tags:
+ - 'v[0-9]+.[0-9]+.[0-9]+'
+ branches:
+ - master
+ - develop
+
+jobs:
+ check-tidy:
+ name: Check mod tidy job
+
+ runs-on: ubuntu-latest
+
+ steps:
+
+ - name: Checkout code into the directory
+ uses: actions/checkout@v4
+
+ - name: Setup Go environment explicitly
+ uses: actions/setup-go@v5
+ with:
+ go-version-file: 'go.mod'
+ check-latest: true
+ cache: false
+
+ # This checks mod tidy is not broken.
+ - name: Check mod tidy
+ run: make tidy
+
+ # This checks mod tidy is up to date.
+ - name: Check no new changes exist
+ uses: tj-actions/verify-changed-files@v20
+ with:
+ fail-if-changed: true
+ files: |
+ go.mod
+ go.sum
diff --git a/.github/workflows/check-vulnerabilities.yml b/.github/workflows/check-vulnerabilities.yml
index 67d806ab6e..6f1b2fd35f 100644
--- a/.github/workflows/check-vulnerabilities.yml
+++ b/.github/workflows/check-vulnerabilities.yml
@@ -36,4 +36,4 @@ jobs:
go-version-input: "1.21"
go-package: ./...
check-latest: true
- cache: true
+ cache: false
diff --git a/.github/workflows/combine-bot-prs.yml b/.github/workflows/combine-bot-prs.yml
index abe21143d8..bb861fbecd 100644
--- a/.github/workflows/combine-bot-prs.yml
+++ b/.github/workflows/combine-bot-prs.yml
@@ -49,7 +49,7 @@ jobs:
- name: Create combined pr
id: create-combined-pr
- uses: actions/github-script@v6
+ uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
diff --git a/.github/workflows/lint-then-benchmark.yml b/.github/workflows/lint-then-benchmark.yml
index c984cce3ef..1b1b1a73ba 100644
--- a/.github/workflows/lint-then-benchmark.yml
+++ b/.github/workflows/lint-then-benchmark.yml
@@ -51,17 +51,18 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
# Setting up Go explicitly is required for v3.0.0+ of golangci/golangci-lint-action.
- name: Setup Go environment explicitly
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: "1.21"
+ go-version-file: 'go.mod'
check-latest: true
+ cache: false
- name: Run the golangci-lint
- uses: golangci/golangci-lint-action@v3
+ uses: golangci/golangci-lint-action@v6
with:
# Required: the version of golangci-lint is required.
@@ -192,7 +193,7 @@ jobs:
steps:
- name: Checkout code
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Run the full bechmarking suite
if: needs.decide-benchmark-type.outputs.benchmark-type == 'FULL'
@@ -214,7 +215,7 @@ jobs:
if: |
github.event_name == 'push' &&
github.ref_name == 'develop'
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: bench-artifact-${{ github.sha }}
path: bench-artifact-${{ github.sha }}.txt
@@ -245,7 +246,7 @@ jobs:
if: |
github.event_name == 'pull_request' &&
github.base_ref == 'develop'
- uses: dawidd6/action-download-artifact@v2
+ uses: dawidd6/action-download-artifact@v6
with:
github_token: ${{ secrets.ONLY_DEFRADB_REPO_CI_PAT }}
workflow: lint-then-benchmark.yml
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index b482903cad..20f9128f59 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -19,9 +19,12 @@ on:
push:
permissions:
- # Allow read access to pull request (Required for the `only-new-issues` option.)
+ # Required for the `only-new-issues` option.
pull-requests: read
+ # Required for analysis.
contents: read
+ # Required to annotate code in the PR.
+ checks: write
jobs:
lint-go:
@@ -31,16 +34,18 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
+ # Setting up Go explicitly is required for v3.0.0+ of golangci/golangci-lint-action.
- name: Setup Go environment explicitly
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: "1.21"
+ go-version-file: 'go.mod'
check-latest: true
+ cache: false
- name: Run golangci-lint linter
- uses: golangci/golangci-lint-action@v3
+ uses: golangci/golangci-lint-action@v6
with:
# Required: the version of golangci-lint is required.
# Note: The version should not pick the patch version as the latest patch
@@ -75,7 +80,7 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Run yamllint linter
uses: ibiqlik/action-yamllint@v3
diff --git a/.github/workflows/preview-ami-with-terraform-plan.yml b/.github/workflows/preview-ami-with-terraform-plan.yml
index 25e975a247..40f4b1c948 100644
--- a/.github/workflows/preview-ami-with-terraform-plan.yml
+++ b/.github/workflows/preview-ami-with-terraform-plan.yml
@@ -49,7 +49,7 @@ jobs:
- name: Stop and notify the use of unprivileged flow or missing tokens
if: env.AWS_ACCESS_KEY_ID == '' || env.AWS_SECRET_ACCESS_KEY == ''
# Note: Fail this step, as we don't want unprivileged access doing these changes.
- uses: actions/github-script@v6
+ uses: actions/github-script@v7
with:
script: |
let unprivileged_warning =
@@ -64,10 +64,10 @@ jobs:
core.setFailed(unprivileged_warning)
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Terraform action setup
- uses: hashicorp/setup-terraform@v2
+ uses: hashicorp/setup-terraform@v3
with:
terraform_version: 1.3.7
@@ -93,7 +93,7 @@ jobs:
continue-on-error: true
- name: Comment results on pull request
- uses: actions/github-script@v6
+ uses: actions/github-script@v7
env:
TERRAFORM_PLAN_OUTPUT: "Terraform Plan Output:\n${{ steps.terraform-plan.outputs.stdout }}\n"
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index bf8332107a..2eee0900a9 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -31,16 +31,16 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup Go environment explicitly
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: "1.21"
+ go-version-file: 'go.mod'
check-latest: true
- cache: true
+ cache: false
- name: Apply tag
run: git tag ${{ github.event.inputs.tag }}
@@ -50,18 +50,18 @@ jobs:
- name: Set up QEMU
if: matrix.os == 'ubuntu-latest'
- uses: docker/setup-qemu-action@v2
+ uses: docker/setup-qemu-action@v3
- name: Log in to Docker Hub
if: matrix.os == 'ubuntu-latest'
- uses: docker/login-action@v2
+ uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
- name: Log in to the Container registry
if: matrix.os == 'ubuntu-latest'
- uses: docker/login-action@v2
+ uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.actor }}
@@ -72,7 +72,7 @@ jobs:
run: echo "sha_short=$(git rev-parse --short HEAD)" >> ${GITHUB_ENV}
- name: Run GoReleaser
- uses: goreleaser/goreleaser-action@v5
+ uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser-pro
version: latest
@@ -109,7 +109,7 @@ jobs:
needs: prepare
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -117,14 +117,14 @@ jobs:
run: git tag ${{ github.event.inputs.tag }}
- name: Setup Go environment explicitly
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: "1.21"
+ go-version-file: 'go.mod'
check-latest: true
- cache: true
+ cache: false
- name: Log in to Docker Hub
- uses: docker/login-action@v2
+ uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
@@ -169,7 +169,7 @@ jobs:
run: exit 1
- name: Do the release, only if all OS caches were restored
- uses: goreleaser/goreleaser-action@v5
+ uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser-pro
version: latest
@@ -193,7 +193,7 @@ jobs:
steps:
- name: Log in to Docker Hub
- uses: docker/login-action@v2
+ uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_USERNAME }}
password: ${{ secrets.DOCKER_PASSWORD }}
diff --git a/.github/workflows/start-binary.yml b/.github/workflows/start-binary.yml
index 35fea3c022..f77962f0e6 100644
--- a/.github/workflows/start-binary.yml
+++ b/.github/workflows/start-binary.yml
@@ -32,13 +32,14 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Setup Go environment explicitly
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: "1.21"
+ go-version-file: 'go.mod'
check-latest: true
+ cache: false
- name: Build modules
run: make deps:modules
@@ -48,7 +49,7 @@ jobs:
- name: Attempt to start binary
run: |
- ./build/defradb start &
+ ./build/defradb start --no-keyring &
sleep 5
- name: Check if binary is still running
diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml
index 491b674906..392cef826b 100644
--- a/.github/workflows/test-and-upload-coverage.yml
+++ b/.github/workflows/test-and-upload-coverage.yml
@@ -34,25 +34,41 @@ jobs:
client-type: [go, http, cli]
database-type: [badger-file, badger-memory]
mutation-type: [gql, collection-named, collection-save]
- detect-changes: [false]
+ lens-type: [wasm-time]
+ database-encryption: [false]
include:
- os: ubuntu-latest
client-type: go
database-type: badger-memory
mutation-type: collection-save
- detect-changes: true
+ lens-type: wasm-time
+ database-encryption: true
+ - os: ubuntu-latest
+ client-type: go
+ database-type: badger-memory
+ mutation-type: collection-save
+ lens-type: wazero
+ database-encryption: false
+ - os: ubuntu-latest
+ client-type: go
+ database-type: badger-memory
+ mutation-type: collection-save
+ lens-type: wasmer
+ database-encryption: false
- os: macos-latest
client-type: go
database-type: badger-memory
mutation-type: collection-save
- detect-changes: false
+ lens-type: wasm-time
+ database-encryption: false
## TODO: https://github.com/sourcenetwork/defradb/issues/2080
## Uncomment the lines below to Re-enable the windows build once this todo is resolved.
## - os: windows-latest
## client-type: go
## database-type: badger-memory
## mutation-type: collection-save
-## detect-changes: false
+## lens-type: wasm-time
+## database-encryption: false
runs-on: ${{ matrix.os }}
@@ -68,17 +84,59 @@ jobs:
DEFRA_CLIENT_CLI: ${{ matrix.client-type == 'cli' }}
DEFRA_BADGER_MEMORY: ${{ matrix.database-type == 'badger-memory' }}
DEFRA_BADGER_FILE: ${{ matrix.database-type == 'badger-file' }}
+ DEFRA_BADGER_ENCRYPTION: ${{ matrix.database-encryption }}
DEFRA_MUTATION_TYPE: ${{ matrix.mutation-type }}
+ DEFRA_LENS_TYPE: ${{ matrix.lens-type }}
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Setup Go environment explicitly
- uses: actions/setup-go@v3
+ uses: actions/setup-go@v5
with:
- go-version: "1.21"
+ go-version-file: 'go.mod'
check-latest: true
+ cache: false
+
+ - name: Set cache paths
+ id: cache-paths
+ shell: bash
+ run: |
+ echo "GO_CACHE=$(go env GOCACHE)" >> "${GITHUB_OUTPUT}"
+ echo "GO_MODCACHE=$(go env GOMODCACHE)" >> "${GITHUB_OUTPUT}"
+ echo "CARGO_CACHE=~/.cargo" >> "${GITHUB_OUTPUT}"
+
+ - name: Go cache/restore
+ uses: actions/cache@v4
+ with:
+ key: ${{ runner.os }}-go-${{ hashFiles('**/go.mod') }}
+ path: |
+ ${{ steps.cache-paths.outputs.GO_CACHE }}
+ ${{ steps.cache-paths.outputs.GO_MODCACHE }}
+
+ - name: Cargo cache/restore
+ # A very cool post: https://blog.arriven.wtf/posts/rust-ci-cache
+ uses: actions/cache@v4
+ with:
+ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.toml') }}
+ # Here are some directories we shouldn't forget about:
+ # ~/.cargo/.*
+ # ~/.cargo/bin/
+ # ~/.cargo/git/db/
+ # ~/.cargo/registry/cache/
+ # ~/.cargo/registry/index/
+ # **/target/*/*.d
+ # **/target/*/*.rlib
+ # **/target/*/.fingerprint
+ # **/target/*/build
+ # **/target/*/deps
+ path: |
+ ${{ steps.cache-paths.outputs.CARGO_CACHE }}
+ **/target/
+
+ - name: Restore modified time
+ uses: chetan/git-restore-mtime-action@v2
- name: Build dependencies
run: |
@@ -86,18 +144,22 @@ jobs:
make deps:test
- name: Run integration tests
- if: ${{ !matrix.detect-changes }}
run: make test:coverage
- - name: Run change detector tests
- if: ${{ matrix.detect-changes }}
- run: make test:changes
-
- name: Upload coverage artifact
- if: ${{ !matrix.detect-changes }}
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
- name: ${{ matrix.os }}_${{ matrix.client-type }}_${{ matrix.database-type }}_${{ matrix.mutation-type }}
+ # Make sure the name is always unique per job as artifacts are now immutable.
+ # Note Issue: https://github.com/actions/upload-artifact/issues/478
+ # Solve: https://github.com/actions/upload-artifact/issues/478#issuecomment-1885470013
+ name: "coverage\
+ _${{ matrix.os }}\
+ _${{ matrix.client-type }}\
+ _${{ matrix.database-type }}\
+ _${{ matrix.mutation-type }}\
+ _${{ matrix.lens-type }}\
+ _${{ matrix.database-encryption }}\
+ "
path: coverage.txt
if-no-files-found: error
retention-days: 7
@@ -116,18 +178,22 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Download coverage reports
- uses: actions/download-artifact@v3
+ uses: actions/download-artifact@v4
with:
+ pattern: coverage_*
+ # Note: https://github.com/actions/download-artifact/blob/main/docs/MIGRATION.md
+ merge-multiple: false
path: coverage_reports
- name: Upload coverage to Codecov
- uses: codecov/codecov-action@v3
+ uses: codecov/codecov-action@v4
with:
token: ${{ secrets.CODECOV_TOKEN }}
name: defradb-codecov
+ files: coverage_reports/**/*.txt
flags: all-tests
os: 'linux'
fail_ci_if_error: true
diff --git a/.github/workflows/validate-containerfile.yml b/.github/workflows/validate-containerfile.yml
index 260e0dba89..b0dc0d56c9 100644
--- a/.github/workflows/validate-containerfile.yml
+++ b/.github/workflows/validate-containerfile.yml
@@ -36,16 +36,16 @@ jobs:
steps:
- name: Check out the repo
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Set up QEMU
- uses: docker/setup-qemu-action@v2
+ uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
- uses: docker/setup-buildx-action@v2
+ uses: docker/setup-buildx-action@v3
- name: Build Docker image
- uses: docker/build-push-action@v4
+ uses: docker/build-push-action@v6
with:
context: .
file: tools/defradb.containerfile
diff --git a/.github/workflows/validate-title.yml b/.github/workflows/validate-title.yml
index 9899a9b92a..fa054663ef 100644
--- a/.github/workflows/validate-title.yml
+++ b/.github/workflows/validate-title.yml
@@ -28,7 +28,7 @@ jobs:
steps:
- name: Checkout code into the directory
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
- name: Ensure the scripts are not broken
run: make test:scripts
diff --git a/.goreleaser.yaml b/.goreleaser.yaml
index 05f201200f..4abe0b198a 100644
--- a/.goreleaser.yaml
+++ b/.goreleaser.yaml
@@ -1,4 +1,4 @@
-version: 1
+version: 2
before:
hooks:
diff --git a/CHANGELOG copy.md b/CHANGELOG copy.md
new file mode 100644
index 0000000000..7345a58cc8
--- /dev/null
+++ b/CHANGELOG copy.md
@@ -0,0 +1,1170 @@
+
+
+## [v0.11.0](https://github.com/sourcenetwork/defradb/compare/v0.10.0...v0.11.0)
+
+> 2024-05-03
+
+DefraDB v0.11 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.10.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Update corelog to 0.0.7 ([#2547](https://github.com/sourcenetwork/defradb/issues/2547))
+* Move relation field properties onto collection ([#2529](https://github.com/sourcenetwork/defradb/issues/2529))
+* Lens runtime config ([#2497](https://github.com/sourcenetwork/defradb/issues/2497))
+* Add P Counter CRDT ([#2482](https://github.com/sourcenetwork/defradb/issues/2482))
+* Add Access Control Policy ([#2338](https://github.com/sourcenetwork/defradb/issues/2338))
+* Force explicit primary decl. in SDL for one-ones ([#2462](https://github.com/sourcenetwork/defradb/issues/2462))
+* Allow mutation of col sources via PatchCollection ([#2424](https://github.com/sourcenetwork/defradb/issues/2424))
+* Add Defra-Lens support for branching schema ([#2421](https://github.com/sourcenetwork/defradb/issues/2421))
+* Add PatchCollection ([#2402](https://github.com/sourcenetwork/defradb/issues/2402))
+
+### Fixes
+
+* Return correct results from one-many indexed filter ([#2579](https://github.com/sourcenetwork/defradb/issues/2579))
+* Handle compound filters on related indexed fields ([#2575](https://github.com/sourcenetwork/defradb/issues/2575))
+* Add check to filter result for logical ops ([#2573](https://github.com/sourcenetwork/defradb/issues/2573))
+* Make all array kinds nillable ([#2534](https://github.com/sourcenetwork/defradb/issues/2534))
+* Allow update when updating non-indexed field ([#2511](https://github.com/sourcenetwork/defradb/issues/2511))
+
+### Documentation
+
+* Add data definition document ([#2544](https://github.com/sourcenetwork/defradb/issues/2544))
+
+### Refactoring
+
+* Merge collection UpdateWith and DeleteWith ([#2531](https://github.com/sourcenetwork/defradb/issues/2531))
+* DB transactions context ([#2513](https://github.com/sourcenetwork/defradb/issues/2513))
+* Add NormalValue ([#2404](https://github.com/sourcenetwork/defradb/issues/2404))
+* Clean up client/request package ([#2443](https://github.com/sourcenetwork/defradb/issues/2443))
+* Rewrite convertImmutable ([#2445](https://github.com/sourcenetwork/defradb/issues/2445))
+* Unify Field Kind and Schema properties ([#2414](https://github.com/sourcenetwork/defradb/issues/2414))
+* Replace logging package with corelog ([#2406](https://github.com/sourcenetwork/defradb/issues/2406))
+
+### Testing
+
+* Add flag to skip network tests ([#2495](https://github.com/sourcenetwork/defradb/issues/2495))
+
+### Bot
+
+* Update dependencies (bulk dependabot PRs) 30-04-2024 ([#2570](https://github.com/sourcenetwork/defradb/issues/2570))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.7.0 to 7.7.1 in /playground ([#2550](https://github.com/sourcenetwork/defradb/issues/2550))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 7.7.0 to 7.7.1 in /playground ([#2551](https://github.com/sourcenetwork/defradb/issues/2551))
+* Bump swagger-ui-react from 5.16.2 to 5.17.0 in /playground ([#2549](https://github.com/sourcenetwork/defradb/issues/2549))
+* Update dependencies (bulk dependabot PRs) 23-04-2024 ([#2548](https://github.com/sourcenetwork/defradb/issues/2548))
+* Bump go.opentelemetry.io/otel/sdk/metric from 1.24.0 to 1.25.0 ([#2499](https://github.com/sourcenetwork/defradb/issues/2499))
+* Bump typescript from 5.4.3 to 5.4.5 in /playground ([#2515](https://github.com/sourcenetwork/defradb/issues/2515))
+* Bump swagger-ui-react from 5.14.0 to 5.15.0 in /playground ([#2514](https://github.com/sourcenetwork/defradb/issues/2514))
+* Update dependencies (bulk dependabot PRs) 2024-04-09 ([#2509](https://github.com/sourcenetwork/defradb/issues/2509))
+* Update dependencies (bulk dependabot PRs) 2024-04-03 ([#2492](https://github.com/sourcenetwork/defradb/issues/2492))
+* Update dependencies (bulk dependabot PRs) 03-04-2024 ([#2486](https://github.com/sourcenetwork/defradb/issues/2486))
+* Bump github.com/multiformats/go-multiaddr from 0.12.2 to 0.12.3 ([#2480](https://github.com/sourcenetwork/defradb/issues/2480))
+* Bump [@types](https://github.com/types)/react from 18.2.66 to 18.2.67 in /playground ([#2427](https://github.com/sourcenetwork/defradb/issues/2427))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.2.0 to 7.3.1 in /playground ([#2428](https://github.com/sourcenetwork/defradb/issues/2428))
+* Update dependencies (bulk dependabot PRs) 19-03-2024 ([#2426](https://github.com/sourcenetwork/defradb/issues/2426))
+* Update dependencies (bulk dependabot PRs) 03-11-2024 ([#2399](https://github.com/sourcenetwork/defradb/issues/2399))
+
+
+## [v0.10.0](https://github.com/sourcenetwork/defradb/compare/v0.9.0...v0.10.0)
+
+> 2024-03-08
+
+DefraDB v0.10 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.9.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Add case insensitive `like` operator ([#2368](https://github.com/sourcenetwork/defradb/issues/2368))
+* Reverted order for indexed fields ([#2335](https://github.com/sourcenetwork/defradb/issues/2335))
+* Rework GetCollection/SchemaByFoo funcs into single ([#2319](https://github.com/sourcenetwork/defradb/issues/2319))
+* Add support for views with Lens transforms ([#2311](https://github.com/sourcenetwork/defradb/issues/2311))
+* Model Col. SchemaVersions and migrations on Cols ([#2286](https://github.com/sourcenetwork/defradb/issues/2286))
+* Replace FieldDescription.RelationType with IsPrimary ([#2288](https://github.com/sourcenetwork/defradb/issues/2288))
+* Multiple docs with nil value on unique-indexed field ([#2276](https://github.com/sourcenetwork/defradb/issues/2276))
+* Allow setting null values on doc fields ([#2273](https://github.com/sourcenetwork/defradb/issues/2273))
+* Add JSON scalar ([#2254](https://github.com/sourcenetwork/defradb/issues/2254))
+* Generate OpenAPI command ([#2235](https://github.com/sourcenetwork/defradb/issues/2235))
+* Add composite indexes ([#2226](https://github.com/sourcenetwork/defradb/issues/2226))
+
+### Fixes
+
+* Add `latest` image tag for ghcr ([#2340](https://github.com/sourcenetwork/defradb/issues/2340))
+* Move field id off of schema ([#2336](https://github.com/sourcenetwork/defradb/issues/2336))
+* Make returned collections respect explicit transactions ([#2385](https://github.com/sourcenetwork/defradb/issues/2385))
+* Update GetCollections behaviour ([#2378](https://github.com/sourcenetwork/defradb/issues/2378))
+* Add missing directive definitions ([#2369](https://github.com/sourcenetwork/defradb/issues/2369))
+* Add validation to JSON fields ([#2375](https://github.com/sourcenetwork/defradb/issues/2375))
+* Make peers sync secondary index ([#2390](https://github.com/sourcenetwork/defradb/issues/2390))
+* Load root dir before loading config ([#2266](https://github.com/sourcenetwork/defradb/issues/2266))
+* Mark docs as deleted when querying in delete mut ([#2298](https://github.com/sourcenetwork/defradb/issues/2298))
+* Add missing logs at startup ([#2391](https://github.com/sourcenetwork/defradb/issues/2391))
+* Add missing delta payload ([#2306](https://github.com/sourcenetwork/defradb/issues/2306))
+* Fix compound relational filters in aggregates ([#2297](https://github.com/sourcenetwork/defradb/issues/2297))
+
+### Refactoring
+
+* Generate field ids using a sequence ([#2339](https://github.com/sourcenetwork/defradb/issues/2339))
+* Make config internal to CLI ([#2310](https://github.com/sourcenetwork/defradb/issues/2310))
+* Node config ([#2296](https://github.com/sourcenetwork/defradb/issues/2296))
+* HTTP config ([#2278](https://github.com/sourcenetwork/defradb/issues/2278))
+* Remove unused Delete field from client.Document ([#2275](https://github.com/sourcenetwork/defradb/issues/2275))
+* Decouple net config ([#2258](https://github.com/sourcenetwork/defradb/issues/2258))
+* Make CollectionDescription.Name Option ([#2223](https://github.com/sourcenetwork/defradb/issues/2223))
+
+### Chore
+
+* Bump to GoLang v1.21 ([#2195](https://github.com/sourcenetwork/defradb/issues/2195))
+
+### Bot
+
+* Update dependencies (bulk dependabot PRs) 05-02-2024 ([#2372](https://github.com/sourcenetwork/defradb/issues/2372))
+* Update dependencies (bulk dependabot PRs) 02-27-2024 ([#2353](https://github.com/sourcenetwork/defradb/issues/2353))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.21.0 to 7.0.1 in /playground ([#2331](https://github.com/sourcenetwork/defradb/issues/2331))
+* Bump google.golang.org/grpc from 1.61.0 to 1.61.1 ([#2320](https://github.com/sourcenetwork/defradb/issues/2320))
+* Update dependencies (bulk dependabot PRs) 2024-02-19 ([#2330](https://github.com/sourcenetwork/defradb/issues/2330))
+* Bump vite from 5.1.1 to 5.1.2 in /playground ([#2317](https://github.com/sourcenetwork/defradb/issues/2317))
+* Bump golang.org/x/net from 0.20.0 to 0.21.0 ([#2301](https://github.com/sourcenetwork/defradb/issues/2301))
+* Update dependencies (bulk dependabot PRs) 2024-02-14 ([#2313](https://github.com/sourcenetwork/defradb/issues/2313))
+* Update dependencies (bulk dependabot PRs) 02-07-2024 ([#2294](https://github.com/sourcenetwork/defradb/issues/2294))
+* Update dependencies (bulk dependabot PRs) 30-01-2024 ([#2270](https://github.com/sourcenetwork/defradb/issues/2270))
+* Update dependencies (bulk dependabot PRs) 23-01-2024 ([#2252](https://github.com/sourcenetwork/defradb/issues/2252))
+* Bump vite from 5.0.11 to 5.0.12 in /playground ([#2236](https://github.com/sourcenetwork/defradb/issues/2236))
+* Bump github.com/evanphx/json-patch/v5 from 5.7.0 to 5.8.1 ([#2233](https://github.com/sourcenetwork/defradb/issues/2233))
+
+
+## [v0.9.0](https://github.com/sourcenetwork/defradb/compare/v0.8.0...v0.9.0)
+
+> 2024-01-18
+
+DefraDB v0.9 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.8.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Mutation typed input ([#2167](https://github.com/sourcenetwork/defradb/issues/2167))
+* Add PN Counter CRDT type ([#2119](https://github.com/sourcenetwork/defradb/issues/2119))
+* Allow users to add Views ([#2114](https://github.com/sourcenetwork/defradb/issues/2114))
+* Add unique secondary index ([#2131](https://github.com/sourcenetwork/defradb/issues/2131))
+* New cmd for docs auto generation ([#2096](https://github.com/sourcenetwork/defradb/issues/2096))
+* Add blob scalar type ([#2091](https://github.com/sourcenetwork/defradb/issues/2091))
+
+### Fixes
+
+* Add entropy to counter CRDT type updates ([#2186](https://github.com/sourcenetwork/defradb/issues/2186))
+* Handle multiple nil values on unique indexed fields ([#2178](https://github.com/sourcenetwork/defradb/issues/2178))
+* Filtering on unique index if there is no match ([#2177](https://github.com/sourcenetwork/defradb/issues/2177))
+
+### Performance
+
+* Switch LensVM to wasmtime runtime ([#2030](https://github.com/sourcenetwork/defradb/issues/2030))
+
+### Refactoring
+
+* Add strong typing to document creation ([#2161](https://github.com/sourcenetwork/defradb/issues/2161))
+* Rename key,id,dockey to docID terminology ([#1749](https://github.com/sourcenetwork/defradb/issues/1749))
+* Simplify Merkle CRDT workflow ([#2111](https://github.com/sourcenetwork/defradb/issues/2111))
+
+### Testing
+
+* Add auto-doc generation ([#2051](https://github.com/sourcenetwork/defradb/issues/2051))
+
+### Continuous integration
+
+* Add windows test runner ([#2033](https://github.com/sourcenetwork/defradb/issues/2033))
+
+### Chore
+
+* Update Lens to v0.5 ([#2083](https://github.com/sourcenetwork/defradb/issues/2083))
+
+### Bot
+
+* Bump [@types](https://github.com/types)/react from 18.2.47 to 18.2.48 in /playground ([#2213](https://github.com/sourcenetwork/defradb/issues/2213))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.18.0 to 6.18.1 in /playground ([#2215](https://github.com/sourcenetwork/defradb/issues/2215))
+* Update dependencies (bulk dependabot PRs) 15-01-2024 ([#2217](https://github.com/sourcenetwork/defradb/issues/2217))
+* Bump follow-redirects from 1.15.3 to 1.15.4 in /playground ([#2181](https://github.com/sourcenetwork/defradb/issues/2181))
+* Bump github.com/getkin/kin-openapi from 0.120.0 to 0.122.0 ([#2097](https://github.com/sourcenetwork/defradb/issues/2097))
+* Update dependencies (bulk dependabot PRs) 08-01-2024 ([#2173](https://github.com/sourcenetwork/defradb/issues/2173))
+* Bump github.com/bits-and-blooms/bitset from 1.12.0 to 1.13.0 ([#2160](https://github.com/sourcenetwork/defradb/issues/2160))
+* Bump [@types](https://github.com/types)/react from 18.2.45 to 18.2.46 in /playground ([#2159](https://github.com/sourcenetwork/defradb/issues/2159))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.15.0 to 6.16.0 in /playground ([#2156](https://github.com/sourcenetwork/defradb/issues/2156))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.15.0 to 6.16.0 in /playground ([#2155](https://github.com/sourcenetwork/defradb/issues/2155))
+* Update dependencies (bulk dependabot PRs) 27-12-2023 ([#2154](https://github.com/sourcenetwork/defradb/issues/2154))
+* Bump github.com/spf13/viper from 1.17.0 to 1.18.2 ([#2145](https://github.com/sourcenetwork/defradb/issues/2145))
+* Bump golang.org/x/crypto from 0.16.0 to 0.17.0 ([#2144](https://github.com/sourcenetwork/defradb/issues/2144))
+* Update dependencies (bulk dependabot PRs) 18-12-2023 ([#2142](https://github.com/sourcenetwork/defradb/issues/2142))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.13.2 to 6.14.0 in /playground ([#2136](https://github.com/sourcenetwork/defradb/issues/2136))
+* Bump [@types](https://github.com/types)/react from 18.2.43 to 18.2.45 in /playground ([#2134](https://github.com/sourcenetwork/defradb/issues/2134))
+* Bump vite from 5.0.7 to 5.0.10 in /playground ([#2135](https://github.com/sourcenetwork/defradb/issues/2135))
+* Update dependencies (bulk dependabot PRs) 04-12-2023 ([#2133](https://github.com/sourcenetwork/defradb/issues/2133))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.13.1 to 6.13.2 in /playground ([#2109](https://github.com/sourcenetwork/defradb/issues/2109))
+* Bump vite from 5.0.2 to 5.0.5 in /playground ([#2112](https://github.com/sourcenetwork/defradb/issues/2112))
+* Bump [@types](https://github.com/types)/react from 18.2.41 to 18.2.42 in /playground ([#2108](https://github.com/sourcenetwork/defradb/issues/2108))
+* Update dependencies (bulk dependabot PRs) 04-12-2023 ([#2107](https://github.com/sourcenetwork/defradb/issues/2107))
+* Bump [@types](https://github.com/types)/react from 18.2.38 to 18.2.39 in /playground ([#2086](https://github.com/sourcenetwork/defradb/issues/2086))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.12.0 to 6.13.0 in /playground ([#2085](https://github.com/sourcenetwork/defradb/issues/2085))
+* Update dependencies (bulk dependabot PRs) 27-11-2023 ([#2081](https://github.com/sourcenetwork/defradb/issues/2081))
+* Bump swagger-ui-react from 5.10.0 to 5.10.3 in /playground ([#2067](https://github.com/sourcenetwork/defradb/issues/2067))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.11.0 to 6.12.0 in /playground ([#2068](https://github.com/sourcenetwork/defradb/issues/2068))
+* Update dependencies (bulk dependabot PRs) 20-11-2023 ([#2066](https://github.com/sourcenetwork/defradb/issues/2066))
+
+
+## [v0.8.0](https://github.com/sourcenetwork/defradb/compare/v0.7.0...v0.8.0)
+
+> 2023-11-14
+
+DefraDB v0.8 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.7.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Add means to fetch schema ([#2006](https://github.com/sourcenetwork/defradb/issues/2006))
+* Rename Schema.SchemaID to Schema.Root ([#2005](https://github.com/sourcenetwork/defradb/issues/2005))
+* Enable playground in Docker build ([#1986](https://github.com/sourcenetwork/defradb/issues/1986))
+* Change GetCollectionBySchemaFoo funcs to return many ([#1984](https://github.com/sourcenetwork/defradb/issues/1984))
+* Add Swagger UI to playground ([#1979](https://github.com/sourcenetwork/defradb/issues/1979))
+* Add OpenAPI route ([#1960](https://github.com/sourcenetwork/defradb/issues/1960))
+* Remove CollectionDescription.Schema ([#1965](https://github.com/sourcenetwork/defradb/issues/1965))
+* Remove collection from patch schema ([#1957](https://github.com/sourcenetwork/defradb/issues/1957))
+* Make queries utilise secondary indexes ([#1925](https://github.com/sourcenetwork/defradb/issues/1925))
+* Allow setting of default schema version ([#1888](https://github.com/sourcenetwork/defradb/issues/1888))
+* Add CCIP Support ([#1896](https://github.com/sourcenetwork/defradb/issues/1896))
+
+### Fixes
+
+* Fix test module relying on closed memory leak ([#2037](https://github.com/sourcenetwork/defradb/issues/2037))
+* Make return type for FieldKind_INT an int64 ([#1982](https://github.com/sourcenetwork/defradb/issues/1982))
+* Node private key requires data directory ([#1938](https://github.com/sourcenetwork/defradb/issues/1938))
+* Remove collection name from schema ID generation ([#1920](https://github.com/sourcenetwork/defradb/issues/1920))
+* Infinite loop when updating one-one relation ([#1915](https://github.com/sourcenetwork/defradb/issues/1915))
+
+### Refactoring
+
+* CRDT merge direction ([#2016](https://github.com/sourcenetwork/defradb/issues/2016))
+* Reorganise collection description storage ([#1988](https://github.com/sourcenetwork/defradb/issues/1988))
+* Add peerstore to multistore ([#1980](https://github.com/sourcenetwork/defradb/issues/1980))
+* P2P client interface ([#1924](https://github.com/sourcenetwork/defradb/issues/1924))
+* Deprecate CollectionDescription.Schema ([#1939](https://github.com/sourcenetwork/defradb/issues/1939))
+* Remove net GRPC API ([#1927](https://github.com/sourcenetwork/defradb/issues/1927))
+* CLI client interface ([#1839](https://github.com/sourcenetwork/defradb/issues/1839))
+
+### Continuous integration
+
+* Add goreleaser workflow ([#2040](https://github.com/sourcenetwork/defradb/issues/2040))
+* Add mac test runner ([#2035](https://github.com/sourcenetwork/defradb/issues/2035))
+* Parallelize change detector ([#1871](https://github.com/sourcenetwork/defradb/issues/1871))
+
+### Chore
+
+* Update dependencies ([#2044](https://github.com/sourcenetwork/defradb/issues/2044))
+
+### Bot
+
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.10.0 to 6.11.0 in /playground ([#2053](https://github.com/sourcenetwork/defradb/issues/2053))
+* Update dependencies (bulk dependabot PRs) 13-11-2023 ([#2052](https://github.com/sourcenetwork/defradb/issues/2052))
+* Bump axios from 1.5.1 to 1.6.1 in /playground ([#2041](https://github.com/sourcenetwork/defradb/issues/2041))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.9.1 to 6.10.0 in /playground ([#2042](https://github.com/sourcenetwork/defradb/issues/2042))
+* Bump [@vitejs](https://github.com/vitejs)/plugin-react-swc from 3.4.0 to 3.4.1 in /playground ([#2022](https://github.com/sourcenetwork/defradb/issues/2022))
+* Update dependencies (bulk dependabot PRs) 08-11-2023 ([#2038](https://github.com/sourcenetwork/defradb/issues/2038))
+* Update dependencies (bulk dependabot PRs) 30-10-2023 ([#2015](https://github.com/sourcenetwork/defradb/issues/2015))
+* Bump eslint-plugin and parser from 6.8.0 to 6.9.0 in /playground ([#2000](https://github.com/sourcenetwork/defradb/issues/2000))
+* Update dependencies (bulk dependabot PRs) 16-10-2023 ([#1998](https://github.com/sourcenetwork/defradb/issues/1998))
+* Update dependencies (bulk dependabot PRs) 16-10-2023 ([#1976](https://github.com/sourcenetwork/defradb/issues/1976))
+* Bump golang.org/x/net from 0.16.0 to 0.17.0 ([#1961](https://github.com/sourcenetwork/defradb/issues/1961))
+* Bump [@types](https://github.com/types)/react-dom from 18.2.11 to 18.2.12 in /playground ([#1952](https://github.com/sourcenetwork/defradb/issues/1952))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.7.4 to 6.7.5 in /playground ([#1953](https://github.com/sourcenetwork/defradb/issues/1953))
+* Bump combined dependencies 09-10-2023 ([#1951](https://github.com/sourcenetwork/defradb/issues/1951))
+* Bump [@types](https://github.com/types)/react from 18.2.24 to 18.2.25 in /playground ([#1932](https://github.com/sourcenetwork/defradb/issues/1932))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.7.3 to 6.7.4 in /playground ([#1933](https://github.com/sourcenetwork/defradb/issues/1933))
+* Bump [@vitejs](https://github.com/vitejs)/plugin-react-swc from 3.3.2 to 3.4.0 in /playground ([#1904](https://github.com/sourcenetwork/defradb/issues/1904))
+* Bump combined dependencies 19-09-2023 ([#1931](https://github.com/sourcenetwork/defradb/issues/1931))
+* Bump graphql from 16.8.0 to 16.8.1 in /playground ([#1901](https://github.com/sourcenetwork/defradb/issues/1901))
+* Update combined dependabot PRs 19-09-2023 ([#1898](https://github.com/sourcenetwork/defradb/issues/1898))
+
+
+## [v0.7.0](https://github.com/sourcenetwork/defradb/compare/v0.6.0...v0.7.0)
+
+> 2023-09-18
+
+DefraDB v0.7 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+This release has focused on robustness, testing, and schema management. Some highlight new features include notable expansions to the expressiveness of schema migrations.
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.6.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Allow field indexing by name in PatchSchema ([#1810](https://github.com/sourcenetwork/defradb/issues/1810))
+* Auto-create relation id fields via PatchSchema ([#1807](https://github.com/sourcenetwork/defradb/issues/1807))
+* Support PatchSchema relational field kind substitution ([#1777](https://github.com/sourcenetwork/defradb/issues/1777))
+* Add support for adding of relational fields ([#1766](https://github.com/sourcenetwork/defradb/issues/1766))
+* Enable downgrading of documents via Lens inverses ([#1721](https://github.com/sourcenetwork/defradb/issues/1721))
+
+### Fixes
+
+* Correctly handle serialisation of nil field values ([#1872](https://github.com/sourcenetwork/defradb/issues/1872))
+* Compound filter operators with relations ([#1855](https://github.com/sourcenetwork/defradb/issues/1855))
+* Only update updated fields via update requests ([#1817](https://github.com/sourcenetwork/defradb/issues/1817))
+* Error when saving a deleted document ([#1806](https://github.com/sourcenetwork/defradb/issues/1806))
+* Prevent multiple docs from being linked in one one ([#1790](https://github.com/sourcenetwork/defradb/issues/1790))
+* Handle the querying of secondary relation id fields ([#1768](https://github.com/sourcenetwork/defradb/issues/1768))
+* Improve the way migrations handle transactions ([#1737](https://github.com/sourcenetwork/defradb/issues/1737))
+
+### Tooling
+
+* Add Akash deployment configuration ([#1736](https://github.com/sourcenetwork/defradb/issues/1736))
+
+### Refactoring
+
+* HTTP client interface ([#1776](https://github.com/sourcenetwork/defradb/issues/1776))
+* Simplify fetcher interface ([#1746](https://github.com/sourcenetwork/defradb/issues/1746))
+
+### Testing
+
+* Convert and move out of place explain tests ([#1878](https://github.com/sourcenetwork/defradb/issues/1878))
+* Update mutation tests to make use of mutation system ([#1853](https://github.com/sourcenetwork/defradb/issues/1853))
+* Test top level agg. with compound relational filter ([#1870](https://github.com/sourcenetwork/defradb/issues/1870))
+* Skip unsupported mutation types at test level ([#1850](https://github.com/sourcenetwork/defradb/issues/1850))
+* Extend mutation tests with col.Update and Create ([#1838](https://github.com/sourcenetwork/defradb/issues/1838))
+* Add tests for multiple one-one joins ([#1793](https://github.com/sourcenetwork/defradb/issues/1793))
+
+### Chore
+
+* Update Badger version to v4 ([#1740](https://github.com/sourcenetwork/defradb/issues/1740))
+* Update go-libp2p to 0.29.2 ([#1780](https://github.com/sourcenetwork/defradb/issues/1780))
+* Bump golangci-lint to v1.54 ([#1881](https://github.com/sourcenetwork/defradb/issues/1881))
+* Bump go.opentelemetry.io/otel/metric from 1.17.0 to 1.18.0 ([#1890](https://github.com/sourcenetwork/defradb/issues/1890))
+* Bump [@tanstack](https://github.com/tanstack)/react-query from 4.35.0 to 4.35.3 in /playground ([#1876](https://github.com/sourcenetwork/defradb/issues/1876))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.5.0 to 6.7.0 in /playground ([#1874](https://github.com/sourcenetwork/defradb/issues/1874))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.6.0 to 6.7.0 in /playground ([#1875](https://github.com/sourcenetwork/defradb/issues/1875))
+* Combined PRs 2023-09-14 ([#1873](https://github.com/sourcenetwork/defradb/issues/1873))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.4.0 to 6.5.0 in /playground ([#1827](https://github.com/sourcenetwork/defradb/issues/1827))
+* Bump go.opentelemetry.io/otel/sdk/metric from 0.39.0 to 0.40.0 ([#1829](https://github.com/sourcenetwork/defradb/issues/1829))
+* Bump github.com/ipfs/go-block-format from 0.1.2 to 0.2.0 ([#1819](https://github.com/sourcenetwork/defradb/issues/1819))
+* Combined PRs ([#1826](https://github.com/sourcenetwork/defradb/issues/1826))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.4.0 to 6.4.1 in /playground ([#1804](https://github.com/sourcenetwork/defradb/issues/1804))
+* Combined PRs ([#1803](https://github.com/sourcenetwork/defradb/issues/1803))
+* Combined PRs ([#1791](https://github.com/sourcenetwork/defradb/issues/1791))
+* Combined PRs ([#1778](https://github.com/sourcenetwork/defradb/issues/1778))
+* Bump dependencies ([#1761](https://github.com/sourcenetwork/defradb/issues/1761))
+* Bump vite from 4.3.9 to 4.4.8 in /playground ([#1748](https://github.com/sourcenetwork/defradb/issues/1748))
+* Bump graphiql from 3.0.4 to 3.0.5 in /playground ([#1730](https://github.com/sourcenetwork/defradb/issues/1730))
+* Combined bumps of dependencies under /playground ([#1744](https://github.com/sourcenetwork/defradb/issues/1744))
+* Bump github.com/ipfs/boxo from 0.10.2 to 0.11.0 ([#1726](https://github.com/sourcenetwork/defradb/issues/1726))
+* Bump github.com/libp2p/go-libp2p-kad-dht from 0.24.2 to 0.24.3 ([#1724](https://github.com/sourcenetwork/defradb/issues/1724))
+* Bump google.golang.org/grpc from 1.56.2 to 1.57.0 ([#1725](https://github.com/sourcenetwork/defradb/issues/1725))
+
+
+## [v0.6.0](https://github.com/sourcenetwork/defradb/compare/v0.5.1...v0.6.0)
+
+> 2023-07-31
+
+DefraDB v0.6 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+There are several new and powerful features, important bug fixes, and notable refactors in this release. Some highlight features include: the initial release of our LensVM based schema migration engine powered by WebAssembly ([#1650](https://github.com/sourcenetwork/defradb/issues/1650)), the newly embedded DefraDB Playground which includes a bundled GraphQL client and schema manager, and last but not least a relation field (_id) alias to improve the developer experience ([#1609](https://github.com/sourcenetwork/defradb/issues/1609)).
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.5.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Add `_not` operator ([#1631](https://github.com/sourcenetwork/defradb/issues/1631))
+* Schema list API ([#1625](https://github.com/sourcenetwork/defradb/issues/1625))
+* Add simple data import and export ([#1630](https://github.com/sourcenetwork/defradb/issues/1630))
+* Playground ([#1575](https://github.com/sourcenetwork/defradb/issues/1575))
+* Add schema migration get and set cmds to CLI ([#1650](https://github.com/sourcenetwork/defradb/issues/1650))
+* Allow relation alias on create and update ([#1609](https://github.com/sourcenetwork/defradb/issues/1609))
+* Make fetcher calculate docFetches and fieldFetches ([#1713](https://github.com/sourcenetwork/defradb/issues/1713))
+* Add lens migration engine to defra ([#1564](https://github.com/sourcenetwork/defradb/issues/1564))
+* Add `_keys` attribute to `selectNode` simple explain ([#1546](https://github.com/sourcenetwork/defradb/issues/1546))
+* CLI commands for secondary indexes ([#1595](https://github.com/sourcenetwork/defradb/issues/1595))
+* Add alias to `groupBy` related object ([#1579](https://github.com/sourcenetwork/defradb/issues/1579))
+* Non-unique secondary index (no querying) ([#1450](https://github.com/sourcenetwork/defradb/issues/1450))
+* Add ability to explain-debug all nodes ([#1563](https://github.com/sourcenetwork/defradb/issues/1563))
+* Include dockey in doc exists err ([#1558](https://github.com/sourcenetwork/defradb/issues/1558))
+
+### Fixes
+
+* Better wait in CLI integration test ([#1415](https://github.com/sourcenetwork/defradb/issues/1415))
+* Return error when relation is not defined on both types ([#1647](https://github.com/sourcenetwork/defradb/issues/1647))
+* Change `core.DocumentMapping` to pointer ([#1528](https://github.com/sourcenetwork/defradb/issues/1528))
+* Fix invalid (badger) datastore state ([#1685](https://github.com/sourcenetwork/defradb/issues/1685))
+* Discard index and subscription implicit transactions ([#1715](https://github.com/sourcenetwork/defradb/issues/1715))
+* Remove duplicated `peers` in peerstore prefix ([#1678](https://github.com/sourcenetwork/defradb/issues/1678))
+* Return errors from typeJoinOne ([#1716](https://github.com/sourcenetwork/defradb/issues/1716))
+* Document change detector breaking change ([#1531](https://github.com/sourcenetwork/defradb/issues/1531))
+* Standardise `schema migration` CLI errors ([#1682](https://github.com/sourcenetwork/defradb/issues/1682))
+* Introspection OrderArg returns null inputFields ([#1633](https://github.com/sourcenetwork/defradb/issues/1633))
+* Avoid duplicated requestable fields ([#1621](https://github.com/sourcenetwork/defradb/issues/1621))
+* Normalize int field kind ([#1619](https://github.com/sourcenetwork/defradb/issues/1619))
+* Change the WriteSyncer to use lock when piping ([#1608](https://github.com/sourcenetwork/defradb/issues/1608))
+* Filter splitting and rendering for related types ([#1541](https://github.com/sourcenetwork/defradb/issues/1541))
+
+### Documentation
+
+* Improve CLI command documentation ([#1505](https://github.com/sourcenetwork/defradb/issues/1505))
+
+### Refactoring
+
+* Schema list output to include schemaVersionID ([#1706](https://github.com/sourcenetwork/defradb/issues/1706))
+* Reuse lens wasm modules ([#1641](https://github.com/sourcenetwork/defradb/issues/1641))
+* Remove redundant txn param from fetcher start ([#1635](https://github.com/sourcenetwork/defradb/issues/1635))
+* Remove first CRDT byte from field encoded values ([#1622](https://github.com/sourcenetwork/defradb/issues/1622))
+* Merge `node` into `net` and improve coverage ([#1593](https://github.com/sourcenetwork/defradb/issues/1593))
+* Fetcher filter and field optimization ([#1500](https://github.com/sourcenetwork/defradb/issues/1500))
+
+### Testing
+
+* Rework transaction test framework capabilities ([#1603](https://github.com/sourcenetwork/defradb/issues/1603))
+* Expand backup integration tests ([#1699](https://github.com/sourcenetwork/defradb/issues/1699))
+* Disable test ([#1675](https://github.com/sourcenetwork/defradb/issues/1675))
+* Add tests for 1-1 group by id ([#1655](https://github.com/sourcenetwork/defradb/issues/1655))
+* Remove CLI tests from make test ([#1643](https://github.com/sourcenetwork/defradb/issues/1643))
+* Bundle test state into single var ([#1645](https://github.com/sourcenetwork/defradb/issues/1645))
+* Convert explain group tests to new explain setup ([#1537](https://github.com/sourcenetwork/defradb/issues/1537))
+* Add tests for foo_id field name clashes ([#1521](https://github.com/sourcenetwork/defradb/issues/1521))
+* Resume wait correctly following test node restart ([#1515](https://github.com/sourcenetwork/defradb/issues/1515))
+* Require no errors when none expected ([#1509](https://github.com/sourcenetwork/defradb/issues/1509))
+
+### Continuous integration
+
+* Add workflows to push, pull, and validate docker images ([#1676](https://github.com/sourcenetwork/defradb/issues/1676))
+* Build mocks using make ([#1612](https://github.com/sourcenetwork/defradb/issues/1612))
+* Fix terraform plan and merge AMI build + deploy workflow ([#1514](https://github.com/sourcenetwork/defradb/issues/1514))
+* Reconfigure CodeCov action to ensure stability ([#1414](https://github.com/sourcenetwork/defradb/issues/1414))
+
+### Chore
+
+* Bump to GoLang v1.20 ([#1689](https://github.com/sourcenetwork/defradb/issues/1689))
+* Update to ipfs boxo 0.10.0 ([#1573](https://github.com/sourcenetwork/defradb/issues/1573))
+
+
+
+## [v0.5.1](https://github.com/sourcenetwork/defradb/compare/v0.5.0...v0.5.1)
+
+> 2023-05-16
+
+### Features
+
+* Add collection response information on creation ([#1499](https://github.com/sourcenetwork/defradb/issues/1499))
+* CLI client request from file ([#1503](https://github.com/sourcenetwork/defradb/issues/1503))
+* Add commits fieldName and fieldId fields ([#1451](https://github.com/sourcenetwork/defradb/issues/1451))
+* Add allowed origins config ([#1408](https://github.com/sourcenetwork/defradb/issues/1408))
+* Add descriptions to all system defined GQL stuff ([#1387](https://github.com/sourcenetwork/defradb/issues/1387))
+* Strongly type Request.Errors ([#1364](https://github.com/sourcenetwork/defradb/issues/1364))
+
+### Fixes
+
+* Skip new test packages in change detector ([#1495](https://github.com/sourcenetwork/defradb/issues/1495))
+* Make nested joins work correctly from primary direction ([#1491](https://github.com/sourcenetwork/defradb/issues/1491))
+* Add reconnection to known peers ([#1482](https://github.com/sourcenetwork/defradb/issues/1482))
+* Rename commit field input arg to fieldId ([#1460](https://github.com/sourcenetwork/defradb/issues/1460))
+* Reference collectionID in p2p readme ([#1466](https://github.com/sourcenetwork/defradb/issues/1466))
+* Handling SIGTERM in CLI `start` command ([#1459](https://github.com/sourcenetwork/defradb/issues/1459))
+* Update QL documentation link and replicator command ([#1440](https://github.com/sourcenetwork/defradb/issues/1440))
+* Fix typo in readme ([#1419](https://github.com/sourcenetwork/defradb/issues/1419))
+* Limit the size of http request bodies that we handle ([#1405](https://github.com/sourcenetwork/defradb/issues/1405))
+* Improve P2P event handling ([#1388](https://github.com/sourcenetwork/defradb/issues/1388))
+* Serialize DB errors to json in http package ([#1401](https://github.com/sourcenetwork/defradb/issues/1401))
+* Do not commit if errors have been returned ([#1390](https://github.com/sourcenetwork/defradb/issues/1390))
+* Unlock replicator lock before returning error ([#1369](https://github.com/sourcenetwork/defradb/issues/1369))
+* Improve NonNull error message ([#1362](https://github.com/sourcenetwork/defradb/issues/1362))
+* Use ring-buffer for WaitForFoo chans ([#1359](https://github.com/sourcenetwork/defradb/issues/1359))
+* Guarantee event processing order ([#1352](https://github.com/sourcenetwork/defradb/issues/1352))
+* Explain of _group with dockeys filter to be []string ([#1348](https://github.com/sourcenetwork/defradb/issues/1348))
+
+### Refactoring
+
+* Use `int32` for proper gql scalar Int parsing ([#1493](https://github.com/sourcenetwork/defradb/issues/1493))
+* Improve rollback on peer P2P collection error ([#1461](https://github.com/sourcenetwork/defradb/issues/1461))
+* Improve CLI with test suite and builder pattern ([#928](https://github.com/sourcenetwork/defradb/issues/928))
+
+### Testing
+
+* Add DB/Node Restart tests ([#1504](https://github.com/sourcenetwork/defradb/issues/1504))
+* Provide tests for client introspection query ([#1492](https://github.com/sourcenetwork/defradb/issues/1492))
+* Convert explain count tests to new explain setup ([#1488](https://github.com/sourcenetwork/defradb/issues/1488))
+* Convert explain sum tests to new explain setup ([#1489](https://github.com/sourcenetwork/defradb/issues/1489))
+* Convert explain average tests to new explain setup ([#1487](https://github.com/sourcenetwork/defradb/issues/1487))
+* Convert explain top-level tests to new explain setup ([#1480](https://github.com/sourcenetwork/defradb/issues/1480))
+* Convert explain order tests to new explain setup ([#1478](https://github.com/sourcenetwork/defradb/issues/1478))
+* Convert explain join tests to new explain setup ([#1476](https://github.com/sourcenetwork/defradb/issues/1476))
+* Convert explain dagscan tests to new explain setup ([#1474](https://github.com/sourcenetwork/defradb/issues/1474))
+* Add tests to assert schema id order independence ([#1456](https://github.com/sourcenetwork/defradb/issues/1456))
+* Capitalize all integration schema types ([#1445](https://github.com/sourcenetwork/defradb/issues/1445))
+* Convert explain limit tests to new explain setup ([#1446](https://github.com/sourcenetwork/defradb/issues/1446))
+* Improve change detector performance ([#1433](https://github.com/sourcenetwork/defradb/issues/1433))
+* Convert mutation explain tests to new explain setup ([#1416](https://github.com/sourcenetwork/defradb/issues/1416))
+* Convert filter explain tests to new explain setup ([#1380](https://github.com/sourcenetwork/defradb/issues/1380))
+* Retry test doc mutation on transaction conflict ([#1366](https://github.com/sourcenetwork/defradb/issues/1366))
+
+### Continuous integration
+
+* Remove secret ssh key stuff from change detector wf ([#1438](https://github.com/sourcenetwork/defradb/issues/1438))
+* Fix the SSH security issue from AMI scan report ([#1426](https://github.com/sourcenetwork/defradb/issues/1426))
+* Add a separate workflow to run the linter ([#1434](https://github.com/sourcenetwork/defradb/issues/1434))
+* Allow CI to work from forked repo ([#1392](https://github.com/sourcenetwork/defradb/issues/1392))
+* Bump go version within packer for AWS AMI ([#1344](https://github.com/sourcenetwork/defradb/issues/1344))
+
+### Chore
+
+* Enshrine defra logger names ([#1410](https://github.com/sourcenetwork/defradb/issues/1410))
+* Remove some dead code ([#1470](https://github.com/sourcenetwork/defradb/issues/1470))
+* Update graphql-go ([#1422](https://github.com/sourcenetwork/defradb/issues/1422))
+* Improve logging consistency ([#1424](https://github.com/sourcenetwork/defradb/issues/1424))
+* Makefile tests with shorter timeout and common flags ([#1397](https://github.com/sourcenetwork/defradb/issues/1397))
+* Move to gofrs/uuid ([#1396](https://github.com/sourcenetwork/defradb/issues/1396))
+* Move to ipfs boxo ([#1393](https://github.com/sourcenetwork/defradb/issues/1393))
+* Document collection.txn ([#1363](https://github.com/sourcenetwork/defradb/issues/1363))
+
+### Bot
+
+* Bump golang.org/x/crypto from 0.8.0 to 0.9.0 ([#1497](https://github.com/sourcenetwork/defradb/issues/1497))
+* Bump golang.org/x/net from 0.9.0 to 0.10.0 ([#1496](https://github.com/sourcenetwork/defradb/issues/1496))
+* Bump google.golang.org/grpc from 1.54.0 to 1.55.0 ([#1464](https://github.com/sourcenetwork/defradb/issues/1464))
+* Bump github.com/ipfs/boxo from 0.8.0 to 0.8.1 ([#1427](https://github.com/sourcenetwork/defradb/issues/1427))
+* Bump golang.org/x/crypto from 0.7.0 to 0.8.0 ([#1398](https://github.com/sourcenetwork/defradb/issues/1398))
+* Bump github.com/spf13/cobra from 1.6.1 to 1.7.0 ([#1399](https://github.com/sourcenetwork/defradb/issues/1399))
+* Bump github.com/ipfs/go-blockservice from 0.5.0 to 0.5.1 ([#1300](https://github.com/sourcenetwork/defradb/issues/1300))
+* Bump github.com/ipfs/go-cid from 0.4.0 to 0.4.1 ([#1301](https://github.com/sourcenetwork/defradb/issues/1301))
+
+
+## [v0.5.0](https://github.com/sourcenetwork/defradb/compare/v0.4.0...v0.5.0)
+
+> 2023-04-12
+
+DefraDB v0.5 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+There are many new features in this release, but most importantly, this is the first open source release for DefraDB. As such, this release focused on various quality of life changes and refactors, bug fixes, and overall cleanliness of the repo so it can effectively be used and tested in the public domain.
+
+To get a full outline of the changes, we invite you to review the official changelog below. Some highlights are the first iteration of our schema update system, allowing developers to add new fields to schemas using our JSON Patch based DDL, a new DAG based delete system which will persist "soft-delete" ops into the CRDT Merkle DAG, and an early prototype for our collection level peer-to-peer synchronization.
+
+This release does include a Breaking Change to existing v0.4.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Add document delete mechanics ([#1263](https://github.com/sourcenetwork/defradb/issues/1263))
+* Ability to explain an executed request ([#1188](https://github.com/sourcenetwork/defradb/issues/1188))
+* Add SchemaPatch CLI command ([#1250](https://github.com/sourcenetwork/defradb/issues/1250))
+* Add support for one-one mutation from sec. side ([#1247](https://github.com/sourcenetwork/defradb/issues/1247))
+* Store only key in DAG instead of dockey path ([#1245](https://github.com/sourcenetwork/defradb/issues/1245))
+* Add collectionId field to commit field ([#1235](https://github.com/sourcenetwork/defradb/issues/1235))
+* Add field kind substitution for PatchSchema ([#1223](https://github.com/sourcenetwork/defradb/issues/1223))
+* Add dockey field for commit field ([#1216](https://github.com/sourcenetwork/defradb/issues/1216))
+* Allow new fields to be added locally to schema ([#1139](https://github.com/sourcenetwork/defradb/issues/1139))
+* Add `like` sub-string filter ([#1091](https://github.com/sourcenetwork/defradb/issues/1091))
+* Add ability for P2P to wait for pushlog by peer ([#1098](https://github.com/sourcenetwork/defradb/issues/1098))
+* Add P2P collection topic subscription ([#1086](https://github.com/sourcenetwork/defradb/issues/1086))
+* Add support for schema version id in queries ([#1067](https://github.com/sourcenetwork/defradb/issues/1067))
+* Add schema version id to commit queries ([#1061](https://github.com/sourcenetwork/defradb/issues/1061))
+* Persist schema version at time of commit ([#1055](https://github.com/sourcenetwork/defradb/issues/1055))
+* Add ability to input simple explain type arg ([#1039](https://github.com/sourcenetwork/defradb/issues/1039))
+
+### Fixes
+
+* API address parameter validation ([#1311](https://github.com/sourcenetwork/defradb/issues/1311))
+* Improve error message for NonNull GQL types ([#1333](https://github.com/sourcenetwork/defradb/issues/1333))
+* Handle panics in the rpc server ([#1330](https://github.com/sourcenetwork/defradb/issues/1330))
+* Handle returned error in select.go ([#1329](https://github.com/sourcenetwork/defradb/issues/1329))
+* Resolve handful of CLI issues ([#1318](https://github.com/sourcenetwork/defradb/issues/1318))
+* Only check for events queue on subscription request ([#1326](https://github.com/sourcenetwork/defradb/issues/1326))
+* Remove client Create/UpdateCollection ([#1309](https://github.com/sourcenetwork/defradb/issues/1309))
+* CLI to display specific command usage help ([#1314](https://github.com/sourcenetwork/defradb/issues/1314))
+* Fix P2P collection CLI commands ([#1295](https://github.com/sourcenetwork/defradb/issues/1295))
+* Dont double up badger file path ([#1299](https://github.com/sourcenetwork/defradb/issues/1299))
+* Update immutable package ([#1290](https://github.com/sourcenetwork/defradb/issues/1290))
+* Fix panic on success of Add/RemoveP2PCollections ([#1297](https://github.com/sourcenetwork/defradb/issues/1297))
+* Fix deadlock on memory-datastore Close ([#1273](https://github.com/sourcenetwork/defradb/issues/1273))
+* Determine if query is introspection query ([#1255](https://github.com/sourcenetwork/defradb/issues/1255))
+* Allow newly added fields to sync via p2p ([#1226](https://github.com/sourcenetwork/defradb/issues/1226))
+* Expose `ExplainEnum` in the GQL schema ([#1204](https://github.com/sourcenetwork/defradb/issues/1204))
+* Resolve aggregates' mapping with deep nested subtypes ([#1175](https://github.com/sourcenetwork/defradb/issues/1175))
+* Make sort stable and handle nil comparison ([#1094](https://github.com/sourcenetwork/defradb/issues/1094))
+* Change successful schema add status to 200 ([#1106](https://github.com/sourcenetwork/defradb/issues/1106))
+* Add delay in P2P test util execution ([#1093](https://github.com/sourcenetwork/defradb/issues/1093))
+* Ensure errors test don't hard expect folder name ([#1072](https://github.com/sourcenetwork/defradb/issues/1072))
+* Remove potential P2P deadlock ([#1056](https://github.com/sourcenetwork/defradb/issues/1056))
+* Rework the P2P integration tests ([#989](https://github.com/sourcenetwork/defradb/issues/989))
+* Improve DAG sync with highly concurrent updates ([#1031](https://github.com/sourcenetwork/defradb/issues/1031))
+
+### Documentation
+
+* Update docs for the v0.5 release ([#1320](https://github.com/sourcenetwork/defradb/issues/1320))
+* Document client interfaces in client/db.go ([#1305](https://github.com/sourcenetwork/defradb/issues/1305))
+* Document client Description types ([#1307](https://github.com/sourcenetwork/defradb/issues/1307))
+* Improve security policy ([#1240](https://github.com/sourcenetwork/defradb/issues/1240))
+* Add security disclosure policy ([#1194](https://github.com/sourcenetwork/defradb/issues/1194))
+* Correct commits query example in readme ([#1172](https://github.com/sourcenetwork/defradb/issues/1172))
+
+### Refactoring
+
+* Improve p2p collection operations on peer ([#1286](https://github.com/sourcenetwork/defradb/issues/1286))
+* Migrate gql introspection tests to new framework ([#1211](https://github.com/sourcenetwork/defradb/issues/1211))
+* Reorganise client transaction related interfaces ([#1180](https://github.com/sourcenetwork/defradb/issues/1180))
+* Config-local viper, rootdir, and logger parsing ([#1132](https://github.com/sourcenetwork/defradb/issues/1132))
+* Migrate mutation-relation tests to new framework ([#1109](https://github.com/sourcenetwork/defradb/issues/1109))
+* Rework integration test framework ([#1089](https://github.com/sourcenetwork/defradb/issues/1089))
+* Generate gql types using col. desc ([#1080](https://github.com/sourcenetwork/defradb/issues/1080))
+* Extract config errors to dedicated file ([#1107](https://github.com/sourcenetwork/defradb/issues/1107))
+* Change terminology from query to request ([#1054](https://github.com/sourcenetwork/defradb/issues/1054))
+* Allow db keys to handle multiple schema versions ([#1026](https://github.com/sourcenetwork/defradb/issues/1026))
+* Extract query schema errors to dedicated file ([#1037](https://github.com/sourcenetwork/defradb/issues/1037))
+* Extract planner errors to dedicated file ([#1034](https://github.com/sourcenetwork/defradb/issues/1034))
+* Extract query parser errors to dedicated file ([#1035](https://github.com/sourcenetwork/defradb/issues/1035))
+
+### Testing
+
+* Remove test reference to DEFRA_ROOTDIR env var ([#1328](https://github.com/sourcenetwork/defradb/issues/1328))
+* Expand tests for Peer subscribe actions ([#1287](https://github.com/sourcenetwork/defradb/issues/1287))
+* Fix flaky TestCloseThroughContext test ([#1265](https://github.com/sourcenetwork/defradb/issues/1265))
+* Add gql introspection tests for patch schema ([#1219](https://github.com/sourcenetwork/defradb/issues/1219))
+* Explicitly state change detector split for test ([#1228](https://github.com/sourcenetwork/defradb/issues/1228))
+* Add test for successful one-one create mutation ([#1215](https://github.com/sourcenetwork/defradb/issues/1215))
+* Ensure that all databases are always closed on exit ([#1187](https://github.com/sourcenetwork/defradb/issues/1187))
+* Add P2P tests for Schema Update adding field ([#1182](https://github.com/sourcenetwork/defradb/issues/1182))
+* Migrate P2P/state tests to new framework ([#1160](https://github.com/sourcenetwork/defradb/issues/1160))
+* Remove sleep from subscription tests ([#1156](https://github.com/sourcenetwork/defradb/issues/1156))
+* Fetch documents on test execution start ([#1163](https://github.com/sourcenetwork/defradb/issues/1163))
+* Introduce basic testing for the `version` module ([#1111](https://github.com/sourcenetwork/defradb/issues/1111))
+* Boost test coverage for collection_update ([#1050](https://github.com/sourcenetwork/defradb/issues/1050))
+* Wait between P2P update retry attempts ([#1052](https://github.com/sourcenetwork/defradb/issues/1052))
+* Exclude auto-generated protobuf files from codecov ([#1048](https://github.com/sourcenetwork/defradb/issues/1048))
+* Add P2P tests for relational docs ([#1042](https://github.com/sourcenetwork/defradb/issues/1042))
+
+### Continuous integration
+
+* Add workflow that builds DefraDB AMI upon tag push ([#1304](https://github.com/sourcenetwork/defradb/issues/1304))
+* Allow PR title to end with a capital letter ([#1291](https://github.com/sourcenetwork/defradb/issues/1291))
+* Changes for `dependabot` to be well-behaved ([#1165](https://github.com/sourcenetwork/defradb/issues/1165))
+* Skip benchmarks for dependabot ([#1144](https://github.com/sourcenetwork/defradb/issues/1144))
+* Add workflow to ensure deps build properly ([#1078](https://github.com/sourcenetwork/defradb/issues/1078))
+* Runner and Builder Containerfiles ([#951](https://github.com/sourcenetwork/defradb/issues/951))
+* Fix go-header linter rule to be any year ([#1021](https://github.com/sourcenetwork/defradb/issues/1021))
+
+### Chore
+
+* Add Islam as contributor ([#1302](https://github.com/sourcenetwork/defradb/issues/1302))
+* Update go-libp2p to 0.26.4 ([#1257](https://github.com/sourcenetwork/defradb/issues/1257))
+* Improve the test coverage of datastore ([#1203](https://github.com/sourcenetwork/defradb/issues/1203))
+* Add issue and discussion templates ([#1193](https://github.com/sourcenetwork/defradb/issues/1193))
+* Bump libp2p/go-libp2p-kad-dht from 0.21.0 to 0.21.1 ([#1146](https://github.com/sourcenetwork/defradb/issues/1146))
+* Enable dependabot ([#1120](https://github.com/sourcenetwork/defradb/issues/1120))
+* Update `opentelemetry` dependencies ([#1114](https://github.com/sourcenetwork/defradb/issues/1114))
+* Update dependencies including go-ipfs ([#1112](https://github.com/sourcenetwork/defradb/issues/1112))
+* Bump to GoLang v1.19 ([#818](https://github.com/sourcenetwork/defradb/issues/818))
+* Remove versionedScan node ([#1049](https://github.com/sourcenetwork/defradb/issues/1049))
+
+### Bot
+
+* Bump github.com/multiformats/go-multiaddr from 0.8.0 to 0.9.0 ([#1277](https://github.com/sourcenetwork/defradb/issues/1277))
+* Bump google.golang.org/grpc from 1.53.0 to 1.54.0 ([#1233](https://github.com/sourcenetwork/defradb/issues/1233))
+* Bump github.com/multiformats/go-multibase from 0.1.1 to 0.2.0 ([#1230](https://github.com/sourcenetwork/defradb/issues/1230))
+* Bump github.com/ipfs/go-libipfs from 0.6.2 to 0.7.0 ([#1231](https://github.com/sourcenetwork/defradb/issues/1231))
+* Bump github.com/ipfs/go-cid from 0.3.2 to 0.4.0 ([#1200](https://github.com/sourcenetwork/defradb/issues/1200))
+* Bump github.com/ipfs/go-ipfs-blockstore from 1.2.0 to 1.3.0 ([#1199](https://github.com/sourcenetwork/defradb/issues/1199))
+* Bump github.com/stretchr/testify from 1.8.1 to 1.8.2 ([#1198](https://github.com/sourcenetwork/defradb/issues/1198))
+* Bump github.com/ipfs/go-libipfs from 0.6.1 to 0.6.2 ([#1201](https://github.com/sourcenetwork/defradb/issues/1201))
+* Bump golang.org/x/crypto from 0.6.0 to 0.7.0 ([#1197](https://github.com/sourcenetwork/defradb/issues/1197))
+* Bump libp2p/go-libp2p-gostream from 0.5.0 to 0.6.0 ([#1152](https://github.com/sourcenetwork/defradb/issues/1152))
+* Bump github.com/ipfs/go-libipfs from 0.5.0 to 0.6.1 ([#1166](https://github.com/sourcenetwork/defradb/issues/1166))
+* Bump github.com/ugorji/go/codec from 1.2.9 to 1.2.11 ([#1173](https://github.com/sourcenetwork/defradb/issues/1173))
+* Bump github.com/libp2p/go-libp2p-pubsub from 0.9.0 to 0.9.3 ([#1183](https://github.com/sourcenetwork/defradb/issues/1183))
+
+
+## [v0.4.0](https://github.com/sourcenetwork/defradb/compare/v0.3.1...v0.4.0)
+
+> 2022-12-23
+
+DefraDB v0.4 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+There are various new features in this release - some of which are breaking - and we invite you to review the official changelog below. Some highlights are persistence of replicators, DateTime scalars, TLS support, and GQL subscriptions.
+
+This release does include a Breaking Change to existing v0.3.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Add basic metric functionality ([#971](https://github.com/sourcenetwork/defradb/issues/971))
+* Add thread safe transactional in-memory datastore ([#947](https://github.com/sourcenetwork/defradb/issues/947))
+* Persist p2p replicators ([#960](https://github.com/sourcenetwork/defradb/issues/960))
+* Add DateTime custom scalars ([#931](https://github.com/sourcenetwork/defradb/issues/931))
+* Add GraphQL subscriptions ([#934](https://github.com/sourcenetwork/defradb/issues/934))
+* Add support for tls ([#885](https://github.com/sourcenetwork/defradb/issues/885))
+* Add group by support for commits ([#887](https://github.com/sourcenetwork/defradb/issues/887))
+* Add depth support for commits ([#889](https://github.com/sourcenetwork/defradb/issues/889))
+* Make dockey optional for allCommits queries ([#847](https://github.com/sourcenetwork/defradb/issues/847))
+* Add WithStack to the errors package ([#870](https://github.com/sourcenetwork/defradb/issues/870))
+* Add event system ([#834](https://github.com/sourcenetwork/defradb/issues/834))
+
+### Fixes
+
+* Correct errors.WithStack behaviour ([#984](https://github.com/sourcenetwork/defradb/issues/984))
+* Correctly handle nested one to one joins ([#964](https://github.com/sourcenetwork/defradb/issues/964))
+* Do not assume parent record exists when joining ([#963](https://github.com/sourcenetwork/defradb/issues/963))
+* Change time format for HTTP API log ([#910](https://github.com/sourcenetwork/defradb/issues/910))
+* Error if group select contains non-group-by fields ([#898](https://github.com/sourcenetwork/defradb/issues/898))
+* Add inspection of values for ENV flags ([#900](https://github.com/sourcenetwork/defradb/issues/900))
+* Remove panics from document ([#881](https://github.com/sourcenetwork/defradb/issues/881))
+* Add __typename support ([#871](https://github.com/sourcenetwork/defradb/issues/871))
+* Handle subscriber close ([#877](https://github.com/sourcenetwork/defradb/issues/877))
+* Publish update events post commit ([#866](https://github.com/sourcenetwork/defradb/issues/866))
+
+### Refactoring
+
+* Make rootstore require Batching and TxnDatastore ([#940](https://github.com/sourcenetwork/defradb/issues/940))
+* Conceptually clarify schema vs query-language ([#924](https://github.com/sourcenetwork/defradb/issues/924))
+* Decouple db.db from gql ([#912](https://github.com/sourcenetwork/defradb/issues/912))
+* Merkle clock heads cleanup ([#918](https://github.com/sourcenetwork/defradb/issues/918))
+* Simplify dag fetcher ([#913](https://github.com/sourcenetwork/defradb/issues/913))
+* Cleanup parsing logic ([#909](https://github.com/sourcenetwork/defradb/issues/909))
+* Move planner outside the gql directory ([#907](https://github.com/sourcenetwork/defradb/issues/907))
+* Refactor commit nodes ([#892](https://github.com/sourcenetwork/defradb/issues/892))
+* Make latest commits syntax sugar ([#890](https://github.com/sourcenetwork/defradb/issues/890))
+* Remove commit query ([#841](https://github.com/sourcenetwork/defradb/issues/841))
+
+### Testing
+
+* Add event tests ([#965](https://github.com/sourcenetwork/defradb/issues/965))
+* Add new setup for testing explain functionality ([#949](https://github.com/sourcenetwork/defradb/issues/949))
+* Add txn relation-type delete and create tests ([#875](https://github.com/sourcenetwork/defradb/issues/875))
+* Skip change detection for tests that assert panic ([#883](https://github.com/sourcenetwork/defradb/issues/883))
+
+### Continuous integration
+
+* Bump all gh-action versions to support node16 ([#990](https://github.com/sourcenetwork/defradb/issues/990))
+* Bump ssh-agent action to v0.7.0 ([#978](https://github.com/sourcenetwork/defradb/issues/978))
+* Add error message format check ([#901](https://github.com/sourcenetwork/defradb/issues/901))
+
+### Chore
+
+* Extract (events, merkle) errors to errors.go ([#973](https://github.com/sourcenetwork/defradb/issues/973))
+* Extract (datastore, db) errors to errors.go ([#969](https://github.com/sourcenetwork/defradb/issues/969))
+* Extract (connor, crdt, core) errors to errors.go ([#968](https://github.com/sourcenetwork/defradb/issues/968))
+* Extract inline (http and client) errors to errors.go ([#967](https://github.com/sourcenetwork/defradb/issues/967))
+* Update badger version ([#966](https://github.com/sourcenetwork/defradb/issues/966))
+* Move Option and Enumerable to immutables ([#939](https://github.com/sourcenetwork/defradb/issues/939))
+* Add configuration of external loggers ([#942](https://github.com/sourcenetwork/defradb/issues/942))
+* Strip DSKey prefixes and simplify NewDataStoreKey ([#944](https://github.com/sourcenetwork/defradb/issues/944))
+* Include version metadata in cross-building ([#930](https://github.com/sourcenetwork/defradb/issues/930))
+* Update to v0.23.2 the libP2P package ([#908](https://github.com/sourcenetwork/defradb/issues/908))
+* Remove `ipfslite` dependency ([#739](https://github.com/sourcenetwork/defradb/issues/739))
+
+
+
+## [v0.3.1](https://github.com/sourcenetwork/defradb/compare/v0.3.0...v0.3.1)
+
+> 2022-09-23
+
+DefraDB v0.3.1 is a minor release, primarily focusing on additional/extended features and fixes of items added in the `v0.3.0` release.
+
+### Features
+
+* Add cid support for allCommits ([#857](https://github.com/sourcenetwork/defradb/issues/857))
+* Add offset support to allCommits ([#859](https://github.com/sourcenetwork/defradb/issues/859))
+* Add limit support to allCommits query ([#856](https://github.com/sourcenetwork/defradb/issues/856))
+* Add order support to allCommits ([#845](https://github.com/sourcenetwork/defradb/issues/845))
+* Display CLI usage on user error ([#819](https://github.com/sourcenetwork/defradb/issues/819))
+* Add support for dockey filters in child joins ([#806](https://github.com/sourcenetwork/defradb/issues/806))
+* Add sort support for numeric aggregates ([#786](https://github.com/sourcenetwork/defradb/issues/786))
+* Allow filtering by nil ([#789](https://github.com/sourcenetwork/defradb/issues/789))
+* Add aggregate offset support ([#778](https://github.com/sourcenetwork/defradb/issues/778))
+* Remove filter depth limit ([#777](https://github.com/sourcenetwork/defradb/issues/777))
+* Add support for and-or inline array aggregate filters ([#779](https://github.com/sourcenetwork/defradb/issues/779))
+* Add limit support for aggregates ([#771](https://github.com/sourcenetwork/defradb/issues/771))
+* Add support for inline arrays of nillable types ([#759](https://github.com/sourcenetwork/defradb/issues/759))
+* Create errors package ([#548](https://github.com/sourcenetwork/defradb/issues/548))
+* Add ability to display peer id ([#719](https://github.com/sourcenetwork/defradb/issues/719))
+* Add a config option to set the vlog max file size ([#743](https://github.com/sourcenetwork/defradb/issues/743))
+* Explain `topLevelNode` like a `MultiNode` plan ([#749](https://github.com/sourcenetwork/defradb/issues/749))
+* Make `topLevelNode` explainable ([#737](https://github.com/sourcenetwork/defradb/issues/737))
+
+### Fixes
+
+* Order subtype without selecting the join child ([#810](https://github.com/sourcenetwork/defradb/issues/810))
+* Correctly handles nil one-one joins ([#837](https://github.com/sourcenetwork/defradb/issues/837))
+* Reset scan node for each join ([#828](https://github.com/sourcenetwork/defradb/issues/828))
+* Handle filter input field argument being nil ([#787](https://github.com/sourcenetwork/defradb/issues/787))
+* Ensure CLI outputs JSON to stdout when directed to pipe ([#804](https://github.com/sourcenetwork/defradb/issues/804))
+* Error if given the wrong side of a one-one relationship ([#795](https://github.com/sourcenetwork/defradb/issues/795))
+* Add object marker to enable return of empty docs ([#800](https://github.com/sourcenetwork/defradb/issues/800))
+* Resolve the extra `typeIndexJoin`s for `_avg` aggregate ([#774](https://github.com/sourcenetwork/defradb/issues/774))
+* Remove _like filter operator ([#797](https://github.com/sourcenetwork/defradb/issues/797))
+* Remove having gql types ([#785](https://github.com/sourcenetwork/defradb/issues/785))
+* Error if child _group selected without parent groupBy ([#781](https://github.com/sourcenetwork/defradb/issues/781))
+* Error nicely on missing field specifier ([#782](https://github.com/sourcenetwork/defradb/issues/782))
+* Handle order input field argument being nil ([#701](https://github.com/sourcenetwork/defradb/issues/701))
+* Change output to outputpath in config file template for logger ([#716](https://github.com/sourcenetwork/defradb/issues/716))
+* Delete mutations not correctly persisting all keys ([#731](https://github.com/sourcenetwork/defradb/issues/731))
+
+### Tooling
+
+* Ban the usage of `ioutil` package ([#747](https://github.com/sourcenetwork/defradb/issues/747))
+* Migrate from CircleCi to GitHub Actions ([#679](https://github.com/sourcenetwork/defradb/issues/679))
+
+### Documentation
+
+* Clarify meaning of url param, update in-repo CLI docs ([#814](https://github.com/sourcenetwork/defradb/issues/814))
+* Disclaimer of exposed to network and not encrypted ([#793](https://github.com/sourcenetwork/defradb/issues/793))
+* Update logo to respect theme ([#728](https://github.com/sourcenetwork/defradb/issues/728))
+
+### Refactoring
+
+* Replace all `interface{}` with `any` alias ([#805](https://github.com/sourcenetwork/defradb/issues/805))
+* Use fastjson to parse mutation data string ([#772](https://github.com/sourcenetwork/defradb/issues/772))
+* Rework limit node flow ([#767](https://github.com/sourcenetwork/defradb/issues/767))
+* Make Option immutable ([#769](https://github.com/sourcenetwork/defradb/issues/769))
+* Rework sum and count nodes to make use of generics ([#757](https://github.com/sourcenetwork/defradb/issues/757))
+* Remove some possible panics from codebase ([#732](https://github.com/sourcenetwork/defradb/issues/732))
+* Change logging calls to use feedback in CLI package ([#714](https://github.com/sourcenetwork/defradb/issues/714))
+
+### Testing
+
+* Add tests for aggs with nil filters ([#813](https://github.com/sourcenetwork/defradb/issues/813))
+* Add not equals filter tests ([#798](https://github.com/sourcenetwork/defradb/issues/798))
+* Fix `cli/peerid_test` to not clash addresses ([#766](https://github.com/sourcenetwork/defradb/issues/766))
+* Add change detector summary to test readme ([#754](https://github.com/sourcenetwork/defradb/issues/754))
+* Add tests for inline array grouping ([#752](https://github.com/sourcenetwork/defradb/issues/752))
+
+### Continuous integration
+
+* Reduce test resource usage and test with file db ([#791](https://github.com/sourcenetwork/defradb/issues/791))
+* Add makefile target to verify the local module cache ([#775](https://github.com/sourcenetwork/defradb/issues/775))
+* Allow PR titles to end with a number ([#745](https://github.com/sourcenetwork/defradb/issues/745))
+* Add a workflow to validate pull request titles ([#734](https://github.com/sourcenetwork/defradb/issues/734))
+* Fix the linter version to `v1.47` ([#726](https://github.com/sourcenetwork/defradb/issues/726))
+
+### Chore
+
+* Remove file system paths from resulting executable ([#831](https://github.com/sourcenetwork/defradb/issues/831))
+* Add goimports linter for consistent imports ordering ([#816](https://github.com/sourcenetwork/defradb/issues/816))
+* Improve UX by providing more information ([#802](https://github.com/sourcenetwork/defradb/issues/802))
+* Change to defra errors and handle errors stacktrace ([#794](https://github.com/sourcenetwork/defradb/issues/794))
+* Clean up `go.mod` with pruned module graphs ([#756](https://github.com/sourcenetwork/defradb/issues/756))
+* Update to v0.20.3 of libp2p ([#740](https://github.com/sourcenetwork/defradb/issues/740))
+* Bump to GoLang `v1.18` ([#721](https://github.com/sourcenetwork/defradb/issues/721))
+
+
+
+## [v0.3.0](https://github.com/sourcenetwork/defradb/compare/v0.2.1...v0.3.0)
+
+> 2022-08-02
+
+DefraDB v0.3 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+There are *several* new features in this release, and we invite you to review the official changelog below. Some highlights are various new features for Grouping & Aggregation for the query system, like top-level aggregation and group filtering. Moreover, a brand new Query Explain system was added to introspect the execution plans created by DefraDB. Lastly we introduced a revamped CLI configuration system.
+
+This release does include a Breaking Change to existing v0.2.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Add named config overrides ([#659](https://github.com/sourcenetwork/defradb/issues/659))
+* Expose color and caller log options, add validation ([#652](https://github.com/sourcenetwork/defradb/issues/652))
+* Add ability to explain `groupNode` and its attribute(s). ([#641](https://github.com/sourcenetwork/defradb/issues/641))
+* Add primary directive for schema definitions ([@primary](https://github.com/primary)) ([#650](https://github.com/sourcenetwork/defradb/issues/650))
+* Add support for aggregate filters on inline arrays ([#622](https://github.com/sourcenetwork/defradb/issues/622))
+* Add explainable renderLimitNode & hardLimitNode attributes. ([#614](https://github.com/sourcenetwork/defradb/issues/614))
+* Add support for top level aggregates ([#594](https://github.com/sourcenetwork/defradb/issues/594))
+* Update `countNode` explanation to be consistent. ([#600](https://github.com/sourcenetwork/defradb/issues/600))
+* Add support for stdin as input in CLI ([#608](https://github.com/sourcenetwork/defradb/issues/608))
+* Explain `cid` & `field` attributes for `dagScanNode` ([#598](https://github.com/sourcenetwork/defradb/issues/598))
+* Add ability to explain `dagScanNode` attribute(s). ([#560](https://github.com/sourcenetwork/defradb/issues/560))
+* Add the ability to send user feedback to the console even when logging to file. ([#568](https://github.com/sourcenetwork/defradb/issues/568))
+* Add ability to explain `sortNode` attribute(s). ([#558](https://github.com/sourcenetwork/defradb/issues/558))
+* Add ability to explain `sumNode` attribute(s). ([#559](https://github.com/sourcenetwork/defradb/issues/559))
+* Introduce top-level config package ([#389](https://github.com/sourcenetwork/defradb/issues/389))
+* Add ability to explain `updateNode` attributes. ([#514](https://github.com/sourcenetwork/defradb/issues/514))
+* Add `typeIndexJoin` explainable attributes. ([#499](https://github.com/sourcenetwork/defradb/issues/499))
+* Add support to explain `countNode` attributes. ([#504](https://github.com/sourcenetwork/defradb/issues/504))
+* Add CORS capability to HTTP API ([#467](https://github.com/sourcenetwork/defradb/issues/467))
+* Add explanation of spans for `scanNode`. ([#492](https://github.com/sourcenetwork/defradb/issues/492))
+* Add ability to Explain the response plan. ([#385](https://github.com/sourcenetwork/defradb/issues/385))
+* Add aggregate filter support for groups only ([#426](https://github.com/sourcenetwork/defradb/issues/426))
+* Configurable caller option in logger ([#416](https://github.com/sourcenetwork/defradb/issues/416))
+* Add Average aggregate support ([#383](https://github.com/sourcenetwork/defradb/issues/383))
+* Allow summation of aggregates ([#341](https://github.com/sourcenetwork/defradb/issues/341))
+* Add ability to check DefraDB CLI version. ([#339](https://github.com/sourcenetwork/defradb/issues/339))
+
+### Fixes
+
+* Add a check to ensure limit is not 0 when evaluating query limit and offset ([#706](https://github.com/sourcenetwork/defradb/issues/706))
+* Support multiple `--logger` flags ([#704](https://github.com/sourcenetwork/defradb/issues/704))
+* Return without an error if relation is finalized ([#698](https://github.com/sourcenetwork/defradb/issues/698))
+* Logger not correctly applying named config ([#696](https://github.com/sourcenetwork/defradb/issues/696))
+* Add content-type media type parsing ([#678](https://github.com/sourcenetwork/defradb/issues/678))
+* Remove portSyncLock deadlock condition ([#671](https://github.com/sourcenetwork/defradb/issues/671))
+* Silence cobra default errors and usage printing ([#668](https://github.com/sourcenetwork/defradb/issues/668))
+* Add stdout validation when setting logging output path ([#666](https://github.com/sourcenetwork/defradb/issues/666))
+* Consider `--logoutput` CLI flag properly ([#645](https://github.com/sourcenetwork/defradb/issues/645))
+* Handle errors and responses in CLI `client` commands ([#579](https://github.com/sourcenetwork/defradb/issues/579))
+* Rename aggregate gql types ([#638](https://github.com/sourcenetwork/defradb/issues/638))
+* Error when attempting to insert value into relationship field ([#632](https://github.com/sourcenetwork/defradb/issues/632))
+* Allow adding of new schema to database ([#635](https://github.com/sourcenetwork/defradb/issues/635))
+* Correctly parse dockey in broadcast log event. ([#631](https://github.com/sourcenetwork/defradb/issues/631))
+* Increase system's open files limit in integration tests ([#627](https://github.com/sourcenetwork/defradb/issues/627))
+* Avoid populating `order.ordering` with empties. ([#618](https://github.com/sourcenetwork/defradb/issues/618))
+* Change to supporting of non-null inline arrays ([#609](https://github.com/sourcenetwork/defradb/issues/609))
+* Assert fields exist in collection before saving to them ([#604](https://github.com/sourcenetwork/defradb/issues/604))
+* CLI `init` command to reinitialize only config file ([#603](https://github.com/sourcenetwork/defradb/issues/603))
+* Add config and registry clearing to TestLogWritesMessagesToFeedbackLog ([#596](https://github.com/sourcenetwork/defradb/issues/596))
+* Change `$eq` to `_eq` in the failing test. ([#576](https://github.com/sourcenetwork/defradb/issues/576))
+* Resolve failing HTTP API tests via cleanup ([#557](https://github.com/sourcenetwork/defradb/issues/557))
+* Ensure Makefile compatibility with macOS ([#527](https://github.com/sourcenetwork/defradb/issues/527))
+* Separate out iotas in their own blocks. ([#464](https://github.com/sourcenetwork/defradb/issues/464))
+* Use x/cases for titling instead of strings to handle deprecation ([#457](https://github.com/sourcenetwork/defradb/issues/457))
+* Handle limit and offset in sub groups ([#440](https://github.com/sourcenetwork/defradb/issues/440))
+* Issue preventing DB from restarting with no records ([#437](https://github.com/sourcenetwork/defradb/issues/437))
+* log serving HTTP API before goroutine blocks ([#358](https://github.com/sourcenetwork/defradb/issues/358))
+
+### Testing
+
+* Add integration testing for P2P. ([#655](https://github.com/sourcenetwork/defradb/issues/655))
+* Fix formatting of tests with no extra brackets ([#643](https://github.com/sourcenetwork/defradb/issues/643))
+* Add tests for `averageNode` explain. ([#639](https://github.com/sourcenetwork/defradb/issues/639))
+* Add schema integration tests ([#628](https://github.com/sourcenetwork/defradb/issues/628))
+* Add tests for default properties ([#611](https://github.com/sourcenetwork/defradb/issues/611))
+* Specify which collection to update in test framework ([#601](https://github.com/sourcenetwork/defradb/issues/601))
+* Add tests for grouping by undefined value ([#543](https://github.com/sourcenetwork/defradb/issues/543))
+* Add test for querying undefined field ([#544](https://github.com/sourcenetwork/defradb/issues/544))
+* Expand commit query tests ([#541](https://github.com/sourcenetwork/defradb/issues/541))
+* Add cid (time-travel) query tests ([#539](https://github.com/sourcenetwork/defradb/issues/539))
+* Restructure and expand filter tests ([#512](https://github.com/sourcenetwork/defradb/issues/512))
+* Basic unit testing of `node` package ([#503](https://github.com/sourcenetwork/defradb/issues/503))
+* Test filter in filter tests ([#473](https://github.com/sourcenetwork/defradb/issues/473))
+* Add test for deletion of records in a relationship ([#329](https://github.com/sourcenetwork/defradb/issues/329))
+* Benchmark transaction iteration ([#289](https://github.com/sourcenetwork/defradb/issues/289))
+
+### Refactoring
+
+* Improve CLI error handling and fix small issues ([#649](https://github.com/sourcenetwork/defradb/issues/649))
+* Add top-level `version` package ([#583](https://github.com/sourcenetwork/defradb/issues/583))
+* Remove extra log levels ([#634](https://github.com/sourcenetwork/defradb/issues/634))
+* Change `sortNode` to `orderNode`. ([#591](https://github.com/sourcenetwork/defradb/issues/591))
+* Rework update and delete node to remove secondary planner ([#571](https://github.com/sourcenetwork/defradb/issues/571))
+* Trim imported connor package ([#530](https://github.com/sourcenetwork/defradb/issues/530))
+* Internal doc restructure ([#471](https://github.com/sourcenetwork/defradb/issues/471))
+* Copy-paste connor fork into repo ([#567](https://github.com/sourcenetwork/defradb/issues/567))
+* Add safety to the tests, add ability to catch stderr logs and add output path validation ([#552](https://github.com/sourcenetwork/defradb/issues/552))
+* Change handler functions implementation and response formatting ([#498](https://github.com/sourcenetwork/defradb/issues/498))
+* Improve the HTTP API implementation ([#382](https://github.com/sourcenetwork/defradb/issues/382))
+* Use new logger in net/api ([#420](https://github.com/sourcenetwork/defradb/issues/420))
+* Rename NewCidV1_SHA2_256 to mixedCaps ([#415](https://github.com/sourcenetwork/defradb/issues/415))
+* Remove utils package ([#397](https://github.com/sourcenetwork/defradb/issues/397))
+* Rework planNode Next and Value(s) function ([#374](https://github.com/sourcenetwork/defradb/issues/374))
+* Restructure aggregate query syntax ([#373](https://github.com/sourcenetwork/defradb/issues/373))
+* Remove dead code from client package and document remaining ([#356](https://github.com/sourcenetwork/defradb/issues/356))
+* Restructure datastore keys ([#316](https://github.com/sourcenetwork/defradb/issues/316))
+* Add commits lost during github outage ([#303](https://github.com/sourcenetwork/defradb/issues/303))
+* Move public members out of core and base packages ([#295](https://github.com/sourcenetwork/defradb/issues/295))
+* Make db stuff internal/private ([#291](https://github.com/sourcenetwork/defradb/issues/291))
+* Rework client.DB to ensure interface contains only public types ([#277](https://github.com/sourcenetwork/defradb/issues/277))
+* Remove GetPrimaryIndexDocKey from collection interface ([#279](https://github.com/sourcenetwork/defradb/issues/279))
+* Remove DataStoreKey from (public) dockey struct ([#278](https://github.com/sourcenetwork/defradb/issues/278))
+* Renormalize to ensure consistent file line termination. ([#226](https://github.com/sourcenetwork/defradb/issues/226))
+* Strongly typed key refactor ([#17](https://github.com/sourcenetwork/defradb/issues/17))
+
+### Documentation
+
+* Use permanent link to BSL license document ([#692](https://github.com/sourcenetwork/defradb/issues/692))
+* README update v0.3.0 ([#646](https://github.com/sourcenetwork/defradb/issues/646))
+* Improve code documentation ([#533](https://github.com/sourcenetwork/defradb/issues/533))
+* Add CONTRIBUTING.md ([#531](https://github.com/sourcenetwork/defradb/issues/531))
+* Add package level docs for logging lib ([#338](https://github.com/sourcenetwork/defradb/issues/338))
+
+### Tooling
+
+* Include all touched packages in code coverage ([#673](https://github.com/sourcenetwork/defradb/issues/673))
+* Use `gotestsum` over `go test` ([#619](https://github.com/sourcenetwork/defradb/issues/619))
+* Update Github pull request template ([#524](https://github.com/sourcenetwork/defradb/issues/524))
+* Fix the cross-build script ([#460](https://github.com/sourcenetwork/defradb/issues/460))
+* Add test coverage html output ([#466](https://github.com/sourcenetwork/defradb/issues/466))
+* Add linter rule for `goconst`. ([#398](https://github.com/sourcenetwork/defradb/issues/398))
+* Add github PR template. ([#394](https://github.com/sourcenetwork/defradb/issues/394))
+* Disable auto-fixing linter issues by default ([#429](https://github.com/sourcenetwork/defradb/issues/429))
+* Fix linting of empty `else` code blocks ([#402](https://github.com/sourcenetwork/defradb/issues/402))
+* Add the `gofmt` linter rule. ([#405](https://github.com/sourcenetwork/defradb/issues/405))
+* Cleanup linter config file ([#400](https://github.com/sourcenetwork/defradb/issues/400))
+* Add linter rule for copyright headers ([#360](https://github.com/sourcenetwork/defradb/issues/360))
+* Organize our config files and tooling. ([#336](https://github.com/sourcenetwork/defradb/issues/336))
+* Limit line length to 100 characters (linter check) ([#224](https://github.com/sourcenetwork/defradb/issues/224))
+* Ignore db/tests folder and the bench marks. ([#280](https://github.com/sourcenetwork/defradb/issues/280))
+
+### Continuous Integration
+
+* Fix circleci cache permission errors. ([#371](https://github.com/sourcenetwork/defradb/issues/371))
+* Ban extra elses ([#366](https://github.com/sourcenetwork/defradb/issues/366))
+* Fix change-detection to not fail when new tests are added. ([#333](https://github.com/sourcenetwork/defradb/issues/333))
+* Update golang-ci linter and explicit go-setup to use v1.17 ([#331](https://github.com/sourcenetwork/defradb/issues/331))
+* Comment the benchmarking result comparison to the PR ([#305](https://github.com/sourcenetwork/defradb/issues/305))
+* Add benchmark performance comparisons ([#232](https://github.com/sourcenetwork/defradb/issues/232))
+* Add caching / storing of bench report on default branch ([#290](https://github.com/sourcenetwork/defradb/issues/290))
+* Ensure full-benchmarks are ran on a PR-merge. ([#282](https://github.com/sourcenetwork/defradb/issues/282))
+* Add ability to control benchmarks by PR labels. ([#267](https://github.com/sourcenetwork/defradb/issues/267))
+
+### Chore
+
+* Update APL to refer to D2 Foundation ([#711](https://github.com/sourcenetwork/defradb/issues/711))
+* Update gitignore to include `cmd` folders ([#617](https://github.com/sourcenetwork/defradb/issues/617))
+* Enable random execution order of tests ([#554](https://github.com/sourcenetwork/defradb/issues/554))
+* Enable linters exportloopref, nolintlint, whitespace ([#535](https://github.com/sourcenetwork/defradb/issues/535))
+* Add utility for generation of man pages ([#493](https://github.com/sourcenetwork/defradb/issues/493))
+* Add Dockerfile ([#517](https://github.com/sourcenetwork/defradb/issues/517))
+* Enable errorlint linter ([#520](https://github.com/sourcenetwork/defradb/issues/520))
+* Binaries in `cmd` folder, examples in `examples` folder ([#501](https://github.com/sourcenetwork/defradb/issues/501))
+* Improve log outputs ([#506](https://github.com/sourcenetwork/defradb/issues/506))
+* Move testing to top-level `tests` folder ([#446](https://github.com/sourcenetwork/defradb/issues/446))
+* Update dependencies ([#450](https://github.com/sourcenetwork/defradb/issues/450))
+* Update go-ipfs-blockstore and ipfs-lite ([#436](https://github.com/sourcenetwork/defradb/issues/436))
+* Update libp2p dependency to v0.19 ([#424](https://github.com/sourcenetwork/defradb/issues/424))
+* Update ioutil package to io / os packages. ([#376](https://github.com/sourcenetwork/defradb/issues/376))
+* git ignore vscode ([#343](https://github.com/sourcenetwork/defradb/issues/343))
+* Updated README.md contributors section ([#292](https://github.com/sourcenetwork/defradb/issues/292))
+* Update changelog v0.2.1 ([#252](https://github.com/sourcenetwork/defradb/issues/252))
+
+
+
+## [v0.2.1](https://github.com/sourcenetwork/defradb/compare/v0.2.0...v0.2.1)
+
+> 2022-03-04
+
+### Features
+
+* Add ability to delete multiple documents using filter ([#206](https://github.com/sourcenetwork/defradb/issues/206))
+* Add ability to delete multiple documents, using multiple ids ([#196](https://github.com/sourcenetwork/defradb/issues/196))
+
+### Fixes
+
+* Concurrency control of Document using RWMutex ([#213](https://github.com/sourcenetwork/defradb/issues/213))
+* Only log errors and above when benchmarking ([#261](https://github.com/sourcenetwork/defradb/issues/261))
+* Handle proper type conversion on sort nodes ([#228](https://github.com/sourcenetwork/defradb/issues/228))
+* Return empty array if no values found ([#223](https://github.com/sourcenetwork/defradb/issues/223))
+* Close fetcher on error ([#210](https://github.com/sourcenetwork/defradb/issues/210))
+* Installing binary using defradb name ([#190](https://github.com/sourcenetwork/defradb/issues/190))
+
+### Tooling
+
+* Add short benchmark runner option ([#263](https://github.com/sourcenetwork/defradb/issues/263))
+
+### Documentation
+
+* Add data format changes documentation folder ([#89](https://github.com/sourcenetwork/defradb/issues/89))
+* Correcting typos ([#143](https://github.com/sourcenetwork/defradb/issues/143))
+* Update generated CLI docs ([#208](https://github.com/sourcenetwork/defradb/issues/208))
+* Updated readme with P2P section ([#220](https://github.com/sourcenetwork/defradb/issues/220))
+* Update old or missing license headers ([#205](https://github.com/sourcenetwork/defradb/issues/205))
+* Update git-chglog config and template ([#195](https://github.com/sourcenetwork/defradb/issues/195))
+
+### Refactoring
+
+* Introduction of logging system ([#67](https://github.com/sourcenetwork/defradb/issues/67))
+* Restructure db/txn/multistore structures ([#199](https://github.com/sourcenetwork/defradb/issues/199))
+* Initialize database in constructor ([#211](https://github.com/sourcenetwork/defradb/issues/211))
+* Purge all println and ban it ([#253](https://github.com/sourcenetwork/defradb/issues/253))
+
+### Testing
+
+* Detect and force breaking filesystem changes to be documented ([#89](https://github.com/sourcenetwork/defradb/issues/89))
+* Boost collection test coverage ([#183](https://github.com/sourcenetwork/defradb/issues/183))
+
+### Continuous integration
+
+* Combine the Lint and Benchmark workflows so that the benchmark job depends on the lint job in one workflow ([#209](https://github.com/sourcenetwork/defradb/issues/209))
+* Add rule to only run benchmark if other checks are successful ([#194](https://github.com/sourcenetwork/defradb/issues/194))
+* Increase linter timeout ([#230](https://github.com/sourcenetwork/defradb/issues/230))
+
+### Chore
+
+* Remove commented out code ([#238](https://github.com/sourcenetwork/defradb/issues/238))
+* Remove dead code from multi node ([#186](https://github.com/sourcenetwork/defradb/issues/186))
+
+
+
+## [v0.2.0](https://github.com/sourcenetwork/defradb/compare/v0.1.0...v0.2.0)
+
+> 2022-02-07
+
+DefraDB v0.2 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+This release is jam-packed with new features and a small number of breaking changes. Read the full changelog for a detailed description. Most notable features include a new Peer-to-Peer (P2P) data synchronization system, an expanded query system to support GroupBy & Aggregate operations, and lastly TimeTraveling queries allowing to query previous states of a document.
+
+Much more than just that has been added to ensure we're building reliable software expected of any database, such as expanded test & benchmark suites, automated bug detection, performance gains, and more.
+
+This release does include a Breaking Change to existing v0.1 databases regarding the internal data model, which affects the "Content Identifiers" we use to generate DocKeys and VersionIDs. If you need help migrating an existing deployment, reach out at hello@source.network or join our Discord at https://discord.gg/w7jYQVJ.
+
+### Features
+
+* Added Peer-to-Peer networking data synchronization ([#177](https://github.com/sourcenetwork/defradb/issues/177))
+* TimeTraveling (History Traversing) query engine and doc fetcher ([#59](https://github.com/sourcenetwork/defradb/issues/59))
+* Add Document Deletion with a Key ([#150](https://github.com/sourcenetwork/defradb/issues/150))
+* Add support for sum aggregate ([#121](https://github.com/sourcenetwork/defradb/issues/121))
+* Add support for lwwr scalar arrays (full replace on update) ([#115](https://github.com/sourcenetwork/defradb/issues/115))
+* Add count aggregate support ([#102](https://github.com/sourcenetwork/defradb/issues/102))
+* Add support for named relationships ([#108](https://github.com/sourcenetwork/defradb/issues/108))
+* Add multi doc key lookup support ([#76](https://github.com/sourcenetwork/defradb/issues/76))
+* Add basic group by functionality ([#43](https://github.com/sourcenetwork/defradb/issues/43))
+* Update datastore packages to allow use of context ([#48](https://github.com/sourcenetwork/defradb/issues/48))
+
+### Bug fixes
+
+* Only add join if aggregating child object collection ([#188](https://github.com/sourcenetwork/defradb/issues/188))
+* Handle errors generated during input object thunks ([#123](https://github.com/sourcenetwork/defradb/issues/123))
+* Remove new types from in-memory cache on generate error ([#122](https://github.com/sourcenetwork/defradb/issues/122))
+* Support relationships where both fields have the same name ([#109](https://github.com/sourcenetwork/defradb/issues/109))
+* Handle errors generated in fields thunk ([#66](https://github.com/sourcenetwork/defradb/issues/66))
+* Ensure OperationDefinition case has at least one selection ([#24](https://github.com/sourcenetwork/defradb/pull/24))
+* Close datastore iterator on scan close ([#56](https://github.com/sourcenetwork/defradb/pull/56)) (resulted in a panic when using limit)
+* Close superseded iterators before orphaning ([#56](https://github.com/sourcenetwork/defradb/pull/56)) (fixes a panic in the join code)
+* Move discard to after error check ([#88](https://github.com/sourcenetwork/defradb/pull/88)) (did result in panic if transaction creation fails)
+* Check for nil iterator before closing document fetcher ([#108](https://github.com/sourcenetwork/defradb/pull/108))
+
+### Tooling
+* Added benchmark suite ([#160](https://github.com/sourcenetwork/defradb/issues/160))
+
+### Documentation
+
+* Correcting comment typos ([#142](https://github.com/sourcenetwork/defradb/issues/142))
+* Correcting README typos ([#140](https://github.com/sourcenetwork/defradb/issues/140))
+
+### Testing
+
+* Add transaction integration tests ([#175](https://github.com/sourcenetwork/defradb/issues/175))
+* Allow running of tests using badger-file as well as IM options ([#128](https://github.com/sourcenetwork/defradb/issues/128))
+* Add test datastore selection support ([#88](https://github.com/sourcenetwork/defradb/issues/88))
+
+### Refactoring
+
+* Datatype modification protection ([#138](https://github.com/sourcenetwork/defradb/issues/138))
+* Cleanup Linter Complaints and Setup Makefile ([#63](https://github.com/sourcenetwork/defradb/issues/63))
+* Rework document rendering to avoid data duplication and mutation ([#68](https://github.com/sourcenetwork/defradb/issues/68))
+* Remove dependency on concrete datastore implementations from db package ([#51](https://github.com/sourcenetwork/defradb/issues/51))
+* Remove all `errors.Wrap` and update them with `fmt.Errorf`. ([#41](https://github.com/sourcenetwork/defradb/issues/41))
+* Restructure integration tests to provide better visibility ([#15](https://github.com/sourcenetwork/defradb/pull/15))
+* Remove schemaless code branches ([#23](https://github.com/sourcenetwork/defradb/pull/23))
+
+### Performance
+* Add badger multi scan support ([#85](https://github.com/sourcenetwork/defradb/pull/85))
+* Add support for range spans ([#86](https://github.com/sourcenetwork/defradb/pull/86))
+
+### Continuous integration
+
+* Use more accurate test coverage. ([#134](https://github.com/sourcenetwork/defradb/issues/134))
+* Disable Codecov's Patch Check
+* Make codecov less strict for now to unblock development ([#125](https://github.com/sourcenetwork/defradb/issues/125))
+* Add codecov config file. ([#118](https://github.com/sourcenetwork/defradb/issues/118))
+* Add workflow that runs a job on AWS EC2 instance. ([#110](https://github.com/sourcenetwork/defradb/issues/110))
+* Add Code Test Coverage with CodeCov ([#116](https://github.com/sourcenetwork/defradb/issues/116))
+* Integrate GitHub Action for golangci-lint Annotations ([#106](https://github.com/sourcenetwork/defradb/issues/106))
+* Add Linter Check to CircleCi ([#92](https://github.com/sourcenetwork/defradb/issues/92))
+
+### Chore
+
+* Remove the S1038 rule of the gosimple linter. ([#129](https://github.com/sourcenetwork/defradb/issues/129))
+* Update to badger v3, and use badger as default in memory store ([#56](https://github.com/sourcenetwork/defradb/issues/56))
+* Make Cid versions consistent ([#57](https://github.com/sourcenetwork/defradb/issues/57))
+
+
+
+## v0.1.0
+
+> 2021-03-15
+
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7345a58cc8..48671840d5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,89 @@
+
+## [v0.12.0](https://github.com/sourcenetwork/defradb/compare/v0.11.0...v0.12.0)
+
+> 2024-06-28
+
+DefraDB v0.12 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes.
+
+To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.11.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.gg/w7jYQVJ/.
+
+### Features
+
+* Ability to generate a new identity ([#2760](https://github.com/sourcenetwork/defradb/issues/2760))
+* Add async transaction callbacks ([#2708](https://github.com/sourcenetwork/defradb/issues/2708))
+* Allow lens runtime selection via config ([#2684](https://github.com/sourcenetwork/defradb/issues/2684))
+* Sec. indexes on relations ([#2670](https://github.com/sourcenetwork/defradb/issues/2670))
+* Add authentication for ACP ([#2649](https://github.com/sourcenetwork/defradb/issues/2649))
+* Inject ACP instance into the DB instance ([#2633](https://github.com/sourcenetwork/defradb/issues/2633))
+* Keyring ([#2557](https://github.com/sourcenetwork/defradb/issues/2557))
+* Enable sec. indexes with ACP ([#2602](https://github.com/sourcenetwork/defradb/issues/2602))
+
+### Fixes
+
+* Race condition when testing CLI ([#2713](https://github.com/sourcenetwork/defradb/issues/2713))
+* Remove shared mutable state between database instances ([#2777](https://github.com/sourcenetwork/defradb/issues/2777))
+* Change new identity keys to hex format ([#2773](https://github.com/sourcenetwork/defradb/issues/2773))
+* Return slice of correct length from db.AddSchema ([#2765](https://github.com/sourcenetwork/defradb/issues/2765))
+* Use node representation for Block ([#2746](https://github.com/sourcenetwork/defradb/issues/2746))
+* Add version check in basicTxn.Query ([#2742](https://github.com/sourcenetwork/defradb/issues/2742))
+* Merge retry logic ([#2719](https://github.com/sourcenetwork/defradb/issues/2719))
+* Resolve incorrect merge conflict ([#2723](https://github.com/sourcenetwork/defradb/issues/2723))
+* Keyring output ([#2784](https://github.com/sourcenetwork/defradb/issues/2784))
+* Incorporate schema root into docID ([#2701](https://github.com/sourcenetwork/defradb/issues/2701))
+* Make node options composable ([#2648](https://github.com/sourcenetwork/defradb/issues/2648))
+* Remove limit for fetching secondary docs ([#2594](https://github.com/sourcenetwork/defradb/issues/2594))
+
+### Documentation
+
+* Remove reference to client ping from readme ([#2793](https://github.com/sourcenetwork/defradb/issues/2793))
+* Add http/openapi documentation & ci workflow ([#2678](https://github.com/sourcenetwork/defradb/issues/2678))
+* Streamline cli documentation ([#2646](https://github.com/sourcenetwork/defradb/issues/2646))
+* Document Event Update struct ([#2598](https://github.com/sourcenetwork/defradb/issues/2598))
+
+### Refactoring
+
+* Use events to test network logic ([#2700](https://github.com/sourcenetwork/defradb/issues/2700))
+* Change local_acp implementation to use acp_core ([#2691](https://github.com/sourcenetwork/defradb/issues/2691))
+* Rework definition validation ([#2720](https://github.com/sourcenetwork/defradb/issues/2720))
+* Extract definition stuff from collection.go ([#2706](https://github.com/sourcenetwork/defradb/issues/2706))
+* Change counters to support encryption ([#2698](https://github.com/sourcenetwork/defradb/issues/2698))
+* DAG sync and move merge outside of net package ([#2658](https://github.com/sourcenetwork/defradb/issues/2658))
+* Replace subscription events publisher ([#2686](https://github.com/sourcenetwork/defradb/issues/2686))
+* Extract Defra specific logic from ACPLocal type ([#2656](https://github.com/sourcenetwork/defradb/issues/2656))
+* Change from protobuf to cbor for IPLD ([#2604](https://github.com/sourcenetwork/defradb/issues/2604))
+* Reorganize global CLI flags ([#2615](https://github.com/sourcenetwork/defradb/issues/2615))
+* Move internal packages to internal dir ([#2599](https://github.com/sourcenetwork/defradb/issues/2599))
+
+### Testing
+
+* Remove duplicate test ([#2787](https://github.com/sourcenetwork/defradb/issues/2787))
+* Support asserting on doc index in test results ([#2786](https://github.com/sourcenetwork/defradb/issues/2786))
+* Allow test harness to execute benchmarks ([#2740](https://github.com/sourcenetwork/defradb/issues/2740))
+* Add relation substitute mechanic to tests ([#2682](https://github.com/sourcenetwork/defradb/issues/2682))
+* Test node pkg constructor via integration test suite ([#2641](https://github.com/sourcenetwork/defradb/issues/2641))
+
+### Continuous integration
+
+* Cache dependencies to speed up test runs ([#2732](https://github.com/sourcenetwork/defradb/issues/2732))
+
+### Bot
+
+* Update dependencies (bulk dependabot PRs) 24-06-2024 ([#2761](https://github.com/sourcenetwork/defradb/issues/2761))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.13.0 to 7.13.1 in /playground ([#2733](https://github.com/sourcenetwork/defradb/issues/2733))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 7.13.0 to 7.13.1 in /playground ([#2734](https://github.com/sourcenetwork/defradb/issues/2734))
+* Update dependencies (bulk dependabot PRs) 06-17-2024 ([#2730](https://github.com/sourcenetwork/defradb/issues/2730))
+* Bump braces from 3.0.2 to 3.0.3 in /playground ([#2716](https://github.com/sourcenetwork/defradb/issues/2716))
+* Update dependencies (bulk dependabot PRs) 06-10-2024 ([#2705](https://github.com/sourcenetwork/defradb/issues/2705))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 7.11.0 to 7.12.0 in /playground ([#2675](https://github.com/sourcenetwork/defradb/issues/2675))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.11.0 to 7.12.0 in /playground ([#2676](https://github.com/sourcenetwork/defradb/issues/2676))
+* Update dependencies (bulk dependabot PRs) 03-06-2024 ([#2674](https://github.com/sourcenetwork/defradb/issues/2674))
+* Update dependencies (bulk dependabot PRs) 01-06-2024 ([#2660](https://github.com/sourcenetwork/defradb/issues/2660))
+* Bump [@typescript](https://github.com/typescript)-eslint/parser from 7.9.0 to 7.10.0 in /playground ([#2635](https://github.com/sourcenetwork/defradb/issues/2635))
+* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 7.9.0 to 7.10.0 in /playground ([#2637](https://github.com/sourcenetwork/defradb/issues/2637))
+* Bump swagger-ui-react from 5.17.10 to 5.17.12 in /playground ([#2636](https://github.com/sourcenetwork/defradb/issues/2636))
+* Bump google.golang.org/protobuf from 1.33.0 to 1.34.1 ([#2607](https://github.com/sourcenetwork/defradb/issues/2607))
+* Update dependencies (bulk dependabot PRs) 05-20-2024 ([#2631](https://github.com/sourcenetwork/defradb/issues/2631))
+* Update dependencies (bulk dependabot PRs) 05-14-2024 ([#2617](https://github.com/sourcenetwork/defradb/issues/2617))
## [v0.11.0](https://github.com/sourcenetwork/defradb/compare/v0.10.0...v0.11.0)
diff --git a/Makefile b/Makefile
index 658b514a4b..efa4748d40 100644
--- a/Makefile
+++ b/Makefile
@@ -158,9 +158,9 @@ deps\:chglog:
deps\:modules:
go mod download
-.PHONY: deps\:mock
-deps\:mock:
- go install github.com/vektra/mockery/v2@v2.32.0
+.PHONY: deps\:mocks
+deps\:mocks:
+ go install github.com/vektra/mockery/v2@v2.43.0
.PHONY: deps\:playground
deps\:playground:
@@ -173,11 +173,11 @@ deps:
$(MAKE) deps:chglog && \
$(MAKE) deps:lint && \
$(MAKE) deps:test && \
- $(MAKE) deps:mock
+ $(MAKE) deps:mocks
-.PHONY: mock
-mock:
- @$(MAKE) deps:mock
+.PHONY: mocks
+mocks:
+ @$(MAKE) deps:mocks
mockery --config="tools/configs/mockery.yaml"
.PHONY: dev\:start
@@ -356,10 +356,17 @@ chglog:
docs:
@$(MAKE) docs\:cli
@$(MAKE) docs\:manpages
+ @$(MAKE) docs\:http
+ @$(MAKE) toc
.PHONY: docs\:cli
docs\:cli:
- go run cmd/genclidocs/main.go -o docs/cli/
+ rm -f docs/website/references/cli/*.md
+ go run cmd/genclidocs/main.go -o docs/website/references/cli
+
+.PHONY: docs\:http
+docs\:http:
+ go run cmd/genopenapi/main.go | python -m json.tool > docs/website/references/http/openapi.json
.PHONY: docs\:manpages
docs\:manpages:
@@ -369,3 +376,15 @@ docs\:manpages:
docs\:godoc:
godoc -http=:6060
# open http://localhost:6060/pkg/github.com/sourcenetwork/defradb/
+
+.PHONY: toc
+toc:
+ bash tools/scripts/md-toc/gh-md-toc --insert --no-backup --hide-footer --skip-header README.md
+
+.PHONY: fix
+fix:
+ @$(MAKE) deps
+ @$(MAKE) lint\:fix
+ @$(MAKE) tidy
+ @$(MAKE) mocks
+ @$(MAKE) docs
diff --git a/README.md b/README.md
index 220c48f842..6e1f2b3d1a 100644
--- a/README.md
+++ b/README.md
@@ -15,28 +15,29 @@ DefraDB is a user-centric database that prioritizes data ownership, personal pri
Read the documentation on [docs.source.network](https://docs.source.network/).
+
## Table of Contents
-- [Install](#install)
-- [Start](#start)
-- [Configuration](#configuration)
-- [External port binding](#external-port-binding)
-- [Add a schema type](#add-a-schema-type)
-- [Create a document instance](#create-a-document-instance)
-- [Query documents](#query-documents)
-- [Obtain document commits](#obtain-document-commits)
-- [DefraDB Query Language (DQL)](#defradb-query-language-dql)
-- [Peer-to-peer data synchronization](#peer-to-peer-data-synchronization)
- - [Pubsub example](#pubsub-example)
- - [Collection subscription example](#collection-subscription-example)
- - [Replicator example](#replicator-example)
-- [Securing the HTTP API with TLS](#securing-the-http-api-with-tls)
-- [Access Control System](#access-control-system)
-- [Supporting CORS](#supporting-cors)
-- [Backing up and restoring](#backing-up-and-restoring)
-- [Community](#community)
-- [Licensing](#licensing)
-- [Contributors](#contributors)
+
+ * [Install](#install)
+ * [Key Management](#key-management)
+ * [Start](#start)
+ * [Configuration](#configuration)
+ * [External port binding](#external-port-binding)
+ * [Add a schema type](#add-a-schema-type)
+ * [Create a document](#create-a-document)
+ * [Query documents](#query-documents)
+ * [Obtain document commits](#obtain-document-commits)
+ * [DefraDB Query Language (DQL)](#defradb-query-language-dql)
+ * [Peer-to-peer data synchronization](#peer-to-peer-data-synchronization)
+ * [Securing the HTTP API with TLS](#securing-the-http-api-with-tls)
+ * [Access Control System](#access-control-system)
+ * [Supporting CORS](#supporting-cors)
+ * [Backing up and restoring](#backing-up-and-restoring)
+ * [Community](#community)
+ * [Licensing](#licensing)
+ * [Contributors](#contributors)
+
DISCLAIMER: At this early stage, DefraDB does not offer data encryption, and the default configuration exposes the database to the network. The software is provided "as is" and is not guaranteed to be stable, secure, or error-free. We encourage you to experiment with DefraDB and provide feedback, but please do not use it for production purposes until it has been thoroughly tested and developed.
@@ -58,11 +59,38 @@ export PATH=$PATH:$(go env GOPATH)/bin
We recommend experimenting with queries using a native GraphQL client. GraphiQL is a popular option - [download and install it](https://altairgraphql.dev/#download).
+## Key Management
+
+DefraDB has a built in keyring that can be used to store private keys securely.
+
+The following keys are loaded from the keyring on start:
+
+- `peer-key` Ed25519 private key (required)
+- `encryption-key` AES-128, AES-192, or AES-256 key (optional)
+
+To randomly generate the required keys, run the following command:
+
+```
+defradb keyring generate
+```
+
+To import externally generated keys, run the following command:
+
+```
+defradb keyring import
+```
+
+To learn more about the available options:
+
+```
+defradb keyring --help
+```
+
## Start
Start a node by executing `defradb start`. Keep the node running while going through the following examples.
-Verify the local connection to the node works by executing `defradb client ping` in another terminal.
+Verify the local connection to the node works by executing `defradb client collection describe` in another terminal.
## Configuration
@@ -451,5 +479,6 @@ DefraDB's code is released under the [Business Source License (BSL)](licenses/BS
- Orpheus Lummis ([@orpheuslummis](https://github.com/orpheuslummis))
- Fred Carle ([@fredcarle](https://github.com/fredcarle))
- Islam Aliev ([@islamaliev](https://github.com/islamaliev))
+- Keenan Nemetz ([@nasdf](https://github.com/nasdf))
You are invited to contribute to DefraDB. Follow the [Contributing guide](./CONTRIBUTING.md) to get started.
diff --git a/acp/README.md b/acp/README.md
index 697a60a0c2..29244103fa 100644
--- a/acp/README.md
+++ b/acp/README.md
@@ -145,9 +145,31 @@ Here are some valid expression examples. Assuming these `expr` are under a requi
- `expr: owner +reader`
- `expr: owner+reader`
-
## DAC Usage CLI:
+### Authentication
+
+To perform authenticated operations you will need to generate a `secp256k1` key pair.
+
+The command below will generate a new secp256k1 private key and print the 256 bit X coordinate as a hexadecimal value.
+
+```sh
+openssl ecparam -name secp256k1 -genkey | openssl ec -text -noout | head -n5 | tail -n3 | tr -d '\n:\ '
+```
+
+Copy the private key hex from the output.
+
+```sh
+read EC key
+e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
+```
+
+Use the private key to generate authentication tokens for each request.
+
+```sh
+defradb client ... --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
+```
+
### Adding a Policy:
We have in `examples/dpi_policy/user_dpi_policy.yml`:
@@ -176,14 +198,13 @@ resources:
CLI Command:
```sh
-defradb client acp policy add -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f examples/dpi_policy/user_dpi_policy.yml
-
+defradb client acp policy add -f examples/dpi_policy/user_dpi_policy.yml --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
```
Result:
```json
{
- "PolicyID": "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c"
+ "PolicyID": "50d354a91ab1b8fce8a0ae4693de7616fb1d82cfc540f25cfbe11eb0195a5765"
}
```
@@ -192,7 +213,7 @@ Result:
We have in `examples/schema/permissioned/users.graphql`:
```graphql
type Users @policy(
- id: "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c",
+ id: "50d354a91ab1b8fce8a0ae4693de7616fb1d82cfc540f25cfbe11eb0195a5765",
resource: "users"
) {
name: String
@@ -230,7 +251,7 @@ Result:
],
"Indexes": [],
"Policy": {
- "ID": "24ab8cba6d6f0bcfe4d2712c7d95c09dd1b8076ea5a8896476413fd6c891c18c",
+ "ID": "50d354a91ab1b8fce8a0ae4693de7616fb1d82cfc540f25cfbe11eb0195a5765",
"ResourceName": "users"
}
}
@@ -242,7 +263,7 @@ Result:
CLI Command:
```sh
-defradb client collection create -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users '[{ "name": "SecretShahzad" }, { "name": "SecretLone" }]'
+defradb client collection create --name Users '[{ "name": "SecretShahzad" }, { "name": "SecretLone" }]' --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
```
### Create public documents (without identity)
@@ -255,7 +276,7 @@ defradb client collection create --name Users '[{ "name": "PublicShahzad" }, {
### Get all docIDs without an identity (shows only public):
CLI Command:
```sh
-defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j
+defradb client collection docIDs --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
```
Result:
@@ -273,7 +294,7 @@ Result:
### Get all docIDs with an identity (shows public and owned documents):
```sh
-defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j
+defradb client collection docIDs --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
```
Result:
@@ -300,7 +321,7 @@ Result:
### Access the private document (including field names):
CLI Command:
```sh
-defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+defradb client collection get --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
```
Result:
@@ -325,7 +346,7 @@ Error:
### Accessing the private document with wrong identity:
CLI Command:
```sh
-defradb client collection get -i cosmos1x25hhksxhu86r45hqwk28dd70qzux3262hdrll --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+defradb client collection get --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity 4d092126012ebaf56161716018a71630d99443d9d5217e9d8502bb5c5456f2c5
```
Error:
@@ -336,7 +357,7 @@ Error:
### Update private document:
CLI Command:
```sh
-defradb client collection update -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --updater '{ "name": "SecretUpdatedShahzad" }'
+defradb client collection update --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --updater '{ "name": "SecretUpdatedShahzad" }' --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
```
Result:
@@ -352,7 +373,7 @@ Result:
#### Check if it actually got updated:
CLI Command:
```sh
-defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+defradb client collection get --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
```
Result:
@@ -368,7 +389,7 @@ Result:
### Delete private document:
CLI Command:
```sh
-defradb client collection delete -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+defradb client collection delete --name Users --docID "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
```
Result:
@@ -384,7 +405,7 @@ Result:
#### Check if it actually got deleted:
CLI Command:
```sh
-defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a"
+defradb client collection get --name Users "bae-a5830219-b8e7-5791-9836-2e494816fc0a" --identity e3b722906ee4e56368f581cd8b18ab0f48af1ea53e635e3f7b8acd076676f6ac
```
Error:
@@ -408,9 +429,21 @@ Error:
## DAC Usage HTTP:
-HTTP requests work similar to their CLI counter parts, the main difference is that the identity will just be specified within the Auth Header like so: `Authorization: Basic `.
-Note: The `Basic` label will change to `Bearer ` after JWS Authentication Tokens are supported.
+### Authentication
+
+To perform authenticated operations you will need to build and sign a JWT token with the following required fields:
+
+- `sub` public key of the identity
+- `aud` host name of the defradb api
+
+> The `exp` and `nbf` fields should also be set to short-lived durations.
+
+The JWT must be signed with the `secp256k1` private key of the identity you wish to perform actions as.
+
+The signed token must be set on the `Authorization` header of the HTTP request with the `bearer ` prefix prepended to it.
+
+If authentication fails for any reason a `403` forbidden response will be returned.
## _AAC DPI Rules (coming soon)_
## _AAC Usage: (coming soon)_
diff --git a/acp/acp_local.go b/acp/acp_local.go
index e569efd5d0..b62c4a454c 100644
--- a/acp/acp_local.go
+++ b/acp/acp_local.go
@@ -12,25 +12,58 @@ package acp
import (
"context"
+ "errors"
protoTypes "github.com/cosmos/gogoproto/types"
- "github.com/sourcenetwork/corelog"
+ "github.com/sourcenetwork/acp_core/pkg/auth"
+ "github.com/sourcenetwork/acp_core/pkg/engine"
+ "github.com/sourcenetwork/acp_core/pkg/runtime"
+ "github.com/sourcenetwork/acp_core/pkg/types"
"github.com/sourcenetwork/immutable"
- "github.com/sourcenetwork/sourcehub/x/acp/embedded"
- "github.com/sourcenetwork/sourcehub/x/acp/types"
- "github.com/valyala/fastjson"
-
- "github.com/sourcenetwork/defradb/errors"
)
-var (
- _ ACP = (*ACPLocal)(nil)
-)
+const localACPStoreName = "local_acp"
// ACPLocal represents a local acp implementation that makes no remote calls.
type ACPLocal struct {
pathToStore immutable.Option[string]
- localACP *embedded.LocalACP
+ engine types.ACPEngineServer
+ manager runtime.RuntimeManager
+}
+
+var _ sourceHubClient = (*ACPLocal)(nil)
+
+func mapACPCorePolicy(pol *types.Policy) policy {
+ resources := make(map[string]*resource)
+ for _, coreResource := range pol.Resources {
+ resource := mapACPCoreResource(coreResource)
+ resources[resource.Name] = resource
+ }
+
+ return policy{
+ ID: pol.Id,
+ Resources: resources,
+ }
+}
+
+func mapACPCoreResource(policy *types.Resource) *resource {
+ perms := make(map[string]*permission)
+ for _, corePermission := range policy.Permissions {
+ perm := mapACPCorePermission(corePermission)
+ perms[perm.Name] = perm
+ }
+
+ return &resource{
+ Name: policy.Name,
+ Permissions: perms,
+ }
+}
+
+func mapACPCorePermission(perm *types.Permission) *permission {
+ return &permission{
+ Name: perm.Name,
+ Expression: perm.Expression,
+ }
}
func (l *ACPLocal) Init(ctx context.Context, path string) {
@@ -42,220 +75,136 @@ func (l *ACPLocal) Init(ctx context.Context, path string) {
}
func (l *ACPLocal) Start(ctx context.Context) error {
- var localACP embedded.LocalACP
+ var manager runtime.RuntimeManager
var err error
+ var opts []runtime.Opt
+ var storeLocation string
if !l.pathToStore.HasValue() { // Use a non-persistent, i.e. in memory store.
- localACP, err = embedded.NewLocalACP(
- embedded.WithInMemStore(),
- )
-
- if err != nil {
- return NewErrInitializationOfACPFailed(err, "Local", "in-memory")
- }
+ storeLocation = "in-memory"
+ opts = append(opts, runtime.WithMemKV())
} else { // Use peristent storage.
- acpStorePath := l.pathToStore.Value() + "/" + embedded.DefaultDataDir
- localACP, err = embedded.NewLocalACP(
- embedded.WithPersistentStorage(acpStorePath),
- )
- if err != nil {
- return NewErrInitializationOfACPFailed(err, "Local", l.pathToStore.Value())
- }
+ storeLocation = l.pathToStore.Value()
+ acpStorePath := storeLocation + "/" + localACPStoreName
+ opts = append(opts, runtime.WithPersistentKV(acpStorePath))
+ }
+
+ manager, err = runtime.NewRuntimeManager(opts...)
+ if err != nil {
+ return NewErrInitializationOfACPFailed(err, "Local", storeLocation)
}
- l.localACP = &localACP
+ l.manager = manager
+ l.engine = engine.NewACPEngine(manager)
return nil
}
func (l *ACPLocal) Close() error {
- return l.localACP.Close()
+ return l.manager.Terminate()
}
func (l *ACPLocal) AddPolicy(
ctx context.Context,
creatorID string,
policy string,
+ marshalType policyMarshalType,
+ creationTime *protoTypes.Timestamp,
) (string, error) {
- // Having a creator identity is a MUST requirement for adding a policy.
- if creatorID == "" {
- return "", ErrPolicyCreatorMustNotBeEmpty
- }
-
- if policy == "" {
- return "", ErrPolicyDataMustNotBeEmpty
- }
-
- // Assume policy is in YAML format by default.
- policyMarshalType := types.PolicyMarshalingType_SHORT_YAML
- if isJSON := fastjson.Validate(policy) == nil; isJSON { // Detect JSON format.
- policyMarshalType = types.PolicyMarshalingType_SHORT_JSON
+ principal, err := auth.NewDIDPrincipal(creatorID)
+ if err != nil {
+ return "", newErrInvalidActorID(err, creatorID)
}
+ ctx = auth.InjectPrincipal(ctx, principal)
- createPolicy := types.MsgCreatePolicy{
- Creator: creatorID,
+ createPolicy := types.CreatePolicyRequest{
Policy: policy,
- MarshalType: policyMarshalType,
+ MarshalType: types.PolicyMarshalingType(marshalType),
CreationTime: protoTypes.TimestampNow(),
}
- createPolicyResponse, err := l.localACP.GetMsgService().CreatePolicy(
- l.localACP.GetCtx(),
- &createPolicy,
- )
-
+ response, err := l.engine.CreatePolicy(ctx, &createPolicy)
if err != nil {
- return "", NewErrFailedToAddPolicyWithACP(err, "Local", creatorID)
+ return "", err
}
- policyID := createPolicyResponse.Policy.Id
- log.InfoContext(ctx, "Created Policy", corelog.Any("PolicyID", policyID))
-
- return policyID, nil
+ return response.Policy.Id, nil
}
-func (l *ACPLocal) ValidateResourceExistsOnValidDPI(
+func (l *ACPLocal) Policy(
ctx context.Context,
policyID string,
- resourceName string,
-) error {
- if policyID == "" && resourceName == "" {
- return ErrNoPolicyArgs
- }
-
- if policyID == "" {
- return ErrPolicyIDMustNotBeEmpty
- }
+) (immutable.Option[policy], error) {
+ none := immutable.None[policy]()
- if resourceName == "" {
- return ErrResourceNameMustNotBeEmpty
- }
-
- queryPolicyRequest := types.QueryPolicyRequest{Id: policyID}
- queryPolicyResponse, err := l.localACP.GetQueryService().Policy(
- l.localACP.GetCtx(),
- &queryPolicyRequest,
- )
+ request := types.GetPolicyRequest{Id: policyID}
+ response, err := l.engine.GetPolicy(ctx, &request)
if err != nil {
if errors.Is(err, types.ErrPolicyNotFound) {
- return newErrPolicyDoesNotExistWithACP(err, policyID)
- } else {
- return newErrPolicyValidationFailedWithACP(err, policyID)
+ return none, nil
}
+ return none, err
}
- // So far we validated that the policy exists, now lets validate that resource exists.
- resourceResponse := queryPolicyResponse.Policy.GetResourceByName(resourceName)
- if resourceResponse == nil {
- return newErrResourceDoesNotExistOnTargetPolicy(resourceName, policyID)
- }
-
- // Now that we have validated that policyID exists and it contains a corresponding
- // resource with the matching name, validate that all required permissions
- // for DPI actually exist on the target resource.
- for _, requiredPermission := range dpiRequiredPermissions {
- permissionResponse := resourceResponse.GetPermissionByName(requiredPermission)
- if permissionResponse == nil {
- return newErrResourceIsMissingRequiredPermission(
- resourceName,
- requiredPermission,
- policyID,
- )
- }
-
- // Now we need to ensure that the "owner" relation has access to all the required
- // permissions for DPI. This is important because even if the policy has the required
- // permissions under the resource, it's possible that those permissions are not granted
- // to the "owner" relation, this will help users not shoot themseleves in the foot.
- // TODO-ACP: Better validation, once sourcehub implements meta-policies.
- // Issue: https://github.com/sourcenetwork/defradb/issues/2359
- if err := validateDPIExpressionOfRequiredPermission(
- permissionResponse.Expression,
- requiredPermission,
- ); err != nil {
- return err
- }
- }
-
- return nil
+ policy := mapACPCorePolicy(response.Policy)
+ return immutable.Some(policy), nil
}
-func (l *ACPLocal) RegisterDocObject(
+func (l *ACPLocal) RegisterObject(
ctx context.Context,
actorID string,
policyID string,
resourceName string,
- docID string,
-) error {
- registerDoc := types.MsgRegisterObject{
- Creator: actorID,
+ objectID string,
+ creationTime *protoTypes.Timestamp,
+) (RegistrationResult, error) {
+ principal, err := auth.NewDIDPrincipal(actorID)
+ if err != nil {
+ return RegistrationResult_NoOp, newErrInvalidActorID(err, actorID)
+ }
+
+ ctx = auth.InjectPrincipal(ctx, principal)
+ req := types.RegisterObjectRequest{
PolicyId: policyID,
- Object: types.NewObject(resourceName, docID),
- CreationTime: protoTypes.TimestampNow(),
+ Object: types.NewObject(resourceName, objectID),
+ CreationTime: creationTime,
}
- registerDocResponse, err := l.localACP.GetMsgService().RegisterObject(
- l.localACP.GetCtx(),
- ®isterDoc,
- )
+ registerDocResponse, err := l.engine.RegisterObject(ctx, &req)
if err != nil {
- return NewErrFailedToRegisterDocWithACP(err, "Local", policyID, actorID, resourceName, docID)
+ return RegistrationResult_NoOp, err
}
- switch registerDocResponse.Result {
- case types.RegistrationResult_NoOp:
- return ErrObjectDidNotRegister
-
- case types.RegistrationResult_Registered:
- log.InfoContext(
- ctx,
- "Document registered with local acp",
- corelog.Any("PolicyID", policyID),
- corelog.Any("Creator", actorID),
- corelog.Any("Resource", resourceName),
- corelog.Any("DocID", docID),
- )
- return nil
-
- case types.RegistrationResult_Unarchived:
- log.InfoContext(
- ctx,
- "Document re-registered (unarchived object) with local acp",
- corelog.Any("PolicyID", policyID),
- corelog.Any("Creator", actorID),
- corelog.Any("Resource", resourceName),
- corelog.Any("DocID", docID),
- )
- return nil
- }
-
- return ErrObjectDidNotRegister
+ result := RegistrationResult(registerDocResponse.Result)
+ return result, nil
}
-func (l *ACPLocal) IsDocRegistered(
+func (l *ACPLocal) ObjectOwner(
ctx context.Context,
policyID string,
resourceName string,
- docID string,
-) (bool, error) {
- queryObjectOwner := types.QueryObjectOwnerRequest{
+ objectID string,
+) (immutable.Option[string], error) {
+ none := immutable.None[string]()
+
+ req := types.GetObjectRegistrationRequest{
PolicyId: policyID,
- Object: types.NewObject(resourceName, docID),
+ Object: types.NewObject(resourceName, objectID),
}
-
- queryObjectOwnerResponse, err := l.localACP.GetQueryService().ObjectOwner(
- l.localACP.GetCtx(),
- &queryObjectOwner,
- )
+ result, err := l.engine.GetObjectRegistration(ctx, &req)
if err != nil {
- return false, NewErrFailedToCheckIfDocIsRegisteredWithACP(err, "Local", policyID, resourceName, docID)
+ return none, err
}
- return queryObjectOwnerResponse.IsRegistered, nil
+ if result.IsRegistered {
+ return immutable.Some(result.OwnerId), nil
+ }
+
+ return none, nil
}
-func (l *ACPLocal) CheckDocAccess(
+func (l *ACPLocal) VerifyAccessRequest(
ctx context.Context,
permission DPIPermission,
actorID string,
@@ -263,7 +212,7 @@ func (l *ACPLocal) CheckDocAccess(
resourceName string,
docID string,
) (bool, error) {
- checkDoc := types.QueryVerifyAccessRequestRequest{
+ req := types.VerifyAccessRequestRequest{
PolicyId: policyID,
AccessRequest: &types.AccessRequest{
Operations: []*types.Operation{
@@ -277,34 +226,11 @@ func (l *ACPLocal) CheckDocAccess(
},
},
}
+ resp, err := l.engine.VerifyAccessRequest(ctx, &req)
- checkDocResponse, err := l.localACP.GetQueryService().VerifyAccessRequest(
- l.localACP.GetCtx(),
- &checkDoc,
- )
if err != nil {
- return false, NewErrFailedToVerifyDocAccessWithACP(err, "Local", policyID, actorID, resourceName, docID)
+ return false, err
}
- if checkDocResponse.Valid {
- log.InfoContext(
- ctx,
- "Document accessible",
- corelog.Any("PolicyID", policyID),
- corelog.Any("ActorID", actorID),
- corelog.Any("Resource", resourceName),
- corelog.Any("DocID", docID),
- )
- return true, nil
- } else {
- log.InfoContext(
- ctx,
- "Document inaccessible",
- corelog.Any("PolicyID", policyID),
- corelog.Any("ActorID", actorID),
- corelog.Any("Resource", resourceName),
- corelog.Any("DocID", docID),
- )
- return false, nil
- }
+ return resp.Valid, nil
}
diff --git a/acp/acp_local_test.go b/acp/acp_local_test.go
index 9abdcb04d1..ac024d73c6 100644
--- a/acp/acp_local_test.go
+++ b/acp/acp_local_test.go
@@ -17,11 +17,13 @@ import (
"github.com/stretchr/testify/require"
)
-var identity1 = "cosmos1zzg43wdrhmmk89z3pmejwete2kkd4a3vn7w969"
-var identity2 = "cosmos1x25hhksxhu86r45hqwk28dd70qzux3262hdrll"
+var identity1 = "did:key:z7r8os2G88XXBNBTLj3kFR5rzUJ4VAesbX7PgsA68ak9B5RYcXF5EZEmjRzzinZndPSSwujXb4XKHG6vmKEFG6ZfsfcQn"
+var identity2 = "did:key:z7r8ooUiNXK8TT8Xjg1EWStR2ZdfxbzVfvGWbA2FjmzcnmDxz71QkP1Er8PP3zyLZpBLVgaXbZPGJPS4ppXJDPRcqrx4F"
+var invalidIdentity = "did:something"
-var validPolicyID string = "4f13c5084c3d0e1e5c5db702fceef84c3b6ab948949ca8e27fcaad3fb8bc39f4"
+var validPolicyID string = "d59f91ba65fe142d35fc7df34482eafc7e99fed7c144961ba32c4664634e61b7"
var validPolicy string = `
+name: test
description: a policy
actor:
@@ -46,7 +48,7 @@ resources:
func Test_LocalACP_InMemory_StartAndClose_NoError(t *testing.T) {
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, "")
err := localACP.Start(ctx)
@@ -62,7 +64,7 @@ func Test_LocalACP_PersistentMemory_StartAndClose_NoError(t *testing.T) {
require.NotEqual(t, "", acpPath)
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, acpPath)
err := localACP.Start(ctx)
@@ -72,9 +74,9 @@ func Test_LocalACP_PersistentMemory_StartAndClose_NoError(t *testing.T) {
require.Nil(t, err)
}
-func Test_LocalACP_InMemory_AddPolicy_CanCreateTwice(t *testing.T) {
+func Test_LocalACP_InMemory_AddPolicy_CreatingSamePolicyAfterWipeReturnsSameID(t *testing.T) {
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, "")
errStart := localACP.Start(ctx)
@@ -96,7 +98,7 @@ func Test_LocalACP_InMemory_AddPolicy_CanCreateTwice(t *testing.T) {
errClose := localACP.Close()
require.Nil(t, errClose)
- // Since nothing is persisted should allow adding same policy again.
+ // Since nothing is persisted should allow adding same policy again with same ID
localACP.Init(ctx, "")
errStart = localACP.Start(ctx)
@@ -118,12 +120,12 @@ func Test_LocalACP_InMemory_AddPolicy_CanCreateTwice(t *testing.T) {
require.Nil(t, errClose)
}
-func Test_LocalACP_PersistentMemory_AddPolicy_CanNotCreateTwice(t *testing.T) {
+func Test_LocalACP_PersistentMemory_AddPolicy_CreatingSamePolicyReturnsDifferentIDs(t *testing.T) {
acpPath := t.TempDir()
require.NotEqual(t, "", acpPath)
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, acpPath)
errStart := localACP.Start(ctx)
@@ -150,14 +152,14 @@ func Test_LocalACP_PersistentMemory_AddPolicy_CanNotCreateTwice(t *testing.T) {
errStart = localACP.Start(ctx)
require.Nil(t, errStart)
- // Should not allow us to create the same policy again as it exists already.
- _, errAddPolicy = localACP.AddPolicy(
+ // Should generate a different ID for the new policy, even though the payload is the same
+ newPolicyID, errAddPolicy := localACP.AddPolicy(
ctx,
identity1,
validPolicy,
)
- require.Error(t, errAddPolicy)
- require.ErrorIs(t, errAddPolicy, ErrFailedToAddPolicyWithACP)
+ require.NoError(t, errAddPolicy)
+ require.NotEqual(t, newPolicyID, policyID)
errClose = localACP.Close()
require.Nil(t, errClose)
@@ -165,7 +167,7 @@ func Test_LocalACP_PersistentMemory_AddPolicy_CanNotCreateTwice(t *testing.T) {
func Test_LocalACP_InMemory_ValidateResourseExistsOrNot_ErrIfDoesntExist(t *testing.T) {
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, "")
errStart := localACP.Start(ctx)
@@ -215,7 +217,7 @@ func Test_LocalACP_PersistentMemory_ValidateResourseExistsOrNot_ErrIfDoesntExist
require.NotEqual(t, "", acpPath)
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, acpPath)
errStart := localACP.Start(ctx)
@@ -278,7 +280,7 @@ func Test_LocalACP_PersistentMemory_ValidateResourseExistsOrNot_ErrIfDoesntExist
func Test_LocalACP_InMemory_IsDocRegistered_TrueIfRegisteredFalseIfNotAndErrorOtherwise(t *testing.T) {
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, "")
errStart := localACP.Start(ctx)
@@ -358,7 +360,7 @@ func Test_LocalACP_PersistentMemory_IsDocRegistered_TrueIfRegisteredFalseIfNotAn
require.NotEqual(t, "", acpPath)
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, acpPath)
errStart := localACP.Start(ctx)
@@ -454,7 +456,7 @@ func Test_LocalACP_PersistentMemory_IsDocRegistered_TrueIfRegisteredFalseIfNotAn
func Test_LocalACP_InMemory_CheckDocAccess_TrueIfHaveAccessFalseIfNotErrorOtherwise(t *testing.T) {
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, "")
errStart := localACP.Start(ctx)
@@ -540,7 +542,7 @@ func Test_LocalACP_PersistentMemory_CheckDocAccess_TrueIfHaveAccessFalseIfNotErr
require.NotEqual(t, "", acpPath)
ctx := context.Background()
- var localACP ACPLocal
+ localACP := NewLocalACP()
localACP.Init(ctx, acpPath)
errStart := localACP.Start(ctx)
@@ -652,3 +654,95 @@ func Test_LocalACP_PersistentMemory_CheckDocAccess_TrueIfHaveAccessFalseIfNotErr
errClose = localACP.Close()
require.Nil(t, errClose)
}
+
+func Test_LocalACP_InMemory_AddPolicy_InvalidCreatorIDReturnsError(t *testing.T) {
+ ctx := context.Background()
+ localACP := NewLocalACP()
+
+ localACP.Init(ctx, "")
+ err := localACP.Start(ctx)
+ require.Nil(t, err)
+
+ policyID, err := localACP.AddPolicy(
+ ctx,
+ invalidIdentity,
+ validPolicy,
+ )
+
+ require.ErrorIs(t, err, ErrInvalidActorID)
+ require.Empty(t, policyID)
+
+ err = localACP.Close()
+ require.NoError(t, err)
+}
+
+func Test_LocalACP_InMemory_RegisterObject_InvalidCreatorIDReturnsError(t *testing.T) {
+ ctx := context.Background()
+ localACP := NewLocalACP()
+
+ localACP.Init(ctx, "")
+ err := localACP.Start(ctx)
+ require.Nil(t, err)
+
+ err = localACP.RegisterDocObject(
+ ctx,
+ invalidIdentity,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+
+ require.ErrorIs(t, err, ErrInvalidActorID)
+
+ err = localACP.Close()
+ require.NoError(t, err)
+}
+
+func Test_LocalACP_Persistent_AddPolicy_InvalidCreatorIDReturnsError(t *testing.T) {
+ acpPath := t.TempDir()
+ require.NotEqual(t, "", acpPath)
+
+ ctx := context.Background()
+ localACP := NewLocalACP()
+
+ localACP.Init(ctx, acpPath)
+ err := localACP.Start(ctx)
+ require.Nil(t, err)
+
+ policyID, err := localACP.AddPolicy(
+ ctx,
+ invalidIdentity,
+ validPolicy,
+ )
+
+ require.ErrorIs(t, err, ErrInvalidActorID)
+ require.Empty(t, policyID)
+
+ err = localACP.Close()
+ require.NoError(t, err)
+}
+
+func Test_LocalACP_Persistent_RegisterObject_InvalidCreatorIDReturnsError(t *testing.T) {
+ acpPath := t.TempDir()
+ require.NotEqual(t, "", acpPath)
+
+ ctx := context.Background()
+ localACP := NewLocalACP()
+
+ localACP.Init(ctx, acpPath)
+ err := localACP.Start(ctx)
+ require.Nil(t, err)
+
+ err = localACP.RegisterDocObject(
+ ctx,
+ invalidIdentity,
+ validPolicyID,
+ "users",
+ "documentID_XYZ",
+ )
+
+ require.ErrorIs(t, err, ErrInvalidActorID)
+
+ err = localACP.Close()
+ require.NoError(t, err)
+}
diff --git a/acp/errors.go b/acp/errors.go
index 307b32f5ad..5ff4eee302 100644
--- a/acp/errors.go
+++ b/acp/errors.go
@@ -35,6 +35,8 @@ const (
errExprOfRequiredPermMustStartWithRelation = "expr of required permission must start with required relation"
errExprOfRequiredPermHasInvalidChar = "expr of required permission has invalid character after relation"
+
+ errInvalidActorID = "invalid actor ID"
)
var (
@@ -53,6 +55,7 @@ var (
ErrNoPolicyArgs = errors.New(errNoPolicyArgs)
ErrPolicyIDMustNotBeEmpty = errors.New(errPolicyIDMustNotBeEmpty)
ErrResourceNameMustNotBeEmpty = errors.New(errResourceNameMustNotBeEmpty)
+ ErrInvalidActorID = errors.New(errInvalidActorID)
)
func NewErrInitializationOfACPFailed(
@@ -205,3 +208,14 @@ func newErrExprOfRequiredPermissionHasInvalidChar(
errors.NewKV("Character", string(char)),
)
}
+
+func newErrInvalidActorID(
+ inner error,
+ id string,
+) error {
+ return errors.Wrap(
+ errInvalidActorID,
+ inner,
+ errors.NewKV("ActorID", id),
+ )
+}
diff --git a/acp/identity/errors.go b/acp/identity/errors.go
new file mode 100644
index 0000000000..3ad815b8bb
--- /dev/null
+++ b/acp/identity/errors.go
@@ -0,0 +1,36 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package identity
+
+import (
+ "encoding/hex"
+
+ "github.com/sourcenetwork/defradb/errors"
+)
+
+const (
+ errDIDCreation = "could not produce did for key"
+ errFailedToGenerateIdentityFromPrivateKey = "failed to generate identity from private key"
+)
+
+var (
+ ErrDIDCreation = errors.New(errDIDCreation)
+ ErrFailedToGenerateIdentityFromPrivateKey = errors.New(errFailedToGenerateIdentityFromPrivateKey)
+)
+
+func newErrDIDCreation(inner error, keytype string, pubKey []byte) error {
+ return errors.Wrap(
+ errDIDCreation,
+ inner,
+ errors.NewKV("KeyType", keytype),
+ errors.NewKV("PubKey", hex.EncodeToString(pubKey)),
+ )
+}
diff --git a/acp/identity/generate.go b/acp/identity/generate.go
new file mode 100644
index 0000000000..cf37ce6e46
--- /dev/null
+++ b/acp/identity/generate.go
@@ -0,0 +1,55 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package identity
+
+import (
+ "encoding/hex"
+
+ "github.com/sourcenetwork/defradb/crypto"
+)
+
+// RawIdentity holds the raw bytes that make up an actor's identity.
+type RawIdentity struct {
+ // PrivateKey is a secp256k1 private key that is a 256-bit big-endian
+ // binary-encoded number, padded to a length of 32 bytes in HEX format.
+ PrivateKey string
+
+ // PublicKey is a compressed 33-byte secp256k1 public key in HEX format.
+ PublicKey string
+
+ // DID is `did:key` key generated from the public key address.
+ DID string
+}
+
+// Generate generates a new identity.
+func Generate() (RawIdentity, error) {
+ privateKey, err := crypto.GenerateSecp256k1()
+ if err != nil {
+ return RawIdentity{}, err
+ }
+
+ maybeNewIdentity, err := FromPrivateKey(privateKey)
+ if err != nil {
+ return RawIdentity{}, err
+ }
+
+ if !maybeNewIdentity.HasValue() {
+ return RawIdentity{}, ErrFailedToGenerateIdentityFromPrivateKey
+ }
+
+ newIdentity := maybeNewIdentity.Value()
+
+ return RawIdentity{
+ PrivateKey: hex.EncodeToString(newIdentity.PrivateKey.Serialize()),
+ PublicKey: hex.EncodeToString(newIdentity.PublicKey.SerializeCompressed()),
+ DID: newIdentity.DID,
+ }, nil
+}
diff --git a/acp/identity/identity.go b/acp/identity/identity.go
index 108c183748..8d9a84c23b 100644
--- a/acp/identity/identity.go
+++ b/acp/identity/identity.go
@@ -8,34 +8,72 @@
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
-/*
-Package identity provides defradb identity.
-*/
-
package identity
-import "github.com/sourcenetwork/immutable"
+import (
+ "github.com/cyware/ssi-sdk/crypto"
+ "github.com/cyware/ssi-sdk/did/key"
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+ "github.com/sourcenetwork/immutable"
+)
-// Identity is the unique identifier for an actor.
-type Identity string
+// didProducer generates a did:key from a public key
+type didProducer = func(crypto.KeyType, []byte) (*key.DIDKey, error)
-var (
- // None is an empty identity.
- None = immutable.None[Identity]()
-)
+// None specifies an anonymous actor.
+var None = immutable.None[Identity]()
+
+// Identity describes a unique actor.
+type Identity struct {
+ // PublicKey is the actor's public key.
+ PublicKey *secp256k1.PublicKey
+ // PrivateKey is the actor's private key.
+ PrivateKey *secp256k1.PrivateKey
+ // DID is the actor's unique identifier.
+ //
+ // The address is derived from the actor's public key,
+ // using the did:key method
+ DID string
+}
+
+// FromPrivateKey returns a new identity using the given private key.
+func FromPrivateKey(privateKey *secp256k1.PrivateKey) (immutable.Option[Identity], error) {
+ pubKey := privateKey.PubKey()
+ did, err := DIDFromPublicKey(pubKey)
+ if err != nil {
+ return None, err
+ }
+
+ return immutable.Some(Identity{
+ DID: did,
+ PublicKey: pubKey,
+ PrivateKey: privateKey,
+ }), nil
+}
-// New makes a new identity if the input is not empty otherwise, returns None.
-func New(identity string) immutable.Option[Identity] {
- // TODO-ACP: There will be more validation once sourcehub gets some utilities.
- // Then a validation function would do the validation, will likely do outside this function.
- // https://github.com/sourcenetwork/defradb/issues/2358
- if identity == "" {
- return None
+// FromPublicKey returns a new identity using the given public key.
+func FromPublicKey(publicKey *secp256k1.PublicKey) (immutable.Option[Identity], error) {
+ did, err := DIDFromPublicKey(publicKey)
+ if err != nil {
+ return None, err
}
- return immutable.Some(Identity(identity))
+ return immutable.Some(Identity{
+ DID: did,
+ PublicKey: publicKey,
+ }), nil
}
-// String returns the string representation of the identity.
-func (i Identity) String() string {
- return string(i)
+// DIDFromPublicKey returns a did:key generated from the given public key.
+func DIDFromPublicKey(publicKey *secp256k1.PublicKey) (string, error) {
+ return didFromPublicKey(publicKey, key.CreateDIDKey)
+}
+
+// didFromPublicKey produces a did from a secp256k1 key and a producer function
+func didFromPublicKey(publicKey *secp256k1.PublicKey, producer didProducer) (string, error) {
+ bytes := publicKey.SerializeUncompressed()
+ did, err := producer(crypto.SECP256k1, bytes)
+ if err != nil {
+ return "", newErrDIDCreation(err, "secp256k1", bytes)
+ }
+ return did.String(), nil
}
diff --git a/acp/identity/identity_test.go b/acp/identity/identity_test.go
new file mode 100644
index 0000000000..bcef99005b
--- /dev/null
+++ b/acp/identity/identity_test.go
@@ -0,0 +1,78 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package identity
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/cyware/ssi-sdk/crypto"
+ "github.com/cyware/ssi-sdk/did/key"
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_DIDFromPublicKey_ProducesDIDForPublicKey(t *testing.T) {
+ pubKey := &secp256k1.PublicKey{}
+
+ did, err := DIDFromPublicKey(pubKey)
+
+ want := "did:key:z7r8ooUiNXK8TT8Xjg1EWStR2ZdfxbzVfvGWbA2FjmzcnmDxz71QkP1Er8PP3zyLZpBLVgaXbZPGJPS4ppXJDPRcqrx4F"
+ require.Equal(t, want, did)
+ require.NoError(t, err)
+}
+
+func Test_DIDFromPublicKey_ReturnsErrorWhenProducerFails(t *testing.T) {
+ mockedProducer := func(crypto.KeyType, []byte) (*key.DIDKey, error) {
+ return nil, fmt.Errorf("did generation err")
+ }
+
+ pubKey := &secp256k1.PublicKey{}
+
+ did, err := didFromPublicKey(pubKey, mockedProducer)
+
+ require.Empty(t, did)
+ require.ErrorIs(t, err, ErrDIDCreation)
+}
+
+func Test_RawIdentityGeneration_ReturnsNewRawIdentity(t *testing.T) {
+ newIdentity, err := Generate()
+ require.NoError(t, err)
+
+ // Check that both private and public key are not empty.
+ require.NotEmpty(t, newIdentity.PrivateKey)
+ require.NotEmpty(t, newIdentity.PublicKey)
+
+ // Check leading `did:key` prefix.
+ require.Equal(t, newIdentity.DID[:7], "did:key")
+}
+
+func Test_RawIdentityGenerationIsNotFixed_ReturnsUniqueRawIdentites(t *testing.T) {
+ newIdentity1, err1 := Generate()
+ newIdentity2, err2 := Generate()
+ require.NoError(t, err1)
+ require.NoError(t, err2)
+
+ // Check that both private and public key are not empty.
+ require.NotEmpty(t, newIdentity1.PrivateKey)
+ require.NotEmpty(t, newIdentity1.PublicKey)
+ require.NotEmpty(t, newIdentity2.PrivateKey)
+ require.NotEmpty(t, newIdentity2.PublicKey)
+
+ // Check leading `did:key` prefix.
+ require.Equal(t, newIdentity1.DID[:7], "did:key")
+ require.Equal(t, newIdentity2.DID[:7], "did:key")
+
+ // Check both are different.
+ require.NotEqual(t, newIdentity1.PrivateKey, newIdentity2.PrivateKey)
+ require.NotEqual(t, newIdentity1.PublicKey, newIdentity2.PublicKey)
+ require.NotEqual(t, newIdentity1.DID, newIdentity2.DID)
+}
diff --git a/acp/source_hub_client.go b/acp/source_hub_client.go
new file mode 100644
index 0000000000..22371cd6e2
--- /dev/null
+++ b/acp/source_hub_client.go
@@ -0,0 +1,321 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package acp
+
+import (
+ "context"
+
+ protoTypes "github.com/cosmos/gogoproto/types"
+ "github.com/sourcenetwork/corelog"
+ "github.com/sourcenetwork/immutable"
+ "github.com/valyala/fastjson"
+)
+
+// sourceHubClient is a private abstraction to allow multiple ACP implementations
+// based off of the SourceHub libraries to share the same Defra-specific logic via the
+// sourceHubBridge.
+type sourceHubClient interface {
+ // Init initializes the acp, with an absolute path. The provided path indicates where the
+ // persistent data will be stored for acp.
+ //
+ // If the path is empty then acp will run in memory.
+ Init(ctx context.Context, path string)
+
+ // Start starts the acp, using the initialized path. Will recover acp state
+ // from a previous run if under the same path.
+ //
+ // If the path is empty then acp will run in memory.
+ Start(ctx context.Context) error
+
+ // AddPolicy attempts to add the given policy. Upon success a policyID is returned,
+ // otherwise returns error.
+ AddPolicy(
+ ctx context.Context,
+ creatorID string,
+ policy string,
+ marshalType policyMarshalType,
+ creationTime *protoTypes.Timestamp,
+ ) (string, error)
+
+ // Policy returns a policy of the given policyID if one is found.
+ Policy(
+ ctx context.Context,
+ policyID string,
+ ) (immutable.Option[policy], error)
+
+ // RegisterObject registers the object to have access control.
+ // No error is returned upon successful registering of an object.
+ RegisterObject(
+ ctx context.Context,
+ actorID string,
+ policyID string,
+ resourceName string,
+ objectID string,
+ creationTime *protoTypes.Timestamp,
+ ) (RegistrationResult, error)
+
+ // ObjectOwner returns the owner of the object of the given objectID.
+ ObjectOwner(
+ ctx context.Context,
+ policyID string,
+ resourceName string,
+ objectID string,
+ ) (immutable.Option[string], error)
+
+ // VerifyAccessRequest returns true if the check was successful and the request has access to the object. If
+ // the check was successful but the request does not have access to the object, then returns false.
+ // Otherwise if check failed then an error is returned (and the boolean result should not be used).
+ VerifyAccessRequest(
+ ctx context.Context,
+ permission DPIPermission,
+ actorID string,
+ policyID string,
+ resourceName string,
+ docID string,
+ ) (bool, error)
+
+ // Close closes any resources in use by acp.
+ Close() error
+}
+
+// sourceHubBridge wraps a sourceHubClient, hosting the Defra-specific logic away from client-specific
+// code.
+type sourceHubBridge struct {
+ client sourceHubClient
+}
+
+var _ ACP = (*sourceHubBridge)(nil)
+
+func NewLocalACP() ACP {
+ return &sourceHubBridge{
+ client: &ACPLocal{},
+ }
+}
+
+func (a *sourceHubBridge) Init(ctx context.Context, path string) {
+ a.client.Init(ctx, path)
+}
+
+func (a *sourceHubBridge) Start(ctx context.Context) error {
+ return a.client.Start(ctx)
+}
+
+func (a *sourceHubBridge) AddPolicy(ctx context.Context, creatorID string, policy string) (string, error) {
+ // Having a creator identity is a MUST requirement for adding a policy.
+ if creatorID == "" {
+ return "", ErrPolicyCreatorMustNotBeEmpty
+ }
+
+ if policy == "" {
+ return "", ErrPolicyDataMustNotBeEmpty
+ }
+
+ marshalType := policyMarshalType_YAML
+ if isJSON := fastjson.Validate(policy) == nil; isJSON { // Detect JSON format.
+ marshalType = policyMarshalType_JSON
+ }
+
+ policyID, err := a.client.AddPolicy(
+ ctx,
+ creatorID,
+ policy,
+ marshalType,
+ protoTypes.TimestampNow(),
+ )
+
+ if err != nil {
+ return "", NewErrFailedToAddPolicyWithACP(err, "Local", creatorID)
+ }
+
+ log.InfoContext(ctx, "Created Policy", corelog.Any("PolicyID", policyID))
+
+ return policyID, nil
+}
+
+func (a *sourceHubBridge) ValidateResourceExistsOnValidDPI(
+ ctx context.Context,
+ policyID string,
+ resourceName string,
+) error {
+ if policyID == "" && resourceName == "" {
+ return ErrNoPolicyArgs
+ }
+
+ if policyID == "" {
+ return ErrPolicyIDMustNotBeEmpty
+ }
+
+ if resourceName == "" {
+ return ErrResourceNameMustNotBeEmpty
+ }
+
+ maybePolicy, err := a.client.Policy(ctx, policyID)
+
+ if err != nil {
+ return newErrPolicyValidationFailedWithACP(err, policyID)
+ }
+ if !maybePolicy.HasValue() {
+ return newErrPolicyDoesNotExistWithACP(err, policyID)
+ }
+
+ policy := maybePolicy.Value()
+
+ // So far we validated that the policy exists, now lets validate that resource exists.
+ resourceResponse, ok := policy.Resources[resourceName]
+ if !ok {
+ return newErrResourceDoesNotExistOnTargetPolicy(resourceName, policyID)
+ }
+
+ // Now that we have validated that policyID exists and it contains a corresponding
+ // resource with the matching name, validate that all required permissions
+ // for DPI actually exist on the target resource.
+ for _, requiredPermission := range dpiRequiredPermissions {
+ permissionResponse, ok := resourceResponse.Permissions[requiredPermission]
+ if !ok {
+ return newErrResourceIsMissingRequiredPermission(
+ resourceName,
+ requiredPermission,
+ policyID,
+ )
+ }
+
+ // Now we need to ensure that the "owner" relation has access to all the required
+ // permissions for DPI. This is important because even if the policy has the required
+ // permissions under the resource, it's possible that those permissions are not granted
+ // to the "owner" relation, this will help users not shoot themselves in the foot.
+ // TODO-ACP: Better validation, once sourcehub implements meta-policies.
+ // Issue: https://github.com/sourcenetwork/defradb/issues/2359
+ if err := validateDPIExpressionOfRequiredPermission(
+ permissionResponse.Expression,
+ requiredPermission,
+ ); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *sourceHubBridge) RegisterDocObject(
+ ctx context.Context,
+ actorID string,
+ policyID string,
+ resourceName string,
+ docID string,
+) error {
+ registerDocResult, err := a.client.RegisterObject(
+ ctx,
+ actorID,
+ policyID,
+ resourceName,
+ docID,
+ protoTypes.TimestampNow(),
+ )
+
+ if err != nil {
+ return NewErrFailedToRegisterDocWithACP(err, "Local", policyID, actorID, resourceName, docID)
+ }
+
+ switch registerDocResult {
+ case RegistrationResult_NoOp:
+ return ErrObjectDidNotRegister
+
+ case RegistrationResult_Registered:
+ log.InfoContext(
+ ctx,
+ "Document registered with local acp",
+ corelog.Any("PolicyID", policyID),
+ corelog.Any("Creator", actorID),
+ corelog.Any("Resource", resourceName),
+ corelog.Any("DocID", docID),
+ )
+ return nil
+
+ case RegistrationResult_Unarchived:
+ log.InfoContext(
+ ctx,
+ "Document re-registered (unarchived object) with local acp",
+ corelog.Any("PolicyID", policyID),
+ corelog.Any("Creator", actorID),
+ corelog.Any("Resource", resourceName),
+ corelog.Any("DocID", docID),
+ )
+ return nil
+ }
+
+ return ErrObjectDidNotRegister
+}
+
+func (a *sourceHubBridge) IsDocRegistered(
+ ctx context.Context,
+ policyID string,
+ resourceName string,
+ docID string,
+) (bool, error) {
+ maybeActor, err := a.client.ObjectOwner(
+ ctx,
+ policyID,
+ resourceName,
+ docID,
+ )
+ if err != nil {
+ return false, NewErrFailedToCheckIfDocIsRegisteredWithACP(err, "Local", policyID, resourceName, docID)
+ }
+
+ return maybeActor.HasValue(), nil
+}
+
+func (a *sourceHubBridge) CheckDocAccess(
+ ctx context.Context,
+ permission DPIPermission,
+ actorID string,
+ policyID string,
+ resourceName string,
+ docID string,
+) (bool, error) {
+ isValid, err := a.client.VerifyAccessRequest(
+ ctx,
+ permission,
+ actorID,
+ policyID,
+ resourceName,
+ docID,
+ )
+ if err != nil {
+ return false, NewErrFailedToVerifyDocAccessWithACP(err, "Local", policyID, actorID, resourceName, docID)
+ }
+
+ if isValid {
+ log.InfoContext(
+ ctx,
+ "Document accessible",
+ corelog.Any("PolicyID", policyID),
+ corelog.Any("ActorID", actorID),
+ corelog.Any("Resource", resourceName),
+ corelog.Any("DocID", docID),
+ )
+ return true, nil
+ } else {
+ log.InfoContext(
+ ctx,
+ "Document inaccessible",
+ corelog.Any("PolicyID", policyID),
+ corelog.Any("ActorID", actorID),
+ corelog.Any("Resource", resourceName),
+ corelog.Any("DocID", docID),
+ )
+ return false, nil
+ }
+}
+
+func (a *sourceHubBridge) Close() error {
+ return a.client.Close()
+}
diff --git a/acp/types.go b/acp/types.go
new file mode 100644
index 0000000000..e17f9d9dc4
--- /dev/null
+++ b/acp/types.go
@@ -0,0 +1,54 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package acp
+
+// RegistrationResult is an enum type which indicates the result of a RegisterObject call to SourceHub / ACP Core
+type RegistrationResult int32
+
+const (
+ // NoOp indicates no action was taken. The operation failed or the Object already existed and was active
+ RegistrationResult_NoOp RegistrationResult = 0
+ // Registered indicates the Object was successfully registered to the Actor.
+ RegistrationResult_Registered RegistrationResult = 1
+ // Unarchived indicates that a previously deleted Object is active again.
+ // Only the original owners can Unarchive an object.
+ RegistrationResult_Unarchived RegistrationResult = 2
+)
+
+// policyMarshalType represents the format in which a policy
+// is marshaled
+type policyMarshalType int32
+
+const (
+ policyMarshalType_YAML policyMarshalType = 1
+ policyMarshalType_JSON policyMarshalType = 2
+)
+
+// policy is a data container carrying the necessary data
+// to verify whether a policy meets DPI requirements
+type policy struct {
+ ID string
+ Resources map[string]*resource
+}
+
+// resource is a data container carrying the necessary data
+// to verify whether it meets DPI requirements.
+type resource struct {
+ Name string
+ Permissions map[string]*permission
+}
+
+// permission is a data container carrying the necessary data
+// to verify whether it meets DPI requirements.
+type permission struct {
+ Name string
+ Expression string
+}
diff --git a/cli/acp_policy_add.go b/cli/acp_policy_add.go
index bca5e95abd..b0db4f63c0 100644
--- a/cli/acp_policy_add.go
+++ b/cli/acp_policy_add.go
@@ -37,7 +37,8 @@ Notes:
- Learn more about [ACP & DPI Rules](/acp/README.md)
Example: add from an argument string:
- defradb client acp policy add -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j '
+ defradb client acp policy add -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f \
+'
description: A Valid DefraDB Policy Interface
actor:
@@ -61,10 +62,12 @@ resources:
'
Example: add from file:
- defradb client acp policy add -i cosmos17r39df0hdcrgnmmw4mvu7qgk5nu888c7uvv37y -f policy.yml
+ defradb client acp policy add -f policy.yml \
+ -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f
Example: add from file, verbose flags:
- defradb client acp policy add --identity cosmos1kpw734v54g0t0d8tcye8ee5jc3gld0tcr2q473 --file policy.yml
+ defradb client acp policy add --file policy.yml \
+ --identity 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f
Example: add from stdin:
cat policy.yml | defradb client acp policy add -
diff --git a/cli/cli.go b/cli/cli.go
index 38209a9f69..e87ea8dec9 100644
--- a/cli/cli.go
+++ b/cli/cli.go
@@ -122,9 +122,23 @@ func NewDefraCommand() *cobra.Command {
collection,
)
+ keyring := MakeKeyringCommand()
+ keyring.AddCommand(
+ MakeKeyringGenerateCommand(),
+ MakeKeyringImportCommand(),
+ MakeKeyringExportCommand(),
+ )
+
+ identity := MakeIdentityCommand()
+ identity.AddCommand(
+ MakeIdentityNewCommand(),
+ )
+
root := MakeRootCommand()
root.AddCommand(
client,
+ keyring,
+ identity,
MakeStartCommand(),
MakeServerDumpCmd(),
MakeVersionCommand(),
diff --git a/cli/client.go b/cli/client.go
index 06460ca70d..d6ae8256dd 100644
--- a/cli/client.go
+++ b/cli/client.go
@@ -38,7 +38,8 @@ Execute queries, add schema types, obtain node info, etc.`,
return setContextDB(cmd)
},
}
- cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "", "ACP Identity")
+ cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "",
+ "Hex formatted private key used to authenticate with ACP")
cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID")
return cmd
}
diff --git a/cli/collection.go b/cli/collection.go
index cdf3d41f5a..ad7f54ec5c 100644
--- a/cli/collection.go
+++ b/cli/collection.go
@@ -80,7 +80,8 @@ func MakeCollectionCommand() *cobra.Command {
},
}
cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID")
- cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "", "ACP Identity")
+ cmd.PersistentFlags().StringVarP(&identity, "identity", "i", "",
+ "Hex formatted private key used to authenticate with ACP")
cmd.PersistentFlags().StringVar(&name, "name", "", "Collection name")
cmd.PersistentFlags().StringVar(&schemaRoot, "schema", "", "Collection schema Root")
cmd.PersistentFlags().StringVar(&versionID, "version", "", "Collection version ID")
diff --git a/cli/collection_create.go b/cli/collection_create.go
index df7d8794b5..994911a14c 100644
--- a/cli/collection_create.go
+++ b/cli/collection_create.go
@@ -30,7 +30,8 @@ Example: create from string:
defradb client collection create --name User '{ "name": "Bob" }'
Example: create from string, with identity:
- defradb client collection create -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User '{ "name": "Bob" }'
+ defradb client collection create --name User '{ "name": "Bob" }' \
+ -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f
Example: create multiple from string:
defradb client collection create --name User '[{ "name": "Alice" }, { "name": "Bob" }]'
diff --git a/cli/collection_delete.go b/cli/collection_delete.go
index a9776d1985..c3abfcd80e 100644
--- a/cli/collection_delete.go
+++ b/cli/collection_delete.go
@@ -28,7 +28,8 @@ Example: delete by docID:
defradb client collection delete --name User --docID bae-123
Example: delete by docID with identity:
- defradb client collection delete -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User --docID bae-123
+ defradb client collection delete --name User --docID bae-123 \
+ -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f
Example: delete by filter:
defradb client collection delete --name User --filter '{ "_gte": { "points": 100 } }'
diff --git a/cli/collection_get.go b/cli/collection_get.go
index 9ad5566f62..5a310a148c 100644
--- a/cli/collection_get.go
+++ b/cli/collection_get.go
@@ -27,7 +27,7 @@ Example:
defradb client collection get --name User bae-123
Example to get a private document we must use an identity:
- defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User bae-123
+ defradb client collection get -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f --name User bae-123
`,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
diff --git a/cli/collection_list_doc_ids.go b/cli/collection_list_doc_ids.go
index 168bb74a5a..bc6b298f32 100644
--- a/cli/collection_list_doc_ids.go
+++ b/cli/collection_list_doc_ids.go
@@ -26,7 +26,7 @@ Example: list all docID(s):
defradb client collection docIDs --name User
Example: list all docID(s), with an identity:
- defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User
+ defradb client collection docIDs -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f --name User
`,
RunE: func(cmd *cobra.Command, args []string) error {
col, ok := tryGetContextCollection(cmd)
diff --git a/cli/collection_update.go b/cli/collection_update.go
index 3e676edce9..fb7e352249 100644
--- a/cli/collection_update.go
+++ b/cli/collection_update.go
@@ -37,7 +37,7 @@ Example: update by docID:
--docID bae-123 --updater '{ "verified": true }'
Example: update private docID, with identity:
- defradb client collection update -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User \
+ defradb client collection update -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f --name User \
--docID bae-123 --updater '{ "verified": true }'
`,
Args: cobra.RangeArgs(0, 1),
diff --git a/cli/config.go b/cli/config.go
index fd275a2d01..d559711f8b 100644
--- a/cli/config.go
+++ b/cli/config.go
@@ -11,6 +11,7 @@
package cli
import (
+ "errors"
"os"
"path/filepath"
"strings"
@@ -36,27 +37,58 @@ var configPaths = []string{
"datastore.badger.path",
"api.pubkeypath",
"api.privkeypath",
+ "keyring.path",
}
-// configFlags is a mapping of config keys to cli flags to bind to.
+// configFlags is a mapping of cli flag names to config keys to bind.
var configFlags = map[string]string{
- "log.level": "log-level",
- "log.output": "log-output",
- "log.format": "log-format",
- "log.stacktrace": "log-stacktrace",
- "log.source": "log-source",
- "log.overrides": "log-overrides",
- "log.nocolor": "log-no-color",
- "api.address": "url",
- "datastore.maxtxnretries": "max-txn-retries",
- "datastore.store": "store",
- "datastore.badger.valuelogfilesize": "valuelogfilesize",
- "net.peers": "peers",
- "net.p2paddresses": "p2paddr",
- "net.p2pdisabled": "no-p2p",
- "api.allowed-origins": "allowed-origins",
- "api.pubkeypath": "pubkeypath",
- "api.privkeypath": "privkeypath",
+ "log-level": "log.level",
+ "log-output": "log.output",
+ "log-format": "log.format",
+ "log-stacktrace": "log.stacktrace",
+ "log-source": "log.source",
+ "log-overrides": "log.overrides",
+ "no-log-color": "log.colordisabled",
+ "url": "api.address",
+ "max-txn-retries": "datastore.maxtxnretries",
+ "store": "datastore.store",
+ "valuelogfilesize": "datastore.badger.valuelogfilesize",
+ "peers": "net.peers",
+ "p2paddr": "net.p2paddresses",
+ "no-p2p": "net.p2pdisabled",
+ "allowed-origins": "api.allowed-origins",
+ "pubkeypath": "api.pubkeypath",
+ "privkeypath": "api.privkeypath",
+ "keyring-namespace": "keyring.namespace",
+ "keyring-backend": "keyring.backend",
+ "keyring-path": "keyring.path",
+ "no-keyring": "keyring.disabled",
+}
+
+// configDefaults contains default values for config entries.
+var configDefaults = map[string]any{
+ "api.address": "127.0.0.1:9181",
+ "api.allowed-origins": []string{},
+ "datastore.badger.path": "data",
+ "datastore.maxtxnretries": 5,
+ "datastore.store": "badger",
+ "datastore.badger.valuelogfilesize": 1 << 30,
+ "net.p2pdisabled": false,
+ "net.p2paddresses": []string{"/ip4/127.0.0.1/tcp/9171"},
+ "net.peers": []string{},
+ "net.pubSubEnabled": true,
+ "net.relay": false,
+ "keyring.backend": "file",
+ "keyring.disabled": false,
+ "keyring.namespace": "defradb",
+ "keyring.path": "keys",
+ "log.caller": false,
+ "log.colordisabled": false,
+ "log.format": "text",
+ "log.level": "info",
+ "log.output": "stderr",
+ "log.source": false,
+ "log.stacktrace": false,
}
// defaultConfig returns a new config with default values.
@@ -70,11 +102,9 @@ func defaultConfig() *viper.Viper {
cfg.SetConfigName("config")
cfg.SetConfigType("yaml")
- cfg.SetDefault("datastore.badger.path", "data")
- cfg.SetDefault("net.pubSubEnabled", true)
- cfg.SetDefault("net.relay", false)
- cfg.SetDefault("log.caller", false)
-
+ for key, val := range configDefaults {
+ cfg.SetDefault(key, val)
+ }
return cfg
}
@@ -126,13 +156,14 @@ func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) {
}
}
- // set default logging config
+ // set logging config
corelog.SetConfig(corelog.Config{
Level: cfg.GetString("log.level"),
Format: cfg.GetString("log.format"),
Output: cfg.GetString("log.output"),
EnableStackTrace: cfg.GetBool("log.stacktrace"),
EnableSource: cfg.GetBool("log.source"),
+ DisableColor: cfg.GetBool("log.colordisabled"),
})
// set logging config overrides
@@ -143,11 +174,9 @@ func loadConfig(rootdir string, flags *pflag.FlagSet) (*viper.Viper, error) {
// bindConfigFlags binds the set of cli flags to config values.
func bindConfigFlags(cfg *viper.Viper, flags *pflag.FlagSet) error {
- for key, flag := range configFlags {
- err := cfg.BindPFlag(key, flags.Lookup(flag))
- if err != nil {
- return err
- }
- }
- return nil
+ var errs []error
+ flags.VisitAll(func(f *pflag.Flag) {
+ errs = append(errs, cfg.BindPFlag(configFlags[f.Name], f))
+ })
+ return errors.Join(errs...)
}
diff --git a/cli/config_test.go b/cli/config_test.go
index 39a17d60fd..d3f6d954e3 100644
--- a/cli/config_test.go
+++ b/cli/config_test.go
@@ -14,17 +14,20 @@ import (
"path/filepath"
"testing"
+ "github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCreateConfig(t *testing.T) {
rootdir := t.TempDir()
- err := createConfig(rootdir, NewDefraCommand().PersistentFlags())
+ flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
+
+ err := createConfig(rootdir, flags)
require.NoError(t, err)
// ensure no errors when config already exists
- err = createConfig(rootdir, NewDefraCommand().PersistentFlags())
+ err = createConfig(rootdir, flags)
require.NoError(t, err)
assert.FileExists(t, filepath.Join(rootdir, "config.yaml"))
@@ -32,11 +35,12 @@ func TestCreateConfig(t *testing.T) {
func TestLoadConfigNotExist(t *testing.T) {
rootdir := t.TempDir()
- cfg, err := loadConfig(rootdir, NewDefraCommand().PersistentFlags())
+ flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
+
+ cfg, err := loadConfig(rootdir, flags)
require.NoError(t, err)
assert.Equal(t, 5, cfg.GetInt("datastore.maxtxnretries"))
-
assert.Equal(t, filepath.Join(rootdir, "data"), cfg.GetString("datastore.badger.path"))
assert.Equal(t, 1<<30, cfg.GetInt("datastore.badger.valuelogfilesize"))
assert.Equal(t, "badger", cfg.GetString("datastore.store"))
@@ -58,5 +62,10 @@ func TestLoadConfigNotExist(t *testing.T) {
assert.Equal(t, false, cfg.GetBool("log.stacktrace"))
assert.Equal(t, false, cfg.GetBool("log.source"))
assert.Equal(t, "", cfg.GetString("log.overrides"))
- assert.Equal(t, false, cfg.GetBool("log.nocolor"))
+ assert.Equal(t, false, cfg.GetBool("log.colordisabled"))
+
+ assert.Equal(t, filepath.Join(rootdir, "keys"), cfg.GetString("keyring.path"))
+ assert.Equal(t, false, cfg.GetBool("keyring.disabled"))
+ assert.Equal(t, "defradb", cfg.GetString("keyring.namespace"))
+ assert.Equal(t, "file", cfg.GetString("keyring.backend"))
}
diff --git a/cli/errors.go b/cli/errors.go
index 02cd252b59..504cb9ca25 100644
--- a/cli/errors.go
+++ b/cli/errors.go
@@ -16,6 +16,14 @@ import (
"github.com/sourcenetwork/defradb/errors"
)
+const errKeyringHelp = `%w
+
+Did you forget to initialize the keyring?
+
+Use the following command to generate the required keys:
+ defradb keyring generate
+`
+
const (
errInvalidLensConfig string = "invalid lens configuration"
errSchemaVersionNotOfSchema string = "the given schema version is from a different schema"
@@ -53,3 +61,7 @@ func NewErrSchemaVersionNotOfSchema(schemaRoot string, schemaVersionID string) e
errors.NewKV("SchemaVersionID", schemaVersionID),
)
}
+
+func NewErrKeyringHelp(inner error) error {
+ return fmt.Errorf(errKeyringHelp, inner)
+}
diff --git a/cli/identity.go b/cli/identity.go
new file mode 100644
index 0000000000..66efcec098
--- /dev/null
+++ b/cli/identity.go
@@ -0,0 +1,25 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+)
+
+func MakeIdentityCommand() *cobra.Command {
+ var cmd = &cobra.Command{
+ Use: "identity",
+ Short: "Interact with identity features of DefraDB instance",
+ Long: `Interact with identity features of DefraDB instance`,
+ }
+
+ return cmd
+}
diff --git a/cli/identity_new.go b/cli/identity_new.go
new file mode 100644
index 0000000000..e7101c1bae
--- /dev/null
+++ b/cli/identity_new.go
@@ -0,0 +1,46 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/sourcenetwork/defradb/acp/identity"
+)
+
+func MakeIdentityNewCommand() *cobra.Command {
+ var cmd = &cobra.Command{
+ Use: "new",
+ Short: "Generate a new identity",
+ Long: `Generate a new identity
+
+The generated identity contains:
+- A secp256k1 private key that is a 256-bit big-endian binary-encoded number,
+padded to a length of 32 bytes in HEX format.
+- A compressed 33-byte secp256k1 public key in HEX format.
+- A "did:key" generated from the public key.
+
+Example: generate a new identity:
+ defradb identity new
+
+`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ newIdentity, err := identity.Generate()
+ if err != nil {
+ return err
+ }
+
+ return writeJSON(cmd, newIdentity)
+ },
+ }
+
+ return cmd
+}
diff --git a/cli/identity_new_test.go b/cli/identity_new_test.go
new file mode 100644
index 0000000000..cb4367abe3
--- /dev/null
+++ b/cli/identity_new_test.go
@@ -0,0 +1,26 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewIdentityGeneration(t *testing.T) {
+ cmd := NewDefraCommand()
+
+ cmd.SetArgs([]string{"identity", "new"})
+
+ err := cmd.Execute()
+ require.NoError(t, err)
+}
diff --git a/cli/keyring.go b/cli/keyring.go
new file mode 100644
index 0000000000..a905ce190b
--- /dev/null
+++ b/cli/keyring.go
@@ -0,0 +1,39 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+)
+
+func MakeKeyringCommand() *cobra.Command {
+ var cmd = &cobra.Command{
+ Use: "keyring",
+ Short: "Manage DefraDB private keys",
+ Long: `Manage DefraDB private keys.
+Generate, import, and export private keys.
+
+The following keys are loaded from the keyring on start:
+ peer-key: Ed25519 private key (required)
+ encryption-key: AES-128, AES-192, or AES-256 key (optional)
+
+To randomly generate the required keys, run the following command:
+ defradb keyring generate
+
+To import externally generated keys, run the following command:
+ defradb keyring import
+
+To learn more about the available options:
+ defradb keyring --help
+`,
+ }
+ return cmd
+}
diff --git a/cli/keyring_export.go b/cli/keyring_export.go
new file mode 100644
index 0000000000..775672fc8a
--- /dev/null
+++ b/cli/keyring_export.go
@@ -0,0 +1,41 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+)
+
+func MakeKeyringExportCommand() *cobra.Command {
+ var cmd = &cobra.Command{
+ Use: "export <name>",
+ Short: "Export a private key",
+ Long: `Export a private key.
+Prints the hexadecimal representation of a private key.
+
+Example:
+ defradb keyring export encryption-key`,
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ keyring, err := openKeyring(cmd)
+ if err != nil {
+ return err
+ }
+ keyBytes, err := keyring.Get(args[0])
+ if err != nil {
+ return err
+ }
+ cmd.Printf("%x\n", keyBytes)
+ return nil
+ },
+ }
+ return cmd
+}
diff --git a/cli/keyring_export_test.go b/cli/keyring_export_test.go
new file mode 100644
index 0000000000..8631ff70ab
--- /dev/null
+++ b/cli/keyring_export_test.go
@@ -0,0 +1,51 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "bytes"
+ "encoding/hex"
+ "strings"
+ "testing"
+
+ "github.com/sourcenetwork/defradb/crypto"
+
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestKeyringExport(t *testing.T) {
+ rootdir := t.TempDir()
+ readPassword = func(_ *cobra.Command, _ string) ([]byte, error) {
+ return []byte("secret"), nil
+ }
+
+ keyBytes, err := crypto.GenerateAES256()
+ require.NoError(t, err)
+ keyHex := hex.EncodeToString(keyBytes)
+
+ cmd := NewDefraCommand()
+ cmd.SetArgs([]string{"keyring", "import", "--rootdir", rootdir, encryptionKeyName, keyHex})
+
+ err = cmd.Execute()
+ require.NoError(t, err)
+
+ var output bytes.Buffer
+ cmd.SetOut(&output)
+ cmd.SetArgs([]string{"keyring", "export", "--rootdir", rootdir, encryptionKeyName})
+
+ err = cmd.Execute()
+ require.NoError(t, err)
+
+ actualKeyHex := strings.TrimSpace(output.String())
+ assert.Equal(t, keyHex, actualKeyHex)
+}
diff --git a/cli/keyring_generate.go b/cli/keyring_generate.go
new file mode 100644
index 0000000000..34209671a5
--- /dev/null
+++ b/cli/keyring_generate.go
@@ -0,0 +1,77 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/sourcenetwork/defradb/crypto"
+)
+
+func MakeKeyringGenerateCommand() *cobra.Command {
+ var noEncryptionKey bool
+ var noPeerKey bool
+ var cmd = &cobra.Command{
+ Use: "generate",
+ Short: "Generate private keys",
+ Long: `Generate private keys.
+Randomly generate and store private keys in the keyring.
+By default peer and encryption keys will be generated.
+
+WARNING: This will overwrite existing keys in the keyring.
+
+Example:
+ defradb keyring generate
+
+Example: with no encryption key
+ defradb keyring generate --no-encryption-key
+
+Example: with no peer key
+ defradb keyring generate --no-peer-key
+
+Example: with system keyring
+ defradb keyring generate --keyring-backend system`,
+ RunE: func(cmd *cobra.Command, args []string) error {
+ keyring, err := openKeyring(cmd)
+ if err != nil {
+ return err
+ }
+ if !noEncryptionKey {
+ encryptionKey, err := crypto.GenerateAES256()
+ if err != nil {
+ return err
+ }
+ err = keyring.Set(encryptionKeyName, encryptionKey)
+ if err != nil {
+ return err
+ }
+ log.Info("generated encryption key")
+ }
+ if !noPeerKey {
+ peerKey, err := crypto.GenerateEd25519()
+ if err != nil {
+ return err
+ }
+ err = keyring.Set(peerKeyName, peerKey)
+ if err != nil {
+ return err
+ }
+ log.Info("generated peer key")
+ }
+ return nil
+ },
+ }
+ cmd.Flags().BoolVar(&noEncryptionKey, "no-encryption-key", false,
+ "Skip generating an encryption key. Encryption at rest will be disabled")
+ cmd.Flags().BoolVar(&noPeerKey, "no-peer-key", false,
+ "Skip generating a peer key.")
+ return cmd
+}
diff --git a/cli/keyring_generate_test.go b/cli/keyring_generate_test.go
new file mode 100644
index 0000000000..b29446bd15
--- /dev/null
+++ b/cli/keyring_generate_test.go
@@ -0,0 +1,68 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "path/filepath"
+ "testing"
+
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestKeyringGenerate(t *testing.T) {
+ rootdir := t.TempDir()
+ readPassword = func(_ *cobra.Command, _ string) ([]byte, error) {
+ return []byte("secret"), nil
+ }
+
+ cmd := NewDefraCommand()
+ cmd.SetArgs([]string{"keyring", "generate", "--rootdir", rootdir})
+
+ err := cmd.Execute()
+ require.NoError(t, err)
+
+ assert.FileExists(t, filepath.Join(rootdir, "keys", encryptionKeyName))
+ assert.FileExists(t, filepath.Join(rootdir, "keys", peerKeyName))
+}
+
+func TestKeyringGenerateNoEncryptionKey(t *testing.T) {
+ rootdir := t.TempDir()
+ readPassword = func(_ *cobra.Command, _ string) ([]byte, error) {
+ return []byte("secret"), nil
+ }
+
+ cmd := NewDefraCommand()
+ cmd.SetArgs([]string{"keyring", "generate", "--no-encryption-key", "--rootdir", rootdir})
+
+ err := cmd.Execute()
+ require.NoError(t, err)
+
+ assert.NoFileExists(t, filepath.Join(rootdir, "keys", encryptionKeyName))
+ assert.FileExists(t, filepath.Join(rootdir, "keys", peerKeyName))
+}
+
+func TestKeyringGenerateNoPeerKey(t *testing.T) {
+ rootdir := t.TempDir()
+ readPassword = func(_ *cobra.Command, _ string) ([]byte, error) {
+ return []byte("secret"), nil
+ }
+
+ cmd := NewDefraCommand()
+ cmd.SetArgs([]string{"keyring", "generate", "--no-peer-key", "--rootdir", rootdir})
+
+ err := cmd.Execute()
+ require.NoError(t, err)
+
+ assert.FileExists(t, filepath.Join(rootdir, "keys", encryptionKeyName))
+ assert.NoFileExists(t, filepath.Join(rootdir, "keys", peerKeyName))
+}
diff --git a/cli/keyring_import.go b/cli/keyring_import.go
new file mode 100644
index 0000000000..61f80f12a1
--- /dev/null
+++ b/cli/keyring_import.go
@@ -0,0 +1,42 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "encoding/hex"
+
+ "github.com/spf13/cobra"
+)
+
+func MakeKeyringImportCommand() *cobra.Command {
+ var cmd = &cobra.Command{
+ Use: "import <name> <private-key-hex>",
+ Short: "Import a private key",
+ Long: `Import a private key.
+Store an externally generated key in the keyring.
+
+Example:
+ defradb keyring import encryption-key 0000000000000000`,
+ Args: cobra.ExactArgs(2),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ keyring, err := openKeyring(cmd)
+ if err != nil {
+ return err
+ }
+ keyBytes, err := hex.DecodeString(args[1])
+ if err != nil {
+ return err
+ }
+ return keyring.Set(args[0], keyBytes)
+ },
+ }
+ return cmd
+}
diff --git a/cli/keyring_import_test.go b/cli/keyring_import_test.go
new file mode 100644
index 0000000000..dac907e000
--- /dev/null
+++ b/cli/keyring_import_test.go
@@ -0,0 +1,42 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+ "encoding/hex"
+ "path/filepath"
+ "testing"
+
+ "github.com/sourcenetwork/defradb/crypto"
+
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestKeyringImport(t *testing.T) {
+ rootdir := t.TempDir()
+ readPassword = func(_ *cobra.Command, _ string) ([]byte, error) {
+ return []byte("secret"), nil
+ }
+
+ keyBytes, err := crypto.GenerateAES256()
+ require.NoError(t, err)
+ keyHex := hex.EncodeToString(keyBytes)
+
+ cmd := NewDefraCommand()
+ cmd.SetArgs([]string{"keyring", "import", "--rootdir", rootdir, encryptionKeyName, keyHex})
+
+ err = cmd.Execute()
+ require.NoError(t, err)
+
+ assert.FileExists(t, filepath.Join(rootdir, "keys", encryptionKeyName))
+}
diff --git a/cli/request.go b/cli/request.go
index 3dba0c197d..b6ec8e05ce 100644
--- a/cli/request.go
+++ b/cli/request.go
@@ -38,7 +38,7 @@ Do a query request from a file by using the '-f' flag. Example command:
defradb client query -f request.graphql
Do a query request from a file and with an identity. Example command:
- defradb client query -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f request.graphql
+ defradb client query -i 028d53f37a19afb9a0dbc5b4be30c65731479ee8cfa0c9bc8f8bf198cc3c075f -f request.graphql
Or it can be sent via stdin by using the '-' special syntax. Example command:
cat request.graphql | defradb client query -
@@ -77,12 +77,12 @@ To learn more about the DefraDB GraphQL Query Language, refer to https://docs.so
for _, err := range result.GQL.Errors {
errors = append(errors, err.Error())
}
- if result.Pub == nil {
+ if result.Subscription == nil {
cmd.Print(REQ_RESULTS_HEADER)
return writeJSON(cmd, map[string]any{"data": result.GQL.Data, "errors": errors})
}
cmd.Print(SUB_RESULTS_HEADER)
- for item := range result.Pub.Stream() {
+ for item := range result.Subscription {
writeJSON(cmd, item) //nolint:errcheck
}
return nil
diff --git a/cli/root.go b/cli/root.go
index 8fc8baf628..51bf0e2ed6 100644
--- a/cli/root.go
+++ b/cli/root.go
@@ -30,114 +30,72 @@ Start a DefraDB node, interact with a local or remote node, and much more.
return setContextConfig(cmd)
},
}
-
+ // set default flag values from config
+ cfg := defaultConfig()
cmd.PersistentFlags().String(
"rootdir",
"",
"Directory for persistent data (default: $HOME/.defradb)",
)
-
cmd.PersistentFlags().String(
"log-level",
- "info",
+ cfg.GetString(configFlags["log-level"]),
"Log level to use. Options are debug, info, error, fatal",
)
-
cmd.PersistentFlags().String(
"log-output",
- "stderr",
+ cfg.GetString(configFlags["log-output"]),
"Log output path. Options are stderr or stdout.",
)
-
cmd.PersistentFlags().String(
"log-format",
- "text",
+ cfg.GetString(configFlags["log-format"]),
"Log format to use. Options are text or json",
)
-
cmd.PersistentFlags().Bool(
"log-stacktrace",
- false,
+ cfg.GetBool(configFlags["log-stacktrace"]),
"Include stacktrace in error and fatal logs",
)
-
cmd.PersistentFlags().Bool(
"log-source",
- false,
+ cfg.GetBool(configFlags["log-source"]),
"Include source location in logs",
)
-
cmd.PersistentFlags().String(
"log-overrides",
- "",
+ cfg.GetString(configFlags["log-overrides"]),
"Logger config overrides. Format <name>,<key>=<val>,...;<name>,...",
)
-
cmd.PersistentFlags().Bool(
- "log-no-color",
- false,
+ "no-log-color",
+ cfg.GetBool(configFlags["no-log-color"]),
"Disable colored log output",
)
-
cmd.PersistentFlags().String(
"url",
- "127.0.0.1:9181",
+ cfg.GetString(configFlags["url"]),
"URL of HTTP endpoint to listen on or connect to",
)
-
- cmd.PersistentFlags().StringArray(
- "peers",
- []string{},
- "List of peers to connect to",
- )
-
- cmd.PersistentFlags().Int(
- "max-txn-retries",
- 5,
- "Specify the maximum number of retries per transaction",
- )
-
cmd.PersistentFlags().String(
- "store",
- "badger",
- "Specify the datastore to use (supported: badger, memory)",
+ "keyring-namespace",
+ cfg.GetString(configFlags["keyring-namespace"]),
+ "Service name to use when using the system backend",
)
-
- cmd.PersistentFlags().Int(
- "valuelogfilesize",
- 1<<30,
- "Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize",
- )
-
- cmd.PersistentFlags().StringSlice(
- "p2paddr",
- []string{"/ip4/127.0.0.1/tcp/9171"},
- "Listen addresses for the p2p network (formatted as a libp2p MultiAddr)",
- )
-
- cmd.PersistentFlags().Bool(
- "no-p2p",
- false,
- "Disable the peer-to-peer network synchronization system",
- )
-
- cmd.PersistentFlags().StringArray(
- "allowed-origins",
- []string{},
- "List of origins to allow for CORS requests",
- )
-
cmd.PersistentFlags().String(
- "pubkeypath",
- "",
- "Path to the public key for tls",
+ "keyring-backend",
+ cfg.GetString(configFlags["keyring-backend"]),
+ "Keyring backend to use. Options are file or system",
)
-
cmd.PersistentFlags().String(
- "privkeypath",
- "",
- "Path to the private key for tls",
+ "keyring-path",
+ cfg.GetString(configFlags["keyring-path"]),
+ "Path to store encrypted keys when using the file backend",
+ )
+ cmd.PersistentFlags().Bool(
+ "no-keyring",
+ cfg.GetBool(configFlags["no-keyring"]),
+ "Disable the keyring and generate ephemeral keys",
)
-
return cmd
}
diff --git a/cli/server_dump.go b/cli/server_dump.go
index 767b86f364..9008c81730 100644
--- a/cli/server_dump.go
+++ b/cli/server_dump.go
@@ -13,8 +13,9 @@ package cli
import (
"github.com/spf13/cobra"
- "github.com/sourcenetwork/defradb/db"
+ "github.com/sourcenetwork/defradb/acp"
"github.com/sourcenetwork/defradb/errors"
+ "github.com/sourcenetwork/defradb/internal/db"
"github.com/sourcenetwork/defradb/node"
)
@@ -32,11 +33,11 @@ func MakeServerDumpCmd() *cobra.Command {
storeOpts := []node.StoreOpt{
node.WithPath(cfg.GetString("datastore.badger.path")),
}
- rootstore, err := node.NewStore(storeOpts...)
+ rootstore, err := node.NewStore(cmd.Context(), storeOpts...)
if err != nil {
return err
}
- db, err := db.NewDB(cmd.Context(), rootstore)
+ db, err := db.NewDB(cmd.Context(), rootstore, acp.NoACP, nil)
if err != nil {
return errors.Wrap("failed to initialize database", err)
}
diff --git a/cli/start.go b/cli/start.go
index ca9267e7e9..970b857aa0 100644
--- a/cli/start.go
+++ b/cli/start.go
@@ -14,15 +14,15 @@ import (
"fmt"
"os"
"os/signal"
- "path/filepath"
"syscall"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/spf13/cobra"
- "github.com/sourcenetwork/defradb/db"
"github.com/sourcenetwork/defradb/errors"
"github.com/sourcenetwork/defradb/http"
+ "github.com/sourcenetwork/defradb/internal/db"
+ "github.com/sourcenetwork/defradb/keyring"
"github.com/sourcenetwork/defradb/net"
netutils "github.com/sourcenetwork/defradb/net/utils"
"github.com/sourcenetwork/defradb/node"
@@ -39,7 +39,7 @@ func MakeStartCommand() *cobra.Command {
return err
}
rootdir := mustGetContextRootDir(cmd)
- if err := createConfig(rootdir, cmd.Root().PersistentFlags()); err != nil {
+ if err := createConfig(rootdir, cmd.Flags()); err != nil {
return err
}
return setContextConfig(cmd)
@@ -47,67 +47,60 @@ func MakeStartCommand() *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
cfg := mustGetContextConfig(cmd)
- dbOpts := []db.Option{
- db.WithUpdateEvents(),
- db.WithMaxRetries(cfg.GetInt("datastore.MaxTxnRetries")),
- // TODO-ACP: Infuture when we add support for the --no-acp flag when admin signatures are in,
- // we can allow starting of db without acp. Currently that can only be done programmatically.
- // https://github.com/sourcenetwork/defradb/issues/2271
- db.WithACPInMemory(),
+ var peers []peer.AddrInfo
+ if val := cfg.GetStringSlice("net.peers"); len(val) > 0 {
+ addrs, err := netutils.ParsePeers(val)
+ if err != nil {
+ return errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %s", val), err)
+ }
+ peers = addrs
}
- netOpts := []net.NodeOpt{
+ opts := []node.Option{
+ node.WithPath(cfg.GetString("datastore.badger.path")),
+ node.WithInMemory(cfg.GetString("datastore.store") == configStoreMemory),
+ node.WithDisableP2P(cfg.GetBool("net.p2pDisabled")),
+ node.WithACPType(node.LocalACPType),
+ node.WithPeers(peers...),
+ // db options
+ db.WithMaxRetries(cfg.GetInt("datastore.MaxTxnRetries")),
+ // net node options
net.WithListenAddresses(cfg.GetStringSlice("net.p2pAddresses")...),
net.WithEnablePubSub(cfg.GetBool("net.pubSubEnabled")),
net.WithEnableRelay(cfg.GetBool("net.relayEnabled")),
- }
-
- serverOpts := []http.ServerOpt{
+ // http server options
http.WithAddress(cfg.GetString("api.address")),
http.WithAllowedOrigins(cfg.GetStringSlice("api.allowed-origins")...),
http.WithTLSCertPath(cfg.GetString("api.pubKeyPath")),
http.WithTLSKeyPath(cfg.GetString("api.privKeyPath")),
- }
-
- storeOpts := []node.StoreOpt{
- node.WithPath(cfg.GetString("datastore.badger.path")),
- node.WithInMemory(cfg.GetString("datastore.store") == configStoreMemory),
- }
-
- var peers []peer.AddrInfo
- if val := cfg.GetStringSlice("net.peers"); len(val) > 0 {
- addrs, err := netutils.ParsePeers(val)
- if err != nil {
- return errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %s", val), err)
- }
- peers = addrs
+ node.WithLensRuntime(node.LensRuntimeType(cfg.GetString("lens.runtime"))),
}
if cfg.GetString("datastore.store") != configStoreMemory {
- // It would be ideal to not have the key path tied to the datastore.
- // Running with memory store mode will always generate a random key.
- // Adding support for an ephemeral mode and moving the key to the
- // config would solve both of these issues.
rootDir := mustGetContextRootDir(cmd)
- key, err := loadOrGeneratePrivateKey(filepath.Join(rootDir, "data", "key"))
- if err != nil {
- return err
- }
- netOpts = append(netOpts, net.WithPrivateKey(key))
-
// TODO-ACP: Infuture when we add support for the --no-acp flag when admin signatures are in,
// we can allow starting of db without acp. Currently that can only be done programmatically.
// https://github.com/sourcenetwork/defradb/issues/2271
- dbOpts = append(dbOpts, db.WithACP(rootDir))
+ opts = append(opts, node.WithACPPath(rootDir))
}
- opts := []node.NodeOpt{
- node.WithPeers(peers...),
- node.WithStoreOpts(storeOpts...),
- node.WithDatabaseOpts(dbOpts...),
- node.WithNetOpts(netOpts...),
- node.WithServerOpts(serverOpts...),
- node.WithDisableP2P(cfg.GetBool("net.p2pDisabled")),
+ if !cfg.GetBool("keyring.disabled") {
+ kr, err := openKeyring(cmd)
+ if err != nil {
+ return NewErrKeyringHelp(err)
+ }
+ // load the required peer key
+ peerKey, err := kr.Get(peerKeyName)
+ if err != nil {
+ return NewErrKeyringHelp(err)
+ }
+ opts = append(opts, net.WithPrivateKey(peerKey))
+ // load the optional encryption key
+ encryptionKey, err := kr.Get(encryptionKeyName)
+ if err != nil && !errors.Is(err, keyring.ErrNotFound) {
+ return err
+ }
+ opts = append(opts, node.WithEncryptionKey(encryptionKey))
}
n, err := node.NewNode(cmd.Context(), opts...)
@@ -139,6 +132,52 @@ func MakeStartCommand() *cobra.Command {
return nil
},
}
-
+ // set default flag values from config
+ cfg := defaultConfig()
+ cmd.PersistentFlags().StringArray(
+ "peers",
+ cfg.GetStringSlice(configFlags["peers"]),
+ "List of peers to connect to",
+ )
+ cmd.PersistentFlags().Int(
+ "max-txn-retries",
+ cfg.GetInt(configFlags["max-txn-retries"]),
+ "Specify the maximum number of retries per transaction",
+ )
+ cmd.PersistentFlags().String(
+ "store",
+ cfg.GetString(configFlags["store"]),
+ "Specify the datastore to use (supported: badger, memory)",
+ )
+ cmd.PersistentFlags().Int(
+ "valuelogfilesize",
+ cfg.GetInt(configFlags["valuelogfilesize"]),
+ "Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize",
+ )
+ cmd.PersistentFlags().StringSlice(
+ "p2paddr",
+ cfg.GetStringSlice(configFlags["p2paddr"]),
+ "Listen addresses for the p2p network (formatted as a libp2p MultiAddr)",
+ )
+ cmd.PersistentFlags().Bool(
+ "no-p2p",
+ cfg.GetBool(configFlags["no-p2p"]),
+ "Disable the peer-to-peer network synchronization system",
+ )
+ cmd.PersistentFlags().StringArray(
+ "allowed-origins",
+ cfg.GetStringSlice(configFlags["allowed-origins"]),
+ "List of origins to allow for CORS requests",
+ )
+ cmd.PersistentFlags().String(
+ "pubkeypath",
+ cfg.GetString(configFlags["pubkeypath"]),
+ "Path to the public key for tls",
+ )
+ cmd.PersistentFlags().String(
+ "privkeypath",
+ cfg.GetString(configFlags["privkeypath"]),
+ "Path to the private key for tls",
+ )
return cmd
}
diff --git a/cli/utils.go b/cli/utils.go
index 25af57528b..d1ee09962b 100644
--- a/cli/utils.go
+++ b/cli/utils.go
@@ -12,18 +12,27 @@ package cli
import (
"context"
+ "encoding/hex"
"encoding/json"
"os"
"path/filepath"
+ "syscall"
- "github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
"github.com/spf13/cobra"
"github.com/spf13/viper"
+ "golang.org/x/term"
acpIdentity "github.com/sourcenetwork/defradb/acp/identity"
"github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/db"
"github.com/sourcenetwork/defradb/http"
+ "github.com/sourcenetwork/defradb/internal/db"
+ "github.com/sourcenetwork/defradb/keyring"
+)
+
+const (
+ peerKeyName = "peer-key"
+ encryptionKeyName = "encryption-key"
)
type contextKey string
@@ -42,6 +51,14 @@ var (
colContextKey = contextKey("col")
)
+// readPassword reads a user input password without echoing it to the terminal.
+var readPassword = func(cmd *cobra.Command, msg string) ([]byte, error) {
+ cmd.Print(msg)
+ pass, err := term.ReadPassword(int(syscall.Stdin))
+ cmd.Println("")
+ return pass, err
+}
+
// mustGetContextDB returns the db for the current command context.
//
// If a db is not set in the current context this function panics.
@@ -99,8 +116,7 @@ func setContextDB(cmd *cobra.Command) error {
// setContextConfig sets teh config for the current command context.
func setContextConfig(cmd *cobra.Command) error {
rootdir := mustGetContextRootDir(cmd)
- flags := cmd.Root().PersistentFlags()
- cfg, err := loadConfig(rootdir, flags)
+ cfg, err := loadConfig(rootdir, cmd.Flags())
if err != nil {
return err
}
@@ -125,12 +141,21 @@ func setContextTransaction(cmd *cobra.Command, txId uint64) error {
}
// setContextIdentity sets the identity for the current command context.
-func setContextIdentity(cmd *cobra.Command, identity string) error {
- // TODO-ACP: `https://github.com/sourcenetwork/defradb/issues/2358` do the validation here.
- if identity == "" {
+func setContextIdentity(cmd *cobra.Command, privateKeyHex string) error {
+ if privateKeyHex == "" {
return nil
}
- ctx := db.SetContextIdentity(cmd.Context(), acpIdentity.New(identity))
+ data, err := hex.DecodeString(privateKeyHex)
+ if err != nil {
+ return err
+ }
+ privKey := secp256k1.PrivKeyFromBytes(data)
+ identity, err := acpIdentity.FromPrivateKey(privKey)
+ if err != nil {
+ return err
+ }
+
+ ctx := db.SetContextIdentity(cmd.Context(), identity)
cmd.SetContext(ctx)
return nil
}
@@ -153,44 +178,24 @@ func setContextRootDir(cmd *cobra.Command) error {
return nil
}
-// loadOrGeneratePrivateKey loads the private key from the given path
-// or generates a new key and writes it to a file at the given path.
-func loadOrGeneratePrivateKey(path string) (crypto.PrivKey, error) {
- key, err := loadPrivateKey(path)
- if err == nil {
- return key, nil
- }
- if os.IsNotExist(err) {
- return generatePrivateKey(path)
- }
- return nil, err
-}
-
-// generatePrivateKey generates a new private key and writes it
-// to a file at the given path.
-func generatePrivateKey(path string) (crypto.PrivKey, error) {
- key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
- if err != nil {
- return nil, err
- }
- data, err := crypto.MarshalPrivateKey(key)
- if err != nil {
- return nil, err
+// openKeyring opens the keyring for the current environment.
+func openKeyring(cmd *cobra.Command) (keyring.Keyring, error) {
+ cfg := mustGetContextConfig(cmd)
+ backend := cfg.Get("keyring.backend")
+ if backend == "system" {
+ return keyring.OpenSystemKeyring(cfg.GetString("keyring.namespace")), nil
}
- err = os.MkdirAll(filepath.Dir(path), 0755)
- if err != nil {
- return nil, err
+ if backend != "file" {
+ log.Info("keyring defaulted to file backend")
}
- return key, os.WriteFile(path, data, 0644)
-}
-
-// loadPrivateKey reads the private key from the file at the given path.
-func loadPrivateKey(path string) (crypto.PrivKey, error) {
- data, err := os.ReadFile(path)
- if err != nil {
+ path := cfg.GetString("keyring.path")
+ if err := os.MkdirAll(path, 0755); err != nil {
return nil, err
}
- return crypto.UnmarshalPrivateKey(data)
+ prompt := keyring.PromptFunc(func(s string) ([]byte, error) {
+ return readPassword(cmd, s)
+ })
+ return keyring.OpenFileKeyring(path, prompt)
}
func writeJSON(cmd *cobra.Command, out any) error {
diff --git a/client/collection.go b/client/collection.go
index 38c309a0e8..b557e2e335 100644
--- a/client/collection.go
+++ b/client/collection.go
@@ -122,21 +122,6 @@ type Collection interface {
// GetIndexes returns all the indexes that exist on the collection.
GetIndexes(ctx context.Context) ([]IndexDescription, error)
-
- // CreateDocIndex creates an index for the given document.
- // WARNING: This method is only for internal use and is not supposed to be called by the client
- // as it might compromise the integrity of the database. This method will be removed in the future
- CreateDocIndex(context.Context, *Document) error
-
- // UpdateDocIndex updates the index for the given document.
- // WARNING: This method is only for internal use and is not supposed to be called by the client
- // as it might compromise the integrity of the database. This method will be removed in the future
- UpdateDocIndex(ctx context.Context, oldDoc, newDoc *Document) error
-
- // DeleteDocIndex deletes the index for the given document.
- // WARNING: This method is only for internal use and is not supposed to be called by the client
- // as it might compromise the integrity of the database. This method will be removed in the future
- DeleteDocIndex(context.Context, *Document) error
}
// DocIDResult wraps the result of an attempt at a DocID retrieval operation.
diff --git a/client/db.go b/client/db.go
index c5cb95eb4b..e52dfed60a 100644
--- a/client/db.go
+++ b/client/db.go
@@ -13,12 +13,12 @@ package client
import (
"context"
- blockstore "github.com/ipfs/boxo/blockstore"
+ ds "github.com/ipfs/go-datastore"
"github.com/lens-vm/lens/host-go/config/model"
"github.com/sourcenetwork/immutable"
"github.com/sourcenetwork/defradb/datastore"
- "github.com/sourcenetwork/defradb/events"
+ "github.com/sourcenetwork/defradb/event"
)
type CollectionName = string
@@ -48,13 +48,18 @@ type DB interface {
// Blockstore returns the blockstore, within which all blocks (commits) managed by DefraDB are held.
//
// It sits within the rootstore returned by [Root].
- Blockstore() blockstore.Blockstore
+ Blockstore() datastore.DAGStore
// Peerstore returns the peerstore where known host information is stored.
//
// It sits within the rootstore returned by [Root].
Peerstore() datastore.DSBatching
+ // Headstore returns the headstore where the current heads of the database are stored.
+ //
+ // It is read-only and sits within the rootstore returned by [Root].
+ Headstore() ds.Read
+
// Close closes the database instance and releases any resources held.
//
// The behaviour of other functions in this package after this function has been called is undefined
@@ -70,7 +75,7 @@ type DB interface {
//
// It may be used to monitor database events - a new event will be yielded for each mutation.
// Note: it does not copy the queue, just the reference to it.
- Events() events.Events
+ Events() *event.Bus
// MaxTxnRetries returns the number of retries that this DefraDB instance has been configured to
// make in the event of a transaction conflict in certain scenarios.
@@ -260,9 +265,9 @@ type RequestResult struct {
// GQL contains the immediate results of the GQL request.
GQL GQLResult
- // Pub contains a pointer to an event stream which channels any subscription results
- // if the request was a GQL subscription.
- Pub *events.Publisher[events.Update]
+ // Subscription is an optional channel which returns results
+ // from a subscription request.
+ Subscription <-chan GQLResult
}
// CollectionFetchOptions represents a set of options used for fetching collections.
diff --git a/client/document.go b/client/document.go
index 4534e9fa33..ada47cc8f9 100644
--- a/client/document.go
+++ b/client/document.go
@@ -24,7 +24,7 @@ import (
"github.com/valyala/fastjson"
"github.com/sourcenetwork/defradb/client/request"
- ccid "github.com/sourcenetwork/defradb/core/cid"
+ ccid "github.com/sourcenetwork/defradb/internal/core/cid"
)
// This is the main implementation starting point for accessing the internal Document API
@@ -777,6 +777,11 @@ func (doc *Document) GenerateDocID() (DocID, error) {
return DocID{}, err
}
+ // The DocID must take into consideration the schema root, this ensures that
+ // otherwise identical documents created using different schema will have different
+ // document IDs - we do not want cross-schema docID collisions.
+ bytes = append(bytes, []byte(doc.collectionDefinition.Schema.Root)...)
+
cid, err := ccid.NewSHA256CidV1(bytes)
if err != nil {
return DocID{}, err
diff --git a/client/document_test.go b/client/document_test.go
index a70e868e0e..b15c7b019a 100644
--- a/client/document_test.go
+++ b/client/document_test.go
@@ -18,7 +18,7 @@ import (
"github.com/sourcenetwork/immutable"
- ccid "github.com/sourcenetwork/defradb/core/cid"
+ ccid "github.com/sourcenetwork/defradb/internal/core/cid"
)
var (
diff --git a/client/lens.go b/client/lens.go
index 3f5befc604..997ddb4831 100644
--- a/client/lens.go
+++ b/client/lens.go
@@ -15,6 +15,8 @@ import (
"github.com/lens-vm/lens/host-go/config/model"
"github.com/sourcenetwork/immutable/enumerable"
+
+ "github.com/sourcenetwork/defradb/datastore"
)
// LensConfig represents the configuration of a Lens migration in Defra.
@@ -38,9 +40,18 @@ type LensConfig struct {
model.Lens
}
+// TxnSource represents an object capable of constructing the transactions that
+// implicit-transaction registries need internally.
+type TxnSource interface {
+ NewTxn(context.Context, bool) (datastore.Txn, error)
+}
+
// LensRegistry exposes several useful thread-safe migration related functions which may
// be used to manage migrations.
type LensRegistry interface {
+ // Init initializes the registry with the provided transaction source.
+ Init(TxnSource)
+
// SetMigration caches the migration for the given collection ID. It does not persist the migration in long
// term storage, for that one should call [Store.SetMigration(ctx, cfg)].
//
diff --git a/client/mocks/collection.go b/client/mocks/collection.go
index 7c227edd2b..3b80849661 100644
--- a/client/mocks/collection.go
+++ b/client/mocks/collection.go
@@ -29,6 +29,10 @@ func (_m *Collection) EXPECT() *Collection_Expecter {
func (_m *Collection) Create(ctx context.Context, doc *client.Document) error {
ret := _m.Called(ctx, doc)
+ if len(ret) == 0 {
+ panic("no return value specified for Create")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
r0 = rf(ctx, doc)
@@ -68,53 +72,14 @@ func (_c *Collection_Create_Call) RunAndReturn(run func(context.Context, *client
return _c
}
-// CreateDocIndex provides a mock function with given fields: _a0, _a1
-func (_m *Collection) CreateDocIndex(_a0 context.Context, _a1 *client.Document) error {
- ret := _m.Called(_a0, _a1)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
- r0 = rf(_a0, _a1)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// Collection_CreateDocIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateDocIndex'
-type Collection_CreateDocIndex_Call struct {
- *mock.Call
-}
-
-// CreateDocIndex is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 *client.Document
-func (_e *Collection_Expecter) CreateDocIndex(_a0 interface{}, _a1 interface{}) *Collection_CreateDocIndex_Call {
- return &Collection_CreateDocIndex_Call{Call: _e.mock.On("CreateDocIndex", _a0, _a1)}
-}
-
-func (_c *Collection_CreateDocIndex_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_CreateDocIndex_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(*client.Document))
- })
- return _c
-}
-
-func (_c *Collection_CreateDocIndex_Call) Return(_a0 error) *Collection_CreateDocIndex_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *Collection_CreateDocIndex_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_CreateDocIndex_Call {
- _c.Call.Return(run)
- return _c
-}
-
// CreateIndex provides a mock function with given fields: _a0, _a1
func (_m *Collection) CreateIndex(_a0 context.Context, _a1 client.IndexDescription) (client.IndexDescription, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateIndex")
+ }
+
var r0 client.IndexDescription
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, client.IndexDescription) (client.IndexDescription, error)); ok {
@@ -168,6 +133,10 @@ func (_c *Collection_CreateIndex_Call) RunAndReturn(run func(context.Context, cl
func (_m *Collection) CreateMany(ctx context.Context, docs []*client.Document) error {
ret := _m.Called(ctx, docs)
+ if len(ret) == 0 {
+ panic("no return value specified for CreateMany")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, []*client.Document) error); ok {
r0 = rf(ctx, docs)
@@ -211,6 +180,10 @@ func (_c *Collection_CreateMany_Call) RunAndReturn(run func(context.Context, []*
func (_m *Collection) Definition() client.CollectionDefinition {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Definition")
+ }
+
var r0 client.CollectionDefinition
if rf, ok := ret.Get(0).(func() client.CollectionDefinition); ok {
r0 = rf()
@@ -252,6 +225,10 @@ func (_c *Collection_Definition_Call) RunAndReturn(run func() client.CollectionD
func (_m *Collection) Delete(ctx context.Context, docID client.DocID) (bool, error) {
ret := _m.Called(ctx, docID)
+ if len(ret) == 0 {
+ panic("no return value specified for Delete")
+ }
+
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, client.DocID) (bool, error)); ok {
@@ -301,53 +278,14 @@ func (_c *Collection_Delete_Call) RunAndReturn(run func(context.Context, client.
return _c
}
-// DeleteDocIndex provides a mock function with given fields: _a0, _a1
-func (_m *Collection) DeleteDocIndex(_a0 context.Context, _a1 *client.Document) error {
- ret := _m.Called(_a0, _a1)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
- r0 = rf(_a0, _a1)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// Collection_DeleteDocIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteDocIndex'
-type Collection_DeleteDocIndex_Call struct {
- *mock.Call
-}
-
-// DeleteDocIndex is a helper method to define mock.On call
-// - _a0 context.Context
-// - _a1 *client.Document
-func (_e *Collection_Expecter) DeleteDocIndex(_a0 interface{}, _a1 interface{}) *Collection_DeleteDocIndex_Call {
- return &Collection_DeleteDocIndex_Call{Call: _e.mock.On("DeleteDocIndex", _a0, _a1)}
-}
-
-func (_c *Collection_DeleteDocIndex_Call) Run(run func(_a0 context.Context, _a1 *client.Document)) *Collection_DeleteDocIndex_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(*client.Document))
- })
- return _c
-}
-
-func (_c *Collection_DeleteDocIndex_Call) Return(_a0 error) *Collection_DeleteDocIndex_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *Collection_DeleteDocIndex_Call) RunAndReturn(run func(context.Context, *client.Document) error) *Collection_DeleteDocIndex_Call {
- _c.Call.Return(run)
- return _c
-}
-
// DeleteWithFilter provides a mock function with given fields: ctx, filter
func (_m *Collection) DeleteWithFilter(ctx context.Context, filter interface{}) (*client.DeleteResult, error) {
ret := _m.Called(ctx, filter)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteWithFilter")
+ }
+
var r0 *client.DeleteResult
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, interface{}) (*client.DeleteResult, error)); ok {
@@ -403,6 +341,10 @@ func (_c *Collection_DeleteWithFilter_Call) RunAndReturn(run func(context.Contex
func (_m *Collection) Description() client.CollectionDescription {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Description")
+ }
+
var r0 client.CollectionDescription
if rf, ok := ret.Get(0).(func() client.CollectionDescription); ok {
r0 = rf()
@@ -444,6 +386,10 @@ func (_c *Collection_Description_Call) RunAndReturn(run func() client.Collection
func (_m *Collection) DropIndex(ctx context.Context, indexName string) error {
ret := _m.Called(ctx, indexName)
+ if len(ret) == 0 {
+ panic("no return value specified for DropIndex")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, indexName)
@@ -487,6 +433,10 @@ func (_c *Collection_DropIndex_Call) RunAndReturn(run func(context.Context, stri
func (_m *Collection) Exists(ctx context.Context, docID client.DocID) (bool, error) {
ret := _m.Called(ctx, docID)
+ if len(ret) == 0 {
+ panic("no return value specified for Exists")
+ }
+
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, client.DocID) (bool, error)); ok {
@@ -540,6 +490,10 @@ func (_c *Collection_Exists_Call) RunAndReturn(run func(context.Context, client.
func (_m *Collection) Get(ctx context.Context, docID client.DocID, showDeleted bool) (*client.Document, error) {
ret := _m.Called(ctx, docID, showDeleted)
+ if len(ret) == 0 {
+ panic("no return value specified for Get")
+ }
+
var r0 *client.Document
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, client.DocID, bool) (*client.Document, error)); ok {
@@ -596,6 +550,10 @@ func (_c *Collection_Get_Call) RunAndReturn(run func(context.Context, client.Doc
func (_m *Collection) GetAllDocIDs(ctx context.Context) (<-chan client.DocIDResult, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for GetAllDocIDs")
+ }
+
var r0 <-chan client.DocIDResult
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (<-chan client.DocIDResult, error)); ok {
@@ -650,6 +608,10 @@ func (_c *Collection_GetAllDocIDs_Call) RunAndReturn(run func(context.Context) (
func (_m *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for GetIndexes")
+ }
+
var r0 []client.IndexDescription
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) ([]client.IndexDescription, error)); ok {
@@ -704,6 +666,10 @@ func (_c *Collection_GetIndexes_Call) RunAndReturn(run func(context.Context) ([]
func (_m *Collection) ID() uint32 {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for ID")
+ }
+
var r0 uint32
if rf, ok := ret.Get(0).(func() uint32); ok {
r0 = rf()
@@ -745,6 +711,10 @@ func (_c *Collection_ID_Call) RunAndReturn(run func() uint32) *Collection_ID_Cal
func (_m *Collection) Name() immutable.Option[string] {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Name")
+ }
+
var r0 immutable.Option[string]
if rf, ok := ret.Get(0).(func() immutable.Option[string]); ok {
r0 = rf()
@@ -786,6 +756,10 @@ func (_c *Collection_Name_Call) RunAndReturn(run func() immutable.Option[string]
func (_m *Collection) Save(ctx context.Context, doc *client.Document) error {
ret := _m.Called(ctx, doc)
+ if len(ret) == 0 {
+ panic("no return value specified for Save")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
r0 = rf(ctx, doc)
@@ -829,6 +803,10 @@ func (_c *Collection_Save_Call) RunAndReturn(run func(context.Context, *client.D
func (_m *Collection) Schema() client.SchemaDescription {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Schema")
+ }
+
var r0 client.SchemaDescription
if rf, ok := ret.Get(0).(func() client.SchemaDescription); ok {
r0 = rf()
@@ -870,6 +848,10 @@ func (_c *Collection_Schema_Call) RunAndReturn(run func() client.SchemaDescripti
func (_m *Collection) SchemaRoot() string {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for SchemaRoot")
+ }
+
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
@@ -911,6 +893,10 @@ func (_c *Collection_SchemaRoot_Call) RunAndReturn(run func() string) *Collectio
func (_m *Collection) Update(ctx context.Context, docs *client.Document) error {
ret := _m.Called(ctx, docs)
+ if len(ret) == 0 {
+ panic("no return value specified for Update")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *client.Document) error); ok {
r0 = rf(ctx, docs)
@@ -950,54 +936,14 @@ func (_c *Collection_Update_Call) RunAndReturn(run func(context.Context, *client
return _c
}
-// UpdateDocIndex provides a mock function with given fields: ctx, oldDoc, newDoc
-func (_m *Collection) UpdateDocIndex(ctx context.Context, oldDoc *client.Document, newDoc *client.Document) error {
- ret := _m.Called(ctx, oldDoc, newDoc)
-
- var r0 error
- if rf, ok := ret.Get(0).(func(context.Context, *client.Document, *client.Document) error); ok {
- r0 = rf(ctx, oldDoc, newDoc)
- } else {
- r0 = ret.Error(0)
- }
-
- return r0
-}
-
-// Collection_UpdateDocIndex_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateDocIndex'
-type Collection_UpdateDocIndex_Call struct {
- *mock.Call
-}
-
-// UpdateDocIndex is a helper method to define mock.On call
-// - ctx context.Context
-// - oldDoc *client.Document
-// - newDoc *client.Document
-func (_e *Collection_Expecter) UpdateDocIndex(ctx interface{}, oldDoc interface{}, newDoc interface{}) *Collection_UpdateDocIndex_Call {
- return &Collection_UpdateDocIndex_Call{Call: _e.mock.On("UpdateDocIndex", ctx, oldDoc, newDoc)}
-}
-
-func (_c *Collection_UpdateDocIndex_Call) Run(run func(ctx context.Context, oldDoc *client.Document, newDoc *client.Document)) *Collection_UpdateDocIndex_Call {
- _c.Call.Run(func(args mock.Arguments) {
- run(args[0].(context.Context), args[1].(*client.Document), args[2].(*client.Document))
- })
- return _c
-}
-
-func (_c *Collection_UpdateDocIndex_Call) Return(_a0 error) *Collection_UpdateDocIndex_Call {
- _c.Call.Return(_a0)
- return _c
-}
-
-func (_c *Collection_UpdateDocIndex_Call) RunAndReturn(run func(context.Context, *client.Document, *client.Document) error) *Collection_UpdateDocIndex_Call {
- _c.Call.Return(run)
- return _c
-}
-
// UpdateWithFilter provides a mock function with given fields: ctx, filter, updater
func (_m *Collection) UpdateWithFilter(ctx context.Context, filter interface{}, updater string) (*client.UpdateResult, error) {
ret := _m.Called(ctx, filter, updater)
+ if len(ret) == 0 {
+ panic("no return value specified for UpdateWithFilter")
+ }
+
var r0 *client.UpdateResult
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, interface{}, string) (*client.UpdateResult, error)); ok {
diff --git a/client/mocks/db.go b/client/mocks/db.go
index 20b5988fe7..396bc5397c 100644
--- a/client/mocks/db.go
+++ b/client/mocks/db.go
@@ -3,14 +3,15 @@
package mocks
import (
- blockstore "github.com/ipfs/boxo/blockstore"
- client "github.com/sourcenetwork/defradb/client"
-
context "context"
+ client "github.com/sourcenetwork/defradb/client"
+
datastore "github.com/sourcenetwork/defradb/datastore"
- events "github.com/sourcenetwork/defradb/events"
+ event "github.com/sourcenetwork/defradb/event"
+
+ go_datastore "github.com/ipfs/go-datastore"
immutable "github.com/sourcenetwork/immutable"
@@ -36,6 +37,10 @@ func (_m *DB) EXPECT() *DB_Expecter {
func (_m *DB) AddPolicy(ctx context.Context, policy string) (client.AddPolicyResult, error) {
ret := _m.Called(ctx, policy)
+ if len(ret) == 0 {
+ panic("no return value specified for AddPolicy")
+ }
+
var r0 client.AddPolicyResult
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (client.AddPolicyResult, error)); ok {
@@ -89,6 +94,10 @@ func (_c *DB_AddPolicy_Call) RunAndReturn(run func(context.Context, string) (cli
func (_m *DB) AddSchema(_a0 context.Context, _a1 string) ([]client.CollectionDescription, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for AddSchema")
+ }
+
var r0 []client.CollectionDescription
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.CollectionDescription, error)); ok {
@@ -144,6 +153,10 @@ func (_c *DB_AddSchema_Call) RunAndReturn(run func(context.Context, string) ([]c
func (_m *DB) AddView(ctx context.Context, gqlQuery string, sdl string, transform immutable.Option[model.Lens]) ([]client.CollectionDefinition, error) {
ret := _m.Called(ctx, gqlQuery, sdl, transform)
+ if len(ret) == 0 {
+ panic("no return value specified for AddView")
+ }
+
var r0 []client.CollectionDefinition
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string, string, immutable.Option[model.Lens]) ([]client.CollectionDefinition, error)); ok {
@@ -201,6 +214,10 @@ func (_c *DB_AddView_Call) RunAndReturn(run func(context.Context, string, string
func (_m *DB) BasicExport(ctx context.Context, config *client.BackupConfig) error {
ret := _m.Called(ctx, config)
+ if len(ret) == 0 {
+ panic("no return value specified for BasicExport")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, *client.BackupConfig) error); ok {
r0 = rf(ctx, config)
@@ -244,6 +261,10 @@ func (_c *DB_BasicExport_Call) RunAndReturn(run func(context.Context, *client.Ba
func (_m *DB) BasicImport(ctx context.Context, filepath string) error {
ret := _m.Called(ctx, filepath)
+ if len(ret) == 0 {
+ panic("no return value specified for BasicImport")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(ctx, filepath)
@@ -284,15 +305,19 @@ func (_c *DB_BasicImport_Call) RunAndReturn(run func(context.Context, string) er
}
// Blockstore provides a mock function with given fields:
-func (_m *DB) Blockstore() blockstore.Blockstore {
+func (_m *DB) Blockstore() datastore.DAGStore {
ret := _m.Called()
- var r0 blockstore.Blockstore
- if rf, ok := ret.Get(0).(func() blockstore.Blockstore); ok {
+ if len(ret) == 0 {
+ panic("no return value specified for Blockstore")
+ }
+
+ var r0 datastore.DAGStore
+ if rf, ok := ret.Get(0).(func() datastore.DAGStore); ok {
r0 = rf()
} else {
if ret.Get(0) != nil {
- r0 = ret.Get(0).(blockstore.Blockstore)
+ r0 = ret.Get(0).(datastore.DAGStore)
}
}
@@ -316,12 +341,12 @@ func (_c *DB_Blockstore_Call) Run(run func()) *DB_Blockstore_Call {
return _c
}
-func (_c *DB_Blockstore_Call) Return(_a0 blockstore.Blockstore) *DB_Blockstore_Call {
+func (_c *DB_Blockstore_Call) Return(_a0 datastore.DAGStore) *DB_Blockstore_Call {
_c.Call.Return(_a0)
return _c
}
-func (_c *DB_Blockstore_Call) RunAndReturn(run func() blockstore.Blockstore) *DB_Blockstore_Call {
+func (_c *DB_Blockstore_Call) RunAndReturn(run func() datastore.DAGStore) *DB_Blockstore_Call {
_c.Call.Return(run)
return _c
}
@@ -359,14 +384,20 @@ func (_c *DB_Close_Call) RunAndReturn(run func()) *DB_Close_Call {
}
// Events provides a mock function with given fields:
-func (_m *DB) Events() events.Events {
+func (_m *DB) Events() *event.Bus {
ret := _m.Called()
- var r0 events.Events
- if rf, ok := ret.Get(0).(func() events.Events); ok {
+ if len(ret) == 0 {
+ panic("no return value specified for Events")
+ }
+
+ var r0 *event.Bus
+ if rf, ok := ret.Get(0).(func() *event.Bus); ok {
r0 = rf()
} else {
- r0 = ret.Get(0).(events.Events)
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(*event.Bus)
+ }
}
return r0
@@ -389,12 +420,12 @@ func (_c *DB_Events_Call) Run(run func()) *DB_Events_Call {
return _c
}
-func (_c *DB_Events_Call) Return(_a0 events.Events) *DB_Events_Call {
+func (_c *DB_Events_Call) Return(_a0 *event.Bus) *DB_Events_Call {
_c.Call.Return(_a0)
return _c
}
-func (_c *DB_Events_Call) RunAndReturn(run func() events.Events) *DB_Events_Call {
+func (_c *DB_Events_Call) RunAndReturn(run func() *event.Bus) *DB_Events_Call {
_c.Call.Return(run)
return _c
}
@@ -403,6 +434,10 @@ func (_c *DB_Events_Call) RunAndReturn(run func() events.Events) *DB_Events_Call
func (_m *DB) ExecRequest(ctx context.Context, request string) *client.RequestResult {
ret := _m.Called(ctx, request)
+ if len(ret) == 0 {
+ panic("no return value specified for ExecRequest")
+ }
+
var r0 *client.RequestResult
if rf, ok := ret.Get(0).(func(context.Context, string) *client.RequestResult); ok {
r0 = rf(ctx, request)
@@ -448,6 +483,10 @@ func (_c *DB_ExecRequest_Call) RunAndReturn(run func(context.Context, string) *c
func (_m *DB) GetAllIndexes(_a0 context.Context) (map[string][]client.IndexDescription, error) {
ret := _m.Called(_a0)
+ if len(ret) == 0 {
+ panic("no return value specified for GetAllIndexes")
+ }
+
var r0 map[string][]client.IndexDescription
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (map[string][]client.IndexDescription, error)); ok {
@@ -502,6 +541,10 @@ func (_c *DB_GetAllIndexes_Call) RunAndReturn(run func(context.Context) (map[str
func (_m *DB) GetCollectionByName(_a0 context.Context, _a1 string) (client.Collection, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for GetCollectionByName")
+ }
+
var r0 client.Collection
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok {
@@ -557,6 +600,10 @@ func (_c *DB_GetCollectionByName_Call) RunAndReturn(run func(context.Context, st
func (_m *DB) GetCollections(_a0 context.Context, _a1 client.CollectionFetchOptions) ([]client.Collection, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for GetCollections")
+ }
+
var r0 []client.Collection
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, client.CollectionFetchOptions) ([]client.Collection, error)); ok {
@@ -612,6 +659,10 @@ func (_c *DB_GetCollections_Call) RunAndReturn(run func(context.Context, client.
func (_m *DB) GetSchemaByVersionID(_a0 context.Context, _a1 string) (client.SchemaDescription, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for GetSchemaByVersionID")
+ }
+
var r0 client.SchemaDescription
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, string) (client.SchemaDescription, error)); ok {
@@ -665,6 +716,10 @@ func (_c *DB_GetSchemaByVersionID_Call) RunAndReturn(run func(context.Context, s
func (_m *DB) GetSchemas(_a0 context.Context, _a1 client.SchemaFetchOptions) ([]client.SchemaDescription, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for GetSchemas")
+ }
+
var r0 []client.SchemaDescription
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, client.SchemaFetchOptions) ([]client.SchemaDescription, error)); ok {
@@ -716,10 +771,61 @@ func (_c *DB_GetSchemas_Call) RunAndReturn(run func(context.Context, client.Sche
return _c
}
+// Headstore provides a mock function with given fields:
+func (_m *DB) Headstore() go_datastore.Read {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for Headstore")
+ }
+
+ var r0 go_datastore.Read
+ if rf, ok := ret.Get(0).(func() go_datastore.Read); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(go_datastore.Read)
+ }
+ }
+
+ return r0
+}
+
+// DB_Headstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Headstore'
+type DB_Headstore_Call struct {
+ *mock.Call
+}
+
+// Headstore is a helper method to define mock.On call
+func (_e *DB_Expecter) Headstore() *DB_Headstore_Call {
+ return &DB_Headstore_Call{Call: _e.mock.On("Headstore")}
+}
+
+func (_c *DB_Headstore_Call) Run(run func()) *DB_Headstore_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *DB_Headstore_Call) Return(_a0 go_datastore.Read) *DB_Headstore_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *DB_Headstore_Call) RunAndReturn(run func() go_datastore.Read) *DB_Headstore_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// LensRegistry provides a mock function with given fields:
func (_m *DB) LensRegistry() client.LensRegistry {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for LensRegistry")
+ }
+
var r0 client.LensRegistry
if rf, ok := ret.Get(0).(func() client.LensRegistry); ok {
r0 = rf()
@@ -763,6 +869,10 @@ func (_c *DB_LensRegistry_Call) RunAndReturn(run func() client.LensRegistry) *DB
func (_m *DB) MaxTxnRetries() int {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for MaxTxnRetries")
+ }
+
var r0 int
if rf, ok := ret.Get(0).(func() int); ok {
r0 = rf()
@@ -804,6 +914,10 @@ func (_c *DB_MaxTxnRetries_Call) RunAndReturn(run func() int) *DB_MaxTxnRetries_
func (_m *DB) NewConcurrentTxn(_a0 context.Context, _a1 bool) (datastore.Txn, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for NewConcurrentTxn")
+ }
+
var r0 datastore.Txn
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok {
@@ -859,6 +973,10 @@ func (_c *DB_NewConcurrentTxn_Call) RunAndReturn(run func(context.Context, bool)
func (_m *DB) NewTxn(_a0 context.Context, _a1 bool) (datastore.Txn, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for NewTxn")
+ }
+
var r0 datastore.Txn
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok {
@@ -914,6 +1032,10 @@ func (_c *DB_NewTxn_Call) RunAndReturn(run func(context.Context, bool) (datastor
func (_m *DB) PatchCollection(_a0 context.Context, _a1 string) error {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for PatchCollection")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(_a0, _a1)
@@ -957,6 +1079,10 @@ func (_c *DB_PatchCollection_Call) RunAndReturn(run func(context.Context, string
func (_m *DB) PatchSchema(_a0 context.Context, _a1 string, _a2 immutable.Option[model.Lens], _a3 bool) error {
ret := _m.Called(_a0, _a1, _a2, _a3)
+ if len(ret) == 0 {
+ panic("no return value specified for PatchSchema")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string, immutable.Option[model.Lens], bool) error); ok {
r0 = rf(_a0, _a1, _a2, _a3)
@@ -1002,6 +1128,10 @@ func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string, im
func (_m *DB) Peerstore() datastore.DSBatching {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Peerstore")
+ }
+
var r0 datastore.DSBatching
if rf, ok := ret.Get(0).(func() datastore.DSBatching); ok {
r0 = rf()
@@ -1045,6 +1175,10 @@ func (_c *DB_Peerstore_Call) RunAndReturn(run func() datastore.DSBatching) *DB_P
func (_m *DB) PrintDump(ctx context.Context) error {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for PrintDump")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
@@ -1087,6 +1221,10 @@ func (_c *DB_PrintDump_Call) RunAndReturn(run func(context.Context) error) *DB_P
func (_m *DB) Root() datastore.RootStore {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Root")
+ }
+
var r0 datastore.RootStore
if rf, ok := ret.Get(0).(func() datastore.RootStore); ok {
r0 = rf()
@@ -1130,6 +1268,10 @@ func (_c *DB_Root_Call) RunAndReturn(run func() datastore.RootStore) *DB_Root_Ca
func (_m *DB) SetActiveSchemaVersion(_a0 context.Context, _a1 string) error {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for SetActiveSchemaVersion")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, string) error); ok {
r0 = rf(_a0, _a1)
@@ -1173,6 +1315,10 @@ func (_c *DB_SetActiveSchemaVersion_Call) RunAndReturn(run func(context.Context,
func (_m *DB) SetMigration(_a0 context.Context, _a1 client.LensConfig) error {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for SetMigration")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, client.LensConfig) error); ok {
r0 = rf(_a0, _a1)
diff --git a/client/normal_nil.go b/client/normal_nil.go
index 7cd2df3f16..f78a0cc63e 100644
--- a/client/normal_nil.go
+++ b/client/normal_nil.go
@@ -30,7 +30,7 @@ func NewNormalNil(kind FieldKind) (NormalValue, error) {
return NewNormalNillableFloat(immutable.None[float64]()), nil
case FieldKind_NILLABLE_DATETIME:
return NewNormalNillableTime(immutable.None[time.Time]()), nil
- case FieldKind_NILLABLE_STRING, FieldKind_NILLABLE_JSON:
+ case FieldKind_NILLABLE_STRING, FieldKind_NILLABLE_JSON, FieldKind_DocID:
return NewNormalNillableString(immutable.None[string]()), nil
case FieldKind_NILLABLE_BLOB:
return NewNormalNillableBytes(immutable.None[[]byte]()), nil
diff --git a/client/normal_value_test.go b/client/normal_value_test.go
index 33cd20c46e..73e9def5d6 100644
--- a/client/normal_value_test.go
+++ b/client/normal_value_test.go
@@ -1404,7 +1404,7 @@ func TestNormalValue_NewNormalNil(t *testing.T) {
assert.True(t, v.IsNil())
} else {
_, err := NewNormalNil(kind)
- require.Error(t, err)
+ require.Error(t, err, "field kind: "+kind.String())
}
}
}
diff --git a/client/schema_field_description.go b/client/schema_field_description.go
index 87ee843ec8..cad233b67c 100644
--- a/client/schema_field_description.go
+++ b/client/schema_field_description.go
@@ -104,7 +104,7 @@ func (k ScalarKind) Underlying() string {
}
func (k ScalarKind) IsNillable() bool {
- return k != FieldKind_DocID
+ return true
}
func (k ScalarKind) IsObject() bool {
diff --git a/core/clock.go b/core/clock.go
deleted file mode 100644
index e7b8c7f1f2..0000000000
--- a/core/clock.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package core
-
-import (
- "context"
-
- ipld "github.com/ipfs/go-ipld-format"
-)
-
-// MerkleClock is the core logical clock implementation that manages writing to and from
-// the MerkleDAG structure, ensuring a causal ordering of events.
-type MerkleClock interface {
- AddDAGNode(
- ctx context.Context,
- delta Delta,
- ) (ipld.Node, error) // possibly change to AddDeltaNode?
- ProcessNode(context.Context, Delta, ipld.Node) error
-}
diff --git a/core/delta.go b/core/delta.go
deleted file mode 100644
index fda7dd13ae..0000000000
--- a/core/delta.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package core
-
-import (
- cid "github.com/ipfs/go-cid"
-)
-
-// Delta represents a delta-state update to delta-CRDT.
-// They are serialized to and from Protobuf (or CBOR).
-type Delta interface {
- GetPriority() uint64
- SetPriority(uint64)
- Marshal() ([]byte, error)
- Unmarshal(b []byte) error
-}
-
-// CompositeDelta represents a delta-state update to a composite CRDT.
-type CompositeDelta interface {
- Delta
- Links() []DAGLink
-}
-
-// DAGLink represents a link to another object in a DAG.
-type DAGLink struct {
- Name string
- Cid cid.Cid
-}
diff --git a/core/node.go b/core/node.go
deleted file mode 100644
index 6e9589ea04..0000000000
--- a/core/node.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package core
-
-import (
- "context"
-
- cid "github.com/ipfs/go-cid"
- ipld "github.com/ipfs/go-ipld-format"
-)
-
-// NodeDeltaPair is a Node with its underlying delta already extracted.
-// Used in a channel response for streaming.
-type NodeDeltaPair interface {
- GetNode() ipld.Node
- GetDelta() Delta
- Error() error
-}
-
-// A NodeGetter extended from ipld.NodeGetter with delta-related functions.
-type NodeGetter interface {
- ipld.NodeGetter
- GetDelta(context.Context, cid.Cid) (ipld.Node, Delta, error)
- GetDeltas(context.Context, []cid.Cid) <-chan NodeDeltaPair
- GetPriority(context.Context, cid.Cid) (uint64, error)
-}
diff --git a/crypto/crypto.go b/crypto/crypto.go
new file mode 100644
index 0000000000..acf374adb2
--- /dev/null
+++ b/crypto/crypto.go
@@ -0,0 +1,41 @@
+// Copyright 2024 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package crypto
+
+import (
+ "crypto/ed25519"
+ "crypto/rand"
+
+ "github.com/decred/dcrd/dcrec/secp256k1/v4"
+)
+
+// GenerateSecp256k1 generates a new secp256k1 private key.
+func GenerateSecp256k1() (*secp256k1.PrivateKey, error) {
+ return secp256k1.GeneratePrivateKey()
+}
+
+// GenerateAES256 generates a new random AES-256 bit key.
+func GenerateAES256() ([]byte, error) {
+ return RandomBytes(32)
+}
+
+// GenerateEd25519 generates a new random Ed25519 private key.
+func GenerateEd25519() (ed25519.PrivateKey, error) {
+ _, priv, err := ed25519.GenerateKey(rand.Reader)
+ return priv, err
+}
+
+// RandomBytes returns a random slice of bytes of the given size.
+func RandomBytes(size int) ([]byte, error) {
+ data := make([]byte, size)
+ _, err := rand.Read(data)
+ return data, err
+}
diff --git a/datastore/badger/v4/datastore_test.go b/datastore/badger/v4/datastore_test.go
index 69a24981df..c72ff988db 100644
--- a/datastore/badger/v4/datastore_test.go
+++ b/datastore/badger/v4/datastore_test.go
@@ -1130,7 +1130,7 @@ func TestTxnWithConflict(t *testing.T) {
require.ErrorIs(t, err, ErrTxnConflict)
}
-func TestTxnWithConflictAfterDelete(t *testing.T) {
+func TestTxnWithNoConflictAfterDelete(t *testing.T) {
ctx := context.Background()
s := newLoadedDatastore(ctx, t)
defer func() {
@@ -1144,9 +1144,6 @@ func TestTxnWithConflictAfterDelete(t *testing.T) {
tx2, err := s.NewTransaction(ctx, false)
require.NoError(t, err)
- _, err = tx.GetSize(ctx, testKey2)
- require.NoError(t, err)
-
err = tx.Put(ctx, testKey2, testValue3)
require.NoError(t, err)
@@ -1157,7 +1154,7 @@ func TestTxnWithConflictAfterDelete(t *testing.T) {
require.NoError(t, err)
err = tx.Commit(ctx)
- require.ErrorIs(t, err, ErrTxnConflict)
+ require.NoError(t, err)
}
func TestTxnWithNoConflictAfterGet(t *testing.T) {
diff --git a/datastore/blockstore.go b/datastore/blockstore.go
index be25894a3d..f9f92198cd 100644
--- a/datastore/blockstore.go
+++ b/datastore/blockstore.go
@@ -13,13 +13,14 @@ package datastore
import (
"context"
- blockstore "github.com/ipfs/boxo/blockstore"
+ "github.com/ipfs/boxo/blockstore"
dshelp "github.com/ipfs/boxo/datastore/dshelp"
blocks "github.com/ipfs/go-block-format"
"github.com/ipfs/go-cid"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
ipld "github.com/ipfs/go-ipld-format"
+ "github.com/ipld/go-ipld-prime/storage/bsadapter"
"github.com/sourcenetwork/defradb/errors"
)
@@ -44,18 +45,33 @@ import (
// NewBlockstore returns a default Blockstore implementation
// using the provided datastore.Batching backend.
-func NewBlockstore(store DSReaderWriter) blockstore.Blockstore {
+func newBlockstore(store DSReaderWriter) *bstore {
return &bstore{
store: store,
}
}
+func newIPLDStore(store blockstore.Blockstore) *bsadapter.Adapter {
+ return &bsadapter.Adapter{Wrapped: store}
+}
+
type bstore struct {
store DSReaderWriter
rehash bool
}
+var _ blockstore.Blockstore = (*bstore)(nil)
+var _ DAGStore = (*bstore)(nil)
+
+// AsIPLDStorage returns an IPLDStorage instance.
+//
+// It wraps the blockstore in an IPLD Blockstore adapter for use with
+// the IPLD LinkSystem.
+func (bs *bstore) AsIPLDStorage() IPLDStorage {
+ return newIPLDStore(bs)
+}
+
// HashOnRead enables or disables rehashing of blocks on read.
func (bs *bstore) HashOnRead(enabled bool) {
bs.rehash = enabled
diff --git a/datastore/blockstore_test.go b/datastore/blockstore_test.go
index 81e086c99f..29daffcc76 100644
--- a/datastore/blockstore_test.go
+++ b/datastore/blockstore_test.go
@@ -19,7 +19,7 @@ import (
ipld "github.com/ipfs/go-ipld-format"
"github.com/stretchr/testify/require"
- ccid "github.com/sourcenetwork/defradb/core/cid"
+ ccid "github.com/sourcenetwork/defradb/internal/core/cid"
"github.com/sourcenetwork/defradb/datastore/memory"
)
diff --git a/datastore/concurrent_txn.go b/datastore/concurrent_txn.go
index f46637e99d..409a26223c 100644
--- a/datastore/concurrent_txn.go
+++ b/datastore/concurrent_txn.go
@@ -15,8 +15,6 @@ import (
"sync"
ds "github.com/ipfs/go-datastore"
-
- "github.com/sourcenetwork/defradb/datastore/iterable"
)
type concurrentTxn struct {
@@ -32,31 +30,16 @@ type concurrentTxn struct {
// NewConcurrentTxnFrom creates a new Txn from rootstore that supports concurrent API calls
func NewConcurrentTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, id uint64, readonly bool) (Txn, error) {
- var rootTxn ds.Txn
- var err error
-
- // check if our datastore natively supports iterable transaction, transactions or batching
- if iterableTxnStore, ok := rootstore.(iterable.IterableTxnDatastore); ok {
- rootTxn, err = iterableTxnStore.NewIterableTransaction(ctx, readonly)
- if err != nil {
- return nil, err
- }
- } else {
- rootTxn, err = rootstore.NewTransaction(ctx, readonly)
- if err != nil {
- return nil, err
- }
+ rootTxn, err := newTxnFrom(ctx, rootstore, readonly)
+ if err != nil {
+ return nil, err
}
-
rootConcurentTxn := &concurrentTxn{Txn: rootTxn}
multistore := MultiStoreFrom(rootConcurentTxn)
return &txn{
- rootConcurentTxn,
- multistore,
- id,
- []func(){},
- []func(){},
- []func(){},
+ t: rootConcurentTxn,
+ MultiStore: multistore,
+ id: id,
}, nil
}
@@ -90,7 +73,7 @@ func (t *concurrentTxn) Put(ctx context.Context, key ds.Key, value []byte) error
// Sync executes the transaction.
func (t *concurrentTxn) Sync(ctx context.Context, prefix ds.Key) error {
- return t.Txn.Commit(ctx)
+ return t.Commit(ctx)
}
// Close discards the transaction.
diff --git a/datastore/dag.go b/datastore/dag.go
deleted file mode 100644
index cd2b48eb54..0000000000
--- a/datastore/dag.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package datastore
-
-import (
- blockstore "github.com/ipfs/boxo/blockstore"
-)
-
-// DAGStore is the interface to the underlying BlockStore and BlockService.
-type dagStore struct {
- blockstore.Blockstore // become a Blockstore
- store DSReaderWriter
- // bstore blockstore.Blockstore
- // bserv blockservice.BlockService
-}
-
-// NewDAGStore creates a new DAGStore with the supplied Batching datastore.
-func NewDAGStore(store DSReaderWriter) DAGStore {
- dstore := &dagStore{
- Blockstore: NewBlockstore(store),
- store: store,
- }
-
- return dstore
-}
-
-// func (d *dagStore) setupBlockstore() error {
-// bs := blockstore.NewBlockstore(d.store)
-// // bs = blockstore.NewIdStore(bs)
-// // cachedbs, err := blockstore.CachedBlockstore(d.ctx, bs, blockstore.DefaultCacheOpts())
-// // if err != nil {
-// // return err
-// // }
-// d.bstore = bs
-// return nil
-// }
-
-// func (d *dagStore) setupBlockService() error {
-// // if d.cfg.Offline {
-// // d.bserv = blockservice.New(d.bstore, offline.Exchange(p.bstore))
-// // return nil
-// // }
-
-// // bswapnet := network.NewFromIpfsHost(p.host, p.dht)
-// // bswap := bitswap.New(p.ctx, bswapnet, p.bstore)
-// // p.bserv = blockservice.New(p.bstore, bswap)
-
-// // @todo Investigate if we need an Exchanger or if it can stay as nil
-// d.bserv = blockservice.New(d.bstore, offline.Exchange(d.bstore))
-// return nil
-// }
-
-// func (d *dagStore) setupDAGService() error {
-// d.DAGService = dag.NewDAGService(d.bserv)
-// return nil
-// }
-
-// func (d *dagStore) Blockstore() blockstore.Blockstore {
-// return d.bstore
-// }
diff --git a/datastore/memory/memory.go b/datastore/memory/memory.go
index e650776623..8a17d79603 100644
--- a/datastore/memory/memory.go
+++ b/datastore/memory/memory.go
@@ -347,10 +347,14 @@ func (d *Datastore) executePurge(ctx context.Context) {
}
func (d *Datastore) handleContextDone(ctx context.Context) {
- <-ctx.Done()
- // It is safe to ignore the error since the only error that could occur is if the
- // datastore is already closed, in which case the purpose of the `Close` call is already covered.
- _ = d.Close()
+ select {
+ case <-d.closing:
+ return
+ case <-ctx.Done():
+ // It is safe to ignore the error since the only error that could occur is if the
+ // datastore is already closed, in which case the purpose of the `Close` call is already covered.
+ _ = d.Close()
+ }
}
// commit commits the given transaction to the datastore.
diff --git a/datastore/memory/txn.go b/datastore/memory/txn.go
index 7430077e46..27b6292d06 100644
--- a/datastore/memory/txn.go
+++ b/datastore/memory/txn.go
@@ -80,6 +80,11 @@ func (t *basicTxn) get(ctx context.Context, key ds.Key) dsItem {
if result.key == "" {
result = t.ds.get(ctx, key, t.getDSVersion())
result.isGet = true
+ if result.key == "" {
+ // If the datastore doesn't have the item, we still need to track it
+ // to check for merge conflicts.
+ result.key = key.String()
+ }
t.ops.Set(result)
}
return result
@@ -97,7 +102,7 @@ func (t *basicTxn) Get(ctx context.Context, key ds.Key) ([]byte, error) {
return nil, ErrTxnDiscarded
}
result := t.get(ctx, key)
- if result.key == "" || result.isDeleted {
+ if result.version == 0 || result.isDeleted {
return nil, ds.ErrNotFound
}
return result.val, nil
@@ -115,7 +120,7 @@ func (t *basicTxn) GetSize(ctx context.Context, key ds.Key) (size int, err error
return 0, ErrTxnDiscarded
}
result := t.get(ctx, key)
- if result.key == "" || result.isDeleted {
+ if result.version == 0 || result.isDeleted {
return 0, ds.ErrNotFound
}
return len(result.val), nil
@@ -133,7 +138,7 @@ func (t *basicTxn) Has(ctx context.Context, key ds.Key) (exists bool, err error)
return false, ErrTxnDiscarded
}
result := t.get(ctx, key)
- if result.key == "" || result.isDeleted {
+ if result.version == 0 || result.isDeleted {
return false, nil
}
return true, nil
@@ -174,13 +179,17 @@ func (t *basicTxn) Query(ctx context.Context, q dsq.Query) (dsq.Results, error)
iter := t.ds.values.Iter()
iterOps := t.ops.Iter()
iterOpsHasValue := iterOps.Next()
+ dsVersion := t.getDSVersion()
// iterate over the underlying store and ensure that ops with keys smaller than or equal to
// the key of the underlying store are added with priority.
for iter.Next() {
// fast forward to last inserted version
item := iter.Item()
+ if item.version > dsVersion {
+ continue
+ }
for iter.Next() {
- if item.key == iter.Item().key {
+ if item.key == iter.Item().key && iter.Item().version <= dsVersion {
item = iter.Item()
continue
}
@@ -270,8 +279,14 @@ func (t *basicTxn) checkForConflicts(ctx context.Context) error {
iter := t.ops.Iter()
defer iter.Release()
for iter.Next() {
- expectedItem := t.ds.get(ctx, ds.NewKey(iter.Item().key), t.getDSVersion())
- latestItem := t.ds.get(ctx, ds.NewKey(iter.Item().key), t.ds.getVersion())
+ item := iter.Item()
+ if !item.isGet {
+ // Conflict should only occur if an item has been updated
+ // after we've read it within the transaction.
+ continue
+ }
+ expectedItem := t.ds.get(ctx, ds.NewKey(item.key), t.getDSVersion())
+ latestItem := t.ds.get(ctx, ds.NewKey(item.key), t.ds.getVersion())
if latestItem.version != expectedItem.version {
return ErrTxnConflict
}
diff --git a/datastore/memory/txn_test.go b/datastore/memory/txn_test.go
index 0dae48f2dc..d1861e7d87 100644
--- a/datastore/memory/txn_test.go
+++ b/datastore/memory/txn_test.go
@@ -661,7 +661,7 @@ func TestTxnQueryWithOnlyOneOperation(t *testing.T) {
tx, err := s.NewTransaction(ctx, false)
require.NoError(t, err)
- err = s.Put(ctx, testKey4, testValue4)
+ err = tx.Put(ctx, testKey4, testValue4)
require.NoError(t, err)
results, err := tx.Query(ctx, dsq.Query{})
@@ -707,11 +707,16 @@ func TestTxnWithConflict(t *testing.T) {
require.NoError(t, err)
}()
- tx := s.newTransaction(false)
+ tx, err := s.NewTransaction(ctx, false)
+ require.NoError(t, err)
- tx2 := s.newTransaction(false)
+ tx2, err := s.NewTransaction(ctx, false)
+ require.NoError(t, err)
- err := tx.Put(ctx, testKey3, testValue3)
+ _, err = tx.GetSize(ctx, testKey3)
+ require.ErrorIs(t, err, ds.ErrNotFound)
+
+ err = tx.Put(ctx, testKey3, testValue3)
require.NoError(t, err)
err = tx2.Put(ctx, testKey3, testValue4)
@@ -724,7 +729,7 @@ func TestTxnWithConflict(t *testing.T) {
require.ErrorIs(t, err, ErrTxnConflict)
}
-func TestTxnWithConflictAfterDelete(t *testing.T) {
+func TestTxnWithNoConflictAfterDelete(t *testing.T) {
ctx := context.Background()
s := newLoadedDatastore(ctx)
defer func() {
@@ -746,7 +751,7 @@ func TestTxnWithConflictAfterDelete(t *testing.T) {
require.NoError(t, err)
err = tx.Commit(ctx)
- require.ErrorIs(t, err, ErrTxnConflict)
+ require.NoError(t, err)
}
func TestTxnWithConflictAfterGet(t *testing.T) {
diff --git a/datastore/mocks/dag_store.go b/datastore/mocks/dag_store.go
index 8408013ccc..f6fe123a80 100644
--- a/datastore/mocks/dag_store.go
+++ b/datastore/mocks/dag_store.go
@@ -8,6 +8,8 @@ import (
context "context"
+ datastore "github.com/sourcenetwork/defradb/datastore"
+
mock "github.com/stretchr/testify/mock"
)
@@ -28,6 +30,10 @@ func (_m *DAGStore) EXPECT() *DAGStore_Expecter {
func (_m *DAGStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for AllKeysChan")
+ }
+
var r0 <-chan cid.Cid
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (<-chan cid.Cid, error)); ok {
@@ -78,10 +84,61 @@ func (_c *DAGStore_AllKeysChan_Call) RunAndReturn(run func(context.Context) (<-c
return _c
}
+// AsIPLDStorage provides a mock function with given fields:
+func (_m *DAGStore) AsIPLDStorage() datastore.IPLDStorage {
+ ret := _m.Called()
+
+ if len(ret) == 0 {
+ panic("no return value specified for AsIPLDStorage")
+ }
+
+ var r0 datastore.IPLDStorage
+ if rf, ok := ret.Get(0).(func() datastore.IPLDStorage); ok {
+ r0 = rf()
+ } else {
+ if ret.Get(0) != nil {
+ r0 = ret.Get(0).(datastore.IPLDStorage)
+ }
+ }
+
+ return r0
+}
+
+// DAGStore_AsIPLDStorage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AsIPLDStorage'
+type DAGStore_AsIPLDStorage_Call struct {
+ *mock.Call
+}
+
+// AsIPLDStorage is a helper method to define mock.On call
+func (_e *DAGStore_Expecter) AsIPLDStorage() *DAGStore_AsIPLDStorage_Call {
+ return &DAGStore_AsIPLDStorage_Call{Call: _e.mock.On("AsIPLDStorage")}
+}
+
+func (_c *DAGStore_AsIPLDStorage_Call) Run(run func()) *DAGStore_AsIPLDStorage_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run()
+ })
+ return _c
+}
+
+func (_c *DAGStore_AsIPLDStorage_Call) Return(_a0 datastore.IPLDStorage) *DAGStore_AsIPLDStorage_Call {
+ _c.Call.Return(_a0)
+ return _c
+}
+
+func (_c *DAGStore_AsIPLDStorage_Call) RunAndReturn(run func() datastore.IPLDStorage) *DAGStore_AsIPLDStorage_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// DeleteBlock provides a mock function with given fields: _a0, _a1
func (_m *DAGStore) DeleteBlock(_a0 context.Context, _a1 cid.Cid) error {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for DeleteBlock")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) error); ok {
r0 = rf(_a0, _a1)
@@ -125,6 +182,10 @@ func (_c *DAGStore_DeleteBlock_Call) RunAndReturn(run func(context.Context, cid.
func (_m *DAGStore) Get(_a0 context.Context, _a1 cid.Cid) (blocks.Block, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for Get")
+ }
+
var r0 blocks.Block
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (blocks.Block, error)); ok {
@@ -180,6 +241,10 @@ func (_c *DAGStore_Get_Call) RunAndReturn(run func(context.Context, cid.Cid) (bl
func (_m *DAGStore) GetSize(_a0 context.Context, _a1 cid.Cid) (int, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for GetSize")
+ }
+
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (int, error)); ok {
@@ -233,6 +298,10 @@ func (_c *DAGStore_GetSize_Call) RunAndReturn(run func(context.Context, cid.Cid)
func (_m *DAGStore) Has(_a0 context.Context, _a1 cid.Cid) (bool, error) {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for Has")
+ }
+
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, cid.Cid) (bool, error)); ok {
@@ -319,6 +388,10 @@ func (_c *DAGStore_HashOnRead_Call) RunAndReturn(run func(bool)) *DAGStore_HashO
func (_m *DAGStore) Put(_a0 context.Context, _a1 blocks.Block) error {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for Put")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, blocks.Block) error); ok {
r0 = rf(_a0, _a1)
@@ -362,6 +435,10 @@ func (_c *DAGStore_Put_Call) RunAndReturn(run func(context.Context, blocks.Block
func (_m *DAGStore) PutMany(_a0 context.Context, _a1 []blocks.Block) error {
ret := _m.Called(_a0, _a1)
+ if len(ret) == 0 {
+ panic("no return value specified for PutMany")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, []blocks.Block) error); ok {
r0 = rf(_a0, _a1)
diff --git a/datastore/mocks/ds_reader_writer.go b/datastore/mocks/ds_reader_writer.go
index 865086c697..989521521e 100644
--- a/datastore/mocks/ds_reader_writer.go
+++ b/datastore/mocks/ds_reader_writer.go
@@ -31,6 +31,10 @@ func (_m *DSReaderWriter) EXPECT() *DSReaderWriter_Expecter {
func (_m *DSReaderWriter) Delete(ctx context.Context, key datastore.Key) error {
ret := _m.Called(ctx, key)
+ if len(ret) == 0 {
+ panic("no return value specified for Delete")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok {
r0 = rf(ctx, key)
@@ -74,6 +78,10 @@ func (_c *DSReaderWriter_Delete_Call) RunAndReturn(run func(context.Context, dat
func (_m *DSReaderWriter) Get(ctx context.Context, key datastore.Key) ([]byte, error) {
ret := _m.Called(ctx, key)
+ if len(ret) == 0 {
+ panic("no return value specified for Get")
+ }
+
var r0 []byte
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) ([]byte, error)); ok {
@@ -129,6 +137,10 @@ func (_c *DSReaderWriter_Get_Call) RunAndReturn(run func(context.Context, datast
func (_m *DSReaderWriter) GetIterator(q query.Query) (iterable.Iterator, error) {
ret := _m.Called(q)
+ if len(ret) == 0 {
+ panic("no return value specified for GetIterator")
+ }
+
var r0 iterable.Iterator
var r1 error
if rf, ok := ret.Get(0).(func(query.Query) (iterable.Iterator, error)); ok {
@@ -183,6 +195,10 @@ func (_c *DSReaderWriter_GetIterator_Call) RunAndReturn(run func(query.Query) (i
func (_m *DSReaderWriter) GetSize(ctx context.Context, key datastore.Key) (int, error) {
ret := _m.Called(ctx, key)
+ if len(ret) == 0 {
+ panic("no return value specified for GetSize")
+ }
+
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (int, error)); ok {
@@ -236,6 +252,10 @@ func (_c *DSReaderWriter_GetSize_Call) RunAndReturn(run func(context.Context, da
func (_m *DSReaderWriter) Has(ctx context.Context, key datastore.Key) (bool, error) {
ret := _m.Called(ctx, key)
+ if len(ret) == 0 {
+ panic("no return value specified for Has")
+ }
+
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (bool, error)); ok {
@@ -289,6 +309,10 @@ func (_c *DSReaderWriter_Has_Call) RunAndReturn(run func(context.Context, datast
func (_m *DSReaderWriter) Put(ctx context.Context, key datastore.Key, value []byte) error {
ret := _m.Called(ctx, key, value)
+ if len(ret) == 0 {
+ panic("no return value specified for Put")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key, []byte) error); ok {
r0 = rf(ctx, key, value)
@@ -333,6 +357,10 @@ func (_c *DSReaderWriter_Put_Call) RunAndReturn(run func(context.Context, datast
func (_m *DSReaderWriter) Query(ctx context.Context, q query.Query) (query.Results, error) {
ret := _m.Called(ctx, q)
+ if len(ret) == 0 {
+ panic("no return value specified for Query")
+ }
+
var r0 query.Results
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, query.Query) (query.Results, error)); ok {
diff --git a/datastore/mocks/results.go b/datastore/mocks/results.go
index e1fee8f859..d1f68949a7 100644
--- a/datastore/mocks/results.go
+++ b/datastore/mocks/results.go
@@ -26,6 +26,10 @@ func (_m *Results) EXPECT() *Results_Expecter {
func (_m *Results) Close() error {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Close")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
@@ -67,6 +71,10 @@ func (_c *Results_Close_Call) RunAndReturn(run func() error) *Results_Close_Call
func (_m *Results) Next() <-chan query.Result {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Next")
+ }
+
var r0 <-chan query.Result
if rf, ok := ret.Get(0).(func() <-chan query.Result); ok {
r0 = rf()
@@ -110,6 +118,10 @@ func (_c *Results_Next_Call) RunAndReturn(run func() <-chan query.Result) *Resul
func (_m *Results) NextSync() (query.Result, bool) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for NextSync")
+ }
+
var r0 query.Result
var r1 bool
if rf, ok := ret.Get(0).(func() (query.Result, bool)); ok {
@@ -161,6 +173,10 @@ func (_c *Results_NextSync_Call) RunAndReturn(run func() (query.Result, bool)) *
func (_m *Results) Process() goprocess.Process {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Process")
+ }
+
var r0 goprocess.Process
if rf, ok := ret.Get(0).(func() goprocess.Process); ok {
r0 = rf()
@@ -204,6 +220,10 @@ func (_c *Results_Process_Call) RunAndReturn(run func() goprocess.Process) *Resu
func (_m *Results) Query() query.Query {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Query")
+ }
+
var r0 query.Query
if rf, ok := ret.Get(0).(func() query.Query); ok {
r0 = rf()
@@ -245,6 +265,10 @@ func (_c *Results_Query_Call) RunAndReturn(run func() query.Query) *Results_Quer
func (_m *Results) Rest() ([]query.Entry, error) {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Rest")
+ }
+
var r0 []query.Entry
var r1 error
if rf, ok := ret.Get(0).(func() ([]query.Entry, error)); ok {
diff --git a/datastore/mocks/root_store.go b/datastore/mocks/root_store.go
index 836a059f68..94d2694721 100644
--- a/datastore/mocks/root_store.go
+++ b/datastore/mocks/root_store.go
@@ -29,6 +29,10 @@ func (_m *RootStore) EXPECT() *RootStore_Expecter {
func (_m *RootStore) Batch(ctx context.Context) (datastore.Batch, error) {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for Batch")
+ }
+
var r0 datastore.Batch
var r1 error
if rf, ok := ret.Get(0).(func(context.Context) (datastore.Batch, error)); ok {
@@ -83,6 +87,10 @@ func (_c *RootStore_Batch_Call) RunAndReturn(run func(context.Context) (datastor
func (_m *RootStore) Close() error {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Close")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
@@ -124,6 +132,10 @@ func (_c *RootStore_Close_Call) RunAndReturn(run func() error) *RootStore_Close_
func (_m *RootStore) Delete(ctx context.Context, key datastore.Key) error {
ret := _m.Called(ctx, key)
+ if len(ret) == 0 {
+ panic("no return value specified for Delete")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok {
r0 = rf(ctx, key)
@@ -167,6 +179,10 @@ func (_c *RootStore_Delete_Call) RunAndReturn(run func(context.Context, datastor
func (_m *RootStore) Get(ctx context.Context, key datastore.Key) ([]byte, error) {
ret := _m.Called(ctx, key)
+ if len(ret) == 0 {
+ panic("no return value specified for Get")
+ }
+
var r0 []byte
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) ([]byte, error)); ok {
@@ -222,6 +238,10 @@ func (_c *RootStore_Get_Call) RunAndReturn(run func(context.Context, datastore.K
func (_m *RootStore) GetSize(ctx context.Context, key datastore.Key) (int, error) {
ret := _m.Called(ctx, key)
+ if len(ret) == 0 {
+ panic("no return value specified for GetSize")
+ }
+
var r0 int
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (int, error)); ok {
@@ -275,6 +295,10 @@ func (_c *RootStore_GetSize_Call) RunAndReturn(run func(context.Context, datasto
func (_m *RootStore) Has(ctx context.Context, key datastore.Key) (bool, error) {
ret := _m.Called(ctx, key)
+ if len(ret) == 0 {
+ panic("no return value specified for Has")
+ }
+
var r0 bool
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) (bool, error)); ok {
@@ -328,6 +352,10 @@ func (_c *RootStore_Has_Call) RunAndReturn(run func(context.Context, datastore.K
func (_m *RootStore) NewTransaction(ctx context.Context, readOnly bool) (datastore.Txn, error) {
ret := _m.Called(ctx, readOnly)
+ if len(ret) == 0 {
+ panic("no return value specified for NewTransaction")
+ }
+
var r0 datastore.Txn
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, bool) (datastore.Txn, error)); ok {
@@ -383,6 +411,10 @@ func (_c *RootStore_NewTransaction_Call) RunAndReturn(run func(context.Context,
func (_m *RootStore) Put(ctx context.Context, key datastore.Key, value []byte) error {
ret := _m.Called(ctx, key, value)
+ if len(ret) == 0 {
+ panic("no return value specified for Put")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key, []byte) error); ok {
r0 = rf(ctx, key, value)
@@ -427,6 +459,10 @@ func (_c *RootStore_Put_Call) RunAndReturn(run func(context.Context, datastore.K
func (_m *RootStore) Query(ctx context.Context, q query.Query) (query.Results, error) {
ret := _m.Called(ctx, q)
+ if len(ret) == 0 {
+ panic("no return value specified for Query")
+ }
+
var r0 query.Results
var r1 error
if rf, ok := ret.Get(0).(func(context.Context, query.Query) (query.Results, error)); ok {
@@ -482,6 +518,10 @@ func (_c *RootStore_Query_Call) RunAndReturn(run func(context.Context, query.Que
func (_m *RootStore) Sync(ctx context.Context, prefix datastore.Key) error {
ret := _m.Called(ctx, prefix)
+ if len(ret) == 0 {
+ panic("no return value specified for Sync")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context, datastore.Key) error); ok {
r0 = rf(ctx, prefix)
diff --git a/datastore/mocks/txn.go b/datastore/mocks/txn.go
index 0dc71cb46f..7c9872dfb2 100644
--- a/datastore/mocks/txn.go
+++ b/datastore/mocks/txn.go
@@ -26,6 +26,10 @@ func (_m *Txn) EXPECT() *Txn_Expecter {
func (_m *Txn) Commit(ctx context.Context) error {
ret := _m.Called(ctx)
+ if len(ret) == 0 {
+ panic("no return value specified for Commit")
+ }
+
var r0 error
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
r0 = rf(ctx)
@@ -68,6 +72,10 @@ func (_c *Txn_Commit_Call) RunAndReturn(run func(context.Context) error) *Txn_Co
func (_m *Txn) DAGstore() datastore.DAGStore {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for DAGstore")
+ }
+
var r0 datastore.DAGStore
if rf, ok := ret.Get(0).(func() datastore.DAGStore); ok {
r0 = rf()
@@ -111,6 +119,10 @@ func (_c *Txn_DAGstore_Call) RunAndReturn(run func() datastore.DAGStore) *Txn_DA
func (_m *Txn) Datastore() datastore.DSReaderWriter {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Datastore")
+ }
+
var r0 datastore.DSReaderWriter
if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok {
r0 = rf()
@@ -187,6 +199,10 @@ func (_c *Txn_Discard_Call) RunAndReturn(run func(context.Context)) *Txn_Discard
func (_m *Txn) Headstore() datastore.DSReaderWriter {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Headstore")
+ }
+
var r0 datastore.DSReaderWriter
if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok {
r0 = rf()
@@ -230,6 +246,10 @@ func (_c *Txn_Headstore_Call) RunAndReturn(run func() datastore.DSReaderWriter)
func (_m *Txn) ID() uint64 {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for ID")
+ }
+
var r0 uint64
if rf, ok := ret.Get(0).(func() uint64); ok {
r0 = rf()
@@ -300,6 +320,39 @@ func (_c *Txn_OnDiscard_Call) RunAndReturn(run func(func())) *Txn_OnDiscard_Call
return _c
}
+// OnDiscardAsync provides a mock function with given fields: fn
+func (_m *Txn) OnDiscardAsync(fn func()) {
+ _m.Called(fn)
+}
+
+// Txn_OnDiscardAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnDiscardAsync'
+type Txn_OnDiscardAsync_Call struct {
+ *mock.Call
+}
+
+// OnDiscardAsync is a helper method to define mock.On call
+// - fn func()
+func (_e *Txn_Expecter) OnDiscardAsync(fn interface{}) *Txn_OnDiscardAsync_Call {
+ return &Txn_OnDiscardAsync_Call{Call: _e.mock.On("OnDiscardAsync", fn)}
+}
+
+func (_c *Txn_OnDiscardAsync_Call) Run(run func(fn func())) *Txn_OnDiscardAsync_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(func()))
+ })
+ return _c
+}
+
+func (_c *Txn_OnDiscardAsync_Call) Return() *Txn_OnDiscardAsync_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *Txn_OnDiscardAsync_Call) RunAndReturn(run func(func())) *Txn_OnDiscardAsync_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// OnError provides a mock function with given fields: fn
func (_m *Txn) OnError(fn func()) {
_m.Called(fn)
@@ -333,6 +386,39 @@ func (_c *Txn_OnError_Call) RunAndReturn(run func(func())) *Txn_OnError_Call {
return _c
}
+// OnErrorAsync provides a mock function with given fields: fn
+func (_m *Txn) OnErrorAsync(fn func()) {
+ _m.Called(fn)
+}
+
+// Txn_OnErrorAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnErrorAsync'
+type Txn_OnErrorAsync_Call struct {
+ *mock.Call
+}
+
+// OnErrorAsync is a helper method to define mock.On call
+// - fn func()
+func (_e *Txn_Expecter) OnErrorAsync(fn interface{}) *Txn_OnErrorAsync_Call {
+ return &Txn_OnErrorAsync_Call{Call: _e.mock.On("OnErrorAsync", fn)}
+}
+
+func (_c *Txn_OnErrorAsync_Call) Run(run func(fn func())) *Txn_OnErrorAsync_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(func()))
+ })
+ return _c
+}
+
+func (_c *Txn_OnErrorAsync_Call) Return() *Txn_OnErrorAsync_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *Txn_OnErrorAsync_Call) RunAndReturn(run func(func())) *Txn_OnErrorAsync_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// OnSuccess provides a mock function with given fields: fn
func (_m *Txn) OnSuccess(fn func()) {
_m.Called(fn)
@@ -366,10 +452,47 @@ func (_c *Txn_OnSuccess_Call) RunAndReturn(run func(func())) *Txn_OnSuccess_Call
return _c
}
+// OnSuccessAsync provides a mock function with given fields: fn
+func (_m *Txn) OnSuccessAsync(fn func()) {
+ _m.Called(fn)
+}
+
+// Txn_OnSuccessAsync_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'OnSuccessAsync'
+type Txn_OnSuccessAsync_Call struct {
+ *mock.Call
+}
+
+// OnSuccessAsync is a helper method to define mock.On call
+// - fn func()
+func (_e *Txn_Expecter) OnSuccessAsync(fn interface{}) *Txn_OnSuccessAsync_Call {
+ return &Txn_OnSuccessAsync_Call{Call: _e.mock.On("OnSuccessAsync", fn)}
+}
+
+func (_c *Txn_OnSuccessAsync_Call) Run(run func(fn func())) *Txn_OnSuccessAsync_Call {
+ _c.Call.Run(func(args mock.Arguments) {
+ run(args[0].(func()))
+ })
+ return _c
+}
+
+func (_c *Txn_OnSuccessAsync_Call) Return() *Txn_OnSuccessAsync_Call {
+ _c.Call.Return()
+ return _c
+}
+
+func (_c *Txn_OnSuccessAsync_Call) RunAndReturn(run func(func())) *Txn_OnSuccessAsync_Call {
+ _c.Call.Return(run)
+ return _c
+}
+
// Peerstore provides a mock function with given fields:
func (_m *Txn) Peerstore() datastore.DSBatching {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Peerstore")
+ }
+
var r0 datastore.DSBatching
if rf, ok := ret.Get(0).(func() datastore.DSBatching); ok {
r0 = rf()
@@ -413,6 +536,10 @@ func (_c *Txn_Peerstore_Call) RunAndReturn(run func() datastore.DSBatching) *Txn
func (_m *Txn) Rootstore() datastore.DSReaderWriter {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Rootstore")
+ }
+
var r0 datastore.DSReaderWriter
if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok {
r0 = rf()
@@ -456,6 +583,10 @@ func (_c *Txn_Rootstore_Call) RunAndReturn(run func() datastore.DSReaderWriter)
func (_m *Txn) Systemstore() datastore.DSReaderWriter {
ret := _m.Called()
+ if len(ret) == 0 {
+ panic("no return value specified for Systemstore")
+ }
+
var r0 datastore.DSReaderWriter
if rf, ok := ret.Get(0).(func() datastore.DSReaderWriter); ok {
r0 = rf()
diff --git a/datastore/mocks/utils.go b/datastore/mocks/utils.go
index af91fc6d3a..50131a8538 100644
--- a/datastore/mocks/utils.go
+++ b/datastore/mocks/utils.go
@@ -68,6 +68,7 @@ func prepareDAGStore(t *testing.T) *DAGStore {
func NewTxnWithMultistore(t *testing.T) *MultiStoreTxn {
txn := NewTxn(t)
txn.EXPECT().OnSuccess(mock.Anything).Maybe()
+ txn.EXPECT().OnSuccessAsync(mock.Anything).Maybe()
result := &MultiStoreTxn{
Txn: txn,
diff --git a/datastore/multi.go b/datastore/multi.go
index 47015e4581..bbd333ba19 100644
--- a/datastore/multi.go
+++ b/datastore/multi.go
@@ -46,7 +46,7 @@ func MultiStoreFrom(rootstore ds.Datastore) MultiStore {
head: prefix(rootRW, headStoreKey),
peer: namespace.Wrap(rootstore, peerStoreKey),
system: prefix(rootRW, systemStoreKey),
- dag: NewDAGStore(prefix(rootRW, blockStoreKey)),
+ dag: newBlockstore(prefix(rootRW, blockStoreKey)),
}
return ms
diff --git a/datastore/store.go b/datastore/store.go
index 7f2764a65d..7954eb5014 100644
--- a/datastore/store.go
+++ b/datastore/store.go
@@ -11,8 +11,9 @@
package datastore
import (
- blockstore "github.com/ipfs/boxo/blockstore"
+ "github.com/ipfs/boxo/blockstore"
ds "github.com/ipfs/go-datastore"
+ "github.com/ipld/go-ipld-prime/storage"
"github.com/sourcenetwork/corelog"
@@ -72,6 +73,13 @@ type DSReaderWriter interface {
// DAGStore proxies the ipld.DAGService under the /core namespace for future-proofing
type DAGStore interface {
blockstore.Blockstore
+ AsIPLDStorage() IPLDStorage
+}
+
+// IPLDStorage provides the methods needed for an IPLD LinkSystem.
+type IPLDStorage interface {
+ storage.ReadableStorage
+ storage.WritableStorage
}
// DSBatching wraps the Batching interface from go-datastore
diff --git a/datastore/txn.go b/datastore/txn.go
index acc7a53193..249903b817 100644
--- a/datastore/txn.go
+++ b/datastore/txn.go
@@ -43,105 +43,116 @@ type Txn interface {
// OnDiscard registers a function to be called when the transaction is discarded.
OnDiscard(fn func())
+
+ // OnSuccessAsync registers a function to be called asynchronously when the transaction is committed.
+ OnSuccessAsync(fn func())
+
+ // OnErrorAsync registers a function to be called asynchronously when the transaction is rolled back.
+ OnErrorAsync(fn func())
+
+ // OnDiscardAsync registers a function to be called asynchronously when the transaction is discarded.
+ OnDiscardAsync(fn func())
}
type txn struct {
- t ds.Txn
MultiStore
-
+ t ds.Txn
id uint64
successFns []func()
errorFns []func()
discardFns []func()
+
+ successAsyncFns []func()
+ errorAsyncFns []func()
+ discardAsyncFns []func()
}
var _ Txn = (*txn)(nil)
-// NewTxnFrom returns a new Txn from the rootstore.
-func NewTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, id uint64, readonly bool) (Txn, error) {
+func newTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, readonly bool) (ds.Txn, error) {
// check if our datastore natively supports iterable transaction, transactions or batching
- if iterableTxnStore, ok := rootstore.(iterable.IterableTxnDatastore); ok {
- rootTxn, err := iterableTxnStore.NewIterableTransaction(ctx, readonly)
- if err != nil {
- return nil, err
- }
- multistore := MultiStoreFrom(ShimTxnStore{rootTxn})
- return &txn{
- rootTxn,
- multistore,
- id,
- []func(){},
- []func(){},
- []func(){},
- }, nil
+ switch t := rootstore.(type) {
+ case iterable.IterableTxnDatastore:
+ return t.NewIterableTransaction(ctx, readonly)
+
+ default:
+ return rootstore.NewTransaction(ctx, readonly)
}
+}
- rootTxn, err := rootstore.NewTransaction(ctx, readonly)
+// NewTxnFrom returns a new Txn from the rootstore.
+func NewTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, id uint64, readonly bool) (Txn, error) {
+ rootTxn, err := newTxnFrom(ctx, rootstore, readonly)
if err != nil {
return nil, err
}
-
multistore := MultiStoreFrom(ShimTxnStore{rootTxn})
return &txn{
- rootTxn,
- multistore,
- id,
- []func(){},
- []func(){},
- []func(){},
+ t: rootTxn,
+ MultiStore: multistore,
+ id: id,
}, nil
}
-// ID returns the unique immutable identifier for this transaction.
func (t *txn) ID() uint64 {
return t.id
}
-// Commit finalizes a transaction, attempting to commit it to the Datastore.
func (t *txn) Commit(ctx context.Context) error {
- if err := t.t.Commit(ctx); err != nil {
- runFns(t.errorFns)
- return err
+ var fns []func()
+ var asyncFns []func()
+
+ err := t.t.Commit(ctx)
+ if err != nil {
+ fns = t.errorFns
+ asyncFns = t.errorAsyncFns
+ } else {
+ fns = t.successFns
+ asyncFns = t.successAsyncFns
}
- runFns(t.successFns)
- return nil
+
+ for _, fn := range asyncFns {
+ go fn()
+ }
+ for _, fn := range fns {
+ fn()
+ }
+ return err
}
-// Discard throws away changes recorded in a transaction without committing.
func (t *txn) Discard(ctx context.Context) {
t.t.Discard(ctx)
- runFns(t.discardFns)
+ for _, fn := range t.discardAsyncFns {
+ go fn()
+ }
+ for _, fn := range t.discardFns {
+ fn()
+ }
}
-// OnSuccess registers a function to be called when the transaction is committed.
-func (txn *txn) OnSuccess(fn func()) {
- if fn == nil {
- return
- }
- txn.successFns = append(txn.successFns, fn)
+func (t *txn) OnSuccess(fn func()) {
+ t.successFns = append(t.successFns, fn)
}
-// OnError registers a function to be called when the transaction is rolled back.
-func (txn *txn) OnError(fn func()) {
- if fn == nil {
- return
- }
- txn.errorFns = append(txn.errorFns, fn)
+func (t *txn) OnError(fn func()) {
+ t.errorFns = append(t.errorFns, fn)
}
-// OnDiscard registers a function to be called when the transaction is discarded.
-func (txn *txn) OnDiscard(fn func()) {
- if fn == nil {
- return
- }
- txn.discardFns = append(txn.discardFns, fn)
+func (t *txn) OnDiscard(fn func()) {
+ t.discardFns = append(t.discardFns, fn)
}
-func runFns(fns []func()) {
- for _, fn := range fns {
- fn()
- }
+func (t *txn) OnSuccessAsync(fn func()) {
+ t.successAsyncFns = append(t.successAsyncFns, fn)
+}
+
+func (t *txn) OnErrorAsync(fn func()) {
+ t.errorAsyncFns = append(t.errorAsyncFns, fn)
+}
+
+func (t *txn) OnDiscardAsync(fn func()) {
+ t.discardAsyncFns = append(t.discardAsyncFns, fn)
}
// Shim to make ds.Txn support ds.Datastore.
diff --git a/datastore/txn_test.go b/datastore/txn_test.go
index f5170146d6..cf3cdc5c1d 100644
--- a/datastore/txn_test.go
+++ b/datastore/txn_test.go
@@ -12,13 +12,16 @@ package datastore
import (
"context"
+ "sync"
"testing"
ds "github.com/ipfs/go-datastore"
+ "github.com/ipfs/go-datastore/query"
badger "github.com/sourcenetwork/badger/v4"
"github.com/stretchr/testify/require"
badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4"
+ "github.com/sourcenetwork/defradb/datastore/memory"
)
func TestNewTxnFrom(t *testing.T) {
@@ -56,8 +59,6 @@ func TestOnSuccess(t *testing.T) {
txn, err := NewTxnFrom(ctx, rootstore, 0, false)
require.NoError(t, err)
- txn.OnSuccess(nil)
-
text := "Source"
txn.OnSuccess(func() {
text += " Inc"
@@ -68,7 +69,7 @@ func TestOnSuccess(t *testing.T) {
require.Equal(t, text, "Source Inc")
}
-func TestOnError(t *testing.T) {
+func TestOnSuccessAsync(t *testing.T) {
ctx := context.Background()
opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
rootstore, err := badgerds.NewDatastore("", &opts)
@@ -77,7 +78,25 @@ func TestOnError(t *testing.T) {
txn, err := NewTxnFrom(ctx, rootstore, 0, false)
require.NoError(t, err)
- txn.OnError(nil)
+ var wg sync.WaitGroup
+ txn.OnSuccessAsync(func() {
+ wg.Done()
+ })
+
+ wg.Add(1)
+ err = txn.Commit(ctx)
+ require.NoError(t, err)
+ wg.Wait()
+}
+
+func TestOnError(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ require.NoError(t, err)
+
+ txn, err := NewTxnFrom(ctx, rootstore, 0, false)
+ require.NoError(t, err)
text := "Source"
txn.OnError(func() {
@@ -93,6 +112,68 @@ func TestOnError(t *testing.T) {
require.Equal(t, text, "Source Inc")
}
+func TestOnErrorAsync(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ require.NoError(t, err)
+
+ txn, err := NewTxnFrom(ctx, rootstore, 0, false)
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ txn.OnErrorAsync(func() {
+ wg.Done()
+ })
+
+ rootstore.Close()
+ require.NoError(t, err)
+
+ wg.Add(1)
+ err = txn.Commit(ctx)
+ require.ErrorIs(t, err, badgerds.ErrClosed)
+ wg.Wait()
+}
+
+func TestOnDiscard(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ require.NoError(t, err)
+
+ txn, err := NewTxnFrom(ctx, rootstore, 0, false)
+ require.NoError(t, err)
+
+ text := "Source"
+ txn.OnDiscard(func() {
+ text += " Inc"
+ })
+ txn.Discard(ctx)
+ require.NoError(t, err)
+
+ require.Equal(t, text, "Source Inc")
+}
+
+func TestOnDiscardAsync(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ require.NoError(t, err)
+
+ txn, err := NewTxnFrom(ctx, rootstore, 0, false)
+ require.NoError(t, err)
+
+ var wg sync.WaitGroup
+ txn.OnDiscardAsync(func() {
+ wg.Done()
+ })
+
+ wg.Add(1)
+ txn.Discard(ctx)
+ require.NoError(t, err)
+ wg.Wait()
+}
+
func TestShimTxnStoreSync(t *testing.T) {
ctx := context.Background()
opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
@@ -120,3 +201,352 @@ func TestShimTxnStoreClose(t *testing.T) {
err = shimTxn.Close()
require.NoError(t, err)
}
+
+func TestMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldSucceed(t *testing.T) {
+ ctx := context.Background()
+ rootstore := memory.NewDatastore(ctx)
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn1.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ // Commit txn2 first to create a conflict
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ err = txn1.Commit(ctx)
+ require.NoError(t, err)
+}
+
+func TestMemoryStoreTxn_TwoTransactionsWithGetPutConflict_ShouldErrorWithConflict(t *testing.T) {
+ ctx := context.Background()
+ rootstore := memory.NewDatastore(ctx)
+
+ rootstore.Put(ctx, ds.NewKey("key"), []byte("value"))
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ _, err = txn1.Get(ctx, ds.NewKey("key"))
+ require.NoError(t, err)
+
+ err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value"))
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ // Commit txn2 first to create a conflict
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ err = txn1.Commit(ctx)
+ require.ErrorIs(t, err, badger.ErrConflict)
+}
+
+func TestMemoryStoreTxn_TwoTransactionsWithHasPutConflict_ShouldErrorWithConflict(t *testing.T) {
+ ctx := context.Background()
+ rootstore := memory.NewDatastore(ctx)
+
+ rootstore.Put(ctx, ds.NewKey("key"), []byte("value"))
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ _, err = txn1.Has(ctx, ds.NewKey("key"))
+ require.NoError(t, err)
+
+ err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value"))
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ // Commit txn2 first to create a conflict
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ err = txn1.Commit(ctx)
+ require.ErrorIs(t, err, badger.ErrConflict)
+}
+
+func TestBadgerMemoryStoreTxn_TwoTransactionsWithPutConflict_ShouldSucceed(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ require.NoError(t, err)
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn1.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ // Commit txn2 first to create a conflict
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ err = txn1.Commit(ctx)
+ require.NoError(t, err)
+}
+
+func TestBadgerMemoryStoreTxn_TwoTransactionsWithGetPutConflict_ShouldErrorWithConflict(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ require.NoError(t, err)
+
+ rootstore.Put(ctx, ds.NewKey("key"), []byte("value"))
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ _, err = txn1.Get(ctx, ds.NewKey("key"))
+ require.NoError(t, err)
+
+ err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value"))
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ // Commit txn2 first to create a conflict
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ err = txn1.Commit(ctx)
+ require.ErrorIs(t, err, badger.ErrConflict)
+}
+
+func TestBadgerMemoryStoreTxn_TwoTransactionsWithHasPutConflict_ShouldErrorWithConflict(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ require.NoError(t, err)
+
+ rootstore.Put(ctx, ds.NewKey("key"), []byte("value"))
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ _, err = txn1.Has(ctx, ds.NewKey("key"))
+ require.NoError(t, err)
+
+ err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value"))
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ // Commit txn2 first to create a conflict
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ err = txn1.Commit(ctx)
+ require.ErrorIs(t, err, badger.ErrConflict)
+}
+
+func TestBadgerFileStoreTxn_TwoTransactionsWithPutConflict_ShouldSucceed(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("")}
+ rootstore, err := badgerds.NewDatastore(t.TempDir(), &opts)
+ require.NoError(t, err)
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn1.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ // Commit txn2 first to create a conflict
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ err = txn1.Commit(ctx)
+ require.NoError(t, err)
+}
+
+func TestBadgerFileStoreTxn_TwoTransactionsWithGetPutConflict_ShouldErrorWithConflict(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("")}
+ rootstore, err := badgerds.NewDatastore(t.TempDir(), &opts)
+ require.NoError(t, err)
+
+ rootstore.Put(ctx, ds.NewKey("key"), []byte("value"))
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ _, err = txn1.Get(ctx, ds.NewKey("key"))
+ require.NoError(t, err)
+
+ err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value"))
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ // Commit txn2 first to create a conflict
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ err = txn1.Commit(ctx)
+ require.ErrorIs(t, err, badger.ErrConflict)
+}
+
+func TestBadgerFileStoreTxn_TwoTransactionsWithHasPutConflict_ShouldErrorWithConflict(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("")}
+ rootstore, err := badgerds.NewDatastore(t.TempDir(), &opts)
+ require.NoError(t, err)
+
+ rootstore.Put(ctx, ds.NewKey("key"), []byte("value"))
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ _, err = txn1.Has(ctx, ds.NewKey("key"))
+ require.NoError(t, err)
+
+ err = txn1.Put(ctx, ds.NewKey("other-key"), []byte("value"))
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("key"), []byte("value"))
+ require.NoError(t, err)
+
+ // Commit txn2 first to create a conflict
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ err = txn1.Commit(ctx)
+ require.ErrorIs(t, err, badger.ErrConflict)
+}
+
+func TestMemoryStoreTxn_TwoTransactionsWithQueryAndPut_ShouldOmitNewPut(t *testing.T) {
+ ctx := context.Background()
+ rootstore := memory.NewDatastore(ctx)
+
+ rootstore.Put(ctx, ds.NewKey("key"), []byte("value"))
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("other-key"), []byte("other-value"))
+ require.NoError(t, err)
+
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ qResults, err := txn1.Query(ctx, query.Query{})
+ require.NoError(t, err)
+
+ docs := [][]byte{}
+ for r := range qResults.Next() {
+ docs = append(docs, r.Entry.Value)
+ }
+ require.Equal(t, [][]byte{[]byte("value")}, docs)
+ txn1.Discard(ctx)
+}
+
+func TestBadgerMemoryStoreTxn_TwoTransactionsWithQueryAndPut_ShouldOmitNewPut(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)}
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ require.NoError(t, err)
+
+ rootstore.Put(ctx, ds.NewKey("key"), []byte("value"))
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("other-key"), []byte("other-value"))
+ require.NoError(t, err)
+
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ qResults, err := txn1.Query(ctx, query.Query{})
+ require.NoError(t, err)
+
+ docs := [][]byte{}
+ for r := range qResults.Next() {
+ docs = append(docs, r.Entry.Value)
+ }
+ require.Equal(t, [][]byte{[]byte("value")}, docs)
+ txn1.Discard(ctx)
+}
+
+func TestBadgerFileStoreTxn_TwoTransactionsWithQueryAndPut_ShouldOmitNewPut(t *testing.T) {
+ ctx := context.Background()
+ opts := badgerds.Options{Options: badger.DefaultOptions("")}
+ rootstore, err := badgerds.NewDatastore(t.TempDir(), &opts)
+ require.NoError(t, err)
+
+ rootstore.Put(ctx, ds.NewKey("key"), []byte("value"))
+
+ txn1, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ txn2, err := rootstore.NewTransaction(ctx, false)
+ require.NoError(t, err)
+
+ err = txn2.Put(ctx, ds.NewKey("other-key"), []byte("other-value"))
+ require.NoError(t, err)
+
+ err = txn2.Commit(ctx)
+ require.NoError(t, err)
+
+ qResults, err := txn1.Query(ctx, query.Query{})
+ require.NoError(t, err)
+
+ docs := [][]byte{}
+ for r := range qResults.Next() {
+ docs = append(docs, r.Entry.Value)
+ }
+ require.Equal(t, [][]byte{[]byte("value")}, docs)
+ txn1.Discard(ctx)
+}
diff --git a/db/collection.go b/db/collection.go
deleted file mode 100644
index e84530d3e7..0000000000
--- a/db/collection.go
+++ /dev/null
@@ -1,2058 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package db
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "reflect"
- "strconv"
- "strings"
-
- jsonpatch "github.com/evanphx/json-patch/v5"
- "github.com/ipfs/go-cid"
- ds "github.com/ipfs/go-datastore"
- "github.com/ipfs/go-datastore/query"
- ipld "github.com/ipfs/go-ipld-format"
- "github.com/lens-vm/lens/host-go/config/model"
- "github.com/sourcenetwork/immutable"
-
- "github.com/sourcenetwork/defradb/acp"
- "github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/client/request"
- "github.com/sourcenetwork/defradb/core"
- "github.com/sourcenetwork/defradb/db/base"
- "github.com/sourcenetwork/defradb/db/description"
- "github.com/sourcenetwork/defradb/db/fetcher"
- "github.com/sourcenetwork/defradb/errors"
- "github.com/sourcenetwork/defradb/events"
- "github.com/sourcenetwork/defradb/lens"
- merklecrdt "github.com/sourcenetwork/defradb/merkle/crdt"
-)
-
-var _ client.Collection = (*collection)(nil)
-
-// collection stores data records at Documents, which are gathered
-// together under a collection name. This is analogous to SQL Tables.
-type collection struct {
- db *db
- def client.CollectionDefinition
- indexes []CollectionIndex
- fetcherFactory func() fetcher.Fetcher
-}
-
-// @todo: Move the base Descriptions to an internal API within the db/ package.
-// @body: Currently, the New/Create Collection APIs accept CollectionDescriptions
-// as params. We want these Descriptions objects to be low level descriptions, and
-// to be auto generated based on a more controllable and user friendly
-// CollectionOptions object.
-
-// newCollection returns a pointer to a newly instantiated DB Collection
-func (db *db) newCollection(desc client.CollectionDescription, schema client.SchemaDescription) *collection {
- return &collection{
- db: db,
- def: client.CollectionDefinition{Description: desc, Schema: schema},
- }
-}
-
-// newFetcher returns a new fetcher instance for this collection.
-// If a fetcherFactory is set, it will be used to create the fetcher.
-// It's a very simple factory, but it allows us to inject a mock fetcher
-// for testing.
-func (c *collection) newFetcher() fetcher.Fetcher {
- var innerFetcher fetcher.Fetcher
- if c.fetcherFactory != nil {
- innerFetcher = c.fetcherFactory()
- } else {
- innerFetcher = new(fetcher.DocumentFetcher)
- }
-
- return lens.NewFetcher(innerFetcher, c.db.LensRegistry())
-}
-
-// createCollection creates a collection and saves it to the database in its system store.
-// Note: Collection.ID is an auto-incrementing value that is generated by the database.
-func (db *db) createCollection(
- ctx context.Context,
- def client.CollectionDefinition,
- newDefinitions []client.CollectionDefinition,
-) (client.Collection, error) {
- schema := def.Schema
- desc := def.Description
- txn := mustGetContextTxn(ctx)
-
- if desc.Name.HasValue() {
- exists, err := description.HasCollectionByName(ctx, txn, desc.Name.Value())
- if err != nil {
- return nil, err
- }
- if exists {
- return nil, ErrCollectionAlreadyExists
- }
- }
-
- existingDefinitions, err := db.getAllActiveDefinitions(ctx)
- if err != nil {
- return nil, err
- }
-
- schemaByName := map[string]client.SchemaDescription{}
- for _, existingDefinition := range existingDefinitions {
- schemaByName[existingDefinition.Schema.Name] = existingDefinition.Schema
- }
- for _, newDefinition := range newDefinitions {
- schemaByName[newDefinition.Schema.Name] = newDefinition.Schema
- }
-
- _, err = validateUpdateSchemaFields(schemaByName, client.SchemaDescription{}, schema)
- if err != nil {
- return nil, err
- }
-
- definitionsByName := map[string]client.CollectionDefinition{}
- for _, existingDefinition := range existingDefinitions {
- definitionsByName[existingDefinition.GetName()] = existingDefinition
- }
- for _, newDefinition := range newDefinitions {
- definitionsByName[newDefinition.GetName()] = newDefinition
- }
- err = db.validateNewCollection(def, definitionsByName)
- if err != nil {
- return nil, err
- }
-
- colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{})
- if err != nil {
- return nil, err
- }
- colID, err := colSeq.next(ctx)
- if err != nil {
- return nil, err
- }
-
- fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(uint32(colID)))
- if err != nil {
- return nil, err
- }
-
- desc.ID = uint32(colID)
- desc.RootID = desc.ID
-
- schema, err = description.CreateSchemaVersion(ctx, txn, schema)
- if err != nil {
- return nil, err
- }
- desc.SchemaVersionID = schema.VersionID
- for _, localField := range desc.Fields {
- var fieldID uint64
- if localField.Name == request.DocIDFieldName {
- // There is no hard technical requirement for this, we just think it looks nicer
- // if the doc id is at the zero index. It makes it look a little nicer in commit
- // queries too.
- fieldID = 0
- } else {
- fieldID, err = fieldSeq.next(ctx)
- if err != nil {
- return nil, err
- }
- }
-
- for i := range desc.Fields {
- if desc.Fields[i].Name == localField.Name {
- desc.Fields[i].ID = client.FieldID(fieldID)
- break
- }
- }
- }
-
- desc, err = description.SaveCollection(ctx, txn, desc)
- if err != nil {
- return nil, err
- }
-
- col := db.newCollection(desc, schema)
-
- for _, index := range desc.Indexes {
- if _, err := col.createIndex(ctx, index); err != nil {
- return nil, err
- }
- }
-
- return db.getCollectionByID(ctx, desc.ID)
-}
-
-// validateCollectionDefinitionPolicyDesc validates that the policy definition is valid, beyond syntax.
-//
-// Ensures that the information within the policy definition makes sense,
-// this function might also make relevant remote calls using the acp system.
-func (db *db) validateCollectionDefinitionPolicyDesc(
- ctx context.Context,
- policyDesc immutable.Option[client.PolicyDescription],
-) error {
- if !policyDesc.HasValue() {
- // No policy validation needed, whether acp exists or not doesn't matter.
- return nil
- }
-
- // If there is a policy specified, but the database does not have
- // acp enabled/available return an error, database must have an acp available
- // to enable access control (inorder to adhere to the policy specified).
- if !db.acp.HasValue() {
- return ErrCanNotHavePolicyWithoutACP
- }
-
- // If we have the policy specified on the collection, and acp is available/enabled,
- // then using the acp system we need to ensure the policy id specified
- // actually exists as a policy, and the resource name exists on that policy
- // and that the resource is a valid DPI.
- return db.acp.Value().ValidateResourceExistsOnValidDPI(
- ctx,
- policyDesc.Value().ID,
- policyDesc.Value().ResourceName,
- )
-}
-
-// updateSchema updates the persisted schema description matching the name of the given
-// description, to the values in the given description.
-//
-// It will validate the given description using [validateUpdateSchema] before updating it.
-//
-// The schema (including the schema version ID) will only be updated if any changes have actually
-// been made, if the given description matches the current persisted description then no changes will be
-// applied.
-func (db *db) updateSchema(
- ctx context.Context,
- existingSchemaByName map[string]client.SchemaDescription,
- proposedDescriptionsByName map[string]client.SchemaDescription,
- schema client.SchemaDescription,
- migration immutable.Option[model.Lens],
- setAsActiveVersion bool,
-) error {
- hasChanged, err := db.validateUpdateSchema(
- existingSchemaByName,
- proposedDescriptionsByName,
- schema,
- )
- if err != nil {
- return err
- }
-
- if !hasChanged {
- return nil
- }
-
- for _, field := range schema.Fields {
- if field.Kind.IsObject() && !field.Kind.IsArray() {
- idFieldName := field.Name + "_id"
- if _, ok := schema.GetFieldByName(idFieldName); !ok {
- schema.Fields = append(schema.Fields, client.SchemaFieldDescription{
- Name: idFieldName,
- Kind: client.FieldKind_DocID,
- })
- }
- }
- }
-
- for i, field := range schema.Fields {
- if field.Typ == client.NONE_CRDT {
- // If no CRDT Type has been provided, default to LWW_REGISTER.
- field.Typ = client.LWW_REGISTER
- schema.Fields[i] = field
- }
- }
-
- txn := mustGetContextTxn(ctx)
- previousVersionID := schema.VersionID
- schema, err = description.CreateSchemaVersion(ctx, txn, schema)
- if err != nil {
- return err
- }
-
- // After creating the new schema version, we need to create new collection versions for
- // any collection using the previous version. These will be inactive unless [setAsActiveVersion]
- // is true.
-
- cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, previousVersionID)
- if err != nil {
- return err
- }
-
- colSeq, err := db.getSequence(ctx, core.CollectionIDSequenceKey{})
- if err != nil {
- return err
- }
-
- for _, col := range cols {
- previousID := col.ID
-
- existingCols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schema.VersionID)
- if err != nil {
- return err
- }
-
- // The collection version may exist before the schema version was created locally. This is
- // because migrations for the globally known schema version may have been registered locally
- // (typically to handle documents synced over P2P at higher versions) before the local schema
- // was updated. We need to check for them now, and update them instead of creating new ones
- // if they exist.
- var isExistingCol bool
- existingColLoop:
- for _, existingCol := range existingCols {
- sources := existingCol.CollectionSources()
- for _, source := range sources {
- // Make sure that this collection is the parent of the current [col], and not part of
- // another collection set that happens to be using the same schema.
- if source.SourceCollectionID == previousID {
- if existingCol.RootID == client.OrphanRootID {
- existingCol.RootID = col.RootID
- }
-
- fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(existingCol.RootID))
- if err != nil {
- return err
- }
-
- for _, globalField := range schema.Fields {
- var fieldID client.FieldID
- // We must check the source collection if the field already exists, and take its ID
- // from there, otherwise the field must be generated by the sequence.
- existingField, ok := col.GetFieldByName(globalField.Name)
- if ok {
- fieldID = existingField.ID
- } else {
- nextFieldID, err := fieldSeq.next(ctx)
- if err != nil {
- return err
- }
- fieldID = client.FieldID(nextFieldID)
- }
-
- existingCol.Fields = append(
- existingCol.Fields,
- client.CollectionFieldDescription{
- Name: globalField.Name,
- ID: fieldID,
- },
- )
- }
- existingCol, err = description.SaveCollection(ctx, txn, existingCol)
- if err != nil {
- return err
- }
- isExistingCol = true
- break existingColLoop
- }
- }
- }
-
- if !isExistingCol {
- colID, err := colSeq.next(ctx)
- if err != nil {
- return err
- }
-
- fieldSeq, err := db.getSequence(ctx, core.NewFieldIDSequenceKey(col.RootID))
- if err != nil {
- return err
- }
-
- // Create any new collections without a name (inactive), if [setAsActiveVersion] is true
- // they will be activated later along with any existing collection versions.
- col.Name = immutable.None[string]()
- col.ID = uint32(colID)
- col.SchemaVersionID = schema.VersionID
- col.Sources = []any{
- &client.CollectionSource{
- SourceCollectionID: previousID,
- Transform: migration,
- },
- }
-
- for _, globalField := range schema.Fields {
- _, exists := col.GetFieldByName(globalField.Name)
- if !exists {
- fieldID, err := fieldSeq.next(ctx)
- if err != nil {
- return err
- }
-
- col.Fields = append(
- col.Fields,
- client.CollectionFieldDescription{
- Name: globalField.Name,
- ID: client.FieldID(fieldID),
- },
- )
- }
- }
-
- _, err = description.SaveCollection(ctx, txn, col)
- if err != nil {
- return err
- }
-
- if migration.HasValue() {
- err = db.LensRegistry().SetMigration(ctx, col.ID, migration.Value())
- if err != nil {
- return err
- }
- }
- }
- }
-
- if setAsActiveVersion {
- // activate collection versions using the new schema ID. This call must be made after
- // all new collection versions have been saved.
- err = db.setActiveSchemaVersion(ctx, schema.VersionID)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// validateUpdateSchema validates that the given schema description is a valid update.
-//
-// Will return true if the given description differs from the current persisted state of the
-// schema. Will return an error if it fails validation.
-func (db *db) validateUpdateSchema(
- existingDescriptionsByName map[string]client.SchemaDescription,
- proposedDescriptionsByName map[string]client.SchemaDescription,
- proposedDesc client.SchemaDescription,
-) (bool, error) {
- if proposedDesc.Name == "" {
- return false, ErrSchemaNameEmpty
- }
-
- existingDesc, collectionExists := existingDescriptionsByName[proposedDesc.Name]
- if !collectionExists {
- return false, NewErrAddCollectionWithPatch(proposedDesc.Name)
- }
-
- if proposedDesc.Root != existingDesc.Root {
- return false, NewErrSchemaRootDoesntMatch(
- proposedDesc.Name,
- existingDesc.Root,
- proposedDesc.Root,
- )
- }
-
- if proposedDesc.Name != existingDesc.Name {
- // There is actually little reason to not support this atm besides controlling the surface area
- // of the new feature. Changing this should not break anything, but it should be tested first.
- return false, NewErrCannotModifySchemaName(existingDesc.Name, proposedDesc.Name)
- }
-
- if proposedDesc.VersionID != "" && proposedDesc.VersionID != existingDesc.VersionID {
- // If users specify this it will be overwritten, an error is preferred to quietly ignoring it.
- return false, ErrCannotSetVersionID
- }
-
- hasChangedFields, err := validateUpdateSchemaFields(proposedDescriptionsByName, existingDesc, proposedDesc)
- if err != nil {
- return hasChangedFields, err
- }
-
- return hasChangedFields, err
-}
-
-func validateUpdateSchemaFields(
- descriptionsByName map[string]client.SchemaDescription,
- existingDesc client.SchemaDescription,
- proposedDesc client.SchemaDescription,
-) (bool, error) {
- hasChanged := false
- existingFieldsByName := map[string]client.SchemaFieldDescription{}
- existingFieldIndexesByName := map[string]int{}
- for i, field := range existingDesc.Fields {
- existingFieldIndexesByName[field.Name] = i
- existingFieldsByName[field.Name] = field
- }
-
- newFieldNames := map[string]struct{}{}
- for proposedIndex, proposedField := range proposedDesc.Fields {
- existingField, fieldAlreadyExists := existingFieldsByName[proposedField.Name]
-
- // If the field is new, then the collection has changed
- hasChanged = hasChanged || !fieldAlreadyExists
-
- if !fieldAlreadyExists && proposedField.Kind.IsObject() {
- _, relatedDescFound := descriptionsByName[proposedField.Kind.Underlying()]
-
- if !relatedDescFound {
- return false, NewErrFieldKindNotFound(proposedField.Name, proposedField.Kind.Underlying())
- }
-
- if proposedField.Kind.IsObject() && !proposedField.Kind.IsArray() {
- idFieldName := proposedField.Name + request.RelatedObjectID
- idField, idFieldFound := proposedDesc.GetFieldByName(idFieldName)
- if idFieldFound {
- if idField.Kind != client.FieldKind_DocID {
- return false, NewErrRelationalFieldIDInvalidType(idField.Name, client.FieldKind_DocID, idField.Kind)
- }
- }
- }
- }
-
- if proposedField.Kind.IsObjectArray() {
- return false, NewErrSecondaryFieldOnSchema(proposedField.Name)
- }
-
- if _, isDuplicate := newFieldNames[proposedField.Name]; isDuplicate {
- return false, NewErrDuplicateField(proposedField.Name)
- }
-
- if fieldAlreadyExists && proposedField != existingField {
- return false, NewErrCannotMutateField(proposedField.Name)
- }
-
- if existingIndex := existingFieldIndexesByName[proposedField.Name]; fieldAlreadyExists &&
- proposedIndex != existingIndex {
- return false, NewErrCannotMoveField(proposedField.Name, proposedIndex, existingIndex)
- }
-
- if !proposedField.Typ.IsSupportedFieldCType() {
- return false, client.NewErrInvalidCRDTType(proposedField.Name, proposedField.Typ.String())
- }
-
- if !proposedField.Typ.IsCompatibleWith(proposedField.Kind) {
- return false, client.NewErrCRDTKindMismatch(proposedField.Typ.String(), proposedField.Kind.String())
- }
-
- newFieldNames[proposedField.Name] = struct{}{}
- }
-
- for _, field := range existingDesc.Fields {
- if _, stillExists := newFieldNames[field.Name]; !stillExists {
- return false, NewErrCannotDeleteField(field.Name)
- }
- }
- return hasChanged, nil
-}
-
-func (db *db) patchCollection(
- ctx context.Context,
- patchString string,
-) error {
- patch, err := jsonpatch.DecodePatch([]byte(patchString))
- if err != nil {
- return err
- }
- txn := mustGetContextTxn(ctx)
- cols, err := description.GetCollections(ctx, txn)
- if err != nil {
- return err
- }
-
- existingColsByID := map[uint32]client.CollectionDescription{}
- for _, col := range cols {
- existingColsByID[col.ID] = col
- }
-
- existingDescriptionJson, err := json.Marshal(existingColsByID)
- if err != nil {
- return err
- }
-
- newDescriptionJson, err := patch.Apply(existingDescriptionJson)
- if err != nil {
- return err
- }
-
- var newColsByID map[uint32]client.CollectionDescription
- decoder := json.NewDecoder(strings.NewReader(string(newDescriptionJson)))
- decoder.DisallowUnknownFields()
- err = decoder.Decode(&newColsByID)
- if err != nil {
- return err
- }
-
- err = db.validateCollectionChanges(existingColsByID, newColsByID)
- if err != nil {
- return err
- }
-
- for _, col := range newColsByID {
- _, err := description.SaveCollection(ctx, txn, col)
- if err != nil {
- return err
- }
-
- existingCol, ok := existingColsByID[col.ID]
- if ok {
- // Clear any existing migrations in the registry, using this semi-hacky way
- // to avoid adding more functions to a public interface that we wish to remove.
-
- for _, src := range existingCol.CollectionSources() {
- if src.Transform.HasValue() {
- err = db.LensRegistry().SetMigration(ctx, existingCol.ID, model.Lens{})
- if err != nil {
- return err
- }
- }
- }
- for _, src := range existingCol.QuerySources() {
- if src.Transform.HasValue() {
- err = db.LensRegistry().SetMigration(ctx, existingCol.ID, model.Lens{})
- if err != nil {
- return err
- }
- }
- }
- }
-
- for _, src := range col.CollectionSources() {
- if src.Transform.HasValue() {
- err = db.LensRegistry().SetMigration(ctx, col.ID, src.Transform.Value())
- if err != nil {
- return err
- }
- }
- }
-
- for _, src := range col.QuerySources() {
- if src.Transform.HasValue() {
- err = db.LensRegistry().SetMigration(ctx, col.ID, src.Transform.Value())
- if err != nil {
- return err
- }
- }
- }
- }
-
- return db.loadSchema(ctx)
-}
-
-var patchCollectionValidators = []func(
- map[uint32]client.CollectionDescription,
- map[uint32]client.CollectionDescription,
-) error{
- validateCollectionNameUnique,
- validateSingleVersionActive,
- validateSourcesNotRedefined,
- validateIndexesNotModified,
- validateFieldsNotModified,
- validatePolicyNotModified,
- validateIDNotZero,
- validateIDUnique,
- validateIDExists,
- validateRootIDNotMutated,
- validateSchemaVersionIDNotMutated,
- validateCollectionNotRemoved,
-}
-
-func (db *db) validateCollectionChanges(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- for _, validators := range patchCollectionValidators {
- err := validators(oldColsByID, newColsByID)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-var newCollectionValidators = []func(
- client.CollectionDefinition,
- map[string]client.CollectionDefinition,
-) error{
- validateSecondaryFieldsPairUp,
- validateRelationPointsToValidKind,
- validateSingleSidePrimary,
-}
-
-func (db *db) validateNewCollection(
- def client.CollectionDefinition,
- defsByName map[string]client.CollectionDefinition,
-) error {
- for _, validators := range newCollectionValidators {
- err := validators(def, defsByName)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func validateRelationPointsToValidKind(
- def client.CollectionDefinition,
- defsByName map[string]client.CollectionDefinition,
-) error {
- for _, field := range def.Description.Fields {
- if !field.Kind.HasValue() {
- continue
- }
-
- if !field.Kind.Value().IsObject() {
- continue
- }
-
- underlying := field.Kind.Value().Underlying()
- _, ok := defsByName[underlying]
- if !ok {
- return NewErrFieldKindNotFound(field.Name, underlying)
- }
- }
-
- return nil
-}
-
-func validateSecondaryFieldsPairUp(
- def client.CollectionDefinition,
- defsByName map[string]client.CollectionDefinition,
-) error {
- for _, field := range def.Description.Fields {
- if !field.Kind.HasValue() {
- continue
- }
-
- if !field.Kind.Value().IsObject() {
- continue
- }
-
- if !field.RelationName.HasValue() {
- continue
- }
-
- _, hasSchemaField := def.Schema.GetFieldByName(field.Name)
- if hasSchemaField {
- continue
- }
-
- underlying := field.Kind.Value().Underlying()
- otherDef, ok := defsByName[underlying]
- if !ok {
- continue
- }
-
- if len(otherDef.Description.Fields) == 0 {
- // Views/embedded objects do not require both sides of the relation to be defined.
- continue
- }
-
- otherField, ok := otherDef.Description.GetFieldByRelation(
- field.RelationName.Value(),
- def.GetName(),
- field.Name,
- )
- if !ok {
- return NewErrRelationMissingField(underlying, field.RelationName.Value())
- }
-
- _, ok = otherDef.Schema.GetFieldByName(otherField.Name)
- if !ok {
- // This secondary is paired with another secondary, which is invalid
- return NewErrRelationMissingField(underlying, field.RelationName.Value())
- }
- }
-
- return nil
-}
-
-func validateSingleSidePrimary(
- def client.CollectionDefinition,
- defsByName map[string]client.CollectionDefinition,
-) error {
- for _, field := range def.Description.Fields {
- if !field.Kind.HasValue() {
- continue
- }
-
- if !field.Kind.Value().IsObject() {
- continue
- }
-
- if !field.RelationName.HasValue() {
- continue
- }
-
- _, hasSchemaField := def.Schema.GetFieldByName(field.Name)
- if !hasSchemaField {
- // This is a secondary field and thus passes this rule
- continue
- }
-
- underlying := field.Kind.Value().Underlying()
- otherDef, ok := defsByName[underlying]
- if !ok {
- continue
- }
-
- otherField, ok := otherDef.Description.GetFieldByRelation(
- field.RelationName.Value(),
- def.GetName(),
- field.Name,
- )
- if !ok {
- // This must be a one-sided relation, in which case it passes this rule
- continue
- }
-
- _, ok = otherDef.Schema.GetFieldByName(otherField.Name)
- if ok {
- // This primary is paired with another primary, which is invalid
- return ErrMultipleRelationPrimaries
- }
- }
-
- return nil
-}
-
-func validateCollectionNameUnique(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- names := map[string]struct{}{}
- for _, col := range newColsByID {
- if !col.Name.HasValue() {
- continue
- }
-
- if _, ok := names[col.Name.Value()]; ok {
- return NewErrCollectionAlreadyExists(col.Name.Value())
- }
- names[col.Name.Value()] = struct{}{}
- }
-
- return nil
-}
-
-func validateSingleVersionActive(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- rootsWithActiveCol := map[uint32]struct{}{}
- for _, col := range newColsByID {
- if !col.Name.HasValue() {
- continue
- }
-
- if _, ok := rootsWithActiveCol[col.RootID]; ok {
- return NewErrMultipleActiveCollectionVersions(col.Name.Value(), col.RootID)
- }
- rootsWithActiveCol[col.RootID] = struct{}{}
- }
-
- return nil
-}
-
-// validateSourcesNotRedefined specifies the limitations on how the collection sources
-// can be mutated.
-//
-// Currently new sources cannot be added, existing cannot be removed, and CollectionSources
-// cannot be redirected to other collections.
-func validateSourcesNotRedefined(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- for _, newCol := range newColsByID {
- oldCol, ok := oldColsByID[newCol.ID]
- if !ok {
- continue
- }
-
- newColSources := newCol.CollectionSources()
- oldColSources := oldCol.CollectionSources()
-
- if len(newColSources) != len(oldColSources) {
- return NewErrCollectionSourcesCannotBeAddedRemoved(newCol.ID)
- }
-
- for i := range newColSources {
- if newColSources[i].SourceCollectionID != oldColSources[i].SourceCollectionID {
- return NewErrCollectionSourceIDMutated(
- newCol.ID,
- newColSources[i].SourceCollectionID,
- oldColSources[i].SourceCollectionID,
- )
- }
- }
-
- newQuerySources := newCol.QuerySources()
- oldQuerySources := oldCol.QuerySources()
-
- if len(newQuerySources) != len(oldQuerySources) {
- return NewErrCollectionSourcesCannotBeAddedRemoved(newCol.ID)
- }
- }
-
- return nil
-}
-
-func validateIndexesNotModified(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- for _, newCol := range newColsByID {
- oldCol, ok := oldColsByID[newCol.ID]
- if !ok {
- continue
- }
-
- // DeepEqual is temporary, as this validation is temporary
- if !reflect.DeepEqual(oldCol.Indexes, newCol.Indexes) {
- return NewErrCollectionIndexesCannotBeMutated(newCol.ID)
- }
- }
-
- return nil
-}
-
-func validateFieldsNotModified(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- for _, newCol := range newColsByID {
- oldCol, ok := oldColsByID[newCol.ID]
- if !ok {
- continue
- }
-
- // DeepEqual is temporary, as this validation is temporary
- if !reflect.DeepEqual(oldCol.Fields, newCol.Fields) {
- return NewErrCollectionFieldsCannotBeMutated(newCol.ID)
- }
- }
-
- return nil
-}
-
-func validatePolicyNotModified(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- for _, newCol := range newColsByID {
- oldCol, ok := oldColsByID[newCol.ID]
- if !ok {
- continue
- }
-
- // DeepEqual is temporary, as this validation is temporary
- if !reflect.DeepEqual(oldCol.Policy, newCol.Policy) {
- return NewErrCollectionPolicyCannotBeMutated(newCol.ID)
- }
- }
-
- return nil
-}
-
-func validateIDNotZero(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- for _, newCol := range newColsByID {
- if newCol.ID == 0 {
- return ErrCollectionIDCannotBeZero
- }
- }
-
- return nil
-}
-
-func validateIDUnique(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- colIds := map[uint32]struct{}{}
- for _, newCol := range newColsByID {
- if _, ok := colIds[newCol.ID]; ok {
- return NewErrCollectionIDAlreadyExists(newCol.ID)
- }
- colIds[newCol.ID] = struct{}{}
- }
-
- return nil
-}
-
-func validateIDExists(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- for _, newCol := range newColsByID {
- if _, ok := oldColsByID[newCol.ID]; !ok {
- return NewErrAddCollectionIDWithPatch(newCol.ID)
- }
- }
-
- return nil
-}
-
-func validateRootIDNotMutated(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- for _, newCol := range newColsByID {
- oldCol, ok := oldColsByID[newCol.ID]
- if !ok {
- continue
- }
-
- if newCol.RootID != oldCol.RootID {
- return NewErrCollectionRootIDCannotBeMutated(newCol.ID)
- }
- }
-
- return nil
-}
-
-func validateSchemaVersionIDNotMutated(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
- for _, newCol := range newColsByID {
- oldCol, ok := oldColsByID[newCol.ID]
- if !ok {
- continue
- }
-
- if newCol.SchemaVersionID != oldCol.SchemaVersionID {
- return NewErrCollectionSchemaVersionIDCannotBeMutated(newCol.ID)
- }
- }
-
- return nil
-}
-
-func validateCollectionNotRemoved(
- oldColsByID map[uint32]client.CollectionDescription,
- newColsByID map[uint32]client.CollectionDescription,
-) error {
-oldLoop:
- for _, oldCol := range oldColsByID {
- for _, newCol := range newColsByID {
- // It is not enough to just match by the map index, in case the index does not pair
- // up with the ID (this can happen if a user moves the collection within the map)
- if newCol.ID == oldCol.ID {
- continue oldLoop
- }
- }
-
- return NewErrCollectionsCannotBeDeleted(oldCol.ID)
- }
-
- return nil
-}
-
-// SetActiveSchemaVersion activates all collection versions with the given schema version, and deactivates all
-// those without it (if they share the same schema root).
-//
-// This will affect all operations interacting with the schema where a schema version is not explicitly
-// provided. This includes GQL queries and Collection operations.
-//
-// It will return an error if the provided schema version ID does not exist.
-func (db *db) setActiveSchemaVersion(
- ctx context.Context,
- schemaVersionID string,
-) error {
- if schemaVersionID == "" {
- return ErrSchemaVersionIDEmpty
- }
- txn := mustGetContextTxn(ctx)
- cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schemaVersionID)
- if err != nil {
- return err
- }
-
- schema, err := description.GetSchemaVersion(ctx, txn, schemaVersionID)
- if err != nil {
- return err
- }
-
- colsWithRoot, err := description.GetCollectionsBySchemaRoot(ctx, txn, schema.Root)
- if err != nil {
- return err
- }
-
- colsBySourceID := map[uint32][]client.CollectionDescription{}
- colsByID := make(map[uint32]client.CollectionDescription, len(colsWithRoot))
- for _, col := range colsWithRoot {
- colsByID[col.ID] = col
-
- sources := col.CollectionSources()
- if len(sources) > 0 {
- // For now, we assume that each collection can only have a single source. This will likely need
- // to change later.
- slice := colsBySourceID[sources[0].SourceCollectionID]
- slice = append(slice, col)
- colsBySourceID[sources[0].SourceCollectionID] = slice
- }
- }
-
- for _, col := range cols {
- if col.Name.HasValue() {
- // The collection is already active, so we can skip it and continue
- continue
- }
- sources := col.CollectionSources()
-
- var activeCol client.CollectionDescription
- var rootCol client.CollectionDescription
- var isActiveFound bool
- if len(sources) > 0 {
- // For now, we assume that each collection can only have a single source. This will likely need
- // to change later.
- activeCol, rootCol, isActiveFound = db.getActiveCollectionDown(ctx, colsByID, sources[0].SourceCollectionID)
- }
- if !isActiveFound {
- // We need to look both down and up for the active version - the most recent is not necessarily the active one.
- activeCol, isActiveFound = db.getActiveCollectionUp(ctx, colsBySourceID, rootCol.ID)
- }
-
- var newName string
- if isActiveFound {
- newName = activeCol.Name.Value()
- } else {
- // If there are no active versions in the collection set, take the name of the schema to be the name of the
- // collection.
- newName = schema.Name
- }
- col.Name = immutable.Some(newName)
-
- _, err = description.SaveCollection(ctx, txn, col)
- if err != nil {
- return err
- }
-
- if isActiveFound {
- // Deactivate the currently active collection by setting its name to none.
- activeCol.Name = immutable.None[string]()
- _, err = description.SaveCollection(ctx, txn, activeCol)
- if err != nil {
- return err
- }
- }
- }
-
- // Load the schema into the clients (e.g. GQL)
- return db.loadSchema(ctx)
-}
-
-func (db *db) getActiveCollectionDown(
- ctx context.Context,
- colsByID map[uint32]client.CollectionDescription,
- id uint32,
-) (client.CollectionDescription, client.CollectionDescription, bool) {
- col, ok := colsByID[id]
- if !ok {
- return client.CollectionDescription{}, client.CollectionDescription{}, false
- }
-
- if col.Name.HasValue() {
- return col, client.CollectionDescription{}, true
- }
-
- sources := col.CollectionSources()
- if len(sources) == 0 {
- // If a collection has zero sources it is likely the initial collection version, or
- // this collection set is currently orphaned (can happen when setting migrations that
- // do not yet link all the way back to a non-orphaned set)
- return client.CollectionDescription{}, col, false
- }
-
- // For now, we assume that each collection can only have a single source. This will likely need
- // to change later.
- return db.getActiveCollectionDown(ctx, colsByID, sources[0].SourceCollectionID)
-}
-
-func (db *db) getActiveCollectionUp(
- ctx context.Context,
- colsBySourceID map[uint32][]client.CollectionDescription,
- id uint32,
-) (client.CollectionDescription, bool) {
- cols, ok := colsBySourceID[id]
- if !ok {
- // We have reached the top of the set, and have not found an active collection
- return client.CollectionDescription{}, false
- }
-
- for _, col := range cols {
- if col.Name.HasValue() {
- return col, true
- }
- activeCol, isFound := db.getActiveCollectionUp(ctx, colsBySourceID, col.ID)
- if isFound {
- return activeCol, isFound
- }
- }
-
- return client.CollectionDescription{}, false
-}
-
-func (db *db) getCollectionByID(ctx context.Context, id uint32) (client.Collection, error) {
- txn := mustGetContextTxn(ctx)
-
- col, err := description.GetCollectionByID(ctx, txn, id)
- if err != nil {
- return nil, err
- }
-
- schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID)
- if err != nil {
- return nil, err
- }
-
- collection := db.newCollection(col, schema)
-
- err = collection.loadIndexes(ctx)
- if err != nil {
- return nil, err
- }
-
- return collection, nil
-}
-
-// getCollectionByName returns an existing collection within the database.
-func (db *db) getCollectionByName(ctx context.Context, name string) (client.Collection, error) {
- if name == "" {
- return nil, ErrCollectionNameEmpty
- }
-
- cols, err := db.getCollections(ctx, client.CollectionFetchOptions{Name: immutable.Some(name)})
- if err != nil {
- return nil, err
- }
-
- // cols will always have length == 1 here
- return cols[0], nil
-}
-
-// getCollections returns all collections and their descriptions matching the given options
-// that currently exist within this [Store].
-//
-// Inactive collections are not returned by default unless a specific schema version ID
-// is provided.
-func (db *db) getCollections(
- ctx context.Context,
- options client.CollectionFetchOptions,
-) ([]client.Collection, error) {
- txn := mustGetContextTxn(ctx)
-
- var cols []client.CollectionDescription
- switch {
- case options.Name.HasValue():
- col, err := description.GetCollectionByName(ctx, txn, options.Name.Value())
- if err != nil {
- return nil, err
- }
- cols = append(cols, col)
-
- case options.SchemaVersionID.HasValue():
- var err error
- cols, err = description.GetCollectionsBySchemaVersionID(ctx, txn, options.SchemaVersionID.Value())
- if err != nil {
- return nil, err
- }
-
- case options.SchemaRoot.HasValue():
- var err error
- cols, err = description.GetCollectionsBySchemaRoot(ctx, txn, options.SchemaRoot.Value())
- if err != nil {
- return nil, err
- }
-
- default:
- if options.IncludeInactive.HasValue() && options.IncludeInactive.Value() {
- var err error
- cols, err = description.GetCollections(ctx, txn)
- if err != nil {
- return nil, err
- }
- } else {
- var err error
- cols, err = description.GetActiveCollections(ctx, txn)
- if err != nil {
- return nil, err
- }
- }
- }
-
- collections := []client.Collection{}
- for _, col := range cols {
- if options.SchemaVersionID.HasValue() {
- if col.SchemaVersionID != options.SchemaVersionID.Value() {
- continue
- }
- }
- // By default, we don't return inactive collections unless a specific version is requested.
- if !options.IncludeInactive.Value() && !col.Name.HasValue() && !options.SchemaVersionID.HasValue() {
- continue
- }
-
- schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID)
- if err != nil {
- // If the schema is not found we leave it as empty and carry on. This can happen when
- // a migration is registered before the schema is declared locally.
- if !errors.Is(err, ds.ErrNotFound) {
- return nil, err
- }
- }
-
- if options.SchemaRoot.HasValue() {
- if schema.Root != options.SchemaRoot.Value() {
- continue
- }
- }
-
- collection := db.newCollection(col, schema)
- collections = append(collections, collection)
-
- err = collection.loadIndexes(ctx)
- if err != nil {
- return nil, err
- }
- }
-
- return collections, nil
-}
-
-// getAllActiveDefinitions returns all queryable collection/views and any embedded schema used by them.
-func (db *db) getAllActiveDefinitions(ctx context.Context) ([]client.CollectionDefinition, error) {
- txn := mustGetContextTxn(ctx)
-
- cols, err := description.GetActiveCollections(ctx, txn)
- if err != nil {
- return nil, err
- }
-
- definitions := make([]client.CollectionDefinition, len(cols))
- for i, col := range cols {
- schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID)
- if err != nil {
- return nil, err
- }
-
- collection := db.newCollection(col, schema)
-
- err = collection.loadIndexes(ctx)
- if err != nil {
- return nil, err
- }
-
- definitions[i] = collection.Definition()
- }
-
- schemas, err := description.GetCollectionlessSchemas(ctx, txn)
- if err != nil {
- return nil, err
- }
-
- for _, schema := range schemas {
- definitions = append(
- definitions,
- client.CollectionDefinition{
- Schema: schema,
- },
- )
- }
-
- return definitions, nil
-}
-
-// GetAllDocIDs returns all the document IDs that exist in the collection.
-//
-// @todo: We probably need a lock on the collection for this kind of op since
-// it hits every key and will cause Tx conflicts for concurrent Txs
-func (c *collection) GetAllDocIDs(
- ctx context.Context,
-) (<-chan client.DocIDResult, error) {
- ctx, _, err := ensureContextTxn(ctx, c.db, true)
- if err != nil {
- return nil, err
- }
- return c.getAllDocIDsChan(ctx)
-}
-
-func (c *collection) getAllDocIDsChan(
- ctx context.Context,
-) (<-chan client.DocIDResult, error) {
- txn := mustGetContextTxn(ctx)
- prefix := core.PrimaryDataStoreKey{ // empty path for all keys prefix
- CollectionRootID: c.Description().RootID,
- }
- q, err := txn.Datastore().Query(ctx, query.Query{
- Prefix: prefix.ToString(),
- KeysOnly: true,
- })
- if err != nil {
- return nil, err
- }
-
- resCh := make(chan client.DocIDResult)
- go func() {
- defer func() {
- if err := q.Close(); err != nil {
- log.ErrorContextE(ctx, errFailedtoCloseQueryReqAllIDs, err)
- }
- close(resCh)
- txn.Discard(ctx)
- }()
- for res := range q.Next() {
- // check for Done on context first
- select {
- case <-ctx.Done():
- // we've been cancelled! ;)
- return
- default:
- // noop, just continue on the with the for loop
- }
- if res.Error != nil {
- resCh <- client.DocIDResult{
- Err: res.Error,
- }
- return
- }
-
- rawDocID := ds.NewKey(res.Key).BaseNamespace()
- docID, err := client.NewDocIDFromString(rawDocID)
- if err != nil {
- resCh <- client.DocIDResult{
- Err: err,
- }
- return
- }
-
- canRead, err := c.checkAccessOfDocWithACP(
- ctx,
- acp.ReadPermission,
- docID.String(),
- )
-
- if err != nil {
- resCh <- client.DocIDResult{
- Err: err,
- }
- return
- }
-
- if canRead {
- resCh <- client.DocIDResult{
- ID: docID,
- }
- }
- }
- }()
-
- return resCh, nil
-}
-
-// Description returns the client.CollectionDescription.
-func (c *collection) Description() client.CollectionDescription {
- return c.Definition().Description
-}
-
-// Name returns the collection name.
-func (c *collection) Name() immutable.Option[string] {
- return c.Description().Name
-}
-
-// Schema returns the Schema of the collection.
-func (c *collection) Schema() client.SchemaDescription {
- return c.Definition().Schema
-}
-
-// ID returns the ID of the collection.
-func (c *collection) ID() uint32 {
- return c.Description().ID
-}
-
-func (c *collection) SchemaRoot() string {
- return c.Schema().Root
-}
-
-func (c *collection) Definition() client.CollectionDefinition {
- return c.def
-}
-
-// Create a new document.
-// Will verify the DocID/CID to ensure that the new document is correctly formatted.
-func (c *collection) Create(
- ctx context.Context,
- doc *client.Document,
-) error {
- ctx, txn, err := ensureContextTxn(ctx, c.db, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- err = c.create(ctx, doc)
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-// CreateMany creates a collection of documents at once.
-// Will verify the DocID/CID to ensure that the new documents are correctly formatted.
-func (c *collection) CreateMany(
- ctx context.Context,
- docs []*client.Document,
-) error {
- ctx, txn, err := ensureContextTxn(ctx, c.db, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- for _, doc := range docs {
- err = c.create(ctx, doc)
- if err != nil {
- return err
- }
- }
- return txn.Commit(ctx)
-}
-
-func (c *collection) getDocIDAndPrimaryKeyFromDoc(
- doc *client.Document,
-) (client.DocID, core.PrimaryDataStoreKey, error) {
- docID, err := doc.GenerateDocID()
- if err != nil {
- return client.DocID{}, core.PrimaryDataStoreKey{}, err
- }
-
- primaryKey := c.getPrimaryKeyFromDocID(docID)
- if primaryKey.DocID != doc.ID().String() {
- return client.DocID{}, core.PrimaryDataStoreKey{},
- NewErrDocVerification(doc.ID().String(), primaryKey.DocID)
- }
- return docID, primaryKey, nil
-}
-
-func (c *collection) create(
- ctx context.Context,
- doc *client.Document,
-) error {
- docID, primaryKey, err := c.getDocIDAndPrimaryKeyFromDoc(doc)
- if err != nil {
- return err
- }
-
- // check if doc already exists
- exists, isDeleted, err := c.exists(ctx, primaryKey)
- if err != nil {
- return err
- }
- if exists {
- return NewErrDocumentAlreadyExists(primaryKey.DocID)
- }
- if isDeleted {
- return NewErrDocumentDeleted(primaryKey.DocID)
- }
-
- // write value object marker if we have an empty doc
- if len(doc.Values()) == 0 {
- txn := mustGetContextTxn(ctx)
- valueKey := c.getDataStoreKeyFromDocID(docID)
- err = txn.Datastore().Put(ctx, valueKey.ToDS(), []byte{base.ObjectMarker})
- if err != nil {
- return err
- }
- }
-
- // write data to DB via MerkleClock/CRDT
- _, err = c.save(ctx, doc, true)
- if err != nil {
- return err
- }
-
- err = c.indexNewDoc(ctx, doc)
- if err != nil {
- return err
- }
-
- return c.registerDocWithACP(ctx, doc.ID().String())
-}
-
-// Update an existing document with the new values.
-// Any field that needs to be removed or cleared should call doc.Clear(field) before.
-// Any field that is nil/empty that hasn't called Clear will be ignored.
-func (c *collection) Update(
- ctx context.Context,
- doc *client.Document,
-) error {
- ctx, txn, err := ensureContextTxn(ctx, c.db, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- primaryKey := c.getPrimaryKeyFromDocID(doc.ID())
- exists, isDeleted, err := c.exists(ctx, primaryKey)
- if err != nil {
- return err
- }
- if !exists {
- return client.ErrDocumentNotFoundOrNotAuthorized
- }
- if isDeleted {
- return NewErrDocumentDeleted(primaryKey.DocID)
- }
-
- err = c.update(ctx, doc)
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-// Contract: DB Exists check is already performed, and a doc with the given ID exists.
-// Note: Should we CompareAndSet the update, IE: Query(read-only) the state, and update if changed
-// or, just update everything regardless.
-// Should probably be smart about the update due to the MerkleCRDT overhead, shouldn't
-// add to the bloat.
-func (c *collection) update(
- ctx context.Context,
- doc *client.Document,
-) error {
- // Stop the update if the correct permissions aren't there.
- canUpdate, err := c.checkAccessOfDocWithACP(
- ctx,
- acp.WritePermission,
- doc.ID().String(),
- )
- if err != nil {
- return err
- }
- if !canUpdate {
- return client.ErrDocumentNotFoundOrNotAuthorized
- }
-
- _, err = c.save(ctx, doc, false)
- if err != nil {
- return err
- }
- return nil
-}
-
-// Save a document into the db.
-// Either by creating a new document or by updating an existing one
-func (c *collection) Save(
- ctx context.Context,
- doc *client.Document,
-) error {
- ctx, txn, err := ensureContextTxn(ctx, c.db, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- // Check if document already exists with primary DS key.
- primaryKey := c.getPrimaryKeyFromDocID(doc.ID())
- exists, isDeleted, err := c.exists(ctx, primaryKey)
- if err != nil {
- return err
- }
-
- if isDeleted {
- return NewErrDocumentDeleted(doc.ID().String())
- }
-
- if exists {
- err = c.update(ctx, doc)
- } else {
- err = c.create(ctx, doc)
- }
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-// save saves the document state. save MUST not be called outside the `c.create`
-// and `c.update` methods as we wrap the acp logic within those methods. Calling
-// save elsewhere could cause the omission of acp checks.
-func (c *collection) save(
- ctx context.Context,
- doc *client.Document,
- isCreate bool,
-) (cid.Cid, error) {
- if !isCreate {
- err := c.updateIndexedDoc(ctx, doc)
- if err != nil {
- return cid.Undef, err
- }
- }
- txn := mustGetContextTxn(ctx)
-
- // NOTE: We delay the final Clean() call until we know
- // the commit on the transaction is successful. If we didn't
- // wait, and just did it here, then *if* the commit fails down
- // the line, then we have no way to roll back the state
- // side-effect on the document func called here.
- txn.OnSuccess(func() {
- doc.Clean()
- })
-
- // New batch transaction/store (optional/todo)
- // Ensute/Set doc object marker
- // Loop through doc values
- // => instantiate MerkleCRDT objects
- // => Set/Publish new CRDT values
- primaryKey := c.getPrimaryKeyFromDocID(doc.ID())
- links := make([]core.DAGLink, 0)
- for k, v := range doc.Fields() {
- val, err := doc.GetValueWithField(v)
- if err != nil {
- return cid.Undef, err
- }
-
- if val.IsDirty() {
- fieldKey, fieldExists := c.tryGetFieldKey(primaryKey, k)
-
- if !fieldExists {
- return cid.Undef, client.NewErrFieldNotExist(k)
- }
-
- fieldDescription, valid := c.Definition().GetFieldByName(k)
- if !valid {
- return cid.Undef, client.NewErrFieldNotExist(k)
- }
-
- // by default the type will have been set to LWW_REGISTER. We need to ensure
- // that it's set to the same as the field description CRDT type.
- val.SetType(fieldDescription.Typ)
-
- relationFieldDescription, isSecondaryRelationID := c.isSecondaryIDField(fieldDescription)
- if isSecondaryRelationID {
- primaryId := val.Value().(string)
-
- err = c.patchPrimaryDoc(
- ctx,
- c.Name().Value(),
- relationFieldDescription,
- primaryKey.DocID,
- primaryId,
- )
- if err != nil {
- return cid.Undef, err
- }
-
- // If this field was a secondary relation ID the related document will have been
- // updated instead and we should discard this value
- continue
- }
-
- err = c.validateOneToOneLinkDoesntAlreadyExist(
- ctx,
- doc.ID().String(),
- fieldDescription,
- val.Value(),
- )
- if err != nil {
- return cid.Undef, err
- }
-
- merkleCRDT, err := merklecrdt.InstanceWithStore(
- txn,
- core.NewCollectionSchemaVersionKey(c.Schema().VersionID, c.ID()),
- val.Type(),
- fieldDescription.Kind,
- fieldKey,
- fieldDescription.Name,
- )
- if err != nil {
- return cid.Undef, err
- }
-
- node, _, err := merkleCRDT.Save(ctx, val)
- if err != nil {
- return cid.Undef, err
- }
-
- link := core.DAGLink{
- Name: k,
- Cid: node.Cid(),
- }
- links = append(links, link)
- }
- }
-
- headNode, priority, err := c.saveCompositeToMerkleCRDT(
- ctx,
- primaryKey.ToDataStoreKey(),
- links,
- client.Active,
- )
- if err != nil {
- return cid.Undef, err
- }
-
- if c.db.events.Updates.HasValue() {
- txn.OnSuccess(
- func() {
- c.db.events.Updates.Value().Publish(
- events.Update{
- DocID: doc.ID().String(),
- Cid: headNode.Cid(),
- SchemaRoot: c.Schema().Root,
- Block: headNode,
- Priority: priority,
- },
- )
- },
- )
- }
-
- txn.OnSuccess(func() {
- doc.SetHead(headNode.Cid())
- })
-
- return headNode.Cid(), nil
-}
-
-func (c *collection) validateOneToOneLinkDoesntAlreadyExist(
- ctx context.Context,
- docID string,
- fieldDescription client.FieldDefinition,
- value any,
-) error {
- if fieldDescription.Kind != client.FieldKind_DocID {
- return nil
- }
-
- if value == nil {
- return nil
- }
-
- objFieldDescription, ok := c.Definition().GetFieldByName(
- strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID),
- )
- if !ok {
- return client.NewErrFieldNotExist(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID))
- }
- if !(objFieldDescription.Kind.IsObject() && !objFieldDescription.Kind.IsArray()) {
- return nil
- }
-
- otherCol, err := c.db.getCollectionByName(ctx, objFieldDescription.Kind.Underlying())
- if err != nil {
- return err
- }
- otherObjFieldDescription, _ := otherCol.Description().GetFieldByRelation(
- fieldDescription.RelationName,
- c.Name().Value(),
- objFieldDescription.Name,
- )
- if !(otherObjFieldDescription.Kind.HasValue() &&
- otherObjFieldDescription.Kind.Value().IsObject() &&
- !otherObjFieldDescription.Kind.Value().IsArray()) {
- // If the other field is not an object field then this is not a one to one relation and we can continue
- return nil
- }
-
- filter := fmt.Sprintf(
- `{_and: [{%s: {_ne: "%s"}}, {%s: {_eq: "%s"}}]}`,
- request.DocIDFieldName,
- docID,
- fieldDescription.Name,
- value,
- )
- selectionPlan, err := c.makeSelectionPlan(ctx, filter)
- if err != nil {
- return err
- }
-
- err = selectionPlan.Init()
- if err != nil {
- closeErr := selectionPlan.Close()
- if closeErr != nil {
- return errors.Wrap(err.Error(), closeErr)
- }
- return err
- }
-
- if err = selectionPlan.Start(); err != nil {
- closeErr := selectionPlan.Close()
- if closeErr != nil {
- return errors.Wrap(err.Error(), closeErr)
- }
- return err
- }
-
- alreadyLinked, err := selectionPlan.Next()
- if err != nil {
- closeErr := selectionPlan.Close()
- if closeErr != nil {
- return errors.Wrap(err.Error(), closeErr)
- }
- return err
- }
-
- if alreadyLinked {
- existingDocument := selectionPlan.Value()
- err := selectionPlan.Close()
- if err != nil {
- return err
- }
- return NewErrOneOneAlreadyLinked(docID, existingDocument.GetID(), objFieldDescription.RelationName)
- }
-
- err = selectionPlan.Close()
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// Delete will attempt to delete a document by docID and return true if a deletion is successful,
-// otherwise will return false, along with an error, if it cannot.
-// If the document doesn't exist, then it will return false, and a ErrDocumentNotFound error.
-// This operation will all state relating to the given DocID. This includes data, block, and head storage.
-func (c *collection) Delete(
- ctx context.Context,
- docID client.DocID,
-) (bool, error) {
- ctx, txn, err := ensureContextTxn(ctx, c.db, false)
- if err != nil {
- return false, err
- }
- defer txn.Discard(ctx)
-
- primaryKey := c.getPrimaryKeyFromDocID(docID)
-
- err = c.applyDelete(ctx, primaryKey)
- if err != nil {
- return false, err
- }
- return true, txn.Commit(ctx)
-}
-
-// Exists checks if a given document exists with supplied DocID.
-func (c *collection) Exists(
- ctx context.Context,
- docID client.DocID,
-) (bool, error) {
- ctx, txn, err := ensureContextTxn(ctx, c.db, false)
- if err != nil {
- return false, err
- }
- defer txn.Discard(ctx)
-
- primaryKey := c.getPrimaryKeyFromDocID(docID)
- exists, isDeleted, err := c.exists(ctx, primaryKey)
- if err != nil && !errors.Is(err, ds.ErrNotFound) {
- return false, err
- }
- return exists && !isDeleted, txn.Commit(ctx)
-}
-
-// check if a document exists with the given primary key
-func (c *collection) exists(
- ctx context.Context,
- primaryKey core.PrimaryDataStoreKey,
-) (exists bool, isDeleted bool, err error) {
- canRead, err := c.checkAccessOfDocWithACP(
- ctx,
- acp.ReadPermission,
- primaryKey.DocID,
- )
- if err != nil {
- return false, false, err
- } else if !canRead {
- return false, false, nil
- }
-
- txn := mustGetContextTxn(ctx)
- val, err := txn.Datastore().Get(ctx, primaryKey.ToDS())
- if err != nil && errors.Is(err, ds.ErrNotFound) {
- return false, false, nil
- } else if err != nil {
- return false, false, err
- }
- if bytes.Equal(val, []byte{base.DeletedObjectMarker}) {
- return true, true, nil
- }
-
- return true, false, nil
-}
-
-// saveCompositeToMerkleCRDT saves the composite to the merkle CRDT.
-// saveCompositeToMerkleCRDT MUST not be called outside the `c.save`
-// and `c.applyDelete` methods as we wrap the acp logic around those methods.
-// Calling it elsewhere could cause the omission of acp checks.
-func (c *collection) saveCompositeToMerkleCRDT(
- ctx context.Context,
- dsKey core.DataStoreKey,
- links []core.DAGLink,
- status client.DocumentStatus,
-) (ipld.Node, uint64, error) {
- txn := mustGetContextTxn(ctx)
- dsKey = dsKey.WithFieldId(core.COMPOSITE_NAMESPACE)
- merkleCRDT := merklecrdt.NewMerkleCompositeDAG(
- txn,
- core.NewCollectionSchemaVersionKey(c.Schema().VersionID, c.ID()),
- dsKey,
- "",
- )
-
- if status.IsDeleted() {
- return merkleCRDT.Delete(ctx, links)
- }
-
- return merkleCRDT.Save(ctx, links)
-}
-
-func (c *collection) getPrimaryKeyFromDocID(docID client.DocID) core.PrimaryDataStoreKey {
- return core.PrimaryDataStoreKey{
- CollectionRootID: c.Description().RootID,
- DocID: docID.String(),
- }
-}
-
-func (c *collection) getDataStoreKeyFromDocID(docID client.DocID) core.DataStoreKey {
- return core.DataStoreKey{
- CollectionRootID: c.Description().RootID,
- DocID: docID.String(),
- InstanceType: core.ValueKey,
- }
-}
-
-func (c *collection) tryGetFieldKey(primaryKey core.PrimaryDataStoreKey, fieldName string) (core.DataStoreKey, bool) {
- fieldId, hasField := c.tryGetFieldID(fieldName)
- if !hasField {
- return core.DataStoreKey{}, false
- }
-
- return core.DataStoreKey{
- CollectionRootID: c.Description().RootID,
- DocID: primaryKey.DocID,
- FieldId: strconv.FormatUint(uint64(fieldId), 10),
- }, true
-}
-
-// tryGetFieldID returns the FieldID of the given fieldName.
-// Will return false if the field is not found.
-func (c *collection) tryGetFieldID(fieldName string) (uint32, bool) {
- for _, field := range c.Definition().GetFields() {
- if field.Name == fieldName {
- if field.Kind.IsObject() || field.Kind.IsObjectArray() {
- // We do not wish to match navigational properties, only
- // fields directly on the collection.
- return uint32(0), false
- }
- return uint32(field.ID), true
- }
- }
-
- return uint32(0), false
-}
diff --git a/db/config.go b/db/config.go
deleted file mode 100644
index 397956ed8b..0000000000
--- a/db/config.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2024 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package db
-
-import (
- "context"
-
- "github.com/lens-vm/lens/host-go/engine/module"
- "github.com/sourcenetwork/immutable"
-
- "github.com/sourcenetwork/defradb/acp"
- "github.com/sourcenetwork/defradb/events"
-)
-
-const (
- defaultMaxTxnRetries = 5
- updateEventBufferSize = 100
-)
-
-// Option is a funtion that sets a config value on the db.
-type Option func(*db)
-
-// WithACP enables access control. If path is empty then acp runs in-memory.
-func WithACP(path string) Option {
- return func(db *db) {
- var acpLocal acp.ACPLocal
- acpLocal.Init(context.Background(), path)
- db.acp = immutable.Some[acp.ACP](&acpLocal)
- }
-}
-
-// WithACPInMemory enables access control in-memory.
-func WithACPInMemory() Option { return WithACP("") }
-
-// WithUpdateEvents enables the update events channel.
-func WithUpdateEvents() Option {
- return func(db *db) {
- db.events = events.Events{
- Updates: immutable.Some(events.New[events.Update](0, updateEventBufferSize)),
- }
- }
-}
-
-// WithMaxRetries sets the maximum number of retries per transaction.
-func WithMaxRetries(num int) Option {
- return func(db *db) {
- db.maxTxnRetries = immutable.Some(num)
- }
-}
-
-// WithLensPoolSize sets the maximum number of cached migrations instances to preserve per schema version.
-//
-// Will default to `5` if not set.
-func WithLensPoolSize(size int) Option {
- return func(db *db) {
- db.lensPoolSize = immutable.Some(size)
- }
-}
-
-// WithLensRuntime returns an option that sets the lens registry runtime.
-func WithLensRuntime(runtime module.Runtime) Option {
- return func(db *db) {
- db.lensRuntime = immutable.Some(runtime)
- }
-}
diff --git a/db/config_test.go b/db/config_test.go
deleted file mode 100644
index 02bd81a910..0000000000
--- a/db/config_test.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2024 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package db
-
-import (
- "testing"
-
- "github.com/lens-vm/lens/host-go/runtimes/wasmtime"
- "github.com/stretchr/testify/assert"
-)
-
-func TestWithACP(t *testing.T) {
- d := &db{}
- WithACP("test")(d)
- assert.True(t, d.acp.HasValue())
-}
-
-func TestWithACPInMemory(t *testing.T) {
- d := &db{}
- WithACPInMemory()(d)
- assert.True(t, d.acp.HasValue())
-}
-
-func TestWithUpdateEvents(t *testing.T) {
- d := &db{}
- WithUpdateEvents()(d)
- assert.NotNil(t, d.events)
-}
-
-func TestWithMaxRetries(t *testing.T) {
- d := &db{}
- WithMaxRetries(10)(d)
- assert.True(t, d.maxTxnRetries.HasValue())
- assert.Equal(t, 10, d.maxTxnRetries.Value())
-}
-
-func TestWithLensPoolSize(t *testing.T) {
- d := &db{}
- WithLensPoolSize(10)(d)
- assert.Equal(t, 10, d.lensPoolSize.Value())
-}
-
-func TestWithLensRuntime(t *testing.T) {
- d := &db{}
- WithLensRuntime(wasmtime.New())(d)
- assert.NotNil(t, d.lensRuntime.Value())
-}
diff --git a/db/subscriptions.go b/db/subscriptions.go
deleted file mode 100644
index 0d16074887..0000000000
--- a/db/subscriptions.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package db
-
-import (
- "context"
-
- "github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/client/request"
- "github.com/sourcenetwork/defradb/events"
- "github.com/sourcenetwork/defradb/planner"
-)
-
-func (db *db) checkForClientSubscriptions(r *request.Request) (
- *events.Publisher[events.Update],
- *request.ObjectSubscription,
- error,
-) {
- if len(r.Subscription) == 0 || len(r.Subscription[0].Selections) == 0 {
- // This is not a subscription request and we have nothing to do here
- return nil, nil, nil
- }
-
- if !db.events.Updates.HasValue() {
- return nil, nil, ErrSubscriptionsNotAllowed
- }
-
- s := r.Subscription[0].Selections[0]
- if subRequest, ok := s.(*request.ObjectSubscription); ok {
- pub, err := events.NewPublisher(db.events.Updates.Value(), 5)
- if err != nil {
- return nil, nil, err
- }
-
- return pub, subRequest, nil
- }
-
- return nil, nil, client.NewErrUnexpectedType[request.ObjectSubscription]("SubscriptionSelection", s)
-}
-
-func (db *db) handleSubscription(
- ctx context.Context,
- pub *events.Publisher[events.Update],
- r *request.ObjectSubscription,
-) {
- for evt := range pub.Event() {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- log.ErrorContext(ctx, err.Error())
- continue
- }
-
- ctx := SetContextTxn(ctx, txn)
- db.handleEvent(ctx, pub, evt, r)
- txn.Discard(ctx)
- }
-}
-
-func (db *db) handleEvent(
- ctx context.Context,
- pub *events.Publisher[events.Update],
- evt events.Update,
- r *request.ObjectSubscription,
-) {
- txn := mustGetContextTxn(ctx)
- identity := GetContextIdentity(ctx)
- p := planner.New(
- ctx,
- identity,
- db.acp,
- db,
- txn,
- )
-
- s := r.ToSelect(evt.DocID, evt.Cid.String())
-
- result, err := p.RunSubscriptionRequest(ctx, s)
- if err != nil {
- pub.Publish(client.GQLResult{
- Errors: []error{err},
- })
- return
- }
-
- // Don't send anything back to the client if the request yields an empty dataset.
- if len(result) == 0 {
- return
- }
-
- pub.Publish(client.GQLResult{
- Data: result,
- })
-}
diff --git a/docs/cli/defradb.md b/docs/cli/defradb.md
deleted file mode 100644
index 602206e575..0000000000
--- a/docs/cli/defradb.md
+++ /dev/null
@@ -1,42 +0,0 @@
-## defradb
-
-DefraDB Edge Database
-
-### Synopsis
-
-DefraDB is the edge database to power the user-centric future.
-
-Start a DefraDB node, interact with a local or remote node, and much more.
-
-
-### Options
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -h, --help help for defradb
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-* [defradb server-dump](defradb_server-dump.md) - Dumps the state of the entire database
-* [defradb start](defradb_start.md) - Start a DefraDB node
-* [defradb version](defradb_version.md) - Display the version information of DefraDB and its components
-
diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md
deleted file mode 100644
index 302e171dd3..0000000000
--- a/docs/cli/defradb_client.md
+++ /dev/null
@@ -1,54 +0,0 @@
-## defradb client
-
-Interact with a DefraDB node
-
-### Synopsis
-
-Interact with a DefraDB node.
-Execute queries, add schema types, obtain node info, etc.
-
-### Options
-
-```
- -h, --help help for client
- -i, --identity string ACP Identity
- --tx uint Transaction ID
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb](defradb.md) - DefraDB Edge Database
-* [defradb client acp](defradb_client_acp.md) - Interact with the access control system of a DefraDB node
-* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility
-* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
-* [defradb client dump](defradb_client_dump.md) - Dump the contents of DefraDB node-side
-* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance
-* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system
-* [defradb client query](defradb_client_query.md) - Send a DefraDB GraphQL query request
-* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node
-* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions
-* [defradb client view](defradb_client_view.md) - Manage views within a running DefraDB instance
-
diff --git a/docs/cli/defradb_client_acp.md b/docs/cli/defradb_client_acp.md
deleted file mode 100644
index d3f57ae230..0000000000
--- a/docs/cli/defradb_client_acp.md
+++ /dev/null
@@ -1,48 +0,0 @@
-## defradb client acp
-
-Interact with the access control system of a DefraDB node
-
-### Synopsis
-
-Interact with the access control system of a DefraDB node
-
-Learn more about [ACP](/acp/README.md)
-
-
-
-### Options
-
-```
- -h, --help help for acp
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-* [defradb client acp policy](defradb_client_acp_policy.md) - Interact with the acp policy features of DefraDB instance
-
diff --git a/docs/cli/defradb_client_acp_policy.md b/docs/cli/defradb_client_acp_policy.md
deleted file mode 100644
index 2e659a0eb4..0000000000
--- a/docs/cli/defradb_client_acp_policy.md
+++ /dev/null
@@ -1,44 +0,0 @@
-## defradb client acp policy
-
-Interact with the acp policy features of DefraDB instance
-
-### Synopsis
-
-Interact with the acp policy features of DefraDB instance
-
-### Options
-
-```
- -h, --help help for policy
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client acp](defradb_client_acp.md) - Interact with the access control system of a DefraDB node
-* [defradb client acp policy add](defradb_client_acp_policy_add.md) - Add new policy
-
diff --git a/docs/cli/defradb_client_acp_policy_add.md b/docs/cli/defradb_client_acp_policy_add.md
deleted file mode 100644
index f426909323..0000000000
--- a/docs/cli/defradb_client_acp_policy_add.md
+++ /dev/null
@@ -1,91 +0,0 @@
-## defradb client acp policy add
-
-Add new policy
-
-### Synopsis
-
-Add new policy
-
-Notes:
- - Can not add a policy without specifying an identity.
- - ACP must be available (i.e. ACP can not be disabled).
- - A non-DPI policy will be accepted (will be registered with acp system).
- - But only a valid DPI policyID & resource can be specified on a schema.
- - DPI validation happens when attempting to add a schema with '@policy'.
- - Learn more about [ACP & DPI Rules](/acp/README.md)
-
-Example: add from an argument string:
- defradb client acp policy add -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j '
-description: A Valid DefraDB Policy Interface
-
-actor:
- name: actor
-
-resources:
- users:
- permissions:
- read:
- expr: owner + reader
- write:
- expr: owner
-
- relations:
- owner:
- types:
- - actor
- reader:
- types:
- - actor
-'
-
-Example: add from file:
- defradb client acp policy add -i cosmos17r39df0hdcrgnmmw4mvu7qgk5nu888c7uvv37y -f policy.yml
-
-Example: add from file, verbose flags:
- defradb client acp policy add --identity cosmos1kpw734v54g0t0d8tcye8ee5jc3gld0tcr2q473 --file policy.yml
-
-Example: add from stdin:
- cat policy.yml | defradb client acp policy add -
-
-
-
-```
-defradb client acp policy add [-i --identity] [policy] [flags]
-```
-
-### Options
-
-```
- -f, --file string File to load a policy from
- -h, --help help for add
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client acp policy](defradb_client_acp_policy.md) - Interact with the acp policy features of DefraDB instance
-
diff --git a/docs/cli/defradb_client_backup.md b/docs/cli/defradb_client_backup.md
deleted file mode 100644
index ffa879365c..0000000000
--- a/docs/cli/defradb_client_backup.md
+++ /dev/null
@@ -1,46 +0,0 @@
-## defradb client backup
-
-Interact with the backup utility
-
-### Synopsis
-
-Export to or Import from a backup file.
-Currently only supports JSON format.
-
-### Options
-
-```
- -h, --help help for backup
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-* [defradb client backup export](defradb_client_backup_export.md) - Export the database to a file
-* [defradb client backup import](defradb_client_backup_import.md) - Import a JSON data file to the database
-
diff --git a/docs/cli/defradb_client_backup_export.md b/docs/cli/defradb_client_backup_export.md
deleted file mode 100644
index fc05e8ee14..0000000000
--- a/docs/cli/defradb_client_backup_export.md
+++ /dev/null
@@ -1,58 +0,0 @@
-## defradb client backup export
-
-Export the database to a file
-
-### Synopsis
-
-Export the database to a file. If a file exists at the location, it will be overwritten.
-
-If the --collection flag is provided, only the data for that collection will be exported.
-Otherwise, all collections in the database will be exported.
-
-If the --pretty flag is provided, the JSON will be pretty printed.
-
-Example: export data for the 'Users' collection:
- defradb client export --collection Users user_data.json
-
-```
-defradb client backup export [-c --collections | -p --pretty | -f --format] [flags]
-```
-
-### Options
-
-```
- -c, --collections strings List of collections
- -f, --format string Define the output format. Supported formats: [json] (default "json")
- -h, --help help for export
- -p, --pretty Set the output JSON to be pretty printed
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility
-
diff --git a/docs/cli/defradb_client_backup_import.md b/docs/cli/defradb_client_backup_import.md
deleted file mode 100644
index 373f5be89c..0000000000
--- a/docs/cli/defradb_client_backup_import.md
+++ /dev/null
@@ -1,50 +0,0 @@
-## defradb client backup import
-
-Import a JSON data file to the database
-
-### Synopsis
-
-Import a JSON data file to the database.
-
-Example: import data to the database:
- defradb client import user_data.json
-
-```
-defradb client backup import [flags]
-```
-
-### Options
-
-```
- -h, --help help for import
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client backup](defradb_client_backup.md) - Interact with the backup utility
-
diff --git a/docs/cli/defradb_client_collection.md b/docs/cli/defradb_client_collection.md
deleted file mode 100644
index 59faa94f78..0000000000
--- a/docs/cli/defradb_client_collection.md
+++ /dev/null
@@ -1,54 +0,0 @@
-## defradb client collection
-
-Interact with a collection.
-
-### Synopsis
-
-Create, read, update, and delete documents within a collection.
-
-### Options
-
-```
- --get-inactive Get inactive collections as well as active
- -h, --help help for collection
- -i, --identity string ACP Identity
- --name string Collection name
- --schema string Collection schema Root
- --tx uint Transaction ID
- --version string Collection version ID
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-* [defradb client collection create](defradb_client_collection_create.md) - Create a new document.
-* [defradb client collection delete](defradb_client_collection_delete.md) - Delete documents by docID or filter.
-* [defradb client collection describe](defradb_client_collection_describe.md) - View collection description.
-* [defradb client collection docIDs](defradb_client_collection_docIDs.md) - List all document IDs (docIDs).
-* [defradb client collection get](defradb_client_collection_get.md) - View document fields.
-* [defradb client collection patch](defradb_client_collection_patch.md) - Patch existing collection descriptions
-* [defradb client collection update](defradb_client_collection_update.md) - Update documents by docID or filter.
-
diff --git a/docs/cli/defradb_client_collection_create.md b/docs/cli/defradb_client_collection_create.md
deleted file mode 100644
index b565c2a547..0000000000
--- a/docs/cli/defradb_client_collection_create.md
+++ /dev/null
@@ -1,68 +0,0 @@
-## defradb client collection create
-
-Create a new document.
-
-### Synopsis
-
-Create a new document.
-
-Example: create from string:
- defradb client collection create --name User '{ "name": "Bob" }'
-
-Example: create from string, with identity:
- defradb client collection create -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User '{ "name": "Bob" }'
-
-Example: create multiple from string:
- defradb client collection create --name User '[{ "name": "Alice" }, { "name": "Bob" }]'
-
-Example: create from file:
- defradb client collection create --name User -f document.json
-
-Example: create from stdin:
- cat document.json | defradb client collection create --name User -
-
-
-```
-defradb client collection create [-i --identity] [flags]
-```
-
-### Options
-
-```
- -f, --file string File containing document(s)
- -h, --help help for create
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --get-inactive Get inactive collections as well as active
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --name string Collection name
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --schema string Collection schema Root
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
- --version string Collection version ID
-```
-
-### SEE ALSO
-
-* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
-
diff --git a/docs/cli/defradb_client_collection_delete.md b/docs/cli/defradb_client_collection_delete.md
deleted file mode 100644
index 2bca8d7d8a..0000000000
--- a/docs/cli/defradb_client_collection_delete.md
+++ /dev/null
@@ -1,63 +0,0 @@
-## defradb client collection delete
-
-Delete documents by docID or filter.
-
-### Synopsis
-
-Delete documents by docID or filter and lists the number of documents deleted.
-
-Example: delete by docID:
- defradb client collection delete --name User --docID bae-123
-
-Example: delete by docID with identity:
- defradb client collection delete -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User --docID bae-123
-
-Example: delete by filter:
- defradb client collection delete --name User --filter '{ "_gte": { "points": 100 } }'
-
-
-```
-defradb client collection delete [-i --identity] [--filter --docID ] [flags]
-```
-
-### Options
-
-```
- --docID string Document ID
- --filter string Document filter
- -h, --help help for delete
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --get-inactive Get inactive collections as well as active
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --name string Collection name
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --schema string Collection schema Root
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
- --version string Collection version ID
-```
-
-### SEE ALSO
-
-* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
-
diff --git a/docs/cli/defradb_client_collection_describe.md b/docs/cli/defradb_client_collection_describe.md
deleted file mode 100644
index bea05a1321..0000000000
--- a/docs/cli/defradb_client_collection_describe.md
+++ /dev/null
@@ -1,64 +0,0 @@
-## defradb client collection describe
-
-View collection description.
-
-### Synopsis
-
-Introspect collection types.
-
-Example: view all collections
- defradb client collection describe
-
-Example: view collection by name
- defradb client collection describe --name User
-
-Example: view collection by schema root id
- defradb client collection describe --schema bae123
-
-Example: view collection by version id. This will also return inactive collections
- defradb client collection describe --version bae123
-
-
-```
-defradb client collection describe [flags]
-```
-
-### Options
-
-```
- --get-inactive Get inactive collections as well as active
- -h, --help help for describe
- --name string Collection name
- --schema string Collection schema Root
- --version string Collection version ID
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
-
diff --git a/docs/cli/defradb_client_collection_docIDs.md b/docs/cli/defradb_client_collection_docIDs.md
deleted file mode 100644
index 1cf1a8444a..0000000000
--- a/docs/cli/defradb_client_collection_docIDs.md
+++ /dev/null
@@ -1,58 +0,0 @@
-## defradb client collection docIDs
-
-List all document IDs (docIDs).
-
-### Synopsis
-
-List all document IDs (docIDs).
-
-Example: list all docID(s):
- defradb client collection docIDs --name User
-
-Example: list all docID(s), with an identity:
- defradb client collection docIDs -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User
-
-
-```
-defradb client collection docIDs [-i --identity] [flags]
-```
-
-### Options
-
-```
- -h, --help help for docIDs
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --get-inactive Get inactive collections as well as active
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --name string Collection name
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --schema string Collection schema Root
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
- --version string Collection version ID
-```
-
-### SEE ALSO
-
-* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
-
diff --git a/docs/cli/defradb_client_collection_get.md b/docs/cli/defradb_client_collection_get.md
deleted file mode 100644
index 7b80a2a54b..0000000000
--- a/docs/cli/defradb_client_collection_get.md
+++ /dev/null
@@ -1,59 +0,0 @@
-## defradb client collection get
-
-View document fields.
-
-### Synopsis
-
-View document fields.
-
-Example:
- defradb client collection get --name User bae-123
-
-Example to get a private document we must use an identity:
- defradb client collection get -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User bae-123
-
-
-```
-defradb client collection get [-i --identity] [--show-deleted] [flags]
-```
-
-### Options
-
-```
- -h, --help help for get
- --show-deleted Show deleted documents
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --get-inactive Get inactive collections as well as active
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --name string Collection name
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --schema string Collection schema Root
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
- --version string Collection version ID
-```
-
-### SEE ALSO
-
-* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
-
diff --git a/docs/cli/defradb_client_collection_patch.md b/docs/cli/defradb_client_collection_patch.md
deleted file mode 100644
index c8540aa397..0000000000
--- a/docs/cli/defradb_client_collection_patch.md
+++ /dev/null
@@ -1,65 +0,0 @@
-## defradb client collection patch
-
-Patch existing collection descriptions
-
-### Synopsis
-
-Patch existing collection descriptions.
-
-Uses JSON Patch to modify collection descriptions.
-
-Example: patch from an argument string:
- defradb client collection patch '[{ "op": "add", "path": "...", "value": {...} }]'
-
-Example: patch from file:
- defradb client collection patch -p patch.json
-
-Example: patch from stdin:
- cat patch.json | defradb client collection patch -
-
-To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network.
-
-```
-defradb client collection patch [patch] [flags]
-```
-
-### Options
-
-```
- -h, --help help for patch
- -p, --patch-file string File to load a patch from
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --get-inactive Get inactive collections as well as active
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --name string Collection name
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --schema string Collection schema Root
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
- --version string Collection version ID
-```
-
-### SEE ALSO
-
-* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
-
diff --git a/docs/cli/defradb_client_collection_update.md b/docs/cli/defradb_client_collection_update.md
deleted file mode 100644
index ab6b8999b0..0000000000
--- a/docs/cli/defradb_client_collection_update.md
+++ /dev/null
@@ -1,70 +0,0 @@
-## defradb client collection update
-
-Update documents by docID or filter.
-
-### Synopsis
-
-Update documents by docID or filter.
-
-Example: update from string:
- defradb client collection update --name User --docID bae-123 '{ "name": "Bob" }'
-
-Example: update by filter:
- defradb client collection update --name User \
- --filter '{ "_gte": { "points": 100 } }' --updater '{ "verified": true }'
-
-Example: update by docID:
- defradb client collection update --name User \
- --docID bae-123 --updater '{ "verified": true }'
-
-Example: update private docID, with identity:
- defradb client collection update -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j --name User \
- --docID bae-123 --updater '{ "verified": true }'
-
-
-```
-defradb client collection update [-i --identity] [--filter --docID --updater ] [flags]
-```
-
-### Options
-
-```
- --docID string Document ID
- --filter string Document filter
- -h, --help help for update
- --updater string Document updater
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --get-inactive Get inactive collections as well as active
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --name string Collection name
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --schema string Collection schema Root
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
- --version string Collection version ID
-```
-
-### SEE ALSO
-
-* [defradb client collection](defradb_client_collection.md) - Interact with a collection.
-
diff --git a/docs/cli/defradb_client_dump.md b/docs/cli/defradb_client_dump.md
deleted file mode 100644
index a819df1514..0000000000
--- a/docs/cli/defradb_client_dump.md
+++ /dev/null
@@ -1,43 +0,0 @@
-## defradb client dump
-
-Dump the contents of DefraDB node-side
-
-```
-defradb client dump [flags]
-```
-
-### Options
-
-```
- -h, --help help for dump
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-
diff --git a/docs/cli/defradb_client_index.md b/docs/cli/defradb_client_index.md
deleted file mode 100644
index bb59a6373b..0000000000
--- a/docs/cli/defradb_client_index.md
+++ /dev/null
@@ -1,46 +0,0 @@
-## defradb client index
-
-Manage collections' indexes of a running DefraDB instance
-
-### Synopsis
-
-Manage (create, drop, or list) collection indexes on a DefraDB node.
-
-### Options
-
-```
- -h, --help help for index
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-* [defradb client index create](defradb_client_index_create.md) - Creates a secondary index on a collection's field(s)
-* [defradb client index drop](defradb_client_index_drop.md) - Drop a collection's secondary index
-* [defradb client index list](defradb_client_index_list.md) - Shows the list indexes in the database or for a specific collection
-
diff --git a/docs/cli/defradb_client_index_create.md b/docs/cli/defradb_client_index_create.md
deleted file mode 100644
index 8c365e348e..0000000000
--- a/docs/cli/defradb_client_index_create.md
+++ /dev/null
@@ -1,60 +0,0 @@
-## defradb client index create
-
-Creates a secondary index on a collection's field(s)
-
-### Synopsis
-
-Creates a secondary index on a collection's field(s).
-
-The --name flag is optional. If not provided, a name will be generated automatically.
-The --unique flag is optional. If provided, the index will be unique.
-
-Example: create an index for 'Users' collection on 'name' field:
- defradb client index create --collection Users --fields name
-
-Example: create a named index for 'Users' collection on 'name' field:
- defradb client index create --collection Users --fields name --name UsersByName
-
-```
-defradb client index create -c --collection --fields [-n --name ] [--unique] [flags]
-```
-
-### Options
-
-```
- -c, --collection string Collection name
- --fields strings Fields to index
- -h, --help help for create
- -n, --name string Index name
- -u, --unique Make the index unique
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance
-
diff --git a/docs/cli/defradb_client_index_drop.md b/docs/cli/defradb_client_index_drop.md
deleted file mode 100644
index 03b206c6cb..0000000000
--- a/docs/cli/defradb_client_index_drop.md
+++ /dev/null
@@ -1,52 +0,0 @@
-## defradb client index drop
-
-Drop a collection's secondary index
-
-### Synopsis
-
-Drop a collection's secondary index.
-
-Example: drop the index 'UsersByName' for 'Users' collection:
- defradb client index create --collection Users --name UsersByName
-
-```
-defradb client index drop -c --collection -n --name [flags]
-```
-
-### Options
-
-```
- -c, --collection string Collection name
- -h, --help help for drop
- -n, --name string Index name
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance
-
diff --git a/docs/cli/defradb_client_index_list.md b/docs/cli/defradb_client_index_list.md
deleted file mode 100644
index 3c776f73ac..0000000000
--- a/docs/cli/defradb_client_index_list.md
+++ /dev/null
@@ -1,54 +0,0 @@
-## defradb client index list
-
-Shows the list indexes in the database or for a specific collection
-
-### Synopsis
-
-Shows the list indexes in the database or for a specific collection.
-
-If the --collection flag is provided, only the indexes for that collection will be shown.
-Otherwise, all indexes in the database will be shown.
-
-Example: show all index for 'Users' collection:
- defradb client index list --collection Users
-
-```
-defradb client index list [-c --collection ] [flags]
-```
-
-### Options
-
-```
- -c, --collection string Collection name
- -h, --help help for list
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance
-
diff --git a/docs/cli/defradb_client_p2p.md b/docs/cli/defradb_client_p2p.md
deleted file mode 100644
index 2506208717..0000000000
--- a/docs/cli/defradb_client_p2p.md
+++ /dev/null
@@ -1,46 +0,0 @@
-## defradb client p2p
-
-Interact with the DefraDB P2P system
-
-### Synopsis
-
-Interact with the DefraDB P2P system
-
-### Options
-
-```
- -h, --help help for p2p
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system
-* [defradb client p2p info](defradb_client_p2p_info.md) - Get peer info from a DefraDB node
-* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system
-
diff --git a/docs/cli/defradb_client_p2p_collection.md b/docs/cli/defradb_client_p2p_collection.md
deleted file mode 100644
index a1de966445..0000000000
--- a/docs/cli/defradb_client_p2p_collection.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## defradb client p2p collection
-
-Configure the P2P collection system
-
-### Synopsis
-
-Add, delete, or get the list of P2P collections.
-The selected collections synchronize their events on the pubsub network.
-
-### Options
-
-```
- -h, --help help for collection
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system
-* [defradb client p2p collection add](defradb_client_p2p_collection_add.md) - Add P2P collections
-* [defradb client p2p collection getall](defradb_client_p2p_collection_getall.md) - Get all P2P collections
-* [defradb client p2p collection remove](defradb_client_p2p_collection_remove.md) - Remove P2P collections
-
diff --git a/docs/cli/defradb_client_p2p_collection_add.md b/docs/cli/defradb_client_p2p_collection_add.md
deleted file mode 100644
index 01bc79ca0f..0000000000
--- a/docs/cli/defradb_client_p2p_collection_add.md
+++ /dev/null
@@ -1,55 +0,0 @@
-## defradb client p2p collection add
-
-Add P2P collections
-
-### Synopsis
-
-Add P2P collections to the synchronized pubsub topics.
-The collections are synchronized between nodes of a pubsub network.
-
-Example: add single collection
- defradb client p2p collection add bae123
-
-Example: add multiple collections
- defradb client p2p collection add bae123,bae456
-
-
-```
-defradb client p2p collection add [collectionIDs] [flags]
-```
-
-### Options
-
-```
- -h, --help help for add
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system
-
diff --git a/docs/cli/defradb_client_p2p_collection_getall.md b/docs/cli/defradb_client_p2p_collection_getall.md
deleted file mode 100644
index 8d10944ad2..0000000000
--- a/docs/cli/defradb_client_p2p_collection_getall.md
+++ /dev/null
@@ -1,48 +0,0 @@
-## defradb client p2p collection getall
-
-Get all P2P collections
-
-### Synopsis
-
-Get all P2P collections in the pubsub topics.
-This is the list of collections of the node that are synchronized on the pubsub network.
-
-```
-defradb client p2p collection getall [flags]
-```
-
-### Options
-
-```
- -h, --help help for getall
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system
-
diff --git a/docs/cli/defradb_client_p2p_collection_remove.md b/docs/cli/defradb_client_p2p_collection_remove.md
deleted file mode 100644
index 1cd6a14ee9..0000000000
--- a/docs/cli/defradb_client_p2p_collection_remove.md
+++ /dev/null
@@ -1,55 +0,0 @@
-## defradb client p2p collection remove
-
-Remove P2P collections
-
-### Synopsis
-
-Remove P2P collections from the followed pubsub topics.
-The removed collections will no longer be synchronized between nodes.
-
-Example: remove single collection
- defradb client p2p collection remove bae123
-
-Example: remove multiple collections
- defradb client p2p collection remove bae123,bae456
-
-
-```
-defradb client p2p collection remove [collectionIDs] [flags]
-```
-
-### Options
-
-```
- -h, --help help for remove
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system
-
diff --git a/docs/cli/defradb_client_p2p_info.md b/docs/cli/defradb_client_p2p_info.md
deleted file mode 100644
index 385780ad3d..0000000000
--- a/docs/cli/defradb_client_p2p_info.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## defradb client p2p info
-
-Get peer info from a DefraDB node
-
-### Synopsis
-
-Get peer info from a DefraDB node
-
-```
-defradb client p2p info [flags]
-```
-
-### Options
-
-```
- -h, --help help for info
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system
-
diff --git a/docs/cli/defradb_client_p2p_replicator.md b/docs/cli/defradb_client_p2p_replicator.md
deleted file mode 100644
index b9d5b561c7..0000000000
--- a/docs/cli/defradb_client_p2p_replicator.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## defradb client p2p replicator
-
-Configure the replicator system
-
-### Synopsis
-
-Configure the replicator system. Add, delete, or get the list of persisted replicators.
-A replicator replicates one or all collection(s) from one node to another.
-
-### Options
-
-```
- -h, --help help for replicator
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system
-* [defradb client p2p replicator delete](defradb_client_p2p_replicator_delete.md) - Delete replicator(s) and stop synchronization
-* [defradb client p2p replicator getall](defradb_client_p2p_replicator_getall.md) - Get all replicators
-* [defradb client p2p replicator set](defradb_client_p2p_replicator_set.md) - Add replicator(s) and start synchronization
-
diff --git a/docs/cli/defradb_client_p2p_replicator_delete.md b/docs/cli/defradb_client_p2p_replicator_delete.md
deleted file mode 100644
index 93e5ff6d95..0000000000
--- a/docs/cli/defradb_client_p2p_replicator_delete.md
+++ /dev/null
@@ -1,53 +0,0 @@
-## defradb client p2p replicator delete
-
-Delete replicator(s) and stop synchronization
-
-### Synopsis
-
-Delete replicator(s) and stop synchronization.
-A replicator synchronizes one or all collection(s) from this node to another.
-
-Example:
- defradb client p2p replicator delete -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}'
-
-
-```
-defradb client p2p replicator delete [-c, --collection] [flags]
-```
-
-### Options
-
-```
- -c, --collection strings Collection(s) to stop replicating
- -h, --help help for delete
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system
-
diff --git a/docs/cli/defradb_client_p2p_replicator_getall.md b/docs/cli/defradb_client_p2p_replicator_getall.md
deleted file mode 100644
index cc9cc1ed63..0000000000
--- a/docs/cli/defradb_client_p2p_replicator_getall.md
+++ /dev/null
@@ -1,52 +0,0 @@
-## defradb client p2p replicator getall
-
-Get all replicators
-
-### Synopsis
-
-Get all the replicators active in the P2P data sync system.
-A replicator synchronizes one or all collection(s) from this node to another.
-
-Example:
- defradb client p2p replicator getall
-
-
-```
-defradb client p2p replicator getall [flags]
-```
-
-### Options
-
-```
- -h, --help help for getall
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system
-
diff --git a/docs/cli/defradb_client_p2p_replicator_set.md b/docs/cli/defradb_client_p2p_replicator_set.md
deleted file mode 100644
index 4fbc980a7c..0000000000
--- a/docs/cli/defradb_client_p2p_replicator_set.md
+++ /dev/null
@@ -1,53 +0,0 @@
-## defradb client p2p replicator set
-
-Add replicator(s) and start synchronization
-
-### Synopsis
-
-Add replicator(s) and start synchronization.
-A replicator synchronizes one or all collection(s) from this node to another.
-
-Example:
- defradb client p2p replicator set -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}'
-
-
-```
-defradb client p2p replicator set [-c, --collection] [flags]
-```
-
-### Options
-
-```
- -c, --collection strings Collection(s) to replicate
- -h, --help help for set
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system
-
diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md
deleted file mode 100644
index 493acca2d4..0000000000
--- a/docs/cli/defradb_client_query.md
+++ /dev/null
@@ -1,65 +0,0 @@
-## defradb client query
-
-Send a DefraDB GraphQL query request
-
-### Synopsis
-
-Send a DefraDB GraphQL query request to the database.
-
-A query request can be sent as a single argument. Example command:
- defradb client query 'query { ... }'
-
-Do a query request from a file by using the '-f' flag. Example command:
- defradb client query -f request.graphql
-
-Do a query request from a file and with an identity. Example command:
- defradb client query -i cosmos1f2djr7dl9vhrk3twt3xwqp09nhtzec9mdkf70j -f request.graphql
-
-Or it can be sent via stdin by using the '-' special syntax. Example command:
- cat request.graphql | defradb client query -
-
-A GraphQL client such as GraphiQL (https://github.com/graphql/graphiql) can be used to interact
-with the database more conveniently.
-
-To learn more about the DefraDB GraphQL Query Language, refer to https://docs.source.network.
-
-```
-defradb client query [-i --identity] [request] [flags]
-```
-
-### Options
-
-```
- -f, --file string File containing the query request
- -h, --help help for query
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-
diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md
deleted file mode 100644
index 2e144a89e6..0000000000
--- a/docs/cli/defradb_client_schema.md
+++ /dev/null
@@ -1,48 +0,0 @@
-## defradb client schema
-
-Interact with the schema system of a DefraDB node
-
-### Synopsis
-
-Make changes, updates, or look for existing schema types.
-
-### Options
-
-```
- -h, --help help for schema
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-* [defradb client schema add](defradb_client_schema_add.md) - Add new schema
-* [defradb client schema describe](defradb_client_schema_describe.md) - View schema descriptions.
-* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance
-* [defradb client schema patch](defradb_client_schema_patch.md) - Patch an existing schema type
-* [defradb client schema set-active](defradb_client_schema_set-active.md) - Set the active collection version
-
diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md
deleted file mode 100644
index 0ff3f683f4..0000000000
--- a/docs/cli/defradb_client_schema_add.md
+++ /dev/null
@@ -1,64 +0,0 @@
-## defradb client schema add
-
-Add new schema
-
-### Synopsis
-
-Add new schema.
-
-Schema Object with a '@policy(id:".." resource: "..")' linked will only be accepted if:
- - ACP is available (i.e. ACP is not disabled).
- - The specified resource adheres to the Document Access Control DPI Rules.
- - Learn more about [ACP & DPI Rules](/acp/README.md)
-
-Example: add from an argument string:
- defradb client schema add 'type Foo { ... }'
-
-Example: add from file:
- defradb client schema add -f schema.graphql
-
-Example: add from stdin:
- cat schema.graphql | defradb client schema add -
-
-Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.
-
-```
-defradb client schema add [schema] [flags]
-```
-
-### Options
-
-```
- -f, --file string File to load a schema from
- -h, --help help for add
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node
-
diff --git a/docs/cli/defradb_client_schema_describe.md b/docs/cli/defradb_client_schema_describe.md
deleted file mode 100644
index 0b28a1e64e..0000000000
--- a/docs/cli/defradb_client_schema_describe.md
+++ /dev/null
@@ -1,63 +0,0 @@
-## defradb client schema describe
-
-View schema descriptions.
-
-### Synopsis
-
-Introspect schema types.
-
-Example: view all schemas
- defradb client schema describe
-
-Example: view schemas by name
- defradb client schema describe --name User
-
-Example: view schemas by root
- defradb client schema describe --root bae123
-
-Example: view a single schema by version id
- defradb client schema describe --version bae123
-
-
-```
-defradb client schema describe [flags]
-```
-
-### Options
-
-```
- -h, --help help for describe
- --name string Schema name
- --root string Schema root
- --version string Schema Version ID
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node
-
diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/cli/defradb_client_schema_migration.md
deleted file mode 100644
index c339763571..0000000000
--- a/docs/cli/defradb_client_schema_migration.md
+++ /dev/null
@@ -1,48 +0,0 @@
-## defradb client schema migration
-
-Interact with the schema migration system of a running DefraDB instance
-
-### Synopsis
-
-Make set or look for existing schema migrations on a DefraDB node.
-
-### Options
-
-```
- -h, --help help for migration
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node
-* [defradb client schema migration down](defradb_client_schema_migration_down.md) - Reverses the migration to the specified collection version.
-* [defradb client schema migration reload](defradb_client_schema_migration_reload.md) - Reload the schema migrations within DefraDB
-* [defradb client schema migration set](defradb_client_schema_migration_set.md) - Set a schema migration within DefraDB
-* [defradb client schema migration set-registry](defradb_client_schema_migration_set-registry.md) - Set a schema migration within the DefraDB LensRegistry
-* [defradb client schema migration up](defradb_client_schema_migration_up.md) - Applies the migration to the specified collection version.
-
diff --git a/docs/cli/defradb_client_schema_migration_down.md b/docs/cli/defradb_client_schema_migration_down.md
deleted file mode 100644
index f741f5bec9..0000000000
--- a/docs/cli/defradb_client_schema_migration_down.md
+++ /dev/null
@@ -1,60 +0,0 @@
-## defradb client schema migration down
-
-Reverses the migration to the specified collection version.
-
-### Synopsis
-
-Reverses the migration to the specified collection version.
-Documents is a list of documents to reverse the migration from.
-
-Example: migrate from string
- defradb client schema migration down --collection 2 '[{"name": "Bob"}]'
-
-Example: migrate from file
- defradb client schema migration down --collection 2 -f documents.json
-
-Example: migrate from stdin
- cat documents.json | defradb client schema migration down --collection 2 -
-
-
-```
-defradb client schema migration down --collection [flags]
-```
-
-### Options
-
-```
- --collection uint32 Collection id
- -f, --file string File containing document(s)
- -h, --help help for down
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance
-
diff --git a/docs/cli/defradb_client_schema_migration_reload.md b/docs/cli/defradb_client_schema_migration_reload.md
deleted file mode 100644
index 8a1d8480c0..0000000000
--- a/docs/cli/defradb_client_schema_migration_reload.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## defradb client schema migration reload
-
-Reload the schema migrations within DefraDB
-
-### Synopsis
-
-Reload the schema migrations within DefraDB
-
-```
-defradb client schema migration reload [flags]
-```
-
-### Options
-
-```
- -h, --help help for reload
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance
-
diff --git a/docs/cli/defradb_client_schema_migration_set-registry.md b/docs/cli/defradb_client_schema_migration_set-registry.md
deleted file mode 100644
index ebb4c625c7..0000000000
--- a/docs/cli/defradb_client_schema_migration_set-registry.md
+++ /dev/null
@@ -1,53 +0,0 @@
-## defradb client schema migration set-registry
-
-Set a schema migration within the DefraDB LensRegistry
-
-### Synopsis
-
-Set a migration to a collection within the LensRegistry of the local DefraDB node.
-Does not persist the migration after restart.
-
-Example: set from an argument string:
- defradb client schema migration set-registry 2 '{"lenses": [...'
-
-Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.
-
-```
-defradb client schema migration set-registry [collectionID] [cfg] [flags]
-```
-
-### Options
-
-```
- -h, --help help for set-registry
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance
-
diff --git a/docs/cli/defradb_client_schema_migration_set.md b/docs/cli/defradb_client_schema_migration_set.md
deleted file mode 100644
index 8386fd8369..0000000000
--- a/docs/cli/defradb_client_schema_migration_set.md
+++ /dev/null
@@ -1,60 +0,0 @@
-## defradb client schema migration set
-
-Set a schema migration within DefraDB
-
-### Synopsis
-
-Set a migration from a source schema version to a destination schema version for
-all collections that are on the given source schema version within the local DefraDB node.
-
-Example: set from an argument string:
- defradb client schema migration set bae123 bae456 '{"lenses": [...'
-
-Example: set from file:
- defradb client schema migration set bae123 bae456 -f schema_migration.lens
-
-Example: add from stdin:
- cat schema_migration.lens | defradb client schema migration set bae123 bae456 -
-
-Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.
-
-```
-defradb client schema migration set [src] [dst] [cfg] [flags]
-```
-
-### Options
-
-```
- -f, --file string Lens configuration file
- -h, --help help for set
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance
-
diff --git a/docs/cli/defradb_client_schema_migration_up.md b/docs/cli/defradb_client_schema_migration_up.md
deleted file mode 100644
index b55ace45ad..0000000000
--- a/docs/cli/defradb_client_schema_migration_up.md
+++ /dev/null
@@ -1,60 +0,0 @@
-## defradb client schema migration up
-
-Applies the migration to the specified collection version.
-
-### Synopsis
-
-Applies the migration to the specified collection version.
-Documents is a list of documents to apply the migration to.
-
-Example: migrate from string
- defradb client schema migration up --collection 2 '[{"name": "Bob"}]'
-
-Example: migrate from file
- defradb client schema migration up --collection 2 -f documents.json
-
-Example: migrate from stdin
- cat documents.json | defradb client schema migration up --collection 2 -
-
-
-```
-defradb client schema migration up --collection [flags]
-```
-
-### Options
-
-```
- --collection uint32 Collection id
- -f, --file string File containing document(s)
- -h, --help help for up
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance
-
diff --git a/docs/cli/defradb_client_schema_patch.md b/docs/cli/defradb_client_schema_patch.md
deleted file mode 100644
index 7d16e632ae..0000000000
--- a/docs/cli/defradb_client_schema_patch.md
+++ /dev/null
@@ -1,63 +0,0 @@
-## defradb client schema patch
-
-Patch an existing schema type
-
-### Synopsis
-
-Patch an existing schema.
-
-Uses JSON Patch to modify schema types.
-
-Example: patch from an argument string:
- defradb client schema patch '[{ "op": "add", "path": "...", "value": {...} }]' '{"lenses": [...'
-
-Example: patch from file:
- defradb client schema patch -p patch.json
-
-Example: patch from stdin:
- cat patch.json | defradb client schema patch -
-
-To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network.
-
-```
-defradb client schema patch [schema] [migration] [flags]
-```
-
-### Options
-
-```
- -h, --help help for patch
- -t, --lens-file string File to load a lens config from
- -p, --patch-file string File to load a patch from
- --set-active Set the active schema version for all collections using the root schem
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node
-
diff --git a/docs/cli/defradb_client_schema_set-active.md b/docs/cli/defradb_client_schema_set-active.md
deleted file mode 100644
index 7f7b4f4cd5..0000000000
--- a/docs/cli/defradb_client_schema_set-active.md
+++ /dev/null
@@ -1,48 +0,0 @@
-## defradb client schema set-active
-
-Set the active collection version
-
-### Synopsis
-
-Activates all collection versions with the given schema version, and deactivates all
-those without it (if they share the same schema root).
-
-```
-defradb client schema set-active [versionID] [flags]
-```
-
-### Options
-
-```
- -h, --help help for set-active
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node
-
diff --git a/docs/cli/defradb_client_tx.md b/docs/cli/defradb_client_tx.md
deleted file mode 100644
index 67bf63e2df..0000000000
--- a/docs/cli/defradb_client_tx.md
+++ /dev/null
@@ -1,46 +0,0 @@
-## defradb client tx
-
-Create, commit, and discard DefraDB transactions
-
-### Synopsis
-
-Create, commit, and discard DefraDB transactions
-
-### Options
-
-```
- -h, --help help for tx
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-* [defradb client tx commit](defradb_client_tx_commit.md) - Commit a DefraDB transaction.
-* [defradb client tx create](defradb_client_tx_create.md) - Create a new DefraDB transaction.
-* [defradb client tx discard](defradb_client_tx_discard.md) - Discard a DefraDB transaction.
-
diff --git a/docs/cli/defradb_client_tx_commit.md b/docs/cli/defradb_client_tx_commit.md
deleted file mode 100644
index eba408dc57..0000000000
--- a/docs/cli/defradb_client_tx_commit.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## defradb client tx commit
-
-Commit a DefraDB transaction.
-
-### Synopsis
-
-Commit a DefraDB transaction.
-
-```
-defradb client tx commit [id] [flags]
-```
-
-### Options
-
-```
- -h, --help help for commit
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions
-
diff --git a/docs/cli/defradb_client_tx_create.md b/docs/cli/defradb_client_tx_create.md
deleted file mode 100644
index 26668e6ad2..0000000000
--- a/docs/cli/defradb_client_tx_create.md
+++ /dev/null
@@ -1,49 +0,0 @@
-## defradb client tx create
-
-Create a new DefraDB transaction.
-
-### Synopsis
-
-Create a new DefraDB transaction.
-
-```
-defradb client tx create [flags]
-```
-
-### Options
-
-```
- --concurrent Transaction is concurrent
- -h, --help help for create
- --read-only Transaction is read only
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions
-
diff --git a/docs/cli/defradb_client_tx_discard.md b/docs/cli/defradb_client_tx_discard.md
deleted file mode 100644
index 3989bc4c05..0000000000
--- a/docs/cli/defradb_client_tx_discard.md
+++ /dev/null
@@ -1,47 +0,0 @@
-## defradb client tx discard
-
-Discard a DefraDB transaction.
-
-### Synopsis
-
-Discard a DefraDB transaction.
-
-```
-defradb client tx discard [id] [flags]
-```
-
-### Options
-
-```
- -h, --help help for discard
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions
-
diff --git a/docs/cli/defradb_client_view.md b/docs/cli/defradb_client_view.md
deleted file mode 100644
index 09c5bab11b..0000000000
--- a/docs/cli/defradb_client_view.md
+++ /dev/null
@@ -1,44 +0,0 @@
-## defradb client view
-
-Manage views within a running DefraDB instance
-
-### Synopsis
-
-Manage (add) views withing a running DefraDB instance
-
-### Options
-
-```
- -h, --help help for view
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client](defradb_client.md) - Interact with a DefraDB node
-* [defradb client view add](defradb_client_view_add.md) - Add new view
-
diff --git a/docs/cli/defradb_client_view_add.md b/docs/cli/defradb_client_view_add.md
deleted file mode 100644
index b671d8290c..0000000000
--- a/docs/cli/defradb_client_view_add.md
+++ /dev/null
@@ -1,53 +0,0 @@
-## defradb client view add
-
-Add new view
-
-### Synopsis
-
-Add new database view.
-
-Example: add from an argument string:
- defradb client view add 'Foo { name, ...}' 'type Foo { ... }' '{"lenses": [...'
-
-Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.
-
-```
-defradb client view add [query] [sdl] [transform] [flags]
-```
-
-### Options
-
-```
- -f, --file string Lens configuration file
- -h, --help help for add
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- -i, --identity string ACP Identity
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --tx uint Transaction ID
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb client view](defradb_client_view.md) - Manage views within a running DefraDB instance
-
diff --git a/docs/cli/defradb_server-dump.md b/docs/cli/defradb_server-dump.md
deleted file mode 100644
index 3651d32e9c..0000000000
--- a/docs/cli/defradb_server-dump.md
+++ /dev/null
@@ -1,41 +0,0 @@
-## defradb server-dump
-
-Dumps the state of the entire database
-
-```
-defradb server-dump [flags]
-```
-
-### Options
-
-```
- -h, --help help for server-dump
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb](defradb.md) - DefraDB Edge Database
-
diff --git a/docs/cli/defradb_start.md b/docs/cli/defradb_start.md
deleted file mode 100644
index e0f732cb04..0000000000
--- a/docs/cli/defradb_start.md
+++ /dev/null
@@ -1,45 +0,0 @@
-## defradb start
-
-Start a DefraDB node
-
-### Synopsis
-
-Start a DefraDB node.
-
-```
-defradb start [flags]
-```
-
-### Options
-
-```
- -h, --help help for start
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb](defradb.md) - DefraDB Edge Database
-
diff --git a/docs/cli/defradb_version.md b/docs/cli/defradb_version.md
deleted file mode 100644
index b4693fddbf..0000000000
--- a/docs/cli/defradb_version.md
+++ /dev/null
@@ -1,43 +0,0 @@
-## defradb version
-
-Display the version information of DefraDB and its components
-
-```
-defradb version [flags]
-```
-
-### Options
-
-```
- -f, --format string Version output format. Options are text, json
- --full Display the full version information
- -h, --help help for version
-```
-
-### Options inherited from parent commands
-
-```
- --allowed-origins stringArray List of origins to allow for CORS requests
- --log-format string Log format to use. Options are text or json (default "text")
- --log-level string Log level to use. Options are debug, info, error, fatal (default "info")
- --log-no-color Disable colored log output
- --log-output string Log output path. Options are stderr or stdout. (default "stderr")
- --log-overrides string Logger config overrides. Format ,=,...;,...
- --log-source Include source location in logs
- --log-stacktrace Include stacktrace in error and fatal logs
- --max-txn-retries int Specify the maximum number of retries per transaction (default 5)
- --no-p2p Disable the peer-to-peer network synchronization system
- --p2paddr strings Listen addresses for the p2p network (formatted as a libp2p MultiAddr) (default [/ip4/127.0.0.1/tcp/9171])
- --peers stringArray List of peers to connect to
- --privkeypath string Path to the private key for tls
- --pubkeypath string Path to the public key for tls
- --rootdir string Directory for persistent data (default: $HOME/.defradb)
- --store string Specify the datastore to use (supported: badger, memory) (default "badger")
- --url string URL of HTTP endpoint to listen on or connect to (default "127.0.0.1:9181")
- --valuelogfilesize int Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1073741824)
-```
-
-### SEE ALSO
-
-* [defradb](defradb.md) - DefraDB Edge Database
-
diff --git a/docs/config.md b/docs/config.md
index da46700bb7..ca69d6afd2 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -89,6 +89,34 @@ Include source location in logs. Defaults to `false`.
Logger config overrides. Format `,=,...;,...`.
-## `log.nocolor`
+## `log.colordisabled`
Disable colored log output. Defaults to `false`.
+
+## `keyring.path`
+
+Path to store encrypted key files in. Defaults to `keys`.
+
+## `keyring.disabled`
+
+Disable the keyring and generate ephemeral keys instead. Defaults to `false`.
+
+## `keyring.namespace`
+
+The service name to use when using the system keyring. Defaults to `defradb`.
+
+## `keyring.backend`
+
+Keyring backend to use. Defaults to `file`.
+
+- `file` Stores keys in encrypted files
+- `system` Stores keys in the OS managed keyring
+
+## `lens.runtime`
+
+The LensVM wasm runtime to run lens modules in.
+
+Possible values:
+- `wasm-time` (default): https://github.com/bytecodealliance/wasmtime-go
+- `wasmer` (windows not supported): https://github.com/wasmerio/wasmer-go
+- `wazero`: https://github.com/tetratelabs/wazero
diff --git a/docs/data_format_changes/i2603-ipld-protobuf-to-cbor.md b/docs/data_format_changes/i2603-ipld-protobuf-to-cbor.md
new file mode 100644
index 0000000000..574762a1a7
--- /dev/null
+++ b/docs/data_format_changes/i2603-ipld-protobuf-to-cbor.md
@@ -0,0 +1,3 @@
+# Change encoding from protobuf to cbor and use the new IPLD schema
+
+The DAG blocks are now encoded using CBOR instead of protobuf and we use the new `github.com/ipld/go-ipld-prime` package to handle block encoding and decoding. It makes use of the new IPLD schema to define the block structure.
\ No newline at end of file
diff --git a/docs/data_format_changes/i2670-sec-index-on-relations.md b/docs/data_format_changes/i2670-sec-index-on-relations.md
new file mode 100644
index 0000000000..4f56429166
--- /dev/null
+++ b/docs/data_format_changes/i2670-sec-index-on-relations.md
@@ -0,0 +1,3 @@
+# Enable secondary index on relations
+
+This naturally caused some explain metrics to change, and the change detector complained about it.
\ No newline at end of file
diff --git a/docs/data_format_changes/i2688-doc-id-includes-schema.md b/docs/data_format_changes/i2688-doc-id-includes-schema.md
new file mode 100644
index 0000000000..caca83656c
--- /dev/null
+++ b/docs/data_format_changes/i2688-doc-id-includes-schema.md
@@ -0,0 +1,3 @@
+# Incorporate schema root into docID
+
+DocID generation now incorporates schema root, meaning documents created against different schema will no longer clash. This change also means that all the docIDs and commit CIDs have changed.
diff --git a/docs/data_format_changes/i2696-support-encryption-for-counters.md b/docs/data_format_changes/i2696-support-encryption-for-counters.md
new file mode 100644
index 0000000000..dd53e57898
--- /dev/null
+++ b/docs/data_format_changes/i2696-support-encryption-for-counters.md
@@ -0,0 +1,2 @@
+# Support encryption for counters
+We changed the data format of counters from int64 and float64 to bytes to support encryption. This changes the generated CIDs for counters.
\ No newline at end of file
diff --git a/docs/data_format_changes/i2746-use-node-representation.md b/docs/data_format_changes/i2746-use-node-representation.md
new file mode 100644
index 0000000000..c701aed809
--- /dev/null
+++ b/docs/data_format_changes/i2746-use-node-representation.md
@@ -0,0 +1,2 @@
+# Use node representation
+To enable the use of optional IPLD schema fields, we change to using the node representation when saving with the link system.
\ No newline at end of file
diff --git a/docs/website/BSL-License.md b/docs/website/BSL-License.md
new file mode 100644
index 0000000000..a5018a4ef8
--- /dev/null
+++ b/docs/website/BSL-License.md
@@ -0,0 +1,80 @@
+---
+sidebar_position: 7
+title: BSL 1.1 License
+---
+
+We are pleased to announce that Source is transitioning its source-code license from Apache 2.0 to the Business Source License version (BSL 1.1). This strategic move empowers Source to exercise greater control over its source code's commercialization while providing open access to the community.
+
+The timing for this shift is perfect because our latest release brings revolutionary design changes. At this stage of development, it is typical for some commercial users to fork the software for commercial gains. This move is not beneficial to our community. Moreover, it can result in a jungle of forks, brand dilution, confused users, and code fragmentation. Thus, we must make this change right now to protect our community and developers.
+
+As you know, DefraDB is a community-driven and developer-centric project. We strongly value our community's active participation and feedback. We genuinely seek your valuable insights to explore and expand the possibilities of the Additional Use Grant in alignment with our shared vision. Together, we can shape the future of DefraDB to serve the needs of our collective community better.
+
+At our mission's core, we are steadfast in promoting fairness and social good. Similarly, security, interpretability, user data portability, privacy, democratization, and the long-lasting vitality of the network are of prime importance to us.
+
+
+## BSL Overview
+
+Here is a brief BSL introduction and key points of our licensing terms.
+
+### Brief History of BSL
+The founders of MySQL and MariaDB introduced BSL. They implemented it first in 2013 for MariaDB products. In 2017, they updated the license and further refined it to version 1.1, benefiting from valuable insights and guidance from Bruce Perens, the co-founder of the Open-Source Initiative (OSI).
+
+### Other Open-Source Companies Using Commercial Licenses
+
+It's worth mentioning that providing some sort of commercial licensing is gaining attention and adoption in the open-source community. Today, many notable companies use these licenses for their specific software products. For example, MariaDB, WP Engine (WordPress), MySQL AB (MySQL), Canonical (Ubuntu), Mozilla Corporation (Firefox), Docker, SUSE (Linux Kernel), GitLab (Git), JetBrains (IntelliJ), GitHub (Git), Red Hat (Linux), and Redis Labs are among them.
+
+### What Are the provisions of BSL?
+
+The license design provides a balance between open-source principles and commercial interests. It allows companies to maintain control over the commercialization of their source code while providing necessary access to the community.
+
+## Key Points of BSL 1.1
+* The standard term for the BSL is four years, providing time for DefraDB to gain support and stability before broader adoption.
+* The standard BSL allows copying, modifying, redistributing, and non-production use of the source code.
+
+### Production Use Criteria
+As you can see, the non-production use of the source code does not benefit the community's financial sustainability. Thus, to allow running DefraDB in production, we created the following four criteria under the Additional Use Grant Conditions. In summary, the production use is allowed if:
+1. The application is connected to the Source Hub Mainnet, with ongoing support through the OPEN utility token.
+2. The application is connected to any sidechain protocol integrated with the Source Hub Mainnet.
+3. The applications use the Source Network Access Control Policy on any public protocols.
+4. The project is a non-profit project.
+If none of the above criteria apply to your use case, you can obtain commercial licensing by contacting Source.
+
+### How Will This Change Benefit DefraDB?
+
+BSL offers compelling advantages for us.
+1. It allows us to maintain a balance between openness and commercial viability.
+2. By providing a time-limited license that reverts to open-source after a specified period, BSL allows us to benefit from community collaboration.
+3. It also safeguards our ability to monetize and protect our IP. It balances source code accessibility and commercial interests.
+4. It also enables transparency and trust while preserving proprietary innovations. The defined roadmap for licensing transitions provides predictability, aligning development efforts and business objectives. Endorsement by influential figures in the open-source community enhances credibility and reputation.
+5. Adopting BSL demonstrates our commitment to collaboration and innovation. It will attract developers and foster partnerships. Overall, BSL combines the best elements of open source with safeguards for commercial success. It will empower DefraDB to thrive, innovate, and position itself as a leader in the open-source ecosystem.
+
+### How Will This Change Benefit Our Community?
+
+This move also brings compelling benefits to our open-source community. It can create a thriving environment for collaboration and innovation. Here's why BSL is advantageous to our community:
+1. Source Code Access: BSL ensures transparency by making the source code readily available. It fosters trust and empowers developers to understand, enhance, and improve the software.
+2. Collaborative Community: BSL encourages active participation. It enables developers to contribute their expertise, ideas, and enhancements. This collective effort drives continuous innovation and creates a supportive network within the community.
+3. Increased Adoption: BSL's balanced licensing structure will promote wider adoption of DefraDB. Aligning openness with commercial viability attracts organizations to embrace and distribute the software, expanding its reach and impact.
+4. Protection of Open-Source Values: BSL maintains the core principles of open source. At the same time, it also acknowledges the need for sustainable commercial models. It strikes a crucial balance that safeguards developers' and companies' interests.
+By choosing BSL, we can contribute to the growth and advancement of our project. Thus, it will enable our vibrant community to thrive more.
+
+## Frequently Asked Questions
+
+### Q: What Distinguishes the BSL From Other Licenses Such As AGPL, SSPL, Or the Inclusion of The Common Clause in Agreements?
+BSL 1.1 stands apart due to its unique characteristic as a time-limited license. It reverts to an open-source license (specifically Apache) after four years. Conversely, the other mentioned options impose permanent restrictions on specific usage scenarios. Our approach aims to strike an optimal balance between ensuring the availability of our source code and supporting the developer community. It also safeguards our capacity to commercialize and provide support for it.
+
+### Q: Why Make the Change Now?
+Our current release is a revolutionary one. It introduces captivating design changes that could lead to third-party forks of the Source code. It allows them to create commercial derivatives without contributing to the developer community. In the best interest of the community and our customers, we believe it is crucial to avoid such an outcome.
+### Q: After Four Years, Can I Develop My Own Commercial Product Under the Apache 2.0 license?
+Absolutely, if that is your intention.
+
+### Q: Is Source Still Committed to Being an Open-Source Company?
+Absolutely, yes. We continue to license a significant portion of our source code under approved open-source licenses for specific uses. We remain dedicated to servicing various open-source projects related to Source. Furthermore, the BSL only imposes restrictions on the commercialization of our source code. Lastly, after four years, the source code automatically converts to an OSI-approved license (Apache 2.0), further solidifying our commitment to open-source principles.
+
+### Q: Which of Your Products Will Be Under The BSL?
+Currently, BSL 1.1 is only for DefraDB. However, if, in the future, we need to update the licensing of any of our other projects, we will inform the community accordingly.
+
+### Q: Can I Use DefraDB Licensed Under BSL In a Test and Development Environment?
+Yes, you can use DefraDB licensed under BSL in a non-production test and development environment without requiring a subscription from us.
+
+### Q: Can I Apply BSL To My Own Software?
+Yes, you can utilize the BSL framework for your software if you hold the copyright or if the software has a permissive license, such as BSD. Converting your software to BSL involves adding the BSL header to all software files and including the BSL license file in your software distribution. You also need to specify an Additional Use Grant (or declare its absence) and a Change Date suitable for your software in the header of the BSL license file.
diff --git a/docs/website/README.md b/docs/website/README.md
new file mode 100644
index 0000000000..f02723dc69
--- /dev/null
+++ b/docs/website/README.md
@@ -0,0 +1,5 @@
+# Website documentation
+
+This directory contains the documentation that is displayed on our documentation website https://docs.source.network/.
+
+The structure of this directory and its children should match that of the website.
diff --git a/docs/website/concepts/_category_.json b/docs/website/concepts/_category_.json
new file mode 100644
index 0000000000..fccb23e53b
--- /dev/null
+++ b/docs/website/concepts/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Concepts",
+ "position": 3
+ }
+
\ No newline at end of file
diff --git a/docs/website/concepts/ipfs.md b/docs/website/concepts/ipfs.md
new file mode 100644
index 0000000000..5d643d3c61
--- /dev/null
+++ b/docs/website/concepts/ipfs.md
@@ -0,0 +1,31 @@
+---
+title: IPFS
+---
+
+## Overview
+
+IPFS is a decentralized system to access websites, applications, files, and data using content addressing. IPFS stands for **InterPlanetary File System**. The fundamental idea underlying this technology is to change the way a network of people and computers can exchange information amongst themselves.
+
+## Key Features
+
+- Distributed/decentralized system
+- Uses content addressing
+- Participation
+
+A decentralized system lets you access information or a file from multiple locations, which aren't managed by a single organization. The pros of decentralization are: access to data from multiple locations, the ability to bypass content censorship, and faster file transfers.
+
+IPFS addresses a file by its content instead of its location. A content identifier is the cryptographic hash of the content at that address. It is unique to the content it came from and permits you to verify that you got what you requested.
+
+For IPFS to work well, active participation of people is necessary. If you are sharing files using IPFS, you need to have copies of the shared files available on multiple computers, which are powered on and running IPFS. In a nutshell, many people provide access to each other's files and participate in making them available when requested. Note that if you have downloaded a file using IPFS, by default your computer will share it with other participants as well.
+
+## How Does it Work?
+
+As discussed earlier, IPFS is a p2p (peer-to-peer) storage network. The IPFS ecosystem works with the following fundamental principles.
+
+1. Unique identification via content addressing
+2. Content linking via directed acyclic graphs (DAGs)
+3. Content discovery via distributed hash tables (DHTs)
+
+## Suggested Reading
+
+For more in-depth knowledge of the IPFS system refer to the [IPFS Conceptual documentation](https://docs.ipfs.io/concepts/).
diff --git a/docs/website/concepts/libp2p.md b/docs/website/concepts/libp2p.md
new file mode 100644
index 0000000000..ace7366fa5
--- /dev/null
+++ b/docs/website/concepts/libp2p.md
@@ -0,0 +1,24 @@
+# libp2p
+## Overview
+
+libp2p is a modular system which helps in the development of peer-to-peer network applications. The system comprises protocols, specifications, and libraries.
+
+## What is Peer-to-peer?
+
+Most commonly used peer-to-peer applications include file sharing networks like bittorrent (used to download movies, files) and the recent uptrend of blockchain networks. Both these network types communicate in a peer-to-peer method.
+
+In a p2p network, participants (also known as nodes or peers) communicate with each other directly rather than using a **server** like the client/server model of data transfer.
+
+# Problems Solved by libp2p
+
+Of the many problems, the major ones which libp2p addresses include:
+- Transport
+- Identity
+- Security
+- Peer Routing
+- Content Routing
+- Messaging/PubSub
+
+## Suggested Reading
+
+For more in-depth knowledge of the libp2p system refer to the [libp2p Conceptual documentation](https://docs.libp2p.io/concepts/).
\ No newline at end of file
diff --git a/docs/website/getting-started.md b/docs/website/getting-started.md
new file mode 100644
index 0000000000..739f27b893
--- /dev/null
+++ b/docs/website/getting-started.md
@@ -0,0 +1,353 @@
+---
+sidebar_position: 1
+title: Getting Started
+slug: /
+---
+
+DefraDB is a user-centric database that prioritizes data ownership, personal privacy, and information security. Its data model, powered by the convergence of [MerkleCRDTs](https://arxiv.org/pdf/2004.00107.pdf) and the content-addressability of [IPLD](https://docs.ipld.io/), enables a multi-write-master architecture. It features [DQL](./references/query-specification/query-language-overview.md), a query language compatible with GraphQL but providing extra convenience. By leveraging peer-to-peer networking it can be deployed nimbly in novel topologies. Access control is determined by a relationship-based DSL, supporting document or field-level policies, secured by the SourceHub network. DefraDB is a core part of the [Source technologies](https://source.network/) that enable new paradigms of decentralized data and access-control management, user-centric apps, data trustworthiness, and much more.
+
+DISCLAIMER: At this early stage, DefraDB does not offer access control or data encryption, and the default configuration exposes the database to the network. The software is provided "as is" and is not guaranteed to be stable, secure, or error-free. We encourage you to experiment with DefraDB and provide feedback, but please do not use it for production purposes until it has been thoroughly tested and developed.
+
+## Install
+
+Install `defradb` by [downloading an executable](https://github.com/sourcenetwork/defradb/releases) or building it locally using the [Go toolchain](https://golang.org/):
+
+```shell
+git clone git@github.com:sourcenetwork/defradb.git
+cd defradb
+make install
+```
+
+In the following sections, we assume that `defradb` is included in your `PATH`. If you installed it with the Go toolchain, use:
+
+```shell
+export PATH=$PATH:$(go env GOPATH)/bin
+```
+
+We recommend experimenting with queries using a native GraphQL client. Altair is a popular option - [download and install it](https://altairgraphql.dev/#download).
+
+## Start
+
+Start a node by executing `defradb start`. Keep the node running while going through the following examples.
+
+Verify the local connection to the node works by executing `defradb client ping` in another terminal.
+
+## Configuration
+
+In this document, we use the default configuration, which has the following behavior:
+
+- `~/.defradb/` is DefraDB's configuration and data directory
+- `client` command interacts with the locally running node
+- The GraphQL endpoint is provided at `http://localhost:9181/api/v0`
+
+The GraphQL endpoint can be used with a GraphQL client (e.g., Altair) to conveniently perform requests (`query`, `mutation`) and obtain schema introspection.
+
+## Add a schema type
+
+Schemas are used to structure documents using a type system.
+
+In the following examples, we'll be using a simple `User` schema type.
+
+Add it to the database with the following command. By doing so, DefraDB generates the typed GraphQL endpoints for querying, mutation, and introspection.
+
+```shell
+defradb client schema add '
+ type User {
+ name: String
+ age: Int
+ verified: Boolean
+ points: Float
+ }
+'
+```
+
+Find more examples of schema type definitions in the [examples/schema/](https://github.com/sourcenetwork/defradb/examples/schema/) folder.
+
+## Create a document
+
+Submit a `mutation` request to create a document of the `User` type:
+
+```shell
+defradb client query '
+ mutation {
+ create_User(input: {age: 31, verified: true, points: 90, name: "Bob"}) {
+ _key
+ }
+ }
+'
+```
+
+Expected response:
+
+```json
+{
+ "data": [
+ {
+ "_key": "bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab"
+ }
+ ]
+}
+```
+
+`_key` is the document's key, a unique identifier of the document, determined by its schema and initial data.
+
+## Query documents
+
+Once you have populated your node with data, you can query it:
+
+```shell
+defradb client query '
+ query {
+ User {
+ _key
+ age
+ name
+ points
+ }
+ }
+'
+```
+
+This query obtains *all* users and returns their fields `_key, age, name, points`. GraphQL queries only return the exact fields requested.
+
+You can further filter results with the `filter` argument.
+
+```shell
+defradb client query '
+ query {
+ User(filter: {points: {_ge: 50}}) {
+ _key
+ age
+ name
+ points
+ }
+ }
+'
+```
+
+This returns only user documents which have a value for the `points` field *Greater Than or Equal to* (`_ge`) 50.
+
+## Obtain document commits
+
+DefraDB's data model is based on [MerkleCRDTs](./guides/merkle-crdt.md). Each document has a graph of all of its updates, similar to Git. The updates are called `commits` and are identified by `cid`, a content identifier. Each references its parents by their `cid`s.
+
+To get the most recent commits in the MerkleDAG for the document identified as `bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab`:
+
+```shell
+defradb client query '
+ query {
+ latestCommits(dockey: "bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab") {
+ cid
+ delta
+ height
+ links {
+ cid
+ name
+ }
+ }
+ }
+'
+```
+
+It returns a structure similar to the following, which contains the update payload that caused this new commit (`delta`) and any subgraph commits it references.
+
+```json
+{
+ "data": [
+ {
+ "cid": "bafybeifhtfs6vgu7cwbhkojneh7gghwwinh5xzmf7nqkqqdebw5rqino7u",
+ "delta": "pGNhZ2UYH2RuYW1lY0JvYmZwb2ludHMYWmh2ZXJpZmllZPU=",
+ "height": 1,
+ "links": [
+ {
+ "cid": "bafybeiet6foxcipesjurdqi4zpsgsiok5znqgw4oa5poef6qtiby5hlpzy",
+ "name": "age"
+ },
+ {
+ "cid": "bafybeielahxy3r3ulykwoi5qalvkluojta4jlg6eyxvt7lbon3yd6ignby",
+ "name": "name"
+ },
+ {
+ "cid": "bafybeia3tkpz52s3nx4uqadbm7t5tir6gagkvjkgipmxs2xcyzlkf4y4dm",
+ "name": "points"
+ },
+ {
+ "cid": "bafybeia4off4javopmxcdyvr6fgb5clo7m5bblxic5sqr2vd52s6khyksm",
+ "name": "verified"
+ }
+ ]
+ }
+ ]
+}
+```
+
+Obtain a specific commit by its content identifier (`cid`):
+
+```shell
+defradb client query '
+ query {
+ commits(cid: "bafybeifhtfs6vgu7cwbhkojneh7gghwwinh5xzmf7nqkqqdebw5rqino7u") {
+ cid
+ delta
+ height
+ links {
+ cid
+ name
+ }
+ }
+ }
+'
+```
+
+## DefraDB Query Language (DQL)
+
+DQL is compatible with GraphQL but features various extensions.
+
+Read the [Query specification](./references/query-specification/query-language-overview.md) to discover filtering, ordering, limiting, relationships, variables, aggregate functions, and other useful features.
+
+
+## Peer-to-peer data synchronization
+
+DefraDB leverages peer-to-peer networking for data exchange, synchronization, and replication of documents and commits.
+
+When starting a node for the first time, a key pair is generated and stored in its "root directory" (`~/.defradb/` by default).
+
+Each node has a unique `Peer ID` generated from its public key. This ID allows other nodes to connect to it.
+
+There are two types of peer-to-peer relationships supported: **pubsub** peering and **replicator** peering.
+
+Pubsub peering *passively* synchronizes data between nodes by broadcasting *Document Commit* updates to the topic of the commit's document key. Nodes need to be listening on the pubsub channel to receive updates. This is for when two nodes *already* share a document and want to keep them in sync.
+
+Replicator peering *actively* pushes changes from a specific collection *to* a target peer.
+
+### Pubsub example
+
+Pubsub peers can be specified on the command line using the `--peers` flag, which accepts a comma-separated list of peer [multiaddresses](https://docs.libp2p.io/concepts/addressing/). For example, a node at IP `192.168.1.12` listening on 9000 with Peer ID `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B` would be referred to using the multiaddress `/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`.
+
+Let's go through an example of two nodes (*nodeA* and *nodeB*) connecting with each other over pubsub, on the same machine.
+
+Start *nodeA* with a default configuration:
+
+```shell
+defradb start
+```
+
+Obtain the Peer ID from its console output. In this example, we use `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`, but locally it will be different.
+
+For *nodeB*, we provide the following configuration:
+
+```shell
+defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --tcpaddr /ip4/0.0.0.0/tcp/9162 --peers /ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B
+```
+
+About the flags:
+
+- `--rootdir` specifies the root dir (config and data) to use
+- `--url` is the address to listen on for the client HTTP and GraphQL API
+- `--p2paddr` is the multiaddress for the p2p networking to listen on
+- `--tcpaddr` is the multiaddress for the gRPC server to listen on
+- `--peers` is a comma-separated list of peer multiaddresses
+
+This starts two nodes and connects them via pubsub networking.
+
+### Collection subscription example
+
+It is possible to subscribe to updates on a given collection by using its ID as the pubsub topic. The ID of a collection is found as the field `schemaVersionID` in one of its documents. Here we use the collection ID of the `User` type we created above. After setting up 2 nodes as shown in the [Pubsub example](#pubsub-example) section, we can subscribe to collection updates on *nodeA* from *nodeB* by using the `rpc p2pcollection` command:
+
+```shell
+defradb client rpc p2pcollection add --url localhost:9182 bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske
+```
+
+Multiple collection IDs can be added at once.
+
+```shell
+defradb client rpc p2pcollection add --url localhost:9182
+```
+
+### Replicator example
+
+Replicator peering is targeted: it allows a node to actively send updates to another node. Let's go through an example of *nodeA* actively replicating to *nodeB*:
+
+Start *nodeA*:
+
+```shell
+defradb start
+```
+
+In another terminal, add this example schema to it:
+
+```shell
+defradb client schema add '
+ type Article {
+ content: String
+ published: Boolean
+ }
+'
+```
+
+Start (or continue running from above) *nodeB*, that will be receiving updates:
+
+```shell
+defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --tcpaddr /ip4/0.0.0.0/tcp/9162
+```
+
+Here we *do not* specify `--peers` as we will manually define a replicator after startup via the `rpc` client command.
+
+In another terminal, add the same schema to *nodeB*:
+
+```shell
+defradb client schema add --url localhost:9182 '
+ type Article {
+ content: String
+ published: Boolean
+ }
+'
+```
+
+Set *nodeA* to actively replicate the "Article" collection to *nodeB*:
+
+```shell
+defradb client rpc addreplicator "Article" /ip4/0.0.0.0/tcp/9172/p2p/
+defradb client rpc replicator set -c "Article" /ip4/0.0.0.0/tcp/9172/p2p/
+
+```
+
+As we add or update documents in the "Article" collection on *nodeA*, they will be actively pushed to *nodeB*. Note that changes to *nodeB* will still be passively published back to *nodeA*, via pubsub.
+
+
+## Securing the HTTP API with TLS
+
+By default, DefraDB will expose its HTTP API at `http://localhost:9181/api/v0`. It's also possible to configure the API to use TLS with self-signed certificates or Let's Encrypt.
+
+To start defradb with self-signed certificates placed under `~/.defradb/certs/` with `server.key`
+being the private key and `server.crt` being the public certificate, just do:
+```shell
+defradb start --tls
+```
+
+The keys can be generated with your generator of choice or with `make tls-certs`.
+
+Since the keys should be stored within the DefraDB data and configuration directory, the recommended key generation command is `make tls-certs path="~/.defradb/certs"`.
+
+If not saved under `~/.defradb/certs` then the public (`pubkeypath`) and private (`privkeypath`) key paths need to be explicitly defined in addition to the `--tls` flag or `tls` set to `true` in the config.
+
+Then to start the server with TLS, using your generated keys in custom path:
+```shell
+defradb start --tls --pubkeypath ~/path-to-pubkey.key --privkeypath ~/path-to-privkey.crt
+
+```
+
+DefraDB also comes with automatic HTTPS for deployments on the public web. To enable HTTPS,
+ deploy DefraDB to a server with both port 80 and port 443 open. With your domain's DNS A record
+ pointed to the IP of your server, you can run the database using the following command:
+```shell
+sudo defradb start --tls --url=your-domain.net --email=email@example.com
+```
+Note: `sudo` is needed above for the redirection server (to bind port 80).
+
+A valid email address is necessary for the creation of the certificate, and is important to get notifications from the Certificate Authority - in case the certificate is about to expire, etc.
+
+
+## Conclusion
+
+This gets you started to use DefraDB! Read on the documentation website for guides and further information.
diff --git a/docs/website/guides/_category_.json b/docs/website/guides/_category_.json
new file mode 100644
index 0000000000..7494f247dd
--- /dev/null
+++ b/docs/website/guides/_category_.json
@@ -0,0 +1,5 @@
+{
+ "label": "Guides",
+ "position": 2
+ }
+
\ No newline at end of file
diff --git a/docs/website/guides/akash-deployment.md b/docs/website/guides/akash-deployment.md
new file mode 100644
index 0000000000..1156347999
--- /dev/null
+++ b/docs/website/guides/akash-deployment.md
@@ -0,0 +1,145 @@
+---
+sidebar_label: Akash Deployment Guide
+sidebar_position: 60
+---
+# Deploy DefraDB on Akash
+
+## Overview
+
+This guide will walk you through the required steps to deploy DefraDB on Akash.
+
+## Prerequisites
+
+Before you get started you will need an Akash account with at least 5 AKT. If you don't have an Akash account you can create one by installing [Keplr](https://www.keplr.app/).
+
+## Deploy
+
+![Cloudmos console](/img/akash/deploy.png "Cloudmos console")
+
+Deploying on Akash can be done through the [Cloudmos console](https://deploy.cloudmos.io/new-deployment). Click on the "Empty" deployment type and copy the config below into the editor.
+
+```yaml
+---
+version: "2.0"
+
+services:
+ defradb:
+ image: sourcenetwork/defradb:develop
+ args:
+ - start
+ - --url=0.0.0.0:9181
+ expose:
+ - port: 9171
+ as: 9171
+ to:
+ - global: true
+ - port: 9181
+ as: 80
+ to:
+ - global: true
+
+profiles:
+ compute:
+ defradb:
+ resources:
+ cpu:
+ units: 1.0
+ memory:
+ size: 1Gi
+ storage:
+ size: 1Gi
+ placement:
+ akash:
+ attributes:
+ host: akash
+ signedBy:
+ anyOf:
+ - "akash1365yvmc4s7awdyj3n2sav7xfx76adc6dnmlx63"
+ - "akash18qa2a2ltfyvkyj0ggj3hkvuj6twzyumuaru9s4"
+ pricing:
+ defradb:
+ denom: uakt
+ amount: 10000
+
+deployment:
+ defradb:
+ akash:
+ profile: defradb
+ count: 1
+```
+
+Next click the "Create Deployment" button. A pop-up will appear asking you to confirm the configuration transaction.
+
+After confirming you will be prompted to select a provider. Select a provider with a price and location that makes sense for your use case.
+
+A final pop-up will appear asking you to confirm the deployment transaction. If the deployment is successful you should now see deployment info similar to the image below.
+
+## Deployment Info
+
+![Cloudmos deployment](/img/akash/info.png "Cloudmos deployment")
+
+To configure and interact with your DefraDB node, you will need the P2P and API addresses. They can be found at the labeled locations in the image above.
+
+## P2P Replication
+
+To replicate documents from a local DefraDB instance to your Akash deployment you will need to create a shared schema on both nodes.
+
+Run the commands below to create the shared schema.
+
+First on the local node:
+
+```bash
+defradb client schema add '
+ type User {
+ name: String
+ age: Int
+ }
+'
+```
+
+Then on the Akash node:
+
+```bash
+defradb client schema add --url <api-address> '
+ type User {
+ name: String
+ age: Int
+ }
+'
+```
+
+> The API address can be found in the [deployment info](#deployment-info).
+
+Next you will need the peer ID of the Akash node. Run the command below to view the node's peer info.
+
+```bash
+defradb client p2p info --url <api-address>