feat: multiple plaintext authentication #91

Open · wants to merge 8 commits into base: main
19 changes: 18 additions & 1 deletion Makefile
@@ -7,12 +7,13 @@ CIRCOM_FILES := $(wildcard $(addsuffix /*_*b.circom,$(TARGET_DIRS)))
# Extract target sizes (e.g., "512b", "1024b") from directory names
TARGET_SIZES := $(patsubst builds/target_%,%,$(TARGET_DIRS))


# Create artifacts directories
$(shell mkdir -p $(addsuffix /artifacts,$(TARGET_DIRS)))

# Default target
.PHONY: all clean
all: build params
all: build check params

# Build target
.PHONY: build
@@ -34,6 +35,22 @@ params:
cargo +nightly run --release -- "$$target_dir/artifacts" "$${size}b" "5" || exit 1; \
done

.PHONY: check
check:
@echo "Checking that all .bin artifacts exist..."
@set -e; \
for circuit in $(CIRCOM_FILES); do \
f1="$$(dirname $${circuit})/artifacts/$$(basename $${circuit} .circom).bin"; \
f2="$$(dirname $${circuit})/artifacts/$$(basename $${circuit} .circom).r1cs"; \
if [ ! -f "$${f1}" ] || [ ! -f "$${f2}" ]; then \
echo "ERROR: Missing artifact '$${f1}', '$${f2}"; \
exit 1; \
else \
echo "OK: $${f1}, $${f2}"; \
fi; \
done
@echo "All artifacts present!"

# Clean target
clean:
rm -rf $(addsuffix /artifacts,$(TARGET_DIRS))
6 changes: 3 additions & 3 deletions README.md
@@ -13,10 +13,10 @@

## Overview

`web-prover-circuits` is a project focused on implementing parsers and extractors/selective-disclosure for various data formats inside zero-knowledge circuits.
Specifically, these are designed to be used in an NIVC folding scheme.
`web-prover-circuits` is a project focused on implementing parsers and extractors/selective-disclosure for various data formats inside zero-knowledge circuits.
Specifically, these are designed to be used in an NIVC folding scheme.
Currently, our program layout looks like this:
![v0.7.0](docs/images/v0.7.0.png)
![v0.7.5](docs/images/v0.7.5.png)

## Repository Structure

13 changes: 7 additions & 6 deletions circuits/chacha20/authentication.circom
@@ -36,6 +36,9 @@ template PlaintextAuthentication(DATA_BYTES) {
// in => N 32-bit words => N 4 byte words
signal input plaintext[DATA_BYTES];

signal input ciphertext_digest;
signal input plaintext_index_counter;

// step_in should be the ciphertext digest + the HTTP digests + JSON seq digest
signal input step_in[1];

@@ -142,15 +145,13 @@
}
}

signal ciphertext_digest <== DataHasher(DATA_BYTES)(bigEndianCiphertext);

signal zeroed_plaintext[DATA_BYTES];
for(var i = 0 ; i < DATA_BYTES ; i++) {
// Sets any padding bytes to zero (which are presumably at the end) so they don't accum into the poly hash
zeroed_plaintext[i] <== (1 - isPadding[i]) * plaintext[i];
}
signal plaintext_digest <== PolynomialDigest(DATA_BYTES)(zeroed_plaintext, ciphertext_digest);
signal plaintext_digest_hashed <== Poseidon(1)([plaintext_digest]);
// TODO: I'm not sure we need to subtract the CT digest
step_out[0] <== step_in[0] - ciphertext_digest + plaintext_digest_hashed;
signal part_ciphertext_digest <== DataHasher(DATA_BYTES)(bigEndianCiphertext);
signal plaintext_digest <== PolynomialDigestWithCounter(DATA_BYTES)(zeroed_plaintext, ciphertext_digest, plaintext_index_counter);

step_out[0] <== step_in[0] - part_ciphertext_digest + plaintext_digest;
Contributor:

Do we need to write out the counter so that we use the correct counter in the next iteration?

Contributor:

Furthermore, we also need to assert that the first time around the counter is 0.

Contributor:

Perhaps this is captured by the fact that the whole PT digest needs to match? My only argument against this is that without also verifying the counter, someone could be mischievous and clever with plaintext chunks.

Collaborator (Author):

Yeah, you're right. Should we add another public input? Also, should I add it here in this PR, or create an issue and tackle it together with arbitrary data size proofs?

Contributor:

Is this resolved now then? Seems like it.

Collaborator (Author):

Nope, it's not. We have to add a counter hash.
}
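To make the counter discussion above concrete, here is a minimal TypeScript sketch (not part of this PR) of why the new `plaintext_index_counter` input matters when plaintext is authenticated in chunks. The digest formula and `prime` mirror `PolynomialDigest` in `circuits/test/common/index.ts`; the chunk sizes and variable names are illustrative assumptions.

```typescript
// Sketch: a counter-offset polynomial digest lets chunked plaintext accumulate
// to the same value as digesting the whole plaintext at once.
const prime = BigInt("21888242871839275222246405745257275088548364400416034343698204186575808495617");

// sum_i coeffs[i] * input^(i + counter) mod prime
function polynomialDigest(coeffs: number[], input: bigint, counter: bigint): bigint {
  let power = BigInt(1);
  for (let i = BigInt(0); i < counter; i++) power = (power * input) % prime;
  let result = BigInt(0);
  for (const c of coeffs) {
    result = (result + BigInt(c) * power) % prime;
    power = (power * input) % prime;
  }
  return result;
}

// Example: an 8-byte plaintext split across two folding steps of 4 bytes each.
const plaintext = [1, 2, 3, 4, 5, 6, 7, 8];
const r = BigInt(12345); // stand-in for ciphertext_digest, the random evaluation point

const whole = polynomialDigest(plaintext, r, BigInt(0));
const chunked =
  (polynomialDigest(plaintext.slice(0, 4), r, BigInt(0)) +     // first step: counter = 0
   polynomialDigest(plaintext.slice(4), r, BigInt(4))) % prime; // second step: counter = bytes seen so far

console.log(whole === chunked); // true: the counter aligns the powers of r across chunks
// If the counter is not constrained (zero on the first step, then carried forward),
// a prover could shuffle chunk boundaries, which is the concern raised in the thread above.
```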
11 changes: 5 additions & 6 deletions circuits/http/verification.circom
@@ -1,7 +1,6 @@
pragma circom 2.1.9;

include "machine.circom";
// TODO: we don't need this if we do a poly digest of the plaintext in authentication circuit
include "../utils/hash.circom";

template HTTPVerification(DATA_BYTES, MAX_NUMBER_OF_HEADERS) {
@@ -17,9 +16,10 @@ template HTTPVerification(DATA_BYTES, MAX_NUMBER_OF_HEADERS) {
isPadding[i] <== IsEqual()([data[i], -1]);
zeroed_data[i] <== (1 - isPadding[i]) * data[i];
}
signal data_digest <== PolynomialDigest(DATA_BYTES)(zeroed_data, ciphertext_digest);
signal pt_digest <== PolynomialDigest(DATA_BYTES)(zeroed_data, ciphertext_digest);

signal input main_digests[MAX_NUMBER_OF_HEADERS + 1]; // Contains digests of start line and all intended headers (up to `MAX_NUMBER_OF_HEADERS`)
// Contains digests of start line and all intended headers (up to `MAX_NUMBER_OF_HEADERS`)
signal input main_digests[MAX_NUMBER_OF_HEADERS + 1];
signal not_contained[MAX_NUMBER_OF_HEADERS + 1];
var num_to_match = MAX_NUMBER_OF_HEADERS + 1;
for(var i = 0 ; i < MAX_NUMBER_OF_HEADERS + 1 ; i++) {
@@ -106,9 +106,8 @@ template HTTPVerification(DATA_BYTES, MAX_NUMBER_OF_HEADERS) {
State[DATA_BYTES - 1].next_parsing_body === 1;
State[DATA_BYTES - 1].next_line_status === 0;

// TODO: Need to subtract all the header digests here and also wrap them in poseidon. We can use the ones from the input to make this cheaper since they're verified in this circuit!
// subtract all the header digests here and also wrap them in poseidon.
signal body_digest_hashed <== Poseidon(1)([body_digest[DATA_BYTES - 1]]);
signal data_digest_hashed <== Poseidon(1)([data_digest]);
signal option_hash[MAX_NUMBER_OF_HEADERS + 1];
signal main_digests_hashed[MAX_NUMBER_OF_HEADERS + 1];
var accumulated_main_digests_hashed = 0;
@@ -118,5 +117,5 @@ template HTTPVerification(DATA_BYTES, MAX_NUMBER_OF_HEADERS) {
accumulated_main_digests_hashed += main_digests_hashed[i];
}

step_out[0] <== step_in[0] + body_digest_hashed - accumulated_main_digests_hashed - data_digest_hashed; // TODO: data_digest is really plaintext_digest from before, consider changing names
step_out[0] <== step_in[0] + body_digest_hashed - accumulated_main_digests_hashed - pt_digest;
Contributor:

Need to hash pt_digest?

Collaborator (Author):

Yeah, I had thought about that, but then how do we calculate pt_digest_hash in the authentication circuit?

My reasoning for using just the digest and not the hash was that all the others are hashes, so you'd still have to find the preimage of those to cancel these.

Contributor:

Ah, because of the different components... Okay, I see.

Can you please make some issues on potential security problems like this that we're introducing? Then I'm good to sign off on this.

}
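As a rough illustration of the accumulator arithmetic discussed above, here is a TypeScript sketch (placeholder values, not the PR's code) of how the digests added by `PlaintextAuthentication` are meant to cancel in `HTTPVerification` only when both circuits compute the same `pt_digest`:

```typescript
// Sketch of the step_in/step_out bookkeeping across the two circuits shown above.
const prime = BigInt("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const mod = (x: bigint) => ((x % prime) + prime) % prime; // analogous role to modAdd in the tests

// Placeholder field elements standing in for the real witness values.
const ciphertextDigest = BigInt(111);             // DataHasher over this chunk's ciphertext
const plaintextDigest = BigInt(222);              // PolynomialDigestWithCounter over the plaintext
const bodyDigestHashed = BigInt(333);             // Poseidon(1)([body_digest])
const accumulatedMainDigestsHashed = BigInt(444); // sum of Poseidon-hashed start-line/header digests

// PlaintextAuthentication: step_out = step_in - part_ciphertext_digest + plaintext_digest
const afterAuthentication = mod(BigInt(0) - ciphertextDigest + plaintextDigest);

// HTTPVerification: recomputes pt_digest from its own (zero-padded) data input and subtracts it,
// so the plaintext term only cancels if both circuits saw byte-identical plaintext.
const afterHttp = mod(afterAuthentication + bodyDigestHashed - accumulatedMainDigestsHashed - plaintextDigest);

console.log(afterHttp === mod(bodyDigestHashed - accumulatedMainDigestsHashed - ciphertextDigest)); // true
```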
8 changes: 4 additions & 4 deletions circuits/json/extraction.circom
@@ -6,7 +6,7 @@ include "hash_machine.circom";
template JSONExtraction(DATA_BYTES, MAX_STACK_HEIGHT) {
signal input data[DATA_BYTES];
signal input ciphertext_digest;
signal input sequence_digest;
signal input sequence_digest;
signal input value_digest;

signal input step_in[1];
@@ -23,7 +23,7 @@ template JSONExtraction(DATA_BYTES, MAX_STACK_HEIGHT) {
}
signal intermediate_digest[DATA_BYTES][3 * MAX_STACK_HEIGHT];
signal state_digest[DATA_BYTES];

// Debugging
// for(var i = 0; i<MAX_STACK_HEIGHT; i++) {
// log("State[", 0, "].next_stack[", i,"] = [",State[0].next_stack[i][0], "][", State[0].next_stack[i][1],"]" );
@@ -73,8 +73,8 @@ template JSONExtraction(DATA_BYTES, MAX_STACK_HEIGHT) {
}
state_digest[data_idx] <== accumulator;
sequence_is_matched[data_idx] <== IsEqual()([state_digest[data_idx], sequence_digest]);
// Now check for if the value digest appears

// Now check for if the value digest appears
var value_digest_in_stack = 0;
for(var i = 0 ; i < MAX_STACK_HEIGHT ; i++) {
// A single value can be present only, and it is on index 1, so we can just accum
25 changes: 15 additions & 10 deletions circuits/test/chacha20/authentication.test.ts
@@ -2,10 +2,9 @@ import { WitnessTester } from "circomkit";
import { circomkit, PolynomialDigest, toByte, toUint32Array, uintArray32ToBits, modAdd } from "../common";
import { DataHasher } from "../common/poseidon";
import { assert } from "chai";
import { poseidon1 } from "poseidon-lite";

describe("Plaintext Authentication", () => {
let circuit: WitnessTester<["key", "nonce", "counter", "plaintext", "step_in"], ["step_out"]>;
let circuit: WitnessTester<["key", "nonce", "counter", "plaintext", "plaintext_index_counter", "ciphertext_digest", "step_in"], ["step_out"]>;
describe("16 block test", () => {
it("should perform encryption", async () => {
circuit = await circomkit.WitnessTester(`PlaintextAuthentication`, {
@@ -49,18 +48,21 @@ describe("Plaintext Authentication", () => {
0xf9, 0x1b, 0x65, 0xc5, 0x52, 0x47, 0x33, 0xab, 0x8f, 0x59, 0x3d, 0xab, 0xcd, 0x62, 0xb3, 0x57,
0x16, 0x39, 0xd6, 0x24, 0xe6, 0x51, 0x52, 0xab, 0x8f, 0x53, 0x0c, 0x35, 0x9f, 0x08, 0x61, 0xd8
];
const counterBits = uintArray32ToBits([1])[0]
const counterBits = uintArray32ToBits([1])[0];
let ciphertext_digest = DataHasher(ciphertextBytes);
let w = await circuit.compute({
key: toInput(Buffer.from(keyBytes)),
nonce: toInput(Buffer.from(nonceBytes)),
counter: counterBits,
plaintext: plaintextBytes,
plaintext_index_counter: 0,
ciphertext_digest: ciphertext_digest,
step_in: 0
}, (["step_out"]));

// Output
let ciphertext_digest = DataHasher(ciphertextBytes);
let plaintext_digest_hashed = poseidon1([PolynomialDigest(plaintextBytes, ciphertext_digest)]);
let output = modAdd(plaintext_digest_hashed - ciphertext_digest, BigInt(0));
let plaintext_digest = PolynomialDigest(plaintextBytes, ciphertext_digest, BigInt(0));
let output = modAdd(plaintext_digest - ciphertext_digest, BigInt(0));
assert.deepEqual(w.step_out, output);
});
});
@@ -107,16 +109,19 @@ describe("Plaintext Authentication", () => {
];
let totalLength = 128;
let paddedPlaintextBytes = plaintextBytes.concat(Array(totalLength - plaintextBytes.length).fill(-1));
const counterBits = uintArray32ToBits([1])[0]
const counterBits = uintArray32ToBits([1])[0];
let ciphertext_digest = DataHasher(ciphertextBytes);
let w = await circuit.compute({
key: toInput(Buffer.from(keyBytes)),
nonce: toInput(Buffer.from(nonceBytes)),
counter: counterBits,
plaintext: paddedPlaintextBytes,
step_in: 0
step_in: 0,
plaintext_index_counter: 0,
ciphertext_digest: ciphertext_digest,
}, (["step_out"]));
let ciphertext_digest = DataHasher(ciphertextBytes);
let plaintext_digest = poseidon1([PolynomialDigest(plaintextBytes, ciphertext_digest)]);

let plaintext_digest = PolynomialDigest(plaintextBytes, ciphertext_digest, BigInt(0));
let output = modAdd(plaintext_digest - ciphertext_digest, BigInt(0));
assert.deepEqual(w.step_out, output);
});
26 changes: 20 additions & 6 deletions circuits/test/common/index.ts
@@ -238,9 +238,13 @@ export function bytesToBigInt(bytes: number[] | Uint8Array): bigint {
}

const prime = BigInt("21888242871839275222246405745257275088548364400416034343698204186575808495617");
export function PolynomialDigest(coeffs: number[], input: bigint): bigint {
export function PolynomialDigest(coeffs: number[], input: bigint, counter: bigint): bigint {
let result = BigInt(0);
// input ** counter
let power = BigInt(1);
for (let i = 0; i < counter; i++) {
power = (power * input) % prime;
}

for (let i = 0; i < coeffs.length; i++) {
result = (result + BigInt(coeffs[i]) * power) % prime;
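The hunk above is cut off before the end of the loop; assuming it continues by multiplying `power` by `input` each iteration and returning `result`, the counter acts as a fixed offset of the evaluation point's powers. A quick illustrative check of that identity (the import path mirrors the test files and is an assumption):

```typescript
// Illustrative check (not part of the PR's test suite):
// PolynomialDigest(coeffs, x, c) === x^c * PolynomialDigest(coeffs, x, 0)  (mod prime)
import { PolynomialDigest } from "../common";

const prime = BigInt("21888242871839275222246405745257275088548364400416034343698204186575808495617");
const coeffs = [7, 11, 13];
const x = BigInt(98765);
const c = BigInt(5);

// x^c mod prime
let scale = BigInt(1);
for (let i = BigInt(0); i < c; i++) scale = (scale * x) % prime;

const offset = PolynomialDigest(coeffs, x, c);
const plain = PolynomialDigest(coeffs, x, BigInt(0));
console.log(offset === (plain * scale) % prime); // expected: true
```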
@@ -306,6 +310,10 @@ export const http_response_ciphertext = [
220, 67, 16, 26,
];

export const http_response_ciphertext_dup = [
66, 0, 57, 150, 208, 144, 184, 250, 244, 106, 253, 118, 105, 7, 189, 139, 78, 36, 126, 180, 79, 153, 22, 237, 62, 182, 186, 218, 239, 75, 35, 97, 231, 115, 106, 144, 4, 226, 80, 116, 121, 35, 136, 75, 89, 30, 78, 124, 59, 165, 121, 235, 65, 63, 174, 154, 143, 75, 78, 33, 20, 38, 21, 133, 42, 97, 147, 38, 195, 192, 90, 33, 165, 244, 196, 97, 167, 218, 2, 114, 7, 50, 34, 109, 211, 202, 30, 101, 196, 146, 61, 67, 166, 66, 255, 90, 38, 74, 162, 187, 173, 9, 149, 98, 16, 65, 79, 186, 61, 110, 193, 228, 163, 82, 238, 26, 30, 105, 206, 69, 2, 102, 238, 165, 47, 159, 39, 5, 197, 150, 0, 69, 51, 234, 132, 22, 219, 250, 22, 69, 111, 87, 123, 211, 13, 88, 46, 215, 6, 12, 107, 65, 69, 9, 235, 217, 180, 167, 132, 204
];

export const http_start_line = [72, 84, 84, 80, 47, 49, 46, 49, 32, 50, 48, 48, 32, 79, 75];

export const http_header_0 = [
@@ -421,7 +429,7 @@ interface ManifestResponse {
};
}

interface Manifest {
export interface Manifest {
response: ManifestResponse;
}

@@ -441,22 +449,28 @@ function headersToBytes(headers: Record<string, string[]>): number[][] {

export function InitialDigest(
manifest: Manifest,
ciphertext: number[],
ciphertexts: number[][],
maxStackHeight: number
): [bigint, bigint] {
let ciphertextDigests: bigint[] = [];
// Create a digest of the ciphertext itself
const ciphertextDigest = DataHasher(ciphertext);
ciphertexts.forEach(ciphertext => {
const ciphertextDigest = DataHasher(ciphertext);
ciphertextDigests.push(ciphertextDigest);
});

let ciphertextDigest = ciphertextDigests.reduce((a, b) => a + b, BigInt(0));

// Digest the start line using the ciphertext_digest as a random input
const startLineBytes = strToBytes(
`${manifest.response.version} ${manifest.response.status} ${manifest.response.message}`
);
const startLineDigest = PolynomialDigest(startLineBytes, ciphertextDigest);
const startLineDigest = PolynomialDigest(startLineBytes, ciphertextDigest, BigInt(0));

// Digest all the headers
const headerBytes = headersToBytes(manifest.response.headers);
const headersDigest = headerBytes.map(bytes =>
PolynomialDigest(bytes, ciphertextDigest)
PolynomialDigest(bytes, ciphertextDigest, BigInt(0))
);

// Digest the JSON sequence
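Finally, a short usage sketch (not from the PR) of the ciphertext-digest combination that `InitialDigest` now performs: each chunk is hashed separately with `DataHasher` and the results are summed into the single `ciphertext_digest` used as the polynomial evaluation point. Import paths follow the test files and are assumptions.

```typescript
import { DataHasher } from "../common/poseidon";
import { http_response_ciphertext, http_response_ciphertext_dup } from "../common";

// One ciphertext chunk per PlaintextAuthentication folding step.
const chunks = [http_response_ciphertext, http_response_ciphertext_dup];

// Mirrors what InitialDigest now does internally with its `ciphertexts: number[][]`
// argument before digesting the start line and headers against this combined value.
const ciphertextDigest = chunks
  .map(chunk => DataHasher(chunk))
  .reduce((a, b) => a + b, BigInt(0));

console.log(ciphertextDigest);
```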