From c08885d50dc47bc25f14850564b1712eedd2293a Mon Sep 17 00:00:00 2001
From: Yang Hau
Date: Tue, 31 Dec 2024 02:22:10 +0100
Subject: [PATCH] fix the typos

---
 jolt-core/src/poly/commitment/zeromorph.rs    | 14 ++++++-------
 jolt-core/src/poly/sparse_interleaved_poly.rs |  2 +-
 .../src/subprotocols/grand_product_quarks.rs  |  4 ++--
 .../src/subprotocols/sparse_grand_product.rs  | 20 +++++++++----------
 .../src/emulator/device/virtio_block_disk.rs  | 10 +++++-----
 tracer/src/emulator/elf_analyzer.rs           |  6 +++---
 tracer/src/emulator/mmu.rs                    |  2 +-
 7 files changed, 29 insertions(+), 29 deletions(-)

diff --git a/jolt-core/src/poly/commitment/zeromorph.rs b/jolt-core/src/poly/commitment/zeromorph.rs
index 505bd629c..3a91adea7 100644
--- a/jolt-core/src/poly/commitment/zeromorph.rs
+++ b/jolt-core/src/poly/commitment/zeromorph.rs
@@ -635,7 +635,7 @@ mod test {
     use ark_std::{test_rng, UniformRand};
     use rand_core::SeedableRng;
 
-    // Evaluate Phi_k(x) = \sum_{i=0}^k x^i using the direct inefficent formula
+    // Evaluate Phi_k(x) = \sum_{i=0}^k x^i using the direct inefficient formula
     fn phi<P: Pairing>(challenge: &P::ScalarField, subscript: usize) -> P::ScalarField {
         let len = (1 << subscript) as u64;
         (0..len).fold(P::ScalarField::zero(), |mut acc, i| {
@@ -900,7 +900,7 @@ mod test {
             &mut prover_transcript,
         )
         .unwrap();
-        let p_transcipt_squeeze: <Bn254 as Pairing>::ScalarField =
+        let p_transcript_squeeze: <Bn254 as Pairing>::ScalarField =
             prover_transcript.challenge_scalar();
 
         // Verify proof.
@@ -914,10 +914,10 @@ mod test {
             &mut verifier_transcript,
         )
         .unwrap();
-        let v_transcipt_squeeze: <Bn254 as Pairing>::ScalarField =
+        let v_transcript_squeeze: <Bn254 as Pairing>::ScalarField =
            verifier_transcript.challenge_scalar();
 
-        assert_eq!(p_transcipt_squeeze, v_transcipt_squeeze);
+        assert_eq!(p_transcript_squeeze, v_transcript_squeeze);
 
         // evaluate bad proof for soundness
         let altered_verifier_point = point
@@ -968,7 +968,7 @@ mod test {
            &evals,
            &mut prover_transcript,
        );
-        let p_transcipt_squeeze: <Bn254 as Pairing>::ScalarField =
+        let p_transcript_squeeze: <Bn254 as Pairing>::ScalarField =
            prover_transcript.challenge_scalar();
 
         // Verify proof.
@@ -982,10 +982,10 @@ mod test {
            &mut verifier_transcript,
         )
         .unwrap();
-        let v_transcipt_squeeze: <Bn254 as Pairing>::ScalarField =
+        let v_transcript_squeeze: <Bn254 as Pairing>::ScalarField =
            verifier_transcript.challenge_scalar();
 
-        assert_eq!(p_transcipt_squeeze, v_transcipt_squeeze);
+        assert_eq!(p_transcript_squeeze, v_transcript_squeeze);
 
         // evaluate bad proof for completeness
         let altered_verifier_point = point
diff --git a/jolt-core/src/poly/sparse_interleaved_poly.rs b/jolt-core/src/poly/sparse_interleaved_poly.rs
index 2eb9a7908..20efd9fe5 100644
--- a/jolt-core/src/poly/sparse_interleaved_poly.rs
+++ b/jolt-core/src/poly/sparse_interleaved_poly.rs
@@ -227,7 +227,7 @@ impl<F: JoltField> SparseInterleavedPolynomial<F> {
                     continue;
                 }
                 if coeff.index % 2 == 0 {
-                    // Left node; try to find correspoding right node
+                    // Left node; try to find corresponding right node
                     let right = segment
                         .get(j + 1)
                         .cloned()
diff --git a/jolt-core/src/subprotocols/grand_product_quarks.rs b/jolt-core/src/subprotocols/grand_product_quarks.rs
index 62057c917..49c06fea8 100644
--- a/jolt-core/src/subprotocols/grand_product_quarks.rs
+++ b/jolt-core/src/subprotocols/grand_product_quarks.rs
@@ -287,10 +287,10 @@ pub enum QuarkError {
     #[error("InvalidSumcheck")]
     InvalidQuarkSumcheck,
     /// Returned if a quark opening proof fails
-    #[error("IvalidOpeningProof")]
+    #[error("InvalidOpeningProof")]
     InvalidOpeningProof,
     /// Returned if eq(tau, r)*(f(1, r) - f(r, 0)*f(r,1)) does not match the result from sumcheck
-    #[error("IvalidOpeningProof")]
+    #[error("InvalidOpeningProof")]
     InvalidBinding,
 }
 
diff --git a/jolt-core/src/subprotocols/sparse_grand_product.rs b/jolt-core/src/subprotocols/sparse_grand_product.rs
index 1e192b51d..48c6d67fd 100644
--- a/jolt-core/src/subprotocols/sparse_grand_product.rs
+++ b/jolt-core/src/subprotocols/sparse_grand_product.rs
@@ -38,11 +38,11 @@ struct BatchedGrandProductToggleLayer<F: JoltField> {
     flag_values: Vec<Vec<F>>,
     /// The Reed-Solomon fingerprints for each circuit in the batch.
     fingerprints: Vec<Vec<F>>,
-    /// Once the sparse flag/fingerprint vectors cannnot be bound further
+    /// Once the sparse flag/fingerprint vectors cannot be bound further
     /// (i.e. binding would require processing values in different vectors),
     /// we switch to using `coalesced_flags` to represent the flag values.
     coalesced_flags: Option<Vec<F>>,
-    /// Once the sparse flag/fingerprint vectors cannnot be bound further
+    /// Once the sparse flag/fingerprint vectors cannot be bound further
     /// (i.e. binding would require processing values in different vectors),
     /// we switch to using `coalesced_fingerprints` to represent the fingerprint values.
     coalesced_fingerprints: Option<Vec<F>>,
@@ -210,11 +210,11 @@ impl<F: JoltField> Bindable<F> for BatchedGrandProductToggleLayer<F> {
             }
             self.coalesced_flags = Some(bound_flags);
 
-            let coalesced_fingerpints = self.coalesced_fingerprints.as_mut().unwrap();
-            let mut bound_fingerprints = vec![F::zero(); coalesced_fingerpints.len() / 2];
+            let coalesced_fingerprints = self.coalesced_fingerprints.as_mut().unwrap();
+            let mut bound_fingerprints = vec![F::zero(); coalesced_fingerprints.len() / 2];
             for i in 0..bound_fingerprints.len() {
-                bound_fingerprints[i] = coalesced_fingerpints[2 * i]
-                    + r * (coalesced_fingerpints[2 * i + 1] - coalesced_fingerpints[2 * i]);
+                bound_fingerprints[i] = coalesced_fingerprints[2 * i]
+                    + r * (coalesced_fingerprints[2 * i + 1] - coalesced_fingerprints[2 * i]);
             }
             self.coalesced_fingerprints = Some(bound_fingerprints);
             self.batched_layer_len /= 2;
@@ -399,14 +399,14 @@ impl<F: JoltField, ProofTranscript: Transcript> BatchedCubicSumcheck<F, ProofTr
 
     fn compute_cubic(&self, eq_poly: &SplitEqPolynomial<F>, previous_round_claim: F) -> UniPoly<F> {
         if let Some(coalesced_flags) = &self.coalesced_flags {
-            let coalesced_fingerpints = self.coalesced_fingerprints.as_ref().unwrap();
+            let coalesced_fingerprints = self.coalesced_fingerprints.as_ref().unwrap();
 
             let cubic_evals = if eq_poly.E1_len == 1 {
                 // 1. Flags/fingerprints are coalesced, and E1 is fully bound
                 // This is similar to the if case of `DenseInterleavedPolynomial::compute_cubic`
                 coalesced_flags
                     .par_chunks(2)
-                    .zip(coalesced_fingerpints.par_chunks(2))
+                    .zip(coalesced_fingerprints.par_chunks(2))
                     .zip(eq_poly.E2.par_chunks(2))
                     .map(|((flags, fingerprints), eq_chunk)| {
                         let eq_evals = {
@@ -453,12 +453,12 @@ impl<F: JoltField, ProofTranscript: Transcript> BatchedCubicSumcheck<F, ProofTr
diff --git a/tracer/src/emulator/device/virtio_block_disk.rs b/tracer/src/emulator/device/virtio_block_disk.rs
--- a/tracer/src/emulator/device/virtio_block_disk.rs
+++ b/tracer/src/emulator/device/virtio_block_disk.rs
     fn read_from_disk(&mut self, address: u64) -> u8 {
         let index = (address >> 3) as usize;
         let pos = (address % 8) * 8;
@@ -394,7 +394,7 @@ impl VirtioBlockDisk {
     /// Writes a byte to disk.
     ///
     /// # Arguments
-    /// * `addresss` Address in disk
+    /// * `address` Address in disk
     /// * `value` Data written to disk
     fn write_to_disk(&mut self, address: u64, value: u8) {
         let index = (address >> 3) as usize;
@@ -453,7 +453,7 @@ impl VirtioBlockDisk {
         (self.get_base_avail_address() + 4 + queue_size * 2).div_ceil(align) * align
     }
 
-    // @TODO: Follow the virtio block specification more propertly.
+    // @TODO: Follow the virtio block specification more properly.
     fn handle_disk_access(&mut self, memory: &mut MemoryWrapper) {
         let base_desc_address = self.get_base_desc_address();
         let base_avail_address = self.get_base_avail_address();
diff --git a/tracer/src/emulator/elf_analyzer.rs b/tracer/src/emulator/elf_analyzer.rs
index c37b2c1a5..9f035c205 100644
--- a/tracer/src/emulator/elf_analyzer.rs
+++ b/tracer/src/emulator/elf_analyzer.rs
@@ -658,9 +658,9 @@ impl ElfAnalyzer {
         string_table_section_headers: &Vec<&SectionHeader>,
     ) -> Option<u64> {
         let tohost_values = [0x2e, 0x74, 0x6f, 0x68, 0x6f, 0x73, 0x74, 0x00]; // ".tohost\null"
-        for progrma_data_header in program_data_section_headers {
-            let sh_addr = progrma_data_header.sh_addr;
-            let sh_name = progrma_data_header.sh_name as u64;
+        for program_data_header in program_data_section_headers {
+            let sh_addr = program_data_header.sh_addr;
+            let sh_name = program_data_header.sh_name as u64;
             // Find all string sections so far.
             // @TODO: Is there a way to know which string table section
             // sh_name of program data section points to?
diff --git a/tracer/src/emulator/mmu.rs b/tracer/src/emulator/mmu.rs
index f8245b6cc..a2dd12fbc 100644
--- a/tracer/src/emulator/mmu.rs
+++ b/tracer/src/emulator/mmu.rs
@@ -532,7 +532,7 @@ impl Mmu {
            false => match effective_address {
                // I don't know why but dtb data seems to be stored from 0x1020 on Linux.
                // It might be from self.x[0xb] initialization?
-                // And DTB size is arbitray.
+                // And DTB size is arbitrary.
                0x00001020..=0x00001fff => self.dtb[effective_address as usize - 0x1020],
                0x02000000..=0x0200ffff => self.clint.load(effective_address),
                0x0C000000..=0x0fffffff => self.plic.load(effective_address),