chore: reorganize to cleanup exports of base package
mabels committed Dec 16, 2024
1 parent e686f4d commit 7ebdf48
Showing 111 changed files with 1,553 additions and 523 deletions.
2 changes: 1 addition & 1 deletion lib/bin.js → cborg/bin.js
@@ -3,7 +3,7 @@
import process from 'process'
import { decode, encode } from '../cborg.js'
import { tokensToDiagnostic, fromDiag } from './diagnostic.js'
-import { fromHex as _fromHex, toHex } from './byte-utils.js'
+import { fromHex as _fromHex, toHex } from 'cborg/utils'

/**
* @param {number} code
28 changes: 28 additions & 0 deletions cborg/common.js
@@ -0,0 +1,28 @@
+// const decodeErrPrefix = 'CBOR decode error:'
+// const encodeErrPrefix = 'CBOR encode error:'
+
+const uintMinorPrefixBytes = []
+uintMinorPrefixBytes[23] = 1
+uintMinorPrefixBytes[24] = 2
+uintMinorPrefixBytes[25] = 3
+uintMinorPrefixBytes[26] = 5
+uintMinorPrefixBytes[27] = 9
+
+// /**
+//  * @param {Uint8Array} data
+//  * @param {number} pos
+//  * @param {number} need
+//  * @param {string} decodeErrPrefix
+//  */
+// function assertEnoughData (data, pos, need, decodeErrPrefix) {
+//   if (data.length - pos < need) {
+//     throw new Error(`${decodeErrPrefix} not enough data for type`)
+//   }
+// }
+
+export {
+  // decodeErrPrefix,
+  // encodeErrPrefix,
+  uintMinorPrefixBytes
+  // assertEnoughData
+}
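Note on the table above: for CBOR unsigned integers, minor values 0-23 embed the value in the initial byte, while minors 24-27 are followed by a 1-, 2-, 4- or 8-byte payload, so the head occupies 1, 2, 3, 5 or 9 bytes (RFC 8949). A minimal sketch of that relationship; the headSize helper is hypothetical, not part of this commit:

    // Hypothetical helper mirroring uintMinorPrefixBytes: total head
    // size (initial byte plus uint payload) for a given minor value.
    function headSize (minor) {
      if (minor <= 23) return 1 // value packed into the initial byte
      return { 24: 2, 25: 3, 26: 5, 27: 9 }[minor]
    }

    console.log(headSize(26)) // 5: initial byte + 4-byte uint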
46 changes: 24 additions & 22 deletions lib/decode.js → cborg/decode.js
@@ -1,18 +1,18 @@
-import { decodeErrPrefix } from './common.js'
-import { Type } from './token.js'
-import { jump, quick } from './jump.js'
+import { jump, quick, Type } from 'cborg/utils'

/**
-* @typedef {import('./token.js').Token} Token
+* @typedef {import('cborg/utils').Token} Token
* @typedef {import('../interface').DecodeOptions} DecodeOptions
* @typedef {import('../interface').DecodeTokenizer} DecodeTokenizer
* @typedef {import('../interface').DecodeFunction} DecodeFunction
*/

-const defaultDecodeOptions = {
+const /** @type {DecodeOptions} */defaultDecodeOptions = {
strict: false,
allowIndefinite: true,
allowUndefined: true,
-allowBigInt: true
+allowBigInt: true,
+decodeErrPrefix: 'CBOR decode error:'
}

/**
@@ -23,10 +23,11 @@ class Tokeniser {
* @param {Uint8Array} data
* @param {DecodeOptions} options
*/
-constructor (data, options = {}) {
+constructor (data, options) {
this._pos = 0
this.data = data
this.options = options
+this.jump = jump(options.decodeErrPrefix)
}

pos () {
@@ -41,11 +42,11 @@ class Tokeniser {
const byt = this.data[this._pos]
let token = quick[byt]
if (token === undefined) {
-const decoder = jump[byt]
+const decoder = this.jump[byt]
/* c8 ignore next 4 */
// if we're here then there's something wrong with our jump or quick lists!
if (!decoder) {
-throw new Error(`${decodeErrPrefix} no decoder for major type ${byt >>> 5} (byte 0x${byt.toString(16).padStart(2, '0')})`)
+throw new Error(`${this.options.decodeErrPrefix} no decoder for major type ${byt >>> 5} (byte 0x${byt.toString(16).padStart(2, '0')})`)
}
const minor = byt & 31
token = decoder(this.data, this._pos, minor, this.options)
@@ -74,10 +75,10 @@ function tokenToArray (token, tokeniser, options) {
// normal end to indefinite length array
break
}
-throw new Error(`${decodeErrPrefix} got unexpected break to lengthed array`)
+throw new Error(`${options.decodeErrPrefix} got unexpected break to lengthed array`)
}
if (value === DONE) {
-throw new Error(`${decodeErrPrefix} found array but not enough entries (got ${i}, expected ${token.value})`)
+throw new Error(`${options.decodeErrPrefix} found array but not enough entries (got ${i}, expected ${token.value})`)
}
arr[i] = value
}
@@ -101,23 +102,23 @@ function tokenToMap (token, tokeniser, options) {
// normal end to indefinite length map
break
}
-throw new Error(`${decodeErrPrefix} got unexpected break to lengthed map`)
+throw new Error(`${options.decodeErrPrefix} got unexpected break to lengthed map`)
}
if (key === DONE) {
-throw new Error(`${decodeErrPrefix} found map but not enough entries (got ${i} [no key], expected ${token.value})`)
+throw new Error(`${options.decodeErrPrefix} found map but not enough entries (got ${i} [no key], expected ${token.value})`)
}
if (useMaps !== true && typeof key !== 'string') {
-throw new Error(`${decodeErrPrefix} non-string keys not supported (got ${typeof key})`)
+throw new Error(`${options.decodeErrPrefix} non-string keys not supported (got ${typeof key})`)
}
if (options.rejectDuplicateMapKeys === true) {
// @ts-ignore
if ((useMaps && m.has(key)) || (!useMaps && (key in obj))) {
-throw new Error(`${decodeErrPrefix} found repeat map key "${key}"`)
+throw new Error(`${options.decodeErrPrefix} found repeat map key "${key}"`)
}
}
const value = tokensToObject(tokeniser, options)
if (value === DONE) {
-throw new Error(`${decodeErrPrefix} found map but not enough entries (got ${i} [no value], expected ${token.value})`)
+throw new Error(`${options.decodeErrPrefix} found map but not enough entries (got ${i} [no value], expected ${token.value})`)
}
if (useMaps) {
// @ts-ignore TODO reconsider this .. maybe needs to be strict about key types
@@ -166,7 +167,7 @@ function tokensToObject (tokeniser, options) {
const tagged = tokensToObject(tokeniser, options)
return options.tags[token.value](tagged)
}
-throw new Error(`${decodeErrPrefix} tag not supported (${token.value})`)
+throw new Error(`${options.decodeErrPrefix} tag not supported (${token.value})`)
}
/* c8 ignore next */
throw new Error('unsupported')
@@ -178,17 +179,17 @@ function tokensToObject (tokeniser, options) {
* @returns {[any, Uint8Array]}
*/
function decodeFirst (data, options) {
+options = Object.assign({}, defaultDecodeOptions, options)
if (!(data instanceof Uint8Array)) {
-throw new Error(`${decodeErrPrefix} data to decode must be a Uint8Array`)
+throw new Error(`${options.decodeErrPrefix} data to decode must be a Uint8Array`)
}
-options = Object.assign({}, defaultDecodeOptions, options)
const tokeniser = options.tokenizer || new Tokeniser(data, options)
const decoded = tokensToObject(tokeniser, options)
if (decoded === DONE) {
-throw new Error(`${decodeErrPrefix} did not find any content to decode`)
+throw new Error(`${options.decodeErrPrefix} did not find any content to decode`)
}
if (decoded === BREAK) {
-throw new Error(`${decodeErrPrefix} got unexpected break`)
+throw new Error(`${options.decodeErrPrefix} got unexpected break`)
}
return [decoded, data.subarray(tokeniser.pos())]
}
@@ -199,9 +200,10 @@ function decodeFirst (data, options) {
* @returns {any}
*/
function decode (data, options) {
+options = Object.assign({}, defaultDecodeOptions, options)
const [decoded, remainder] = decodeFirst(data, options)
if (remainder.length > 0) {
-throw new Error(`${decodeErrPrefix} too many terminals, data makes no sense`)
+throw new Error(`${options.decodeErrPrefix} too many terminals, data makes no sense:${remainder.length}`)
}
return decoded
}
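Net effect of the decode.js changes: decodeErrPrefix is no longer a module-level constant but a required DecodeOptions field, and the jump table is built per tokeniser from it. A usage sketch under the new shape (byte values illustrative):

    import { decode } from 'cborg'

    // decode() and decodeFirst() merge defaultDecodeOptions first, so
    // plain callers still get the 'CBOR decode error:' prefix for free.
    const value = decode(new Uint8Array([0x83, 0x01, 0x02, 0x03])) // [1, 2, 3]

    // Code that constructs the low-level Tokeniser directly must now pass
    // options itself, including decodeErrPrefix: the constructor default
    // `options = {}` is gone.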
9 changes: 6 additions & 3 deletions lib/diagnostic.js → cborg/diagnostic.js
@@ -1,7 +1,6 @@
import { Tokeniser } from './decode.js'
-import { Type } from './token.js'
-import { toHex, fromHex } from './byte-utils.js'
-import { uintBoundaries } from './0uint.js'
+import { uintBoundaries, toHex, fromHex } from 'cborg/utils'

const utf8Encoder = new TextEncoder()
const utf8Decoder = new TextDecoder()
@@ -11,7 +10,11 @@ const utf8Decoder = new TextDecoder()
* @param {number} [width]
*/
function * tokensToDiagnostic (inp, width = 100) {
-const tokeniser = new Tokeniser(inp, { retainStringBytes: true, allowBigInt: true })
+const tokeniser = new Tokeniser(inp, {
+  decodeErrPrefix: 'Diagnostic decode error: ',
+  retainStringBytes: true,
+  allowBigInt: true
+})
let pos = 0
const indent = []

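The diagnostic tokeniser now supplies its own 'Diagnostic decode error: ' prefix, since the Tokeniser constructor no longer defaults its options. A usage sketch; the import path is assumed from the new file layout, not confirmed by this diff:

    import { tokensToDiagnostic } from 'cborg/diagnostic'

    // Yields one line per token: hex bytes alongside diagnostic notation.
    for (const line of tokensToDiagnostic(new Uint8Array([0x83, 0x01, 0x02, 0x03]))) {
      console.log(line)
    }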
2 changes: 1 addition & 1 deletion lib/diagnostic_test.js → cborg/diagnostic_test.js
@@ -1,5 +1,5 @@
import { tokensToDiagnostic } from './diagnostic.js'
-import { fromHex } from './byte-utils.js'
+import { fromHex } from 'cborg/utils'

const inp = `
a7
44 changes: 23 additions & 21 deletions lib/encode.js → cborg/encode.js
@@ -1,18 +1,17 @@
import { is } from './is.js'
-import { Token, Type } from './token.js'
-import { Bl } from './bl.js'
-import { encodeErrPrefix } from './common.js'
-import { quickEncodeToken } from './jump.js'
-import { asU8A } from './byte-utils.js'
-
-import { encodeUint } from './0uint.js'
-import { encodeNegint } from './1negint.js'
-import { encodeBytes } from './2bytes.js'
-import { encodeString } from './3string.js'
-import { encodeArray } from './4array.js'
-import { encodeMap } from './5map.js'
-import { encodeTag } from './6tag.js'
-import { encodeFloat } from './7float.js'
+import {
+  Token, Type, Bl,
+  quickEncodeToken,
+  asU8A,
+  encodeUint,
+  encodeNegint,
+  encodeBytes,
+  encodeString,
+  encodeArray,
+  encodeMap,
+  encodeTag,
+  encodeFloat
+} from 'cborg/utils'

/**
* @typedef {import('../interface').EncodeOptions} EncodeOptions
@@ -27,7 +26,9 @@ import { encodeFloat } from './7float.js'
const defaultEncodeOptions = {
float64: false,
mapSorter,
-quickEncodeToken
+quickEncodeToken,
+encodeErrPrefix: 'CBOR encode error:'
+
}

/** @returns {TokenTypeEncoder[]} */
Expand Down Expand Up @@ -77,9 +78,10 @@ class Ref {
/**
* @param {Reference|undefined} stack
* @param {object|any[]} obj
+* @param {EncodeOptions} options
* @returns {Reference}
*/
-static createCheck (stack, obj) {
+static createCheck (stack, obj, { encodeErrPrefix }) {
if (stack && stack.includes(obj)) {
throw new Error(`${encodeErrPrefix} object contains circular references`)
}
@@ -221,7 +223,7 @@ const typeEncoders = {
}
return simpleTokens.emptyArray
}
-refStack = Ref.createCheck(refStack, obj)
+refStack = Ref.createCheck(refStack, obj, options)
const entries = []
let i = 0
for (const e of obj) {
@@ -252,7 +254,7 @@ const typeEncoders = {
}
return simpleTokens.emptyMap
}
-refStack = Ref.createCheck(refStack, obj)
+refStack = Ref.createCheck(refStack, obj, options)
/** @type {TokenOrNestedTokens[]} */
const entries = []
let i = 0
@@ -278,11 +280,11 @@ for (const typ of 'Uint8Clamped Uint16 Uint32 Int8 Int16 Int32 BigUint64 BigInt6

/**
* @param {any} obj
-* @param {EncodeOptions} [options]
+* @param {EncodeOptions} options
* @param {Reference} [refStack]
* @returns {TokenOrNestedTokens}
*/
-function objectToTokens (obj, options = {}, refStack) {
+function objectToTokens (obj, options, refStack) {
const typ = is(obj)
const customTypeEncoder = (options && options.typeEncoders && /** @type {OptionalTypeEncoder} */ options.typeEncoders[typ]) || typeEncoders[typ]
if (typeof customTypeEncoder === 'function') {
Expand All @@ -293,7 +295,7 @@ function objectToTokens (obj, options = {}, refStack) {
}
const typeEncoder = typeEncoders[typ]
if (!typeEncoder) {
-throw new Error(`${encodeErrPrefix} unsupported type: ${typ}`)
+throw new Error(`${options.encodeErrPrefix} unsupported type: ${typ}`)
}
return typeEncoder(obj, typ, options, refStack)
}
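Ref.createCheck now destructures encodeErrPrefix from an options argument instead of reading a module constant, so encode-side errors carry whichever prefix the merged options supply. A behaviour sketch:

    import { encode } from 'cborg'

    // Circular structures are still rejected; the message prefix now comes
    // from options.encodeErrPrefix (default 'CBOR encode error:').
    const obj = {}
    obj.self = obj
    try {
      encode(obj)
    } catch (err) {
      // 'CBOR encode error: object contains circular references'
      console.log(err.message)
    }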
24 changes: 12 additions & 12 deletions cborg.js → cborg/index.js
@@ -1,21 +1,21 @@
-import { encode } from './lib/encode.js'
-import { decode, decodeFirst, Tokeniser, tokensToObject } from './lib/decode.js'
-import { Token, Type } from './lib/token.js'
+import { encode } from './encode.js'
+import { decode, decodeFirst, Tokeniser, tokensToObject } from './decode.js'
+import { Token, Type } from 'cborg/utils'

// is this needed for the json module and other independ encoders
-export { encodeCustom } from './lib/encode.js'
-export { encodeErrPrefix, decodeErrPrefix } from './lib/common.js'
-export { asU8A, fromString, decodeCodePointsArray } from './lib/byte-utils.js'
-export { quickEncodeToken } from './lib/jump.js'
-export { makeCborEncoders, objectToTokens } from './lib/encode.js'
+export { encodeCustom } from './encode.js'
+// export { asU8A, fromString, decodeCodePointsArray } from './utils/byte-utils.js'
+// export { quickEncodeToken } from './utils/jump.js'
+// export { encodeErrPrefix, decodeErrPrefix } from './lib/common.js'
+// export { makeCborEncoders, objectToTokens } from './lib/encode.js'

/**
* Export the types that were present in the original manual cborg.d.ts
-* @typedef {import('./interface').TagDecoder} TagDecoder
+* @typedef {import('../interface').TagDecoder} TagDecoder
* There was originally just `TypeEncoder` so don't break types by renaming or not exporting
-* @typedef {import('./interface').OptionalTypeEncoder} TypeEncoder
-* @typedef {import('./interface').DecodeOptions} DecodeOptions
-* @typedef {import('./interface').EncodeOptions} EncodeOptions
+* @typedef {import('../interface').OptionalTypeEncoder} TypeEncoder
+* @typedef {import('../interface').DecodeOptions} DecodeOptions
+* @typedef {import('../interface').EncodeOptions} EncodeOptions
*/

export {
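With the internal re-exports commented out, the package surface splits into the root entry point for the high-level API and a 'cborg/utils' subpath for shared low-level helpers. A sketch of the resulting import style, inferred from the specifiers this diff itself uses:

    // High-level API from the root entry point.
    import { encode, decode, decodeFirst } from 'cborg'
    // Low-level helpers move behind the utils subpath.
    import { Token, Type, fromHex, toHex, quickEncodeToken } from 'cborg/utils'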
File renamed without changes.
8 changes: 4 additions & 4 deletions lib/length.js → cborg/length.js
@@ -1,6 +1,5 @@
-// never reference the file directly to ensure the
-// indepency of the json module
-import { makeCborEncoders, objectToTokens, quickEncodeToken } from 'cborg'
+import { makeCborEncoders, objectToTokens } from './encode.js'
+import { quickEncodeToken } from 'cborg/utils'

/**
* @typedef {import('../interface').EncodeOptions} EncodeOptions
@@ -13,7 +12,8 @@ const cborEncoders = makeCborEncoders()
/** @type {EncodeOptions} */
const defaultEncodeOptions = {
float64: false,
-quickEncodeToken
+quickEncodeToken,
+encodeErrPrefix: 'CBOR encode error: '
}

/**
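length.js now imports its encoder machinery directly rather than round-tripping through the 'cborg' entry point, and wires the encode prefix into its own defaults. A usage sketch; the encodedLength export and the subpath are assumed from the file layout, not shown in this diff:

    import { encodedLength } from 'cborg/length'

    // Size of the encoded form without allocating the output buffer:
    // 0xa1 (map of 1), 0x61 0x61 ('a'), 0x01 (1) = 4 bytes.
    console.log(encodedLength({ a: 1 })) // 4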
11 changes: 7 additions & 4 deletions interface.ts
@@ -1,5 +1,4 @@
-import { Token } from './lib/token'
-import { Bl } from './lib/bl'
+import type { Token, Bl } from 'cborg/utils'

export type TokenOrNestedTokens = Token | Token[] | TokenOrNestedTokens[]

@@ -14,12 +13,14 @@ export type OptionalTypeEncoder = (data: any, typ: string, options: EncodeOption
export type StrictTypeEncoder = (data: any, typ: string, options: EncodeOptions, refStack?: Reference) => TokenOrNestedTokens

export type TokenTypeEncoder = {
-(buf: Bl, token: Token, options?: EncodeOptions): void;
+(buf: Bl, token: Token, options: EncodeOptions): void;
compareTokens(t1: Token, t2: Token): number;
// TODO: make this non-optional as a breaking change and remove the throw in length.js
-encodedSize?(token: Token, options?: EncodeOptions): number;
+encodedSize?(token: Token, options: EncodeOptions): number;
}

+export type DecodeFunction = (data:Uint8Array, pos:number, minor:number, options:DecodeOptions) => any
+
export type MapSorter = (e1: (Token | Token[])[], e2: (Token | Token[])[]) => number

export type QuickEncodeToken = (token: Token) => Uint8Array | undefined
@@ -45,6 +46,7 @@ export interface DecodeOptions {
retainStringBytes?: boolean
tags?: TagDecoder[],
tokenizer?: DecodeTokenizer
+decodeErrPrefix: string
}

@@ -53,4 +55,5 @@ export interface EncodeOptions {
mapSorter?: MapSorter,
quickEncodeToken?: QuickEncodeToken,
typeEncoders?: { [typeName: string]: OptionalTypeEncoder }
+encodeErrPrefix: string
}
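Both prefixes are declared without a '?', so TypeScript consumers assembling options objects by hand must now provide them; the JavaScript entry points keep working because decode() and encode() merge their defaults first. A sketch of hand-built options (hypothetical consumer code; the import path is illustrative):

    /** @type {import('cborg/interface').EncodeOptions} */
    const opts = {
      float64: true,
      encodeErrPrefix: 'CBOR encode error:' // now required by the interface
    }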