Handle multiple copies of Type due to bundling #136

Open · wants to merge 6 commits into base: master
2 changes: 1 addition & 1 deletion README.md
@@ -409,7 +409,7 @@ import { decode, Tokenizer, Type } from 'cborg'
 class CustomTokeniser extends Tokenizer {
   next () {
     const nextToken = super.next()
-    if (nextToken.type === Type.bytes) {
+    if (nextToken.type.equals(Type.bytes)) {
       throw new Error('Unsupported type: bytes')
     }
     return nextToken
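The README change above shows the crux of this PR: when a bundler (or a dependency tree that resolves two copies of cborg) duplicates the module, each copy gets its own Type singletons, so an identity check like nextToken.type === Type.bytes can fail even though both sides hold a "bytes" type. Comparing by structure instead of by reference survives duplication. A minimal sketch of the idea, assuming Type carries major/name/terminal fields as in cborg's token model — illustrative only, not the PR's exact implementation:

class Type {
  /**
   * @param {number} major - CBOR major type (0-7)
   * @param {string} name - e.g. 'bytes', 'string', 'map'
   * @param {boolean} terminal - whether the token is a complete value
   */
  constructor (major, name, terminal) {
    this.major = major
    this.name = name
    this.terminal = terminal
  }

  /**
   * Structural comparison: a Type from a second bundled copy of the
   * module still matches, where `this === typ` would not.
   * @param {Type} typ
   */
  equals (typ) {
    return this.major === typ.major && this.name === typ.name
  }
}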
22 changes: 0 additions & 22 deletions cborg.js

This file was deleted.

4 changes: 2 additions & 2 deletions lib/bin.js → cborg/bin.js
@@ -1,9 +1,9 @@
 #!/usr/bin/env node

 import process from 'process'
-import { decode, encode } from '../cborg.js'
+import { decode, encode } from 'cborg'
 import { tokensToDiagnostic, fromDiag } from './diagnostic.js'
-import { fromHex as _fromHex, toHex } from './byte-utils.js'
+import { fromHex as _fromHex, toHex } from 'cborg/utils'

 /**
  * @param {number} code
28 changes: 28 additions & 0 deletions cborg/common.js
@@ -0,0 +1,28 @@
+// const decodeErrPrefix = 'CBOR decode error:'
+// const encodeErrPrefix = 'CBOR encode error:'
+
+const uintMinorPrefixBytes = []
+uintMinorPrefixBytes[23] = 1
+uintMinorPrefixBytes[24] = 2
+uintMinorPrefixBytes[25] = 3
+uintMinorPrefixBytes[26] = 5
+uintMinorPrefixBytes[27] = 9
+
+// /**
+//  * @param {Uint8Array} data
+//  * @param {number} pos
+//  * @param {number} need
+//  * @param {string} decodeErrPrefix
+//  */
+// function assertEnoughData (data, pos, need, decodeErrPrefix) {
+//   if (data.length - pos < need) {
+//     throw new Error(`${decodeErrPrefix} not enough data for type`)
+//   }
+// }
+
+export {
+  // decodeErrPrefix,
+  // encodeErrPrefix,
+  uintMinorPrefixBytes
+  // assertEnoughData
+}
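For context on the table above: a CBOR unsigned-int header whose minor value is 24, 25, 26 or 27 is followed by 1, 2, 4 or 8 additional bytes, so the whole prefix occupies 2, 3, 5 or 9 bytes, while minors up to 23 fit in the single header byte. A sketch of how the table can be read — the helper below is hypothetical, not part of this diff:

// Hypothetical helper: total prefix length in bytes for a uint header.
import { uintMinorPrefixBytes } from './common.js'

function uintPrefixLength (firstByte) {
  const minor = firstByte & 31 // low 5 bits of the header byte
  // minors 0-23 encode the value inline (1 byte total);
  // 24-27 signal 1, 2, 4 or 8 extra bytes (2, 3, 5 or 9 bytes total)
  return minor < 24 ? 1 : uintMinorPrefixBytes[minor]
}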
54 changes: 28 additions & 26 deletions lib/decode.js → cborg/decode.js
@@ -1,18 +1,18 @@
-import { decodeErrPrefix } from './common.js'
-import { Type } from './token.js'
-import { jump, quick } from './jump.js'
+import { jump, quick, Type } from 'cborg/utils'

 /**
- * @typedef {import('./token.js').Token} Token
+ * @typedef {import('cborg/utils').Token} Token
  * @typedef {import('../interface').DecodeOptions} DecodeOptions
  * @typedef {import('../interface').DecodeTokenizer} DecodeTokenizer
  * @typedef {import('../interface').DecodeFunction} DecodeFunction
  */

-const defaultDecodeOptions = {
+const /** @type {DecodeOptions} */defaultDecodeOptions = {
   strict: false,
   allowIndefinite: true,
   allowUndefined: true,
-  allowBigInt: true
+  allowBigInt: true,
+  decodeErrPrefix: 'CBOR decode error:'
 }

 /**
@@ -23,10 +23,11 @@ class Tokeniser {
   * @param {Uint8Array} data
   * @param {DecodeOptions} options
   */
-  constructor (data, options = {}) {
+  constructor (data, options) {
     this._pos = 0
     this.data = data
     this.options = options
+    this.jump = jump(options.decodeErrPrefix)
   }

   pos () {
@@ -41,11 +42,11 @@
     const byt = this.data[this._pos]
     let token = quick[byt]
     if (token === undefined) {
-      const decoder = jump[byt]
+      const decoder = this.jump[byt]
      /* c8 ignore next 4 */
      // if we're here then there's something wrong with our jump or quick lists!
       if (!decoder) {
-        throw new Error(`${decodeErrPrefix} no decoder for major type ${byt >>> 5} (byte 0x${byt.toString(16).padStart(2, '0')})`)
+        throw new Error(`${this.options.decodeErrPrefix} no decoder for major type ${byt >>> 5} (byte 0x${byt.toString(16).padStart(2, '0')})`)
       }
       const minor = byt & 31
       token = decoder(this.data, this._pos, minor, this.options)
@@ -74,10 +75,10 @@ function tokenToArray (token, tokeniser, options) {
         // normal end to indefinite length array
         break
       }
-      throw new Error(`${decodeErrPrefix} got unexpected break to lengthed array`)
+      throw new Error(`${options.decodeErrPrefix} got unexpected break to lengthed array`)
     }
     if (value === DONE) {
-      throw new Error(`${decodeErrPrefix} found array but not enough entries (got ${i}, expected ${token.value})`)
+      throw new Error(`${options.decodeErrPrefix} found array but not enough entries (got ${i}, expected ${token.value})`)
     }
     arr[i] = value
   }
@@ -101,23 +102,23 @@ function tokenToMap (token, tokeniser, options) {
         // normal end to indefinite length map
         break
       }
-      throw new Error(`${decodeErrPrefix} got unexpected break to lengthed map`)
+      throw new Error(`${options.decodeErrPrefix} got unexpected break to lengthed map`)
     }
     if (key === DONE) {
-      throw new Error(`${decodeErrPrefix} found map but not enough entries (got ${i} [no key], expected ${token.value})`)
+      throw new Error(`${options.decodeErrPrefix} found map but not enough entries (got ${i} [no key], expected ${token.value})`)
     }
     if (useMaps !== true && typeof key !== 'string') {
-      throw new Error(`${decodeErrPrefix} non-string keys not supported (got ${typeof key})`)
+      throw new Error(`${options.decodeErrPrefix} non-string keys not supported (got ${typeof key})`)
     }
     if (options.rejectDuplicateMapKeys === true) {
       // @ts-ignore
       if ((useMaps && m.has(key)) || (!useMaps && (key in obj))) {
-        throw new Error(`${decodeErrPrefix} found repeat map key "${key}"`)
+        throw new Error(`${options.decodeErrPrefix} found repeat map key "${key}"`)
       }
     }
     const value = tokensToObject(tokeniser, options)
     if (value === DONE) {
-      throw new Error(`${decodeErrPrefix} found map but not enough entries (got ${i} [no value], expected ${token.value})`)
+      throw new Error(`${options.decodeErrPrefix} found map but not enough entries (got ${i} [no value], expected ${token.value})`)
     }
     if (useMaps) {
       // @ts-ignore TODO reconsider this .. maybe needs to be strict about key types
@@ -145,28 +146,28 @@ function tokensToObject (tokeniser, options) {

   const token = tokeniser.next()

-  if (token.type === Type.break) {
+  if (token.type.equals(Type.break)) {
     return BREAK
   }

   if (token.type.terminal) {
     return token.value
   }

-  if (token.type === Type.array) {
+  if (token.type.equals(Type.array)) {
     return tokenToArray(token, tokeniser, options)
   }

-  if (token.type === Type.map) {
+  if (token.type.equals(Type.map)) {
     return tokenToMap(token, tokeniser, options)
   }

-  if (token.type === Type.tag) {
+  if (token.type.equals(Type.tag)) {
     if (options.tags && typeof options.tags[token.value] === 'function') {
       const tagged = tokensToObject(tokeniser, options)
       return options.tags[token.value](tagged)
     }
-    throw new Error(`${decodeErrPrefix} tag not supported (${token.value})`)
+    throw new Error(`${options.decodeErrPrefix} tag not supported (${token.value})`)
   }
   /* c8 ignore next */
   throw new Error('unsupported')
@@ -178,17 +179,17 @@
  * @returns {[any, Uint8Array]}
  */
 function decodeFirst (data, options) {
+  options = Object.assign({}, defaultDecodeOptions, options)
   if (!(data instanceof Uint8Array)) {
-    throw new Error(`${decodeErrPrefix} data to decode must be a Uint8Array`)
+    throw new Error(`${options.decodeErrPrefix} data to decode must be a Uint8Array`)
   }
-  options = Object.assign({}, defaultDecodeOptions, options)
   const tokeniser = options.tokenizer || new Tokeniser(data, options)
   const decoded = tokensToObject(tokeniser, options)
   if (decoded === DONE) {
-    throw new Error(`${decodeErrPrefix} did not find any content to decode`)
+    throw new Error(`${options.decodeErrPrefix} did not find any content to decode`)
   }
   if (decoded === BREAK) {
-    throw new Error(`${decodeErrPrefix} got unexpected break`)
+    throw new Error(`${options.decodeErrPrefix} got unexpected break`)
   }
   return [decoded, data.subarray(tokeniser.pos())]
 }
@@ -199,9 +200,10 @@ function decodeFirst (data, options) {
  * @returns {any}
  */
 function decode (data, options) {
+  options = Object.assign({}, defaultDecodeOptions, options)
   const [decoded, remainder] = decodeFirst(data, options)
   if (remainder.length > 0) {
-    throw new Error(`${decodeErrPrefix} too many terminals, data makes no sense`)
+    throw new Error(`${options.decodeErrPrefix} too many terminals, data makes no sense:${remainder.length}`)
   }
   return decoded
 }
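Two behavioural notes on the decode.js changes: decodeErrPrefix now travels on the options object (defaulting to 'CBOR decode error:'), and each Tokeniser builds its jump table from it, so jump is a factory taking the prefix rather than a shared array. That makes error wording configurable per call. A usage sketch, assuming the option is honoured as in the hunks above (the input byte and prefix are illustrative):

import { decode } from 'cborg'

const data = new Uint8Array([0x1f]) // major 0 with reserved minor 31: not valid CBOR
try {
  decode(data, { decodeErrPrefix: 'my-app decode error:' })
} catch (err) {
  // the thrown message now begins with the caller-supplied prefix
  // instead of the default 'CBOR decode error:'
  console.error(err.message)
}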
34 changes: 19 additions & 15 deletions lib/diagnostic.js → cborg/diagnostic.js
@@ -1,6 +1,5 @@
 import { Tokeniser } from './decode.js'
-import { toHex, fromHex } from './byte-utils.js'
-import { uintBoundaries } from './0uint.js'
+import { uintBoundaries, toHex, fromHex, Type } from 'cborg/utils'

 const utf8Encoder = new TextEncoder()
 const utf8Decoder = new TextDecoder()
@@ -10,7 +9,11 @@ const utf8Decoder = new TextDecoder()
  * @param {number} [width]
  */
 function * tokensToDiagnostic (inp, width = 100) {
-  const tokeniser = new Tokeniser(inp, { retainStringBytes: true, allowBigInt: true })
+  const tokeniser = new Tokeniser(inp, {
+    decodeErrPrefix: 'Diagnostic decode error: ',
+    retainStringBytes: true,
+    allowBigInt: true
+  })
   let pos = 0
   const indent = []
@@ -31,25 +34,25 @@
     /** @type {string|number} */
     let v = String(token.value)
     let outp = `${margin}${slc(0, 1)}`
-    const str = token.type.name === 'bytes' || token.type.name === 'string'
-    if (token.type.name === 'string') {
+    const str = token.type.equals(Type.bytes) || token.type.equals(Type.string)
+    if (token.type.equals(Type.string)) {
       v = v.length
       vLength -= v
-    } else if (token.type.name === 'bytes') {
+    } else if (token.type.equals(Type.bytes)) {
       v = token.value.length
       // @ts-ignore
       vLength -= v
     }

     let multilen
     switch (token.type.name) {
-      case 'string':
-      case 'bytes':
-      case 'map':
-      case 'array':
+      case Type.string.name:
+      case Type.bytes.name:
+      case Type.map.name:
+      case Type.array.name:
         // for bytes and string, we want to print out the length part of the value prefix if it
         // exists - it exists for short lengths (<24) but does for longer lengths
-        multilen = token.type.name === 'string' ? utf8Encoder.encode(token.value).length : token.value.length
+        multilen = token.type.equals(Type.string) ? utf8Encoder.encode(token.value).length : token.value.length
         if (multilen >= uintBoundaries[0]) {
           if (multilen < uintBoundaries[1]) {
             outp += ` ${slc(1, 1)}`
@@ -71,13 +74,14 @@

   outp = outp.padEnd(width / 2, ' ')
   outp += `# ${margin}${token.type.name}`
+  // there should be a method to get a Type from a String
   if (token.type.name !== v) {
     outp += `(${v})`
   }
   yield outp

   if (str) {
-    let asString = token.type.name === 'string'
+    let asString = token.type.equals(Type.string)
     margin += ' '
     let repr = asString ? utf8Encoder.encode(token.value) : token.value
     if (asString && token.byteValue !== undefined) {
@@ -110,15 +114,15 @@
   }
   if (!token.type.terminal) {
     switch (token.type.name) {
-      case 'map':
+      case Type.map.name:
         indent.push(token.value * 2)
         break
-      case 'array':
+      case Type.array.name:
         indent.push(token.value)
         break
       // TODO: test tags .. somehow
       /* c8 ignore next 5 */
-      case 'tag':
+      case Type.tag.name:
         indent.push(1)
         break
       default:
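The comment added in this file ("there should be a method to get a Type from a String") points at a follow-up: the switch above still keys on token.type.name strings via Type.map.name and friends. A hypothetical sketch of such a lookup — neither typesByName nor typeFromName exists in this PR:

import { Type } from 'cborg/utils'

// Hypothetical: index a subset of the known types by their string name.
const typesByName = new Map(
  [Type.uint, Type.negint, Type.bytes, Type.string, Type.array, Type.map, Type.tag, Type.float, Type.break]
    .map(typ => [typ.name, typ])
)

/** @param {string} name */
function typeFromName (name) {
  const typ = typesByName.get(name)
  if (typ === undefined) {
    throw new Error(`unknown type name: ${name}`)
  }
  return typ
}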
2 changes: 1 addition & 1 deletion lib/diagnostic_test.js → cborg/diagnostic_test.js
@@ -1,5 +1,5 @@
 import { tokensToDiagnostic } from './diagnostic.js'
-import { fromHex } from './byte-utils.js'
+import { fromHex } from 'cborg/utils'

 const inp = `
 a7