diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9b865de66..cb79d4bb9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -47,6 +47,19 @@ jobs: matrix: ${{ needs.matrix.outputs.matrix }} cache_nonce: ${{ needs.matrix.outputs.cache_nonce }} + linting: + runs-on: ubuntu-latest + if: github.event_name == 'pull_request' + steps: + - uses: actions/checkout@v4 + - name: Check `nph` formatting + uses: arnetheduck/nph-action@v1 + with: + version: latest + options: "codex/ tests/" + fail: true + suggest: true + coverage: # Force to stick to ubuntu 20.04 for coverage because # lcov was updated to 2.x version in ubuntu-latest diff --git a/.gitmodules b/.gitmodules index 6842ddea4..ece887495 100644 --- a/.gitmodules +++ b/.gitmodules @@ -218,3 +218,6 @@ [submodule "vendor/nim-zippy"] path = vendor/nim-zippy url = https://github.com/status-im/nim-zippy.git +[submodule "vendor/nph"] + path = vendor/nph + url = https://github.com/arnetheduck/nph.git diff --git a/Makefile b/Makefile index 22cb2b319..3dfe8e7ef 100644 --- a/Makefile +++ b/Makefile @@ -17,6 +17,7 @@ # version pinned by nimbus-build-system. #PINNED_NIM_VERSION := 38640664088251bbc88917b4bacfd86ec53014b8 # 1.6.21 PINNED_NIM_VERSION := v2.0.14 + ifeq ($(NIM_COMMIT),) NIM_COMMIT := $(PINNED_NIM_VERSION) else ifeq ($(NIM_COMMIT),pinned) @@ -199,4 +200,42 @@ ifneq ($(USE_LIBBACKTRACE), 0) + $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT) endif +############ +## Format ## +############ +.PHONY: build-nph install-nph-hook clean-nph print-nph-path + +# Default location for the nph binary is next to the nim binary, to make it available on the PATH. +NPH:=$(shell dirname $(NIM_BINARY))/nph + +build-nph: +ifeq ("$(wildcard $(NPH))","") + $(ENV_SCRIPT) nim c vendor/nph/src/nph.nim && \ + mv vendor/nph/src/nph $(shell dirname $(NPH)) + echo "nph utility is available at " $(NPH) +endif + +GIT_PRE_COMMIT_HOOK := .git/hooks/pre-commit + +install-nph-hook: build-nph +ifeq ("$(wildcard $(GIT_PRE_COMMIT_HOOK))","") + cp ./tools/scripts/git_pre_commit_format.sh $(GIT_PRE_COMMIT_HOOK) +else + echo "$(GIT_PRE_COMMIT_HOOK) already present, will NOT overwrite" + exit 1 +endif + +nph/%: build-nph + echo -e $(FORMAT_MSG) "nph/$*" && \ + $(NPH) $* + +clean-nph: + rm -f $(NPH) + +# To avoid hardcoding nph binary location in several places +print-nph-path: + echo "$(NPH)" + +clean: | clean-nph + endif # "variables.mk" was not included diff --git a/README.md b/README.md index e1fb1e256..c68127752 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,7 @@ Run the client with: ```bash build/codex ``` + ## Configuration It is possible to configure a Codex node in several ways: @@ -51,3 +52,24 @@ To get acquainted with Codex, consider: ## API The client exposes a REST API that can be used to interact with the clients. Overview of the API can be found on [api.codex.storage](https://api.codex.storage). + +## Contributing and development + +Feel free to dive in; contributions are welcome! Open an issue or submit PRs. + +### Linting and formatting + +`nim-codex` uses [nph](https://github.com/arnetheduck/nph) for formatting its code, and adhering to its styling is required. +If you are starting from a fresh setup, run `make build-nph` to build the `nph` binary. +To format files, run `make nph/<file or folder you want to format>`. +If you want, you can install a Git pre-commit hook using `make install-nph-hook`, which will format modified files prior to committing them. 
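+
+For example, a typical formatting workflow using the Makefile targets above might look like this (a sketch; `codex/` and `tests/` mirror the paths the CI formatting check covers):
+
+```bash
+make build-nph         # one-time: builds nph and places it next to the nim binary
+make nph/codex         # format everything under codex/
+make nph/tests         # format everything under tests/
+make install-nph-hook  # optional: format modified files automatically on commit
+```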
+If you are using VSCode and the NimLang extension, you can enable "Format On Save", which will format files using `nph`. \ No newline at end of file diff --git a/codex/blockexchange.nim b/codex/blockexchange.nim index 1c90ae4db..ff33f406d 100644 --- a/codex/blockexchange.nim +++ b/codex/blockexchange.nim @@ -1,10 +1,5 @@ -import ./blockexchange/[ - network, - engine, - peers] +import ./blockexchange/[network, engine, peers] -import ./blockexchange/protobuf/[ - blockexc, - presence] +import ./blockexchange/protobuf/[blockexc, presence] export network, engine, blockexc, presence, peers diff --git a/codex/blockexchange/engine/advertiser.nim b/codex/blockexchange/engine/advertiser.nim index 20baaf582..f5f28bc17 100644 --- a/codex/blockexchange/engine/advertiser.nim +++ b/codex/blockexchange/engine/advertiser.nim @@ -34,20 +34,19 @@ const DefaultConcurrentAdvertRequests = 10 DefaultAdvertiseLoopSleep = 30.minutes -type - Advertiser* = ref object of RootObj - localStore*: BlockStore # Local block store for this instance - discovery*: Discovery # Discovery interface +type Advertiser* = ref object of RootObj + localStore*: BlockStore # Local block store for this instance + discovery*: Discovery # Discovery interface - advertiserRunning*: bool # Indicates if discovery is running - concurrentAdvReqs: int # Concurrent advertise requests + advertiserRunning*: bool # Indicates if discovery is running + concurrentAdvReqs: int # Concurrent advertise requests - advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle - advertiseQueue*: AsyncQueue[Cid] # Advertise queue - trackedFutures*: TrackedFutures # Advertise tasks futures + advertiseLocalStoreLoop*: Future[void] # Advertise loop task handle + advertiseQueue*: AsyncQueue[Cid] # Advertise queue + trackedFutures*: TrackedFutures # Advertise tasks futures - advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep - inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests + advertiseLocalStoreLoopSleep: Duration # Advertise loop sleep + inFlightAdvReqs*: Table[Cid, Future[void]] # Inflight advertise requests proc addCidToQueue(b: Advertiser, cid: Cid) {.async.} = if cid notin b.advertiseQueue: @@ -83,7 +82,6 @@ proc advertiseLocalStoreLoop(b: Advertiser) {.async: (raises: []).} = trace "Advertiser iterating blocks finished." await sleepAsync(b.advertiseLocalStoreLoopSleep) - except CancelledError: break # do not propagate as advertiseLocalStoreLoop was asyncSpawned except CatchableError as e: @@ -94,20 +92,17 @@ proc processQueueLoop(b: Advertiser) {.async: (raises: []).} = while b.advertiserRunning: try: - let - cid = await b.advertiseQueue.get() + let cid = await b.advertiseQueue.get() if cid in b.inFlightAdvReqs: continue try: - let - request = b.discovery.provide(cid) + let request = b.discovery.provide(cid) b.inFlightAdvReqs[cid] = request codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) await request - finally: b.inFlightAdvReqs.del(cid) codex_inflight_advertise.set(b.inFlightAdvReqs.len.int64) @@ -125,7 +120,7 @@ proc start*(b: Advertiser) {.async.} = trace "Advertiser start" - proc onBlock(cid: Cid) {.async.} = + proc onBlock(cid: Cid) {.async.} = await b.advertiseBlock(cid) doAssert(b.localStore.onBlockStored.isNone()) @@ -136,7 +131,7 @@ proc start*(b: Advertiser) {.async.} = return b.advertiserRunning = true - for i in 0.. 
0: peerCtx.cleanPresence(dontWantCids) - let - wantCids = wantList.filterIt( - it in peerHave - ) + let wantCids = wantList.filterIt(it in peerHave) if wantCids.len > 0: trace "Peer has blocks in our wantList", peer, wants = wantCids @@ -246,13 +242,12 @@ proc blockPresenceHandler*( # if none of the connected peers report our wants in their have list, # fire up discovery b.discovery.queueFindBlocksReq( - toSeq(b.pendingBlocks.wantListCids) - .filter do(cid: Cid) -> bool: - not b.peers.anyIt( cid in it.peerHaveCids )) + toSeq(b.pendingBlocks.wantListCids).filter do(cid: Cid) -> bool: + not b.peers.anyIt(cid in it.peerHaveCids) + ) proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.async.} = - let - cids = blocksDelivery.mapIt( it.blk.cid ) + let cids = blocksDelivery.mapIt(it.blk.cid) # schedule any new peers to provide blocks to for p in b.peers: @@ -270,14 +265,16 @@ proc scheduleTasks(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.asyn proc cancelBlocks(b: BlockExcEngine, addrs: seq[BlockAddress]) {.async.} = ## Tells neighboring peers that we're no longer interested in a block. - trace "Sending block request cancellations to peers", addrs, peers = b.peers.mapIt($it.id) + trace "Sending block request cancellations to peers", + addrs, peers = b.peers.mapIt($it.id) - let failed = (await allFinished( - b.peers.mapIt( - b.network.request.sendWantCancellations( - peer = it.id, - addresses = addrs)))) - .filterIt(it.failed) + let failed = ( + await allFinished( + b.peers.mapIt( + b.network.request.sendWantCancellations(peer = it.id, addresses = addrs) + ) + ) + ).filterIt(it.failed) if failed.len > 0: warn "Failed to send block request cancellations to peers", peers = failed.len @@ -290,12 +287,13 @@ proc resolveBlocks*(b: BlockExcEngine, blocksDelivery: seq[BlockDelivery]) {.asy proc resolveBlocks*(b: BlockExcEngine, blocks: seq[Block]) {.async.} = await b.resolveBlocks( blocks.mapIt( - BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid) - ))) + BlockDelivery(blk: it, address: BlockAddress(leaf: false, cid: it.cid)) + ) + ) -proc payForBlocks(engine: BlockExcEngine, - peer: BlockExcPeerCtx, - blocksDelivery: seq[BlockDelivery]) {.async.} = +proc payForBlocks( + engine: BlockExcEngine, peer: BlockExcPeerCtx, blocksDelivery: seq[BlockDelivery] +) {.async.} = let sendPayment = engine.network.request.sendPayment price = peer.price(blocksDelivery.mapIt(it.address)) @@ -304,9 +302,7 @@ proc payForBlocks(engine: BlockExcEngine, trace "Sending payment for blocks", price, len = blocksDelivery.len await sendPayment(peer.id, payment) -proc validateBlockDelivery( - b: BlockExcEngine, - bd: BlockDelivery): ?!void = +proc validateBlockDelivery(b: BlockExcEngine, bd: BlockDelivery): ?!void = if bd.address notin b.pendingBlocks: return failure("Received block is not currently a pending block") @@ -315,33 +311,36 @@ proc validateBlockDelivery( return failure("Missing proof") if proof.index != bd.address.index: - return failure("Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index) + return failure( + "Proof index " & $proof.index & " doesn't match leaf index " & $bd.address.index + ) without leaf =? bd.blk.cid.mhash.mapFailure, err: return failure("Unable to get mhash from cid for block, nested err: " & err.msg) without treeRoot =? 
bd.address.treeCid.mhash.mapFailure, err: - return failure("Unable to get mhash from treeCid for block, nested err: " & err.msg) + return + failure("Unable to get mhash from treeCid for block, nested err: " & err.msg) if err =? proof.verify(leaf, treeRoot).errorOption: return failure("Unable to verify proof for block, nested err: " & err.msg) - else: # not leaf if bd.address.cid != bd.blk.cid: - return failure("Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid) + return failure( + "Delivery cid " & $bd.address.cid & " doesn't match block cid " & $bd.blk.cid + ) return success() proc blocksDeliveryHandler*( - b: BlockExcEngine, - peer: PeerId, - blocksDelivery: seq[BlockDelivery]) {.async.} = + b: BlockExcEngine, peer: PeerId, blocksDelivery: seq[BlockDelivery] +) {.async.} = trace "Received blocks from peer", peer, blocks = (blocksDelivery.mapIt(it.address)) var validatedBlocksDelivery: seq[BlockDelivery] for bd in blocksDelivery: logScope: - peer = peer + peer = peer address = bd.address if err =? b.validateBlockDelivery(bd).errorOption: @@ -356,12 +355,11 @@ proc blocksDeliveryHandler*( without proof =? bd.proof: error "Proof expected for a leaf block delivery" continue - if err =? (await b.localStore.putCidAndProof( - bd.address.treeCid, - bd.address.index, - bd.blk.cid, - proof)).errorOption: - + if err =? ( + await b.localStore.putCidAndProof( + bd.address.treeCid, bd.address.index, bd.blk.cid, proof + ) + ).errorOption: error "Unable to store proof and cid for a block" continue @@ -370,20 +368,15 @@ proc blocksDeliveryHandler*( await b.resolveBlocks(validatedBlocksDelivery) codex_block_exchange_blocks_received.inc(validatedBlocksDelivery.len.int64) - let - peerCtx = b.peers.get(peer) + let peerCtx = b.peers.get(peer) if peerCtx != nil: await b.payForBlocks(peerCtx, blocksDelivery) ## shouldn't we remove them from the want-list instead of this: - peerCtx.cleanPresence(blocksDelivery.mapIt( it.address )) + peerCtx.cleanPresence(blocksDelivery.mapIt(it.address)) -proc wantListHandler*( - b: BlockExcEngine, - peer: PeerId, - wantList: WantList) {.async.} = - let - peerCtx = b.peers.get(peer) +proc wantListHandler*(b: BlockExcEngine, peer: PeerId, wantList: WantList) {.async.} = + let peerCtx = b.peers.get(peer) if peerCtx.isNil: return @@ -393,35 +386,32 @@ proc wantListHandler*( schedulePeer = false for e in wantList.entries: - let - idx = peerCtx.peerWants.findIt(it.address == e.address) + let idx = peerCtx.peerWants.findIt(it.address == e.address) logScope: - peer = peerCtx.id - address = e.address - wantType = $e.wantType + peer = peerCtx.id + address = e.address + wantType = $e.wantType if idx < 0: # Adding new entry to peer wants let have = await e.address in b.localStore - price = @( - b.pricing.get(Pricing(price: 0.u256)) - .price.toBytesBE) + price = @(b.pricing.get(Pricing(price: 0.u256)).price.toBytesBE) if e.wantType == WantType.WantHave: if have: presence.add( BlockPresence( - address: e.address, - `type`: BlockPresenceType.Have, - price: price)) + address: e.address, `type`: BlockPresenceType.Have, price: price + ) + ) else: if e.sendDontHave: presence.add( BlockPresence( - address: e.address, - `type`: BlockPresenceType.DontHave, - price: price)) + address: e.address, `type`: BlockPresenceType.DontHave, price: price + ) + ) peerCtx.peerWants.add(e) codex_block_exchange_want_have_lists_received.inc() @@ -446,31 +436,24 @@ proc wantListHandler*( if not b.scheduleTask(peerCtx): warn "Unable to schedule task for peer", peer -proc accountHandler*( - 
engine: BlockExcEngine, - peer: PeerId, - account: Account) {.async.} = - let - context = engine.peers.get(peer) +proc accountHandler*(engine: BlockExcEngine, peer: PeerId, account: Account) {.async.} = + let context = engine.peers.get(peer) if context.isNil: return context.account = account.some proc paymentHandler*( - engine: BlockExcEngine, - peer: PeerId, - payment: SignedState) {.async.} = + engine: BlockExcEngine, peer: PeerId, payment: SignedState +) {.async.} = trace "Handling payments", peer - without context =? engine.peers.get(peer).option and - account =? context.account: + without context =? engine.peers.get(peer).option and account =? context.account: trace "No context or account for peer", peer return if channel =? context.paymentChannel: - let - sender = account.address + let sender = account.address discard engine.wallet.acceptPayment(channel, Asset, sender, payment) else: context.paymentChannel = engine.wallet.acceptChannel(payment).option @@ -484,19 +467,16 @@ proc setupPeer*(b: BlockExcEngine, peer: PeerId) {.async.} = if peer notin b.peers: trace "Setting up new peer", peer - b.peers.add(BlockExcPeerCtx( - id: peer - )) + b.peers.add(BlockExcPeerCtx(id: peer)) trace "Added peer", peers = b.peers.len # broadcast our want list, the other peer will do the same if b.pendingBlocks.wantListLen > 0: trace "Sending our want list to a peer", peer let cids = toSeq(b.pendingBlocks.wantList) - await b.network.request.sendWantList( - peer, cids, full = true) + await b.network.request.sendWantList(peer, cids, full = true) - if address =? b.pricing.?address: + if address =? b.pricing .? address: await b.network.request.sendAccount(peer, Account(address: address)) proc dropPeer*(b: BlockExcEngine, peer: PeerId) = @@ -515,10 +495,8 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = # TODO: There should be all sorts of accounting of # bytes sent/received here - var - wantsBlocks = task.peerWants.filterIt( - it.wantType == WantType.WantBlock and not it.inFlight - ) + var wantsBlocks = + task.peerWants.filterIt(it.wantType == WantType.WantBlock and not it.inFlight) proc updateInFlight(addresses: seq[BlockAddress], inFlight: bool) = for peerWant in task.peerWants.mitems: @@ -535,18 +513,20 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = if e.address.leaf: (await b.localStore.getBlockAndProof(e.address.treeCid, e.address.index)).map( (blkAndProof: (Block, CodexProof)) => - BlockDelivery(address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some) + BlockDelivery( + address: e.address, blk: blkAndProof[0], proof: blkAndProof[1].some + ) ) else: (await b.localStore.getBlock(e.address)).map( - (blk: Block) => BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none) + (blk: Block) => + BlockDelivery(address: e.address, blk: blk, proof: CodexProof.none) ) let blocksDeliveryFut = await allFinished(wantsBlocks.map(localLookup)) - blocksDelivery = blocksDeliveryFut - .filterIt(it.completed and it.read.isOk) - .mapIt(it.read.get) + blocksDelivery = + blocksDeliveryFut.filterIt(it.completed and it.read.isOk).mapIt(it.read.get) # All the wants that failed local lookup must be set to not-in-flight again. 
let @@ -555,11 +535,9 @@ proc taskHandler*(b: BlockExcEngine, task: BlockExcPeerCtx) {.gcsafe, async.} = updateInFlight(failedAddresses, false) if blocksDelivery.len > 0: - trace "Sending blocks to peer", peer = task.id, blocks = (blocksDelivery.mapIt(it.address)) - await b.network.request.sendBlocksDelivery( - task.id, - blocksDelivery - ) + trace "Sending blocks to peer", + peer = task.id, blocks = (blocksDelivery.mapIt(it.address)) + await b.network.request.sendBlocksDelivery(task.id, blocksDelivery) codex_block_exchange_blocks_sent.inc(blocksDelivery.len.int64) @@ -572,8 +550,7 @@ proc blockexcTaskRunner(b: BlockExcEngine) {.async: (raises: []).} = trace "Starting blockexc task runner" while b.blockexcRunning: try: - let - peerCtx = await b.taskQueue.pop() + let peerCtx = await b.taskQueue.pop() await b.taskHandler(peerCtx) except CancelledError: @@ -599,20 +576,20 @@ proc new*( ## Create new block exchange engine instance ## - let - engine = BlockExcEngine( - localStore: localStore, - peers: peerStore, - pendingBlocks: pendingBlocks, - peersPerRequest: peersPerRequest, - network: network, - wallet: wallet, - concurrentTasks: concurrentTasks, - trackedFutures: TrackedFutures.new(), - taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize), - discovery: discovery, - advertiser: advertiser, - blockFetchTimeout: blockFetchTimeout) + let engine = BlockExcEngine( + localStore: localStore, + peers: peerStore, + pendingBlocks: pendingBlocks, + peersPerRequest: peersPerRequest, + network: network, + wallet: wallet, + concurrentTasks: concurrentTasks, + trackedFutures: TrackedFutures.new(), + taskQueue: newAsyncHeapQueue[BlockExcPeerCtx](DefaultTaskQueueSize), + discovery: discovery, + advertiser: advertiser, + blockFetchTimeout: blockFetchTimeout, + ) proc peerEventHandler(peerId: PeerId, event: PeerEvent) {.async.} = if event.kind == PeerEventKind.Joined: @@ -624,19 +601,17 @@ proc new*( network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Joined) network.switch.addPeerEventHandler(peerEventHandler, PeerEventKind.Left) - proc blockWantListHandler( - peer: PeerId, - wantList: WantList): Future[void] {.gcsafe.} = + proc blockWantListHandler(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} = engine.wantListHandler(peer, wantList) proc blockPresenceHandler( - peer: PeerId, - presence: seq[BlockPresence]): Future[void] {.gcsafe.} = + peer: PeerId, presence: seq[BlockPresence] + ): Future[void] {.gcsafe.} = engine.blockPresenceHandler(peer, presence) proc blocksDeliveryHandler( - peer: PeerId, - blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} = + peer: PeerId, blocksDelivery: seq[BlockDelivery] + ): Future[void] {.gcsafe.} = engine.blocksDeliveryHandler(peer, blocksDelivery) proc accountHandler(peer: PeerId, account: Account): Future[void] {.gcsafe.} = @@ -650,6 +625,7 @@ proc new*( onBlocksDelivery: blocksDeliveryHandler, onPresence: blockPresenceHandler, onAccount: accountHandler, - onPayment: paymentHandler) + onPayment: paymentHandler, + ) return engine diff --git a/codex/blockexchange/engine/payments.nim b/codex/blockexchange/engine/payments.nim index 35d38e291..88953976f 100644 --- a/codex/blockexchange/engine/payments.nim +++ b/codex/blockexchange/engine/payments.nim @@ -15,15 +15,16 @@ import ../peers export nitro export results -push: {.upraises: [].} +push: + {.upraises: [].} const ChainId* = 0.u256 # invalid chain id for now const Asset* = EthAddress.zero # invalid ERC20 asset address for now -const AmountPerChannel = (10'u64^18).u256 # 1 
asset, ERC20 default is 18 decimals +const AmountPerChannel = (10'u64 ^ 18).u256 # 1 asset, ERC20 default is 18 decimals -func openLedgerChannel*(wallet: WalletRef, - hub: EthAddress, - asset: EthAddress): ?!ChannelId = +func openLedgerChannel*( + wallet: WalletRef, hub: EthAddress, asset: EthAddress +): ?!ChannelId = wallet.openLedgerChannel(hub, ChainId, asset, AmountPerChannel) func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId = @@ -36,9 +37,7 @@ func getOrOpenChannel(wallet: WalletRef, peer: BlockExcPeerCtx): ?!ChannelId = else: failure "no account set for peer" -func pay*(wallet: WalletRef, - peer: BlockExcPeerCtx, - amount: UInt256): ?!SignedState = +func pay*(wallet: WalletRef, peer: BlockExcPeerCtx, amount: UInt256): ?!SignedState = if account =? peer.account: let asset = Asset let receiver = account.address diff --git a/codex/blockexchange/engine/pendingblocks.nim b/codex/blockexchange/engine/pendingblocks.nim index 9c5efc0b9..3b69e2d2a 100644 --- a/codex/blockexchange/engine/pendingblocks.nim +++ b/codex/blockexchange/engine/pendingblocks.nim @@ -12,7 +12,8 @@ import std/monotimes import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import pkg/libp2p @@ -25,11 +26,15 @@ import ../../logutils logScope: topics = "codex pendingblocks" -declareGauge(codex_block_exchange_pending_block_requests, "codex blockexchange pending block requests") -declareGauge(codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us") +declareGauge( + codex_block_exchange_pending_block_requests, + "codex blockexchange pending block requests", +) +declareGauge( + codex_block_exchange_retrieval_time_us, "codex blockexchange block retrieval time us" +) -const - DefaultBlockTimeout* = 10.minutes +const DefaultBlockTimeout* = 10.minutes type BlockReq* = object @@ -44,10 +49,11 @@ proc updatePendingBlockGauge(p: PendingBlocksManager) = codex_block_exchange_pending_block_requests.set(p.blocks.len.int64) proc getWantHandle*( - p: PendingBlocksManager, - address: BlockAddress, - timeout = DefaultBlockTimeout, - inFlight = false): Future[Block] {.async.} = + p: PendingBlocksManager, + address: BlockAddress, + timeout = DefaultBlockTimeout, + inFlight = false, +): Future[Block] {.async.} = ## Add an event for a block ## @@ -56,7 +62,8 @@ proc getWantHandle*( p.blocks[address] = BlockReq( handle: newFuture[Block]("pendingBlocks.getWantHandle"), inFlight: inFlight, - startTime: getMonoTime().ticks) + startTime: getMonoTime().ticks, + ) p.updatePendingBlockGauge() return await p.blocks[address].handle.wait(timeout) @@ -72,15 +79,13 @@ proc getWantHandle*( p.updatePendingBlockGauge() proc getWantHandle*( - p: PendingBlocksManager, - cid: Cid, - timeout = DefaultBlockTimeout, - inFlight = false): Future[Block] = + p: PendingBlocksManager, cid: Cid, timeout = DefaultBlockTimeout, inFlight = false +): Future[Block] = p.getWantHandle(BlockAddress.init(cid), timeout, inFlight) proc resolve*( - p: PendingBlocksManager, - blocksDelivery: seq[BlockDelivery]) {.gcsafe, raises: [].} = + p: PendingBlocksManager, blocksDelivery: seq[BlockDelivery] +) {.gcsafe, raises: [].} = ## Resolve pending blocks ## @@ -101,19 +106,14 @@ proc resolve*( else: trace "Block handle already finished", address = bd.address -proc setInFlight*( - p: PendingBlocksManager, - address: BlockAddress, - inFlight = true) = +proc setInFlight*(p: PendingBlocksManager, address: BlockAddress, inFlight = true) = ## Set inflight status for a block ## p.blocks.withValue(address, 
pending): pending[].inFlight = inFlight -proc isInFlight*( - p: PendingBlocksManager, - address: BlockAddress): bool = +proc isInFlight*(p: PendingBlocksManager, address: BlockAddress): bool = ## Check if a block is in flight ## diff --git a/codex/blockexchange/network/network.nim b/codex/blockexchange/network/network.nim index b6195473c..ecb728901 100644 --- a/codex/blockexchange/network/network.nim +++ b/codex/blockexchange/network/network.nim @@ -35,8 +35,10 @@ const type WantListHandler* = proc(peer: PeerId, wantList: WantList): Future[void] {.gcsafe.} - BlocksDeliveryHandler* = proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.} - BlockPresenceHandler* = proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.} + BlocksDeliveryHandler* = + proc(peer: PeerId, blocks: seq[BlockDelivery]): Future[void] {.gcsafe.} + BlockPresenceHandler* = + proc(peer: PeerId, precense: seq[BlockPresence]): Future[void] {.gcsafe.} AccountHandler* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} PaymentHandler* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} @@ -54,10 +56,14 @@ type cancel: bool = false, wantType: WantType = WantType.WantHave, full: bool = false, - sendDontHave: bool = false): Future[void] {.gcsafe.} - WantCancellationSender* = proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} - BlocksDeliverySender* = proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} - PresenceSender* = proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} + sendDontHave: bool = false, + ): Future[void] {.gcsafe.} + WantCancellationSender* = + proc(peer: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} + BlocksDeliverySender* = + proc(peer: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} + PresenceSender* = + proc(peer: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} AccountSender* = proc(peer: PeerId, account: Account): Future[void] {.gcsafe.} PaymentSender* = proc(peer: PeerId, payment: SignedState): Future[void] {.gcsafe.} @@ -108,10 +114,7 @@ proc send*(b: BlockExcNetwork, id: PeerId, msg: pb.Message) {.async.} = finally: b.inflightSema.release() -proc handleWantList( - b: BlockExcNetwork, - peer: NetworkPeer, - list: WantList) {.async.} = +proc handleWantList(b: BlockExcNetwork, peer: NetworkPeer, list: WantList) {.async.} = ## Handle incoming want list ## @@ -119,14 +122,15 @@ proc handleWantList( await b.handlers.onWantList(peer.id, list) proc sendWantList*( - b: BlockExcNetwork, - id: PeerId, - addresses: seq[BlockAddress], - priority: int32 = 0, - cancel: bool = false, - wantType: WantType = WantType.WantHave, - full: bool = false, - sendDontHave: bool = false): Future[void] = + b: BlockExcNetwork, + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, +): Future[void] = ## Send a want message to peer ## @@ -137,43 +141,41 @@ proc sendWantList*( priority: priority, cancel: cancel, wantType: wantType, - sendDontHave: sendDontHave) ), - full: full) + sendDontHave: sendDontHave, + ) + ), + full: full, + ) b.send(id, Message(wantlist: msg)) proc sendWantCancellations*( - b: BlockExcNetwork, - id: PeerId, - addresses: seq[BlockAddress]): Future[void] {.async.} = + b: BlockExcNetwork, id: PeerId, addresses: seq[BlockAddress] +): Future[void] {.async.} = ## Informs a remote peer that we're no longer 
interested in a set of blocks ## await b.sendWantList(id = id, addresses = addresses, cancel = true) proc handleBlocksDelivery( - b: BlockExcNetwork, - peer: NetworkPeer, - blocksDelivery: seq[BlockDelivery]) {.async.} = + b: BlockExcNetwork, peer: NetworkPeer, blocksDelivery: seq[BlockDelivery] +) {.async.} = ## Handle incoming blocks ## if not b.handlers.onBlocksDelivery.isNil: await b.handlers.onBlocksDelivery(peer.id, blocksDelivery) - proc sendBlocksDelivery*( - b: BlockExcNetwork, - id: PeerId, - blocksDelivery: seq[BlockDelivery]): Future[void] = + b: BlockExcNetwork, id: PeerId, blocksDelivery: seq[BlockDelivery] +): Future[void] = ## Send blocks to remote ## b.send(id, pb.Message(payload: blocksDelivery)) proc handleBlockPresence( - b: BlockExcNetwork, - peer: NetworkPeer, - presence: seq[BlockPresence]) {.async.} = + b: BlockExcNetwork, peer: NetworkPeer, presence: seq[BlockPresence] +) {.async.} = ## Handle block presence ## @@ -181,56 +183,44 @@ proc handleBlockPresence( await b.handlers.onPresence(peer.id, presence) proc sendBlockPresence*( - b: BlockExcNetwork, - id: PeerId, - presence: seq[BlockPresence]): Future[void] = + b: BlockExcNetwork, id: PeerId, presence: seq[BlockPresence] +): Future[void] = ## Send presence to remote ## b.send(id, Message(blockPresences: @presence)) proc handleAccount( - network: BlockExcNetwork, - peer: NetworkPeer, - account: Account) {.async.} = + network: BlockExcNetwork, peer: NetworkPeer, account: Account +) {.async.} = ## Handle account info ## if not network.handlers.onAccount.isNil: await network.handlers.onAccount(peer.id, account) -proc sendAccount*( - b: BlockExcNetwork, - id: PeerId, - account: Account): Future[void] = +proc sendAccount*(b: BlockExcNetwork, id: PeerId, account: Account): Future[void] = ## Send account info to remote ## b.send(id, Message(account: AccountMessage.init(account))) -proc sendPayment*( - b: BlockExcNetwork, - id: PeerId, - payment: SignedState): Future[void] = +proc sendPayment*(b: BlockExcNetwork, id: PeerId, payment: SignedState): Future[void] = ## Send payment to remote ## b.send(id, Message(payment: StateChannelUpdate.init(payment))) proc handlePayment( - network: BlockExcNetwork, - peer: NetworkPeer, - payment: SignedState) {.async.} = + network: BlockExcNetwork, peer: NetworkPeer, payment: SignedState +) {.async.} = ## Handle payment ## if not network.handlers.onPayment.isNil: await network.handlers.onPayment(peer.id, payment) -proc rpcHandler( - b: BlockExcNetwork, - peer: NetworkPeer, - msg: Message) {.raises: [].} = +proc rpcHandler(b: BlockExcNetwork, peer: NetworkPeer, msg: Message) {.raises: [].} = ## handle rpc messages ## if msg.wantList.entries.len > 0: @@ -266,7 +256,7 @@ proc getOrCreatePeer(b: BlockExcNetwork, peer: PeerId): NetworkPeer = if not isNil(b.getConn): getConn = b.getConn - let rpcHandler = proc (p: NetworkPeer, msg: Message) {.async.} = + let rpcHandler = proc(p: NetworkPeer, msg: Message) {.async.} = b.rpcHandler(p, msg) # create new pubsub peer @@ -316,41 +306,43 @@ method init*(b: BlockExcNetwork) = proc handle(conn: Connection, proto: string) {.async, gcsafe, closure.} = let peerId = conn.peerId let blockexcPeer = b.getOrCreatePeer(peerId) - await blockexcPeer.readLoop(conn) # attach read loop + await blockexcPeer.readLoop(conn) # attach read loop b.handler = handle b.codec = Codec proc new*( - T: type BlockExcNetwork, - switch: Switch, - connProvider: ConnProvider = nil, - maxInflight = MaxInflight): BlockExcNetwork = + T: type BlockExcNetwork, + switch: Switch, + 
connProvider: ConnProvider = nil, + maxInflight = MaxInflight, +): BlockExcNetwork = ## Create a new BlockExcNetwork instance ## - let - self = BlockExcNetwork( - switch: switch, - getConn: connProvider, - inflightSema: newAsyncSemaphore(maxInflight)) + let self = BlockExcNetwork( + switch: switch, getConn: connProvider, inflightSema: newAsyncSemaphore(maxInflight) + ) proc sendWantList( - id: PeerId, - cids: seq[BlockAddress], - priority: int32 = 0, - cancel: bool = false, - wantType: WantType = WantType.WantHave, - full: bool = false, - sendDontHave: bool = false): Future[void] {.gcsafe.} = - self.sendWantList( - id, cids, priority, cancel, - wantType, full, sendDontHave) - - proc sendWantCancellations(id: PeerId, addresses: seq[BlockAddress]): Future[void] {.gcsafe.} = + id: PeerId, + cids: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ): Future[void] {.gcsafe.} = + self.sendWantList(id, cids, priority, cancel, wantType, full, sendDontHave) + + proc sendWantCancellations( + id: PeerId, addresses: seq[BlockAddress] + ): Future[void] {.gcsafe.} = self.sendWantCancellations(id, addresses) - proc sendBlocksDelivery(id: PeerId, blocksDelivery: seq[BlockDelivery]): Future[void] {.gcsafe.} = + proc sendBlocksDelivery( + id: PeerId, blocksDelivery: seq[BlockDelivery] + ): Future[void] {.gcsafe.} = self.sendBlocksDelivery(id, blocksDelivery) proc sendPresence(id: PeerId, presence: seq[BlockPresence]): Future[void] {.gcsafe.} = @@ -368,7 +360,8 @@ proc new*( sendBlocksDelivery: sendBlocksDelivery, sendPresence: sendPresence, sendAccount: sendAccount, - sendPayment: sendPayment) + sendPayment: sendPayment, + ) self.init() return self diff --git a/codex/blockexchange/network/networkpeer.nim b/codex/blockexchange/network/networkpeer.nim index 133d8c7ca..90c538ea0 100644 --- a/codex/blockexchange/network/networkpeer.nim +++ b/codex/blockexchange/network/networkpeer.nim @@ -8,7 +8,8 @@ ## those terms. 
import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import pkg/libp2p @@ -33,8 +34,7 @@ type getConn: ConnProvider proc connected*(b: NetworkPeer): bool = - not(isNil(b.sendConn)) and - not(b.sendConn.closed or b.sendConn.atEof) + not (isNil(b.sendConn)) and not (b.sendConn.closed or b.sendConn.atEof) proc readLoop*(b: NetworkPeer, conn: Connection) {.async.} = if isNil(conn): @@ -80,15 +80,11 @@ proc broadcast*(b: NetworkPeer, msg: Message) = asyncSpawn sendAwaiter() func new*( - T: type NetworkPeer, - peer: PeerId, - connProvider: ConnProvider, - rpcHandler: RPCHandler): NetworkPeer = - - doAssert(not isNil(connProvider), - "should supply connection provider") - - NetworkPeer( - id: peer, - getConn: connProvider, - handler: rpcHandler) + T: type NetworkPeer, + peer: PeerId, + connProvider: ConnProvider, + rpcHandler: RPCHandler, +): NetworkPeer = + doAssert(not isNil(connProvider), "should supply connection provider") + + NetworkPeer(id: peer, getConn: connProvider, handler: rpcHandler) diff --git a/codex/blockexchange/peers/peercontext.nim b/codex/blockexchange/peers/peercontext.nim index 727676de8..7a299b6b3 100644 --- a/codex/blockexchange/peers/peercontext.nim +++ b/codex/blockexchange/peers/peercontext.nim @@ -25,15 +25,14 @@ import ../../logutils export payments, nitro -type - BlockExcPeerCtx* = ref object of RootObj - id*: PeerId - blocks*: Table[BlockAddress, Presence] # remote peer have list including price - peerWants*: seq[WantListEntry] # remote peers want lists - exchanged*: int # times peer has exchanged with us - lastExchange*: Moment # last time peer has exchanged with us - account*: ?Account # ethereum account of this peer - paymentChannel*: ?ChannelId # payment channel id +type BlockExcPeerCtx* = ref object of RootObj + id*: PeerId + blocks*: Table[BlockAddress, Presence] # remote peer have list including price + peerWants*: seq[WantListEntry] # remote peers want lists + exchanged*: int # times peer has exchanged with us + lastExchange*: Moment # last time peer has exchanged with us + account*: ?Account # ethereum account of this peer + paymentChannel*: ?ChannelId # payment channel id proc peerHave*(self: BlockExcPeerCtx): seq[BlockAddress] = toSeq(self.blocks.keys) diff --git a/codex/blockexchange/peers/peerctxstore.nim b/codex/blockexchange/peers/peerctxstore.nim index 4b65d8491..7cf167b4e 100644 --- a/codex/blockexchange/peers/peerctxstore.nim +++ b/codex/blockexchange/peers/peerctxstore.nim @@ -13,7 +13,8 @@ import std/algorithm import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import pkg/libp2p @@ -22,7 +23,6 @@ import ../protobuf/blockexc import ../../blocktype import ../../logutils - import ./peercontext export peercontext @@ -32,6 +32,7 @@ logScope: type PeerCtxStore* = ref object of RootObj peers*: OrderedTable[PeerId, BlockExcPeerCtx] + PeersForBlock* = object of RootObj with*: seq[BlockExcPeerCtx] without*: seq[BlockExcPeerCtx] @@ -44,7 +45,7 @@ proc contains*(a: openArray[BlockExcPeerCtx], b: PeerId): bool = ## Convenience method to check for peer precense ## - a.anyIt( it.id == b ) + a.anyIt(it.id == b) func contains*(self: PeerCtxStore, peerId: PeerId): bool = peerId in self.peers @@ -62,21 +63,21 @@ func len*(self: PeerCtxStore): int = self.peers.len func peersHave*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it == address ) ) + toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it == address)) func 
peersHave*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerHave.anyIt( it.cidOrTreeCid == cid ) ) + toSeq(self.peers.values).filterIt(it.peerHave.anyIt(it.cidOrTreeCid == cid)) func peersWant*(self: PeerCtxStore, address: BlockAddress): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it == address ) ) + toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it == address)) func peersWant*(self: PeerCtxStore, cid: Cid): seq[BlockExcPeerCtx] = - toSeq(self.peers.values).filterIt( it.peerWants.anyIt( it.address.cidOrTreeCid == cid ) ) + toSeq(self.peers.values).filterIt(it.peerWants.anyIt(it.address.cidOrTreeCid == cid)) proc getPeersForBlock*(self: PeerCtxStore, address: BlockAddress): PeersForBlock = var res = PeersForBlock() for peer in self: - if peer.peerHave.anyIt( it == address ): + if peer.peerHave.anyIt(it == address): res.with.add(peer) else: res.without.add(peer) diff --git a/codex/blockexchange/protobuf/blockexc.nim b/codex/blockexchange/protobuf/blockexc.nim index 120498537..698686810 100644 --- a/codex/blockexchange/protobuf/blockexc.nim +++ b/codex/blockexchange/protobuf/blockexc.nim @@ -42,7 +42,6 @@ proc `==`*(a: WantListEntry, b: BlockAddress): bool = proc `<`*(a, b: WantListEntry): bool = a.priority < b.priority - proc `==`*(a: BlockPresence, b: BlockAddress): bool = return a.address == b diff --git a/codex/blockexchange/protobuf/message.nim b/codex/blockexchange/protobuf/message.nim index 61488b405..73cb60f1b 100644 --- a/codex/blockexchange/protobuf/message.nim +++ b/codex/blockexchange/protobuf/message.nim @@ -20,40 +20,40 @@ const type WantType* = enum - WantBlock = 0, + WantBlock = 0 WantHave = 1 WantListEntry* = object address*: BlockAddress - priority*: int32 # The priority (normalized). default to 1 - cancel*: bool # Whether this revokes an entry - wantType*: WantType # Note: defaults to enum 0, ie Block - sendDontHave*: bool # Note: defaults to false - inFlight*: bool # Whether block sending is in progress. Not serialized. + priority*: int32 # The priority (normalized). default to 1 + cancel*: bool # Whether this revokes an entry + wantType*: WantType # Note: defaults to enum 0, ie Block + sendDontHave*: bool # Note: defaults to false + inFlight*: bool # Whether block sending is in progress. Not serialized. WantList* = object - entries*: seq[WantListEntry] # A list of wantList entries - full*: bool # Whether this is the full wantList. default to false + entries*: seq[WantListEntry] # A list of wantList entries + full*: bool # Whether this is the full wantList. 
default to false BlockDelivery* = object blk*: Block address*: BlockAddress - proof*: ?CodexProof # Present only if `address.leaf` is true + proof*: ?CodexProof # Present only if `address.leaf` is true BlockPresenceType* = enum - Have = 0, + Have = 0 DontHave = 1 BlockPresence* = object address*: BlockAddress `type`*: BlockPresenceType - price*: seq[byte] # Amount of assets to pay for the block (UInt256) + price*: seq[byte] # Amount of assets to pay for the block (UInt256) AccountMessage* = object - address*: seq[byte] # Ethereum address to which payments should be made + address*: seq[byte] # Ethereum address to which payments should be made StateChannelUpdate* = object - update*: seq[byte] # Signed Nitro state, serialized as JSON + update*: seq[byte] # Signed Nitro state, serialized as JSON Message* = object wantList*: WantList @@ -140,7 +140,6 @@ proc protobufEncode*(value: Message): seq[byte] = ipb.finish() ipb.buffer - # # Decoding Message from seq[byte] in Protobuf format # @@ -151,22 +150,22 @@ proc decode*(_: type BlockAddress, pb: ProtoBuffer): ProtoResult[BlockAddress] = field: uint64 cidBuf = newSeq[byte]() - if ? pb.getField(1, field): + if ?pb.getField(1, field): leaf = bool(field) if leaf: var treeCid: Cid index: Natural - if ? pb.getField(2, cidBuf): - treeCid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) - if ? pb.getField(3, field): + if ?pb.getField(2, cidBuf): + treeCid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(3, field): index = field value = BlockAddress(leaf: true, treeCid: treeCid, index: index) else: var cid: Cid - if ? pb.getField(4, cidBuf): - cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(4, cidBuf): + cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) value = BlockAddress(leaf: false, cid: cid) ok(value) @@ -176,15 +175,15 @@ proc decode*(_: type WantListEntry, pb: ProtoBuffer): ProtoResult[WantListEntry] value = WantListEntry() field: uint64 ipb: ProtoBuffer - if ? pb.getField(1, ipb): - value.address = ? BlockAddress.decode(ipb) - if ? pb.getField(2, field): + if ?pb.getField(1, ipb): + value.address = ?BlockAddress.decode(ipb) + if ?pb.getField(2, field): value.priority = int32(field) - if ? pb.getField(3, field): + if ?pb.getField(3, field): value.cancel = bool(field) - if ? pb.getField(4, field): + if ?pb.getField(4, field): value.wantType = WantType(field) - if ? pb.getField(5, field): + if ?pb.getField(5, field): value.sendDontHave = bool(field) ok(value) @@ -193,10 +192,10 @@ proc decode*(_: type WantList, pb: ProtoBuffer): ProtoResult[WantList] = value = WantList() field: uint64 sublist: seq[seq[byte]] - if ? pb.getRepeatedField(1, sublist): + if ?pb.getRepeatedField(1, sublist): for item in sublist: - value.entries.add(? WantListEntry.decode(initProtoBuffer(item))) - if ? pb.getField(2, field): + value.entries.add(?WantListEntry.decode(initProtoBuffer(item))) + if ?pb.getField(2, field): value.full = bool(field) ok(value) @@ -208,17 +207,18 @@ proc decode*(_: type BlockDelivery, pb: ProtoBuffer): ProtoResult[BlockDelivery] cid: Cid ipb: ProtoBuffer - if ? pb.getField(1, cidBuf): - cid = ? Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) - if ? pb.getField(2, dataBuf): - value.blk = ? Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob) - if ? pb.getField(3, ipb): - value.address = ? 
BlockAddress.decode(ipb) + if ?pb.getField(1, cidBuf): + cid = ?Cid.init(cidBuf).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(2, dataBuf): + value.blk = + ?Block.new(cid, dataBuf, verify = true).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(3, ipb): + value.address = ?BlockAddress.decode(ipb) if value.address.leaf: var proofBuf = newSeq[byte]() - if ? pb.getField(4, proofBuf): - let proof = ? CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob) + if ?pb.getField(4, proofBuf): + let proof = ?CodexProof.decode(proofBuf).mapErr(x => ProtoError.IncorrectBlob) value.proof = proof.some else: value.proof = CodexProof.none @@ -232,23 +232,23 @@ proc decode*(_: type BlockPresence, pb: ProtoBuffer): ProtoResult[BlockPresence] value = BlockPresence() field: uint64 ipb: ProtoBuffer - if ? pb.getField(1, ipb): - value.address = ? BlockAddress.decode(ipb) - if ? pb.getField(2, field): + if ?pb.getField(1, ipb): + value.address = ?BlockAddress.decode(ipb) + if ?pb.getField(2, field): value.`type` = BlockPresenceType(field) - discard ? pb.getField(3, value.price) + discard ?pb.getField(3, value.price) ok(value) proc decode*(_: type AccountMessage, pb: ProtoBuffer): ProtoResult[AccountMessage] = - var - value = AccountMessage() - discard ? pb.getField(1, value.address) + var value = AccountMessage() + discard ?pb.getField(1, value.address) ok(value) -proc decode*(_: type StateChannelUpdate, pb: ProtoBuffer): ProtoResult[StateChannelUpdate] = - var - value = StateChannelUpdate() - discard ? pb.getField(1, value.update) +proc decode*( + _: type StateChannelUpdate, pb: ProtoBuffer +): ProtoResult[StateChannelUpdate] = + var value = StateChannelUpdate() + discard ?pb.getField(1, value.update) ok(value) proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = @@ -257,17 +257,19 @@ proc protobufDecode*(_: type Message, msg: seq[byte]): ProtoResult[Message] = pb = initProtoBuffer(msg, maxSize = MaxMessageSize) ipb: ProtoBuffer sublist: seq[seq[byte]] - if ? pb.getField(1, ipb): - value.wantList = ? WantList.decode(ipb) - if ? pb.getRepeatedField(3, sublist): + if ?pb.getField(1, ipb): + value.wantList = ?WantList.decode(ipb) + if ?pb.getRepeatedField(3, sublist): for item in sublist: - value.payload.add(? BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize))) - if ? pb.getRepeatedField(4, sublist): + value.payload.add( + ?BlockDelivery.decode(initProtoBuffer(item, maxSize = MaxBlockSize)) + ) + if ?pb.getRepeatedField(4, sublist): for item in sublist: - value.blockPresences.add(? BlockPresence.decode(initProtoBuffer(item))) - discard ? pb.getField(5, value.pendingBytes) - if ? pb.getField(6, ipb): - value.account = ? AccountMessage.decode(ipb) - if ? pb.getField(7, ipb): - value.payment = ? 
StateChannelUpdate.decode(ipb) + value.blockPresences.add(?BlockPresence.decode(initProtoBuffer(item))) + discard ?pb.getField(5, value.pendingBytes) + if ?pb.getField(6, ipb): + value.account = ?AccountMessage.decode(ipb) + if ?pb.getField(7, ipb): + value.payment = ?StateChannelUpdate.decode(ipb) ok(value) diff --git a/codex/blockexchange/protobuf/payments.nim b/codex/blockexchange/protobuf/payments.nim index 61321d2e2..5d010a81b 100644 --- a/codex/blockexchange/protobuf/payments.nim +++ b/codex/blockexchange/protobuf/payments.nim @@ -11,11 +11,11 @@ export StateChannelUpdate export stint export nitro -push: {.upraises: [].} +push: + {.upraises: [].} -type - Account* = object - address*: EthAddress +type Account* = object + address*: EthAddress func init*(_: type AccountMessage, account: Account): AccountMessage = AccountMessage(address: @(account.address.toArray)) @@ -24,7 +24,7 @@ func parse(_: type EthAddress, bytes: seq[byte]): ?EthAddress = var address: array[20, byte] if bytes.len != address.len: return EthAddress.none - for i in 0..=` to the data, # use the Cid as a container! - Block( - cid: cid, - data: @data).success + + Block(cid: cid, data: @data).success proc new*( - T: type Block, - cid: Cid, - data: openArray[byte], - verify: bool = true + T: type Block, cid: Cid, data: openArray[byte], verify: bool = true ): ?!Block = ## creates a new block for both storage and network IO ## if verify: let - mhash = ? cid.mhash.mapFailure - computedMhash = ? MultiHash.digest($mhash.mcodec, data).mapFailure - computedCid = ? Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure + mhash = ?cid.mhash.mapFailure + computedMhash = ?MultiHash.digest($mhash.mcodec, data).mapFailure + computedCid = ?Cid.init(cid.cidver, cid.mcodec, computedMhash).mapFailure if computedCid != cid: return "Cid doesn't match the data".failure - return Block( - cid: cid, - data: @data - ).success + return Block(cid: cid, data: @data).success proc emptyBlock*(version: CidVersion, hcodec: MultiCodec): ?!Block = - emptyCid(version, hcodec, BlockCodec) - .flatMap((cid: Cid) => Block.new(cid = cid, data = @[])) + emptyCid(version, hcodec, BlockCodec).flatMap( + (cid: Cid) => Block.new(cid = cid, data = @[]) + ) proc emptyBlock*(cid: Cid): ?!Block = - cid.mhash.mapFailure.flatMap((mhash: MultiHash) => - emptyBlock(cid.cidver, mhash.mcodec)) + cid.mhash.mapFailure.flatMap( + (mhash: MultiHash) => emptyBlock(cid.cidver, mhash.mcodec) + ) proc isEmpty*(cid: Cid): bool = - success(cid) == cid.mhash.mapFailure.flatMap((mhash: MultiHash) => - emptyCid(cid.cidver, mhash.mcodec, cid.mcodec)) + success(cid) == + cid.mhash.mapFailure.flatMap( + (mhash: MultiHash) => emptyCid(cid.cidver, mhash.mcodec, cid.mcodec) + ) proc isEmpty*(blk: Block): bool = blk.cid.isEmpty diff --git a/codex/chunker.nim b/codex/chunker.nim index ad256538d..f735aa4b4 100644 --- a/codex/chunker.nim +++ b/codex/chunker.nim @@ -11,7 +11,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/questionable import pkg/questionable/results @@ -23,8 +24,7 @@ import ./logutils export blocktype -const - DefaultChunkSize* = DefaultBlockSize +const DefaultChunkSize* = DefaultBlockSize type # default reader type @@ -33,10 +33,10 @@ type # Reader that splits input data into fixed-size chunks Chunker* = ref object - reader*: Reader # Procedure called to actually read the data - offset*: int # Bytes read so far (position in the stream) - chunkSize*: NBytes # Size of each chunk - pad*: bool # Pad last chunk to chunkSize? 
+ reader*: Reader # Procedure called to actually read the data + offset*: int # Bytes read so far (position in the stream) + chunkSize*: NBytes # Size of each chunk + pad*: bool # Pad last chunk to chunkSize? FileChunker* = Chunker LPStreamChunker* = Chunker @@ -60,30 +60,21 @@ proc getBytes*(c: Chunker): Future[seq[byte]] {.async.} = return move buff proc new*( - T: type Chunker, - reader: Reader, - chunkSize = DefaultChunkSize, - pad = true + T: type Chunker, reader: Reader, chunkSize = DefaultChunkSize, pad = true ): Chunker = ## create a new Chunker instance ## - Chunker( - reader: reader, - offset: 0, - chunkSize: chunkSize, - pad: pad) + Chunker(reader: reader, offset: 0, chunkSize: chunkSize, pad: pad) proc new*( - T: type LPStreamChunker, - stream: LPStream, - chunkSize = DefaultChunkSize, - pad = true + T: type LPStreamChunker, stream: LPStream, chunkSize = DefaultChunkSize, pad = true ): LPStreamChunker = ## create the default File chunker ## - proc reader(data: ChunkBuffer, len: int): Future[int] - {.gcsafe, async, raises: [Defect].} = + proc reader( + data: ChunkBuffer, len: int + ): Future[int] {.gcsafe, async, raises: [Defect].} = var res = 0 try: while res < len: @@ -101,22 +92,17 @@ proc new*( return res - LPStreamChunker.new( - reader = reader, - chunkSize = chunkSize, - pad = pad) + LPStreamChunker.new(reader = reader, chunkSize = chunkSize, pad = pad) proc new*( - T: type FileChunker, - file: File, - chunkSize = DefaultChunkSize, - pad = true + T: type FileChunker, file: File, chunkSize = DefaultChunkSize, pad = true ): FileChunker = ## create the default File chunker ## - proc reader(data: ChunkBuffer, len: int): Future[int] - {.gcsafe, async, raises: [Defect].} = + proc reader( + data: ChunkBuffer, len: int + ): Future[int] {.gcsafe, async, raises: [Defect].} = var total = 0 try: while total < len: @@ -135,7 +121,4 @@ proc new*( return total - FileChunker.new( - reader = reader, - chunkSize = chunkSize, - pad = pad) + FileChunker.new(reader = reader, chunkSize = chunkSize, pad = pad) diff --git a/codex/clock.nim b/codex/clock.nim index 933cd199a..98db22f74 100644 --- a/codex/clock.nim +++ b/codex/clock.nim @@ -20,9 +20,9 @@ method start*(clock: Clock) {.base, async.} = method stop*(clock: Clock) {.base, async.} = discard -proc withTimeout*(future: Future[void], - clock: Clock, - expiry: SecondsSince1970) {.async.} = +proc withTimeout*( + future: Future[void], clock: Clock, expiry: SecondsSince1970 +) {.async.} = let timeout = clock.waitUntil(expiry) try: await future or timeout diff --git a/codex/codex.nim b/codex/codex.nim index b22bf3d45..139852548 100644 --- a/codex/codex.nim +++ b/codex/codex.nim @@ -68,8 +68,7 @@ proc waitForSync(provider: Provider): Future[void] {.async.} = inc sleepTime trace "Ethereum provider is synced." -proc bootstrapInteractions( - s: CodexServer): Future[void] {.async.} = +proc bootstrapInteractions(s: CodexServer): Future[void] {.async.} = ## bootstrap interactions and return contracts ## using clients, hosts, validators pairings ## @@ -137,12 +136,12 @@ proc bootstrapInteractions( host = some HostInteractions.new(clock, sales) if config.validator: - without validationConfig =? ValidationConfig.init( - config.validatorMaxSlots, - config.validatorGroups, - config.validatorGroupIndex), err: - error "Invalid validation parameters", err = err.msg - quit QuitFailure + without validationConfig =? 
+ ValidationConfig.init( + config.validatorMaxSlots, config.validatorGroups, config.validatorGroupIndex + ), err: + error "Invalid validation parameters", err = err.msg + quit QuitFailure let validation = Validation.new(clock, market, validationConfig) validator = some ValidatorInteractions.new(clock, validation) @@ -156,10 +155,9 @@ proc start*(s: CodexServer) {.async.} = await s.codexNode.switch.start() - let (announceAddrs,discoveryAddrs)= nattedAddress( - s.config.nat, - s.codexNode.switch.peerInfo.addrs, - s.config.discoveryPort) + let (announceAddrs, discoveryAddrs) = nattedAddress( + s.config.nat, s.codexNode.switch.peerInfo.addrs, s.config.discoveryPort + ) s.codexNode.discovery.updateAnnounceRecord(announceAddrs) s.codexNode.discovery.updateDhtRecord(discoveryAddrs) @@ -176,15 +174,14 @@ proc stop*(s: CodexServer) {.async.} = s.codexNode.switch.stop(), s.codexNode.stop(), s.repoStore.stop(), - s.maintenance.stop()) + s.maintenance.stop(), + ) proc new*( - T: type CodexServer, - config: CodexConf, - privateKey: CodexPrivateKey): CodexServer = + T: type CodexServer, config: CodexConf, privateKey: CodexPrivateKey +): CodexServer = ## create CodexServer including setting up datastore, repostore, etc - let - switch = SwitchBuilder + let switch = SwitchBuilder .new() .withPrivateKey(privateKey) .withAddresses(config.listenAddrs) @@ -197,80 +194,107 @@ proc new*( .withTcpTransport({ServerFlags.ReuseAddr}) .build() - var - cache: CacheStore = nil + var cache: CacheStore = nil if config.cacheSize > 0'nb: cache = CacheStore.new(cacheSize = config.cacheSize) ## Is unused? - let - discoveryDir = config.dataDir / CodexDhtNamespace + let discoveryDir = config.dataDir / CodexDhtNamespace if io2.createPath(discoveryDir).isErr: - trace "Unable to create discovery directory for block store", discoveryDir = discoveryDir + trace "Unable to create discovery directory for block store", + discoveryDir = discoveryDir raise (ref Defect)( - msg: "Unable to create discovery directory for block store: " & discoveryDir) + msg: "Unable to create discovery directory for block store: " & discoveryDir + ) let discoveryStore = Datastore( - LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace) - .expect("Should create discovery datastore!")) + LevelDbDatastore.new(config.dataDir / CodexDhtProvidersNamespace).expect( + "Should create discovery datastore!" + ) + ) discovery = Discovery.new( switch.peerInfo.privateKey, announceAddrs = config.listenAddrs, bindPort = config.discoveryPort, bootstrapNodes = config.bootstrapNodes, - store = discoveryStore) + store = discoveryStore, + ) wallet = WalletRef.new(EthPrivateKey.random()) network = BlockExcNetwork.new(switch) - repoData = case config.repoKind - of repoFS: Datastore(FSDatastore.new($config.dataDir, depth = 5) - .expect("Should create repo file data store!")) - of repoSQLite: Datastore(SQLiteDatastore.new($config.dataDir) - .expect("Should create repo SQLite data store!")) - of repoLevelDb: Datastore(LevelDbDatastore.new($config.dataDir) - .expect("Should create repo LevelDB data store!")) + repoData = + case config.repoKind + of repoFS: + Datastore( + FSDatastore.new($config.dataDir, depth = 5).expect( + "Should create repo file data store!" + ) + ) + of repoSQLite: + Datastore( + SQLiteDatastore.new($config.dataDir).expect( + "Should create repo SQLite data store!" + ) + ) + of repoLevelDb: + Datastore( + LevelDbDatastore.new($config.dataDir).expect( + "Should create repo LevelDB data store!" 
+ ) + ) repoStore = RepoStore.new( repoDs = repoData, - metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace) - .expect("Should create metadata store!"), + metaDs = LevelDbDatastore.new(config.dataDir / CodexMetaNamespace).expect( + "Should create metadata store!" + ), quotaMaxBytes = config.storageQuota, - blockTtl = config.blockTtl) + blockTtl = config.blockTtl, + ) maintenance = BlockMaintainer.new( repoStore, interval = config.blockMaintenanceInterval, - numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks) + numberOfBlocksPerInterval = config.blockMaintenanceNumberOfBlocks, + ) peerStore = PeerCtxStore.new() pendingBlocks = PendingBlocksManager.new() advertiser = Advertiser.new(repoStore, discovery) - blockDiscovery = DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks) - engine = BlockExcEngine.new(repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks) + blockDiscovery = + DiscoveryEngine.new(repoStore, peerStore, network, discovery, pendingBlocks) + engine = BlockExcEngine.new( + repoStore, wallet, network, blockDiscovery, advertiser, peerStore, pendingBlocks + ) store = NetworkStore.new(engine, repoStore) - prover = if config.prover: - let backend = config.initializeBackend().expect("Unable to create prover backend.") - some Prover.new(store, backend, config.numProofSamples) - else: - none Prover + prover = + if config.prover: + let backend = + config.initializeBackend().expect("Unable to create prover backend.") + some Prover.new(store, backend, config.numProofSamples) + else: + none Prover codexNode = CodexNodeRef.new( switch = switch, networkStore = store, engine = engine, discovery = discovery, - prover = prover) - - restServer = RestServerRef.new( - codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), - initTAddress(config.apiBindAddress , config.apiPort), - bufferSize = (1024 * 64), - maxRequestBodySize = int.high) + prover = prover, + ) + + restServer = RestServerRef + .new( + codexNode.initRestApi(config, repoStore, config.apiCorsAllowedOrigin), + initTAddress(config.apiBindAddress, config.apiPort), + bufferSize = (1024 * 64), + maxRequestBodySize = int.high, + ) .expect("Should start rest server!") switch.mount(network) @@ -280,4 +304,5 @@ proc new*( codexNode: codexNode, restServer: restServer, repoStore: repoStore, - maintenance: maintenance) + maintenance: maintenance, + ) diff --git a/codex/codextypes.nim b/codex/codextypes.nim index 2fd15d1ef..274b9be0a 100644 --- a/codex/codextypes.nim +++ b/codex/codextypes.nim @@ -25,15 +25,15 @@ export tables const # Size of blocks for storage / network exchange, - DefaultBlockSize* = NBytes 1024*64 + DefaultBlockSize* = NBytes 1024 * 64 DefaultCellSize* = NBytes 2048 # Proving defaults - DefaultMaxSlotDepth* = 32 + DefaultMaxSlotDepth* = 32 DefaultMaxDatasetDepth* = 8 - DefaultBlockDepth* = 5 - DefaultCellElms* = 67 - DefaultSamplesNum* = 5 + DefaultBlockDepth* = 5 + DefaultCellElms* = 67 + DefaultSamplesNum* = 5 # hashes Sha256HashCodec* = multiCodec("sha2-256") @@ -48,18 +48,10 @@ const SlotProvingRootCodec* = multiCodec("codex-proving-root") CodexSlotCellCodec* = multiCodec("codex-slot-cell") - CodexHashesCodecs* = [ - Sha256HashCodec, - Pos2Bn128SpngCodec, - Pos2Bn128MrklCodec - ] + CodexHashesCodecs* = [Sha256HashCodec, Pos2Bn128SpngCodec, Pos2Bn128MrklCodec] CodexPrimitivesCodecs* = [ - ManifestCodec, - DatasetRootCodec, - BlockCodec, - SlotRootCodec, - SlotProvingRootCodec, + ManifestCodec, DatasetRootCodec, BlockCodec, 
SlotRootCodec, SlotProvingRootCodec, CodexSlotCellCodec, ] @@ -74,40 +66,34 @@ proc initEmptyCidTable(): ?!Table[(CidVersion, MultiCodec, MultiCodec), Cid] = let emptyData: seq[byte] = @[] PadHashes = { - Sha256HashCodec: ? MultiHash.digest($Sha256HashCodec, emptyData).mapFailure, - Sha512HashCodec: ? MultiHash.digest($Sha512HashCodec, emptyData).mapFailure, + Sha256HashCodec: ?MultiHash.digest($Sha256HashCodec, emptyData).mapFailure, + Sha512HashCodec: ?MultiHash.digest($Sha512HashCodec, emptyData).mapFailure, }.toTable - var - table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]() + var table = initTable[(CidVersion, MultiCodec, MultiCodec), Cid]() for hcodec, mhash in PadHashes.pairs: - table[(CIDv1, hcodec, BlockCodec)] = ? Cid.init(CIDv1, BlockCodec, mhash).mapFailure + table[(CIDv1, hcodec, BlockCodec)] = ?Cid.init(CIDv1, BlockCodec, mhash).mapFailure success table -proc emptyCid*( - version: CidVersion, - hcodec: MultiCodec, - dcodec: MultiCodec): ?!Cid = +proc emptyCid*(version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec): ?!Cid = ## Returns cid representing empty content, ## given cid version, hash codec and data codec ## - var - table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid] + var table {.global, threadvar.}: Table[(CidVersion, MultiCodec, MultiCodec), Cid] once: - table = ? initEmptyCidTable() + table = ?initEmptyCidTable() table[(version, hcodec, dcodec)].catch proc emptyDigest*( - version: CidVersion, - hcodec: MultiCodec, - dcodec: MultiCodec): ?!MultiHash = + version: CidVersion, hcodec: MultiCodec, dcodec: MultiCodec +): ?!MultiHash = ## Returns hash representing empty content, ## given cid version, hash codec and data codec ## - emptyCid(version, hcodec, dcodec) - .flatMap((cid: Cid) => cid.mhash.mapFailure) + + emptyCid(version, hcodec, dcodec).flatMap((cid: Cid) => cid.mhash.mapFailure) diff --git a/codex/conf.nim b/codex/conf.nim index 41ee628e2..6d47f8f47 100644 --- a/codex/conf.nim +++ b/codex/conf.nim @@ -50,18 +50,17 @@ export units, net, codextypes, logutils, completeCmdArg, parseCmdArg, NatConfig export ValidationGroups, MaxSlots export - DefaultQuotaBytes, - DefaultBlockTtl, - DefaultBlockMaintenanceInterval, + DefaultQuotaBytes, DefaultBlockTtl, DefaultBlockMaintenanceInterval, DefaultNumberOfBlocksToMaintainPerInterval proc defaultDataDir*(): string = - let dataDir = when defined(windows): - "AppData" / "Roaming" / "Codex" - elif defined(macosx): - "Library" / "Application Support" / "Codex" - else: - ".cache" / "codex" + let dataDir = + when defined(windows): + "AppData" / "Roaming" / "Codex" + elif defined(macosx): + "Library" / "Application Support" / "Codex" + else: + ".cache" / "codex" getHomeDir() / dataDir @@ -96,320 +95,341 @@ type CodexConf* = object configFile* {. - desc: "Loads the configuration from a TOML file" - defaultValueDesc: "none" - defaultValue: InputFile.none - name: "config-file" }: Option[InputFile] + desc: "Loads the configuration from a TOML file", + defaultValueDesc: "none", + defaultValue: InputFile.none, + name: "config-file" + .}: Option[InputFile] - logLevel* {. - defaultValue: "info" - desc: "Sets the log level", - name: "log-level" }: string + logLevel* {.defaultValue: "info", desc: "Sets the log level", name: "log-level".}: + string logFormat* {. 
- desc: "Specifies what kind of logs should be written to stdout (auto, " & - "colors, nocolors, json)" - defaultValueDesc: "auto" - defaultValue: LogKind.Auto - name: "log-format" }: LogKind + desc: + "Specifies what kind of logs should be written to stdout (auto, " & + "colors, nocolors, json)", + defaultValueDesc: "auto", + defaultValue: LogKind.Auto, + name: "log-format" + .}: LogKind metricsEnabled* {. - desc: "Enable the metrics server" - defaultValue: false - name: "metrics" }: bool + desc: "Enable the metrics server", defaultValue: false, name: "metrics" + .}: bool metricsAddress* {. - desc: "Listening address of the metrics server" - defaultValue: defaultAddress(config) - defaultValueDesc: "127.0.0.1" - name: "metrics-address" }: IpAddress + desc: "Listening address of the metrics server", + defaultValue: defaultAddress(config), + defaultValueDesc: "127.0.0.1", + name: "metrics-address" + .}: IpAddress metricsPort* {. - desc: "Listening HTTP port of the metrics server" - defaultValue: 8008 - name: "metrics-port" }: Port + desc: "Listening HTTP port of the metrics server", + defaultValue: 8008, + name: "metrics-port" + .}: Port dataDir* {. - desc: "The directory where codex will store configuration and data" - defaultValue: DefaultDataDir - defaultValueDesc: $DefaultDataDir - abbr: "d" - name: "data-dir" }: OutDir + desc: "The directory where codex will store configuration and data", + defaultValue: DefaultDataDir, + defaultValueDesc: $DefaultDataDir, + abbr: "d", + name: "data-dir" + .}: OutDir listenAddrs* {. - desc: "Multi Addresses to listen on" - defaultValue: @[ - MultiAddress.init("/ip4/0.0.0.0/tcp/0") - .expect("Should init multiaddress")] - defaultValueDesc: "/ip4/0.0.0.0/tcp/0" - abbr: "i" - name: "listen-addrs" }: seq[MultiAddress] + desc: "Multi Addresses to listen on", + defaultValue: + @[MultiAddress.init("/ip4/0.0.0.0/tcp/0").expect("Should init multiaddress")], + defaultValueDesc: "/ip4/0.0.0.0/tcp/0", + abbr: "i", + name: "listen-addrs" + .}: seq[MultiAddress] nat* {. - desc: "Specify method to use for determining public address. " & - "Must be one of: any, none, upnp, pmp, extip:" - defaultValue: defaultNatConfig() - defaultValueDesc: "any" - name: "nat" }: NatConfig + desc: + "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:", + defaultValue: defaultNatConfig(), + defaultValueDesc: "any", + name: "nat" + .}: NatConfig discoveryPort* {. - desc: "Discovery (UDP) port" - defaultValue: 8090.Port - defaultValueDesc: "8090" - abbr: "u" - name: "disc-port" }: Port + desc: "Discovery (UDP) port", + defaultValue: 8090.Port, + defaultValueDesc: "8090", + abbr: "u", + name: "disc-port" + .}: Port netPrivKeyFile* {. - desc: "Source of network (secp256k1) private key file path or name" - defaultValue: "key" - name: "net-privkey" }: string + desc: "Source of network (secp256k1) private key file path or name", + defaultValue: "key", + name: "net-privkey" + .}: string bootstrapNodes* {. - desc: "Specifies one or more bootstrap nodes to use when " & - "connecting to the network" - abbr: "b" - name: "bootstrap-node" }: seq[SignedPeerRecord] + desc: + "Specifies one or more bootstrap nodes to use when " & + "connecting to the network", + abbr: "b", + name: "bootstrap-node" + .}: seq[SignedPeerRecord] maxPeers* {. 
- desc: "The maximum number of peers to connect to" - defaultValue: 160 - name: "max-peers" }: int + desc: "The maximum number of peers to connect to", + defaultValue: 160, + name: "max-peers" + .}: int agentString* {. - defaultValue: "Codex" - desc: "Node agent string which is used as identifier in network" - name: "agent-string" }: string + defaultValue: "Codex", + desc: "Node agent string which is used as identifier in network", + name: "agent-string" + .}: string apiBindAddress* {. - desc: "The REST API bind address" - defaultValue: "127.0.0.1" - name: "api-bindaddr" - }: string + desc: "The REST API bind address", defaultValue: "127.0.0.1", name: "api-bindaddr" + .}: string apiPort* {. desc: "The REST Api port", - defaultValue: 8080.Port - defaultValueDesc: "8080" - name: "api-port" - abbr: "p" }: Port + defaultValue: 8080.Port, + defaultValueDesc: "8080", + name: "api-port", + abbr: "p" + .}: Port apiCorsAllowedOrigin* {. - desc: "The REST Api CORS allowed origin for downloading data. " & + desc: + "The REST Api CORS allowed origin for downloading data. " & "'*' will allow all origins, '' will allow none.", - defaultValue: string.none - defaultValueDesc: "Disallow all cross origin requests to download data" - name: "api-cors-origin" }: Option[string] + defaultValue: string.none, + defaultValueDesc: "Disallow all cross origin requests to download data", + name: "api-cors-origin" + .}: Option[string] repoKind* {. - desc: "Backend for main repo store (fs, sqlite, leveldb)" - defaultValueDesc: "fs" - defaultValue: repoFS - name: "repo-kind" }: RepoKind + desc: "Backend for main repo store (fs, sqlite, leveldb)", + defaultValueDesc: "fs", + defaultValue: repoFS, + name: "repo-kind" + .}: RepoKind storageQuota* {. - desc: "The size of the total storage quota dedicated to the node" - defaultValue: DefaultQuotaBytes - defaultValueDesc: $DefaultQuotaBytes - name: "storage-quota" - abbr: "q" }: NBytes + desc: "The size of the total storage quota dedicated to the node", + defaultValue: DefaultQuotaBytes, + defaultValueDesc: $DefaultQuotaBytes, + name: "storage-quota", + abbr: "q" + .}: NBytes blockTtl* {. - desc: "Default block timeout in seconds - 0 disables the ttl" - defaultValue: DefaultBlockTtl - defaultValueDesc: $DefaultBlockTtl - name: "block-ttl" - abbr: "t" }: Duration + desc: "Default block timeout in seconds - 0 disables the ttl", + defaultValue: DefaultBlockTtl, + defaultValueDesc: $DefaultBlockTtl, + name: "block-ttl", + abbr: "t" + .}: Duration blockMaintenanceInterval* {. - desc: "Time interval in seconds - determines frequency of block " & - "maintenance cycle: how often blocks are checked " & - "for expiration and cleanup" - defaultValue: DefaultBlockMaintenanceInterval - defaultValueDesc: $DefaultBlockMaintenanceInterval - name: "block-mi" }: Duration + desc: + "Time interval in seconds - determines frequency of block " & + "maintenance cycle: how often blocks are checked " & "for expiration and cleanup", + defaultValue: DefaultBlockMaintenanceInterval, + defaultValueDesc: $DefaultBlockMaintenanceInterval, + name: "block-mi" + .}: Duration blockMaintenanceNumberOfBlocks* {. 
- desc: "Number of blocks to check every maintenance cycle" - defaultValue: DefaultNumberOfBlocksToMaintainPerInterval - defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval - name: "block-mn" }: int + desc: "Number of blocks to check every maintenance cycle", + defaultValue: DefaultNumberOfBlocksToMaintainPerInterval, + defaultValueDesc: $DefaultNumberOfBlocksToMaintainPerInterval, + name: "block-mn" + .}: int cacheSize* {. - desc: "The size of the block cache, 0 disables the cache - " & - "might help on slow hardrives" - defaultValue: 0 - defaultValueDesc: "0" - name: "cache-size" - abbr: "c" }: NBytes + desc: + "The size of the block cache, 0 disables the cache - " & + "might help on slow hardrives", + defaultValue: 0, + defaultValueDesc: "0", + name: "cache-size", + abbr: "c" + .}: NBytes logFile* {. - desc: "Logs to file" - defaultValue: string.none - name: "log-file" - hidden - .}: Option[string] + desc: "Logs to file", defaultValue: string.none, name: "log-file", hidden + .}: Option[string] - case cmd* {. - defaultValue: noCmd - command }: StartUpCmd + case cmd* {.defaultValue: noCmd, command.}: StartUpCmd of persistence: ethProvider* {. - desc: "The URL of the JSON-RPC API of the Ethereum node" - defaultValue: "ws://localhost:8545" + desc: "The URL of the JSON-RPC API of the Ethereum node", + defaultValue: "ws://localhost:8545", name: "eth-provider" .}: string ethAccount* {. - desc: "The Ethereum account that is used for storage contracts" - defaultValue: EthAddress.none - defaultValueDesc: "" + desc: "The Ethereum account that is used for storage contracts", + defaultValue: EthAddress.none, + defaultValueDesc: "", name: "eth-account" .}: Option[EthAddress] ethPrivateKey* {. - desc: "File containing Ethereum private key for storage contracts" - defaultValue: string.none - defaultValueDesc: "" + desc: "File containing Ethereum private key for storage contracts", + defaultValue: string.none, + defaultValueDesc: "", name: "eth-private-key" .}: Option[string] marketplaceAddress* {. - desc: "Address of deployed Marketplace contract" - defaultValue: EthAddress.none - defaultValueDesc: "" + desc: "Address of deployed Marketplace contract", + defaultValue: EthAddress.none, + defaultValueDesc: "", name: "marketplace-address" .}: Option[EthAddress] # TODO: should go behind a feature flag simulateProofFailures* {. - desc: "Simulates proof failures once every N proofs. 0 = disabled." - defaultValue: 0 - name: "simulate-proof-failures" - hidden - .}: int + desc: "Simulates proof failures once every N proofs. 0 = disabled.", + defaultValue: 0, + name: "simulate-proof-failures", + hidden + .}: int validator* {. - desc: "Enables validator, requires an Ethereum node" - defaultValue: false + desc: "Enables validator, requires an Ethereum node", + defaultValue: false, name: "validator" .}: bool validatorMaxSlots* {. - desc: "Maximum number of slots that the validator monitors" - longDesc: "If set to 0, the validator will not limit " & - "the maximum number of slots it monitors" - defaultValue: 1000 + desc: "Maximum number of slots that the validator monitors", + longDesc: + "If set to 0, the validator will not limit " & + "the maximum number of slots it monitors", + defaultValue: 1000, name: "validator-max-slots" .}: MaxSlots validatorGroups* {. - desc: "Slot validation groups" - longDesc: "A number indicating total number of groups into " & + desc: "Slot validation groups", + longDesc: + "A number indicating total number of groups into " & "which the whole slot id space will be divided. 
" & "The value must be in the range [2, 65535]. " & "If not provided, the validator will observe " & "the whole slot id space and the value of " & "the --validator-group-index parameter will be ignored. " & - "Powers of twos are advised for even distribution" - defaultValue: ValidationGroups.none + "Powers of twos are advised for even distribution", + defaultValue: ValidationGroups.none, name: "validator-groups" .}: Option[ValidationGroups] validatorGroupIndex* {. - desc: "Slot validation group index" - longDesc: "The value provided must be in the range " & + desc: "Slot validation group index", + longDesc: + "The value provided must be in the range " & "[0, validatorGroups). Ignored when --validator-groups " & "is not provided. Only slot ids satisfying condition " & "[(slotId mod validationGroups) == groupIndex] will be " & - "observed by the validator" - defaultValue: 0 + "observed by the validator", + defaultValue: 0, name: "validator-group-index" .}: uint16 rewardRecipient* {. - desc: "Address to send payouts to (eg rewards and refunds)" + desc: "Address to send payouts to (eg rewards and refunds)", name: "reward-recipient" .}: Option[EthAddress] - case persistenceCmd* {. - defaultValue: noCmd - command }: PersistenceCmd - + case persistenceCmd* {.defaultValue: noCmd, command.}: PersistenceCmd of PersistenceCmd.prover: circuitDir* {. - desc: "Directory where Codex will store proof circuit data" - defaultValue: DefaultCircuitDir - defaultValueDesc: $DefaultCircuitDir - abbr: "cd" - name: "circuit-dir" }: OutDir + desc: "Directory where Codex will store proof circuit data", + defaultValue: DefaultCircuitDir, + defaultValueDesc: $DefaultCircuitDir, + abbr: "cd", + name: "circuit-dir" + .}: OutDir circomR1cs* {. - desc: "The r1cs file for the storage circuit" - defaultValue: $DefaultCircuitDir / "proof_main.r1cs" - defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs" + desc: "The r1cs file for the storage circuit", + defaultValue: $DefaultCircuitDir / "proof_main.r1cs", + defaultValueDesc: $DefaultCircuitDir & "/proof_main.r1cs", name: "circom-r1cs" .}: InputFile circomWasm* {. - desc: "The wasm file for the storage circuit" - defaultValue: $DefaultCircuitDir / "proof_main.wasm" - defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm" + desc: "The wasm file for the storage circuit", + defaultValue: $DefaultCircuitDir / "proof_main.wasm", + defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.wasm", name: "circom-wasm" .}: InputFile circomZkey* {. - desc: "The zkey file for the storage circuit" - defaultValue: $DefaultCircuitDir / "proof_main.zkey" - defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey" + desc: "The zkey file for the storage circuit", + defaultValue: $DefaultCircuitDir / "proof_main.zkey", + defaultValueDesc: $DefaultDataDir & "/circuits/proof_main.zkey", name: "circom-zkey" .}: InputFile # TODO: should probably be hidden and behind a feature flag circomNoZkey* {. - desc: "Ignore the zkey file - use only for testing!" - defaultValue: false + desc: "Ignore the zkey file - use only for testing!", + defaultValue: false, name: "circom-no-zkey" .}: bool numProofSamples* {. - desc: "Number of samples to prove" - defaultValue: DefaultSamplesNum - defaultValueDesc: $DefaultSamplesNum - name: "proof-samples" }: int + desc: "Number of samples to prove", + defaultValue: DefaultSamplesNum, + defaultValueDesc: $DefaultSamplesNum, + name: "proof-samples" + .}: int maxSlotDepth* {. 
- desc: "The maximum depth of the slot tree" - defaultValue: DefaultMaxSlotDepth - defaultValueDesc: $DefaultMaxSlotDepth - name: "max-slot-depth" }: int + desc: "The maximum depth of the slot tree", + defaultValue: DefaultMaxSlotDepth, + defaultValueDesc: $DefaultMaxSlotDepth, + name: "max-slot-depth" + .}: int maxDatasetDepth* {. - desc: "The maximum depth of the dataset tree" - defaultValue: DefaultMaxDatasetDepth - defaultValueDesc: $DefaultMaxDatasetDepth - name: "max-dataset-depth" }: int + desc: "The maximum depth of the dataset tree", + defaultValue: DefaultMaxDatasetDepth, + defaultValueDesc: $DefaultMaxDatasetDepth, + name: "max-dataset-depth" + .}: int maxBlockDepth* {. - desc: "The maximum depth of the network block merkle tree" - defaultValue: DefaultBlockDepth - defaultValueDesc: $DefaultBlockDepth - name: "max-block-depth" }: int + desc: "The maximum depth of the network block merkle tree", + defaultValue: DefaultBlockDepth, + defaultValueDesc: $DefaultBlockDepth, + name: "max-block-depth" + .}: int maxCellElms* {. - desc: "The maximum number of elements in a cell" - defaultValue: DefaultCellElms - defaultValueDesc: $DefaultCellElms - name: "max-cell-elements" }: int + desc: "The maximum number of elements in a cell", + defaultValue: DefaultCellElms, + defaultValueDesc: $DefaultCellElms, + name: "max-cell-elements" + .}: int of PersistenceCmd.noCmd: discard - of StartUpCmd.noCmd: discard # end of persistence EthAddress* = ethers.Address -logutils.formatIt(LogFormat.textLines, EthAddress): it.short0xHexLog -logutils.formatIt(LogFormat.json, EthAddress): %it +logutils.formatIt(LogFormat.textLines, EthAddress): + it.short0xHexLog +logutils.formatIt(LogFormat.json, EthAddress): + %it func defaultAddress*(conf: CodexConf): IpAddress = result = static parseIpAddress("127.0.0.1") @@ -443,20 +463,19 @@ const nimBanner* = getNimBanner() codexFullVersion* = - "Codex version: " & codexVersion & "\p" & - "Codex revision: " & codexRevision & "\p" & + "Codex version: " & codexVersion & "\p" & "Codex revision: " & codexRevision & "\p" & nimBanner -proc parseCmdArg*(T: typedesc[MultiAddress], - input: string): MultiAddress - {.upraises: [ValueError] .} = +proc parseCmdArg*( + T: typedesc[MultiAddress], input: string +): MultiAddress {.upraises: [ValueError].} = var ma: MultiAddress try: let res = MultiAddress.init(input) if res.isOk: ma = res.get() else: - warn "Invalid MultiAddress", input=input, error = res.error() + warn "Invalid MultiAddress", input = input, error = res.error() quit QuitFailure except LPError as exc: warn "Invalid MultiAddress uri", uri = input, error = exc.msg @@ -478,28 +497,28 @@ proc parseCmdArg*(T: type SignedPeerRecord, uri: string): T = res func parseCmdArg*(T: type NatConfig, p: string): T {.raises: [ValueError].} = - case p.toLowerAscii: - of "any": - NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) - of "none": - NatConfig(hasExtIp: false, nat: NatStrategy.NatNone) - of "upnp": - NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp) - of "pmp": - NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp) - else: - if p.startsWith("extip:"): - try: - let ip = parseIpAddress(p[6..^1]) - NatConfig(hasExtIp: true, extIp: ip) - except ValueError: - let error = "Not a valid IP address: " & p[6..^1] - raise newException(ValueError, error) - else: - let error = "Not a valid NAT option: " & p + case p.toLowerAscii + of "any": + NatConfig(hasExtIp: false, nat: NatStrategy.NatAny) + of "none": + NatConfig(hasExtIp: false, nat: NatStrategy.NatNone) + of "upnp": + 
NatConfig(hasExtIp: false, nat: NatStrategy.NatUpnp) + of "pmp": + NatConfig(hasExtIp: false, nat: NatStrategy.NatPmp) + else: + if p.startsWith("extip:"): + try: + let ip = parseIpAddress(p[6 ..^ 1]) + NatConfig(hasExtIp: true, extIp: ip) + except ValueError: + let error = "Not a valid IP address: " & p[6 ..^ 1] raise newException(ValueError, error) + else: + let error = "Not a valid NAT option: " & p + raise newException(ValueError, error) -proc completeCmdArg*(T: type NatConfig; val: string): seq[string] = +proc completeCmdArg*(T: type NatConfig, val: string): seq[string] = return @[] proc parseCmdArg*(T: type EthAddress, address: string): T = @@ -509,20 +528,21 @@ proc parseCmdArg*(T: type NBytes, val: string): T = var num = 0'i64 let count = parseSize(val, num, alwaysBin = true) if count == 0: - warn "Invalid number of bytes", nbytes = val - quit QuitFailure + warn "Invalid number of bytes", nbytes = val + quit QuitFailure NBytes(num) proc parseCmdArg*(T: type Duration, val: string): T = var dur: Duration let count = parseDuration(val, dur) if count == 0: - warn "Cannot parse duration", dur = dur - quit QuitFailure + warn "Cannot parse duration", dur = dur + quit QuitFailure dur -proc readValue*(r: var TomlReader, val: var EthAddress) - {.upraises: [SerializationError, IOError].} = +proc readValue*( + r: var TomlReader, val: var EthAddress +) {.upraises: [SerializationError, IOError].} = val = EthAddress.init(r.readValue(string)).get() proc readValue*(r: var TomlReader, val: var SignedPeerRecord) = @@ -545,11 +565,12 @@ proc readValue*(r: var TomlReader, val: var MultiAddress) = if res.isOk: val = res.get() else: - warn "Invalid MultiAddress", input=input, error=res.error() + warn "Invalid MultiAddress", input = input, error = res.error() quit QuitFailure -proc readValue*(r: var TomlReader, val: var NBytes) - {.upraises: [SerializationError, IOError].} = +proc readValue*( + r: var TomlReader, val: var NBytes +) {.upraises: [SerializationError, IOError].} = var value = 0'i64 var str = r.readValue(string) let count = parseSize(str, value, alwaysBin = true) @@ -558,8 +579,9 @@ proc readValue*(r: var TomlReader, val: var NBytes) quit QuitFailure val = NBytes(value) -proc readValue*(r: var TomlReader, val: var Duration) - {.upraises: [SerializationError, IOError].} = +proc readValue*( + r: var TomlReader, val: var Duration +) {.upraises: [SerializationError, IOError].} = var str = r.readValue(string) var dur: Duration let count = parseDuration(str, dur) @@ -568,20 +590,23 @@ proc readValue*(r: var TomlReader, val: var Duration) quit QuitFailure val = dur -proc readValue*(r: var TomlReader, val: var NatConfig) - {.raises: [SerializationError].} = - val = try: parseCmdArg(NatConfig, r.readValue(string)) - except CatchableError as err: - raise newException(SerializationError, err.msg) +proc readValue*( + r: var TomlReader, val: var NatConfig +) {.raises: [SerializationError].} = + val = + try: + parseCmdArg(NatConfig, r.readValue(string)) + except CatchableError as err: + raise newException(SerializationError, err.msg) # no idea why confutils needs this: -proc completeCmdArg*(T: type EthAddress; val: string): seq[string] = +proc completeCmdArg*(T: type EthAddress, val: string): seq[string] = discard -proc completeCmdArg*(T: type NBytes; val: string): seq[string] = +proc completeCmdArg*(T: type NBytes, val: string): seq[string] = discard -proc completeCmdArg*(T: type Duration; val: string): seq[string] = +proc completeCmdArg*(T: type Duration, val: string): seq[string] = discard # silly 
chronicles, colors is a compile-time property @@ -603,7 +628,7 @@ proc stripAnsi*(v: string): string = if c2 != '[': break else: - if c2 in {'0'..'9'} + {';'}: + if c2 in {'0' .. '9'} + {';'}: discard # keep looking elif c2 == 'm': i = x + 1 @@ -627,12 +652,12 @@ proc updateLogLevel*(logLevel: string) {.upraises: [ValueError].} = setLogLevel(parseEnum[LogLevel](directives[0].toUpperAscii)) except ValueError: raise (ref ValueError)( - msg: "Please specify one of: trace, debug, " & - "info, notice, warn, error or fatal" + msg: + "Please specify one of: trace, debug, " & "info, notice, warn, error or fatal" ) if directives.len > 1: - for topicName, settings in parseTopicDirectives(directives[1..^1]): + for topicName, settings in parseTopicDirectives(directives[1 ..^ 1]): if not setTopicState(topicName, settings.state, settings.logLevel): warn "Unrecognized logging topic", topic = topicName @@ -641,7 +666,9 @@ proc setupLogging*(conf: CodexConf) = warn "Logging configuration options not enabled in the current build" else: var logFile: ?IoHandle - proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = discard + proc noOutput(logLevel: LogLevel, msg: LogOutputStr) = + discard + proc writeAndFlush(f: File, msg: LogOutputStr) = try: f.write(msg) @@ -662,14 +689,11 @@ proc setupLogging*(conf: CodexConf) = defaultChroniclesStream.outputs[2].writer = noOutput if logFilePath =? conf.logFile and logFilePath.len > 0: - let logFileHandle = openFile( - logFilePath, - {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate} - ) + let logFileHandle = + openFile(logFilePath, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}) if logFileHandle.isErr: error "failed to open log file", - path = logFilePath, - errorCode = $logFileHandle.error + path = logFilePath, errorCode = $logFileHandle.error else: logFile = logFileHandle.option defaultChroniclesStream.outputs[2].writer = fileFlush @@ -677,14 +701,13 @@ proc setupLogging*(conf: CodexConf) = defaultChroniclesStream.outputs[1].writer = noOutput let writer = - case conf.logFormat: + case conf.logFormat of LogKind.Auto: - if isatty(stdout): - stdoutFlush - else: - noColorsFlush - of LogKind.Colors: stdoutFlush - of LogKind.NoColors: noColorsFlush + if isatty(stdout): stdoutFlush else: noColorsFlush + of LogKind.Colors: + stdoutFlush + of LogKind.NoColors: + noColorsFlush of LogKind.Json: defaultChroniclesStream.outputs[1].writer = stdoutFlush noOutput @@ -695,8 +718,9 @@ proc setupLogging*(conf: CodexConf) = var counter = 0.uint64 proc numberedWriter(logLevel: LogLevel, msg: LogOutputStr) = inc(counter) - let withoutNewLine = msg[0..^2] + let withoutNewLine = msg[0 ..^ 2] writer(logLevel, withoutNewLine & " count=" & $counter & "\n") + defaultChroniclesStream.outputs[0].writer = numberedWriter else: defaultChroniclesStream.outputs[0].writer = writer diff --git a/codex/contracts/clock.nim b/codex/contracts/clock.nim index 284996ffd..b5bf7ebb4 100644 --- a/codex/contracts/clock.nim +++ b/codex/contracts/clock.nim @@ -11,14 +11,13 @@ export clock logScope: topics = "contracts clock" -type - OnChainClock* = ref object of Clock - provider: Provider - subscription: Subscription - offset: times.Duration - blockNumber: UInt256 - started: bool - newBlock: AsyncEvent +type OnChainClock* = ref object of Clock + provider: Provider + subscription: Subscription + offset: times.Duration + blockNumber: UInt256 + started: bool + newBlock: AsyncEvent proc new*(_: type OnChainClock, provider: Provider): OnChainClock = OnChainClock(provider: provider, newBlock: newAsyncEvent()) 
@@ -29,7 +28,8 @@ proc update(clock: OnChainClock, blck: Block) =
     let computerTime = getTime()
     clock.offset = blockTime - computerTime
     clock.blockNumber = number
-    trace "updated clock", blockTime=blck.timestamp, blockNumber=number, offset=clock.offset
+    trace "updated clock",
+      blockTime = blck.timestamp, blockNumber = number, offset = clock.offset
     clock.newBlock.fire()
 
 proc update(clock: OnChainClock) {.async.} =
@@ -39,7 +39,7 @@ proc update(clock: OnChainClock) {.async.} =
   except CancelledError as error:
     raise error
   except CatchableError as error:
-    debug "error updating clock: ", error=error.msg
+    debug "error updating clock: ", error = error.msg
   discard
 
 method start*(clock: OnChainClock) {.async.} =
@@ -48,7 +48,7 @@ method start*(clock: OnChainClock) {.async.} =
   proc onBlock(blckResult: ?!Block) =
     if eventError =? blckResult.errorOption:
-      error "There was an error in block subscription", msg=eventError.msg
+      error "There was an error in block subscription", msg = eventError.msg
       return
 
     # ignore block parameter; hardhat may call this with pending blocks
diff --git a/codex/contracts/config.nim b/codex/contracts/config.nim
index 76e00207f..87cd1f2aa 100644
--- a/codex/contracts/config.nim
+++ b/codex/contracts/config.nim
@@ -8,11 +8,14 @@ type
   MarketplaceConfig* = object
     collateral*: CollateralConfig
     proofs*: ProofConfig
+
   CollateralConfig* = object
-    repairRewardPercentage*: uint8 # percentage of remaining collateral slot has after it has been freed
+    repairRewardPercentage*: uint8
+      # percentage of remaining collateral a slot has after it has been freed
     maxNumberOfSlashes*: uint8 # frees slot when the number of slashes reaches this value
     slashCriterion*: uint16 # amount of proofs missed that lead to slashing
     slashPercentage*: uint8 # percentage of the collateral that is slashed
+
   ProofConfig* = object
     period*: UInt256 # proofs requirements are calculated per period (in seconds)
     timeout*: UInt256 # mark proofs as missing before the timeout (in seconds)
@@ -23,14 +26,13 @@ type
     # blocks. Should be a prime number to ensure there are no cycles.
downtimeProduct*: uint8 - func fromTuple(_: type ProofConfig, tupl: tuple): ProofConfig = ProofConfig( period: tupl[0], timeout: tupl[1], downtime: tupl[2], zkeyHash: tupl[3], - downtimeProduct: tupl[4] + downtimeProduct: tupl[4], ) func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig = @@ -38,14 +40,11 @@ func fromTuple(_: type CollateralConfig, tupl: tuple): CollateralConfig = repairRewardPercentage: tupl[0], maxNumberOfSlashes: tupl[1], slashCriterion: tupl[2], - slashPercentage: tupl[3] + slashPercentage: tupl[3], ) func fromTuple(_: type MarketplaceConfig, tupl: tuple): MarketplaceConfig = - MarketplaceConfig( - collateral: tupl[0], - proofs: tupl[1] - ) + MarketplaceConfig(collateral: tupl[0], proofs: tupl[1]) func solidityType*(_: type ProofConfig): string = solidityType(ProofConfig.fieldTypes) diff --git a/codex/contracts/deployment.nim b/codex/contracts/deployment.nim index 611aa3595..c4e59b804 100644 --- a/codex/contracts/deployment.nim +++ b/codex/contracts/deployment.nim @@ -12,23 +12,20 @@ type Deployment* = ref object config: CodexConf const knownAddresses = { - # Hardhat localhost network - "31337": { - "Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44"), - }.toTable, - # Taiko Alpha-3 Testnet - "167005": { - "Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F") - }.toTable, - # Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC) - "789987": { - "Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648") - }.toTable + # Hardhat localhost network + "31337": + {"Marketplace": Address.init("0x322813Fd9A801c5507c9de605d63CEA4f2CE6c44")}.toTable, + # Taiko Alpha-3 Testnet + "167005": + {"Marketplace": Address.init("0x948CF9291b77Bd7ad84781b9047129Addf1b894F")}.toTable, + # Codex Testnet - Nov 25 2024 18:41:29 PM (+00:00 UTC) + "789987": + {"Marketplace": Address.init("0xAB03b6a58C5262f530D54146DA2a552B1C0F7648")}.toTable, }.toTable proc getKnownAddress(T: type, chainId: UInt256): ?Address = let id = chainId.toString(10) - notice "Looking for well-known contract address with ChainID ", chainId=id + notice "Looking for well-known contract address with ChainID ", chainId = id if not (id in knownAddresses): return none Address diff --git a/codex/contracts/interactions/clientinteractions.nim b/codex/contracts/interactions/clientinteractions.nim index 78b3bedf8..df81da117 100644 --- a/codex/contracts/interactions/clientinteractions.nim +++ b/codex/contracts/interactions/clientinteractions.nim @@ -9,13 +9,12 @@ import ./interactions export purchasing export logutils -type - ClientInteractions* = ref object of ContractInteractions - purchasing*: Purchasing +type ClientInteractions* = ref object of ContractInteractions + purchasing*: Purchasing -proc new*(_: type ClientInteractions, - clock: OnChainClock, - purchasing: Purchasing): ClientInteractions = +proc new*( + _: type ClientInteractions, clock: OnChainClock, purchasing: Purchasing +): ClientInteractions = ClientInteractions(clock: clock, purchasing: purchasing) proc start*(self: ClientInteractions) {.async.} = diff --git a/codex/contracts/interactions/hostinteractions.nim b/codex/contracts/interactions/hostinteractions.nim index 2decfa44a..dd3117463 100644 --- a/codex/contracts/interactions/hostinteractions.nim +++ b/codex/contracts/interactions/hostinteractions.nim @@ -7,15 +7,10 @@ import ./interactions export sales export logutils -type - HostInteractions* = ref object of ContractInteractions - sales*: Sales +type HostInteractions* = ref object of 
ContractInteractions + sales*: Sales -proc new*( - _: type HostInteractions, - clock: Clock, - sales: Sales -): HostInteractions = +proc new*(_: type HostInteractions, clock: Clock, sales: Sales): HostInteractions = ## Create a new HostInteractions instance ## HostInteractions(clock: clock, sales: sales) diff --git a/codex/contracts/interactions/interactions.nim b/codex/contracts/interactions/interactions.nim index d4fddf54a..1006eb3f9 100644 --- a/codex/contracts/interactions/interactions.nim +++ b/codex/contracts/interactions/interactions.nim @@ -5,9 +5,8 @@ import ../market export clock -type - ContractInteractions* = ref object of RootObj - clock*: Clock +type ContractInteractions* = ref object of RootObj + clock*: Clock method start*(self: ContractInteractions) {.async, base.} = discard diff --git a/codex/contracts/interactions/validatorinteractions.nim b/codex/contracts/interactions/validatorinteractions.nim index 1aa4026c2..aae282023 100644 --- a/codex/contracts/interactions/validatorinteractions.nim +++ b/codex/contracts/interactions/validatorinteractions.nim @@ -3,13 +3,12 @@ import ../../validation export validation -type - ValidatorInteractions* = ref object of ContractInteractions - validation: Validation +type ValidatorInteractions* = ref object of ContractInteractions + validation: Validation -proc new*(_: type ValidatorInteractions, - clock: OnChainClock, - validation: Validation): ValidatorInteractions = +proc new*( + _: type ValidatorInteractions, clock: OnChainClock, validation: Validation +): ValidatorInteractions = ValidatorInteractions(clock: clock, validation: validation) proc start*(self: ValidatorInteractions) {.async.} = diff --git a/codex/contracts/market.nim b/codex/contracts/market.nim index 069028682..94dff5ef4 100644 --- a/codex/contracts/market.nim +++ b/codex/contracts/market.nim @@ -27,18 +27,12 @@ type eventSubscription: EventSubscription func new*( - _: type OnChainMarket, - contract: Marketplace, - rewardRecipient = Address.none): OnChainMarket = - + _: type OnChainMarket, contract: Marketplace, rewardRecipient = Address.none +): OnChainMarket = without signer =? 
contract.signer: raiseAssert("Marketplace contract should have a signer") - OnChainMarket( - contract: contract, - signer: signer, - rewardRecipient: rewardRecipient - ) + OnChainMarket(contract: contract, signer: signer, rewardRecipient: rewardRecipient) proc raiseMarketError(message: string) {.raises: [MarketError].} = raise newException(MarketError, message) @@ -105,18 +99,19 @@ method myRequests*(market: OnChainMarket): Future[seq[RequestId]] {.async.} = method mySlots*(market: OnChainMarket): Future[seq[SlotId]] {.async.} = convertEthersError: let slots = await market.contract.mySlots() - debug "Fetched my slots", numSlots=len(slots) + debug "Fetched my slots", numSlots = len(slots) return slots -method requestStorage(market: OnChainMarket, request: StorageRequest){.async.} = +method requestStorage(market: OnChainMarket, request: StorageRequest) {.async.} = convertEthersError: debug "Requesting storage" await market.approveFunds(request.price()) discard await market.contract.requestStorage(request).confirm(1) -method getRequest*(market: OnChainMarket, - id: RequestId): Future[?StorageRequest] {.async.} = +method getRequest*( + market: OnChainMarket, id: RequestId +): Future[?StorageRequest] {.async.} = convertEthersError: try: return some await market.contract.getRequest(id) @@ -125,8 +120,9 @@ method getRequest*(market: OnChainMarket, return none StorageRequest raise e -method requestState*(market: OnChainMarket, - requestId: RequestId): Future[?RequestState] {.async.} = +method requestState*( + market: OnChainMarket, requestId: RequestId +): Future[?RequestState] {.async.} = convertEthersError: try: let overrides = CallOverrides(blockTag: some BlockTag.pending) @@ -136,25 +132,26 @@ method requestState*(market: OnChainMarket, return none RequestState raise e -method slotState*(market: OnChainMarket, - slotId: SlotId): Future[SlotState] {.async.} = +method slotState*(market: OnChainMarket, slotId: SlotId): Future[SlotState] {.async.} = convertEthersError: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.slotState(slotId, overrides) -method getRequestEnd*(market: OnChainMarket, - id: RequestId): Future[SecondsSince1970] {.async.} = +method getRequestEnd*( + market: OnChainMarket, id: RequestId +): Future[SecondsSince1970] {.async.} = convertEthersError: return await market.contract.requestEnd(id) -method requestExpiresAt*(market: OnChainMarket, - id: RequestId): Future[SecondsSince1970] {.async.} = +method requestExpiresAt*( + market: OnChainMarket, id: RequestId +): Future[SecondsSince1970] {.async.} = convertEthersError: return await market.contract.requestExpiry(id) -method getHost(market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256): Future[?Address] {.async.} = +method getHost( + market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 +): Future[?Address] {.async.} = convertEthersError: let slotId = slotId(requestId, slotIndex) let address = await market.contract.getHost(slotId) @@ -163,8 +160,7 @@ method getHost(market: OnChainMarket, else: return none Address -method getActiveSlot*(market: OnChainMarket, - slotId: SlotId): Future[?Slot] {.async.} = +method getActiveSlot*(market: OnChainMarket, slotId: SlotId): Future[?Slot] {.async.} = convertEthersError: try: return some await market.contract.getActiveSlot(slotId) @@ -173,11 +169,13 @@ method getActiveSlot*(market: OnChainMarket, return none Slot raise e -method fillSlot(market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, 
- collateral: UInt256) {.async.} = +method fillSlot( + market: OnChainMarket, + requestId: RequestId, + slotIndex: UInt256, + proof: Groth16Proof, + collateral: UInt256, +) {.async.} = convertEthersError: logScope: requestId @@ -197,9 +195,9 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = let collateralRecipient = await market.getSigner() freeSlot = market.contract.freeSlot( slotId, - rewardRecipient, # --reward-recipient - collateralRecipient) # SP's address - + rewardRecipient, # --reward-recipient + collateralRecipient, + ) # SP's address else: # Otherwise, use the SP's address as both the reward and collateral # recipient (the contract will use msg.sender for both) @@ -207,14 +205,11 @@ method freeSlot*(market: OnChainMarket, slotId: SlotId) {.async.} = discard await freeSlot.confirm(1) - -method withdrawFunds(market: OnChainMarket, - requestId: RequestId) {.async.} = +method withdrawFunds(market: OnChainMarket, requestId: RequestId) {.async.} = convertEthersError: discard await market.contract.withdrawFunds(requestId).confirm(1) -method isProofRequired*(market: OnChainMarket, - id: SlotId): Future[bool] {.async.} = +method isProofRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = convertEthersError: try: let overrides = CallOverrides(blockTag: some BlockTag.pending) @@ -224,8 +219,7 @@ method isProofRequired*(market: OnChainMarket, return false raise e -method willProofBeRequired*(market: OnChainMarket, - id: SlotId): Future[bool] {.async.} = +method willProofBeRequired*(market: OnChainMarket, id: SlotId): Future[bool] {.async.} = convertEthersError: try: let overrides = CallOverrides(blockTag: some BlockTag.pending) @@ -235,27 +229,25 @@ method willProofBeRequired*(market: OnChainMarket, return false raise e -method getChallenge*(market: OnChainMarket, id: SlotId): Future[ProofChallenge] {.async.} = +method getChallenge*( + market: OnChainMarket, id: SlotId +): Future[ProofChallenge] {.async.} = convertEthersError: let overrides = CallOverrides(blockTag: some BlockTag.pending) return await market.contract.getChallenge(id, overrides) -method submitProof*(market: OnChainMarket, - id: SlotId, - proof: Groth16Proof) {.async.} = +method submitProof*(market: OnChainMarket, id: SlotId, proof: Groth16Proof) {.async.} = convertEthersError: discard await market.contract.submitProof(id, proof).confirm(1) -method markProofAsMissing*(market: OnChainMarket, - id: SlotId, - period: Period) {.async.} = +method markProofAsMissing*( + market: OnChainMarket, id: SlotId, period: Period +) {.async.} = convertEthersError: discard await market.contract.markProofAsMissing(id, period).confirm(1) method canProofBeMarkedAsMissing*( - market: OnChainMarket, - id: SlotId, - period: Period + market: OnChainMarket, id: SlotId, period: Period ): Future[bool] {.async.} = let provider = market.contract.provider let contractWithoutSigner = market.contract.connect(provider) @@ -268,46 +260,42 @@ method canProofBeMarkedAsMissing*( return false method reserveSlot*( - market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256) {.async.} = - + market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 +) {.async.} = convertEthersError: - discard await market.contract.reserveSlot( + discard await market.contract + .reserveSlot( requestId, slotIndex, # reserveSlot runs out of gas for unknown reason, but 100k gas covers it - TransactionOverrides(gasLimit: some 100000.u256) - ).confirm(1) + TransactionOverrides(gasLimit: some 100000.u256), + ) + .confirm(1) method 
canReserveSlot*( - market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256): Future[bool] {.async.} = - + market: OnChainMarket, requestId: RequestId, slotIndex: UInt256 +): Future[bool] {.async.} = convertEthersError: return await market.contract.canReserveSlot(requestId, slotIndex) -method subscribeRequests*(market: OnChainMarket, - callback: OnRequest): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!StorageRequested) {.upraises:[].} = +method subscribeRequests*( + market: OnChainMarket, callback: OnRequest +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!StorageRequested) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in Request subscription", msg = eventErr.msg return - callback(event.requestId, - event.ask, - event.expiry) + callback(event.requestId, event.ask, event.expiry) convertEthersError: let subscription = await market.contract.subscribe(StorageRequested, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeSlotFilled*(market: OnChainMarket, - callback: OnSlotFilled): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!SlotFilled) {.upraises:[].} = +method subscribeSlotFilled*( + market: OnChainMarket, callback: OnSlotFilled +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!SlotFilled) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in SlotFilled subscription", msg = eventErr.msg return @@ -318,11 +306,12 @@ method subscribeSlotFilled*(market: OnChainMarket, let subscription = await market.contract.subscribe(SlotFilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeSlotFilled*(market: OnChainMarket, - requestId: RequestId, - slotIndex: UInt256, - callback: OnSlotFilled): - Future[MarketSubscription] {.async.} = +method subscribeSlotFilled*( + market: OnChainMarket, + requestId: RequestId, + slotIndex: UInt256, + callback: OnSlotFilled, +): Future[MarketSubscription] {.async.} = proc onSlotFilled(eventRequestId: RequestId, eventSlotIndex: UInt256) = if eventRequestId == requestId and eventSlotIndex == slotIndex: callback(requestId, slotIndex) @@ -330,10 +319,10 @@ method subscribeSlotFilled*(market: OnChainMarket, convertEthersError: return await market.subscribeSlotFilled(onSlotFilled) -method subscribeSlotFreed*(market: OnChainMarket, - callback: OnSlotFreed): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!SlotFreed) {.upraises:[].} = +method subscribeSlotFreed*( + market: OnChainMarket, callback: OnSlotFreed +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!SlotFreed) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in SlotFreed subscription", msg = eventErr.msg return @@ -345,12 +334,12 @@ method subscribeSlotFreed*(market: OnChainMarket, return OnChainMarketSubscription(eventSubscription: subscription) method subscribeSlotReservationsFull*( - market: OnChainMarket, - callback: OnSlotReservationsFull): Future[MarketSubscription] {.async.} = - - proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises:[].} = + market: OnChainMarket, callback: OnSlotReservationsFull +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!SlotReservationsFull) {.upraises: [].} = without event =? 
eventResult, eventErr: - error "There was an error in SlotReservationsFull subscription", msg = eventErr.msg + error "There was an error in SlotReservationsFull subscription", + msg = eventErr.msg return callback(event.requestId, event.slotIndex) @@ -359,10 +348,10 @@ method subscribeSlotReservationsFull*( let subscription = await market.contract.subscribe(SlotReservationsFull, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeFulfillment(market: OnChainMarket, - callback: OnFulfillment): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFulfilled) {.upraises:[].} = +method subscribeFulfillment( + market: OnChainMarket, callback: OnFulfillment +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFulfillment subscription", msg = eventErr.msg return @@ -373,11 +362,10 @@ method subscribeFulfillment(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeFulfillment(market: OnChainMarket, - requestId: RequestId, - callback: OnFulfillment): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFulfilled) {.upraises:[].} = +method subscribeFulfillment( + market: OnChainMarket, requestId: RequestId, callback: OnFulfillment +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestFulfilled) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFulfillment subscription", msg = eventErr.msg return @@ -389,10 +377,10 @@ method subscribeFulfillment(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestFulfilled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeRequestCancelled*(market: OnChainMarket, - callback: OnRequestCancelled): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestCancelled) {.upraises:[].} = +method subscribeRequestCancelled*( + market: OnChainMarket, callback: OnRequestCancelled +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestCancelled subscription", msg = eventErr.msg return @@ -403,11 +391,10 @@ method subscribeRequestCancelled*(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeRequestCancelled*(market: OnChainMarket, - requestId: RequestId, - callback: OnRequestCancelled): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestCancelled) {.upraises:[].} = +method subscribeRequestCancelled*( + market: OnChainMarket, requestId: RequestId, callback: OnRequestCancelled +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestCancelled) {.upraises: [].} = without event =? 
eventResult, eventErr: error "There was an error in RequestCancelled subscription", msg = eventErr.msg return @@ -419,10 +406,10 @@ method subscribeRequestCancelled*(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestCancelled, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeRequestFailed*(market: OnChainMarket, - callback: OnRequestFailed): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} = +method subscribeRequestFailed*( + market: OnChainMarket, callback: OnRequestFailed +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFailed subscription", msg = eventErr.msg return @@ -433,11 +420,10 @@ method subscribeRequestFailed*(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeRequestFailed*(market: OnChainMarket, - requestId: RequestId, - callback: OnRequestFailed): - Future[MarketSubscription] {.async.} = - proc onEvent(eventResult: ?!RequestFailed) {.upraises:[]} = +method subscribeRequestFailed*( + market: OnChainMarket, requestId: RequestId, callback: OnRequestFailed +): Future[MarketSubscription] {.async.} = + proc onEvent(eventResult: ?!RequestFailed) {.upraises: [].} = without event =? eventResult, eventErr: error "There was an error in RequestFailed subscription", msg = eventErr.msg return @@ -449,9 +435,9 @@ method subscribeRequestFailed*(market: OnChainMarket, let subscription = await market.contract.subscribe(RequestFailed, onEvent) return OnChainMarketSubscription(eventSubscription: subscription) -method subscribeProofSubmission*(market: OnChainMarket, - callback: OnProofSubmitted): - Future[MarketSubscription] {.async.} = +method subscribeProofSubmission*( + market: OnChainMarket, callback: OnProofSubmitted +): Future[MarketSubscription] {.async.} = proc onEvent(eventResult: ?!ProofSubmitted) {.upraises: [].} = without event =? 
eventResult, eventErr: error "There was an error in ProofSubmitted subscription", msg = eventErr.msg @@ -467,48 +453,37 @@ method unsubscribe*(subscription: OnChainMarketSubscription) {.async.} = await subscription.eventSubscription.unsubscribe() method queryPastSlotFilledEvents*( - market: OnChainMarket, - fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} = - + market: OnChainMarket, fromBlock: BlockTag +): Future[seq[SlotFilled]] {.async.} = convertEthersError: - return await market.contract.queryFilter(SlotFilled, - fromBlock, - BlockTag.latest) + return await market.contract.queryFilter(SlotFilled, fromBlock, BlockTag.latest) method queryPastSlotFilledEvents*( - market: OnChainMarket, - blocksAgo: int): Future[seq[SlotFilled]] {.async.} = - + market: OnChainMarket, blocksAgo: int +): Future[seq[SlotFilled]] {.async.} = convertEthersError: - let fromBlock = - await market.contract.provider.pastBlockTag(blocksAgo) + let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastSlotFilledEvents(fromBlock) method queryPastSlotFilledEvents*( - market: OnChainMarket, - fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} = - + market: OnChainMarket, fromTime: SecondsSince1970 +): Future[seq[SlotFilled]] {.async.} = convertEthersError: - let fromBlock = - await market.contract.provider.blockNumberForEpoch(fromTime) + let fromBlock = await market.contract.provider.blockNumberForEpoch(fromTime) return await market.queryPastSlotFilledEvents(BlockTag.init(fromBlock)) method queryPastStorageRequestedEvents*( - market: OnChainMarket, - fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} = - + market: OnChainMarket, fromBlock: BlockTag +): Future[seq[StorageRequested]] {.async.} = convertEthersError: - return await market.contract.queryFilter(StorageRequested, - fromBlock, - BlockTag.latest) + return + await market.contract.queryFilter(StorageRequested, fromBlock, BlockTag.latest) method queryPastStorageRequestedEvents*( - market: OnChainMarket, - blocksAgo: int): Future[seq[StorageRequested]] {.async.} = - + market: OnChainMarket, blocksAgo: int +): Future[seq[StorageRequested]] {.async.} = convertEthersError: - let fromBlock = - await market.contract.provider.pastBlockTag(blocksAgo) + let fromBlock = await market.contract.provider.pastBlockTag(blocksAgo) return await market.queryPastStorageRequestedEvents(fromBlock) diff --git a/codex/contracts/marketplace.nim b/codex/contracts/marketplace.nim index 020f501eb..6d98135ab 100644 --- a/codex/contracts/marketplace.nim +++ b/codex/contracts/marketplace.nim @@ -14,8 +14,7 @@ export erc20 except `%`, `%*`, toJson export config export requests -type - Marketplace* = ref object of Contract +type Marketplace* = ref object of Contract proc configuration*(marketplace: Marketplace): MarketplaceConfig {.contract, view.} proc token*(marketplace: Marketplace): Address {.contract, view.} @@ -23,22 +22,54 @@ proc slashMisses*(marketplace: Marketplace): UInt256 {.contract, view.} proc slashPercentage*(marketplace: Marketplace): UInt256 {.contract, view.} proc minCollateralThreshold*(marketplace: Marketplace): UInt256 {.contract, view.} -proc requestStorage*(marketplace: Marketplace, request: StorageRequest): Confirmable {.contract.} -proc fillSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256, proof: Groth16Proof): Confirmable {.contract.} -proc withdrawFunds*(marketplace: Marketplace, requestId: RequestId): Confirmable {.contract.} -proc withdrawFunds*(marketplace: Marketplace, 
requestId: RequestId, withdrawAddress: Address): Confirmable {.contract.} +proc requestStorage*( + marketplace: Marketplace, request: StorageRequest +): Confirmable {.contract.} + +proc fillSlot*( + marketplace: Marketplace, + requestId: RequestId, + slotIndex: UInt256, + proof: Groth16Proof, +): Confirmable {.contract.} + +proc withdrawFunds*( + marketplace: Marketplace, requestId: RequestId +): Confirmable {.contract.} + +proc withdrawFunds*( + marketplace: Marketplace, requestId: RequestId, withdrawAddress: Address +): Confirmable {.contract.} + proc freeSlot*(marketplace: Marketplace, id: SlotId): Confirmable {.contract.} -proc freeSlot*(marketplace: Marketplace, id: SlotId, rewardRecipient: Address, collateralRecipient: Address): Confirmable {.contract.} -proc getRequest*(marketplace: Marketplace, id: RequestId): StorageRequest {.contract, view.} +proc freeSlot*( + marketplace: Marketplace, + id: SlotId, + rewardRecipient: Address, + collateralRecipient: Address, +): Confirmable {.contract.} + +proc getRequest*( + marketplace: Marketplace, id: RequestId +): StorageRequest {.contract, view.} + proc getHost*(marketplace: Marketplace, id: SlotId): Address {.contract, view.} proc getActiveSlot*(marketplace: Marketplace, id: SlotId): Slot {.contract, view.} proc myRequests*(marketplace: Marketplace): seq[RequestId] {.contract, view.} proc mySlots*(marketplace: Marketplace): seq[SlotId] {.contract, view.} -proc requestState*(marketplace: Marketplace, requestId: RequestId): RequestState {.contract, view.} +proc requestState*( + marketplace: Marketplace, requestId: RequestId +): RequestState {.contract, view.} + proc slotState*(marketplace: Marketplace, slotId: SlotId): SlotState {.contract, view.} -proc requestEnd*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.} -proc requestExpiry*(marketplace: Marketplace, requestId: RequestId): SecondsSince1970 {.contract, view.} +proc requestEnd*( + marketplace: Marketplace, requestId: RequestId +): SecondsSince1970 {.contract, view.} + +proc requestExpiry*( + marketplace: Marketplace, requestId: RequestId +): SecondsSince1970 {.contract, view.} proc proofTimeout*(marketplace: Marketplace): UInt256 {.contract, view.} @@ -46,11 +77,24 @@ proc proofEnd*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} proc missingProofs*(marketplace: Marketplace, id: SlotId): UInt256 {.contract, view.} proc isProofRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} proc willProofBeRequired*(marketplace: Marketplace, id: SlotId): bool {.contract, view.} -proc getChallenge*(marketplace: Marketplace, id: SlotId): array[32, byte] {.contract, view.} +proc getChallenge*( + marketplace: Marketplace, id: SlotId +): array[32, byte] {.contract, view.} + proc getPointer*(marketplace: Marketplace, id: SlotId): uint8 {.contract, view.} -proc submitProof*(marketplace: Marketplace, id: SlotId, proof: Groth16Proof): Confirmable {.contract.} -proc markProofAsMissing*(marketplace: Marketplace, id: SlotId, period: UInt256): Confirmable {.contract.} +proc submitProof*( + marketplace: Marketplace, id: SlotId, proof: Groth16Proof +): Confirmable {.contract.} + +proc markProofAsMissing*( + marketplace: Marketplace, id: SlotId, period: UInt256 +): Confirmable {.contract.} + +proc reserveSlot*( + marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256 +): Confirmable {.contract.} -proc reserveSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256): Confirmable {.contract.} -proc 
canReserveSlot*(marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256): bool {.contract, view.} +proc canReserveSlot*( + marketplace: Marketplace, requestId: RequestId, slotIndex: UInt256 +): bool {.contract, view.} diff --git a/codex/contracts/proofs.nim b/codex/contracts/proofs.nim index a7a59351e..771d685b1 100644 --- a/codex/contracts/proofs.nim +++ b/codex/contracts/proofs.nim @@ -7,13 +7,16 @@ type a*: G1Point b*: G2Point c*: G1Point + G1Point* = object x*: UInt256 y*: UInt256 + # A field element F_{p^2} encoded as `real + i * imag` Fp2Element* = object real*: UInt256 imag*: UInt256 + G2Point* = object x*: Fp2Element y*: Fp2Element diff --git a/codex/contracts/provider.nim b/codex/contracts/provider.nim index 62098fb5f..b7fc5602b 100644 --- a/codex/contracts/provider.nim +++ b/codex/contracts/provider.nim @@ -12,8 +12,9 @@ logScope: proc raiseProviderError(message: string) {.raises: [ProviderError].} = raise newException(ProviderError, message) -proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag): - Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} = +proc blockNumberAndTimestamp*( + provider: Provider, blockTag: BlockTag +): Future[(UInt256, UInt256)] {.async: (raises: [ProviderError]).} = without latestBlock =? await provider.getBlock(blockTag): raiseProviderError("Could not get latest block") @@ -23,14 +24,10 @@ proc blockNumberAndTimestamp*(provider: Provider, blockTag: BlockTag): return (latestBlockNumber, latestBlock.timestamp) proc binarySearchFindClosestBlock( - provider: Provider, - epochTime: int, - low: UInt256, - high: UInt256): Future[UInt256] {.async: (raises: [ProviderError]).} = - let (_, lowTimestamp) = - await provider.blockNumberAndTimestamp(BlockTag.init(low)) - let (_, highTimestamp) = - await provider.blockNumberAndTimestamp(BlockTag.init(high)) + provider: Provider, epochTime: int, low: UInt256, high: UInt256 +): Future[UInt256] {.async: (raises: [ProviderError]).} = + let (_, lowTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(low)) + let (_, highTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(high)) if abs(lowTimestamp.truncate(int) - epochTime) < abs(highTimestamp.truncate(int) - epochTime): return low @@ -41,8 +38,8 @@ proc binarySearchBlockNumberForEpoch( provider: Provider, epochTime: UInt256, latestBlockNumber: UInt256, - earliestBlockNumber: UInt256): Future[UInt256] - {.async: (raises: [ProviderError]).} = + earliestBlockNumber: UInt256, +): Future[UInt256] {.async: (raises: [ProviderError]).} = var low = earliestBlockNumber var high = latestBlockNumber @@ -52,7 +49,7 @@ proc binarySearchBlockNumberForEpoch( let mid = (low + high) div 2 let (midBlockNumber, midBlockTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.init(mid)) - + if midBlockTimestamp < epochTime: low = mid + 1 elif midBlockTimestamp > epochTime: @@ -63,16 +60,16 @@ proc binarySearchBlockNumberForEpoch( # low is always greater than high - this is why we use high, where # intuitively we would use low: await provider.binarySearchFindClosestBlock( - epochTime.truncate(int), low=high, high=low) + epochTime.truncate(int), low = high, high = low + ) proc blockNumberForEpoch*( - provider: Provider, - epochTime: SecondsSince1970): Future[UInt256] - {.async: (raises: [ProviderError]).} = + provider: Provider, epochTime: SecondsSince1970 +): Future[UInt256] {.async: (raises: [ProviderError]).} = let epochTimeUInt256 = epochTime.u256 - let (latestBlockNumber, latestBlockTimestamp) = + let (latestBlockNumber, 
latestBlockTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.latest) - let (earliestBlockNumber, earliestBlockTimestamp) = + let (earliestBlockNumber, earliestBlockTimestamp) = await provider.blockNumberAndTimestamp(BlockTag.earliest) # Initially we used the average block time to predict @@ -109,18 +106,18 @@ proc blockNumberForEpoch*( return latestBlockNumber if earliestBlockNumber > 0 and earliestBlockTimestamp > epochTimeUInt256: - let availableHistoryInDays = - (latestBlockTimestamp - earliestBlockTimestamp) div - 1.days.secs.u256 - warn "Short block history detected.", earliestBlockTimestamp = - earliestBlockTimestamp, days = availableHistoryInDays + let availableHistoryInDays = + (latestBlockTimestamp - earliestBlockTimestamp) div 1.days.secs.u256 + warn "Short block history detected.", + earliestBlockTimestamp = earliestBlockTimestamp, days = availableHistoryInDays return earliestBlockNumber return await provider.binarySearchBlockNumberForEpoch( - epochTimeUInt256, latestBlockNumber, earliestBlockNumber) + epochTimeUInt256, latestBlockNumber, earliestBlockNumber + ) -proc pastBlockTag*(provider: Provider, - blocksAgo: int): - Future[BlockTag] {.async: (raises: [ProviderError]).} = +proc pastBlockTag*( + provider: Provider, blocksAgo: int +): Future[BlockTag] {.async: (raises: [ProviderError]).} = let head = await provider.getBlockNumber() return BlockTag.init(head - blocksAgo.abs.u256) diff --git a/codex/contracts/requests.nim b/codex/contracts/requests.nim index 704341973..840f785e7 100644 --- a/codex/contracts/requests.nim +++ b/codex/contracts/requests.nim @@ -18,6 +18,7 @@ type content* {.serialize.}: StorageContent expiry* {.serialize.}: UInt256 nonce*: Nonce + StorageAsk* = object slots* {.serialize.}: uint64 slotSize* {.serialize.}: UInt256 @@ -26,12 +27,15 @@ type reward* {.serialize.}: UInt256 collateral* {.serialize.}: UInt256 maxSlotLoss* {.serialize.}: uint64 + StorageContent* = object cid* {.serialize.}: string merkleRoot*: array[32, byte] + Slot* = object request* {.serialize.}: StorageRequest slotIndex* {.serialize.}: UInt256 + SlotId* = distinct array[32, byte] RequestId* = distinct array[32, byte] Nonce* = distinct array[32, byte] @@ -41,6 +45,7 @@ type Cancelled Finished Failed + SlotState* {.pure.} = enum Free Filled @@ -80,27 +85,26 @@ proc toHex*[T: distinct](id: T): string = type baseType = T.distinctBase baseType(id).toHex -logutils.formatIt(LogFormat.textLines, Nonce): it.short0xHexLog -logutils.formatIt(LogFormat.textLines, RequestId): it.short0xHexLog -logutils.formatIt(LogFormat.textLines, SlotId): it.short0xHexLog -logutils.formatIt(LogFormat.json, Nonce): it.to0xHexLog -logutils.formatIt(LogFormat.json, RequestId): it.to0xHexLog -logutils.formatIt(LogFormat.json, SlotId): it.to0xHexLog +logutils.formatIt(LogFormat.textLines, Nonce): + it.short0xHexLog +logutils.formatIt(LogFormat.textLines, RequestId): + it.short0xHexLog +logutils.formatIt(LogFormat.textLines, SlotId): + it.short0xHexLog +logutils.formatIt(LogFormat.json, Nonce): + it.to0xHexLog +logutils.formatIt(LogFormat.json, RequestId): + it.to0xHexLog +logutils.formatIt(LogFormat.json, SlotId): + it.to0xHexLog func fromTuple(_: type StorageRequest, tupl: tuple): StorageRequest = StorageRequest( - client: tupl[0], - ask: tupl[1], - content: tupl[2], - expiry: tupl[3], - nonce: tupl[4] + client: tupl[0], ask: tupl[1], content: tupl[2], expiry: tupl[3], nonce: tupl[4] ) func fromTuple(_: type Slot, tupl: tuple): Slot = - Slot( - request: tupl[0], - slotIndex: tupl[1] - ) + Slot(request: 
tupl[0], slotIndex: tupl[1]) func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk = StorageAsk( @@ -110,14 +114,11 @@ func fromTuple(_: type StorageAsk, tupl: tuple): StorageAsk = proofProbability: tupl[3], reward: tupl[4], collateral: tupl[5], - maxSlotLoss: tupl[6] + maxSlotLoss: tupl[6], ) func fromTuple(_: type StorageContent, tupl: tuple): StorageContent = - StorageContent( - cid: tupl[0], - merkleRoot: tupl[1] - ) + StorageContent(cid: tupl[0], merkleRoot: tupl[1]) func solidityType*(_: type StorageContent): string = solidityType(StorageContent.fieldTypes) @@ -160,7 +161,7 @@ func decode*(decoder: var AbiDecoder, T: type Slot): ?!T = success Slot.fromTuple(tupl) func id*(request: StorageRequest): RequestId = - let encoding = AbiEncoder.encode((request, )) + let encoding = AbiEncoder.encode((request,)) RequestId(keccak256.digest(encoding).data) func slotId*(requestId: RequestId, slotIndex: UInt256): SlotId = diff --git a/codex/discovery.nim b/codex/discovery.nim index e3e37d611..9aa8c7d85 100644 --- a/codex/discovery.nim +++ b/codex/discovery.nim @@ -32,15 +32,15 @@ export discv5 logScope: topics = "codex discovery" -type - Discovery* = ref object of RootObj - protocol*: discv5.Protocol # dht protocol - key: PrivateKey # private key - peerId: PeerId # the peer id of the local node - announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records - providerRecord*: ?SignedPeerRecord # record to advertice node connection information, this carry any - # address that the node can be connected on - dhtRecord*: ?SignedPeerRecord # record to advertice DHT connection information +type Discovery* = ref object of RootObj + protocol*: discv5.Protocol # dht protocol + key: PrivateKey # private key + peerId: PeerId # the peer id of the local node + announceAddrs*: seq[MultiAddress] # addresses announced as part of the provider records + providerRecord*: ?SignedPeerRecord + # record to advertise node connection information, this carries any + # address that the node can be connected on + dhtRecord*: ?SignedPeerRecord # record to advertise DHT connection information proc toNodeId*(cid: Cid): NodeId = ## Cid to discovery id @@ -54,14 +54,11 @@ proc toNodeId*(host: ca.Address): NodeId = readUintBE[256](keccak256.digest(host.toArray).data) -proc findPeer*( - d: Discovery, - peerId: PeerId): Future[?PeerRecord] {.async.} = +proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = trace "protocol.resolve..." ## Find peer using the given Discovery object ## - let - node = await d.protocol.resolve(toNodeId(peerId)) + let node = await d.protocol.resolve(toNodeId(peerId)) return if node.isSome(): @@ -69,37 +66,31 @@ proc findPeer*( else: PeerRecord.none -method find*( - d: Discovery, - cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} = +method find*(d: Discovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, base.} = ## Find block providers ## - without providers =? - (await d.protocol.getProviders(cid.toNodeId())).mapFailure, error: + without providers =?
(await d.protocol.getProviders(cid.toNodeId())).mapFailure, error: warn "Error finding providers for block", cid, error = error.msg - return providers.filterIt( not (it.data.peerId == d.peerId) ) + return providers.filterIt(not (it.data.peerId == d.peerId)) method provide*(d: Discovery, cid: Cid) {.async, base.} = ## Provide a block Cid ## - let - nodes = await d.protocol.addProvider( - cid.toNodeId(), d.providerRecord.get) + let nodes = await d.protocol.addProvider(cid.toNodeId(), d.providerRecord.get) if nodes.len <= 0: warn "Couldn't provide to any nodes!" - method find*( - d: Discovery, - host: ca.Address): Future[seq[SignedPeerRecord]] {.async, base.} = + d: Discovery, host: ca.Address +): Future[seq[SignedPeerRecord]] {.async, base.} = ## Find host providers ## trace "Finding providers for host", host = $host - without var providers =? - (await d.protocol.getProviders(host.toNodeId())).mapFailure, error: + without var providers =? (await d.protocol.getProviders(host.toNodeId())).mapFailure, + error: trace "Error finding providers for host", host = $host, exc = error.msg return @@ -117,15 +108,11 @@ method provide*(d: Discovery, host: ca.Address) {.async, base.} = ## trace "Providing host", host = $host - let - nodes = await d.protocol.addProvider( - host.toNodeId(), d.providerRecord.get) + let nodes = await d.protocol.addProvider(host.toNodeId(), d.providerRecord.get) if nodes.len > 0: trace "Provided to nodes", nodes = nodes.len -method removeProvider*( - d: Discovery, - peerId: PeerId): Future[void] {.base, gcsafe.} = +method removeProvider*(d: Discovery, peerId: PeerId): Future[void] {.base, gcsafe.} = ## Remove provider from providers table ## @@ -139,26 +126,24 @@ proc updateAnnounceRecord*(d: Discovery, addrs: openArray[MultiAddress]) = d.announceAddrs = @addrs trace "Updating announce record", addrs = d.announceAddrs - d.providerRecord = SignedPeerRecord.init( - d.key, PeerRecord.init(d.peerId, d.announceAddrs)) - .expect("Should construct signed record").some + d.providerRecord = SignedPeerRecord + .init(d.key, PeerRecord.init(d.peerId, d.announceAddrs)) + .expect("Should construct signed record").some if not d.protocol.isNil: - d.protocol.updateRecord(d.providerRecord) - .expect("Should update SPR") + d.protocol.updateRecord(d.providerRecord).expect("Should update SPR") proc updateDhtRecord*(d: Discovery, addrs: openArray[MultiAddress]) = ## Update providers record ## trace "Updating Dht record", addrs = addrs - d.dhtRecord = SignedPeerRecord.init( - d.key, PeerRecord.init(d.peerId, @addrs)) - .expect("Should construct signed record").some + d.dhtRecord = SignedPeerRecord + .init(d.key, PeerRecord.init(d.peerId, @addrs)) + .expect("Should construct signed record").some if not d.protocol.isNil: - d.protocol.updateRecord(d.dhtRecord) - .expect("Should update SPR") + d.protocol.updateRecord(d.dhtRecord).expect("Should update SPR") proc start*(d: Discovery) {.async.} = d.protocol.open() @@ -174,15 +159,13 @@ proc new*( bindPort = 0.Port, announceAddrs: openArray[MultiAddress], bootstrapNodes: openArray[SignedPeerRecord] = [], - store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!") + store: Datastore = SQLiteDatastore.new(Memory).expect("Should not fail!"), ): Discovery = ## Create a new Discovery node instance for the given key and datastore ## - var - self = Discovery( - key: key, - peerId: PeerId.init(key).expect("Should construct PeerId")) + var self = + Discovery(key: key, peerId: PeerId.init(key).expect("Should construct PeerId")) 
self.updateAnnounceRecord(announceAddrs) @@ -190,11 +173,8 @@ proc new*( # FIXME disable IP limits temporarily so we can run our workshop. Re-enable # and figure out proper solution. let discoveryConfig = DiscoveryConfig( - tableIpLimits: TableIpLimits( - tableIpLimit: high(uint), - bucketIpLimit:high(uint) - ), - bitsPerHop: DefaultBitsPerHop + tableIpLimits: TableIpLimits(tableIpLimit: high(uint), bucketIpLimit: high(uint)), + bitsPerHop: DefaultBitsPerHop, ) # -------------------------------------------------------------------------- @@ -206,6 +186,7 @@ proc new*( bootstrapRecords = bootstrapNodes, rng = Rng.instance(), providers = ProvidersManager.new(store), - config = discoveryConfig) + config = discoveryConfig, + ) self diff --git a/codex/erasure/asyncbackend.nim b/codex/erasure/asyncbackend.nim index 4827806ad..1d069ead5 100644 --- a/codex/erasure/asyncbackend.nim +++ b/codex/erasure/asyncbackend.nim @@ -23,7 +23,8 @@ logScope: topics = "codex asyncerasure" const - CompletitionTimeout = 1.seconds # Maximum await time for completition after receiving a signal + CompletitionTimeout = 1.seconds + # Maximum await time for completion after receiving a signal CompletitionRetryDelay = 10.millis type @@ -62,12 +63,9 @@ proc encodeTask(args: EncodeTaskArgs, data: seq[seq[byte]]): EncodeTaskResult = let resDataSize = parity.len * args.blockSize resData = cast[ptr UncheckedArray[byte]](allocShared0(resDataSize)) - arrHolder = SharedArrayHolder[byte]( - data: resData, - size: resDataSize - ) + arrHolder = SharedArrayHolder[byte](data: resData, size: resDataSize) - for i in 0.. - self.store.getBlock( - BlockAddress.init(manifest.treeCid, i) - ).map((r: ?!bt.Block) => (r, i)) # Get the data blocks (first K) + pendingBlocks = indicies.map( + (i: int) => + self.store.getBlock(BlockAddress.init(manifest.treeCid, i)).map( + (r: ?!bt.Block) => (r, i) + ) # Get the data blocks (first K) ) - proc isFinished(): bool = pendingBlocks.len == 0 + proc isFinished(): bool = + pendingBlocks.len == 0 proc genNext(): Future[(?!bt.Block, int)] {.async.} = let completedFut = await one(pendingBlocks) @@ -123,29 +124,31 @@ proc getPendingBlocks( let (_, index) = await completedFut raise newException( CatchableError, - "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & $index) + "Future for block id not found, tree cid: " & $manifest.treeCid & ", index: " & + $index, + ) AsyncIter[(?!bt.Block, int)].new(genNext, isFinished) proc prepareEncodingData( - self: Erasure, - manifest: Manifest, - params: EncodingParams, - step: Natural, - data: ref seq[seq[byte]], - cids: ref seq[Cid], - emptyBlock: seq[byte]): Future[?!Natural] {.async.} = + self: Erasure, + manifest: Manifest, + params: EncodingParams, + step: Natural, + data: ref seq[seq[byte]], + cids: ref seq[Cid], + emptyBlock: seq[byte], +): Future[?!Natural] {.async.} = ## Prepare data for encoding ## let strategy = params.strategy.init( - firstIndex = 0, - lastIndex = params.rounded - 1, - iterations = params.steps + firstIndex = 0, lastIndex = params.rounded - 1, iterations = params.steps ) indicies = toSeq(strategy.getIndicies(step)) - pendingBlocksIter = self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount)) + pendingBlocksIter = + self.getPendingBlocks(manifest, indicies.filterIt(it < manifest.blocksCount)) var resolved = 0 for fut in pendingBlocksIter: @@ -164,20 +167,22 @@ proc prepareEncodingData( let pos = indexToPos(params.steps, idx, step) trace "Padding with empty block", idx shallowCopy(data[pos],
emptyBlock) - without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), err: + without emptyBlockCid =? emptyCid(manifest.version, manifest.hcodec, manifest.codec), + err: return failure(err) cids[idx] = emptyBlockCid success(resolved.Natural) proc prepareDecodingData( - self: Erasure, - encoded: Manifest, - step: Natural, - data: ref seq[seq[byte]], - parityData: ref seq[seq[byte]], - cids: ref seq[Cid], - emptyBlock: seq[byte]): Future[?!(Natural, Natural)] {.async.} = + self: Erasure, + encoded: Manifest, + step: Natural, + data: ref seq[seq[byte]], + parityData: ref seq[seq[byte]], + cids: ref seq[Cid], + emptyBlock: seq[byte], +): Future[?!(Natural, Natural)] {.async.} = ## Prepare data for decoding ## `encoded` - the encoded manifest ## `step` - the current step @@ -189,9 +194,7 @@ proc prepareDecodingData( let strategy = encoded.protectedStrategy.init( - firstIndex = 0, - lastIndex = encoded.blocksCount - 1, - iterations = encoded.steps + firstIndex = 0, lastIndex = encoded.blocksCount - 1, iterations = encoded.steps ) indicies = toSeq(strategy.getIndicies(step)) pendingBlocksIter = self.getPendingBlocks(encoded, indicies) @@ -211,20 +214,21 @@ proc prepareDecodingData( trace "Failed retreiving a block", idx, treeCid = encoded.treeCid, msg = err.msg continue - let - pos = indexToPos(encoded.steps, idx, step) + let pos = indexToPos(encoded.steps, idx, step) logScope: - cid = blk.cid - idx = idx - pos = pos - step = step + cid = blk.cid + idx = idx + pos = pos + step = step empty = blk.isEmpty cids[idx] = blk.cid if idx >= encoded.rounded: trace "Retrieved parity block" - shallowCopy(parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data) + shallowCopy( + parityData[pos - encoded.ecK], if blk.isEmpty: emptyBlock else: blk.data + ) parityPieces.inc else: trace "Retrieved data block" @@ -236,17 +240,19 @@ proc prepareDecodingData( return success (dataPieces.Natural, parityPieces.Natural) proc init*( - _: type EncodingParams, - manifest: Manifest, - ecK: Natural, ecM: Natural, - strategy: StrategyType): ?!EncodingParams = + _: type EncodingParams, + manifest: Manifest, + ecK: Natural, + ecM: Natural, + strategy: StrategyType, +): ?!EncodingParams = if ecK > manifest.blocksCount: let exc = (ref InsufficientBlocksError)( - msg: "Unable to encode manifest, not enough blocks, ecK = " & - $ecK & - ", blocksCount = " & - $manifest.blocksCount, - minSize: ecK.NBytes * manifest.blockSize) + msg: + "Unable to encode manifest, not enough blocks, ecK = " & $ecK & + ", blocksCount = " & $manifest.blocksCount, + minSize: ecK.NBytes * manifest.blockSize, + ) return failure(exc) let @@ -260,25 +266,23 @@ proc init*( rounded: rounded, steps: steps, blocksCount: blocksCount, - strategy: strategy + strategy: strategy, ) proc encodeData( - self: Erasure, - manifest: Manifest, - params: EncodingParams - ): Future[?!Manifest] {.async.} = + self: Erasure, manifest: Manifest, params: EncodingParams +): Future[?!Manifest] {.async.} = ## Encode blocks pointed to by the protected manifest ## ## `manifest` - the manifest to encode ## logScope: - steps = params.steps - rounded_blocks = params.rounded - blocks_count = params.blocksCount - ecK = params.ecK - ecM = params.ecM + steps = params.steps + rounded_blocks = params.rounded + blocks_count = params.blocksCount + ecK = params.ecK + ecM = params.ecM var cids = seq[Cid].new() @@ -288,11 +292,12 @@ proc encodeData( cids[].setLen(params.blocksCount) try: - for step in 0.. 
i < tree.leavesCount) + let idxIter = + Iter[Natural].new(recoveredIndices).filter((i: Natural) => i < tree.leavesCount) if err =? (await self.store.putSomeProofs(tree, idxIter)).errorOption: - return failure(err) + return failure(err) let decoded = Manifest.new(encoded) @@ -479,14 +486,14 @@ proc stop*(self: Erasure) {.async.} = return proc new*( - T: type Erasure, - store: BlockStore, - encoderProvider: EncoderProvider, - decoderProvider: DecoderProvider): Erasure = + T: type Erasure, + store: BlockStore, + encoderProvider: EncoderProvider, + decoderProvider: DecoderProvider, +): Erasure = ## Create a new Erasure instance for encoding and decoding manifests ## Erasure( - store: store, - encoderProvider: encoderProvider, - decoderProvider: decoderProvider) + store: store, encoderProvider: encoderProvider, decoderProvider: decoderProvider + ) diff --git a/codex/errors.nim b/codex/errors.nim index d98bfc041..f7c2fa6bc 100644 --- a/codex/errors.nim +++ b/codex/errors.nim @@ -20,13 +20,15 @@ type CodexResult*[T] = Result[T, ref CodexError] template mapFailure*[T, V, E]( - exp: Result[T, V], - exc: typedesc[E], + exp: Result[T, V], exc: typedesc[E] ): Result[T, ref CatchableError] = ## Convert `Result[T, E]` to `Result[E, ref CatchableError]` ## - exp.mapErr(proc (e: V): ref CatchableError = (ref exc)(msg: $e)) + exp.mapErr( + proc(e: V): ref CatchableError = + (ref exc)(msg: $e) + ) template mapFailure*[T, V](exp: Result[T, V]): Result[T, ref CatchableError] = mapFailure(exp, CodexError) diff --git a/codex/indexingstrategy.nim b/codex/indexingstrategy.nim index d8eeba58f..063ecd98b 100644 --- a/codex/indexingstrategy.nim +++ b/codex/indexingstrategy.nim @@ -10,7 +10,7 @@ type # 0 => 0, 1, 2 # 1 => 3, 4, 5 # 2 => 6, 7, 8 - LinearStrategy, + LinearStrategy # Stepped indexing: # 0 => 0, 3, 6 @@ -21,31 +21,32 @@ type # Representing a strategy for grouping indices (of blocks usually) # Given an interation-count as input, will produce a seq of # selected indices. - IndexingError* = object of CodexError IndexingWrongIndexError* = object of IndexingError IndexingWrongIterationsError* = object of IndexingError IndexingStrategy* = object strategyType*: StrategyType - firstIndex*: int # Lowest index that can be returned - lastIndex*: int # Highest index that can be returned - iterations*: int # getIndices(iteration) will run from 0 ..< iterations + firstIndex*: int # Lowest index that can be returned + lastIndex*: int # Highest index that can be returned + iterations*: int # getIndices(iteration) will run from 0 ..< iterations step*: int -func checkIteration(self: IndexingStrategy, iteration: int): void {.raises: [IndexingError].} = +func checkIteration( + self: IndexingStrategy, iteration: int +): void {.raises: [IndexingError].} = if iteration >= self.iterations: raise newException( - IndexingError, - "Indexing iteration can't be greater than or equal to iterations.") + IndexingError, "Indexing iteration can't be greater than or equal to iterations." 
+ ) func getIter(first, last, step: int): Iter[int] = {.cast(noSideEffect).}: Iter[int].new(first, last, step) func getLinearIndicies( - self: IndexingStrategy, - iteration: int): Iter[int] {.raises: [IndexingError].} = + self: IndexingStrategy, iteration: int +): Iter[int] {.raises: [IndexingError].} = self.checkIteration(iteration) let @@ -55,8 +56,8 @@ func getLinearIndicies( getIter(first, last, 1) func getSteppedIndicies( - self: IndexingStrategy, - iteration: int): Iter[int] {.raises: [IndexingError].} = + self: IndexingStrategy, iteration: int +): Iter[int] {.raises: [IndexingError].} = self.checkIteration(iteration) let @@ -66,9 +67,8 @@ func getSteppedIndicies( getIter(first, last, self.iterations) func getIndicies*( - self: IndexingStrategy, - iteration: int): Iter[int] {.raises: [IndexingError].} = - + self: IndexingStrategy, iteration: int +): Iter[int] {.raises: [IndexingError].} = case self.strategyType of StrategyType.LinearStrategy: self.getLinearIndicies(iteration) @@ -76,22 +76,25 @@ func getIndicies*( self.getSteppedIndicies(iteration) func init*( - strategy: StrategyType, - firstIndex, lastIndex, iterations: int): IndexingStrategy {.raises: [IndexingError].} = - + strategy: StrategyType, firstIndex, lastIndex, iterations: int +): IndexingStrategy {.raises: [IndexingError].} = if firstIndex > lastIndex: raise newException( IndexingWrongIndexError, - "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & ")") + "firstIndex (" & $firstIndex & ") can't be greater than lastIndex (" & $lastIndex & + ")", + ) if iterations <= 0: raise newException( IndexingWrongIterationsError, - "iterations (" & $iterations & ") must be greater than zero.") + "iterations (" & $iterations & ") must be greater than zero.", + ) IndexingStrategy( strategyType: strategy, firstIndex: firstIndex, lastIndex: lastIndex, iterations: iterations, - step: divUp((lastIndex - firstIndex + 1), iterations)) + step: divUp((lastIndex - firstIndex + 1), iterations), + ) diff --git a/codex/logutils.nim b/codex/logutils.nim index e24b52d24..b37f69526 100644 --- a/codex/logutils.nim +++ b/codex/logutils.nim @@ -123,8 +123,9 @@ func shortLog*(long: string, ellipses = "*", start = 3, stop = 6): string = short func shortHexLog*(long: string): string = - if long[0..1] == "0x": result &= "0x" - result &= long[2..long.high].shortLog("..", 4, 4) + if long[0 .. 1] == "0x": + result &= "0x" + result &= long[2 .. 
long.high].shortLog("..", 4, 4) func short0xHexLog*[N: static[int], T: array[N, byte]](v: T): string = v.to0xHex.shortHexLog @@ -182,12 +183,16 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) = let v = opts.map(opt => opt.formatJsonOption) setProperty(r, key, json.`%`(v)) - proc setProperty*(r: var JsonRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} = + proc setProperty*( + r: var JsonRecord, key: string, val: seq[T] + ) {.raises: [ValueError, IOError].} = var it {.inject, used.}: T let v = val.map(it => body) setProperty(r, key, json.`%`(v)) - proc setProperty*(r: var JsonRecord, key: string, val: T) {.raises:[ValueError, IOError].} = + proc setProperty*( + r: var JsonRecord, key: string, val: T + ) {.raises: [ValueError, IOError].} = var it {.inject, used.}: T = val let v = body setProperty(r, key, json.`%`(v)) @@ -218,23 +223,35 @@ template formatIt*(format: LogFormat, T: typedesc, body: untyped) = let v = opts.map(opt => opt.formatTextLineOption) setProperty(r, key, v.formatTextLineSeq) - proc setProperty*(r: var TextLineRecord, key: string, val: seq[T]) {.raises:[ValueError, IOError].} = + proc setProperty*( + r: var TextLineRecord, key: string, val: seq[T] + ) {.raises: [ValueError, IOError].} = var it {.inject, used.}: T let v = val.map(it => body) setProperty(r, key, v.formatTextLineSeq) - proc setProperty*(r: var TextLineRecord, key: string, val: T) {.raises:[ValueError, IOError].} = + proc setProperty*( + r: var TextLineRecord, key: string, val: T + ) {.raises: [ValueError, IOError].} = var it {.inject, used.}: T = val let v = body setProperty(r, key, v) template formatIt*(T: type, body: untyped) {.dirty.} = - formatIt(LogFormat.textLines, T): body - formatIt(LogFormat.json, T): body - -formatIt(LogFormat.textLines, Cid): shortLog($it) -formatIt(LogFormat.json, Cid): $it -formatIt(UInt256): $it -formatIt(MultiAddress): $it -formatIt(LogFormat.textLines, array[32, byte]): it.short0xHexLog -formatIt(LogFormat.json, array[32, byte]): it.to0xHex + formatIt(LogFormat.textLines, T): + body + formatIt(LogFormat.json, T): + body + +formatIt(LogFormat.textLines, Cid): + shortLog($it) +formatIt(LogFormat.json, Cid): + $it +formatIt(UInt256): + $it +formatIt(MultiAddress): + $it +formatIt(LogFormat.textLines, array[32, byte]): + it.short0xHexLog +formatIt(LogFormat.json, array[32, byte]): + it.to0xHex diff --git a/codex/manifest/coders.nim b/codex/manifest/coders.nim index 4eed42990..0c461e458 100644 --- a/codex/manifest/coders.nim +++ b/codex/manifest/coders.nim @@ -10,9 +10,10 @@ # This module implements serialization and deserialization of Manifest import pkg/upraises -import times +import times -push: {.upraises: [].} +push: + {.upraises: [].} import std/tables import std/sequtils @@ -33,7 +34,7 @@ proc encode*(manifest: Manifest): ?!seq[byte] = ## multicodec container (Dag-pb) for now ## - ? manifest.verify() + ?manifest.verify() var pbNode = initProtoBuffer() # NOTE: The `Data` field in the the `dag-pb` @@ -98,7 +99,7 @@ proc encode*(manifest: Manifest): ?!seq[byte] = if manifest.filename.isSome: header.write(8, manifest.filename.get()) - if manifest.mimetype.isSome: + if manifest.mimetype.isSome: header.write(9, manifest.mimetype.get()) if manifest.uploadedAt.isSome: @@ -206,15 +207,14 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = if pbVerificationInfo.getField(4, verifiableStrategy).isErr: return failure("Unable to decode `verifiableStrategy` from manifest!") - let - treeCid = ? 
Cid.init(treeCidBuf).mapFailure + let treeCid = ?Cid.init(treeCidBuf).mapFailure var filenameOption = if filename.len == 0: string.none else: filename.some var mimetypeOption = if mimetype.len == 0: string.none else: mimetype.some var uploadedAtOption = if uploadedAt == 0: int64.none else: uploadedAt.int64.some - let - self = if protected: + let self = + if protected: Manifest.new( treeCid = treeCid, datasetSize = datasetSize.NBytes, @@ -224,37 +224,39 @@ proc decode*(_: type Manifest, data: openArray[byte]): ?!Manifest = codec = codec.MultiCodec, ecK = ecK.int, ecM = ecM.int, - originalTreeCid = ? Cid.init(originalTreeCid).mapFailure, + originalTreeCid = ?Cid.init(originalTreeCid).mapFailure, originalDatasetSize = originalDatasetSize.NBytes, strategy = StrategyType(protectedStrategy), filename = filenameOption, mimetype = mimetypeOption, - uploadedAt = uploadedAtOption) - else: - Manifest.new( - treeCid = treeCid, - datasetSize = datasetSize.NBytes, - blockSize = blockSize.NBytes, - version = CidVersion(version), - hcodec = hcodec.MultiCodec, - codec = codec.MultiCodec, - filename = filenameOption, - mimetype = mimetypeOption, - uploadedAt = uploadedAtOption) - - ? self.verify() + uploadedAt = uploadedAtOption, + ) + else: + Manifest.new( + treeCid = treeCid, + datasetSize = datasetSize.NBytes, + blockSize = blockSize.NBytes, + version = CidVersion(version), + hcodec = hcodec.MultiCodec, + codec = codec.MultiCodec, + filename = filenameOption, + mimetype = mimetypeOption, + uploadedAt = uploadedAtOption, + ) + + ?self.verify() if verifiable: let - verifyRootCid = ? Cid.init(verifyRoot).mapFailure - slotRootCids = slotRoots.mapIt(? Cid.init(it).mapFailure) + verifyRootCid = ?Cid.init(verifyRoot).mapFailure + slotRootCids = slotRoots.mapIt(?Cid.init(it).mapFailure) return Manifest.new( manifest = self, verifyRoot = verifyRootCid, slotRoots = slotRootCids, cellSize = cellSize.NBytes, - strategy = StrategyType(verifiableStrategy) + strategy = StrategyType(verifiableStrategy), ) self.success @@ -263,7 +265,7 @@ func decode*(_: type Manifest, blk: Block): ?!Manifest = ## Decode a manifest using `decoder` ## - if not ? 
blk.cid.isManifest: + if not ?blk.cid.isManifest: return failure "Cid not a manifest codec" Manifest.decode(blk.data) diff --git a/codex/manifest/manifest.nim b/codex/manifest/manifest.nim index 73644dd23..6e0d1b80f 100644 --- a/codex/manifest/manifest.nim +++ b/codex/manifest/manifest.nim @@ -11,7 +11,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/libp2p/protobuf/minprotobuf import pkg/libp2p/[cid, multihash, multicodec] @@ -25,37 +26,37 @@ import ../blocktype import ../indexingstrategy import ../logutils - # TODO: Manifest should be reworked to more concrete types, # perhaps using inheritance -type - Manifest* = ref object of RootObj - treeCid {.serialize.}: Cid # Root of the merkle tree - datasetSize {.serialize.}: NBytes # Total size of all blocks - blockSize {.serialize.}: NBytes # Size of each contained block (might not be needed if blocks are len-prefixed) - codec: MultiCodec # Dataset codec - hcodec: MultiCodec # Multihash codec - version: CidVersion # Cid version - filename {.serialize.}: ?string # The filename of the content uploaded (optional) - mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional) - uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds - case protected {.serialize.}: bool # Protected datasets have erasure coded info +type Manifest* = ref object of RootObj + treeCid {.serialize.}: Cid # Root of the merkle tree + datasetSize {.serialize.}: NBytes # Total size of all blocks + blockSize {.serialize.}: NBytes + # Size of each contained block (might not be needed if blocks are len-prefixed) + codec: MultiCodec # Dataset codec + hcodec: MultiCodec # Multihash codec + version: CidVersion # Cid version + filename {.serialize.}: ?string # The filename of the content uploaded (optional) + mimetype {.serialize.}: ?string # The mimetype of the content uploaded (optional) + uploadedAt {.serialize.}: ?int64 # The UTC creation timestamp in seconds + case protected {.serialize.}: bool # Protected datasets have erasure coded info + of true: + ecK: int # Number of blocks to encode + ecM: int # Number of resulting parity blocks + originalTreeCid: Cid # The original root of the dataset being erasure coded + originalDatasetSize: NBytes + protectedStrategy: StrategyType # Indexing strategy used to build the slot roots + case verifiable {.serialize.}: bool + # Verifiable datasets can be used to generate storage proofs of true: - ecK: int # Number of blocks to encode - ecM: int # Number of resulting parity blocks - originalTreeCid: Cid # The original root of the dataset being erasure coded - originalDatasetSize: NBytes - protectedStrategy: StrategyType # Indexing strategy used to build the slot roots - case verifiable {.serialize.}: bool # Verifiable datasets can be used to generate storage proofs - of true: - verifyRoot: Cid # Root of the top level merkle tree built from slot roots - slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks - cellSize: NBytes # Size of each slot cell - verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots - else: - discard + verifyRoot: Cid # Root of the top level merkle tree built from slot roots + slotRoots: seq[Cid] # Individual slot root built from the original dataset blocks + cellSize: NBytes # Size of each slot cell + verifiableStrategy: StrategyType # Indexing strategy used to build the slot roots else: discard + else: + discard ############################################################ # Accessors @@ 
-137,7 +138,7 @@ func uploadedAt*(self: Manifest): ?int64 = ############################################################ func isManifest*(cid: Cid): ?!bool = - success (ManifestCodec == ? cid.contentType().mapFailure(CodexError)) + success (ManifestCodec == ?cid.contentType().mapFailure(CodexError)) func isManifest*(mc: MultiCodec): ?!bool = success mc == ManifestCodec @@ -159,7 +160,8 @@ func verify*(self: Manifest): ?!void = ## if self.protected and (self.blocksCount != self.steps * (self.ecK + self.ecM)): - return failure newException(CodexError, "Broken manifest: wrong originalBlocksCount") + return + failure newException(CodexError, "Broken manifest: wrong originalBlocksCount") return success() @@ -167,41 +169,32 @@ func cid*(self: Manifest): ?!Cid {.deprecated: "use treeCid instead".} = self.treeCid.success func `==`*(a, b: Manifest): bool = - (a.treeCid == b.treeCid) and - (a.datasetSize == b.datasetSize) and - (a.blockSize == b.blockSize) and - (a.version == b.version) and - (a.hcodec == b.hcodec) and - (a.codec == b.codec) and - (a.protected == b.protected) and - (a.filename == b.filename) and - (a.mimetype == b.mimetype) and - (a.uploadedAt == b.uploadedAt) and - (if a.protected: - (a.ecK == b.ecK) and - (a.ecM == b.ecM) and - (a.originalTreeCid == b.originalTreeCid) and - (a.originalDatasetSize == b.originalDatasetSize) and - (a.protectedStrategy == b.protectedStrategy) and - (a.verifiable == b.verifiable) and - (if a.verifiable: - (a.verifyRoot == b.verifyRoot) and - (a.slotRoots == b.slotRoots) and - (a.cellSize == b.cellSize) and - (a.verifiableStrategy == b.verifiableStrategy) + (a.treeCid == b.treeCid) and (a.datasetSize == b.datasetSize) and + (a.blockSize == b.blockSize) and (a.version == b.version) and (a.hcodec == b.hcodec) and + (a.codec == b.codec) and (a.protected == b.protected) and (a.filename == b.filename) and + (a.mimetype == b.mimetype) and (a.uploadedAt == b.uploadedAt) and ( + if a.protected: + (a.ecK == b.ecK) and (a.ecM == b.ecM) and (a.originalTreeCid == b.originalTreeCid) and + (a.originalDatasetSize == b.originalDatasetSize) and + (a.protectedStrategy == b.protectedStrategy) and (a.verifiable == b.verifiable) and + ( + if a.verifiable: + (a.verifyRoot == b.verifyRoot) and (a.slotRoots == b.slotRoots) and + (a.cellSize == b.cellSize) and ( + a.verifiableStrategy == b.verifiableStrategy + ) else: - true) + true + ) else: - true) + true + ) func `$`*(self: Manifest): string = - result = "treeCid: " & $self.treeCid & - ", datasetSize: " & $self.datasetSize & - ", blockSize: " & $self.blockSize & - ", version: " & $self.version & - ", hcodec: " & $self.hcodec & - ", codec: " & $self.codec & - ", protected: " & $self.protected + result = + "treeCid: " & $self.treeCid & ", datasetSize: " & $self.datasetSize & ", blockSize: " & + $self.blockSize & ", version: " & $self.version & ", hcodec: " & $self.hcodec & + ", codec: " & $self.codec & ", protected: " & $self.protected if self.filename.isSome: result &= ", filename: " & $self.filename @@ -212,19 +205,19 @@ func `$`*(self: Manifest): string = if self.uploadedAt.isSome: result &= ", uploadedAt: " & $self.uploadedAt - result &= (if self.protected: - ", ecK: " & $self.ecK & - ", ecM: " & $self.ecM & - ", originalTreeCid: " & $self.originalTreeCid & - ", originalDatasetSize: " & $self.originalDatasetSize & - ", verifiable: " & $self.verifiable & - (if self.verifiable: - ", verifyRoot: " & $self.verifyRoot & - ", slotRoots: " & $self.slotRoots + result &= ( + if self.protected: + ", ecK: " & $self.ecK & ", ecM: " & 
$self.ecM & ", originalTreeCid: " & + $self.originalTreeCid & ", originalDatasetSize: " & $self.originalDatasetSize & + ", verifiable: " & $self.verifiable & ( + if self.verifiable: + ", verifyRoot: " & $self.verifyRoot & ", slotRoots: " & $self.slotRoots + else: + "" + ) else: - "") - else: - "") + "" + ) return result @@ -233,18 +226,18 @@ func `$`*(self: Manifest): string = ############################################################ func new*( - T: type Manifest, - treeCid: Cid, - blockSize: NBytes, - datasetSize: NBytes, - version: CidVersion = CIDv1, - hcodec = Sha256HashCodec, - codec = BlockCodec, - protected = false, - filename: ?string = string.none, - mimetype: ?string = string.none, - uploadedAt: ?int64 = int64.none): Manifest = - + T: type Manifest, + treeCid: Cid, + blockSize: NBytes, + datasetSize: NBytes, + version: CidVersion = CIDv1, + hcodec = Sha256HashCodec, + codec = BlockCodec, + protected = false, + filename: ?string = string.none, + mimetype: ?string = string.none, + uploadedAt: ?int64 = int64.none, +): Manifest = T( treeCid: treeCid, blockSize: blockSize, @@ -255,15 +248,17 @@ func new*( protected: protected, filename: filename, mimetype: mimetype, - uploadedAt: uploadedAt) + uploadedAt: uploadedAt, + ) func new*( - T: type Manifest, - manifest: Manifest, - treeCid: Cid, - datasetSize: NBytes, - ecK, ecM: int, - strategy = SteppedStrategy): Manifest = + T: type Manifest, + manifest: Manifest, + treeCid: Cid, + datasetSize: NBytes, + ecK, ecM: int, + strategy = SteppedStrategy, +): Manifest = ## Create an erasure protected dataset from an ## unprotected one ## @@ -276,18 +271,17 @@ func new*( hcodec: manifest.hcodec, blockSize: manifest.blockSize, protected: true, - ecK: ecK, ecM: ecM, + ecK: ecK, + ecM: ecM, originalTreeCid: manifest.treeCid, originalDatasetSize: manifest.datasetSize, protectedStrategy: strategy, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt - ) + uploadedAt: manifest.uploadedAt, + ) -func new*( - T: type Manifest, - manifest: Manifest): Manifest = +func new*(T: type Manifest, manifest: Manifest): Manifest = ## Create an unprotected dataset from an ## erasure protected one ## @@ -302,25 +296,26 @@ func new*( protected: false, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt) + uploadedAt: manifest.uploadedAt, + ) func new*( - T: type Manifest, - treeCid: Cid, - datasetSize: NBytes, - blockSize: NBytes, - version: CidVersion, - hcodec: MultiCodec, - codec: MultiCodec, - ecK: int, - ecM: int, - originalTreeCid: Cid, - originalDatasetSize: NBytes, - strategy = SteppedStrategy, - filename: ?string = string.none, - mimetype: ?string = string.none, - uploadedAt: ?int64 = int64.none): Manifest = - + T: type Manifest, + treeCid: Cid, + datasetSize: NBytes, + blockSize: NBytes, + version: CidVersion, + hcodec: MultiCodec, + codec: MultiCodec, + ecK: int, + ecM: int, + originalTreeCid: Cid, + originalDatasetSize: NBytes, + strategy = SteppedStrategy, + filename: ?string = string.none, + mimetype: ?string = string.none, + uploadedAt: ?int64 = int64.none, +): Manifest = Manifest( treeCid: treeCid, datasetSize: datasetSize, @@ -334,28 +329,30 @@ func new*( originalTreeCid: originalTreeCid, originalDatasetSize: originalDatasetSize, protectedStrategy: strategy, - filename: filename, + filename: filename, mimetype: mimetype, - uploadedAt: uploadedAt) + uploadedAt: uploadedAt, + ) func new*( - T: type Manifest, - manifest: Manifest, - verifyRoot: Cid, - slotRoots: 
openArray[Cid], - cellSize = DefaultCellSize, - strategy = LinearStrategy): ?!Manifest = + T: type Manifest, + manifest: Manifest, + verifyRoot: Cid, + slotRoots: openArray[Cid], + cellSize = DefaultCellSize, + strategy = LinearStrategy, +): ?!Manifest = ## Create a verifiable dataset from an ## protected one ## if not manifest.protected: return failure newException( - CodexError, "Can create verifiable manifest only from protected manifest.") + CodexError, "Can create verifiable manifest only from protected manifest." + ) if slotRoots.len != manifest.numSlots: - return failure newException( - CodexError, "Wrong number of slot roots.") + return failure newException(CodexError, "Wrong number of slot roots.") success Manifest( treeCid: manifest.treeCid, @@ -377,12 +374,10 @@ func new*( verifiableStrategy: strategy, filename: manifest.filename, mimetype: manifest.mimetype, - uploadedAt: manifest.uploadedAt - ) + uploadedAt: manifest.uploadedAt, + ) -func new*( - T: type Manifest, - data: openArray[byte]): ?!Manifest = +func new*(T: type Manifest, data: openArray[byte]): ?!Manifest = ## Create a manifest instance from given data ## diff --git a/codex/market.nim b/codex/market.nim index 38df96693..4c8e459c8 100644 --- a/codex/market.nim +++ b/codex/market.nim @@ -19,16 +19,17 @@ type Market* = ref object of RootObj MarketError* = object of CodexError Subscription* = ref object of RootObj - OnRequest* = proc(id: RequestId, - ask: StorageAsk, - expiry: UInt256) {.gcsafe, upraises:[].} + OnRequest* = + proc(id: RequestId, ask: StorageAsk, expiry: UInt256) {.gcsafe, upraises: [].} OnFulfillment* = proc(requestId: RequestId) {.gcsafe, upraises: [].} - OnSlotFilled* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises:[].} + OnSlotFilled* = + proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} OnSlotFreed* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} - OnSlotReservationsFull* = proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} - OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises:[].} - OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises:[].} - OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises:[].} + OnSlotReservationsFull* = + proc(requestId: RequestId, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnRequestCancelled* = proc(requestId: RequestId) {.gcsafe, upraises: [].} + OnRequestFailed* = proc(requestId: RequestId) {.gcsafe, upraises: [].} + OnProofSubmitted* = proc(id: SlotId) {.gcsafe, upraises: [].} ProofChallenge* = array[32, byte] # Marketplace events -- located here due to the Market abstraction @@ -37,21 +38,28 @@ type requestId*: RequestId ask*: StorageAsk expiry*: UInt256 + SlotFilled* = object of MarketplaceEvent requestId* {.indexed.}: RequestId slotIndex*: UInt256 + SlotFreed* = object of MarketplaceEvent requestId* {.indexed.}: RequestId slotIndex*: UInt256 + SlotReservationsFull* = object of MarketplaceEvent requestId* {.indexed.}: RequestId slotIndex*: UInt256 + RequestFulfilled* = object of MarketplaceEvent requestId* {.indexed.}: RequestId + RequestCancelled* = object of MarketplaceEvent requestId* {.indexed.}: RequestId + RequestFailed* = object of MarketplaceEvent requestId* {.indexed.}: RequestId + ProofSubmitted* = object of MarketplaceEvent id*: SlotId @@ -81,8 +89,7 @@ proc inDowntime*(market: Market, slotId: SlotId): Future[bool] {.async.} = let pntr = await market.getPointer(slotId) return pntr < downtime -method requestStorage*(market: Market, - 
request: StorageRequest) {.base, async.} = +method requestStorage*(market: Market, request: StorageRequest) {.base, async.} = raiseAssert("not implemented") method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} = @@ -91,182 +98,168 @@ method myRequests*(market: Market): Future[seq[RequestId]] {.base, async.} = method mySlots*(market: Market): Future[seq[SlotId]] {.base, async.} = raiseAssert("not implemented") -method getRequest*(market: Market, - id: RequestId): - Future[?StorageRequest] {.base, async.} = +method getRequest*( + market: Market, id: RequestId +): Future[?StorageRequest] {.base, async.} = raiseAssert("not implemented") -method requestState*(market: Market, - requestId: RequestId): Future[?RequestState] {.base, async.} = +method requestState*( + market: Market, requestId: RequestId +): Future[?RequestState] {.base, async.} = raiseAssert("not implemented") -method slotState*(market: Market, - slotId: SlotId): Future[SlotState] {.base, async.} = +method slotState*(market: Market, slotId: SlotId): Future[SlotState] {.base, async.} = raiseAssert("not implemented") -method getRequestEnd*(market: Market, - id: RequestId): Future[SecondsSince1970] {.base, async.} = +method getRequestEnd*( + market: Market, id: RequestId +): Future[SecondsSince1970] {.base, async.} = raiseAssert("not implemented") -method requestExpiresAt*(market: Market, - id: RequestId): Future[SecondsSince1970] {.base, async.} = +method requestExpiresAt*( + market: Market, id: RequestId +): Future[SecondsSince1970] {.base, async.} = raiseAssert("not implemented") -method getHost*(market: Market, - requestId: RequestId, - slotIndex: UInt256): Future[?Address] {.base, async.} = +method getHost*( + market: Market, requestId: RequestId, slotIndex: UInt256 +): Future[?Address] {.base, async.} = raiseAssert("not implemented") -method getActiveSlot*( - market: Market, - slotId: SlotId): Future[?Slot] {.base, async.} = - +method getActiveSlot*(market: Market, slotId: SlotId): Future[?Slot] {.base, async.} = raiseAssert("not implemented") -method fillSlot*(market: Market, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, - collateral: UInt256) {.base, async.} = +method fillSlot*( + market: Market, + requestId: RequestId, + slotIndex: UInt256, + proof: Groth16Proof, + collateral: UInt256, +) {.base, async.} = raiseAssert("not implemented") method freeSlot*(market: Market, slotId: SlotId) {.base, async.} = raiseAssert("not implemented") -method withdrawFunds*(market: Market, - requestId: RequestId) {.base, async.} = +method withdrawFunds*(market: Market, requestId: RequestId) {.base, async.} = raiseAssert("not implemented") -method subscribeRequests*(market: Market, - callback: OnRequest): - Future[Subscription] {.base, async.} = +method subscribeRequests*( + market: Market, callback: OnRequest +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method isProofRequired*(market: Market, - id: SlotId): Future[bool] {.base, async.} = +method isProofRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} = raiseAssert("not implemented") -method willProofBeRequired*(market: Market, - id: SlotId): Future[bool] {.base, async.} = +method willProofBeRequired*(market: Market, id: SlotId): Future[bool] {.base, async.} = raiseAssert("not implemented") -method getChallenge*(market: Market, id: SlotId): Future[ProofChallenge] {.base, async.} = +method getChallenge*( + market: Market, id: SlotId +): Future[ProofChallenge] {.base, async.} = raiseAssert("not 
implemented") -method submitProof*(market: Market, - id: SlotId, - proof: Groth16Proof) {.base, async.} = +method submitProof*(market: Market, id: SlotId, proof: Groth16Proof) {.base, async.} = raiseAssert("not implemented") -method markProofAsMissing*(market: Market, - id: SlotId, - period: Period) {.base, async.} = +method markProofAsMissing*(market: Market, id: SlotId, period: Period) {.base, async.} = raiseAssert("not implemented") -method canProofBeMarkedAsMissing*(market: Market, - id: SlotId, - period: Period): Future[bool] {.base, async.} = +method canProofBeMarkedAsMissing*( + market: Market, id: SlotId, period: Period +): Future[bool] {.base, async.} = raiseAssert("not implemented") method reserveSlot*( - market: Market, - requestId: RequestId, - slotIndex: UInt256) {.base, async.} = - + market: Market, requestId: RequestId, slotIndex: UInt256 +) {.base, async.} = raiseAssert("not implemented") method canReserveSlot*( - market: Market, - requestId: RequestId, - slotIndex: UInt256): Future[bool] {.base, async.} = - + market: Market, requestId: RequestId, slotIndex: UInt256 +): Future[bool] {.base, async.} = raiseAssert("not implemented") -method subscribeFulfillment*(market: Market, - callback: OnFulfillment): - Future[Subscription] {.base, async.} = +method subscribeFulfillment*( + market: Market, callback: OnFulfillment +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeFulfillment*(market: Market, - requestId: RequestId, - callback: OnFulfillment): - Future[Subscription] {.base, async.} = +method subscribeFulfillment*( + market: Market, requestId: RequestId, callback: OnFulfillment +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeSlotFilled*(market: Market, - callback: OnSlotFilled): - Future[Subscription] {.base, async.} = +method subscribeSlotFilled*( + market: Market, callback: OnSlotFilled +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeSlotFilled*(market: Market, - requestId: RequestId, - slotIndex: UInt256, - callback: OnSlotFilled): - Future[Subscription] {.base, async.} = +method subscribeSlotFilled*( + market: Market, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeSlotFreed*(market: Market, - callback: OnSlotFreed): - Future[Subscription] {.base, async.} = +method subscribeSlotFreed*( + market: Market, callback: OnSlotFreed +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") method subscribeSlotReservationsFull*( - market: Market, - callback: OnSlotReservationsFull): Future[Subscription] {.base, async.} = - + market: Market, callback: OnSlotReservationsFull +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeRequestCancelled*(market: Market, - callback: OnRequestCancelled): - Future[Subscription] {.base, async.} = +method subscribeRequestCancelled*( + market: Market, callback: OnRequestCancelled +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeRequestCancelled*(market: Market, - requestId: RequestId, - callback: OnRequestCancelled): - Future[Subscription] {.base, async.} = +method subscribeRequestCancelled*( + market: Market, requestId: RequestId, callback: OnRequestCancelled +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeRequestFailed*(market: Market, - callback: OnRequestFailed): - 
Future[Subscription] {.base, async.} = +method subscribeRequestFailed*( + market: Market, callback: OnRequestFailed +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeRequestFailed*(market: Market, - requestId: RequestId, - callback: OnRequestFailed): - Future[Subscription] {.base, async.} = +method subscribeRequestFailed*( + market: Market, requestId: RequestId, callback: OnRequestFailed +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method subscribeProofSubmission*(market: Market, - callback: OnProofSubmitted): - Future[Subscription] {.base, async.} = +method subscribeProofSubmission*( + market: Market, callback: OnProofSubmitted +): Future[Subscription] {.base, async.} = raiseAssert("not implemented") -method unsubscribe*(subscription: Subscription) {.base, async, upraises:[].} = +method unsubscribe*(subscription: Subscription) {.base, async, upraises: [].} = raiseAssert("not implemented") method queryPastSlotFilledEvents*( - market: Market, - fromBlock: BlockTag): Future[seq[SlotFilled]] {.base, async.} = + market: Market, fromBlock: BlockTag +): Future[seq[SlotFilled]] {.base, async.} = raiseAssert("not implemented") method queryPastSlotFilledEvents*( - market: Market, - blocksAgo: int): Future[seq[SlotFilled]] {.base, async.} = + market: Market, blocksAgo: int +): Future[seq[SlotFilled]] {.base, async.} = raiseAssert("not implemented") method queryPastSlotFilledEvents*( - market: Market, - fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.base, async.} = + market: Market, fromTime: SecondsSince1970 +): Future[seq[SlotFilled]] {.base, async.} = raiseAssert("not implemented") method queryPastStorageRequestedEvents*( - market: Market, - fromBlock: BlockTag): Future[seq[StorageRequested]] {.base, async.} = + market: Market, fromBlock: BlockTag +): Future[seq[StorageRequested]] {.base, async.} = raiseAssert("not implemented") method queryPastStorageRequestedEvents*( - market: Market, - blocksAgo: int): Future[seq[StorageRequested]] {.base, async.} = + market: Market, blocksAgo: int +): Future[seq[StorageRequested]] {.base, async.} = raiseAssert("not implemented") diff --git a/codex/merkletree/codex/coders.nim b/codex/merkletree/codex/coders.nim index a2d5a24b6..b82099910 100644 --- a/codex/merkletree/codex/coders.nim +++ b/codex/merkletree/codex/coders.nim @@ -9,7 +9,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/libp2p import pkg/questionable @@ -42,8 +43,8 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = var pb = initProtoBuffer(data, maxSize = MaxMerkleTreeSize) var mcodecCode: uint64 var leavesCount: uint64 - discard ? pb.getField(1, mcodecCode).mapFailure - discard ? pb.getField(2, leavesCount).mapFailure + discard ?pb.getField(1, mcodecCode).mapFailure + discard ?pb.getField(2, leavesCount).mapFailure let mcodec = MultiCodec.codec(mcodecCode.int) if mcodec == InvalidMultiCodec: @@ -53,10 +54,10 @@ proc decode*(_: type CodexTree, data: seq[byte]): ?!CodexTree = nodesBuff: seq[seq[byte]] nodes: seq[ByteHash] - if ? pb.getRepeatedField(3, nodesBuff).mapFailure: + if ?pb.getRepeatedField(3, nodesBuff).mapFailure: for nodeBuff in nodesBuff: var node: ByteHash - discard ? 
initProtoBuffer(nodeBuff).getField(1, node).mapFailure + discard ?initProtoBuffer(nodeBuff).getField(1, node).mapFailure nodes.add node CodexTree.fromNodes(mcodec, nodes, leavesCount.int) @@ -81,32 +82,29 @@ proc decode*(_: type CodexProof, data: seq[byte]): ?!CodexProof = var mcodecCode: uint64 var index: uint64 var nleaves: uint64 - discard ? pb.getField(1, mcodecCode).mapFailure + discard ?pb.getField(1, mcodecCode).mapFailure let mcodec = MultiCodec.codec(mcodecCode.int) if mcodec == InvalidMultiCodec: return failure("Invalid MultiCodec code " & $mcodecCode) - discard ? pb.getField(2, index).mapFailure - discard ? pb.getField(3, nleaves).mapFailure + discard ?pb.getField(2, index).mapFailure + discard ?pb.getField(3, nleaves).mapFailure var nodesBuff: seq[seq[byte]] nodes: seq[ByteHash] - if ? pb.getRepeatedField(4, nodesBuff).mapFailure: + if ?pb.getRepeatedField(4, nodesBuff).mapFailure: for nodeBuff in nodesBuff: var node: ByteHash let nodePb = initProtoBuffer(nodeBuff) - discard ? nodePb.getField(1, node).mapFailure + discard ?nodePb.getField(1, node).mapFailure nodes.add node CodexProof.init(mcodec, index.int, nleaves.int, nodes) -proc fromJson*( - _: type CodexProof, - json: JsonNode -): ?!CodexProof = +proc fromJson*(_: type CodexProof, json: JsonNode): ?!CodexProof = expectJsonKind(Cid, JString, json) var bytes: seq[byte] try: @@ -116,4 +114,5 @@ proc fromJson*( CodexProof.decode(bytes) -func `%`*(proof: CodexProof): JsonNode = % byteutils.toHex(proof.encode()) +func `%`*(proof: CodexProof): JsonNode = + %byteutils.toHex(proof.encode()) diff --git a/codex/merkletree/codex/codex.nim b/codex/merkletree/codex/codex.nim index 72b044f2d..e287dfacf 100644 --- a/codex/merkletree/codex/codex.nim +++ b/codex/merkletree/codex/codex.nim @@ -32,10 +32,10 @@ logScope: type ByteTreeKey* {.pure.} = enum - KeyNone = 0x0.byte - KeyBottomLayer = 0x1.byte - KeyOdd = 0x2.byte - KeyOddAndBottomLayer = 0x3.byte + KeyNone = 0x0.byte + KeyBottomLayer = 0x1.byte + KeyOdd = 0x2.byte + KeyOddAndBottomLayer = 0x3.byte ByteHash* = seq[byte] ByteTree* = MerkleTree[ByteHash, ByteTreeKey] @@ -56,8 +56,7 @@ proc initMultiHashCodeTable(): Table[MultiCodec, MHash] {.compileTime.} = const CodeHashes = initMultiHashCodeTable() func mhash*(mcodec: MultiCodec): ?!MHash = - let - mhash = CodeHashes.getOrDefault(mcodec) + let mhash = CodeHashes.getOrDefault(mcodec) if isNil(mhash.coder): return failure "Invalid multihash codec" @@ -71,10 +70,9 @@ func digestSize*(self: (CodexTree or CodexProof)): int = self.mhash.size func getProof*(self: CodexTree, index: int): ?!CodexProof = - var - proof = CodexProof(mcodec: self.mcodec) + var proof = CodexProof(mcodec: self.mcodec) - ? self.getProof(index, proof) + ?self.getProof(index, proof) success proof @@ -86,83 +84,66 @@ func verify*(self: CodexProof, leaf: MultiHash, root: MultiHash): ?!bool = rootBytes = root.digestBytes leafBytes = leaf.digestBytes - if self.mcodec != root.mcodec or - self.mcodec != leaf.mcodec: + if self.mcodec != root.mcodec or self.mcodec != leaf.mcodec: return failure "Hash codec mismatch" - if rootBytes.len != root.size and - leafBytes.len != leaf.size: + if rootBytes.len != root.size and leafBytes.len != leaf.size: return failure "Invalid hash length" self.verify(leafBytes, rootBytes) func verify*(self: CodexProof, leaf: Cid, root: Cid): ?!bool = - self.verify(? leaf.mhash.mapFailure, ? 
leaf.mhash.mapFailure) + self.verify(?leaf.mhash.mapFailure, ?leaf.mhash.mapFailure) -proc rootCid*( - self: CodexTree, - version = CIDv1, - dataCodec = DatasetRootCodec): ?!Cid = - - if (? self.root).len == 0: +proc rootCid*(self: CodexTree, version = CIDv1, dataCodec = DatasetRootCodec): ?!Cid = + if (?self.root).len == 0: return failure "Empty root" - let - mhash = ? MultiHash.init(self.mcodec, ? self.root).mapFailure + let mhash = ?MultiHash.init(self.mcodec, ?self.root).mapFailure Cid.init(version, DatasetRootCodec, mhash).mapFailure func getLeafCid*( - self: CodexTree, - i: Natural, - version = CIDv1, - dataCodec = BlockCodec): ?!Cid = - + self: CodexTree, i: Natural, version = CIDv1, dataCodec = BlockCodec +): ?!Cid = if i >= self.leavesCount: return failure "Invalid leaf index " & $i let leaf = self.leaves[i] - mhash = ? MultiHash.init($self.mcodec, leaf).mapFailure + mhash = ?MultiHash.init($self.mcodec, leaf).mapFailure Cid.init(version, dataCodec, mhash).mapFailure proc `$`*(self: CodexTree): string = - let root = if self.root.isOk: byteutils.toHex(self.root.get) else: "none" - "CodexTree(" & - " root: " & root & - ", leavesCount: " & $self.leavesCount & - ", levels: " & $self.levels & - ", mcodec: " & $self.mcodec & " )" + let root = + if self.root.isOk: + byteutils.toHex(self.root.get) + else: + "none" + "CodexTree(" & " root: " & root & ", leavesCount: " & $self.leavesCount & ", levels: " & + $self.levels & ", mcodec: " & $self.mcodec & " )" proc `$`*(self: CodexProof): string = - "CodexProof(" & - " nleaves: " & $self.nleaves & - ", index: " & $self.index & - ", path: " & $self.path.mapIt( byteutils.toHex(it) ) & - ", mcodec: " & $self.mcodec & " )" - -func compress*( - x, y: openArray[byte], - key: ByteTreeKey, - mhash: MHash): ?!ByteHash = + "CodexProof(" & " nleaves: " & $self.nleaves & ", index: " & $self.index & ", path: " & + $self.path.mapIt(byteutils.toHex(it)) & ", mcodec: " & $self.mcodec & " )" + +func compress*(x, y: openArray[byte], key: ByteTreeKey, mhash: MHash): ?!ByteHash = ## Compress two hashes ## var digest = newSeq[byte](mhash.size) - mhash.coder(@x & @y & @[ key.byte ], digest) + mhash.coder(@x & @y & @[key.byte], digest) success digest func init*( - _: type CodexTree, - mcodec: MultiCodec = Sha256HashCodec, - leaves: openArray[ByteHash]): ?!CodexTree = - + _: type CodexTree, mcodec: MultiCodec = Sha256HashCodec, leaves: openArray[ByteHash] +): ?!CodexTree = if leaves.len == 0: return failure "Empty leaves" let - mhash = ? mcodec.mhash() + mhash = ?mcodec.mhash() compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = compress(x, y, key, mhash) Zero: ByteHash = newSeq[byte](mhash.size) @@ -170,48 +151,42 @@ func init*( if mhash.size != leaves[0].len: return failure "Invalid hash length" - var - self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero) + var self = CodexTree(mcodec: mcodec, compress: compressor, zero: Zero) - self.layers = ? 
merkleTreeWorker(self, leaves, isBottomLayer = true) + self.layers = ?merkleTreeWorker(self, leaves, isBottomLayer = true) success self -func init*( - _: type CodexTree, - leaves: openArray[MultiHash]): ?!CodexTree = - +func init*(_: type CodexTree, leaves: openArray[MultiHash]): ?!CodexTree = if leaves.len == 0: return failure "Empty leaves" let mcodec = leaves[0].mcodec - leaves = leaves.mapIt( it.digestBytes ) + leaves = leaves.mapIt(it.digestBytes) CodexTree.init(mcodec, leaves) -func init*( - _: type CodexTree, - leaves: openArray[Cid]): ?!CodexTree = +func init*(_: type CodexTree, leaves: openArray[Cid]): ?!CodexTree = if leaves.len == 0: return failure "Empty leaves" let - mcodec = (? leaves[0].mhash.mapFailure).mcodec - leaves = leaves.mapIt( (? it.mhash.mapFailure).digestBytes ) + mcodec = (?leaves[0].mhash.mapFailure).mcodec + leaves = leaves.mapIt((?it.mhash.mapFailure).digestBytes) CodexTree.init(mcodec, leaves) proc fromNodes*( - _: type CodexTree, - mcodec: MultiCodec = Sha256HashCodec, - nodes: openArray[ByteHash], - nleaves: int): ?!CodexTree = - + _: type CodexTree, + mcodec: MultiCodec = Sha256HashCodec, + nodes: openArray[ByteHash], + nleaves: int, +): ?!CodexTree = if nodes.len == 0: return failure "Empty nodes" let - mhash = ? mcodec.mhash() + mhash = ?mcodec.mhash() Zero = newSeq[byte](mhash.size) compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!ByteHash {.noSideEffect.} = compress(x, y, key, mhash) @@ -225,31 +200,31 @@ proc fromNodes*( pos = 0 while pos < nodes.len: - self.layers.add( nodes[pos..<(pos + layer)] ) + self.layers.add(nodes[pos ..< (pos + layer)]) pos += layer layer = divUp(layer, 2) let index = Rng.instance.rand(nleaves - 1) - proof = ? self.getProof(index) + proof = ?self.getProof(index) - if not ? proof.verify(self.leaves[index], ? self.root): # sanity check + if not ?proof.verify(self.leaves[index], ?self.root): # sanity check return failure "Unable to verify tree built from nodes" success self func init*( - _: type CodexProof, - mcodec: MultiCodec = Sha256HashCodec, - index: int, - nleaves: int, - nodes: openArray[ByteHash]): ?!CodexProof = - + _: type CodexProof, + mcodec: MultiCodec = Sha256HashCodec, + index: int, + nleaves: int, + nodes: openArray[ByteHash], +): ?!CodexProof = if nodes.len == 0: return failure "Empty nodes" let - mhash = ? 
mcodec.mhash() + mhash = ?mcodec.mhash() Zero = newSeq[byte](mhash.size) compressor = proc(x, y: seq[byte], key: ByteTreeKey): ?!seq[byte] {.noSideEffect.} = compress(x, y, key, mhash) @@ -260,4 +235,5 @@ func init*( mcodec: mcodec, index: index, nleaves: nleaves, - path: @nodes) + path: @nodes, + ) diff --git a/codex/merkletree/merkletree.nim b/codex/merkletree/merkletree.nim index 2f46b93d7..f1905becb 100644 --- a/codex/merkletree/merkletree.nim +++ b/codex/merkletree/merkletree.nim @@ -16,19 +16,19 @@ import pkg/questionable/results import ../errors type - CompressFn*[H, K] = proc (x, y: H, key: K): ?!H {.noSideEffect, raises: [].} + CompressFn*[H, K] = proc(x, y: H, key: K): ?!H {.noSideEffect, raises: [].} MerkleTree*[H, K] = ref object of RootObj - layers* : seq[seq[H]] + layers*: seq[seq[H]] compress*: CompressFn[H, K] - zero* : H + zero*: H MerkleProof*[H, K] = ref object of RootObj - index* : int # linear index of the leaf, starting from 0 - path* : seq[H] # order: from the bottom to the top - nleaves* : int # number of leaves in the tree (=size of input) - compress*: CompressFn[H, K] # compress function - zero* : H # zero value + index*: int # linear index of the leaf, starting from 0 + path*: seq[H] # order: from the bottom to the top + nleaves*: int # number of leaves in the tree (=size of input) + compress*: CompressFn[H, K] # compress function + zero*: H # zero value func depth*[H, K](self: MerkleTree[H, K]): int = return self.layers.len - 1 @@ -59,36 +59,38 @@ func root*[H, K](self: MerkleTree[H, K]): ?!H = return success last[0] func getProof*[H, K]( - self: MerkleTree[H, K], - index: int, - proof: MerkleProof[H, K]): ?!void = - let depth = self.depth + self: MerkleTree[H, K], index: int, proof: MerkleProof[H, K] +): ?!void = + let depth = self.depth let nleaves = self.leavesCount if not (index >= 0 and index < nleaves): return failure "index out of bounds" - var path : seq[H] = newSeq[H](depth) + var path: seq[H] = newSeq[H](depth) var k = index var m = nleaves - for i in 0.. odd node - h = ? proof.compress( h, p, K(bottomFlag.ord + 2) ) + h = ?proof.compress(h, p, K(bottomFlag.ord + 2)) else: # even node - h = ? proof.compress( h , p, bottomFlag ) + h = ?proof.compress(h, p, bottomFlag) bottomFlag = K.KeyNone - j = j shr 1 - m = (m+1) shr 1 + j = j shr 1 + m = (m + 1) shr 1 return success h func verify*[H, K](proof: MerkleProof[H, K], leaf: H, root: H): ?!bool = - success bool(root == ? proof.reconstructRoot(leaf)) + success bool(root == ?proof.reconstructRoot(leaf)) func merkleTreeWorker*[H, K]( - self: MerkleTree[H, K], - xs: openArray[H], - isBottomLayer: static bool): ?!seq[seq[H]] = - + self: MerkleTree[H, K], xs: openArray[H], isBottomLayer: static bool +): ?!seq[seq[H]] = let a = low(xs) let b = high(xs) let m = b - a + 1 when not isBottomLayer: if m == 1: - return success @[ @xs ] + return success @[@xs] - let halfn: int = m div 2 - let n : int = 2 * halfn + let halfn: int = m div 2 + let n: int = 2 * halfn let isOdd: bool = (n != m) var ys: seq[H] @@ -143,11 +143,11 @@ func merkleTreeWorker*[H, K]( else: ys = newSeq[H](halfn + 1) - for i in 0.. 0): - + while (let chunk = await chunker.getBytes(); chunk.len > 0): without mhash =? MultiHash.digest($hcodec, chunk).mapFailure, err: return failure(err) @@ -335,7 +318,8 @@ proc store*( for index, cid in cids: without proof =? tree.getProof(index), err: return failure(err) - if err =? (await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption: + if err =? 
+ (await self.networkStore.putCidAndProof(treeCid, index, cid, proof)).errorOption: # TODO add log here return failure(err) @@ -348,18 +332,20 @@ proc store*( codec = dataCodec, filename = filename, mimetype = mimetype, - uploadedAt = now().utc.toTime.toUnix.some) + uploadedAt = now().utc.toTime.toUnix.some, + ) without manifestBlk =? await self.storeManifest(manifest), err: error "Unable to store manifest" return failure(err) - info "Stored data", manifestCid = manifestBlk.cid, - treeCid = treeCid, - blocks = manifest.blocksCount, - datasetSize = manifest.datasetSize, - filename = manifest.filename, - mimetype = manifest.mimetype + info "Stored data", + manifestCid = manifestBlk.cid, + treeCid = treeCid, + blocks = manifest.blocksCount, + datasetSize = manifest.datasetSize, + filename = manifest.filename, + mimetype = manifest.mimetype return manifestBlk.cid.success @@ -381,15 +367,16 @@ proc iterateManifests*(self: CodexNodeRef, onManifest: OnManifest) {.async.} = onManifest(cid, manifest) proc setupRequest( - self: CodexNodeRef, - cid: Cid, - duration: UInt256, - proofProbability: UInt256, - nodes: uint, - tolerance: uint, - reward: UInt256, - collateral: UInt256, - expiry: UInt256): Future[?!StorageRequest] {.async.} = + self: CodexNodeRef, + cid: Cid, + duration: UInt256, + proofProbability: UInt256, + nodes: uint, + tolerance: uint, + reward: UInt256, + collateral: UInt256, + expiry: UInt256, +): Future[?!StorageRequest] {.async.} = ## Setup slots for a given dataset ## @@ -398,16 +385,16 @@ proc setupRequest( ecM = tolerance logScope: - cid = cid - duration = duration - nodes = nodes - tolerance = tolerance - reward = reward - proofProbability = proofProbability - collateral = collateral - expiry = expiry - ecK = ecK - ecM = ecM + cid = cid + duration = duration + nodes = nodes + tolerance = tolerance + reward = reward + proofProbability = proofProbability + collateral = collateral + expiry = expiry + ecK = ecK + ecM = ecM trace "Setting up slots" @@ -416,11 +403,8 @@ proc setupRequest( return failure error # Erasure code the dataset according to provided parameters - let - erasure = Erasure.new( - self.networkStore.localStore, - leoEncoderProvider, - leoDecoderProvider) + let erasure = + Erasure.new(self.networkStore.localStore, leoEncoderProvider, leoDecoderProvider) without encoded =? (await erasure.encode(manifest, ecK, ecM)), error: trace "Unable to erasure code dataset" @@ -441,9 +425,9 @@ proc setupRequest( let verifyRoot = if builder.verifyRoot.isNone: - return failure("No slots root") - else: - builder.verifyRoot.get.toBytes + return failure("No slots root") + else: + builder.verifyRoot.get.toBytes request = StorageRequest( ask: StorageAsk( @@ -453,42 +437,43 @@ proc setupRequest( proofProbability: proofProbability, reward: reward, collateral: collateral, - maxSlotLoss: tolerance + maxSlotLoss: tolerance, ), content: StorageContent( cid: $manifestBlk.cid, # TODO: why string? 
- merkleRoot: verifyRoot + merkleRoot: verifyRoot, ), - expiry: expiry + expiry: expiry, ) trace "Request created", request = $request success request proc requestStorage*( - self: CodexNodeRef, - cid: Cid, - duration: UInt256, - proofProbability: UInt256, - nodes: uint, - tolerance: uint, - reward: UInt256, - collateral: UInt256, - expiry: UInt256): Future[?!PurchaseId] {.async.} = + self: CodexNodeRef, + cid: Cid, + duration: UInt256, + proofProbability: UInt256, + nodes: uint, + tolerance: uint, + reward: UInt256, + collateral: UInt256, + expiry: UInt256, +): Future[?!PurchaseId] {.async.} = ## Initiate a request for storage sequence, this might ## be a multistep procedure. ## logScope: - cid = cid - duration = duration - nodes = nodes - tolerance = tolerance - reward = reward - proofProbability = proofProbability - collateral = collateral - expiry = expiry.truncate(int64) - now = self.clock.now + cid = cid + duration = duration + nodes = nodes + tolerance = tolerance + reward = reward + proofProbability = proofProbability + collateral = collateral + expiry = expiry.truncate(int64) + now = self.clock.now trace "Received a request for storage!" @@ -496,16 +481,11 @@ proc requestStorage*( trace "Purchasing not available" return failure "Purchasing not available" - without request =? - (await self.setupRequest( - cid, - duration, - proofProbability, - nodes, - tolerance, - reward, - collateral, - expiry)), err: + without request =? ( + await self.setupRequest( + cid, duration, proofProbability, nodes, tolerance, reward, collateral, expiry + ) + ), err: trace "Unable to setup request" return failure err @@ -513,10 +493,8 @@ proc requestStorage*( success purchase.id proc onStore( - self: CodexNodeRef, - request: StorageRequest, - slotIdx: UInt256, - blocksCb: BlocksCb): Future[?!void] {.async.} = + self: CodexNodeRef, request: StorageRequest, slotIdx: UInt256, blocksCb: BlocksCb +): Future[?!void] {.async.} = ## store data in local storage ## @@ -534,9 +512,8 @@ proc onStore( trace "Unable to fetch manifest for cid", cid, err = err.msg return failure(err) - without builder =? Poseidon2Builder.new( - self.networkStore, manifest, manifest.verifiableStrategy - ), err: + without builder =? + Poseidon2Builder.new(self.networkStore, manifest, manifest.verifiableStrategy), err: trace "Unable to create slots builder", err = err.msg return failure(err) @@ -551,7 +528,8 @@ proc onStore( proc updateExpiry(blocks: seq[bt.Block]): Future[?!void] {.async.} = trace "Updating expiry for blocks", blocks = blocks.len - let ensureExpiryFutures = blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry)) + let ensureExpiryFutures = + blocks.mapIt(self.networkStore.ensureExpiry(it.cid, expiry)) if updateExpiryErr =? (await allFutureResult(ensureExpiryFutures)).errorOption: return failure(updateExpiryErr) @@ -561,8 +539,9 @@ proc onStore( return success() - without indexer =? manifest.verifiableStrategy.init( - 0, manifest.blocksCount - 1, manifest.numSlots).catch, err: + without indexer =? + manifest.verifiableStrategy.init(0, manifest.blocksCount - 1, manifest.numSlots).catch, + err: trace "Unable to create indexing strategy from protected manifest", err = err.msg return failure(err) @@ -570,10 +549,9 @@ proc onStore( trace "Unable to get indicies from strategy", err = err.msg return failure(err) - if err =? (await self.fetchBatched( - manifest.treeCid, - blksIter, - onBatch = updateExpiry)).errorOption: + if err =? 
( + await self.fetchBatched(manifest.treeCid, blksIter, onBatch = updateExpiry) + ).errorOption: trace "Unable to fetch blocks", err = err.msg return failure(err) @@ -584,7 +562,8 @@ proc onStore( trace "Slot successfully retrieved and reconstructed" if cid =? slotRoot.toSlotCid() and cid != manifest.slotRoots[slotIdx.int]: - trace "Slot root mismatch", manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid() + trace "Slot root mismatch", + manifest = manifest.slotRoots[slotIdx.int], recovered = slotRoot.toSlotCid() return failure(newException(CodexError, "Slot root mismatch")) trace "Slot successfully retrieved and reconstructed" @@ -592,9 +571,8 @@ proc onStore( return success() proc onProve( - self: CodexNodeRef, - slot: Slot, - challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + self: CodexNodeRef, slot: Slot, challenge: ProofChallenge +): Future[?!Groth16Proof] {.async.} = ## Generats a proof for a given slot and challenge ## @@ -648,9 +626,8 @@ proc onProve( failure "Prover not enabled" proc onExpiryUpdate( - self: CodexNodeRef, - rootCid: string, - expiry: SecondsSince1970): Future[?!void] {.async.} = + self: CodexNodeRef, rootCid: string, expiry: SecondsSince1970 +): Future[?!void] {.async.} = without cid =? Cid.init(rootCid): trace "Unable to parse Cid", cid let error = newException(CodexError, "Unable to parse Cid") @@ -658,11 +635,8 @@ proc onExpiryUpdate( return await self.updateExpiry(cid, expiry) -proc onClear( - self: CodexNodeRef, - request: StorageRequest, - slotIndex: UInt256) = -# TODO: remove data from local storage +proc onClear(self: CodexNodeRef, request: StorageRequest, slotIndex: UInt256) = + # TODO: remove data from local storage discard proc start*(self: CodexNodeRef) {.async.} = @@ -676,32 +650,32 @@ proc start*(self: CodexNodeRef) {.async.} = await self.clock.start() if hostContracts =? self.contracts.host: - hostContracts.sales.onStore = - proc( - request: StorageRequest, - slot: UInt256, - onBatch: BatchProc): Future[?!void] = self.onStore(request, slot, onBatch) - - hostContracts.sales.onExpiryUpdate = - proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] = - self.onExpiryUpdate(rootCid, expiry) - - hostContracts.sales.onClear = - proc(request: StorageRequest, slotIndex: UInt256) = + hostContracts.sales.onStore = proc( + request: StorageRequest, slot: UInt256, onBatch: BatchProc + ): Future[?!void] = + self.onStore(request, slot, onBatch) + + hostContracts.sales.onExpiryUpdate = proc( + rootCid: string, expiry: SecondsSince1970 + ): Future[?!void] = + self.onExpiryUpdate(rootCid, expiry) + + hostContracts.sales.onClear = proc(request: StorageRequest, slotIndex: UInt256) = # TODO: remove data from local storage self.onClear(request, slotIndex) - hostContracts.sales.onProve = - proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] = - # TODO: generate proof - self.onProve(slot, challenge) + hostContracts.sales.onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] = + # TODO: generate proof + self.onProve(slot, challenge) try: await hostContracts.start() except CancelledError as error: raise error except CatchableError as error: - error "Unable to start host contract interactions", error=error.msg + error "Unable to start host contract interactions", error = error.msg self.contracts.host = HostInteractions.none if clientContracts =? 
self.contracts.client: @@ -710,7 +684,7 @@ proc start*(self: CodexNodeRef) {.async.} = except CancelledError as error: raise error except CatchableError as error: - error "Unable to start client contract interactions: ", error=error.msg + error "Unable to start client contract interactions: ", error = error.msg self.contracts.client = ClientInteractions.none if validatorContracts =? self.contracts.validator: @@ -719,7 +693,7 @@ proc start*(self: CodexNodeRef) {.async.} = except CancelledError as error: raise error except CatchableError as error: - error "Unable to start validator contract interactions: ", error=error.msg + error "Unable to start validator contract interactions: ", error = error.msg self.contracts.validator = ValidatorInteractions.none self.networkId = self.switch.peerInfo.peerId @@ -750,13 +724,14 @@ proc stop*(self: CodexNodeRef) {.async.} = await self.networkStore.close proc new*( - T: type CodexNodeRef, - switch: Switch, - networkStore: NetworkStore, - engine: BlockExcEngine, - discovery: Discovery, - prover = Prover.none, - contracts = Contracts.default): CodexNodeRef = + T: type CodexNodeRef, + switch: Switch, + networkStore: NetworkStore, + engine: BlockExcEngine, + discovery: Discovery, + prover = Prover.none, + contracts = Contracts.default, +): CodexNodeRef = ## Create new instance of a Codex self, call `start` to run it ## @@ -766,4 +741,5 @@ proc new*( engine: engine, prover: prover, discovery: discovery, - contracts: contracts) + contracts: contracts, + ) diff --git a/codex/periods.nim b/codex/periods.nim index f0b789e16..429931ee4 100644 --- a/codex/periods.nim +++ b/codex/periods.nim @@ -3,6 +3,7 @@ import pkg/stint type Periodicity* = object seconds*: UInt256 + Period* = UInt256 Timestamp* = UInt256 diff --git a/codex/purchasing.nim b/codex/purchasing.nim index ca92ece9c..4ab844051 100644 --- a/codex/purchasing.nim +++ b/codex/purchasing.nim @@ -18,16 +18,13 @@ type clock: Clock purchases: Table[PurchaseId, Purchase] proofProbability*: UInt256 + PurchaseTimeout* = Timeout const DefaultProofProbability = 100.u256 proc new*(_: type Purchasing, market: Market, clock: Clock): Purchasing = - Purchasing( - market: market, - clock: clock, - proofProbability: DefaultProofProbability, - ) + Purchasing(market: market, clock: clock, proofProbability: DefaultProofProbability) proc load*(purchasing: Purchasing) {.async.} = let market = purchasing.market @@ -43,9 +40,9 @@ proc start*(purchasing: Purchasing) {.async.} = proc stop*(purchasing: Purchasing) {.async.} = discard -proc populate*(purchasing: Purchasing, - request: StorageRequest - ): Future[StorageRequest] {.async.} = +proc populate*( + purchasing: Purchasing, request: StorageRequest +): Future[StorageRequest] {.async.} = result = request if result.ask.proofProbability == 0.u256: result.ask.proofProbability = purchasing.proofProbability @@ -55,9 +52,9 @@ proc populate*(purchasing: Purchasing, result.nonce = Nonce(id) result.client = await purchasing.market.getSigner() -proc purchase*(purchasing: Purchasing, - request: StorageRequest - ): Future[Purchase] {.async.} = +proc purchase*( + purchasing: Purchasing, request: StorageRequest +): Future[Purchase] {.async.} = let request = await purchasing.populate(request) let purchase = Purchase.new(request, purchasing.market, purchasing.clock) purchase.start() @@ -75,4 +72,3 @@ func getPurchaseIds*(purchasing: Purchasing): seq[PurchaseId] = for key in purchasing.purchases.keys: pIds.add(key) return pIds - diff --git a/codex/purchasing/purchase.nim 
b/codex/purchasing/purchase.nim index d616e492c..7c16c28ce 100644 --- a/codex/purchasing/purchase.nim +++ b/codex/purchasing/purchase.nim @@ -25,10 +25,7 @@ export purchaseid export statemachine func new*( - _: type Purchase, - requestId: RequestId, - market: Market, - clock: Clock + _: type Purchase, requestId: RequestId, market: Market, clock: Clock ): Purchase = ## create a new instance of a Purchase ## @@ -42,10 +39,7 @@ func new*( return purchase func new*( - _: type Purchase, - request: StorageRequest, - market: Market, - clock: Clock + _: type Purchase, request: StorageRequest, market: Market, clock: Clock ): Purchase = ## Create a new purchase using the given market and clock let purchase = Purchase.new(request.id, market, clock) @@ -76,4 +70,5 @@ func error*(purchase: Purchase): ?(ref CatchableError) = func state*(purchase: Purchase): ?string = proc description(state: State): string = $state + purchase.query(description) diff --git a/codex/purchasing/purchaseid.nim b/codex/purchasing/purchaseid.nim index 91734fe91..965b08397 100644 --- a/codex/purchasing/purchaseid.nim +++ b/codex/purchasing/purchaseid.nim @@ -3,9 +3,12 @@ import ../logutils type PurchaseId* = distinct array[32, byte] -logutils.formatIt(LogFormat.textLines, PurchaseId): it.short0xHexLog -logutils.formatIt(LogFormat.json, PurchaseId): it.to0xHexLog +logutils.formatIt(LogFormat.textLines, PurchaseId): + it.short0xHexLog +logutils.formatIt(LogFormat.json, PurchaseId): + it.to0xHexLog proc hash*(x: PurchaseId): Hash {.borrow.} proc `==`*(x, y: PurchaseId): bool {.borrow.} -proc toHex*(x: PurchaseId): string = array[32, byte](x).toHex +proc toHex*(x: PurchaseId): string = + array[32, byte](x).toHex diff --git a/codex/purchasing/statemachine.nim b/codex/purchasing/statemachine.nim index de2753c3a..20a63783e 100644 --- a/codex/purchasing/statemachine.nim +++ b/codex/purchasing/statemachine.nim @@ -14,5 +14,6 @@ type clock*: Clock requestId*: RequestId request*: ?StorageRequest + PurchaseState* = ref object of State PurchaseError* = object of CodexError diff --git a/codex/purchasing/states/cancelled.nim b/codex/purchasing/states/cancelled.nim index f9bb1ecee..760dc81a9 100644 --- a/codex/purchasing/states/cancelled.nim +++ b/codex/purchasing/states/cancelled.nim @@ -18,7 +18,7 @@ method run*(state: PurchaseCancelled, machine: Machine): Future[?State] {.async. 
codex_purchases_cancelled.inc() let purchase = Purchase(machine) - warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId + warn "Request cancelled, withdrawing remaining funds", requestId = purchase.requestId await purchase.market.withdrawFunds(purchase.requestId) let error = newException(Timeout, "Purchase cancelled due to timeout") diff --git a/codex/purchasing/states/error.nim b/codex/purchasing/states/error.nim index 0ebe1dbed..d7017b388 100644 --- a/codex/purchasing/states/error.nim +++ b/codex/purchasing/states/error.nim @@ -18,6 +18,7 @@ method run*(state: PurchaseErrored, machine: Machine): Future[?State] {.async.} codex_purchases_error.inc() let purchase = Purchase(machine) - error "Purchasing error", error=state.error.msgDetail, requestId = purchase.requestId + error "Purchasing error", + error = state.error.msgDetail, requestId = purchase.requestId purchase.future.fail(state.error) diff --git a/codex/purchasing/states/errorhandling.nim b/codex/purchasing/states/errorhandling.nim index 57e009247..8ef91ba69 100644 --- a/codex/purchasing/states/errorhandling.nim +++ b/codex/purchasing/states/errorhandling.nim @@ -2,8 +2,7 @@ import pkg/questionable import ../statemachine import ./error -type - ErrorHandlingState* = ref object of PurchaseState +type ErrorHandlingState* = ref object of PurchaseState method onError*(state: ErrorHandlingState, error: ref CatchableError): ?State = some State(PurchaseErrored(error: error)) diff --git a/codex/purchasing/states/failed.nim b/codex/purchasing/states/failed.nim index b05dbb6f1..5a126a736 100644 --- a/codex/purchasing/states/failed.nim +++ b/codex/purchasing/states/failed.nim @@ -5,8 +5,7 @@ import ./error declareCounter(codex_purchases_failed, "codex purchases failed") -type - PurchaseFailed* = ref object of PurchaseState +type PurchaseFailed* = ref object of PurchaseState method `$`*(state: PurchaseFailed): string = "failed" @@ -14,7 +13,7 @@ method `$`*(state: PurchaseFailed): string = method run*(state: PurchaseFailed, machine: Machine): Future[?State] {.async.} = codex_purchases_failed.inc() let purchase = Purchase(machine) - warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId + warn "Request failed, withdrawing remaining funds", requestId = purchase.requestId await purchase.market.withdrawFunds(purchase.requestId) let error = newException(PurchaseError, "Purchase failed") diff --git a/codex/purchasing/states/started.nim b/codex/purchasing/states/started.nim index 42acd1fce..083e64c8e 100644 --- a/codex/purchasing/states/started.nim +++ b/codex/purchasing/states/started.nim @@ -27,6 +27,7 @@ method run*(state: PurchaseStarted, machine: Machine): Future[?State] {.async.} let failed = newFuture[void]() proc callback(_: RequestId) = failed.complete() + let subscription = await market.subscribeRequestFailed(purchase.requestId, callback) # Ensure that we're past the request end by waiting an additional second diff --git a/codex/purchasing/states/submitted.nim b/codex/purchasing/states/submitted.nim index 5532c8506..1cf65b1f1 100644 --- a/codex/purchasing/states/submitted.nim +++ b/codex/purchasing/states/submitted.nim @@ -23,12 +23,14 @@ method run*(state: PurchaseSubmitted, machine: Machine): Future[?State] {.async. 
let market = purchase.market let clock = purchase.clock - info "Request submitted, waiting for slots to be filled", requestId = purchase.requestId + info "Request submitted, waiting for slots to be filled", + requestId = purchase.requestId - proc wait {.async.} = + proc wait() {.async.} = let done = newFuture[void]() proc callback(_: RequestId) = done.complete() + let subscription = await market.subscribeFulfillment(request.id, callback) await done await subscription.unsubscribe() diff --git a/codex/purchasing/states/unknown.nim b/codex/purchasing/states/unknown.nim index ade70c9f5..54e099427 100644 --- a/codex/purchasing/states/unknown.nim +++ b/codex/purchasing/states/unknown.nim @@ -19,7 +19,6 @@ method run*(state: PurchaseUnknown, machine: Machine): Future[?State] {.async.} let purchase = Purchase(machine) if (request =? await purchase.market.getRequest(purchase.requestId)) and (requestState =? await purchase.market.requestState(purchase.requestId)): - purchase.request = some request case requestState diff --git a/codex/rest/api.nim b/codex/rest/api.nim index 597e73863..4119373e0 100644 --- a/codex/rest/api.nim +++ b/codex/rest/api.nim @@ -9,8 +9,8 @@ import pkg/upraises -push: {.upraises: [].} - +push: + {.upraises: [].} import std/sequtils import mimetypes @@ -49,10 +49,7 @@ logScope: declareCounter(codex_api_uploads, "codex API uploads") declareCounter(codex_api_downloads, "codex API downloads") -proc validate( - pattern: string, - value: string): int - {.gcsafe, raises: [Defect].} = +proc validate(pattern: string, value: string): int {.gcsafe, raises: [Defect].} = 0 proc formatManifest(cid: Cid, manifest: Manifest): RestContent = @@ -63,21 +60,19 @@ proc formatManifestBlocks(node: CodexNodeRef): Future[JsonNode] {.async.} = proc addManifest(cid: Cid, manifest: Manifest) = content.add(formatManifest(cid, manifest)) + await node.iterateManifests(addManifest) return %RestContentList.init(content) proc retrieveCid( - node: CodexNodeRef, - cid: Cid, - local: bool = true, - resp: HttpResponseRef): Future[RestApiResponse] {.async.} = + node: CodexNodeRef, cid: Cid, local: bool = true, resp: HttpResponseRef +): Future[RestApiResponse] {.async.} = ## Download a file from the node in a streaming ## manner ## - var - stream: LPStream + var stream: LPStream var bytes = 0 try: @@ -101,8 +96,10 @@ proc retrieveCid( resp.addHeader("Content-Type", "application/octet-stream") if manifest.filename.isSome: - resp.setHeader("Content-Disposition", "attachment; filename=\"" & manifest.filename.get() & "\"") - + resp.setHeader( + "Content-Disposition", + "attachment; filename=\"" & manifest.filename.get() & "\"", + ) await resp.prepareChunked() @@ -129,7 +126,9 @@ proc retrieveCid( if not stream.isNil: await stream.close() -proc buildCorsHeaders(httpMethod: string, allowedOrigin: Option[string]): seq[(string, string)] = +proc buildCorsHeaders( + httpMethod: string, allowedOrigin: Option[string] +): seq[(string, string)] = var headers: seq[(string, string)] = newSeq[(string, string)]() if corsOrigin =? 
allowedOrigin: @@ -137,15 +136,15 @@ proc buildCorsHeaders(httpMethod: string, allowedOrigin: Option[string]): seq[(s headers.add(("Access-Control-Allow-Methods", httpMethod & ", OPTIONS")) headers.add(("Access-Control-Max-Age", "86400")) - return headers + return headers -proc setCorsHeaders(resp: HttpResponseRef, httpMethod: string, origin: string) = +proc setCorsHeaders(resp: HttpResponseRef, httpMethod: string, origin: string) = resp.setHeader("Access-Control-Allow-Origin", origin) resp.setHeader("Access-Control-Allow-Methods", httpMethod & ", OPTIONS") resp.setHeader("Access-Control-Max-Age", "86400") proc getFilenameFromContentDisposition(contentDisposition: string): ?string = - if not("filename=" in contentDisposition): + if not ("filename=" in contentDisposition): return string.none let parts = contentDisposition.split("filename=\"") @@ -154,696 +153,714 @@ proc getFilenameFromContentDisposition(contentDisposition: string): ?string = return string.none let filename = parts[1].strip() - return filename[0..^2].some + return filename[0 ..^ 2].some proc initDataApi(node: CodexNodeRef, repoStore: RepoStore, router: var RestRouter) = let allowedOrigin = router.allowedOrigin # prevents capture inside of api defintion - - router.api( - MethodOptions, - "/api/codex/v1/data") do ( - resp: HttpResponseRef) -> RestApiResponse: - - if corsOrigin =? allowedOrigin: - resp.setCorsHeaders("POST", corsOrigin) - resp.setHeader("Access-Control-Allow-Headers", "content-type, content-disposition") - - resp.status = Http204 - await resp.sendBody("") - - router.rawApi( - MethodPost, - "/api/codex/v1/data") do ( - ) -> RestApiResponse: - ## Upload a file in a streaming manner - ## - - trace "Handling file upload" - var bodyReader = request.getBodyReader() - if bodyReader.isErr(): - return RestApiResponse.error(Http500) - - # Attempt to handle `Expect` header - # some clients (curl), wait 1000ms - # before giving up - # - await request.handleExpect() - - var mimetype = request.headers.getString(ContentTypeHeader).some - - if mimetype.get() != "": - var m = newMimetypes() - let extension = m.getExt(mimetype.get(), "") - if extension == "": - return RestApiResponse.error(Http422, "The MIME type is not valid.") - else: - mimetype = string.none - const ContentDispositionHeader = "Content-Disposition" - let contentDisposition = request.headers.getString(ContentDispositionHeader) - let filename = getFilenameFromContentDisposition(contentDisposition) - - if filename.isSome and not isValidFilename(filename.get()): - return RestApiResponse.error(Http422, "The filename is not valid.") + router.api(MethodOptions, "/api/codex/v1/data") do( + resp: HttpResponseRef + ) -> RestApiResponse: + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("POST", corsOrigin) + resp.setHeader( + "Access-Control-Allow-Headers", "content-type, content-disposition" + ) - # Here we could check if the extension matches the filename if needed + resp.status = Http204 + await resp.sendBody("") - let - reader = bodyReader.get() + router.rawApi(MethodPost, "/api/codex/v1/data") do() -> RestApiResponse: + ## Upload a file in a streaming manner + ## - try: - without cid =? 
( - await node.store(AsyncStreamWrapper.new(reader = AsyncStreamReader(reader)), filename = filename, mimetype = mimetype)), error: - error "Error uploading file", exc = error.msg - return RestApiResponse.error(Http500, error.msg) - - codex_api_uploads.inc() - trace "Uploaded file", cid - return RestApiResponse.response($cid) - except CancelledError: - trace "Upload cancelled error" - return RestApiResponse.error(Http500) - except AsyncStreamError: - trace "Async stream error" - return RestApiResponse.error(Http500) - finally: - await reader.closeWait() - - trace "Something went wrong error" + trace "Handling file upload" + var bodyReader = request.getBodyReader() + if bodyReader.isErr(): return RestApiResponse.error(Http500) - router.api( - MethodGet, - "/api/codex/v1/data") do () -> RestApiResponse: - let json = await formatManifestBlocks(node) - return RestApiResponse.response($json, contentType="application/json") - - router.api( - MethodGet, - "/api/codex/v1/data/{cid}") do ( - cid: Cid, resp: HttpResponseRef) -> RestApiResponse: + # Attempt to handle `Expect` header + # some clients (curl), wait 1000ms + # before giving up + # + await request.handleExpect() - var headers = buildCorsHeaders("GET", allowedOrigin) + var mimetype = request.headers.getString(ContentTypeHeader).some - ## Download a file from the local node in a streaming - ## manner - if cid.isErr: - return RestApiResponse.error( - Http400, - $cid.error(), - headers = headers) + if mimetype.get() != "": + var m = newMimetypes() + let extension = m.getExt(mimetype.get(), "") + if extension == "": + return RestApiResponse.error(Http422, "The MIME type is not valid.") + else: + mimetype = string.none - if corsOrigin =? allowedOrigin: - resp.setCorsHeaders("GET", corsOrigin) - resp.setHeader("Access-Control-Headers", "X-Requested-With") + const ContentDispositionHeader = "Content-Disposition" + let contentDisposition = request.headers.getString(ContentDispositionHeader) + let filename = getFilenameFromContentDisposition(contentDisposition) - await node.retrieveCid(cid.get(), local = true, resp=resp) + if filename.isSome and not isValidFilename(filename.get()): + return RestApiResponse.error(Http422, "The filename is not valid.") - router.api( - MethodPost, - "/api/codex/v1/data/{cid}/network") do ( - cid: Cid, resp: HttpResponseRef) -> RestApiResponse: - ## Download a file from the network to the local node - ## + # Here we could check if the extension matches the filename if needed - var headers = buildCorsHeaders("GET", allowedOrigin) + let reader = bodyReader.get() - if cid.isErr: - return RestApiResponse.error( - Http400, - $cid.error(), headers = headers) + try: + without cid =? ( + await node.store( + AsyncStreamWrapper.new(reader = AsyncStreamReader(reader)), + filename = filename, + mimetype = mimetype, + ) + ), error: + error "Error uploading file", exc = error.msg + return RestApiResponse.error(Http500, error.msg) + + codex_api_uploads.inc() + trace "Uploaded file", cid + return RestApiResponse.response($cid) + except CancelledError: + trace "Upload cancelled error" + return RestApiResponse.error(Http500) + except AsyncStreamError: + trace "Async stream error" + return RestApiResponse.error(Http500) + finally: + await reader.closeWait() - if corsOrigin =? allowedOrigin: - resp.setCorsHeaders("GET", corsOrigin) - resp.setHeader("Access-Control-Headers", "X-Requested-With") + trace "Something went wrong error" + return RestApiResponse.error(Http500) - without manifest =? 
(await node.fetchManifest(cid.get())), err: - error "Failed to fetch manifest", err = err.msg - return RestApiResponse.error( - Http404, - err.msg, headers = headers) - - proc fetchDatasetAsync(): Future[void] {.async.} = - try: - if err =? (await node.fetchBatched(manifest)).errorOption: - error "Unable to fetch dataset", cid = cid.get(), err = err.msg - except CatchableError as exc: - error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg - discard - - asyncSpawn fetchDatasetAsync() - - let json = %formatManifest(cid.get(), manifest) - return RestApiResponse.response($json, contentType="application/json") - - router.api( - MethodGet, - "/api/codex/v1/data/{cid}/network/stream") do ( - cid: Cid, resp: HttpResponseRef) -> RestApiResponse: - ## Download a file from the network in a streaming - ## manner - ## + router.api(MethodGet, "/api/codex/v1/data") do() -> RestApiResponse: + let json = await formatManifestBlocks(node) + return RestApiResponse.response($json, contentType = "application/json") - var headers = buildCorsHeaders("GET", allowedOrigin) + router.api(MethodGet, "/api/codex/v1/data/{cid}") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + var headers = buildCorsHeaders("GET", allowedOrigin) - if cid.isErr: - return RestApiResponse.error( - Http400, - $cid.error(), headers = headers) + ## Download a file from the local node in a streaming + ## manner + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) - if corsOrigin =? allowedOrigin: - resp.setCorsHeaders("GET", corsOrigin) - resp.setHeader("Access-Control-Headers", "X-Requested-With") + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("GET", corsOrigin) + resp.setHeader("Access-Control-Headers", "X-Requested-With") - await node.retrieveCid(cid.get(), local = false, resp=resp) + await node.retrieveCid(cid.get(), local = true, resp = resp) - router.api( - MethodGet, - "/api/codex/v1/data/{cid}/network/manifest") do ( - cid: Cid, resp: HttpResponseRef) -> RestApiResponse: - ## Download only the manifest. - ## + router.api(MethodPost, "/api/codex/v1/data/{cid}/network") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + ## Download a file from the network to the local node + ## - var headers = buildCorsHeaders("GET", allowedOrigin) + var headers = buildCorsHeaders("GET", allowedOrigin) - if cid.isErr: - return RestApiResponse.error( - Http400, - $cid.error(), headers = headers) + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) - without manifest =? (await node.fetchManifest(cid.get())), err: - error "Failed to fetch manifest", err = err.msg - return RestApiResponse.error( - Http404, - err.msg, headers = headers) + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("GET", corsOrigin) + resp.setHeader("Access-Control-Headers", "X-Requested-With") - let json = %formatManifest(cid.get(), manifest) - return RestApiResponse.response($json, contentType="application/json") + without manifest =? 
(await node.fetchManifest(cid.get())), err: + error "Failed to fetch manifest", err = err.msg + return RestApiResponse.error(Http404, err.msg, headers = headers) - router.api( - MethodGet, - "/api/codex/v1/space") do () -> RestApiResponse: - let json = % RestRepoStore( - totalBlocks: repoStore.totalBlocks, - quotaMaxBytes: repoStore.quotaMaxBytes, - quotaUsedBytes: repoStore.quotaUsedBytes, - quotaReservedBytes: repoStore.quotaReservedBytes - ) - return RestApiResponse.response($json, contentType="application/json") + proc fetchDatasetAsync(): Future[void] {.async.} = + try: + if err =? (await node.fetchBatched(manifest)).errorOption: + error "Unable to fetch dataset", cid = cid.get(), err = err.msg + except CatchableError as exc: + error "CatchableError when fetching dataset", cid = cid.get(), exc = exc.msg + discard -proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = - let allowedOrigin = router.allowedOrigin + asyncSpawn fetchDatasetAsync() - router.api( - MethodGet, - "/api/codex/v1/sales/slots") do () -> RestApiResponse: - var headers = buildCorsHeaders("GET", allowedOrigin) + let json = %formatManifest(cid.get(), manifest) + return RestApiResponse.response($json, contentType = "application/json") - ## Returns active slots for the host - try: - without contracts =? node.contracts.host: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + router.api(MethodGet, "/api/codex/v1/data/{cid}/network/stream") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + ## Download a file from the network in a streaming + ## manner + ## - let json = %(await contracts.sales.mySlots()) - return RestApiResponse.response($json, contentType="application/json", headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) + var headers = buildCorsHeaders("GET", allowedOrigin) - router.api( - MethodGet, - "/api/codex/v1/sales/slots/{slotId}") do (slotId: SlotId) -> RestApiResponse: - ## Returns active slot with id {slotId} for the host. Returns 404 if the - ## slot is not active for the host. - var headers = buildCorsHeaders("GET", allowedOrigin) + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) - without contracts =? node.contracts.host: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("GET", corsOrigin) + resp.setHeader("Access-Control-Headers", "X-Requested-With") - without slotId =? slotId.tryGet.catch, error: - return RestApiResponse.error(Http400, error.msg, headers = headers) + await node.retrieveCid(cid.get(), local = false, resp = resp) - without agent =? await contracts.sales.activeSale(slotId): - return RestApiResponse.error(Http404, "Provider not filling slot", headers = headers) + router.api(MethodGet, "/api/codex/v1/data/{cid}/network/manifest") do( + cid: Cid, resp: HttpResponseRef + ) -> RestApiResponse: + ## Download only the manifest. + ## - let restAgent = RestSalesAgent( - state: agent.state() |? 
"none", - slotIndex: agent.data.slotIndex, - requestId: agent.data.requestId, - request: agent.data.request, - reservation: agent.data.reservation, - ) + var headers = buildCorsHeaders("GET", allowedOrigin) - return RestApiResponse.response(restAgent.toJson, contentType="application/json", headers = headers) + if cid.isErr: + return RestApiResponse.error(Http400, $cid.error(), headers = headers) - router.api( - MethodGet, - "/api/codex/v1/sales/availability") do () -> RestApiResponse: - ## Returns storage that is for sale - var headers = buildCorsHeaders("GET", allowedOrigin) + without manifest =? (await node.fetchManifest(cid.get())), err: + error "Failed to fetch manifest", err = err.msg + return RestApiResponse.error(Http404, err.msg, headers = headers) - try: - without contracts =? node.contracts.host: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + let json = %formatManifest(cid.get(), manifest) + return RestApiResponse.response($json, contentType = "application/json") - without avails =? (await contracts.sales.context.reservations.all(Availability)), err: - return RestApiResponse.error(Http500, err.msg, headers = headers) + router.api(MethodGet, "/api/codex/v1/space") do() -> RestApiResponse: + let json = + %RestRepoStore( + totalBlocks: repoStore.totalBlocks, + quotaMaxBytes: repoStore.quotaMaxBytes, + quotaUsedBytes: repoStore.quotaUsedBytes, + quotaReservedBytes: repoStore.quotaReservedBytes, + ) + return RestApiResponse.response($json, contentType = "application/json") - let json = %avails - return RestApiResponse.response($json, contentType="application/json", headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) +proc initSalesApi(node: CodexNodeRef, router: var RestRouter) = + let allowedOrigin = router.allowedOrigin - router.rawApi( - MethodPost, - "/api/codex/v1/sales/availability") do () -> RestApiResponse: - ## Add available storage to sell. - ## Every time Availability's offer finishes, its capacity is returned to the availability. - ## - ## totalSize - size of available storage in bytes - ## duration - maximum time the storage should be sold for (in seconds) - ## minPrice - minimal price paid (in amount of tokens) for the whole hosted request's slot for the request's duration - ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens) + router.api(MethodGet, "/api/codex/v1/sales/slots") do() -> RestApiResponse: + var headers = buildCorsHeaders("GET", allowedOrigin) - var headers = buildCorsHeaders("POST", allowedOrigin) + ## Returns active slots for the host + try: + without contracts =? node.contracts.host: + return RestApiResponse.error( + Http503, "Persistence is not enabled", headers = headers + ) - try: - without contracts =? node.contracts.host: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + let json = %(await contracts.sales.mySlots()) + return RestApiResponse.response( + $json, contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) + + router.api(MethodGet, "/api/codex/v1/sales/slots/{slotId}") do( + slotId: SlotId + ) -> RestApiResponse: + ## Returns active slot with id {slotId} for the host. Returns 404 if the + ## slot is not active for the host. 
+ var headers = buildCorsHeaders("GET", allowedOrigin) + + without contracts =? node.contracts.host: + return + RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + + without slotId =? slotId.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg, headers = headers) + + without agent =? await contracts.sales.activeSale(slotId): + return + RestApiResponse.error(Http404, "Provider not filling slot", headers = headers) + + let restAgent = RestSalesAgent( + state: agent.state() |? "none", + slotIndex: agent.data.slotIndex, + requestId: agent.data.requestId, + request: agent.data.request, + reservation: agent.data.reservation, + ) + + return RestApiResponse.response( + restAgent.toJson, contentType = "application/json", headers = headers + ) + + router.api(MethodGet, "/api/codex/v1/sales/availability") do() -> RestApiResponse: + ## Returns storage that is for sale + var headers = buildCorsHeaders("GET", allowedOrigin) + + try: + without contracts =? node.contracts.host: + return RestApiResponse.error( + Http503, "Persistence is not enabled", headers = headers + ) - let body = await request.getBody() + without avails =? (await contracts.sales.context.reservations.all(Availability)), + err: + return RestApiResponse.error(Http500, err.msg, headers = headers) - without restAv =? RestAvailability.fromJson(body), error: - return RestApiResponse.error(Http400, error.msg, headers = headers) + let json = %avails + return RestApiResponse.response( + $json, contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) + + router.rawApi(MethodPost, "/api/codex/v1/sales/availability") do() -> RestApiResponse: + ## Add available storage to sell. + ## Every time Availability's offer finishes, its capacity is returned to the availability. + ## + ## totalSize - size of available storage in bytes + ## duration - maximum time the storage should be sold for (in seconds) + ## minPrice - minimal price paid (in amount of tokens) for the whole hosted request's slot for the request's duration + ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens) + + var headers = buildCorsHeaders("POST", allowedOrigin) + + try: + without contracts =? node.contracts.host: + return RestApiResponse.error( + Http503, "Persistence is not enabled", headers = headers + ) - let reservations = contracts.sales.context.reservations + let body = await request.getBody() - if restAv.totalSize == 0: - return RestApiResponse.error(Http400, "Total size must be larger then zero", headers = headers) + without restAv =? RestAvailability.fromJson(body), error: + return RestApiResponse.error(Http400, error.msg, headers = headers) - if not reservations.hasAvailable(restAv.totalSize.truncate(uint)): - return RestApiResponse.error(Http422, "Not enough storage quota", headers = headers) + let reservations = contracts.sales.context.reservations - without availability =? 
( - await reservations.createAvailability( - restAv.totalSize, - restAv.duration, - restAv.minPrice, - restAv.maxCollateral) - ), error: - return RestApiResponse.error(Http500, error.msg, headers = headers) + if restAv.totalSize == 0: + return RestApiResponse.error( + Http400, "Total size must be larger than zero", headers = headers + ) - return RestApiResponse.response(availability.toJson, - Http201, - contentType="application/json", - headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) + if not reservations.hasAvailable(restAv.totalSize.truncate(uint)): + return + RestApiResponse.error(Http422, "Not enough storage quota", headers = headers) - router.api( - MethodOptions, - "/api/codex/v1/sales/availability/{id}") do (id: AvailabilityId, resp: HttpResponseRef) -> RestApiResponse: - - if corsOrigin =? allowedOrigin: - resp.setCorsHeaders("PATCH", corsOrigin) - - resp.status = Http204 - await resp.sendBody("") - - router.rawApi( - MethodPatch, - "/api/codex/v1/sales/availability/{id}") do (id: AvailabilityId) -> RestApiResponse: - ## Updates Availability. - ## The new parameters will be only considered for new requests. - ## Existing Requests linked to this Availability will continue as is. - ## - ## totalSize - size of available storage in bytes. When decreasing the size, then lower limit is the currently `totalSize - freeSize`. - ## duration - maximum time the storage should be sold for (in seconds) - ## minPrice - minimum price to be paid (in amount of tokens) - ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens) - try: - without contracts =? node.contracts.host: - return RestApiResponse.error(Http503, "Persistence is not enabled") + without availability =? ( + await reservations.createAvailability( + restAv.totalSize, restAv.duration, restAv.minPrice, restAv.maxCollateral + ) + ), error: + return RestApiResponse.error(Http500, error.msg, headers = headers) + + return RestApiResponse.response( + availability.toJson, + Http201, + contentType = "application/json", + headers = headers, + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) + + router.api(MethodOptions, "/api/codex/v1/sales/availability/{id}") do( + id: AvailabilityId, resp: HttpResponseRef + ) -> RestApiResponse: + if corsOrigin =? allowedOrigin: + resp.setCorsHeaders("PATCH", corsOrigin) + + resp.status = Http204 + await resp.sendBody("") + + router.rawApi(MethodPatch, "/api/codex/v1/sales/availability/{id}") do( + id: AvailabilityId + ) -> RestApiResponse: + ## Updates Availability. + ## The new parameters will only be considered for new requests. + ## Existing Requests linked to this Availability will continue as is. + ## + ## totalSize - size of available storage in bytes. When decreasing the size, the lower limit is the current `totalSize - freeSize`. + ## duration - maximum time the storage should be sold for (in seconds) + ## minPrice - minimum price to be paid (in amount of tokens) + ## maxCollateral - maximum collateral user is willing to pay per filled Slot (in amount of tokens) + try: + without contracts =? node.contracts.host: + return RestApiResponse.error(Http503, "Persistence is not enabled") - without id =? id.tryGet.catch, error: - return RestApiResponse.error(Http400, error.msg) - without keyId =? 
id.key.tryGet.catch, error: - return RestApiResponse.error(Http400, error.msg) + without id =? id.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) + without keyId =? id.key.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg) - let - body = await request.getBody() - reservations = contracts.sales.context.reservations + let + body = await request.getBody() + reservations = contracts.sales.context.reservations - type OptRestAvailability = Optionalize(RestAvailability) - without restAv =? OptRestAvailability.fromJson(body), error: - return RestApiResponse.error(Http400, error.msg) + type OptRestAvailability = Optionalize(RestAvailability) + without restAv =? OptRestAvailability.fromJson(body), error: + return RestApiResponse.error(Http400, error.msg) - without availability =? (await reservations.get(keyId, Availability)), error: - if error of NotExistsError: - return RestApiResponse.error(Http404, "Availability not found") + without availability =? (await reservations.get(keyId, Availability)), error: + if error of NotExistsError: + return RestApiResponse.error(Http404, "Availability not found") - return RestApiResponse.error(Http500, error.msg) + return RestApiResponse.error(Http500, error.msg) - if isSome restAv.freeSize: - return RestApiResponse.error(Http400, "Updating freeSize is not allowed") + if isSome restAv.freeSize: + return RestApiResponse.error(Http400, "Updating freeSize is not allowed") - if size =? restAv.totalSize: - # we don't allow lowering the totalSize bellow currently utilized size - if size < (availability.totalSize - availability.freeSize): - return RestApiResponse.error(Http400, "New totalSize must be larger then current totalSize - freeSize, which is currently: " & $(availability.totalSize - availability.freeSize)) + if size =? restAv.totalSize: + # we don't allow lowering the totalSize bellow currently utilized size + if size < (availability.totalSize - availability.freeSize): + return RestApiResponse.error( + Http400, + "New totalSize must be larger then current totalSize - freeSize, which is currently: " & + $(availability.totalSize - availability.freeSize), + ) - availability.freeSize += size - availability.totalSize - availability.totalSize = size + availability.freeSize += size - availability.totalSize + availability.totalSize = size - if duration =? restAv.duration: - availability.duration = duration + if duration =? restAv.duration: + availability.duration = duration - if minPrice =? restAv.minPrice: - availability.minPrice = minPrice + if minPrice =? restAv.minPrice: + availability.minPrice = minPrice - if maxCollateral =? restAv.maxCollateral: - availability.maxCollateral = maxCollateral + if maxCollateral =? restAv.maxCollateral: + availability.maxCollateral = maxCollateral - if err =? (await reservations.update(availability)).errorOption: - return RestApiResponse.error(Http500, err.msg) + if err =? (await reservations.update(availability)).errorOption: + return RestApiResponse.error(Http500, err.msg) - return RestApiResponse.response(Http200) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500) + return RestApiResponse.response(Http200) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500) - router.rawApi( - MethodGet, - "/api/codex/v1/sales/availability/{id}/reservations") do (id: AvailabilityId) -> RestApiResponse: - ## Gets Availability's reservations. 
- var headers = buildCorsHeaders("GET", allowedOrigin) + router.rawApi(MethodGet, "/api/codex/v1/sales/availability/{id}/reservations") do( + id: AvailabilityId + ) -> RestApiResponse: + ## Gets Availability's reservations. + var headers = buildCorsHeaders("GET", allowedOrigin) - try: - without contracts =? node.contracts.host: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + try: + without contracts =? node.contracts.host: + return RestApiResponse.error( + Http503, "Persistence is not enabled", headers = headers + ) - without id =? id.tryGet.catch, error: - return RestApiResponse.error(Http400, error.msg, headers = headers) - without keyId =? id.key.tryGet.catch, error: - return RestApiResponse.error(Http400, error.msg, headers = headers) + without id =? id.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg, headers = headers) + without keyId =? id.key.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg, headers = headers) - let reservations = contracts.sales.context.reservations - let market = contracts.sales.context.market + let reservations = contracts.sales.context.reservations + let market = contracts.sales.context.market - if error =? (await reservations.get(keyId, Availability)).errorOption: - if error of NotExistsError: - return RestApiResponse.error(Http404, "Availability not found", headers = headers) - else: - return RestApiResponse.error(Http500, error.msg, headers = headers) + if error =? (await reservations.get(keyId, Availability)).errorOption: + if error of NotExistsError: + return + RestApiResponse.error(Http404, "Availability not found", headers = headers) + else: + return RestApiResponse.error(Http500, error.msg, headers = headers) - without availabilitysReservations =? (await reservations.all(Reservation, id)), err: - return RestApiResponse.error(Http500, err.msg, headers = headers) + without availabilitysReservations =? 
(await reservations.all(Reservation, id)),
+      err:
+        return RestApiResponse.error(Http500, err.msg, headers = headers)

-      # TODO: Expand this structure with information about the linked StorageRequest not only RequestID
-      return RestApiResponse.response(availabilitysReservations.toJson, contentType="application/json", headers = headers)
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
+      # TODO: Expand this structure with information about the linked StorageRequest, not only the RequestID
+      return RestApiResponse.response(
+        availabilitysReservations.toJson,
+        contentType = "application/json",
+        headers = headers,
+      )
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500, headers = headers)

 proc initPurchasingApi(node: CodexNodeRef, router: var RestRouter) =
   let allowedOrigin = router.allowedOrigin

-  router.rawApi(
-    MethodPost,
-    "/api/codex/v1/storage/request/{cid}") do (cid: Cid) -> RestApiResponse:
-    var headers = buildCorsHeaders("POST", allowedOrigin)
-
-    ## Create a request for storage
-    ##
-    ## cid - the cid of a previously uploaded dataset
-    ## duration - the duration of the request in seconds
-    ## proofProbability - how often storage proofs are required
-    ## reward - the maximum amount of tokens paid per second per slot to hosts the client is willing to pay
-    ## expiry - specifies threshold in seconds from now when the request expires if the Request does not find requested amount of nodes to host the data
-    ## nodes - number of nodes the content should be stored on
-    ## tolerance - allowed number of nodes that can be lost before content is lost
-    ## colateral - requested collateral from hosts when they fill slot
-    try:
-      without contracts =? node.contracts.client:
-        return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers)
-
-      without cid =? cid.tryGet.catch, error:
-        return RestApiResponse.error(Http400, error.msg, headers = headers)
+  router.rawApi(MethodPost, "/api/codex/v1/storage/request/{cid}") do(
+    cid: Cid
+  ) -> RestApiResponse:
+    var headers = buildCorsHeaders("POST", allowedOrigin)
+
+    ## Create a request for storage
+    ##
+    ## cid - the cid of a previously uploaded dataset
+    ## duration - the duration of the request in seconds
+    ## proofProbability - how often storage proofs are required
+    ## reward - the maximum amount of tokens paid per second per slot to hosts the client is willing to pay
+    ## expiry - specifies a threshold in seconds from now when the request expires if the Request does not find the requested number of nodes to host the data
+    ## nodes - number of nodes the content should be stored on
+    ## tolerance - allowed number of nodes that can be lost before content is lost
+    ## collateral - requested collateral from hosts when they fill a slot
+    try:
+      without contracts =? node.contracts.client:
+        return RestApiResponse.error(
+          Http503, "Persistence is not enabled", headers = headers
+        )

-      let body = await request.getBody()
+      without cid =? cid.tryGet.catch, error:
+        return RestApiResponse.error(Http400, error.msg, headers = headers)

-      without params =? StorageRequestParams.fromJson(body), error:
-        return RestApiResponse.error(Http400, error.msg, headers = headers)
+      let body = await request.getBody()

-      let nodes = params.nodes |? 3
-      let tolerance = params.tolerance |? 1
+      without params =? StorageRequestParams.fromJson(body), error:
+        return RestApiResponse.error(Http400, error.msg, headers = headers)

-      if tolerance == 0:
-        return RestApiResponse.error(Http400, "Tolerance needs to be bigger then zero", headers = headers)
+      let nodes = params.nodes |? 3
+      let tolerance = params.tolerance |? 1

-      # prevent underflow
-      if tolerance > nodes:
-        return RestApiResponse.error(Http400, "Invalid parameters: `tolerance` cannot be greater than `nodes`", headers = headers)
+      if tolerance == 0:
+        return RestApiResponse.error(
+          Http400, "Tolerance needs to be bigger than zero", headers = headers
+        )

-      let ecK = nodes - tolerance
-      let ecM = tolerance # for readability
+      # prevent underflow
+      if tolerance > nodes:
+        return RestApiResponse.error(
+          Http400,
+          "Invalid parameters: `tolerance` cannot be greater than `nodes`",
+          headers = headers,
+        )

-      # ensure leopard constrainst of 1 < K ≥ M
-      if ecK <= 1 or ecK < ecM:
-        return RestApiResponse.error(Http400, "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`", headers = headers)
+      let ecK = nodes - tolerance
+      let ecM = tolerance # for readability

-      without expiry =? params.expiry:
-        return RestApiResponse.error(Http400, "Expiry required", headers = headers)
+      # ensure leopard constraints of 1 < K ≥ M
+      if ecK <= 1 or ecK < ecM:
+        return RestApiResponse.error(
+          Http400,
+          "Invalid parameters: parameters must satisfy `1 < (nodes - tolerance) ≥ tolerance`",
+          headers = headers,
+        )

-      if expiry <= 0 or expiry >= params.duration:
-        return RestApiResponse.error(Http400, "Expiry needs value bigger then zero and smaller then the request's duration", headers = headers)
+      without expiry =? params.expiry:
+        return RestApiResponse.error(Http400, "Expiry required", headers = headers)

-      without purchaseId =? await node.requestStorage(
-        cid,
-        params.duration,
-        params.proofProbability,
-        nodes,
-        tolerance,
-        params.reward,
-        params.collateral,
-        expiry), error:
+      if expiry <= 0 or expiry >= params.duration:
+        return RestApiResponse.error(
+          Http400,
+          "Expiry needs a value bigger than zero and smaller than the request's duration",
+          headers = headers,
+        )

-        if error of InsufficientBlocksError:
-          return RestApiResponse.error(Http400,
+      without purchaseId =?
+        await node.requestStorage(
+          cid, params.duration, params.proofProbability, nodes, tolerance,
+          params.reward, params.collateral, expiry,
+        ), error:
+        if error of InsufficientBlocksError:
+          return RestApiResponse.error(
+            Http400,
             "Dataset too small for erasure parameters, need at least " &
-            $(ref InsufficientBlocksError)(error).minSize.int & " bytes", headers = headers)
+              $(ref InsufficientBlocksError)(error).minSize.int & " bytes",
+            headers = headers,
+          )

-        return RestApiResponse.error(Http500, error.msg, headers = headers)
+        return RestApiResponse.error(Http500, error.msg, headers = headers)

-      return RestApiResponse.response(purchaseId.toHex)
-    except CatchableError as exc:
-      trace "Excepting processing request", exc = exc.msg
-      return RestApiResponse.error(Http500, headers = headers)
-
-  router.api(
-    MethodGet,
-    "/api/codex/v1/storage/purchases/{id}") do (
-      id: PurchaseId) -> RestApiResponse:
-    var headers = buildCorsHeaders("GET", allowedOrigin)
+      return RestApiResponse.response(purchaseId.toHex)
+    except CatchableError as exc:
+      trace "Excepting processing request", exc = exc.msg
+      return RestApiResponse.error(Http500, headers = headers)

-    try:
-      without contracts =? 
node.contracts.client: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + router.api(MethodGet, "/api/codex/v1/storage/purchases/{id}") do( + id: PurchaseId + ) -> RestApiResponse: + var headers = buildCorsHeaders("GET", allowedOrigin) - without id =? id.tryGet.catch, error: - return RestApiResponse.error(Http400, error.msg, headers = headers) + try: + without contracts =? node.contracts.client: + return RestApiResponse.error( + Http503, "Persistence is not enabled", headers = headers + ) - without purchase =? contracts.purchasing.getPurchase(id): - return RestApiResponse.error(Http404, headers = headers) + without id =? id.tryGet.catch, error: + return RestApiResponse.error(Http400, error.msg, headers = headers) + + without purchase =? contracts.purchasing.getPurchase(id): + return RestApiResponse.error(Http404, headers = headers) - let json = % RestPurchase( + let json = + %RestPurchase( state: purchase.state |? "none", - error: purchase.error.?msg, + error: purchase.error .? msg, request: purchase.request, - requestId: purchase.requestId + requestId: purchase.requestId, ) - return RestApiResponse.response($json, contentType="application/json", headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) + return RestApiResponse.response( + $json, contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) - router.api( - MethodGet, - "/api/codex/v1/storage/purchases") do () -> RestApiResponse: - var headers = buildCorsHeaders("GET", allowedOrigin) + router.api(MethodGet, "/api/codex/v1/storage/purchases") do() -> RestApiResponse: + var headers = buildCorsHeaders("GET", allowedOrigin) - try: - without contracts =? node.contracts.client: - return RestApiResponse.error(Http503, "Persistence is not enabled", headers = headers) + try: + without contracts =? node.contracts.client: + return RestApiResponse.error( + Http503, "Persistence is not enabled", headers = headers + ) - let purchaseIds = contracts.purchasing.getPurchaseIds() - return RestApiResponse.response($ %purchaseIds, contentType="application/json", headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) + let purchaseIds = contracts.purchasing.getPurchaseIds() + return RestApiResponse.response( + $ %purchaseIds, contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) proc initNodeApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = let allowedOrigin = router.allowedOrigin ## various node management api's ## - router.api( - MethodGet, - "/api/codex/v1/spr") do () -> RestApiResponse: - ## Returns node SPR in requested format, json or text. - ## - var headers = buildCorsHeaders("GET", allowedOrigin) - - try: - without spr =? 
node.discovery.dhtRecord: - return RestApiResponse.response("", status=Http503, contentType="application/json", headers = headers) - - if $preferredContentType().get() == "text/plain": - return RestApiResponse.response(spr.toURI, contentType="text/plain", headers = headers) - else: - return RestApiResponse.response($ %* {"spr": spr.toURI}, contentType="application/json", headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) - - router.api( - MethodGet, - "/api/codex/v1/peerid") do () -> RestApiResponse: - ## Returns node's peerId in requested format, json or text. - ## - var headers = buildCorsHeaders("GET", allowedOrigin) + router.api(MethodGet, "/api/codex/v1/spr") do() -> RestApiResponse: + ## Returns node SPR in requested format, json or text. + ## + var headers = buildCorsHeaders("GET", allowedOrigin) + + try: + without spr =? node.discovery.dhtRecord: + return RestApiResponse.response( + "", status = Http503, contentType = "application/json", headers = headers + ) - try: - let id = $node.switch.peerInfo.peerId + if $preferredContentType().get() == "text/plain": + return RestApiResponse.response( + spr.toURI, contentType = "text/plain", headers = headers + ) + else: + return RestApiResponse.response( + $ %*{"spr": spr.toURI}, contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) - if $preferredContentType().get() == "text/plain": - return RestApiResponse.response(id, contentType="text/plain", headers = headers) - else: - return RestApiResponse.response($ %* {"id": id}, contentType="application/json", headers = headers) - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) + router.api(MethodGet, "/api/codex/v1/peerid") do() -> RestApiResponse: + ## Returns node's peerId in requested format, json or text. + ## + var headers = buildCorsHeaders("GET", allowedOrigin) - router.api( - MethodGet, - "/api/codex/v1/connect/{peerId}") do ( - peerId: PeerId, - addrs: seq[MultiAddress]) -> RestApiResponse: - ## Connect to a peer - ## - ## If `addrs` param is supplied, it will be used to - ## dial the peer, otherwise the `peerId` is used - ## to invoke peer discovery, if it succeeds - ## the returned addresses will be used to dial - ## - ## `addrs` the listening addresses of the peers to dial, eg the one specified with `--listen-addrs` - ## - var headers = buildCorsHeaders("GET", allowedOrigin) + try: + let id = $node.switch.peerInfo.peerId - if peerId.isErr: - return RestApiResponse.error( - Http400, - $peerId.error(), - headers = headers) - - let addresses = if addrs.isOk and addrs.get().len > 0: - addrs.get() - else: - without peerRecord =? 
(await node.findPeer(peerId.get())): - return RestApiResponse.error( - Http400, - "Unable to find Peer!", - headers = headers) - peerRecord.addresses.mapIt(it.address) - try: - await node.connect(peerId.get(), addresses) - return RestApiResponse.response("Successfully connected to peer", headers = headers) - except DialFailedError: - return RestApiResponse.error(Http400, "Unable to dial peer", headers = headers) - except CatchableError: - return RestApiResponse.error(Http500, "Unknown error dialling peer", headers = headers) + if $preferredContentType().get() == "text/plain": + return + RestApiResponse.response(id, contentType = "text/plain", headers = headers) + else: + return RestApiResponse.response( + $ %*{"id": id}, contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) + + router.api(MethodGet, "/api/codex/v1/connect/{peerId}") do( + peerId: PeerId, addrs: seq[MultiAddress] + ) -> RestApiResponse: + ## Connect to a peer + ## + ## If `addrs` param is supplied, it will be used to + ## dial the peer, otherwise the `peerId` is used + ## to invoke peer discovery, if it succeeds + ## the returned addresses will be used to dial + ## + ## `addrs` the listening addresses of the peers to dial, eg the one specified with `--listen-addrs` + ## + var headers = buildCorsHeaders("GET", allowedOrigin) + + if peerId.isErr: + return RestApiResponse.error(Http400, $peerId.error(), headers = headers) + + let addresses = + if addrs.isOk and addrs.get().len > 0: + addrs.get() + else: + without peerRecord =? (await node.findPeer(peerId.get())): + return + RestApiResponse.error(Http400, "Unable to find Peer!", headers = headers) + peerRecord.addresses.mapIt(it.address) + try: + await node.connect(peerId.get(), addresses) + return + RestApiResponse.response("Successfully connected to peer", headers = headers) + except DialFailedError: + return RestApiResponse.error(Http400, "Unable to dial peer", headers = headers) + except CatchableError: + return + RestApiResponse.error(Http500, "Unknown error dialling peer", headers = headers) proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = let allowedOrigin = router.allowedOrigin - router.api( - MethodGet, - "/api/codex/v1/debug/info") do () -> RestApiResponse: - ## Print rudimentary node information - ## - var headers = buildCorsHeaders("GET", allowedOrigin) + router.api(MethodGet, "/api/codex/v1/debug/info") do() -> RestApiResponse: + ## Print rudimentary node information + ## + var headers = buildCorsHeaders("GET", allowedOrigin) + + try: + let table = RestRoutingTable.init(node.discovery.protocol.routingTable) + + let json = + %*{ + "id": $node.switch.peerInfo.peerId, + "addrs": node.switch.peerInfo.addrs.mapIt($it), + "repo": $conf.dataDir, + "spr": + if node.discovery.dhtRecord.isSome: + node.discovery.dhtRecord.get.toURI + else: + "", + "announceAddresses": node.discovery.announceAddrs, + "table": table, + "codex": {"version": $codexVersion, "revision": $codexRevision}, + } + + # return pretty json for human readability + return RestApiResponse.response( + json.pretty(), contentType = "application/json", headers = headers + ) + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) + + router.api(MethodPost, "/api/codex/v1/debug/chronicles/loglevel") do( + level: Option[string] + ) -> RestApiResponse: + 
## Set log level at run time + ## + ## e.g. `chronicles/loglevel?level=DEBUG` + ## + ## `level` - chronicles log level + ## + var headers = buildCorsHeaders("POST", allowedOrigin) + + try: + without res =? level and level =? res: + return RestApiResponse.error(Http400, "Missing log level", headers = headers) try: - let table = RestRoutingTable.init(node.discovery.protocol.routingTable) - - let - json = %*{ - "id": $node.switch.peerInfo.peerId, - "addrs": node.switch.peerInfo.addrs.mapIt( $it ), - "repo": $conf.dataDir, - "spr": - if node.discovery.dhtRecord.isSome: - node.discovery.dhtRecord.get.toURI - else: - "", - "announceAddresses": node.discovery.announceAddrs, - "table": table, - "codex": { - "version": $codexVersion, - "revision": $codexRevision - } - } - - # return pretty json for human readability - return RestApiResponse.response(json.pretty(), contentType="application/json", headers = headers) + {.gcsafe.}: + updateLogLevel(level) except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) + return RestApiResponse.error(Http500, exc.msg, headers = headers) - router.api( - MethodPost, - "/api/codex/v1/debug/chronicles/loglevel") do ( - level: Option[string]) -> RestApiResponse: - ## Set log level at run time - ## - ## e.g. `chronicles/loglevel?level=DEBUG` - ## - ## `level` - chronicles log level - ## - var headers = buildCorsHeaders("POST", allowedOrigin) - - try: - without res =? level and level =? res: - return RestApiResponse.error(Http400, "Missing log level", headers = headers) - - try: - {.gcsafe.}: - updateLogLevel(level) - except CatchableError as exc: - return RestApiResponse.error(Http500, exc.msg, headers = headers) - - return RestApiResponse.response("") - except CatchableError as exc: - trace "Excepting processing request", exc = exc.msg - return RestApiResponse.error(Http500, headers = headers) + return RestApiResponse.response("") + except CatchableError as exc: + trace "Excepting processing request", exc = exc.msg + return RestApiResponse.error(Http500, headers = headers) when codex_enable_api_debug_peers: - router.api( - MethodGet, - "/api/codex/v1/debug/peer/{peerId}") do (peerId: PeerId) -> RestApiResponse: + router.api(MethodGet, "/api/codex/v1/debug/peer/{peerId}") do( + peerId: PeerId + ) -> RestApiResponse: var headers = buildCorsHeaders("GET", allowedOrigin) try: trace "debug/peer start" without peerRecord =? (await node.findPeer(peerId.get())): trace "debug/peer peer not found!" 
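The purchasing and debug routes reformatted above also map onto simple HTTP calls. A hedged sketch, again assuming localhost:8080 and illustrative values; the JSON field names mirror the parameters documented in the storage-request handler, and `<cid>` is a placeholder:

```nim
import std/[httpclient, httpcore]

let client = newHttpClient()
client.headers = newHttpHeaders({"Content-Type": "application/json"})

# Request storage for a previously uploaded dataset. The handler enforces
# tolerance > 0, tolerance <= nodes, 1 < (nodes - tolerance) >= tolerance,
# and 0 < expiry < duration; on success the purchase id is returned as hex.
let resp = client.request(
  "http://localhost:8080/api/codex/v1/storage/request/<cid>",
  httpMethod = HttpPost,
  body =
    """{"duration": "3600", "reward": "10", "proofProbability": "5",
        "expiry": "1200", "nodes": 3, "tolerance": 1, "collateral": "100"}""",
)
echo resp.body # purchase id (hex) on success

# Raise the log level at run time, per the loglevel route above:
discard client.request(
  "http://localhost:8080/api/codex/v1/debug/chronicles/loglevel?level=DEBUG",
  httpMethod = HttpPost,
)
```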
- return RestApiResponse.error( - Http400, - "Unable to find Peer!", - headers = headers) + return + RestApiResponse.error(Http400, "Unable to find Peer!", headers = headers) let json = %RestPeerRecord.init(peerRecord) trace "debug/peer returning peer record" @@ -853,11 +870,11 @@ proc initDebugApi(node: CodexNodeRef, conf: CodexConf, router: var RestRouter) = return RestApiResponse.error(Http500, headers = headers) proc initRestApi*( - node: CodexNodeRef, - conf: CodexConf, - repoStore: RepoStore, - corsAllowedOrigin: ?string): RestRouter = - + node: CodexNodeRef, + conf: CodexConf, + repoStore: RepoStore, + corsAllowedOrigin: ?string, +): RestRouter = var router = RestRouter.init(validate, corsAllowedOrigin) initDataApi(node, repoStore, router) diff --git a/codex/rest/coders.nim b/codex/rest/coders.nim index 0be1a6381..1c997ccf1 100644 --- a/codex/rest/coders.nim +++ b/codex/rest/coders.nim @@ -25,9 +25,7 @@ proc encodeString*(cid: type Cid): Result[string, cstring] = ok($cid) proc decodeString*(T: type Cid, value: string): Result[Cid, cstring] = - Cid - .init(value) - .mapErr do(e: CidError) -> cstring: + Cid.init(value).mapErr do(e: CidError) -> cstring: case e of CidError.Incorrect: "Incorrect Cid".cstring of CidError.Unsupported: "Unsupported Cid".cstring @@ -44,9 +42,8 @@ proc encodeString*(address: MultiAddress): Result[string, cstring] = ok($address) proc decodeString*(T: type MultiAddress, value: string): Result[MultiAddress, cstring] = - MultiAddress - .init(value) - .mapErr do(e: string) -> cstring: cstring(e) + MultiAddress.init(value).mapErr do(e: string) -> cstring: + cstring(e) proc decodeString*(T: type SomeUnsignedInt, value: string): Result[T, cstring] = Base10.decode(T, value) @@ -55,7 +52,7 @@ proc encodeString*(value: SomeUnsignedInt): Result[string, cstring] = ok(Base10.toString(value)) proc decodeString*(T: type Duration, value: string): Result[T, cstring] = - let v = ? 
Base10.decode(uint32, value) + let v = ?Base10.decode(uint32, value) ok(v.minutes) proc encodeString*(value: Duration): Result[string, cstring] = @@ -77,19 +74,20 @@ proc decodeString*(_: type UInt256, value: string): Result[UInt256, cstring] = except ValueError as e: err e.msg.cstring -proc decodeString*(_: type array[32, byte], - value: string): Result[array[32, byte], cstring] = +proc decodeString*( + _: type array[32, byte], value: string +): Result[array[32, byte], cstring] = try: ok array[32, byte].fromHex(value) except ValueError as e: err e.msg.cstring -proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId](_: type T, - value: string): Result[T, cstring] = +proc decodeString*[T: PurchaseId | RequestId | Nonce | SlotId | AvailabilityId]( + _: type T, value: string +): Result[T, cstring] = array[32, byte].decodeString(value).map(id => T(id)) -proc decodeString*(t: typedesc[string], - value: string): Result[string, cstring] = +proc decodeString*(t: typedesc[string], value: string): Result[string, cstring] = ok(value) proc encodeString*(value: string): RestResult[string] = diff --git a/codex/rest/json.nim b/codex/rest/json.nim index afbfebe6d..021cb1962 100644 --- a/codex/rest/json.nim +++ b/codex/rest/json.nim @@ -74,15 +74,10 @@ type quotaReservedBytes* {.serialize.}: NBytes proc init*(_: type RestContentList, content: seq[RestContent]): RestContentList = - RestContentList( - content: content - ) + RestContentList(content: content) proc init*(_: type RestContent, cid: Cid, manifest: Manifest): RestContent = - RestContent( - cid: cid, - manifest: manifest - ) + RestContent(cid: cid, manifest: manifest) proc init*(_: type RestNode, node: dn.Node): RestNode = RestNode( @@ -90,7 +85,7 @@ proc init*(_: type RestNode, node: dn.Node): RestNode = peerId: node.record.data.peerId, record: node.record, address: node.address, - seen: node.seen > 0.5 + seen: node.seen > 0.5, ) proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRoutingTable = @@ -99,28 +94,23 @@ proc init*(_: type RestRoutingTable, routingTable: rt.RoutingTable): RestRouting for node in bucket.nodes: nodes.add(RestNode.init(node)) - RestRoutingTable( - localNode: RestNode.init(routingTable.localNode), - nodes: nodes - ) + RestRoutingTable(localNode: RestNode.init(routingTable.localNode), nodes: nodes) proc init*(_: type RestPeerRecord, peerRecord: PeerRecord): RestPeerRecord = RestPeerRecord( - peerId: peerRecord.peerId, - seqNo: peerRecord.seqNo, - addresses: peerRecord.addresses + peerId: peerRecord.peerId, seqNo: peerRecord.seqNo, addresses: peerRecord.addresses ) proc init*(_: type RestNodeId, id: NodeId): RestNodeId = - RestNodeId( - id: id - ) + RestNodeId(id: id) proc `%`*(obj: StorageRequest | Slot): JsonNode = let jsonObj = newJObject() - for k, v in obj.fieldPairs: jsonObj[k] = %v + for k, v in obj.fieldPairs: + jsonObj[k] = %v jsonObj["id"] = %(obj.id) return jsonObj -proc `%`*(obj: RestNodeId): JsonNode = % $obj.id +proc `%`*(obj: RestNodeId): JsonNode = + % $obj.id diff --git a/codex/rng.nim b/codex/rng.nim index 19452cd4c..9d82156ea 100644 --- a/codex/rng.nim +++ b/codex/rng.nim @@ -9,7 +9,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/libp2p/crypto/crypto import pkg/bearssl/rand @@ -30,7 +31,8 @@ proc instance*(t: type Rng): Rng = const randMax = 18_446_744_073_709_551_615'u64 proc rand*(rng: Rng, max: Natural): int = - if max == 0: return 0 + if max == 0: + return 0 while true: let x = rng[].generate(uint64) @@ -41,8 +43,8 @@ proc 
sample*[T](rng: Rng, a: openArray[T]): T = result = a[rng.rand(a.high)] proc sample*[T]( - rng: Rng, sample, exclude: openArray[T]): T - {.raises: [Defect, RngSampleError].} = + rng: Rng, sample, exclude: openArray[T] +): T {.raises: [Defect, RngSampleError].} = if sample == exclude: raise newException(RngSampleError, "Sample and exclude arrays are the same!") diff --git a/codex/sales.nim b/codex/sales.nim index f891edab3..8b113bca1 100644 --- a/codex/sales.nim +++ b/codex/sales.nim @@ -45,13 +45,12 @@ export salescontext logScope: topics = "sales marketplace" -type - Sales* = ref object - context*: SalesContext - agents*: seq[SalesAgent] - running: bool - subscriptions: seq[market.Subscription] - trackedFutures: TrackedFutures +type Sales* = ref object + context*: SalesContext + agents*: seq[SalesAgent] + running: bool + subscriptions: seq[market.Subscription] + trackedFutures: TrackedFutures proc `onStore=`*(sales: Sales, onStore: OnStore) = sales.context.onStore = some onStore @@ -68,28 +67,31 @@ proc `onProve=`*(sales: Sales, callback: OnProve) = proc `onExpiryUpdate=`*(sales: Sales, callback: OnExpiryUpdate) = sales.context.onExpiryUpdate = some callback -proc onStore*(sales: Sales): ?OnStore = sales.context.onStore +proc onStore*(sales: Sales): ?OnStore = + sales.context.onStore -proc onClear*(sales: Sales): ?OnClear = sales.context.onClear +proc onClear*(sales: Sales): ?OnClear = + sales.context.onClear -proc onSale*(sales: Sales): ?OnSale = sales.context.onSale +proc onSale*(sales: Sales): ?OnSale = + sales.context.onSale -proc onProve*(sales: Sales): ?OnProve = sales.context.onProve +proc onProve*(sales: Sales): ?OnProve = + sales.context.onProve -proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate = sales.context.onExpiryUpdate +proc onExpiryUpdate*(sales: Sales): ?OnExpiryUpdate = + sales.context.onExpiryUpdate -proc new*(_: type Sales, - market: Market, - clock: Clock, - repo: RepoStore): Sales = +proc new*(_: type Sales, market: Market, clock: Clock, repo: RepoStore): Sales = Sales.new(market, clock, repo, 0) -proc new*(_: type Sales, - market: Market, - clock: Clock, - repo: RepoStore, - simulateProofFailures: int): Sales = - +proc new*( + _: type Sales, + market: Market, + clock: Clock, + repo: RepoStore, + simulateProofFailures: int, +): Sales = let reservations = Reservations.new(repo) Sales( context: SalesContext( @@ -97,10 +99,10 @@ proc new*(_: type Sales, clock: clock, reservations: reservations, slotQueue: SlotQueue.new(), - simulateProofFailures: simulateProofFailures + simulateProofFailures: simulateProofFailures, ), trackedFutures: TrackedFutures.new(), - subscriptions: @[] + subscriptions: @[], ) proc remove(sales: Sales, agent: SalesAgent) {.async.} = @@ -108,20 +110,21 @@ proc remove(sales: Sales, agent: SalesAgent) {.async.} = if sales.running: sales.agents.keepItIf(it != agent) -proc cleanUp(sales: Sales, - agent: SalesAgent, - returnBytes: bool, - reprocessSlot: bool, - processing: Future[void]) {.async.} = - +proc cleanUp( + sales: Sales, + agent: SalesAgent, + returnBytes: bool, + reprocessSlot: bool, + processing: Future[void], +) {.async.} = let data = agent.data logScope: topics = "sales cleanUp" requestId = data.requestId slotIndex = data.slotIndex - reservationId = data.reservation.?id |? ReservationId.default - availabilityId = data.reservation.?availabilityId |? AvailabilityId.default + reservationId = data.reservation .? id |? ReservationId.default + availabilityId = data.reservation .? availabilityId |? 
AvailabilityId.default trace "cleaning up sales agent" @@ -129,36 +132,37 @@ proc cleanUp(sales: Sales, # that the cleanUp was called before the sales process really started, so # there are not really any bytes to be returned if returnBytes and request =? data.request and reservation =? data.reservation: - if returnErr =? (await sales.context.reservations.returnBytesToAvailability( - reservation.availabilityId, - reservation.id, - request.ask.slotSize - )).errorOption: - error "failure returning bytes", - error = returnErr.msg, - bytes = request.ask.slotSize + if returnErr =? ( + await sales.context.reservations.returnBytesToAvailability( + reservation.availabilityId, reservation.id, request.ask.slotSize + ) + ).errorOption: + error "failure returning bytes", + error = returnErr.msg, bytes = request.ask.slotSize # delete reservation and return reservation bytes back to the availability if reservation =? data.reservation and - deleteErr =? (await sales.context.reservations.deleteReservation( - reservation.id, - reservation.availabilityId - )).errorOption: - error "failure deleting reservation", error = deleteErr.msg + deleteErr =? ( + await sales.context.reservations.deleteReservation( + reservation.id, reservation.availabilityId + ) + ).errorOption: + error "failure deleting reservation", error = deleteErr.msg # Re-add items back into the queue to prevent small availabilities from # draining the queue. Seen items will be ordered last. if reprocessSlot and request =? data.request: let queue = sales.context.slotQueue - var seenItem = SlotQueueItem.init(data.requestId, - data.slotIndex.truncate(uint16), - data.ask, - request.expiry, - seen = true) + var seenItem = SlotQueueItem.init( + data.requestId, + data.slotIndex.truncate(uint16), + data.ask, + request.expiry, + seen = true, + ) trace "pushing ignored item to queue, marked as seen" if err =? queue.push(seenItem).errorOption: - error "failed to readd slot to queue", - errorType = $(type err), error = err.msg + error "failed to readd slot to queue", errorType = $(type err), error = err.msg await sales.remove(agent) @@ -167,11 +171,8 @@ proc cleanUp(sales: Sales, processing.complete() proc filled( - sales: Sales, - request: StorageRequest, - slotIndex: UInt256, - processing: Future[void]) = - + sales: Sales, request: StorageRequest, slotIndex: UInt256, processing: Future[void] +) = if onSale =? sales.context.onSale: onSale(request, slotIndex) @@ -180,17 +181,13 @@ proc filled( processing.complete() proc processSlot(sales: Sales, item: SlotQueueItem, done: Future[void]) = - debug "Processing slot from queue", requestId = item.requestId, - slot = item.slotIndex + debug "Processing slot from queue", requestId = item.requestId, slot = item.slotIndex let agent = newSalesAgent( - sales.context, - item.requestId, - item.slotIndex.u256, - none StorageRequest + sales.context, item.requestId, item.slotIndex.u256, none StorageRequest ) - agent.onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = + agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} = await sales.cleanUp(agent, returnBytes, reprocessSlot, done) agent.onFilled = some proc(request: StorageRequest, slotIndex: UInt256) = @@ -204,10 +201,12 @@ proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} without reservs =? 
await reservations.all(Reservation): return - let unused = reservs.filter(r => ( - let slotId = slotId(r.requestId, r.slotIndex) - not activeSlots.any(slot => slot.id == slotId) - )) + let unused = reservs.filter( + r => ( + let slotId = slotId(r.requestId, r.slotIndex) + not activeSlots.any(slot => slot.id == slotId) + ) + ) if unused.len == 0: return @@ -215,14 +214,13 @@ proc deleteInactiveReservations(sales: Sales, activeSlots: seq[Slot]) {.async.} info "Found unused reservations for deletion", unused = unused.len for reservation in unused: - logScope: reservationId = reservation.id availabilityId = reservation.availabilityId - if err =? (await reservations.deleteReservation( - reservation.id, reservation.availabilityId - )).errorOption: + if err =? ( + await reservations.deleteReservation(reservation.id, reservation.availabilityId) + ).errorOption: error "Failed to delete unused reservation", error = err.msg else: trace "Deleted unused reservation" @@ -252,11 +250,8 @@ proc load*(sales: Sales) {.async.} = await sales.deleteInactiveReservations(activeSlots) for slot in activeSlots: - let agent = newSalesAgent( - sales.context, - slot.request.id, - slot.slotIndex, - some slot.request) + let agent = + newSalesAgent(sales.context, slot.request.id, slot.slotIndex, some slot.request) agent.onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} = # since workers are not being dispatched, this future has not been created @@ -282,11 +277,9 @@ proc onAvailabilityAdded(sales: Sales, availability: Availability) {.async.} = trace "unpausing queue after new availability added" queue.unpause() -proc onStorageRequested(sales: Sales, - requestId: RequestId, - ask: StorageAsk, - expiry: UInt256) = - +proc onStorageRequested( + sales: Sales, requestId: RequestId, ask: StorageAsk, expiry: UInt256 +) = logScope: topics = "marketplace sales onStorageRequested" requestId @@ -314,10 +307,7 @@ proc onStorageRequested(sales: Sales, else: warn "Error adding request to SlotQueue", error = err.msg -proc onSlotFreed(sales: Sales, - requestId: RequestId, - slotIndex: UInt256) = - +proc onSlotFreed(sales: Sales, requestId: RequestId, slotIndex: UInt256) = logScope: topics = "marketplace sales onSlotFreed" requestId @@ -331,8 +321,7 @@ proc onSlotFreed(sales: Sales, let queue = context.slotQueue # first attempt to populate request using existing slot metadata in queue - without var found =? queue.populateItem(requestId, - slotIndex.truncate(uint16)): + without var found =? queue.populateItem(requestId, slotIndex.truncate(uint16)): trace "no existing request metadata, getting request info from contract" # if there's no existing slot for that request, retrieve the request # from the contract. 
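The `cleanUp` hunk above re-pushes failed slots with `seen = true` so that fresh work is always preferred over re-added work. The real ordering lives in `slotqueue.nim` and also weighs profitability, collateral and expiry; the standalone toy model below only demonstrates the seen-last idea in a min-queue (the types and values are invented for illustration):

```nim
# Standalone sketch: in a min-heap, "seen" items compare greater than unseen
# ones, so re-added items drain only after all fresh items are processed.
import std/heapqueue

type Item = object
  name: string
  seen: bool

proc `<`(a, b: Item): bool =
  if a.seen != b.seen:
    return b.seen # a wins only when a is unseen and b is seen
  a.name < b.name # placeholder tie-breaker; the real queue compares profitability

var q = initHeapQueue[Item]()
q.push Item(name: "slot-a", seen: true)  # previously processed, re-added
q.push Item(name: "slot-b", seen: false) # fresh
assert q.pop().name == "slot-b"
```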
@@ -359,9 +348,7 @@ proc subscribeRequested(sales: Sales) {.async.} = let context = sales.context let market = context.market - proc onStorageRequested(requestId: RequestId, - ask: StorageAsk, - expiry: UInt256) = + proc onStorageRequested(requestId: RequestId, ask: StorageAsk, expiry: UInt256) = sales.onStorageRequested(requestId, ask, expiry) try: @@ -485,10 +472,9 @@ proc startSlotQueue(sales: Sales) = let slotQueue = sales.context.slotQueue let reservations = sales.context.reservations - slotQueue.onProcessSlot = - proc(item: SlotQueueItem, done: Future[void]) {.async.} = - trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex - sales.processSlot(item, done) + slotQueue.onProcessSlot = proc(item: SlotQueueItem, done: Future[void]) {.async.} = + trace "processing slot queue item", reqId = item.requestId, slotIdx = item.slotIndex + sales.processSlot(item, done) slotQueue.start() diff --git a/codex/sales/reservations.nim b/codex/sales/reservations.nim index 027bda95a..6325a3b26 100644 --- a/codex/sales/reservations.nim +++ b/codex/sales/reservations.nim @@ -26,7 +26,8 @@ ## +----------------------------------------+ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import std/sequtils import std/sugar @@ -54,7 +55,6 @@ export logutils logScope: topics = "sales reservations" - type AvailabilityId* = distinct array[32, byte] ReservationId* = distinct array[32, byte] @@ -65,25 +65,32 @@ type totalSize* {.serialize.}: UInt256 freeSize* {.serialize.}: UInt256 duration* {.serialize.}: UInt256 - minPrice* {.serialize.}: UInt256 # minimal price paid for the whole hosted slot for the request's duration + minPrice* {.serialize.}: UInt256 + # minimal price paid for the whole hosted slot for the request's duration maxCollateral* {.serialize.}: UInt256 + Reservation* = ref object id* {.serialize.}: ReservationId availabilityId* {.serialize.}: AvailabilityId size* {.serialize.}: UInt256 requestId* {.serialize.}: RequestId slotIndex* {.serialize.}: UInt256 + Reservations* = ref object of RootObj - availabilityLock: AsyncLock # Lock for protecting assertions of availability's sizes when searching for matching availability + availabilityLock: AsyncLock + # Lock for protecting assertions of availability's sizes when searching for matching availability repo: RepoStore onAvailabilityAdded: ?OnAvailabilityAdded + GetNext* = proc(): Future[?seq[byte]] {.upraises: [], gcsafe, closure.} IterDispose* = proc(): Future[?!void] {.gcsafe, closure.} - OnAvailabilityAdded* = proc(availability: Availability): Future[void] {.upraises: [], gcsafe.} + OnAvailabilityAdded* = + proc(availability: Availability): Future[void] {.upraises: [], gcsafe.} StorableIter* = ref object finished*: bool next*: GetNext dispose*: IterDispose + ReservationsError* = object of CodexError ReserveFailedError* = object of ReservationsError ReleaseFailedError* = object of ReservationsError @@ -109,35 +116,44 @@ template withLock(lock, body) = if lock.locked: lock.release() - -proc new*(T: type Reservations, - repo: RepoStore): Reservations = - - T(availabilityLock: newAsyncLock(),repo: repo) +proc new*(T: type Reservations, repo: RepoStore): Reservations = + T(availabilityLock: newAsyncLock(), repo: repo) proc init*( - _: type Availability, - totalSize: UInt256, - freeSize: UInt256, - duration: UInt256, - minPrice: UInt256, - maxCollateral: UInt256): Availability = - + _: type Availability, + totalSize: UInt256, + freeSize: UInt256, + duration: UInt256, + minPrice: UInt256, + maxCollateral: 
UInt256, +): Availability = var id: array[32, byte] doAssert randomBytes(id) == 32 - Availability(id: AvailabilityId(id), totalSize:totalSize, freeSize: freeSize, duration: duration, minPrice: minPrice, maxCollateral: maxCollateral) + Availability( + id: AvailabilityId(id), + totalSize: totalSize, + freeSize: freeSize, + duration: duration, + minPrice: minPrice, + maxCollateral: maxCollateral, + ) proc init*( - _: type Reservation, - availabilityId: AvailabilityId, - size: UInt256, - requestId: RequestId, - slotIndex: UInt256 + _: type Reservation, + availabilityId: AvailabilityId, + size: UInt256, + requestId: RequestId, + slotIndex: UInt256, ): Reservation = - var id: array[32, byte] doAssert randomBytes(id) == 32 - Reservation(id: ReservationId(id), availabilityId: availabilityId, size: size, requestId: requestId, slotIndex: slotIndex) + Reservation( + id: ReservationId(id), + availabilityId: availabilityId, + size: size, + requestId: requestId, + slotIndex: slotIndex, + ) func toArray(id: SomeStorableId): array[32, byte] = array[32, byte](id) @@ -146,23 +162,26 @@ proc `==`*(x, y: AvailabilityId): bool {.borrow.} proc `==`*(x, y: ReservationId): bool {.borrow.} proc `==`*(x, y: Reservation): bool = x.id == y.id + proc `==`*(x, y: Availability): bool = x.id == y.id -proc `$`*(id: SomeStorableId): string = id.toArray.toHex +proc `$`*(id: SomeStorableId): string = + id.toArray.toHex proc toErr[E1: ref CatchableError, E2: ReservationsError]( - e1: E1, - _: type E2, - msg: string = e1.msg): ref E2 = - + e1: E1, _: type E2, msg: string = e1.msg +): ref E2 = return newException(E2, msg, e1) -logutils.formatIt(LogFormat.textLines, SomeStorableId): it.short0xHexLog -logutils.formatIt(LogFormat.json, SomeStorableId): it.to0xHexLog +logutils.formatIt(LogFormat.textLines, SomeStorableId): + it.short0xHexLog +logutils.formatIt(LogFormat.json, SomeStorableId): + it.to0xHexLog -proc `onAvailabilityAdded=`*(self: Reservations, - onAvailabilityAdded: OnAvailabilityAdded) = +proc `onAvailabilityAdded=`*( + self: Reservations, onAvailabilityAdded: OnAvailabilityAdded +) = self.onAvailabilityAdded = some onAvailabilityAdded func key*(id: AvailabilityId): ?!Key = @@ -179,24 +198,20 @@ func key*(availability: Availability): ?!Key = func key*(reservation: Reservation): ?!Key = return key(reservation.id, reservation.availabilityId) -func available*(self: Reservations): uint = self.repo.available.uint +func available*(self: Reservations): uint = + self.repo.available.uint func hasAvailable*(self: Reservations, bytes: uint): bool = self.repo.available(bytes.NBytes) -proc exists*( - self: Reservations, - key: Key): Future[bool] {.async.} = - +proc exists*(self: Reservations, key: Key): Future[bool] {.async.} = let exists = await self.repo.metaDs.ds.contains(key) return exists -proc getImpl( - self: Reservations, - key: Key): Future[?!seq[byte]] {.async.} = - +proc getImpl(self: Reservations, key: Key): Future[?!seq[byte]] {.async.} = if not await self.exists(key): - let err = newException(NotExistsError, "object with key " & $key & " does not exist") + let err = + newException(NotExistsError, "object with key " & $key & " does not exist") return failure(err) without serialized =? await self.repo.metaDs.ds.get(key), error: @@ -205,10 +220,8 @@ proc getImpl( return success serialized proc get*( - self: Reservations, - key: Key, - T: type SomeStorableObject): Future[?!T] {.async.} = - + self: Reservations, key: Key, T: type SomeStorableObject +): Future[?!T] {.async.} = without serialized =? 
await self.getImpl(key), error: return failure(error) @@ -217,27 +230,20 @@ proc get*( return success obj -proc updateImpl( - self: Reservations, - obj: SomeStorableObject): Future[?!void] {.async.} = - +proc updateImpl(self: Reservations, obj: SomeStorableObject): Future[?!void] {.async.} = trace "updating " & $(obj.type), id = obj.id without key =? obj.key, error: return failure(error) - if err =? (await self.repo.metaDs.ds.put( - key, - @(obj.toJson.toBytes) - )).errorOption: + if err =? (await self.repo.metaDs.ds.put(key, @(obj.toJson.toBytes))).errorOption: return failure(err.toErr(UpdateFailedError)) return success() proc updateAvailability( - self: Reservations, - obj: Availability): Future[?!void] {.async.} = - + self: Reservations, obj: Availability +): Future[?!void] {.async.} = logScope: availabilityId = obj.id @@ -269,11 +275,18 @@ proc updateAvailability( if oldAvailability.totalSize != obj.totalSize: trace "totalSize changed, updating repo reservation" if oldAvailability.totalSize < obj.totalSize: # storage added - if reserveErr =? (await self.repo.reserve((obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes)).errorOption: + if reserveErr =? ( + await self.repo.reserve( + (obj.totalSize - oldAvailability.totalSize).truncate(uint).NBytes + ) + ).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) - elif oldAvailability.totalSize > obj.totalSize: # storage removed - if reserveErr =? (await self.repo.release((oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes)).errorOption: + if reserveErr =? ( + await self.repo.release( + (oldAvailability.totalSize - obj.totalSize).truncate(uint).NBytes + ) + ).errorOption: return failure(reserveErr.toErr(ReleaseFailedError)) let res = await self.updateImpl(obj) @@ -296,21 +309,14 @@ proc updateAvailability( return res -proc update*( - self: Reservations, - obj: Reservation): Future[?!void] {.async.} = +proc update*(self: Reservations, obj: Reservation): Future[?!void] {.async.} = return await self.updateImpl(obj) -proc update*( - self: Reservations, - obj: Availability): Future[?!void] {.async.} = +proc update*(self: Reservations, obj: Availability): Future[?!void] {.async.} = withLock(self.availabilityLock): return await self.updateAvailability(obj) -proc delete( - self: Reservations, - key: Key): Future[?!void] {.async.} = - +proc delete(self: Reservations, key: Key): Future[?!void] {.async.} = trace "deleting object", key if not await self.exists(key): @@ -322,10 +328,8 @@ proc delete( return success() proc deleteReservation*( - self: Reservations, - reservationId: ReservationId, - availabilityId: AvailabilityId): Future[?!void] {.async.} = - + self: Reservations, reservationId: ReservationId, availabilityId: AvailabilityId +): Future[?!void] {.async.} = logScope: reservationId availabilityId @@ -365,24 +369,21 @@ proc deleteReservation*( # To delete, must not have any active sales. 
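`updateAvailability` above reserves or releases only the delta when `totalSize` changes. A simplified sketch of that bookkeeping; the real code goes through `RepoStore`, works on `UInt256`/`NBytes`, and returns `Result` values instead of raising, so this standalone model is purely illustrative:

```nim
type Repo = object
  quotaReserved: uint64

proc reserve(repo: var Repo, bytes: uint64) =
  repo.quotaReserved += bytes

proc release(repo: var Repo, bytes: uint64) =
  assert repo.quotaReserved >= bytes
  repo.quotaReserved -= bytes

proc onTotalSizeChanged(repo: var Repo, oldTotal, newTotal: uint64) =
  if oldTotal < newTotal: # storage added: reserve only the difference
    repo.reserve(newTotal - oldTotal)
  elif oldTotal > newTotal: # storage removed: release the difference
    repo.release(oldTotal - newTotal)

var repo = Repo(quotaReserved: 100)
repo.onTotalSizeChanged(100, 150)
assert repo.quotaReserved == 150
repo.onTotalSizeChanged(150, 120)
assert repo.quotaReserved == 120
```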
proc createAvailability*( - self: Reservations, - size: UInt256, - duration: UInt256, - minPrice: UInt256, - maxCollateral: UInt256): Future[?!Availability] {.async.} = - + self: Reservations, + size: UInt256, + duration: UInt256, + minPrice: UInt256, + maxCollateral: UInt256, +): Future[?!Availability] {.async.} = trace "creating availability", size, duration, minPrice, maxCollateral - let availability = Availability.init( - size, size, duration, minPrice, maxCollateral - ) + let availability = Availability.init(size, size, duration, minPrice, maxCollateral) let bytes = availability.freeSize.truncate(uint) if reserveErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) if updateErr =? (await self.update(availability)).errorOption: - # rollback the reserve trace "rolling back reserve" if rollbackErr =? (await self.repo.release(bytes.NBytes)).errorOption: @@ -394,13 +395,12 @@ proc createAvailability*( return success(availability) method createReservation*( - self: Reservations, - availabilityId: AvailabilityId, - slotSize: UInt256, - requestId: RequestId, - slotIndex: UInt256 + self: Reservations, + availabilityId: AvailabilityId, + slotSize: UInt256, + requestId: RequestId, + slotIndex: UInt256, ): Future[?!Reservation] {.async, base.} = - withLock(self.availabilityLock): without availabilityKey =? availabilityId.key, error: return failure(error) @@ -412,7 +412,8 @@ method createReservation*( if availability.freeSize < slotSize: let error = newException( BytesOutOfBoundsError, - "trying to reserve an amount of bytes that is greater than the total size of the Availability") + "trying to reserve an amount of bytes that is greater than the total size of the Availability", + ) return failure(error) trace "Creating reservation", availabilityId, slotSize, requestId, slotIndex @@ -446,11 +447,11 @@ method createReservation*( return success(reservation) proc returnBytesToAvailability*( - self: Reservations, - availabilityId: AvailabilityId, - reservationId: ReservationId, - bytes: UInt256): Future[?!void] {.async.} = - + self: Reservations, + availabilityId: AvailabilityId, + reservationId: ReservationId, + bytes: UInt256, +): Future[?!void] {.async.} = logScope: reservationId availabilityId @@ -467,14 +468,17 @@ proc returnBytesToAvailability*( let bytesToBeReturned = bytes - reservation.size if bytesToBeReturned == 0: - trace "No bytes are returned", requestSizeBytes = bytes, returningBytes = bytesToBeReturned + trace "No bytes are returned", + requestSizeBytes = bytes, returningBytes = bytesToBeReturned return success() - trace "Returning bytes", requestSizeBytes = bytes, returningBytes = bytesToBeReturned + trace "Returning bytes", + requestSizeBytes = bytes, returningBytes = bytesToBeReturned # First lets see if we can re-reserve the bytes, if the Repo's quota # is depleted then we will fail-fast as there is nothing to be done atm. - if reserveErr =? (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if reserveErr =? + (await self.repo.reserve(bytesToBeReturned.truncate(uint).NBytes)).errorOption: return failure(reserveErr.toErr(ReserveFailedError)) without availabilityKey =? availabilityId.key, error: @@ -487,9 +491,9 @@ proc returnBytesToAvailability*( # Update availability with returned size if updateErr =? (await self.updateAvailability(availability)).errorOption: - trace "Rolling back returning bytes" - if rollbackErr =? 
(await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption: + if rollbackErr =? + (await self.repo.release(bytesToBeReturned.truncate(uint).NBytes)).errorOption: rollbackErr.parent = updateErr return failure(rollbackErr) @@ -498,11 +502,11 @@ proc returnBytesToAvailability*( return success() proc release*( - self: Reservations, - reservationId: ReservationId, - availabilityId: AvailabilityId, - bytes: uint): Future[?!void] {.async.} = - + self: Reservations, + reservationId: ReservationId, + availabilityId: AvailabilityId, + bytes: uint, +): Future[?!void] {.async.} = logScope: topics = "release" bytes @@ -520,7 +524,8 @@ proc release*( if reservation.size < bytes.u256: let error = newException( BytesOutOfBoundsError, - "trying to release an amount of bytes that is greater than the total size of the Reservation") + "trying to release an amount of bytes that is greater than the total size of the Reservation", + ) return failure(error) if releaseErr =? (await self.repo.release(bytes.NBytes)).errorOption: @@ -530,7 +535,6 @@ proc release*( # persist partially used Reservation with updated size if err =? (await self.update(reservation)).errorOption: - # rollback release if an update error encountered trace "rolling back release" if rollbackErr =? (await self.repo.reserve(bytes.NBytes)).errorOption: @@ -545,11 +549,8 @@ iterator items(self: StorableIter): Future[?seq[byte]] = yield self.next() proc storables( - self: Reservations, - T: type SomeStorableObject, - queryKey: Key = ReservationsKey + self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey ): Future[?!StorableIter] {.async.} = - var iter = StorableIter() let query = Query.init(queryKey) when T is Availability: @@ -570,12 +571,8 @@ proc storables( proc next(): Future[?seq[byte]] {.async.} = await idleAsync() iter.finished = results.finished - if not results.finished and - res =? (await results.next()) and - res.data.len > 0 and - key =? res.key and - key.namespaces.len == defaultKey.namespaces.len: - + if not results.finished and res =? (await results.next()) and res.data.len > 0 and + key =? res.key and key.namespaces.len == defaultKey.namespaces.len: return some res.data return none seq[byte] @@ -588,11 +585,8 @@ proc storables( return success iter proc allImpl( - self: Reservations, - T: type SomeStorableObject, - queryKey: Key = ReservationsKey + self: Reservations, T: type SomeStorableObject, queryKey: Key = ReservationsKey ): Future[?!seq[T]] {.async.} = - var ret: seq[T] = @[] without storables =? (await self.storables(T, queryKey)), error: @@ -604,24 +598,18 @@ proc allImpl( without obj =? T.fromJson(bytes), error: error "json deserialization error", - json = string.fromBytes(bytes), - error = error.msg + json = string.fromBytes(bytes), error = error.msg continue ret.add obj return success(ret) -proc all*( - self: Reservations, - T: type SomeStorableObject -): Future[?!seq[T]] {.async.} = +proc all*(self: Reservations, T: type SomeStorableObject): Future[?!seq[T]] {.async.} = return await self.allImpl(T) proc all*( - self: Reservations, - T: type SomeStorableObject, - availabilityId: AvailabilityId + self: Reservations, T: type SomeStorableObject, availabilityId: AvailabilityId ): Future[?!seq[T]] {.async.} = without key =? 
(ReservationsKey / $availabilityId): return failure("no key") @@ -629,29 +617,26 @@ proc all*( return await self.allImpl(T, key) proc findAvailability*( - self: Reservations, - size, duration, minPrice, collateral: UInt256 + self: Reservations, size, duration, minPrice, collateral: UInt256 ): Future[?Availability] {.async.} = - without storables =? (await self.storables(Availability)), e: error "failed to get all storables", error = e.msg return none Availability for item in storables.items: - if bytes =? (await item) and - availability =? Availability.fromJson(bytes): - - if size <= availability.freeSize and - duration <= availability.duration and - collateral <= availability.maxCollateral and - minPrice >= availability.minPrice: - + if bytes =? (await item) and availability =? Availability.fromJson(bytes): + if size <= availability.freeSize and duration <= availability.duration and + collateral <= availability.maxCollateral and minPrice >= availability.minPrice: trace "availability matched", id = availability.id, - size, availFreeSize = availability.freeSize, - duration, availDuration = availability.duration, - minPrice, availMinPrice = availability.minPrice, - collateral, availMaxCollateral = availability.maxCollateral + size, + availFreeSize = availability.freeSize, + duration, + availDuration = availability.duration, + minPrice, + availMinPrice = availability.minPrice, + collateral, + availMaxCollateral = availability.maxCollateral # TODO: As soon as we're on ARC-ORC, we can use destructors # to automatically dispose our iterators when they fall out of scope. @@ -663,7 +648,11 @@ proc findAvailability*( trace "availability did not match", id = availability.id, - size, availFreeSize = availability.freeSize, - duration, availDuration = availability.duration, - minPrice, availMinPrice = availability.minPrice, - collateral, availMaxCollateral = availability.maxCollateral + size, + availFreeSize = availability.freeSize, + duration, + availDuration = availability.duration, + minPrice, + availMinPrice = availability.minPrice, + collateral, + availMaxCollateral = availability.maxCollateral diff --git a/codex/sales/salesagent.nim b/codex/sales/salesagent.nim index e52e2fe0e..8b8c44ea6 100644 --- a/codex/sales/salesagent.nim +++ b/codex/sales/salesagent.nim @@ -25,27 +25,26 @@ type onCleanUp*: OnCleanUp onFilled*: ?OnFilled - OnCleanUp* = proc (returnBytes = false, reprocessSlot = false): Future[void] {.gcsafe, upraises: [].} - OnFilled* = proc(request: StorageRequest, - slotIndex: UInt256) {.gcsafe, upraises: [].} + OnCleanUp* = proc(returnBytes = false, reprocessSlot = false): Future[void] {. 
+ gcsafe, upraises: [] + .} + OnFilled* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} SalesAgentError = object of CodexError AllSlotsFilledError* = object of SalesAgentError func `==`*(a, b: SalesAgent): bool = - a.data.requestId == b.data.requestId and - a.data.slotIndex == b.data.slotIndex - -proc newSalesAgent*(context: SalesContext, - requestId: RequestId, - slotIndex: UInt256, - request: ?StorageRequest): SalesAgent = + a.data.requestId == b.data.requestId and a.data.slotIndex == b.data.slotIndex + +proc newSalesAgent*( + context: SalesContext, + requestId: RequestId, + slotIndex: UInt256, + request: ?StorageRequest, +): SalesAgent = var agent = SalesAgent.new() agent.context = context - agent.data = SalesData( - requestId: requestId, - slotIndex: slotIndex, - request: request) + agent.data = SalesData(requestId: requestId, slotIndex: slotIndex, request: request) return agent proc retrieveRequest*(agent: SalesAgent) {.async.} = @@ -62,6 +61,7 @@ proc retrieveRequestState*(agent: SalesAgent): Future[?RequestState] {.async.} = func state*(agent: SalesAgent): ?string = proc description(state: State): string = $state + agent.query(description) proc subscribeCancellation(agent: SalesAgent) {.async.} = @@ -77,7 +77,7 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = while true: let deadline = max(clock.now, expiry) + 1 - trace "Waiting for request to be cancelled", now=clock.now, expiry=deadline + trace "Waiting for request to be cancelled", now = clock.now, expiry = deadline await clock.waitUntil(deadline) without state =? await agent.retrieveRequestState(): @@ -93,27 +93,29 @@ proc subscribeCancellation(agent: SalesAgent) {.async.} = of RequestState.Started, RequestState.Finished, RequestState.Failed: break - debug "The request is not yet canceled, even though it should be. Waiting for some more time.", currentState = state, now=clock.now + debug "The request is not yet canceled, even though it should be. Waiting for some more time.", + currentState = state, now = clock.now data.cancelled = onCancelled() -method onFulfilled*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} = - if agent.data.requestId == requestId and - not agent.data.cancelled.isNil: +method onFulfilled*( + agent: SalesAgent, requestId: RequestId +) {.base, gcsafe, upraises: [].} = + if agent.data.requestId == requestId and not agent.data.cancelled.isNil: agent.data.cancelled.cancelSoon() -method onFailed*(agent: SalesAgent, requestId: RequestId) {.base, gcsafe, upraises: [].} = +method onFailed*( + agent: SalesAgent, requestId: RequestId +) {.base, gcsafe, upraises: [].} = without request =? 
agent.data.request: return if agent.data.requestId == requestId: agent.schedule(failedEvent(request)) -method onSlotFilled*(agent: SalesAgent, - requestId: RequestId, - slotIndex: UInt256) {.base, gcsafe, upraises: [].} = - - if agent.data.requestId == requestId and - agent.data.slotIndex == slotIndex: +method onSlotFilled*( + agent: SalesAgent, requestId: RequestId, slotIndex: UInt256 +) {.base, gcsafe, upraises: [].} = + if agent.data.requestId == requestId and agent.data.slotIndex == slotIndex: agent.schedule(slotFilledEvent(requestId, slotIndex)) proc subscribe*(agent: SalesAgent) {.async.} = diff --git a/codex/sales/salescontext.nim b/codex/sales/salescontext.nim index 199aa5fb6..bb0b5dc9e 100644 --- a/codex/sales/salescontext.nim +++ b/codex/sales/salescontext.nim @@ -24,12 +24,14 @@ type simulateProofFailures*: int BlocksCb* = proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, raises: [].} - OnStore* = proc(request: StorageRequest, - slot: UInt256, - blocksCb: BlocksCb): Future[?!void] {.gcsafe, upraises: [].} - OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.gcsafe, upraises: [].} - OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.gcsafe, upraises: [].} - OnClear* = proc(request: StorageRequest, - slotIndex: UInt256) {.gcsafe, upraises: [].} - OnSale* = proc(request: StorageRequest, - slotIndex: UInt256) {.gcsafe, upraises: [].} + OnStore* = proc( + request: StorageRequest, slot: UInt256, blocksCb: BlocksCb + ): Future[?!void] {.gcsafe, upraises: [].} + OnProve* = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {. + gcsafe, upraises: [] + .} + OnExpiryUpdate* = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {. + gcsafe, upraises: [] + .} + OnClear* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} + OnSale* = proc(request: StorageRequest, slotIndex: UInt256) {.gcsafe, upraises: [].} diff --git a/codex/sales/salesdata.nim b/codex/sales/salesdata.nim index 7fd561492..995c7a4b8 100644 --- a/codex/sales/salesdata.nim +++ b/codex/sales/salesdata.nim @@ -3,11 +3,10 @@ import ../contracts/requests import ../market import ./reservations -type - SalesData* = ref object - requestId*: RequestId - ask*: StorageAsk - request*: ?StorageRequest - slotIndex*: UInt256 - cancelled*: Future[void] - reservation*: ?Reservation +type SalesData* = ref object + requestId*: RequestId + ask*: StorageAsk + request*: ?StorageRequest + slotIndex*: UInt256 + cancelled*: Future[void] + reservation*: ?Reservation diff --git a/codex/sales/slotqueue.nim b/codex/sales/slotqueue.nim index f5685e34a..80ca08271 100644 --- a/codex/sales/slotqueue.nim +++ b/codex/sales/slotqueue.nim @@ -17,7 +17,7 @@ logScope: type OnProcessSlot* = - proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises:[].} + proc(item: SlotQueueItem, done: Future[void]): Future[void] {.gcsafe, upraises: [].} # Non-ref obj copies value when assigned, preventing accidental modification # of values which could cause an incorrect order (eg @@ -39,7 +39,7 @@ type # don't need to -1 to prevent overflow when adding 1 (to always allow push) # because AsyncHeapQueue size is of type `int`, which is larger than `uint16` - SlotQueueSize = range[1'u16..uint16.high] + SlotQueueSize = range[1'u16 .. 
uint16.high] SlotQueue* = ref object maxWorkers: int @@ -69,10 +69,12 @@ const DefaultMaxWorkers = 3 const DefaultMaxSize = 128'u16 proc profitability(item: SlotQueueItem): UInt256 = - StorageAsk(collateral: item.collateral, - duration: item.duration, - reward: item.reward, - slotSize: item.slotSize).pricePerSlot + StorageAsk( + collateral: item.collateral, + duration: item.duration, + reward: item.reward, + slotSize: item.slotSize, + ).pricePerSlot proc `<`*(a, b: SlotQueueItem): bool = # for A to have a higher priority than B (in a min queue), A must be less than @@ -102,13 +104,13 @@ proc `<`*(a, b: SlotQueueItem): bool = return scoreA > scoreB proc `==`*(a, b: SlotQueueItem): bool = - a.requestId == b.requestId and - a.slotIndex == b.slotIndex - -proc new*(_: type SlotQueue, - maxWorkers = DefaultMaxWorkers, - maxSize: SlotQueueSize = DefaultMaxSize): SlotQueue = + a.requestId == b.requestId and a.slotIndex == b.slotIndex +proc new*( + _: type SlotQueue, + maxWorkers = DefaultMaxWorkers, + maxSize: SlotQueueSize = DefaultMaxSize, +): SlotQueue = if maxWorkers <= 0: raise newException(ValueError, "maxWorkers must be positive") if maxWorkers.uint16 > maxSize: @@ -121,23 +123,22 @@ proc new*(_: type SlotQueue, queue: newAsyncHeapQueue[SlotQueueItem](maxSize.int + 1), running: false, trackedFutures: TrackedFutures.new(), - unpaused: newAsyncEvent() + unpaused: newAsyncEvent(), ) # avoid instantiating `workers` in constructor to avoid side effects in # `newAsyncQueue` procedure proc init(_: type SlotQueueWorker): SlotQueueWorker = - SlotQueueWorker( - doneProcessing: newFuture[void]("slotqueue.worker.processing") - ) - -proc init*(_: type SlotQueueItem, - requestId: RequestId, - slotIndex: uint16, - ask: StorageAsk, - expiry: UInt256, - seen = false): SlotQueueItem = - + SlotQueueWorker(doneProcessing: newFuture[void]("slotqueue.worker.processing")) + +proc init*( + _: type SlotQueueItem, + requestId: RequestId, + slotIndex: uint16, + ask: StorageAsk, + expiry: UInt256, + seen = false, +): SlotQueueItem = SlotQueueItem( requestId: requestId, slotIndex: slotIndex, @@ -146,28 +147,22 @@ proc init*(_: type SlotQueueItem, reward: ask.reward, collateral: ask.collateral, expiry: expiry, - seen: seen + seen: seen, ) -proc init*(_: type SlotQueueItem, - request: StorageRequest, - slotIndex: uint16): SlotQueueItem = - - SlotQueueItem.init(request.id, - slotIndex, - request.ask, - request.expiry) - -proc init*(_: type SlotQueueItem, - requestId: RequestId, - ask: StorageAsk, - expiry: UInt256): seq[SlotQueueItem] = +proc init*( + _: type SlotQueueItem, request: StorageRequest, slotIndex: uint16 +): SlotQueueItem = + SlotQueueItem.init(request.id, slotIndex, request.ask, request.expiry) +proc init*( + _: type SlotQueueItem, requestId: RequestId, ask: StorageAsk, expiry: UInt256 +): seq[SlotQueueItem] = if not ask.slots.inRange: raise newException(SlotsOutOfRangeError, "Too many slots") var i = 0'u16 - proc initSlotQueueItem: SlotQueueItem = + proc initSlotQueueItem(): SlotQueueItem = let item = SlotQueueItem.init(requestId, i, ask, expiry) inc i return item @@ -176,37 +171,54 @@ proc init*(_: type SlotQueueItem, Rng.instance.shuffle(items) return items -proc init*(_: type SlotQueueItem, - request: StorageRequest): seq[SlotQueueItem] = - +proc init*(_: type SlotQueueItem, request: StorageRequest): seq[SlotQueueItem] = return SlotQueueItem.init(request.id, request.ask, request.expiry) proc inRange*(val: SomeUnsignedInt): bool = - val.uint16 in SlotQueueSize.low..SlotQueueSize.high + val.uint16 in 
SlotQueueSize.low .. SlotQueueSize.high + +proc requestId*(self: SlotQueueItem): RequestId = + self.requestId + +proc slotIndex*(self: SlotQueueItem): uint16 = + self.slotIndex -proc requestId*(self: SlotQueueItem): RequestId = self.requestId -proc slotIndex*(self: SlotQueueItem): uint16 = self.slotIndex -proc slotSize*(self: SlotQueueItem): UInt256 = self.slotSize -proc duration*(self: SlotQueueItem): UInt256 = self.duration -proc reward*(self: SlotQueueItem): UInt256 = self.reward -proc collateral*(self: SlotQueueItem): UInt256 = self.collateral -proc seen*(self: SlotQueueItem): bool = self.seen +proc slotSize*(self: SlotQueueItem): UInt256 = + self.slotSize -proc running*(self: SlotQueue): bool = self.running +proc duration*(self: SlotQueueItem): UInt256 = + self.duration -proc len*(self: SlotQueue): int = self.queue.len +proc reward*(self: SlotQueueItem): UInt256 = + self.reward -proc size*(self: SlotQueue): int = self.queue.size - 1 +proc collateral*(self: SlotQueueItem): UInt256 = + self.collateral -proc paused*(self: SlotQueue): bool = not self.unpaused.isSet +proc seen*(self: SlotQueueItem): bool = + self.seen -proc `$`*(self: SlotQueue): string = $self.queue +proc running*(self: SlotQueue): bool = + self.running + +proc len*(self: SlotQueue): int = + self.queue.len + +proc size*(self: SlotQueue): int = + self.queue.size - 1 + +proc paused*(self: SlotQueue): bool = + not self.unpaused.isSet + +proc `$`*(self: SlotQueue): string = + $self.queue proc `onProcessSlot=`*(self: SlotQueue, onProcessSlot: OnProcessSlot) = self.onProcessSlot = some onProcessSlot proc activeWorkers*(self: SlotQueue): int = - if not self.running: return 0 + if not self.running: + return 0 # active = capacity - available self.maxWorkers - self.workers.len @@ -222,10 +234,9 @@ proc unpause*(self: SlotQueue) = # set unpaused flag to true -- unblocks coroutines waiting on unpaused.wait() self.unpaused.fire() -proc populateItem*(self: SlotQueue, - requestId: RequestId, - slotIndex: uint16): ?SlotQueueItem = - +proc populateItem*( + self: SlotQueue, requestId: RequestId, slotIndex: uint16 +): ?SlotQueueItem = trace "populate item, items in queue", len = self.queue.len for item in self.queue.items: trace "populate item search", itemRequestId = item.requestId, requestId @@ -237,12 +248,11 @@ proc populateItem*(self: SlotQueue, duration: item.duration, reward: item.reward, collateral: item.collateral, - expiry: item.expiry + expiry: item.expiry, ) return none SlotQueueItem proc push*(self: SlotQueue, item: SlotQueueItem): ?!void = - logScope: requestId = item.requestId slotIndex = item.slotIndex @@ -330,9 +340,9 @@ proc addWorker(self: SlotQueue): ?!void = return success() -proc dispatch(self: SlotQueue, - worker: SlotQueueWorker, - item: SlotQueueItem) {.async: (raises: []).} = +proc dispatch( + self: SlotQueue, worker: SlotQueueWorker, item: SlotQueueItem +) {.async: (raises: []).} = logScope: requestId = item.requestId slotIndex = item.slotIndex @@ -349,10 +359,8 @@ proc dispatch(self: SlotQueue, if err =? 
self.addWorker().errorOption: raise err # catch below - except QueueNotRunningError as e: - info "could not re-add worker to worker queue, queue not running", - error = e.msg + info "could not re-add worker to worker queue, queue not running", error = e.msg except CancelledError: # do not bubble exception up as it is called with `asyncSpawn` which would # convert the exception into a `FutureDefect` @@ -380,7 +388,6 @@ proc clearSeenFlags*(self: SlotQueue) = trace "all 'seen' flags cleared" proc run(self: SlotQueue) {.async: (raises: []).} = - while self.running: try: if self.paused: @@ -389,7 +396,8 @@ proc run(self: SlotQueue) {.async: (raises: []).} = # block until unpaused is true/fired, ie wait for queue to be unpaused await self.unpaused.wait() - let worker = await self.workers.popFirst() # if workers saturated, wait here for new workers + let worker = + await self.workers.popFirst() # if workers saturated, wait here for new workers let item = await self.queue.pop() # if queue empty, wait here for new items logScope: @@ -442,7 +450,7 @@ proc start*(self: SlotQueue) = # Add initial workers to the `AsyncHeapQueue`. Once a worker has completed its # task, a new worker will be pushed to the queue - for i in 0.. 0: info "Proving with failure rate", rate = context.simulateProofFailures - return some State(SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures)) + return some State( + SaleProvingSimulated(failEveryNProofs: context.simulateProofFailures) + ) return some State(SaleProving()) - else: let error = newException(HostMismatchError, "Slot filled by other host") return some State(SaleErrored(error: error)) diff --git a/codex/sales/states/filling.nim b/codex/sales/states/filling.nim index ce2e53f57..a49dbb432 100644 --- a/codex/sales/states/filling.nim +++ b/codex/sales/states/filling.nim @@ -13,11 +13,11 @@ import ./errored logScope: topics = "marketplace sales filling" -type - SaleFilling* = ref object of ErrorHandlingState - proof*: Groth16Proof +type SaleFilling* = ref object of ErrorHandlingState + proof*: Groth16Proof -method `$`*(state: SaleFilling): string = "SaleFilling" +method `$`*(state: SaleFilling): string = + "SaleFilling" method onCancelled*(state: SaleFilling, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -28,7 +28,7 @@ method onFailed*(state: SaleFilling, request: StorageRequest): ?State = method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = let data = SalesAgent(machine).data let market = SalesAgent(machine).context.market - without (fullCollateral =? data.request.?ask.?collateral): + without (fullCollateral =? data.request .? ask .? 
collateral): raiseAssert "Request not set" logScope: @@ -41,7 +41,8 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = if slotState == SlotState.Repair: # When repairing, the node gets a "discount" on the collateral that it needs to put down let repairRewardPercentage = (await market.repairRewardPercentage).u256 - collateral = fullCollateral - ((fullCollateral * repairRewardPercentage)).div(100.u256) + collateral = + fullCollateral - ((fullCollateral * repairRewardPercentage)).div(100.u256) else: collateral = fullCollateral @@ -51,9 +52,9 @@ method run(state: SaleFilling, machine: Machine): Future[?State] {.async.} = except MarketError as e: if e.msg.contains "Slot is not free": debug "Slot is already filled, ignoring slot" - return some State( SaleIgnored(reprocessSlot: false, returnBytes: true) ) + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) else: - return some State( SaleErrored(error: e) ) + return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the ErrorHandlingState return some State(SaleFilled()) diff --git a/codex/sales/states/finished.nim b/codex/sales/states/finished.nim index 59e9244c1..6fcd0cc0e 100644 --- a/codex/sales/states/finished.nim +++ b/codex/sales/states/finished.nim @@ -10,10 +10,10 @@ import ./failed logScope: topics = "marketplace sales finished" -type - SaleFinished* = ref object of ErrorHandlingState +type SaleFinished* = ref object of ErrorHandlingState -method `$`*(state: SaleFinished): string = "SaleFinished" +method `$`*(state: SaleFinished): string = + "SaleFinished" method onCancelled*(state: SaleFinished, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -28,7 +28,8 @@ method run*(state: SaleFinished, machine: Machine): Future[?State] {.async.} = without request =? data.request: raiseAssert "no sale request" - info "Slot finished and paid out", requestId = data.requestId, slotIndex = data.slotIndex + info "Slot finished and paid out", + requestId = data.requestId, slotIndex = data.slotIndex if onCleanUp =? agent.onCleanUp: await onCleanUp() diff --git a/codex/sales/states/ignored.nim b/codex/sales/states/ignored.nim index 93346fdc4..b915bff50 100644 --- a/codex/sales/states/ignored.nim +++ b/codex/sales/states/ignored.nim @@ -11,16 +11,17 @@ logScope: # Ignored slots could mean there was no availability or that the slot could # not be reserved. -type - SaleIgnored* = ref object of ErrorHandlingState - reprocessSlot*: bool # readd slot to queue with `seen` flag - returnBytes*: bool # return unreleased bytes from Reservation to Availability +type SaleIgnored* = ref object of ErrorHandlingState + reprocessSlot*: bool # readd slot to queue with `seen` flag + returnBytes*: bool # return unreleased bytes from Reservation to Availability -method `$`*(state: SaleIgnored): string = "SaleIgnored" +method `$`*(state: SaleIgnored): string = + "SaleIgnored" method run*(state: SaleIgnored, machine: Machine): Future[?State] {.async.} = let agent = SalesAgent(machine) if onCleanUp =?
agent.onCleanUp: - await onCleanUp(reprocessSlot = state.reprocessSlot, - returnBytes = state.returnBytes) + await onCleanUp( + reprocessSlot = state.reprocessSlot, returnBytes = state.returnBytes + ) diff --git a/codex/sales/states/initialproving.nim b/codex/sales/states/initialproving.nim index 4a5b85151..bc9ce6b66 100644 --- a/codex/sales/states/initialproving.nim +++ b/codex/sales/states/initialproving.nim @@ -12,10 +12,10 @@ import ./failed logScope: topics = "marketplace sales initial-proving" -type - SaleInitialProving* = ref object of ErrorHandlingState +type SaleInitialProving* = ref object of ErrorHandlingState -method `$`*(state: SaleInitialProving): string = "SaleInitialProving" +method `$`*(state: SaleInitialProving): string = + "SaleInitialProving" method onCancelled*(state: SaleInitialProving, request: StorageRequest): ?State = return some State(SaleCancelled()) diff --git a/codex/sales/states/payout.nim b/codex/sales/states/payout.nim index 5c8c28590..f8312a4fd 100644 --- a/codex/sales/states/payout.nim +++ b/codex/sales/states/payout.nim @@ -10,10 +10,10 @@ import ./finished logScope: topics = "marketplace sales payout" -type - SalePayout* = ref object of ErrorHandlingState +type SalePayout* = ref object of ErrorHandlingState -method `$`*(state: SalePayout): string = "SalePayout" +method `$`*(state: SalePayout): string = + "SalePayout" method onCancelled*(state: SalePayout, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -29,7 +29,8 @@ method run(state: SalePayout, machine: Machine): Future[?State] {.async.} = raiseAssert "no sale request" let slot = Slot(request: request, slotIndex: data.slotIndex) - debug "Collecting finished slot's reward", requestId = data.requestId, slotIndex = data.slotIndex + debug "Collecting finished slot's reward", + requestId = data.requestId, slotIndex = data.slotIndex await market.freeSlot(slot.id) return some State(SaleFinished()) diff --git a/codex/sales/states/preparing.nim b/codex/sales/states/preparing.nim index 169eb9641..0b34f6404 100644 --- a/codex/sales/states/preparing.nim +++ b/codex/sales/states/preparing.nim @@ -14,15 +14,17 @@ import ./ignored import ./slotreserving import ./errored -declareCounter(codex_reservations_availability_mismatch, "codex reservations availability_mismatch") +declareCounter( + codex_reservations_availability_mismatch, "codex reservations availability_mismatch" +) -type - SalePreparing* = ref object of ErrorHandlingState +type SalePreparing* = ref object of ErrorHandlingState logScope: topics = "marketplace sales preparing" -method `$`*(state: SalePreparing): string = "SalePreparing" +method `$`*(state: SalePreparing): string = + "SalePreparing" method onCancelled*(state: SalePreparing, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -30,8 +32,9 @@ method onCancelled*(state: SalePreparing, request: StorageRequest): ?State = method onFailed*(state: SalePreparing, request: StorageRequest): ?State = return some State(SaleFailed()) -method onSlotFilled*(state: SalePreparing, requestId: RequestId, - slotIndex: UInt256): ?State = +method onSlotFilled*( + state: SalePreparing, requestId: RequestId, slotIndex: UInt256 +): ?State = return some State(SaleFilled()) method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} = @@ -64,29 +67,27 @@ method run*(state: SalePreparing, machine: Machine): Future[?State] {.async.} = # availability was checked for this slot when it entered the queue, however # check to ensure that there is still
availability as they may have # changed since being added (other slots may have been processed in that time) - without availability =? await reservations.findAvailability( - request.ask.slotSize, - request.ask.duration, - request.ask.pricePerSlot, - request.ask.collateral): + without availability =? + await reservations.findAvailability( + request.ask.slotSize, request.ask.duration, request.ask.pricePerSlot, + request.ask.collateral, + ): debug "No availability found for request, ignoring" return some State(SaleIgnored(reprocessSlot: true)) info "Availability found for request, creating reservation" - without reservation =? await reservations.createReservation( - availability.id, - request.ask.slotSize, - request.id, - data.slotIndex - ), error: + without reservation =? + await reservations.createReservation( + availability.id, request.ask.slotSize, request.id, data.slotIndex + ), error: trace "Creation of reservation failed" # Race condition: # reservations.findAvailability (line 64) is no guarantee. You can never know for certain that the reservation can be created until after you have it. # Should createReservation fail because there's no space, we proceed to SaleIgnored. if error of BytesOutOfBoundsError: - # Lets monitor how often this happen and if it is often we can make it more inteligent to handle it + # Let's monitor how often this happens; if it becomes frequent, we can handle it more intelligently codex_reservations_availability_mismatch.inc() return some State(SaleIgnored(reprocessSlot: true)) diff --git a/codex/sales/states/proving.nim b/codex/sales/states/proving.nim index 76180ab28..0ee2ed603 100644 --- a/codex/sales/states/proving.nim +++ b/codex/sales/states/proving.nim @@ -22,12 +22,12 @@ type loop: Future[void] method prove*( - state: SaleProving, - slot: Slot, - challenge: ProofChallenge, - onProve: OnProve, - market: Market, - currentPeriod: Period + state: SaleProving, + slot: Slot, + challenge: ProofChallenge, + onProve: OnProve, + market: Market, + currentPeriod: Period, ) {.base, async.} = try: without proof =?
(await onProve(slot, challenge)), err: @@ -43,14 +43,13 @@ method prove*( error "Submitting proof failed", msg = e.msgDetail proc proveLoop( - state: SaleProving, - market: Market, - clock: Clock, - request: StorageRequest, - slotIndex: UInt256, - onProve: OnProve + state: SaleProving, + market: Market, + clock: Clock, + request: StorageRequest, + slotIndex: UInt256, + onProve: OnProve, ) {.async.} = - let slot = Slot(request: request, slotIndex: slotIndex) let slotId = slot.id @@ -76,7 +75,8 @@ proc proveLoop( case slotState of SlotState.Filled: debug "Proving for new period", period = currentPeriod - if (await market.isProofRequired(slotId)) or (await market.willProofBeRequired(slotId)): + if (await market.isProofRequired(slotId)) or + (await market.willProofBeRequired(slotId)): let challenge = await market.getChallenge(slotId) debug "Proof is required", period = currentPeriod, challenge = challenge await state.prove(slot, challenge, onProve, market, currentPeriod) @@ -100,7 +100,8 @@ proc proveLoop( debug "waiting until next period" await waitUntilPeriod(currentPeriod + 1) -method `$`*(state: SaleProving): string = "SaleProving" +method `$`*(state: SaleProving): string = + "SaleProving" method onCancelled*(state: SaleProving, request: StorageRequest): ?State = # state.loop cancellation happens automatically when run is cancelled due to diff --git a/codex/sales/states/provingsimulated.nim b/codex/sales/states/provingsimulated.nim index 20fb4ad66..e60169bcd 100644 --- a/codex/sales/states/provingsimulated.nim +++ b/codex/sales/states/provingsimulated.nim @@ -12,21 +12,26 @@ when codex_enable_proof_failures: import ./proving logScope: - topics = "marketplace sales simulated-proving" + topics = "marketplace sales simulated-proving" - type - SaleProvingSimulated* = ref object of SaleProving - failEveryNProofs*: int - proofCount: int + type SaleProvingSimulated* = ref object of SaleProving + failEveryNProofs*: int + proofCount: int proc onSubmitProofError(error: ref CatchableError, period: UInt256, slotId: SlotId) = error "Submitting invalid proof failed", period, slotId, msg = error.msgDetail - method prove*(state: SaleProvingSimulated, slot: Slot, challenge: ProofChallenge, onProve: OnProve, market: Market, currentPeriod: Period) {.async.} = + method prove*( + state: SaleProvingSimulated, + slot: Slot, + challenge: ProofChallenge, + onProve: OnProve, + market: Market, + currentPeriod: Period, + ) {.async.} = trace "Processing proving in simulated mode" state.proofCount += 1 - if state.failEveryNProofs > 0 and - state.proofCount mod state.failEveryNProofs == 0: + if state.failEveryNProofs > 0 and state.proofCount mod state.failEveryNProofs == 0: state.proofCount = 0 try: @@ -40,4 +45,6 @@ when codex_enable_proof_failures: except CatchableError as e: onSubmitProofError(e, currentPeriod, slot.id) else: - await procCall SaleProving(state).prove(slot, challenge, onProve, market, currentPeriod) + await procCall SaleProving(state).prove( + slot, challenge, onProve, market, currentPeriod + ) diff --git a/codex/sales/states/slotreserving.nim b/codex/sales/states/slotreserving.nim index 670013ab6..d856e9eeb 100644 --- a/codex/sales/states/slotreserving.nim +++ b/codex/sales/states/slotreserving.nim @@ -12,13 +12,13 @@ import ./ignored import ./downloading import ./errored -type - SaleSlotReserving* = ref object of ErrorHandlingState +type SaleSlotReserving* = ref object of ErrorHandlingState logScope: topics = "marketplace sales reserving" -method `$`*(state: SaleSlotReserving): string = 
"SaleSlotReserving" +method `$`*(state: SaleSlotReserving): string = + "SaleSlotReserving" method onCancelled*(state: SaleSlotReserving, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -44,17 +44,15 @@ method run*(state: SaleSlotReserving, machine: Machine): Future[?State] {.async. except MarketError as e: if e.msg.contains "Reservation not allowed": debug "Slot cannot be reserved, ignoring", error = e.msg - return some State( SaleIgnored(reprocessSlot: false, returnBytes: true) ) + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) else: - return some State( SaleErrored(error: e) ) + return some State(SaleErrored(error: e)) # other CatchableErrors are handled "automatically" by the ErrorHandlingState trace "Slot successfully reserved" - return some State( SaleDownloading() ) - + return some State(SaleDownloading()) else: # do not re-add this slot to the queue, and return bytes from Reservation to # the Availability debug "Slot cannot be reserved, ignoring" - return some State( SaleIgnored(reprocessSlot: false, returnBytes: true) ) - + return some State(SaleIgnored(reprocessSlot: false, returnBytes: true)) diff --git a/codex/sales/states/unknown.nim b/codex/sales/states/unknown.nim index d497cba3e..3034129a0 100644 --- a/codex/sales/states/unknown.nim +++ b/codex/sales/states/unknown.nim @@ -17,7 +17,8 @@ type SaleUnknownError* = object of CatchableError UnexpectedSlotError* = object of SaleUnknownError -method `$`*(state: SaleUnknown): string = "SaleUnknown" +method `$`*(state: SaleUnknown): string = + "SaleUnknown" method onCancelled*(state: SaleUnknown, request: StorageRequest): ?State = return some State(SaleCancelled()) @@ -38,8 +39,8 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} = case slotState of SlotState.Free: - let error = newException(UnexpectedSlotError, - "Slot state on chain should not be 'free'") + let error = + newException(UnexpectedSlotError, "Slot state on chain should not be 'free'") return some State(SaleErrored(error: error)) of SlotState.Filled: return some State(SaleFilled()) @@ -52,6 +53,7 @@ method run*(state: SaleUnknown, machine: Machine): Future[?State] {.async.} = of SlotState.Cancelled: return some State(SaleCancelled()) of SlotState.Repair: - let error = newException(SlotFreedError, - "Slot was forcible freed and host was removed from its hosting") + let error = newException( + SlotFreedError, "Slot was forcible freed and host was removed from its hosting" + ) return some State(SaleErrored(error: error)) diff --git a/codex/slots/builder.nim b/codex/slots/builder.nim index 9df03f166..25844db63 100644 --- a/codex/slots/builder.nim +++ b/codex/slots/builder.nim @@ -5,5 +5,4 @@ import ../merkletree export builder, converters -type - Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash] +type Poseidon2Builder* = SlotsBuilder[Poseidon2Tree, Poseidon2Hash] diff --git a/codex/slots/builder/builder.nim b/codex/slots/builder/builder.nim index 8652350e5..30332f1c0 100644 --- a/codex/slots/builder/builder.nim +++ b/codex/slots/builder/builder.nim @@ -34,17 +34,17 @@ export converters, asynciter logScope: topics = "codex slotsbuilder" -type - SlotsBuilder*[T, H] = ref object of RootObj - store: BlockStore - manifest: Manifest # current manifest - strategy: IndexingStrategy # indexing strategy - cellSize: NBytes # cell size - numSlotBlocks: Natural # number of blocks per slot (should yield a power of two number of cells) - slotRoots: seq[H] # roots of the slots - emptyBlock: seq[byte] 
# empty block - verifiableTree: ?T # verification tree (dataset tree) - emptyDigestTree: T # empty digest tree for empty blocks +type SlotsBuilder*[T, H] = ref object of RootObj + store: BlockStore + manifest: Manifest # current manifest + strategy: IndexingStrategy # indexing strategy + cellSize: NBytes # cell size + numSlotBlocks: Natural + # number of blocks per slot (should yield a power of two number of cells) + slotRoots: seq[H] # roots of the slots + emptyBlock: seq[byte] # empty block + verifiableTree: ?T # verification tree (dataset tree) + emptyDigestTree: T # empty digest tree for empty blocks func verifiable*[T, H](self: SlotsBuilder[T, H]): bool {.inline.} = ## Returns true if the slots are verifiable. @@ -133,9 +133,8 @@ func manifest*[T, H](self: SlotsBuilder[T, H]): Manifest = self.manifest proc buildBlockTree*[T, H]( - self: SlotsBuilder[T, H], - blkIdx: Natural, - slotPos: Natural): Future[?!(seq[byte], T)] {.async.} = + self: SlotsBuilder[T, H], blkIdx: Natural, slotPos: Natural +): Future[?!(seq[byte], T)] {.async.} = ## Build the block digest tree and return a tuple with the ## block data and the tree. ## @@ -160,16 +159,15 @@ proc buildBlockTree*[T, H]( if blk.isEmpty: success (self.emptyBlock, self.emptyDigestTree) else: - without tree =? - T.digestTree(blk.data, self.cellSize.int), err: + without tree =? T.digestTree(blk.data, self.cellSize.int), err: error "Failed to create digest for block", err = err.msg return failure(err) success (blk.data, tree) proc getCellHashes*[T, H]( - self: SlotsBuilder[T, H], - slotIndex: Natural): Future[?!seq[H]] {.async.} = + self: SlotsBuilder[T, H], slotIndex: Natural +): Future[?!seq[H]] {.async.} = ## Collect all the cells from a block and return ## their hashes. ## @@ -192,8 +190,8 @@ proc getCellHashes*[T, H]( pos = i trace "Getting block CID for tree at index" - without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and - digest =? tree.root, err: + without (_, tree) =? (await self.buildBlockTree(blkIdx, i)) and digest =? tree.root, + err: error "Failed to get block CID for tree at index", err = err.msg return failure(err) @@ -203,8 +201,8 @@ proc getCellHashes*[T, H]( success hashes proc buildSlotTree*[T, H]( - self: SlotsBuilder[T, H], - slotIndex: Natural): Future[?!T] {.async.} = + self: SlotsBuilder[T, H], slotIndex: Natural +): Future[?!T] {.async.} = ## Build the slot tree from the block digest hashes ## and return the tree. @@ -215,20 +213,20 @@ proc buildSlotTree*[T, H]( T.init(cellHashes) proc buildSlot*[T, H]( - self: SlotsBuilder[T, H], - slotIndex: Natural): Future[?!H] {.async.} = + self: SlotsBuilder[T, H], slotIndex: Natural +): Future[?!H] {.async.} = ## Build a slot tree and store the proofs in ## the block store. ## logScope: - cid = self.manifest.treeCid - slotIndex = slotIndex + cid = self.manifest.treeCid + slotIndex = slotIndex trace "Building slot tree" without tree =? (await self.buildSlotTree(slotIndex)) and - treeCid =? tree.root.?toSlotCid, err: + treeCid =? tree.root .? toSlotCid, err: error "Failed to build slot tree", err = err.msg return failure(err) @@ -238,13 +236,12 @@ proc buildSlot*[T, H]( error "Failed to get CID for slot cell", err = err.msg return failure(err) - without proof =? tree.getProof(i) and - encodableProof =? proof.toEncodableProof, err: + without proof =? tree.getProof(i) and encodableProof =? proof.toEncodableProof, err: error "Failed to get proof for slot tree", err = err.msg return failure(err) - if err =? 
(await self.store.putCidAndProof(treeCid, i, cellCid, encodableProof)).errorOption: error "Failed to store slot tree", err = err.msg return failure(err) @@ -258,14 +255,14 @@ proc buildSlots*[T, H](self: SlotsBuilder[T, H]): Future[?!void] {.async.} = ## logScope: - cid = self.manifest.treeCid - blockCount = self.manifest.blocksCount + cid = self.manifest.treeCid + blockCount = self.manifest.blocksCount trace "Building slots" if self.slotRoots.len == 0: self.slotRoots = collect(newSeq): - for i in 0.. 0: - numPadSlotBlocks + numSlotBlocks - else: - numSlotBlocks + numPadSlotBlocks + numSlotBlocks + else: + numSlotBlocks - numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # number of blocks per slot + numBlocksTotal = numSlotBlocksTotal * manifest.numSlots # total number of blocks in the dataset - emptyBlock = newSeq[byte](manifest.blockSize.int) - emptyDigestTree = ? T.digestTree(emptyBlock, cellSize.int) + emptyBlock = newSeq[byte](manifest.blockSize.int) + emptyDigestTree = ?T.digestTree(emptyBlock, cellSize.int) - strategy = ? strategy.init( - 0, - numBlocksTotal - 1, - manifest.numSlots).catch + strategy = ?strategy.init(0, numBlocksTotal - 1, manifest.numSlots).catch logScope: - numSlotBlocks = numSlotBlocks - numBlockCells = numBlockCells - numSlotCells = numSlotCells - pow2SlotCells = pow2SlotCells - numPadSlotBlocks = numPadSlotBlocks - numBlocksTotal = numBlocksTotal - numSlotBlocksTotal = numSlotBlocksTotal - strategy = strategy.strategyType + numSlotBlocks = numSlotBlocks + numBlockCells = numBlockCells + numSlotCells = numSlotCells + pow2SlotCells = pow2SlotCells + numPadSlotBlocks = numPadSlotBlocks + numBlocksTotal = numBlocksTotal + numSlotBlocksTotal = numSlotBlocksTotal + strategy = strategy.strategyType trace "Creating slots builder" - var - self = SlotsBuilder[T, H]( - store: store, - manifest: manifest, - strategy: strategy, - cellSize: cellSize, - emptyBlock: emptyBlock, - numSlotBlocks: numSlotBlocksTotal, - emptyDigestTree: emptyDigestTree) + var self = SlotsBuilder[T, H]( + store: store, + manifest: manifest, + strategy: strategy, + cellSize: cellSize, + emptyBlock: emptyBlock, + numSlotBlocks: numSlotBlocksTotal, + emptyDigestTree: emptyDigestTree, + ) if manifest.verifiable: - if manifest.slotRoots.len == 0 or - manifest.slotRoots.len != manifest.numSlots: + if manifest.slotRoots.len == 0 or manifest.slotRoots.len != manifest.numSlots: return failure "Manifest is verifiable but slot roots are missing or invalid." let - slotRoots = manifest.slotRoots.mapIt( (? it.fromSlotCid() )) - tree = ? self.buildVerifyTree(slotRoots) - expectedRoot = ? manifest.verifyRoot.fromVerifyCid() - verifyRoot = ? tree.root + slotRoots = manifest.slotRoots.mapIt((?it.fromSlotCid())) + tree = ?self.buildVerifyTree(slotRoots) + expectedRoot = ?manifest.verifyRoot.fromVerifyCid() + verifyRoot = ?tree.root if verifyRoot != expectedRoot: return failure "Existing slots root doesn't match reconstructed root." diff --git a/codex/slots/converters.nim b/codex/slots/converters.nim index f9716fa36..f0dc39904 100644 --- a/codex/slots/converters.nim +++ b/codex/slots/converters.nim @@ -23,21 +23,25 @@ import ../utils/digest func toCid(hash: Poseidon2Hash, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Cid = let - mhash = ? MultiHash.init(mcodec, hash.toBytes).mapFailure - treeCid = ?
Cid.init(CIDv1, cidCodec, mhash).mapFailure + mhash = ?MultiHash.init(mcodec, hash.toBytes).mapFailure + treeCid = ?Cid.init(CIDv1, cidCodec, mhash).mapFailure success treeCid -proc toPoseidon2Hash(cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec): ?!Poseidon2Hash = +proc toPoseidon2Hash( + cid: Cid, mcodec: MultiCodec, cidCodec: MultiCodec +): ?!Poseidon2Hash = if cid.cidver != CIDv1: return failure("Unexpected CID version") if cid.mcodec != cidCodec: - return failure("Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec) + return failure( + "Cid is not of expected codec. Was: " & $cid.mcodec & " but expected: " & $cidCodec + ) let - mhash = ? cid.mhash.mapFailure + mhash = ?cid.mhash.mapFailure bytes: array[32, byte] = array[32, byte].initCopyFrom(mhash.digestBytes()) - hash = ? Poseidon2Hash.fromBytes(bytes).toFailure + hash = ?Poseidon2Hash.fromBytes(bytes).toFailure success hash @@ -51,7 +55,7 @@ func toSlotCid*(hash: Poseidon2Hash): ?!Cid = toCid(hash, multiCodec("identity"), SlotRootCodec) func toSlotCids*(slotRoots: openArray[Poseidon2Hash]): ?!seq[Cid] = - success slotRoots.mapIt( ? it.toSlotCid ) + success slotRoots.mapIt(?it.toSlotCid) func fromSlotCid*(cid: Cid): ?!Poseidon2Hash = toPoseidon2Hash(cid, multiCodec("identity"), SlotRootCodec) @@ -62,27 +66,17 @@ func toVerifyCid*(hash: Poseidon2Hash): ?!Cid = func fromVerifyCid*(cid: Cid): ?!Poseidon2Hash = toPoseidon2Hash(cid, multiCodec("identity"), SlotProvingRootCodec) -func toEncodableProof*( - proof: Poseidon2Proof): ?!CodexProof = - - let - encodableProof = CodexProof( - mcodec: multiCodec("identity"), - index: proof.index, - nleaves: proof.nleaves, - path: proof.path.mapIt( @( it.toBytes ) )) +func toEncodableProof*(proof: Poseidon2Proof): ?!CodexProof = + let encodableProof = CodexProof( + mcodec: multiCodec("identity"), + index: proof.index, + nleaves: proof.nleaves, + path: proof.path.mapIt(@(it.toBytes)), + ) success encodableProof -func toVerifiableProof*( - proof: CodexProof): ?!Poseidon2Proof = - - let - nodes = proof.path.mapIt( - ? 
Poseidon2Hash.fromBytes(it.toArray32).toFailure - ) +func toVerifiableProof*(proof: CodexProof): ?!Poseidon2Proof = + let nodes = proof.path.mapIt(?Poseidon2Hash.fromBytes(it.toArray32).toFailure) - Poseidon2Proof.init( - index = proof.index, - nleaves = proof.nleaves, - nodes = nodes) + Poseidon2Proof.init(index = proof.index, nleaves = proof.nleaves, nodes = nodes) diff --git a/codex/slots/proofs/backendfactory.nim b/codex/slots/proofs/backendfactory.nim index ac478e1ad..7aba27d85 100644 --- a/codex/slots/proofs/backendfactory.nim +++ b/codex/slots/proofs/backendfactory.nim @@ -11,26 +11,25 @@ import ../../conf import ./backends import ./backendutils -proc initializeFromConfig( - config: CodexConf, - utils: BackendUtils): ?!AnyBackend = +proc initializeFromConfig(config: CodexConf, utils: BackendUtils): ?!AnyBackend = if not fileAccessible($config.circomR1cs, {AccessFlags.Read}) or - not endsWith($config.circomR1cs, ".r1cs"): + not endsWith($config.circomR1cs, ".r1cs"): return failure("Circom R1CS file not accessible") if not fileAccessible($config.circomWasm, {AccessFlags.Read}) or - not endsWith($config.circomWasm, ".wasm"): + not endsWith($config.circomWasm, ".wasm"): return failure("Circom wasm file not accessible") if not fileAccessible($config.circomZkey, {AccessFlags.Read}) or - not endsWith($config.circomZkey, ".zkey"): + not endsWith($config.circomZkey, ".zkey"): return failure("Circom zkey file not accessible") trace "Initialized prover backend from cli config" - success(utils.initializeCircomBackend( - $config.circomR1cs, - $config.circomWasm, - $config.circomZkey)) + success( + utils.initializeCircomBackend( + $config.circomR1cs, $config.circomWasm, $config.circomZkey + ) + ) proc r1csFilePath(config: CodexConf): string = config.circuitDir / "proof_main.r1cs" @@ -42,42 +41,40 @@ proc zkeyFilePath(config: CodexConf): string = config.circuitDir / "proof_main.zkey" proc initializeFromCircuitDirFiles( - config: CodexConf, - utils: BackendUtils): ?!AnyBackend {.gcsafe.} = - if fileExists(config.r1csFilePath) and - fileExists(config.wasmFilePath) and - fileExists(config.zkeyFilePath): + config: CodexConf, utils: BackendUtils +): ?!AnyBackend {.gcsafe.} = + if fileExists(config.r1csFilePath) and fileExists(config.wasmFilePath) and + fileExists(config.zkeyFilePath): trace "Initialized prover backend from local files" - return success(utils.initializeCircomBackend( - config.r1csFilePath, - config.wasmFilePath, - config.zkeyFilePath)) + return success( + utils.initializeCircomBackend( + config.r1csFilePath, config.wasmFilePath, config.zkeyFilePath + ) + ) failure("Circuit files not found") proc suggestDownloadTool(config: CodexConf) = without address =? config.marketplaceAddress: - raise (ref Defect)(msg: "Proving backend initializing while marketplace address not set.") + raise (ref Defect)( + msg: "Proving backend is initializing while the marketplace address is not set." + ) let - tokens = [ - "cirdl", - "\"" & $config.circuitDir & "\"", - config.ethProvider, - $address - ] + tokens = ["cirdl", "\"" & $config.circuitDir & "\"", config.ethProvider, $address] instructions = "'./" & tokens.join(" ") & "'" - warn "Proving circuit files are not found. Please run the following to download them:", instructions + warn "Proving circuit files could not be found.
Please run the following to download them:", + instructions proc initializeBackend*( - config: CodexConf, - utils: BackendUtils = BackendUtils()): ?!AnyBackend = - + config: CodexConf, utils: BackendUtils = BackendUtils() +): ?!AnyBackend = without backend =? initializeFromConfig(config, utils), cliErr: info "Could not initialize prover backend from CLI options...", msg = cliErr.msg without backend =? initializeFromCircuitDirFiles(config, utils), localErr: - info "Could not initialize prover backend from circuit dir files...", msg = localErr.msg + info "Could not initialize prover backend from circuit dir files...", + msg = localErr.msg suggestDownloadTool(config) return failure("CircuitFilesNotFound") # Unexpected: value of backend does not survive leaving each scope. (definition does though...) diff --git a/codex/slots/proofs/backends.nim b/codex/slots/proofs/backends.nim index 3872d821a..3bd2edb6c 100644 --- a/codex/slots/proofs/backends.nim +++ b/codex/slots/proofs/backends.nim @@ -2,5 +2,4 @@ import ./backends/circomcompat export circomcompat -type - AnyBackend* = CircomCompat +type AnyBackend* = CircomCompat diff --git a/codex/slots/proofs/backends/circomcompat.nim b/codex/slots/proofs/backends/circomcompat.nim index 374b8151c..1d2e3e19a 100644 --- a/codex/slots/proofs/backends/circomcompat.nim +++ b/codex/slots/proofs/backends/circomcompat.nim @@ -25,21 +25,22 @@ export circomcompat, converters type CircomCompat* = object - slotDepth : int # max depth of the slot tree - datasetDepth : int # max depth of dataset tree - blkDepth : int # depth of the block merkle tree (pow2 for now) - cellElms : int # number of field elements per cell - numSamples : int # number of samples per slot - r1csPath : string # path to the r1cs file - wasmPath : string # path to the wasm file - zkeyPath : string # path to the zkey file - backendCfg : ptr CircomBn254Cfg - vkp* : ptr CircomKey + slotDepth: int # max depth of the slot tree + datasetDepth: int # max depth of dataset tree + blkDepth: int # depth of the block merkle tree (pow2 for now) + cellElms: int # number of field elements per cell + numSamples: int # number of samples per slot + r1csPath: string # path to the r1cs file + wasmPath: string # path to the wasm file + zkeyPath: string # path to the zkey file + backendCfg: ptr CircomBn254Cfg + vkp*: ptr CircomKey NormalizedProofInputs*[H] {.borrow: `.`.} = distinct ProofInputs[H] -func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]): - NormalizedProofInputs[H] = +func normalizeInput*[H]( + self: CircomCompat, input: ProofInputs[H] +): NormalizedProofInputs[H] = ## Parameters in CIRCOM circuits are statically sized and must be properly ## padded before they can be passed onto the circuit. This function takes ## variable length parameters and performs that padding. 
@@ -52,10 +53,7 @@ func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]): for sample in input.samples: var merklePaths = sample.merklePaths merklePaths.setLen(self.slotDepth) - Sample[H]( - cellData: sample.cellData, - merklePaths: merklePaths - ) + Sample[H](cellData: sample.cellData, merklePaths: merklePaths) var normSlotProof = input.slotProof normSlotProof.setLen(self.datasetDepth) @@ -68,7 +66,7 @@ func normalizeInput*[H](self: CircomCompat, input: ProofInputs[H]): nCellsPerSlot: input.nCellsPerSlot, nSlotsPerDataSet: input.nSlotsPerDataSet, slotProof: normSlotProof, - samples: normSamples + samples: normSamples, ) proc release*(self: CircomCompat) = @@ -81,32 +79,28 @@ proc release*(self: CircomCompat) = if not isNil(self.vkp): self.vkp.unsafeAddr.release_key() -proc prove[H]( - self: CircomCompat, - input: NormalizedProofInputs[H]): ?!CircomProof = - - doAssert input.samples.len == self.numSamples, - "Number of samples does not match" +proc prove[H](self: CircomCompat, input: NormalizedProofInputs[H]): ?!CircomProof = + doAssert input.samples.len == self.numSamples, "Number of samples does not match" doAssert input.slotProof.len <= self.datasetDepth, "Slot proof is too deep - dataset has more slots than what we can handle?" doAssert input.samples.allIt( block: - (it.merklePaths.len <= self.slotDepth + self.blkDepth and - it.cellData.len == self.cellElms)), "Merkle paths too deep or cells too big for circuit" + ( + it.merklePaths.len <= self.slotDepth + self.blkDepth and + it.cellData.len == self.cellElms + ) + ), "Merkle paths too deep or cells too big for circuit" # TODO: All parameters should match circom's static parameters - var - ctx: ptr CircomCompatCtx + var ctx: ptr CircomCompatCtx defer: if ctx != nil: ctx.addr.release_circom_compat() - if init_circom_compat( - self.backendCfg, - addr ctx) != ERR_OK or ctx == nil: + if init_circom_compat(self.backendCfg, addr ctx) != ERR_OK or ctx == nil: raiseAssert("failed to initialize CircomCompat ctx") var @@ -114,67 +108,61 @@ proc prove[H]( dataSetRoot = input.datasetRoot.toBytes slotRoot = input.slotRoot.toBytes - if ctx.push_input_u256_array( - "entropy".cstring, entropy[0].addr, entropy.len.uint32) != ERR_OK: + if ctx.push_input_u256_array("entropy".cstring, entropy[0].addr, entropy.len.uint32) != + ERR_OK: return failure("Failed to push entropy") if ctx.push_input_u256_array( - "dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32) != ERR_OK: + "dataSetRoot".cstring, dataSetRoot[0].addr, dataSetRoot.len.uint32 + ) != ERR_OK: return failure("Failed to push data set root") if ctx.push_input_u256_array( - "slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32) != ERR_OK: + "slotRoot".cstring, slotRoot[0].addr, slotRoot.len.uint32 + ) != ERR_OK: return failure("Failed to push slot root") - if ctx.push_input_u32( - "nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK: + if ctx.push_input_u32("nCellsPerSlot".cstring, input.nCellsPerSlot.uint32) != ERR_OK: return failure("Failed to push nCellsPerSlot") - if ctx.push_input_u32( - "nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) != ERR_OK: + if ctx.push_input_u32("nSlotsPerDataSet".cstring, input.nSlotsPerDataSet.uint32) != + ERR_OK: return failure("Failed to push nSlotsPerDataSet") - if ctx.push_input_u32( - "slotIndex".cstring, input.slotIndex.uint32) != ERR_OK: + if ctx.push_input_u32("slotIndex".cstring, input.slotIndex.uint32) != ERR_OK: return failure("Failed to push slotIndex") - var - slotProof = input.slotProof.mapIt(
it.toBytes ).concat + var slotProof = input.slotProof.mapIt(it.toBytes).concat doAssert(slotProof.len == self.datasetDepth) # arrays are always flattened if ctx.push_input_u256_array( - "slotProof".cstring, - slotProof[0].addr, - uint (slotProof[0].len * slotProof.len)) != ERR_OK: - return failure("Failed to push slot proof") + "slotProof".cstring, slotProof[0].addr, uint (slotProof[0].len * slotProof.len) + ) != ERR_OK: + return failure("Failed to push slot proof") for s in input.samples: var - merklePaths = s.merklePaths.mapIt( it.toBytes ) - data = s.cellData.mapIt( @(it.toBytes) ).concat + merklePaths = s.merklePaths.mapIt(it.toBytes) + data = s.cellData.mapIt(@(it.toBytes)).concat if ctx.push_input_u256_array( "merklePaths".cstring, merklePaths[0].addr, - uint (merklePaths[0].len * merklePaths.len)) != ERR_OK: - return failure("Failed to push merkle paths") + uint (merklePaths[0].len * merklePaths.len), + ) != ERR_OK: + return failure("Failed to push merkle paths") - if ctx.push_input_u256_array( - "cellData".cstring, - data[0].addr, - data.len.uint) != ERR_OK: - return failure("Failed to push cell data") + if ctx.push_input_u256_array("cellData".cstring, data[0].addr, data.len.uint) != + ERR_OK: + return failure("Failed to push cell data") - var - proofPtr: ptr Proof = nil + var proofPtr: ptr Proof = nil let proof = try: - if ( - let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr); - res != ERR_OK) or - proofPtr == nil: + if (let res = self.backendCfg.prove_circuit(ctx, proofPtr.addr); res != ERR_OK) or + proofPtr == nil: return failure("Failed to prove - err code: " & $res) proofPtr[] @@ -184,16 +172,12 @@ proc prove[H]( success proof -proc prove*[H]( - self: CircomCompat, - input: ProofInputs[H]): ?!CircomProof = - +proc prove*[H](self: CircomCompat, input: ProofInputs[H]): ?!CircomProof = self.prove(self.normalizeInput(input)) proc verify*[H]( - self: CircomCompat, - proof: CircomProof, - inputs: ProofInputs[H]): ?!bool = + self: CircomCompat, proof: CircomProof, inputs: ProofInputs[H] +): ?!bool = ## Verify a proof using a ctx ## @@ -213,43 +197,44 @@ proc verify*[H]( inputs.releaseCircomInputs() proc init*( - _: type CircomCompat, - r1csPath : string, - wasmPath : string, - zkeyPath : string = "", - slotDepth = DefaultMaxSlotDepth, - datasetDepth = DefaultMaxDatasetDepth, - blkDepth = DefaultBlockDepth, - cellElms = DefaultCellElms, - numSamples = DefaultSamplesNum): CircomCompat = + _: type CircomCompat, + r1csPath: string, + wasmPath: string, + zkeyPath: string = "", + slotDepth = DefaultMaxSlotDepth, + datasetDepth = DefaultMaxDatasetDepth, + blkDepth = DefaultBlockDepth, + cellElms = DefaultCellElms, + numSamples = DefaultSamplesNum, +): CircomCompat = ## Create a new ctx ## var cfg: ptr CircomBn254Cfg var zkey = if zkeyPath.len > 0: zkeyPath.cstring else: nil - if init_circom_config( - r1csPath.cstring, - wasmPath.cstring, - zkey, cfg.addr) != ERR_OK or cfg == nil: - if cfg != nil: cfg.addr.release_cfg() - raiseAssert("failed to initialize circom compat config") + if init_circom_config(r1csPath.cstring, wasmPath.cstring, zkey, cfg.addr) != ERR_OK or + cfg == nil: + if cfg != nil: + cfg.addr.release_cfg() + raiseAssert("failed to initialize circom compat config") - var - vkpPtr: ptr VerifyingKey = nil + var vkpPtr: ptr VerifyingKey = nil if cfg.get_verifying_key(vkpPtr.addr) != ERR_OK or vkpPtr == nil: - if vkpPtr != nil: vkpPtr.addr.release_key() + if vkpPtr != nil: + vkpPtr.addr.release_key() raiseAssert("Failed to get verifying key") CircomCompat( - r1csPath : 
r1csPath, - wasmPath : wasmPath, - zkeyPath : zkeyPath, - slotDepth : slotDepth, + r1csPath: r1csPath, + wasmPath: wasmPath, + zkeyPath: zkeyPath, + slotDepth: slotDepth, datasetDepth: datasetDepth, - blkDepth : blkDepth, - cellElms : cellElms, - numSamples : numSamples, - backendCfg : cfg, - vkp : vkpPtr) + blkDepth: blkDepth, + cellElms: cellElms, + numSamples: numSamples, + backendCfg: cfg, + vkp: vkpPtr, + ) diff --git a/codex/slots/proofs/backends/converters.nim b/codex/slots/proofs/backends/converters.nim index 60c64f5c9..ee771477d 100644 --- a/codex/slots/proofs/backends/converters.nim +++ b/codex/slots/proofs/backends/converters.nim @@ -19,8 +19,8 @@ type CircomG1* = G1 CircomG2* = G2 - CircomProof* = Proof - CircomKey* = VerifyingKey + CircomProof* = Proof + CircomKey* = VerifyingKey CircomInputs* = Inputs proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs = @@ -29,18 +29,12 @@ proc toCircomInputs*(inputs: ProofInputs[Poseidon2Hash]): CircomInputs = datasetRoot = inputs.datasetRoot.toBytes.toArray32 entropy = inputs.entropy.toBytes.toArray32 - elms = [ - entropy, - datasetRoot, - slotIndex - ] + elms = [entropy, datasetRoot, slotIndex] let inputsPtr = allocShared0(32 * elms.len) copyMem(inputsPtr, addr elms[0], elms.len * 32) - CircomInputs( - elms: cast[ptr array[32, byte]](inputsPtr), - len: elms.len.uint) + CircomInputs(elms: cast[ptr array[32, byte]](inputsPtr), len: elms.len.uint) proc releaseCircomInputs*(inputs: var CircomInputs) = if not inputs.elms.isNil: @@ -48,23 +42,13 @@ proc releaseCircomInputs*(inputs: var CircomInputs) = inputs.elms = nil func toG1*(g: CircomG1): G1Point = - G1Point( - x: UInt256.fromBytesLE(g.x), - y: UInt256.fromBytesLE(g.y)) + G1Point(x: UInt256.fromBytesLE(g.x), y: UInt256.fromBytesLE(g.y)) func toG2*(g: CircomG2): G2Point = G2Point( - x: Fp2Element( - real: UInt256.fromBytesLE(g.x[0]), - imag: UInt256.fromBytesLE(g.x[1]) - ), - y: Fp2Element( - real: UInt256.fromBytesLE(g.y[0]), - imag: UInt256.fromBytesLE(g.y[1]) - )) + x: Fp2Element(real: UInt256.fromBytesLE(g.x[0]), imag: UInt256.fromBytesLE(g.x[1])), + y: Fp2Element(real: UInt256.fromBytesLE(g.y[0]), imag: UInt256.fromBytesLE(g.y[1])), + ) func toGroth16Proof*(proof: CircomProof): Groth16Proof = - Groth16Proof( - a: proof.a.toG1, - b: proof.b.toG2, - c: proof.c.toG1) + Groth16Proof(a: proof.a.toG1, b: proof.b.toG2, c: proof.c.toG1) diff --git a/codex/slots/proofs/backendutils.nim b/codex/slots/proofs/backendutils.nim index f7e6e2e14..0e334aced 100644 --- a/codex/slots/proofs/backendutils.nim +++ b/codex/slots/proofs/backendutils.nim @@ -1,12 +1,8 @@ import ./backends -type - BackendUtils* = ref object of RootObj +type BackendUtils* = ref object of RootObj method initializeCircomBackend*( - self: BackendUtils, - r1csFile: string, - wasmFile: string, - zKeyFile: string -): AnyBackend {.base, gcsafe.}= + self: BackendUtils, r1csFile: string, wasmFile: string, zKeyFile: string +): AnyBackend {.base, gcsafe.} = CircomCompat.init(r1csFile, wasmFile, zKeyFile) diff --git a/codex/slots/proofs/prover.nim b/codex/slots/proofs/prover.nim index 631e82e1a..36fc0a058 100644 --- a/codex/slots/proofs/prover.nim +++ b/codex/slots/proofs/prover.nim @@ -47,10 +47,8 @@ type nSamples: int proc prove*( - self: Prover, - slotIdx: int, - manifest: Manifest, - challenge: ProofChallenge): Future[?!(AnyProofInputs, AnyProof)] {.async.} = + self: Prover, slotIdx: int, manifest: Manifest, challenge: ProofChallenge +): Future[?!(AnyProofInputs, AnyProof)] {.async.} = ## Prove a statement using 
the backend. ## Returns a future that resolves to a proof. @@ -81,20 +79,13 @@ proc prove*( success (proofInput, proof) proc verify*( - self: Prover, - proof: AnyProof, - inputs: AnyProofInputs): Future[?!bool] {.async.} = + self: Prover, proof: AnyProof, inputs: AnyProofInputs +): Future[?!bool] {.async.} = ## Verify a proof using the backend. ## Returns a future that resolves to a boolean indicating whether the proof is valid. self.backend.verify(proof, inputs) proc new*( - _: type Prover, - store: BlockStore, - backend: AnyBackend, - nSamples: int): Prover = - - Prover( - store: store, - backend: backend, - nSamples: nSamples) + _: type Prover, store: BlockStore, backend: AnyBackend, nSamples: int +): Prover = + Prover(store: store, backend: backend, nSamples: nSamples) diff --git a/codex/slots/sampler.nim b/codex/slots/sampler.nim index 10ea26565..23cfb73fc 100644 --- a/codex/slots/sampler.nim +++ b/codex/slots/sampler.nim @@ -5,5 +5,4 @@ import ../merkletree export sampler, utils -type - Poseidon2Sampler* = DataSampler[Poseidon2Tree, Poseidon2Hash] +type Poseidon2Sampler* = DataSampler[Poseidon2Tree, Poseidon2Hash] diff --git a/codex/slots/sampler/sampler.nim b/codex/slots/sampler/sampler.nim index 3270d55a5..bccdaff2f 100644 --- a/codex/slots/sampler/sampler.nim +++ b/codex/slots/sampler/sampler.nim @@ -29,17 +29,14 @@ import ./utils logScope: topics = "codex datasampler" -type - DataSampler*[T, H] = ref object of RootObj - index: Natural - blockStore: BlockStore - builder: SlotsBuilder[T, H] +type DataSampler*[T, H] = ref object of RootObj + index: Natural + blockStore: BlockStore + builder: SlotsBuilder[T, H] func getCell*[T, H]( - self: DataSampler[T, H], - blkBytes: seq[byte], - blkCellIdx: Natural): seq[H] = - + self: DataSampler[T, H], blkBytes: seq[byte], blkCellIdx: Natural +): seq[H] = let cellSize = self.builder.cellSize.uint64 dataStart = cellSize * blkCellIdx.uint64 @@ -50,54 +47,43 @@ func getCell*[T, H]( blkBytes[dataStart ..< dataEnd].elements(H).toSeq() proc getSample*[T, H]( - self: DataSampler[T, H], - cellIdx: int, - slotTreeCid: Cid, - slotRoot: H): Future[?!Sample[H]] {.async.} = - + self: DataSampler[T, H], cellIdx: int, slotTreeCid: Cid, slotRoot: H +): Future[?!Sample[H]] {.async.} = let cellsPerBlock = self.builder.numBlockCells - blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index - blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index - origBlockIdx = self.builder.slotIndicies(self.index)[blkSlotIdx] # convert to original dataset block index + blkCellIdx = cellIdx.toCellInBlk(cellsPerBlock) # block cell index + blkSlotIdx = cellIdx.toBlkInSlot(cellsPerBlock) # slot tree index + origBlockIdx = self.builder.slotIndicies(self.index)[blkSlotIdx] + # convert to original dataset block index logScope: - cellIdx = cellIdx - blkSlotIdx = blkSlotIdx - blkCellIdx = blkCellIdx - origBlockIdx = origBlockIdx + cellIdx = cellIdx + blkSlotIdx = blkSlotIdx + blkCellIdx = blkCellIdx + origBlockIdx = origBlockIdx trace "Retrieving sample from block tree" let - (_, proof) = (await self.blockStore.getCidAndProof( - slotTreeCid, blkSlotIdx.Natural)).valueOr: + (_, proof) = (await self.blockStore.getCidAndProof(slotTreeCid, blkSlotIdx.Natural)).valueOr: return failure("Failed to get slot tree CID and proof") slotProof = proof.toVerifiableProof().valueOr: return failure("Failed to get verifiable proof") - (bytes, blkTree) = (await self.builder.buildBlockTree( - origBlockIdx, blkSlotIdx)).valueOr: + (bytes, blkTree) = (await self.builder.buildBlockTree(origBlockIdx, blkSlotIdx)).valueOr: return
failure("Failed to build block tree") cellData = self.getCell(bytes, blkCellIdx) cellProof = blkTree.getProof(blkCellIdx).valueOr: return failure("Failed to get proof from block tree") - success Sample[H]( - cellData: cellData, - merklePaths: (cellProof.path & slotProof.path)) + success Sample[H](cellData: cellData, merklePaths: (cellProof.path & slotProof.path)) proc getProofInput*[T, H]( - self: DataSampler[T, H], - entropy: ProofChallenge, - nSamples: Natural): Future[?!ProofInputs[H]] {.async.} = + self: DataSampler[T, H], entropy: ProofChallenge, nSamples: Natural +): Future[?!ProofInputs[H]] {.async.} = ## Generate proofs as input to the proving circuit. ## let - entropy = H.fromBytes( - array[31, byte].initCopyFrom(entropy[0..30])) # truncate to 31 bytes, otherwise it _might_ be greater than mod + entropy = H.fromBytes(array[31, byte].initCopyFrom(entropy[0 .. 30])) + # truncate to 31 bytes, otherwise it _might_ be greater than mod verifyTree = self.builder.verifyTree.toFailure.valueOr: return failure("Failed to get verify tree") @@ -109,11 +99,8 @@ proc getProofInput*[T, H]( return failure("Failed to get dataset root") slotTreeCid = self.builder.manifest.slotRoots[self.index] - slotRoot = self.builder.slotRoots[self.index] - cellIdxs = entropy.cellIndices( - slotRoot, - self.builder.numSlotCells, - nSamples) + slotRoot = self.builder.slotRoots[self.index] + cellIdxs = entropy.cellIndices(slotRoot, self.builder.numSlotCells, nSamples) logScope: cells = cellIdxs @@ -132,14 +119,15 @@ proc getProofInput*[T, H]( nCellsPerSlot: self.builder.numSlotCells, slotRoot: slotRoot, slotIndex: self.index, - samples: samples) + samples: samples, + ) proc new*[T, H]( _: type DataSampler[T, H], index: Natural, blockStore: BlockStore, - builder: SlotsBuilder[T, H]): ?!DataSampler[T, H] = - + builder: SlotsBuilder[T, H], +): ?!DataSampler[T, H] = if index > builder.slotRoots.high: error "Slot index is out of range" return failure("Slot index is out of range") @@ -147,7 +135,4 @@ proc new*[T, H]( if not builder.verifiable: return failure("Cannot instantiate DataSampler for non-verifiable builder") - success DataSampler[T, H]( - index: index, - blockStore: blockStore, - builder: builder) + success DataSampler[T, H](index: index, blockStore: blockStore, builder: builder) diff --git a/codex/slots/sampler/utils.nim b/codex/slots/sampler/utils.nim index 998f2cdcc..ce78fadc4 100644 --- a/codex/slots/sampler/utils.nim +++ b/codex/slots/sampler/utils.nim @@ -15,21 +15,21 @@ import pkg/constantine/math/arithmetic import ../../merkletree func extractLowBits*[n: static int](elm: BigInt[n], k: int): uint64 = - doAssert( k > 0 and k <= 64 ) - var r = 0'u64 - for i in 0.. 
0 and k <= 64) + var r = 0'u64 + for i in 0 ..< k: let b = bit[n](elm, i) let y = uint64(b) if (y != 0): - r = bitor( r, 1'u64 shl i ) + r = bitor(r, 1'u64 shl i) r func extractLowBits(fld: Poseidon2Hash, k: int): uint64 = - let elm : BigInt[254] = fld.toBig() - return extractLowBits(elm, k); + let elm: BigInt[254] = fld.toBig() + return extractLowBits(elm, k) -func floorLog2*(x : int) : int = - doAssert ( x > 0 ) +func floorLog2*(x: int): int = + doAssert (x > 0) var k = -1 var y = x while (y > 0): @@ -37,39 +37,39 @@ func floorLog2*(x : int) : int = y = y shr 1 return k -func ceilingLog2*(x : int) : int = - doAssert ( x > 0 ) +func ceilingLog2*(x: int): int = + doAssert (x > 0) return (floorLog2(x - 1) + 1) func toBlkInSlot*(cell: Natural, numCells: Natural): Natural = let log2 = ceilingLog2(numCells) - doAssert( 1 shl log2 == numCells , "`numCells` is assumed to be a power of two" ) + doAssert(1 shl log2 == numCells, "`numCells` is assumed to be a power of two") return cell div numCells func toCellInBlk*(cell: Natural, numCells: Natural): Natural = let log2 = ceilingLog2(numCells) - doAssert( 1 shl log2 == numCells , "`numCells` is assumed to be a power of two" ) + doAssert(1 shl log2 == numCells, "`numCells` is assumed to be a power of two") return cell mod numCells func cellIndex*( - entropy: Poseidon2Hash, - slotRoot: Poseidon2Hash, - numCells: Natural, counter: Natural): Natural = + entropy: Poseidon2Hash, slotRoot: Poseidon2Hash, numCells: Natural, counter: Natural +): Natural = let log2 = ceilingLog2(numCells) - doAssert( 1 shl log2 == numCells , "`numCells` is assumed to be a power of two" ) + doAssert(1 shl log2 == numCells, "`numCells` is assumed to be a power of two") - let hash = Sponge.digest( @[ entropy, slotRoot, counter.toF ], rate = 2 ) - return int( extractLowBits(hash, log2) ) + let hash = Sponge.digest(@[entropy, slotRoot, counter.toF], rate = 2) + return int(extractLowBits(hash, log2)) func cellIndices*( - entropy: Poseidon2Hash, - slotRoot: Poseidon2Hash, - numCells: Natural, nSamples: Natural): seq[Natural] = - + entropy: Poseidon2Hash, + slotRoot: Poseidon2Hash, + numCells: Natural, + nSamples: Natural, +): seq[Natural] = var indices: seq[Natural] - for i in 1..nSamples: + for i in 1 .. 
nSamples: indices.add(cellIndex(entropy, slotRoot, numCells, i)) indices diff --git a/codex/slots/types.nim b/codex/slots/types.nim index 8703086e2..0cd243261 100644 --- a/codex/slots/types.nim +++ b/codex/slots/types.nim @@ -24,5 +24,7 @@ type slotRoot*: H nCellsPerSlot*: Natural nSlotsPerDataSet*: Natural - slotProof*: seq[H] # inclusion proof that shows that the slot root (leaf) is part of the dataset (root) - samples*: seq[Sample[H]] # inclusion proofs which show that the selected cells (leafs) are part of the slot (roots) + slotProof*: seq[H] + # inclusion proof that shows that the slot root (leaf) is part of the dataset (root) + samples*: seq[Sample[H]] + # inclusion proofs which show that the selected cells (leafs) are part of the slot (roots) diff --git a/codex/stores.nim b/codex/stores.nim index 11e7c8df8..91d2c786b 100644 --- a/codex/stores.nim +++ b/codex/stores.nim @@ -7,10 +7,4 @@ import ./stores/keyutils import ./stores/treehelper export - cachestore, - blockstore, - networkstore, - repostore, - keyutils, - treehelper, - maintenance + cachestore, blockstore, networkstore, repostore, keyutils, treehelper, maintenance diff --git a/codex/stores/blockstore.nim b/codex/stores/blockstore.nim index 791e7d5b1..78fab0da7 100644 --- a/codex/stores/blockstore.nim +++ b/codex/stores/blockstore.nim @@ -9,7 +9,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import pkg/libp2p @@ -27,9 +28,11 @@ type BlockNotFoundError* = object of CodexError BlockType* {.pure.} = enum - Manifest, Block, Both + Manifest + Block + Both - CidCallback* = proc(cid: Cid): Future[void] {.gcsafe, raises:[].} + CidCallback* = proc(cid: Cid): Future[void] {.gcsafe, raises: [].} BlockStore* = ref object of RootObj onBlockStored*: ?CidCallback @@ -39,7 +42,9 @@ method getBlock*(self: BlockStore, cid: Cid): Future[?!Block] {.base, gcsafe.} = raiseAssert("getBlock by cid not implemented!") -method getBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Block] {.base, gcsafe.} = +method getBlock*( + self: BlockStore, treeCid: Cid, index: Natural +): Future[?!Block] {.base, gcsafe.} = ## Get a block from the blockstore ## @@ -50,51 +55,49 @@ method getCid*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!Cid] {. 
## raiseAssert("getCid by treecid not implemented!") -method getBlock*(self: BlockStore, address: BlockAddress): Future[?!Block] {.base, gcsafe.} = +method getBlock*( + self: BlockStore, address: BlockAddress +): Future[?!Block] {.base, gcsafe.} = ## Get a block from the blockstore ## raiseAssert("getBlock by addr not implemented!") -method getBlockAndProof*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.base, gcsafe.} = +method getBlockAndProof*( + self: BlockStore, treeCid: Cid, index: Natural +): Future[?!(Block, CodexProof)] {.base, gcsafe.} = ## Get a block and associated inclusion proof by Cid of a merkle tree and an index of a leaf in a tree ## raiseAssert("getBlockAndProof not implemented!") method putBlock*( - self: BlockStore, - blk: Block, - ttl = Duration.none): Future[?!void] {.base, gcsafe.} = + self: BlockStore, blk: Block, ttl = Duration.none +): Future[?!void] {.base, gcsafe.} = ## Put a block to the blockstore ## raiseAssert("putBlock not implemented!") method putCidAndProof*( - self: BlockStore, - treeCid: Cid, - index: Natural, - blockCid: Cid, - proof: CodexProof): Future[?!void] {.base, gcsafe.} = + self: BlockStore, treeCid: Cid, index: Natural, blockCid: Cid, proof: CodexProof +): Future[?!void] {.base, gcsafe.} = ## Put a block proof to the blockstore ## raiseAssert("putCidAndProof not implemented!") method getCidAndProof*( - self: BlockStore, - treeCid: Cid, - index: Natural): Future[?!(Cid, CodexProof)] {.base, gcsafe.} = + self: BlockStore, treeCid: Cid, index: Natural +): Future[?!(Cid, CodexProof)] {.base, gcsafe.} = ## Get a block proof from the blockstore ## raiseAssert("getCidAndProof not implemented!") method ensureExpiry*( - self: BlockStore, - cid: Cid, - expiry: SecondsSince1970): Future[?!void] {.base, gcsafe.} = + self: BlockStore, cid: Cid, expiry: SecondsSince1970 +): Future[?!void] {.base, gcsafe.} = ## Ensure that block's assosicated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## @@ -102,10 +105,8 @@ method ensureExpiry*( raiseAssert("Not implemented!") method ensureExpiry*( - self: BlockStore, - treeCid: Cid, - index: Natural, - expiry: SecondsSince1970): Future[?!void] {.base, gcsafe.} = + self: BlockStore, treeCid: Cid, index: Natural, expiry: SecondsSince1970 +): Future[?!void] {.base, gcsafe.} = ## Ensure that block's associated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## @@ -118,7 +119,9 @@ method delBlock*(self: BlockStore, cid: Cid): Future[?!void] {.base, gcsafe.} = raiseAssert("delBlock not implemented!") -method delBlock*(self: BlockStore, treeCid: Cid, index: Natural): Future[?!void] {.base, gcsafe.} = +method delBlock*( + self: BlockStore, treeCid: Cid, index: Natural +): Future[?!void] {.base, gcsafe.} = ## Delete a block from the blockstore ## @@ -130,15 +133,17 @@ method hasBlock*(self: BlockStore, cid: Cid): Future[?!bool] {.base, gcsafe.} = raiseAssert("hasBlock not implemented!") -method hasBlock*(self: BlockStore, tree: Cid, index: Natural): Future[?!bool] {.base, gcsafe.} = +method hasBlock*( + self: BlockStore, tree: Cid, index: Natural +): Future[?!bool] {.base, gcsafe.} = ## Check if the block exists in the blockstore ## raiseAssert("hasBlock not implemented!") method listBlocks*( - self: BlockStore, - blockType = BlockType.Manifest): Future[?!AsyncIter[?Cid]] {.base, gcsafe.} = + self: BlockStore, blockType = 
BlockType.Manifest +): Future[?!AsyncIter[?Cid]] {.base, gcsafe.} = ## Get the list of blocks in the BlockStore. This is an intensive operation ## @@ -159,7 +164,8 @@ proc contains*(self: BlockStore, blk: Cid): Future[bool] {.async.} = return (await self.hasBlock(blk)) |? false proc contains*(self: BlockStore, address: BlockAddress): Future[bool] {.async.} = - return if address.leaf: - (await self.hasBlock(address.treeCid, address.index)) |? false + return + if address.leaf: + (await self.hasBlock(address.treeCid, address.index)) |? false else: - (await self.hasBlock(address.cid)) |? false + (await self.hasBlock(address.cid)) |? false diff --git a/codex/stores/cachestore.nim b/codex/stores/cachestore.nim index 130d2adeb..6235c9c61 100644 --- a/codex/stores/cachestore.nim +++ b/codex/stores/cachestore.nim @@ -9,7 +9,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import std/options @@ -43,8 +44,7 @@ type InvalidBlockSize* = object of CodexError -const - DefaultCacheSize*: NBytes = 5.MiBs +const DefaultCacheSize*: NBytes = 5.MiBs method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} = ## Get a block from the stores @@ -68,22 +68,28 @@ method getBlock*(self: CacheStore, cid: Cid): Future[?!Block] {.async.} = return failure exc method getCidAndProof*( - self: CacheStore, - treeCid: Cid, - index: Natural): Future[?!(Cid, CodexProof)] {.async.} = - + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!(Cid, CodexProof)] {.async.} = if cidAndProof =? self.cidAndProofCache.getOption((treeCid, index)): success(cidAndProof) else: - failure(newException(BlockNotFoundError, "Block not in cache: " & $BlockAddress.init(treeCid, index))) - -method getBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!Block] {.async.} = + failure( + newException( + BlockNotFoundError, "Block not in cache: " & $BlockAddress.init(treeCid, index) + ) + ) + +method getBlock*( + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!Block] {.async.} = without cidAndProof =? (await self.getCidAndProof(treeCid, index)), err: return failure(err) await self.getBlock(cidAndProof[0]) -method getBlockAndProof*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.async.} = +method getBlockAndProof*( + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!(Block, CodexProof)] {.async.} = without cidAndProof =? (await self.getCidAndProof(treeCid, index)), err: return failure(err) @@ -111,7 +117,9 @@ method hasBlock*(self: CacheStore, cid: Cid): Future[?!bool] {.async.} = return (cid in self.cache).success -method hasBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} = +method hasBlock*( + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!bool] {.async.} = without cidAndProof =? (await self.getCidAndProof(treeCid, index)), err: if err of BlockNotFoundError: return success(false) @@ -120,20 +128,19 @@ method hasBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!bool] await self.hasBlock(cidAndProof[0]) -func cids(self: CacheStore): (iterator: Cid {.gcsafe.}) = - return iterator(): Cid = - for cid in self.cache.keys: - yield cid +func cids(self: CacheStore): (iterator (): Cid {.gcsafe.}) = + return + iterator (): Cid = + for cid in self.cache.keys: + yield cid method listBlocks*( - self: CacheStore, - blockType = BlockType.Manifest + self: CacheStore, blockType = BlockType.Manifest ): Future[?!AsyncIter[?Cid]] {.async.} = ## Get the list of blocks in the BlockStore. 
This is an intensive operation ## - let - cids = self.cids() + let cids = self.cids() proc isFinished(): bool = return finished(cids) @@ -141,29 +148,32 @@ method listBlocks*( proc genNext(): Future[Cid] {.async.} = cids() - let iter = await (AsyncIter[Cid].new(genNext, isFinished) - .filter( - proc (cid: Cid): Future[bool] {.async.} = + let iter = await ( + AsyncIter[Cid].new(genNext, isFinished).filter( + proc(cid: Cid): Future[bool] {.async.} = without isManifest =? cid.isManifest, err: trace "Error checking if cid is a manifest", err = err.msg return false - case blockType: + case blockType of BlockType.Both: return true of BlockType.Manifest: return isManifest of BlockType.Block: return not isManifest - )) + ) + ) - return success(map[Cid, ?Cid](iter, - proc (cid: Cid): Future[?Cid] {.async.} = - some(cid) - )) + return success( + map[Cid, ?Cid]( + iter, + proc(cid: Cid): Future[?Cid] {.async.} = + some(cid), + ) + ) func putBlockSync(self: CacheStore, blk: Block): bool = - let blkSize = blk.data.len.NBytes # in bytes if blkSize > self.size: @@ -185,9 +195,8 @@ func putBlockSync(self: CacheStore, blk: Block): bool = return true method putBlock*( - self: CacheStore, - blk: Block, - ttl = Duration.none): Future[?!void] {.async.} = + self: CacheStore, blk: Block, ttl = Duration.none +): Future[?!void] {.async.} = ## Put a block to the blockstore ## @@ -199,23 +208,17 @@ method putBlock*( discard self.putBlockSync(blk) if onBlock =? self.onBlockStored: await onBlock(blk.cid) - + return success() method putCidAndProof*( - self: CacheStore, - treeCid: Cid, - index: Natural, - blockCid: Cid, - proof: CodexProof + self: CacheStore, treeCid: Cid, index: Natural, blockCid: Cid, proof: CodexProof ): Future[?!void] {.async.} = self.cidAndProofCache[(treeCid, index)] = (blockCid, proof) success() method ensureExpiry*( - self: CacheStore, - cid: Cid, - expiry: SecondsSince1970 + self: CacheStore, cid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = ## Updates block's assosicated TTL in store - not applicable for CacheStore ## @@ -223,10 +226,7 @@ method ensureExpiry*( discard # CacheStore does not have notion of TTL method ensureExpiry*( - self: CacheStore, - treeCid: Cid, - index: Natural, - expiry: SecondsSince1970 + self: CacheStore, treeCid: Cid, index: Natural, expiry: SecondsSince1970 ): Future[?!void] {.async.} = ## Updates block's associated TTL in store - not applicable for CacheStore ## @@ -248,7 +248,9 @@ method delBlock*(self: CacheStore, cid: Cid): Future[?!void] {.async.} = return success() -method delBlock*(self: CacheStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} = +method delBlock*( + self: CacheStore, treeCid: Cid, index: Natural +): Future[?!void] {.async.} = let maybeRemoved = self.cidAndProofCache.del((treeCid, index)) if removed =? 
maybeRemoved: @@ -266,7 +268,7 @@ proc new*( _: type CacheStore, blocks: openArray[Block] = [], cacheSize: NBytes = DefaultCacheSize, - chunkSize: NBytes = DefaultChunkSize + chunkSize: NBytes = DefaultChunkSize, ): CacheStore {.raises: [Defect, ValueError].} = ## Create a new CacheStore instance ## @@ -286,7 +288,8 @@ proc new*( cidAndProofCache: cidAndProofCache, currentSize: currentSize, size: cacheSize, - onBlockStored: CidCallback.none) + onBlockStored: CidCallback.none, + ) for blk in blocks: discard store.putBlockSync(blk) @@ -294,9 +297,6 @@ proc new*( return store proc new*( - _: type CacheStore, - blocks: openArray[Block] = [], - cacheSize: int, - chunkSize: int + _: type CacheStore, blocks: openArray[Block] = [], cacheSize: int, chunkSize: int ): CacheStore {.raises: [Defect, ValueError].} = CacheStore.new(blocks, NBytes cacheSize, NBytes chunkSize) diff --git a/codex/stores/keyutils.nim b/codex/stores/keyutils.nim index 1dbeccb48..0634b6a26 100644 --- a/codex/stores/keyutils.nim +++ b/codex/stores/keyutils.nim @@ -8,7 +8,8 @@ ## those terms. import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import std/sugar import pkg/questionable/results @@ -30,10 +31,9 @@ const QuotaReservedKey* = (QuotaKey / "reserved").tryGet func makePrefixKey*(postFixLen: int, cid: Cid): ?!Key = - let - cidKey = ? Key.init(($cid)[^postFixLen..^1] & "/" & $cid) + let cidKey = ?Key.init(($cid)[^postFixLen ..^ 1] & "/" & $cid) - if ? cid.isManifest: + if ?cid.isManifest: success CodexManifestKey / cidKey else: success CodexBlocksKey / cidKey @@ -42,7 +42,7 @@ proc createBlockExpirationMetadataKey*(cid: Cid): ?!Key = BlocksTtlKey / $cid proc createBlockExpirationMetadataQueryKey*(): ?!Key = - let queryString = ? (BlocksTtlKey / "*") + let queryString = ?(BlocksTtlKey / "*") Key.init(queryString) proc createBlockCidAndProofMetadataKey*(treeCid: Cid, index: Natural): ?!Key = diff --git a/codex/stores/maintenance.nim b/codex/stores/maintenance.nim index 3d1e66ca2..e7ce1bdfa 100644 --- a/codex/stores/maintenance.nim +++ b/codex/stores/maintenance.nim @@ -25,14 +25,13 @@ const DefaultBlockMaintenanceInterval* = 10.minutes DefaultNumberOfBlocksToMaintainPerInterval* = 1000 -type - BlockMaintainer* = ref object of RootObj - repoStore: RepoStore - interval: Duration - timer: Timer - clock: Clock - numberOfBlocksPerInterval: int - offset: int +type BlockMaintainer* = ref object of RootObj + repoStore: RepoStore + interval: Duration + timer: Timer + clock: Clock + numberOfBlocksPerInterval: int + offset: int proc new*( T: type BlockMaintainer, @@ -40,7 +39,7 @@ proc new*( interval: Duration, numberOfBlocksPerInterval = 100, timer = Timer.new(), - clock: Clock = SystemClock.new() + clock: Clock = SystemClock.new(), ): BlockMaintainer = ## Create new BlockMaintainer instance ## @@ -52,13 +51,16 @@ proc new*( numberOfBlocksPerInterval: numberOfBlocksPerInterval, timer: timer, clock: clock, - offset: 0) + offset: 0, + ) proc deleteExpiredBlock(self: BlockMaintainer, cid: Cid): Future[void] {.async.} = if isErr (await self.repoStore.delBlock(cid)): trace "Unable to delete block from repoStore" -proc processBlockExpiration(self: BlockMaintainer, be: BlockExpiration): Future[void] {.async.} = +proc processBlockExpiration( + self: BlockMaintainer, be: BlockExpiration +): Future[void] {.async.} = if be.expiry < self.clock.now: await self.deleteExpiredBlock(be.cid) else: @@ -66,8 +68,7 @@ proc processBlockExpiration(self: BlockMaintainer, be: BlockExpiration): Future[ proc runBlockCheck(self: 
BlockMaintainer): Future[void] {.async.} = let expirations = await self.repoStore.getBlockExpirations( - maxNumber = self.numberOfBlocksPerInterval, - offset = self.offset + maxNumber = self.numberOfBlocksPerInterval, offset = self.offset ) without iter =? expirations, err: @@ -93,7 +94,7 @@ proc start*(self: BlockMaintainer) = except CancelledError as error: raise error except CatchableError as exc: - error "Unexpected exception in BlockMaintainer.onTimer(): ", msg=exc.msg + error "Unexpected exception in BlockMaintainer.onTimer(): ", msg = exc.msg self.timer.start(onTimer, self.interval) diff --git a/codex/stores/networkstore.nim b/codex/stores/networkstore.nim index 40758b94a..faee36e1c 100644 --- a/codex/stores/networkstore.nim +++ b/codex/stores/networkstore.nim @@ -7,7 +7,6 @@ ## This file may not be copied, modified, or distributed except according to ## those terms. - {.push raises: [].} import pkg/chronos @@ -28,10 +27,9 @@ export blockstore, blockexchange, asyncheapqueue logScope: topics = "codex networkstore" -type - NetworkStore* = ref object of BlockStore - engine*: BlockExcEngine # blockexc decision engine - localStore*: BlockStore # local block store +type NetworkStore* = ref object of BlockStore + engine*: BlockExcEngine # blockexc decision engine + localStore*: BlockStore # local block store method getBlock*(self: NetworkStore, address: BlockAddress): Future[?!Block] {.async.} = without blk =? (await self.localStore.getBlock(address)), err: @@ -60,9 +58,8 @@ method getBlock*(self: NetworkStore, treeCid: Cid, index: Natural): Future[?!Blo self.getBlock(BlockAddress.init(treeCid, index)) method putBlock*( - self: NetworkStore, - blk: Block, - ttl = Duration.none): Future[?!void] {.async.} = + self: NetworkStore, blk: Block, ttl = Duration.none +): Future[?!void] {.async.} = ## Store block locally and notify the network ## let res = await self.localStore.putBlock(blk, ttl) @@ -73,26 +70,21 @@ method putBlock*( return success() method putCidAndProof*( - self: NetworkStore, - treeCid: Cid, - index: Natural, - blockCid: Cid, - proof: CodexProof): Future[?!void] = + self: NetworkStore, treeCid: Cid, index: Natural, blockCid: Cid, proof: CodexProof +): Future[?!void] = self.localStore.putCidAndProof(treeCid, index, blockCid, proof) method getCidAndProof*( - self: NetworkStore, - treeCid: Cid, - index: Natural): Future[?!(Cid, CodexProof)] = + self: NetworkStore, treeCid: Cid, index: Natural +): Future[?!(Cid, CodexProof)] = ## Get a block proof from the blockstore ## self.localStore.getCidAndProof(treeCid, index) method ensureExpiry*( - self: NetworkStore, - cid: Cid, - expiry: SecondsSince1970): Future[?!void] {.async.} = + self: NetworkStore, cid: Cid, expiry: SecondsSince1970 +): Future[?!void] {.async.} = ## Ensure that block's assosicated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## @@ -108,10 +100,8 @@ method ensureExpiry*( return success() method ensureExpiry*( - self: NetworkStore, - treeCid: Cid, - index: Natural, - expiry: SecondsSince1970): Future[?!void] {.async.} = + self: NetworkStore, treeCid: Cid, index: Natural, expiry: SecondsSince1970 +): Future[?!void] {.async.} = ## Ensure that block's associated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## @@ -127,8 +117,8 @@ method ensureExpiry*( return success() method listBlocks*( - self: NetworkStore, - blockType = BlockType.Manifest): 
Future[?!AsyncIter[?Cid]] = + self: NetworkStore, blockType = BlockType.Manifest +): Future[?!AsyncIter[?Cid]] = self.localStore.listBlocks(blockType) method delBlock*(self: NetworkStore, cid: Cid): Future[?!void] = @@ -155,9 +145,7 @@ method close*(self: NetworkStore): Future[void] {.async.} = await self.localStore.close proc new*( - T: type NetworkStore, - engine: BlockExcEngine, - localStore: BlockStore + T: type NetworkStore, engine: BlockExcEngine, localStore: BlockStore ): NetworkStore = ## Create new instance of a NetworkStore ## diff --git a/codex/stores/queryiterhelper.nim b/codex/stores/queryiterhelper.nim index 7c51d2152..6bf3090bf 100644 --- a/codex/stores/queryiterhelper.nim +++ b/codex/stores/queryiterhelper.nim @@ -9,9 +9,8 @@ import ../utils/asynciter type KeyVal*[T] = tuple[key: Key, value: T] proc toAsyncIter*[T]( - queryIter: QueryIter[T], - finishOnErr: bool = true - ): Future[?!AsyncIter[?!QueryResponse[T]]] {.async.} = + queryIter: QueryIter[T], finishOnErr: bool = true +): Future[?!AsyncIter[?!QueryResponse[T]]] {.async.} = ## Converts `QueryIter[T]` to `AsyncIter[?!QueryResponse[T]]` and automatically ## runs dispose whenever `QueryIter` finishes or whenever an error occurs (only ## if the flag finishOnErr is set to true) @@ -25,7 +24,7 @@ proc toAsyncIter*[T]( var errOccurred = false - proc genNext: Future[?!QueryResponse[T]] {.async.} = + proc genNext(): Future[?!QueryResponse[T]] {.async.} = let queryResOrErr = await queryIter.next() if queryResOrErr.isErr: @@ -44,8 +43,8 @@ proc toAsyncIter*[T]( AsyncIter[?!QueryResponse[T]].new(genNext, isFinished).success proc filterSuccess*[T]( - iter: AsyncIter[?!QueryResponse[T]] - ): Future[AsyncIter[tuple[key: Key, value: T]]] {.async.} = + iter: AsyncIter[?!QueryResponse[T]] +): Future[AsyncIter[tuple[key: Key, value: T]]] {.async.} = ## Filters out any items that are not success proc mapping(resOrErr: ?!QueryResponse[T]): Future[?KeyVal[T]] {.async.} = diff --git a/codex/stores/repostore/coders.nim b/codex/stores/repostore/coders.nim index 6fc784080..47df72193 100644 --- a/codex/stores/repostore/coders.nim +++ b/codex/stores/repostore/coders.nim @@ -19,20 +19,35 @@ import ../../errors import ../../merkletree import ../../utils/json -proc encode*(t: QuotaUsage): seq[byte] = t.toJson().toBytes() -proc decode*(T: type QuotaUsage, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc encode*(t: QuotaUsage): seq[byte] = + t.toJson().toBytes() -proc encode*(t: BlockMetadata): seq[byte] = t.toJson().toBytes() -proc decode*(T: type BlockMetadata, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc decode*(T: type QuotaUsage, bytes: seq[byte]): ?!T = + T.fromJson(bytes) -proc encode*(t: LeafMetadata): seq[byte] = t.toJson().toBytes() -proc decode*(T: type LeafMetadata, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc encode*(t: BlockMetadata): seq[byte] = + t.toJson().toBytes() -proc encode*(t: DeleteResult): seq[byte] = t.toJson().toBytes() -proc decode*(T: type DeleteResult, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc decode*(T: type BlockMetadata, bytes: seq[byte]): ?!T = + T.fromJson(bytes) -proc encode*(t: StoreResult): seq[byte] = t.toJson().toBytes() -proc decode*(T: type StoreResult, bytes: seq[byte]): ?!T = T.fromJson(bytes) +proc encode*(t: LeafMetadata): seq[byte] = + t.toJson().toBytes() + +proc decode*(T: type LeafMetadata, bytes: seq[byte]): ?!T = + T.fromJson(bytes) + +proc encode*(t: DeleteResult): seq[byte] = + t.toJson().toBytes() + +proc decode*(T: type DeleteResult, bytes: seq[byte]): ?!T = + T.fromJson(bytes) + 
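# --- Illustrative aside (not part of the patch): the uint64 coder that
# follows stores integers as 8 big-endian bytes and refuses to decode
# shorter inputs. A minimal round-trip sketch, assuming pkg/stew/endians2:
import pkg/stew/endians2

let value = 1024'u64
let encoded = @(value.toBytesBE)          # seq[byte] of length 8
doAssert encoded.len == sizeof(uint64)
let decoded = uint64.fromBytesBE(encoded) # openArray[byte] -> uint64
doAssert decoded == value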
+proc encode*(t: StoreResult): seq[byte] = + t.toJson().toBytes() + +proc decode*(T: type StoreResult, bytes: seq[byte]): ?!T = + T.fromJson(bytes) proc encode*(i: uint64): seq[byte] = @(i.toBytesBE) @@ -43,5 +58,8 @@ proc decode*(T: type uint64, bytes: seq[byte]): ?!T = else: failure("Not enough bytes to decode `uint64`") -proc encode*(i: Natural | enum): seq[byte] = cast[uint64](i).encode -proc decode*(T: typedesc[Natural | enum], bytes: seq[byte]): ?!T = uint64.decode(bytes).map((ui: uint64) => cast[T](ui)) +proc encode*(i: Natural | enum): seq[byte] = + cast[uint64](i).encode + +proc decode*(T: typedesc[Natural | enum], bytes: seq[byte]): ?!T = + uint64.decode(bytes).map((ui: uint64) => cast[T](ui)) diff --git a/codex/stores/repostore/operations.nim b/codex/stores/repostore/operations.nim index e000bb0a4..dcacbd62f 100644 --- a/codex/stores/repostore/operations.nim +++ b/codex/stores/repostore/operations.nim @@ -32,12 +32,17 @@ declareGauge(codex_repostore_blocks, "codex repostore blocks") declareGauge(codex_repostore_bytes_used, "codex repostore bytes used") declareGauge(codex_repostore_bytes_reserved, "codex repostore bytes reserved") -proc putLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural, blkCid: Cid, proof: CodexProof): Future[?!StoreResultKind] {.async.} = +proc putLeafMetadata*( + self: RepoStore, treeCid: Cid, index: Natural, blkCid: Cid, proof: CodexProof +): Future[?!StoreResultKind] {.async.} = without key =? createBlockCidAndProofMetadataKey(treeCid, index), err: return failure(err) - await self.metaDs.modifyGet(key, - proc (maybeCurrMd: ?LeafMetadata): Future[(?LeafMetadata, StoreResultKind)] {.async.} = + await self.metaDs.modifyGet( + key, + proc( + maybeCurrMd: ?LeafMetadata + ): Future[(?LeafMetadata, StoreResultKind)] {.async.} = var md: LeafMetadata res: StoreResultKind @@ -49,10 +54,12 @@ proc putLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural, blkCid: Cid md = LeafMetadata(blkCid: blkCid, proof: proof) res = Stored - (md.some, res) + (md.some, res), ) -proc getLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!LeafMetadata] {.async.} = +proc getLeafMetadata*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!LeafMetadata] {.async.} = without key =? createBlockCidAndProofMetadataKey(treeCid, index), err: return failure(err) @@ -64,9 +71,12 @@ proc getLeafMetadata*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!L success(leafMd) -proc updateTotalBlocksCount*(self: RepoStore, plusCount: Natural = 0, minusCount: Natural = 0): Future[?!void] {.async.} = - await self.metaDs.modify(CodexTotalBlocksKey, - proc (maybeCurrCount: ?Natural): Future[?Natural] {.async.} = +proc updateTotalBlocksCount*( + self: RepoStore, plusCount: Natural = 0, minusCount: Natural = 0 +): Future[?!void] {.async.} = + await self.metaDs.modify( + CodexTotalBlocksKey, + proc(maybeCurrCount: ?Natural): Future[?Natural] {.async.} = let count: Natural = if currCount =? 
maybeCurrCount: currCount + plusCount - minusCount @@ -75,42 +85,49 @@ proc updateTotalBlocksCount*(self: RepoStore, plusCount: Natural = 0, minusCount self.totalBlocks = count codex_repostore_blocks.set(count.int64) - count.some + count.some, ) proc updateQuotaUsage*( - self: RepoStore, - plusUsed: NBytes = 0.NBytes, - minusUsed: NBytes = 0.NBytes, - plusReserved: NBytes = 0.NBytes, - minusReserved: NBytes = 0.NBytes + self: RepoStore, + plusUsed: NBytes = 0.NBytes, + minusUsed: NBytes = 0.NBytes, + plusReserved: NBytes = 0.NBytes, + minusReserved: NBytes = 0.NBytes, ): Future[?!void] {.async.} = - await self.metaDs.modify(QuotaUsedKey, - proc (maybeCurrUsage: ?QuotaUsage): Future[?QuotaUsage] {.async.} = + await self.metaDs.modify( + QuotaUsedKey, + proc(maybeCurrUsage: ?QuotaUsage): Future[?QuotaUsage] {.async.} = var usage: QuotaUsage if currUsage =? maybeCurrUsage: - usage = QuotaUsage(used: currUsage.used + plusUsed - minusUsed, reserved: currUsage.reserved + plusReserved - minusReserved) + usage = QuotaUsage( + used: currUsage.used + plusUsed - minusUsed, + reserved: currUsage.reserved + plusReserved - minusReserved, + ) else: - usage = QuotaUsage(used: plusUsed - minusUsed, reserved: plusReserved - minusReserved) + usage = + QuotaUsage(used: plusUsed - minusUsed, reserved: plusReserved - minusReserved) if usage.used + usage.reserved > self.quotaMaxBytes: - raise newException(QuotaNotEnoughError, + raise newException( + QuotaNotEnoughError, "Quota usage would exceed the limit. Used: " & $usage.used & ", reserved: " & - $usage.reserved & ", limit: " & $self.quotaMaxBytes) + $usage.reserved & ", limit: " & $self.quotaMaxBytes, + ) else: self.quotaUsage = usage codex_repostore_bytes_used.set(usage.used.int64) codex_repostore_bytes_reserved.set(usage.reserved.int64) - return usage.some + return usage.some, ) proc updateBlockMetadata*( - self: RepoStore, - cid: Cid, - plusRefCount: Natural = 0, - minusRefCount: Natural = 0, - minExpiry: SecondsSince1970 = 0 + self: RepoStore, + cid: Cid, + plusRefCount: Natural = 0, + minusRefCount: Natural = 0, + minExpiry: SecondsSince1970 = 0, ): Future[?!void] {.async.} = if cid.isEmpty: return success() @@ -118,19 +135,24 @@ proc updateBlockMetadata*( without metaKey =? createBlockExpirationMetadataKey(cid), err: return failure(err) - await self.metaDs.modify(metaKey, - proc (maybeCurrBlockMd: ?BlockMetadata): Future[?BlockMetadata] {.async.} = + await self.metaDs.modify( + metaKey, + proc(maybeCurrBlockMd: ?BlockMetadata): Future[?BlockMetadata] {.async.} = if currBlockMd =? maybeCurrBlockMd: BlockMetadata( size: currBlockMd.size, expiry: max(currBlockMd.expiry, minExpiry), - refCount: currBlockMd.refCount + plusRefCount - minusRefCount + refCount: currBlockMd.refCount + plusRefCount - minusRefCount, ).some else: - raise newException(BlockNotFoundError, "Metadata for block with cid " & $cid & " not found") + raise newException( + BlockNotFoundError, "Metadata for block with cid " & $cid & " not found" + ), ) -proc storeBlock*(self: RepoStore, blk: Block, minExpiry: SecondsSince1970): Future[?!StoreResult] {.async.} = +proc storeBlock*( + self: RepoStore, blk: Block, minExpiry: SecondsSince1970 +): Future[?!StoreResult] {.async.} = if blk.isEmpty: return success(StoreResult(kind: AlreadyInStore)) @@ -140,15 +162,20 @@ proc storeBlock*(self: RepoStore, blk: Block, minExpiry: SecondsSince1970): Futu without blkKey =? 
makePrefixKey(self.postFixLen, blk.cid), err: return failure(err) - await self.metaDs.modifyGet(metaKey, - proc (maybeCurrMd: ?BlockMetadata): Future[(?BlockMetadata, StoreResult)] {.async.} = + await self.metaDs.modifyGet( + metaKey, + proc(maybeCurrMd: ?BlockMetadata): Future[(?BlockMetadata, StoreResult)] {.async.} = var md: BlockMetadata res: StoreResult if currMd =? maybeCurrMd: if currMd.size == blk.data.len.NBytes: - md = BlockMetadata(size: currMd.size, expiry: max(currMd.expiry, minExpiry), refCount: currMd.refCount) + md = BlockMetadata( + size: currMd.size, + expiry: max(currMd.expiry, minExpiry), + refCount: currMd.refCount, + ) res = StoreResult(kind: AlreadyInStore) # making sure that the block acutally is stored in the repoDs @@ -156,21 +183,28 @@ proc storeBlock*(self: RepoStore, blk: Block, minExpiry: SecondsSince1970): Futu raise err if not hasBlock: - warn "Block metadata is present, but block is absent. Restoring block.", cid = blk.cid + warn "Block metadata is present, but block is absent. Restoring block.", + cid = blk.cid if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption: raise err else: - raise newException(CatchableError, "Repo already stores a block with the same cid but with a different size, cid: " & $blk.cid) + raise newException( + CatchableError, + "Repo already stores a block with the same cid but with a different size, cid: " & + $blk.cid, + ) else: md = BlockMetadata(size: blk.data.len.NBytes, expiry: minExpiry, refCount: 0) res = StoreResult(kind: Stored, used: blk.data.len.NBytes) if err =? (await self.repoDs.put(blkKey, blk.data)).errorOption: raise err - (md.some, res) + (md.some, res), ) -proc tryDeleteBlock*(self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.low): Future[?!DeleteResult] {.async.} = +proc tryDeleteBlock*( + self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.low +): Future[?!DeleteResult] {.async.} = if cid.isEmpty: return success(DeleteResult(kind: InUse)) @@ -180,8 +214,11 @@ proc tryDeleteBlock*(self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.l without blkKey =? makePrefixKey(self.postFixLen, cid), err: return failure(err) - await self.metaDs.modifyGet(metaKey, - proc (maybeCurrMd: ?BlockMetadata): Future[(?BlockMetadata, DeleteResult)] {.async.} = + await self.metaDs.modifyGet( + metaKey, + proc( + maybeCurrMd: ?BlockMetadata + ): Future[(?BlockMetadata, DeleteResult)] {.async.} = var maybeMeta: ?BlockMetadata res: DeleteResult @@ -209,5 +246,5 @@ proc tryDeleteBlock*(self: RepoStore, cid: Cid, expiryLimit = SecondsSince1970.l if err =? (await self.repoDs.delete(blkKey)).errorOption: raise err - (maybeMeta, res) + (maybeMeta, res), ) diff --git a/codex/stores/repostore/store.nim b/codex/stores/repostore/store.nim index 63c59d2b6..2b14d6b7a 100644 --- a/codex/stores/repostore/store.nim +++ b/codex/stores/repostore/store.nim @@ -61,7 +61,9 @@ method getBlock*(self: RepoStore, cid: Cid): Future[?!Block] {.async.} = trace "Got block for cid", cid return Block.new(cid, data, verify = true) -method getBlockAndProof*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!(Block, CodexProof)] {.async.} = +method getBlockAndProof*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!(Block, CodexProof)] {.async.} = without leafMd =? 
await self.getLeafMetadata(treeCid, index), err: return failure(err) @@ -70,7 +72,9 @@ method getBlockAndProof*(self: RepoStore, treeCid: Cid, index: Natural): Future[ success((blk, leafMd.proof)) -method getBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!Block] {.async.} = +method getBlock*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!Block] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: return failure(err) @@ -86,24 +90,20 @@ method getBlock*(self: RepoStore, address: BlockAddress): Future[?!Block] = self.getBlock(address.cid) method ensureExpiry*( - self: RepoStore, - cid: Cid, - expiry: SecondsSince1970 + self: RepoStore, cid: Cid, expiry: SecondsSince1970 ): Future[?!void] {.async.} = ## Ensure that block's associated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact ## if expiry <= 0: - return failure(newException(ValueError, "Expiry timestamp must be larger then zero")) + return + failure(newException(ValueError, "Expiry timestamp must be larger then zero")) await self.updateBlockMetadata(cid, minExpiry = expiry) method ensureExpiry*( - self: RepoStore, - treeCid: Cid, - index: Natural, - expiry: SecondsSince1970 + self: RepoStore, treeCid: Cid, index: Natural, expiry: SecondsSince1970 ): Future[?!void] {.async.} = ## Ensure that block's associated expiry is at least given timestamp ## If the current expiry is lower then it is updated to the given one, otherwise it is left intact @@ -115,11 +115,7 @@ method ensureExpiry*( await self.ensureExpiry(leafMd.blkCid, expiry) method putCidAndProof*( - self: RepoStore, - treeCid: Cid, - index: Natural, - blkCid: Cid, - proof: CodexProof + self: RepoStore, treeCid: Cid, index: Natural, blkCid: Cid, proof: CodexProof ): Future[?!void] {.async.} = ## Put a block to the blockstore ## @@ -145,29 +141,22 @@ method putCidAndProof*( return success() method getCidAndProof*( - self: RepoStore, - treeCid: Cid, - index: Natural + self: RepoStore, treeCid: Cid, index: Natural ): Future[?!(Cid, CodexProof)] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: return failure(err) success((leafMd.blkCid, leafMd.proof)) -method getCid*( - self: RepoStore, - treeCid: Cid, - index: Natural -): Future[?!Cid] {.async.} = +method getCid*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!Cid] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: return failure(err) success(leafMd.blkCid) method putBlock*( - self: RepoStore, - blk: Block, - ttl = Duration.none): Future[?!void] {.async.} = + self: RepoStore, blk: Block, ttl = Duration.none +): Future[?!void] {.async.} = ## Put a block to the blockstore ## @@ -223,14 +212,17 @@ method delBlock*(self: RepoStore, cid: Cid): Future[?!void] {.async.} = return success() -method delBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!void] {.async.} = +method delBlock*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!void] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: if err of BlockNotFoundError: return success() else: return failure(err) - if err =? (await self.updateBlockMetadata(leafMd.blkCid, minusRefCount = 1)).errorOption: + if err =? 
+ (await self.updateBlockMetadata(leafMd.blkCid, minusRefCount = 1)).errorOption: if not (err of BlockNotFoundError): return failure(err) @@ -253,7 +245,9 @@ method hasBlock*(self: RepoStore, cid: Cid): Future[?!bool] {.async.} = return await self.repoDs.has(key) -method hasBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!bool] {.async.} = +method hasBlock*( + self: RepoStore, treeCid: Cid, index: Natural +): Future[?!bool] {.async.} = without leafMd =? await self.getLeafMetadata(treeCid, index), err: if err of BlockNotFoundError: return success(false) @@ -263,23 +257,21 @@ method hasBlock*(self: RepoStore, treeCid: Cid, index: Natural): Future[?!bool] await self.hasBlock(leafMd.blkCid) method listBlocks*( - self: RepoStore, - blockType = BlockType.Manifest + self: RepoStore, blockType = BlockType.Manifest ): Future[?!AsyncIter[?Cid]] {.async.} = ## Get the list of blocks in the RepoStore. ## This is an intensive operation ## - var - iter = AsyncIter[?Cid]() + var iter = AsyncIter[?Cid]() let key = - case blockType: + case blockType of BlockType.Manifest: CodexManifestKey of BlockType.Block: CodexBlocksKey of BlockType.Both: CodexRepoKey - let query = Query.init(key, value=false) + let query = Query.init(key, value = false) without queryIter =? (await self.repoDs.query(query)), err: trace "Error querying cids in repo", blockType, err = err.msg return failure(err) @@ -300,13 +292,12 @@ method listBlocks*( return success iter proc createBlockExpirationQuery(maxNumber: int, offset: int): ?!Query = - let queryKey = ? createBlockExpirationMetadataQueryKey() + let queryKey = ?createBlockExpirationMetadataQueryKey() success Query.init(queryKey, offset = offset, limit = maxNumber) method getBlockExpirations*( - self: RepoStore, - maxNumber: int, - offset: int): Future[?!AsyncIter[BlockExpiration]] {.async, base.} = + self: RepoStore, maxNumber: int, offset: int +): Future[?!AsyncIter[BlockExpiration]] {.async, base.} = ## Get iterator with block expirations ## @@ -322,17 +313,18 @@ method getBlockExpirations*( error "Unable to convert QueryIter to AsyncIter", err = err.msg return failure(err) - let - filteredIter: AsyncIter[KeyVal[BlockMetadata]] = await asyncQueryIter.filterSuccess() + let filteredIter: AsyncIter[KeyVal[BlockMetadata]] = + await asyncQueryIter.filterSuccess() - proc mapping (kv: KeyVal[BlockMetadata]): Future[?BlockExpiration] {.async.} = + proc mapping(kv: KeyVal[BlockMetadata]): Future[?BlockExpiration] {.async.} = without cid =? 
Cid.init(kv.key.value).mapFailure, err: error "Failed decoding cid", err = err.msg return BlockExpiration.none BlockExpiration(cid: cid, expiry: kv.value.expiry).some - let blockExpIter = await mapFilter[KeyVal[BlockMetadata], BlockExpiration](filteredIter, mapping) + let blockExpIter = + await mapFilter[KeyVal[BlockMetadata], BlockExpiration](filteredIter, mapping) success(blockExpIter) diff --git a/codex/stores/repostore/types.nim b/codex/stores/repostore/types.nim index 2f88183da..3d455d12c 100644 --- a/codex/stores/repostore/types.nim +++ b/codex/stores/repostore/types.nim @@ -56,17 +56,17 @@ type expiry*: SecondsSince1970 DeleteResultKind* {.serialize.} = enum - Deleted = 0, # block removed from store - InUse = 1, # block not removed, refCount > 0 and not expired - NotFound = 2 # block not found in store + Deleted = 0 # block removed from store + InUse = 1 # block not removed, refCount > 0 and not expired + NotFound = 2 # block not found in store DeleteResult* {.serialize.} = object kind*: DeleteResultKind released*: NBytes StoreResultKind* {.serialize.} = enum - Stored = 0, # new block stored - AlreadyInStore = 1 # block already in store + Stored = 0 # new block stored + AlreadyInStore = 1 # block already in store StoreResult* {.serialize.} = object kind*: StoreResultKind @@ -94,7 +94,7 @@ func new*( clock: Clock = SystemClock.new(), postFixLen = 2, quotaMaxBytes = DefaultQuotaBytes, - blockTtl = DefaultBlockTtl + blockTtl = DefaultBlockTtl, ): RepoStore = ## Create new instance of a RepoStore ## @@ -105,5 +105,5 @@ func new*( postFixLen: postFixLen, quotaMaxBytes: quotaMaxBytes, blockTtl: blockTtl, - onBlockStored: CidCallback.none + onBlockStored: CidCallback.none, ) diff --git a/codex/stores/treehelper.nim b/codex/stores/treehelper.nim index 485cbfc26..e1f5d48d8 100644 --- a/codex/stores/treehelper.nim +++ b/codex/stores/treehelper.nim @@ -9,7 +9,8 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import std/sugar import pkg/chronos @@ -22,13 +23,18 @@ import ./blockstore import ../utils/asynciter import ../merkletree -proc putSomeProofs*(store: BlockStore, tree: CodexTree, iter: Iter[int]): Future[?!void] {.async.} = +proc putSomeProofs*( + store: BlockStore, tree: CodexTree, iter: Iter[int] +): Future[?!void] {.async.} = without treeCid =? tree.rootCid, err: return failure(err) for i in iter: - if i notin 0.. i.ord)) proc putAllProofs*(store: BlockStore, tree: CodexTree): Future[?!void] = - store.putSomeProofs(tree, Iter[int].new(0..= self.size type LPStreamReadError* = object of LPStreamError - par*: ref CatchableError + par*: ref CatchableError proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError = var w = newException(LPStreamReadError, "Read stream failed") @@ -83,9 +76,7 @@ proc newLPStreamReadError*(p: ref CatchableError): ref LPStreamReadError = result = w method readOnce*( - self: StoreStream, - pbytes: pointer, - nbytes: int + self: StoreStream, pbytes: pointer, nbytes: int ): Future[int] {.async: (raises: [CancelledError, LPStreamError]).} = ## Read `nbytes` from current position in the StoreStream into output buffer pointed by `pbytes`. ## Return how many bytes were actually read before EOF was encountered. 
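# --- Illustrative aside (not part of the patch): the readOnce hunk below
# maps a linear stream offset onto (block number, offset within block) and
# clamps each copy so it never crosses a block boundary or EOF. A sketch
# with assumed sizes (blockSize, streamSize, offset, wanted are made up):
let
  blockSize = 65536    # assumed manifest block size in bytes
  streamSize = 200_000 # assumed total stream size in bytes
  offset = 70_000      # current read position
  wanted = 10_000      # bytes requested by the caller

let
  blockNum = offset div blockSize    # block holding `offset` -> 1
  blockOffset = offset mod blockSize # position inside that block -> 4464
  readBytes = min([
    streamSize - offset,    # never read past EOF
    wanted,                 # never exceed the request
    blockSize - blockOffset # never cross the block boundary
  ])

doAssert blockNum == 1 and readBytes == 10_000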
@@ -97,24 +88,34 @@ method readOnce*( # The loop iterates over blocks in the StoreStream, # reading them and copying their data into outbuf - var read = 0 # Bytes read so far, and thus write offset in the outbuf + var read = 0 # Bytes read so far, and thus write offset in the outbuf while read < nbytes and not self.atEof: # Compute from the current stream position `self.offset` the block num/offset to read # Compute how many bytes to read from this block let - blockNum = self.offset div self.manifest.blockSize.int + blockNum = self.offset div self.manifest.blockSize.int blockOffset = self.offset mod self.manifest.blockSize.int - readBytes = min([self.size - self.offset, - nbytes - read, - self.manifest.blockSize.int - blockOffset]) - address = BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum) - + readBytes = min( + [ + self.size - self.offset, + nbytes - read, + self.manifest.blockSize.int - blockOffset, + ] + ) + address = + BlockAddress(leaf: true, treeCid: self.manifest.treeCid, index: blockNum) # Read contents of block `blockNum` without blk =? (await self.store.getBlock(address)).tryGet.catch, error: raise newLPStreamReadError(error) - trace "Reading bytes from store stream", manifestCid = self.manifest.cid.get(), numBlocks = self.manifest.blocksCount, blockNum, blkCid = blk.cid, bytes = readBytes, blockOffset + trace "Reading bytes from store stream", + manifestCid = self.manifest.cid.get(), + numBlocks = self.manifest.blocksCount, + blockNum, + blkCid = blk.cid, + bytes = readBytes, + blockOffset # Copy `readBytes` bytes starting at `blockOffset` from the block into the outbuf if blk.isEmpty: @@ -130,5 +131,5 @@ method readOnce*( method closeImpl*(self: StoreStream) {.async.} = trace "Closing StoreStream" - self.offset = self.size # set Eof + self.offset = self.size # set Eof await procCall LPStream(self).closeImpl() diff --git a/codex/systemclock.nim b/codex/systemclock.nim index 25ac42162..6226f627d 100644 --- a/codex/systemclock.nim +++ b/codex/systemclock.nim @@ -2,8 +2,7 @@ import std/times import pkg/upraises import ./clock -type - SystemClock* = ref object of Clock +type SystemClock* = ref object of Clock method now*(clock: SystemClock): SecondsSince1970 {.upraises: [].} = let now = times.now().utc diff --git a/codex/units.nim b/codex/units.nim index 73a67a2cd..b600103f5 100644 --- a/codex/units.nim +++ b/codex/units.nim @@ -13,52 +13,78 @@ import std/strutils import ./logutils -type - NBytes* = distinct Natural +type NBytes* = distinct Natural template basicMaths(T: untyped) = - proc `+` *(x: T, y: static[int]): T = T(`+`(x.Natural, y.Natural)) - proc `-` *(x: T, y: static[int]): T = T(`-`(x.Natural, y.Natural)) - proc `*` *(x: T, y: static[int]): T = T(`*`(x.Natural, y.Natural)) - proc `+` *(x, y: T): T = T(`+`(x.Natural, y.Natural)) - proc `-` *(x, y: T): T = T(`-`(x.Natural, y.Natural)) - proc `*` *(x, y: T): T = T(`*`(x.Natural, y.Natural)) - proc `<` *(x, y: T): bool {.borrow.} - proc `<=` *(x, y: T): bool {.borrow.} - proc `==` *(x, y: T): bool {.borrow.} - proc `+=` *(x: var T, y: T) {.borrow.} - proc `-=` *(x: var T, y: T) {.borrow.} - proc `hash` *(x: T): Hash {.borrow.} + proc `+`*(x: T, y: static[int]): T = + T(`+`(x.Natural, y.Natural)) + + proc `-`*(x: T, y: static[int]): T = + T(`-`(x.Natural, y.Natural)) + + proc `*`*(x: T, y: static[int]): T = + T(`*`(x.Natural, y.Natural)) + + proc `+`*(x, y: T): T = + T(`+`(x.Natural, y.Natural)) + + proc `-`*(x, y: T): T = + T(`-`(x.Natural, y.Natural)) + + proc `*`*(x, y: T): T = + T(`*`(x.Natural, 
y.Natural)) + + proc `<`*(x, y: T): bool {.borrow.} + proc `<=`*(x, y: T): bool {.borrow.} + proc `==`*(x, y: T): bool {.borrow.} + proc `+=`*(x: var T, y: T) {.borrow.} + proc `-=`*(x: var T, y: T) {.borrow.} + proc `hash`*(x: T): Hash {.borrow.} template divMaths(T: untyped) = - proc `mod` *(x, y: T): T = T(`mod`(x.Natural, y.Natural)) - proc `div` *(x, y: T): Natural = `div`(x.Natural, y.Natural) + proc `mod`*(x, y: T): T = + T(`mod`(x.Natural, y.Natural)) + + proc `div`*(x, y: T): Natural = + `div`(x.Natural, y.Natural) + # proc `/` *(x, y: T): Natural = `/`(x.Natural, y.Natural) basicMaths(NBytes) divMaths(NBytes) -proc `$`*(ts: NBytes): string = $(int(ts)) & "'NByte" -proc `'nb`*(n: string): NBytes = parseInt(n).NBytes +proc `$`*(ts: NBytes): string = + $(int(ts)) & "'NByte" + +proc `'nb`*(n: string): NBytes = + parseInt(n).NBytes -logutils.formatIt(NBytes): $it +logutils.formatIt(NBytes): + $it const KiB = 1024.NBytes # ByteSz, 1 kibibyte = 1,024 ByteSz - MiB = KiB * 1024 # ByteSz, 1 mebibyte = 1,048,576 ByteSz - GiB = MiB * 1024 # ByteSz, 1 gibibyte = 1,073,741,824 ByteSz + MiB = KiB * 1024 # ByteSz, 1 mebibyte = 1,048,576 ByteSz + GiB = MiB * 1024 # ByteSz, 1 gibibyte = 1,073,741,824 ByteSz -proc KiBs*(v: Natural): NBytes = v.NBytes * KiB -proc MiBs*(v: Natural): NBytes = v.NBytes * MiB -proc GiBs*(v: Natural): NBytes = v.NBytes * GiB +proc KiBs*(v: Natural): NBytes = + v.NBytes * KiB -func divUp*[T: NBytes](a, b : T): int = +proc MiBs*(v: Natural): NBytes = + v.NBytes * MiB + +proc GiBs*(v: Natural): NBytes = + v.NBytes * GiB + +func divUp*[T: NBytes](a, b: T): int = ## Division with result rounded up (rather than truncated as in 'div') assert(b != T(0)) - if a==T(0): int(0) else: int( ((a - T(1)) div b) + 1 ) + if a == T(0): + int(0) + else: + int(((a - T(1)) div b) + 1) when isMainModule: - import unittest2 suite "maths": diff --git a/codex/utils.nim b/codex/utils.nim index 617f30cb9..30d84e74b 100644 --- a/codex/utils.nim +++ b/codex/utils.nim @@ -23,20 +23,20 @@ export asyncheapqueue, fileutils, asynciter, chronos when defined(posix): import os, posix -func divUp*[T: SomeInteger](a, b : T): T = +func divUp*[T: SomeInteger](a, b: T): T = ## Division with result rounded up (rather than truncated as in 'div') assert(b != T(0)) - if a==T(0): T(0) else: ((a - T(1)) div b) + T(1) + if a == T(0): + T(0) + else: + ((a - T(1)) div b) + T(1) -func roundUp*[T](a, b : T): T = +func roundUp*[T](a, b: T): T = ## Round up 'a' to the next value divisible by 'b' - divUp(a,b) * b + divUp(a, b) * b proc orElse*[A](a, b: Option[A]): Option[A] = - if (a.isSome()): - a - else: - b + if (a.isSome()): a else: b template findIt*(s, pred: untyped): untyped = ## Returns the index of the first object matching a predicate, or -1 if no @@ -57,45 +57,48 @@ template findIt*(s, pred: untyped): untyped = index when not declared(parseDuration): # Odd code formatting to minimize diff v. mainLine - const Whitespace = {' ', '\t', '\v', '\r', '\l', '\f'} - - func toLowerAscii(c: char): char = - if c in {'A'..'Z'}: char(uint8(c) xor 0b0010_0000'u8) else: c - - func parseDuration*(s: string, size: var Duration): int = - ## Parse a size qualified by simple time into `Duration`. 
- ## - runnableExamples: - var res: Duration # caller must still know if 'b' refers to bytes|bits - doAssert parseDuration("10H", res) == 3 - doAssert res == initDuration(hours=10) - doAssert parseDuration("64m", res) == 6 - doAssert res == initDuration(minutes=64) - doAssert parseDuration("7m/block", res) == 2 # '/' stops parse - doAssert res == initDuration(minutes=7) # 1 shl 30, forced binary metric - doAssert parseDuration("3d", res) == 2 # '/' stops parse - doAssert res == initDuration(days=3) # 1 shl 30, forced binary metric - - const prefix = "s" & "mhdw" # byte|bit & lowCase metric-ish prefixes - const timeScale = [1.0, 60.0, 3600.0, 86_400.0, 604_800.0] - - var number: float - var scale = 1.0 - result = parseFloat(s, number) - if number < 0: # While parseFloat accepts negatives .. - result = 0 #.. we do not since sizes cannot be < 0 - else: - let start = result # Save spot to maybe unwind white to EOS - while result < s.len and s[result] in Whitespace: - inc result - if result < s.len: # Illegal starting char => unity - if (let si = prefix.find(s[result].toLowerAscii); si >= 0): - inc result # Now parse the scale - scale = timeScale[si] - else: # Unwind result advancement when there.. - result = start #..is no unit to the end of `s`. - var sizeF = number * scale + 0.5 # Saturate to int64.high when too big - size = seconds(int(sizeF)) + const Whitespace = {' ', '\t', '\v', '\r', '\l', '\f'} + + func toLowerAscii(c: char): char = + if c in {'A' .. 'Z'}: + char(uint8(c) xor 0b0010_0000'u8) + else: + c + + func parseDuration*(s: string, size: var Duration): int = + ## Parse a size qualified by simple time into `Duration`. + ## + runnableExamples: + var res: Duration # caller must still know if 'b' refers to bytes|bits + doAssert parseDuration("10H", res) == 3 + doAssert res == initDuration(hours = 10) + doAssert parseDuration("64m", res) == 6 + doAssert res == initDuration(minutes = 64) + doAssert parseDuration("7m/block", res) == 2 # '/' stops parse + doAssert res == initDuration(minutes = 7) # 1 shl 30, forced binary metric + doAssert parseDuration("3d", res) == 2 # '/' stops parse + doAssert res == initDuration(days = 3) # 1 shl 30, forced binary metric + + const prefix = "s" & "mhdw" # byte|bit & lowCase metric-ish prefixes + const timeScale = [1.0, 60.0, 3600.0, 86_400.0, 604_800.0] + + var number: float + var scale = 1.0 + result = parseFloat(s, number) + if number < 0: # While parseFloat accepts negatives .. + result = 0 #.. we do not since sizes cannot be < 0 + else: + let start = result # Save spot to maybe unwind white to EOS + while result < s.len and s[result] in Whitespace: + inc result + if result < s.len: # Illegal starting char => unity + if (let si = prefix.find(s[result].toLowerAscii); si >= 0): + inc result # Now parse the scale + scale = timeScale[si] + else: # Unwind result advancement when there.. + result = start #..is no unit to the end of `s`. + var sizeF = number * scale + 0.5 # Saturate to int64.high when too big + size = seconds(int(sizeF)) # Block all/most signals in the current thread, so we don't interfere with regular signal # handling elsewhere. 
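# --- Illustrative aside (not part of the patch): sample values for the
# divUp/roundUp helpers reformatted above, assuming those definitions
# are in scope:
doAssert divUp(10, 4) == 3    # 10 / 4 = 2.5, rounded up
doAssert divUp(8, 4) == 2     # exact divisions are unchanged
doAssert divUp(0, 4) == 0     # zero stays zero by definition
doAssert roundUp(10, 4) == 12 # smallest multiple of 4 that is >= 10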
@@ -115,12 +118,10 @@ proc ignoreSignalsInThread*() = SIGXCPU = 24 SIGSEGV = 11 SIGBUS = 7 - if sigdelset(signalMask, SIGPWR) != 0 or - sigdelset(signalMask, SIGXCPU) != 0 or - sigdelset(signalMask, SIGSEGV) != 0 or - sigdelset(signalMask, SIGBUS) != 0: + if sigdelset(signalMask, SIGPWR) != 0 or sigdelset(signalMask, SIGXCPU) != 0 or + sigdelset(signalMask, SIGSEGV) != 0 or sigdelset(signalMask, SIGBUS) != 0: echo osErrorMsg(osLastError()) quit(QuitFailure) if pthread_sigmask(SIG_BLOCK, signalMask, oldSignalMask) != 0: echo osErrorMsg(osLastError()) - quit(QuitFailure) \ No newline at end of file + quit(QuitFailure) diff --git a/codex/utils/addrutils.nim b/codex/utils/addrutils.nim index 3eec3015c..a9ec54f56 100644 --- a/codex/utils/addrutils.nim +++ b/codex/utils/addrutils.nim @@ -8,7 +8,8 @@ ## those terms. import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import std/strutils import std/options @@ -20,26 +21,26 @@ import pkg/stew/endians2 func remapAddr*( address: MultiAddress, ip: Option[IpAddress] = IpAddress.none, - port: Option[Port] = Port.none + port: Option[Port] = Port.none, ): MultiAddress = ## Remap addresses to new IP and/or Port ## - var - parts = ($address).split("/") + var parts = ($address).split("/") - parts[2] = if ip.isSome: + parts[2] = + if ip.isSome: $ip.get else: parts[2] - parts[4] = if port.isSome: + parts[4] = + if port.isSome: $port.get else: parts[4] - MultiAddress.init(parts.join("/")) - .expect("Should construct multiaddress") + MultiAddress.init(parts.join("/")).expect("Should construct multiaddress") proc getMultiAddrWithIPAndUDPPort*(ip: IpAddress, port: Port): MultiAddress = ## Creates a MultiAddress with the specified IP address and UDP port @@ -50,43 +51,41 @@ proc getMultiAddrWithIPAndUDPPort*(ip: IpAddress, port: Port): MultiAddress = ## ## Returns: ## A MultiAddress in the format "/ip4/
<ip>/udp/<port>" or "/ip6/<ip>
/udp/" - + let ipFamily = if ip.family == IpAddressFamily.IPv4: "/ip4/" else: "/ip6/" return MultiAddress.init(ipFamily & $ip & "/udp/" & $port).expect("valid multiaddr") -proc getAddressAndPort*(ma: MultiAddress): tuple[ip: Option[IpAddress], port: Option[Port]] = +proc getAddressAndPort*( + ma: MultiAddress +): tuple[ip: Option[IpAddress], port: Option[Port]] = try: # Try IPv4 first let ipv4Result = ma[multiCodec("ip4")] - let ip = if ipv4Result.isOk: - let ipBytes = ipv4Result.get() - .protoArgument() - .expect("Invalid IPv4 format") - let ipArray = [ipBytes[0], ipBytes[1], ipBytes[2], ipBytes[3]] - some(IpAddress(family: IPv4, address_v4: ipArray)) - else: - # Try IPv6 if IPv4 not found - let ipv6Result = ma[multiCodec("ip6")] - if ipv6Result.isOk: - let ipBytes = ipv6Result.get() - .protoArgument() - .expect("Invalid IPv6 format") - var ipArray: array[16, byte] - for i in 0..15: - ipArray[i] = ipBytes[i] - some(IpAddress(family: IPv6, address_v6: ipArray)) + let ip = + if ipv4Result.isOk: + let ipBytes = ipv4Result.get().protoArgument().expect("Invalid IPv4 format") + let ipArray = [ipBytes[0], ipBytes[1], ipBytes[2], ipBytes[3]] + some(IpAddress(family: IPv4, address_v4: ipArray)) else: - none(IpAddress) + # Try IPv6 if IPv4 not found + let ipv6Result = ma[multiCodec("ip6")] + if ipv6Result.isOk: + let ipBytes = ipv6Result.get().protoArgument().expect("Invalid IPv6 format") + var ipArray: array[16, byte] + for i in 0 .. 15: + ipArray[i] = ipBytes[i] + some(IpAddress(family: IPv6, address_v6: ipArray)) + else: + none(IpAddress) # Get TCP Port let portResult = ma[multiCodec("tcp")] - let port = if portResult.isOk: - let portBytes = portResult.get() - .protoArgument() - .expect("Invalid port format") - some(Port(fromBytesBE(uint16, portBytes))) - else: - none(Port) + let port = + if portResult.isOk: + let portBytes = portResult.get().protoArgument().expect("Invalid port format") + some(Port(fromBytesBE(uint16, portBytes))) + else: + none(Port) (ip: ip, port: port) except Exception: (ip: none(IpAddress), port: none(Port)) diff --git a/codex/utils/asyncheapqueue.nim b/codex/utils/asyncheapqueue.nim index e7d7edade..1b0dd8bcb 100644 --- a/codex/utils/asyncheapqueue.nim +++ b/codex/utils/asyncheapqueue.nim @@ -15,7 +15,8 @@ import pkg/stew/results type QueueType* {.pure.} = enum - Min, Max + Min + Max AsyncHeapQueue*[T] = ref object of RootRef ## A priority queue @@ -31,11 +32,11 @@ type maxsize: int AsyncHQErrors* {.pure.} = enum - Empty, Full + Empty + Full proc newAsyncHeapQueue*[T]( - maxsize: int = 0, - queueType: QueueType = QueueType.Min + maxsize: int = 0, queueType: QueueType = QueueType.Min ): AsyncHeapQueue[T] = ## Creates a new asynchronous queue ``AsyncHeapQueue``. ## @@ -54,12 +55,12 @@ proc wakeupNext(waiters: var seq[Future[void]]) {.inline.} = var waiter = waiters[i] inc(i) - if not(waiter.finished()): + if not (waiter.finished()): waiter.complete() break if i > 0: - waiters.delete(0..(i-1)) + waiters.delete(0 .. (i - 1)) proc heapCmp[T](x, y: T, max: bool = false): bool {.inline.} = if max: @@ -93,17 +94,17 @@ proc siftup[T](heap: AsyncHeapQueue[T], p: int) = let startpos = pos let newitem = heap[pos] # Bubble up the smaller child until hitting a leaf. - var childpos = 2*pos + 1 # leftmost child position + var childpos = 2 * pos + 1 # leftmost child position while childpos < endpos: # Set childpos to index of smaller child. 
let rightpos = childpos + 1 if rightpos < endpos and - not heapCmp(heap[childpos], heap[rightpos], heap.queueType == QueueType.Max): + not heapCmp(heap[childpos], heap[rightpos], heap.queueType == QueueType.Max): childpos = rightpos # Move the smaller child up. heap.queue[pos] = heap[childpos] pos = childpos - childpos = 2*pos + 1 + childpos = 2 * pos + 1 # The leaf at pos is empty now. Put newitem there, and bubble it up # to its final resting place (by sifting its parents down). heap.queue[pos] = newitem @@ -131,7 +132,7 @@ proc pushNoWait*[T](heap: AsyncHeapQueue[T], item: T): Result[void, AsyncHQError return err(AsyncHQErrors.Full) heap.queue.add(item) - siftdown(heap, 0, len(heap)-1) + siftdown(heap, 0, len(heap) - 1) heap.getters.wakeupNext() return ok() @@ -147,7 +148,7 @@ proc push*[T](heap: AsyncHeapQueue[T], item: T) {.async, gcsafe.} = try: await putter except CatchableError as exc: - if not(heap.full()) and not(putter.cancelled()): + if not (heap.full()) and not (putter.cancelled()): heap.putters.wakeupNext() raise exc @@ -180,7 +181,7 @@ proc pop*[T](heap: AsyncHeapQueue[T]): Future[T] {.async.} = try: await getter except CatchableError as exc: - if not(heap.empty()) and not(getter.cancelled()): + if not (heap.empty()) and not (getter.cancelled()): heap.getters.wakeupNext() raise exc @@ -225,7 +226,9 @@ proc update*[T](heap: AsyncHeapQueue[T], item: T): bool = heap.siftup(0) return true -proc pushOrUpdateNoWait*[T](heap: AsyncHeapQueue[T], item: T): Result[void, AsyncHQErrors] = +proc pushOrUpdateNoWait*[T]( + heap: AsyncHeapQueue[T], item: T +): Result[void, AsyncHQErrors] = ## Update an item if it exists or push a new one ## @@ -285,12 +288,12 @@ proc size*[T](heap: AsyncHeapQueue[T]): int {.inline.} = ## Return the maximum number of elements in ``heap``. heap.maxsize -proc `[]`*[T](heap: AsyncHeapQueue[T], i: Natural) : T {.inline.} = +proc `[]`*[T](heap: AsyncHeapQueue[T], i: Natural): T {.inline.} = ## Access the i-th element of ``heap`` by order from first to last. ## ``heap[0]`` is the first element, ``heap[^1]`` is the last element. heap.queue[i] -proc `[]`*[T](heap: AsyncHeapQueue[T], i: BackwardsIndex) : T {.inline.} = +proc `[]`*[T](heap: AsyncHeapQueue[T], i: BackwardsIndex): T {.inline.} = ## Access the i-th element of ``heap`` by order from first to last. ## ``heap[0]`` is the first element, ``heap[^1]`` is the last element. heap.queue[len(heap.queue) - int(i)] @@ -314,14 +317,16 @@ proc contains*[T](heap: AsyncHeapQueue[T], item: T): bool {.inline.} = ## Return true if ``item`` is in ``heap`` or false if not found. Usually used ## via the ``in`` operator. for e in heap.queue.items(): - if e == item: return true + if e == item: + return true return false proc `$`*[T](heap: AsyncHeapQueue[T]): string = ## Turn an async queue ``heap`` into its string representation. 
var res = "[" for item in heap.queue.items(): - if len(res) > 1: res.add(", ") + if len(res) > 1: + res.add(", ") res.addQuoted(item) res.add("]") res diff --git a/codex/utils/asynciter.nim b/codex/utils/asynciter.nim index a17795525..b5371d240 100644 --- a/codex/utils/asynciter.nim +++ b/codex/utils/asynciter.nim @@ -10,10 +10,9 @@ export iter ## AsyncIter[T] is similar to `Iter[Future[T]]` with addition of methods specific to asynchronous processing ## -type - AsyncIter*[T] = ref object - finished: bool - next*: GenNext[Future[T]] +type AsyncIter*[T] = ref object + finished: bool + next*: GenNext[Future[T]] proc finish*[T](self: AsyncIter[T]): void = self.finished = true @@ -39,7 +38,12 @@ proc flatMap*[T, U](fut: Future[T], fn: Function[T, Future[U]]): Future[U] {.asy let t = await fut await fn(t) -proc new*[T](_: type AsyncIter[T], genNext: GenNext[Future[T]], isFinished: IsFinished, finishOnErr: bool = true): AsyncIter[T] = +proc new*[T]( + _: type AsyncIter[T], + genNext: GenNext[Future[T]], + isFinished: IsFinished, + finishOnErr: bool = true, +): AsyncIter[T] = ## Creates a new Iter using elements returned by supplier function `genNext`. ## Iter is finished whenever `isFinished` returns true. ## @@ -63,7 +67,9 @@ proc new*[T](_: type AsyncIter[T], genNext: GenNext[Future[T]], isFinished: IsFi iter.finish return item else: - raise newException(CatchableError, "AsyncIter is finished but next item was requested") + raise newException( + CatchableError, "AsyncIter is finished but next item was requested" + ) if isFinished(): iter.finish @@ -72,29 +78,30 @@ proc new*[T](_: type AsyncIter[T], genNext: GenNext[Future[T]], isFinished: IsFi return iter proc mapAsync*[T, U](iter: Iter[T], fn: Function[T, Future[U]]): AsyncIter[U] = - AsyncIter[U].new( - genNext = () => fn(iter.next()), - isFinished = () => iter.finished() - ) + AsyncIter[U].new(genNext = () => fn(iter.next()), isFinished = () => iter.finished()) proc new*[U, V: Ordinal](_: type AsyncIter[U], slice: HSlice[U, V]): AsyncIter[U] = ## Creates new Iter from a slice ## let iter = Iter[U].new(slice) - mapAsync[U, U](iter, - proc (i: U): Future[U] {.async.} = - i + mapAsync[U, U]( + iter, + proc(i: U): Future[U] {.async.} = + i, ) -proc new*[U, V, S: Ordinal](_: type AsyncIter[U], a: U, b: V, step: S = 1): AsyncIter[U] = +proc new*[U, V, S: Ordinal]( + _: type AsyncIter[U], a: U, b: V, step: S = 1 +): AsyncIter[U] = ## Creates new Iter in range a..b with specified step (default 1) ## let iter = Iter[U].new(a, b, step) - mapAsync[U, U](iter, - proc (i: U): Future[U] {.async.} = - i + mapAsync[U, U]( + iter, + proc(i: U): Future[U] {.async.} = + i, ) proc empty*[T](_: type AsyncIter[T]): AsyncIter[T] = @@ -103,17 +110,20 @@ proc empty*[T](_: type AsyncIter[T]): AsyncIter[T] = proc genNext(): Future[T] {.raises: [CatchableError].} = raise newException(CatchableError, "Next item requested from an empty AsyncIter") - proc isFinished(): bool = true + + proc isFinished(): bool = + true AsyncIter[T].new(genNext, isFinished) proc map*[T, U](iter: AsyncIter[T], fn: Function[T, Future[U]]): AsyncIter[U] = AsyncIter[U].new( - genNext = () => iter.next().flatMap(fn), - isFinished = () => iter.finished + genNext = () => iter.next().flatMap(fn), isFinished = () => iter.finished ) -proc mapFilter*[T, U](iter: AsyncIter[T], mapPredicate: Function[T, Future[Option[U]]]): Future[AsyncIter[U]] {.async.} = +proc mapFilter*[T, U]( + iter: AsyncIter[T], mapPredicate: Function[T, Future[Option[U]]] +): Future[AsyncIter[U]] {.async.} = var nextFutU: 
Option[Future[U]] proc tryFetch(): Future[void] {.async.} = @@ -145,7 +155,9 @@ proc mapFilter*[T, U](iter: AsyncIter[T], mapPredicate: Function[T, Future[Optio await tryFetch() AsyncIter[U].new(genNext, isFinished) -proc filter*[T](iter: AsyncIter[T], predicate: Function[T, Future[bool]]): Future[AsyncIter[T]] {.async.} = +proc filter*[T]( + iter: AsyncIter[T], predicate: Function[T, Future[bool]] +): Future[AsyncIter[T]] {.async.} = proc wrappedPredicate(t: T): Future[Option[T]] {.async.} = if await predicate(t): some(t) @@ -158,8 +170,9 @@ proc delayBy*[T](iter: AsyncIter[T], d: Duration): AsyncIter[T] = ## Delays emitting each item by given duration ## - map[T, T](iter, - proc (t: T): Future[T] {.async.} = + map[T, T]( + iter, + proc(t: T): Future[T] {.async.} = await sleepAsync(d) - t + t, ) diff --git a/codex/utils/asyncspawn.nim b/codex/utils/asyncspawn.nim index 6717e5e18..95a9f0144 100644 --- a/codex/utils/asyncspawn.nim +++ b/codex/utils/asyncspawn.nim @@ -1,10 +1,10 @@ import pkg/chronos proc asyncSpawn*(future: Future[void], ignore: type CatchableError) = - proc ignoringError {.async.} = + proc ignoringError() {.async.} = try: await future except ignore: discard - asyncSpawn ignoringError() + asyncSpawn ignoringError() diff --git a/codex/utils/asyncstatemachine.nim b/codex/utils/asyncstatemachine.nim index 6bddc24e6..572ae2468 100644 --- a/codex/utils/asyncstatemachine.nim +++ b/codex/utils/asyncstatemachine.nim @@ -3,7 +3,7 @@ import pkg/chronos import ../logutils import ./trackedfutures -{.push raises:[].} +{.push raises: [].} type Machine* = ref object of RootObj @@ -12,9 +12,10 @@ type scheduled: AsyncQueue[Event] started: bool trackedFutures: TrackedFutures + State* = ref object of RootObj Query*[T] = proc(state: State): T - Event* = proc(state: State): ?State {.gcsafe, raises:[].} + Event* = proc(state: State): ?State {.gcsafe, raises: [].} logScope: topics = "statemachine" @@ -26,7 +27,7 @@ method `$`*(state: State): string {.base, gcsafe.} = raiseAssert "not implemented" proc transition(_: type Event, previous, next: State): Event = - return proc (state: State): ?State = + return proc(state: State): ?State = if state == previous: return some next @@ -52,10 +53,10 @@ method onError*(state: State, error: ref CatchableError): ?State {.base.} = raise (ref Defect)(msg: "error in state machine: " & error.msg, parent: error) proc onError(machine: Machine, error: ref CatchableError): Event = - return proc (state: State): ?State = + return proc(state: State): ?State = state.onError(error) -proc run(machine: Machine, state: State) {.async: (raises:[]).} = +proc run(machine: Machine, state: State) {.async: (raises: []).} = try: if next =? 
await state.run(machine): machine.schedule(Event.transition(state, next)) @@ -73,7 +74,11 @@ proc scheduler(machine: Machine) {.async: (raises: []).} = if not running.isNil and not running.finished: trace "cancelling current state", state = $machine.state await running.cancelAndWait() - let fromState = if machine.state.isNil: "" else: $machine.state + let fromState = + if machine.state.isNil: + "" + else: + $machine.state machine.state = next debug "enter state", state = fromState & " => " & $machine.state running = machine.run(machine.state) diff --git a/codex/utils/digest.nim b/codex/utils/digest.nim index 4b3e68bf8..57c4cd4ec 100644 --- a/codex/utils/digest.nim +++ b/codex/utils/digest.nim @@ -1,8 +1,7 @@ - from pkg/libp2p import MultiHash func digestBytes*(mhash: MultiHash): seq[byte] = ## Extract hash digestBytes ## - mhash.data.buffer[mhash.dpos.. 0 and i > b) or - (step < 0 and i < b) + (step > 0 and i > b) or (step < 0 and i < b) Iter[U].new(genNext, isFinished) @@ -83,8 +87,7 @@ proc new*[T](_: type Iter[T], items: seq[T]): Iter[T] = ## Creates a new Iter from a sequence ## - Iter[int].new(0.. items[i]) + Iter[int].new(0 ..< items.len).map((i: int) => items[i]) proc empty*[T](_: type Iter[T]): Iter[T] = ## Creates an empty Iter @@ -92,15 +95,14 @@ proc empty*[T](_: type Iter[T]): Iter[T] = proc genNext(): T {.raises: [CatchableError].} = raise newException(CatchableError, "Next item requested from an empty Iter") - proc isFinished(): bool = true + + proc isFinished(): bool = + true Iter[T].new(genNext, isFinished) proc map*[T, U](iter: Iter[T], fn: Function[T, U]): Iter[U] = - Iter[U].new( - genNext = () => fn(iter.next()), - isFinished = () => iter.finished - ) + Iter[U].new(genNext = () => fn(iter.next()), isFinished = () => iter.finished) proc mapFilter*[T, U](iter: Iter[T], mapPredicate: Function[T, Option[U]]): Iter[U] = var nextUOrErr: Option[Result[U, ref CatchableError]] diff --git a/codex/utils/json.nim b/codex/utils/json.nim index 4113b6326..5bd168464 100644 --- a/codex/utils/json.nim +++ b/codex/utils/json.nim @@ -1,8 +1,8 @@ - import std/options import std/typetraits from pkg/ethers import Address -from pkg/libp2p import Cid, PeerId, SignedPeerRecord, MultiAddress, AddressInfo, init, `$` +from pkg/libp2p import + Cid, PeerId, SignedPeerRecord, MultiAddress, AddressInfo, init, `$` import pkg/contractabi import pkg/codexdht/discv5/node as dn import pkg/serde/json @@ -11,24 +11,27 @@ import ../errors export json - -proc fromJson*( - _: type Cid, - json: JsonNode -): ?!Cid = +proc fromJson*(_: type Cid, json: JsonNode): ?!Cid = expectJsonKind(Cid, JString, json) Cid.init(json.str).mapFailure -func `%`*(cid: Cid): JsonNode = % $cid +func `%`*(cid: Cid): JsonNode = + % $cid -func `%`*(obj: PeerId): JsonNode = % $obj +func `%`*(obj: PeerId): JsonNode = + % $obj -func `%`*(obj: SignedPeerRecord): JsonNode = % $obj +func `%`*(obj: SignedPeerRecord): JsonNode = + % $obj -func `%`*(obj: dn.Address): JsonNode = % $obj +func `%`*(obj: dn.Address): JsonNode = + % $obj -func `%`*(obj: AddressInfo): JsonNode = % $obj.address +func `%`*(obj: AddressInfo): JsonNode = + % $obj.address -func `%`*(obj: MultiAddress): JsonNode = % $obj +func `%`*(obj: MultiAddress): JsonNode = + % $obj -func `%`*(address: ethers.Address): JsonNode = % $address +func `%`*(address: ethers.Address): JsonNode = + % $address diff --git a/codex/utils/keyutils.nim b/codex/utils/keyutils.nim index c7f762630..664396d3b 100644 --- a/codex/utils/keyutils.nim +++ b/codex/utils/keyutils.nim @@ -8,7 +8,8 @@ ## those 
terms. import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/questionable/results import pkg/libp2p/crypto/crypto @@ -28,17 +29,18 @@ proc setupKey*(path: string): ?!PrivateKey = if not path.fileAccessible({AccessFlags.Find}): info "Creating a private key and saving it" let - res = ? PrivateKey.random(Rng.instance()[]).mapFailure(CodexKeyError) - bytes = ? res.getBytes().mapFailure(CodexKeyError) + res = ?PrivateKey.random(Rng.instance()[]).mapFailure(CodexKeyError) + bytes = ?res.getBytes().mapFailure(CodexKeyError) - ? path.secureWriteFile(bytes).mapFailure(CodexKeyError) + ?path.secureWriteFile(bytes).mapFailure(CodexKeyError) return PrivateKey.init(bytes).mapFailure(CodexKeyError) info "Found a network private key" - if not ? checkSecureFile(path).mapFailure(CodexKeyError): + if not ?checkSecureFile(path).mapFailure(CodexKeyError): warn "The network private key file is not safe, aborting" return failure newException( - CodexKeyUnsafeError, "The network private key file is not safe") + CodexKeyUnsafeError, "The network private key file is not safe" + ) - let kb = ? path.readAllBytes().mapFailure(CodexKeyError) + let kb = ?path.readAllBytes().mapFailure(CodexKeyError) return PrivateKey.init(kb).mapFailure(CodexKeyError) diff --git a/codex/utils/natutils.nim b/codex/utils/natutils.nim index 86497e12b..8a641e950 100644 --- a/codex/utils/natutils.nim +++ b/codex/utils/natutils.nim @@ -1,27 +1,26 @@ {.push raises: [].} import - std/[tables, hashes], - stew/results, stew/shims/net as stewNet, chronos, chronicles + std/[tables, hashes], stew/results, stew/shims/net as stewNet, chronos, chronicles import pkg/libp2p -type - NatStrategy* = enum - NatAny - NatUpnp - NatPmp - NatNone +type NatStrategy* = enum + NatAny + NatUpnp + NatPmp + NatNone -type - IpLimits* = object - limit*: uint - ips: Table[IpAddress, uint] +type IpLimits* = object + limit*: uint + ips: Table[IpAddress, uint] func hash*(ip: IpAddress): Hash = case ip.family - of IpAddressFamily.IPv6: hash(ip.address_v6) - of IpAddressFamily.IPv4: hash(ip.address_v4) + of IpAddressFamily.IPv6: + hash(ip.address_v6) + of IpAddressFamily.IPv4: + hash(ip.address_v4) func inc*(ipLimits: var IpLimits, ip: IpAddress): bool = let val = ipLimits.ips.getOrDefault(ip, 0) @@ -39,10 +38,7 @@ func dec*(ipLimits: var IpLimits, ip: IpAddress) = ipLimits.ips[ip] = val - 1 func isGlobalUnicast*(address: TransportAddress): bool = - if address.isGlobal() and address.isUnicast(): - true - else: - false + if address.isGlobal() and address.isUnicast(): true else: false func isGlobalUnicast*(address: IpAddress): bool = let a = initTAddress(address, Port(0)) @@ -53,16 +49,19 @@ proc getRouteIpv4*(): Result[IpAddress, cstring] = # Note: `publicAddress` is only used an "example" IP to find the best route, # no data is send over the network to this IP! let - publicAddress = TransportAddress(family: AddressFamily.IPv4, - address_v4: [1'u8, 1, 1, 1], port: Port(0)) + publicAddress = TransportAddress( + family: AddressFamily.IPv4, address_v4: [1'u8, 1, 1, 1], port: Port(0) + ) route = getBestRoute(publicAddress) if route.source.isUnspecified(): err("No best ipv4 route found") else: - let ip = try: route.source.address() - except ValueError as e: - # This should not occur really. - error "Address conversion error", exception = e.name, msg = e.msg - return err("Invalid IP address") - ok(ip) \ No newline at end of file + let ip = + try: + route.source.address() + except ValueError as e: + # This should not occur really. 
+ error "Address conversion error", exception = e.name, msg = e.msg + return err("Invalid IP address") + ok(ip) diff --git a/codex/utils/options.nim b/codex/utils/options.nim index 0362eebfe..ad44a717f 100644 --- a/codex/utils/options.nim +++ b/codex/utils/options.nim @@ -31,7 +31,6 @@ template WrapOption*(input: untyped): type = else: Option[input] - macro createType(t: typedesc): untyped = var objectType = getType(t) @@ -47,22 +46,26 @@ macro createType(t: typedesc): untyped = # re-wrapping already filed which is `Option[T]`. for field in objectType[2]: let fieldType = getTypeInst(field) - let newFieldNode = - nnkIdentDefs.newTree(ident($field), nnkCall.newTree(ident("WrapOption"), fieldType), newEmptyNode()) + let newFieldNode = nnkIdentDefs.newTree( + ident($field), nnkCall.newTree(ident("WrapOption"), fieldType), newEmptyNode() + ) fields.add(newFieldNode) # Creates new object type T with the fields lists from steps above. let tSym = genSym(nskType, "T") nnkStmtList.newTree( - nnkTypeSection.newTree( - nnkTypeDef.newTree(tSym, newEmptyNode(), nnkObjectTy.newTree(newEmptyNode(), newEmptyNode(), fields)) - ), - tSym + nnkTypeSection.newTree( + nnkTypeDef.newTree( + tSym, + newEmptyNode(), + nnkObjectTy.newTree(newEmptyNode(), newEmptyNode(), fields), + ) + ), + tSym, ) template Optionalize*(t: typed): untyped = ## Takes object type and wraps all the first level fields into ## Option type unless it is already Option type. createType(t) - diff --git a/codex/utils/poseidon2digest.nim b/codex/utils/poseidon2digest.nim index efdb3c6a9..6eaf21e98 100644 --- a/codex/utils/poseidon2digest.nim +++ b/codex/utils/poseidon2digest.nim @@ -15,27 +15,24 @@ import pkg/stew/byteutils import ../merkletree func spongeDigest*( - _: type Poseidon2Hash, - bytes: openArray[byte], - rate: static int = 2): ?!Poseidon2Hash = + _: type Poseidon2Hash, bytes: openArray[byte], rate: static int = 2 +): ?!Poseidon2Hash = ## Hashes chunks of data with a sponge of rate 1 or 2. ## success Sponge.digest(bytes, rate) func spongeDigest*( - _: type Poseidon2Hash, - bytes: openArray[Bn254Fr], - rate: static int = 2): ?!Poseidon2Hash = + _: type Poseidon2Hash, bytes: openArray[Bn254Fr], rate: static int = 2 +): ?!Poseidon2Hash = ## Hashes chunks of elements with a sponge of rate 1 or 2. ## success Sponge.digest(bytes, rate) func digestTree*( - _: type Poseidon2Tree, - bytes: openArray[byte], - chunkSize: int): ?!Poseidon2Tree = + _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int +): ?!Poseidon2Tree = ## Hashes chunks of data with a sponge of rate 2, and combines the ## resulting chunk hashes in a merkle root. ## @@ -50,30 +47,27 @@ func digestTree*( while index < bytes.len: let start = index let finish = min(index + chunkSize, bytes.len) - let digest = ? Poseidon2Hash.spongeDigest(bytes.toOpenArray(start, finish - 1), 2) + let digest = ?Poseidon2Hash.spongeDigest(bytes.toOpenArray(start, finish - 1), 2) leaves.add(digest) index += chunkSize return Poseidon2Tree.init(leaves) func digest*( - _: type Poseidon2Tree, - bytes: openArray[byte], - chunkSize: int): ?!Poseidon2Hash = + _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int +): ?!Poseidon2Hash = ## Hashes chunks of data with a sponge of rate 2, and combines the ## resulting chunk hashes in a merkle root. ## - (? 
Poseidon2Tree.digestTree(bytes, chunkSize)).root + (?Poseidon2Tree.digestTree(bytes, chunkSize)).root func digestMhash*( - _: type Poseidon2Tree, - bytes: openArray[byte], - chunkSize: int): ?!MultiHash = + _: type Poseidon2Tree, bytes: openArray[byte], chunkSize: int +): ?!MultiHash = ## Hashes chunks of data with a sponge of rate 2 and ## returns the multihash of the root ## - let - hash = ? Poseidon2Tree.digest(bytes, chunkSize) + let hash = ?Poseidon2Tree.digest(bytes, chunkSize) - ? MultiHash.init(Pos2Bn128MrklCodec, hash).mapFailure + ?MultiHash.init(Pos2Bn128MrklCodec, hash).mapFailure diff --git a/codex/utils/stintutils.nim b/codex/utils/stintutils.nim index 125ff8b60..48e332d02 100644 --- a/codex/utils/stintutils.nim +++ b/codex/utils/stintutils.nim @@ -1,4 +1,4 @@ import pkg/stint -func fromDecimal*(T: typedesc[StUint|StInt], s: string): T {.inline.} = +func fromDecimal*(T: typedesc[StUint | StInt], s: string): T {.inline.} = parse(s, type result, radix = 10) diff --git a/codex/utils/timer.nim b/codex/utils/timer.nim index 9cf594891..0a5a940a0 100644 --- a/codex/utils/timer.nim +++ b/codex/utils/timer.nim @@ -12,14 +12,15 @@ import pkg/upraises -push: {.upraises: [].} +push: + {.upraises: [].} import pkg/chronos import ../logutils type - TimerCallback* = proc(): Future[void] {.gcsafe, upraises:[].} + TimerCallback* = proc(): Future[void] {.gcsafe, upraises: [].} Timer* = ref object of RootObj callback: TimerCallback interval: Duration @@ -38,12 +39,14 @@ proc timerLoop(timer: Timer) {.async: (raises: []).} = except CancelledError: discard # do not propagate as timerLoop is asyncSpawned except CatchableError as exc: - error "Timer caught unhandled exception: ", name=timer.name, msg=exc.msg + error "Timer caught unhandled exception: ", name = timer.name, msg = exc.msg -method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.gcsafe, base.} = +method start*( + timer: Timer, callback: TimerCallback, interval: Duration +) {.gcsafe, base.} = if timer.loopFuture != nil: return - trace "Timer starting: ", name=timer.name + trace "Timer starting: ", name = timer.name timer.callback = callback timer.interval = interval timer.loopFuture = timerLoop(timer) @@ -51,6 +54,6 @@ method start*(timer: Timer, callback: TimerCallback, interval: Duration) {.gcsaf method stop*(timer: Timer) {.async, base.} = if timer.loopFuture != nil and not timer.loopFuture.finished: - trace "Timer stopping: ", name=timer.name + trace "Timer stopping: ", name = timer.name await timer.loopFuture.cancelAndWait() timer.loopFuture = nil diff --git a/codex/utils/trackedfutures.nim b/codex/utils/trackedfutures.nim index 2505ffe2f..eb3cc2191 100644 --- a/codex/utils/trackedfutures.nim +++ b/codex/utils/trackedfutures.nim @@ -5,15 +5,15 @@ import ../logutils {.push raises: [].} -type - TrackedFutures* = ref object - futures: Table[uint, FutureBase] - cancelling: bool +type TrackedFutures* = ref object + futures: Table[uint, FutureBase] + cancelling: bool logScope: topics = "trackable futures" -proc len*(self: TrackedFutures): int = self.futures.len +proc len*(self: TrackedFutures): int = + self.futures.len proc removeFuture(self: TrackedFutures, future: FutureBase) = if not self.cancelling and not future.isNil: diff --git a/codex/validation.nim b/codex/validation.nim index 3e9e63ffa..6e3135e42 100644 --- a/codex/validation.nim +++ b/codex/validation.nim @@ -12,28 +12,23 @@ export market export sets export validationconfig -type - Validation* = ref object - slots: HashSet[SlotId] - clock: Clock - market: 
Market - subscriptions: seq[Subscription] - running: Future[void] - periodicity: Periodicity - proofTimeout: UInt256 - config: ValidationConfig - -const - MaxStorageRequestDuration = 30.days +type Validation* = ref object + slots: HashSet[SlotId] + clock: Clock + market: Market + subscriptions: seq[Subscription] + running: Future[void] + periodicity: Periodicity + proofTimeout: UInt256 + config: ValidationConfig + +const MaxStorageRequestDuration = 30.days logScope: topics = "codex validator" proc new*( - _: type Validation, - clock: Clock, - market: Market, - config: ValidationConfig + _: type Validation, clock: Clock, market: Market, config: ValidationConfig ): Validation = Validation(clock: clock, market: market, config: config) @@ -49,20 +44,17 @@ proc waitUntilNextPeriod(validation: Validation) {.async.} = trace "Waiting until next period", currentPeriod = period await validation.clock.waitUntil(periodEnd.truncate(int64) + 1) -func groupIndexForSlotId*(slotId: SlotId, - validationGroups: ValidationGroups): uint16 = +func groupIndexForSlotId*(slotId: SlotId, validationGroups: ValidationGroups): uint16 = let slotIdUInt256 = UInt256.fromBytesBE(slotId.toArray) (slotIdUInt256 mod validationGroups.u256).truncate(uint16) func maxSlotsConstraintRespected(validation: Validation): bool = - validation.config.maxSlots == 0 or - validation.slots.len < validation.config.maxSlots + validation.config.maxSlots == 0 or validation.slots.len < validation.config.maxSlots func shouldValidateSlot(validation: Validation, slotId: SlotId): bool = without validationGroups =? validation.config.groups: return true - groupIndexForSlotId(slotId, validationGroups) == - validation.config.groupIndex + groupIndexForSlotId(slotId, validationGroups) == validation.config.groupIndex proc subscribeSlotFilled(validation: Validation) {.async.} = proc onSlotFilled(requestId: RequestId, slotIndex: UInt256) = @@ -72,6 +64,7 @@ proc subscribeSlotFilled(validation: Validation) {.async.} = if validation.shouldValidateSlot(slotId): trace "Adding slot", slotId validation.slots.incl(slotId) + let subscription = await validation.market.subscribeSlotFilled(onSlotFilled) validation.subscriptions.add(subscription) @@ -85,9 +78,9 @@ proc removeSlotsThatHaveEnded(validation: Validation) {.async.} = ended.incl(slotId) validation.slots.excl(ended) -proc markProofAsMissing(validation: Validation, - slotId: SlotId, - period: Period) {.async.} = +proc markProofAsMissing( + validation: Validation, slotId: SlotId, period: Period +) {.async.} = logScope: currentPeriod = validation.getCurrentPeriod() @@ -122,15 +115,16 @@ proc run(validation: Validation) {.async: (raises: []).} = except CatchableError as e: error "Validation failed", msg = e.msg -proc epochForDurationBackFromNow(validation: Validation, - duration: Duration): SecondsSince1970 = +proc epochForDurationBackFromNow( + validation: Validation, duration: Duration +): SecondsSince1970 = return validation.clock.now - duration.secs proc restoreHistoricalState(validation: Validation) {.async.} = trace "Restoring historical state..." 
let startTimeEpoch = validation.epochForDurationBackFromNow(MaxStorageRequestDuration) - let slotFilledEvents = await validation.market.queryPastSlotFilledEvents( - fromTime = startTimeEpoch) + let slotFilledEvents = + await validation.market.queryPastSlotFilledEvents(fromTime = startTimeEpoch) for event in slotFilledEvents: if not validation.maxSlotsConstraintRespected: break @@ -142,8 +136,8 @@ proc restoreHistoricalState(validation: Validation) {.async.} = trace "Historical state restored", numberOfSlots = validation.slots.len proc start*(validation: Validation) {.async.} = - trace "Starting validator", groups = validation.config.groups, - groupIndex = validation.config.groupIndex + trace "Starting validator", + groups = validation.config.groups, groupIndex = validation.config.groupIndex validation.periodicity = await validation.market.periodicity() validation.proofTimeout = await validation.market.proofTimeout() await validation.subscribeSlotFilled() diff --git a/codex/validationconfig.nim b/codex/validationconfig.nim index dd36a25af..3e21c4fab 100644 --- a/codex/validationconfig.nim +++ b/codex/validationconfig.nim @@ -3,7 +3,7 @@ import pkg/questionable import pkg/questionable/results type - ValidationGroups* = range[2..65535] + ValidationGroups* = range[2 .. 65535] MaxSlots* = int ValidationConfig* = object maxSlots: MaxSlots @@ -14,17 +14,16 @@ func init*( _: type ValidationConfig, maxSlots: MaxSlots, groups: ?ValidationGroups, - groupIndex: uint16 = 0): ?!ValidationConfig = + groupIndex: uint16 = 0, +): ?!ValidationConfig = if maxSlots < 0: return failure "The value of maxSlots must be greater than " & - fmt"or equal to 0! (got: {maxSlots})" + fmt"or equal to 0! (got: {maxSlots})" if validationGroups =? groups and groupIndex >= uint16(validationGroups): return failure "The value of the group index must be less than " & - fmt"validation groups! (got: {groupIndex = }, " & - fmt"groups = {validationGroups})" - - success ValidationConfig( - maxSlots: maxSlots, groups: groups, groupIndex: groupIndex) + fmt"validation groups! (got: {groupIndex = }, " & fmt"groups = {validationGroups})" + + success ValidationConfig(maxSlots: maxSlots, groups: groups, groupIndex: groupIndex) func maxSlots*(config: ValidationConfig): MaxSlots = config.maxSlots diff --git a/tests/checktest.nim b/tests/checktest.nim index 8ca5c53ed..b1d80ff99 100644 --- a/tests/checktest.nim +++ b/tests/checktest.nim @@ -3,7 +3,7 @@ import ./helpers ## Unit testing suite that calls checkTrackers in teardown to check for memory leaks using chronos trackers. 
template checksuite*(name, body) = suite name: - proc suiteProc = + proc suiteProc() = multisetup() teardown: @@ -15,7 +15,7 @@ template checksuite*(name, body) = template asyncchecksuite*(name, body) = suite name: - proc suiteProc = + proc suiteProc() = asyncmultisetup() teardown: diff --git a/tests/codex/blockexchange/discovery/testdiscovery.nim b/tests/codex/blockexchange/discovery/testdiscovery.nim index a136f89e3..88331c3f1 100644 --- a/tests/codex/blockexchange/discovery/testdiscovery.nim +++ b/tests/codex/blockexchange/discovery/testdiscovery.nim @@ -19,7 +19,6 @@ import ../../helpers import ../../helpers/mockdiscovery import ../../examples - asyncchecksuite "Block Advertising and Discovery": let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) @@ -56,8 +55,8 @@ asyncchecksuite "Block Advertising and Discovery": pendingBlocks = PendingBlocksManager.new() (manifest, tree) = makeManifestAndTree(blocks).tryGet() - manifestBlock = bt.Block.new( - manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + manifestBlock = + bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() (await localStore.putBlock(manifestBlock)).tryGet() @@ -67,42 +66,33 @@ asyncchecksuite "Block Advertising and Discovery": network, blockDiscovery, pendingBlocks, - minPeersPerBlock = 1) - - advertiser = Advertiser.new( - localStore, - blockDiscovery + minPeersPerBlock = 1, ) + advertiser = Advertiser.new(localStore, blockDiscovery) + engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) switch.mount(network) test "Should discover want list": - let - pendingBlocks = blocks.mapIt( - engine.pendingBlocks.getWantHandle(it.cid) - ) + let pendingBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid)) await engine.start() - blockDiscovery.publishBlockProvideHandler = - proc(d: MockDiscovery, cid: Cid): Future[void] {.async, gcsafe.} = - return + blockDiscovery.publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async, gcsafe.} = + return - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = - await engine.resolveBlocks(blocks.filterIt(it.cid == cid)) + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async.} = + await engine.resolveBlocks(blocks.filterIt(it.cid == cid)) - await allFuturesThrowing( - allFinished(pendingBlocks)) + await allFuturesThrowing(allFinished(pendingBlocks)) await engine.stop() @@ -110,26 +100,27 @@ asyncchecksuite "Block Advertising and Discovery": let cids = @[manifest.cid.tryGet, manifest.treeCid] advertised = initTable.collect: - for cid in cids: {cid: newFuture[void]()} + for cid in cids: + {cid: newFuture[void]()} - blockDiscovery - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} = - if cid in advertised and not advertised[cid].finished(): - advertised[cid].complete() + blockDiscovery.publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ) {.async.} = + if cid in advertised and not advertised[cid].finished(): + advertised[cid].complete() await engine.start() - await allFuturesThrowing( - allFinished(toSeq(advertised.values))) + await allFuturesThrowing(allFinished(toSeq(advertised.values))) await engine.stop() test "Should not advertise local blocks": - let - blockCids = 
blocks.mapIt(it.cid) + let blockCids = blocks.mapIt(it.cid) - blockDiscovery - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid) {.async.} = - check: - cid notin blockCids + blockDiscovery.publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ) {.async.} = + check: + cid notin blockCids await engine.start() await sleepAsync(3.seconds) @@ -137,29 +128,25 @@ asyncchecksuite "Block Advertising and Discovery": test "Should not launch discovery if remote peer has block": let - pendingBlocks = blocks.mapIt( - engine.pendingBlocks.getWantHandle(it.cid) - ) + pendingBlocks = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid)) peerId = PeerId.example haves = collect(initTable()): for blk in blocks: - { blk.address: Presence(address: blk.address, price: 0.u256) } + {blk.address: Presence(address: blk.address, price: 0.u256)} - engine.peers.add( - BlockExcPeerCtx( - id: peerId, - blocks: haves - )) + engine.peers.add(BlockExcPeerCtx(id: peerId, blocks: haves)) - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] = - check false + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] = + check false await engine.start() - engine.pendingBlocks.resolve(blocks.mapIt(BlockDelivery(blk: it, address: it.address))) + engine.pendingBlocks.resolve( + blocks.mapIt(BlockDelivery(blk: it, address: it.address)) + ) - await allFuturesThrowing( - allFinished(pendingBlocks)) + await allFuturesThrowing(allFinished(pendingBlocks)) await engine.stop() @@ -176,7 +163,7 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": trees: seq[CodexTree] setup: - for _ in 0..<4: + for _ in 0 ..< 4: let chunker = RandomChunker.new(Rng.instance(), size = 4096, chunkSize = 256) var blocks = newSeq[bt.Block]() while true: @@ -205,21 +192,14 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": network, blockDiscovery, pendingBlocks, - minPeersPerBlock = 1) - - advertiser = Advertiser.new( - localStore, - blockDiscovery + minPeersPerBlock = 1, ) + advertiser = Advertiser.new(localStore, blockDiscovery) + engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) networkStore = NetworkStore.new(engine, localStore) s.mount(network) @@ -239,46 +219,70 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": var advertised: Table[Cid, SignedPeerRecord] - MockDiscovery(blockexc[1].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[1].peerInfo.signedPeerRecord + MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[1].peerInfo.signedPeerRecord - MockDiscovery(blockexc[2].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[2].peerInfo.signedPeerRecord + MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[2].peerInfo.signedPeerRecord - MockDiscovery(blockexc[3].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[3].peerInfo.signedPeerRecord + 
MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) - await blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid))]) + await blockexc[1].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid) + ) + ], + ) discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid) - await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid))]) + await blockexc[2].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid) + ) + ], + ) discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid) - await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid))]) + await blockexc[3].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid) + ) + ], + ) - MockDiscovery(blockexc[0].engine.discovery.discovery) - .findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): - Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - result.add(advertised[cid]) + MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async.} = + if cid in advertised: + result.add(advertised[cid]) let futs = collect(newSeq): - for m in mBlocks[0..2]: + for m in mBlocks[0 .. 
2]: blockexc[0].engine.requestBlock(m.cid) await allFuturesThrowing( - switch.mapIt(it.start()) & - blockexc.mapIt(it.engine.start())).wait(10.seconds) + switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) + ) + .wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing( - blockexc.mapIt(it.engine.stop()) & - switch.mapIt(it.stop())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) + .wait(10.seconds) test "E2E - Should advertise and discover blocks with peers already connected": # Distribute the blocks amongst 1..3 @@ -286,42 +290,65 @@ asyncchecksuite "E2E - Multiple Nodes Discovery": var advertised: Table[Cid, SignedPeerRecord] - MockDiscovery(blockexc[1].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[1].peerInfo.signedPeerRecord + MockDiscovery(blockexc[1].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[1].peerInfo.signedPeerRecord - MockDiscovery(blockexc[2].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[2].peerInfo.signedPeerRecord + MockDiscovery(blockexc[2].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[2].peerInfo.signedPeerRecord - MockDiscovery(blockexc[3].engine.discovery.discovery) - .publishBlockProvideHandler = proc(d: MockDiscovery, cid: Cid): Future[void] {.async.} = - advertised[cid] = switch[3].peerInfo.signedPeerRecord + MockDiscovery(blockexc[3].engine.discovery.discovery).publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[void] {.async.} = + advertised[cid] = switch[3].peerInfo.signedPeerRecord discard blockexc[1].engine.pendingBlocks.getWantHandle(mBlocks[0].cid) - await blockexc[1].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid))]) + await blockexc[1].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[0], address: BlockAddress(leaf: false, cid: mBlocks[0].cid) + ) + ], + ) discard blockexc[2].engine.pendingBlocks.getWantHandle(mBlocks[1].cid) - await blockexc[2].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid))]) + await blockexc[2].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[1], address: BlockAddress(leaf: false, cid: mBlocks[1].cid) + ) + ], + ) discard blockexc[3].engine.pendingBlocks.getWantHandle(mBlocks[2].cid) - await blockexc[3].engine.blocksDeliveryHandler(switch[0].peerInfo.peerId, @[BlockDelivery(blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid))]) + await blockexc[3].engine.blocksDeliveryHandler( + switch[0].peerInfo.peerId, + @[ + BlockDelivery( + blk: mBlocks[2], address: BlockAddress(leaf: false, cid: mBlocks[2].cid) + ) + ], + ) - MockDiscovery(blockexc[0].engine.discovery.discovery) - .findBlockProvidersHandler = proc(d: MockDiscovery, cid: Cid): - Future[seq[SignedPeerRecord]] {.async.} = - if cid in advertised: - return @[advertised[cid]] + MockDiscovery(blockexc[0].engine.discovery.discovery).findBlockProvidersHandler = proc( + d: 
MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async.} = + if cid in advertised: + return @[advertised[cid]] - let - futs = mBlocks[0..2].mapIt(blockexc[0].engine.requestBlock(it.cid)) + let futs = mBlocks[0 .. 2].mapIt(blockexc[0].engine.requestBlock(it.cid)) await allFuturesThrowing( - switch.mapIt(it.start()) & - blockexc.mapIt(it.engine.start())).wait(10.seconds) + switch.mapIt(it.start()) & blockexc.mapIt(it.engine.start()) + ) + .wait(10.seconds) await allFutures(futs).wait(10.seconds) - await allFuturesThrowing( - blockexc.mapIt(it.engine.stop()) & - switch.mapIt(it.stop())).wait(10.seconds) + await allFuturesThrowing(blockexc.mapIt(it.engine.stop()) & switch.mapIt(it.stop())) + .wait(10.seconds) diff --git a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim index 42bc84a9e..904703a0f 100644 --- a/tests/codex/blockexchange/discovery/testdiscoveryengine.nim +++ b/tests/codex/blockexchange/discovery/testdiscoveryengine.nim @@ -62,12 +62,18 @@ asyncchecksuite "Test Discovery Engine": network, blockDiscovery, pendingBlocks, - discoveryLoopSleep = 100.millis) - wants = blocks.mapIt(pendingBlocks.getWantHandle(it.cid) ) - - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = - pendingBlocks.resolve(blocks.filterIt(it.cid == cid).mapIt(BlockDelivery(blk: it, address: it.address))) + discoveryLoopSleep = 100.millis, + ) + wants = blocks.mapIt(pendingBlocks.getWantHandle(it.cid)) + + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + pendingBlocks.resolve( + blocks.filterIt(it.cid == cid).mapIt( + BlockDelivery(blk: it, address: it.address) + ) + ) await discoveryEngine.start() await allFuturesThrowing(allFinished(wants)).wait(1.seconds) @@ -82,14 +88,16 @@ asyncchecksuite "Test Discovery Engine": network, blockDiscovery, pendingBlocks, - discoveryLoopSleep = 100.millis) + discoveryLoopSleep = 100.millis, + ) want = newFuture[void]() - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = - check cid == blocks[0].cid - if not want.finished: - want.complete() + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + check cid == blocks[0].cid + if not want.finished: + want.complete() await discoveryEngine.start() discoveryEngine.queueFindBlocksReq(@[blocks[0].cid]) @@ -107,23 +115,24 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery, pendingBlocks, discoveryLoopSleep = 5.minutes, - minPeersPerBlock = minPeers) + minPeersPerBlock = minPeers, + ) want = newAsyncEvent() var pendingCids = newSeq[Cid]() - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = - check cid in pendingCids - pendingCids.keepItIf(it != cid) - check peerStore.len < minPeers - var - peerCtx = BlockExcPeerCtx(id: PeerId.example) + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.async, gcsafe.} = + check cid in pendingCids + pendingCids.keepItIf(it != cid) + check peerStore.len < minPeers + var peerCtx = BlockExcPeerCtx(id: PeerId.example) - let address = BlockAddress(leaf: false, cid: cid) + let address = BlockAddress(leaf: false, cid: cid) - peerCtx.blocks[address] = 
Presence(address: address, price: 0.u256) - peerStore.add(peerCtx) - want.fire() + peerCtx.blocks[address] = Presence(address: address, price: 0.u256) + peerStore.add(peerCtx) + want.fire() await discoveryEngine.start() var idx = 0 @@ -148,19 +157,20 @@ asyncchecksuite "Test Discovery Engine": blockDiscovery, pendingBlocks, discoveryLoopSleep = 100.millis, - concurrentDiscReqs = 2) + concurrentDiscReqs = 2, + ) reqs = newFuture[void]() count = 0 - blockDiscovery.findBlockProvidersHandler = - proc(d: MockDiscovery, cid: Cid): - Future[seq[SignedPeerRecord]] {.gcsafe, async.} = - check cid == blocks[0].cid - if count > 0: - check false - count.inc + blockDiscovery.findBlockProvidersHandler = proc( + d: MockDiscovery, cid: Cid + ): Future[seq[SignedPeerRecord]] {.gcsafe, async.} = + check cid == blocks[0].cid + if count > 0: + check false + count.inc - await reqs # queue the request + await reqs # queue the request await discoveryEngine.start() discoveryEngine.queueFindBlocksReq(@[blocks[0].cid]) diff --git a/tests/codex/blockexchange/engine/testadvertiser.nim b/tests/codex/blockexchange/engine/testadvertiser.nim index c1bf1c682..157564d6f 100644 --- a/tests/codex/blockexchange/engine/testadvertiser.nim +++ b/tests/codex/blockexchange/engine/testadvertiser.nim @@ -22,24 +22,22 @@ asyncchecksuite "Advertiser": advertised: seq[Cid] let manifest = Manifest.new( - treeCid = Cid.example, - blockSize = 123.NBytes, - datasetSize = 234.NBytes) - manifestBlk = Block.new(data = manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + treeCid = Cid.example, blockSize = 123.NBytes, datasetSize = 234.NBytes + ) + manifestBlk = + Block.new(data = manifest.encode().tryGet(), codec = ManifestCodec).tryGet() setup: blockDiscovery = MockDiscovery.new() localStore = CacheStore.new() advertised = newSeq[Cid]() - blockDiscovery.publishBlockProvideHandler = - proc(d: MockDiscovery, cid: Cid) {.async, gcsafe.} = - advertised.add(cid) + blockDiscovery.publishBlockProvideHandler = proc( + d: MockDiscovery, cid: Cid + ) {.async, gcsafe.} = + advertised.add(cid) - advertiser = Advertiser.new( - localStore, - blockDiscovery - ) + advertiser = Advertiser.new(localStore, blockDiscovery) await advertiser.start() @@ -86,14 +84,10 @@ asyncchecksuite "Advertiser": check manifest.treeCid in advertised test "Should advertise existing manifests and their trees": - let - newStore = CacheStore.new([manifestBlk]) + let newStore = CacheStore.new([manifestBlk]) await advertiser.stop() - advertiser = Advertiser.new( - newStore, - blockDiscovery - ) + advertiser = Advertiser.new(newStore, blockDiscovery) await advertiser.start() check eventually manifestBlk.cid in advertised diff --git a/tests/codex/blockexchange/engine/testblockexc.nim b/tests/codex/blockexchange/engine/testblockexc.nim index e1a2bfcf4..aa15f795c 100644 --- a/tests/codex/blockexchange/engine/testblockexc.nim +++ b/tests/codex/blockexchange/engine/testblockexc.nim @@ -34,16 +34,15 @@ asyncchecksuite "NetworkStore engine - 2 nodes": nodeCmps1.engine.start(), nodeCmps2.switch.start(), nodeCmps2.blockDiscovery.start(), - nodeCmps2.engine.start()) + nodeCmps2.engine.start(), + ) # initialize our want lists - pendingBlocks1 = blocks2[0..3].mapIt( - nodeCmps1.pendingBlocks.getWantHandle( it.cid ) - ) + pendingBlocks1 = + blocks2[0 .. 3].mapIt(nodeCmps1.pendingBlocks.getWantHandle(it.cid)) - pendingBlocks2 = blocks1[0..3].mapIt( - nodeCmps2.pendingBlocks.getWantHandle( it.cid ) - ) + pendingBlocks2 = + blocks1[0 .. 
3].mapIt(nodeCmps2.pendingBlocks.getWantHandle(it.cid)) pricing1 = Pricing.example() pricing2 = Pricing.example() @@ -54,8 +53,8 @@ asyncchecksuite "NetworkStore engine - 2 nodes": nodeCmps2.engine.pricing = pricing2.some await nodeCmps1.switch.connect( - nodeCmps2.switch.peerInfo.peerId, - nodeCmps2.switch.peerInfo.addrs) + nodeCmps2.switch.peerInfo.peerId, nodeCmps2.switch.peerInfo.addrs + ) await sleepAsync(1.seconds) # give some time to exchange lists peerCtx2 = nodeCmps1.peerStore.get(nodeCmps2.switch.peerInfo.peerId) @@ -71,39 +70,32 @@ asyncchecksuite "NetworkStore engine - 2 nodes": nodeCmps1.switch.stop(), nodeCmps2.blockDiscovery.stop(), nodeCmps2.engine.stop(), - nodeCmps2.switch.stop()) + nodeCmps2.switch.stop(), + ) test "Should exchange blocks on connect": - await allFuturesThrowing( - allFinished(pendingBlocks1)) - .wait(10.seconds) + await allFuturesThrowing(allFinished(pendingBlocks1)).wait(10.seconds) - await allFuturesThrowing( - allFinished(pendingBlocks2)) - .wait(10.seconds) + await allFuturesThrowing(allFinished(pendingBlocks2)).wait(10.seconds) check: - (await allFinished( - blocks1[0..3].mapIt( - nodeCmps2.localStore.getBlock( it.cid ) ))) - .filterIt( it.completed and it.read.isOk ) - .mapIt( $it.read.get.cid ).sorted(cmp[string]) == - blocks1[0..3].mapIt( $it.cid ).sorted(cmp[string]) - - (await allFinished( - blocks2[0..3].mapIt( - nodeCmps1.localStore.getBlock( it.cid ) ))) - .filterIt( it.completed and it.read.isOk ) - .mapIt( $it.read.get.cid ).sorted(cmp[string]) == - blocks2[0..3].mapIt( $it.cid ).sorted(cmp[string]) + (await allFinished(blocks1[0 .. 3].mapIt(nodeCmps2.localStore.getBlock(it.cid)))) + .filterIt(it.completed and it.read.isOk) + .mapIt($it.read.get.cid) + .sorted(cmp[string]) == blocks1[0 .. 3].mapIt($it.cid).sorted(cmp[string]) + + (await allFinished(blocks2[0 .. 3].mapIt(nodeCmps1.localStore.getBlock(it.cid)))) + .filterIt(it.completed and it.read.isOk) + .mapIt($it.read.get.cid) + .sorted(cmp[string]) == blocks2[0 .. 3].mapIt($it.cid).sorted(cmp[string]) test "Should exchanges accounts on connect": - check peerCtx1.account.?address == pricing1.address.some - check peerCtx2.account.?address == pricing2.address.some + check peerCtx1.account .? address == pricing1.address.some + check peerCtx2.account .? address == pricing2.address.some test "Should send want-have for block": let blk = bt.Block.new("Block 1".toBytes).tryGet() - let blkFut = nodeCmps1.pendingBlocks.getWantHandle( blk.cid ) + let blkFut = nodeCmps1.pendingBlocks.getWantHandle(blk.cid) (await nodeCmps2.localStore.putBlock(blk)).tryGet() let entry = WantListEntry( @@ -111,25 +103,20 @@ asyncchecksuite "NetworkStore engine - 2 nodes": priority: 1, cancel: false, wantType: WantType.WantBlock, - sendDontHave: false) + sendDontHave: false, + ) peerCtx1.peerWants.add(entry) - check nodeCmps2 - .engine - .taskQueue - .pushOrUpdateNoWait(peerCtx1).isOk + check nodeCmps2.engine.taskQueue.pushOrUpdateNoWait(peerCtx1).isOk check eventually (await nodeCmps1.localStore.hasBlock(blk.cid)).tryGet() check eventually (await blkFut) == blk test "Should get blocks from remote": - let - blocks = await allFinished( - blocks2[4..7].mapIt( - nodeCmps1.networkStore.getBlock(it.cid) - )) + let blocks = + await allFinished(blocks2[4 .. 7].mapIt(nodeCmps1.networkStore.getBlock(it.cid))) - check blocks.mapIt( it.read().tryGet() ) == blocks2[4..7] + check blocks.mapIt(it.read().tryGet()) == blocks2[4 .. 
7] test "Remote should send blocks when available": let blk = bt.Block.new("Block 1".toBytes).tryGet() @@ -142,19 +129,15 @@ asyncchecksuite "NetworkStore engine - 2 nodes": (await nodeCmps2.networkStore.putBlock(blk)).tryGet() # should succeed retrieving block from remote - check await nodeCmps1.networkStore.getBlock(blk.cid) - .withTimeout(100.millis) # should succeed + check await nodeCmps1.networkStore.getBlock(blk.cid).withTimeout(100.millis) + # should succeed test "Should receive payments for blocks that were sent": - discard await allFinished( - blocks2[4..7].mapIt( - nodeCmps2.networkStore.putBlock(it) - )) + discard + await allFinished(blocks2[4 .. 7].mapIt(nodeCmps2.networkStore.putBlock(it))) - discard await allFinished( - blocks2[4..7].mapIt( - nodeCmps1.networkStore.getBlock(it.cid) - )) + discard + await allFinished(blocks2[4 .. 7].mapIt(nodeCmps1.networkStore.getBlock(it.cid))) let channel = !peerCtx1.paymentChannel @@ -173,14 +156,10 @@ asyncchecksuite "NetworkStore - multiple nodes": for e in nodes: await e.engine.start() - await allFuturesThrowing( - nodes.mapIt( it.switch.start() ) - ) + await allFuturesThrowing(nodes.mapIt(it.switch.start())) teardown: - await allFuturesThrowing( - nodes.mapIt( it.switch.stop() ) - ) + await allFuturesThrowing(nodes.mapIt(it.switch.stop())) nodes = @[] @@ -191,34 +170,23 @@ asyncchecksuite "NetworkStore - multiple nodes": # Add blocks from 1st peer to want list let - downloadCids = - blocks[0..3].mapIt( - it.cid - ) & - blocks[12..15].mapIt( - it.cid - ) - - pendingBlocks = downloadCids.mapIt( - engine.pendingBlocks.getWantHandle( it ) - ) - - for i in 0..15: + downloadCids = blocks[0 .. 3].mapIt(it.cid) & blocks[12 .. 15].mapIt(it.cid) + + pendingBlocks = downloadCids.mapIt(engine.pendingBlocks.getWantHandle(it)) + + for i in 0 .. 15: (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() await connectNodes(nodes) await sleepAsync(1.seconds) - await allFuturesThrowing( - allFinished(pendingBlocks)) + await allFuturesThrowing(allFinished(pendingBlocks)) check: - (await allFinished( - downloadCids.mapIt( - downloader.localStore.getBlock( it ) ))) - .filterIt( it.completed and it.read.isOk ) - .mapIt( $it.read.get.cid ).sorted(cmp[string]) == - downloadCids.mapIt( $it ).sorted(cmp[string]) + (await allFinished(downloadCids.mapIt(downloader.localStore.getBlock(it)))) + .filterIt(it.completed and it.read.isOk) + .mapIt($it.read.get.cid) + .sorted(cmp[string]) == downloadCids.mapIt($it).sorted(cmp[string]) test "Should exchange blocks with multiple nodes": let @@ -227,25 +195,20 @@ asyncchecksuite "NetworkStore - multiple nodes": # Add blocks from 1st peer to want list let - pendingBlocks1 = blocks[0..3].mapIt( - engine.pendingBlocks.getWantHandle( it.cid ) - ) - pendingBlocks2 = blocks[12..15].mapIt( - engine.pendingBlocks.getWantHandle( it.cid ) - ) - - for i in 0..15: + pendingBlocks1 = blocks[0 .. 3].mapIt(engine.pendingBlocks.getWantHandle(it.cid)) + pendingBlocks2 = + blocks[12 .. 15].mapIt(engine.pendingBlocks.getWantHandle(it.cid)) + + for i in 0 .. 
15: (await nodes[i div 4].networkStore.engine.localStore.putBlock(blocks[i])).tryGet() await connectNodes(nodes) await sleepAsync(1.seconds) - await allFuturesThrowing( - allFinished(pendingBlocks1), - allFinished(pendingBlocks2)) + await allFuturesThrowing(allFinished(pendingBlocks1), allFinished(pendingBlocks2)) - check pendingBlocks1.mapIt( it.read ) == blocks[0..3] - check pendingBlocks2.mapIt( it.read ) == blocks[12..15] + check pendingBlocks1.mapIt(it.read) == blocks[0 .. 3] + check pendingBlocks2.mapIt(it.read) == blocks[12 .. 15] test "Should actively cancel want-haves if block received from elsewhere": let @@ -265,13 +228,9 @@ asyncchecksuite "NetworkStore - multiple nodes": # ... and bystander learns that downloader wants it, but can't provide it. check eventually( - bystander - .engine - .peers - .get(downloader.switch.peerInfo.peerId) - .peerWants - .filterIt( it.address == aBlock.address ) - .len == 1 + bystander.engine.peers + .get(downloader.switch.peerInfo.peerId).peerWants + .filterIt(it.address == aBlock.address).len == 1 ) # As soon as we connect the downloader to the blockHolder, the block should @@ -282,11 +241,7 @@ asyncchecksuite "NetworkStore - multiple nodes": # ... and the bystander should have cancelled the want-have check eventually( - bystander - .engine - .peers - .get(downloader.switch.peerInfo.peerId) - .peerWants - .filterIt( it.address == aBlock.address ) - .len == 0 + bystander.engine.peers + .get(downloader.switch.peerInfo.peerId).peerWants + .filterIt(it.address == aBlock.address).len == 0 ) diff --git a/tests/codex/blockexchange/engine/testengine.nim b/tests/codex/blockexchange/engine/testengine.nim index d97016d5a..f7cc82941 100644 --- a/tests/codex/blockexchange/engine/testengine.nim +++ b/tests/codex/blockexchange/engine/testengine.nim @@ -54,42 +54,30 @@ asyncchecksuite "NetworkStore engine basic": test "Should send want list to new peers": proc sendWantList( - id: PeerId, - addresses: seq[BlockAddress], - priority: int32 = 0, - cancel: bool = false, - wantType: WantType = WantType.WantHave, - full: bool = false, - sendDontHave: bool = false) {.gcsafe, async.} = - check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt( $it.cid ).sorted - done.complete() + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) {.gcsafe, async.} = + check addresses.mapIt($it.cidOrTreeCid).sorted == blocks.mapIt($it.cid).sorted + done.complete() let - network = BlockExcNetwork(request: BlockExcRequest( - sendWantList: sendWantList, - )) + network = BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) - localStore = CacheStore.new(blocks.mapIt( it )) + localStore = CacheStore.new(blocks.mapIt(it)) discovery = DiscoveryEngine.new( - localStore, - peerStore, - network, - blockDiscovery, - pendingBlocks) - - advertiser = Advertiser.new( - localStore, - blockDiscovery + localStore, peerStore, network, blockDiscovery, pendingBlocks ) + advertiser = Advertiser.new(localStore, blockDiscovery) + engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) for b in blocks: discard engine.pendingBlocks.getWantHandle(b.cid) @@ -105,32 +93,18 @@ asyncchecksuite "NetworkStore engine basic": done.complete() let - network = BlockExcNetwork( - request: BlockExcRequest( - sendAccount: 
sendAccount - )) + network = BlockExcNetwork(request: BlockExcRequest(sendAccount: sendAccount)) localStore = CacheStore.new() discovery = DiscoveryEngine.new( - localStore, - peerStore, - network, - blockDiscovery, - pendingBlocks) - - advertiser = Advertiser.new( - localStore, - blockDiscovery + localStore, peerStore, network, blockDiscovery, pendingBlocks ) + advertiser = Advertiser.new(localStore, blockDiscovery) + engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) engine.pricing = pricing.some await engine.setupPeer(peerId) @@ -156,9 +130,9 @@ asyncchecksuite "NetworkStore engine handlers": blocks: seq[Block] const NopSendWantCancellationsProc = proc( - id: PeerId, - addresses: seq[BlockAddress] - ) {.gcsafe, async.} = discard + id: PeerId, addresses: seq[BlockAddress] + ) {.gcsafe, async.} = + discard setup: rng = Rng.instance() @@ -181,43 +155,27 @@ asyncchecksuite "NetworkStore engine handlers": localStore = CacheStore.new() network = BlockExcNetwork() - discovery = DiscoveryEngine.new( - localStore, - peerStore, - network, - blockDiscovery, - pendingBlocks) + discovery = + DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) - advertiser = Advertiser.new( - localStore, - blockDiscovery - ) + advertiser = Advertiser.new(localStore, blockDiscovery) engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) - - peerCtx = BlockExcPeerCtx( - id: peerId + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks ) + + peerCtx = BlockExcPeerCtx(id: peerId) engine.peers.add(peerCtx) test "Should schedule block requests": - let - wantList = makeWantList( - blocks.mapIt( it.cid ), - wantType = WantType.WantBlock) # only `wantBlock` are stored in `peerWants` + let wantList = makeWantList(blocks.mapIt(it.cid), wantType = WantType.WantBlock) + # only `wantBlock` are stored in `peerWants` proc handler() {.async.} = let ctx = await engine.taskQueue.pop() check ctx.id == peerId # only `wantBlock` scheduled - check ctx.peerWants.mapIt( it.address.cidOrTreeCid ) == blocks.mapIt( it.cid ) + check ctx.peerWants.mapIt(it.address.cidOrTreeCid) == blocks.mapIt(it.cid) let done = handler() await engine.wantListHandler(peerId, wantList) @@ -226,19 +184,16 @@ asyncchecksuite "NetworkStore engine handlers": test "Should handle want list": let done = newFuture[void]() - wantList = makeWantList(blocks.mapIt( it.cid )) + wantList = makeWantList(blocks.mapIt(it.cid)) proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = - check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address ) + check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) done.complete() - engine.network = BlockExcNetwork( - request: BlockExcRequest( - sendPresence: sendPresence - )) + engine.network = + BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence)) - await allFuturesThrowing( - allFinished(blocks.mapIt( localStore.putBlock(it) ))) + await allFuturesThrowing(allFinished(blocks.mapIt(localStore.putBlock(it)))) await engine.wantListHandler(peerId, wantList) await done @@ -246,21 +201,18 @@ asyncchecksuite "NetworkStore engine handlers": test "Should handle want list - `dont-have`": let done = newFuture[void]() - wantList = makeWantList( - blocks.mapIt( it.cid ), - sendDontHave = true) + wantList = 
makeWantList(blocks.mapIt(it.cid), sendDontHave = true) proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = - check presence.mapIt( it.address ) == wantList.entries.mapIt( it.address ) + check presence.mapIt(it.address) == wantList.entries.mapIt(it.address) for p in presence: check: p.`type` == BlockPresenceType.DontHave done.complete() - engine.network = BlockExcNetwork(request: BlockExcRequest( - sendPresence: sendPresence - )) + engine.network = + BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence)) await engine.wantListHandler(peerId, wantList) await done @@ -268,23 +220,20 @@ asyncchecksuite "NetworkStore engine handlers": test "Should handle want list - `dont-have` some blocks": let done = newFuture[void]() - wantList = makeWantList( - blocks.mapIt( it.cid ), - sendDontHave = true) + wantList = makeWantList(blocks.mapIt(it.cid), sendDontHave = true) proc sendPresence(peerId: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = for p in presence: - if p.address.cidOrTreeCid != blocks[0].cid and p.address.cidOrTreeCid != blocks[1].cid: + if p.address.cidOrTreeCid != blocks[0].cid and + p.address.cidOrTreeCid != blocks[1].cid: check p.`type` == BlockPresenceType.DontHave else: check p.`type` == BlockPresenceType.Have done.complete() - engine.network = BlockExcNetwork( - request: BlockExcRequest( - sendPresence: sendPresence - )) + engine.network = + BlockExcNetwork(request: BlockExcRequest(sendPresence: sendPresence)) (await engine.localStore.putBlock(blocks[0])).tryGet() (await engine.localStore.putBlock(blocks[1])).tryGet() @@ -293,19 +242,18 @@ asyncchecksuite "NetworkStore engine handlers": await done test "Should store blocks in local store": - let pending = blocks.mapIt( - engine.pendingBlocks.getWantHandle( it.cid ) - ) + let pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid)) let blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address)) # Install NOP for want list cancellations so they don't cause a crash engine.network = BlockExcNetwork( - request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc)) + request: BlockExcRequest(sendWantCancellations: NopSendWantCancellationsProc) + ) await engine.blocksDeliveryHandler(peerId, blocksDelivery) let resolved = await allFinished(pending) - check resolved.mapIt( it.read ) == blocks + check resolved.mapIt(it.read) == blocks for b in blocks: let present = await engine.localStore.hasBlock(b.cid) check present.tryGet() @@ -325,10 +273,7 @@ asyncchecksuite "NetworkStore engine handlers": request: BlockExcRequest( sendPayment: proc(receiver: PeerId, payment: SignedState) {.gcsafe, async.} = let - amount = - blocks.mapIt( - peerContext.blocks[it.address].price - ).foldl(a + b) + amount = blocks.mapIt(peerContext.blocks[it.address].price).foldl(a + b) balances = !payment.state.outcome.balances(Asset) @@ -337,48 +282,46 @@ asyncchecksuite "NetworkStore engine handlers": done.complete(), # Install NOP for want list cancellations so they don't cause a crash - sendWantCancellations: NopSendWantCancellationsProc - )) + sendWantCancellations: NopSendWantCancellationsProc, + ) + ) - await engine.blocksDeliveryHandler(peerId, blocks.mapIt( - BlockDelivery(blk: it, address: it.address))) + await engine.blocksDeliveryHandler( + peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address)) + ) await done.wait(100.millis) test "Should handle block presence": - var - handles: Table[Cid, Future[Block]] + var handles: Table[Cid, Future[Block]] proc 
sendWantList( - id: PeerId, - addresses: seq[BlockAddress], - priority: int32 = 0, - cancel: bool = false, - wantType: WantType = WantType.WantHave, - full: bool = false, - sendDontHave: bool = false) {.gcsafe, async.} = - engine.pendingBlocks.resolve(blocks - .filterIt( it.address in addresses ) - .mapIt(BlockDelivery(blk: it, address: it.address))) + id: PeerId, + addresses: seq[BlockAddress], + priority: int32 = 0, + cancel: bool = false, + wantType: WantType = WantType.WantHave, + full: bool = false, + sendDontHave: bool = false, + ) {.gcsafe, async.} = + engine.pendingBlocks.resolve( + blocks.filterIt(it.address in addresses).mapIt( + BlockDelivery(blk: it, address: it.address) + ) + ) - engine.network = BlockExcNetwork( - request: BlockExcRequest( - sendWantList: sendWantList - )) + engine.network = + BlockExcNetwork(request: BlockExcRequest(sendWantList: sendWantList)) # only Cids in peer want lists are requested - handles = blocks.mapIt( - (it.cid, engine.pendingBlocks.getWantHandle( it.cid ))).toTable + handles = blocks.mapIt((it.cid, engine.pendingBlocks.getWantHandle(it.cid))).toTable let price = UInt256.example await engine.blockPresenceHandler( peerId, blocks.mapIt( - PresenceMessage.init( - Presence( - address: it.address, - have: true, - price: price - )))) + PresenceMessage.init(Presence(address: it.address, have: true, price: price)) + ), + ) for a in blocks.mapIt(it.address): check a in peerCtx.peerHave @@ -388,21 +331,17 @@ asyncchecksuite "NetworkStore engine handlers": let pending = blocks.mapIt(engine.pendingBlocks.getWantHandle(it.cid)) blocksDelivery = blocks.mapIt(BlockDelivery(blk: it, address: it.address)) - cancellations = newTable( - blocks.mapIt((it.address, newFuture[void]())).toSeq - ) + cancellations = newTable(blocks.mapIt((it.address, newFuture[void]())).toSeq) proc sendWantCancellations( - id: PeerId, - addresses: seq[BlockAddress] + id: PeerId, addresses: seq[BlockAddress] ) {.gcsafe, async.} = - for address in addresses: - cancellations[address].complete() + for address in addresses: + cancellations[address].complete() engine.network = BlockExcNetwork( - request: BlockExcRequest( - sendWantCancellations: sendWantCancellations - )) + request: BlockExcRequest(sendWantCancellations: sendWantCancellations) + ) await engine.blocksDeliveryHandler(peerId, blocksDelivery) discard await allFinished(pending) @@ -448,43 +387,29 @@ asyncchecksuite "Task Handler": localStore = CacheStore.new() network = BlockExcNetwork() - discovery = DiscoveryEngine.new( - localStore, - peerStore, - network, - blockDiscovery, - pendingBlocks) + discovery = + DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) - advertiser = Advertiser.new( - localStore, - blockDiscovery - ) + advertiser = Advertiser.new(localStore, blockDiscovery) engine = BlockExcEngine.new( - localStore, - wallet, - network, - discovery, - advertiser, - peerStore, - pendingBlocks) + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) peersCtx = @[] - for i in 0..3: + for i in 0 .. 
3: let seckey = PrivateKey.random(rng[]).tryGet() peers.add(PeerId.init(seckey.getPublicKey().tryGet()).tryGet()) - peersCtx.add(BlockExcPeerCtx( - id: peers[i] - )) + peersCtx.add(BlockExcPeerCtx(id: peers[i])) peerStore.add(peersCtx[i]) engine.pricing = Pricing.example.some test "Should send want-blocks in priority order": proc sendBlocksDelivery( - id: PeerId, - blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + id: PeerId, blocksDelivery: seq[BlockDelivery] + ) {.gcsafe, async.} = check blocksDelivery.len == 2 check: blocksDelivery[1].address == blocks[0].address @@ -501,7 +426,8 @@ asyncchecksuite "Task Handler": priority: 49, cancel: false, wantType: WantType.WantBlock, - sendDontHave: false) + sendDontHave: false, + ) ) # first block to send by priority @@ -511,39 +437,44 @@ asyncchecksuite "Task Handler": priority: 50, cancel: false, wantType: WantType.WantBlock, - sendDontHave: false) + sendDontHave: false, + ) ) await engine.taskHandler(peersCtx[0]) test "Should set in-flight for outgoing blocks": proc sendBlocksDelivery( - id: PeerId, - blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + id: PeerId, blocksDelivery: seq[BlockDelivery] + ) {.gcsafe, async.} = check peersCtx[0].peerWants[0].inFlight for blk in blocks: (await engine.localStore.putBlock(blk)).tryGet() engine.network.request.sendBlocksDelivery = sendBlocksDelivery - peersCtx[0].peerWants.add(WantListEntry( - address: blocks[0].address, - priority: 50, - cancel: false, - wantType: WantType.WantBlock, - sendDontHave: false, - inFlight: false) + peersCtx[0].peerWants.add( + WantListEntry( + address: blocks[0].address, + priority: 50, + cancel: false, + wantType: WantType.WantBlock, + sendDontHave: false, + inFlight: false, + ) ) await engine.taskHandler(peersCtx[0]) test "Should clear in-flight when local lookup fails": - peersCtx[0].peerWants.add(WantListEntry( - address: blocks[0].address, - priority: 50, - cancel: false, - wantType: WantType.WantBlock, - sendDontHave: false, - inFlight: false) + peersCtx[0].peerWants.add( + WantListEntry( + address: blocks[0].address, + priority: 50, + cancel: false, + wantType: WantType.WantBlock, + sendDontHave: false, + inFlight: false, + ) ) await engine.taskHandler(peersCtx[0]) @@ -555,11 +486,12 @@ asyncchecksuite "Task Handler": let price = (!engine.pricing).price proc sendPresence(id: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = - check presence.mapIt(!Presence.init(it)) == @[ - Presence(address: present[0].address, have: true, price: price), - Presence(address: present[1].address, have: true, price: price), - Presence(address: missing[0].address, have: false) - ] + check presence.mapIt(!Presence.init(it)) == + @[ + Presence(address: present[0].address, have: true, price: price), + Presence(address: present[1].address, have: true, price: price), + Presence(address: missing[0].address, have: false), + ] for blk in blocks: (await engine.localStore.putBlock(blk)).tryGet() @@ -572,7 +504,8 @@ asyncchecksuite "Task Handler": priority: 1, cancel: false, wantType: WantType.WantHave, - sendDontHave: false) + sendDontHave: false, + ) ) # have block @@ -582,7 +515,8 @@ asyncchecksuite "Task Handler": priority: 1, cancel: false, wantType: WantType.WantHave, - sendDontHave: false) + sendDontHave: false, + ) ) # don't have block @@ -592,7 +526,8 @@ asyncchecksuite "Task Handler": priority: 1, cancel: false, wantType: WantType.WantHave, - sendDontHave: false) + sendDontHave: false, + ) ) await engine.taskHandler(peersCtx[0]) diff --git 
a/tests/codex/blockexchange/engine/testpayments.nim b/tests/codex/blockexchange/engine/testpayments.nim index 03c08e09e..24d5dab6e 100644 --- a/tests/codex/blockexchange/engine/testpayments.nim +++ b/tests/codex/blockexchange/engine/testpayments.nim @@ -5,7 +5,6 @@ import ../../examples import ../../helpers checksuite "engine payments": - let address = EthAddress.example let amount = 42.u256 @@ -29,6 +28,6 @@ checksuite "engine payments": test "uses same channel for consecutive payments": let payment1, payment2 = wallet.pay(peer, amount) - let channel1 = payment1.?state.?channel.?getChannelId - let channel2 = payment2.?state.?channel.?getChannelId + let channel1 = payment1 .? state .? channel .? getChannelId + let channel2 = payment2 .? state .? channel .? getChannelId check channel1 == channel2 diff --git a/tests/codex/blockexchange/protobuf/testpayments.nim b/tests/codex/blockexchange/protobuf/testpayments.nim index 81bc5dfc4..d0773d709 100644 --- a/tests/codex/blockexchange/protobuf/testpayments.nim +++ b/tests/codex/blockexchange/protobuf/testpayments.nim @@ -6,9 +6,7 @@ import ../../../asynctest import ../../examples import ../../helpers - checksuite "account protobuf messages": - let account = Account(address: EthAddress.example) let message = AccountMessage.init(account) @@ -16,7 +14,7 @@ checksuite "account protobuf messages": check message.address == @(account.address.toArray) test "decodes recipient of payments": - check Account.init(message).?address == account.address.some + check Account.init(message) .? address == account.address.some test "fails to decode when address has incorrect number of bytes": var incorrect = message @@ -24,7 +22,6 @@ checksuite "account protobuf messages": check Account.init(incorrect).isNone checksuite "channel update messages": - let state = SignedState.example let update = StateChannelUpdate.init(state) diff --git a/tests/codex/blockexchange/protobuf/testpresence.nim b/tests/codex/blockexchange/protobuf/testpresence.nim index 963dd0ec0..7e3b94e6d 100644 --- a/tests/codex/blockexchange/protobuf/testpresence.nim +++ b/tests/codex/blockexchange/protobuf/testpresence.nim @@ -7,7 +7,6 @@ import ../../examples import ../../helpers checksuite "block presence protobuf messages": - let cid = Cid.example address = BlockAddress(leaf: false, cid: cid) @@ -26,17 +25,17 @@ checksuite "block presence protobuf messages": check message.price == @(price.toBytesBE) test "decodes CID": - check Presence.init(message).?address == address.some + check Presence.init(message) .? address == address.some test "decodes have/donthave": var message = message message.`type` = BlockPresenceType.Have - check Presence.init(message).?have == true.some + check Presence.init(message) .? have == true.some message.`type` = BlockPresenceType.DontHave - check Presence.init(message).?have == false.some + check Presence.init(message) .? have == false.some test "decodes price": - check Presence.init(message).?price == price.some + check Presence.init(message) .? 
price == price.some test "fails to decode when price is invalid": var incorrect = message diff --git a/tests/codex/blockexchange/testnetwork.nim b/tests/codex/blockexchange/testnetwork.nim index 756d86a1d..0fae4ffec 100644 --- a/tests/codex/blockexchange/testnetwork.nim +++ b/tests/codex/blockexchange/testnetwork.nim @@ -39,9 +39,7 @@ asyncchecksuite "Network - Handlers": done = newFuture[void]() buffer = BufferStream.new() - network = BlockExcNetwork.new( - switch = newStandardSwitch(), - connProvider = getConn) + network = BlockExcNetwork.new(switch = newStandardSwitch(), connProvider = getConn) network.setupPeer(peerId) networkPeer = network.peers[peerId] discard await networkPeer.connect() @@ -63,10 +61,8 @@ asyncchecksuite "Network - Handlers": network.handlers.onWantList = wantListHandler - let wantList = makeWantList( - blocks.mapIt( it.cid ), - 1, true, WantType.WantHave, - true, true) + let wantList = + makeWantList(blocks.mapIt(it.cid), 1, true, WantType.WantHave, true, true) let msg = Message(wantlist: wantList) await buffer.pushData(lenPrefix(protobufEncode(msg))) @@ -74,21 +70,22 @@ asyncchecksuite "Network - Handlers": await done.wait(500.millis) test "Blocks Handler": - proc blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + proc blocksDeliveryHandler( + peer: PeerId, blocksDelivery: seq[BlockDelivery] + ) {.gcsafe, async.} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() network.handlers.onBlocksDelivery = blocksDeliveryHandler - let msg = Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address))) + let msg = + Message(payload: blocks.mapIt(BlockDelivery(blk: it, address: it.address))) await buffer.pushData(lenPrefix(protobufEncode(msg))) await done.wait(500.millis) test "Presence Handler": - proc presenceHandler( - peer: PeerId, - presence: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = for b in blocks: check: b.address in presence @@ -98,11 +95,9 @@ asyncchecksuite "Network - Handlers": network.handlers.onPresence = presenceHandler let msg = Message( - blockPresences: blocks.mapIt( - BlockPresence( - address: it.address, - type: BlockPresenceType.Have - ))) + blockPresences: + blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have)) + ) await buffer.pushData(lenPrefix(protobufEncode(msg))) await done.wait(500.millis) @@ -136,8 +131,7 @@ asyncchecksuite "Network - Handlers": await done.wait(100.millis) asyncchecksuite "Network - Senders": - let - chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256) + let chunker = RandomChunker.new(Rng.instance(), size = 1024, chunkSize = 256) var switch1, switch2: Switch @@ -156,25 +150,19 @@ asyncchecksuite "Network - Senders": done = newFuture[void]() switch1 = newStandardSwitch() switch2 = newStandardSwitch() - network1 = BlockExcNetwork.new( - switch = switch1) + network1 = BlockExcNetwork.new(switch = switch1) switch1.mount(network1) - network2 = BlockExcNetwork.new( - switch = switch2) + network2 = BlockExcNetwork.new(switch = switch2) switch2.mount(network2) await switch1.start() await switch2.start() - await switch1.connect( - switch2.peerInfo.peerId, - switch2.peerInfo.addrs) + await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) teardown: - await allFuturesThrowing( - switch1.stop(), - switch2.stop()) + await allFuturesThrowing(switch1.stop(), switch2.stop()) test "Send want list": proc wantListHandler(peer: PeerId, 
wantList: WantList) {.gcsafe, async.} = @@ -194,28 +182,32 @@ asyncchecksuite "Network - Senders": network2.handlers.onWantList = wantListHandler await network1.sendWantList( switch2.peerInfo.peerId, - blocks.mapIt( it.address ), - 1, true, WantType.WantHave, - true, true) + blocks.mapIt(it.address), + 1, + true, + WantType.WantHave, + true, + true, + ) await done.wait(500.millis) test "send blocks": - proc blocksDeliveryHandler(peer: PeerId, blocksDelivery: seq[BlockDelivery]) {.gcsafe, async.} = + proc blocksDeliveryHandler( + peer: PeerId, blocksDelivery: seq[BlockDelivery] + ) {.gcsafe, async.} = check blocks == blocksDelivery.mapIt(it.blk) done.complete() network2.handlers.onBlocksDelivery = blocksDeliveryHandler await network1.sendBlocksDelivery( - switch2.peerInfo.peerId, - blocks.mapIt(BlockDelivery(blk: it, address: it.address))) + switch2.peerInfo.peerId, blocks.mapIt(BlockDelivery(blk: it, address: it.address)) + ) await done.wait(500.millis) test "send presence": - proc presenceHandler( - peer: PeerId, - presence: seq[BlockPresence]) {.gcsafe, async.} = + proc presenceHandler(peer: PeerId, presence: seq[BlockPresence]) {.gcsafe, async.} = for b in blocks: check: b.address in presence @@ -226,11 +218,8 @@ asyncchecksuite "Network - Senders": await network1.sendBlockPresence( switch2.peerInfo.peerId, - blocks.mapIt( - BlockPresence( - address: it.address, - type: BlockPresenceType.Have - ))) + blocks.mapIt(BlockPresence(address: it.address, type: BlockPresenceType.Have)), + ) await done.wait(500.millis) @@ -269,36 +258,30 @@ asyncchecksuite "Network - Test Limits": switch1 = newStandardSwitch() switch2 = newStandardSwitch() - network1 = BlockExcNetwork.new( - switch = switch1, - maxInflight = 0) + network1 = BlockExcNetwork.new(switch = switch1, maxInflight = 0) switch1.mount(network1) - network2 = BlockExcNetwork.new( - switch = switch2) + network2 = BlockExcNetwork.new(switch = switch2) switch2.mount(network2) await switch1.start() await switch2.start() - await switch1.connect( - switch2.peerInfo.peerId, - switch2.peerInfo.addrs) + await switch1.connect(switch2.peerInfo.peerId, switch2.peerInfo.addrs) teardown: - await allFuturesThrowing( - switch1.stop(), - switch2.stop()) + await allFuturesThrowing(switch1.stop(), switch2.stop()) test "Concurrent Sends": let account = Account(address: EthAddress.example) - network2.handlers.onAccount = - proc(peer: PeerId, received: Account) {.gcsafe, async.} = - check false + network2.handlers.onAccount = proc( + peer: PeerId, received: Account + ) {.gcsafe, async.} = + check false let fut = network1.send( - switch2.peerInfo.peerId, - Message(account: AccountMessage.init(account))) + switch2.peerInfo.peerId, Message(account: AccountMessage.init(account)) + ) await sleepAsync(100.millis) check not fut.finished diff --git a/tests/codex/blockexchange/testpeerctxstore.nim b/tests/codex/blockexchange/testpeerctxstore.nim index dc77fbd8d..6ea601d17 100644 --- a/tests/codex/blockexchange/testpeerctxstore.nim +++ b/tests/codex/blockexchange/testpeerctxstore.nim @@ -40,10 +40,12 @@ checksuite "Peer Context Store Peer Selection": setup: store = PeerCtxStore.new() addresses = collect(newSeq): - for i in 0..<10: BlockAddress(leaf: false, cid: Cid.example) + for i in 0 ..< 10: + BlockAddress(leaf: false, cid: Cid.example) peerCtxs = collect(newSeq): - for i in 0..<10: BlockExcPeerCtx.example + for i in 0 ..< 10: + BlockExcPeerCtx.example for p in peerCtxs: store.add(p) @@ -56,34 +58,33 @@ checksuite "Peer Context Store Peer Selection": test "Should 
select peers that have Cid": peerCtxs[0].blocks = collect(initTable): for i, a in addresses: - { a: Presence(address: a, price: i.u256) } + {a: Presence(address: a, price: i.u256)} peerCtxs[5].blocks = collect(initTable): for i, a in addresses: - { a: Presence(address: a, price: i.u256) } + {a: Presence(address: a, price: i.u256)} - let - peers = store.peersHave(addresses[0]) + let peers = store.peersHave(addresses[0]) check peers.len == 2 check peerCtxs[0] in peers check peerCtxs[5] in peers test "Should select peers that want Cid": - let - entries = addresses.mapIt( - WantListEntry( - address: it, - priority: 1, - cancel: false, - wantType: WantType.WantBlock, - sendDontHave: false)) + let entries = addresses.mapIt( + WantListEntry( + address: it, + priority: 1, + cancel: false, + wantType: WantType.WantBlock, + sendDontHave: false, + ) + ) peerCtxs[0].peerWants = entries peerCtxs[5].peerWants = entries - let - peers = store.peersWant(addresses[4]) + let peers = store.peersWant(addresses[4]) check peers.len == 2 check peerCtxs[0] in peers diff --git a/tests/codex/blockexchange/testpendingblocks.nim b/tests/codex/blockexchange/testpendingblocks.nim index dd94c4da1..45b065c0e 100644 --- a/tests/codex/blockexchange/testpendingblocks.nim +++ b/tests/codex/blockexchange/testpendingblocks.nim @@ -58,24 +58,24 @@ checksuite "Pending Blocks": test "Should get wants list": let pendingBlocks = PendingBlocksManager.new() - blks = (0..9).mapIt( bt.Block.new(("Hello " & $it).toBytes).tryGet ) + blks = (0 .. 9).mapIt(bt.Block.new(("Hello " & $it).toBytes).tryGet) - discard blks.mapIt( pendingBlocks.getWantHandle( it.cid ) ) + discard blks.mapIt(pendingBlocks.getWantHandle(it.cid)) check: - blks.mapIt( $it.cid ).sorted(cmp[string]) == - toSeq(pendingBlocks.wantListBlockCids).mapIt( $it ).sorted(cmp[string]) + blks.mapIt($it.cid).sorted(cmp[string]) == + toSeq(pendingBlocks.wantListBlockCids).mapIt($it).sorted(cmp[string]) test "Should get want handles list": let pendingBlocks = PendingBlocksManager.new() - blks = (0..9).mapIt( bt.Block.new(("Hello " & $it).toBytes).tryGet ) - handles = blks.mapIt( pendingBlocks.getWantHandle( it.cid ) ) + blks = (0 .. 
9).mapIt(bt.Block.new(("Hello " & $it).toBytes).tryGet) + handles = blks.mapIt(pendingBlocks.getWantHandle(it.cid)) wantHandles = toSeq(pendingBlocks.wantHandles) check wantHandles.len == handles.len pendingBlocks.resolve(blks.mapIt(BlockDelivery(blk: it, address: it.address))) check: - (await allFinished(wantHandles)).mapIt( $it.read.cid ).sorted(cmp[string]) == - (await allFinished(handles)).mapIt( $it.read.cid ).sorted(cmp[string]) + (await allFinished(wantHandles)).mapIt($it.read.cid).sorted(cmp[string]) == + (await allFinished(handles)).mapIt($it.read.cid).sorted(cmp[string]) diff --git a/tests/codex/examples.nim b/tests/codex/examples.nim index 2e68d2363..c97c7a8a0 100644 --- a/tests/codex/examples.nim +++ b/tests/codex/examples.nim @@ -34,10 +34,7 @@ proc example*(_: type SignedState): SignedState = wallet.pay(channel, asset, receiver, amount).get proc example*(_: type Pricing): Pricing = - Pricing( - address: EthAddress.example, - price: uint32.rand.u256 - ) + Pricing(address: EthAddress.example, price: uint32.rand.u256) proc example*(_: type bt.Block): bt.Block = let length = rand(4096) @@ -64,14 +61,14 @@ proc example*(_: type Availability): Availability = freeSize = uint16.example.u256, duration = uint16.example.u256, minPrice = uint64.example.u256, - maxCollateral = uint16.example.u256 + maxCollateral = uint16.example.u256, ) proc example*(_: type Reservation): Reservation = Reservation.init( availabilityId = AvailabilityId(array[32, byte].example), size = uint16.example.u256, - slotId = SlotId.example + slotId = SlotId.example, ) proc example*(_: type MerkleProof): MerkleProof = @@ -80,5 +77,5 @@ proc example*(_: type MerkleProof): MerkleProof = proc example*(_: type Poseidon2Proof): Poseidon2Proof = var example = MerkleProof[Poseidon2Hash, PoseidonKeysEnum]() example.index = 123 - example.path = @[1, 2, 3, 4].mapIt( it.toF ) + example.path = @[1, 2, 3, 4].mapIt(it.toF) example diff --git a/tests/codex/helpers.nim b/tests/codex/helpers.nim index 89aeafd11..6d7415d33 100644 --- a/tests/codex/helpers.nim +++ b/tests/codex/helpers.nim @@ -41,25 +41,25 @@ proc lenPrefix*(msg: openArray[byte]): seq[byte] = let vbytes = PB.toBytes(msg.len().uint64) var buf = newSeqUninitialized[byte](msg.len() + vbytes.len) - buf[0.. 
0): - + while (let chunk = await chunker.getBytes(); chunk.len > 0): let blk = Block.new(chunk).tryGet() cids.add(blk.cid) (await store.putBlock(blk)).tryGet() @@ -98,19 +100,20 @@ proc storeDataGetManifest*(store: BlockStore, chunker: Chunker): Future[Manifest manifest = Manifest.new( treeCid = treeCid, blockSize = NBytes(chunker.chunkSize), - datasetSize = NBytes(chunker.offset)) + datasetSize = NBytes(chunker.offset), + ) - for i in 0..= dataset.len: return 0 var read = 0 - while read < len and - read < chunkSize.int and - (consumed + read) < dataset.len: + while read < len and read < chunkSize.int and (consumed + read) < dataset.len: data[read] = dataset[consumed + read] read.inc consumed += read return read - Chunker.new( - reader = reader, - pad = pad, - chunkSize = chunkSize) + Chunker.new(reader = reader, pad = pad, chunkSize = chunkSize) diff --git a/tests/codex/helpers/mockclock.nim b/tests/codex/helpers/mockclock.nim index 75a251c9e..be1eb4d2f 100644 --- a/tests/codex/helpers/mockclock.nim +++ b/tests/codex/helpers/mockclock.nim @@ -8,14 +8,12 @@ type MockClock* = ref object of Clock time: SecondsSince1970 waiting: seq[Waiting] + Waiting = ref object until: SecondsSince1970 future: Future[void] -func new*( - _: type MockClock, - time: SecondsSince1970 = getTime().toUnix -): MockClock = +func new*(_: type MockClock, time: SecondsSince1970 = getTime().toUnix): MockClock = ## Create a mock clock instance MockClock(time: time) diff --git a/tests/codex/helpers/mockdiscovery.nim b/tests/codex/helpers/mockdiscovery.nim index 5d5e8132c..42ad76a99 100644 --- a/tests/codex/helpers/mockdiscovery.nim +++ b/tests/codex/helpers/mockdiscovery.nim @@ -13,32 +13,24 @@ import pkg/questionable import pkg/codex/discovery import pkg/contractabi/address as ca -type - MockDiscovery* = ref object of Discovery - findBlockProvidersHandler*: proc(d: MockDiscovery, cid: Cid): - Future[seq[SignedPeerRecord]] {.gcsafe.} - publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): - Future[void] {.gcsafe.} - findHostProvidersHandler*: proc(d: MockDiscovery, host: ca.Address): - Future[seq[SignedPeerRecord]] {.gcsafe.} - publishHostProvideHandler*: proc(d: MockDiscovery, host: ca.Address): - Future[void] {.gcsafe.} +type MockDiscovery* = ref object of Discovery + findBlockProvidersHandler*: + proc(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.gcsafe.} + publishBlockProvideHandler*: proc(d: MockDiscovery, cid: Cid): Future[void] {.gcsafe.} + findHostProvidersHandler*: + proc(d: MockDiscovery, host: ca.Address): Future[seq[SignedPeerRecord]] {.gcsafe.} + publishHostProvideHandler*: + proc(d: MockDiscovery, host: ca.Address): Future[void] {.gcsafe.} proc new*(T: type MockDiscovery): MockDiscovery = MockDiscovery() -proc findPeer*( - d: Discovery, - peerId: PeerId -): Future[?PeerRecord] {.async.} = +proc findPeer*(d: Discovery, peerId: PeerId): Future[?PeerRecord] {.async.} = ## mock find a peer - always return none ## return none(PeerRecord) -method find*( - d: MockDiscovery, - cid: Cid -): Future[seq[SignedPeerRecord]] {.async.} = +method find*(d: MockDiscovery, cid: Cid): Future[seq[SignedPeerRecord]] {.async.} = if isNil(d.findBlockProvidersHandler): return @@ -51,8 +43,7 @@ method provide*(d: MockDiscovery, cid: Cid): Future[void] {.async.} = await d.publishBlockProvideHandler(d, cid) method find*( - d: MockDiscovery, - host: ca.Address + d: MockDiscovery, host: ca.Address ): Future[seq[SignedPeerRecord]] {.async.} = if isNil(d.findHostProvidersHandler): return diff --git 
a/tests/codex/helpers/mockmarket.nim b/tests/codex/helpers/mockmarket.nim index 358a52062..bb8c01801 100644 --- a/tests/codex/helpers/mockmarket.nim +++ b/tests/codex/helpers/mockmarket.nim @@ -48,16 +48,19 @@ type canReserveSlot*: bool reserveSlotThrowError*: ?(ref MarketError) clock: ?Clock + Fulfillment* = object requestId*: RequestId proof*: Groth16Proof host*: Address + MockSlot* = object requestId*: RequestId host*: Address slotIndex*: UInt256 proof*: Groth16Proof timestamp: ?SecondsSince1970 + Subscriptions = object onRequest: seq[RequestSubscription] onFulfillment: seq[FulfillmentSubscription] @@ -67,32 +70,40 @@ type onRequestCancelled: seq[RequestCancelledSubscription] onRequestFailed: seq[RequestFailedSubscription] onProofSubmitted: seq[ProofSubmittedSubscription] + RequestSubscription* = ref object of Subscription market: MockMarket callback: OnRequest + FulfillmentSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId callback: OnFulfillment + SlotFilledSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId slotIndex: ?UInt256 callback: OnSlotFilled + SlotFreedSubscription* = ref object of Subscription market: MockMarket callback: OnSlotFreed + SlotReservationsFullSubscription* = ref object of Subscription market: MockMarket callback: OnSlotReservationsFull + RequestCancelledSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId callback: OnRequestCancelled + RequestFailedSubscription* = ref object of Subscription market: MockMarket requestId: ?RequestId callback: OnRequestCancelled + ProofSubmittedSubscription = ref object of Subscription market: MockMarket callback: OnProofSubmitted @@ -111,17 +122,15 @@ proc new*(_: type MockMarket, clock: ?Clock = Clock.none): MockMarket = repairRewardPercentage: 10, maxNumberOfSlashes: 5, slashCriterion: 3, - slashPercentage: 10 + slashPercentage: 10, ), proofs: ProofConfig( - period: 10.u256, - timeout: 5.u256, - downtime: 64.uint8, - downtimeProduct: 67.uint8 - ) + period: 10.u256, timeout: 5.u256, downtime: 64.uint8, downtimeProduct: 67.uint8 + ), + ) + MockMarket( + signer: Address.example, config: config, canReserveSlot: true, clock: clock ) - MockMarket(signer: Address.example, config: config, - canReserveSlot: true, clock: clock) method getSigner*(market: MockMarket): Future[Address] {.async.} = return market.signer @@ -145,9 +154,7 @@ method requestStorage*(market: MockMarket, request: StorageRequest) {.async.} = market.requested.add(request) var subscriptions = market.subscriptions.onRequest for subscription in subscriptions: - subscription.callback(request.id, - request.ask, - request.expiry) + subscription.callback(request.id, request.ask, request.expiry) method myRequests*(market: MockMarket): Future[seq[RequestId]] {.async.} = return market.activeRequests[market.signer] @@ -155,75 +162,67 @@ method myRequests*(market: MockMarket): Future[seq[RequestId]] {.async.} = method mySlots*(market: MockMarket): Future[seq[SlotId]] {.async.} = return market.activeSlots[market.signer] -method getRequest*(market: MockMarket, - id: RequestId): Future[?StorageRequest] {.async.} = +method getRequest*( + market: MockMarket, id: RequestId +): Future[?StorageRequest] {.async.} = for request in market.requested: if request.id == id: return some request return none StorageRequest -method getActiveSlot*( - market: MockMarket, - slotId: SlotId): Future[?Slot] {.async.} = - +method getActiveSlot*(market: MockMarket, slotId: SlotId): Future[?Slot] {.async.} = for 
slot in market.filled: if slotId(slot.requestId, slot.slotIndex) == slotId and - request =? await market.getRequest(slot.requestId): + request =? await market.getRequest(slot.requestId): return some Slot(request: request, slotIndex: slot.slotIndex) return none Slot -method requestState*(market: MockMarket, - requestId: RequestId): Future[?RequestState] {.async.} = - return market.requestState.?[requestId] +method requestState*( + market: MockMarket, requestId: RequestId +): Future[?RequestState] {.async.} = + return market.requestState .? [requestId] -method slotState*(market: MockMarket, - slotId: SlotId): Future[SlotState] {.async.} = +method slotState*(market: MockMarket, slotId: SlotId): Future[SlotState] {.async.} = if not market.slotState.hasKey(slotId): return SlotState.Free return market.slotState[slotId] -method getRequestEnd*(market: MockMarket, - id: RequestId): Future[SecondsSince1970] {.async.} = +method getRequestEnd*( + market: MockMarket, id: RequestId +): Future[SecondsSince1970] {.async.} = return market.requestEnds[id] -method requestExpiresAt*(market: MockMarket, - id: RequestId): Future[SecondsSince1970] {.async.} = +method requestExpiresAt*( + market: MockMarket, id: RequestId +): Future[SecondsSince1970] {.async.} = return market.requestExpiry[id] -method getHost*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256): Future[?Address] {.async.} = +method getHost*( + market: MockMarket, requestId: RequestId, slotIndex: UInt256 +): Future[?Address] {.async.} = for slot in market.filled: if slot.requestId == requestId and slot.slotIndex == slotIndex: return some slot.host return none Address -proc emitSlotFilled*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256) = +proc emitSlotFilled*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = var subscriptions = market.subscriptions.onSlotFilled for subscription in subscriptions: let requestMatches = - subscription.requestId.isNone or - subscription.requestId == some requestId + subscription.requestId.isNone or subscription.requestId == some requestId let slotMatches = - subscription.slotIndex.isNone or - subscription.slotIndex == some slotIndex + subscription.slotIndex.isNone or subscription.slotIndex == some slotIndex if requestMatches and slotMatches: subscription.callback(requestId, slotIndex) -proc emitSlotFreed*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256) = +proc emitSlotFreed*(market: MockMarket, requestId: RequestId, slotIndex: UInt256) = var subscriptions = market.subscriptions.onSlotFreed for subscription in subscriptions: subscription.callback(requestId, slotIndex) proc emitSlotReservationsFull*( - market: MockMarket, - requestId: RequestId, - slotIndex: UInt256) = - + market: MockMarket, requestId: RequestId, slotIndex: UInt256 +) = var subscriptions = market.subscriptions.onSlotReservationsFull for subscription in subscriptions: subscription.callback(requestId, slotIndex) @@ -231,45 +230,46 @@ proc emitSlotReservationsFull*( proc emitRequestCancelled*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onRequestCancelled for subscription in subscriptions: - if subscription.requestId == requestId.some or - subscription.requestId.isNone: + if subscription.requestId == requestId.some or subscription.requestId.isNone: subscription.callback(requestId) proc emitRequestFulfilled*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onFulfillment for subscription in subscriptions: - if 
subscription.requestId == requestId.some or - subscription.requestId.isNone: + if subscription.requestId == requestId.some or subscription.requestId.isNone: subscription.callback(requestId) proc emitRequestFailed*(market: MockMarket, requestId: RequestId) = var subscriptions = market.subscriptions.onRequestFailed for subscription in subscriptions: - if subscription.requestId == requestId.some or - subscription.requestId.isNone: + if subscription.requestId == requestId.some or subscription.requestId.isNone: subscription.callback(requestId) -proc fillSlot*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, - host: Address) = +proc fillSlot*( + market: MockMarket, + requestId: RequestId, + slotIndex: UInt256, + proof: Groth16Proof, + host: Address, +) = let slot = MockSlot( requestId: requestId, slotIndex: slotIndex, proof: proof, host: host, - timestamp: market.clock.?now + timestamp: market.clock .? now, ) market.filled.add(slot) market.slotState[slotId(slot.requestId, slot.slotIndex)] = SlotState.Filled market.emitSlotFilled(requestId, slotIndex) -method fillSlot*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256, - proof: Groth16Proof, - collateral: UInt256) {.async.} = +method fillSlot*( + market: MockMarket, + requestId: RequestId, + slotIndex: UInt256, + proof: Groth16Proof, + collateral: UInt256, +) {.async.} = market.fillSlot(requestId, slotIndex, proof, market.signer) method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = @@ -280,11 +280,10 @@ method freeSlot*(market: MockMarket, slotId: SlotId) {.async.} = break market.slotState[slotId] = SlotState.Free -method withdrawFunds*(market: MockMarket, - requestId: RequestId) {.async.} = +method withdrawFunds*(market: MockMarket, requestId: RequestId) {.async.} = market.withdrawn.add(requestId) - if state =? market.requestState.?[requestId] and state == RequestState.Cancelled: + if state =? market.requestState .? 
[requestId] and state == RequestState.Cancelled: market.emitRequestCancelled(requestId) proc setProofRequired*(mock: MockMarket, id: SlotId, required: bool) = @@ -293,8 +292,7 @@ proc setProofRequired*(mock: MockMarket, id: SlotId, required: bool) = else: mock.proofsRequired.excl(id) -method isProofRequired*(mock: MockMarket, - id: SlotId): Future[bool] {.async.} = +method isProofRequired*(mock: MockMarket, id: SlotId): Future[bool] {.async.} = return mock.proofsRequired.contains(id) proc setProofToBeRequired*(mock: MockMarket, id: SlotId, required: bool) = @@ -303,8 +301,7 @@ proc setProofToBeRequired*(mock: MockMarket, id: SlotId, required: bool) = else: mock.proofsToBeRequired.excl(id) -method willProofBeRequired*(mock: MockMarket, - id: SlotId): Future[bool] {.async.} = +method willProofBeRequired*(mock: MockMarket, id: SlotId): Future[bool] {.async.} = return mock.proofsToBeRequired.contains(id) method getChallenge*(mock: MockMarket, id: SlotId): Future[ProofChallenge] {.async.} = @@ -318,9 +315,7 @@ method submitProof*(mock: MockMarket, id: SlotId, proof: Groth16Proof) {.async.} for subscription in mock.subscriptions.onProofSubmitted: subscription.callback(id) -method markProofAsMissing*(market: MockMarket, - id: SlotId, - period: Period) {.async.} = +method markProofAsMissing*(market: MockMarket, id: SlotId, period: Period) {.async.} = market.markedAsMissingProofs.add(id) proc setCanProofBeMarkedAsMissing*(mock: MockMarket, id: SlotId, required: bool) = @@ -329,204 +324,172 @@ proc setCanProofBeMarkedAsMissing*(mock: MockMarket, id: SlotId, required: bool) else: mock.canBeMarkedAsMissing.excl(id) -method canProofBeMarkedAsMissing*(market: MockMarket, - id: SlotId, - period: Period): Future[bool] {.async.} = +method canProofBeMarkedAsMissing*( + market: MockMarket, id: SlotId, period: Period +): Future[bool] {.async.} = return market.canBeMarkedAsMissing.contains(id) method reserveSlot*( - market: MockMarket, - requestId: RequestId, - slotIndex: UInt256) {.async.} = - + market: MockMarket, requestId: RequestId, slotIndex: UInt256 +) {.async.} = if error =? 
market.reserveSlotThrowError: raise error method canReserveSlot*( - market: MockMarket, - requestId: RequestId, - slotIndex: UInt256): Future[bool] {.async.} = - + market: MockMarket, requestId: RequestId, slotIndex: UInt256 +): Future[bool] {.async.} = return market.canReserveSlot func setCanReserveSlot*(market: MockMarket, canReserveSlot: bool) = market.canReserveSlot = canReserveSlot -func setReserveSlotThrowError*( - market: MockMarket, error: ?(ref MarketError)) = - +func setReserveSlotThrowError*(market: MockMarket, error: ?(ref MarketError)) = market.reserveSlotThrowError = error -method subscribeRequests*(market: MockMarket, - callback: OnRequest): - Future[Subscription] {.async.} = - let subscription = RequestSubscription( - market: market, - callback: callback - ) +method subscribeRequests*( + market: MockMarket, callback: OnRequest +): Future[Subscription] {.async.} = + let subscription = RequestSubscription(market: market, callback: callback) market.subscriptions.onRequest.add(subscription) return subscription -method subscribeFulfillment*(market: MockMarket, - callback: OnFulfillment): - Future[Subscription] {.async.} = +method subscribeFulfillment*( + market: MockMarket, callback: OnFulfillment +): Future[Subscription] {.async.} = let subscription = FulfillmentSubscription( - market: market, - requestId: none RequestId, - callback: callback + market: market, requestId: none RequestId, callback: callback ) market.subscriptions.onFulfillment.add(subscription) return subscription -method subscribeFulfillment*(market: MockMarket, - requestId: RequestId, - callback: OnFulfillment): - Future[Subscription] {.async.} = +method subscribeFulfillment*( + market: MockMarket, requestId: RequestId, callback: OnFulfillment +): Future[Subscription] {.async.} = let subscription = FulfillmentSubscription( - market: market, - requestId: some requestId, - callback: callback + market: market, requestId: some requestId, callback: callback ) market.subscriptions.onFulfillment.add(subscription) return subscription -method subscribeSlotFilled*(market: MockMarket, - callback: OnSlotFilled): - Future[Subscription] {.async.} = +method subscribeSlotFilled*( + market: MockMarket, callback: OnSlotFilled +): Future[Subscription] {.async.} = let subscription = SlotFilledSubscription(market: market, callback: callback) market.subscriptions.onSlotFilled.add(subscription) return subscription -method subscribeSlotFilled*(market: MockMarket, - requestId: RequestId, - slotIndex: UInt256, - callback: OnSlotFilled): - Future[Subscription] {.async.} = +method subscribeSlotFilled*( + market: MockMarket, requestId: RequestId, slotIndex: UInt256, callback: OnSlotFilled +): Future[Subscription] {.async.} = let subscription = SlotFilledSubscription( market: market, requestId: some requestId, slotIndex: some slotIndex, - callback: callback + callback: callback, ) market.subscriptions.onSlotFilled.add(subscription) return subscription -method subscribeSlotFreed*(market: MockMarket, - callback: OnSlotFreed): - Future[Subscription] {.async.} = +method subscribeSlotFreed*( + market: MockMarket, callback: OnSlotFreed +): Future[Subscription] {.async.} = let subscription = SlotFreedSubscription(market: market, callback: callback) market.subscriptions.onSlotFreed.add(subscription) return subscription method subscribeSlotReservationsFull*( - market: MockMarket, - callback: OnSlotReservationsFull): Future[Subscription] {.async.} = - + market: MockMarket, callback: OnSlotReservationsFull +): Future[Subscription] {.async.} = let 
subscription = SlotReservationsFullSubscription(market: market, callback: callback) market.subscriptions.onSlotReservationsFull.add(subscription) return subscription -method subscribeRequestCancelled*(market: MockMarket, - callback: OnRequestCancelled): - Future[Subscription] {.async.} = +method subscribeRequestCancelled*( + market: MockMarket, callback: OnRequestCancelled +): Future[Subscription] {.async.} = let subscription = RequestCancelledSubscription( - market: market, - requestId: none RequestId, - callback: callback + market: market, requestId: none RequestId, callback: callback ) market.subscriptions.onRequestCancelled.add(subscription) return subscription -method subscribeRequestCancelled*(market: MockMarket, - requestId: RequestId, - callback: OnRequestCancelled): - Future[Subscription] {.async.} = +method subscribeRequestCancelled*( + market: MockMarket, requestId: RequestId, callback: OnRequestCancelled +): Future[Subscription] {.async.} = let subscription = RequestCancelledSubscription( - market: market, - requestId: some requestId, - callback: callback + market: market, requestId: some requestId, callback: callback ) market.subscriptions.onRequestCancelled.add(subscription) return subscription -method subscribeRequestFailed*(market: MockMarket, - callback: OnRequestFailed): - Future[Subscription] {.async.} = +method subscribeRequestFailed*( + market: MockMarket, callback: OnRequestFailed +): Future[Subscription] {.async.} = let subscription = RequestFailedSubscription( - market: market, - requestId: none RequestId, - callback: callback + market: market, requestId: none RequestId, callback: callback ) market.subscriptions.onRequestFailed.add(subscription) return subscription -method subscribeRequestFailed*(market: MockMarket, - requestId: RequestId, - callback: OnRequestFailed): - Future[Subscription] {.async.} = +method subscribeRequestFailed*( + market: MockMarket, requestId: RequestId, callback: OnRequestFailed +): Future[Subscription] {.async.} = let subscription = RequestFailedSubscription( - market: market, - requestId: some requestId, - callback: callback + market: market, requestId: some requestId, callback: callback ) market.subscriptions.onRequestFailed.add(subscription) return subscription -method subscribeProofSubmission*(mock: MockMarket, - callback: OnProofSubmitted): - Future[Subscription] {.async.} = - let subscription = ProofSubmittedSubscription( - market: mock, - callback: callback - ) +method subscribeProofSubmission*( + mock: MockMarket, callback: OnProofSubmitted +): Future[Subscription] {.async.} = + let subscription = ProofSubmittedSubscription(market: mock, callback: callback) mock.subscriptions.onProofSubmitted.add(subscription) return subscription method queryPastStorageRequestedEvents*( - market: MockMarket, - fromBlock: BlockTag): Future[seq[StorageRequested]] {.async.} = - return market.requested.map(request => - StorageRequested(requestId: request.id, - ask: request.ask, - expiry: request.expiry) + market: MockMarket, fromBlock: BlockTag +): Future[seq[StorageRequested]] {.async.} = + return market.requested.map( + request => + StorageRequested(requestId: request.id, ask: request.ask, expiry: request.expiry) ) method queryPastStorageRequestedEvents*( - market: MockMarket, - blocksAgo: int): Future[seq[StorageRequested]] {.async.} = - return market.requested.map(request => - StorageRequested(requestId: request.id, - ask: request.ask, - expiry: request.expiry) + market: MockMarket, blocksAgo: int +): Future[seq[StorageRequested]] {.async.} = + 
return market.requested.map( + request => + StorageRequested(requestId: request.id, ask: request.ask, expiry: request.expiry) ) method queryPastSlotFilledEvents*( - market: MockMarket, - fromBlock: BlockTag): Future[seq[SlotFilled]] {.async.} = - return market.filled.map(slot => - SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) + market: MockMarket, fromBlock: BlockTag +): Future[seq[SlotFilled]] {.async.} = + return market.filled.map( + slot => SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) ) method queryPastSlotFilledEvents*( - market: MockMarket, - blocksAgo: int): Future[seq[SlotFilled]] {.async.} = - return market.filled.map(slot => - SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) + market: MockMarket, blocksAgo: int +): Future[seq[SlotFilled]] {.async.} = + return market.filled.map( + slot => SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) ) method queryPastSlotFilledEvents*( - market: MockMarket, - fromTime: SecondsSince1970): Future[seq[SlotFilled]] {.async.} = + market: MockMarket, fromTime: SecondsSince1970 +): Future[seq[SlotFilled]] {.async.} = let filtered = market.filled.filter( - proc (slot: MockSlot): bool = + proc(slot: MockSlot): bool = if timestamp =? slot.timestamp: return timestamp >= fromTime else: true ) - return filtered.map(slot => - SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) + return filtered.map( + slot => SlotFilled(requestId: slot.requestId, slotIndex: slot.slotIndex) ) method unsubscribe*(subscription: RequestSubscription) {.async.} = diff --git a/tests/codex/helpers/mockrepostore.nim b/tests/codex/helpers/mockrepostore.nim index a29b7d2f9..50b47f026 100644 --- a/tests/codex/helpers/mockrepostore.nim +++ b/tests/codex/helpers/mockrepostore.nim @@ -16,21 +16,22 @@ import pkg/questionable/results import pkg/codex/stores/repostore import pkg/codex/utils/asynciter -type - MockRepoStore* = ref object of RepoStore - delBlockCids*: seq[Cid] - getBeMaxNumber*: int - getBeOffset*: int +type MockRepoStore* = ref object of RepoStore + delBlockCids*: seq[Cid] + getBeMaxNumber*: int + getBeOffset*: int - testBlockExpirations*: seq[BlockExpiration] - getBlockExpirationsThrows*: bool + testBlockExpirations*: seq[BlockExpiration] + getBlockExpirationsThrows*: bool method delBlock*(self: MockRepoStore, cid: Cid): Future[?!void] {.async.} = self.delBlockCids.add(cid) self.testBlockExpirations = self.testBlockExpirations.filterIt(it.cid != cid) return success() -method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): Future[?!AsyncIter[BlockExpiration]] {.async.} = +method getBlockExpirations*( + self: MockRepoStore, maxNumber: int, offset: int +): Future[?!AsyncIter[BlockExpiration]] {.async.} = if self.getBlockExpirationsThrows: raise new CatchableError @@ -42,10 +43,11 @@ method getBlockExpirations*(self: MockRepoStore, maxNumber: int, offset: int): F limit = min(offset + maxNumber, len(testBlockExpirationsCpy)) let - iter1 = AsyncIter[int].new(offset..= size: return 0 @@ -45,7 +45,4 @@ proc new*( consumed += read return read - Chunker.new( - reader = reader, - pad = pad, - chunkSize = chunkSize) + Chunker.new(reader = reader, pad = pad, chunkSize = chunkSize) diff --git a/tests/codex/merkletree/generictreetests.nim b/tests/codex/merkletree/generictreetests.nim index df9ba33c3..0e1f7c9f0 100644 --- a/tests/codex/merkletree/generictreetests.nim +++ b/tests/codex/merkletree/generictreetests.nim @@ -3,127 +3,109 @@ import std/unittest import pkg/codex/merkletree 
proc testGenericTree*[H, K, U]( - name: string, - data: openArray[H], - zero: H, - compress: proc(z, y: H, key: K): H, - makeTree: proc(data: seq[H]): U) = - - let - data = @data + name: string, + data: openArray[H], + zero: H, + compress: proc(z, y: H, key: K): H, + makeTree: proc(data: seq[H]): U, +) = + let data = @data suite "Correctness tests - " & name: - test "Should build correct tree for even bottom layer": - let - expectedRoot = compress( - compress( - compress(data[0], data[1], K.KeyBottomLayer), - compress(data[2], data[3], K.KeyBottomLayer), - K.KeyNone - ), - compress( - compress(data[4], data[5], K.KeyBottomLayer), - compress(data[6], data[7], K.KeyBottomLayer), - K.KeyNone - ), - K.KeyNone - ) - - let - tree = makeTree( data[0..7] ) + let expectedRoot = compress( + compress( + compress(data[0], data[1], K.KeyBottomLayer), + compress(data[2], data[3], K.KeyBottomLayer), + K.KeyNone, + ), + compress( + compress(data[4], data[5], K.KeyBottomLayer), + compress(data[6], data[7], K.KeyBottomLayer), + K.KeyNone, + ), + K.KeyNone, + ) + + let tree = makeTree(data[0 .. 7]) check: tree.root.tryGet == expectedRoot test "Should build correct tree for odd bottom layer": - let - expectedRoot = compress( - compress( - compress(data[0], data[1], K.KeyBottomLayer), - compress(data[2], data[3], K.KeyBottomLayer), - K.KeyNone - ), - compress( - compress(data[4], data[5], K.KeyBottomLayer), - compress(data[6], zero, K.KeyOddAndBottomLayer), - K.KeyNone - ), - K.KeyNone - ) - - let - tree = makeTree( data[0..6] ) + let expectedRoot = compress( + compress( + compress(data[0], data[1], K.KeyBottomLayer), + compress(data[2], data[3], K.KeyBottomLayer), + K.KeyNone, + ), + compress( + compress(data[4], data[5], K.KeyBottomLayer), + compress(data[6], zero, K.KeyOddAndBottomLayer), + K.KeyNone, + ), + K.KeyNone, + ) + + let tree = makeTree(data[0 .. 6]) check: tree.root.tryGet == expectedRoot test "Should build correct tree for even bottom and odd upper layers": - let - expectedRoot = compress( + let expectedRoot = compress( + compress( compress( - compress( - compress(data[0], data[1], K.KeyBottomLayer), - compress(data[2], data[3], K.KeyBottomLayer), - K.KeyNone - ), - compress( - compress(data[4], data[5], K.KeyBottomLayer), - compress(data[6], data[7], K.KeyBottomLayer), - K.KeyNone - ), - K.KeyNone + compress(data[0], data[1], K.KeyBottomLayer), + compress(data[2], data[3], K.KeyBottomLayer), + K.KeyNone, ), compress( - compress( - compress(data[8], data[9], K.KeyBottomLayer), - zero, - K.KeyOdd - ), - zero, - K.KeyOdd + compress(data[4], data[5], K.KeyBottomLayer), + compress(data[6], data[7], K.KeyBottomLayer), + K.KeyNone, ), - K.KeyNone - ) - - let - tree = makeTree( data[0..9] ) + K.KeyNone, + ), + compress( + compress(compress(data[8], data[9], K.KeyBottomLayer), zero, K.KeyOdd), + zero, + K.KeyOdd, + ), + K.KeyNone, + ) + + let tree = makeTree(data[0 .. 
9]) check: tree.root.tryGet == expectedRoot test "Should get and validate correct proofs": - let - expectedRoot = compress( + let expectedRoot = compress( + compress( compress( - compress( - compress(data[0], data[1], K.KeyBottomLayer), - compress(data[2], data[3], K.KeyBottomLayer), - K.KeyNone - ), - compress( - compress(data[4], data[5], K.KeyBottomLayer), - compress(data[6], data[7], K.KeyBottomLayer), - K.KeyNone - ), - K.KeyNone + compress(data[0], data[1], K.KeyBottomLayer), + compress(data[2], data[3], K.KeyBottomLayer), + K.KeyNone, ), compress( - compress( - compress(data[8], data[9], K.KeyBottomLayer), - zero, - K.KeyOdd - ), - zero, - K.KeyOdd + compress(data[4], data[5], K.KeyBottomLayer), + compress(data[6], data[7], K.KeyBottomLayer), + K.KeyNone, ), - K.KeyNone - ) - - let - tree = makeTree( data ) - - for i in 0.. 0): + while (let chunk = await chunker.getBytes(); chunk.len > 0): await stream.pushData(chunk) finally: await stream.pushEof() @@ -101,20 +98,27 @@ template setupAndTearDown*() {.dirty.} = blockDiscovery = Discovery.new( switch.peerInfo.privateKey, - announceAddrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/0") - .expect("Should return multiaddress")]) + announceAddrs = + @[ + MultiAddress.init("/ip4/127.0.0.1/tcp/0").expect("Should return multiaddress") + ], + ) peerStore = PeerCtxStore.new() pendingBlocks = PendingBlocksManager.new() - discovery = DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) + discovery = + DiscoveryEngine.new(localStore, peerStore, network, blockDiscovery, pendingBlocks) advertiser = Advertiser.new(localStore, blockDiscovery) - engine = BlockExcEngine.new(localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks) + engine = BlockExcEngine.new( + localStore, wallet, network, discovery, advertiser, peerStore, pendingBlocks + ) store = NetworkStore.new(engine, localStore) node = CodexNodeRef.new( switch = switch, networkStore = store, engine = engine, prover = Prover.none, - discovery = blockDiscovery) + discovery = blockDiscovery, + ) teardown: close(file) diff --git a/tests/codex/node/testcontracts.nim b/tests/codex/node/testcontracts.nim index 83d1ee987..cce6d5bdc 100644 --- a/tests/codex/node/testcontracts.nim +++ b/tests/codex/node/testcontracts.nim @@ -65,16 +65,16 @@ asyncchecksuite "Test Node - Host contracts": node.contracts = ( none ClientInteractions, some HostInteractions.new(clock, sales), - none ValidatorInteractions) + none ValidatorInteractions, + ) await node.start() # Populate manifest in local store manifest = await storeDataGetManifest(localStore, chunker) let - manifestBlock = bt.Block.new( - manifest.encode().tryGet(), - codec = ManifestCodec).tryGet() + manifestBlock = + bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) manifestCid = manifestBlock.cid @@ -85,9 +85,8 @@ asyncchecksuite "Test Node - Host contracts": protected = (await erasure.encode(manifest, 3, 2)).tryGet() builder = Poseidon2Builder.new(localStore, protected).tryGet() verifiable = (await builder.buildManifest()).tryGet() - verifiableBlock = bt.Block.new( - verifiable.encode().tryGet(), - codec = ManifestCodec).tryGet() + verifiableBlock = + bt.Block.new(verifiable.encode().tryGet(), codec = ManifestCodec).tryGet() (await localStore.putBlock(verifiableBlock)).tryGet() @@ -102,7 +101,7 @@ asyncchecksuite "Test Node - Host contracts": (await expiryUpdateCallback(manifestCidStr, expectedExpiry)).tryGet() - for index 
in 0.. 0 and blocks.len <= batchSize - return success() - )).tryGet() + for batchSize in 1 .. 12: + ( + await node.fetchBatched( + manifest, + batchSize = batchSize, + proc(blocks: seq[bt.Block]): Future[?!void] {.gcsafe, async.} = + check blocks.len > 0 and blocks.len <= batchSize + return success(), + ) + ).tryGet() test "Store and retrieve Data Stream": let stream = BufferStream.new() storeFut = node.store(stream) - oddChunkSize = math.trunc(DefaultBlockSize.float / 3.14).NBytes # Let's check that node.store can correctly rechunk these odd chunks - oddChunker = FileChunker.new(file = file, chunkSize = oddChunkSize, pad = false) # TODO: doesn't work with pad=tue + oddChunkSize = math.trunc(DefaultBlockSize.float / 3.14).NBytes + # Let's check that node.store can correctly rechunk these odd chunks + oddChunker = FileChunker.new(file = file, chunkSize = oddChunkSize, pad = false) + # TODO: doesn't work with pad=true - var - original: seq[byte] + var original: seq[byte] try: - while ( - let chunk = await oddChunker.getBytes(); - chunk.len > 0): + while (let chunk = await oddChunker.getBytes(); chunk.len > 0): original &= chunk await stream.pushData(chunk) finally: @@ -129,7 +128,8 @@ asyncchecksuite "Test Node - Basic": (await localStore.putBlock(blk)).tryGet() let stream = (await node.retrieve(blk.cid)).tryGet() - defer: await stream.close() + defer: + await stream.close() var data = newSeq[byte](testString.len) await stream.readExactly(addr data[0], data.len) @@ -139,20 +139,18 @@ asyncchecksuite "Test Node - Basic": let erasure = Erasure.new(store, leoEncoderProvider, leoDecoderProvider) manifest = await storeDataGetManifest(localStore, chunker) - manifestBlock = bt.Block.new( - manifest.encode().tryGet(), - codec = ManifestCodec).tryGet() + manifestBlock = + bt.Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() protected = (await erasure.encode(manifest, 3, 2)).tryGet() builder = Poseidon2Builder.new(localStore, protected).tryGet() verifiable = (await builder.buildManifest()).tryGet() - verifiableBlock = bt.Block.new( - verifiable.encode().tryGet(), - codec = ManifestCodec).tryGet() + verifiableBlock = + bt.Block.new(verifiable.encode().tryGet(), codec = ManifestCodec).tryGet() (await localStore.putBlock(manifestBlock)).tryGet() - let - request = (await node.setupRequest( + let request = ( + await node.setupRequest( cid = manifestBlock.cid, nodes = 5, tolerance = 2, @@ -160,7 +158,9 @@ asyncchecksuite "Test Node - Basic": reward = 2.u256, proofProbability = 3.u256, expiry = 200.u256, - collateral = 200.u256)).tryGet + collateral = 200.u256, + ) + ).tryGet check: (await verifiableBlock.cid in localStore) == true diff --git a/tests/codex/sales/states/testcancelled.nim b/tests/codex/sales/states/testcancelled.nim index e252cd9c0..16f694d3f 100644 --- a/tests/codex/sales/states/testcancelled.nim +++ b/tests/codex/sales/states/testcancelled.nim @@ -24,18 +24,12 @@ asyncchecksuite "sales state 'cancelled'": var reprocessSlotWas = false setup: - let onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = - returnBytesWas = returnBytes - reprocessSlotWas = reprocessSlot + let onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} = + returnBytesWas = returnBytes + reprocessSlotWas = reprocessSlot - let context = SalesContext( - market: market, - clock: clock - ) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, 
slotIndex, request.some) agent.onCleanUp = onCleanUp state = SaleCancelled.new() diff --git a/tests/codex/sales/states/testerrored.nim b/tests/codex/sales/states/testerrored.nim index dc5258947..f3d486698 100644 --- a/tests/codex/sales/states/testerrored.nim +++ b/tests/codex/sales/states/testerrored.nim @@ -24,26 +24,17 @@ asyncchecksuite "sales state 'errored'": var reprocessSlotWas = false setup: - let onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = - returnBytesWas = returnBytes - reprocessSlotWas = reprocessSlot + let onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} = + returnBytesWas = returnBytes + reprocessSlotWas = reprocessSlot - let context = SalesContext( - market: market, - clock: clock - ) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) agent.onCleanUp = onCleanUp state = SaleErrored(error: newException(ValueError, "oh no!")) test "calls onCleanUp with returnBytes = false and reprocessSlot = true": - state = SaleErrored( - error: newException(ValueError, "oh no!"), - reprocessSlot: true - ) + state = SaleErrored(error: newException(ValueError, "oh no!"), reprocessSlot: true) discard await state.run(agent) check eventually returnBytesWas == true check eventually reprocessSlotWas == true diff --git a/tests/codex/sales/states/testfilled.nim b/tests/codex/sales/states/testfilled.nim index e0efb5fc7..f8f77da6b 100644 --- a/tests/codex/sales/states/testfilled.nim +++ b/tests/codex/sales/states/testfilled.nim @@ -15,7 +15,6 @@ import ../../examples import ../../helpers checksuite "sales state 'filled'": - let request = StorageRequest.example let slotIndex = (request.ask.slots div 2).u256 @@ -27,22 +26,23 @@ checksuite "sales state 'filled'": setup: market = MockMarket.new() - slot = MockSlot(requestId: request.id, - host: Address.example, - slotIndex: slotIndex, - proof: Groth16Proof.default) + slot = MockSlot( + requestId: request.id, + host: Address.example, + slotIndex: slotIndex, + proof: Groth16Proof.default, + ) market.requestEnds[request.id] = 321 onExpiryUpdatePassedExpiry = -1 - let onExpiryUpdate = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.async.} = + let onExpiryUpdate = proc( + rootCid: string, expiry: SecondsSince1970 + ): Future[?!void] {.async.} = onExpiryUpdatePassedExpiry = expiry return success() let context = SalesContext(market: market, onExpiryUpdate: some onExpiryUpdate) - agent = newSalesAgent(context, - request.id, - slotIndex, - some request) + agent = newSalesAgent(context, request.id, slotIndex, some request) state = SaleFilled.new() test "switches to proving state when slot is filled by me": diff --git a/tests/codex/sales/states/testfilling.nim b/tests/codex/sales/states/testfilling.nim index 9a6f316df..f0ce70590 100644 --- a/tests/codex/sales/states/testfilling.nim +++ b/tests/codex/sales/states/testfilling.nim @@ -8,7 +8,6 @@ import ../../examples import ../../helpers checksuite "sales state 'filling'": - let request = StorageRequest.example let slotIndex = (request.ask.slots div 2).u256 var state: SaleFilling diff --git a/tests/codex/sales/states/testfinished.nim b/tests/codex/sales/states/testfinished.nim index a5f6690f3..c6bf5aba8 100644 --- a/tests/codex/sales/states/testfinished.nim +++ b/tests/codex/sales/states/testfinished.nim @@ -8,7 +8,6 @@ import ../../examples import ../../helpers checksuite "sales state 'finished'": 
- let request = StorageRequest.example var state: SaleFinished diff --git a/tests/codex/sales/states/testignored.nim b/tests/codex/sales/states/testignored.nim index 4f4dbbf6e..787ff6273 100644 --- a/tests/codex/sales/states/testignored.nim +++ b/tests/codex/sales/states/testignored.nim @@ -24,18 +24,12 @@ asyncchecksuite "sales state 'ignored'": var reprocessSlotWas = false setup: - let onCleanUp = proc (returnBytes = false, reprocessSlot = false) {.async.} = - returnBytesWas = returnBytes - reprocessSlotWas = reprocessSlot + let onCleanUp = proc(returnBytes = false, reprocessSlot = false) {.async.} = + returnBytesWas = returnBytes + reprocessSlotWas = reprocessSlot - let context = SalesContext( - market: market, - clock: clock - ) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + let context = SalesContext(market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) agent.onCleanUp = onCleanUp state = SaleIgnored.new() diff --git a/tests/codex/sales/states/testinitialproving.nim b/tests/codex/sales/states/testinitialproving.nim index af12852b9..97331a077 100644 --- a/tests/codex/sales/states/testinitialproving.nim +++ b/tests/codex/sales/states/testinitialproving.nim @@ -29,21 +29,16 @@ asyncchecksuite "sales state 'initialproving'": var receivedChallenge: ProofChallenge setup: - let onProve = proc (slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = - receivedChallenge = challenge - return success(proof) - let context = SalesContext( - onProve: onProve.some, - market: market, - clock: clock - ) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + let onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = + receivedChallenge = challenge + return success(proof) + let context = SalesContext(onProve: onProve.some, market: market, clock: clock) + agent = newSalesAgent(context, request.id, slotIndex, request.some) state = SaleInitialProving.new() - proc allowProofToStart {.async.} = + proc allowProofToStart() {.async.} = # it won't start proving until the next period await clock.advanceToNextPeriod(market) @@ -91,18 +86,14 @@ asyncchecksuite "sales state 'initialproving'": check SaleFilling(!next).proof == proof test "switches to errored state when onProve callback fails": - let onProveFailed: OnProve = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + let onProveFailed: OnProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = return failure("oh no!") - let proofFailedContext = SalesContext( - onProve: onProveFailed.some, - market: market, - clock: clock - ) - agent = newSalesAgent(proofFailedContext, - request.id, - slotIndex, - request.some) + let proofFailedContext = + SalesContext(onProve: onProveFailed.some, market: market, clock: clock) + agent = newSalesAgent(proofFailedContext, request.id, slotIndex, request.some) let future = state.run(agent) await allowProofToStart() diff --git a/tests/codex/sales/states/testpreparing.nim b/tests/codex/sales/states/testpreparing.nim index 94febbacd..8ad5e7d64 100644 --- a/tests/codex/sales/states/testpreparing.nim +++ b/tests/codex/sales/states/testpreparing.nim @@ -38,7 +38,7 @@ asyncchecksuite "sales state 'preparing'": freeSize: request.ask.slotSize + 100.u256, duration: request.ask.duration + 60.u256, minPrice: request.ask.pricePerSlot - 10.u256, - maxCollateral: request.ask.collateral + 400.u256 + maxCollateral: 
request.ask.collateral + 400.u256, ) let repoDs = SQLiteDatastore.new(Memory).tryGet() let metaDs = SQLiteDatastore.new(Memory).tryGet() @@ -46,17 +46,11 @@ asyncchecksuite "sales state 'preparing'": await repo.start() state = SalePreparing.new() - context = SalesContext( - market: market, - clock: clock - ) + context = SalesContext(market: market, clock: clock) reservations = MockReservations.new(repo) context.reservations = reservations - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + agent = newSalesAgent(context, request.id, slotIndex, request.some) teardown: await repo.stop() @@ -75,10 +69,8 @@ asyncchecksuite "sales state 'preparing'": proc createAvailability() {.async.} = let a = await reservations.createAvailability( - availability.totalSize, - availability.duration, - availability.minPrice, - availability.maxCollateral + availability.totalSize, availability.duration, availability.minPrice, + availability.maxCollateral, ) availability = a.get diff --git a/tests/codex/sales/states/testproving.nim b/tests/codex/sales/states/testproving.nim index 5f18746b2..afdeb4d2b 100644 --- a/tests/codex/sales/states/testproving.nim +++ b/tests/codex/sales/states/testproving.nim @@ -16,7 +16,6 @@ import ../../helpers/mockmarket import ../../helpers/mockclock asyncchecksuite "sales state 'proving'": - let slot = Slot.example let request = slot.request let proof = Groth16Proof.example @@ -30,14 +29,13 @@ asyncchecksuite "sales state 'proving'": setup: clock = MockClock.new() market = MockMarket.new() - let onProve = proc (slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = - receivedChallenge = challenge - return success(proof) + let onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = + receivedChallenge = challenge + return success(proof) let context = SalesContext(market: market, clock: clock, onProve: onProve.some) - agent = newSalesAgent(context, - request.id, - slot.slotIndex, - request.some) + agent = newSalesAgent(context, request.id, slot.slotIndex, request.some) state = SaleProving.new() proc advanceToNextPeriod(market: Market) {.async.} = diff --git a/tests/codex/sales/states/testsimulatedproving.nim b/tests/codex/sales/states/testsimulatedproving.nim index f4ca3ba97..1fc5331c8 100644 --- a/tests/codex/sales/states/testsimulatedproving.nim +++ b/tests/codex/sales/states/testsimulatedproving.nim @@ -16,7 +16,6 @@ import ../../helpers/mockmarket import ../../helpers/mockclock asyncchecksuite "sales state 'simulated-proving'": - let slot = Slot.example let request = slot.request let proof = Groth16Proof.example @@ -43,13 +42,12 @@ asyncchecksuite "sales state 'simulated-proving'": market.setProofRequired(slot.id, true) subscription = await market.subscribeProofSubmission(onProofSubmission) - let onProve = proc (slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = - return success(proof) + let onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = + return success(proof) let context = SalesContext(market: market, clock: clock, onProve: onProve.some) - agent = newSalesAgent(context, - request.id, - slot.slotIndex, - request.some) + agent = newSalesAgent(context, request.id, slot.slotIndex, request.some) state = SaleProvingSimulated.new() state.failEveryNProofs = failEveryNProofs diff --git a/tests/codex/sales/states/testslotreserving.nim b/tests/codex/sales/states/testslotreserving.nim index 20d007455..b15fc8b4b 100644 --- 
a/tests/codex/sales/states/testslotreserving.nim +++ b/tests/codex/sales/states/testslotreserving.nim @@ -31,15 +31,9 @@ asyncchecksuite "sales state 'SlotReserving'": clock = MockClock.new() state = SaleSlotReserving.new() - context = SalesContext( - market: market, - clock: clock - ) + context = SalesContext(market: market, clock: clock) - agent = newSalesAgent(context, - request.id, - slotIndex, - request.some) + agent = newSalesAgent(context, request.id, slotIndex, request.some) test "switches to cancelled state when request expires": let next = state.onCancelled(request) diff --git a/tests/codex/sales/states/testunknown.nim b/tests/codex/sales/states/testunknown.nim index e02b3c90b..97730f498 100644 --- a/tests/codex/sales/states/testunknown.nim +++ b/tests/codex/sales/states/testunknown.nim @@ -15,7 +15,6 @@ import ../../examples import ../../helpers checksuite "sales state 'unknown'": - let request = StorageRequest.example let slotIndex = (request.ask.slots div 2).u256 let slotId = slotId(request.id, slotIndex) @@ -27,10 +26,7 @@ checksuite "sales state 'unknown'": setup: market = MockMarket.new() let context = SalesContext(market: market) - agent = newSalesAgent(context, - request.id, - slotIndex, - StorageRequest.none) + agent = newSalesAgent(context, request.id, slotIndex, StorageRequest.none) state = SaleUnknown.new() test "switches to error state when on chain state cannot be fetched": diff --git a/tests/codex/sales/testreservations.nim b/tests/codex/sales/testreservations.nim index 285ad0e3c..7ce785825 100644 --- a/tests/codex/sales/testreservations.nim +++ b/tests/codex/sales/testreservations.nim @@ -39,22 +39,16 @@ asyncchecksuite "Reservations module": proc createAvailability(): Availability = let example = Availability.example - let totalSize = rand(100000..200000) + let totalSize = rand(100000 .. 200000) let availability = waitFor reservations.createAvailability( - totalSize.u256, - example.duration, - example.minPrice, - example.maxCollateral + totalSize.u256, example.duration, example.minPrice, example.maxCollateral ) return availability.get proc createReservation(availability: Availability): Reservation = - let size = rand(1.. 
agent.data.requestId == request.id and agent.data.slotIndex == 0.u256) - check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256) + check sales.agents.any( + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256 + ) + check sales.agents.any( + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + ) asyncchecksuite "Sales": let @@ -140,7 +140,7 @@ asyncchecksuite "Sales": freeSize: 100.u256, duration: 60.u256, minPrice: 600.u256, - maxCollateral: 400.u256 + maxCollateral: 400.u256, ) request = StorageRequest( ask: StorageAsk( @@ -150,10 +150,8 @@ asyncchecksuite "Sales": reward: 10.u256, collateral: 200.u256, ), - content: StorageContent( - cid: "some cid" - ), - expiry: (getTime() + initDuration(hours=1)).toUnix.u256 + content: StorageContent(cid: "some cid"), + expiry: (getTime() + initDuration(hours = 1)).toUnix.u256, ) market = MockMarket.new() @@ -169,16 +167,20 @@ asyncchecksuite "Sales": await repo.start() sales = Sales.new(market, clock, repo) reservations = sales.context.reservations - sales.onStore = proc(request: StorageRequest, - slot: UInt256, - onBatch: BatchProc): Future[?!void] {.async.} = + sales.onStore = proc( + request: StorageRequest, slot: UInt256, onBatch: BatchProc + ): Future[?!void] {.async.} = return success() - sales.onExpiryUpdate = proc(rootCid: string, expiry: SecondsSince1970): Future[?!void] {.async.} = + sales.onExpiryUpdate = proc( + rootCid: string, expiry: SecondsSince1970 + ): Future[?!void] {.async.} = return success() queue = sales.context.slotQueue - sales.onProve = proc(slot: Slot, challenge: ProofChallenge): Future[?!Groth16Proof] {.async.} = + sales.onProve = proc( + slot: Slot, challenge: ProofChallenge + ): Future[?!Groth16Proof] {.async.} = return success(proof) await sales.start() itemsProcessed = @[] @@ -197,28 +199,25 @@ asyncchecksuite "Sales": return false sales.agents[idx].query(description) == state.some - proc allowRequestToStart {.async.} = + proc allowRequestToStart() {.async.} = check eventually isInState(0, "SaleInitialProving") # it won't start proving until the next period await clock.advanceToNextPeriod(market) - proc getAvailability: Availability = + proc getAvailability(): Availability = let key = availability.id.key.get (waitFor reservations.get(key, Availability)).get proc createAvailability() = let a = waitFor reservations.createAvailability( - availability.totalSize, - availability.duration, - availability.minPrice, - availability.maxCollateral + availability.totalSize, availability.duration, availability.minPrice, + availability.maxCollateral, ) availability = a.get # update id - proc notProcessed(itemsProcessed: seq[SlotQueueItem], - request: StorageRequest): bool = + proc notProcessed(itemsProcessed: seq[SlotQueueItem], request: StorageRequest): bool = let items = SlotQueueItem.init(request) - for i in 0.. 0 # queue starts paused, allow items to be added to the queue + check eventually queue.len > 0 + # queue starts paused, allow items to be added to the queue check eventually queue.paused # The first processed item will have been re-pushed with `seen = # true`. Then, once this item is processed by the queue, its 'seen' flag @@ -312,14 +312,15 @@ asyncchecksuite "Sales": for item in items: check queue.contains(item) - for i in 0.. 
0 # queue starts paused, allow items to be added to the queue + check eventually queue.len > 0 + # queue starts paused, allow items to be added to the queue check eventually queue.paused # The first processed item/slot will be filled (eventually). Subsequent # items will be processed and eventually re-pushed with `seen = true`. Once @@ -329,28 +330,29 @@ asyncchecksuite "Sales": # Therefore, there should eventually be 3 items remaining in the queue, all # seen. check eventually queue.len == 3 - for i in 0.. agent.data.requestId == request.id and agent.data.slotIndex == 0.u256) - check sales.agents.any(agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256) + check sales.agents.any( + agent => agent.data.requestId == request.id and agent.data.slotIndex == 0.u256 + ) + check sales.agents.any( + agent => agent.data.requestId == request.id and agent.data.slotIndex == 1.u256 + ) test "deletes inactive reservations on load": createAvailability() discard await reservations.createReservation( - availability.id, - 100.u256, - RequestId.example, - UInt256.example) + availability.id, 100.u256, RequestId.example, UInt256.example + ) check (await reservations.all(Reservation)).get.len == 1 await sales.load() check (await reservations.all(Reservation)).get.len == 0 diff --git a/tests/codex/sales/testsalesagent.nim b/tests/codex/sales/testsalesagent.nim index fe19ecb04..f17711d3b 100644 --- a/tests/codex/sales/testsalesagent.nim +++ b/tests/codex/sales/testsalesagent.nim @@ -21,8 +21,11 @@ type MockState = ref object of SaleState MockErrorState = ref object of ErrorHandlingState -method `$`*(state: MockState): string = "MockState" -method `$`*(state: MockErrorState): string = "MockErrorState" +method `$`*(state: MockState): string = + "MockState" + +method `$`*(state: MockErrorState): string = + "MockErrorState" method onCancelled*(state: MockState, request: StorageRequest): ?State = onCancelCalled = true @@ -30,8 +33,9 @@ method onCancelled*(state: MockState, request: StorageRequest): ?State = method onFailed*(state: MockState, request: StorageRequest): ?State = onFailedCalled = true -method onSlotFilled*(state: MockState, requestId: RequestId, - slotIndex: UInt256): ?State = +method onSlotFilled*( + state: MockState, requestId: RequestId, slotIndex: UInt256 +): ?State = onSlotFilledCalled = true method onError*(state: MockErrorState, err: ref CatchableError): ?State = @@ -50,26 +54,21 @@ asyncchecksuite "Sales agent": setup: market = MockMarket.new() - market.requestExpiry[request.id] = getTime().toUnix() + request.expiry.truncate(int64) + market.requestExpiry[request.id] = + getTime().toUnix() + request.expiry.truncate(int64) clock = MockClock.new() context = SalesContext(market: market, clock: clock) slotIndex = 0.u256 onCancelCalled = false onFailedCalled = false onSlotFilledCalled = false - agent = newSalesAgent(context, - request.id, - slotIndex, - some request) + agent = newSalesAgent(context, request.id, slotIndex, some request) teardown: await agent.stop() test "can retrieve request": - agent = newSalesAgent(context, - request.id, - slotIndex, - none StorageRequest) + agent = newSalesAgent(context, request.id, slotIndex, none StorageRequest) market.requested = @[request] await agent.retrieveRequest() check agent.data.request == some request @@ -101,7 +100,9 @@ asyncchecksuite "Sales agent": clock.set(market.requestExpiry[request.id] + 1) check eventually onCancelCalled - for requestState in {RequestState.New, RequestState.Started, RequestState.Finished, 
RequestState.Failed}: + for requestState in { + RequestState.New, RequestState.Started, RequestState.Finished, RequestState.Failed + }: test "onCancelled is not called when request state is " & $requestState: agent.start(MockState.new()) await agent.subscribe() diff --git a/tests/codex/sales/testslotqueue.nim b/tests/codex/sales/testslotqueue.nim index 885e1037b..e6583bb77 100644 --- a/tests/codex/sales/testslotqueue.nim +++ b/tests/codex/sales/testslotqueue.nim @@ -13,7 +13,6 @@ import ../helpers/mockslotqueueitem import ../examples suite "Slot queue start/stop": - var queue: SlotQueue setup: @@ -46,7 +45,6 @@ suite "Slot queue start/stop": check not queue.running suite "Slot queue workers": - var queue: SlotQueue proc onProcessSlot(item: SlotQueueItem, doneProcessing: Future[void]) {.async.} = @@ -106,7 +104,6 @@ suite "Slot queue workers": check eventually queue.activeWorkers == 1 suite "Slot queue": - var onProcessSlotCalled = false var onProcessSlotCalledWith: seq[(RequestId, uint16)] var queue: SlotQueue @@ -169,7 +166,7 @@ suite "Slot queue": reward: 2.u256, # profitability is higher (good) collateral: 1.u256, expiry: 1.u256, - seen: true # seen (bad), more weight than profitability + seen: true, # seen (bad), more weight than profitability ) let itemB = MockSlotQueueItem( requestId: request.id, @@ -179,7 +176,7 @@ suite "Slot queue": reward: 1.u256, # profitability is lower (bad) collateral: 1.u256, expiry: 1.u256, - seen: false # not seen (good) + seen: false, # not seen (good) ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # B higher priority than A check itemA.toSlotQueueItem > itemB.toSlotQueueItem @@ -194,7 +191,7 @@ suite "Slot queue": reward: 1.u256, # reward is lower (bad) collateral: 1.u256, # collateral is lower (good) expiry: 1.u256, - seen: false + seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, @@ -204,7 +201,7 @@ suite "Slot queue": reward: 2.u256, # reward is higher (good), more weight than collateral collateral: 2.u256, # collateral is higher (bad) expiry: 1.u256, - seen: false + seen: false, ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority @@ -219,7 +216,7 @@ suite "Slot queue": reward: 1.u256, collateral: 2.u256, # collateral is higher (bad) expiry: 2.u256, # expiry is longer (good) - seen: false + seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, @@ -229,7 +226,7 @@ suite "Slot queue": reward: 1.u256, collateral: 1.u256, # collateral is lower (good), more weight than expiry expiry: 1.u256, # expiry is shorter (bad) - seen: false + seen: false, ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority @@ -244,7 +241,7 @@ suite "Slot queue": reward: 1.u256, collateral: 1.u256, expiry: 1.u256, # expiry is shorter (bad) - seen: false + seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, @@ -254,7 +251,7 @@ suite "Slot queue": reward: 1.u256, collateral: 1.u256, expiry: 2.u256, # expiry is longer (good), more weight than slotSize - seen: false + seen: false, ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority @@ -269,7 +266,7 @@ suite "Slot queue": reward: 1.u256, collateral: 1.u256, expiry: 1.u256, # expiry is shorter (bad) - seen: false + seen: false, ) let itemB = MockSlotQueueItem( requestId: request.id, @@ -279,7 +276,7 @@ suite "Slot queue": reward: 1.u256, collateral: 1.u256, expiry: 1.u256, - seen: false + seen: false, ) check itemB.toSlotQueueItem < itemA.toSlotQueueItem # < indicates higher priority @@ 
-289,7 +286,7 @@ suite "Slot queue": let items = SlotQueueItem.init(request) check items.len.uint64 == request.ask.slots var checked = 0 - for slotIndex in 0'u16.. 0): let blk = bt.Block.new(chunk).tryGet() @@ -78,24 +79,24 @@ proc createBlocks*( blk proc createProtectedManifest*( - datasetBlocks: seq[bt.Block], - store: BlockStore, - numDatasetBlocks: int, - ecK: int, ecM: int, - blockSize: NBytes, - originalDatasetSize: int, - totalDatasetSize: int): - Future[tuple[manifest: Manifest, protected: Manifest]] {.async.} = - + datasetBlocks: seq[bt.Block], + store: BlockStore, + numDatasetBlocks: int, + ecK: int, + ecM: int, + blockSize: NBytes, + originalDatasetSize: int, + totalDatasetSize: int, +): Future[tuple[manifest: Manifest, protected: Manifest]] {.async.} = let cids = datasetBlocks.mapIt(it.cid) - datasetTree = CodexTree.init(cids[0.. " & $expected & ")": + test "Can get slotBlockIndex from slotCellIndex (" & $input & " -> " & $expected & + ")": let slotBlockIndex = toBlkInSlot(input, numCells = cellsPerBlock) check: slotBlockIndex == expected for (input, expected) in [(10, 10), (31, 31), (32, 0), (63, 31), (64, 0)]: - test "Can get blockCellIndex from slotCellIndex (" & $input & " -> " & $expected & ")": + test "Can get blockCellIndex from slotCellIndex (" & $input & " -> " & $expected & + ")": let blockCellIndex = toCellInBlk(input, numCells = cellsPerBlock) check: diff --git a/tests/codex/slots/testbackendfactory.nim b/tests/codex/slots/testbackendfactory.nim index 87a7733a1..a24bc41a5 100644 --- a/tests/codex/slots/testbackendfactory.nim +++ b/tests/codex/slots/testbackendfactory.nim @@ -12,17 +12,13 @@ import pkg/codex/utils/natutils import ../helpers import ../examples -type - BackendUtilsMock = ref object of BackendUtils - argR1csFile: string - argWasmFile: string - argZKeyFile: string +type BackendUtilsMock = ref object of BackendUtils + argR1csFile: string + argWasmFile: string + argZKeyFile: string method initializeCircomBackend*( - self: BackendUtilsMock, - r1csFile: string, - wasmFile: string, - zKeyFile: string + self: BackendUtilsMock, r1csFile: string, wasmFile: string, zKeyFile: string ): AnyBackend = self.argR1csFile = r1csFile self.argWasmFile = wasmFile @@ -48,15 +44,13 @@ suite "Test BackendFactory": let config = CodexConf( cmd: StartUpCmd.persistence, - nat: NatConfig( - hasExtIp: false, - nat: NatNone), + nat: NatConfig(hasExtIp: false, nat: NatNone), metricsAddress: parseIpAddress("127.0.0.1"), persistenceCmd: PersistenceCmd.prover, marketplaceAddress: EthAddress.example.some, circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"), circomWasm: InputFile("tests/circuits/fixtures/proof_main.wasm"), - circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey") + circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey"), ) backend = config.initializeBackend(utilsMock).tryGet @@ -70,16 +64,14 @@ suite "Test BackendFactory": let config = CodexConf( cmd: StartUpCmd.persistence, - nat: NatConfig( - hasExtIp: false, - nat: NatNone), + nat: NatConfig(hasExtIp: false, nat: NatNone), metricsAddress: parseIpAddress("127.0.0.1"), persistenceCmd: PersistenceCmd.prover, marketplaceAddress: EthAddress.example.some, # Set the circuitDir such that the tests/circuits/fixtures/ files # will be picked up as local files: - circuitDir: OutDir("tests/circuits/fixtures") + circuitDir: OutDir("tests/circuits/fixtures"), ) backend = config.initializeBackend(utilsMock).tryGet @@ -93,13 +85,11 @@ suite "Test BackendFactory": let config = CodexConf( cmd: 
StartUpCmd.persistence, - nat: NatConfig( - hasExtIp: false, - nat: NatNone), + nat: NatConfig(hasExtIp: false, nat: NatNone), metricsAddress: parseIpAddress("127.0.0.1"), persistenceCmd: PersistenceCmd.prover, marketplaceAddress: EthAddress.example.some, - circuitDir: OutDir(circuitDir) + circuitDir: OutDir(circuitDir), ) backendResult = config.initializeBackend(utilsMock) diff --git a/tests/codex/slots/testconverters.nim b/tests/codex/slots/testconverters.nim index cf18d6b21..58857f6bb 100644 --- a/tests/codex/slots/testconverters.nim +++ b/tests/codex/slots/testconverters.nim @@ -10,8 +10,7 @@ import ../../asynctest import ../examples import ../merkletree/helpers -let - hash: Poseidon2Hash = toF(12345) +let hash: Poseidon2Hash = toF(12345) suite "Converters": test "CellBlock cid": @@ -44,4 +43,4 @@ suite "Converters": poseidonProof = toVerifiableProof(codexProof).tryGet() check: - Poseidon2Proof.example == poseidonProof + Poseidon2Proof.example == poseidonProof diff --git a/tests/codex/slots/testprover.nim b/tests/codex/slots/testprover.nim index f6deaebac..c567db55d 100644 --- a/tests/codex/slots/testprover.nim +++ b/tests/codex/slots/testprover.nim @@ -36,15 +36,13 @@ suite "Test Prover": metaDs = metaTmp.newDb() config = CodexConf( cmd: StartUpCmd.persistence, - nat: NatConfig( - hasExtIp: false, - nat: NatNone), + nat: NatConfig(hasExtIp: false, nat: NatNone), metricsAddress: parseIpAddress("127.0.0.1"), persistenceCmd: PersistenceCmd.prover, circomR1cs: InputFile("tests/circuits/fixtures/proof_main.r1cs"), circomWasm: InputFile("tests/circuits/fixtures/proof_main.wasm"), circomZkey: InputFile("tests/circuits/fixtures/proof_main.zkey"), - numProofSamples: samples + numProofSamples: samples, ) backend = config.initializeBackend().tryGet() @@ -56,42 +54,35 @@ suite "Test Prover": await metaTmp.destroyDb() test "Should sample and prove a slot": - let - (_, _, verifiable) = - await createVerifiableManifest( - store, - 8, # number of blocks in the original dataset (before EC) - 5, # ecK - 3, # ecM - blockSize, - cellSize) + let (_, _, verifiable) = await createVerifiableManifest( + store, + 8, # number of blocks in the original dataset (before EC) + 5, # ecK + 3, # ecM + blockSize, + cellSize, + ) - let - (inputs, proof) = ( - await prover.prove(1, verifiable, challenge)).tryGet + let (inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet check: (await prover.verify(proof, inputs)).tryGet == true test "Should generate valid proofs when slots consist of single blocks": - # To get single-block slots, we just need to set the number of blocks in # the original dataset to be the same as ecK. The total number of blocks # after generating random data for parity will be ecK + ecM, which will # match the number of slots. 
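# E.g. the call below uses numDatasetBlocks = ecK = 2 with ecM = 1, so EC # yields 2 + 1 = 3 blocks in total, one for each of the 3 slots.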
- let - (_, _, verifiable) = - await createVerifiableManifest( - store, - 2, # number of blocks in the original dataset (before EC) - 2, # ecK - 1, # ecM - blockSize, - cellSize) + let (_, _, verifiable) = await createVerifiableManifest( + store, + 2, # number of blocks in the original dataset (before EC) + 2, # ecK + 1, # ecM + blockSize, + cellSize, + ) - let - (inputs, proof) = ( - await prover.prove(1, verifiable, challenge)).tryGet + let (inputs, proof) = (await prover.prove(1, verifiable, challenge)).tryGet check: (await prover.verify(proof, inputs)).tryGet == true diff --git a/tests/codex/slots/testslotbuilder.nim b/tests/codex/slots/testslotbuilder.nim index 88ee9ae59..ef83bdee7 100644 --- a/tests/codex/slots/testslotbuilder.nim +++ b/tests/codex/slots/testslotbuilder.nim @@ -27,8 +27,7 @@ import pkg/codex/slots {.all.} privateAccess(Poseidon2Builder) # enable access to private fields privateAccess(Manifest) # enable access to private fields -const - Strategy = SteppedStrategy +const Strategy = SteppedStrategy suite "Slot builder": let @@ -39,24 +38,27 @@ suite "Slot builder": numSlots = ecK + ecM numDatasetBlocks = 8 - numTotalBlocks = calcEcBlocksCount(numDatasetBlocks, ecK, ecM) # total number of blocks in the dataset after - # EC (should will match number of slots) + numTotalBlocks = calcEcBlocksCount(numDatasetBlocks, ecK, ecM) + # total number of blocks in the dataset after + # EC (should match number of slots) originalDatasetSize = numDatasetBlocks * blockSize.int - totalDatasetSize = numTotalBlocks * blockSize.int + totalDatasetSize = numTotalBlocks * blockSize.int - numSlotBlocks = numTotalBlocks div numSlots - numBlockCells = (blockSize div cellSize).int # number of cells per block - numSlotCells = numSlotBlocks * numBlockCells # number of uncorrected slot cells - pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot - numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks # pow2 blocks per slot + numSlotBlocks = numTotalBlocks div numSlots + numBlockCells = (blockSize div cellSize).int # number of cells per block + numSlotCells = numSlotBlocks * numBlockCells # number of uncorrected slot cells + pow2SlotCells = nextPowerOfTwo(numSlotCells) # pow2 cells per slot + numPadSlotBlocks = (pow2SlotCells div numBlockCells) - numSlotBlocks + # pow2 blocks per slot - numSlotBlocksTotal = # pad blocks per slot + numSlotBlocksTotal = + # pad blocks per slot if numPadSlotBlocks > 0: - numPadSlotBlocks + numSlotBlocks - else: - numSlotBlocks + numPadSlotBlocks + numSlotBlocks + else: + numSlotBlocks - numBlocksTotal = numSlotBlocksTotal * numSlots + numBlocksTotal = numSlotBlocksTotal * numSlots # empty digest emptyDigest = SpongeMerkle.digest(newSeq[byte](blockSize.int), cellSize.int) @@ -78,18 +80,14 @@ suite "Slot builder": metaDs = metaTmp.newDb() localStore = RepoStore.new(repoDs, metaDs) - chunker = RandomChunker.new(Rng.instance(), size = totalDatasetSize, chunkSize = blockSize) + chunker = + RandomChunker.new(Rng.instance(), size = totalDatasetSize, chunkSize = blockSize) datasetBlocks = await chunker.createBlocks(localStore) - (manifest, protectedManifest) = - await createProtectedManifest( - datasetBlocks, - localStore, - numDatasetBlocks, - ecK, ecM, - blockSize, - originalDatasetSize, - totalDatasetSize) + (manifest, protectedManifest) = await createProtectedManifest( + datasetBlocks, localStore, numDatasetBlocks, ecK, ecM, blockSize, + originalDatasetSize, totalDatasetSize, + ) teardown: await localStore.close() @@ -109,55 +107,55 @@ suite 
"Slot builder": reset(chunker) test "Can only create builder with protected manifest": - let - unprotectedManifest = Manifest.new( - treeCid = Cid.example, - blockSize = blockSize.NBytes, - datasetSize = originalDatasetSize.NBytes) + let unprotectedManifest = Manifest.new( + treeCid = Cid.example, + blockSize = blockSize.NBytes, + datasetSize = originalDatasetSize.NBytes, + ) check: - Poseidon2Builder.new(localStore, unprotectedManifest, cellSize = cellSize) - .error.msg == "Manifest is not protected." + Poseidon2Builder.new(localStore, unprotectedManifest, cellSize = cellSize).error.msg == + "Manifest is not protected." test "Number of blocks must be devisable by number of slots": - let - mismatchManifest = Manifest.new( - manifest = Manifest.new( - treeCid = Cid.example, - blockSize = blockSize.NBytes, - datasetSize = originalDatasetSize.NBytes), + let mismatchManifest = Manifest.new( + manifest = Manifest.new( treeCid = Cid.example, - datasetSize = totalDatasetSize.NBytes, - ecK = ecK - 1, - ecM = ecM, - strategy = Strategy) + blockSize = blockSize.NBytes, + datasetSize = originalDatasetSize.NBytes, + ), + treeCid = Cid.example, + datasetSize = totalDatasetSize.NBytes, + ecK = ecK - 1, + ecM = ecM, + strategy = Strategy, + ) check: - Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize) - .error.msg == "Number of blocks must be divisable by number of slots." + Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == + "Number of blocks must be divisable by number of slots." test "Block size must be divisable by cell size": - let - mismatchManifest = Manifest.new( - manifest = Manifest.new( - treeCid = Cid.example, - blockSize = (blockSize + 1).NBytes, - datasetSize = (originalDatasetSize - 1).NBytes), + let mismatchManifest = Manifest.new( + manifest = Manifest.new( treeCid = Cid.example, - datasetSize = (totalDatasetSize - 1).NBytes, - ecK = ecK, - ecM = ecM, - strategy = Strategy) + blockSize = (blockSize + 1).NBytes, + datasetSize = (originalDatasetSize - 1).NBytes, + ), + treeCid = Cid.example, + datasetSize = (totalDatasetSize - 1).NBytes, + ecK = ecK, + ecM = ecM, + strategy = Strategy, + ) check: - Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize) - .error.msg == "Block size must be divisable by cell size." + Poseidon2Builder.new(localStore, mismatchManifest, cellSize = cellSize).error.msg == + "Block size must be divisable by cell size." test "Should build correct slot builder": - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = + Poseidon2Builder.new(localStore, protectedManifest, cellSize = cellSize).tryGet() check: builder.cellSize == cellSize @@ -169,15 +167,13 @@ suite "Slot builder": test "Should build slot hashes for all slots": let - steppedStrategy = Strategy.init( - 0, numBlocksTotal - 1, numSlots) + steppedStrategy = Strategy.init(0, numBlocksTotal - 1, numSlots) - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() - for i in 0.. 
(protectedManifest.numSlotBlocks - 1): - emptyDigest - else: - SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) + for i in 0 ..< numSlots: + let slotHashes = collect(newSeq): + for j, idx in steppedStrategy.getIndicies(i): + if j > (protectedManifest.numSlotBlocks - 1): + emptyDigest + else: + SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) Merkle.digest(slotHashes) @@ -272,20 +262,18 @@ suite "Slot builder": test "Should build correct verification root manifest": let steppedStrategy = Strategy.init(0, numBlocksTotal - 1, numSlots) - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() slotsHashes = collect(newSeq): - for i in 0.. (protectedManifest.numSlotBlocks - 1): - emptyDigest - else: - SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) + for i in 0 ..< numSlots: + let slotHashes = collect(newSeq): + for j, idx in steppedStrategy.getIndicies(i): + if j > (protectedManifest.numSlotBlocks - 1): + emptyDigest + else: + SpongeMerkle.digest(datasetBlocks[idx].data, cellSize.int) Merkle.digest(slotHashes) @@ -300,68 +288,46 @@ suite "Slot builder": test "Should not build from verifiable manifest with 0 slots": var - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() verifyManifest = (await builder.buildManifest()).tryGet() verifyManifest.slotRoots = @[] - check Poseidon2Builder.new( - localStore, - verifyManifest, - cellSize = cellSize).isErr + check Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).isErr test "Should not build from verifiable manifest with incorrect number of slots": var - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() verifyManifest = (await builder.buildManifest()).tryGet() - verifyManifest.slotRoots.del( - verifyManifest.slotRoots.len - 1 - ) + verifyManifest.slotRoots.del(verifyManifest.slotRoots.len - 1) - check Poseidon2Builder.new( - localStore, - verifyManifest, - cellSize = cellSize).isErr + check Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).isErr test "Should not build from verifiable manifest with invalid verify root": - let - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + let builder = + Poseidon2Builder.new(localStore, protectedManifest, cellSize = cellSize).tryGet() - var - verifyManifest = (await builder.buildManifest()).tryGet() + var verifyManifest = (await builder.buildManifest()).tryGet() - rng.shuffle( - Rng.instance, - verifyManifest.verifyRoot.data.buffer) + rng.shuffle(Rng.instance, verifyManifest.verifyRoot.data.buffer) - check Poseidon2Builder.new( - localStore, - verifyManifest, - cellSize = cellSize).isErr + check Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).isErr test "Should build from verifiable manifest": let - builder = Poseidon2Builder.new( - localStore, - protectedManifest, - cellSize = cellSize).tryGet() + builder = Poseidon2Builder + .new(localStore, protectedManifest, cellSize = cellSize) + .tryGet() verifyManifest = (await builder.buildManifest()).tryGet() - verificationBuilder = Poseidon2Builder.new( - localStore, - verifyManifest, - cellSize = 
cellSize).tryGet() + verificationBuilder = + Poseidon2Builder.new(localStore, verifyManifest, cellSize = cellSize).tryGet() check: builder.slotRoots == verificationBuilder.slotRoots diff --git a/tests/codex/stores/commonstoretests.nim b/tests/codex/stores/commonstoretests.nim index 7d6cc89ad..5e722a8aa 100644 --- a/tests/codex/stores/commonstoretests.nim +++ b/tests/codex/stores/commonstoretests.nim @@ -22,11 +22,9 @@ type Before* = proc(): Future[void] {.gcsafe.} After* = proc(): Future[void] {.gcsafe.} -proc commonBlockStoreTests*(name: string, - provider: StoreProvider, - before: Before = nil, - after: After = nil) = - +proc commonBlockStoreTests*( + name: string, provider: StoreProvider, before: Before = nil, after: After = nil +) = asyncchecksuite name & " Store Common": var newBlock, newBlock1, newBlock2, newBlock3: Block @@ -40,7 +38,8 @@ proc commonBlockStoreTests*(name: string, newBlock2 = Block.new("2".repeat(100).toBytes()).tryGet() newBlock3 = Block.new("3".repeat(100).toBytes()).tryGet() - (manifest, tree) = makeManifestAndTree(@[newBlock, newBlock1, newBlock2, newBlock3]).tryGet() + (manifest, tree) = + makeManifestAndTree(@[newBlock, newBlock1, newBlock2, newBlock3]).tryGet() if not isNil(before): await before() @@ -59,8 +58,9 @@ proc commonBlockStoreTests*(name: string, test "putBlock raises onBlockStored": var storedCid = Cid.example - proc onStored(cid: Cid) {.async.} = + proc onStored(cid: Cid) {.async.} = storedCid = cid + store.onBlockStored = onStored.some() (await store.putBlock(newBlock1)).tryGet() @@ -100,15 +100,13 @@ proc commonBlockStoreTests*(name: string, let blocks = @[newBlock1, newBlock2, newBlock3] - putHandles = await allFinished( - blocks.mapIt( store.putBlock( it ) )) + putHandles = await allFinished(blocks.mapIt(store.putBlock(it))) for handle in putHandles: check not handle.failed check handle.read.isOk - let - cids = (await store.listBlocks(blockType = BlockType.Block)).tryGet() + let cids = (await store.listBlocks(blockType = BlockType.Block)).tryGet() var count = 0 for c in cids: @@ -121,17 +119,18 @@ proc commonBlockStoreTests*(name: string, test "listBlocks Manifest": let blocks = @[newBlock1, newBlock2, newBlock3] - manifestBlock = Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + manifestBlock = + Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() treeBlock = Block.new(tree.encode()).tryGet() putHandles = await allFinished( - (@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) )) + (@[treeBlock, manifestBlock] & blocks).mapIt(store.putBlock(it)) + ) for handle in putHandles: check not handle.failed check handle.read.isOk - let - cids = (await store.listBlocks(blockType = BlockType.Manifest)).tryGet() + let cids = (await store.listBlocks(blockType = BlockType.Manifest)).tryGet() var count = 0 for c in cids: @@ -145,17 +144,18 @@ proc commonBlockStoreTests*(name: string, test "listBlocks Both": let blocks = @[newBlock1, newBlock2, newBlock3] - manifestBlock = Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() + manifestBlock = + Block.new(manifest.encode().tryGet(), codec = ManifestCodec).tryGet() treeBlock = Block.new(tree.encode()).tryGet() putHandles = await allFinished( - (@[treeBlock, manifestBlock] & blocks).mapIt( store.putBlock( it ) )) + (@[treeBlock, manifestBlock] & blocks).mapIt(store.putBlock(it)) + ) for handle in putHandles: check not handle.failed check handle.read.isOk - let - cids = (await store.listBlocks(blockType = BlockType.Both)).tryGet() + let cids 
= (await store.listBlocks(blockType = BlockType.Both)).tryGet() var count = 0 for c in cids: diff --git a/tests/codex/stores/repostore/testcoders.nim b/tests/codex/stores/repostore/testcoders.nim index 47cf40973..f4d2b5e7f 100644 --- a/tests/codex/stores/repostore/testcoders.nim +++ b/tests/codex/stores/repostore/testcoders.nim @@ -12,7 +12,6 @@ import pkg/codex/stores/repostore/coders import ../../helpers checksuite "Test coders": - proc rand(T: type NBytes): T = rand(Natural).NBytes @@ -21,29 +20,18 @@ checksuite "Test coders": E(ordinals[rand(ordinals.len - 1)]) proc rand(T: type QuotaUsage): T = - QuotaUsage( - used: rand(NBytes), - reserved: rand(NBytes) - ) + QuotaUsage(used: rand(NBytes), reserved: rand(NBytes)) proc rand(T: type BlockMetadata): T = BlockMetadata( - expiry: rand(SecondsSince1970), - size: rand(NBytes), - refCount: rand(Natural) + expiry: rand(SecondsSince1970), size: rand(NBytes), refCount: rand(Natural) ) proc rand(T: type DeleteResult): T = - DeleteResult( - kind: rand(DeleteResultKind), - released: rand(NBytes) - ) + DeleteResult(kind: rand(DeleteResultKind), released: rand(NBytes)) proc rand(T: type StoreResult): T = - StoreResult( - kind: rand(StoreResultKind), - used: rand(NBytes) - ) + StoreResult(kind: rand(StoreResultKind), used: rand(NBytes)) test "Natural encode/decode": for val in newSeqWith[Natural](100, rand(Natural)) & @[Natural.low, Natural.high]: diff --git a/tests/codex/stores/testcachestore.nim b/tests/codex/stores/testcachestore.nim index 51c59bbfb..e7025388b 100644 --- a/tests/codex/stores/testcachestore.nim +++ b/tests/codex/stores/testcachestore.nim @@ -37,16 +37,13 @@ checksuite "Cache Store": # initial cache blocks total more than cache size, currentSize should # never exceed max cache size store = CacheStore.new( - blocks = @[newBlock1, newBlock2, newBlock3], - cacheSize = 200, - chunkSize = 1) + blocks = @[newBlock1, newBlock2, newBlock3], cacheSize = 200, chunkSize = 1 + ) check store.currentSize == 200'nb # cache size cannot be less than chunks size expect ValueError: - discard CacheStore.new( - cacheSize = 99, - chunkSize = 100) + discard CacheStore.new(cacheSize = 99, chunkSize = 100) test "putBlock": (await store.putBlock(newBlock1)).tryGet() @@ -58,10 +55,8 @@ checksuite "Cache Store": check not (await store.hasBlock(newBlock1.cid)).tryGet() # block being added causes removal of LRU block - store = CacheStore.new( - @[newBlock1, newBlock2, newBlock3], - cacheSize = 200, - chunkSize = 1) + store = + CacheStore.new(@[newBlock1, newBlock2, newBlock3], cacheSize = 200, chunkSize = 1) check: not (await store.hasBlock(newBlock1.cid)).tryGet() (await store.hasBlock(newBlock2.cid)).tryGet() @@ -69,5 +64,7 @@ checksuite "Cache Store": store.currentSize.int == newBlock2.data.len + newBlock3.data.len # 200 commonBlockStoreTests( - "Cache", proc: BlockStore = - BlockStore(CacheStore.new(cacheSize = 1000, chunkSize = 1))) + "Cache", + proc(): BlockStore = + BlockStore(CacheStore.new(cacheSize = 1000, chunkSize = 1)), +) diff --git a/tests/codex/stores/testkeyutils.nim b/tests/codex/stores/testkeyutils.nim index e1a8cf1b8..238e2681a 100644 --- a/tests/codex/stores/testkeyutils.nim +++ b/tests/codex/stores/testkeyutils.nim @@ -32,15 +32,15 @@ proc createManifestCid(): ?!Cid = codec = ManifestCodec version = CIDv1 - let hash = ? MultiHash.digest($mcodec, bytes).mapFailure - let cid = ? 
Cid.init(version, codec, hash).mapFailure + let hash = ?MultiHash.digest($mcodec, bytes).mapFailure + let cid = ?Cid.init(version, codec, hash).mapFailure return success cid checksuite "KeyUtils": test "makePrefixKey should create block key": let length = 6 let cid = Cid.example - let expectedPrefix = ($cid)[^length..^1] + let expectedPrefix = ($cid)[^length ..^ 1] let expectedPostfix = $cid let key = !makePrefixKey(length, cid).option @@ -56,7 +56,7 @@ checksuite "KeyUtils": test "makePrefixKey should create manifest key": let length = 6 let cid = !createManifestCid().option - let expectedPrefix = ($cid)[^length..^1] + let expectedPrefix = ($cid)[^length ..^ 1] let expectedPostfix = $cid let key = !makePrefixKey(length, cid).option diff --git a/tests/codex/stores/testmaintenance.nim b/tests/codex/stores/testmaintenance.nim index c63d6bef1..e5ff519ed 100644 --- a/tests/codex/stores/testmaintenance.nim +++ b/tests/codex/stores/testmaintenance.nim @@ -34,10 +34,7 @@ checksuite "BlockMaintainer": var testBe3: BlockExpiration proc createTestExpiration(expiry: SecondsSince1970): BlockExpiration = - BlockExpiration( - cid: bt.Block.example.cid, - expiry: expiry - ) + BlockExpiration(cid: bt.Block.example.cid, expiry: expiry) setup: mockClock = MockClock.new() @@ -56,11 +53,8 @@ checksuite "BlockMaintainer": mockTimer = MockTimer.new() blockMaintainer = BlockMaintainer.new( - mockRepoStore, - interval, - numberOfBlocksPerInterval = 2, - mockTimer, - mockClock) + mockRepoStore, interval, numberOfBlocksPerInterval = 2, mockTimer, mockClock + ) test "Start should start timer at provided interval": blockMaintainer.start() @@ -179,9 +173,11 @@ checksuite "BlockMaintainer": mockClock.set(650) await invokeTimerManyTimes() # First new block has expired - check mockRepoStore.delBlockCids == [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid] + check mockRepoStore.delBlockCids == + [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid] mockClock.set(750) await invokeTimerManyTimes() # Second new block has expired - check mockRepoStore.delBlockCids == [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid, testBe5.cid] + check mockRepoStore.delBlockCids == + [testBe1.cid, testBe2.cid, testBe3.cid, testBe4.cid, testBe5.cid] diff --git a/tests/codex/stores/testqueryiterhelper.nim b/tests/codex/stores/testqueryiterhelper.nim index ddc769c84..5d3d68fd7 100644 --- a/tests/codex/stores/testqueryiterhelper.nim +++ b/tests/codex/stores/testqueryiterhelper.nim @@ -18,8 +18,7 @@ proc decode(T: type string, bytes: seq[byte]): ?!T = success(string.fromBytes(bytes)) asyncchecksuite "Test QueryIter helper": - var - tds: TypedDatastore + var tds: TypedDatastore setupAll: tds = TypedDatastore.init(SQLiteDatastore.new(Memory).tryGet()) @@ -29,10 +28,7 @@ asyncchecksuite "Test QueryIter helper": test "Should auto-dispose when QueryIter finishes": let - source = { - "a": "11", - "b": "22" - }.toTable + source = {"a": "11", "b": "22"}.toTable Root = Key.init("/queryitertest").tryGet() for k, v in source: diff --git a/tests/codex/stores/testrepostore.nim b/tests/codex/stores/testrepostore.nim index 1bcffbf63..dda4ed821 100644 --- a/tests/codex/stores/testrepostore.nim +++ b/tests/codex/stores/testrepostore.nim @@ -23,7 +23,6 @@ import ../examples import ./commonstoretests checksuite "Test RepoStore start/stop": - var repoDs: Datastore metaDs: Datastore @@ -63,8 +62,7 @@ asyncchecksuite "RepoStore": repo: RepoStore - let - now: SecondsSince1970 = 123 + let now: SecondsSince1970 = 123 setup: repoDs = 
SQLiteDatastore.new(Memory).tryGet() @@ -191,8 +189,7 @@ asyncchecksuite "RepoStore": duration = 10.seconds blk = createTestBlock(100) - let - expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 10) + let expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + 10) (await repo.putBlock(blk, duration.some)).tryGet @@ -202,11 +199,10 @@ asyncchecksuite "RepoStore": expectedExpiration in expirations test "Should store block with default expiration timestamp when not provided": - let - blk = createTestBlock(100) + let blk = createTestBlock(100) - let - expectedExpiration = BlockExpiration(cid: blk.cid, expiry: now + DefaultBlockTtl.seconds) + let expectedExpiration = + BlockExpiration(cid: blk.cid, expiry: now + DefaultBlockTtl.seconds) (await repo.putBlock(blk)).tryGet @@ -234,8 +230,7 @@ asyncchecksuite "RepoStore": (await repo.ensureExpiry(blk.cid, 0)).tryGet test "Should fail when updating expiry of non-existing block": - let - blk = createTestBlock(100) + let blk = createTestBlock(100) expect BlockNotFoundError: (await repo.ensureExpiry(blk.cid, 10)).tryGet @@ -296,7 +291,9 @@ asyncchecksuite "RepoStore": expirations.len == 0 test "Should retrieve block expiration information": - proc unpack(beIter: Future[?!AsyncIter[BlockExpiration]]): Future[seq[BlockExpiration]] {.async.} = + proc unpack( + beIter: Future[?!AsyncIter[BlockExpiration]] + ): Future[seq[BlockExpiration]] {.async.} = var expirations = newSeq[BlockExpiration](0) without iter =? (await beIter), err: return expirations @@ -311,22 +308,22 @@ asyncchecksuite "RepoStore": blk2 = createTestBlock(11) blk3 = createTestBlock(12) - let - expectedExpiration: SecondsSince1970 = now + 10 + let expectedExpiration: SecondsSince1970 = now + 10 proc assertExpiration(be: BlockExpiration, expectedBlock: bt.Block) = check: be.cid == expectedBlock.cid be.expiry == expectedExpiration - (await repo.putBlock(blk1, duration.some)).tryGet (await repo.putBlock(blk2, duration.some)).tryGet (await repo.putBlock(blk3, duration.some)).tryGet let - blockExpirations1 = await unpack(repo.getBlockExpirations(maxNumber=2, offset=0)) - blockExpirations2 = await unpack(repo.getBlockExpirations(maxNumber=2, offset=2)) + blockExpirations1 = + await unpack(repo.getBlockExpirations(maxNumber = 2, offset = 0)) + blockExpirations2 = + await unpack(repo.getBlockExpirations(maxNumber = 2, offset = 2)) check blockExpirations1.len == 2 assertExpiration(blockExpirations1[0], blk2) @@ -358,15 +355,18 @@ asyncchecksuite "RepoStore": check has.get commonBlockStoreTests( - "RepoStore Sql backend", proc: BlockStore = + "RepoStore Sql backend", + proc(): BlockStore = BlockStore( RepoStore.new( SQLiteDatastore.new(Memory).tryGet(), SQLiteDatastore.new(Memory).tryGet(), - clock = MockClock.new()))) + clock = MockClock.new(), + ) + ), +) -const - path = currentSourcePath().parentDir / "test" +const path = currentSourcePath().parentDir / "test" proc before() {.async.} = createDir(path) @@ -374,15 +374,18 @@ proc before() {.async.} = proc after() {.async.} = removeDir(path) -let - depth = path.split(DirSep).len +let depth = path.split(DirSep).len commonBlockStoreTests( - "RepoStore FS backend", proc: BlockStore = + "RepoStore FS backend", + proc(): BlockStore = BlockStore( RepoStore.new( FSDatastore.new(path, depth).tryGet(), SQLiteDatastore.new(Memory).tryGet(), - clock = MockClock.new())), + clock = MockClock.new(), + ) + ), before = before, - after = after) + after = after, +) diff --git a/tests/codex/testasyncheapqueue.nim 
b/tests/codex/testasyncheapqueue.nim index dcdafcf2c..eb3767cd3 100644 --- a/tests/codex/testasyncheapqueue.nim +++ b/tests/codex/testasyncheapqueue.nim @@ -7,8 +7,7 @@ import pkg/codex/rng import ../asynctest import ./helpers -type - Task* = tuple[name: string, priority: int] +type Task* = tuple[name: string, priority: int] proc `<`*(a, b: Task): bool = a.priority < b.priority @@ -70,7 +69,7 @@ checksuite "Synchronous tests": check res == @[9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - test "Test del": # Test del + test "Test del": var heap = newAsyncHeapQueue[int]() let data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0] for item in data: @@ -91,7 +90,7 @@ checksuite "Synchronous tests": heap.del(heap.find(2)) check heap.toSortedSeq == @[1, 3, 4, 8, 9] - test "Test del last": # Test del last + test "Test del last": var heap = newAsyncHeapQueue[int]() let data = [1, 2, 3] for item in data: @@ -190,11 +189,11 @@ asyncchecksuite "Asynchronous Tests": for item in data: check heap.pushNoWait(item).isOk - check heap[0] == ("b", 3) # sanity check for order + check heap[0] == ("b", 3) # sanity check for order - let fut = heap.pushOrUpdate(("c", 2)) # attempt to push a non existen item but block - check heap.popNoWait().tryGet() == ("b", 3) # pop one off - await fut # wait for push to complete + let fut = heap.pushOrUpdate(("c", 2)) # attempt to push a nonexistent item, but block + check heap.popNoWait().tryGet() == ("b", 3) # pop one off + await fut # wait for push to complete check heap[0] == (name: "c", priority: 2) # check order again @@ -215,10 +214,7 @@ asyncchecksuite "Asynchronous Tests": let data = ["d", "b", "c", "a", "h", "e", "f", "g"] for item in data: - check heap.pushNoWait(( - name: item, - priority: Rng.instance().rand(data.len) - )).isOk + check heap.pushNoWait((name: item, priority: Rng.instance().rand(data.len))).isOk let del = heap[3] heap.delete(del) diff --git a/tests/codex/testasyncstreamwrapper.nim b/tests/codex/testasyncstreamwrapper.nim index 8a3253511..2aa23039b 100644 --- a/tests/codex/testasyncstreamwrapper.nim +++ b/tests/codex/testasyncstreamwrapper.nim @@ -9,12 +9,10 @@ import ../asynctest import ./helpers asyncchecksuite "AsyncStreamWrapper": - let data = "0123456789012345678901234567890123456789" let address = initTAddress("127.0.0.1:46001") - proc serveReadingClient(server: StreamServer, - transp: StreamTransport) {.async.} = + proc serveReadingClient(server: StreamServer, transp: StreamTransport) {.async.} = var wstream = newAsyncStreamWriter(transp) await wstream.write(data) await wstream.finish() @@ -73,7 +71,8 @@ asyncchecksuite "AsyncStreamWrapper": test "Write all data": var buf = newSeq[byte](data.len) - var server = createStreamServer(address, serveWritingClient(addr buf[0], buf.len), {ReuseAddr}) + var server = + createStreamServer(address, serveWritingClient(addr buf[0], buf.len), {ReuseAddr}) server.start() var transp = await connect(address) diff --git a/tests/codex/testchunking.nim b/tests/codex/testchunking.nim index 216fbcc4c..2241a82bf 100644 --- a/tests/codex/testchunking.nim +++ b/tests/codex/testchunking.nim @@ -13,14 +13,11 @@ import ./helpers # CancelledError* = object of FutureError # LPStreamError* = object of LPError -type - CrashingStreamWrapper* = ref object of LPStream - toRaise*: proc(): void {.gcsafe, raises: [CancelledError, LPStreamError].} +type CrashingStreamWrapper* = ref object of LPStream + toRaise*: proc(): void {.gcsafe, raises: [CancelledError, LPStreamError].} method readOnce*( - self: CrashingStreamWrapper, - pbytes: pointer, - nbytes: int + self:
CrashingStreamWrapper, pbytes: pointer, nbytes: int ): Future[int] {.gcsafe, async: (raises: [CancelledError, LPStreamError]).} = self.toRaise() @@ -28,9 +25,9 @@ asyncchecksuite "Chunking": test "should return proper size chunks": var offset = 0 let contents = [1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0] - proc reader(data: ChunkBuffer, len: int): Future[int] - {.gcsafe, async, raises: [Defect].} = - + proc reader( + data: ChunkBuffer, len: int + ): Future[int] {.gcsafe, async, raises: [Defect].} = let read = min(contents.len - offset, len) if read == 0: return 0 @@ -39,9 +36,7 @@ asyncchecksuite "Chunking": offset += read return read - let chunker = Chunker.new( - reader = reader, - chunkSize = 2'nb) + let chunker = Chunker.new(reader = reader, chunkSize = 2'nb) check: (await chunker.getBytes()) == [1.byte, 2] @@ -54,9 +49,7 @@ asyncchecksuite "Chunking": test "should chunk LPStream": let stream = BufferStream.new() - let chunker = LPStreamChunker.new( - stream = stream, - chunkSize = 2'nb) + let chunker = LPStreamChunker.new(stream = stream, chunkSize = 2'nb) proc writer() {.async.} = for d in [@[1.byte, 2, 3, 4], @[5.byte, 6, 7, 8], @[9.byte, 0]]: @@ -97,9 +90,7 @@ asyncchecksuite "Chunking": proc raiseStreamException(exc: ref CancelledError | ref LPStreamError) {.async.} = let stream = CrashingStreamWrapper.new() - let chunker = LPStreamChunker.new( - stream = stream, - chunkSize = 2'nb) + let chunker = LPStreamChunker.new(stream = stream, chunkSize = 2'nb) stream.toRaise = proc(): void {.raises: [CancelledError, LPStreamError].} = raise exc @@ -118,4 +109,4 @@ asyncchecksuite "Chunking": test "stream should forward LPStreamError": expect LPStreamError: - await raiseStreamException(newException(LPStreamError, "test error")) \ No newline at end of file + await raiseStreamException(newException(LPStreamError, "test error")) diff --git a/tests/codex/testclock.nim b/tests/codex/testclock.nim index 513e49630..2b0158cf9 100644 --- a/tests/codex/testclock.nim +++ b/tests/codex/testclock.nim @@ -12,15 +12,7 @@ checksuite "Clock": check restored == seconds test "SecondsSince1970 should support bytes conversions": - let secondsToTest: seq[int64] = @[ - int64.high, - int64.low, - 0, - 1, - 12345, - -1, - -12345 - ] + let secondsToTest: seq[int64] = @[int64.high, int64.low, 0, 1, 12345, -1, -12345] for seconds in secondsToTest: testConversion(seconds) diff --git a/tests/codex/testerasure.nim b/tests/codex/testerasure.nim index 96e900d83..952497e9e 100644 --- a/tests/codex/testerasure.nim +++ b/tests/codex/testerasure.nim @@ -43,11 +43,8 @@ suite "Erasure encode/decode": await metaTmp.destroyDb() proc encode(buffers, parity: int): Future[Manifest] {.async.} = - let - encoded = (await erasure.encode( - manifest, - buffers.Natural, - parity.Natural)).tryGet() + let encoded = + (await erasure.encode(manifest, buffers.Natural, parity.Natural)).tryGet() check: encoded.blocksCount mod (buffers + parity) == 0 @@ -67,14 +64,13 @@ suite "Erasure encode/decode": column = rng.rand((encoded.blocksCount div encoded.steps) - 1) # random column dropped: seq[int] - for _ in 0.. 
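The reader-callback contract exercised by the chunking tests above is worth spelling out: the callback receives a destination `ChunkBuffer` and a byte budget, copies at most that many bytes, and returns the count, with 0 signalling end of input. A minimal sketch, assuming the `pkg/codex/chunker` import path and the `Chunker.new`/`getBytes` API as used in the tests (the `chunkAll` helper is illustrative only):

```nim
import pkg/chronos
import pkg/codex/chunker # Chunker, ChunkBuffer, as exercised above

proc chunkAll(): Future[seq[seq[byte]]] {.async.} =
  var
    contents = @[1.byte, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    offset = 0

  proc reader(
      data: ChunkBuffer, len: int
  ): Future[int] {.gcsafe, async, raises: [Defect].} =
    # copy at most `len` bytes into the chunker's buffer; returning 0 means EOF
    let read = min(contents.len - offset, len)
    if read == 0:
      return 0
    copyMem(data, addr contents[offset], read)
    offset += read
    return read

  let chunker = Chunker.new(reader = reader, chunkSize = 4'nb)
  var chunks: seq[seq[byte]]
  # getBytes keeps invoking `reader` until it reports end of input
  while (let chunk = await chunker.getBytes(); chunk.len > 0):
    chunks.add chunk
  return chunks # @[@[1, 2, 3, 4], @[5, 6, 7, 8], @[9, 0]]
```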
blockSize": @@ -85,25 +79,22 @@ asyncchecksuite "StoreStream": else: check read == 1 - check sequentialBytes(buf,read,n) + check sequentialBytes(buf, read, n) n += read test "Read exact bytes within block boundary": - var - buf = newSeq[byte](5) + var buf = newSeq[byte](5) await stream.readExactly(addr buf[0], 5) - check sequentialBytes(buf,5,0) + check sequentialBytes(buf, 5, 0) test "Read exact bytes outside of block boundary": - var - buf = newSeq[byte](15) + var buf = newSeq[byte](15) await stream.readExactly(addr buf[0], 15) - check sequentialBytes(buf,15,0) + check sequentialBytes(buf, 15, 0) suite "StoreStream - Size Tests": - var stream: StoreStream teardown: @@ -111,9 +102,7 @@ suite "StoreStream - Size Tests": test "Should return dataset size as stream size": let manifest = Manifest.new( - treeCid = Cid.example, - datasetSize = 80.NBytes, - blockSize = 10.NBytes + treeCid = Cid.example, datasetSize = 80.NBytes, blockSize = 10.NBytes ) stream = StoreStream.new(CacheStore.new(), manifest) @@ -132,7 +121,7 @@ suite "StoreStream - Size Tests": ecM = 1, originalTreeCid = Cid.example, originalDatasetSize = 80.NBytes, # size without parity bytes - strategy = StrategyType.SteppedStrategy + strategy = StrategyType.SteppedStrategy, ) stream = StoreStream.new(CacheStore.new(), protectedManifest) diff --git a/tests/codex/testvalidation.nim b/tests/codex/testvalidation.nim index 2cfe2f063..b8eaef658 100644 --- a/tests/codex/testvalidation.nim +++ b/tests/codex/testvalidation.nim @@ -29,21 +29,22 @@ asyncchecksuite "validation": var groupIndex: uint16 var validation: Validation - proc initValidationConfig(maxSlots: MaxSlots, - validationGroups: ?ValidationGroups, - groupIndex: uint16 = 0): ValidationConfig = - without validationConfig =? ValidationConfig.init( - maxSlots, groups=validationGroups, groupIndex), error: + proc initValidationConfig( + maxSlots: MaxSlots, validationGroups: ?ValidationGroups, groupIndex: uint16 = 0 + ): ValidationConfig = + without validationConfig =? + ValidationConfig.init(maxSlots, groups = validationGroups, groupIndex), error: raiseAssert fmt"Creating ValidationConfig failed! 
Error msg: {error.msg}" validationConfig - - proc newValidation(clock: Clock, - market: Market, - maxSlots: MaxSlots, - validationGroups: ?ValidationGroups, - groupIndex: uint16 = 0): Validation = - let validationConfig = initValidationConfig( - maxSlots, validationGroups, groupIndex) + + proc newValidation( + clock: Clock, + market: Market, + maxSlots: MaxSlots, + validationGroups: ?ValidationGroups, + groupIndex: uint16 = 0, + ): Validation = + let validationConfig = initValidationConfig(maxSlots, validationGroups, groupIndex) Validation.new(clock, market, validationConfig) setup: @@ -52,14 +53,13 @@ asyncchecksuite "validation": market = MockMarket.new(clock = Clock(clock).some) market.config.proofs.period = period.u256 market.config.proofs.timeout = timeout.u256 - validation = newValidation( - clock, market, maxSlots, validationGroups, groupIndex) + validation = newValidation(clock, market, maxSlots, validationGroups, groupIndex) teardown: # calling stop on validation that did not start is harmless await validation.stop() - proc advanceToNextPeriod = + proc advanceToNextPeriod() = let periodicity = Periodicity(seconds: period.u256) let period = periodicity.periodOf(clock.now().u256) let periodEnd = periodicity.periodEnd(period) @@ -70,37 +70,44 @@ asyncchecksuite "validation": for (validationGroups, groupIndex) in [(100, 100'u16), (100, 101'u16)]: test "initializing ValidationConfig fails when groupIndex is " & - "greater than or equal to validationGroups " & - fmt"(testing for {groupIndex = }, {validationGroups = })": + "greater than or equal to validationGroups " & + fmt"(testing for {groupIndex = }, {validationGroups = })": let groups = ValidationGroups(validationGroups).some - let validationConfig = ValidationConfig.init( - maxSlots, groups = groups, groupIndex = groupIndex) + let validationConfig = + ValidationConfig.init(maxSlots, groups = groups, groupIndex = groupIndex) check validationConfig.isFailure == true - check validationConfig.error.msg == "The value of the group index " & - "must be less than validation groups! " & - fmt"(got: {groupIndex = }, groups = {!groups})" + check validationConfig.error.msg == + "The value of the group index " & "must be less than validation groups! " & + fmt"(got: {groupIndex = }, groups = {!groups})" test "initializing ValidationConfig fails when maxSlots is negative": let maxSlots = -1 - let validationConfig = ValidationConfig.init( - maxSlots = maxSlots, groups = ValidationGroups.none) + let validationConfig = + ValidationConfig.init(maxSlots = maxSlots, groups = ValidationGroups.none) check validationConfig.isFailure == true - check validationConfig.error.msg == "The value of maxSlots must " & - fmt"be greater than or equal to 0! (got: {maxSlots})" + check validationConfig.error.msg == + "The value of maxSlots must " & + fmt"be greater than or equal to 0! (got: {maxSlots})" test "initializing ValidationConfig fails when maxSlots is negative " & - "(validationGroups set)": + "(validationGroups set)": let maxSlots = -1 let groupIndex = 0'u16 - let validationConfig = ValidationConfig.init( - maxSlots = maxSlots, groups = validationGroups, groupIndex) + let validationConfig = + ValidationConfig.init(maxSlots = maxSlots, groups = validationGroups, groupIndex) check validationConfig.isFailure == true - check validationConfig.error.msg == "The value of maxSlots must " & - fmt"be greater than or equal to 0! (got: {maxSlots})" + check validationConfig.error.msg == + "The value of maxSlots must " & + fmt"be greater than or equal to 0! 
(got: {maxSlots})" test "slot is not observed if it is not in the validation group": - validation = newValidation(clock, market, maxSlots, validationGroups, - (groupIndex + 1) mod uint16(!validationGroups)) + validation = newValidation( + clock, + market, + maxSlots, + validationGroups, + (groupIndex + 1) mod uint16(!validationGroups), + ) await validation.start() await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral) check validation.slots.len == 0 @@ -117,7 +124,7 @@ asyncchecksuite "validation": check validation.slots == @[slot.id] test "slot should be observed if validation group is not set (and " & - "maxSlots is not 0)": + "maxSlots is not 0)": validation = newValidation(clock, market, maxSlots, ValidationGroups.none) await validation.start() await market.fillSlot(slot.request.id, slot.slotIndex, proof, collateral) @@ -151,49 +158,45 @@ asyncchecksuite "validation": test "it does not monitor more than the maximum number of slots": validation = newValidation(clock, market, maxSlots, ValidationGroups.none) await validation.start() - for _ in 0.. $i) + let iter = Iter.new(0 ..< 5).map((i: int) => $i) check: iter.toSeq() == @["0", "1", "2", "3", "4"] test "Should leave only odd items using `filter`": - let iter = Iter.new(0..<5) - .filter((i: int) => (i mod 2) == 1) + let iter = Iter.new(0 ..< 5).filter((i: int) => (i mod 2) == 1) check: iter.toSeq() == @[1, 3] test "Should leave only odd items using `mapFilter`": let - iter1 = Iter.new(0..<5) - iter2 = mapFilter[int, string](iter1, + iter1 = Iter.new(0 ..< 5) + iter2 = mapFilter[int, string]( + iter1, proc(i: int): ?string = if (i mod 2) == 1: some($i) else: - string.none + string.none, ) check: iter2.toSeq() == @["1", "3"] test "Should yield all items before err using `map`": - let - iter = Iter.new(0..<5) - .map( - proc (i: int): string = - if i < 3: - return $i - else: - raise newException(CatchableError, "Some error") - ) + let iter = Iter.new(0 ..< 5).map( + proc(i: int): string = + if i < 3: + return $i + else: + raise newException(CatchableError, "Some error") + ) var collected: seq[string] @@ -87,15 +81,13 @@ checksuite "Test Iter": iter.finished test "Should yield all items before err using `filter`": - let - iter = Iter.new(0..<5) - .filter( - proc (i: int): bool = - if i < 3: - return true - else: - raise newException(CatchableError, "Some error") - ) + let iter = Iter.new(0 ..< 5).filter( + proc(i: int): bool = + if i < 3: + return true + else: + raise newException(CatchableError, "Some error") + ) var collected: seq[int] @@ -109,14 +101,15 @@ checksuite "Test Iter": test "Should yield all items before err using `mapFilter`": let - iter1 = Iter.new(0..<5) - iter2 = mapFilter[int, string](iter1, - proc (i: int): ?string = - if i < 3: - return some($i) - else: - raise newException(CatchableError, "Some error") - ) + iter1 = Iter.new(0 ..< 5) + iter2 = mapFilter[int, string]( + iter1, + proc(i: int): ?string = + if i < 3: + return some($i) + else: + raise newException(CatchableError, "Some error"), + ) var collected: seq[string] diff --git a/tests/codex/utils/testkeyutils.nim b/tests/codex/utils/testkeyutils.nim index c16f21de3..2124e6822 100644 --- a/tests/codex/utils/testkeyutils.nim +++ b/tests/codex/utils/testkeyutils.nim @@ -29,4 +29,3 @@ checksuite "keyutils": test "reads key file when it does exist": let key = setupKey(path / "keyfile").get() check setupKey(path / "keyfile").get() == key - diff --git a/tests/codex/utils/testoptions.nim b/tests/codex/utils/testoptions.nim index eb566ad79..05f7509e7 
100644 --- a/tests/codex/utils/testoptions.nim +++ b/tests/codex/utils/testoptions.nim @@ -13,6 +13,7 @@ checksuite "optional casts": type BaseType = ref object of RootObj SubType = ref object of BaseType + let x: BaseType = SubType() check x as SubType == SubType(x).some @@ -21,6 +22,7 @@ BaseType = ref object of RootObj SubType = ref object of BaseType OtherType = ref object of BaseType + let x: BaseType = SubType() check x as OtherType == OtherType.none diff --git a/tests/codex/utils/testtrackedfutures.nim b/tests/codex/utils/testtrackedfutures.nim index cbabd39c5..350749190 100644 --- a/tests/codex/utils/testtrackedfutures.nim +++ b/tests/codex/utils/testtrackedfutures.nim @@ -63,5 +63,3 @@ asyncchecksuite "tracked futures": check eventually fut2.cancelled check eventually fut3.cancelled check eventually module.trackedFutures.len == 0 - - diff --git a/tests/codex/utils/testutils.nim b/tests/codex/utils/testutils.nim index b8e386d01..92c883beb 100644 --- a/tests/codex/utils/testutils.nim +++ b/tests/codex/utils/testutils.nim @@ -3,17 +3,17 @@ import std/unittest import pkg/codex/utils suite "findIt": - setup: type AnObject = object attribute1*: int - var objList = @[ - AnObject(attribute1: 1), - AnObject(attribute1: 3), - AnObject(attribute1: 5), - AnObject(attribute1: 3), - ] + var objList = + @[ + AnObject(attribute1: 1), + AnObject(attribute1: 3), + AnObject(attribute1: 5), + AnObject(attribute1: 3), + ] test "should return index of first object matching predicate": assert objList.findIt(it.attribute1 == 3) == 1 @@ -22,15 +22,13 @@ suite "findIt": assert objList.findIt(it.attribute1 == 15) == -1 suite "parseDuration": - test "should parse durations": - var res: Duration # caller must still know if 'b' refers to bytes|bits + var res: Duration # caller must still know if 'b' refers to bytes|bits check parseDuration("10Hr", res) == 3 check res == hours(10) check parseDuration("64min", res) == 3 check res == minutes(64) check parseDuration("7m/block", res) == 2 # '/' stops parse - check res == minutes(7) # 1 shl 30, forced binary metric + check res == minutes(7) # 1 shl 30, forced binary metric check parseDuration("3d", res) == 2 # '/' stops parse - check res == days(3) # 1 shl 30, forced binary metric - + check res == days(3) # 1 shl 30, forced binary metric diff --git a/tests/config.nims b/tests/config.nims index 558584278..8ae008067 100644 --- a/tests/config.nims +++ b/tests/config.nims @@ -1,8 +1,14 @@ ---path:".." ---threads:on ---tlsEmulation:off +--path: + ".."
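As the `parseDuration` tests above demonstrate, the proc returns the number of characters it consumed (a `'/'` or an unknown suffix stops the parse) and writes the parsed value into its `var` parameter. A hedged sketch of the calling convention:

```nim
import std/times # Duration, minutes
import pkg/codex/utils # parseDuration, as exercised above

var dur: Duration
let consumed = parseDuration("90min", dur)
# `consumed` is the number of characters parsed; 0 would mean nothing matched
assert consumed > 0
assert dur == minutes(90)
```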
+--threads: + on +--tlsEmulation: + off when not defined(chronicles_log_level): - --define:"chronicles_log_level:NONE" # compile all log statements - --define:"chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime - --"import":"logging" # ensure that logging is ignored at runtime + --define: + "chronicles_log_level:NONE" # compile all log statements + --define: + "chronicles_sinks:textlines[dynamic]" # allow logs to be filtered at runtime + --"import": + "logging" # ensure that logging is ignored at runtime diff --git a/tests/contracts/deployment.nim b/tests/contracts/deployment.nim index f62bb1be1..f45aa6257 100644 --- a/tests/contracts/deployment.nim +++ b/tests/contracts/deployment.nim @@ -16,7 +16,4 @@ proc address*(_: type Marketplace, dummyVerifier = false): Address = return address - if dummyVerifier: - hardhatMarketWithDummyVerifier - else: - hardhatMarketAddress + if dummyVerifier: hardhatMarketWithDummyVerifier else: hardhatMarketAddress diff --git a/tests/contracts/helpers/mockprovider.nim b/tests/contracts/helpers/mockprovider.nim index ce6e9e346..09e653980 100644 --- a/tests/contracts/helpers/mockprovider.nim +++ b/tests/contracts/helpers/mockprovider.nim @@ -12,9 +12,8 @@ type MockProvider* = ref object of Provider latest: ?int method getBlock*( - provider: MockProvider, - tag: BlockTag -): Future[?Block] {.async: (raises:[ProviderError]).} = + provider: MockProvider, tag: BlockTag +): Future[?Block] {.async: (raises: [ProviderError]).} = try: if tag == BlockTag.latest: if latestBlock =? provider.latest: @@ -33,7 +32,6 @@ method getBlock*( return Block.none except: return Block.none - proc updateEarliestAndLatest(provider: MockProvider, blockNumber: int) = if provider.earliest.isNone: @@ -54,9 +52,7 @@ proc addBlock*(provider: MockProvider, number: int, blk: Block) = proc newMockProvider*(): MockProvider = MockProvider( - blocks: newOrderedTable[int, Block](), - earliest: int.none, - latest: int.none + blocks: newOrderedTable[int, Block](), earliest: int.none, latest: int.none ) proc newMockProvider*(blocks: OrderedTableRef[int, Block]): MockProvider = @@ -65,21 +61,22 @@ proc newMockProvider*(blocks: OrderedTableRef[int, Block]): MockProvider = provider proc newMockProvider*( - numberOfBlocks: int, - earliestBlockNumber: int, - earliestBlockTimestamp: SecondsSince1970, - timeIntervalBetweenBlocks: SecondsSince1970 + numberOfBlocks: int, + earliestBlockNumber: int, + earliestBlockTimestamp: SecondsSince1970, + timeIntervalBetweenBlocks: SecondsSince1970, ): MockProvider = var blocks = newOrderedTable[int, provider.Block]() var blockNumber = earliestBlockNumber var blockTime = earliestBlockTimestamp - for i in 0.. 
292 # 1728436106 => 292 # 1728436110 => 292 - proc generateExpectations( - blocks: seq[(UInt256, UInt256)]): seq[Expectations] = + proc generateExpectations(blocks: seq[(UInt256, UInt256)]): seq[Expectations] = var expectations: seq[Expectations] = @[] - for i in 0..= 3, "must be more than 3 blocks" let rng = Rng.instance() let chunker = RandomChunker.new( - rng, size = DefaultBlockSize * blocks.NBytes, chunkSize = DefaultBlockSize) + rng, size = DefaultBlockSize * blocks.NBytes, chunkSize = DefaultBlockSize + ) var data: seq[byte] while (let moar = await chunker.getBytes(); moar != []): data.add moar return byteutils.toHex(data) -proc example*(_: type RandomChunker): Future[string] {.async.} = +proc example*(_: type RandomChunker): Future[string] {.async.} = await RandomChunker.example(3) diff --git a/tests/helpers/multisetup.nim b/tests/helpers/multisetup.nim index 781b0062c..aa434678f 100644 --- a/tests/helpers/multisetup.nim +++ b/tests/helpers/multisetup.nim @@ -1,10 +1,9 @@ import pkg/chronos # Allow multiple setups and teardowns in a test suite -template asyncmultisetup* = - var setups: seq[proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] - var teardowns: seq[ - proc: Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] +template asyncmultisetup*() = + var setups: seq[proc(): Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] + var teardowns: seq[proc(): Future[void].Raising([AsyncExceptionError]) {.gcsafe.}] setup: for setup in setups: @@ -15,14 +14,18 @@ template asyncmultisetup* = await teardown() template setup(setupBody) {.inject, used.} = - setups.add(proc {.async: ( - handleException: true, raises: [AsyncExceptionError]).} = setupBody) + setups.add( + proc() {.async: (handleException: true, raises: [AsyncExceptionError]).} = + setupBody + ) template teardown(teardownBody) {.inject, used.} = - teardowns.insert(proc {.async: ( - handleException: true, raises: [AsyncExceptionError]).} = teardownBody) + teardowns.insert( + proc() {.async: (handleException: true, raises: [AsyncExceptionError]).} = + teardownBody + ) -template multisetup* = +template multisetup*() = var setups: seq[proc() {.gcsafe.}] var teardowns: seq[proc() {.gcsafe.}] @@ -35,8 +38,12 @@ template multisetup* = teardown() template setup(setupBody) {.inject, used.} = - let setupProc = proc = setupBody + let setupProc = proc() = + setupBody setups.add(setupProc) template teardown(teardownBody) {.inject, used.} = - teardowns.insert(proc = teardownBody) + teardowns.insert( + proc() = + teardownBody + ) diff --git a/tests/helpers/templeveldb.nim b/tests/helpers/templeveldb.nim index 054336915..dbc53bb42 100644 --- a/tests/helpers/templeveldb.nim +++ b/tests/helpers/templeveldb.nim @@ -4,10 +4,9 @@ import pkg/datastore import pkg/chronos import pkg/questionable/results -type - TempLevelDb* = ref object - currentPath: string - ds: LevelDbDatastore +type TempLevelDb* = ref object + currentPath: string + ds: LevelDbDatastore var number = 0 diff --git a/tests/helpers/trackers.nim b/tests/helpers/trackers.nim index f4b10a2ea..ed8c56920 100644 --- a/tests/helpers/trackers.nim +++ b/tests/helpers/trackers.nim @@ -2,17 +2,17 @@ import pkg/codex/streams/storestream import std/unittest # From libp2p/tests/helpers -const trackerNames = [ - StoreStreamTrackerName - ] +const trackerNames = [StoreStreamTrackerName] iterator testTrackers*(extras: openArray[string] = []): TrackerBase = for name in trackerNames: let t = getTracker(name) - if not isNil(t): yield t + if not isNil(t): + yield t for name in extras: let t
= getTracker(name) - if not isNil(t): yield t + if not isNil(t): + yield t proc checkTracker*(name: string) = var tracker = getTracker(name) @@ -27,4 +27,5 @@ proc checkTrackers*() = fail() try: GC_fullCollect() - except: discard + except: + discard diff --git a/tests/integration/clioption.nim b/tests/integration/clioption.nim index 5f756d805..f845fbca5 100644 --- a/tests/integration/clioption.nim +++ b/tests/integration/clioption.nim @@ -1,7 +1,6 @@ -type - CliOption* = object - key*: string # option key, including `--` - value*: string # option value +type CliOption* = object + key*: string # option key, including `--` + value*: string # option value proc `$`*(option: CliOption): string = var res = option.key diff --git a/tests/integration/codexclient.nim b/tests/integration/codexclient.nim index 5e8761d16..1bf3f2118 100644 --- a/tests/integration/codexclient.nim +++ b/tests/integration/codexclient.nim @@ -24,19 +24,19 @@ const HttpClientTimeoutMs = 60 * 1000 proc new*(_: type CodexClient, baseurl: string): CodexClient = CodexClient( - http: newHttpClient(timeout=HttpClientTimeoutMs), + http: newHttpClient(timeout = HttpClientTimeoutMs), baseurl: baseurl, - session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}) + session: HttpSessionRef.new({HttpClientFlag.Http11Pipeline}), ) proc info*(client: CodexClient): ?!JsonNode = let url = client.baseurl & "/debug/info" - JsonNode.parse( client.http.getContent(url) ) + JsonNode.parse(client.http.getContent(url)) proc setLogLevel*(client: CodexClient, level: string) = let url = client.baseurl & "/debug/chronicles/loglevel?level=" & level let headers = newHttpHeaders({"Content-Type": "text/plain"}) - let response = client.http.request(url, httpMethod=HttpPost, headers=headers) + let response = client.http.request(url, httpMethod = HttpPost, headers = headers) assert response.status == "200 OK" proc upload*(client: CodexClient, contents: string): ?!Cid = @@ -45,10 +45,9 @@ proc upload*(client: CodexClient, contents: string): ?!Cid = Cid.init(response.body).mapFailure proc download*(client: CodexClient, cid: Cid, local = false): ?!string = - let - response = client.http.get( - client.baseurl & "/data/" & $cid & - (if local: "" else: "/network/stream")) + let response = client.http.get( + client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream") + ) if response.status != "200 OK": return failure(response.status) @@ -56,9 +55,7 @@ proc download*(client: CodexClient, cid: Cid, local = false): ?!string = success response.body proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = - let - response = client.http.get( - client.baseurl & "/data/" & $cid & "/network/manifest") + let response = client.http.get(client.baseurl & "/data/" & $cid & "/network/manifest") if response.status != "200 OK": return failure(response.status) @@ -66,9 +63,7 @@ proc downloadManifestOnly*(client: CodexClient, cid: Cid): ?!string = success response.body proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = - let - response = client.http.post( - client.baseurl & "/data/" & $cid & "/network") + let response = client.http.post(client.baseurl & "/data/" & $cid & "/network") if response.status != "200 OK": return failure(response.status) @@ -76,14 +71,10 @@ proc downloadNoStream*(client: CodexClient, cid: Cid): ?!string = success response.body proc downloadBytes*( - client: CodexClient, - cid: Cid, - local = false): Future[?!seq[byte]] {.async.} = - - let uri = parseUri( - client.baseurl & "/data/" & $cid & - (if local: "" else: 
"/network/stream") - ) + client: CodexClient, cid: Cid, local = false +): Future[?!seq[byte]] {.async.} = + let uri = + parseUri(client.baseurl & "/data/" & $cid & (if local: "" else: "/network/stream")) let (status, bytes) = await client.session.fetch(uri) @@ -119,19 +110,19 @@ proc requestStorageRaw*( collateral: UInt256, expiry: uint = 0, nodes: uint = 3, - tolerance: uint = 1 + tolerance: uint = 1, ): Response = - ## Call request storage REST endpoint ## let url = client.baseurl & "/storage/request/" & $cid - let json = %*{ + let json = + %*{ "duration": duration, "reward": reward, "proofProbability": proofProbability, "collateral": collateral, "nodes": nodes, - "tolerance": tolerance + "tolerance": tolerance, } if expiry != 0: @@ -148,11 +139,13 @@ proc requestStorage*( expiry: uint, collateral: UInt256, nodes: uint = 3, - tolerance: uint = 1 + tolerance: uint = 1, ): ?!PurchaseId = ## Call request storage REST endpoint ## - let response = client.requestStorageRaw(cid, duration, reward, proofProbability, collateral, expiry, nodes, tolerance) + let response = client.requestStorageRaw( + cid, duration, reward, proofProbability, collateral, expiry, nodes, tolerance + ) if response.status != "200 OK": doAssert(false, response.body) PurchaseId.fromHex(response.body).catch @@ -179,26 +172,27 @@ proc getSlots*(client: CodexClient): ?!seq[Slot] = seq[Slot].fromJson(body) proc postAvailability*( - client: CodexClient, - totalSize, duration, minPrice, maxCollateral: UInt256 + client: CodexClient, totalSize, duration, minPrice, maxCollateral: UInt256 ): ?!Availability = ## Post sales availability endpoint ## let url = client.baseurl & "/sales/availability" - let json = %*{ - "totalSize": totalSize, - "duration": duration, - "minPrice": minPrice, - "maxCollateral": maxCollateral, - } + let json = + %*{ + "totalSize": totalSize, + "duration": duration, + "minPrice": minPrice, + "maxCollateral": maxCollateral, + } let response = client.http.post(url, $json) - doAssert response.status == "201 Created", "expected 201 Created, got " & response.status & ", body: " & response.body + doAssert response.status == "201 Created", + "expected 201 Created, got " & response.status & ", body: " & response.body Availability.fromJson(response.body) proc patchAvailabilityRaw*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, freeSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none + totalSize, freeSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none, ): Response = ## Updates availability ## @@ -227,9 +221,15 @@ proc patchAvailabilityRaw*( proc patchAvailability*( client: CodexClient, availabilityId: AvailabilityId, - totalSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none + totalSize, duration, minPrice, maxCollateral: ?UInt256 = UInt256.none, ): void = - let response = client.patchAvailabilityRaw(availabilityId, totalSize=totalSize, duration=duration, minPrice=minPrice, maxCollateral=maxCollateral) + let response = client.patchAvailabilityRaw( + availabilityId, + totalSize = totalSize, + duration = duration, + minPrice = minPrice, + maxCollateral = maxCollateral, + ) doAssert response.status == "200 OK", "expected 200 OK, got " & response.status proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = @@ -238,7 +238,9 @@ proc getAvailabilities*(client: CodexClient): ?!seq[Availability] = let body = client.http.getContent(url) seq[Availability].fromJson(body) -proc getAvailabilityReservations*(client: CodexClient, availabilityId: AvailabilityId): 
?!seq[Reservation] = +proc getAvailabilityReservations*( + client: CodexClient, availabilityId: AvailabilityId +): ?!seq[Reservation] = ## Retrieves Availability's Reservations let url = client.baseurl & "/sales/availability/" & $availabilityId & "/reservations" let body = client.http.getContent(url) @@ -249,23 +251,29 @@ proc close*(client: CodexClient) = proc restart*(client: CodexClient) = client.http.close() - client.http = newHttpClient(timeout=HttpClientTimeoutMs) + client.http = newHttpClient(timeout = HttpClientTimeoutMs) proc purchaseStateIs*(client: CodexClient, id: PurchaseId, state: string): bool = - client.getPurchase(id).option.?state == some state + client.getPurchase(id).option .? state == some state proc saleStateIs*(client: CodexClient, id: SlotId, state: string): bool = - client.getSalesAgent(id).option.?state == some state + client.getSalesAgent(id).option .? state == some state proc requestId*(client: CodexClient, id: PurchaseId): ?RequestId = - return client.getPurchase(id).option.?requestId + return client.getPurchase(id).option .? requestId -proc uploadRaw*(client: CodexClient, contents: string, headers = newHttpHeaders()): Response = - return client.http.request(client.baseurl & "/data", body = contents, httpMethod=HttpPost, headers = headers) +proc uploadRaw*( + client: CodexClient, contents: string, headers = newHttpHeaders() +): Response = + return client.http.request( + client.baseurl & "/data", body = contents, httpMethod = HttpPost, headers = headers + ) proc listRaw*(client: CodexClient): Response = - return client.http.request(client.baseurl & "/data", httpMethod=HttpGet) + return client.http.request(client.baseurl & "/data", httpMethod = HttpGet) proc downloadRaw*(client: CodexClient, cid: string, local = false): Response = - return client.http.request(client.baseurl & "/data/" & cid & - (if local: "" else: "/network/stream"), httpMethod=HttpGet) \ No newline at end of file + return client.http.request( + client.baseurl & "/data/" & cid & (if local: "" else: "/network/stream"), + httpMethod = HttpGet, + ) diff --git a/tests/integration/codexconfig.nim b/tests/integration/codexconfig.nim index f321364fa..41d7109c0 100644 --- a/tests/integration/codexconfig.nim +++ b/tests/integration/codexconfig.nim @@ -19,10 +19,12 @@ export confutils type CodexConfigs* = object configs*: seq[CodexConfig] + CodexConfig* = object cliOptions: Table[StartUpCmd, Table[string, CliOption]] cliPersistenceOptions: Table[PersistenceCmd, Table[string, CliOption]] debugEnabled*: bool + CodexConfigError* = object of CatchableError proc cliArgs*(config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} @@ -43,17 +45,17 @@ func nodes*(self: CodexConfigs): int = self.configs.len proc checkBounds(self: CodexConfigs, idx: int) {.raises: [CodexConfigError].} = - if idx notin 0.. 
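One nph change that looks odd at first sight is the spaced safe-dot operator: `client.getPurchase(id).option .? state`. The `.?` operator comes from `questionable`: it evaluates the right-hand access only when the left-hand `Option` holds a value, and yields an `Option` itself. A standalone sketch (the `Purchase` type here is illustrative, not the client's):

```nim
import std/options
import pkg/questionable

type Purchase = object
  state: string

let
  existing = some Purchase(state: "submitted")
  missing = Purchase.none

assert existing .? state == some "submitted"
assert missing .? state == string.none
```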
0: ": " & msg - else: "" + else: + "" try: return CodexConf.load(cmdLine = config.cliArgs, quitOnFailure = false) @@ -64,53 +66,42 @@ proc buildConfig( raiseCodexConfigError msg & e.msg.postFix proc addCliOption*( - config: var CodexConfig, - group = PersistenceCmd.noCmd, - cliOption: CliOption) {.raises: [CodexConfigError].} = - + config: var CodexConfig, group = PersistenceCmd.noCmd, cliOption: CliOption +) {.raises: [CodexConfigError].} = var options = config.cliPersistenceOptions.getOrDefault(group) options[cliOption.key] = cliOption # overwrite if already exists config.cliPersistenceOptions[group] = options discard config.buildConfig("Invalid cli arg " & $cliOption) proc addCliOption*( - config: var CodexConfig, - group = PersistenceCmd.noCmd, - key: string, value = "") {.raises: [CodexConfigError].} = - + config: var CodexConfig, group = PersistenceCmd.noCmd, key: string, value = "" +) {.raises: [CodexConfigError].} = config.addCliOption(group, CliOption(key: key, value: value)) proc addCliOption*( - config: var CodexConfig, - group = StartUpCmd.noCmd, - cliOption: CliOption) {.raises: [CodexConfigError].} = - + config: var CodexConfig, group = StartUpCmd.noCmd, cliOption: CliOption +) {.raises: [CodexConfigError].} = var options = config.cliOptions.getOrDefault(group) options[cliOption.key] = cliOption # overwrite if already exists config.cliOptions[group] = options discard config.buildConfig("Invalid cli arg " & $cliOption) proc addCliOption*( - config: var CodexConfig, - group = StartUpCmd.noCmd, - key: string, value = "") {.raises: [CodexConfigError].} = - + config: var CodexConfig, group = StartUpCmd.noCmd, key: string, value = "" +) {.raises: [CodexConfigError].} = config.addCliOption(group, CliOption(key: key, value: value)) proc addCliOption*( - config: var CodexConfig, - cliOption: CliOption) {.raises: [CodexConfigError].} = - + config: var CodexConfig, cliOption: CliOption +) {.raises: [CodexConfigError].} = config.addCliOption(StartUpCmd.noCmd, cliOption) proc addCliOption*( - config: var CodexConfig, - key: string, value = "") {.raises: [CodexConfigError].} = - + config: var CodexConfig, key: string, value = "" +) {.raises: [CodexConfigError].} = config.addCliOption(StartUpCmd.noCmd, CliOption(key: key, value: value)) -proc cliArgs*( - config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} = +proc cliArgs*(config: CodexConfig): seq[string] {.gcsafe, raises: [CodexConfigError].} = ## converts CodexConfig cli options and command groups in a sequence of args ## and filters out cli options by node index if provided in the CliOption var args: seq[string] = @[] @@ -121,14 +112,14 @@ proc cliArgs*( if cmd != StartUpCmd.noCmd: args.add $cmd var opts = config.cliOptions[cmd].values.toSeq - args = args.concat( opts.map(o => $o) ) + args = args.concat(opts.map(o => $o)) for cmd in PersistenceCmd: if config.cliPersistenceOptions.hasKey(cmd): if cmd != PersistenceCmd.noCmd: args.add $cmd var opts = config.cliPersistenceOptions[cmd].values.toSeq - args = args.concat( opts.map(o => $o) ) + args = args.concat(opts.map(o => $o)) return args @@ -142,9 +133,8 @@ proc logLevel*(config: CodexConfig): LogLevel {.raises: [CodexConfigError].} = return parseEnum[LogLevel](built.logLevel.toUpperAscii) proc debug*( - self: CodexConfigs, - idx: int, - enabled = true): CodexConfigs {.raises: [CodexConfigError].} = + self: CodexConfigs, idx: int, enabled = true +): CodexConfigs {.raises: [CodexConfigError].} = ## output log in stdout for a specific node in the group self.checkBounds 
idx @@ -161,17 +151,15 @@ proc debug*(self: CodexConfigs, enabled = true): CodexConfigs {.raises: [].} = return startConfig proc withLogFile*( - self: CodexConfigs, - idx: int): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx var startConfig = self startConfig.configs[idx].addCliOption("--log-file", "") return startConfig -proc withLogFile*( - self: CodexConfigs): CodexConfigs {.raises: [CodexConfigError].} = +proc withLogFile*(self: CodexConfigs): CodexConfigs {.raises: [CodexConfigError].} = ## typically called from test, sets config such that a log file should be ## created var startConfig = self @@ -180,8 +168,8 @@ proc withLogFile*( return startConfig proc withLogFile*( - self: var CodexConfig, - logFile: string) {.raises: [CodexConfigError].} = #: CodexConfigs = + self: var CodexConfig, logFile: string +) {.raises: [CodexConfigError].} = #: CodexConfigs = ## typically called internally from the test suite, sets a log file path to ## be created during the test run, for a specified node in the group # var config = self @@ -189,18 +177,15 @@ proc withLogFile*( # return startConfig proc withLogLevel*( - self: CodexConfig, - level: LogLevel | string): CodexConfig {.raises: [CodexConfigError].} = - + self: CodexConfig, level: LogLevel | string +): CodexConfig {.raises: [CodexConfigError].} = var config = self config.addCliOption("--log-level", $level) return config proc withLogLevel*( - self: CodexConfigs, - idx: int, - level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int, level: LogLevel | string +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx var startConfig = self @@ -208,86 +193,75 @@ proc withLogLevel*( return startConfig proc withLogLevel*( - self: CodexConfigs, - level: LogLevel | string): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, level: LogLevel | string +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: config.addCliOption("--log-level", $level) return startConfig proc withSimulateProofFailures*( - self: CodexConfigs, - idx: int, - failEveryNProofs: int + self: CodexConfigs, idx: int, failEveryNProofs: int ): CodexConfigs {.raises: [CodexConfigError].} = - self.checkBounds idx var startConfig = self startConfig.configs[idx].addCliOption( - StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs) + StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs + ) return startConfig proc withSimulateProofFailures*( - self: CodexConfigs, - failEveryNProofs: int): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, failEveryNProofs: int +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: config.addCliOption( - StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs) + StartUpCmd.persistence, "--simulate-proof-failures", $failEveryNProofs + ) return startConfig proc withValidationGroups*( - self: CodexConfigs, - groups: ValidationGroups): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, groups: ValidationGroups +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: - config.addCliOption( - StartUpCmd.persistence, "--validator-groups", $(groups)) + config.addCliOption(StartUpCmd.persistence, "--validator-groups", $(groups)) 
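All of these `with*` helpers copy `self`, tweak the copy, and return it, which is what lets integration tests compose node configurations fluently. A sketch of the intended call style (the `CodexConfigs.init(nodes = ...)` constructor is assumed here; it is not part of this diff):

```nim
# configure two provider nodes: node 0 echoes to stdout, all nodes log to
# file with extra topics, and every 3rd storage proof is made to fail
var providers = CodexConfigs.init(nodes = 2)
  .debug(0)
  .withLogFile()
  .withLogTopics("marketplace", "sales")
  .withSimulateProofFailures(failEveryNProofs = 3)
```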
return startConfig proc withValidationGroupIndex*( - self: CodexConfigs, - idx: int, - groupIndex: uint16): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int, groupIndex: uint16 +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx var startConfig = self startConfig.configs[idx].addCliOption( - StartUpCmd.persistence, "--validator-group-index", $groupIndex) + StartUpCmd.persistence, "--validator-group-index", $groupIndex + ) return startConfig proc withEthProvider*( - self: CodexConfigs, - idx: int, - ethProvider: string + self: CodexConfigs, idx: int, ethProvider: string ): CodexConfigs {.raises: [CodexConfigError].} = - self.checkBounds idx var startConfig = self - startConfig.configs[idx].addCliOption(StartUpCmd.persistence, - "--eth-provider", ethProvider) + startConfig.configs[idx].addCliOption( + StartUpCmd.persistence, "--eth-provider", ethProvider + ) return startConfig proc withEthProvider*( - self: CodexConfigs, - ethProvider: string): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, ethProvider: string +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: - config.addCliOption(StartUpCmd.persistence, - "--eth-provider", ethProvider) + config.addCliOption(StartUpCmd.persistence, "--eth-provider", ethProvider) return startConfig proc logLevelWithTopics( - config: CodexConfig, - topics: varargs[string]): string {.raises: [CodexConfigError].} = - + config: CodexConfig, topics: varargs[string] +): string {.raises: [CodexConfigError].} = convertError: var logLevel = LogLevel.INFO let built = config.buildConfig("Invalid codex config cli params") @@ -296,10 +270,8 @@ proc logLevelWithTopics( return level proc withLogTopics*( - self: CodexConfigs, - idx: int, - topics: varargs[string]): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int, topics: varargs[string] +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx convertError: @@ -309,10 +281,8 @@ proc withLogTopics*( return startConfig.withLogLevel(idx, level) proc withLogTopics*( - self: CodexConfigs, - topics: varargs[string] + self: CodexConfigs, topics: varargs[string] ): CodexConfigs {.raises: [CodexConfigError].} = - var startConfig = self for config in startConfig.configs.mitems: let level = config.logLevelWithTopics(topics) @@ -320,10 +290,8 @@ proc withLogTopics*( return startConfig proc withStorageQuota*( - self: CodexConfigs, - idx: int, - quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, idx: int, quota: NBytes +): CodexConfigs {.raises: [CodexConfigError].} = self.checkBounds idx var startConfig = self @@ -331,9 +299,8 @@ proc withStorageQuota*( return startConfig proc withStorageQuota*( - self: CodexConfigs, - quota: NBytes): CodexConfigs {.raises: [CodexConfigError].} = - + self: CodexConfigs, quota: NBytes +): CodexConfigs {.raises: [CodexConfigError].} = var startConfig = self for config in startConfig.configs.mitems: config.addCliOption("--storage-quota", $quota) diff --git a/tests/integration/codexprocess.nim b/tests/integration/codexprocess.nim index 5097a968e..79d4b040b 100644 --- a/tests/integration/codexprocess.nim +++ b/tests/integration/codexprocess.nim @@ -18,9 +18,8 @@ export nodeprocess logScope: topics = "integration testing codex process" -type - CodexProcess* = ref object of NodeProcess - client: ?CodexClient +type CodexProcess* = ref object of NodeProcess + client: ?CodexClient method 
workingDir(node: CodexProcess): string = return currentSourcePath() / ".." / ".." / ".." diff --git a/tests/integration/hardhatconfig.nim b/tests/integration/hardhatconfig.nim index fbd04fe86..5de5bbc55 100644 --- a/tests/integration/hardhatconfig.nim +++ b/tests/integration/hardhatconfig.nim @@ -1,7 +1,6 @@ -type - HardhatConfig* = object - logFile*: bool - debugEnabled*: bool +type HardhatConfig* = object + logFile*: bool + debugEnabled*: bool proc debug*(self: HardhatConfig, enabled = true): HardhatConfig = ## output log in stdout diff --git a/tests/integration/hardhatprocess.nim b/tests/integration/hardhatprocess.nim index b4259de43..40c7942d4 100644 --- a/tests/integration/hardhatprocess.nim +++ b/tests/integration/hardhatprocess.nim @@ -21,9 +21,8 @@ logScope: topics = "integration testing hardhat process" nodeName = "hardhat" -type - HardhatProcess* = ref object of NodeProcess - logFile: ?IoHandle +type HardhatProcess* = ref object of NodeProcess + logFile: ?IoHandle method workingDir(node: HardhatProcess): string = return currentSourcePath() / ".." / ".." / ".." / "vendor" / "codex-contracts-eth" @@ -41,22 +40,18 @@ method outputLineEndings(node: HardhatProcess): string {.raises: [].} = return "\n" proc openLogFile(node: HardhatProcess, logFilePath: string): IoHandle = - let logFileHandle = openFile( - logFilePath, - {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate} - ) + let logFileHandle = + openFile(logFilePath, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}) without fileHandle =? logFileHandle: fatal "failed to open log file", - path = logFilePath, - errorCode = $logFileHandle.error + path = logFilePath, errorCode = $logFileHandle.error raiseAssert "failed to open log file, aborting" return fileHandle method start*(node: HardhatProcess) {.async.} = - let poptions = node.processOptions + {AsyncProcessOption.StdErrToStdOut} trace "starting node", args = node.arguments, @@ -70,7 +65,7 @@ method start*(node: HardhatProcess) {.async.} = node.workingDir, @["node", "--export", "deployment-localhost.json"].concat(node.arguments), options = poptions, - stdoutHandle = AsyncProcess.Pipe + stdoutHandle = AsyncProcess.Pipe, ) except CancelledError as error: raise error @@ -78,12 +73,11 @@ method start*(node: HardhatProcess) {.async.} = error "failed to start hardhat process", error = e.msg proc startNode*( - _: type HardhatProcess, - args: seq[string], - debug: string | bool = false, - name: string + _: type HardhatProcess, + args: seq[string], + debug: string | bool = false, + name: string, ): Future[HardhatProcess] {.async.} = - var logFilePath = "" var arguments = newSeq[string]() @@ -100,7 +94,7 @@ proc startNode*( arguments: arguments, debug: ($debug != "false"), trackedFutures: TrackedFutures.new(), - name: "hardhat" + name: "hardhat", ) await hardhat.start() diff --git a/tests/integration/marketplacesuite.nim b/tests/integration/marketplacesuite.nim index e666ad173..bc2f9cfb3 100644 --- a/tests/integration/marketplacesuite.nim +++ b/tests/integration/marketplacesuite.nim @@ -11,11 +11,8 @@ import ../contracts/deployment export mp export multinodes -template marketplacesuite*(name: string, - body: untyped) = - +template marketplacesuite*(name: string, body: untyped) = multinodesuite name: - var marketplace {.inject, used.}: Marketplace var period: uint64 var periodicity: Periodicity @@ -32,12 +29,12 @@ template marketplacesuite*(name: string, await ethProvider.advanceTimeTo(endOfPeriod + 1) template eventuallyP(condition: untyped, finalPeriod: Period): bool = - - proc 
eventuallyP: Future[bool] {.async.} = - while( - let currentPeriod = await getCurrentPeriod(); + proc eventuallyP(): Future[bool] {.async.} = + while ( + let currentPeriod = await getCurrentPeriod() currentPeriod <= finalPeriod - ): + ) + : if condition: return true await sleepAsync(1.millis) @@ -50,34 +47,36 @@ template marketplacesuite*(name: string, proc createAvailabilities(datasetSize: int, duration: uint64) = # post availability to each provider - for i in 0.. //_.log - var logDir = currentSourcePath.parentDir() / - "logs" / - sanitize($starttime & "__" & name) / + var logDir = + currentSourcePath.parentDir() / "logs" / sanitize($starttime & "__" & name) / sanitize($currentTestName) createDir(logDir) @@ -123,10 +121,8 @@ template multinodesuite*(name: string, body: untyped) = return fileName proc newHardhatProcess( - config: HardhatConfig, - role: Role + config: HardhatConfig, role: Role ): Future[NodeProcess] {.async.} = - var args: seq[string] = @[] if config.logFile: let updatedLogFile = getLogFile(role, none int) @@ -141,11 +137,9 @@ template multinodesuite*(name: string, body: untyped) = trace "hardhat node started" return node - proc newCodexProcess(roleIdx: int, - conf: CodexConfig, - role: Role + proc newCodexProcess( + roleIdx: int, conf: CodexConfig, role: Role ): Future[NodeProcess] {.async.} = - let nodeIdx = running.len var config = conf @@ -153,9 +147,8 @@ template multinodesuite*(name: string, body: untyped) = raiseMultiNodeSuiteError "Cannot start node at nodeIdx " & $nodeIdx & ", not enough eth accounts." - let datadir = getTempDir() / "Codex" / - sanitize($starttime) / - sanitize($role & "_" & $roleIdx) + let datadir = + getTempDir() / "Codex" / sanitize($starttime) / sanitize($role & "_" & $roleIdx) try: if config.logFile.isSome: @@ -164,19 +157,16 @@ template multinodesuite*(name: string, body: untyped) = for bootstrapNode in bootstrapNodes: config.addCliOption("--bootstrap-node", bootstrapNode) - config.addCliOption("--api-port", $ await nextFreePort(8080 + nodeIdx)) + config.addCliOption("--api-port", $await nextFreePort(8080 + nodeIdx)) config.addCliOption("--data-dir", datadir) config.addCliOption("--nat", "none") config.addCliOption("--listen-addrs", "/ip4/127.0.0.1/tcp/0") - config.addCliOption("--disc-port", $ await nextFreePort(8090 + nodeIdx)) - + config.addCliOption("--disc-port", $await nextFreePort(8090 + nodeIdx)) except CodexConfigError as e: raiseMultiNodeSuiteError "invalid cli option, error: " & e.msg let node = await CodexProcess.startNode( - config.cliArgs, - config.debugEnabled, - $role & $roleIdx + config.cliArgs, config.debugEnabled, $role & $roleIdx ) try: @@ -187,25 +177,25 @@ template multinodesuite*(name: string, body: untyped) = return node - proc hardhat: HardhatProcess = + proc hardhat(): HardhatProcess = for r in running: if r.role == Role.Hardhat: return HardhatProcess(r.node) return nil - proc clients: seq[CodexProcess] {.used.} = + proc clients(): seq[CodexProcess] {.used.} = return collect: for r in running: if r.role == Role.Client: CodexProcess(r.node) - proc providers: seq[CodexProcess] {.used.} = + proc providers(): seq[CodexProcess] {.used.} = return collect: for r in running: if r.role == Role.Provider: CodexProcess(r.node) - proc validators: seq[CodexProcess] {.used.} = + proc validators(): seq[CodexProcess] {.used.} = return collect: for r in running: if r.role == Role.Validator: @@ -218,20 +208,30 @@ template multinodesuite*(name: string, body: untyped) = let clientIdx = clients().len var config = conf 
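The `Periodicity` arithmetic behind `getCurrentPeriod` and `advanceToNextPeriod` above is plain integer division over timestamps. A sketch, assuming `Periodicity`, `periodOf`, and `periodEnd` come from `pkg/codex/periods` (the import is not visible in this diff):

```nim
import pkg/stint
import pkg/codex/periods # assumed module path

let periodicity = Periodicity(seconds: 10.u256)

let current = periodicity.periodOf(123.u256) # 123 div 10 = period 12
let endOfPeriod = periodicity.periodEnd(current)
# moving the clock to `endOfPeriod + 1` lands in the next period, which is
# exactly what advanceToNextPeriod does with the EVM clock above
```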
config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl) - config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len]) + config.addCliOption( + StartUpCmd.persistence, "--eth-account", $accounts[running.len] + ) return await newCodexProcess(clientIdx, config, Role.Client) proc startProviderNode(conf: CodexConfig): Future[NodeProcess] {.async.} = let providerIdx = providers().len var config = conf config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl) - config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len]) - config.addCliOption(PersistenceCmd.prover, "--circom-r1cs", - "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs") - config.addCliOption(PersistenceCmd.prover, "--circom-wasm", - "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm") - config.addCliOption(PersistenceCmd.prover, "--circom-zkey", - "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey") + config.addCliOption( + StartUpCmd.persistence, "--eth-account", $accounts[running.len] + ) + config.addCliOption( + PersistenceCmd.prover, "--circom-r1cs", + "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.r1cs", + ) + config.addCliOption( + PersistenceCmd.prover, "--circom-wasm", + "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.wasm", + ) + config.addCliOption( + PersistenceCmd.prover, "--circom-zkey", + "vendor/codex-contracts-eth/verifier/networks/hardhat/proof_main.zkey", + ) return await newCodexProcess(providerIdx, config, Role.Provider) @@ -239,7 +239,9 @@ template multinodesuite*(name: string, body: untyped) = let validatorIdx = validators().len var config = conf config.addCliOption(StartUpCmd.persistence, "--eth-provider", jsonRpcProviderUrl) - config.addCliOption(StartUpCmd.persistence, "--eth-account", $accounts[running.len]) + config.addCliOption( + StartUpCmd.persistence, "--eth-account", $accounts[running.len] + ) config.addCliOption(StartUpCmd.persistence, "--validator") return await newCodexProcess(validatorIdx, config, Role.Validator) @@ -264,7 +266,7 @@ template multinodesuite*(name: string, body: untyped) = try: tryBody except CatchableError as er: - fatal message, error=er.msg + fatal message, error = er.msg echo "[FATAL] ", message, ": ", er.msg await teardownImpl() when declared(teardownAllIMPL): @@ -294,8 +296,7 @@ template multinodesuite*(name: string, body: untyped) = # Do not use websockets, but use http and polling to stop subscriptions # from being removed after 5 minutes ethProvider = JsonRpcProvider.new( - jsonRpcProviderUrl, - pollingInterval = chronos.milliseconds(100) + jsonRpcProviderUrl, pollingInterval = chronos.milliseconds(100) ) # if hardhat was NOT started by the test, take a snapshot so it can be # reverted in the test teardown @@ -304,8 +305,7 @@ template multinodesuite*(name: string, body: untyped) = accounts = await ethProvider.listAccounts() except CatchableError as e: echo "Hardhat not running. Run hardhat manually " & - "before executing tests, or include a " & - "HardhatConfig in the test setup." + "before executing tests, or include a " & "HardhatConfig in the test setup." 
fail() quit(1) @@ -313,30 +313,21 @@ template multinodesuite*(name: string, body: untyped) = failAndTeardownOnError "failed to start client nodes": for config in clients.configs: let node = await startClientNode(config) - running.add RunningNode( - role: Role.Client, - node: node - ) + running.add RunningNode(role: Role.Client, node: node) CodexProcess(node).updateBootstrapNodes() if var providers =? nodeConfigs.providers: failAndTeardownOnError "failed to start provider nodes": for config in providers.configs.mitems: let node = await startProviderNode(config) - running.add RunningNode( - role: Role.Provider, - node: node - ) + running.add RunningNode(role: Role.Provider, node: node) CodexProcess(node).updateBootstrapNodes() if var validators =? nodeConfigs.validators: failAndTeardownOnError "failed to start validator nodes": for config in validators.configs.mitems: let node = await startValidatorNode(config) - running.add RunningNode( - role: Role.Validator, - node: node - ) + running.add RunningNode(role: Role.Validator, node: node) # ensure that we have a recent block with a fresh timestamp discard await send(ethProvider, "evm_mine") diff --git a/tests/integration/nodeconfig.nim b/tests/integration/nodeconfig.nim index d6adb80fa..a96c05257 100644 --- a/tests/integration/nodeconfig.nim +++ b/tests/integration/nodeconfig.nim @@ -3,11 +3,10 @@ import pkg/questionable export chronicles -type - NodeConfig* = ref object of RootObj - logFile*: bool - logLevel*: ?LogLevel - debugEnabled*: bool +type NodeConfig* = ref object of RootObj + logFile*: bool + logLevel*: ?LogLevel + debugEnabled*: bool proc debug*[T: NodeConfig](config: T, enabled = true): T = ## output log in stdout @@ -15,20 +14,12 @@ proc debug*[T: NodeConfig](config: T, enabled = true): T = startConfig.debugEnabled = enabled return startConfig -proc withLogFile*[T: NodeConfig]( - config: T, - logToFile: bool = true -): T = - +proc withLogFile*[T: NodeConfig](config: T, logToFile: bool = true): T = var startConfig = config startConfig.logFile = logToFile return startConfig -proc withLogLevel*[T: NodeConfig]( - config: NodeConfig, - level: LogLevel -): T = - +proc withLogLevel*[T: NodeConfig](config: NodeConfig, level: LogLevel): T = var startConfig = config startConfig.logLevel = some level return startConfig diff --git a/tests/integration/nodeconfigs.nim b/tests/integration/nodeconfigs.nim index 56309006a..19e797e32 100644 --- a/tests/integration/nodeconfigs.nim +++ b/tests/integration/nodeconfigs.nim @@ -2,10 +2,8 @@ import pkg/questionable import ./codexconfig import ./hardhatconfig -type - NodeConfigs* = object - clients*: ?CodexConfigs - providers*: ?CodexConfigs - validators*: ?CodexConfigs - hardhat*: ?HardhatConfig - +type NodeConfigs* = object + clients*: ?CodexConfigs + providers*: ?CodexConfigs + validators*: ?CodexConfigs + hardhat*: ?HardhatConfig diff --git a/tests/integration/nodeprocess.nim b/tests/integration/nodeprocess.nim index a08b4fe13..d50dacbe2 100644 --- a/tests/integration/nodeprocess.nim +++ b/tests/integration/nodeprocess.nim @@ -24,6 +24,7 @@ type debug: bool trackedFutures*: TrackedFutures name*: string + NodeProcessError* = object of CatchableError method workingDir(node: NodeProcess): string {.base, gcsafe.} = @@ -38,10 +39,12 @@ method startedOutput(node: NodeProcess): string {.base, gcsafe.} = method processOptions(node: NodeProcess): set[AsyncProcessOption] {.base, gcsafe.} = raiseAssert "not implemented" -method outputLineEndings(node: NodeProcess): string {.base, gcsafe raises: [].} = +method 
outputLineEndings(node: NodeProcess): string {.base, gcsafe, raises: [].} = raiseAssert "not implemented" -method onOutputLineCaptured(node: NodeProcess, line: string) {.base, gcsafe, raises: [].} = +method onOutputLineCaptured( + node: NodeProcess, line: string +) {.base, gcsafe, raises: [].} = raiseAssert "not implemented" method start*(node: NodeProcess) {.base, async.} = @@ -63,7 +66,7 @@ method start*(node: NodeProcess) {.base, async.} = node.workingDir, node.arguments, options = poptions, - stdoutHandle = AsyncProcess.Pipe + stdoutHandle = AsyncProcess.Pipe, ) except CancelledError as error: raise error @@ -71,11 +74,8 @@ method start*(node: NodeProcess) {.base, async.} = error "failed to start node process", error = e.msg proc captureOutput( - node: NodeProcess, - output: string, - started: Future[void] + node: NodeProcess, output: string, started: Future[void] ) {.async: (raises: []).} = - logScope: nodeName = node.name @@ -85,7 +85,7 @@ proc captureOutput( try: while node.process.running.option == some true: - while(let line = await stream.readLine(0, node.outputLineEndings); line != ""): + while (let line = await stream.readLine(0, node.outputLineEndings); line != ""): if node.debug: # would be nice if chronicles could parse and display with colors echo line @@ -97,27 +97,21 @@ proc captureOutput( await sleepAsync(1.millis) await sleepAsync(1.millis) - except CancelledError: discard # do not propagate as captureOutput was asyncSpawned - except AsyncStreamError as e: error "error reading output stream", error = e.msgDetail proc startNode*[T: NodeProcess]( - _: type T, - args: seq[string], - debug: string | bool = false, - name: string + _: type T, args: seq[string], debug: string | bool = false, name: string ): Future[T] {.async.} = - ## Starts a Codex Node with the specified arguments. ## Set debug to 'true' to see output of the node. 
let node = T( arguments: @args, debug: ($debug != "false"), trackedFutures: TrackedFutures.new(), - name: name + name: name, ) await node.start() return node @@ -144,7 +138,6 @@ method stop*(node: NodeProcess) {.base, async.} = raise error except CatchableError as e: error "error stopping node process", error = e.msg - finally: node.process = nil @@ -172,8 +165,8 @@ proc waitUntilStarted*(node: NodeProcess) {.async.} = await node.stop() # raise error here so that all nodes (not just this one) can be # shutdown gracefully - raise newException(NodeProcessError, "node did not output '" & - node.startedOutput & "'") + raise + newException(NodeProcessError, "node did not output '" & node.startedOutput & "'") proc restart*(node: NodeProcess) {.async.} = await node.stop() diff --git a/tests/integration/testblockexpiration.nim b/tests/integration/testblockexpiration.nim index e1d38a03b..e3fad75c2 100644 --- a/tests/integration/testblockexpiration.nim +++ b/tests/integration/testblockexpiration.nim @@ -24,17 +24,19 @@ ethersuite "Node block expiration tests": dataDir.removeDir() proc startTestNode(blockTtlSeconds: int) {.async.} = - node = await CodexProcess.startNode(@[ - "--api-port=8080", - "--data-dir=" & dataDir, - "--nat=none", - "--listen-addrs=/ip4/127.0.0.1/tcp/0", - "--disc-port=8090", - "--block-ttl=" & $blockTtlSeconds, - "--block-mi=1", - "--block-mn=10"], + node = await CodexProcess.startNode( + @[ + "--api-port=8080", + "--data-dir=" & dataDir, + "--nat=none", + "--listen-addrs=/ip4/127.0.0.1/tcp/0", + "--disc-port=8090", + "--block-ttl=" & $blockTtlSeconds, + "--block-mi=1", + "--block-mn=10", + ], false, - "cli-test-node" + "cli-test-node", ) await node.waitUntilStarted() @@ -47,16 +49,16 @@ ethersuite "Node block expiration tests": uploadResponse.body proc downloadTestFile(contentId: string, local = false): Response = - let client = newHttpClient(timeout=3000) - let downloadUrl = baseurl & "/data/" & - contentId & (if local: "" else: "/network/stream") + let client = newHttpClient(timeout = 3000) + let downloadUrl = + baseurl & "/data/" & contentId & (if local: "" else: "/network/stream") let content = client.get(downloadUrl) client.close() content proc hasFile(contentId: string): bool = - let client = newHttpClient(timeout=3000) + let client = newHttpClient(timeout = 3000) let dataLocalUrl = baseurl & "/data/" & contentId let content = client.get(dataLocalUrl) client.close() diff --git a/tests/integration/testcli.nim b/tests/integration/testcli.nim index fad85846b..d9f2d0817 100644 --- a/tests/integration/testcli.nim +++ b/tests/integration/testcli.nim @@ -8,36 +8,30 @@ import ./nodeprocess import ../examples asyncchecksuite "Command line interface": - let key = "4242424242424242424242424242424242424242424242424242424242424242" proc startCodex(args: seq[string]): Future[CodexProcess] {.async.} = - return await CodexProcess.startNode( - args, - false, - "cli-test-node" - ) + return await CodexProcess.startNode(args, false, "cli-test-node") test "complains when persistence is enabled without ethereum account": - let node = await startCodex(@[ - "persistence" - ]) + let node = await startCodex(@["persistence"]) await node.waitUntilOutput("Persistence enabled, but no Ethereum account was set") await node.stop() test "complains when ethereum private key file has wrong permissions": let unsafeKeyFile = genTempPath("", "") discard unsafeKeyFile.writeFile(key, 0o666) - let node = await startCodex(@[ - "persistence", - "--eth-private-key=" & unsafeKeyFile]) - await 
node.waitUntilOutput("Ethereum private key file does not have safe file permissions") + let node = await startCodex(@["persistence", "--eth-private-key=" & unsafeKeyFile]) + await node.waitUntilOutput( + "Ethereum private key file does not have safe file permissions" + ) await node.stop() discard removeFile(unsafeKeyFile) let marketplaceArg = "--marketplace-address=" & $EthAddress.example - expectedDownloadInstruction = "Proving circuit files are not found. Please run the following to download them:" + expectedDownloadInstruction = + "Proving circuit files are not found. Please run the following to download them:" test "suggests downloading of circuit files when persistence is enabled without accessible r1cs file": let node = await startCodex(@["persistence", "prover", marketplaceArg]) @@ -45,22 +39,22 @@ asyncchecksuite "Command line interface": await node.stop() test "suggests downloading of circuit files when persistence is enabled without accessible wasm file": - let node = await startCodex(@[ - "persistence", - "prover", - marketplaceArg, - "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs" - ]) + let node = await startCodex( + @[ + "persistence", "prover", marketplaceArg, + "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", + ] + ) await node.waitUntilOutput(expectedDownloadInstruction) await node.stop() test "suggests downloading of circuit files when persistence is enabled without accessible zkey file": - let node = await startCodex(@[ - "persistence", - "prover", - marketplaceArg, - "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", - "--circom-wasm=tests/circuits/fixtures/proof_main.wasm" - ]) + let node = await startCodex( + @[ + "persistence", "prover", marketplaceArg, + "--circom-r1cs=tests/circuits/fixtures/proof_main.r1cs", + "--circom-wasm=tests/circuits/fixtures/proof_main.wasm", + ] + ) await node.waitUntilOutput(expectedDownloadInstruction) await node.stop() diff --git a/tests/integration/testecbug.nim b/tests/integration/testecbug.nim index 0545d1d09..fde5eb341 100644 --- a/tests/integration/testecbug.nim +++ b/tests/integration/testecbug.nim @@ -5,35 +5,32 @@ import ./nodeconfigs import ./hardhatconfig marketplacesuite "Bug #821 - node crashes during erasure coding": - test "should be able to create storage request and download dataset", NodeConfigs( - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output.debug() - .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - .withLogTopics("node", "erasure", "marketplace", ) - .some, - - providers: - CodexConfigs.init(nodes=0) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") - .some, - ): + clients: CodexConfigs + .init(nodes = 1) + # .debug() # uncomment to enable console log output.debug() + .withLogFile() + # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + .withLogTopics("node", "erasure", "marketplace").some, + providers: CodexConfigs.init(nodes = 0) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") + .some, + ): let reward = 400.u256 let duration = 20.periods let collateral = 200.u256 let expiry = 10.periods - let data = await RandomChunker.example(blocks=8) 
+ let data = await RandomChunker.example(blocks = 8) let client = clients()[0] let clientApi = client.client let cid = clientApi.upload(data).get var requestId = none RequestId - proc onStorageRequested(eventResult: ?!StorageRequested)= + proc onStorageRequested(eventResult: ?!StorageRequested) = assert not eventResult.isErr requestId = some (!eventResult).requestId @@ -42,15 +39,15 @@ marketplacesuite "Bug #821 - node crashes during erasure coding": # client requests storage but requires multiple slots to host the content let id = await clientApi.requestStorage( cid, - duration=duration, - reward=reward, - expiry=expiry, - collateral=collateral, - nodes=3, - tolerance=1 + duration = duration, + reward = reward, + expiry = expiry, + collateral = collateral, + nodes = 3, + tolerance = 1, ) - check eventually(requestId.isSome, timeout=expiry.int * 1000) + check eventually(requestId.isSome, timeout = expiry.int * 1000) let request = await marketplace.getRequest(requestId.get) let cidFromRequest = Cid.init(request.content.cid).get() diff --git a/tests/integration/testmarketplace.nim b/tests/integration/testmarketplace.nim index 17a3ec17d..b45c3cb88 100644 --- a/tests/integration/testmarketplace.nim +++ b/tests/integration/testmarketplace.nim @@ -7,10 +7,10 @@ import ./nodeconfigs marketplacesuite "Marketplace": let marketplaceConfig = NodeConfigs( - clients: CodexConfigs.init(nodes=1).some, - providers: CodexConfigs.init(nodes=1).some, + clients: CodexConfigs.init(nodes = 1).some, + providers: CodexConfigs.init(nodes = 1).some, ) - + var host: CodexClient var hostAccount: Address var client: CodexClient @@ -29,23 +29,29 @@ marketplacesuite "Marketplace": test "nodes negotiate contracts on the marketplace", marketplaceConfig: let size = 0xFFFFFF.u256 - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = 8) # host makes storage available - let availability = host.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get + let availability = host.postAvailability( + totalSize = size, + duration = 20 * 60.u256, + minPrice = 300.u256, + maxCollateral = 300.u256, + ).get # client requests storage let cid = client.upload(data).get let id = client.requestStorage( cid, - duration=20*60.u256, - reward=400.u256, - proofProbability=3.u256, - expiry=10*60, - collateral=200.u256, + duration = 20 * 60.u256, + reward = 400.u256, + proofProbability = 3.u256, + expiry = 10 * 60, + collateral = 200.u256, nodes = 3, - tolerance = 1).get + tolerance = 1, + ).get - check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000) + check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let purchase = client.getPurchase(id).get check purchase.error == none string let availabilities = host.getAvailabilities().get @@ -57,33 +63,40 @@ marketplacesuite "Marketplace": check reservations.len == 3 check reservations[0].requestId == purchase.requestId - test "node slots gets paid out and rest of tokens are returned to client", marketplaceConfig: + test "node slots gets paid out and rest of tokens are returned to client", + marketplaceConfig: let size = 0xFFFFFF.u256 let data = await RandomChunker.example(blocks = 8) let marketplace = Marketplace.new(Marketplace.address, ethProvider.getSigner()) let tokenAddress = await marketplace.token() let token = Erc20Token.new(tokenAddress, ethProvider.getSigner()) let reward = 400.u256 - let duration = 20*60.u256 + let duration = 20 * 60.u256 let nodes = 3'u # 
host makes storage available let startBalanceHost = await token.balanceOf(hostAccount) - discard host.postAvailability(totalSize=size, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get + discard host.postAvailability( + totalSize = size, + duration = 20 * 60.u256, + minPrice = 300.u256, + maxCollateral = 300.u256, + ).get # client requests storage let cid = client.upload(data).get let id = client.requestStorage( cid, - duration=duration, - reward=reward, - proofProbability=3.u256, - expiry=10*60, - collateral=200.u256, + duration = duration, + reward = reward, + proofProbability = 3.u256, + expiry = 10 * 60, + collateral = 200.u256, nodes = nodes, - tolerance = 1).get + tolerance = 1, + ).get - check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000) + check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let purchase = client.getPurchase(id).get check purchase.error == none string @@ -95,40 +108,36 @@ marketplacesuite "Marketplace": await ethProvider.advanceTime(duration) # Checking that the hosting node received reward for at least the time between <expiry;end> - check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >= (duration-5*60)*reward*nodes.u256 + check eventually (await token.balanceOf(hostAccount)) - startBalanceHost >= + (duration - 5 * 60) * reward * nodes.u256 # Checking that client node receives some funds back that were not used for the host nodes check eventually( (await token.balanceOf(clientAccount)) - clientBalanceBeforeFinished > 0, - timeout = 10*1000 # give client a bit of time to withdraw its funds + timeout = 10 * 1000, # give client a bit of time to withdraw its funds ) marketplacesuite "Marketplace payouts": - test "expired request partially pays out for stored time", NodeConfigs( # Uncomment to start Hardhat automatically, typically so logs can be inspected locally hardhat: HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output.debug() - # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - # .withLogTopics("node", "erasure") - .some, - - providers: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") - .some, - ): + clients: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output.debug() + # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + # .withLogTopics("node", "erasure") + .some, + providers: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + # .withLogTopics("node", "marketplace", "sales", "reservations", "node", "proving", "clock") + .some, + ): let reward = 400.u256 let duration = 20.periods let collateral = 200.u256 let expiry = 10.periods - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = 8) let client = clients()[0] let provider = providers()[0] let clientApi = client.client @@ -140,10 +149,11 @@ marketplacesuite "Marketplace payouts": discard providerApi.postAvailability( # make availability size small enough that we can't fill all the slots, # thus causing a cancellation - totalSize=(data.len div 2).u256, - duration=duration.u256, - minPrice=reward, - 
maxCollateral=collateral) + totalSize = (data.len div 2).u256, + duration = duration.u256, + minPrice = reward, + maxCollateral = collateral, + ) let cid = clientApi.upload(data).get @@ -157,16 +167,16 @@ # client requests storage but requires multiple slots to host the content let id = await clientApi.requestStorage( cid, - duration=duration, - reward=reward, - expiry=expiry, - collateral=collateral, - nodes=3, - tolerance=1 + duration = duration, + reward = reward, + expiry = expiry, + collateral = collateral, + nodes = 3, + tolerance = 1, ) # wait until one slot is filled - check eventually(slotIdxFilled.isSome, timeout=expiry.int * 1000) + check eventually(slotIdxFilled.isSome, timeout = expiry.int * 1000) let slotId = slotId(!clientApi.requestId(id), !slotIdxFilled) # wait until sale is cancelled @@ -176,17 +186,18 @@ await advanceToNextPeriod() check eventually ( - let endBalanceProvider = (await token.balanceOf(provider.ethAccount)); + let endBalanceProvider = (await token.balanceOf(provider.ethAccount)) endBalanceProvider > startBalanceProvider and - endBalanceProvider < startBalanceProvider + expiry.u256*reward + endBalanceProvider < startBalanceProvider + expiry.u256 * reward ) check eventually( ( - let endBalanceClient = (await token.balanceOf(client.ethAccount)); - let endBalanceProvider = (await token.balanceOf(provider.ethAccount)); - (startBalanceClient - endBalanceClient) == (endBalanceProvider - startBalanceProvider) + let endBalanceClient = (await token.balanceOf(client.ethAccount)) + let endBalanceProvider = (await token.balanceOf(provider.ethAccount)) + (startBalanceClient - endBalanceClient) == + (endBalanceProvider - startBalanceProvider) ), - timeout = 10*1000 # give client a bit of time to withdraw its funds + timeout = 10 * 1000, # give client a bit of time to withdraw its funds ) await subscription.unsubscribe() diff --git a/tests/integration/testproofs.nim b/tests/integration/testproofs.nim index 2e462d2cb..c03c3d07d 100644 --- a/tests/integration/testproofs.nim +++ b/tests/integration/testproofs.nim @@ -14,45 +14,37 @@ export logutils logScope: topics = "integration test proofs" - marketplacesuite "Hosts submit regular proofs": - - test "hosts submit periodic proofs for slots they fill", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: - HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - # .withLogTopics("node, marketplace") - .some, - - providers: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") - .some, - ): + test "hosts submit periodic proofs for slots they fill", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + clients: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + # .withLogTopics("node, marketplace") + .some, + providers: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to 
tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") + .some, + ): let client0 = clients()[0].client let expiry = 10.periods let duration = expiry + 5.periods - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = 8) createAvailabilities(data.len * 2, duration) # TODO: better value for data.len let cid = client0.upload(data).get let purchaseId = await client0.requestStorage( - cid, - expiry=expiry, - duration=duration, - nodes=3, - tolerance=1 + cid, expiry = expiry, duration = duration, nodes = 3, tolerance = 1 + ) + check eventually( + client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 ) - check eventually(client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000) var proofWasSubmitted = false proc onProofSubmitted(event: ?!ProofSubmitted) = @@ -60,65 +52,60 @@ marketplacesuite "Hosts submit regular proofs": let subscription = await marketplace.subscribe(ProofSubmitted, onProofSubmitted) - check eventually(proofWasSubmitted, timeout=(duration - expiry).int * 1000) + check eventually(proofWasSubmitted, timeout = (duration - expiry).int * 1000) await subscription.unsubscribe() - marketplacesuite "Simulate invalid proofs": - # TODO: these are very loose tests in that they are not testing EXACTLY how # proofs were marked as missed by the validator. These tests should be # tightened so that they are showing, as an integration test, that specific # proofs are being marked as missed by the validator. - test "slot is freed after too many invalid proofs submitted", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: - HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - # .withLogTopics("node", "marketplace", "clock") - .some, - - providers: - CodexConfigs.init(nodes=1) - .withSimulateProofFailures(idx=0, failEveryNProofs=1) + test "slot is freed after too many invalid proofs submitted", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + clients: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + # .withLogTopics("node", "marketplace", "clock") + .some, + providers: CodexConfigs + .init(nodes = 1) + .withSimulateProofFailures(idx = 0, failEveryNProofs = 1) # .debug() # uncomment to enable console log output # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log # .withLogTopics("marketplace", "sales", "reservations", "node", "clock", "slotsbuilder") .some, - - validators: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - # .withLogTopics("validator", "onchain", "ethers", "clock") - .some - ): + validators: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + # .withLogTopics("validator", "onchain", "ethers", "clock") + .some, + ): let client0 = clients()[0].client let expiry = 10.periods let duration = expiry + 10.periods - let data = 
await RandomChunker.example(blocks = 8) createAvailabilities(data.len * 2, duration) # TODO: better value for data.len let cid = client0.upload(data).get let purchaseId = await client0.requestStorage( cid, - expiry=expiry, - duration=duration, - nodes=3, - tolerance=1, - proofProbability=1 + expiry = expiry, + duration = duration, + nodes = 3, + tolerance = 1, + proofProbability = 1, ) let requestId = client0.requestId(purchaseId).get - check eventually(client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000) + check eventually( + client0.purchaseStateIs(purchaseId, "started"), timeout = expiry.int * 1000 + ) var slotWasFreed = false proc onSlotFreed(event: ?!SlotFreed) = @@ -127,52 +114,48 @@ marketplacesuite "Simulate invalid proofs": let subscription = await marketplace.subscribe(SlotFreed, onSlotFreed) - check eventually(slotWasFreed, timeout=(duration - expiry).int * 1000) + check eventually(slotWasFreed, timeout = (duration - expiry).int * 1000) await subscription.unsubscribe() - test "slot is not freed when not enough invalid proofs submitted", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) - # .debug() # uncomment to enable console log output - # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") - .some, - - providers: - CodexConfigs.init(nodes=1) - .withSimulateProofFailures(idx=0, failEveryNProofs=1) + test "slot is not freed when not enough invalid proofs submitted", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + clients: CodexConfigs.init(nodes = 1) + # .debug() # uncomment to enable console log output + # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + # .withLogTopics("marketplace", "sales", "reservations", "node", "clock") + .some, + providers: CodexConfigs + .init(nodes = 1) + .withSimulateProofFailures(idx = 0, failEveryNProofs = 1) # .debug() # uncomment to enable console log output # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log # .withLogTopics("marketplace", "sales", "reservations", "node") .some, - - validators: - CodexConfigs.init(nodes=1) - # .debug() - # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - # .withLogTopics("validator", "onchain", "ethers", "clock") - .some - ): + validators: CodexConfigs.init(nodes = 1) + # .debug() + # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + # .withLogTopics("validator", "onchain", "ethers", "clock") + .some, + ): let client0 = clients()[0].client let expiry = 10.periods let duration = expiry + 10.periods - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = 8) createAvailabilities(data.len * 2, duration) # TODO: better value for data.len let cid = client0.upload(data).get let purchaseId = await client0.requestStorage( cid, - expiry=expiry, - duration=duration, - nodes=3, - tolerance=1, - proofProbability=1 + expiry = expiry, + duration = duration, + nodes = 3, + tolerance = 1, + proofProbability = 1, ) let requestId = client0.requestId(purchaseId).get @@ -183,6 +166,7 @@ marketplacesuite "Simulate invalid proofs": if event.requestId == requestId: slotWasFilled = true + let filledSubscription = await 
marketplace.subscribe(SlotFilled, onSlotFilled) # wait for the first slot to be filled @@ -192,6 +176,7 @@ marketplacesuite "Simulate invalid proofs": proc onSlotFreed(event: ?!SlotFreed) = if event.isOk and event.value.requestId == requestId: slotWasFreed = true + let freedSubscription = await marketplace.subscribe(SlotFreed, onSlotFreed) # In 2 periods you cannot have enough invalid proofs submitted: diff --git a/tests/integration/testpurchasing.nim b/tests/integration/testpurchasing.nim index 4e5fa8664..a78ea1256 100644 --- a/tests/integration/testpurchasing.nim +++ b/tests/integration/testpurchasing.nim @@ -6,29 +6,45 @@ import ../contracts/time import ../examples twonodessuite "Purchasing": - test "node handles storage request", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let id1 = client1.requestStorage(cid, duration=100.u256, reward=2.u256, proofProbability=3.u256, expiry=10, collateral=200.u256).get - let id2 = client1.requestStorage(cid, duration=400.u256, reward=5.u256, proofProbability=6.u256, expiry=10, collateral=201.u256).get + let id1 = client1.requestStorage( + cid, + duration = 100.u256, + reward = 2.u256, + proofProbability = 3.u256, + expiry = 10, + collateral = 200.u256, + ).get + let id2 = client1.requestStorage( + cid, + duration = 400.u256, + reward = 5.u256, + proofProbability = 6.u256, + expiry = 10, + collateral = 201.u256, + ).get check id1 != id2 test "node retrieves purchase status", twoNodesConfig: # get one contiguous chunk let rng = rng.Rng.instance() - let chunker = RandomChunker.new(rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2) + let chunker = RandomChunker.new( + rng, size = DefaultBlockSize * 2, chunkSize = DefaultBlockSize * 2 + ) let data = await chunker.getBytes() let cid = client1.upload(byteutils.toHex(data)).get let id = client1.requestStorage( cid, - duration=100.u256, - reward=2.u256, - proofProbability=3.u256, - expiry=30, - collateral=200.u256, - nodes=3, - tolerance=1).get + duration = 100.u256, + reward = 2.u256, + proofProbability = 3.u256, + expiry = 30, + collateral = 200.u256, + nodes = 3, + tolerance = 1, + ).get let request = client1.getPurchase(id).get.request.get check request.ask.duration == 100.u256 @@ -53,39 +69,55 @@ twonodessuite "Purchasing": # check request.ask.maxSlotLoss == 1'u64 test "node remembers purchase status after restart", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let id = client1.requestStorage(cid, - duration=10*60.u256, - reward=2.u256, - proofProbability=3.u256, - expiry=5*60, - collateral=200.u256, - nodes=3.uint, - tolerance=1.uint).get - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3*60*1000) + let id = client1.requestStorage( + cid, + duration = 10 * 60.u256, + reward = 2.u256, + proofProbability = 3.u256, + expiry = 5 * 60, + collateral = 200.u256, + nodes = 3.uint, + tolerance = 1.uint, + ).get + check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) await node1.restart() client1.restart() - check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3*60*1000) + check eventually(client1.purchaseStateIs(id, "submitted"), timeout = 3 * 60 * 1000) let request = client1.getPurchase(id).get.request.get - check request.ask.duration == (10*60).u256 + check request.ask.duration == (10 * 60).u256 check 
request.ask.reward == 2.u256 check request.ask.proofProbability == 3.u256 - check request.expiry == (5*60).u256 + check request.expiry == (5 * 60).u256 check request.ask.collateral == 200.u256 check request.ask.slots == 3'u64 check request.ask.maxSlotLoss == 1'u64 test "node requires expiry and its value to be in future", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let responseMissing = client1.requestStorageRaw(cid, duration=1.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256) + let responseMissing = client1.requestStorageRaw( + cid, + duration = 1.u256, + reward = 2.u256, + proofProbability = 3.u256, + collateral = 200.u256, + ) check responseMissing.status == "400 Bad Request" check responseMissing.body == "Expiry required" - let responseBefore = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=10) + let responseBefore = client1.requestStorageRaw( + cid, + duration = 10.u256, + reward = 2.u256, + proofProbability = 3.u256, + collateral = 200.u256, + expiry = 10, + ) check responseBefore.status == "400 Bad Request" - check "Expiry needs value bigger then zero and smaller then the request's duration" in responseBefore.body + check "Expiry needs value bigger then zero and smaller then the request's duration" in + responseBefore.body diff --git a/tests/integration/testrestapi.nim b/tests/integration/testrestapi.nim index 2d5c3392b..9c1ec6334 100644 --- a/tests/integration/testrestapi.nim +++ b/tests/integration/testrestapi.nim @@ -21,7 +21,9 @@ twonodessuite "REST API": test "node shows used and available space", twoNodesConfig: discard client1.upload("some file contents").get - discard client1.postAvailability(totalSize=12.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get + discard client1.postAvailability( + totalSize = 12.u256, duration = 2.u256, minPrice = 3.u256, maxCollateral = 4.u256 + ).get let space = client1.space().tryGet() check: space.totalBlocks == 2 @@ -42,22 +44,38 @@ twonodessuite "REST API": test "request storage fails for datasets that are too small", twoNodesConfig: let cid = client1.upload("some file contents").get - let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9) + let response = client1.requestStorageRaw( + cid, + duration = 10.u256, + reward = 2.u256, + proofProbability = 3.u256, + collateral = 200.u256, + expiry = 9, + ) check: response.status == "400 Bad Request" - response.body == "Dataset too small for erasure parameters, need at least " & $(2*DefaultBlockSize.int) & " bytes" + response.body == + "Dataset too small for erasure parameters, need at least " & + $(2 * DefaultBlockSize.int) & " bytes" test "request storage succeeds for sufficiently sized datasets", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get - let response = client1.requestStorageRaw(cid, duration=10.u256, reward=2.u256, proofProbability=3.u256, collateral=200.u256, expiry=9) + let response = client1.requestStorageRaw( + cid, + duration = 10.u256, + reward = 2.u256, + proofProbability = 3.u256, + collateral = 200.u256, + expiry = 9, + ) check: response.status == "200 OK" test "request storage fails if tolerance is zero", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data 
= await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get let duration = 100.u256 let reward = 2.u256 @@ -67,20 +85,16 @@ twonodessuite "REST API": let nodes = 3 let tolerance = 0 - var responseBefore = client1.requestStorageRaw(cid, - duration, - reward, - proofProbability, - collateral, - expiry, - nodes.uint, - tolerance.uint) + var responseBefore = client1.requestStorageRaw( + cid, duration, reward, proofProbability, collateral, expiry, nodes.uint, + tolerance.uint, + ) check responseBefore.status == "400 Bad Request" check responseBefore.body == "Tolerance needs to be bigger then zero" test "request storage fails if nodes and tolerance aren't correct", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get let duration = 100.u256 let reward = 2.u256 @@ -92,20 +106,18 @@ twonodessuite "REST API": for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw(cid, - duration, - reward, - proofProbability, - collateral, - expiry, - nodes.uint, - tolerance.uint) + var responseBefore = client1.requestStorageRaw( + cid, duration, reward, proofProbability, collateral, expiry, nodes.uint, + tolerance.uint, + ) check responseBefore.status == "400 Bad Request" - check responseBefore.body == "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" + check responseBefore.body == + "Invalid parameters: parameters must satify `1 < (nodes - tolerance) ≥ tolerance`" - test "request storage fails if tolerance > nodes (underflow protection)", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + test "request storage fails if tolerance > nodes (underflow protection)", + twoNodesConfig: + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get let duration = 100.u256 let reward = 2.u256 @@ -117,20 +129,17 @@ twonodessuite "REST API": for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw(cid, - duration, - reward, - proofProbability, - collateral, - expiry, - nodes.uint, - tolerance.uint) + var responseBefore = client1.requestStorageRaw( + cid, duration, reward, proofProbability, collateral, expiry, nodes.uint, + tolerance.uint, + ) check responseBefore.status == "400 Bad Request" - check responseBefore.body == "Invalid parameters: `tolerance` cannot be greater than `nodes`" + check responseBefore.body == + "Invalid parameters: `tolerance` cannot be greater than `nodes`" test "request storage succeeds if nodes and tolerance within range", twoNodesConfig: - let data = await RandomChunker.example(blocks=2) + let data = await RandomChunker.example(blocks = 2) let cid = client1.upload(data).get let duration = 100.u256 let reward = 2.u256 @@ -142,14 +151,10 @@ twonodessuite "REST API": for ecParam in ecParams: let (nodes, tolerance) = ecParam - var responseBefore = client1.requestStorageRaw(cid, - duration, - reward, - proofProbability, - collateral, - expiry, - nodes.uint, - tolerance.uint) + var responseBefore = client1.requestStorageRaw( + cid, duration, reward, proofProbability, collateral, expiry, nodes.uint, + tolerance.uint, + ) check responseBefore.status == "200 OK" @@ -161,13 +166,15 @@ twonodessuite "REST API": check response.body != "" test "node accepts file uploads with content disposition", twoNodesConfig: - let headers = newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""}) + let 
headers = + newHttpHeaders({"Content-Disposition": "attachment; filename=\"example.txt\""}) let response = client1.uploadRaw("some file contents", headers) check response.status == "200 OK" check response.body != "" - test "node accepts file uploads with content disposition without filename", twoNodesConfig: + test "node accepts file uploads with content disposition without filename", + twoNodesConfig: let headers = newHttpHeaders({"Content-Disposition": "attachment"}) let response = client1.uploadRaw("some file contents", headers) @@ -175,7 +182,8 @@ twonodessuite "REST API": check response.body != "" test "upload fails if content disposition contains bad filename", twoNodesConfig: - let headers = newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""}) + let headers = + newHttpHeaders({"Content-Disposition": "attachment; filename=\"exam*ple.txt\""}) let response = client1.uploadRaw("some file contents", headers) check response.status == "422 Unprocessable Entity" @@ -189,7 +197,12 @@ twonodessuite "REST API": check response.body == "The MIME type is not valid." test "node retrieve the metadata", twoNodesConfig: - let headers = newHttpHeaders({"Content-Type": "text/plain", "Content-Disposition": "attachment; filename=\"example.txt\""}) + let headers = newHttpHeaders( + { + "Content-Type": "text/plain", + "Content-Disposition": "attachment; filename=\"example.txt\"", + } + ) let uploadResponse = client1.uploadRaw("some file contents", headers) let cid = uploadResponse.body let listResponse = client1.listRaw() @@ -212,10 +225,12 @@ twonodessuite "REST API": check manifest["uploadedAt"].getInt() > 0 test "node set the headers when for download", twoNodesConfig: - let headers = newHttpHeaders({ - "Content-Disposition": "attachment; filename=\"example.txt\"", - "Content-Type": "text/plain" - }) + let headers = newHttpHeaders( + { + "Content-Disposition": "attachment; filename=\"example.txt\"", + "Content-Type": "text/plain", + } + ) let uploadResponse = client1.uploadRaw("some file contents", headers) let cid = uploadResponse.body @@ -228,7 +243,8 @@ twonodessuite "REST API": check response.headers.hasKey("Content-Type") == true check response.headers["Content-Type"] == "text/plain" check response.headers.hasKey("Content-Disposition") == true - check response.headers["Content-Disposition"] == "attachment; filename=\"example.txt\"" + check response.headers["Content-Disposition"] == + "attachment; filename=\"example.txt\"" let local = true let localResponse = client1.downloadRaw(cid, local) @@ -237,4 +253,5 @@ twonodessuite "REST API": check localResponse.headers.hasKey("Content-Type") == true check localResponse.headers["Content-Type"] == "text/plain" check localResponse.headers.hasKey("Content-Disposition") == true - check localResponse.headers["Content-Disposition"] == "attachment; filename=\"example.txt\"" \ No newline at end of file + check localResponse.headers["Content-Disposition"] == + "attachment; filename=\"example.txt\"" diff --git a/tests/integration/testsales.nim b/tests/integration/testsales.nim index f9af76e51..3cf8bd73b 100644 --- a/tests/integration/testsales.nim +++ b/tests/integration/testsales.nim @@ -16,8 +16,8 @@ proc findItem[T](items: seq[T], item: T): ?!T = multinodesuite "Sales": let salesConfig = NodeConfigs( - clients: CodexConfigs.init(nodes=1).some, - providers: CodexConfigs.init(nodes=1).some, + clients: CodexConfigs.init(nodes = 1).some, + providers: CodexConfigs.init(nodes = 1).some, ) var host: CodexClient @@ -28,22 +28,43 @@ 
multinodesuite "Sales": client = clients()[0].client test "node handles new storage availability", salesConfig: - let availability1 = host.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get - let availability2 = host.postAvailability(totalSize=4.u256, duration=5.u256, minPrice=6.u256, maxCollateral=7.u256).get + let availability1 = host.postAvailability( + totalSize = 1.u256, duration = 2.u256, minPrice = 3.u256, maxCollateral = 4.u256 + ).get + let availability2 = host.postAvailability( + totalSize = 4.u256, duration = 5.u256, minPrice = 6.u256, maxCollateral = 7.u256 + ).get check availability1 != availability2 test "node lists storage that is for sale", salesConfig: - let availability = host.postAvailability(totalSize=1.u256, duration=2.u256, minPrice=3.u256, maxCollateral=4.u256).get + let availability = host.postAvailability( + totalSize = 1.u256, duration = 2.u256, minPrice = 3.u256, maxCollateral = 4.u256 + ).get check availability in host.getAvailabilities().get test "updating non-existing availability", salesConfig: - let nonExistingResponse = host.patchAvailabilityRaw(AvailabilityId.example, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some) + let nonExistingResponse = host.patchAvailabilityRaw( + AvailabilityId.example, + duration = 100.u256.some, + minPrice = 200.u256.some, + maxCollateral = 200.u256.some, + ) check nonExistingResponse.status == "404 Not Found" test "updating availability", salesConfig: - let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get - - host.patchAvailability(availability.id, duration=100.u256.some, minPrice=200.u256.some, maxCollateral=200.u256.some) + let availability = host.postAvailability( + totalSize = 140000.u256, + duration = 200.u256, + minPrice = 300.u256, + maxCollateral = 300.u256, + ).get + + host.patchAvailability( + availability.id, + duration = 100.u256.some, + minPrice = 200.u256.some, + maxCollateral = 200.u256.some, + ) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get check updatedAvailability.duration == 100 @@ -53,45 +74,66 @@ multinodesuite "Sales": check updatedAvailability.freeSize == 140000 test "updating availability - freeSize is not allowed to be changed", salesConfig: - let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get - let freeSizeResponse = host.patchAvailabilityRaw(availability.id, freeSize=110000.u256.some) + let availability = host.postAvailability( + totalSize = 140000.u256, + duration = 200.u256, + minPrice = 300.u256, + maxCollateral = 300.u256, + ).get + let freeSizeResponse = + host.patchAvailabilityRaw(availability.id, freeSize = 110000.u256.some) check freeSizeResponse.status == "400 Bad Request" - check "not allowed" in freeSizeResponse.body + check "not allowed" in freeSizeResponse.body test "updating availability - updating totalSize", salesConfig: - let availability = host.postAvailability(totalSize=140000.u256, duration=200.u256, minPrice=300.u256, maxCollateral=300.u256).get - host.patchAvailability(availability.id, totalSize=100000.u256.some) + let availability = host.postAvailability( + totalSize = 140000.u256, + duration = 200.u256, + minPrice = 300.u256, + maxCollateral = 300.u256, + ).get + host.patchAvailability(availability.id, totalSize = 100000.u256.some) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get 
check updatedAvailability.totalSize == 100000 check updatedAvailability.freeSize == 100000 - test "updating availability - updating totalSize does not allow bellow utilized", salesConfig: + test "updating availability - updating totalSize does not allow bellow utilized", + salesConfig: let originalSize = 0xFFFFFF.u256 - let data = await RandomChunker.example(blocks=8) - let availability = host.postAvailability(totalSize=originalSize, duration=20*60.u256, minPrice=300.u256, maxCollateral=300.u256).get + let data = await RandomChunker.example(blocks = 8) + let availability = host.postAvailability( + totalSize = originalSize, + duration = 20 * 60.u256, + minPrice = 300.u256, + maxCollateral = 300.u256, + ).get # Lets create storage request that will utilize some of the availability's space let cid = client.upload(data).get let id = client.requestStorage( cid, - duration=20*60.u256, - reward=400.u256, - proofProbability=3.u256, - expiry=10*60, - collateral=200.u256, + duration = 20 * 60.u256, + reward = 400.u256, + proofProbability = 3.u256, + expiry = 10 * 60, + collateral = 200.u256, nodes = 3, - tolerance = 1).get + tolerance = 1, + ).get - check eventually(client.purchaseStateIs(id, "started"), timeout=10*60*1000) + check eventually(client.purchaseStateIs(id, "started"), timeout = 10 * 60 * 1000) let updatedAvailability = (host.getAvailabilities().get).findItem(availability).get check updatedAvailability.totalSize != updatedAvailability.freeSize let utilizedSize = updatedAvailability.totalSize - updatedAvailability.freeSize - let totalSizeResponse = host.patchAvailabilityRaw(availability.id, totalSize=(utilizedSize-1.u256).some) + let totalSizeResponse = host.patchAvailabilityRaw( + availability.id, totalSize = (utilizedSize - 1.u256).some + ) check totalSizeResponse.status == "400 Bad Request" check "totalSize must be larger then current totalSize" in totalSizeResponse.body - host.patchAvailability(availability.id, totalSize=(originalSize + 20000).some) - let newUpdatedAvailability = (host.getAvailabilities().get).findItem(availability).get + host.patchAvailability(availability.id, totalSize = (originalSize + 20000).some) + let newUpdatedAvailability = + (host.getAvailabilities().get).findItem(availability).get check newUpdatedAvailability.totalSize == originalSize + 20000 check newUpdatedAvailability.freeSize - updatedAvailability.freeSize == 20000 diff --git a/tests/integration/testupdownload.nim b/tests/integration/testupdownload.nim index 73107b525..74bee8c74 100644 --- a/tests/integration/testupdownload.nim +++ b/tests/integration/testupdownload.nim @@ -56,7 +56,8 @@ twonodessuite "Uploads and downloads": let manifest = jsonData["manifest"] check manifest.hasKey("treeCid") == true - check manifest["treeCid"].getStr() == "zDzSvJTezk7bJNQqFq8k1iHXY84psNuUfZVusA5bBQQUSuyzDSVL" + check manifest["treeCid"].getStr() == + "zDzSvJTezk7bJNQqFq8k1iHXY84psNuUfZVusA5bBQQUSuyzDSVL" check manifest.hasKey("datasetSize") == true check manifest["datasetSize"].getInt() == 18 check manifest.hasKey("blockSize") == true @@ -83,12 +84,12 @@ twonodessuite "Uploads and downloads": test "reliable transfer test", twoNodesConfig: proc transferTest(a: CodexClient, b: CodexClient) {.async.} = - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = 8) let cid = a.upload(data).get let response = b.download(cid).get check: response == data - for run in 0..10: + for run in 0 .. 
10: await transferTest(client1, client2) await transferTest(client2, client1) diff --git a/tests/integration/testvalidator.nim b/tests/integration/testvalidator.nim index f010a23fd..9b93c0e79 100644 --- a/tests/integration/testvalidator.nim +++ b/tests/integration/testvalidator.nim @@ -15,11 +15,12 @@ export logutils logScope: topics = "integration test validation" -template eventuallyS(expression: untyped, timeout=10, step = 5, - cancelExpression: untyped = false): bool = +template eventuallyS( + expression: untyped, timeout = 10, step = 5, cancelExpression: untyped = false +): bool = bind Moment, now, seconds - proc eventuallyS: Future[bool] {.async.} = + proc eventuallyS(): Future[bool] {.async.} = let endTime = Moment.now() + timeout.seconds var secondsElapsed = 0 while not expression: @@ -38,11 +39,8 @@ marketplacesuite "Validation": let proofProbability = 1 proc waitForRequestToFail( - marketplace: Marketplace, - requestId: RequestId, - timeout=10, - step = 5, - ): Future[bool] {.async.} = + marketplace: Marketplace, requestId: RequestId, timeout = 10, step = 5 + ): Future[bool] {.async.} = let endTime = Moment.now() + timeout.seconds var requestState = await marketplace.requestState(requestId) @@ -55,36 +53,35 @@ marketplacesuite "Validation": requestState = await marketplace.requestState(requestId) return true - test "validator marks proofs as missing when using validation groups", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: - HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) + test "validator marks proofs as missing when using validation groups", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally + hardhat: HardhatConfig.none, + clients: CodexConfigs + .init(nodes = 1) # .debug() # uncomment to enable console log output - .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - .withLogTopics("purchases", "onchain") - .some, - - providers: - CodexConfigs.init(nodes=1) - .withSimulateProofFailures(idx=0, failEveryNProofs=1) + .withLogFile() + # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + .withLogTopics("purchases", "onchain").some, + providers: CodexConfigs + .init(nodes = 1) + .withSimulateProofFailures(idx = 0, failEveryNProofs = 1) # .debug() # uncomment to enable console log output # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log # .withLogTopics("sales", "onchain") .some, - - validators: - CodexConfigs.init(nodes=2) + validators: CodexConfigs + .init(nodes = 2) .withValidationGroups(groups = 2) .withValidationGroupIndex(idx = 0, groupIndex = 0) .withValidationGroupIndex(idx = 1, groupIndex = 1) # .debug() # uncomment to enable console log output - .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - .withLogTopics("validator") # each topic as a separate string argument - .some - ): + .withLogFile() + # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + .withLogTopics("validator") + # each topic as a separate string argument + .some, + ): let client0 = clients()[0].client let expiry = 5.periods let duration = expiry + 10.periods @@ -95,8 +92,8 @@ marketplacesuite "Validation": var currentTime = await ethProvider.currentTime() let requestEndTime = currentTime.truncate(uint64) + duration - let data = await RandomChunker.example(blocks=8) - + let data = await RandomChunker.example(blocks = 8) + # TODO: better value for 
data.len below. This TODO is also present in # testproofs.nim - we may want to address it or remove the comment. createAvailabilities(data.len * 2, duration) let cid = client0.upload(data).get let purchaseId = await client0.requestStorage( cid, - expiry=expiry, - duration=duration, - nodes=nodes, - tolerance=tolerance, - proofProbability=proofProbability + expiry = expiry, + duration = duration, + nodes = nodes, + tolerance = tolerance, + proofProbability = proofProbability, ) let requestId = client0.requestId(purchaseId).get debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId - if not eventuallyS(client0.purchaseStateIs(purchaseId, "started"), - timeout = (expiry + 60).int, step = 5): + if not eventuallyS( + client0.purchaseStateIs(purchaseId, "started"), + timeout = (expiry + 60).int, + step = 5, + ): debug "validation suite: timed out waiting for the purchase to start" fail() return @@ -127,31 +127,27 @@ marketplacesuite "Validation": debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds check await marketplace.waitForRequestToFail( - requestId, - timeout = secondsTillRequestEnd + 60, - step = 5 + requestId, timeout = secondsTillRequestEnd + 60, step = 5 ) - test "validator uses historical state to mark missing proofs", NodeConfigs( - # Uncomment to start Hardhat automatically, typically so logs can be inspected locally - hardhat: - HardhatConfig.none, - - clients: - CodexConfigs.init(nodes=1) + test "validator uses historical state to mark missing proofs", + NodeConfigs( + # Uncomment to start Hardhat automatically, typically so logs can be inspected locally
      hardhat: HardhatConfig.none, + clients: CodexConfigs + .init(nodes = 1) # .debug() # uncomment to enable console log output - .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log - .withLogTopics("purchases", "onchain") - .some, - - providers: - CodexConfigs.init(nodes=1) - .withSimulateProofFailures(idx=0, failEveryNProofs=1) + .withLogFile() + # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log + .withLogTopics("purchases", "onchain").some, + providers: CodexConfigs + .init(nodes = 1) + .withSimulateProofFailures(idx = 0, failEveryNProofs = 1) # .debug() # uncomment to enable console log output # .withLogFile() # uncomment to output log file to tests/integration/logs/<start_datetime> <suite_name>/<test_name>/<role>_<node_idx>.log # .withLogTopics("sales", "onchain") - .some - ): + .some, + ): let client0 = clients()[0].client let expiry = 5.periods let duration = expiry + 10.periods @@ -162,7 +158,7 @@ marketplacesuite "Validation": var currentTime = await ethProvider.currentTime() let requestEndTime = currentTime.truncate(uint64) + duration - let data = await RandomChunker.example(blocks=8) + let data = await RandomChunker.example(blocks = 8) # TODO: better value for data.len below. This TODO is also present in # testproofs.nim - we may want to address it or remove the comment. 
@@ -171,42 +167,44 @@ marketplacesuite "Validation":
     let cid = client0.upload(data).get
     let purchaseId = await client0.requestStorage(
       cid,
-      expiry=expiry,
-      duration=duration,
-      nodes=nodes,
-      tolerance=tolerance,
-      proofProbability=proofProbability
+      expiry = expiry,
+      duration = duration,
+      nodes = nodes,
+      tolerance = tolerance,
+      proofProbability = proofProbability,
     )
     let requestId = client0.requestId(purchaseId).get
 
     debug "validation suite", purchaseId = purchaseId.toHex, requestId = requestId
 
-    if not eventuallyS(client0.purchaseStateIs(purchaseId, "started"),
-      timeout = (expiry + 60).int, step = 5):
+    if not eventuallyS(
+      client0.purchaseStateIs(purchaseId, "started"),
+      timeout = (expiry + 60).int,
+      step = 5,
+    ):
       debug "validation suite: timed out waiting for the purchase to start"
       fail()
       return
-
+
     # extra block just to make sure we have one that separates us
     # from the block containing the last (past) SlotFilled event
     discard await ethProvider.send("evm_mine")
 
-    var validators = CodexConfigs.init(nodes=2)
+    var validators = CodexConfigs
+      .init(nodes = 2)
       .withValidationGroups(groups = 2)
       .withValidationGroupIndex(idx = 0, groupIndex = 0)
       .withValidationGroupIndex(idx = 1, groupIndex = 1)
       # .debug() # uncomment to enable console log output
-      .withLogFile() # uncomment to output log file to: # tests/integration/logs/<start_datetime> <suite_name>/<test_name>-<node_role>_<node_idx>.log
+      .withLogFile()
+      # uncomment to output log file to: # tests/integration/logs/<start_datetime> <suite_name>/<test_name>-<node_role>_<node_idx>.log
       .withLogTopics("validator") # each topic as a separate string argument
-
+
     failAndTeardownOnError "failed to start validator nodes":
       for config in validators.configs.mitems:
         let node = await startValidatorNode(config)
-        running.add RunningNode(
-          role: Role.Validator,
-          node: node
-        )
-
+        running.add RunningNode(role: Role.Validator, node: node)
+
     discard await ethProvider.send("evm_mine")
     currentTime = await ethProvider.currentTime()
     let secondsTillRequestEnd = (requestEndTime - currentTime.truncate(uint64)).int
@@ -214,7 +212,5 @@ marketplacesuite "Validation":
     debug "validation suite", secondsTillRequestEnd = secondsTillRequestEnd.seconds
 
     check await marketplace.waitForRequestToFail(
-      requestId,
-      timeout = secondsTillRequestEnd + 60,
-      step = 5
+      requestId, timeout = secondsTillRequestEnd + 60, step = 5
     )
diff --git a/tests/integration/twonodes.nim b/tests/integration/twonodes.nim
index ac2d149ad..5666690e9 100644
--- a/tests/integration/twonodes.nim
+++ b/tests/integration/twonodes.nim
@@ -10,9 +10,10 @@ import ./nodeconfigs
 export codexclient
 export multinodes
 
-template twonodessuite*(name: string, body: untyped) = 
+template twonodessuite*(name: string, body: untyped) =
   multinodesuite name:
-    let twoNodesConfig {.inject, used.} = NodeConfigs(clients: CodexConfigs.init(nodes=2).some)
+    let twoNodesConfig {.inject, used.} =
+      NodeConfigs(clients: CodexConfigs.init(nodes = 2).some)
 
     var node1 {.inject, used.}: CodexProcess
     var node2 {.inject, used.}: CodexProcess
diff --git a/tests/logging.nim b/tests/logging.nim
index ece9c9b08..d165fe485 100644
--- a/tests/logging.nim
+++ b/tests/logging.nim
@@ -6,5 +6,5 @@ when not defined(nimscript):
 
   defaultChroniclesStream.output.writer = ignoreLogging
 
-  {.warning[UnusedImport]:off.}
+  {.warning[UnusedImport]: off.}
   {.used.}
diff --git a/tests/testContracts.nim b/tests/testContracts.nim
index aff2c1d77..d5ed7d6a5 100644
--- a/tests/testContracts.nim
+++ b/tests/testContracts.nim
@@ -4,4 +4,4 @@ import ./contracts/testDeployment
 import ./contracts/testClock
 import ./contracts/testProvider
 
-{.warning[UnusedImport]:off.}
+{.warning[UnusedImport]: off.}
diff --git a/tests/testIntegration.nim b/tests/testIntegration.nim
index f0a59ee45..9a2dc472b 100644
--- a/tests/testIntegration.nim
+++ b/tests/testIntegration.nim
@@ -9,4 +9,4 @@ import ./integration/testproofs
 import ./integration/testvalidator
 import ./integration/testecbug
 
-{.warning[UnusedImport]:off.}
+{.warning[UnusedImport]: off.}
diff --git a/tests/testTaiko.nim b/tests/testTaiko.nim
index c0a48396a..e86d2bd41 100644
--- a/tests/testTaiko.nim
+++ b/tests/testTaiko.nim
@@ -10,35 +10,33 @@ import pkg/questionable/results
 import ./asynctest
 import ./integration/nodes
 
-
 suite "Taiko L2 Integration Tests":
-
   var node1, node2: NodeProcess
 
   setup:
     doAssert existsEnv("CODEX_ETH_PRIVATE_KEY"), "Key for Taiko account missing"
 
-    node1 = startNode([
-      "--data-dir=" & createTempDir("", ""),
-      "--api-port=8080",
-      "--nat=none",
-      "--disc-port=8090",
-      "--persistence",
-      "--eth-provider=https://rpc.test.taiko.xyz"
-    ])
+    node1 = startNode(
+      [
+        "--data-dir=" & createTempDir("", ""), "--api-port=8080", "--nat=none",
+        "--disc-port=8090", "--persistence", "--eth-provider=https://rpc.test.taiko.xyz",
+      ]
+    )
     node1.waitUntilStarted()
 
     let bootstrap = (!node1.client.info())["spr"].getStr()
 
-    node2 = startNode([
-      "--data-dir=" & createTempDir("", ""),
-      "--api-port=8081",
-      "--nat=none",
-      "--disc-port=8091",
-      "--bootstrap-node=" & bootstrap,
-      "--persistence",
-      "--eth-provider=https://rpc.test.taiko.xyz"
-    ])
+    node2 = startNode(
+      [
+        "--data-dir=" & createTempDir("", ""),
+        "--api-port=8081",
+        "--nat=none",
+        "--disc-port=8091",
+        "--bootstrap-node=" & bootstrap,
+        "--persistence",
+        "--eth-provider=https://rpc.test.taiko.xyz",
+      ]
+    )
     node2.waitUntilStarted()
 
   teardown:
@@ -49,26 +47,33 @@ suite "Taiko L2 Integration Tests":
 
   test "node 1 buys storage from node 2":
     discard node2.client.postAvailability(
-      size=0xFFFFF.u256,
-      duration=200.u256,
-      minPrice=300.u256,
-      maxCollateral=300.u256
+      size = 0xFFFFF.u256,
+      duration = 200.u256,
+      minPrice = 300.u256,
+      maxCollateral = 300.u256,
    )
     let cid = !node1.client.upload("some file contents")
 
     echo " - requesting storage, expires in 5 minutes"
     let expiry = getTime().toUnix().uint64 + 5 * 60
-    let purchase = !node1.client.requestStorage(
-      cid,
-      duration=30.u256,
-      reward=400.u256,
-      proofProbability=3.u256,
-      collateral=200.u256,
-      expiry=expiry.u256
-    )
+    let purchase =
+      !node1.client.requestStorage(
+        cid,
+        duration = 30.u256,
+        reward = 400.u256,
+        proofProbability = 3.u256,
+        collateral = 200.u256,
+        expiry = expiry.u256,
+      )
 
     echo " - waiting for request to start, timeout 5 minutes"
-    check eventually(node1.client.getPurchase(purchase).?state == success "started", timeout = 5 * 60 * 1000)
+    check eventually(
+      node1.client.getPurchase(purchase) .? state == success "started",
+      timeout = 5 * 60 * 1000,
+    )
 
     echo " - waiting for request to finish, timeout 1 minute"
-    check eventually(node1.client.getPurchase(purchase).?state == success "finished", timeout = 1 * 60 * 1000)
+    check eventually(
+      node1.client.getPurchase(purchase) .? state == success "finished",
+      timeout = 1 * 60 * 1000,
+    )
diff --git a/tests/testTools.nim b/tests/testTools.nim
index b46759587..f3ead1d17 100644
--- a/tests/testTools.nim
+++ b/tests/testTools.nim
@@ -1,3 +1,3 @@
 import ./tools/cirdl/testcirdl
 
-{.warning[UnusedImport]:off.}
+{.warning[UnusedImport]: off.}
diff --git a/tests/tools/cirdl/testcirdl.nim b/tests/tools/cirdl/testcirdl.nim
index a4fd0fc0f..dc02be4dc 100644
--- a/tests/tools/cirdl/testcirdl.nim
+++ b/tests/tools/cirdl/testcirdl.nim
@@ -21,20 +21,15 @@ suite "tools/cirdl":
 
     let args = [circuitPath, rpcEndpoint, $marketplaceAddress]
 
-    let process = osproc.startProcess(
-      cirdl,
-      workdir,
-      args,
-      options={poParentStreams}
-    )
+    let process = osproc.startProcess(cirdl, workdir, args, options = {poParentStreams})
 
     let returnCode = process.waitForExit()
     check returnCode == 0
 
     check:
-      fileExists(circuitPath/"proof_main_verification_key.json")
-      fileExists(circuitPath/"proof_main.r1cs")
-      fileExists(circuitPath/"proof_main.wasm")
-      fileExists(circuitPath/"proof_main.zkey")
+      fileExists(circuitPath / "proof_main_verification_key.json")
+      fileExists(circuitPath / "proof_main.r1cs")
+      fileExists(circuitPath / "proof_main.wasm")
+      fileExists(circuitPath / "proof_main.zkey")
 
     removeDir(circuitPath)
diff --git a/tools/scripts/git_pre_commit_format.sh b/tools/scripts/git_pre_commit_format.sh
new file mode 100755
index 000000000..f52c36507
--- /dev/null
+++ b/tools/scripts/git_pre_commit_format.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+echo "Running pre-commit hook"
+
+# Regexp for grep to only choose some file extensions for formatting
+exts="\.\(nim\|nims\)$"
+
+# Build nph lazily
+make build-nph || (1>&2 echo "failed to build nph. Pre-commit formatting will not be done."; exit 0)
+
+# Format staged files
+git diff --cached --name-only --diff-filter=ACMR | grep "$exts" | while read file; do
+  echo "Formatting $file"
+  make nph/"$file"
+  git add "$file"
+done
diff --git a/vendor/nph b/vendor/nph
new file mode 160000
index 000000000..65b1acfd7
--- /dev/null
+++ b/vendor/nph
@@ -0,0 +1 @@
+Subproject commit 65b1acfd717ed16f6bb866801df90d8369e85f99