diff --git a/src/include/storage/store/rel_table.h b/src/include/storage/store/rel_table.h
index fe4a1294c57..0ced8da8e9f 100644
--- a/src/include/storage/store/rel_table.h
+++ b/src/include/storage/store/rel_table.h
@@ -54,9 +54,7 @@ struct RelTableScanState : TableScanState {
         }
     }
 
-    void resetState() override {
-        nodeGroupScanState->resetState();
-    }
+    void resetState() override { nodeGroupScanState->resetState(); }
 };
 
 class LocalRelTable;
diff --git a/src/storage/local_storage/local_rel_table.cpp b/src/storage/local_storage/local_rel_table.cpp
index 7842dfcd7a1..a27da2a6967 100644
--- a/src/storage/local_storage/local_rel_table.cpp
+++ b/src/storage/local_storage/local_rel_table.cpp
@@ -148,7 +148,7 @@ void LocalRelTable::initializeScan(TableScanState& state) {
     auto& nodeSelVector = relScanState.boundNodeIDVector->state->getSelVector();
    auto& index = relScanState.direction == RelDataDirection::FWD ? fwdIndex : bwdIndex;
     offset_t nodeOffset =
-        relScanState.boundNodeIDVector->readNodeOffset(nodeSelVector[relScanState.endNodeIdx++]);
+        relScanState.boundNodeIDVector->readNodeOffset(nodeSelVector[relScanState.endNodeIdx++]);
     if (index.contains(nodeOffset)) {
         relScanState.rowIndices = index[nodeOffset];
         KU_ASSERT(std::is_sorted(relScanState.rowIndices.begin(), relScanState.rowIndices.end()));
@@ -180,8 +180,8 @@ bool LocalRelTable::scan(Transaction* transaction, TableScanState& state) const
     KU_ASSERT(relScanState.localTableScanState);
     auto& localScanState = *relScanState.localTableScanState;
     KU_ASSERT(localScanState.rowIndices.size() >= localScanState.nextRowToScan);
-    relScanState.batchSize = std::min(localScanState.rowIndices.size() - localScanState.nextRowToScan,
-        DEFAULT_VECTOR_CAPACITY);
+    relScanState.batchSize = std::min(
+        localScanState.rowIndices.size() - localScanState.nextRowToScan, DEFAULT_VECTOR_CAPACITY);
     if (relScanState.batchSize == 0) {
         return false;
     }
diff --git a/src/storage/store/csr_node_group.cpp b/src/storage/store/csr_node_group.cpp
index 84f246e39ff..9a2ec65399c 100644
--- a/src/storage/store/csr_node_group.cpp
+++ b/src/storage/store/csr_node_group.cpp
@@ -25,8 +25,8 @@ void CSRNodeGroup::initializeScanState(Transaction* transaction, TableScanState&
     initializePersistentCSRHeader(transaction, relScanState, nodeGroupScanState);
     // Queue persistent nodes to be scanned in the node group.
     while (relScanState.endNodeIdx < relScanState.totalNodeIdx) {
-        auto nodeOffset =
-            relScanState.boundNodeIDVector->readNodeOffset(nodeSelVector[relScanState.endNodeIdx]);
+        auto nodeOffset = relScanState.boundNodeIDVector->readNodeOffset(
+            nodeSelVector[relScanState.endNodeIdx]);
         if (nodeOffset >= StorageConstants::MAX_NUM_ROWS_IN_TABLE) {
             break;
         }
@@ -56,15 +56,17 @@ void CSRNodeGroup::initializeInMemScanState(TableScanState& state) {
     KU_ASSERT(relScanState.nodeGroupScanState);
     auto& nodeGroupScanState = relScanState.nodeGroupScanState->cast<CSRNodeGroupScanState>();
     const auto startNodeOffset = StorageUtils::getStartOffsetOfNodeGroup(nodeGroupIdx);
-    const auto offsetInGroup = relScanState.boundNodeIDVector->readNodeOffset(
-        nodeSelVector[relScanState.endNodeIdx++]) - startNodeOffset;
+    const auto offsetInGroup =
+        relScanState.boundNodeIDVector->readNodeOffset(nodeSelVector[relScanState.endNodeIdx++]) -
+        startNodeOffset;
     nodeGroupScanState.inMemCSRList = csrIndex->indices[offsetInGroup];
     if (!nodeGroupScanState.inMemCSRList.isSequential) {
-        KU_ASSERT(std::is_sorted(nodeGroupScanState.inMemCSRList.rowIndices.begin(),
+        KU_ASSERT(std::is_sorted(nodeGroupScanState.inMemCSRList.rowIndices.begin(),
             nodeGroupScanState.inMemCSRList.rowIndices.end()));
     }
     nodeGroupScanState.source = nodeGroupScanState.inMemCSRList.rowIndices.size() > 0 ?
-        CSRNodeGroupScanSource::COMMITTED_IN_MEMORY : CSRNodeGroupScanSource::NONE;
+                                    CSRNodeGroupScanSource::COMMITTED_IN_MEMORY :
+                                    CSRNodeGroupScanSource::NONE;
 }
 
 void CSRNodeGroup::initializePersistentCSRHeader(Transaction* transaction,
@@ -130,9 +132,10 @@ NodeGroupScanResult CSRNodeGroup::scan(Transaction* transaction, TableScanState&
         return result;
     }
     case CSRNodeGroupScanSource::COMMITTED_IN_MEMORY: {
-        const auto result = nodeGroupScanState.inMemCSRList.isSequential ?
-            scanCommittedInMemSequential(transaction, relScanState, nodeGroupScanState) :
-            scanCommittedInMemRandom(transaction, relScanState, nodeGroupScanState);
+        const auto result =
+            nodeGroupScanState.inMemCSRList.isSequential ?
+                scanCommittedInMemSequential(transaction, relScanState, nodeGroupScanState) :
+                scanCommittedInMemRandom(transaction, relScanState, nodeGroupScanState);
         if (result == NODE_GROUP_SCAN_EMMPTY_RESULT) {
             relScanState.IDVector->state->getSelVectorUnsafe().setSelSize(0);
             return NODE_GROUP_SCAN_EMMPTY_RESULT;
diff --git a/src/storage/store/rel_table.cpp b/src/storage/store/rel_table.cpp
index f2d8f149d8e..25bd496c824 100644
--- a/src/storage/store/rel_table.cpp
+++ b/src/storage/store/rel_table.cpp
@@ -60,7 +60,7 @@ void RelTable::initializeScanState(Transaction* transaction, TableScanState& sca
     KU_ASSERT(relScanState.totalNodeIdx > 0);
     KU_ASSERT(relScanState.endNodeIdx == relScanState.currNodeIdx);
     KU_ASSERT(relScanState.endNodeIdx < relScanState.totalNodeIdx);
-    offset_t nodeOffset =
+    offset_t nodeOffset =
         relScanState.boundNodeIDVector->readNodeOffset(nodeSelVector[relScanState.currNodeIdx]);
     if (nodeOffset >= StorageConstants::MAX_NUM_ROWS_IN_TABLE) {
         // No more to read from committed