
Commit

fix
JingsongLi committed Sep 23, 2024
1 parent e7d1f33 commit 656689d
Showing 23 changed files with 165 additions and 104 deletions.
@@ -20,6 +20,8 @@

import org.apache.paimon.annotation.Public;
import org.apache.paimon.data.BinaryRow;
import org.apache.paimon.io.DataFileMeta;
import org.apache.paimon.table.source.DataSplit;

import java.util.Collection;
import java.util.HashMap;
@@ -97,6 +99,14 @@ public static PartitionEntry fromManifestEntry(ManifestEntry entry) {
entry.file().creationTimeEpochMillis());
}

public static PartitionEntry fromDataFile(BinaryRow partition, DataFileMeta file) {
long recordCount = file.rowCount();
long fileSizeInBytes = file.fileSize();
long fileCount = 1;
return new PartitionEntry(
partition, recordCount, fileSizeInBytes, fileCount, file.creationTimeEpochMillis());
}

public static Collection<PartitionEntry> merge(Collection<ManifestEntry> fileEntries) {
Map<BinaryRow, PartitionEntry> partitions = new HashMap<>();
for (ManifestEntry entry : fileEntries) {
@@ -108,6 +118,23 @@ public static Collection<PartitionEntry> merge(Collection<ManifestEntry> fileEntries) {
return partitions.values();
}

public static Collection<PartitionEntry> mergeSplits(Collection<DataSplit> splits) {
Map<BinaryRow, PartitionEntry> partitions = new HashMap<>();
for (DataSplit split : splits) {
BinaryRow partition = split.partition();
if (!split.beforeFiles().isEmpty()) {
throw new UnsupportedOperationException();
}
for (DataFileMeta file : split.dataFiles()) {
PartitionEntry partitionEntry = fromDataFile(partition, file);
partitions.compute(
partition,
(part, old) -> old == null ? partitionEntry : old.merge(partitionEntry));
}
}
return partitions.values();
}

public static void merge(Collection<PartitionEntry> from, Map<BinaryRow, PartitionEntry> to) {
for (PartitionEntry entry : from) {
to.compute(entry.partition(), (part, old) -> old == null ? entry : old.merge(entry));
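The two helpers added above (fromDataFile and mergeSplits) build per-partition statistics straight from planned splits. A minimal usage sketch follows; the wrapper class and method are invented for illustration, and the PartitionEntry getters (recordCount, fileCount, fileSizeInBytes) are assumed to mirror the constructor arguments.

import org.apache.paimon.manifest.PartitionEntry;
import org.apache.paimon.table.source.DataSplit;

import java.util.Collection;
import java.util.List;

public class PartitionStatsSketch {

    // Aggregates row/file statistics per partition from already-planned splits.
    // mergeSplits rejects splits that carry beforeFiles, so this only applies to
    // plain data splits.
    static void printPartitionStats(List<DataSplit> splits) {
        Collection<PartitionEntry> entries = PartitionEntry.mergeSplits(splits);
        for (PartitionEntry entry : entries) {
            System.out.printf(
                    "partition=%s rows=%d files=%d bytes=%d%n",
                    entry.partition(),
                    entry.recordCount(),
                    entry.fileCount(),
                    entry.fileSizeInBytes());
        }
    }
}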
@@ -22,7 +22,6 @@
import org.apache.paimon.Snapshot;
import org.apache.paimon.data.BinaryRow;
import org.apache.paimon.data.InternalRow;
import org.apache.paimon.io.DataFileMeta;
import org.apache.paimon.manifest.BucketEntry;
import org.apache.paimon.manifest.FileEntry;
import org.apache.paimon.manifest.ManifestCacheFilter;
@@ -55,7 +54,6 @@
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -289,22 +287,6 @@ public List<BucketEntry> readBucketEntries() {
.collect(Collectors.toList());
}

@Override
public long rowCount() {
List<ManifestFileMeta> manifests = readManifests().filteredManifests;
AtomicLong rowCount = new AtomicLong(0);
Consumer<ManifestFileMeta> processor =
m -> {
List<ManifestEntry> entries = readManifest(m);
for (ManifestEntry entry : entries) {
DataFileMeta file = entry.file();
rowCount.addAndGet(file.addRowCount().orElse(file.rowCount()));
}
};
randomlyOnlyExecute(getExecutorService(parallelism), processor, manifests);
return rowCount.get();
}

private Pair<Snapshot, List<ManifestEntry>> doPlan() {
long started = System.nanoTime();
ManifestsReader.Result manifestsResult = readManifests();
@@ -108,12 +108,6 @@ default Long totalRecordCount(Snapshot snapshot) {

List<BucketEntry> readBucketEntries();

/**
* Short path of count all records, this is not work good for primary key table and deletion
* vectors table.
*/
long rowCount();

default List<BinaryRow> listPartitions() {
return readPartitionEntries().stream()
.map(PartitionEntry::partition)
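With the rowCount() short path removed from this interface, a comparable total can still be derived from the partition entries it continues to expose. A hedged sketch, assuming PartitionEntry has a recordCount() getter and that the import paths match the packages seen elsewhere in this diff; like the removed short path, it counts rows in data files and is therefore inaccurate for primary key and deletion-vector tables.

import org.apache.paimon.manifest.PartitionEntry;
import org.apache.paimon.operation.FileStoreScan;

public class RowCountSketch {

    // Sums record counts over the merged per-partition entries returned by
    // FileStoreScan#readPartitionEntries().
    static long approximateRowCount(FileStoreScan scan) {
        return scan.readPartitionEntries().stream()
                .mapToLong(PartitionEntry::recordCount)
                .sum();
    }
}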
@@ -19,6 +19,7 @@
package org.apache.paimon.table.source;

import org.apache.paimon.CoreOptions;
import org.apache.paimon.manifest.PartitionEntry;
import org.apache.paimon.operation.DefaultValueAssigner;
import org.apache.paimon.predicate.Predicate;
import org.apache.paimon.table.source.snapshot.SnapshotReader;
@@ -81,6 +82,19 @@ public TableScan.Plan plan() {
}
}

public List<PartitionEntry> planPartitions() {
if (startingScanner == null) {
startingScanner = createStartingScanner(false);
}

if (hasNext) {
hasNext = false;
return startingScanner.scanPartitions(snapshotReader);
} else {
throw new EndOfScanException();
}
}

private StartingScanner.Result applyPushDownLimit(StartingScanner.Result result) {
if (pushDownLimit != null && result instanceof ScannedResult) {
long scannedRowCount = 0;
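A minimal sketch of calling the new planPartitions() entry point; the enclosing class is assumed to be DataTableBatchScan (the file name is not visible in this view), and how the scan instance is obtained is left out.

import org.apache.paimon.manifest.PartitionEntry;
import org.apache.paimon.table.source.DataTableBatchScan;

import java.util.List;

public class PlanPartitionsSketch {

    // Lists the partitions a batch scan would touch without materializing a full
    // split plan. planPartitions() follows the same one-shot contract as plan():
    // a second call throws EndOfScanException.
    static void printScannedPartitions(DataTableBatchScan scan) {
        List<PartitionEntry> partitions = scan.planPartitions();
        for (PartitionEntry entry : partitions) {
            System.out.println("partition: " + entry.partition());
        }
    }
}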
@@ -18,9 +18,14 @@

package org.apache.paimon.table.source.snapshot;

import org.apache.paimon.manifest.PartitionEntry;
import org.apache.paimon.table.source.ScanMode;
import org.apache.paimon.utils.SnapshotManager;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/** The abstract class for StartingScanner. */
public abstract class AbstractStartingScanner implements StartingScanner {

@@ -44,4 +49,13 @@ public StartingContext startingContext() {
return new StartingContext(startingSnapshotId, startingScanMode() == ScanMode.ALL);
}
}

@Override
public List<PartitionEntry> scanPartitions(SnapshotReader snapshotReader) {
Result result = scan(snapshotReader);
if (result instanceof ScannedResult) {
return new ArrayList<>(PartitionEntry.mergeSplits(((ScannedResult) result).splits()));
}
return Collections.emptyList();
}
}
@@ -29,7 +29,7 @@
import javax.annotation.Nullable;

/** {@link StartingScanner} for the {@link CoreOptions.StartupMode#COMPACTED_FULL} startup mode. */
public class CompactedStartingScanner extends AbstractStartingScanner {
public class CompactedStartingScanner extends ReadPlanStartingScanner {

private static final Logger LOG = LoggerFactory.getLogger(CompactedStartingScanner.class);

@@ -44,22 +44,21 @@ public ScanMode startingScanMode() {
}

@Override
public Result scan(SnapshotReader snapshotReader) {
public SnapshotReader configure(SnapshotReader snapshotReader) {
Long startingSnapshotId = pick();
if (startingSnapshotId == null) {
startingSnapshotId = snapshotManager.latestSnapshotId();
if (startingSnapshotId == null) {
LOG.debug("There is currently no snapshot. Wait for the snapshot generation.");
return new NoSnapshot();
return null;
} else {
LOG.debug(
"No compact snapshot found, reading from the latest snapshot {}.",
startingSnapshotId);
}
}

return StartingScanner.fromPlan(
snapshotReader.withMode(ScanMode.ALL).withSnapshot(startingSnapshotId).read());
return snapshotReader.withMode(ScanMode.ALL).withSnapshot(startingSnapshotId);
}

@Nullable
@@ -26,7 +26,7 @@
* {@link StartingScanner} for the {@link CoreOptions.StartupMode#FROM_SNAPSHOT_FULL} startup mode
* of a batch read.
*/
public class ContinuousFromSnapshotFullStartingScanner extends AbstractStartingScanner {
public class ContinuousFromSnapshotFullStartingScanner extends ReadPlanStartingScanner {

public ContinuousFromSnapshotFullStartingScanner(
SnapshotManager snapshotManager, long snapshotId) {
@@ -40,13 +40,12 @@ public ScanMode startingScanMode() {
}

@Override
public Result scan(SnapshotReader snapshotReader) {
public SnapshotReader configure(SnapshotReader snapshotReader) {
Long earliestSnapshotId = snapshotManager.earliestSnapshotId();
if (earliestSnapshotId == null) {
return new NoSnapshot();
return null;
}
long ceiledSnapshotId = Math.max(startingSnapshotId, earliestSnapshotId);
return StartingScanner.fromPlan(
snapshotReader.withMode(ScanMode.ALL).withSnapshot(ceiledSnapshotId).read());
return snapshotReader.withMode(ScanMode.ALL).withSnapshot(ceiledSnapshotId);
}
}
@@ -29,7 +29,7 @@
* {@link StartingScanner} for the {@link CoreOptions.StartupMode#FROM_FILE_CREATION_TIME} startup
* mode.
*/
public class FileCreationTimeStartingScanner extends AbstractStartingScanner {
public class FileCreationTimeStartingScanner extends ReadPlanStartingScanner {

private static final Logger LOG =
LoggerFactory.getLogger(FileCreationTimeStartingScanner.class);
@@ -47,18 +47,16 @@ public ScanMode startingScanMode() {
}

@Override
public Result scan(SnapshotReader snapshotReader) {
public SnapshotReader configure(SnapshotReader snapshotReader) {
Long startingSnapshotId = snapshotManager.latestSnapshotId();
if (startingSnapshotId == null) {
LOG.debug("There is currently no snapshot. Waiting for snapshot generation.");
return new NoSnapshot();
return null;
}
return StartingScanner.fromPlan(
snapshotReader
.withMode(ScanMode.ALL)
.withSnapshot(startingSnapshotId)
.withManifestEntryFilter(
entry -> entry.file().creationTimeEpochMillis() >= startupMillis)
.read());
return snapshotReader
.withMode(ScanMode.ALL)
.withSnapshot(startingSnapshotId)
.withManifestEntryFilter(
entry -> entry.file().creationTimeEpochMillis() >= startupMillis);
}
}
@@ -33,7 +33,7 @@
* {@link StartingScanner} for the {@link StartupMode#COMPACTED_FULL} startup mode with
* 'full-compaction.delta-commits'.
*/
public class FullCompactedStartingScanner extends AbstractStartingScanner {
public class FullCompactedStartingScanner extends ReadPlanStartingScanner {

private static final Logger LOG = LoggerFactory.getLogger(FullCompactedStartingScanner.class);

@@ -62,22 +62,21 @@ public ScanMode startingScanMode() {
}

@Override
public Result scan(SnapshotReader snapshotReader) {
public SnapshotReader configure(SnapshotReader snapshotReader) {
Long startingSnapshotId = pick();
if (startingSnapshotId == null) {
startingSnapshotId = snapshotManager.latestSnapshotId();
if (startingSnapshotId == null) {
LOG.debug("There is currently no snapshot. Wait for the snapshot generation.");
return new NoSnapshot();
return null;
} else {
LOG.debug(
"No compact snapshot found, reading from the latest snapshot {}.",
startingSnapshotId);
}
}

return StartingScanner.fromPlan(
snapshotReader.withMode(ScanMode.ALL).withSnapshot(startingSnapshotId).read());
return snapshotReader.withMode(ScanMode.ALL).withSnapshot(startingSnapshotId);
}

public static boolean isFullCompactedIdentifier(long identifier, int deltaCommits) {
@@ -26,7 +26,7 @@
import org.slf4j.LoggerFactory;

/** {@link StartingScanner} for the {@link CoreOptions.StartupMode#LATEST_FULL} startup mode. */
public class FullStartingScanner extends AbstractStartingScanner {
public class FullStartingScanner extends ReadPlanStartingScanner {

private static final Logger LOG = LoggerFactory.getLogger(FullStartingScanner.class);

@@ -41,16 +41,15 @@ public ScanMode startingScanMode() {
}

@Override
public Result scan(SnapshotReader snapshotReader) {
public SnapshotReader configure(SnapshotReader snapshotReader) {
if (startingSnapshotId == null) {
// try to get first snapshot again
startingSnapshotId = snapshotManager.latestSnapshotId();
}
if (startingSnapshotId == null) {
LOG.debug("There is currently no snapshot. Waiting for snapshot generation.");
return new NoSnapshot();
return null;
}
return StartingScanner.fromPlan(
snapshotReader.withMode(ScanMode.ALL).withSnapshot(startingSnapshotId).read());
return snapshotReader.withMode(ScanMode.ALL).withSnapshot(startingSnapshotId);
}
}
@@ -0,0 +1,56 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.paimon.table.source.snapshot;

import org.apache.paimon.manifest.PartitionEntry;
import org.apache.paimon.utils.SnapshotManager;

import javax.annotation.Nullable;

import java.util.Collections;
import java.util.List;

/** An {@link AbstractStartingScanner} to return plan. */
public abstract class ReadPlanStartingScanner extends AbstractStartingScanner {

ReadPlanStartingScanner(SnapshotManager snapshotManager) {
super(snapshotManager);
}

@Nullable
protected abstract SnapshotReader configure(SnapshotReader snapshotReader);

@Override
public Result scan(SnapshotReader snapshotReader) {
SnapshotReader configured = configure(snapshotReader);
if (configured == null) {
return new NoSnapshot();
}
return StartingScanner.fromPlan(configured.read());
}

@Override
public List<PartitionEntry> scanPartitions(SnapshotReader snapshotReader) {
SnapshotReader configured = configure(snapshotReader);
if (configured == null) {
return Collections.emptyList();
}
return configured.partitionEntries();
}
}
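To show how the new base class is meant to be extended, here is a hedged sketch of a subclass; the name LatestOnlyStartingScanner is invented, and it mirrors the pattern followed by the scanners converted in this commit: return a configured SnapshotReader, or null when no snapshot is available yet.

package org.apache.paimon.table.source.snapshot;

import org.apache.paimon.table.source.ScanMode;
import org.apache.paimon.utils.SnapshotManager;

import javax.annotation.Nullable;

/** Illustrative only: always reads the latest snapshot in full. */
public class LatestOnlyStartingScanner extends ReadPlanStartingScanner {

    public LatestOnlyStartingScanner(SnapshotManager snapshotManager) {
        super(snapshotManager);
    }

    @Override
    public ScanMode startingScanMode() {
        return ScanMode.ALL;
    }

    @Nullable
    @Override
    protected SnapshotReader configure(SnapshotReader snapshotReader) {
        Long latest = snapshotManager.latestSnapshotId();
        if (latest == null) {
            // The base class maps null to NoSnapshot (scan) or an empty list (scanPartitions).
            return null;
        }
        return snapshotReader.withMode(ScanMode.ALL).withSnapshot(latest);
    }
}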
@@ -103,12 +103,6 @@ public interface SnapshotReader {

List<BucketEntry> bucketEntries();

/**
* Short path of count all records, this is not work good for primary key table and deletion
* vectors table.
*/
long rowCount();

/** Result plan of this scan. */
interface Plan extends TableScan.Plan {
