diff --git a/.github/workflows/pr-build-and-test.yml b/.github/workflows/pr-build-and-test.yml
index 659c812e4..b811b09f7 100644
--- a/.github/workflows/pr-build-and-test.yml
+++ b/.github/workflows/pr-build-and-test.yml
@@ -28,7 +28,7 @@ jobs:
java-version: 17
- name: Build and Verify
- run: mvn --no-transfer-progress --batch-mode verify -Dcheckstyle.skip -Dspotless.check.skip
+ run: mvn --no-transfer-progress --batch-mode verify
- name: package surefire test results
if: failure()
diff --git a/README.md b/README.md
index 623bd9b16..9a1c83634 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,9 @@
# OpenMessaging Benchmark Framework
+
[](https://github.com/openmessaging/benchmark/actions/workflows/pr-build-and-test.yml)
[](https://www.apache.org/licenses/LICENSE-2.0.html)
-**Notice:** We do not consider or plan to release any unilateral test results based on this standard. For reference, you can purchase server tests on the cloud by yourself.
+**Notice:** We do not consider or plan to release any unilateral test results based on this standard. For reference, you can purchase server tests on the cloud by yourself.
This repository houses user-friendly, cloud-ready benchmarking suites for the following messaging platforms:
@@ -14,7 +15,7 @@ This repository houses user-friendly, cloud-ready benchmarking suites for the fo
* Generic [JMS](https://javaee.github.io/jms-spec/)
* [KoP (Kafka-on-Pulsar)](https://github.com/streamnative/kop)
* [NATS Streaming](https://nats.io/)
-* [NSQ](https://nsq.io)
+* [NSQ](https://nsq.io)
* [Pravega](https://pravega.io/)
* [RabbitMQ](https://www.rabbitmq.com/)
* [Redis](https://redis.com/)
diff --git a/benchmark-framework/pom.xml b/benchmark-framework/pom.xml
index 997a5ab45..1c6b2b8bf 100644
--- a/benchmark-framework/pom.xml
+++ b/benchmark-framework/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
- benchmark-framework
+ benchmark-framework
-
- 9.4.42.v20210604
-
+
+ 9.4.42.v20210604
+
-
-
- ${project.groupId}
- driver-api
- ${project.version}
-
-
- org.hdrhistogram
- HdrHistogram
- 2.1.10
-
-
- com.beust
- jcommander
-
-
- ${project.groupId}
- driver-pulsar
- ${project.version}
-
-
- ${project.groupId}
- driver-jms
- ${project.version}
-
-
- ${project.groupId}
- driver-kafka
- ${project.version}
-
-
- ${project.groupId}
- driver-kop
- ${project.version}
-
-
- ${project.groupId}
- driver-pravega
- ${project.version}
-
-
- ${project.groupId}
- driver-rocketmq
- ${project.version}
-
-
- ${project.groupId}
- driver-rabbitmq
- ${project.version}
-
-
- ${project.groupId}
- driver-artemis
- ${project.version}
-
-
- ${project.groupId}
- driver-bookkeeper
- ${project.version}
-
-
- ${project.groupId}
- driver-nats
- ${project.version}
-
-
- ${project.groupId}
- driver-nats-streaming
- ${project.version}
-
-
- ${project.groupId}
- driver-nsq
- ${project.version}
-
-
- ${project.groupId}
- driver-redis
- ${project.version}
-
-
- org.apache.bookkeeper.stats
- prometheus-metrics-provider
- ${bookkeeper.version}
-
-
- org.apache.logging.log4j
- log4j-slf4j-impl
-
-
+
+
+ ${project.groupId}
+ driver-api
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-artemis
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-bookkeeper
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-jms
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-kafka
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-kop
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-nats
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-nats-streaming
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-nsq
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-pravega
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-pulsar
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-rabbitmq
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-redis
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-rocketmq
+ ${project.version}
+
+
+ com.beust
+ jcommander
+
+ io.javalinjavalin1.3.0
-
- org.asynchttpclient
- async-http-client
- 2.12.3
-
-
-
- org.eclipse.jetty
- jetty-server
- ${jetty.version}
-
-
- org.eclipse.jetty
- jetty-util
- ${jetty.version}
-
-
- org.assertj
- assertj-core
-
-
- org.junit.jupiter
- junit-jupiter
-
-
- org.mockito
- mockito-junit-jupiter
-
-
+
+ org.apache.bookkeeper.stats
+ prometheus-metrics-provider
+ ${bookkeeper.version}
+
+
+ org.apache.logging.log4j
+ log4j-slf4j-impl
+
+
+ org.asynchttpclient
+ async-http-client
+ 2.12.3
+
+
+ org.asynchttpclient
+ async-http-client
+ 2.12.3
+
+
+
+ org.eclipse.jetty
+ jetty-server
+ ${jetty.version}
+
+
+ org.eclipse.jetty
+ jetty-util
+ ${jetty.version}
+
+
+ org.hdrhistogram
+ HdrHistogram
+ 2.1.10
+
+
+ org.assertj
+ assertj-core
+ test
+
+
+ org.junit.jupiter
+ junit-jupiter
+ test
+
+
+ org.mockito
+ mockito-junit-jupiter
+ test
+
+
-
-
-
- org.apache.maven.plugins
- maven-dependency-plugin
- 2.10
-
-
- build-classpath
- generate-sources
-
- build-classpath
-
-
- target/classpath.txt
-
-
-
-
-
-
+
+
+
+ org.apache.maven.plugins
+ maven-dependency-plugin
+ 2.10
+
+
+ build-classpath
+
+ build-classpath
+
+ generate-sources
+
+ target/classpath.txt
+
+
+
+
+
+
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/Benchmark.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/Benchmark.java
index 8d0728098..25c5c5a9f 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/Benchmark.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/Benchmark.java
@@ -13,16 +13,6 @@
*/
package io.openmessaging.benchmark;
-import java.io.File;
-import java.text.DateFormat;
-import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
@@ -31,40 +21,62 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
-
import io.openmessaging.benchmark.worker.DistributedWorkersEnsemble;
import io.openmessaging.benchmark.worker.LocalWorker;
import io.openmessaging.benchmark.worker.Worker;
+import java.io.File;
+import java.text.DateFormat;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class Benchmark {
static class Arguments {
- @Parameter(names = {"-c", "--csv"}, description = "Print results from this directory to a csv file")
+ @Parameter(
+ names = {"-c", "--csv"},
+ description = "Print results from this directory to a csv file")
String resultsDir;
- @Parameter(names = { "-h", "--help" }, description = "Help message", help = true)
+ @Parameter(
+ names = {"-h", "--help"},
+ description = "Help message",
+ help = true)
boolean help;
- @Parameter(names = { "-d",
- "--drivers" }, description = "Drivers list. eg.: pulsar/pulsar.yaml,kafka/kafka.yaml")//, required = true)
+ @Parameter(
+ names = {"-d", "--drivers"},
+ description =
+ "Drivers list. eg.: pulsar/pulsar.yaml,kafka/kafka.yaml") // , required = true)
public List drivers;
- @Parameter(names = { "-w",
- "--workers" }, description = "List of worker nodes. eg: http://1.2.3.4:8080,http://4.5.6.7:8080")
+ @Parameter(
+ names = {"-w", "--workers"},
+ description = "List of worker nodes. eg: http://1.2.3.4:8080,http://4.5.6.7:8080")
public List workers;
- @Parameter(names = { "-wf",
- "--workers-file" }, description = "Path to a YAML file containing the list of workers addresses")
+ @Parameter(
+ names = {"-wf", "--workers-file"},
+ description = "Path to a YAML file containing the list of workers addresses")
public File workersFile;
- @Parameter(names = { "-x", "--extra" }, description = "Allocate extra consumer workers when your backlog builds.")
+ @Parameter(
+ names = {"-x", "--extra"},
+ description = "Allocate extra consumer workers when your backlog builds.")
boolean extraConsumers;
- @Parameter(description = "Workloads")//, required = true)
+ @Parameter(description = "Workloads") // , required = true)
public List workloads;
- @Parameter(names = { "-o", "--output" }, description = "Output", required = false)
+ @Parameter(
+ names = {"-o", "--output"},
+ description = "Output",
+ required = false)
public String output;
}
@@ -86,7 +98,7 @@ public static void main(String[] args) throws Exception {
System.exit(-1);
}
- if(arguments.resultsDir != null) {
+ if (arguments.resultsDir != null) {
ResultsToCsv r = new ResultsToCsv();
r.writeAllResultFiles(arguments.resultsDir);
System.exit(0);
@@ -132,51 +144,67 @@ public static void main(String[] args) throws Exception {
worker = new LocalWorker();
}
- workloads.forEach((workloadName, workload) -> {
- arguments.drivers.forEach(driverConfig -> {
- try {
- DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
- File driverConfigFile = new File(driverConfig);
- DriverConfiguration driverConfiguration = mapper.readValue(driverConfigFile,
- DriverConfiguration.class);
- log.info("--------------- WORKLOAD : {} --- DRIVER : {}---------------", workload.name,
- driverConfiguration.name);
-
- // Stop any left over workload
- worker.stopAll();
-
- worker.initializeDriver(new File(driverConfig));
-
- WorkloadGenerator generator = new WorkloadGenerator(driverConfiguration.name, workload, worker);
-
- TestResult result = generator.run();
-
- boolean useOutput = (arguments.output != null) && (arguments.output.length() > 0);
-
- String fileName = useOutput? arguments.output: String.format("%s-%s-%s.json", workloadName,
- driverConfiguration.name, dateFormat.format(new Date()));
-
- log.info("Writing test result into {}/{}", workloadName, fileName);
- File folder = new File(workloadName);
- if (!folder.mkdirs()) {
- log.debug("Unable to create folder {}", folder);
- }
- writer.writeValue(new File(folder, fileName), result);
-
- generator.close();
- } catch (Exception e) {
- log.error("Failed to run the workload '{}' for driver '{}'", workload.name, driverConfig, e);
- } finally {
- worker.stopAll();
- }
- });
- });
+ workloads.forEach(
+ (workloadName, workload) -> {
+ arguments.drivers.forEach(
+ driverConfig -> {
+ try {
+ DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss");
+ File driverConfigFile = new File(driverConfig);
+ DriverConfiguration driverConfiguration =
+ mapper.readValue(driverConfigFile, DriverConfiguration.class);
+ log.info(
+ "--------------- WORKLOAD : {} --- DRIVER : {}---------------",
+ workload.name,
+ driverConfiguration.name);
+
+ // Stop any left over workload
+ worker.stopAll();
+
+ worker.initializeDriver(new File(driverConfig));
+
+ WorkloadGenerator generator =
+ new WorkloadGenerator(driverConfiguration.name, workload, worker);
+
+ TestResult result = generator.run();
+
+ boolean useOutput = (arguments.output != null) && (arguments.output.length() > 0);
+
+ String fileName =
+ useOutput
+ ? arguments.output
+ : String.format(
+ "%s-%s-%s.json",
+ workloadName,
+ driverConfiguration.name,
+ dateFormat.format(new Date()));
+
+ log.info("Writing test result into {}/{}", workloadName, fileName);
+ File folder = new File(workloadName);
+ if (!folder.mkdirs()) {
+ log.debug("Unable to create folder {}", folder);
+ }
+ writer.writeValue(new File(folder, fileName), result);
+
+ generator.close();
+ } catch (Exception e) {
+ log.error(
+ "Failed to run the workload '{}' for driver '{}'",
+ workload.name,
+ driverConfig,
+ e);
+ } finally {
+ worker.stopAll();
+ }
+ });
+ });
worker.close();
}
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
static {
mapper.enable(DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_USING_DEFAULT_VALUE);
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/DriverConfiguration.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/DriverConfiguration.java
index e220cfa2b..8e6c858a3 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/DriverConfiguration.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/DriverConfiguration.java
@@ -17,5 +17,4 @@ public class DriverConfiguration {
public String name;
public String driverClass;
-
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/ResultsToCsv.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/ResultsToCsv.java
index c1133a793..9ee84ece3 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/ResultsToCsv.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/ResultsToCsv.java
@@ -14,13 +14,11 @@
package io.openmessaging.benchmark;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import org.HdrHistogram.Histogram;
-import org.bouncycastle.util.test.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import java.io.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
import java.text.MessageFormat;
import java.time.Instant;
import java.util.ArrayList;
@@ -28,6 +26,9 @@
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;
+import org.HdrHistogram.Histogram;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class ResultsToCsv {
@@ -43,24 +44,29 @@ public void writeAllResultFiles(String directory) {
Arrays.sort(directoryListing);
List lines = new ArrayList<>();
- lines.add("topics,partitions,message-size,producers-per-topic,consumers-per-topic," +
- "prod-rate-min,prod-rate-avg,prod-rate-std-dev,prod-rate-max," +
- "con-rate-min,con-rate-avg,con-rate-std-dev,con-rate-max,");
+ lines.add(
+ "topics,partitions,message-size,producers-per-topic,consumers-per-topic,"
+ + "prod-rate-min,prod-rate-avg,prod-rate-std-dev,prod-rate-max,"
+ + "con-rate-min,con-rate-avg,con-rate-std-dev,con-rate-max,");
List results = new ArrayList<>();
for (File file : directoryListing) {
if (file.isFile() && file.getAbsolutePath().endsWith(".json")) {
ObjectMapper objectMapper = new ObjectMapper();
- TestResult tr = objectMapper.readValue(new File(file.getAbsolutePath()), TestResult.class);
+ TestResult tr =
+ objectMapper.readValue(new File(file.getAbsolutePath()), TestResult.class);
results.add(tr);
}
}
- List sortedResults = results.stream().sorted(
- Comparator.comparing(TestResult::getMessageSize)
- .thenComparing(TestResult::getTopics)
- .thenComparing(TestResult::getPartitions)).collect(Collectors.toList());
- for(TestResult tr : sortedResults) {
+ List sortedResults =
+ results.stream()
+ .sorted(
+ Comparator.comparing(TestResult::getMessageSize)
+ .thenComparing(TestResult::getTopics)
+ .thenComparing(TestResult::getPartitions))
+ .collect(Collectors.toList());
+ for (TestResult tr : sortedResults) {
lines.add(extractResults(tr));
}
@@ -71,8 +77,7 @@ public void writeAllResultFiles(String directory) {
}
log.info("Results extracted into CSV " + resultsFileName);
}
- }
- catch(IOException e) {
+ } catch (IOException e) {
log.error("Failed creating csv file.", e);
}
}
@@ -82,38 +87,37 @@ public String extractResults(TestResult tr) {
Histogram prodRateHistogram = new Histogram(10000000, 1);
Histogram conRateHistogram = new Histogram(10000000, 1);
- for(Double rate : tr.publishRate) {
+ for (Double rate : tr.publishRate) {
prodRateHistogram.recordValueWithCount(rate.longValue(), 2);
}
- for(Double rate : tr.consumeRate) {
+ for (Double rate : tr.consumeRate) {
conRateHistogram.recordValueWithCount(rate.longValue(), 2);
}
- String line = MessageFormat.format("{0,number,#},{1,number,#},{2,number,#},{3,number,#},{4,number,#}," +
- "{5,number,#},{6,number,#},{7,number,#.##},{8,number,#}," +
- "{9,number,#},{10,number,#},{11,number,#.##},{12,number,#}",
- tr.topics,
- tr.partitions,
- tr.messageSize,
- tr.producersPerTopic,
- tr.consumersPerTopic,
- prodRateHistogram.getMinNonZeroValue(),
- prodRateHistogram.getMean(),
- prodRateHistogram.getStdDeviation(),
- prodRateHistogram.getMaxValue(),
- conRateHistogram.getMinNonZeroValue(),
- conRateHistogram.getMean(),
- conRateHistogram.getStdDeviation(),
- conRateHistogram.getMaxValue());
+ String line =
+ MessageFormat.format(
+ "{0,number,#},{1,number,#},{2,number,#},{3,number,#},{4,number,#},"
+ + "{5,number,#},{6,number,#},{7,number,#.##},{8,number,#},"
+ + "{9,number,#},{10,number,#},{11,number,#.##},{12,number,#}",
+ tr.topics,
+ tr.partitions,
+ tr.messageSize,
+ tr.producersPerTopic,
+ tr.consumersPerTopic,
+ prodRateHistogram.getMinNonZeroValue(),
+ prodRateHistogram.getMean(),
+ prodRateHistogram.getStdDeviation(),
+ prodRateHistogram.getMaxValue(),
+ conRateHistogram.getMinNonZeroValue(),
+ conRateHistogram.getMean(),
+ conRateHistogram.getStdDeviation(),
+ conRateHistogram.getMaxValue());
return line;
- }
- catch(Exception e) {
+ } catch (Exception e) {
log.error("Error writing results csv", e);
throw new RuntimeException(e);
}
}
-
-
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/TestResult.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/TestResult.java
index 88a11c0e2..fc9781b4d 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/TestResult.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/TestResult.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark;
+
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/Workers.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/Workers.java
index 52da2c530..fbc16e613 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/Workers.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/Workers.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark;
+
import java.util.ArrayList;
import java.util.List;
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/Workload.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/Workload.java
index 759eb09af..c437b187e 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/Workload.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/Workload.java
@@ -13,15 +13,16 @@
*/
package io.openmessaging.benchmark;
+
import io.openmessaging.benchmark.utils.distributor.KeyDistributorType;
public class Workload {
public String name;
- /** Number of topics to create in the test */
+ /** Number of topics to create in the test. */
public int topics;
- /** Number of partitions each topic will contain */
+ /** Number of partitions each topic will contain. */
public int partitionsPerTopic;
public KeyDistributorType keyDistributor = KeyDistributorType.NO_KEY;
@@ -43,11 +44,11 @@ public class Workload {
public int producerRate;
/**
- * If the consumer backlog is > 0, the generator will accumulate messages until the requested amount of storage is
- * retained and then it will start the consumers to drain it.
+ * If the consumer backlog is > 0, the generator will accumulate messages until the requested
+ * amount of storage is retained and then it will start the consumers to drain it.
*
- * The testDurationMinutes will be overruled to allow the test to complete when the consumer has drained all the
- * backlog and it's on par with the producer
+ *
The testDurationMinutes will be overruled to allow the test to complete when the consumer
+ * has drained all the backlog and it's on par with the producer
*/
public long consumerBacklogSizeGB = 0;
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/WorkloadGenerator.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/WorkloadGenerator.java
index 122a4efb3..6905fdf22 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/WorkloadGenerator.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/WorkloadGenerator.java
@@ -13,23 +13,10 @@
*/
package io.openmessaging.benchmark;
-import io.openmessaging.benchmark.utils.RandomGenerator;
-import java.io.IOException;
-import java.text.DecimalFormat;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.lang.ArrayUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import io.netty.util.concurrent.DefaultThreadFactory;
import io.openmessaging.benchmark.utils.PaddingDecimalFormat;
+import io.openmessaging.benchmark.utils.RandomGenerator;
import io.openmessaging.benchmark.utils.Timer;
import io.openmessaging.benchmark.utils.payload.FilePayloadReader;
import io.openmessaging.benchmark.utils.payload.PayloadReader;
@@ -41,6 +28,18 @@
import io.openmessaging.benchmark.worker.commands.ProducerWorkAssignment;
import io.openmessaging.benchmark.worker.commands.TopicSubscription;
import io.openmessaging.benchmark.worker.commands.TopicsInfo;
+import java.io.IOException;
+import java.text.DecimalFormat;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import org.apache.commons.lang.ArrayUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class WorkloadGenerator implements AutoCloseable {
@@ -48,8 +47,8 @@ public class WorkloadGenerator implements AutoCloseable {
private final Workload workload;
private final Worker worker;
- private final ExecutorService executor = Executors
- .newCachedThreadPool(new DefaultThreadFactory("messaging-benchmark"));
+ private final ExecutorService executor =
+ Executors.newCachedThreadPool(new DefaultThreadFactory("messaging-benchmark"));
private volatile boolean runCompleted = false;
private volatile boolean needToWaitForBacklogDraining = false;
@@ -62,13 +61,15 @@ public WorkloadGenerator(String driverName, Workload workload, Worker worker) {
this.worker = worker;
if (workload.consumerBacklogSizeGB > 0 && workload.producerRate == 0) {
- throw new IllegalArgumentException("Cannot probe producer sustainable rate when building backlog");
+ throw new IllegalArgumentException(
+ "Cannot probe producer sustainable rate when building backlog");
}
}
public TestResult run() throws Exception {
Timer timer = new Timer();
- List topics = worker.createTopics(new TopicsInfo(workload.topics, workload.partitionsPerTopic));
+ List topics =
+ worker.createTopics(new TopicsInfo(workload.topics, workload.partitionsPerTopic));
log.info("Created {} topics in {} ms", topics.size(), timer.elapsedMillis());
createConsumers(topics);
@@ -82,14 +83,15 @@ public TestResult run() throws Exception {
// Producer rate is 0 and we need to discover the sustainable rate
targetPublishRate = 10000;
- executor.execute(() -> {
- // Run background controller to adjust rate
- try {
- findMaximumSustainableRate(targetPublishRate);
- } catch (IOException e) {
- log.warn("Failure in finding max sustainable rate", e);
- }
- });
+ executor.execute(
+ () -> {
+ // Run background controller to adjust rate
+ try {
+ findMaximumSustainableRate(targetPublishRate);
+ } catch (IOException e) {
+ log.warn("Failure in finding max sustainable rate", e);
+ }
+ });
}
final PayloadReader payloadReader = new FilePayloadReader(workload.messageSize);
@@ -99,21 +101,20 @@ public TestResult run() throws Exception {
producerWorkAssignment.publishRate = targetPublishRate;
producerWorkAssignment.payloadData = new ArrayList<>();
- if(workload.useRandomizedPayloads) {
+ if (workload.useRandomizedPayloads) {
// create messages that are part random and part zeros
// better for testing effects of compression
Random r = new Random();
- int randomBytes = (int)(workload.messageSize * workload.randomBytesRatio);
+ int randomBytes = (int) (workload.messageSize * workload.randomBytesRatio);
int zerodBytes = workload.messageSize - randomBytes;
- for(int i = 0; i 0) {
- executor.execute(() -> {
- try {
- buildAndDrainBacklog(topics);
- } catch (IOException e) {
- e.printStackTrace();
- }
- });
+ executor.execute(
+ () -> {
+ try {
+ buildAndDrainBacklog(topics);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ });
}
worker.resetStats();
@@ -146,7 +148,8 @@ public TestResult run() throws Exception {
private void ensureTopicsAreReady() throws IOException {
log.info("Waiting for consumers to be ready");
- // This is work around the fact that there's no way to have a consumer ready in Kafka without first publishing
+ // This is work around the fact that there's no way to have a consumer ready in Kafka without
+ // first publishing
// some message on the topic, which will then trigger the partitions assignment to the consumers
int expectedMessages = workload.topics * workload.subscriptionsPerTopic;
@@ -154,12 +157,15 @@ private void ensureTopicsAreReady() throws IOException {
// In this case we just publish 1 message and then wait for consumers to receive the data
worker.probeProducers();
- long start = System.currentTimeMillis();
+ long start = System.currentTimeMillis();
long end = start + 60 * 1000;
while (System.currentTimeMillis() < end) {
CountersStats stats = worker.getCountersStats();
- log.info("Waiting for topics to be ready -- Sent: {}, Received: {}", stats.messagesSent, stats.messagesReceived);
+ log.info(
+ "Waiting for topics to be ready -- Sent: {}, Received: {}",
+ stats.messagesSent,
+ stats.messagesReceived);
if (stats.messagesReceived < expectedMessages) {
try {
Thread.sleep(2_000);
@@ -173,16 +179,18 @@ private void ensureTopicsAreReady() throws IOException {
if (System.currentTimeMillis() >= end) {
throw new RuntimeException("Timed out waiting for consumers to be ready");
- }
- else {
+ } else {
log.info("All consumers are ready");
- }
+ }
}
/**
- * Adjust the publish rate to a level that is sustainable, meaning that we can consume all the messages that are
- * being produced
+ * Adjust the publish rate to a level that is sustainable, meaning that we can consume all the
+ * messages that are being produced.
+ *
+ * @param currentRate
*/
+ @SuppressWarnings("checkstyle:LineLength")
private void findMaximumSustainableRate(double currentRate) throws IOException {
double maxRate = Double.MAX_VALUE; // Discovered max sustainable rate
double minRate = 0.1;
@@ -212,16 +220,24 @@ private void findMaximumSustainableRate(double currentRate) throws IOException {
long totalMessagesReceived = stats.messagesReceived;
long messagesPublishedInPeriod = totalMessagesSent - localTotalMessagesSentCounter;
long messagesReceivedInPeriod = totalMessagesReceived - localTotalMessagesReceivedCounter;
- double publishRateInLastPeriod = messagesPublishedInPeriod / (double) (currentTime - lastControlTimestamp)
- * TimeUnit.SECONDS.toNanos(1);
- double receiveRateInLastPeriod = messagesReceivedInPeriod / (double) (currentTime - lastControlTimestamp)
- * TimeUnit.SECONDS.toNanos(1);
+ double publishRateInLastPeriod =
+ messagesPublishedInPeriod
+ / (double) (currentTime - lastControlTimestamp)
+ * TimeUnit.SECONDS.toNanos(1);
+ double receiveRateInLastPeriod =
+ messagesReceivedInPeriod
+ / (double) (currentTime - lastControlTimestamp)
+ * TimeUnit.SECONDS.toNanos(1);
if (log.isDebugEnabled()) {
log.debug(
"total-send: {} -- total-received: {} -- int-sent: {} -- int-received: {} -- sent-rate: {} -- received-rate: {}",
- totalMessagesSent, totalMessagesReceived, messagesPublishedInPeriod, messagesReceivedInPeriod,
- publishRateInLastPeriod, receiveRateInLastPeriod);
+ totalMessagesSent,
+ totalMessagesReceived,
+ messagesPublishedInPeriod,
+ messagesReceivedInPeriod,
+ publishRateInLastPeriod,
+ receiveRateInLastPeriod);
}
localTotalMessagesSentCounter = totalMessagesSent;
@@ -229,9 +245,13 @@ private void findMaximumSustainableRate(double currentRate) throws IOException {
lastControlTimestamp = currentTime;
if (log.isDebugEnabled()) {
- log.debug("Current rate: {} -- Publish rate {} -- Consume Rate: {} -- min-rate: {} -- max-rate: {}",
- dec.format(currentRate), dec.format(publishRateInLastPeriod),
- dec.format(receiveRateInLastPeriod), dec.format(minRate), dec.format(maxRate));
+ log.debug(
+ "Current rate: {} -- Publish rate {} -- Consume Rate: {} -- min-rate: {} -- max-rate: {}",
+ dec.format(currentRate),
+ dec.format(publishRateInLastPeriod),
+ dec.format(receiveRateInLastPeriod),
+ dec.format(minRate),
+ dec.format(maxRate));
}
if (publishRateInLastPeriod < currentRate * 0.95) {
@@ -250,7 +270,8 @@ private void findMaximumSustainableRate(double currentRate) throws IOException {
worker.adjustPublishRate(minRate / 10);
while (true) {
stats = worker.getCountersStats();
- long backlog = workload.subscriptionsPerTopic * stats.messagesSent - stats.messagesReceived;
+ long backlog =
+ workload.subscriptionsPerTopic * stats.messagesSent - stats.messagesReceived;
if (backlog < 1000) {
break;
}
@@ -299,12 +320,13 @@ public void close() throws Exception {
private void createConsumers(List topics) throws IOException {
ConsumerAssignment consumerAssignment = new ConsumerAssignment();
- for(String topic: topics){
- for(int i = 0; i < workload.subscriptionsPerTopic; i++){
- String subscriptionName = String.format("sub-%03d-%s", i, RandomGenerator.getRandomString());
+ for (String topic : topics) {
+ for (int i = 0; i < workload.subscriptionsPerTopic; i++) {
+ String subscriptionName =
+ String.format("sub-%03d-%s", i, RandomGenerator.getRandomString());
for (int j = 0; j < workload.consumerPerSubscription; j++) {
- consumerAssignment.topicsSubscriptions
- .add(new TopicSubscription(topic, subscriptionName));
+ consumerAssignment.topicsSubscriptions.add(
+ new TopicSubscription(topic, subscriptionName));
}
}
}
@@ -314,7 +336,10 @@ private void createConsumers(List topics) throws IOException {
Timer timer = new Timer();
worker.createConsumers(consumerAssignment);
- log.info("Created {} consumers in {} ms", consumerAssignment.topicsSubscriptions.size(), timer.elapsedMillis());
+ log.info(
+ "Created {} consumers in {} ms",
+ consumerAssignment.topicsSubscriptions.size(),
+ timer.elapsedMillis());
}
private void createProducers(List topics) throws IOException {
@@ -344,8 +369,9 @@ private void buildAndDrainBacklog(List topics) throws IOException {
while (true) {
CountersStats stats = worker.getCountersStats();
- long currentBacklogSize = (workload.subscriptionsPerTopic * stats.messagesSent - stats.messagesReceived)
- * workload.messageSize;
+ long currentBacklogSize =
+ (workload.subscriptionsPerTopic * stats.messagesSent - stats.messagesReceived)
+ * workload.messageSize;
if (currentBacklogSize >= requestedBacklogSize) {
break;
@@ -368,7 +394,8 @@ private void buildAndDrainBacklog(List topics) throws IOException {
while (true) {
CountersStats stats = worker.getCountersStats();
- long currentBacklog = workload.subscriptionsPerTopic * stats.messagesSent - stats.messagesReceived;
+ long currentBacklog =
+ workload.subscriptionsPerTopic * stats.messagesSent - stats.messagesReceived;
if (currentBacklog <= minBacklog) {
log.info("--- Completed backlog draining in {} s ---", timer.elapsedSeconds());
needToWaitForBacklogDraining = false;
@@ -383,6 +410,7 @@ private void buildAndDrainBacklog(List topics) throws IOException {
}
}
+ @SuppressWarnings({"checkstyle:LineLength", "checkstyle:MethodLength"})
private TestResult printAndCollectStats(long testDurations, TimeUnit unit) throws IOException {
long startTime = System.nanoTime();
@@ -419,14 +447,19 @@ private TestResult printAndCollectStats(long testDurations, TimeUnit unit) throw
double consumeRate = stats.messagesReceived / elapsed;
double consumeThroughput = stats.bytesReceived / elapsed / 1024 / 1024;
- long currentBacklog = Math.max(0L, workload.subscriptionsPerTopic * stats.totalMessagesSent
- - stats.totalMessagesReceived);
+ long currentBacklog =
+ Math.max(
+ 0L,
+ workload.subscriptionsPerTopic * stats.totalMessagesSent
+ - stats.totalMessagesReceived);
log.info(
"Pub rate {} msg/s / {} MB/s | Pub err {} err/s | Cons rate {} msg/s / {} MB/s | Backlog: {} K | Pub Latency (ms) avg: {} - 50%: {} - 99%: {} - 99.9%: {} - Max: {} | Pub Delay Latency (us) avg: {} - 50%: {} - 99%: {} - 99.9%: {} - Max: {}",
- rateFormat.format(publishRate), throughputFormat.format(publishThroughput),
+ rateFormat.format(publishRate),
+ throughputFormat.format(publishThroughput),
rateFormat.format(errorRate),
- rateFormat.format(consumeRate), throughputFormat.format(consumeThroughput),
+ rateFormat.format(consumeRate),
+ throughputFormat.format(consumeThroughput),
dec.format(currentBacklog / 1000.0), //
dec.format(microsToMillis(stats.publishLatency.getMean())),
dec.format(microsToMillis(stats.publishLatency.getValueAtPercentile(50))),
@@ -448,8 +481,10 @@ private TestResult printAndCollectStats(long testDurations, TimeUnit unit) throw
result.publishLatency75pct.add(microsToMillis(stats.publishLatency.getValueAtPercentile(75)));
result.publishLatency95pct.add(microsToMillis(stats.publishLatency.getValueAtPercentile(95)));
result.publishLatency99pct.add(microsToMillis(stats.publishLatency.getValueAtPercentile(99)));
- result.publishLatency999pct.add(microsToMillis(stats.publishLatency.getValueAtPercentile(99.9)));
- result.publishLatency9999pct.add(microsToMillis(stats.publishLatency.getValueAtPercentile(99.99)));
+ result.publishLatency999pct.add(
+ microsToMillis(stats.publishLatency.getValueAtPercentile(99.9)));
+ result.publishLatency9999pct.add(
+ microsToMillis(stats.publishLatency.getValueAtPercentile(99.99)));
result.publishLatencyMax.add(microsToMillis(stats.publishLatency.getMaxValue()));
result.publishDelayLatencyAvg.add(stats.publishDelayLatency.getMean());
@@ -461,14 +496,19 @@ private TestResult printAndCollectStats(long testDurations, TimeUnit unit) throw
result.publishDelayLatency9999pct.add(stats.publishDelayLatency.getValueAtPercentile(99.99));
result.publishDelayLatencyMax.add(stats.publishDelayLatency.getMaxValue());
-
result.endToEndLatencyAvg.add(microsToMillis(stats.endToEndLatency.getMean()));
- result.endToEndLatency50pct.add(microsToMillis(stats.endToEndLatency.getValueAtPercentile(50)));
- result.endToEndLatency75pct.add(microsToMillis(stats.endToEndLatency.getValueAtPercentile(75)));
- result.endToEndLatency95pct.add(microsToMillis(stats.endToEndLatency.getValueAtPercentile(95)));
- result.endToEndLatency99pct.add(microsToMillis(stats.endToEndLatency.getValueAtPercentile(99)));
- result.endToEndLatency999pct.add(microsToMillis(stats.endToEndLatency.getValueAtPercentile(99.9)));
- result.endToEndLatency9999pct.add(microsToMillis(stats.endToEndLatency.getValueAtPercentile(99.99)));
+ result.endToEndLatency50pct.add(
+ microsToMillis(stats.endToEndLatency.getValueAtPercentile(50)));
+ result.endToEndLatency75pct.add(
+ microsToMillis(stats.endToEndLatency.getValueAtPercentile(75)));
+ result.endToEndLatency95pct.add(
+ microsToMillis(stats.endToEndLatency.getValueAtPercentile(95)));
+ result.endToEndLatency99pct.add(
+ microsToMillis(stats.endToEndLatency.getValueAtPercentile(99)));
+ result.endToEndLatency999pct.add(
+ microsToMillis(stats.endToEndLatency.getValueAtPercentile(99.9)));
+ result.endToEndLatency9999pct.add(
+ microsToMillis(stats.endToEndLatency.getValueAtPercentile(99.99)));
result.endToEndLatencyMax.add(microsToMillis(stats.endToEndLatency.getMaxValue()));
if (now >= testEndTime && !needToWaitForBacklogDraining) {
@@ -495,42 +535,65 @@ private TestResult printAndCollectStats(long testDurations, TimeUnit unit) throw
result.aggregatedPublishLatency75pct = agg.publishLatency.getValueAtPercentile(75) / 1000.0;
result.aggregatedPublishLatency95pct = agg.publishLatency.getValueAtPercentile(95) / 1000.0;
result.aggregatedPublishLatency99pct = agg.publishLatency.getValueAtPercentile(99) / 1000.0;
- result.aggregatedPublishLatency999pct = agg.publishLatency.getValueAtPercentile(99.9) / 1000.0;
- result.aggregatedPublishLatency9999pct = agg.publishLatency.getValueAtPercentile(99.99) / 1000.0;
+ result.aggregatedPublishLatency999pct =
+ agg.publishLatency.getValueAtPercentile(99.9) / 1000.0;
+ result.aggregatedPublishLatency9999pct =
+ agg.publishLatency.getValueAtPercentile(99.99) / 1000.0;
result.aggregatedPublishLatencyMax = agg.publishLatency.getMaxValue() / 1000.0;
result.aggregatedPublishDelayLatencyAvg = agg.publishDelayLatency.getMean();
- result.aggregatedPublishDelayLatency50pct = agg.publishDelayLatency.getValueAtPercentile(50);
- result.aggregatedPublishDelayLatency75pct = agg.publishDelayLatency.getValueAtPercentile(75);
- result.aggregatedPublishDelayLatency95pct = agg.publishDelayLatency.getValueAtPercentile(95);
- result.aggregatedPublishDelayLatency99pct = agg.publishDelayLatency.getValueAtPercentile(99);
- result.aggregatedPublishDelayLatency999pct = agg.publishDelayLatency.getValueAtPercentile(99.9);
- result.aggregatedPublishDelayLatency9999pct = agg.publishDelayLatency.getValueAtPercentile(99.99);
+ result.aggregatedPublishDelayLatency50pct =
+ agg.publishDelayLatency.getValueAtPercentile(50);
+ result.aggregatedPublishDelayLatency75pct =
+ agg.publishDelayLatency.getValueAtPercentile(75);
+ result.aggregatedPublishDelayLatency95pct =
+ agg.publishDelayLatency.getValueAtPercentile(95);
+ result.aggregatedPublishDelayLatency99pct =
+ agg.publishDelayLatency.getValueAtPercentile(99);
+ result.aggregatedPublishDelayLatency999pct =
+ agg.publishDelayLatency.getValueAtPercentile(99.9);
+ result.aggregatedPublishDelayLatency9999pct =
+ agg.publishDelayLatency.getValueAtPercentile(99.99);
result.aggregatedPublishDelayLatencyMax = agg.publishDelayLatency.getMaxValue();
- result.aggregatedEndToEndLatencyAvg = agg.endToEndLatency.getMean() / 1000.0;
- result.aggregatedEndToEndLatency50pct = agg.endToEndLatency.getValueAtPercentile(50) / 1000.0;
- result.aggregatedEndToEndLatency75pct = agg.endToEndLatency.getValueAtPercentile(75) / 1000.0;
- result.aggregatedEndToEndLatency95pct = agg.endToEndLatency.getValueAtPercentile(95) / 1000.0;
- result.aggregatedEndToEndLatency99pct = agg.endToEndLatency.getValueAtPercentile(99) / 1000.0;
- result.aggregatedEndToEndLatency999pct = agg.endToEndLatency.getValueAtPercentile(99.9) / 1000.0;
- result.aggregatedEndToEndLatency9999pct = agg.endToEndLatency.getValueAtPercentile(99.99) / 1000.0;
- result.aggregatedEndToEndLatencyMax = agg.endToEndLatency.getMaxValue() / 1000.0;
-
- agg.publishLatency.percentiles(100).forEach(value -> {
- result.aggregatedPublishLatencyQuantiles.put(value.getPercentile(),
- value.getValueIteratedTo() / 1000.0);
- });
-
- agg.publishDelayLatency.percentiles(100).forEach(value -> {
- result.aggregatedPublishDelayLatencyQuantiles.put(value.getPercentile(),
- value.getValueIteratedTo());
- });
-
- agg.endToEndLatency.percentiles(100).forEach(value -> {
- result.aggregatedEndToEndLatencyQuantiles.put(value.getPercentile(),
- microsToMillis(value.getValueIteratedTo()));
- });
+ result.aggregatedEndToEndLatencyAvg = agg.endToEndLatency.getMean() / 1000.0;
+ result.aggregatedEndToEndLatency50pct =
+ agg.endToEndLatency.getValueAtPercentile(50) / 1000.0;
+ result.aggregatedEndToEndLatency75pct =
+ agg.endToEndLatency.getValueAtPercentile(75) / 1000.0;
+ result.aggregatedEndToEndLatency95pct =
+ agg.endToEndLatency.getValueAtPercentile(95) / 1000.0;
+ result.aggregatedEndToEndLatency99pct =
+ agg.endToEndLatency.getValueAtPercentile(99) / 1000.0;
+ result.aggregatedEndToEndLatency999pct =
+ agg.endToEndLatency.getValueAtPercentile(99.9) / 1000.0;
+ result.aggregatedEndToEndLatency9999pct =
+ agg.endToEndLatency.getValueAtPercentile(99.99) / 1000.0;
+ result.aggregatedEndToEndLatencyMax = agg.endToEndLatency.getMaxValue() / 1000.0;
+
+ agg.publishLatency
+ .percentiles(100)
+ .forEach(
+ value -> {
+ result.aggregatedPublishLatencyQuantiles.put(
+ value.getPercentile(), value.getValueIteratedTo() / 1000.0);
+ });
+
+ agg.publishDelayLatency
+ .percentiles(100)
+ .forEach(
+ value -> {
+ result.aggregatedPublishDelayLatencyQuantiles.put(
+ value.getPercentile(), value.getValueIteratedTo());
+ });
+
+ agg.endToEndLatency
+ .percentiles(100)
+ .forEach(
+ value -> {
+ result.aggregatedEndToEndLatencyQuantiles.put(
+ value.getPercentile(), microsToMillis(value.getValueIteratedTo()));
+ });
break;
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/ListPartition.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/ListPartition.java
index 2af6cfca9..01a1ae51e 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/ListPartition.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/ListPartition.java
@@ -13,18 +13,19 @@
*/
package io.openmessaging.benchmark.utils;
+
import java.util.ArrayList;
import java.util.List;
public class ListPartition {
/**
- * partition a list to specified size
+ * partition a list into sublists of the specified size.
*
* @param originList
* @param size
* @param
- * @return
+ * @return the partitioned list
*/
public static List> partitionList(List originList, int size) {
@@ -55,5 +56,4 @@ public static List> partitionList(List originList, int size) {
}
return resultList;
}
-
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/PaddingDecimalFormat.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/PaddingDecimalFormat.java
index 1d9d91f18..a598ddb2a 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/PaddingDecimalFormat.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/PaddingDecimalFormat.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.utils;
+
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
import java.text.FieldPosition;
@@ -22,8 +23,11 @@ public class PaddingDecimalFormat extends DecimalFormat {
private int minimumLength;
/**
- * Creates a PaddingDecimalFormat using the given pattern and minimum minimumLength and the symbols for the default
- * locale.
+ * Creates a PaddingDecimalFormat using the given pattern and minimum minimumLength and the
+ * symbols for the default locale.
+ *
+ * @param pattern
+ * @param minLength
*/
public PaddingDecimalFormat(String pattern, int minLength) {
super(pattern);
@@ -32,6 +36,10 @@ public PaddingDecimalFormat(String pattern, int minLength) {
/**
* Creates a PaddingDecimalFormat using the given pattern, symbols and minimum minimumLength.
+ *
+ * @param pattern
+ * @param symbols
+ * @param minLength
*/
public PaddingDecimalFormat(String pattern, DecimalFormatSymbols symbols, int minLength) {
super(pattern, symbols);
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/RandomGenerator.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/RandomGenerator.java
index 2eba74b9e..13c4f501c 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/RandomGenerator.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/RandomGenerator.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.utils;
+
import com.google.common.io.BaseEncoding;
import java.util.Random;
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/Timer.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/Timer.java
index 04144d679..11a49de44 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/Timer.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/Timer.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.utils;
+
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/UniformRateLimiter.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/UniformRateLimiter.java
index 187cbf9c2..75ae74797 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/UniformRateLimiter.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/UniformRateLimiter.java
@@ -13,7 +13,7 @@
*/
package io.openmessaging.benchmark.utils;
-import java.time.Clock;
+
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLongFieldUpdater;
import java.util.concurrent.locks.LockSupport;
@@ -46,8 +46,8 @@ public final class UniformRateLimiter {
this.opsPerSec = opsPerSec;
intervalNs = Math.round(ONE_SEC_IN_NS / opsPerSec);
this.nanoClock = nanoClock;
-
}
+
public UniformRateLimiter(final double opsPerSec) {
this(opsPerSec, () -> System.nanoTime());
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyDistributor.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyDistributor.java
index 7cf4d240f..14576c985 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyDistributor.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyDistributor.java
@@ -26,8 +26,8 @@
*/
package io.openmessaging.benchmark.utils.distributor;
-import com.google.common.io.BaseEncoding;
+import com.google.common.io.BaseEncoding;
import java.util.Random;
public abstract class KeyDistributor {
@@ -69,8 +69,9 @@ public static KeyDistributor build(KeyDistributorType keyType) {
case RANDOM_NANO:
keyDistributor = new RandomNano();
break;
+ default:
+ throw new IllegalStateException("Unexpected KeyDistributorType: " + keyType);
}
return keyDistributor;
}
-
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyDistributorType.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyDistributorType.java
index e69d41d75..3cb1cf1c0 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyDistributorType.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyDistributorType.java
@@ -26,22 +26,17 @@
*/
package io.openmessaging.benchmark.utils.distributor;
+
import com.fasterxml.jackson.annotation.JsonEnumDefaultValue;
public enum KeyDistributorType {
+ /** Key distributor that returns null keys to have default publish semantics. */
@JsonEnumDefaultValue
- /**
- * Key distributor that returns null keys to have default publish semantics
- */
NO_KEY,
- /**
- * Genarate a finite number of "keys" and cycle through them in round-robin fashion
- */
+ /** Generate a finite number of "keys" and cycle through them in round-robin fashion. */
KEY_ROUND_ROBIN,
- /**
- * Random distribution based on System.nanoTime()
- */
+ /** Random distribution based on System.nanoTime(). */
RANDOM_NANO,
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyRoundRobin.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyRoundRobin.java
index 0a3182221..bb600e2ca 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyRoundRobin.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/KeyRoundRobin.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.utils.distributor;
+
import javax.annotation.concurrent.NotThreadSafe;
@NotThreadSafe
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/RandomNano.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/RandomNano.java
index f7265c140..cedbf1b1a 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/RandomNano.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/distributor/RandomNano.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.utils.distributor;
+
import javax.annotation.concurrent.ThreadSafe;
@ThreadSafe
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/payload/FilePayloadReader.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/payload/FilePayloadReader.java
index 27cd9463f..2fe60e052 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/payload/FilePayloadReader.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/utils/payload/FilePayloadReader.java
@@ -13,12 +13,12 @@
*/
package io.openmessaging.benchmark.utils.payload;
+import static java.nio.file.Files.readAllBytes;
+
import java.io.File;
import java.io.IOException;
import java.text.MessageFormat;
-import static java.nio.file.Files.readAllBytes;
-
public class FilePayloadReader implements PayloadReader {
private final int expectedLength;
@@ -41,8 +41,10 @@ public byte[] load(String resourceName) {
private void checkPayloadLength(byte[] payload) {
if (expectedLength != payload.length) {
- throw new PayloadException(MessageFormat.format("Payload length mismatch. Actual is: {0}, but expected: {1} ",
- payload.length, expectedLength));
+ throw new PayloadException(
+ MessageFormat.format(
+ "Payload length mismatch. Actual is: {0}, but expected: {1} ",
+ payload.length, expectedLength));
}
}
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/BenchmarkWorker.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/BenchmarkWorker.java
index d6f9d52fa..8723384cf 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/BenchmarkWorker.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/BenchmarkWorker.java
@@ -13,36 +13,40 @@
*/
package io.openmessaging.benchmark.worker;
-import org.apache.bookkeeper.stats.Stats;
-import org.apache.bookkeeper.stats.StatsProvider;
-import org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider;
-import org.apache.commons.configuration.CompositeConfiguration;
-import org.apache.commons.configuration.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
-
import io.javalin.Javalin;
+import org.apache.bookkeeper.stats.Stats;
+import org.apache.bookkeeper.stats.StatsProvider;
+import org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider;
+import org.apache.commons.configuration.CompositeConfiguration;
+import org.apache.commons.configuration.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
-/**
- * A benchmark worker that listen for tasks to perform
- */
+/** A benchmark worker that listens for tasks to perform. */
public class BenchmarkWorker {
static class Arguments {
- @Parameter(names = { "-h", "--help" }, description = "Help message", help = true)
+ @Parameter(
+ names = {"-h", "--help"},
+ description = "Help message",
+ help = true)
boolean help;
- @Parameter(names = { "-p", "--port" }, description = "HTTP port to listen on")
+ @Parameter(
+ names = {"-p", "--port"},
+ description = "HTTP port to listen on")
public int httpPort = 8080;
- @Parameter(names = { "-sp", "--stats-port" }, description = "Stats port to listen on")
+ @Parameter(
+ names = {"-sp", "--stats-port"},
+ description = "Stats port to listen on")
public int statsPort = 8081;
}
@@ -71,9 +75,8 @@ public static void main(String[] args) throws Exception {
StatsProvider provider = Stats.get();
provider.start(conf);
- Runtime.getRuntime().addShutdownHook(new Thread(
- () -> provider.stop(),
- "benchmark-worker-shutdown-thread"));
+ Runtime.getRuntime()
+ .addShutdownHook(new Thread(() -> provider.stop(), "benchmark-worker-shutdown-thread"));
// Dump configuration variables
log.info("Starting benchmark with config: {}", writer.writeValueAsString(arguments));
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/DistributedWorkersEnsemble.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/DistributedWorkersEnsemble.java
index ad34ac808..a27628851 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/DistributedWorkersEnsemble.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/DistributedWorkersEnsemble.java
@@ -17,6 +17,22 @@
import static java.util.stream.Collectors.toList;
import static org.asynchttpclient.Dsl.asyncHttpClient;
+import com.beust.jcommander.internal.Maps;
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import io.netty.buffer.ByteBufUtil;
+import io.netty.buffer.Unpooled;
+import io.openmessaging.benchmark.utils.ListPartition;
+import io.openmessaging.benchmark.worker.commands.ConsumerAssignment;
+import io.openmessaging.benchmark.worker.commands.CountersStats;
+import io.openmessaging.benchmark.worker.commands.CumulativeLatencies;
+import io.openmessaging.benchmark.worker.commands.PeriodStats;
+import io.openmessaging.benchmark.worker.commands.ProducerWorkAssignment;
+import io.openmessaging.benchmark.worker.commands.TopicSubscription;
+import io.openmessaging.benchmark.worker.commands.TopicsInfo;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -28,7 +44,6 @@
import java.util.concurrent.TimeUnit;
import java.util.stream.Stream;
import java.util.zip.DataFormatException;
-
import org.HdrHistogram.Histogram;
import org.asynchttpclient.AsyncHttpClient;
import org.asynchttpclient.DefaultAsyncHttpClientConfig;
@@ -36,24 +51,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.beust.jcommander.internal.Maps;
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-import io.netty.buffer.ByteBufUtil;
-import io.netty.buffer.Unpooled;
-import io.openmessaging.benchmark.utils.ListPartition;
-import io.openmessaging.benchmark.worker.commands.ConsumerAssignment;
-import io.openmessaging.benchmark.worker.commands.CountersStats;
-import io.openmessaging.benchmark.worker.commands.CumulativeLatencies;
-import io.openmessaging.benchmark.worker.commands.PeriodStats;
-import io.openmessaging.benchmark.worker.commands.ProducerWorkAssignment;
-import io.openmessaging.benchmark.worker.commands.TopicSubscription;
-import io.openmessaging.benchmark.worker.commands.TopicsInfo;
-
public class DistributedWorkersEnsemble implements Worker {
private final Thread shutdownHook = new Thread(this::stopAll);
private final List workers;
@@ -66,16 +63,17 @@ public class DistributedWorkersEnsemble implements Worker {
public DistributedWorkersEnsemble(List workers, boolean extraConsumerWorkers) {
Preconditions.checkArgument(workers.size() > 1);
- DefaultAsyncHttpClientConfig.Builder clientBuilder = Dsl.config()
- .setReadTimeout(600000)
- .setRequestTimeout(600000);
+ DefaultAsyncHttpClientConfig.Builder clientBuilder =
+ Dsl.config().setReadTimeout(600000).setRequestTimeout(600000);
httpClient = asyncHttpClient(clientBuilder);
this.workers = unmodifiableList(workers);
// For driver-jms extra consumers are required.
// If there is an odd number of workers then allocate the extra to consumption.
- int numberOfProducerWorkers = extraConsumerWorkers ? (workers.size() + 2) / 3 : workers.size() / 2;
- List> partitions = Lists.partition(Lists.reverse(workers), workers.size() - numberOfProducerWorkers);
+ int numberOfProducerWorkers =
+ extraConsumerWorkers ? (workers.size() + 2) / 3 : workers.size() / 2;
+ List> partitions =
+ Lists.partition(Lists.reverse(workers), workers.size() - numberOfProducerWorkers);
this.producerWorkers = partitions.get(1);
this.consumerWorkers = partitions.get(0);
@@ -95,14 +93,15 @@ public void initializeDriver(File configurationFile) throws IOException {
@SuppressWarnings("unchecked")
public List createTopics(TopicsInfo topicsInfo) throws IOException {
// Create all topics from a single worker node
- return (List) post(workers.get(0), "/create-topics", writer.writeValueAsBytes(topicsInfo), List.class)
- .join();
+ return (List)
+ post(workers.get(0), "/create-topics", writer.writeValueAsBytes(topicsInfo), List.class)
+ .join();
}
@Override
public void createProducers(List topics) {
- List> topicsPerProducer = ListPartition.partitionList(topics,
- producerWorkers.size());
+ List> topicsPerProducer =
+ ListPartition.partitionList(topics, producerWorkers.size());
Map> topicsPerProducerMap = Maps.newHashMap();
int i = 0;
for (List assignedTopics : topicsPerProducer) {
@@ -110,19 +109,27 @@ public void createProducers(List topics) {
}
// Number of actually used workers might be less than available workers
- numberOfUsedProducerWorkers = (int) topicsPerProducerMap.values().stream().filter(t -> !t.isEmpty()).count();
- log.debug("Producing worker count: {} of {}", numberOfUsedProducerWorkers, producerWorkers.size());
-
- CompletableFuture[] futures = topicsPerProducerMap.keySet().stream().map(producer -> {
- try {
- return sendPost(producer, "/create-producers",
- writer.writeValueAsBytes(topicsPerProducerMap.get(producer)));
- } catch (Exception e) {
- CompletableFuture future = new CompletableFuture<>();
- future.completeExceptionally(e);
- return future;
- }
- }).toArray(this::newArray);
+ numberOfUsedProducerWorkers =
+ (int) topicsPerProducerMap.values().stream().filter(t -> !t.isEmpty()).count();
+ log.debug(
+ "Producing worker count: {} of {}", numberOfUsedProducerWorkers, producerWorkers.size());
+
+ CompletableFuture[] futures =
+ topicsPerProducerMap.keySet().stream()
+ .map(
+ producer -> {
+ try {
+ return sendPost(
+ producer,
+ "/create-producers",
+ writer.writeValueAsBytes(topicsPerProducerMap.get(producer)));
+ } catch (Exception e) {
+ CompletableFuture future = new CompletableFuture<>();
+ future.completeExceptionally(e);
+ return future;
+ }
+ })
+ .toArray(this::newArray);
CompletableFuture.allOf(futures).join();
}
@@ -131,7 +138,8 @@ public void createProducers(List topics) {
public void startLoad(ProducerWorkAssignment producerWorkAssignment) throws IOException {
// Reduce the publish rate across all the brokers
producerWorkAssignment.publishRate /= numberOfUsedProducerWorkers;
- log.debug("Setting worker assigned publish rate to {} msgs/sec", producerWorkAssignment.publishRate);
+ log.debug(
+ "Setting worker assigned publish rate to {} msgs/sec", producerWorkAssignment.publishRate);
sendPost(producerWorkers, "/start-load", writer.writeValueAsBytes(producerWorkAssignment));
}
@@ -165,9 +173,9 @@ public void resumeConsumers() throws IOException {
@Override
public void createConsumers(ConsumerAssignment overallConsumerAssignment) {
- List> subscriptionsPerConsumer = ListPartition.partitionList(
- overallConsumerAssignment.topicsSubscriptions,
- consumerWorkers.size());
+ List> subscriptionsPerConsumer =
+ ListPartition.partitionList(
+ overallConsumerAssignment.topicsSubscriptions, consumerWorkers.size());
Map topicsPerWorkerMap = Maps.newHashMap();
int i = 0;
for (List tsl : subscriptionsPerConsumer) {
@@ -176,16 +184,22 @@ public void createConsumers(ConsumerAssignment overallConsumerAssignment) {
topicsPerWorkerMap.put(consumerWorkers.get(i++), individualAssignement);
}
- CompletableFuture[] futures = topicsPerWorkerMap.keySet().stream().map(consumer -> {
- try {
- return sendPost(consumer, "/create-consumers",
- writer.writeValueAsBytes(topicsPerWorkerMap.get(consumer)));
- } catch (Exception e) {
- CompletableFuture future = new CompletableFuture<>();
- future.completeExceptionally(e);
- return future;
- }
- }).toArray(this::newArray);
+ CompletableFuture[] futures =
+ topicsPerWorkerMap.keySet().stream()
+ .map(
+ consumer -> {
+ try {
+ return sendPost(
+ consumer,
+ "/create-consumers",
+ writer.writeValueAsBytes(topicsPerWorkerMap.get(consumer)));
+ } catch (Exception e) {
+ CompletableFuture future = new CompletableFuture<>();
+ future.completeExceptionally(e);
+ return future;
+ }
+ })
+ .toArray(this::newArray);
CompletableFuture.allOf(futures).join();
}
@@ -194,69 +208,80 @@ public void createConsumers(ConsumerAssignment overallConsumerAssignment) {
public PeriodStats getPeriodStats() {
List individualStats = get(workers, "/period-stats", PeriodStats.class);
PeriodStats stats = new PeriodStats();
- individualStats.forEach(is -> {
- stats.messagesSent += is.messagesSent;
- stats.messageSendErrors += is.messageSendErrors;
- stats.bytesSent += is.bytesSent;
- stats.messagesReceived += is.messagesReceived;
- stats.bytesReceived += is.bytesReceived;
- stats.totalMessagesSent += is.totalMessagesSent;
- stats.totalMessageSendErrors += is.totalMessageSendErrors;
- stats.totalMessagesReceived += is.totalMessagesReceived;
-
- try {
- stats.publishLatency.add(Histogram.decodeFromCompressedByteBuffer(
- ByteBuffer.wrap(is.publishLatencyBytes), TimeUnit.SECONDS.toMicros(30)));
-
- stats.publishDelayLatency.add(Histogram.decodeFromCompressedByteBuffer(
- ByteBuffer.wrap(is.publishDelayLatencyBytes), TimeUnit.SECONDS.toMicros(30)));
-
- stats.endToEndLatency.add(Histogram.decodeFromCompressedByteBuffer(
- ByteBuffer.wrap(is.endToEndLatencyBytes), TimeUnit.HOURS.toMicros(12)));
- } catch (ArrayIndexOutOfBoundsException | DataFormatException e) {
- throw new RuntimeException(e);
- }
- });
+ individualStats.forEach(
+ is -> {
+ stats.messagesSent += is.messagesSent;
+ stats.messageSendErrors += is.messageSendErrors;
+ stats.bytesSent += is.bytesSent;
+ stats.messagesReceived += is.messagesReceived;
+ stats.bytesReceived += is.bytesReceived;
+ stats.totalMessagesSent += is.totalMessagesSent;
+ stats.totalMessageSendErrors += is.totalMessageSendErrors;
+ stats.totalMessagesReceived += is.totalMessagesReceived;
+
+ try {
+ stats.publishLatency.add(
+ Histogram.decodeFromCompressedByteBuffer(
+ ByteBuffer.wrap(is.publishLatencyBytes), TimeUnit.SECONDS.toMicros(30)));
+
+ stats.publishDelayLatency.add(
+ Histogram.decodeFromCompressedByteBuffer(
+ ByteBuffer.wrap(is.publishDelayLatencyBytes), TimeUnit.SECONDS.toMicros(30)));
+
+ stats.endToEndLatency.add(
+ Histogram.decodeFromCompressedByteBuffer(
+ ByteBuffer.wrap(is.endToEndLatencyBytes), TimeUnit.HOURS.toMicros(12)));
+ } catch (ArrayIndexOutOfBoundsException | DataFormatException e) {
+ throw new RuntimeException(e);
+ }
+ });
return stats;
}
@Override
public CumulativeLatencies getCumulativeLatencies() {
- List individualStats = get(workers, "/cumulative-latencies", CumulativeLatencies.class);
+ List individualStats =
+ get(workers, "/cumulative-latencies", CumulativeLatencies.class);
CumulativeLatencies stats = new CumulativeLatencies();
- individualStats.forEach(is -> {
- try {
- stats.publishLatency.add(Histogram.decodeFromCompressedByteBuffer(
- ByteBuffer.wrap(is.publishLatencyBytes), TimeUnit.SECONDS.toMicros(30)));
- } catch (Exception e) {
- log.error("Failed to decode publish latency: {}",
- ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(is.publishLatencyBytes)));
- throw new RuntimeException(e);
- }
-
- try {
- stats.publishDelayLatency.add(Histogram.decodeFromCompressedByteBuffer(
- ByteBuffer.wrap(is.publishDelayLatencyBytes), TimeUnit.SECONDS.toMicros(30)));
- } catch (Exception e) {
- log.error("Failed to decode publish delay latency: {}",
- ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(is.publishDelayLatencyBytes)));
- throw new RuntimeException(e);
- }
-
- try {
- stats.endToEndLatency.add(Histogram.decodeFromCompressedByteBuffer(
- ByteBuffer.wrap(is.endToEndLatencyBytes), TimeUnit.HOURS.toMicros(12)));
- } catch (Exception e) {
- log.error("Failed to decode end-to-end latency: {}",
- ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(is.endToEndLatencyBytes)));
- throw new RuntimeException(e);
- }
- });
+ individualStats.forEach(
+ is -> {
+ try {
+ stats.publishLatency.add(
+ Histogram.decodeFromCompressedByteBuffer(
+ ByteBuffer.wrap(is.publishLatencyBytes), TimeUnit.SECONDS.toMicros(30)));
+ } catch (Exception e) {
+ log.error(
+ "Failed to decode publish latency: {}",
+ ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(is.publishLatencyBytes)));
+ throw new RuntimeException(e);
+ }
+
+ try {
+ stats.publishDelayLatency.add(
+ Histogram.decodeFromCompressedByteBuffer(
+ ByteBuffer.wrap(is.publishDelayLatencyBytes), TimeUnit.SECONDS.toMicros(30)));
+ } catch (Exception e) {
+ log.error(
+ "Failed to decode publish delay latency: {}",
+ ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(is.publishDelayLatencyBytes)));
+ throw new RuntimeException(e);
+ }
+
+ try {
+ stats.endToEndLatency.add(
+ Histogram.decodeFromCompressedByteBuffer(
+ ByteBuffer.wrap(is.endToEndLatencyBytes), TimeUnit.HOURS.toMicros(12)));
+ } catch (Exception e) {
+ log.error(
+ "Failed to decode end-to-end latency: {}",
+ ByteBufUtil.prettyHexDump(Unpooled.wrappedBuffer(is.endToEndLatencyBytes)));
+ throw new RuntimeException(e);
+ }
+ });
return stats;
-
}
@Override
@@ -264,11 +289,12 @@ public CountersStats getCountersStats() throws IOException {
List individualStats = get(workers, "/counters-stats", CountersStats.class);
CountersStats stats = new CountersStats();
- individualStats.forEach(is -> {
- stats.messagesSent += is.messagesSent;
- stats.messagesReceived += is.messagesReceived;
- stats.messageSendErrors += is.messageSendErrors;
- });
+ individualStats.forEach(
+ is -> {
+ stats.messagesSent += is.messagesSent;
+ stats.messagesReceived += is.messagesReceived;
+ stats.messageSendErrors += is.messageSendErrors;
+ });
return stats;
}
@@ -279,63 +305,103 @@ public void resetStats() throws IOException {
}
/**
- * Send a request to multiple hosts and wait for all responses
+ * Send a request to multiple hosts and wait for all responses.
+ *
+ * @param hosts
+ * @param path
+ * @param body
*/
private void sendPost(List hosts, String path, byte[] body) {
- CompletableFuture[] futures = hosts.stream().map(w -> sendPost(w, path, body)).toArray(this::newArray);
+ CompletableFuture[] futures =
+ hosts.stream().map(w -> sendPost(w, path, body)).toArray(this::newArray);
CompletableFuture.allOf(futures).join();
}
private CompletableFuture sendPost(String host, String path, byte[] body) {
- return httpClient.preparePost(host + path).setBody(body).execute().toCompletableFuture().thenApply(x -> {
- if (x.getStatusCode() != 200) {
- log.error("Failed to do HTTP post request to {}{} -- code: {}", host, path, x.getStatusCode());
- }
- Preconditions.checkArgument(x.getStatusCode() == 200);
- return (Void) null;
- });
+ return httpClient
+ .preparePost(host + path)
+ .setBody(body)
+ .execute()
+ .toCompletableFuture()
+ .thenApply(
+ x -> {
+ if (x.getStatusCode() != 200) {
+ log.error(
+ "Failed to do HTTP post request to {}{} -- code: {}",
+ host,
+ path,
+ x.getStatusCode());
+ }
+ Preconditions.checkArgument(x.getStatusCode() == 200);
+ return (Void) null;
+ });
}
private List get(List hosts, String path, Class clazz) {
- CompletableFuture[] futures = hosts.stream().map(w -> get(w, path, clazz)).toArray(this::newArray);
+ CompletableFuture[] futures =
+ hosts.stream().map(w -> get(w, path, clazz)).toArray(this::newArray);
CompletableFuture> resultFuture = new CompletableFuture<>();
- CompletableFuture.allOf(futures).thenRun(() -> {
- resultFuture.complete(Stream.of(futures).map(CompletableFuture::join).collect(toList()));
- }).exceptionally(ex -> {
- resultFuture.completeExceptionally(ex);
- return null;
- });
+ CompletableFuture.allOf(futures)
+ .thenRun(
+ () -> {
+ resultFuture.complete(
+ Stream.of(futures).map(CompletableFuture::join).collect(toList()));
+ })
+ .exceptionally(
+ ex -> {
+ resultFuture.completeExceptionally(ex);
+ return null;
+ });
return resultFuture.join();
}
private CompletableFuture get(String host, String path, Class clazz) {
- return httpClient.prepareGet(host + path).execute().toCompletableFuture().thenApply(response -> {
- try {
- if (response.getStatusCode() != 200) {
- log.error("Failed to do HTTP get request to {}{} -- code: {}", host, path, response.getStatusCode());
- }
- Preconditions.checkArgument(response.getStatusCode() == 200);
- return mapper.readValue(response.getResponseBody(), clazz);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- });
+ return httpClient
+ .prepareGet(host + path)
+ .execute()
+ .toCompletableFuture()
+ .thenApply(
+ response -> {
+ try {
+ if (response.getStatusCode() != 200) {
+ log.error(
+ "Failed to do HTTP get request to {}{} -- code: {}",
+ host,
+ path,
+ response.getStatusCode());
+ }
+ Preconditions.checkArgument(response.getStatusCode() == 200);
+ return mapper.readValue(response.getResponseBody(), clazz);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
}
private CompletableFuture post(String host, String path, byte[] body, Class clazz) {
- return httpClient.preparePost(host + path).setBody(body).execute().toCompletableFuture().thenApply(response -> {
- try {
- if (response.getStatusCode() != 200) {
- log.error("Failed to do HTTP post request to {}{} -- code: {}", host, path, response.getStatusCode());
- }
- Preconditions.checkArgument(response.getStatusCode() == 200);
- return mapper.readValue(response.getResponseBody(), clazz);
- } catch (IOException e) {
- throw new RuntimeException(e);
- }
- });
+ return httpClient
+ .preparePost(host + path)
+ .setBody(body)
+ .execute()
+ .toCompletableFuture()
+ .thenApply(
+ response -> {
+ try {
+ if (response.getStatusCode() != 200) {
+ log.error(
+ "Failed to do HTTP post request to {}{} -- code: {}",
+ host,
+ path,
+ response.getStatusCode());
+ }
+ Preconditions.checkArgument(response.getStatusCode() == 200);
+ return mapper.readValue(response.getResponseBody(), clazz);
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
}
@Override
@@ -351,13 +417,12 @@ private CompletableFuture[] newArray(int size) {
private static final ObjectWriter writer = new ObjectMapper().writerWithDefaultPrettyPrinter();
- private static final ObjectMapper mapper = new ObjectMapper()
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
static {
mapper.enable(DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_USING_DEFAULT_VALUE);
}
private static final Logger log = LoggerFactory.getLogger(DistributedWorkersEnsemble.class);
-
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/LocalWorker.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/LocalWorker.java
index 925dd635a..ca5d20b69 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/LocalWorker.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/LocalWorker.java
@@ -13,45 +13,24 @@
*/
package io.openmessaging.benchmark.worker;
-import static io.openmessaging.benchmark.utils.UniformRateLimiter.*;
+import static io.openmessaging.benchmark.utils.UniformRateLimiter.uninterruptibleSleepNs;
import static java.util.stream.Collectors.toList;
-import io.openmessaging.benchmark.driver.BenchmarkDriver.TopicInfo;
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.*;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.LongAdder;
-
-import io.openmessaging.benchmark.utils.UniformRateLimiter;
-import java.util.stream.IntStream;
-import org.HdrHistogram.Recorder;
-import org.apache.bookkeeper.stats.Counter;
-import org.apache.bookkeeper.stats.NullStatsLogger;
-import org.apache.bookkeeper.stats.OpStatsLogger;
-import org.apache.bookkeeper.stats.StatsLogger;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.google.common.base.Preconditions;
-
import io.netty.util.concurrent.DefaultThreadFactory;
import io.openmessaging.benchmark.DriverConfiguration;
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
import io.openmessaging.benchmark.driver.BenchmarkDriver;
+import io.openmessaging.benchmark.driver.BenchmarkDriver.TopicInfo;
import io.openmessaging.benchmark.driver.BenchmarkProducer;
import io.openmessaging.benchmark.driver.ConsumerCallback;
import io.openmessaging.benchmark.utils.RandomGenerator;
import io.openmessaging.benchmark.utils.Timer;
+import io.openmessaging.benchmark.utils.UniformRateLimiter;
import io.openmessaging.benchmark.utils.distributor.KeyDistributor;
import io.openmessaging.benchmark.worker.commands.ConsumerAssignment;
import io.openmessaging.benchmark.worker.commands.CountersStats;
@@ -59,6 +38,28 @@
import io.openmessaging.benchmark.worker.commands.PeriodStats;
import io.openmessaging.benchmark.worker.commands.ProducerWorkAssignment;
import io.openmessaging.benchmark.worker.commands.TopicsInfo;
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.TreeMap;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.LongAdder;
+import java.util.stream.IntStream;
+import org.HdrHistogram.Recorder;
+import org.apache.bookkeeper.stats.Counter;
+import org.apache.bookkeeper.stats.NullStatsLogger;
+import org.apache.bookkeeper.stats.OpStatsLogger;
+import org.apache.bookkeeper.stats.StatsLogger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class LocalWorker implements Worker, ConsumerCallback {
@@ -69,7 +70,8 @@ public class LocalWorker implements Worker, ConsumerCallback {
private volatile UniformRateLimiter rateLimiter = new UniformRateLimiter(1.0);
- private final ExecutorService executor = Executors.newCachedThreadPool(new DefaultThreadFactory("local-worker"));
+ private final ExecutorService executor =
+ Executors.newCachedThreadPool(new DefaultThreadFactory("local-worker"));
// stats
@@ -91,17 +93,19 @@ public class LocalWorker implements Worker, ConsumerCallback {
private final LongAdder totalMessageSendErrors = new LongAdder();
private final LongAdder totalMessagesReceived = new LongAdder();
- private final static long highestTrackableValue = TimeUnit.SECONDS.toMicros(60);
+ private static final long highestTrackableValue = TimeUnit.SECONDS.toMicros(60);
private final Recorder publishLatencyRecorder = new Recorder(highestTrackableValue, 5);
private final Recorder cumulativePublishLatencyRecorder = new Recorder(highestTrackableValue, 5);
private final OpStatsLogger publishLatencyStats;
private final Recorder publishDelayLatencyRecorder = new Recorder(highestTrackableValue, 5);
- private final Recorder cumulativePublishDelayLatencyRecorder = new Recorder(highestTrackableValue, 5);
+ private final Recorder cumulativePublishDelayLatencyRecorder =
+ new Recorder(highestTrackableValue, 5);
private final OpStatsLogger publishDelayLatencyStats;
private final Recorder endToEndLatencyRecorder = new Recorder(TimeUnit.HOURS.toMicros(12), 5);
- private final Recorder endToEndCumulativeLatencyRecorder = new Recorder(TimeUnit.HOURS.toMicros(12), 5);
+ private final Recorder endToEndCumulativeLatencyRecorder =
+ new Recorder(TimeUnit.HOURS.toMicros(12), 5);
private final OpStatsLogger endToEndLatencyStats;
private boolean testCompleted = false;
@@ -133,14 +137,19 @@ public void initializeDriver(File driverConfigFile) throws IOException {
Preconditions.checkArgument(benchmarkDriver == null);
testCompleted = false;
- DriverConfiguration driverConfiguration = mapper.readValue(driverConfigFile, DriverConfiguration.class);
+ DriverConfiguration driverConfiguration =
+ mapper.readValue(driverConfigFile, DriverConfiguration.class);
log.info("Driver: {}", writer.writeValueAsString(driverConfiguration));
try {
- benchmarkDriver = (BenchmarkDriver) Class.forName(driverConfiguration.driverClass).newInstance();
+ benchmarkDriver =
+ (BenchmarkDriver) Class.forName(driverConfiguration.driverClass).newInstance();
benchmarkDriver.initialize(driverConfigFile, statsLogger);
- } catch (InstantiationException | IllegalAccessException | ClassNotFoundException | InterruptedException e) {
+ } catch (InstantiationException
+ | IllegalAccessException
+ | ClassNotFoundException
+ | InterruptedException e) {
throw new RuntimeException(e);
}
}
@@ -149,9 +158,11 @@ public void initializeDriver(File driverConfigFile) throws IOException {
public List createTopics(TopicsInfo topicsInfo) {
Timer timer = new Timer();
- List topicInfos = IntStream.range(0, topicsInfo.numberOfTopics)
- .mapToObj(i -> new TopicInfo(generateTopicName(i), topicsInfo.numberOfPartitionsPerTopic))
- .collect(toList());
+ List topicInfos =
+ IntStream.range(0, topicsInfo.numberOfTopics)
+ .mapToObj(
+ i -> new TopicInfo(generateTopicName(i), topicsInfo.numberOfPartitionsPerTopic))
+ .collect(toList());
benchmarkDriver.createTopics(topicInfos).join();
@@ -162,15 +173,16 @@ public List createTopics(TopicsInfo topicsInfo) {
}
private String generateTopicName(int i) {
- return String.format("%s-%07d-%s", benchmarkDriver.getTopicNamePrefix(), i, RandomGenerator.getRandomString());
+ return String.format(
+ "%s-%07d-%s", benchmarkDriver.getTopicNamePrefix(), i, RandomGenerator.getRandomString());
}
@Override
public void createProducers(List topics) {
Timer timer = new Timer();
- List> futures = topics.stream()
- .map(topic -> benchmarkDriver.createProducer(topic)).collect(toList());
+ List> futures =
+ topics.stream().map(topic -> benchmarkDriver.createProducer(topic)).collect(toList());
futures.forEach(f -> producers.add(f.join()));
log.info("Created {} producers in {} ms", producers.size(), timer.elapsedMillis());
@@ -180,8 +192,10 @@ public void createProducers(List topics) {
public void createConsumers(ConsumerAssignment consumerAssignment) {
Timer timer = new Timer();
- List> futures = consumerAssignment.topicsSubscriptions.stream()
- .map(ts -> benchmarkDriver.createConsumer(ts.topic, ts.subscription, this)).collect(toList());
+ List> futures =
+ consumerAssignment.topicsSubscriptions.stream()
+ .map(ts -> benchmarkDriver.createConsumer(ts.topic, ts.subscription, this))
+ .collect(toList());
futures.forEach(f -> consumers.add(f.join()));
log.info("Created {} consumers in {} ms", consumers.size(), timer.elapsedMillis());
@@ -197,71 +211,97 @@ public void startLoad(ProducerWorkAssignment producerWorkAssignment) {
int processorIdx = 0;
for (BenchmarkProducer p : producers) {
- processorAssignment.computeIfAbsent(processorIdx, x -> new ArrayList()).add(p);
+ processorAssignment
+ .computeIfAbsent(processorIdx, x -> new ArrayList())
+ .add(p);
processorIdx = (processorIdx + 1) % processors;
}
- processorAssignment.values().forEach(producers -> submitProducersToExecutor(producers,
- KeyDistributor.build(producerWorkAssignment.keyDistributorType), producerWorkAssignment.payloadData));
+ processorAssignment
+ .values()
+ .forEach(
+ producers ->
+ submitProducersToExecutor(
+ producers,
+ KeyDistributor.build(producerWorkAssignment.keyDistributorType),
+ producerWorkAssignment.payloadData));
}
@Override
public void probeProducers() throws IOException {
- producers.forEach(producer -> producer.sendAsync(Optional.of("key"), new byte[10])
- .thenRun(() -> totalMessagesSent.increment()));
+ producers.forEach(
+ producer ->
+ producer
+ .sendAsync(Optional.of("key"), new byte[10])
+ .thenRun(() -> totalMessagesSent.increment()));
}
- private void submitProducersToExecutor(List producers, KeyDistributor keyDistributor, List payloads) {
- executor.submit(() -> {
- int payloadCount = payloads.size();
- ThreadLocalRandom r = ThreadLocalRandom.current();
- byte[] firstPayload = payloads.get(0);
-
- try {
- while (!testCompleted) {
- producers.forEach(producer -> {
- byte[] payloadData = payloadCount == 0 ? firstPayload : payloads.get(r.nextInt(payloadCount));
- final long intendedSendTime = rateLimiter.acquire();
- uninterruptibleSleepNs(intendedSendTime);
- final long sendTime = System.nanoTime();
- producer.sendAsync(Optional.ofNullable(keyDistributor.next()), payloadData)
- .thenRun(() -> {
- messagesSent.increment();
- totalMessagesSent.increment();
- messagesSentCounter.inc();
- bytesSent.add(payloadData.length);
- bytesSentCounter.add(payloadData.length);
-
- final long latencyMicros = Math.min(highestTrackableValue,
- TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - sendTime));
- publishLatencyRecorder.recordValue(latencyMicros);
- cumulativePublishLatencyRecorder.recordValue(latencyMicros);
- publishLatencyStats.registerSuccessfulEvent(latencyMicros, TimeUnit.MICROSECONDS);
-
- final long sendDelayMicros = Math.min(highestTrackableValue,
- TimeUnit.NANOSECONDS.toMicros(sendTime - intendedSendTime));
- publishDelayLatencyRecorder.recordValue(sendDelayMicros);
- cumulativePublishDelayLatencyRecorder.recordValue(sendDelayMicros);
- publishDelayLatencyStats.registerSuccessfulEvent(sendDelayMicros, TimeUnit.MICROSECONDS);
- }).exceptionally(ex -> {
- messageSendErrors.increment();
- messageSendErrorCounter.inc();
- totalMessageSendErrors.increment();
- log.warn("Write error on message", ex);
- return null;
- });
- });
- }
- } catch (Throwable t) {
- log.error("Got error", t);
- }
- });
+ @SuppressWarnings("checkstyle:LineLength")
+ private void submitProducersToExecutor(
+ List producers, KeyDistributor keyDistributor, List payloads) {
+ executor.submit(
+ () -> {
+ int payloadCount = payloads.size();
+ ThreadLocalRandom r = ThreadLocalRandom.current();
+ byte[] firstPayload = payloads.get(0);
+
+ try {
+ while (!testCompleted) {
+ producers.forEach(
+ producer -> {
+ byte[] payloadData =
+ payloadCount == 0 ? firstPayload : payloads.get(r.nextInt(payloadCount));
+ final long intendedSendTime = rateLimiter.acquire();
+ uninterruptibleSleepNs(intendedSendTime);
+ final long sendTime = System.nanoTime();
+ producer
+ .sendAsync(Optional.ofNullable(keyDistributor.next()), payloadData)
+ .thenRun(
+ () -> {
+ messagesSent.increment();
+ totalMessagesSent.increment();
+ messagesSentCounter.inc();
+ bytesSent.add(payloadData.length);
+ bytesSentCounter.add(payloadData.length);
+
+ final long latencyMicros =
+ Math.min(
+ highestTrackableValue,
+ TimeUnit.NANOSECONDS.toMicros(System.nanoTime() - sendTime));
+ publishLatencyRecorder.recordValue(latencyMicros);
+ cumulativePublishLatencyRecorder.recordValue(latencyMicros);
+ publishLatencyStats.registerSuccessfulEvent(
+ latencyMicros, TimeUnit.MICROSECONDS);
+
+ final long sendDelayMicros =
+ Math.min(
+ highestTrackableValue,
+ TimeUnit.NANOSECONDS.toMicros(sendTime - intendedSendTime));
+ publishDelayLatencyRecorder.recordValue(sendDelayMicros);
+ cumulativePublishDelayLatencyRecorder.recordValue(sendDelayMicros);
+ publishDelayLatencyStats.registerSuccessfulEvent(
+ sendDelayMicros, TimeUnit.MICROSECONDS);
+ })
+ .exceptionally(
+ ex -> {
+ messageSendErrors.increment();
+ messageSendErrorCounter.inc();
+ totalMessageSendErrors.increment();
+ log.warn("Write error on message", ex);
+ return null;
+ });
+ });
+ }
+ } catch (Throwable t) {
+ log.error("Got error", t);
+ }
+ });
}
@Override
public void adjustPublishRate(double publishRate) {
- if(publishRate < 1.0) {
+ if (publishRate < 1.0) {
rateLimiter = new UniformRateLimiter(1.0);
return;
}
@@ -412,8 +452,9 @@ public void close() throws Exception {
private static final ObjectWriter writer = new ObjectMapper().writerWithDefaultPrettyPrinter();
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
static {
mapper.enable(DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_USING_DEFAULT_VALUE);
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/Worker.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/Worker.java
index fa9dde4d9..5333f516e 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/Worker.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/Worker.java
@@ -13,9 +13,6 @@
*/
package io.openmessaging.benchmark.worker;
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
import io.openmessaging.benchmark.worker.commands.ConsumerAssignment;
import io.openmessaging.benchmark.worker.commands.CountersStats;
@@ -23,6 +20,9 @@
import io.openmessaging.benchmark.worker.commands.PeriodStats;
import io.openmessaging.benchmark.worker.commands.ProducerWorkAssignment;
import io.openmessaging.benchmark.worker.commands.TopicsInfo;
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
public interface Worker extends AutoCloseable {
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/WorkerHandler.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/WorkerHandler.java
index 951fab4e9..05fad840e 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/WorkerHandler.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/WorkerHandler.java
@@ -13,19 +13,11 @@
*/
package io.openmessaging.benchmark.worker;
-import java.io.File;
-import java.nio.ByteBuffer;
-import java.util.List;
-
-import org.apache.bookkeeper.stats.StatsLogger;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
import com.google.common.io.Files;
-
import io.javalin.Context;
import io.javalin.Javalin;
import io.openmessaging.benchmark.worker.commands.ConsumerAssignment;
@@ -33,6 +25,12 @@
import io.openmessaging.benchmark.worker.commands.PeriodStats;
import io.openmessaging.benchmark.worker.commands.ProducerWorkAssignment;
import io.openmessaging.benchmark.worker.commands.TopicsInfo;
+import java.io.File;
+import java.nio.ByteBuffer;
+import java.util.List;
+import org.apache.bookkeeper.stats.StatsLogger;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
@SuppressWarnings("unchecked")
public class WorkerHandler {
@@ -87,7 +85,8 @@ private void handleProbeProducers(Context ctx) throws Exception {
private void handleCreateConsumers(Context ctx) throws Exception {
ConsumerAssignment consumerAssignment = mapper.readValue(ctx.body(), ConsumerAssignment.class);
- log.info("Received create consumers request for topics: {}", consumerAssignment.topicsSubscriptions);
+ log.info(
+ "Received create consumers request for topics: {}", consumerAssignment.topicsSubscriptions);
localWorker.createConsumers(consumerAssignment);
}
@@ -100,9 +99,12 @@ private void handleResumeConsumers(Context ctx) throws Exception {
}
private void handleStartLoad(Context ctx) throws Exception {
- ProducerWorkAssignment producerWorkAssignment = mapper.readValue(ctx.body(), ProducerWorkAssignment.class);
+ ProducerWorkAssignment producerWorkAssignment =
+ mapper.readValue(ctx.body(), ProducerWorkAssignment.class);
- log.info("Start load publish-rate: {} msg/s -- payload-size: {}", producerWorkAssignment.publishRate,
+ log.info(
+ "Start load publish-rate: {} msg/s -- payload-size: {}",
+ producerWorkAssignment.publishRate,
producerWorkAssignment.payloadData.get(0).length);
localWorker.startLoad(producerWorkAssignment);
@@ -186,13 +188,12 @@ private void handleResetStats(Context ctx) throws Exception {
private static final Logger log = LoggerFactory.getLogger(WorkerHandler.class);
- private static final ObjectMapper mapper = new ObjectMapper()
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper().configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
static {
mapper.enable(DeserializationFeature.READ_UNKNOWN_ENUM_VALUES_USING_DEFAULT_VALUE);
}
private static final ObjectWriter writer = new ObjectMapper().writerWithDefaultPrettyPrinter();
-
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/ConsumerAssignment.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/ConsumerAssignment.java
index 3ae9c97e7..9d97642ec 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/ConsumerAssignment.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/ConsumerAssignment.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.worker.commands;
+
import java.util.ArrayList;
import java.util.List;
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/CumulativeLatencies.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/CumulativeLatencies.java
index 92a12ef7b..e489d9107 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/CumulativeLatencies.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/CumulativeLatencies.java
@@ -13,23 +13,21 @@
*/
package io.openmessaging.benchmark.worker.commands;
-import java.util.concurrent.TimeUnit;
-
-import org.HdrHistogram.Histogram;
import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.concurrent.TimeUnit;
+import org.HdrHistogram.Histogram;
public class CumulativeLatencies {
- @JsonIgnore
- public Histogram publishLatency = new Histogram(TimeUnit.SECONDS.toMicros(60), 5);
+ @JsonIgnore public Histogram publishLatency = new Histogram(TimeUnit.SECONDS.toMicros(60), 5);
public byte[] publishLatencyBytes;
@JsonIgnore
public Histogram publishDelayLatency = new Histogram(TimeUnit.SECONDS.toMicros(60), 5);
+
public byte[] publishDelayLatencyBytes;
- @JsonIgnore
- public Histogram endToEndLatency = new Histogram(TimeUnit.HOURS.toMicros(12), 5);
+ @JsonIgnore public Histogram endToEndLatency = new Histogram(TimeUnit.HOURS.toMicros(12), 5);
public byte[] endToEndLatencyBytes;
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/PeriodStats.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/PeriodStats.java
index 2dbe57d72..ec99f75dd 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/PeriodStats.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/PeriodStats.java
@@ -13,11 +13,10 @@
*/
package io.openmessaging.benchmark.worker.commands;
-import java.util.concurrent.TimeUnit;
-
-import org.HdrHistogram.Histogram;
import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.concurrent.TimeUnit;
+import org.HdrHistogram.Histogram;
public class PeriodStats {
public long messagesSent = 0;
@@ -31,16 +30,14 @@ public class PeriodStats {
public long totalMessageSendErrors = 0;
public long totalMessagesReceived = 0;
- @JsonIgnore
- public Histogram publishLatency = new Histogram(TimeUnit.SECONDS.toMicros(60), 5);
+ @JsonIgnore public Histogram publishLatency = new Histogram(TimeUnit.SECONDS.toMicros(60), 5);
public byte[] publishLatencyBytes;
@JsonIgnore
public Histogram publishDelayLatency = new Histogram(TimeUnit.SECONDS.toMicros(60), 5);
- public byte[] publishDelayLatencyBytes;
+ public byte[] publishDelayLatencyBytes;
- @JsonIgnore
- public Histogram endToEndLatency = new Histogram(TimeUnit.HOURS.toMicros(12), 5);
+ @JsonIgnore public Histogram endToEndLatency = new Histogram(TimeUnit.HOURS.toMicros(12), 5);
public byte[] endToEndLatencyBytes;
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/ProducerWorkAssignment.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/ProducerWorkAssignment.java
index 53c7e8869..50dfe0704 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/ProducerWorkAssignment.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/ProducerWorkAssignment.java
@@ -13,14 +13,14 @@
*/
package io.openmessaging.benchmark.worker.commands;
-import io.openmessaging.benchmark.utils.distributor.KeyDistributorType;
+import io.openmessaging.benchmark.utils.distributor.KeyDistributorType;
import java.util.List;
public class ProducerWorkAssignment {
-
+
public List payloadData;
-
+
public double publishRate;
public KeyDistributorType keyDistributorType;
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/TopicSubscription.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/TopicSubscription.java
index f21ce6745..bd9bf9989 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/TopicSubscription.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/TopicSubscription.java
@@ -17,8 +17,7 @@ public class TopicSubscription {
public String topic;
public String subscription;
- public TopicSubscription() {
- }
+ public TopicSubscription() {}
public TopicSubscription(String topic, String subscription) {
this.topic = topic;
@@ -27,9 +26,13 @@ public TopicSubscription(String topic, String subscription) {
@Override
public String toString() {
- return "TopicSubscription{" +
- "topic='" + topic + '\'' +
- ", subscription='" + subscription + '\'' +
- '}';
+ return "TopicSubscription{"
+ + "topic='"
+ + topic
+ + '\''
+ + ", subscription='"
+ + subscription
+ + '\''
+ + '}';
}
}
diff --git a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/TopicsInfo.java b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/TopicsInfo.java
index b277a8f5f..90545c725 100644
--- a/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/TopicsInfo.java
+++ b/benchmark-framework/src/main/java/io/openmessaging/benchmark/worker/commands/TopicsInfo.java
@@ -17,8 +17,7 @@ public class TopicsInfo {
public int numberOfTopics;
public int numberOfPartitionsPerTopic;
- public TopicsInfo() {
- }
+ public TopicsInfo() {}
public TopicsInfo(int numberOfTopics, int numberOfPartitionsPerTopic) {
this.numberOfTopics = numberOfTopics;
diff --git a/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/ListPartitionTest.java b/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/ListPartitionTest.java
index b3e7c0a53..a7472df95 100644
--- a/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/ListPartitionTest.java
+++ b/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/ListPartitionTest.java
@@ -15,6 +15,7 @@
import static java.util.Arrays.asList;
import static org.assertj.core.api.Assertions.assertThat;
+
import java.util.List;
import org.junit.jupiter.api.Test;
@@ -24,11 +25,13 @@ class ListPartitionTest {
void partitionList() {
List list = asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
List> lists = ListPartition.partitionList(list, 3);
- assertThat(lists).satisfies(s -> {
- assertThat(s).hasSize(3);
- assertThat(s.get(0)).isEqualTo(asList(1, 4, 7, 10));
- assertThat(s.get(1)).isEqualTo(asList(2, 5, 8));
- assertThat(s.get(2)).isEqualTo(asList(3, 6, 9));
- });
+ assertThat(lists)
+ .satisfies(
+ s -> {
+ assertThat(s).hasSize(3);
+ assertThat(s.get(0)).isEqualTo(asList(1, 4, 7, 10));
+ assertThat(s.get(1)).isEqualTo(asList(2, 5, 8));
+ assertThat(s.get(2)).isEqualTo(asList(3, 6, 9));
+ });
}
-}
\ No newline at end of file
+}
diff --git a/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/PaddingDecimalFormatTest.java b/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/PaddingDecimalFormatTest.java
index dfe52031a..be1b9a532 100644
--- a/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/PaddingDecimalFormatTest.java
+++ b/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/PaddingDecimalFormatTest.java
@@ -14,6 +14,7 @@
package io.openmessaging.benchmark.utils;
import static org.assertj.core.api.Assertions.assertThat;
+
import org.junit.jupiter.api.Test;
class PaddingDecimalFormatTest {
@@ -25,5 +26,4 @@ void format() {
assertThat(format.format(1000L)).isEqualTo(" 1000.0");
assertThat(format.format(10000000L)).isEqualTo("10000000.0");
}
-
-}
\ No newline at end of file
+}
diff --git a/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/TimerTest.java b/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/TimerTest.java
index 5d52fbe01..87004e332 100644
--- a/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/TimerTest.java
+++ b/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/TimerTest.java
@@ -18,6 +18,7 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+
import java.util.function.Supplier;
import org.junit.jupiter.api.Test;
@@ -38,4 +39,4 @@ void elapsedSeconds() {
Timer timer = new Timer(mockClock);
assertThat(timer.elapsedSeconds()).isEqualTo(2.0d);
}
-}
\ No newline at end of file
+}
diff --git a/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/UniformRateLimiterTest.java b/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/UniformRateLimiterTest.java
index b9f200846..a9b90462e 100644
--- a/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/UniformRateLimiterTest.java
+++ b/benchmark-framework/src/test/java/io/openmessaging/benchmark/utils/UniformRateLimiterTest.java
@@ -19,6 +19,7 @@
import static org.assertj.core.api.Assertions.assertThatCode;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
+
import java.util.function.Supplier;
import org.junit.jupiter.api.Test;
@@ -55,9 +56,11 @@ void uninterruptibleSleepNs() {
@Test
void cinitExceptions() {
- assertThatCode(() -> new UniformRateLimiter(Double.NaN)).isInstanceOf(IllegalArgumentException.class);
- assertThatCode(() -> new UniformRateLimiter(1.0d / 0.0d)).isInstanceOf(IllegalArgumentException.class);
+ assertThatCode(() -> new UniformRateLimiter(Double.NaN))
+ .isInstanceOf(IllegalArgumentException.class);
+ assertThatCode(() -> new UniformRateLimiter(1.0d / 0.0d))
+ .isInstanceOf(IllegalArgumentException.class);
assertThatCode(() -> new UniformRateLimiter(-0.1)).isInstanceOf(IllegalArgumentException.class);
assertThatCode(() -> new UniformRateLimiter(0.0)).isInstanceOf(IllegalArgumentException.class);
}
-}
\ No newline at end of file
+}
diff --git a/deployment/kubernetes/helm/README.md b/deployment/kubernetes/helm/README.md
index ac43ac652..f8ef3125a 100644
--- a/deployment/kubernetes/helm/README.md
+++ b/deployment/kubernetes/helm/README.md
@@ -1,11 +1,10 @@
-
Users can deploy the helm chart:
```bash
$ helm install ./benchmark --name benchmark
```
-After the chart has started, users can exec into the pod name "benchmark-driver" and run the benchmark from there.
+After the chart has started, users can exec into the pod name "benchmark-driver" and run the benchmark from there.
For example, once inside the "benchmark-driver" pod, users can execute:
diff --git a/docker/README.md b/docker/README.md
index 1cefa026d..a58ef527c 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -1,9 +1,12 @@
# OpenMessaging Benchmark Framework Docker
-You can use either of the Dockerfiles - `./docker/Dockerfile` or `./docker/Dockerfile.build` based on your needs.
+
+You can use either of the Dockerfiles - `./docker/Dockerfile` or `./docker/Dockerfile.build` based on your needs.
### `Dockerfile`
+
Uses `openjdk-8` and takes `BENCHMARK_TARBALL` as an argument.
While using this Dockerfile, you will need to build the project locally **first**.
+
```
#> mvn build
#> export BENCHMARK_TARBALL=package/target/openmessaging-benchmark--SNAPSHOT-bin.tar.gz
@@ -11,8 +14,11 @@ While using this Dockerfile, you will need to build the project locally **first*
```
### `Dockerfile.build`
+
Uses the latest version of `maven` in order to build the project, and then use `openjdk-8` as runtime.
This Dockerfile has no dependency (you do not need Mavent to be installed locally).
+
```
#> docker build . -f docker/Dockerfile.build
-```
\ No newline at end of file
+```
+
diff --git a/docker/pom.xml b/docker/pom.xml
index bf5188238..6f9925976 100644
--- a/docker/pom.xml
+++ b/docker/pom.xml
@@ -1,3 +1,4 @@
+
+ 4.0.0
- messaging-benchmarkio.openmessaging.benchmark
+ messaging-benchmark0.0.1-SNAPSHOT
- 4.0.0messaging-benchmark-docker
+ pomOpen Messaging Benchmark Docker Image
- pom1.3.7
@@ -52,6 +53,14 @@
com.spotifydockerfile-maven-plugin${dockerfile-maven.version}
+
+ ${docker.organization}/openmessaging-benchmark
+ false
+ ${project.version}
+
+ target/package-${project.version}-bin.tar.gz
+
+ default
@@ -80,14 +89,6 @@
-
- ${docker.organization}/openmessaging-benchmark
- false
- ${project.version}
-
- target/package-${project.version}-bin.tar.gz
-
- org.apache.maven.plugins
diff --git a/driver-api/pom.xml b/driver-api/pom.xml
index c26dd24d3..cbf66b392 100644
--- a/driver-api/pom.xml
+++ b/driver-api/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
- driver-api
+ driver-api
-
-
- com.fasterxml.jackson.jaxrs
- jackson-jaxrs-base
-
-
- com.fasterxml.jackson.jaxrs
- jackson-jaxrs-json-provider
-
-
- com.fasterxml.jackson.core
- jackson-annotations
-
-
- com.fasterxml.jackson.dataformat
- jackson-dataformat-yaml
-
-
- org.apache.bookkeeper.stats
- bookkeeper-stats-api
- ${bookkeeper.version}
-
-
- org.projectlombok
- lombok
-
-
+
+
+ com.fasterxml.jackson.core
+ jackson-annotations
+
+
+ com.fasterxml.jackson.dataformat
+ jackson-dataformat-yaml
+
+
+ com.fasterxml.jackson.jaxrs
+ jackson-jaxrs-base
+
+
+ com.fasterxml.jackson.jaxrs
+ jackson-jaxrs-json-provider
+
+
+ org.apache.bookkeeper.stats
+ bookkeeper-stats-api
+ ${bookkeeper.version}
+
+
+ org.projectlombok
+ lombok
+
+
diff --git a/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkConsumer.java b/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkConsumer.java
index 1788dc6a5..a8e745e88 100644
--- a/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkConsumer.java
+++ b/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkConsumer.java
@@ -13,6 +13,4 @@
*/
package io.openmessaging.benchmark.driver;
-public interface BenchmarkConsumer extends AutoCloseable {
-
-}
+public interface BenchmarkConsumer extends AutoCloseable {}
diff --git a/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkDriver.java b/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkDriver.java
index 920e3c41e..32f18f082 100644
--- a/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkDriver.java
+++ b/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkDriver.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver;
+
import java.io.File;
import java.io.IOException;
import java.util.List;
@@ -20,63 +21,73 @@
import lombok.Value;
import org.apache.bookkeeper.stats.StatsLogger;
-/**
- * Base driver interface
- */
+/** Base driver interface. */
public interface BenchmarkDriver extends AutoCloseable {
/**
- * Driver implementation can use this method to initialize the client libraries, with the provided configuration
- * file.
- *
- * The format of the configuration file is specific to the driver implementation.
- *
+ * Driver implementation can use this method to initialize the client libraries, with the provided
+ * configuration file.
+ *
+ *
The format of the configuration file is specific to the driver implementation.
+ *
* @param configurationFile
* @param statsLogger stats logger to collect stats from benchmark driver
* @throws IOException
*/
- void initialize(File configurationFile, StatsLogger statsLogger) throws IOException, InterruptedException;
+ void initialize(File configurationFile, StatsLogger statsLogger)
+ throws IOException, InterruptedException;
/**
- * Get a driver specific prefix to be used in creating multiple topic names
+ * Get a driver specific prefix to be used in creating multiple topic names.
+ *
+ * @return the topic name prefix
*/
String getTopicNamePrefix();
/**
- * Create a new topic with a given number of partitions
+ * Create a new topic with a given number of partitions.
+ *
+ * @param topic
+ * @param partitions
+ * @return a future the completes when the topic is created
*/
CompletableFuture createTopic(String topic, int partitions);
/**
- * Create a list of new topics with the given number of partitions
+ * Create a list of new topics with the given number of partitions.
+ *
+ * @param topicInfos
+ * @return a future the completes when the topics are created
*/
default CompletableFuture createTopics(List topicInfos) {
@SuppressWarnings("unchecked")
- CompletableFuture[] futures = topicInfos.stream()
- .map(topicInfo -> createTopic(topicInfo.getTopic(), topicInfo.getPartitions()))
- .toArray(CompletableFuture[]::new);
+ CompletableFuture[] futures =
+ topicInfos.stream()
+ .map(topicInfo -> createTopic(topicInfo.getTopic(), topicInfo.getPartitions()))
+ .toArray(CompletableFuture[]::new);
return CompletableFuture.allOf(futures);
}
/**
- * Create a producer for a given topic
+ * Create a producer for a given topic.
+ *
+ * @param topic
+ * @return a producer future
*/
CompletableFuture createProducer(String topic);
/**
* Create a benchmark consumer relative to one particular topic and subscription.
- *
- * It is responsibility of the driver implementation to invoke the consumerCallback each time a message
- * is received.
- *
+ *
+ *
It is responsibility of the driver implementation to invoke the consumerCallback
+ * each time a message is received.
+ *
* @param topic
* @param subscriptionName
* @param consumerCallback
- * @return
+ * @return a consumer future
*/
CompletableFuture createConsumer(
- String topic,
- String subscriptionName,
- ConsumerCallback consumerCallback);
+ String topic, String subscriptionName, ConsumerCallback consumerCallback);
@Value
class TopicInfo {
diff --git a/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkProducer.java b/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkProducer.java
index da9bf06ac..1662cf1c6 100644
--- a/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkProducer.java
+++ b/driver-api/src/main/java/io/openmessaging/benchmark/driver/BenchmarkProducer.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver;
+
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
@@ -21,12 +22,9 @@ public interface BenchmarkProducer extends AutoCloseable {
/**
* Publish a message and return a callback to track the completion of the operation.
*
- * @param key
- * the key associated with this message
- * @param payload
- * the message payload
+ * @param key the key associated with this message
+ * @param payload the message payload
* @return a future that will be triggered when the message is successfully published
*/
CompletableFuture sendAsync(Optional key, byte[] payload);
-
}
diff --git a/driver-api/src/main/java/io/openmessaging/benchmark/driver/ConsumerCallback.java b/driver-api/src/main/java/io/openmessaging/benchmark/driver/ConsumerCallback.java
index 88817009f..b68ecf451 100644
--- a/driver-api/src/main/java/io/openmessaging/benchmark/driver/ConsumerCallback.java
+++ b/driver-api/src/main/java/io/openmessaging/benchmark/driver/ConsumerCallback.java
@@ -13,29 +13,24 @@
*/
package io.openmessaging.benchmark.driver;
+
import java.nio.ByteBuffer;
-/**
- * Callback that the driver implementation calls when a message is received
- */
+/** Callback that the driver implementation calls when a message is received. */
public interface ConsumerCallback {
/**
- * Driver should invoke this method (or the ByteBuffer variant) once for each message received
- *
- * @param payload
- * the received message payload
- * @param publishTimestamp
- * the publish timestamp in milliseconds
+ * Driver should invoke this method (or the ByteBuffer variant) once for each message received.
+ *
+ * @param payload the received message payload
+ * @param publishTimestamp the publish timestamp in milliseconds
*/
void messageReceived(byte[] payload, long publishTimestamp);
/**
- * Driver should invoke this method (or the byte[] variant) once for each message received
+ * Driver should invoke this method (or the byte[] variant) once for each message received.
*
- * @param payload
- * the received message payload
- * @param publishTimestamp
- * the publish timestamp in milliseconds
+ * @param payload the received message payload
+ * @param publishTimestamp the publish timestamp in milliseconds
*/
void messageReceived(ByteBuffer payload, long publishTimestamp);
}
diff --git a/driver-artemis/pom.xml b/driver-artemis/pom.xml
index 9c1f9989a..fe8eed01d 100644
--- a/driver-artemis/pom.xml
+++ b/driver-artemis/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
- driver-artemis
+ driver-artemis
-
-
-
- ${project.groupId}
- driver-api
- ${project.version}
-
-
- org.apache.activemq
- artemis-core-client
- 2.23.1
-
-
+
+
+ ${project.groupId}
+ driver-api
+ ${project.version}
+
+
+ org.apache.activemq
+ artemis-core-client
+ 2.23.1
+
+
diff --git a/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkConsumer.java b/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkConsumer.java
index c714e71fc..e0b988219 100644
--- a/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkConsumer.java
+++ b/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkConsumer.java
@@ -13,6 +13,9 @@
*/
package io.openmessaging.benchmark.driver.artemis;
+
+import io.openmessaging.benchmark.driver.BenchmarkConsumer;
+import io.openmessaging.benchmark.driver.ConsumerCallback;
import org.apache.activemq.artemis.api.core.ActiveMQException;
import org.apache.activemq.artemis.api.core.RoutingType;
import org.apache.activemq.artemis.api.core.SimpleString;
@@ -22,31 +25,36 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import io.openmessaging.benchmark.driver.BenchmarkConsumer;
-import io.openmessaging.benchmark.driver.ConsumerCallback;
-
public class ArtemisBenchmarkConsumer implements BenchmarkConsumer {
private final ClientSession session;
private final ClientConsumer consumer;
- public ArtemisBenchmarkConsumer(String topic, String queueName, ClientSessionFactory sessionFactory, ConsumerCallback callback)
+ public ArtemisBenchmarkConsumer(
+ String topic,
+ String queueName,
+ ClientSessionFactory sessionFactory,
+ ConsumerCallback callback)
throws ActiveMQException {
session = sessionFactory.createSession();
- session.createQueue(SimpleString.toSimpleString(topic), RoutingType.MULTICAST,
- SimpleString.toSimpleString(queueName), true /* durable */);
+ session.createQueue(
+ SimpleString.toSimpleString(topic),
+ RoutingType.MULTICAST,
+ SimpleString.toSimpleString(queueName),
+ true /* durable */);
consumer = session.createConsumer(queueName);
- consumer.setMessageHandler(message -> {
- byte[] payload = new byte[message.getBodyBuffer().readableBytes()];
- message.getBodyBuffer().readBytes(payload);
- callback.messageReceived(payload, message.getTimestamp());
- try {
- message.acknowledge();
- } catch (ActiveMQException e) {
- log.warn("Failed to acknowledge message", e);
- }
- });
-
+ consumer.setMessageHandler(
+ message -> {
+ byte[] payload = new byte[message.getBodyBuffer().readableBytes()];
+ message.getBodyBuffer().readBytes(payload);
+ callback.messageReceived(payload, message.getTimestamp());
+ try {
+ message.acknowledge();
+ } catch (ActiveMQException e) {
+ log.warn("Failed to acknowledge message", e);
+ }
+ });
+
session.start();
}
diff --git a/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkDriver.java b/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkDriver.java
index ae49824c1..f1a966ca2 100644
--- a/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkDriver.java
+++ b/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkDriver.java
@@ -13,11 +13,19 @@
*/
package io.openmessaging.benchmark.driver.artemis;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import io.openmessaging.benchmark.driver.BenchmarkConsumer;
+import io.openmessaging.benchmark.driver.BenchmarkDriver;
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
+import io.openmessaging.benchmark.driver.ConsumerCallback;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ForkJoinPool;
-
import org.apache.activemq.artemis.api.core.ActiveMQException;
import org.apache.activemq.artemis.api.core.RoutingType;
import org.apache.activemq.artemis.api.core.SimpleString;
@@ -29,16 +37,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
-
-import io.openmessaging.benchmark.driver.BenchmarkConsumer;
-import io.openmessaging.benchmark.driver.BenchmarkDriver;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-import io.openmessaging.benchmark.driver.ConsumerCallback;
-
public class ArtemisBenchmarkDriver implements BenchmarkDriver {
private ArtemisConfig config;
@@ -52,7 +50,7 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
try {
ServerLocator serverLocator = ActiveMQClient.createServerLocator(config.brokerAddress);
serverLocator.setConfirmationWindowSize(1000);
-
+
sessionFactory = serverLocator.createSessionFactory();
session = sessionFactory.createSession();
} catch (Exception e) {
@@ -69,18 +67,22 @@ public String getTopicNamePrefix() {
public CompletableFuture createTopic(String topic, int partitions) {
CompletableFuture future = new CompletableFuture<>();
if (partitions != 1) {
- future.completeExceptionally(new IllegalArgumentException("Partitions are not supported in Artemis"));
+ future.completeExceptionally(
+ new IllegalArgumentException("Partitions are not supported in Artemis"));
return future;
}
- ForkJoinPool.commonPool().submit(() -> {
- try {
- session.createAddress(SimpleString.toSimpleString(topic), RoutingType.MULTICAST, true);
- future.complete(null);
- } catch (ActiveMQException e) {
- future.completeExceptionally(e);
- }
- });
+ ForkJoinPool.commonPool()
+ .submit(
+ () -> {
+ try {
+ session.createAddress(
+ SimpleString.toSimpleString(topic), RoutingType.MULTICAST, true);
+ future.complete(null);
+ } catch (ActiveMQException e) {
+ future.completeExceptionally(e);
+ }
+ });
return future;
}
@@ -97,19 +99,22 @@ public CompletableFuture createProducer(String topic) {
}
@Override
- public CompletableFuture createConsumer(String topic, String subscriptionName,
- ConsumerCallback consumerCallback) {
+ public CompletableFuture createConsumer(
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
CompletableFuture future = new CompletableFuture<>();
- ForkJoinPool.commonPool().submit(() -> {
- try {
- String queueName = topic + "-" + subscriptionName;
- BenchmarkConsumer consumer = new ArtemisBenchmarkConsumer(topic, queueName, sessionFactory,
- consumerCallback);
- future.complete(consumer);
- } catch (ActiveMQException e) {
- future.completeExceptionally(e);
- }
- });
+ ForkJoinPool.commonPool()
+ .submit(
+ () -> {
+ try {
+ String queueName = topic + "-" + subscriptionName;
+ BenchmarkConsumer consumer =
+ new ArtemisBenchmarkConsumer(
+ topic, queueName, sessionFactory, consumerCallback);
+ future.complete(consumer);
+ } catch (ActiveMQException e) {
+ future.completeExceptionally(e);
+ }
+ });
return future;
}
@@ -129,8 +134,9 @@ public void close() throws Exception {
log.info("ActiveMQ Artemis benchmark driver successfully shut down");
}
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static ArtemisConfig readConfig(File configurationFile) throws IOException {
return mapper.readValue(configurationFile, ArtemisConfig.class);
diff --git a/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkProducer.java b/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkProducer.java
index baaa8cf7c..622d37ea8 100644
--- a/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkProducer.java
+++ b/driver-artemis/src/main/java/io/openmessaging/benchmark/driver/artemis/ArtemisBenchmarkProducer.java
@@ -13,23 +13,23 @@
*/
package io.openmessaging.benchmark.driver.artemis;
+
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-
import org.apache.activemq.artemis.api.core.ActiveMQException;
import org.apache.activemq.artemis.api.core.client.ClientMessage;
import org.apache.activemq.artemis.api.core.client.ClientProducer;
import org.apache.activemq.artemis.api.core.client.ClientSession;
import org.apache.activemq.artemis.api.core.client.ClientSessionFactory;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-
public class ArtemisBenchmarkProducer implements BenchmarkProducer {
private final ClientSession session;
private final ClientProducer producer;
- public ArtemisBenchmarkProducer(String address, ClientSessionFactory sessionFactory) throws ActiveMQException {
+ public ArtemisBenchmarkProducer(String address, ClientSessionFactory sessionFactory)
+ throws ActiveMQException {
session = sessionFactory.createSession();
producer = session.createProducer(address);
session.start();
@@ -43,20 +43,21 @@ public void close() throws Exception {
@Override
public CompletableFuture sendAsync(Optional key, byte[] payload) {
- ClientMessage msg = session.createMessage(true /* durable */ );
+ ClientMessage msg = session.createMessage(true /* durable */);
msg.setTimestamp(System.currentTimeMillis());
msg.getBodyBuffer().writeBytes(payload);
CompletableFuture future = new CompletableFuture<>();
try {
- producer.send(msg, message -> {
- future.complete(null);
- });
+ producer.send(
+ msg,
+ message -> {
+ future.complete(null);
+ });
} catch (ActiveMQException e) {
future.completeExceptionally(e);
}
return future;
}
-
}
diff --git a/driver-bookkeeper/pom.xml b/driver-bookkeeper/pom.xml
index c24790138..3bbea8691 100644
--- a/driver-bookkeeper/pom.xml
+++ b/driver-bookkeeper/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
- driver-bookkeeper
-
-
- ${project.groupId}
- driver-api
- ${project.version}
-
-
- org.apache.distributedlog
- distributedlog-core-shaded
- 4.14.4
-
-
- io.netty
- netty-all
-
-
+ driver-bookkeeper
+
+
+ ${project.groupId}
+ driver-api
+ ${project.version}
+
+
+ io.netty
+ netty-all
+
+
+ org.apache.distributedlog
+ distributedlog-core-shaded
+ 4.14.4
+
+
diff --git a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/Config.java b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/Config.java
index 04b660c50..db8c83a0b 100644
--- a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/Config.java
+++ b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/Config.java
@@ -18,5 +18,4 @@ public class Config {
public String dlogUri;
public String dlogConf;
-
}
diff --git a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkConsumer.java b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkConsumer.java
index 88657e6ee..95414ef84 100644
--- a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkConsumer.java
+++ b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkConsumer.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.bookkeeper;
+
import dlshade.com.google.common.util.concurrent.ThreadFactoryBuilder;
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
import io.openmessaging.benchmark.driver.ConsumerCallback;
@@ -44,64 +45,74 @@ private static boolean backoff(long backoffTime, TimeUnit timeUnit) {
return true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- log.warn("Interrupted at backoff {} ms",
- timeUnit.toMillis(backoffTime), e);
+ log.warn("Interrupted at backoff {} ms", timeUnit.toMillis(backoffTime), e);
return false;
}
}
- public DlogBenchmarkConsumer(DistributedLogManager dlm,
- ConsumerCallback callback) {
+ @SuppressWarnings("checkstyle:LineLength")
+ public DlogBenchmarkConsumer(DistributedLogManager dlm, ConsumerCallback callback) {
this.dlm = dlm;
- this.executor = Executors.newSingleThreadExecutor(
- new ThreadFactoryBuilder().setNameFormat("dlog-benchmark-reader-thread-%d").build());
+ this.executor =
+ Executors.newSingleThreadExecutor(
+ new ThreadFactoryBuilder().setNameFormat("dlog-benchmark-reader-thread-%d").build());
- this.readerTask = executor.submit(() -> {
- LogReader reader = null;
- DLSN lastDLSN = DLSN.InitialDLSN;
- LogRecordWithDLSN record;
+ this.readerTask =
+ executor.submit(
+ () -> {
+ LogReader reader = null;
+ DLSN lastDLSN = DLSN.InitialDLSN;
+ LogRecordWithDLSN record;
- while (!closing) {
- if (null == reader) {
- try {
- reader = dlm.openLogReader(lastDLSN);
- log.info("Successfully open log reader for stream {} at {}",
- dlm.getStreamName(), lastDLSN);
- } catch (IOException e) {
- log.error("Failed to open reader of stream {} at {}",
- dlm.getStreamName(), lastDLSN, e);
- if (backoff(10, TimeUnit.SECONDS)) {
- continue;
- } else {
- break;
- }
- }
- }
+ while (!closing) {
+ if (null == reader) {
+ try {
+ reader = dlm.openLogReader(lastDLSN);
+ log.info(
+ "Successfully open log reader for stream {} at {}",
+ dlm.getStreamName(),
+ lastDLSN);
+ } catch (IOException e) {
+ log.error(
+ "Failed to open reader of stream {} at {}",
+ dlm.getStreamName(),
+ lastDLSN,
+ e);
+ if (backoff(10, TimeUnit.SECONDS)) {
+ continue;
+ } else {
+ break;
+ }
+ }
+ }
- try {
- record = reader.readNext(false);
- if (null == record) {
- try {
- Thread.sleep(1);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- }
- continue;
- }
+ try {
+ record = reader.readNext(false);
+ if (null == record) {
+ try {
+ Thread.sleep(1);
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ continue;
+ }
- callback.messageReceived(record.getPayload(), record.getTransactionId());
+ callback.messageReceived(record.getPayload(), record.getTransactionId());
- lastDLSN = record.getDlsn();
- } catch (IOException e) {
- log.info("Encountered error on reading records from reading stream {}, last record = {}",
- dlm.getStreamName(), lastDLSN, e);
- Utils.closeQuietly(reader);
- reader = null;
- }
- }
+ lastDLSN = record.getDlsn();
+ } catch (IOException e) {
+ log.info(
+ "Encountered error on reading records from reading stream {}, last record = {}",
+ dlm.getStreamName(),
+ lastDLSN,
+ e);
+ Utils.closeQuietly(reader);
+ reader = null;
+ }
+ }
- Utils.closeQuietly(reader);
- });
+ Utils.closeQuietly(reader);
+ });
}
@Override
@@ -113,5 +124,4 @@ public void close() throws Exception {
dlm.close();
}
}
-
}
diff --git a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkDriver.java b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkDriver.java
index b1ea6071c..fd60a4cdf 100644
--- a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkDriver.java
+++ b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkDriver.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.bookkeeper;
+
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
@@ -21,7 +22,6 @@
import io.openmessaging.benchmark.driver.BenchmarkDriver;
import io.openmessaging.benchmark.driver.BenchmarkProducer;
import io.openmessaging.benchmark.driver.ConsumerCallback;
-
import io.openmessaging.benchmark.driver.bookkeeper.stats.StatsLoggerAdaptor;
import java.io.File;
import java.io.IOException;
@@ -38,14 +38,13 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-/**
- * Benchmark driver testing distributedlog.
- */
+/** Benchmark driver testing distributedlog. */
public class DlogBenchmarkDriver implements BenchmarkDriver {
private static final Logger log = LoggerFactory.getLogger(DlogBenchmarkProducer.class);
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private Config config;
private Namespace namespace;
@@ -66,13 +65,10 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
URI dlogUri = URI.create(config.dlogUri);
dlshade.org.apache.bookkeeper.stats.StatsLogger dlStatsLogger =
- new CachingStatsLogger(new StatsLoggerAdaptor(statsLogger.scope("dlog")));
+ new CachingStatsLogger(new StatsLoggerAdaptor(statsLogger.scope("dlog")));
- namespace = NamespaceBuilder.newBuilder()
- .conf(conf)
- .uri(dlogUri)
- .statsLogger(dlStatsLogger)
- .build();
+ namespace =
+ NamespaceBuilder.newBuilder().conf(conf).uri(dlogUri).statsLogger(dlStatsLogger).build();
log.info("Initialized distributedlog namespace at {}", dlogUri);
}
@@ -88,53 +84,53 @@ private static String getFullyQualifiedPartitionedStreamName(String topic, int p
@Override
public CompletableFuture createTopic(String topic, int partitions) {
- return CompletableFuture.runAsync(() -> {
- try {
- namespace.createLog(topic);
- if (partitions > 1) {
- for (int i = 0; i < partitions; i++) {
- namespace.createLog(getFullyQualifiedPartitionedStreamName(topic, i));
+ return CompletableFuture.runAsync(
+ () -> {
+ try {
+ namespace.createLog(topic);
+ if (partitions > 1) {
+ for (int i = 0; i < partitions; i++) {
+ namespace.createLog(getFullyQualifiedPartitionedStreamName(topic, i));
+ }
+ }
+                log.info("Successfully created topic {} with {} partitions", topic, partitions);
+ } catch (IOException ioe) {
+ log.error("Failed to create topic {} with {} partitions", topic, partitions, ioe);
+ throw new RuntimeException(ioe);
}
- }
- log.info("Successfully create topic {} with {} partitions", topic, partitions);
- } catch (IOException ioe) {
- log.error("Failed to create topic {} with {} partitions",
- topic, partitions, ioe);
- throw new RuntimeException(ioe);
- }
- });
+ });
}
@Override
public CompletableFuture createProducer(String topic) {
- return CompletableFuture.supplyAsync(() -> {
- try {
- DistributedLogManager dlm = namespace.openLog(topic);
- log.info("Open stream {} for producer", topic);
- return dlm;
- } catch (IOException ioe) {
- throw new RuntimeException(ioe);
- }
- })
- .thenCompose(dlm -> dlm.openAsyncLogWriter())
- .thenApply(writer -> new DlogBenchmarkProducer(writer));
+ return CompletableFuture.supplyAsync(
+ () -> {
+ try {
+ DistributedLogManager dlm = namespace.openLog(topic);
+ log.info("Open stream {} for producer", topic);
+ return dlm;
+ } catch (IOException ioe) {
+ throw new RuntimeException(ioe);
+ }
+ })
+ .thenCompose(dlm -> dlm.openAsyncLogWriter())
+ .thenApply(writer -> new DlogBenchmarkProducer(writer));
}
@Override
public CompletableFuture createConsumer(
- String topic,
- String subscriptionName,
- ConsumerCallback consumerCallback) {
- return CompletableFuture.supplyAsync(() -> {
- try {
- DistributedLogManager dlm = namespace.openLog(topic);
- log.info("Open stream {} for consumer", topic);
- return dlm;
- } catch (IOException ioe) {
- throw new RuntimeException(ioe);
- }
- })
- .thenApply(dlm -> new DlogBenchmarkConsumer(dlm, consumerCallback));
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
+ return CompletableFuture.supplyAsync(
+ () -> {
+ try {
+ DistributedLogManager dlm = namespace.openLog(topic);
+ log.info("Open stream {} for consumer", topic);
+ return dlm;
+ } catch (IOException ioe) {
+ throw new RuntimeException(ioe);
+ }
+ })
+ .thenApply(dlm -> new DlogBenchmarkConsumer(dlm, consumerCallback));
}
@Override
@@ -147,5 +143,4 @@ public void close() throws Exception {
log.info("BookKeeper benchmark driver successfully shut down");
}
-
}
diff --git a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkProducer.java b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkProducer.java
index ed2f667a3..43f7399a7 100644
--- a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkProducer.java
+++ b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/DlogBenchmarkProducer.java
@@ -13,15 +13,14 @@
*/
package io.openmessaging.benchmark.driver.bookkeeper;
+
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-
import org.apache.distributedlog.LogRecord;
import org.apache.distributedlog.api.AsyncLogWriter;
import org.apache.distributedlog.util.TimeSequencer;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-
public class DlogBenchmarkProducer implements BenchmarkProducer {
private final AsyncLogWriter writer;
@@ -39,10 +38,8 @@ public void close() throws Exception {
@Override
public CompletableFuture sendAsync(Optional key, byte[] payload) {
- LogRecord record = new LogRecord(
- sequencer.nextId(), payload);
+ LogRecord record = new LogRecord(sequencer.nextId(), payload);
return writer.write(record).thenApply(dlsn -> null);
}
-
}
diff --git a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/CounterAdaptor.java b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/CounterAdaptor.java
index 61d359969..95d908774 100644
--- a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/CounterAdaptor.java
+++ b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/CounterAdaptor.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.bookkeeper.stats;
+
import org.apache.bookkeeper.stats.Counter;
class CounterAdaptor implements dlshade.org.apache.bookkeeper.stats.Counter {
diff --git a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/GaugeAdaptor.java b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/GaugeAdaptor.java
index 3900ef854..a675a7484 100644
--- a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/GaugeAdaptor.java
+++ b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/GaugeAdaptor.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.bookkeeper.stats;
+
import org.apache.bookkeeper.stats.Gauge;
class GaugeAdaptor implements Gauge {
diff --git a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/OpStatsLoggerAdaptor.java b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/OpStatsLoggerAdaptor.java
index 4f0c4fbdd..8f3d16680 100644
--- a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/OpStatsLoggerAdaptor.java
+++ b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/OpStatsLoggerAdaptor.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.bookkeeper.stats;
+
import dlshade.org.apache.bookkeeper.stats.OpStatsData;
import java.util.concurrent.TimeUnit;
import org.apache.bookkeeper.stats.OpStatsLogger;
diff --git a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/StatsLoggerAdaptor.java b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/StatsLoggerAdaptor.java
index 37337557d..7f59c2db7 100644
--- a/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/StatsLoggerAdaptor.java
+++ b/driver-bookkeeper/src/main/java/io/openmessaging/benchmark/driver/bookkeeper/stats/StatsLoggerAdaptor.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.bookkeeper.stats;
+
import dlshade.com.google.common.collect.Maps;
import java.util.concurrent.ConcurrentMap;
import org.apache.bookkeeper.stats.Gauge;
@@ -22,7 +23,8 @@ public class StatsLoggerAdaptor implements dlshade.org.apache.bookkeeper.stats.S
private final StatsLogger statsLogger;
private final ConcurrentMap gauges;
- private final ConcurrentMap statsLoggers;
+ private final ConcurrentMap
+ statsLoggers;
public StatsLoggerAdaptor(StatsLogger statsLogger) {
this.statsLogger = statsLogger;
@@ -41,14 +43,16 @@ public dlshade.org.apache.bookkeeper.stats.Counter getCounter(String name) {
}
@Override
- public void registerGauge(String name, dlshade.org.apache.bookkeeper.stats.Gauge gauge) {
+ public void registerGauge(
+ String name, dlshade.org.apache.bookkeeper.stats.Gauge gauge) {
Gauge gaugeAdaptor = new GaugeAdaptor<>(gauge);
statsLogger.registerGauge(name, gaugeAdaptor);
gauges.put(gauge, gaugeAdaptor);
}
@Override
- public void unregisterGauge(String name, dlshade.org.apache.bookkeeper.stats.Gauge gauge) {
+ public void unregisterGauge(
+ String name, dlshade.org.apache.bookkeeper.stats.Gauge gauge) {
Gauge gaugeAdaptor = gauges.remove(gauge);
if (null != gaugeAdaptor) {
statsLogger.unregisterGauge(name, gaugeAdaptor);
@@ -64,7 +68,8 @@ public dlshade.org.apache.bookkeeper.stats.StatsLogger scope(String name) {
}
@Override
- public void removeScope(String name, dlshade.org.apache.bookkeeper.stats.StatsLogger dlShadeStatsLogger) {
+ public void removeScope(
+ String name, dlshade.org.apache.bookkeeper.stats.StatsLogger dlShadeStatsLogger) {
StatsLogger scopedStatsLogger = statsLoggers.remove(dlShadeStatsLogger);
if (null != scopedStatsLogger) {
statsLogger.removeScope(name, scopedStatsLogger);
diff --git a/driver-jms/README.md b/driver-jms/README.md
index 360c6ebba..3792d8bc3 100644
--- a/driver-jms/README.md
+++ b/driver-jms/README.md
@@ -13,10 +13,12 @@ Rather than simply dropping a JMS Client Library into `/opt/benchmark/lib` the l
Follow these instructions to compile the openmessaging benchmark for Fast JMS for Apache Pulsar
- Build the openmessaging benchmark package as you would normally
+
```
mvn clean package
```
- Run the repacking script
+
```
bash driver-jms/package-pulsar.sh
```
@@ -28,6 +30,7 @@ You can now deploy to AWS from `driver-pulsar/deploy`.
Follow the [Confluent instructions][1] to create a fat jar.
- Create a directory
+
```
cd ~
mkdir kafka-jms-client
@@ -36,6 +39,7 @@ Follow the [Confluent instructions][1] to create a fat jar.
- Create the pom.xml
- Change `http://packages.confluent.io/maven/` to `https://packages.confluent.io/maven/`
- Build the fat jar
+
```
mvn clean package
```
@@ -43,10 +47,12 @@ Follow the [Confluent instructions][1] to create a fat jar.
Follow these instructions to compile the openmessaging benchmark for Confluent JMS Client
- Build the openmessaging benchmark package
+
```
mvn clean package
```
- Run the repacking script passing in the location of the fat jar. EG. `~/kafka-jms-client/target/kafka-jms-client-fat-6.2.1.jar`
+
```
bash driver-jms/package-kafka.sh /path/to/the/kafka-jms-client.jar
```
@@ -60,6 +66,5 @@ For Pulsar JMS (and likely Kafka) you will likely want to allocate additional co
- Edit your `terraform.tfvars` file to adjust `num_instances["client"]`.
- Run `bin/benchmark` with the `--extra` option to allocate more workers as consumers.
+[1]: https://docs.confluent.io/platform/current/clients/kafka-jms-client/installation.html#appendix-1
-
-[1]: https://docs.confluent.io/platform/current/clients/kafka-jms-client/installation.html#appendix-1
\ No newline at end of file
diff --git a/driver-jms/pom.xml b/driver-jms/pom.xml
index b8cac2434..12101a80f 100644
--- a/driver-jms/pom.xml
+++ b/driver-jms/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
- driver-jms
-
- 2.0.3
-
+ driver-jms
+
+ 2.0.3
+
-
-
- ${project.groupId}
- driver-api
- ${project.version}
-
-
- jakarta.jms
- jakarta.jms-api
- ${jms.version}
-
-
- com.google.guava
- guava
-
-
+
+
+ ${project.groupId}
+ driver-api
+ ${project.version}
+
+
+ com.google.guava
+ guava
+
+
+ jakarta.jms
+ jakarta.jms-api
+ ${jms.version}
+
+
diff --git a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkConsumer.java b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkConsumer.java
index ba2a0f520..e88fe86dc 100644
--- a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkConsumer.java
+++ b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkConsumer.java
@@ -13,18 +13,17 @@
*/
package io.openmessaging.benchmark.driver.jms;
+
+import io.openmessaging.benchmark.driver.BenchmarkConsumer;
+import io.openmessaging.benchmark.driver.ConsumerCallback;
import javax.jms.BytesMessage;
import javax.jms.Connection;
import javax.jms.Message;
import javax.jms.MessageConsumer;
import javax.jms.Session;
-
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import io.openmessaging.benchmark.driver.BenchmarkConsumer;
-import io.openmessaging.benchmark.driver.ConsumerCallback;
-
public class JMSBenchmarkConsumer implements BenchmarkConsumer {
private final Connection connection;
@@ -32,33 +31,39 @@ public class JMSBenchmarkConsumer implements BenchmarkConsumer {
private final MessageConsumer consumer;
private final boolean useGetBody;
- public JMSBenchmarkConsumer(Connection connection,
+ public JMSBenchmarkConsumer(
+ Connection connection,
Session session,
- MessageConsumer consumer, ConsumerCallback callback,
- boolean useGetBody) throws Exception {
+ MessageConsumer consumer,
+ ConsumerCallback callback,
+ boolean useGetBody)
+ throws Exception {
this.connection = connection;
this.consumer = consumer;
this.session = session;
this.useGetBody = useGetBody;
- consumer.setMessageListener(message -> {
- try {
- byte[] payload = getPayload(message);
- callback.messageReceived(payload, message.getLongProperty("E2EStartMillis"));
- message.acknowledge();
- } catch (Throwable e) {
- log.warn("Failed to acknowledge message", e);
- }
- });
+ consumer.setMessageListener(
+ message -> {
+ try {
+ byte[] payload = getPayload(message);
+ callback.messageReceived(payload, message.getLongProperty("E2EStartMillis"));
+ message.acknowledge();
+ } catch (Throwable e) {
+ log.warn("Failed to acknowledge message", e);
+ }
+ });
// Kafka JMS client does not allow you to add a listener after the connection has been started
connection.start();
}
@Override
public void close() throws Exception {
- // This exception may be thrown: java.util.concurrent.ExecutionException: java.util.ConcurrentModificationException: KafkaConsumer is not safe for multi-threaded access
- // See https://jakarta.ee/specifications/platform/8/apidocs/javax/jms/session#close--
- // and https://jakarta.ee/specifications/platform/8/apidocs/javax/jms/connection#close--
- // It should be enough to just close the connection.
+ // This exception may be thrown: java.util.concurrent.ExecutionException:
+ // java.util.ConcurrentModificationException: KafkaConsumer is not safe for multi-threaded
+ // access
+ // See https://jakarta.ee/specifications/platform/8/apidocs/javax/jms/session#close--
+ // and https://jakarta.ee/specifications/platform/8/apidocs/javax/jms/connection#close--
+ // It should be enough to just close the connection.
connection.close();
}
diff --git a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkDriver.java b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkDriver.java
index ef20d38d5..221c30bc3 100644
--- a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkDriver.java
+++ b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkDriver.java
@@ -13,6 +13,16 @@
*/
package io.openmessaging.benchmark.driver.jms;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import io.openmessaging.benchmark.driver.BenchmarkConsumer;
+import io.openmessaging.benchmark.driver.BenchmarkDriver;
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
+import io.openmessaging.benchmark.driver.ConsumerCallback;
+import io.openmessaging.benchmark.driver.jms.config.JMSConfig;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
@@ -21,28 +31,16 @@
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.CompletableFuture;
-
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.Destination;
import javax.jms.MessageConsumer;
import javax.jms.Session;
import javax.jms.Topic;
-
import org.apache.bookkeeper.stats.StatsLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
-import io.openmessaging.benchmark.driver.BenchmarkConsumer;
-import io.openmessaging.benchmark.driver.BenchmarkDriver;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-import io.openmessaging.benchmark.driver.ConsumerCallback;
-import io.openmessaging.benchmark.driver.jms.config.JMSConfig;
-
public class JMSBenchmarkDriver implements BenchmarkDriver {
private ConnectionFactory connectionFactory;
@@ -55,35 +53,46 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
this.config = readConfig(configurationFile);
log.info("JMS driver configuration: {}", writer.writeValueAsString(config));
- if (config.delegateForAdminOperationsClassName != null && !config.delegateForAdminOperationsClassName.isEmpty()) {
- log.info("Initializing Driver for Admin operations {}", config.delegateForAdminOperationsClassName);
- try
- {
- delegateForAdminOperations = (BenchmarkDriver) Class.forName(config.delegateForAdminOperationsClassName,
- true, JMSBenchmarkDriver.class.getClassLoader())
- .getConstructor().newInstance();
+ if (config.delegateForAdminOperationsClassName != null
+ && !config.delegateForAdminOperationsClassName.isEmpty()) {
+ log.info(
+ "Initializing Driver for Admin operations {}",
+ config.delegateForAdminOperationsClassName);
+ try {
+ delegateForAdminOperations =
+ (BenchmarkDriver)
+ Class.forName(
+ config.delegateForAdminOperationsClassName,
+ true,
+ JMSBenchmarkDriver.class.getClassLoader())
+ .getConstructor()
+ .newInstance();
delegateForAdminOperations.initialize(configurationFile, statsLogger);
- }
- catch (Throwable e)
- {
- log.error("Cannot created delegate driver " + config.delegateForAdminOperationsClassName, e);
+ } catch (Throwable e) {
+ log.error(
+            "Cannot create delegate driver " + config.delegateForAdminOperationsClassName, e);
throw new IOException(e);
}
}
- try
- {
+ try {
connectionFactory = buildConnectionFactory();
connection = connectionFactory.createConnection();
connection.start();
} catch (Throwable t) {
- log.error("Cannot initialize connectionFactoryClassName = "+config.connectionFactoryClassName, t);
+ log.error(
+ "Cannot initialize connectionFactoryClassName = " + config.connectionFactoryClassName, t);
throw new IOException(t);
}
}
private ConnectionFactory buildConnectionFactory() throws Exception {
- Class clazz = (Class) Class.forName(config.connectionFactoryClassName, true, Thread.currentThread().getContextClassLoader());
+ Class clazz =
+ (Class)
+ Class.forName(
+ config.connectionFactoryClassName,
+ true,
+ Thread.currentThread().getContextClassLoader());
// constructor with a String (like DataStax Pulsar JMS)
try {
@@ -97,7 +106,8 @@ private ConnectionFactory buildConnectionFactory() throws Exception {
Constructor constructor = clazz.getConstructor(Properties.class);
Properties props = new Properties();
ObjectMapper mapper = new ObjectMapper();
- Map map = mapper.readValue(new StringReader(config.connectionFactoryConfigurationParam), Map.class);
+ Map map =
+ mapper.readValue(new StringReader(config.connectionFactoryConfigurationParam), Map.class);
props.putAll(map);
return constructor.newInstance(props);
} catch (NoSuchMethodException ignore) {
@@ -126,11 +136,14 @@ public CompletableFuture createTopic(String topic, int partitions) {
public CompletableFuture createProducer(String topic) {
try {
if (config.sendWithTransactions) {
- return CompletableFuture.completedFuture(new JMSBenchmarkTransactionProducer(connection, topic, config.use20api, config.properties));
+ return CompletableFuture.completedFuture(
+ new JMSBenchmarkTransactionProducer(
+ connection, topic, config.use20api, config.properties));
} else {
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
Destination destination = session.createTopic(topic);
- return CompletableFuture.completedFuture(new JMSBenchmarkProducer(session, destination, config.use20api, config.properties));
+ return CompletableFuture.completedFuture(
+ new JMSBenchmarkProducer(session, destination, config.use20api, config.properties));
}
} catch (Exception err) {
CompletableFuture res = new CompletableFuture<>();
@@ -140,22 +153,28 @@ public CompletableFuture createProducer(String topic) {
}
@Override
- public CompletableFuture createConsumer(String topic, String subscriptionName,
- ConsumerCallback consumerCallback) {
+ public CompletableFuture createConsumer(
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
try {
- String selector = config.messageSelector != null && !config.messageSelector.isEmpty() ? config.messageSelector : null;
+ String selector =
+ config.messageSelector != null && !config.messageSelector.isEmpty()
+ ? config.messageSelector
+ : null;
Connection connection = connectionFactory.createConnection();
Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
Topic destination = session.createTopic(topic);
MessageConsumer durableConsumer;
if (config.use20api) {
- durableConsumer = session.createSharedDurableConsumer(destination, subscriptionName, selector);
+ durableConsumer =
+ session.createSharedDurableConsumer(destination, subscriptionName, selector);
} else {
// in JMS 1.0 we should use session.createDurableSubscriber()
// but it is not supported in Confluent Kafka JMS client
durableConsumer = session.createConsumer(destination, selector);
}
- return CompletableFuture.completedFuture(new JMSBenchmarkConsumer(connection, session, durableConsumer, consumerCallback, config.use20api));
+ return CompletableFuture.completedFuture(
+ new JMSBenchmarkConsumer(
+ connection, session, durableConsumer, consumerCallback, config.use20api));
} catch (Exception err) {
CompletableFuture res = new CompletableFuture<>();
res.completeExceptionally(err);
@@ -178,7 +197,8 @@ public void close() throws Exception {
}
}
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static JMSConfig readConfig(File configurationFile) throws IOException {
diff --git a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkProducer.java b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkProducer.java
index 2a1718066..b6ae05a69 100644
--- a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkProducer.java
+++ b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkProducer.java
@@ -13,11 +13,13 @@
*/
package io.openmessaging.benchmark.driver.jms;
+
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
+import io.openmessaging.benchmark.driver.jms.config.JMSConfig;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-
import javax.jms.BytesMessage;
import javax.jms.CompletionListener;
import javax.jms.Destination;
@@ -25,13 +27,9 @@
import javax.jms.Message;
import javax.jms.MessageProducer;
import javax.jms.Session;
-
-import io.openmessaging.benchmark.driver.jms.config.JMSConfig;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-
public class JMSBenchmarkProducer implements BenchmarkProducer {
private final Session session;
@@ -39,7 +37,13 @@ public class JMSBenchmarkProducer implements BenchmarkProducer {
private final MessageProducer producer;
private final boolean useAsyncSend;
private final List properties;
- public JMSBenchmarkProducer(Session session, Destination destination, boolean useAsyncSend, List properties) throws Exception {
+
+ public JMSBenchmarkProducer(
+ Session session,
+ Destination destination,
+ boolean useAsyncSend,
+ List properties)
+ throws Exception {
this.session = session;
this.destination = destination;
this.useAsyncSend = useAsyncSend;
@@ -55,36 +59,33 @@ public void close() throws Exception {
@Override
public CompletableFuture sendAsync(Optional key, byte[] payload) {
CompletableFuture res = new CompletableFuture<>();
- try
- {
+ try {
BytesMessage bytesMessage = session.createBytesMessage();
bytesMessage.writeBytes(payload);
- if (key.isPresent())
- {
+ if (key.isPresent()) {
// a behaviour similar to https://activemq.apache.org/message-groups
bytesMessage.setStringProperty("JMSXGroupID", key.get());
}
for (JMSConfig.AddProperty prop : properties) {
bytesMessage.setStringProperty(prop.name, prop.value);
}
- // Add a timer property for end to end
- bytesMessage.setLongProperty("E2EStartMillis",System.currentTimeMillis());
+ // Add a timer property for end to end
+ bytesMessage.setLongProperty("E2EStartMillis", System.currentTimeMillis());
if (useAsyncSend) {
- producer.send(bytesMessage, new CompletionListener()
- {
- @Override
- public void onCompletion(Message message)
- {
- res.complete(null);
- }
+ producer.send(
+ bytesMessage,
+ new CompletionListener() {
+ @Override
+ public void onCompletion(Message message) {
+ res.complete(null);
+ }
- @Override
- public void onException(Message message, Exception exception)
- {
- log.error("send completed with error", exception);
- res.completeExceptionally(exception);
- }
- });
+ @Override
+ public void onException(Message message, Exception exception) {
+ log.error("send completed with error", exception);
+ res.completeExceptionally(exception);
+ }
+ });
} else {
producer.send(bytesMessage);
res.complete(null);
@@ -94,5 +95,6 @@ public void onException(Message message, Exception exception)
}
return res;
}
+
private static final Logger log = LoggerFactory.getLogger(JMSBenchmarkProducer.class);
}
diff --git a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkTransactionProducer.java b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkTransactionProducer.java
index fc3ca70a6..01087a9cb 100644
--- a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkTransactionProducer.java
+++ b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/JMSBenchmarkTransactionProducer.java
@@ -13,17 +13,23 @@
*/
package io.openmessaging.benchmark.driver.jms;
+
import io.openmessaging.benchmark.driver.BenchmarkProducer;
import io.openmessaging.benchmark.driver.jms.config.JMSConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.jms.*;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
+import javax.jms.BytesMessage;
+import javax.jms.CompletionListener;
+import javax.jms.Connection;
+import javax.jms.JMSException;
+import javax.jms.Message;
+import javax.jms.MessageProducer;
+import javax.jms.Session;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
public class JMSBenchmarkTransactionProducer implements BenchmarkProducer {
@@ -31,7 +37,13 @@ public class JMSBenchmarkTransactionProducer implements BenchmarkProducer {
private final boolean useAsyncSend;
private final Connection connection;
private final List properties;
- public JMSBenchmarkTransactionProducer(Connection connection, String destination, boolean useAsyncSend, List properties) throws Exception {
+
+ public JMSBenchmarkTransactionProducer(
+ Connection connection,
+ String destination,
+ boolean useAsyncSend,
+ List properties)
+ throws Exception {
this.destination = destination;
this.useAsyncSend = useAsyncSend;
this.connection = connection;
@@ -39,57 +51,54 @@ public JMSBenchmarkTransactionProducer(Connection connection, String destination
}
@Override
- public void close() {
- }
+ public void close() {}
@Override
public CompletableFuture sendAsync(Optional key, byte[] payload) {
- try
- {
+ try {
// start a new Session every time, we cannot share the same Session
// among the Producers because we want to have control over the commit operation
Session session = connection.createSession(true, Session.SESSION_TRANSACTED);
MessageProducer producer = session.createProducer(session.createTopic(destination));
BytesMessage bytesMessage = session.createBytesMessage();
bytesMessage.writeBytes(payload);
- if (key.isPresent())
- {
+ if (key.isPresent()) {
// a behaviour similar to https://activemq.apache.org/message-groups
bytesMessage.setStringProperty("JMSXGroupID", key.get());
}
for (JMSConfig.AddProperty prop : properties) {
bytesMessage.setStringProperty(prop.name, prop.value);
}
- // Add a timer property for end to end
- bytesMessage.setLongProperty("E2EStartMillis",System.currentTimeMillis());
+ // Add a timer property for end to end
+ bytesMessage.setLongProperty("E2EStartMillis", System.currentTimeMillis());
if (useAsyncSend) {
CompletableFuture res = new CompletableFuture<>();
- producer.send(bytesMessage, new CompletionListener()
- {
- @Override
- public void onCompletion(Message message)
- {
- res.complete(null);
- }
+ producer.send(
+ bytesMessage,
+ new CompletionListener() {
+ @Override
+ public void onCompletion(Message message) {
+ res.complete(null);
+ }
- @Override
- public void onException(Message message, Exception exception)
- {
- log.info("send completed with error", exception);
- res.completeExceptionally(exception);
- }
- });
- return res.whenCompleteAsync((msg, error) -> {
- if (error == null) {
- // you cannot close the producer and session inside the CompletionListener
- try {
- session.commit();
- } catch (JMSException err) {
- throw new CompletionException(err);
- }
- }
- ensureClosed(producer, session);;
- });
+ @Override
+ public void onException(Message message, Exception exception) {
+ log.info("send completed with error", exception);
+ res.completeExceptionally(exception);
+ }
+ });
+ return res.whenCompleteAsync(
+ (msg, error) -> {
+ if (error == null) {
+ // you cannot close the producer and session inside the CompletionListener
+ try {
+ session.commit();
+ } catch (JMSException err) {
+ throw new CompletionException(err);
+ }
+ }
+ ensureClosed(producer, session);
+ });
} else {
try {
@@ -107,10 +116,9 @@ public void onException(Message message, Exception exception)
res.completeExceptionally(err);
return res;
}
-
}
- private void ensureClosed(MessageProducer producer, Session session) {
+ private void ensureClosed(MessageProducer producer, Session session) {
try {
producer.close();
} catch (Throwable err) {
diff --git a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/config/JMSConfig.java b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/config/JMSConfig.java
index e550872d5..3b936024c 100644
--- a/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/config/JMSConfig.java
+++ b/driver-jms/src/main/java/io/openmessaging/benchmark/driver/jms/config/JMSConfig.java
@@ -14,11 +14,11 @@
package io.openmessaging.benchmark.driver.jms.config;
+
import java.util.ArrayList;
import java.util.List;
-public class JMSConfig
-{
+public class JMSConfig {
public String connectionFactoryClassName = "";
public String connectionFactoryConfigurationParam = "";
diff --git a/driver-kafka/deploy/README.md b/driver-kafka/deploy/README.md
index 67f1fadbf..9cadfd6c9 100644
--- a/driver-kafka/deploy/README.md
+++ b/driver-kafka/deploy/README.md
@@ -9,8 +9,8 @@ Customize the instance types in the terraform.tfvars.
If you choose larger instances, they come with more drives. To include those in the benchmarks you must:
- - update the Ansible script to include them in the mount and filesystem tasks
- - update the server.properties to include them in the logs.dir config
+- update the Ansible script to include them in the mount and filesystem tasks
+- update the server.properties to include them in the logs.dir config
NOTE: When using d2 instances, the instance stores are not automatically generated. You must add them to the provision-kafka-aws.tf file.
@@ -34,4 +34,4 @@ For instructions on how to run a benchmark see the [Kafka instructions](http://o
When using 4 client VMs or less you may see lower throughput when using compression. Compression is performed by the producers and consumers only (when using defaults) and clients need to be spread across more VMs to see any throughput gains.
-Obviously, throughput may not be your primary goal when using compression.
\ No newline at end of file
+Obviously, throughput may not be your primary goal when using compression.
diff --git a/driver-kafka/pom.xml b/driver-kafka/pom.xml
index 6619a4cc9..176bdfff4 100644
--- a/driver-kafka/pom.xml
+++ b/driver-kafka/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
- driver-kafka
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
+ driver-kafka
-
-
- ${project.groupId}
- driver-api
- ${project.version}
-
-
- org.apache.kafka
- kafka-clients
- 3.2.1
-
-
- org.projectlombok
- lombok
-
-
- org.assertj
- assertj-core
-
-
- org.apache.logging.log4j
- log4j-slf4j-impl
- test
-
-
- org.junit.jupiter
- junit-jupiter
-
-
- org.mockito
- mockito-junit-jupiter
-
-
+
+
+ ${project.groupId}
+ driver-api
+ ${project.version}
+
+
+ org.apache.kafka
+ kafka-clients
+ 3.2.1
+
+
+ org.projectlombok
+ lombok
+ provided
+
+
+ org.apache.logging.log4j
+ log4j-slf4j-impl
+ test
+
+
+ org.assertj
+ assertj-core
+ test
+
+
+ org.junit.jupiter
+ junit-jupiter
+ test
+
+
+ org.mockito
+ mockito-junit-jupiter
+ test
+
+
diff --git a/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkConsumer.java b/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkConsumer.java
index 171c852e5..e07b417d9 100644
--- a/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkConsumer.java
+++ b/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkConsumer.java
@@ -13,6 +13,9 @@
*/
package io.openmessaging.benchmark.driver.kafka;
+
+import io.openmessaging.benchmark.driver.BenchmarkConsumer;
+import io.openmessaging.benchmark.driver.ConsumerCallback;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
@@ -20,16 +23,12 @@
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
-
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
-
-import io.openmessaging.benchmark.driver.BenchmarkConsumer;
-import io.openmessaging.benchmark.driver.ConsumerCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -44,41 +43,50 @@ public class KafkaBenchmarkConsumer implements BenchmarkConsumer {
private volatile boolean closing = false;
private boolean autoCommit;
- public KafkaBenchmarkConsumer(KafkaConsumer consumer,
- Properties consumerConfig,
- ConsumerCallback callback) {
+ public KafkaBenchmarkConsumer(
+ KafkaConsumer consumer,
+ Properties consumerConfig,
+ ConsumerCallback callback) {
this(consumer, consumerConfig, callback, 100L);
}
- public KafkaBenchmarkConsumer(KafkaConsumer consumer,
- Properties consumerConfig,
- ConsumerCallback callback,
- long pollTimeoutMs) {
+ public KafkaBenchmarkConsumer(
+ KafkaConsumer consumer,
+ Properties consumerConfig,
+ ConsumerCallback callback,
+ long pollTimeoutMs) {
this.consumer = consumer;
this.executor = Executors.newSingleThreadExecutor();
- this.autoCommit= Boolean.valueOf((String)consumerConfig.getOrDefault(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,"false"));
- this.consumerTask = this.executor.submit(() -> {
- while (!closing) {
- try {
- ConsumerRecords records = consumer.poll(Duration.ofMillis(pollTimeoutMs));
+ this.autoCommit =
+ Boolean.valueOf(
+ (String)
+ consumerConfig.getOrDefault(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"));
+ this.consumerTask =
+ this.executor.submit(
+ () -> {
+ while (!closing) {
+ try {
+ ConsumerRecords records =
+ consumer.poll(Duration.ofMillis(pollTimeoutMs));
- Map offsetMap = new HashMap<>();
- for (ConsumerRecord record : records) {
- callback.messageReceived(record.value(), record.timestamp());
+ Map offsetMap = new HashMap<>();
+ for (ConsumerRecord record : records) {
+ callback.messageReceived(record.value(), record.timestamp());
- offsetMap.put(new TopicPartition(record.topic(), record.partition()),
- new OffsetAndMetadata(record.offset()+1));
- }
+ offsetMap.put(
+ new TopicPartition(record.topic(), record.partition()),
+ new OffsetAndMetadata(record.offset() + 1));
+ }
- if (!autoCommit&&!offsetMap.isEmpty()) {
- // Async commit all messages polled so far
- consumer.commitAsync(offsetMap, null);
- }
- } catch(Exception e){
- log.error("exception occur while consuming message", e);
- }
- }
- });
+ if (!autoCommit && !offsetMap.isEmpty()) {
+ // Async commit all messages polled so far
+ consumer.commitAsync(offsetMap, null);
+ }
+ } catch (Exception e) {
+ log.error("exception occur while consuming message", e);
+ }
+ }
+ });
}
@Override
@@ -88,5 +96,4 @@ public void close() throws Exception {
consumerTask.get();
consumer.close();
}
-
}
diff --git a/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkDriver.java b/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkDriver.java
index 605d42257..a2789ac2a 100644
--- a/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkDriver.java
+++ b/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkDriver.java
@@ -13,6 +13,14 @@
*/
package io.openmessaging.benchmark.driver.kafka;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import io.openmessaging.benchmark.driver.BenchmarkConsumer;
+import io.openmessaging.benchmark.driver.BenchmarkDriver;
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
+import io.openmessaging.benchmark.driver.ConsumerCallback;
import java.io.File;
import java.io.IOException;
import java.io.StringReader;
@@ -24,7 +32,6 @@
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
-
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.consumer.ConsumerConfig;
@@ -36,15 +43,6 @@
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
-
-import io.openmessaging.benchmark.driver.BenchmarkConsumer;
-import io.openmessaging.benchmark.driver.BenchmarkDriver;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-import io.openmessaging.benchmark.driver.ConsumerCallback;
-
public class KafkaBenchmarkDriver implements BenchmarkDriver {
private Config config;
@@ -68,14 +66,18 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
producerProperties = new Properties();
commonProperties.forEach((key, value) -> producerProperties.put(key, value));
producerProperties.load(new StringReader(config.producerConfig));
- producerProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
- producerProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
+ producerProperties.put(
+ ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
+ producerProperties.put(
+ ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
consumerProperties = new Properties();
commonProperties.forEach((key, value) -> consumerProperties.put(key, value));
consumerProperties.load(new StringReader(config.consumerConfig));
- consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
- consumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
+ consumerProperties.put(
+ ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
+ consumerProperties.put(
+ ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
topicProperties = new Properties();
topicProperties.load(new StringReader(config.topicConfig));
@@ -97,7 +99,8 @@ public CompletableFuture createTopic(String topic, int partitions) {
public CompletableFuture createTopics(List topicInfos) {
@SuppressWarnings({"unchecked", "rawtypes"})
Map topicConfigs = new HashMap<>((Map) topicProperties);
- KafkaTopicCreator topicCreator = new KafkaTopicCreator(admin, topicConfigs, config.replicationFactor);
+ KafkaTopicCreator topicCreator =
+ new KafkaTopicCreator(admin, topicConfigs, config.replicationFactor);
return topicCreator.create(topicInfos);
}
@@ -118,22 +121,22 @@ public CompletableFuture createProducer(String topic) {
}
@Override
- public CompletableFuture createConsumer(String topic, String subscriptionName,
- ConsumerCallback consumerCallback) {
+ public CompletableFuture createConsumer(
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
Properties properties = new Properties();
consumerProperties.forEach((key, value) -> properties.put(key, value));
properties.put(ConsumerConfig.GROUP_ID_CONFIG, subscriptionName);
KafkaConsumer consumer = new KafkaConsumer<>(properties);
try {
consumer.subscribe(Arrays.asList(topic));
- return CompletableFuture.completedFuture(new KafkaBenchmarkConsumer(consumer,consumerProperties,consumerCallback));
+ return CompletableFuture.completedFuture(
+ new KafkaBenchmarkConsumer(consumer, consumerProperties, consumerCallback));
} catch (Throwable t) {
consumer.close();
CompletableFuture future = new CompletableFuture<>();
future.completeExceptionally(t);
return future;
}
-
}
@Override
@@ -148,6 +151,7 @@ public void close() throws Exception {
admin.close();
}
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
diff --git a/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkProducer.java b/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkProducer.java
index 30c10b509..6c62d9fbc 100644
--- a/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkProducer.java
+++ b/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaBenchmarkProducer.java
@@ -26,15 +26,13 @@
*/
package io.openmessaging.benchmark.driver.kafka;
+
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-
-import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-
public class KafkaBenchmarkProducer implements BenchmarkProducer {
private final Producer producer;
@@ -51,13 +49,15 @@ public CompletableFuture sendAsync(Optional key, byte[] payload) {
CompletableFuture future = new CompletableFuture<>();
- producer.send(record, (metadata, exception) -> {
- if (exception != null) {
- future.completeExceptionally(exception);
- } else {
- future.complete(null);
- }
- });
+ producer.send(
+ record,
+ (metadata, exception) -> {
+ if (exception != null) {
+ future.completeExceptionally(exception);
+ } else {
+ future.complete(null);
+ }
+ });
return future;
}
@@ -66,5 +66,4 @@ public CompletableFuture sendAsync(Optional key, byte[] payload) {
public void close() throws Exception {
producer.close();
}
-
}
diff --git a/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaTopicCreator.java b/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaTopicCreator.java
index d114d1521..82d3e38b7 100644
--- a/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaTopicCreator.java
+++ b/driver-kafka/src/main/java/io/openmessaging/benchmark/driver/kafka/KafkaTopicCreator.java
@@ -17,6 +17,7 @@
import static java.util.function.Function.identity;
import static java.util.stream.Collectors.toList;
import static java.util.stream.Collectors.toMap;
+
import io.openmessaging.benchmark.driver.BenchmarkDriver.TopicInfo;
import java.util.ArrayList;
import java.util.List;
@@ -60,21 +61,26 @@ private void createBlocking(List topicInfos) {
AtomicInteger succeeded = new AtomicInteger();
ScheduledFuture> loggingFuture =
- executor.scheduleAtFixedRate(() -> log.info("Created topics {}/{}", succeeded.get(), topicInfos.size()),
- 10, 10, SECONDS);
+ executor.scheduleAtFixedRate(
+ () -> log.info("Created topics {}/{}", succeeded.get(), topicInfos.size()),
+ 10,
+ 10,
+ SECONDS);
try {
while (succeeded.get() < topicInfos.size()) {
int batchSize = queue.drainTo(batch, maxBatchSize);
if (batchSize > 0) {
- executeBatch(batch).forEach((topicInfo, success) -> {
- if (success) {
- succeeded.incrementAndGet();
- } else {
- //noinspection ResultOfMethodCallIgnored
- queue.offer(topicInfo);
- }
- });
+ executeBatch(batch)
+ .forEach(
+ (topicInfo, success) -> {
+ if (success) {
+ succeeded.incrementAndGet();
+ } else {
+ //noinspection ResultOfMethodCallIgnored
+ queue.offer(topicInfo);
+ }
+ });
batch.clear();
}
}
@@ -85,20 +91,17 @@ private void createBlocking(List topicInfos) {
private Map executeBatch(List batch) {
log.debug("Executing batch, size: {}", batch.size());
- Map lookup = batch.stream()
- .collect(toMap(TopicInfo::getTopic, identity()));
+ Map lookup = batch.stream().collect(toMap(TopicInfo::getTopic, identity()));
- List newTopics = batch.stream()
- .map(this::newTopic)
- .collect(toList());
+ List newTopics = batch.stream().map(this::newTopic).collect(toList());
- return admin.createTopics(newTopics).values()
- .entrySet().stream()
+ return admin.createTopics(newTopics).values().entrySet().stream()
.collect(toMap(e -> lookup.get(e.getKey()), e -> isSuccess(e.getValue())));
}
private NewTopic newTopic(TopicInfo topicInfo) {
- NewTopic newTopic = new NewTopic(topicInfo.getTopic(), topicInfo.getPartitions(), replicationFactor);
+ NewTopic newTopic =
+ new NewTopic(topicInfo.getTopic(), topicInfo.getPartitions(), replicationFactor);
newTopic.configs(topicConfigs);
return newTopic;
}
diff --git a/driver-kafka/src/test/java/io/openmessaging/benchmark/driver/kafka/KafkaTopicCreatorTest.java b/driver-kafka/src/test/java/io/openmessaging/benchmark/driver/kafka/KafkaTopicCreatorTest.java
index ca5b084dc..25b38a0e1 100644
--- a/driver-kafka/src/test/java/io/openmessaging/benchmark/driver/kafka/KafkaTopicCreatorTest.java
+++ b/driver-kafka/src/test/java/io/openmessaging/benchmark/driver/kafka/KafkaTopicCreatorTest.java
@@ -20,6 +20,7 @@
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
+
import io.openmessaging.benchmark.driver.BenchmarkDriver.TopicInfo;
import java.util.HashMap;
import java.util.List;
@@ -46,12 +47,9 @@ class KafkaTopicCreatorTest {
private final int partitions = 1;
private final short replicationFactor = 1;
private final TopicInfo topicInfo = new TopicInfo(topic, partitions);
- @Mock
- private AdminClient admin;
- @Mock
- private CreateTopicsResult createTopicsResult;
- @Captor
- private ArgumentCaptor> captor;
+ @Mock private AdminClient admin;
+ @Mock private CreateTopicsResult createTopicsResult;
+ @Captor private ArgumentCaptor> captor;
private KafkaTopicCreator topicCreator;
@BeforeEach
@@ -121,4 +119,4 @@ private void assertNewTopics(List newTopics) {
assertThat(newTopic.replicationFactor()).isEqualTo(replicationFactor);
assertThat(newTopic.configs()).isSameAs(topicConfigs);
}
-}
\ No newline at end of file
+}
diff --git a/driver-kop/pom.xml b/driver-kop/pom.xml
index 5770fdfe2..1904ed9d4 100644
--- a/driver-kop/pom.xml
+++ b/driver-kop/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
- driver-kop
+ driver-kop
-
-
- ${project.groupId}
- driver-api
- ${project.version}
-
-
- ${project.groupId}
- driver-kafka
- ${project.version}
-
-
- ${project.groupId}
- driver-pulsar
- ${project.version}
-
-
- io.streamnative.pulsar.handlers
- kafka-payload-processor
- 2.10.1.7
-
-
- org.apache.commons
- commons-lang3
-
-
- org.testng
- testng
- 7.6.1
- test
-
-
+
+
+ ${project.groupId}
+ driver-api
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-kafka
+ ${project.version}
+
+
+ ${project.groupId}
+ driver-pulsar
+ ${project.version}
+
+
+ io.streamnative.pulsar.handlers
+ kafka-payload-processor
+ 2.10.1.7
+
+
+ org.apache.commons
+ commons-lang3
+
+
+ org.testng
+ testng
+ 7.6.1
+ test
+
+
diff --git a/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/KopBenchmarkDriver.java b/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/KopBenchmarkDriver.java
index d855d7c84..32ccb6ba2 100644
--- a/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/KopBenchmarkDriver.java
+++ b/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/KopBenchmarkDriver.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.kop;
+
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
@@ -61,8 +62,9 @@
public class KopBenchmarkDriver implements BenchmarkDriver {
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private final List producers = new CopyOnWriteArrayList<>();
private final List consumers = new CopyOnWriteArrayList<>();
@@ -93,23 +95,30 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
consumerProperties = new Properties();
commonProperties.forEach(consumerProperties::put);
consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
- consumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
+ consumerProperties.put(
+ ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
final PulsarConfig pulsarConfig = config.pulsarConfig;
if (config.producerType.equals(ClientType.PULSAR)) {
- producerBuilder = getPulsarClient(pulsarConfig.serviceUrl).newProducer()
- .enableBatching(pulsarConfig.batchingEnabled)
- .blockIfQueueFull(pulsarConfig.blockIfQueueFull)
- .batchingMaxPublishDelay(pulsarConfig.batchingMaxPublishDelayMs, TimeUnit.MILLISECONDS)
- .batchingMaxBytes(pulsarConfig.batchingMaxBytes)
- .maxPendingMessages(pulsarConfig.pendingQueueSize)
- .maxPendingMessagesAcrossPartitions(pulsarConfig.maxPendingMessagesAcrossPartitions);
+ producerBuilder =
+ getPulsarClient(pulsarConfig.serviceUrl)
+ .newProducer()
+ .enableBatching(pulsarConfig.batchingEnabled)
+ .blockIfQueueFull(pulsarConfig.blockIfQueueFull)
+ .batchingMaxPublishDelay(
+ pulsarConfig.batchingMaxPublishDelayMs, TimeUnit.MILLISECONDS)
+ .batchingMaxBytes(pulsarConfig.batchingMaxBytes)
+ .maxPendingMessages(pulsarConfig.pendingQueueSize)
+ .maxPendingMessagesAcrossPartitions(pulsarConfig.maxPendingMessagesAcrossPartitions);
}
if (config.consumerType.equals(ClientType.PULSAR)) {
- consumerBuilder = getPulsarClient(pulsarConfig.serviceUrl).newConsumer(Schema.BYTEBUFFER)
- .subscriptionType(SubscriptionType.Failover)
- .receiverQueueSize(pulsarConfig.receiverQueueSize)
- .maxTotalReceiverQueueSizeAcrossPartitions(pulsarConfig.maxTotalReceiverQueueSizeAcrossPartitions);
+ consumerBuilder =
+ getPulsarClient(pulsarConfig.serviceUrl)
+ .newConsumer(Schema.BYTEBUFFER)
+ .subscriptionType(SubscriptionType.Failover)
+ .receiverQueueSize(pulsarConfig.receiverQueueSize)
+ .maxTotalReceiverQueueSizeAcrossPartitions(
+ pulsarConfig.maxTotalReceiverQueueSizeAcrossPartitions);
}
}
@@ -123,13 +132,17 @@ public CompletableFuture createTopic(String topic, int partitions) {
// replicationFactor is meaningless in KoP
final NewTopic newTopic = new NewTopic(topic, partitions, (short) 1L);
final CompletableFuture future = new CompletableFuture<>();
- admin.createTopics(Collections.singletonList(newTopic)).all().whenComplete((result, throwable) -> {
- if (throwable == null) {
- future.complete(result);
- } else {
- future.completeExceptionally(throwable);
- }
- });
+ admin
+ .createTopics(Collections.singletonList(newTopic))
+ .all()
+ .whenComplete(
+ (result, throwable) -> {
+ if (throwable == null) {
+ future.complete(result);
+ } else {
+ future.completeExceptionally(throwable);
+ }
+ });
return future;
}
@@ -141,12 +154,17 @@ public CompletableFuture createProducer(String topic) {
producers.add(producer);
return CompletableFuture.completedFuture(producer);
} else if (config.consumerType.equals(ClientType.PULSAR)) {
- return producerBuilder.clone().topic(topic).createAsync().thenApply(PulsarBenchmarkProducer::new);
+ return producerBuilder
+ .clone()
+ .topic(topic)
+ .createAsync()
+ .thenApply(PulsarBenchmarkProducer::new);
} else {
throw new IllegalArgumentException("producerType " + config.producerType + " is invalid");
}
}
+ @SuppressWarnings("checkstyle:LineLength")
@Override
public CompletableFuture createConsumer(
String topic, String subscriptionName, ConsumerCallback consumerCallback) {
@@ -157,19 +175,27 @@ public CompletableFuture createConsumer(
properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
final KafkaConsumer kafkaConsumer = new KafkaConsumer<>(properties);
kafkaConsumer.subscribe(Collections.singleton(topic));
- final BenchmarkConsumer consumer = new KafkaBenchmarkConsumer(
- kafkaConsumer, properties, consumerCallback, config.pollTimeoutMs);
+ final BenchmarkConsumer consumer =
+ new KafkaBenchmarkConsumer(
+ kafkaConsumer, properties, consumerCallback, config.pollTimeoutMs);
consumers.add(consumer);
return CompletableFuture.completedFuture(consumer);
} else if (config.consumerType.equals(ClientType.PULSAR)) {
final List>> futures = new ArrayList<>();
- return client.getPartitionsForTopic(topic)
- .thenCompose(partitions -> {
- partitions.forEach(p -> futures.add(
- createInternalPulsarConsumer(p, subscriptionName, consumerCallback)));
- return FutureUtil.waitForAll(futures);
- }).thenApply(__ -> new PulsarBenchmarkConsumer(futures.stream().map(CompletableFuture::join)
- .collect(Collectors.toList())));
+ return client
+ .getPartitionsForTopic(topic)
+ .thenCompose(
+ partitions -> {
+ partitions.forEach(
+ p ->
+ futures.add(
+ createInternalPulsarConsumer(p, subscriptionName, consumerCallback)));
+ return FutureUtil.waitForAll(futures);
+ })
+ .thenApply(
+ __ ->
+ new PulsarBenchmarkConsumer(
+ futures.stream().map(CompletableFuture::join).collect(Collectors.toList())));
} else {
throw new IllegalArgumentException("consumerType " + config.consumerType + " is invalid");
}
@@ -198,19 +224,22 @@ private PulsarClient getPulsarClient(String serviceUrl) throws PulsarClientExcep
private CompletableFuture> createInternalPulsarConsumer(
String topic, String subscriptionName, ConsumerCallback callback) {
- return consumerBuilder.clone()
+ return consumerBuilder
+ .clone()
.topic(topic)
.subscriptionName(subscriptionName)
- .messagePayloadProcessor(new KafkaPayloadProcessor()) // support consuming Kafka format messages
+ .messagePayloadProcessor(
+ new KafkaPayloadProcessor()) // support consuming Kafka format messages
.poolMessages(true)
- .messageListener((c, msg) -> {
- try {
- callback.messageReceived(msg.getValue(), msg.getPublishTime());
- c.acknowledgeAsync(msg);
- } finally {
- msg.release();
- }
- })
+ .messageListener(
+ (c, msg) -> {
+ try {
+ callback.messageReceived(msg.getValue(), msg.getPublishTime());
+ c.acknowledgeAsync(msg);
+ } finally {
+ msg.release();
+ }
+ })
.subscribeAsync();
}
}
diff --git a/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/config/ClientType.java b/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/config/ClientType.java
index 1e25a856e..0f279f645 100644
--- a/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/config/ClientType.java
+++ b/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/config/ClientType.java
@@ -13,9 +13,12 @@
*/
package io.openmessaging.benchmark.driver.kop.config;
+
import com.fasterxml.jackson.annotation.JsonProperty;
public enum ClientType {
- @JsonProperty("kafka") KAFKA,
- @JsonProperty("pulsar") PULSAR
+ @JsonProperty("kafka")
+ KAFKA,
+ @JsonProperty("pulsar")
+ PULSAR
}
diff --git a/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/config/Config.java b/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/config/Config.java
index daff56a9c..36cc4b0fe 100644
--- a/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/config/Config.java
+++ b/driver-kop/src/main/java/io/openmessaging/benchmark/driver/kop/config/Config.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.kop.config;
+
import java.io.IOException;
import java.io.StringReader;
import java.util.Properties;
diff --git a/driver-kop/src/test/java/io/openmessaging/benchmark/driver/kop/KopBenchmarkDriverTest.java b/driver-kop/src/test/java/io/openmessaging/benchmark/driver/kop/KopBenchmarkDriverTest.java
index 59e6d9156..277bdb146 100644
--- a/driver-kop/src/test/java/io/openmessaging/benchmark/driver/kop/KopBenchmarkDriverTest.java
+++ b/driver-kop/src/test/java/io/openmessaging/benchmark/driver/kop/KopBenchmarkDriverTest.java
@@ -19,14 +19,14 @@
import static org.testng.Assert.assertTrue;
import static org.testng.Assert.fail;
+import io.openmessaging.benchmark.driver.kop.config.ClientType;
+import io.openmessaging.benchmark.driver.kop.config.Config;
+import io.openmessaging.benchmark.driver.kop.config.PulsarConfig;
import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Properties;
-import io.openmessaging.benchmark.driver.kop.config.ClientType;
-import io.openmessaging.benchmark.driver.kop.config.Config;
-import io.openmessaging.benchmark.driver.kop.config.PulsarConfig;
import org.testng.annotations.Test;
public class KopBenchmarkDriverTest {
diff --git a/driver-nats-streaming/pom.xml b/driver-nats-streaming/pom.xml
index 40a876e2e..a589c5277 100644
--- a/driver-nats-streaming/pom.xml
+++ b/driver-nats-streaming/pom.xml
@@ -1,3 +1,4 @@
+
-
-
+ 4.0.0
- messaging-benchmarkio.openmessaging.benchmark
+ messaging-benchmark0.0.1-SNAPSHOT
- 4.0.0driver-nats-streaming
-
- io.nats
- java-nats-streaming
- 2.1.0
- ${project.groupId}driver-api${project.version}
+
+ io.nats
+ java-nats-streaming
+ 2.1.0
+
-
\ No newline at end of file
+
diff --git a/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkConsumer.java b/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkConsumer.java
index 6483ffc14..1e6e1c1bd 100644
--- a/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkConsumer.java
+++ b/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkConsumer.java
@@ -13,19 +13,21 @@
*/
package io.openmessaging.benchmark.driver.natsStreaming;
+
import io.nats.streaming.StreamingConnection;
-import io.nats.streaming.Subscription;
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
public class NatsStreamingBenchmarkConsumer implements BenchmarkConsumer {
private StreamingConnection streamingConnection;
private boolean unsubscribe;
+
public NatsStreamingBenchmarkConsumer(StreamingConnection streamingConnection) {
this.unsubscribe = false;
this.streamingConnection = streamingConnection;
}
- @Override public void close() throws Exception {
+ @Override
+ public void close() throws Exception {
if (!unsubscribe) {
unsubscribe = true;
streamingConnection.close();
diff --git a/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkDriver.java b/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkDriver.java
index 3a213a70e..06e31b988 100644
--- a/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkDriver.java
+++ b/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkDriver.java
@@ -14,6 +14,7 @@
package io.openmessaging.benchmark.driver.natsStreaming;
import static java.nio.charset.StandardCharsets.UTF_8;
+
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
@@ -37,7 +38,6 @@
import org.apache.bookkeeper.stats.StatsLogger;
import org.slf4j.LoggerFactory;
-
public class NatsStreamingBenchmarkDriver implements BenchmarkDriver {
private final String defaultClusterId = "test-cluster";
private String clusterId;
@@ -45,7 +45,9 @@ public class NatsStreamingBenchmarkDriver implements BenchmarkDriver {
private StreamingConnection natsStreamingPublisher;
private SubscriptionOptions.Builder subBuilder = new SubscriptionOptions.Builder();
private Options.Builder optsBuilder = new Options.Builder();
- @Override public void initialize(File configurationFile, StatsLogger statsLogger) throws IOException {
+
+ @Override
+ public void initialize(File configurationFile, StatsLogger statsLogger) throws IOException {
config = mapper.readValue(configurationFile, NatsStreamingClientConfig.class);
log.info("read config file," + config.toString());
if (config.clusterId != null) {
@@ -62,11 +64,13 @@ public class NatsStreamingBenchmarkDriver implements BenchmarkDriver {
subBuilder.maxInFlight(config.maxInFlight);
}
- @Override public String getTopicNamePrefix() {
+ @Override
+ public String getTopicNamePrefix() {
return "Nats-streaming-benchmark";
}
- @Override public CompletableFuture createTopic(String topic, int partitions) {
+ @Override
+ public CompletableFuture createTopic(String topic, int partitions) {
log.info("nats streaming create a topic" + topic);
log.info("ignore partitions");
CompletableFuture future = new CompletableFuture<>();
@@ -74,7 +78,8 @@ public class NatsStreamingBenchmarkDriver implements BenchmarkDriver {
return future;
}
- @Override public CompletableFuture createProducer(String topic) {
+ @Override
+ public CompletableFuture createProducer(String topic) {
if (natsStreamingPublisher == null) {
String clientId = "ProducerInstance" + getRandomString();
try {
@@ -85,43 +90,54 @@ public class NatsStreamingBenchmarkDriver implements BenchmarkDriver {
}
}
- return CompletableFuture.completedFuture(new NatsStreamingBenchmarkProducer(natsStreamingPublisher, topic));
+ return CompletableFuture.completedFuture(
+ new NatsStreamingBenchmarkProducer(natsStreamingPublisher, topic));
}
- @Override public CompletableFuture createConsumer(String topic, String subscriptionName,
- ConsumerCallback consumerCallback) {
+ @Override
+ public CompletableFuture createConsumer(
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
Subscription sub;
StreamingConnection streamingConnection;
String clientId = "ConsumerInstance" + getRandomString();
try {
streamingConnection = NatsStreaming.connect(clusterId, clientId, optsBuilder.build());
- streamingConnection.subscribe(topic, subscriptionName, new MessageHandler() {
- @Override public void onMessage(Message message) {
- consumerCallback.messageReceived(message.getData(), message.getTimestamp());
- }
- }, subBuilder.build());
+ streamingConnection.subscribe(
+ topic,
+ subscriptionName,
+ new MessageHandler() {
+ @Override
+ public void onMessage(Message message) {
+ consumerCallback.messageReceived(message.getData(), message.getTimestamp());
+ }
+ },
+ subBuilder.build());
} catch (Exception e) {
log.warn("nats streaming create consumer exception", e);
return null;
}
- return CompletableFuture.completedFuture(new NatsStreamingBenchmarkConsumer(streamingConnection));
+ return CompletableFuture.completedFuture(
+ new NatsStreamingBenchmarkConsumer(streamingConnection));
}
- @Override public void close() throws Exception {
+ @Override
+ public void close() throws Exception {
if (natsStreamingPublisher != null) {
natsStreamingPublisher.close();
natsStreamingPublisher = null;
}
}
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
- private static final org.slf4j.Logger log = LoggerFactory.getLogger(NatsStreamingBenchmarkDriver.class);
+ private static final org.slf4j.Logger log =
+ LoggerFactory.getLogger(NatsStreamingBenchmarkDriver.class);
private static final Random random = new Random();
- private static final String getRandomString() {
+ private static String getRandomString() {
byte[] buffer = new byte[5];
random.nextBytes(buffer);
return BaseEncoding.base64Url().omitPadding().encode(buffer);
@@ -131,24 +147,34 @@ public static void main(String[] args) throws Exception {
try {
Options opts = new Options.Builder().natsUrl("nats://0.0.0.0:4222").build();
SubscriptionOptions.Builder builder = new SubscriptionOptions.Builder();
- StreamingConnection streamingConnection = NatsStreaming.connect("mycluster", "benchmark-sub", opts);
- Subscription sub = streamingConnection.subscribe("topicTest", "subscription", new MessageHandler() {
- @Override public void onMessage(Message message) {
- System.out.println(message.toString());
- }
- }, builder.build());
- StreamingConnection natsStreamingPublisher = NatsStreaming.connect("mycluster", "benchmark-pub", opts);
+ StreamingConnection streamingConnection =
+ NatsStreaming.connect("mycluster", "benchmark-sub", opts);
+ Subscription sub =
+ streamingConnection.subscribe(
+ "topicTest",
+ "subscription",
+ new MessageHandler() {
+ @Override
+ public void onMessage(Message message) {
+ System.out.println(message.toString());
+ }
+ },
+ builder.build());
+ StreamingConnection natsStreamingPublisher =
+ NatsStreaming.connect("mycluster", "benchmark-pub", opts);
final String[] guid = new String[1];
- AckHandler acb = new AckHandler() {
- @Override public void onAck(String s, Exception e) {
- if ((e != null) || !guid[0].equals(s)) {
- System.out.println("pub error");
- } else {
- System.out.println("pub success");
- }
- }
- };
+ AckHandler acb =
+ new AckHandler() {
+ @Override
+ public void onAck(String s, Exception e) {
+ if ((e != null) || !guid[0].equals(s)) {
+ System.out.println("pub error");
+ } else {
+ System.out.println("pub success");
+ }
+ }
+ };
guid[0] = natsStreamingPublisher.publish("topicTest", "HelloStreaming".getBytes(UTF_8), acb);
diff --git a/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkProducer.java b/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkProducer.java
index 06228f6c0..2393c0719 100644
--- a/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkProducer.java
+++ b/driver-nats-streaming/src/main/java/io/openmessaging/benchmark/driver/natsStreaming/NatsStreamingBenchmarkProducer.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.natsStreaming;
+
import io.nats.streaming.AckHandler;
import io.nats.streaming.StreamingConnection;
import io.openmessaging.benchmark.driver.BenchmarkProducer;
@@ -22,23 +23,28 @@
public class NatsStreamingBenchmarkProducer implements BenchmarkProducer {
private StreamingConnection natsStreamingPublisher;
private String topic;
- public NatsStreamingBenchmarkProducer (StreamingConnection natsStreamingPublisher, String topic) {
+
+ public NatsStreamingBenchmarkProducer(StreamingConnection natsStreamingPublisher, String topic) {
this.natsStreamingPublisher = natsStreamingPublisher;
this.topic = topic;
}
- @Override public CompletableFuture sendAsync(Optional key, byte[] payload) {
+ @Override
+ public CompletableFuture sendAsync(Optional key, byte[] payload) {
CompletableFuture future = new CompletableFuture<>();
final String[] guid = new String[1];
- AckHandler acb = new AckHandler() {
- @Override public void onAck(String s, Exception e) {
- if ((e != null) || !guid[0].equals(s)) {
- future.completeExceptionally(e != null ? e : new IllegalStateException("guid != nuid"));
- } else {
- future.complete(null);
- }
- }
- };
+ AckHandler acb =
+ new AckHandler() {
+ @Override
+ public void onAck(String s, Exception e) {
+ if ((e != null) || !guid[0].equals(s)) {
+ future.completeExceptionally(
+ e != null ? e : new IllegalStateException("guid != nuid"));
+ } else {
+ future.complete(null);
+ }
+ }
+ };
try {
guid[0] = natsStreamingPublisher.publish(topic, payload, acb);
} catch (Exception e) {
@@ -50,7 +56,6 @@ public NatsStreamingBenchmarkProducer (StreamingConnection natsStreamingPublishe
return future;
}
- @Override public void close() throws Exception {
-
- }
+ @Override
+ public void close() throws Exception {}
}
diff --git a/driver-nats/pom.xml b/driver-nats/pom.xml
index 6518eae9c..d2d6bc6d1 100644
--- a/driver-nats/pom.xml
+++ b/driver-nats/pom.xml
@@ -1,3 +1,4 @@
+
-
+ 4.0.0
- messaging-benchmarkio.openmessaging.benchmark
+ messaging-benchmark0.0.1-SNAPSHOT
- 4.0.0driver-nats
-
- io.nats
- jnats
- 2.15.6
- ${project.groupId}driver-api${project.version}
+
+ io.nats
+ jnats
+ 2.15.6
+
-
\ No newline at end of file
+
diff --git a/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkConsumer.java b/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkConsumer.java
index fd44b4a2b..8627475df 100644
--- a/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkConsumer.java
+++ b/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkConsumer.java
@@ -13,10 +13,10 @@
*/
package io.openmessaging.benchmark.driver.nats;
-import io.nats.client.Connection;
+
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
public class NatsBenchmarkConsumer implements BenchmarkConsumer {
- @Override public void close() {
- }
+ @Override
+ public void close() {}
}
diff --git a/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkDriver.java b/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkDriver.java
index 3de6ea775..aa2989254 100644
--- a/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkDriver.java
+++ b/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkDriver.java
@@ -13,17 +13,17 @@
*/
package io.openmessaging.benchmark.driver.nats;
+
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import io.nats.client.Connection;
import io.nats.client.Dispatcher;
import io.nats.client.ErrorListener;
import io.nats.client.JetStream;
import io.nats.client.JetStreamManagement;
-import io.nats.client.JetStreamOptions;
import io.nats.client.JetStreamSubscription;
import io.nats.client.Message;
-import io.nats.client.MessageHandler;
import io.nats.client.Nats;
import io.nats.client.Options;
import io.nats.client.PushSubscribeOptions;
@@ -31,7 +31,6 @@
import io.nats.client.api.StreamConfiguration;
import io.nats.client.api.StreamInfo;
import io.nats.client.support.JsonUtils;
-import java.time.Duration;
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
import io.openmessaging.benchmark.driver.BenchmarkDriver;
import io.openmessaging.benchmark.driver.BenchmarkProducer;
@@ -42,8 +41,6 @@
import org.apache.bookkeeper.stats.StatsLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import io.nats.client.Connection;
-
public class NatsBenchmarkDriver implements BenchmarkDriver {
private NatsConfig config;
@@ -52,24 +49,28 @@ public class NatsBenchmarkDriver implements BenchmarkDriver {
private JetStream jetStream;
@Override
- public void initialize(File configurationFile, StatsLogger statsLogger) throws IOException, InterruptedException {
+ public void initialize(File configurationFile, StatsLogger statsLogger)
+ throws IOException, InterruptedException {
config = mapper.readValue(configurationFile, NatsConfig.class);
log.info("read config file," + config.toString());
- this.connection = Nats.connect(new Options.Builder()
- .server(config.natsHostUrl)
- .maxReconnects(5)
- .errorListener(new ErrorListener() {
- @Override
- public void errorOccurred(Connection conn, String error) {
- log.error("Error on connection {}: {}", conn, error);
- }
-
- @Override
- public void exceptionOccurred(Connection conn, Exception exp) {
- log.error("Exception on connection {}", conn, exp);
- }
- })
- .build());
+ this.connection =
+ Nats.connect(
+ new Options.Builder()
+ .server(config.natsHostUrl)
+ .maxReconnects(5)
+ .errorListener(
+ new ErrorListener() {
+ @Override
+ public void errorOccurred(Connection conn, String error) {
+ log.error("Error on connection {}: {}", conn, error);
+ }
+
+ @Override
+ public void exceptionOccurred(Connection conn, Exception exp) {
+ log.error("Exception on connection {}", conn, exp);
+ }
+ })
+ .build());
this.jetStream = connection.jetStream();
}
@@ -82,12 +83,14 @@ public String getTopicNamePrefix() {
public CompletableFuture createTopic(String topic, int partitions) {
try {
JetStreamManagement jsm = connection.jetStreamManagement();
- StreamInfo streamInfo = jsm.addStream(StreamConfiguration.builder()
- .name(topic)
- .subjects(topic)
- .storageType(StorageType.File)
- .replicas(config.replicationFactor)
- .build());
+ StreamInfo streamInfo =
+ jsm.addStream(
+ StreamConfiguration.builder()
+ .name(topic)
+ .subjects(topic)
+ .storageType(StorageType.File)
+ .replicas(config.replicationFactor)
+ .build());
log.info("Created stream {} -- {}", topic, JsonUtils.getFormatted(streamInfo));
return CompletableFuture.completedFuture(null);
} catch (Exception e) {
@@ -103,18 +106,23 @@ public CompletableFuture createProducer(String topic) {
}
@Override
- public CompletableFuture createConsumer(String topic, String subscriptionName,
- ConsumerCallback consumerCallback) {
-
+ public CompletableFuture createConsumer(
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
Dispatcher dispatcher = connection.createDispatcher();
try {
- JetStreamSubscription sub = jetStream.subscribe(topic, dispatcher, (Message msg) -> {
- long publishTimestamp = readLongFromBytes(msg.getData());
- consumerCallback.messageReceived(msg.getData(), publishTimestamp);
- msg.ack();
- }, false, new PushSubscribeOptions.Builder().build());
+ JetStreamSubscription sub =
+ jetStream.subscribe(
+ topic,
+ dispatcher,
+ (Message msg) -> {
+ long publishTimestamp = readLongFromBytes(msg.getData());
+ consumerCallback.messageReceived(msg.getData(), publishTimestamp);
+ msg.ack();
+ },
+ false,
+ new PushSubscribeOptions.Builder().build());
return CompletableFuture.completedFuture(new NatsBenchmarkConsumer());
} catch (Exception e) {
CompletableFuture f = new CompletableFuture<>();
@@ -129,8 +137,9 @@ public void close() throws Exception {
}
private static final Logger log = LoggerFactory.getLogger(NatsBenchmarkDriver.class);
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static long readLongFromBytes(final byte[] b) {
long result = 0;
@@ -140,5 +149,4 @@ private static long readLongFromBytes(final byte[] b) {
}
return result;
}
-
}
diff --git a/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkProducer.java b/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkProducer.java
index c9f856bb3..1c9dd8a54 100644
--- a/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkProducer.java
+++ b/driver-nats/src/main/java/io/openmessaging/benchmark/driver/nats/NatsBenchmarkProducer.java
@@ -13,19 +13,11 @@
*/
package io.openmessaging.benchmark.driver.nats;
+
import io.nats.client.JetStream;
-import io.nats.client.Message;
-import io.nats.client.PublishOptions;
import io.openmessaging.benchmark.driver.BenchmarkProducer;
-import java.time.Duration;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-import io.nats.client.Connection;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Semaphore;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
public class NatsBenchmarkProducer implements BenchmarkProducer {
private final String topic;
@@ -44,8 +36,7 @@ public CompletableFuture sendAsync(Optional key, byte[] payload) {
}
@Override
- public void close() throws Exception {
- }
+ public void close() throws Exception {}
public static void writeLongToBytes(long l, byte[] dst) {
for (int i = 7; i >= 0; i--) {
@@ -53,6 +44,4 @@ public static void writeLongToBytes(long l, byte[] dst) {
l >>= 8;
}
}
-
-
}
diff --git a/driver-nsq/pom.xml b/driver-nsq/pom.xml
index 5f16cf095..377f1f983 100644
--- a/driver-nsq/pom.xml
+++ b/driver-nsq/pom.xml
@@ -1,3 +1,4 @@
+
-
+ 4.0.0
- messaging-benchmarkio.openmessaging.benchmark
+ messaging-benchmark0.0.1-SNAPSHOT
- 4.0.0driver-nsq
-
- com.github.brainlag
- nsq-client
- 1.0.0.RC4
- ${project.groupId}driver-api${project.version}
+
+ com.github.brainlag
+ nsq-client
+ 1.0.0.RC4
+
-
\ No newline at end of file
+
diff --git a/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkConsumer.java b/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkConsumer.java
index a301da2ca..3e63c5e2a 100644
--- a/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkConsumer.java
+++ b/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkConsumer.java
@@ -13,15 +13,19 @@
*/
package io.openmessaging.benchmark.driver.nsq;
+
import com.github.brainlag.nsq.NSQConsumer;
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
public class NsqBenchmarkConsumer implements BenchmarkConsumer {
private NSQConsumer nsqConsumer;
+
public NsqBenchmarkConsumer(NSQConsumer nsqConsumer) {
this.nsqConsumer = nsqConsumer;
}
- @Override public void close() throws Exception {
+
+ @Override
+ public void close() throws Exception {
this.nsqConsumer.shutdown();
}
}
diff --git a/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkDriver.java b/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkDriver.java
index 65043b057..99f9f7586 100644
--- a/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkDriver.java
+++ b/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkDriver.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.nsq;
+
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
@@ -23,7 +24,6 @@
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
import io.openmessaging.benchmark.driver.BenchmarkDriver;
import io.openmessaging.benchmark.driver.BenchmarkProducer;
-
import io.openmessaging.benchmark.driver.ConsumerCallback;
import java.io.File;
import java.io.IOException;
@@ -35,16 +35,19 @@
public class NsqBenchmarkDriver implements BenchmarkDriver {
private NsqConfig config;
- @Override public void initialize(File configurationFile, StatsLogger statsLogger) throws IOException {
+ @Override
+ public void initialize(File configurationFile, StatsLogger statsLogger) throws IOException {
config = mapper.readValue(configurationFile, NsqConfig.class);
log.info("read config file," + config.toString());
}
- @Override public String getTopicNamePrefix() {
+ @Override
+ public String getTopicNamePrefix() {
return "Nsq-Benchmark";
}
- @Override public CompletableFuture createTopic(String topic, int partitions) {
+ @Override
+ public CompletableFuture createTopic(String topic, int partitions) {
log.info("create a topic" + topic);
log.info("ignore partitions");
CompletableFuture future = new CompletableFuture<>();
@@ -52,7 +55,8 @@ public class NsqBenchmarkDriver implements BenchmarkDriver {
return future;
}
- @Override public CompletableFuture createProducer(final String topic) {
+ @Override
+ public CompletableFuture createProducer(final String topic) {
NSQProducer nsqProducer = new NSQProducer();
nsqProducer.addAddress(config.nsqdHost, 4150);
nsqProducer.start();
@@ -61,19 +65,26 @@ public class NsqBenchmarkDriver implements BenchmarkDriver {
return CompletableFuture.completedFuture(new NsqBenchmarkProducer(nsqProducer, topic));
}
- @Override public CompletableFuture createConsumer(String topic, String subscriptionName,
- ConsumerCallback consumerCallback) {
- //Channel can be treat as subscriptionName
+ @Override
+ public CompletableFuture createConsumer(
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
+      // A channel can be treated as a subscription name
NSQLookup lookup = new DefaultNSQLookup();
lookup.addLookupAddress(config.lookupHost, 4161);
- NSQConsumer nsqConsumer = new NSQConsumer(lookup, topic, subscriptionName, (message) -> {
- //now mark the message as finished.
- consumerCallback.messageReceived(message.getMessage(), message.getTimestamp().getTime());
- message.finished();
+ NSQConsumer nsqConsumer =
+ new NSQConsumer(
+ lookup,
+ topic,
+ subscriptionName,
+ (message) -> {
+ // now mark the message as finished.
+ consumerCallback.messageReceived(
+ message.getMessage(), message.getTimestamp().getTime());
+ message.finished();
- //or you could requeue it, which indicates a failure and puts it back on the queue.
- //message.requeue();
- });
+ // or you could requeue it, which indicates a failure and puts it back on the queue.
+ // message.requeue();
+ });
nsqConsumer.start();
log.info("start a nsq consumer");
@@ -81,11 +92,11 @@ public class NsqBenchmarkDriver implements BenchmarkDriver {
return CompletableFuture.completedFuture(new NsqBenchmarkConsumer(nsqConsumer));
}
- @Override public void close() throws Exception {
+ @Override
+ public void close() throws Exception {}
- }
private static final Logger log = LoggerFactory.getLogger(NsqBenchmarkDriver.class);
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
-
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
}
diff --git a/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkProducer.java b/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkProducer.java
index 1ae87645e..f1f85c5e3 100644
--- a/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkProducer.java
+++ b/driver-nsq/src/main/java/io/openmessaging/benchmark/driver/nsq/NsqBenchmarkProducer.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.nsq;
+
import com.github.brainlag.nsq.NSQProducer;
import com.github.brainlag.nsq.exceptions.NSQException;
import io.openmessaging.benchmark.driver.BenchmarkProducer;
@@ -36,26 +37,27 @@ public NsqBenchmarkProducer(final NSQProducer nsqProducer, final String topic) {
this.topic = topic;
}
- @Override public CompletableFuture sendAsync(Optional key, byte[] payload) {
+ @Override
+ public CompletableFuture sendAsync(Optional key, byte[] payload) {
CompletableFuture future = new CompletableFuture<>();
try {
semaphore.acquire();
- executor.submit(() -> {
- try {
- nsqProducer.produce(topic, payload);
- } catch (NSQException e) {
- log.error("send exception", e);
- future.exceptionally(null);
- } catch (TimeoutException e) {
- log.error("send exception", e);
- future.exceptionally(null);
- } finally {
- semaphore.release();
- }
- future.complete(null);
-
- });
+ executor.submit(
+ () -> {
+ try {
+ nsqProducer.produce(topic, payload);
+ } catch (NSQException e) {
+ log.error("send exception", e);
+ future.exceptionally(null);
+ } catch (TimeoutException e) {
+ log.error("send exception", e);
+ future.exceptionally(null);
+ } finally {
+ semaphore.release();
+ }
+ future.complete(null);
+ });
} catch (InterruptedException e) {
log.error("semaphore exception", e);
future.exceptionally(null);
@@ -64,8 +66,10 @@ public NsqBenchmarkProducer(final NSQProducer nsqProducer, final String topic) {
return future;
}
- @Override public void close() throws Exception {
+ @Override
+ public void close() throws Exception {
this.nsqProducer.shutdown();
}
+
private static final Logger log = LoggerFactory.getLogger(NsqBenchmarkProducer.class);
}
diff --git a/driver-pravega/README.md b/driver-pravega/README.md
index 707f025db..e67d9ba21 100644
--- a/driver-pravega/README.md
+++ b/driver-pravega/README.md
@@ -1,25 +1,31 @@
# PRAVEGA BENCHMARKS
-This tutorial shows you how to run OpenMessaging benchmarks for [Pravega](https://pravega.io/).
+This tutorial shows you how to run OpenMessaging benchmarks for [Pravega](https://pravega.io/).
You can currently deploy to the following platforms:
* [Amazon Web Services (AWS)](#deploy-a-pravega-cluster-on-amazon-web-services)
# INITIAL SETUP
+
To begin with, you will need to clone the benchmark repo from the Pravega organization on GitHub:
+
```
$ git clone https://github.com/openmessaging/openmessaging-benchmark.git
$ cd openmessaging-benchmark
```
+
You will also need to have [Maven](https://maven.apache.org/install.html) installed.
# CREATE LOCAL ARTIFACTS
-Once you have the repo cloned locally, you can create all the artifacts necessary to run the benchmarks with a single
+
+Once you have the repo cloned locally, you can create all the artifacts necessary to run the benchmarks with a single
Maven command:
+
```
$ mvn install
```
-If you want to use the pre-release version of Pravega or the master branch of Pravega, please
+
+If you want to use the pre-release version of Pravega or the master branch of Pravega, please
check [how to build Pravega](doc/build_pravega.md).
# DEPLOY A PRAVEGA CLUSTER ON AMAZON WEB SERVICES
@@ -28,6 +34,7 @@ You can deploy a Pravega cluster on AWS (for benchmarking purposes) using [Terra
You’ll need to have both of those tools installed as well as the `terraform-inventory` [plugin](https://github.com/adammck/terraform-inventory) for Terraform.
You also need to install an Ansible modules to support metrics.
+
```
ansible-galaxy install cloudalchemy.node-exporter
```
@@ -38,53 +45,58 @@ In addition, you’ll need to:
* [Install the `aws` CLI tool](https://aws.amazon.com/cli/)
* [Configure the `aws` CLI tool](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html)
-
# SSH KEYS
+
Once you’re all set up with AWS and have the necessary tools installed locally, you’ll need to create both a public and a private SSH key at `~/.ssh/pravega_aws` (private) and `~/.ssh/pravega_aws.pub` (public), respectively.
```
$ ssh-keygen -f ~/.ssh/pravega_aws
```
-When prompted to enter a passphrase, simply hit `Enter` twice. Then, make sure that the keys have been created:
+When prompted to enter a passphrase, simply hit `Enter` twice. Then, make sure that the keys have been created:
```
$ ls ~/.ssh/pravega_aws*
```
# CREATE RESOURCES USING TERRAFORM
+
With SSH keys in place, you can create the necessary AWS resources using just a few Terraform commands:
+
```
$ cd driver-pravega/deploy
$ terraform init
$ echo "yes" | terraform apply
```
+
This will install the following [EC2](https://aws.amazon.com/ec2) instances (plus some other resources, such as a [Virtual Private Cloud](https://aws.amazon.com/vpc/) (VPC)):
-| Resource | Description | Count |
-| ----- | ----------- | ------ |
-| Controller instances| The VMs on which a Pravega controller will run | 1 |
-| Bookkeeper instances | The VMs on which a Bookkeeper and Segmentstore will run | 3 |
-| ZooKeeper instances | The VMs on which a ZooKeeper node will run | 3 |
-| Client instance | The VM from which the benchmarking suite itself will be run | 2 |
+| Resource | Description | Count |
+|----------------------|-------------------------------------------------------------|-------|
+| Controller instances | The VMs on which a Pravega controller will run | 1 |
+| Bookkeeper instances | The VMs on which a Bookkeeper and Segmentstore will run | 3 |
+| ZooKeeper instances | The VMs on which a ZooKeeper node will run | 3 |
+| Client instance | The VM from which the benchmarking suite itself will be run | 2 |
When you run `terraform apply`, you will be prompted to type `yes`. Type `yes` to continue with the installation or anything else to quit.
# VARIABLES
+
There’s a handful of configurable parameters related to the Terraform deployment that you can alter by modifying the defaults in the `terraform.tfvars` file.
-| Variable | Description | Default |
-| ----- | ----------- | ------ |
-| `region` | The AWS region in which the Pravega cluster will be deployed | `us-west-2` |
-| `public_key_path` | The path to the SSH public key that you’ve generated | `~/.ssh/pravega_aws.pub` |
-| `ami` | The [Amazon Machine Image (AWI)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) to be used by the cluster’s machines | `ami-9fa343e7` |
-| `instance_types` | The EC2 instance types used by the various components | `i3.4xlarge` (BookKeeper bookies), `m5.large`(Controller), `t3.small` (ZooKeeper), `c5.4xlarge` (benchmarking client) |
+| Variable | Description | Default |
+|-------------------|--------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------|
+| `region` | The AWS region in which the Pravega cluster will be deployed | `us-west-2` |
+| `public_key_path` | The path to the SSH public key that you’ve generated | `~/.ssh/pravega_aws.pub` |
+| `ami` | The [Amazon Machine Image (AWI)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) to be used by the cluster’s machines | `ami-9fa343e7` |
+| `instance_types` | The EC2 instance types used by the various components | `i3.4xlarge` (BookKeeper bookies), `m5.large`(Controller), `t3.small` (ZooKeeper), `c5.4xlarge` (benchmarking client) |
If you modify the `public_key_path`, make sure that you point to the appropriate SSH key path when running the [Ansible playbook](#_RUNNING_THE_ANSIBLE_PLAYBOOK).
# RUNNING THE ANSIBLE PLAYBOOK
With the appropriate infrastructure in place, you can install and start the Pravega cluster using Ansible with just one command:
+
```
# Fixes "terraform-inventory had an execution error: Error reading tfstate file: 0.12 format error"
$ export TF_STATE=./
@@ -93,14 +105,19 @@ $ ansible-playbook \
--inventory `which terraform-inventory` \
deploy.yaml
```
+
If you’re using an SSH private key path different from `~/.ssh/pravega_aws`, you can specify that path using the `--private-key` flag, for example `--private-key=~/.ssh/my_key`.
# SSHING INTO THE CLIENT HOST
+
In the [output](https://learn.hashicorp.com/terraform/getting-started/outputs.html) produced by Terraform, there’s a `client_ssh_host` variable that provides the IP address for the client EC2 host from which benchmarks can be run. You can SSH into that host using this command:
+
```
$ ssh -i ~/.ssh/pravega_aws ec2-user@$(terraform output client_ssh_host)
```
+
# RUNNING THE BENCHMARKS FROM THE CLIENT HOSTS
+
> The benchmark scripts can be run from the /opt/benchmark working directory.
Once you’ve successfully SSHed into the client host, you can run any of the [existing benchmarking workloads](http://openmessaging.cloud/docs/benchmarks/#benchmarking-workloads) by specifying the YAML file for that workload when running the `benchmark` executable. All workloads are in the `workloads` folder. Here’s an example:
@@ -110,24 +127,26 @@ $ sudo bin/benchmark \
--drivers driver-pravega/pravega.yaml \
workloads/1-topic-16-partitions-1kb.yaml
```
+
> Although benchmarks are run from a specific client host, the benchmarks are run in distributed mode, across multiple client hosts.
There are multiple Pravega “modes” for which you can run benchmarks. Each mode has its own YAML configuration file in the driver-pravega folder.
-| Mode | Description | Config file |
-| ----- | ----------- | ------ |
-| Standard | Pravega with transaction disabled (at-least-once semantics) | [pravega.yaml](./pravega.yaml) |
-| Exactly Once | Pravega with transaction enabled (exactly-once semantics) | [pravega-exactly-once.yaml](./pravega-exactly-once.yaml) |
+| Mode | Description | Config file |
+|--------------|-------------------------------------------------------------|----------------------------------------------------------|
+| Standard | Pravega with transaction disabled (at-least-once semantics) | [pravega.yaml](./pravega.yaml) |
+| Exactly Once | Pravega with transaction enabled (exactly-once semantics) | [pravega-exactly-once.yaml](./pravega-exactly-once.yaml) |
The example used the “standard” mode as configured in `driver-pravega/pravega.yaml`. Here’s an example of running a benchmarking workload in exactly-once mode:
+
```
$ sudo bin/benchmark \
--drivers driver-pravega/pravega-exactly-once.yaml \
workloads/1-topic-16-partitions-1kb.yaml
```
-
# SPECIFY CLIENT HOSTS
+
By default, benchmarks will be run from the set of hosts created by Terraform. You can also specify a comma-separated list of client hosts using the `--workers` flag (or `-w` for short):
```
@@ -136,23 +155,31 @@ $ sudo bin/benchmark \
--workers 1.2.3.4:8080,4.5.6.7:8080 \ # or -w 1.2.3.4:8080,4.5.6.7:8080
workloads/1-topic-16-partitions-1kb.yaml
```
+
# DOWNLOADING YOUR BENCHMARKING RESULTS
+
The OpenMessaging benchmarking suite stores results in JSON files in the `/opt/benchmark` folder on the client host from which the benchmarks are run. You can download those results files onto your local machine using `scp`. You can download all generated JSON results files using this command:
```
$ scp -i ~/.ssh/pravega_aws ec2-user@$(terraform output client_ssh_host):/opt/benchmark/*.json .
```
+
# COLLECTING METRICS AND LOGS
+
See [metrics and logs](doc/metrics_and_logs.md).
# TEARING DOWN YOUR BENCHMARKING INFRASTRUCTURE
+
Once you’re finished running your benchmarks, you should tear down the AWS infrastructure you deployed for the sake of saving costs. You can do that with one command:
+
```
$ terraform destroy -force
```
+
Make sure to let the process run to completion (it could take several minutes). Once the tear down is complete, all AWS resources that you created for the Pravega benchmarking suite will have been removed.
# RUN IN KUBERNETES
+
See [run in Kubernetes](doc/run_in_k8s.md).
# P3 Test Driver
@@ -160,4 +187,5 @@ See [run in Kubernetes](doc/run_in_k8s.md).
[P3 Test Driver](https://github.com/pravega/p3_test_driver) can be used to run multiple tests automatically and plot the results.
# TROUBLESHOOTING
+
See [troubleshooting](doc/troubleshooting.md).
diff --git a/driver-pravega/doc/build_pravega.md b/driver-pravega/doc/build_pravega.md
index b1a82a20c..1808b7690 100644
--- a/driver-pravega/doc/build_pravega.md
+++ b/driver-pravega/doc/build_pravega.md
@@ -11,6 +11,7 @@ git checkout master
This will build the file `pravega/build/distributions/pravega-0.9.0.tgz.`
Then comment `pravegaSrc` and `pravegaSrcRemote: yes` and uncomment `pravegaSrc` `pravegaSrcRemote: no` in `driver-pravega/deploy/deploy.yaml`
+
```
# Change below to use a published release of Pravega or a local build.
# pravegaSrc: "https://github.com/pravega/pravega/releases/download/v{{ pravegaVersion }}/pravega-{{ pravegaVersion }}.tgz"
@@ -19,12 +20,16 @@ Then comment `pravegaSrc` and `pravegaSrcRemote: yes` and uncomment `pravegaSrc`
pravegaSrc: "../../../pravega/build/distributions/pravega-{{ pravegaVersion }}.tgz"
pravegaSrcRemote: no
```
+
If needed, change the variable `pravegaVersion` in [vars.yaml](../deploy/vars.yaml) to match the version built.
If needed, change [pom.xml](../pom.xml) to match the version built.
## Build Benchmark
+
Add flag to skip license check`-Dlicense.skip=true` if license check failed.
+
```
mvn clean install
```
+
diff --git a/driver-pravega/doc/metrics_and_logs.md b/driver-pravega/doc/metrics_and_logs.md
index 929492273..9e72570eb 100644
--- a/driver-pravega/doc/metrics_and_logs.md
+++ b/driver-pravega/doc/metrics_and_logs.md
@@ -44,12 +44,12 @@ Login using user name "admin" and any password.
Configure Grafana with the following data sources:
- - Prometheus
- - Name: Prometheus
- - HTTP URL: http://prometheus:9090
- - InfluxDB
- - Name: pravega-influxdb
- - HTTP URL: http://influxdb:8086
- - InfluxDB Details Database: pravega
+- Prometheus
+ - Name: Prometheus
+ - HTTP URL: http://prometheus:9090
+- InfluxDB
+ - Name: pravega-influxdb
+ - HTTP URL: http://influxdb:8086
+ - InfluxDB Details Database: pravega
Load dashboards from [deploy/templates/dashboards](../deploy/templates/dashboards).
diff --git a/driver-pravega/doc/run_in_k8s.md b/driver-pravega/doc/run_in_k8s.md
index 73ee6aa5f..073bc2f89 100644
--- a/driver-pravega/doc/run_in_k8s.md
+++ b/driver-pravega/doc/run_in_k8s.md
@@ -7,6 +7,7 @@
```
## Run local driver on Kubernetes:
+
```
kubectl run -n examples --rm -it --image pravega/openmessaging-benchmark:latest --serviceaccount examples-pravega openmessaging-benchmark
```
@@ -15,3 +16,5 @@ kubectl run -n examples --rm -it --image pravega/openmessaging-benchmark:latest
```
./deploy-k8s-components.sh
+```
+
diff --git a/driver-pravega/doc/troubleshooting.md b/driver-pravega/doc/troubleshooting.md
index 6ff3c0927..2ec033937 100644
--- a/driver-pravega/doc/troubleshooting.md
+++ b/driver-pravega/doc/troubleshooting.md
@@ -13,3 +13,4 @@ journalctl -u pravega-segmentstore
```
export TF_STATE=./
```
+
diff --git a/driver-pravega/pom.xml b/driver-pravega/pom.xml
index 6c8071f3a..5cce48a41 100644
--- a/driver-pravega/pom.xml
+++ b/driver-pravega/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
- driver-pulsar
-
- 2.10.1
-
+ driver-pulsar
+
+ 2.10.1
+
-
-
- ${project.groupId}
- driver-api
- ${project.version}
-
-
- org.apache.pulsar
- pulsar-client-all
- ${pulsar.version}
-
-
- com.google.guava
- guava
-
-
+
+
+ ${project.groupId}
+ driver-api
+ ${project.version}
+
+
+ com.google.guava
+ guava
+
+
+ org.apache.pulsar
+ pulsar-client-all
+ ${pulsar.version}
+
+
diff --git a/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkConsumer.java b/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkConsumer.java
index 84e9305e0..064cf8e98 100644
--- a/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkConsumer.java
+++ b/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkConsumer.java
@@ -14,9 +14,9 @@
package io.openmessaging.benchmark.driver.pulsar;
import static java.util.Collections.unmodifiableList;
+
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
import java.nio.ByteBuffer;
-import java.util.Collections;
import java.util.List;
import org.apache.pulsar.client.api.Consumer;
diff --git a/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkDriver.java b/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkDriver.java
index 08778fc63..dd02fb874 100644
--- a/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkDriver.java
+++ b/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkDriver.java
@@ -13,6 +13,19 @@
*/
package io.openmessaging.benchmark.driver.pulsar;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectWriter;
+import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import com.google.common.collect.Sets;
+import com.google.common.io.BaseEncoding;
+import io.openmessaging.benchmark.driver.BenchmarkConsumer;
+import io.openmessaging.benchmark.driver.BenchmarkDriver;
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
+import io.openmessaging.benchmark.driver.ConsumerCallback;
+import io.openmessaging.benchmark.driver.pulsar.config.PulsarClientConfig.PersistenceConfiguration;
+import io.openmessaging.benchmark.driver.pulsar.config.PulsarConfig;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
@@ -22,7 +35,6 @@
import java.util.Random;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
-
import java.util.stream.Collectors;
import org.apache.bookkeeper.stats.StatsLogger;
import org.apache.pulsar.client.admin.PulsarAdmin;
@@ -44,20 +56,6 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.databind.ObjectWriter;
-import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
-import com.google.common.collect.Sets;
-import com.google.common.io.BaseEncoding;
-
-import io.openmessaging.benchmark.driver.BenchmarkConsumer;
-import io.openmessaging.benchmark.driver.BenchmarkDriver;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-import io.openmessaging.benchmark.driver.ConsumerCallback;
-import io.openmessaging.benchmark.driver.pulsar.config.PulsarClientConfig.PersistenceConfiguration;
-import io.openmessaging.benchmark.driver.pulsar.config.PulsarConfig;
-
public class PulsarBenchmarkDriver implements BenchmarkDriver {
private PulsarClient client;
@@ -73,33 +71,40 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
this.config = readConfig(configurationFile);
log.info("Pulsar driver configuration: {}", writer.writeValueAsString(config));
- ClientBuilder clientBuilder = PulsarClient.builder()
- .ioThreads(config.client.ioThreads)
- .connectionsPerBroker(config.client.connectionsPerBroker)
- .statsInterval(0, TimeUnit.SECONDS)
- .serviceUrl(config.client.serviceUrl)
- .maxConcurrentLookupRequests(config.client.maxConcurrentLookupRequests)
- .maxLookupRequests(Integer.MAX_VALUE)
- .memoryLimit(config.client.clientMemoryLimitMB, SizeUnit.MEGA_BYTES)
- .operationTimeout(10, TimeUnit.MINUTES)
- .listenerThreads(Runtime.getRuntime().availableProcessors());
+ ClientBuilder clientBuilder =
+ PulsarClient.builder()
+ .ioThreads(config.client.ioThreads)
+ .connectionsPerBroker(config.client.connectionsPerBroker)
+ .statsInterval(0, TimeUnit.SECONDS)
+ .serviceUrl(config.client.serviceUrl)
+ .maxConcurrentLookupRequests(config.client.maxConcurrentLookupRequests)
+ .maxLookupRequests(Integer.MAX_VALUE)
+ .memoryLimit(config.client.clientMemoryLimitMB, SizeUnit.MEGA_BYTES)
+ .operationTimeout(10, TimeUnit.MINUTES)
+ .listenerThreads(Runtime.getRuntime().availableProcessors());
if (config.client.serviceUrl.startsWith("pulsar+ssl")) {
- clientBuilder.allowTlsInsecureConnection(config.client.tlsAllowInsecureConnection)
- .enableTlsHostnameVerification(config.client.tlsEnableHostnameVerification)
- .tlsTrustCertsFilePath(config.client.tlsTrustCertsFilePath);
+ clientBuilder
+ .allowTlsInsecureConnection(config.client.tlsAllowInsecureConnection)
+ .enableTlsHostnameVerification(config.client.tlsEnableHostnameVerification)
+ .tlsTrustCertsFilePath(config.client.tlsTrustCertsFilePath);
}
- PulsarAdminBuilder pulsarAdminBuilder = PulsarAdmin.builder().serviceHttpUrl(config.client.httpUrl);
+ PulsarAdminBuilder pulsarAdminBuilder =
+ PulsarAdmin.builder().serviceHttpUrl(config.client.httpUrl);
if (config.client.httpUrl.startsWith("https")) {
- pulsarAdminBuilder.allowTlsInsecureConnection(config.client.tlsAllowInsecureConnection)
- .enableTlsHostnameVerification(config.client.tlsEnableHostnameVerification)
- .tlsTrustCertsFilePath(config.client.tlsTrustCertsFilePath);
+ pulsarAdminBuilder
+ .allowTlsInsecureConnection(config.client.tlsAllowInsecureConnection)
+ .enableTlsHostnameVerification(config.client.tlsEnableHostnameVerification)
+ .tlsTrustCertsFilePath(config.client.tlsTrustCertsFilePath);
}
- if (config.client.authentication.plugin != null && !config.client.authentication.plugin.isEmpty()) {
- clientBuilder.authentication(config.client.authentication.plugin, config.client.authentication.data);
- pulsarAdminBuilder.authentication(config.client.authentication.plugin, config.client.authentication.data);
+ if (config.client.authentication.plugin != null
+ && !config.client.authentication.plugin.isEmpty()) {
+ clientBuilder.authentication(
+ config.client.authentication.plugin, config.client.authentication.data);
+ pulsarAdminBuilder.authentication(
+ config.client.authentication.plugin, config.client.authentication.data);
}
client = clientBuilder.build();
@@ -110,14 +115,17 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
log.info("Created Pulsar admin client for HTTP URL {}", config.client.httpUrl);
- producerBuilder = client.newProducer()
- .enableBatching(config.producer.batchingEnabled)
- .batchingMaxPublishDelay(config.producer.batchingMaxPublishDelayMs, TimeUnit.MILLISECONDS)
- .batchingMaxMessages(Integer.MAX_VALUE)
- .batchingMaxBytes(config.producer.batchingMaxBytes)
- .blockIfQueueFull(config.producer.blockIfQueueFull)
- .sendTimeout(0, TimeUnit.MILLISECONDS)
- .maxPendingMessages(config.producer.pendingQueueSize);
+ producerBuilder =
+ client
+ .newProducer()
+ .enableBatching(config.producer.batchingEnabled)
+ .batchingMaxPublishDelay(
+ config.producer.batchingMaxPublishDelayMs, TimeUnit.MILLISECONDS)
+ .batchingMaxMessages(Integer.MAX_VALUE)
+ .batchingMaxBytes(config.producer.batchingMaxBytes)
+ .blockIfQueueFull(config.producer.blockIfQueueFull)
+ .sendTimeout(0, TimeUnit.MILLISECONDS)
+ .maxPendingMessages(config.producer.pendingQueueSize);
try {
// Create namespace and set the configuration
@@ -125,8 +133,14 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
String cluster = config.client.clusterName;
if (!adminClient.tenants().getTenants().contains(tenant)) {
try {
- adminClient.tenants().createTenant(tenant,
- TenantInfo.builder().adminRoles(Collections.emptySet()).allowedClusters(Sets.newHashSet(cluster)).build());
+ adminClient
+ .tenants()
+ .createTenant(
+ tenant,
+ TenantInfo.builder()
+ .adminRoles(Collections.emptySet())
+ .allowedClusters(Sets.newHashSet(cluster))
+ .build());
} catch (ConflictException e) {
// Ignore. This can happen when multiple workers are initializing at the same time
}
@@ -138,18 +152,27 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
log.info("Created Pulsar namespace {}", namespace);
PersistenceConfiguration p = config.client.persistence;
- adminClient.namespaces().setPersistence(namespace,
- new PersistencePolicies(p.ensembleSize, p.writeQuorum, p.ackQuorum, 1.0));
-
- adminClient.namespaces().setBacklogQuota(namespace,
- BacklogQuota.builder()
- .limitSize(-1L)
- .limitTime(-1)
- .retentionPolicy(RetentionPolicy.producer_exception)
- .build());
+ adminClient
+ .namespaces()
+ .setPersistence(
+ namespace, new PersistencePolicies(p.ensembleSize, p.writeQuorum, p.ackQuorum, 1.0));
+
+ adminClient
+ .namespaces()
+ .setBacklogQuota(
+ namespace,
+ BacklogQuota.builder()
+ .limitSize(-1L)
+ .limitTime(-1)
+ .retentionPolicy(RetentionPolicy.producer_exception)
+ .build());
adminClient.namespaces().setDeduplicationStatus(namespace, p.deduplicationEnabled);
- log.info("Applied persistence configuration for namespace {}/{}/{}: {}", tenant, cluster, namespace,
- writer.writeValueAsString(p));
+ log.info(
+ "Applied persistence configuration for namespace {}/{}/{}: {}",
+ tenant,
+ cluster,
+ namespace,
+ writer.writeValueAsString(p));
} catch (PulsarAdminException e) {
throw new IOException(e);
@@ -173,37 +196,42 @@ public CompletableFuture createTopic(String topic, int partitions) {
@Override
public CompletableFuture createProducer(String topic) {
- return producerBuilder.topic(topic).createAsync()
- .thenApply(PulsarBenchmarkProducer::new);
+ return producerBuilder.topic(topic).createAsync().thenApply(PulsarBenchmarkProducer::new);
}
@Override
- public CompletableFuture createConsumer(String topic, String subscriptionName,
- ConsumerCallback consumerCallback) {
+ public CompletableFuture createConsumer(
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
List>> futures = new ArrayList<>();
- return client.getPartitionsForTopic(topic)
- .thenCompose(partitions -> {
- partitions.forEach(p -> futures.add(createInternalConsumer(p, subscriptionName, consumerCallback)));
- return FutureUtil.waitForAll(futures);
- }).thenApply(__ -> new PulsarBenchmarkConsumer(
- futures.stream().map(CompletableFuture::join).collect(Collectors.toList())
- )
- );
+ return client
+ .getPartitionsForTopic(topic)
+ .thenCompose(
+ partitions -> {
+ partitions.forEach(
+ p -> futures.add(createInternalConsumer(p, subscriptionName, consumerCallback)));
+ return FutureUtil.waitForAll(futures);
+ })
+ .thenApply(
+ __ ->
+ new PulsarBenchmarkConsumer(
+ futures.stream().map(CompletableFuture::join).collect(Collectors.toList())));
}
- CompletableFuture> createInternalConsumer(String topic, String subscriptionName,
- ConsumerCallback consumerCallback) {
- return client.newConsumer(Schema.BYTEBUFFER)
+ CompletableFuture> createInternalConsumer(
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
+ return client
+ .newConsumer(Schema.BYTEBUFFER)
.priorityLevel(0)
.subscriptionType(SubscriptionType.Failover)
- .messageListener((c, msg) -> {
- try {
- consumerCallback.messageReceived(msg.getValue(), msg.getPublishTime());
- c.acknowledgeAsync(msg);
- } finally {
- msg.release();
- }
- })
+ .messageListener(
+ (c, msg) -> {
+ try {
+ consumerCallback.messageReceived(msg.getValue(), msg.getPublishTime());
+ c.acknowledgeAsync(msg);
+ } finally {
+ msg.release();
+ }
+ })
.topic(topic)
.subscriptionName(subscriptionName)
.receiverQueueSize(config.consumer.receiverQueueSize)
@@ -227,7 +255,8 @@ public void close() throws Exception {
log.info("Pulsar benchmark driver successfully shut down");
}
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static PulsarConfig readConfig(File configurationFile) throws IOException {
@@ -236,7 +265,7 @@ private static PulsarConfig readConfig(File configurationFile) throws IOExceptio
private static final Random random = new Random();
- private static final String getRandomString() {
+ private static String getRandomString() {
byte[] buffer = new byte[5];
random.nextBytes(buffer);
return BaseEncoding.base64Url().omitPadding().encode(buffer);
diff --git a/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkProducer.java b/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkProducer.java
index cad7a4a12..80adb2add 100644
--- a/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkProducer.java
+++ b/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/PulsarBenchmarkProducer.java
@@ -13,14 +13,13 @@
*/
package io.openmessaging.benchmark.driver.pulsar;
+
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-
import org.apache.pulsar.client.api.Producer;
import org.apache.pulsar.client.api.TypedMessageBuilder;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-
public class PulsarBenchmarkProducer implements BenchmarkProducer {
private final Producer producer;
@@ -43,5 +42,4 @@ public CompletableFuture sendAsync(Optional key, byte[] payload) {
return msgBuilder.sendAsync().thenApply(msgId -> null);
}
-
}
diff --git a/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/config/PulsarClientConfig.java b/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/config/PulsarClientConfig.java
index 15e350b4e..0dedaa5d2 100644
--- a/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/config/PulsarClientConfig.java
+++ b/driver-pulsar/src/main/java/io/openmessaging/benchmark/driver/pulsar/config/PulsarClientConfig.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.pulsar.config;
+
import org.apache.pulsar.common.naming.TopicDomain;
public class PulsarClientConfig {
diff --git a/driver-rabbitmq/README.md b/driver-rabbitmq/README.md
index 16ef49782..fc8b68d69 100644
--- a/driver-rabbitmq/README.md
+++ b/driver-rabbitmq/README.md
@@ -53,10 +53,10 @@ $ terraform apply
That will install the following [EC2](https://aws.amazon.com/ec2) instances (plus some other resources, such as a [Virtual Private Cloud](https://aws.amazon.com/vpc/) (VPC)):
-Resource | Description | Count
-:--------|:------------|:-----
-RabbitMQ instances | The VMs on which RabbitMQ brokers will run | 3
-Client instance | The VM from which the benchmarking suite itself will be run | 1
+| Resource | Description | Count |
+|:-------------------|:------------------------------------------------------------|:------|
+| RabbitMQ instances | The VMs on which RabbitMQ brokers will run | 3 |
+| Client instance | The VM from which the benchmarking suite itself will be run | 1 |
When you run `terraform apply`, you will be prompted to type `yes`. Type `yes` to continue with the installation or anything else to quit.
@@ -66,20 +66,20 @@ Once the installation is complete, you will see a confirmation message listing t
There's a handful of configurable parameters related to the Terraform deployment that you can alter by modifying the defaults in the `terraform.tfvars` file.
-Variable | Description | Default
-:--------|:------------|:-------
-`region` | The AWS region in which the RabbitMQ cluster will be deployed | `us-west-2`
-`az` | The availability zone in which the RabbitMQ cluster will be deployed | `us-west-2a`
-`public_key_path` | The path to the SSH public key that you've generated | `~/.ssh/rabbitmq_aws.pub`
-`ami` | The [Amazon Machine Image](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) (AWI) to be used by the cluster's machines | [`ami-9fa343e7`](https://access.redhat.com/articles/3135091)
-`instance_types` | The EC2 instance types used by the various components | `i3.4xlarge` (RabbitMQ brokers), `c4.8xlarge` (benchmarking client)
+| Variable | Description | Default |
+|:------------------|:------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------|
+| `region` | The AWS region in which the RabbitMQ cluster will be deployed | `us-west-2` |
+| `az` | The availability zone in which the RabbitMQ cluster will be deployed | `us-west-2a` |
+| `public_key_path` | The path to the SSH public key that you've generated | `~/.ssh/rabbitmq_aws.pub` |
+| `ami` | The [Amazon Machine Image](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) (AWI) to be used by the cluster's machines | [`ami-9fa343e7`](https://access.redhat.com/articles/3135091) |
+| `instance_types` | The EC2 instance types used by the various components | `i3.4xlarge` (RabbitMQ brokers), `c4.8xlarge` (benchmarking client) |
> If you modify the `public_key_path`, make sure that you point to the appropriate SSH key path when running the [Ansible playbook](#running-the-ansible-playbook).
### Running the Ansible playbook
With the appropriate infrastructure in place, you can install and start the RabbitMQ cluster using Ansible with just one command.
-Note that a `TFSTATE` environment must point to the folder in which the `tf.state` file is located.
+Note that a `TFSTATE` environment must point to the folder in which the `tf.state` file is located.
```bash
$ TF_STATE=. ansible-playbook \
@@ -120,4 +120,5 @@ The `rabbitmq_management` plugin is installed, and the HTTP endpoint is exposed
to access the management REST API at this endpoint.
Note that the connection is authenticated but not currently encrypted and so passwords will be passed in plain text. Use
-the `admin` account configured in the [Terraform](deploy/provision-rabbitmq-aws.tf) file to log in.
\ No newline at end of file
+the `admin` account configured in the [Terraform](deploy/provision-rabbitmq-aws.tf) file to log in.
+
diff --git a/driver-rabbitmq/pom.xml b/driver-rabbitmq/pom.xml
index 04b95ad32..f6fe0fe29 100644
--- a/driver-rabbitmq/pom.xml
+++ b/driver-rabbitmq/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
- driver-rabbitmq
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
+ driver-rabbitmq
-
-
- ${project.groupId}
- driver-api
- ${project.version}
-
-
- com.rabbitmq
- amqp-client
- 4.8.0
-
-
- io.netty
- netty-all
-
-
+
+
+ ${project.groupId}
+ driver-api
+ ${project.version}
+
+
+ com.rabbitmq
+ amqp-client
+ 4.8.0
+
+
+ io.netty
+ netty-all
+
+
diff --git a/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkConsumer.java b/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkConsumer.java
index 975121e18..27f87d36e 100644
--- a/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkConsumer.java
+++ b/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkConsumer.java
@@ -13,16 +13,15 @@
*/
package io.openmessaging.benchmark.driver.rabbitmq;
-import com.rabbitmq.client.AlreadyClosedException;
-import java.io.IOException;
import com.rabbitmq.client.AMQP.BasicProperties;
+import com.rabbitmq.client.AlreadyClosedException;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;
-
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
import io.openmessaging.benchmark.driver.ConsumerCallback;
+import java.io.IOException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -33,7 +32,8 @@ public class RabbitMqBenchmarkConsumer extends DefaultConsumer implements Benchm
private final Channel channel;
private final ConsumerCallback callback;
- public RabbitMqBenchmarkConsumer(Channel channel, String queueName, ConsumerCallback callback) throws IOException {
+ public RabbitMqBenchmarkConsumer(Channel channel, String queueName, ConsumerCallback callback)
+ throws IOException {
super(channel);
this.channel = channel;
@@ -42,7 +42,8 @@ public RabbitMqBenchmarkConsumer(Channel channel, String queueName, ConsumerCall
}
@Override
- public void handleDelivery(String consumerTag, Envelope envelope, BasicProperties properties, byte[] body) {
+ public void handleDelivery(
+ String consumerTag, Envelope envelope, BasicProperties properties, byte[] body) {
callback.messageReceived(body, properties.getTimestamp().getTime());
}
@@ -54,5 +55,4 @@ public void close() throws Exception {
log.warn("Channel already closed", e);
}
}
-
}
diff --git a/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkDriver.java b/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkDriver.java
index 6181d15c5..d56442205 100644
--- a/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkDriver.java
+++ b/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkDriver.java
@@ -13,32 +13,31 @@
*/
package io.openmessaging.benchmark.driver.rabbitmq;
-import com.rabbitmq.client.AlreadyClosedException;
-import java.io.File;
-import java.io.IOException;
-import java.net.URI;
-import java.util.Collections;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.atomic.AtomicInteger;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
+import com.rabbitmq.client.AlreadyClosedException;
import com.rabbitmq.client.BuiltinExchangeType;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Connection;
import com.rabbitmq.client.ConnectionFactory;
-
import io.netty.handler.codec.http.QueryStringDecoder;
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
import io.openmessaging.benchmark.driver.BenchmarkDriver;
import io.openmessaging.benchmark.driver.BenchmarkProducer;
import io.openmessaging.benchmark.driver.ConsumerCallback;
+import java.io.File;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.atomic.AtomicInteger;
import org.apache.bookkeeper.stats.StatsLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -56,7 +55,8 @@ public void initialize(File configurationFile, StatsLogger statsLogger) throws I
@Override
public void close() {
- for(Iterator> it = connections.entrySet().iterator(); it.hasNext(); ) {
+ for (Iterator> it = connections.entrySet().iterator();
+ it.hasNext(); ) {
Connection connection = it.next().getValue();
try {
connection.close();
@@ -67,13 +67,13 @@ public void close() {
}
it.remove();
}
-
}
@Override
public String getTopicNamePrefix() {
// Do a round-robin on AMQP URIs
- URI configUri = URI.create(config.amqpUris.get(uriIndex.getAndIncrement() % config.amqpUris.size()));
+ URI configUri =
+ URI.create(config.amqpUris.get(uriIndex.getAndIncrement() % config.amqpUris.size()));
URI topicUri = configUri.resolve(configUri.getRawPath() + "?exchange=test-exchange");
return topicUri.toString();
}
@@ -93,43 +93,54 @@ public CompletableFuture createTopic(String topic, int partitions) {
@Override
public CompletableFuture createProducer(String topic) {
CompletableFuture future = new CompletableFuture<>();
- ForkJoinPool.commonPool().execute(() -> {
- try {
- String uri = topic.split("\\?")[0];
- Connection connection = getOrCreateConnection(uri);
- Channel channel = connection.createChannel();
- String exchange = getExchangeName(topic);
- channel.exchangeDeclare(exchange, BuiltinExchangeType.FANOUT, true);
- channel.confirmSelect();
- future.complete(new RabbitMqBenchmarkProducer(channel, exchange, config.messagePersistence));
- } catch (Exception e) {
- future.completeExceptionally(e);
- }
- });
+ ForkJoinPool.commonPool()
+ .execute(
+ () -> {
+ try {
+ String uri = topic.split("\\?")[0];
+ Connection connection = getOrCreateConnection(uri);
+ Channel channel = connection.createChannel();
+ String exchange = getExchangeName(topic);
+ channel.exchangeDeclare(exchange, BuiltinExchangeType.FANOUT, true);
+ channel.confirmSelect();
+ future.complete(
+ new RabbitMqBenchmarkProducer(channel, exchange, config.messagePersistence));
+ } catch (Exception e) {
+ future.completeExceptionally(e);
+ }
+ });
return future;
}
@Override
- public CompletableFuture createConsumer(String topic, String subscriptionName,
- ConsumerCallback consumerCallback) {
+ public CompletableFuture createConsumer(
+ String topic, String subscriptionName, ConsumerCallback consumerCallback) {
CompletableFuture future = new CompletableFuture<>();
- ForkJoinPool.commonPool().execute(() -> {
- try {
- String uri = topic.split("\\?")[0];
- Connection connection = getOrCreateConnection(uri);
- Channel channel = connection.createChannel();
- String exchange = getExchangeName(topic);
- String queueName = exchange + "-" + subscriptionName;
- channel.exchangeDeclare(exchange, BuiltinExchangeType.FANOUT, true);
- // Create the queue
- channel.queueDeclare(queueName, true, false, false, Collections.singletonMap("x-queue-type", "quorum"));
- channel.queueBind(queueName, exchange, "");
- future.complete(new RabbitMqBenchmarkConsumer(channel, queueName, consumerCallback));
- } catch (IOException e) {
- future.completeExceptionally(e);
- }
- });
+ ForkJoinPool.commonPool()
+ .execute(
+ () -> {
+ try {
+ String uri = topic.split("\\?")[0];
+ Connection connection = getOrCreateConnection(uri);
+ Channel channel = connection.createChannel();
+ String exchange = getExchangeName(topic);
+ String queueName = exchange + "-" + subscriptionName;
+ channel.exchangeDeclare(exchange, BuiltinExchangeType.FANOUT, true);
+ // Create the queue
+ channel.queueDeclare(
+ queueName,
+ true,
+ false,
+ false,
+ Collections.singletonMap("x-queue-type", "quorum"));
+ channel.queueBind(queueName, exchange, "");
+ future.complete(
+ new RabbitMqBenchmarkConsumer(channel, queueName, consumerCallback));
+ } catch (IOException e) {
+ future.completeExceptionally(e);
+ }
+ });
return future;
}
@@ -138,27 +149,30 @@ private String getExchangeName(String uri) {
QueryStringDecoder decoder = new QueryStringDecoder(uri);
Map> parameters = decoder.parameters();
- if(!parameters.containsKey("exchange")) {
+ if (!parameters.containsKey("exchange")) {
throw new IllegalArgumentException("Missing exchange param");
}
return parameters.get("exchange").get(0);
}
private Connection getOrCreateConnection(String uri) {
- return connections.computeIfAbsent(uri, uriKey -> {
- try {
- ConnectionFactory connectionFactory = new ConnectionFactory();
- connectionFactory.setAutomaticRecoveryEnabled(true);
- connectionFactory.setUri(uri);
- return connectionFactory.newConnection();
- } catch (Exception e) {
- throw new RuntimeException("Couldn't establish connection", e);
- }
- });
+ return connections.computeIfAbsent(
+ uri,
+ uriKey -> {
+ try {
+ ConnectionFactory connectionFactory = new ConnectionFactory();
+ connectionFactory.setAutomaticRecoveryEnabled(true);
+ connectionFactory.setUri(uri);
+ return connectionFactory.newConnection();
+ } catch (Exception e) {
+ throw new RuntimeException("Couldn't establish connection", e);
+ }
+ });
}
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static final Logger log = LoggerFactory.getLogger(RabbitMqBenchmarkDriver.class);
}
diff --git a/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkProducer.java b/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkProducer.java
index f795158ee..d7d3841ad 100644
--- a/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkProducer.java
+++ b/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqBenchmarkProducer.java
@@ -13,8 +13,12 @@
*/
package io.openmessaging.benchmark.driver.rabbitmq;
+
+import com.rabbitmq.client.AMQP.BasicProperties;
import com.rabbitmq.client.AlreadyClosedException;
+import com.rabbitmq.client.Channel;
import com.rabbitmq.client.ConfirmListener;
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
import java.util.Collections;
import java.util.Date;
import java.util.Iterator;
@@ -22,11 +26,6 @@
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.CompletableFuture;
-
-import com.rabbitmq.client.AMQP.BasicProperties;
-import com.rabbitmq.client.Channel;
-
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,67 +37,72 @@ public class RabbitMqBenchmarkProducer implements BenchmarkProducer {
private final Channel channel;
private final String exchange;
private final ConfirmListener listener;
- /**To record msg and it's future structure.**/
+ /** To record msg and it's future structure. */
volatile SortedSet ackSet = Collections.synchronizedSortedSet(new TreeSet<>());
- private final ConcurrentHashMap> futureConcurrentHashMap = new ConcurrentHashMap<>();
+
+ private final ConcurrentHashMap> futureConcurrentHashMap =
+ new ConcurrentHashMap<>();
private final boolean messagePersistence;
public RabbitMqBenchmarkProducer(Channel channel, String exchange, boolean messagePersistence) {
this.channel = channel;
this.exchange = exchange;
this.messagePersistence = messagePersistence;
- this.listener = new ConfirmListener() {
- @Override
- public void handleNack(long deliveryTag, boolean multiple) {
- if (multiple) {
- SortedSet treeHeadSet = ackSet.headSet(deliveryTag + 1);
- synchronized(ackSet) {
- for(Iterator iterator = treeHeadSet.iterator(); iterator.hasNext();) {
- long value = iterator.next();
- iterator.remove();
- CompletableFuture future = futureConcurrentHashMap.get(value);
+ this.listener =
+ new ConfirmListener() {
+ @Override
+ public void handleNack(long deliveryTag, boolean multiple) {
+ if (multiple) {
+ SortedSet treeHeadSet = ackSet.headSet(deliveryTag + 1);
+ synchronized (ackSet) {
+ for (Iterator iterator = treeHeadSet.iterator(); iterator.hasNext(); ) {
+ long value = iterator.next();
+ iterator.remove();
+ CompletableFuture future = futureConcurrentHashMap.get(value);
+ if (future != null) {
+ future.completeExceptionally(
+ new RuntimeException("Message was negatively acknowledged"));
+ futureConcurrentHashMap.remove(value);
+ }
+ }
+ treeHeadSet.clear();
+ }
+
+ } else {
+ CompletableFuture future = futureConcurrentHashMap.get(deliveryTag);
if (future != null) {
- future.completeExceptionally(new RuntimeException("Message was negatively acknowledged"));
- futureConcurrentHashMap.remove(value);
+ future.completeExceptionally(
+ new RuntimeException("Message was negatively acknowledged"));
+ futureConcurrentHashMap.remove(deliveryTag);
}
+ ackSet.remove(deliveryTag);
}
- treeHeadSet.clear();
}
- } else {
- CompletableFuture future = futureConcurrentHashMap.get(deliveryTag);
- if (future != null) {
- future.completeExceptionally(new RuntimeException("Message was negatively acknowledged"));
- futureConcurrentHashMap.remove(deliveryTag);
- }
- ackSet.remove(deliveryTag);
- }
- }
- @Override
- public void handleAck(long deliveryTag, boolean multiple) {
- if (multiple) {
- SortedSet treeHeadSet = ackSet.headSet(deliveryTag + 1);
- synchronized(ackSet) {
- for(long value : treeHeadSet) {
- CompletableFuture future = futureConcurrentHashMap.get(value);
+ @Override
+ public void handleAck(long deliveryTag, boolean multiple) {
+ if (multiple) {
+ SortedSet treeHeadSet = ackSet.headSet(deliveryTag + 1);
+ synchronized (ackSet) {
+ for (long value : treeHeadSet) {
+ CompletableFuture future = futureConcurrentHashMap.get(value);
+ if (future != null) {
+ future.complete(null);
+ futureConcurrentHashMap.remove(value);
+ }
+ }
+ treeHeadSet.clear();
+ }
+ } else {
+ CompletableFuture future = futureConcurrentHashMap.get(deliveryTag);
if (future != null) {
future.complete(null);
- futureConcurrentHashMap.remove(value);
+ futureConcurrentHashMap.remove(deliveryTag);
}
+ ackSet.remove(deliveryTag);
}
- treeHeadSet.clear();
- }
- } else {
- CompletableFuture future = futureConcurrentHashMap.get(deliveryTag);
- if (future != null) {
- future.complete(null);
- futureConcurrentHashMap.remove(deliveryTag);
}
- ackSet.remove(deliveryTag);
- }
-
- }
- };
+ };
channel.addConfirmListener(listener);
}
@@ -133,5 +137,4 @@ public CompletableFuture sendAsync(Optional key, byte[] payload) {
return future;
}
-
}
diff --git a/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqConfig.java b/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqConfig.java
index 6518e44fe..4d2382046 100644
--- a/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqConfig.java
+++ b/driver-rabbitmq/src/main/java/io/openmessaging/benchmark/driver/rabbitmq/RabbitMqConfig.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.rabbitmq;
+
import java.util.ArrayList;
import java.util.List;
diff --git a/driver-redis/pom.xml b/driver-redis/pom.xml
index 95e079b07..38c9a2855 100644
--- a/driver-redis/pom.xml
+++ b/driver-redis/pom.xml
@@ -1,3 +1,4 @@
+
-
+ 4.0.0
- messaging-benchmarkio.openmessaging.benchmark
+ messaging-benchmark0.0.1-SNAPSHOT
- 4.0.0driver-redis
@@ -31,15 +31,15 @@
driver-api${project.version}
+
+ com.google.guava
+ guava
+ redis.clientsjedis3.7.0
-
- com.google.guava
- guava
-
diff --git a/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkConsumer.java b/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkConsumer.java
index c9d8b60a2..877d12080 100644
--- a/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkConsumer.java
+++ b/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkConsumer.java
@@ -13,8 +13,16 @@
*/
package io.openmessaging.benchmark.driver.redis;
+import static java.nio.charset.StandardCharsets.UTF_8;
+
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
import io.openmessaging.benchmark.driver.ConsumerCallback;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import redis.clients.jedis.Jedis;
@@ -23,14 +31,6 @@
import redis.clients.jedis.StreamEntryID;
import redis.clients.jedis.params.XReadGroupParams;
-import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
public class RedisBenchmarkConsumer implements BenchmarkConsumer {
private final JedisPool pool;
private final String topic;
@@ -40,7 +40,12 @@ public class RedisBenchmarkConsumer implements BenchmarkConsumer {
private final Future> consumerTask;
private volatile boolean closing = false;
- public RedisBenchmarkConsumer(final String consumerId, final String topic, final String subscriptionName, final JedisPool pool, ConsumerCallback consumerCallback) {
+ public RedisBenchmarkConsumer(
+ final String consumerId,
+ final String topic,
+ final String subscriptionName,
+ final JedisPool pool,
+ ConsumerCallback consumerCallback) {
this.pool = pool;
this.topic = topic;
this.subscriptionName = subscriptionName;
@@ -48,31 +53,34 @@ public RedisBenchmarkConsumer(final String consumerId, final String topic, final
this.executor = Executors.newSingleThreadExecutor();
Jedis jedis = this.pool.getResource();
+ this.consumerTask =
+ this.executor.submit(
+ () -> {
+ while (!closing) {
+ try {
+ Map streamQuery =
+ Collections.singletonMap(this.topic, StreamEntryID.UNRECEIVED_ENTRY);
+ List>> range =
+ jedis.xreadGroup(
+ this.subscriptionName,
+ this.consumerId,
+ XReadGroupParams.xReadGroupParams().block(0),
+ streamQuery);
+ if (range != null) {
+ for (Map.Entry> streamEntries : range) {
+ for (StreamEntry entry : streamEntries.getValue()) {
+ long timestamp = entry.getID().getTime();
+ byte[] payload = entry.getFields().get("payload").getBytes(UTF_8);
+ consumerCallback.messageReceived(payload, timestamp);
+ }
+ }
+ }
- this.consumerTask = this.executor.submit(() -> {
- while (!closing) {
- try {
- Map streamQuery = Collections.singletonMap(this.topic, StreamEntryID.UNRECEIVED_ENTRY);
- List>> range = jedis.xreadGroup(this.subscriptionName, this.consumerId,
- XReadGroupParams.xReadGroupParams().block(0), streamQuery);
- if (range!=null){
- for (Map.Entry> streamEntries:
- range) {
- for (StreamEntry entry:
- streamEntries.getValue()) {
- long timestamp = entry.getID().getTime();
- byte[]payload = entry.getFields().get("payload").getBytes(StandardCharsets.UTF_8);
- consumerCallback.messageReceived(payload, timestamp);
+ } catch (Exception e) {
+ log.error("Failed to read from consumer instance.", e);
+ }
}
- }
- }
-
- } catch (Exception e) {
- log.error("Failed to read from consumer instance.", e);
- }
- }
- });
-
+ });
}
@Override
@@ -82,6 +90,6 @@ public void close() throws Exception {
consumerTask.get();
pool.close();
}
- private static final Logger log = LoggerFactory.getLogger(RedisBenchmarkDriver.class);
+ private static final Logger log = LoggerFactory.getLogger(RedisBenchmarkDriver.class);
}
diff --git a/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkDriver.java b/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkDriver.java
index 08b1833c5..2579ccb30 100644
--- a/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkDriver.java
+++ b/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkDriver.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.redis;
+
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
@@ -21,6 +22,7 @@
import io.openmessaging.benchmark.driver.BenchmarkDriver;
import io.openmessaging.benchmark.driver.BenchmarkProducer;
import io.openmessaging.benchmark.driver.ConsumerCallback;
+import io.openmessaging.benchmark.driver.redis.client.RedisClientConfig;
import java.io.File;
import java.io.IOException;
import java.util.Random;
@@ -28,16 +30,17 @@
import org.apache.bookkeeper.stats.StatsLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import redis.clients.jedis.*;
-import io.openmessaging.benchmark.driver.redis.client.RedisClientConfig;
-
+import redis.clients.jedis.Jedis;
+import redis.clients.jedis.JedisPool;
+import redis.clients.jedis.JedisPoolConfig;
public class RedisBenchmarkDriver implements BenchmarkDriver {
JedisPool jedisPool;
private RedisClientConfig clientConfig;
@Override
- public void initialize(final File configurationFile, final StatsLogger statsLogger) throws IOException {
+ public void initialize(final File configurationFile, final StatsLogger statsLogger)
+ throws IOException {
this.clientConfig = readConfig(configurationFile);
}
@@ -48,8 +51,7 @@ public String getTopicNamePrefix() {
@Override
public CompletableFuture createTopic(final String topic, final int partitions) {
- return CompletableFuture.runAsync(() -> {
- });
+ return CompletableFuture.runAsync(() -> {});
}
@Override
@@ -61,9 +63,9 @@ public CompletableFuture createProducer(final String topic) {
}
@Override
- public CompletableFuture createConsumer(final String topic, final String subscriptionName,
- final ConsumerCallback consumerCallback) {
- String consumerId = "consumer-"+getRandomString();
+ public CompletableFuture createConsumer(
+ final String topic, final String subscriptionName, final ConsumerCallback consumerCallback) {
+ String consumerId = "consumer-" + getRandomString();
if (jedisPool == null) {
setupJedisConn();
}
@@ -72,21 +74,37 @@ public CompletableFuture createConsumer(final String topic, f
} catch (Exception e) {
log.info("Failed to create consumer instance.", e);
}
- return CompletableFuture.completedFuture(new RedisBenchmarkConsumer( consumerId, topic, subscriptionName,jedisPool, consumerCallback));
+ return CompletableFuture.completedFuture(
+ new RedisBenchmarkConsumer(
+ consumerId, topic, subscriptionName, jedisPool, consumerCallback));
}
private void setupJedisConn() {
JedisPoolConfig poolConfig = new JedisPoolConfig();
poolConfig.setMaxTotal(this.clientConfig.jedisPoolMaxTotal);
poolConfig.setMaxIdle(this.clientConfig.jedisPoolMaxIdle);
- if( this.clientConfig.redisPass != null ){
- if ( this.clientConfig.redisUser != null){
- jedisPool = new JedisPool(poolConfig, this.clientConfig.redisHost, this.clientConfig.redisPort, 2000, this.clientConfig.redisPass, this.clientConfig.redisUser);
+ if (this.clientConfig.redisPass != null) {
+ if (this.clientConfig.redisUser != null) {
+ jedisPool =
+ new JedisPool(
+ poolConfig,
+ this.clientConfig.redisHost,
+ this.clientConfig.redisPort,
+ 2000,
+ this.clientConfig.redisPass,
+ this.clientConfig.redisUser);
} else {
- jedisPool = new JedisPool(poolConfig, this.clientConfig.redisHost, this.clientConfig.redisPort,2000, this.clientConfig.redisPass );
- }
+ jedisPool =
+ new JedisPool(
+ poolConfig,
+ this.clientConfig.redisHost,
+ this.clientConfig.redisPort,
+ 2000,
+ this.clientConfig.redisPass);
+ }
} else {
- jedisPool = new JedisPool(poolConfig, this.clientConfig.redisHost, this.clientConfig.redisPort,2000 );
+ jedisPool =
+ new JedisPool(poolConfig, this.clientConfig.redisHost, this.clientConfig.redisPort, 2000);
}
}
@@ -97,8 +115,9 @@ public void close() throws Exception {
}
}
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static RedisClientConfig readConfig(File configurationFile) throws IOException {
return mapper.readValue(configurationFile, RedisClientConfig.class);
@@ -106,7 +125,7 @@ private static RedisClientConfig readConfig(File configurationFile) throws IOExc
private static final Random random = new Random();
- private static final String getRandomString() {
+ private static String getRandomString() {
byte[] buffer = new byte[5];
random.nextBytes(buffer);
return BaseEncoding.base64Url().omitPadding().encode(buffer);
diff --git a/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkProducer.java b/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkProducer.java
index 0951a9018..ead1047ac 100644
--- a/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkProducer.java
+++ b/driver-redis/src/main/java/io/openmessaging/benchmark/driver/redis/RedisBenchmarkProducer.java
@@ -14,13 +14,14 @@
package io.openmessaging.benchmark.driver.redis;
import static java.nio.charset.StandardCharsets.UTF_8;
+
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
import java.util.HashMap;
import java.util.Map;
-
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-import redis.clients.jedis.*;
+import redis.clients.jedis.Jedis;
+import redis.clients.jedis.JedisPool;
import redis.clients.jedis.params.XAddParams;
public class RedisBenchmarkProducer implements BenchmarkProducer {
@@ -44,12 +45,12 @@ public CompletableFuture sendAsync(final Optional key, final byte[
}
CompletableFuture future = new CompletableFuture<>();
- try (Jedis jedis = this.pool.getResource()) {
- jedis.xadd(this.rmqTopic.getBytes(UTF_8),map1, this.xaddParams);
- future.complete(null);
- } catch (Exception e) {
- future.completeExceptionally(e);
- }
+ try (Jedis jedis = this.pool.getResource()) {
+ jedis.xadd(this.rmqTopic.getBytes(UTF_8), map1, this.xaddParams);
+ future.complete(null);
+ } catch (Exception e) {
+ future.completeExceptionally(e);
+ }
return future;
}
diff --git a/driver-rocketmq/pom.xml b/driver-rocketmq/pom.xml
index 6fe1abbb6..a251ada24 100644
--- a/driver-rocketmq/pom.xml
+++ b/driver-rocketmq/pom.xml
@@ -1,3 +1,4 @@
+
-
+ 4.0.0
- messaging-benchmarkio.openmessaging.benchmark
+ messaging-benchmark0.0.1-SNAPSHOT
- 4.0.0driver-rocketmq
@@ -35,6 +35,10 @@
driver-api${project.version}
+
+ com.google.guava
+ guava
+ org.apache.rocketmqrocketmq-acl
@@ -50,10 +54,6 @@
rocketmq-tools${rocketmq.version}
-
- com.google.guava
- guava
-
diff --git a/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkConsumer.java b/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkConsumer.java
index d09475f9d..97a7e4912 100644
--- a/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkConsumer.java
+++ b/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkConsumer.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.rocketmq;
+
import io.openmessaging.benchmark.driver.BenchmarkConsumer;
import org.apache.rocketmq.client.consumer.MQPushConsumer;
diff --git a/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkDriver.java b/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkDriver.java
index 8ac7c1f72..d7d547098 100644
--- a/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkDriver.java
+++ b/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkDriver.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.driver.rocketmq;
+
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectWriter;
@@ -53,10 +54,14 @@ public class RocketMQBenchmarkDriver implements BenchmarkDriver {
private RPCHook rpcHook;
@Override
- public void initialize(final File configurationFile, final StatsLogger statsLogger) throws IOException {
+ public void initialize(final File configurationFile, final StatsLogger statsLogger)
+ throws IOException {
this.rmqClientConfig = readConfig(configurationFile);
if (isAclEnabled()) {
- rpcHook = new AclClientRPCHook(new SessionCredentials(this.rmqClientConfig.accessKey, this.rmqClientConfig.secretKey));
+ rpcHook =
+ new AclClientRPCHook(
+ new SessionCredentials(
+ this.rmqClientConfig.accessKey, this.rmqClientConfig.secretKey));
this.rmqAdmin = new DefaultMQAdminExt(rpcHook);
} else {
this.rmqAdmin = new DefaultMQAdminExt();
@@ -68,7 +73,6 @@ public void initialize(final File configurationFile, final StatsLogger statsLogg
} catch (MQClientException e) {
log.error("Start the RocketMQ admin tool failed.");
}
-
}
@Override
@@ -78,26 +82,33 @@ public String getTopicNamePrefix() {
@Override
public CompletableFuture createTopic(final String topic, final int partitions) {
- return CompletableFuture.runAsync(() -> {
- TopicConfig topicConfig = new TopicConfig();
- topicConfig.setOrder(false);
- topicConfig.setPerm(6);
- topicConfig.setReadQueueNums(partitions);
- topicConfig.setWriteQueueNums(partitions);
- topicConfig.setTopicName(topic);
-
- try {
- Set brokerList = CommandUtil.fetchMasterAddrByClusterName(this.rmqAdmin, this.rmqClientConfig.clusterName);
- topicConfig.setReadQueueNums(Math.max(1, partitions / brokerList.size()));
- topicConfig.setWriteQueueNums(Math.max(1, partitions / brokerList.size()));
-
- for (String brokerAddr : brokerList) {
- this.rmqAdmin.createAndUpdateTopicConfig(brokerAddr, topicConfig);
- }
- } catch (Exception e) {
- throw new RuntimeException(String.format("Failed to create topic [%s] to cluster [%s]", topic, this.rmqClientConfig.clusterName), e);
- }
- });
+ return CompletableFuture.runAsync(
+ () -> {
+ TopicConfig topicConfig = new TopicConfig();
+ topicConfig.setOrder(false);
+ topicConfig.setPerm(6);
+ topicConfig.setReadQueueNums(partitions);
+ topicConfig.setWriteQueueNums(partitions);
+ topicConfig.setTopicName(topic);
+
+ try {
+ Set brokerList =
+ CommandUtil.fetchMasterAddrByClusterName(
+ this.rmqAdmin, this.rmqClientConfig.clusterName);
+ topicConfig.setReadQueueNums(Math.max(1, partitions / brokerList.size()));
+ topicConfig.setWriteQueueNums(Math.max(1, partitions / brokerList.size()));
+
+ for (String brokerAddr : brokerList) {
+ this.rmqAdmin.createAndUpdateTopicConfig(brokerAddr, topicConfig);
+ }
+ } catch (Exception e) {
+ throw new RuntimeException(
+ String.format(
+ "Failed to create topic [%s] to cluster [%s]",
+ topic, this.rmqClientConfig.clusterName),
+ e);
+ }
+ });
}
@Override
@@ -130,11 +141,12 @@ public CompletableFuture createProducer(final String topic) {
}
@Override
- public CompletableFuture createConsumer(final String topic, final String subscriptionName,
- final ConsumerCallback consumerCallback) {
+ public CompletableFuture createConsumer(
+ final String topic, final String subscriptionName, final ConsumerCallback consumerCallback) {
DefaultMQPushConsumer rmqConsumer;
if (isAclEnabled()) {
- rmqConsumer = new DefaultMQPushConsumer(subscriptionName, rpcHook, new AllocateMessageQueueAveragely());
+ rmqConsumer =
+ new DefaultMQPushConsumer(subscriptionName, rpcHook, new AllocateMessageQueueAveragely());
} else {
rmqConsumer = new DefaultMQPushConsumer(subscriptionName);
}
@@ -145,12 +157,14 @@ public CompletableFuture createConsumer(final String topic, f
}
try {
rmqConsumer.subscribe(topic, "*");
- rmqConsumer.registerMessageListener((MessageListenerConcurrently) (msgs, context) -> {
- for (MessageExt message : msgs) {
- consumerCallback.messageReceived(message.getBody(), message.getBornTimestamp());
- }
- return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
- });
+ rmqConsumer.registerMessageListener(
+ (MessageListenerConcurrently)
+ (msgs, context) -> {
+ for (MessageExt message : msgs) {
+ consumerCallback.messageReceived(message.getBody(), message.getBornTimestamp());
+ }
+ return ConsumeConcurrentlyStatus.CONSUME_SUCCESS;
+ });
rmqConsumer.start();
} catch (MQClientException e) {
log.error("Failed to create consumer instance.", e);
@@ -160,8 +174,8 @@ public CompletableFuture createConsumer(final String topic, f
}
public boolean isAclEnabled() {
- return !(StringUtils.isAnyBlank(this.rmqClientConfig.accessKey, this.rmqClientConfig.secretKey) ||
- StringUtils.isAnyEmpty(this.rmqClientConfig.accessKey, this.rmqClientConfig.secretKey));
+ return !(StringUtils.isAnyBlank(this.rmqClientConfig.accessKey, this.rmqClientConfig.secretKey)
+ || StringUtils.isAnyEmpty(this.rmqClientConfig.accessKey, this.rmqClientConfig.secretKey));
}
@Override
@@ -172,8 +186,9 @@ public void close() throws Exception {
this.rmqAdmin.shutdown();
}
- private static final ObjectMapper mapper = new ObjectMapper(new YAMLFactory())
- .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+ private static final ObjectMapper mapper =
+ new ObjectMapper(new YAMLFactory())
+ .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
private static RocketMQClientConfig readConfig(File configurationFile) throws IOException {
return mapper.readValue(configurationFile, RocketMQClientConfig.class);
@@ -181,7 +196,7 @@ private static RocketMQClientConfig readConfig(File configurationFile) throws IO
private static final Random random = new Random();
- private static final String getRandomString() {
+ private static String getRandomString() {
byte[] buffer = new byte[5];
random.nextBytes(buffer);
return BaseEncoding.base64Url().omitPadding().encode(buffer);
diff --git a/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkProducer.java b/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkProducer.java
index a60498933..bd80bf558 100644
--- a/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkProducer.java
+++ b/driver-rocketmq/src/main/java/io/openmessaging/benchmark/driver/rocketmq/RocketMQBenchmarkProducer.java
@@ -13,17 +13,15 @@
*/
package io.openmessaging.benchmark.driver.rocketmq;
+
+import io.openmessaging.benchmark.driver.BenchmarkProducer;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
-
-import org.apache.rocketmq.client.producer.DefaultMQProducer;
import org.apache.rocketmq.client.producer.MQProducer;
import org.apache.rocketmq.client.producer.SendCallback;
import org.apache.rocketmq.client.producer.SendResult;
import org.apache.rocketmq.common.message.Message;
-import io.openmessaging.benchmark.driver.BenchmarkProducer;
-
public class RocketMQBenchmarkProducer implements BenchmarkProducer {
private final MQProducer rmqProducer;
private final String rmqTopic;
@@ -42,17 +40,19 @@ public CompletableFuture sendAsync(final Optional key, final byte[
CompletableFuture future = new CompletableFuture<>();
try {
- this.rmqProducer.send(message, new SendCallback() {
- @Override
- public void onSuccess(final SendResult sendResult) {
- future.complete(null);
- }
+ this.rmqProducer.send(
+ message,
+ new SendCallback() {
+ @Override
+ public void onSuccess(final SendResult sendResult) {
+ future.complete(null);
+ }
- @Override
- public void onException(final Throwable e) {
- future.completeExceptionally(e);
- }
- });
+ @Override
+ public void onException(final Throwable e) {
+ future.completeExceptionally(e);
+ }
+ });
} catch (Exception e) {
future.completeExceptionally(e);
}
diff --git a/package/pom.xml b/package/pom.xml
index 5af96eb4c..199d8f145 100644
--- a/package/pom.xml
+++ b/package/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
-
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
-
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
- package
- Packaging
- pom
+ package
+ pom
+ Packaging
-
-
-
- maven-assembly-plugin
- 3.3.0
-
-
- distro-assembly
- package
-
- single
-
-
- true
- posix
- openmessaging-benchmark-${project.version}
-
- src/assemble/bin.xml
- src/assemble/src.xml
-
-
-
-
-
-
-
+
+
+ ${project.groupId}
+ benchmark-framework
+ ${project.version}
+
+
-
-
- ${project.groupId}
- benchmark-framework
- ${project.version}
-
-
+
+
+
+ maven-assembly-plugin
+ 3.3.0
+
+
+ distro-assembly
+
+ single
+
+ package
+
+ true
+ posix
+ openmessaging-benchmark-${project.version}
+
+ src/assemble/bin.xml
+ src/assemble/src.xml
+
+
+
+
+
+
+
diff --git a/pom.xml b/pom.xml
index f36d00ab4..64981c973 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1,3 +1,4 @@
+
- 4.0.0
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ 4.0.0
- io.openmessaging.benchmark
- messaging-benchmark
- 0.0.1-SNAPSHOT
- Messaging Benchmark
- pom
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+ pom
+ Messaging Benchmark
-
- benchmark-framework
- driver-api
+ 2017
- driver-pravega
- driver-pulsar
- driver-kafka
- driver-rabbitmq
- driver-artemis
- driver-bookkeeper
- driver-rocketmq
- driver-nats
- driver-nats-streaming
- driver-nsq
- driver-jms
- driver-redis
- package
- docker
- driver-kop
- tool
-
+
+ OpenMessaging Project
+ https://openmessaging.cloud
+
-
- OpenMessaging Project
- https://openmessaging.cloud
-
+
+
+ Apache License, Version 2.0
+ https://www.apache.org/licenses/LICENSE-2.0.txt
+ repo
+
+
- 2017
+
+ benchmark-framework
+ driver-api
-
-
- Apache License, Version 2.0
- https://www.apache.org/licenses/LICENSE-2.0.txt
- repo
-
-
+ driver-pravega
+ driver-pulsar
+ driver-kafka
+ driver-rabbitmq
+ driver-artemis
+ driver-bookkeeper
+ driver-rocketmq
+ driver-nats
+ driver-nats-streaming
+ driver-nsq
+ driver-jms
+ driver-redis
+ package
+ docker
+ driver-kop
+ tool
+
-
- UTF-8
- UTF-8
+
+ UTF-8
+ UTF-8
- 3.23.1
- 4.14.4
- 10.3.3
- 3.12.0
- 2.17.1
- 1.18.24
- 2.13.2
- 1.48
- 5.9.0
- 4.8.0
- 4.1.65.Final
- 1.7.36
+ 3.23.1
+ 4.14.4
+ 10.3.3
+ 3.12.0
+ 2.17.1
+ 1.18.24
+ 2.13.2
+ 1.48
+ 5.9.0
+ 4.8.0
+ 4.1.65.Final
+ 1.7.36
- 3.2.0
- 0.8.8
- 4.1
- 3.10.1
- 3.1.0
- 3.0.0-M7
- 4.7.2.0
- 2.25.0
-
+ 3.2.0
+ 0.8.8
+ 4.1
+ 3.10.1
+ 3.1.0
+ 3.0.0-M7
+ 4.7.2.0
+ 2.25.0
+
-
-
-
-
- com.diffplug.spotless
- spotless-maven-plugin
- ${spotless.plugin.version}
-
-
-
- false
- true
- scope,groupId,artifactId
- groupId,artifactId
- groupId,artifactId
-
-
-
-
-
-
-
-
-
- **/*.md
-
-
-
-
-
-
- etc/**/*.xml
- .github/workflows/**/*.yml
- **/doc/**/*.puml
-
-
-
-
- true
- 4
-
-
-
-
-
-
- check
-
- check
-
-
-
-
-
-
- com.github.spotbugs
- spotbugs-maven-plugin
- ${spotbugs.plugin.version}
-
- etc/findbugsExclude.xml
-
-
-
-
- check
-
-
-
-
-
-
- com.mycila
- license-maven-plugin
- ${license.plugin.version}
-
- etc/APACHE-2.txt
-
- LICENSE
- NOTICE
- payload/**
- **/*.pyc
- **/.pydevproject
- .github/**
-
-
- SCRIPT_STYLE
- SCRIPT_STYLE
- SCRIPT_STYLE
- SCRIPT_STYLE
- XML_STYLE
- SCRIPT_STYLE
- APOSTROPHE_STYLE
-
-
-
-
- com.mycila
- license-maven-plugin-git
- ${license.plugin.version}
-
-
-
-
-
- check
-
- process-sources
-
-
-
-
-
- org.apache.maven.plugins
- maven-checkstyle-plugin
- ${checkstyle.plugin.version}
-
- true
- true
- true
- etc/checkstyle.xml
- true
-
-
-
- com.puppycrawl.tools
- checkstyle
- ${checkstyle.version}
-
-
-
-
-
- check
-
- validate
-
-
-
-
- org.apache.maven.plugins
- maven-enforcer-plugin
- ${maven.enforcer.plugin.version}
-
-
- enforce-maven-version
-
- enforce
-
-
-
-
- [3.8.6,)
-
-
- true
-
-
-
-
-
- maven-failsafe-plugin
- ${maven.surefire.plugin.version}
-
-
-
- integration-test
- verify
-
-
-
-
-
- maven-surefire-plugin
- ${maven.surefire.plugin.version}
-
-
-
- org.jacoco
- jacoco-maven-plugin
- ${jacoco.plugin.version}
-
-
-
- prepare-agent
-
-
-
- report
-
- report
-
- verify
-
-
- check
-
- check
-
- verify
-
-
-
- BUNDLE
-
-
- CLASS
- COVEREDRATIO
- 0.00
-
-
- METHOD
- COVEREDRATIO
- 0.00
-
-
- LINE
- COVEREDRATIO
- 0.00
-
-
- BRANCH
- COVEREDRATIO
- 0.00
-
-
-
-
-
-
-
-
-
-
+
+
+
+ com.beust
+ jcommander
+ ${jcommander.version}
+
+
+ com.fasterxml.jackson.core
+ jackson-annotations
+ ${jackson.version}
+
+
+ com.fasterxml.jackson.dataformat
+ jackson-dataformat-yaml
+ ${jackson.version}
+
+
+ com.fasterxml.jackson.jaxrs
+ jackson-jaxrs-base
+ ${jackson.version}
+
+
+ com.fasterxml.jackson.jaxrs
+ jackson-jaxrs-json-provider
+ ${jackson.version}
+
+
+ com.google.guava
+ guava
+ 29.0-jre
+
+
+ io.netty
+ netty-all
+ ${netty.version}
+
+
+ org.apache.commons
+ commons-lang3
+ ${commons.lang3.version}
+
+
+ org.apache.logging.log4j
+ log4j-core
+ ${log4j.version}
+
+
+ org.apache.logging.log4j
+ log4j-slf4j-impl
+ ${log4j.version}
+
+
+ org.projectlombok
+ lombok
+ ${lombok.version}
+ provided
+
+
+ org.assertj
+ assertj-core
+ ${assertj.version}
+ test
+
+
+ org.junit.jupiter
+ junit-jupiter
+ ${junit.jupiter.version}
+ test
+
+
+ org.mockito
+ mockito-junit-jupiter
+ ${mockito.junit.jupiter.version}
+ test
+
+
+
-
-
-
- com.beust
- jcommander
- ${jcommander.version}
-
-
- com.fasterxml.jackson.jaxrs
- jackson-jaxrs-base
- ${jackson.version}
-
-
- com.fasterxml.jackson.jaxrs
- jackson-jaxrs-json-provider
- ${jackson.version}
-
-
- com.fasterxml.jackson.core
- jackson-annotations
- ${jackson.version}
-
-
- com.fasterxml.jackson.dataformat
- jackson-dataformat-yaml
- ${jackson.version}
-
-
- com.google.guava
- guava
- 29.0-jre
-
-
- io.netty
- netty-all
- ${netty.version}
-
-
- org.assertj
- assertj-core
- ${assertj.version}
- test
-
-
- org.apache.commons
- commons-lang3
- ${commons.lang3.version}
-
-
- org.apache.logging.log4j
- log4j-core
- ${log4j.version}
-
-
- org.apache.logging.log4j
- log4j-slf4j-impl
- ${log4j.version}
-
-
- org.projectlombok
- lombok
- ${lombok.version}
- provided
-
-
- org.junit.jupiter
- junit-jupiter
- ${junit.jupiter.version}
- test
-
-
- org.mockito
- mockito-junit-jupiter
- ${mockito.junit.jupiter.version}
- test
-
-
-
+
+
+
+
+ com.diffplug.spotless
+ spotless-maven-plugin
+ ${spotless.plugin.version}
+
+
+
+ false
+ true
+ 4
+ scope,groupId,artifactId
+ groupId,artifactId
+ groupId,artifactId
+
+
+
+
+
+
+
+ true
+ 2
+
+
+ true
+ 4
+
+
+
+
+ **/*.md
+
+
+
+
+
+
+ etc/**/*.xml
+ .github/workflows/**/*.yml
+ **/doc/**/*.puml
+
+
+
+
+ true
+ 4
+
+
+
+
+
+
+ check
+
+ check
+
+
+
+
+
+
+ com.github.spotbugs
+ spotbugs-maven-plugin
+ ${spotbugs.plugin.version}
+
+ etc/findbugsExclude.xml
+
+
+
+
+ check
+
+
+
+
+
+
+ com.mycila
+ license-maven-plugin
+ ${license.plugin.version}
+
+ etc/APACHE-2.txt
+
+ LICENSE
+ NOTICE
+ payload/**
+ **/*.pyc
+ **/.pydevproject
+ .github/**
+
+
+ SCRIPT_STYLE
+ SCRIPT_STYLE
+ SCRIPT_STYLE
+ SCRIPT_STYLE
+ XML_STYLE
+ SCRIPT_STYLE
+ APOSTROPHE_STYLE
+
+
+
+
+ com.mycila
+ license-maven-plugin-git
+ ${license.plugin.version}
+
+
+
+
+
+ check
+
+ process-sources
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-checkstyle-plugin
+ ${checkstyle.plugin.version}
+
+ true
+ true
+ true
+ etc/checkstyle.xml
+ true
+
+
+
+ com.puppycrawl.tools
+ checkstyle
+ ${checkstyle.version}
+
+
+
+
+
+ check
+
+ validate
+
+
+
+
+ org.apache.maven.plugins
+ maven-enforcer-plugin
+ ${maven.enforcer.plugin.version}
+
+
+ enforce-maven-version
+
+ enforce
+
+
+
+
+ [3.8.6,)
+
+
+ true
+
+
+
+
+
+ maven-failsafe-plugin
+ ${maven.surefire.plugin.version}
+
+
+
+ integration-test
+ verify
+
+
+
+
+
+ maven-surefire-plugin
+ ${maven.surefire.plugin.version}
+
+
+
+ org.jacoco
+ jacoco-maven-plugin
+ ${jacoco.plugin.version}
+
+
+
+ prepare-agent
+
+
+
+ report
+
+ report
+
+ verify
+
+
+ check
+
+ check
+
+ verify
+
+
+
+ BUNDLE
+
+
+ CLASS
+ COVEREDRATIO
+ 0.00
+
+
+ METHOD
+ COVEREDRATIO
+ 0.00
+
+
+ LINE
+ COVEREDRATIO
+ 0.00
+
+
+ BRANCH
+ COVEREDRATIO
+ 0.00
+
+
+
+
+
+
+
+
+
+
-
-
- modern-java-compile
-
- [9,)
-
-
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
- 3.8.1
-
- 8
- 8
- 8
- ${project.build.sourceEncoding}
- true
- true
- true
-
-
-
-
-
-
-
- jdk-8-compile
-
- [,8]
-
-
-
-
-
- org.apache.maven.plugins
- maven-compiler-plugin
- 3.8.1
-
- 8
- 8
- ${project.build.sourceEncoding}
- true
- true
- true
-
-
-
-
-
-
-
+
+
+ modern-java-compile
+
+ [9,)
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ 3.8.1
+
+ 8
+ 8
+ 8
+ ${project.build.sourceEncoding}
+ true
+ true
+ true
+
+
+
+
+
+
+
+ jdk-8-compile
+
+ [,8]
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+ 3.8.1
+
+ 8
+ 8
+ ${project.build.sourceEncoding}
+ true
+ true
+ true
+
+
+
+
+
+
+
diff --git a/tool/README.md b/tool/README.md
index 9acbbfc92..231160428 100644
--- a/tool/README.md
+++ b/tool/README.md
@@ -6,7 +6,8 @@ Generates a set of `Workload` definition files from a `WorkloadSetTemplate` file
### Example
-Template:
+Template:
+
```yaml
nameFormat: "${topics}-topics-${partitionsPerTopic}-partitions-${messageSize}b-${producersPerTopic}p-${consumerPerSubscription}c-${producerRate}"
topics: [1]
@@ -22,6 +23,7 @@ testDurationMinutes: 15
```
Usage:
+
```
mkdir my-workloads
io.openmessaging.benchmark.tool.workload.WorkloadGenerationTool \
@@ -30,6 +32,7 @@ mkdir my-workloads
```
Output:
+
```
Starting benchmark with config: templateFile: "template.yaml"
outputFolder: "my-workloads"
@@ -43,6 +46,7 @@ Generated 49 workloads.
```
Example generated workload:
+
```yaml
name: "1-topics-1-partitions-10kb-64p-2c-50k"
topics: 1
@@ -60,4 +64,5 @@ producerRate: 50000
consumerBacklogSizeGB: 0
testDurationMinutes: 5
warmupDurationMinutes: 1
-```
\ No newline at end of file
+```
+
diff --git a/tool/pom.xml b/tool/pom.xml
index 73d344f36..97af84940 100644
--- a/tool/pom.xml
+++ b/tool/pom.xml
@@ -14,62 +14,61 @@
limitations under the License.
-->
-
-
- messaging-benchmark
- io.openmessaging.benchmark
- 0.0.1-SNAPSHOT
-
- 4.0.0
+ 4.0.0
+
+ io.openmessaging.benchmark
+ messaging-benchmark
+ 0.0.1-SNAPSHOT
+
- tool
+ tool
-
-
- io.openmessaging.benchmark
- benchmark-framework
- ${project.version}
-
-
- org.projectlombok
- lombok
-
-
- com.beust
- jcommander
-
-
- org.apache.logging.log4j
- log4j-slf4j-impl
-
-
- org.apache.commons
- commons-text
- 1.9
-
-
- org.apache.commons
- commons-io
- 1.3.2
-
-
- org.assertj
- assertj-core
-
-
- org.junit.jupiter
- junit-jupiter
- 5.9.0
- test
-
-
- org.mockito
- mockito-junit-jupiter
- 4.7.0
- test
-
-
+
+
+ com.beust
+ jcommander
+
+
+ io.openmessaging.benchmark
+ benchmark-framework
+ ${project.version}
+
+
+ org.apache.commons
+ commons-io
+ 1.3.2
+
+
+ org.apache.commons
+ commons-text
+ 1.9
+
+
+ org.apache.logging.log4j
+ log4j-slf4j-impl
+
+
+ org.assertj
+ assertj-core
+
+
+ org.projectlombok
+ lombok
+
+
+ org.junit.jupiter
+ junit-jupiter
+ 5.9.0
+ test
+
+
+ org.mockito
+ mockito-junit-jupiter
+ 4.7.0
+ test
+
+
diff --git a/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadGenerationTool.java b/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadGenerationTool.java
index 0c11323b4..91bfb7d5d 100644
--- a/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadGenerationTool.java
+++ b/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadGenerationTool.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.tool.workload;
+
import com.beust.jcommander.JCommander;
import com.beust.jcommander.Parameter;
import com.beust.jcommander.ParameterException;
@@ -26,14 +27,13 @@
import java.util.List;
import lombok.extern.slf4j.Slf4j;
-/**
- * Generates a set of {@link Workload} definition files from a {@link WorkloadSetTemplate} file.
- */
+/** Generates a set of {@link Workload} definition files from a {@link WorkloadSetTemplate} file. */
@Slf4j
public class WorkloadGenerationTool {
private static final ObjectMapper mapper =
- new ObjectMapper(new YAMLFactory().configure(YAMLGenerator.Feature.WRITE_DOC_START_MARKER, false))
+ new ObjectMapper(
+ new YAMLFactory().configure(YAMLGenerator.Feature.WRITE_DOC_START_MARKER, false))
.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
static {
@@ -61,7 +61,8 @@ public static void main(String[] args) throws IOException {
// Dump configuration variables
log.info("Starting benchmark with config: {}", mapper.writeValueAsString(arguments));
- WorkloadSetTemplate template = mapper.readValue(arguments.templateFile, WorkloadSetTemplate.class);
+ WorkloadSetTemplate template =
+ mapper.readValue(arguments.templateFile, WorkloadSetTemplate.class);
List workloads = new WorkloadGenerator(template).generate();
for (Workload w : workloads) {
File outputFile = null;
@@ -75,14 +76,22 @@ public static void main(String[] args) throws IOException {
}
static class Arguments {
- @Parameter(names = {"-t", "--template-file"},
- description = "Path to a YAML file containing the workload template", required = true)
+ @Parameter(
+ names = {"-t", "--template-file"},
+ description = "Path to a YAML file containing the workload template",
+ required = true)
public File templateFile;
- @Parameter(names = {"-o", "--output-folder"}, description = "Output", required = true)
+ @Parameter(
+ names = {"-o", "--output-folder"},
+ description = "Output",
+ required = true)
public File outputFolder;
- @Parameter(names = {"-h", "--help"}, description = "Help message", help = true)
+ @Parameter(
+ names = {"-h", "--help"},
+ description = "Help message",
+ help = true)
boolean help;
}
}
diff --git a/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadGenerator.java b/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadGenerator.java
index 7e6d77b8c..d2bfc7956 100644
--- a/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadGenerator.java
+++ b/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadGenerator.java
@@ -14,6 +14,7 @@
package io.openmessaging.benchmark.tool.workload;
import static java.util.Collections.unmodifiableList;
+
import io.openmessaging.benchmark.Workload;
import java.io.IOException;
import java.util.ArrayList;
diff --git a/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadNameFormat.java b/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadNameFormat.java
index 6a97b0c27..501d6fc00 100644
--- a/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadNameFormat.java
+++ b/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadNameFormat.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.tool.workload;
+
import io.openmessaging.benchmark.Workload;
import java.util.HashMap;
import java.util.Map;
@@ -20,9 +21,10 @@
import org.apache.commons.lang.text.StrSubstitutor;
/**
- * Generates {@link Workload} names based on a template. Substitutes template place-holders of the form
- * {@code ${variableName}}, where {@code variableName} is the name of a public member in {@link Workload}. Note that the
- * set of variables is statically assigned. Numeric values will typically be in a form that includes an SI suffix.
+ * Generates {@link Workload} names based on a template. Substitutes template place-holders of the
+ * form {@code ${variableName}}, where {@code variableName} is the name of a public member in {@link
+ * Workload}. Note that the set of variables is statically assigned. Numeric values will typically
+ * be in a form that includes an SI suffix.
*/
@RequiredArgsConstructor
class WorkloadNameFormat {
@@ -42,8 +44,11 @@ String from(Workload workload) {
params.put("subscriptionsPerTopic", countToDisplaySize(workload.subscriptionsPerTopic));
params.put("producersPerTopic", countToDisplaySize(workload.producersPerTopic));
params.put("consumerPerSubscription", countToDisplaySize(workload.consumerPerSubscription));
- params.put("producerRate",
- (workload.producerRate >= MAX_PRODUCER_RATE) ? "max-rate" : countToDisplaySize(workload.producerRate));
+ params.put(
+ "producerRate",
+ (workload.producerRate >= MAX_PRODUCER_RATE)
+ ? "max-rate"
+ : countToDisplaySize(workload.producerRate));
params.put("keyDistributor", workload.keyDistributor);
params.put("payloadFile", workload.payloadFile);
params.put("useRandomizedPayloads", workload.useRandomizedPayloads);
diff --git a/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadSetTemplate.java b/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadSetTemplate.java
index 3ce018847..538a28ef4 100644
--- a/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadSetTemplate.java
+++ b/tool/src/main/java/io/openmessaging/benchmark/tool/workload/WorkloadSetTemplate.java
@@ -13,6 +13,7 @@
*/
package io.openmessaging.benchmark.tool.workload;
+
import io.openmessaging.benchmark.utils.distributor.KeyDistributorType;
import java.util.Collections;
import java.util.List;
@@ -20,27 +21,28 @@
import lombok.NoArgsConstructor;
/**
- * A template that defines a set of workload definitions. This is much like the
- * {@link io.openmessaging.benchmark.Workload} entity, except that for many values that typically change in a benchmark,
- * one can specify a sequence of values.
+ * A template that defines a set of workload definitions. This is much like the {@link
+ * io.openmessaging.benchmark.Workload} entity, except that for many values that typically change in
+ * a benchmark, one can specify a sequence of values.
*/
@Data
@NoArgsConstructor
public class WorkloadSetTemplate {
public static final String DEFAULT_NAME_TEMPLATE =
"${topics}-topics-${partitionsPerTopic}-partitions-${messageSize}b"
- + "-${producersPerTopic}p-${consumerPerSubscription}c-${producerRate}";
+ + "-${producersPerTopic}p-${consumerPerSubscription}c-${producerRate}";
public String nameFormat = DEFAULT_NAME_TEMPLATE;
- /** Number of topics to create in the test */
+ /** Number of topics to create in the test. */
public List topics = Collections.emptyList();
- /** Number of partitions each topic will contain */
- public List partitionsPerTopic= Collections.emptyList();
- public List messageSize= Collections.emptyList();
- public List subscriptionsPerTopic= Collections.emptyList();
- public List producersPerTopic= Collections.emptyList();
- public List consumerPerSubscription= Collections.emptyList();
- public List producerRate= Collections.emptyList();
+ /** Number of partitions each topic will contain. */
+ public List partitionsPerTopic = Collections.emptyList();
+
+ public List messageSize = Collections.emptyList();
+ public List subscriptionsPerTopic = Collections.emptyList();
+ public List producersPerTopic = Collections.emptyList();
+ public List consumerPerSubscription = Collections.emptyList();
+ public List producerRate = Collections.emptyList();
public KeyDistributorType keyDistributor = KeyDistributorType.NO_KEY;
public String payloadFile = null;
diff --git a/tool/src/test/java/io/openmessaging/benchmark/tool/workload/WorkloadNameFormatTest.java b/tool/src/test/java/io/openmessaging/benchmark/tool/workload/WorkloadNameFormatTest.java
index 6c0d7ae40..63ff456fc 100644
--- a/tool/src/test/java/io/openmessaging/benchmark/tool/workload/WorkloadNameFormatTest.java
+++ b/tool/src/test/java/io/openmessaging/benchmark/tool/workload/WorkloadNameFormatTest.java
@@ -20,8 +20,9 @@
class WorkloadNameFormatTest {
- public String nameFormat = "${topics}-topics-${partitionsPerTopic}-partitions-${messageSize}b"
- + "-${producersPerTopic}p-${consumerPerSubscription}c-${producerRate}";
+ public String nameFormat =
+ "${topics}-topics-${partitionsPerTopic}-partitions-${messageSize}b"
+ + "-${producersPerTopic}p-${consumerPerSubscription}c-${producerRate}";
@Test
void nameOverride() {
@@ -43,4 +44,4 @@ void from() {
String name = new WorkloadNameFormat(nameFormat).from(workload);
assertThat(name).isEqualTo("1k-topics-2k-partitions-617kb-45p-541c-1m");
}
-}
\ No newline at end of file
+}