<groupId>com.github.os72</groupId>
<artifactId>protoc-jar-maven-plugin</artifactId>
@@ -201,7 +225,7 @@
<LANG>${postgres.system.lang}</LANG>
- <port>${postgres.port}:5432</port>
+ <port>${postgres.port}:5433</port>
postgres
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PGTableSchemaBuilder.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PGTableSchemaBuilder.java
new file mode 100644
index 00000000000..e1a6210ed44
--- /dev/null
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PGTableSchemaBuilder.java
@@ -0,0 +1,491 @@
+/*
+ * Copyright Debezium Authors.
+ *
+ * Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
+ */
+package io.debezium.connector.postgresql;
+
+import java.sql.Types;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.apache.kafka.connect.data.Struct;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.errors.DataException;
+import org.apache.kafka.connect.errors.SchemaBuilderException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.debezium.DebeziumException;
+import io.debezium.annotation.Immutable;
+import io.debezium.annotation.ThreadSafe;
+import io.debezium.data.Envelope;
+import io.debezium.data.SchemaUtil;
+import io.debezium.relational.*;
+import io.debezium.relational.Key.KeyMapper;
+import io.debezium.relational.Tables.ColumnNameFilter;
+import io.debezium.relational.mapping.ColumnMapper;
+import io.debezium.relational.mapping.ColumnMappers;
+import io.debezium.schema.FieldNameSelector.FieldNamer;
+import io.debezium.schema.SchemaNameAdjuster;
+import io.debezium.spi.topic.TopicNamingStrategy;
+import io.debezium.util.Loggings;
+
+/**
+ * Builder that constructs {@link TableSchema} instances for {@link Table} definitions.
+ *
+ * This builder is responsible for mapping {@link Column table columns} to {@link Field fields} in Kafka Connect {@link Schema}s,
+ * and this is necessarily dependent upon the database's supported types. Although mappings are defined for standard types,
+ * this class may need to be subclassed for each DBMS to add support for DBMS-specific types by overriding any of the
+ * "{@code add*Field}" methods.
+ *
+ * See the <a href="http://docs.oracle.com/javase/6/docs/technotes/guides/jdbc/getstart/mapping.html">Java SE Mapping SQL
+ * and Java Types</a> for details about how JDBC {@link Types types} map to Java value types.
+ *
+ * @author Randall Hauch
+ */
+@ThreadSafe
+@Immutable
+public class PGTableSchemaBuilder extends TableSchemaBuilder {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(PGTableSchemaBuilder.class);
+
+ private final SchemaNameAdjuster schemaNameAdjuster;
+ private final ValueConverterProvider valueConverterProvider;
+ private final DefaultValueConverter defaultValueConverter;
+ private final Schema sourceInfoSchema;
+ private final FieldNamer fieldNamer;
+ private final CustomConverterRegistry customConverterRegistry;
+ private final boolean multiPartitionMode;
+ private final PostgresConnectorConfig connectorConfig;
+
+ /**
+ * Create a new instance of the builder.
+ *
+ * @param valueConverterProvider the provider for obtaining {@link ValueConverter}s and {@link SchemaBuilder}s; may not be
+ * null
+ * @param defaultValueConverter is used to convert the default value literal to a Java type
+ * recognized by value converters for a subset of types; may be null.
+ * @param connectorConfig the connector configuration object; never null.
+ * @param multiPartitionMode whether the connector is operating in multi-partition mode.
+ */
+ public PGTableSchemaBuilder(ValueConverterProvider valueConverterProvider,
+ DefaultValueConverter defaultValueConverter,
+ PostgresConnectorConfig connectorConfig,
+ boolean multiPartitionMode) {
+ super(valueConverterProvider, defaultValueConverter, connectorConfig.schemaNameAdjuster(),
+ connectorConfig.customConverterRegistry(), connectorConfig.getSourceInfoStructMaker().schema(),
+ connectorConfig.getFieldNamer(), multiPartitionMode);
+
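+ // YB Note: this builder only makes sense for the yboutput plugin, whose records wrap every
+ // column in a {value, set} cell struct; other plugins keep using the stock TableSchemaBuilder
+ // (see PostgresSchema#getTableSchemaBuilder).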
+ if (!connectorConfig.plugin().isYBOutput()) {
+ throw new DebeziumException("Class not supposed to be used with the plugin " + connectorConfig.plugin().getPostgresPluginName() + ", check configuration");
+ }
+
+ this.schemaNameAdjuster = connectorConfig.schemaNameAdjuster();
+ this.valueConverterProvider = valueConverterProvider;
+ this.defaultValueConverter = Optional.ofNullable(defaultValueConverter)
+ .orElse(DefaultValueConverter.passthrough());
+ this.sourceInfoSchema = connectorConfig.getSourceInfoStructMaker().schema();
+ this.fieldNamer = connectorConfig.getFieldNamer();
+ this.customConverterRegistry = connectorConfig.customConverterRegistry();
+ this.multiPartitionMode = multiPartitionMode;
+ this.connectorConfig = connectorConfig;
+ }
+
+ /**
+ * Create a {@link TableSchema} from the given {@link Table table definition}. The resulting TableSchema will have a
+ * {@link TableSchema#keySchema() key schema} that contains all of the columns that make up the table's primary key,
+ * and a {@link TableSchema#valueSchema() value schema} that contains all of the table's columns that pass the
+ * configured column filter.
+ *
+ * @param topicNamingStrategy the topic naming strategy
+ * @param table the table definition; may not be null
+ * @param filter the filter that specifies whether columns in the table should be included; may be null if all columns
+ * are to be included
+ * @param mappers the mapping functions for columns; may be null if none of the columns are to be mapped to different values
+ * @param keysMapper the custom key mapper; may be null if the table's primary key columns are to be used as the record key
+ * @return the table schema that can be used for sending rows of data for this table to Kafka Connect; never null
+ */
+ public TableSchema create(TopicNamingStrategy topicNamingStrategy, Table table, ColumnNameFilter filter, ColumnMappers mappers, KeyMapper keysMapper) {
+ // Build the schemas ...
+ final TableId tableId = table.id();
+ final String schemaNamePrefix = topicNamingStrategy.recordSchemaPrefix(tableId);
+ final String envelopeSchemaPrefix = topicNamingStrategy.dataChangeTopic(tableId);
+ final String envelopeSchemaName = Envelope.schemaName(envelopeSchemaPrefix);
+ LOGGER.debug("Mapping table '{}' to schemas under '{}'", tableId, schemaNamePrefix);
+ SchemaBuilder valSchemaBuilder = SchemaBuilder.struct().name(schemaNameAdjuster.adjust(schemaNamePrefix + ".Value"));
+ SchemaBuilder keySchemaBuilder = SchemaBuilder.struct().name(schemaNameAdjuster.adjust(schemaNamePrefix + ".Key"));
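+ // For example, for table "public.users" with topic prefix "dbserver1", these schemas are
+ // typically named "dbserver1.public.users.Value" and "dbserver1.public.users.Key" (the exact
+ // names depend on the configured topic naming strategy and schema name adjuster).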
+ AtomicBoolean hasPrimaryKey = new AtomicBoolean(false);
+
+ Key tableKey = new Key.Builder(table).customKeyMapper(keysMapper).build();
+ tableKey.keyColumns().forEach(column -> {
+ addField(keySchemaBuilder, table, column, null);
+ hasPrimaryKey.set(true);
+ });
+ if (topicNamingStrategy.keySchemaAugment().augment(keySchemaBuilder)) {
+ hasPrimaryKey.set(true);
+ }
+
+ table.columns()
+ .stream()
+ .filter(column -> filter == null || filter.matches(tableId.catalog(), tableId.schema(), tableId.table(), column.name()))
+ .forEach(column -> {
+ ColumnMapper mapper = mappers == null ? null : mappers.mapperFor(tableId, column);
+ addField(valSchemaBuilder, table, column, mapper);
+ });
+
+ Schema valSchema = valSchemaBuilder.optional().build();
+ Schema keySchema = hasPrimaryKey.get() ? keySchemaBuilder.build() : null;
+
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("Mapped primary key for table '{}' to schema: {}", tableId, SchemaUtil.asDetailedString(keySchema));
+ LOGGER.debug("Mapped columns for table '{}' to schema: {}", tableId, SchemaUtil.asDetailedString(valSchema));
+ }
+
+ Envelope envelope = Envelope.defineSchema()
+ .withName(schemaNameAdjuster.adjust(envelopeSchemaName))
+ .withRecord(valSchema)
+ .withSource(sourceInfoSchema)
+ .build();
+
+ // Create the generators ...
+ StructGenerator keyGenerator = createKeyGenerator(keySchema, tableId, tableKey.keyColumns(), topicNamingStrategy);
+ StructGenerator valueGenerator = createValueGenerator(valSchema, tableId, table.columns(), filter, mappers);
+
+ // And the table schema ...
+ return new TableSchema(tableId, keySchema, keyGenerator, envelope, valSchema, valueGenerator);
+ }
+
+ public boolean isMultiPartitionMode() {
+ return multiPartitionMode;
+ }
+
+ /**
+ * Creates the function that produces a Kafka Connect key object for a row of data.
+ *
+ * @param schema the Kafka Connect schema for the key; may be null if there is no known schema, in which case the generator
+ * will be null
+ * @param columnSetName the name for the set of columns, used in error messages; may not be null
+ * @param columns the column definitions for the table that defines the row; may not be null
+ * @param topicNamingStrategy the topic naming strategy
+ * @return the key-generating function, or null if there is no key schema
+ */
+ protected StructGenerator createKeyGenerator(Schema schema, TableId columnSetName, List<Column> columns,
+ TopicNamingStrategy topicNamingStrategy) {
+ if (schema != null) {
+ int[] recordIndexes = indexesForColumns(columns);
+ Field[] fields = fieldsForColumns(schema, columns);
+ int numFields = recordIndexes.length;
+ ValueConverter[] converters = convertersForColumns(schema, columnSetName, columns, null);
+ return (row) -> {
+ Struct result = new Struct(schema);
+ for (int i = 0; i != numFields; ++i) {
+ validateIncomingRowToInternalMetadata(recordIndexes, fields, converters, row, i);
+ Object value = row[recordIndexes[i]];
+ ValueConverter converter = converters[i];
+ if (converter != null) {
+ // A component of a primary key must not be null.
+ // It is possible for some databases and values (MySQL and all-zero datetime)
+ // to be reported as null by JDBC or streaming reader.
+ // It thus makes sense to convert them to a sensible default replacement value.
+
+ // YB Note: with yboutput, incoming row values are Object[] pairs of {value, set flag}
+ // (see PostgresChangeRecordEmitter#columnValues), so unwrap the raw value before converting.
+ if (connectorConfig.plugin().isYBOutput()) {
+ value = converter.convert(((Object[]) value)[0]);
+ } else {
+ value = converter.convert(value);
+ }
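+ // YB Note: a sketch of the resulting key shape: a primary key column "id" holding 1 is
+ // emitted as {"id": {"value": 1, "set": true}} rather than {"id": 1}.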
+ try {
+ // YB Note: YugabyteDB specific code to incorporate the plugin name yboutput
+ if (connectorConfig.plugin().isYBOutput()) {
+ if (value != null && !UnchangedToastedReplicationMessageColumn.isUnchangedToastedValue(value)) {
+ Struct cell = new Struct(fields[i].schema());
+ cell.put("value", value);
+ cell.put("set", true);
+ result.put(fields[i], cell);
+ }
+ } else {
+ result.put(fields[i], value);
+ }
+ }
+ catch (DataException e) {
+ Column col = columns.get(i);
+ Loggings.logErrorAndTraceRecord(LOGGER, row,
+ "Failed to properly convert key value for '{}.{}' of type {}", columnSetName,
+ col.name(), col.typeName(), e);
+ }
+ }
+ }
+ topicNamingStrategy.keyValueAugment().augment(columnSetName, schema, result);
+ return result;
+ };
+ }
+ return null;
+ }
+
+ private void validateIncomingRowToInternalMetadata(int[] recordIndexes, Field[] fields, ValueConverter[] converters,
+ Object[] row, int position) {
+ if (position >= converters.length) {
+ LOGGER.error("Error requesting a converter, converters: {}, requested index: {}", converters.length, position);
+ throw new ConnectException(
+ "Column indexing array is larger than number of converters, internal schema representation is probably out of sync with real database schema");
+ }
+ if (position >= fields.length) {
+ LOGGER.error("Error requesting a field, fields: {}, requested index: {}", fields.length, position);
+ throw new ConnectException("Too few schema fields, internal schema representation is probably out of sync with real database schema");
+ }
+ if (recordIndexes[position] >= row.length) {
+ LOGGER.error("Error requesting a row value, row: {}, requested index: {} at position {}", row.length, recordIndexes[position], position);
+ throw new ConnectException("Data row is smaller than a column index, internal schema representation is probably out of sync with real database schema");
+ }
+ }
+
+ /**
+ * Creates the function that produces a Kafka Connect value object for a row of data.
+ *
+ * @param schema the Kafka Connect schema for the value; may be null if there is no known schema, in which case the generator
+ * will be null
+ * @param tableId the table identifier; may not be null
+ * @param columns the column definitions for the table that defines the row; may not be null
+ * @param filter the filter that specifies whether columns in the table should be included; may be null if all columns
+ * are to be included
+ * @param mappers the mapping functions for columns; may be null if none of the columns are to be mapped to different values
+ * @return the value-generating function, or null if there is no value schema
+ */
+ protected StructGenerator createValueGenerator(Schema schema, TableId tableId, List<Column> columns,
+ ColumnNameFilter filter, ColumnMappers mappers) {
+ if (schema != null) {
+ List<Column> columnsThatShouldBeAdded = columns.stream()
+ .filter(column -> filter == null || filter.matches(tableId.catalog(), tableId.schema(), tableId.table(), column.name()))
+ .collect(Collectors.toList());
+ int[] recordIndexes = indexesForColumns(columnsThatShouldBeAdded);
+ Field[] fields = fieldsForColumns(schema, columnsThatShouldBeAdded);
+ int numFields = recordIndexes.length;
+ ValueConverter[] converters = convertersForColumns(schema, tableId, columnsThatShouldBeAdded, mappers);
+ return (row) -> {
+ Struct result = new Struct(schema);
+ for (int i = 0; i != numFields; ++i) {
+ validateIncomingRowToInternalMetadata(recordIndexes, fields, converters, row, i);
+ Object value = row[recordIndexes[i]];
+
+ ValueConverter converter = converters[i];
+
+ if (converter != null) {
+ LOGGER.trace("converter for value object: *** {} ***", converter);
+ }
+ else {
+ LOGGER.trace("converter is null...");
+ }
+
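+ // YB Note: for yboutput, both SQL NULLs and unchanged TOASTed columns end up as a null
+ // cell (no {value, set} struct is built for them); present values are wrapped in a cell.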
+ if (converter != null) {
+ try {
+ // YB Note: YugabyteDB specific code to incorporate the plugin name yboutput
+ if (connectorConfig.plugin().isYBOutput()) {
+ if (value != null && !UnchangedToastedReplicationMessageColumn.isUnchangedToastedValue(value)) {
+ value = converter.convert(((Object[]) value)[0]);
+ Struct cell = new Struct(fields[i].schema());
+ cell.put("value", value);
+ cell.put("set", true);
+ result.put(fields[i], cell);
+ } else {
+ result.put(fields[i], null);
+ }
+ } else {
+ value = converter.convert(value);
+ result.put(fields[i], value);
+ }
+ }
+ catch (Exception e) {
+ // Use the filtered column list: i indexes the filtered arrays, not the full column list.
+ Column col = columnsThatShouldBeAdded.get(i);
+ Loggings.logErrorAndTraceRecord(LOGGER, row,
+ "Failed to properly convert data value for '{}.{}' of type {}", tableId,
+ col.name(), col.typeName(), e);
+ }
+ }
+ }
+ return result;
+ };
+ }
+ return null;
+ }
+
+ protected int[] indexesForColumns(List<Column> columns) {
+ int[] recordIndexes = new int[columns.size()];
+ AtomicInteger i = new AtomicInteger(0);
+ columns.forEach(column -> {
+ recordIndexes[i.getAndIncrement()] = column.position() - 1; // position is 1-based, indexes 0-based
+ });
+ return recordIndexes;
+ }
+
+ protected Field[] fieldsForColumns(Schema schema, List<Column> columns) {
+ Field[] fields = new Field[columns.size()];
+ AtomicInteger i = new AtomicInteger(0);
+ columns.forEach(column -> {
+ Field field = schema.field(fieldNamer.fieldNameFor(column)); // may be null if the field is unused ...
+ fields[i.getAndIncrement()] = field;
+ });
+ return fields;
+ }
+
+ /**
+ * Obtain the array of converters for each column in a row. A converter might be null if the column is not to be included in
+ * the records.
+ *
+ * @param schema the schema; may not be null
+ * @param tableId the identifier of the table that contains the columns
+ * @param columns the columns in the row; may not be null
+ * @param mappers the mapping functions for columns; may be null if none of the columns are to be mapped to different values
+ * @return the converters for each column in the rows; never null
+ */
+ protected ValueConverter[] convertersForColumns(Schema schema, TableId tableId, List<Column> columns, ColumnMappers mappers) {
+
+ ValueConverter[] converters = new ValueConverter[columns.size()];
+
+ for (int i = 0; i < columns.size(); i++) {
+ Column column = columns.get(i);
+
+ ValueConverter converter = createValueConverterFor(tableId, column, schema.field(fieldNamer.fieldNameFor(column)));
+ converter = wrapInMappingConverterIfNeeded(mappers, tableId, column, converter);
+
+ if (converter == null) {
+ LOGGER.warn(
+ "No converter found for column {}.{} of type {}. The column will not be part of change events for that table.",
+ tableId, column.name(), column.typeName());
+ }
+
+ // may be null if no converter found
+ converters[i] = converter;
+ }
+
+ return converters;
+ }
+
+ private ValueConverter wrapInMappingConverterIfNeeded(ColumnMappers mappers, TableId tableId, Column column, ValueConverter converter) {
+ if (mappers == null || converter == null) {
+ return converter;
+ }
+
+ ValueConverter mappingConverter = mappers.mappingConverterFor(tableId, column);
+ if (mappingConverter == null) {
+ return converter;
+ }
+
+ return (value) -> mappingConverter.convert(converter.convert(value));
+ }
+
+ /**
+ * Add to the supplied {@link SchemaBuilder} a field for the column with the given information.
+ *
+ * @param builder the schema builder; never null
+ * @param table the table definition; never null
+ * @param column the column definition
+ * @param mapper the mapping function for the column; may be null if the column is not to be mapped to different values
+ */
+ protected void addField(SchemaBuilder builder, Table table, Column column, ColumnMapper mapper) {
+ final Object defaultValue = column.defaultValueExpression()
+ .flatMap(e -> defaultValueConverter.parseDefaultValue(column, e))
+ .orElse(null);
+
+ final SchemaBuilder fieldBuilder = customConverterRegistry.registerConverterFor(table.id(), column, defaultValue)
+ .orElse(valueConverterProvider.schemaBuilder(column));
+
+ if (fieldBuilder != null) {
+ if (mapper != null) {
+ // Let the mapper add properties to the schema ...
+ mapper.alterFieldSchema(column, fieldBuilder);
+ }
+ if (column.isOptional()) {
+ fieldBuilder.optional();
+ }
+
+ if (column.comment() != null) {
+ fieldBuilder.doc(column.comment());
+ }
+
+ // if the default value is provided
+ if (column.hasDefaultValue() && defaultValue != null) {
+ try {
+ // If the resolution of the default value resulted in null, there is no need to set it:
+ // if the column isn't optional, the schema won't be marked as such, and trying
+ // to set a null default value on a non-optional field schema would fail.
+ fieldBuilder
+ .defaultValue(customConverterRegistry.getValueConverter(table.id(), column)
+ .orElse(ValueConverter.passthrough()).convert(defaultValue));
+ }
+ catch (SchemaBuilderException e) {
+ throw new DebeziumException("Failed to set field default value for '" + table.id() + "."
+ + column.name() + "' of type " + column.typeName() + ", the default value is "
+ + defaultValue + " of type " + defaultValue.getClass(), e);
+ }
+ }
+
+ // YB Note: YugabyteDB specific code to incorporate the plugin name yboutput
+ if (connectorConfig.plugin().isYBOutput()) {
+ Schema optionalCellSchema = cellSchema(fieldNamer.fieldNameFor(column), fieldBuilder.build(), column.isOptional());
+ builder.field(fieldNamer.fieldNameFor(column), optionalCellSchema);
+ } else {
+ builder.field(fieldNamer.fieldNameFor(column), fieldBuilder.build());
+ }
+
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debug("- field '{}' ({}{}) from column {}", column.name(), builder.isOptional() ? "OPTIONAL " : "",
+ fieldBuilder.type(),
+ column);
+ }
+ }
+ else {
+ LOGGER.warn("Unexpected JDBC type '{}' for column '{}' that will be ignored", column.jdbcType(), column.name());
+ }
+ }
+
+ /**
+ * Create a {@link ValueConverter} that can be used to convert row values for the given column into the Kafka Connect value
+ * object described by the {@link Field field definition}. This uses the supplied {@link ValueConverterProvider} object.
+ *
+ * @param tableId the id of the table containing the column; never null
+ * @param column the column describing the input values; never null
+ * @param fieldDefn the definition for the field in a Kafka Connect {@link Schema} describing the output of the function;
+ * never null
+ * @return the value conversion function; may not be null
+ */
+ protected ValueConverter createValueConverterFor(TableId tableId, Column column, Field fieldDefn) {
+ return customConverterRegistry.getValueConverter(tableId, column).orElse(valueConverterProvider.converter(column, fieldDefn));
+ }
+
+ /**
+ * Get a custom schema for columns when plugin name is yboutput. The schema is of the format
+ * {@code fieldName:{"value":fieldValue,"set":booleanValue}}.
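+ * For example, an integer column {@code id} holding 42 is represented as
+ * {@code id:{"value":42,"set":true}}, while an unset cell is emitted as null.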
+ * @param name the name of the field
+ * @param valueSchema the schema of the value the field is supposed to take
+ * @param isOptional indicates whether the field is optional
+ * @return a custom schema for the columns when plugin name is yboutput
+ */
+ static Schema cellSchema(String name, Schema valueSchema, boolean isOptional) {
+ if (valueSchema != null) {
+ SchemaBuilder schemaBuilder = SchemaBuilder.struct().name(name)
+ .field("value", valueSchema)
+ .field("set", Schema.BOOLEAN_SCHEMA);
+ if (isOptional) {
+ schemaBuilder.optional();
+ }
+
+ return schemaBuilder.build();
+ } else {
+ return null;
+ }
+ }
+}
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PgOid.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PgOid.java
index a437d61e968..53475640194 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PgOid.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PgOid.java
@@ -6,10 +6,10 @@
package io.debezium.connector.postgresql;
-import org.postgresql.core.Oid;
+import com.yugabyte.core.Oid;
/**
- * Extension to the {@link org.postgresql.core.Oid} class which contains Postgres specific datatypes not found currently in the
+ * Extension to the {@link com.yugabyte.core.Oid} class, containing Postgres-specific datatypes not currently found in the
* JDBC driver implementation classes.
*
* @author Horia Chiorean (hchiorea@redhat.com)
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresChangeEventSourceCoordinator.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresChangeEventSourceCoordinator.java
index 144e53afdc1..13a165e547a 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresChangeEventSourceCoordinator.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresChangeEventSourceCoordinator.java
@@ -6,7 +6,16 @@
package io.debezium.connector.postgresql;
import java.sql.SQLException;
+import java.time.Duration;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+import io.debezium.connector.common.CdcSourceTaskContext;
+import io.debezium.connector.postgresql.spi.OffsetState;
+import io.debezium.pipeline.spi.SnapshotResult;
+import io.debezium.util.Clock;
+import io.debezium.util.LoggingContext;
+import io.debezium.util.Metronome;
import org.apache.kafka.connect.source.SourceConnector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,6 +47,8 @@ public class PostgresChangeEventSourceCoordinator extends ChangeEventSourceCoordinator<PostgresPartition, PostgresOffsetContext> {
private final SnapshotterService snapshotterService;
private final SlotState slotInfo;
+ private volatile boolean waitForSnapshotCompletion;
+
public PostgresChangeEventSourceCoordinator(Offsets<PostgresPartition, PostgresOffsetContext> previousOffsets,
ErrorHandler errorHandler,
Class<? extends SourceConnector> connectorType,
@@ -52,6 +63,44 @@ public PostgresChangeEventSourceCoordinator(Offsets<PostgresPartition, PostgresOffsetContext> previousOffsets,
+
+ @Override
+ protected void executeChangeEventSources(CdcSourceTaskContext taskContext, SnapshotChangeEventSource<PostgresPartition, PostgresOffsetContext> snapshotSource, Offsets<PostgresPartition, PostgresOffsetContext> previousOffsets,
+ AtomicReference<LoggingContext.PreviousContext> previousLogContext, ChangeEventSourceContext context)
+ throws InterruptedException {
+ final PostgresPartition partition = previousOffsets.getTheOnlyPartition();
+ final PostgresOffsetContext previousOffset = previousOffsets.getTheOnlyOffset();
+
+ previousLogContext.set(taskContext.configureLoggingContext("snapshot", partition));
+ SnapshotResult<PostgresOffsetContext> snapshotResult = doSnapshot(snapshotSource, context, partition, previousOffset);
+
+ getSignalProcessor(previousOffsets).ifPresent(s -> s.setContext(snapshotResult.getOffset()));
+
+ LOGGER.debug("Snapshot result {}", snapshotResult);
+
+ if (context.isRunning() && snapshotResult.isCompletedOrSkipped()) {
+ if (YugabyteDBServer.isEnabled() && !isSnapshotSkipped(snapshotResult)) {
+ LOGGER.info("Will wait for snapshot completion before transitioning to streaming");
+ waitForSnapshotCompletion = true;
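+ // The flag is cleared by commitOffset() once a committed offset reports that
+ // the snapshot is no longer in effect.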
+ while (waitForSnapshotCompletion) {
+ LOGGER.debug("sleeping for 1s to receive snapshot completion offset");
+ Metronome metronome = Metronome.sleeper(Duration.ofSeconds(1), Clock.SYSTEM);
+ metronome.pause();
+ // Note: This heartbeat call is only required to support applications using the Debezium
+ // embedded engine. It is not required when the connector is run with Kafka Connect.
+ eventDispatcher.alwaysDispatchHeartbeatEvent(partition, snapshotResult.getOffset());
+ }
+ }
+ LOGGER.info("Transitioning to streaming");
+ previousLogContext.set(taskContext.configureLoggingContext("streaming", partition));
+ streamEvents(context, partition, snapshotResult.getOffset());
+ }
+ }
+
+ protected boolean isSnapshotSkipped(SnapshotResult<PostgresOffsetContext> snapshotResult) {
+ return snapshotResult.getStatus() == SnapshotResult.SnapshotResultStatus.SKIPPED;
}
@Override
@@ -85,4 +134,23 @@ private void setSnapshotStartLsn(PostgresSnapshotChangeEventSource snapshotSource,
snapshotSource.updateOffsetForPreSnapshotCatchUpStreaming(offsetContext);
}
+ @Override
+ public void commitOffset(Map<String, ?> partition, Map<String, ?> offset) {
+ if (YugabyteDBServer.isEnabled() && waitForSnapshotCompletion) {
+ LOGGER.debug("Checking the offset value for snapshot completion");
+ OffsetState offsetState = new PostgresOffsetContext.Loader((PostgresConnectorConfig) connectorConfig).load(offset).asOffsetState();
+ if (!offsetState.snapshotInEffect()) {
+ LOGGER.info("Offset conveys that snapshot has completed");
+ waitForSnapshotCompletion = false;
+ }
+ }
+
+ // This block is not executed for the offset that signals snapshot completion, because
+ // streamingSource is only initialised once we have transitioned to streaming. In other
+ // words, it only runs after the connector has switched to the streaming phase.
+ if (!commitOffsetLock.isLocked() && streamingSource != null && offset != null) {
+ streamingSource.commitOffset(partition, offset);
+ }
+ }
+
}
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresChangeRecordEmitter.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresChangeRecordEmitter.java
index 6cb4a0f2979..8eee5901664 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresChangeRecordEmitter.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresChangeRecordEmitter.java
@@ -17,9 +17,11 @@
import java.util.Set;
import java.util.stream.Collectors;
+import io.debezium.connector.postgresql.connection.ReplicaIdentityInfo;
+import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.errors.ConnectException;
-import org.postgresql.core.BaseConnection;
+import com.yugabyte.core.BaseConnection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -107,6 +109,12 @@ protected Object[] getOldColumnValues() {
case CREATE:
return null;
case UPDATE:
+ // YB Note: For replica identity CHANGE or DEFAULT, there is no old column value available.
+ if (schema.getReplicaIdentity(tableId) == ReplicaIdentityInfo.ReplicaIdentity.CHANGE
+ || schema.getReplicaIdentity(tableId) == ReplicaIdentityInfo.ReplicaIdentity.DEFAULT) {
+ return null;
+ }
+
return columnValues(message.getOldTupleList(), tableId, true, true, true);
default:
return columnValues(message.getOldTupleList(), tableId, true, false, true);
@@ -151,7 +159,7 @@ private DataCollectionSchema synchronizeTableSchema(DataCollectionSchema tableSc
return schema.schemaFor(tableId);
}
- private Object[] columnValues(List<ReplicationMessage.Column> columns, TableId tableId, boolean refreshSchemaIfChanged,
+ protected Object[] columnValues(List<ReplicationMessage.Column> columns, TableId tableId, boolean refreshSchemaIfChanged,
boolean sourceOfToasted, boolean oldValues)
throws SQLException {
if (columns == null || columns.isEmpty()) {
@@ -188,12 +196,69 @@ private Object[] columnValues(List<ReplicationMessage.Column> columns, TableId tableId, boolean refreshSchemaIfChanged,
}
}
}
- values[position] = value;
+
+ if (connectorConfig.plugin().isYBOutput()) {
+ // YB Note: In this case, if we have the plugin yboutput and the column contains
+ // the unchanged toasted value, we will not form a value struct for it.
+ // Ultimately, it will be emitted as a NULL value.
+ if (!UnchangedToastedReplicationMessageColumn.isUnchangedToastedValue(value)) {
+ values[position] = new Object[]{value, Boolean.TRUE};
+ }
+ } else {
+ LOGGER.debug("Plugin is NOT yboutput");
+ values[position] = value;
+ }
}
}
return values;
}
+ @Override
+ protected void emitUpdateRecord(Receiver<PostgresPartition> receiver, TableSchema tableSchema) throws InterruptedException {
+ Object[] oldColumnValues = getOldColumnValues();
+ Object[] newColumnValues = getNewColumnValues();
+
+ Struct oldKey = tableSchema.keyFromColumnData(oldColumnValues);
+ Struct newKey = tableSchema.keyFromColumnData(newColumnValues);
+
+ Struct newValue = tableSchema.valueFromColumnData(newColumnValues);
+ Struct oldValue = tableSchema.valueFromColumnData(oldColumnValues);
+
+ if (skipEmptyMessages() && (newColumnValues == null || newColumnValues.length == 0)) {
+ LOGGER.debug("no new values found for table '{}' from update message at '{}'; skipping record", tableSchema, getOffset().getSourceInfo());
+ return;
+ }
+
+ /*
+ * If skip.messages.without.change is configured as true,
+ * skip publishing the message in case there is no change in the monitored columns.
+ * (Postgres) This only works if REPLICA IDENTITY is set to FULL, as oldValues won't be available otherwise.
+ */
+ if (skipMessagesWithoutChange() && Objects.nonNull(newValue) && newValue.equals(oldValue)) {
+ LOGGER.debug("No new values found for table '{}' in included columns from update message at '{}'; skipping record", tableSchema,
+ getOffset().getSourceInfo());
+ return;
+ }
+ // Some configurations do not provide old values in case of updates;
+ // in this case we handle all updates as regular ones.
+
+ // YB Note: If replica identity is change, we always know there will be no
+ // oldKey present so we should simply go ahead with this block. Also, oldKey would be null
+ // at this stage if replica identity is CHANGE.
+ // Another point to note: when the source database is YugabyteDB, we always handle updates
+ // as regular ones, since the CDC service itself sends primary key updates as two separate
+ // records, i.e. a delete of the original key followed by an insert with the new key.
+ if (YugabyteDBServer.isEnabled() || oldKey == null || Objects.equals(oldKey, newKey)) {
+ Struct envelope = tableSchema.getEnvelopeSchema().update(oldValue, newValue, getOffset().getSourceInfo(), getClock().currentTimeAsInstant());
+ receiver.changeRecord(getPartition(), tableSchema, Operation.UPDATE, newKey, envelope, getOffset(), null);
+ }
+ // PK update -> emit as delete and re-insert with new key
+ else {
+ // YB Note: In case of YugabyteDB as source database, the code flow will never come here.
+ emitUpdateAsPrimaryKeyChangeRecord(receiver, tableSchema, oldKey, newKey, oldValue, newValue);
+ }
+ }
+
private int getPosition(String columnName, Table table, Object[] values) {
final Column tableColumn = table.columnWithName(columnName);
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnectorConfig.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnectorConfig.java
index 9e1c1e972f9..f57647c02e3 100755
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnectorConfig.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnectorConfig.java
@@ -11,12 +11,23 @@
import java.util.List;
import java.util.Map;
import java.util.Optional;
-
+import java.util.regex.Pattern;
+
+import io.debezium.DebeziumException;
+import io.debezium.data.Envelope;
+import io.debezium.heartbeat.Heartbeat;
+import io.debezium.heartbeat.HeartbeatConnectionProvider;
+import io.debezium.heartbeat.HeartbeatErrorHandler;
+import io.debezium.jdbc.JdbcConnection;
+import io.debezium.jdbc.JdbcValueConverters;
+import io.debezium.schema.SchemaNameAdjuster;
+import io.debezium.spi.topic.TopicNamingStrategy;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigDef.Width;
import org.apache.kafka.common.config.ConfigValue;
+import org.apache.kafka.connect.data.Struct;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -41,7 +52,7 @@
import io.debezium.util.Strings;
/**
- * The configuration properties for the {@link PostgresConnector}
+ * The configuration properties for the {@link YugabyteDBConnector}
*
* @author Horia Chiorean
*/
@@ -383,6 +394,11 @@ public boolean supportsTruncate() {
public boolean supportsLogicalDecodingMessage() {
return true;
}
+
+ @Override
+ public boolean isYBOutput() {
+ return false;
+ }
},
DECODERBUFS("decoderbufs") {
@Override
@@ -404,6 +420,37 @@ public boolean supportsTruncate() {
public boolean supportsLogicalDecodingMessage() {
return false;
}
+
+ @Override
+ public boolean isYBOutput() {
+ return false;
+ }
+ },
+ YBOUTPUT("yboutput") {
+ @Override
+ public MessageDecoder messageDecoder(MessageDecoderContext config, PostgresConnection connection) {
+ return new PgOutputMessageDecoder(config, connection);
+ }
+
+ @Override
+ public String getPostgresPluginName() {
+ return getValue();
+ }
+
+ @Override
+ public boolean supportsTruncate() {
+ return false;
+ }
+
+ @Override
+ public boolean supportsLogicalDecodingMessage() {
+ return true;
+ }
+
+ @Override
+ public boolean isYBOutput() {
+ return true;
+ }
};
private final String decoderName;
@@ -423,6 +470,8 @@ public String getValue() {
return decoderName;
}
+ public abstract boolean isYBOutput();
+
public abstract String getPostgresPluginName();
public abstract boolean supportsTruncate();
@@ -543,24 +592,45 @@ public static SnapshotLockingMode parse(String value, String defaultValue) {
}
protected static final String DATABASE_CONFIG_PREFIX = "database.";
- protected static final int DEFAULT_PORT = 5_432;
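+ // YB Note: 5433 is the default YSQL port for YugabyteDB (vanilla PostgreSQL uses 5432).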
+ protected static final int DEFAULT_PORT = 5_433;
protected static final int DEFAULT_SNAPSHOT_FETCH_SIZE = 10_240;
protected static final int DEFAULT_MAX_RETRIES = 6;
+ public static final Pattern YB_HOSTNAME_PATTERN = Pattern.compile("^[a-zA-Z0-9-_.,:]+$");
public static final Field PORT = RelationalDatabaseConnectorConfig.PORT
.withDefault(DEFAULT_PORT);
+ public static final Field HOSTNAME = Field.create(DATABASE_CONFIG_PREFIX + JdbcConfiguration.HOSTNAME)
+ .withDisplayName("Hostname")
+ .withType(Type.STRING)
+ .withGroup(Field.createGroupEntry(Field.Group.CONNECTION, 2))
+ .withWidth(Width.MEDIUM)
+ .withImportance(Importance.HIGH)
+ .required()
+ .withValidation(PostgresConnectorConfig::validateYBHostname)
+ .withDescription("Resolvable hostname or IP address of the database server.");
+
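+ // YB Note: a usage sketch (hypothetical connector properties): "decimal.handling.mode=string"
+ // emits NUMERIC/DECIMAL columns as strings, while the default "double" emits them as float64;
+ // "precise" is rejected by getDecimalMode() in this class.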
+ public static final Field DECIMAL_HANDLING_MODE = Field.create("decimal.handling.mode")
+ .withDisplayName("Decimal Handling")
+ .withGroup(Field.createGroupEntry(Field.Group.CONNECTOR, 2))
+ .withEnum(DecimalHandlingMode.class, DecimalHandlingMode.DOUBLE)
+ .withWidth(Width.SHORT)
+ .withImportance(Importance.MEDIUM)
+ .withDescription("Specify how DECIMAL and NUMERIC columns should be represented in change events, including: "
+ + "'string' uses string to represent values; "
+ + "'double' represents values using Java's 'double', which may not offer the precision but will be far easier to use in consumers.");
+
public static final Field PLUGIN_NAME = Field.create("plugin.name")
.withDisplayName("Plugin")
.withGroup(Field.createGroupEntry(Field.Group.CONNECTION_ADVANCED_REPLICATION, 0))
- .withEnum(LogicalDecoder.class, LogicalDecoder.DECODERBUFS)
+ .withEnum(LogicalDecoder.class, LogicalDecoder.YBOUTPUT)
.withWidth(Width.MEDIUM)
.withImportance(Importance.MEDIUM)
.withDescription("The name of the Postgres logical decoding plugin installed on the server. " +
- "Supported values are '" + LogicalDecoder.DECODERBUFS.getValue()
- + "' and '" + LogicalDecoder.PGOUTPUT.getValue()
+ "Supported values are '" + LogicalDecoder.PGOUTPUT.getValue()
+ + "' and '" + LogicalDecoder.YBOUTPUT.getValue()
+ "'. " +
- "Defaults to '" + LogicalDecoder.DECODERBUFS.getValue() + "'.");
+ "Defaults to '" + LogicalDecoder.YBOUTPUT.getValue() + "'.");
public static final Field SLOT_NAME = Field.create("slot.name")
.withDisplayName("Slot")
@@ -604,6 +674,14 @@ public static SnapshotLockingMode parse(String value, String defaultValue) {
.withDescription("The name of the Postgres 10+ publication used for streaming changes from a plugin. " +
"Defaults to '" + ReplicationConnection.Builder.DEFAULT_PUBLICATION_NAME + "'");
+ public static final Field YB_CONSISTENT_SNAPSHOT = Field.create("yb.consistent.snapshot")
+ .withDisplayName("YB Consistent Snapshot")
+ .withType(Type.BOOLEAN)
+ .withDefault(true)
+ .withImportance(Importance.LOW)
+ .withDescription("Whether or not to take a consistent snapshot of the tables." +
+ "Disabling this option may result in duplication of some already snapshot data in the streaming phase.");
+
public enum AutoCreateMode implements EnumeratedValue {
/**
* No Publication will be created, it's expected the user
@@ -797,7 +875,7 @@ public static AutoCreateMode parse(String value, String defaultValue) {
.withWidth(Width.LONG)
.withImportance(Importance.MEDIUM)
.withDescription(
- "A name of class to that creates SSL Sockets. Use org.postgresql.ssl.NonValidatingFactory to disable SSL validation in development environments");
+ "A name of class to that creates SSL Sockets. Use com.yugabyte.ssl.NonValidatingFactory to disable SSL validation in development environments");
public static final Field SNAPSHOT_MODE = Field.create("snapshot.mode")
.withDisplayName("Snapshot mode")
@@ -947,7 +1025,6 @@ public static AutoCreateMode parse(String value, String defaultValue) {
.withImportance(Importance.LOW)
.withDefault(2)
.withDescription("Number of fractional digits when money type is converted to 'precise' decimal number.");
-
public static final Field SHOULD_FLUSH_LSN_IN_SOURCE_DB = Field.create("flush.lsn.source")
.withDisplayName("Boolean to determine if Debezium should flush LSN in the source database")
.withType(Type.BOOLEAN)
@@ -1082,6 +1159,10 @@ public Map validate() {
return getConfig().validate(ALL_FIELDS);
}
+ public boolean isYbConsistentSnapshotEnabled() {
+ return getConfig().getBoolean(YB_CONSISTENT_SNAPSHOT);
+ }
+
protected boolean skipRefreshSchemaOnMissingToastableData() {
return SchemaRefreshMode.COLUMNS_DIFF_EXCLUDE_UNCHANGED_TOAST == this.schemaRefreshMode;
}
@@ -1103,6 +1184,19 @@ public byte[] getUnavailableValuePlaceholder() {
return placeholder.getBytes();
}
+ @Override
+ public JdbcValueConverters.DecimalMode getDecimalMode() {
+ JdbcValueConverters.DecimalMode decimalMode = DecimalHandlingMode
+ .parse(getConfig().getString(DECIMAL_HANDLING_MODE))
+ .asDecimalMode();
+
+ if (decimalMode == JdbcValueConverters.DecimalMode.PRECISE) {
+ throw new DebeziumException("Decimal handling mode PRECISE is unsupported, please use DOUBLE or STRING");
+ } else {
+ return decimalMode;
+ }
+ }
+
public Optional<ReplicaIdentityMapper> replicaIdentityMapper() {
return Optional.ofNullable(this.replicaIdentityMapper);
}
@@ -1170,6 +1264,7 @@ protected SourceInfoStructMaker<? extends AbstractSourceInfo> getSourceInfoStructMaker(
SOURCE_INFO_STRUCT_MAKER)
.connector(
SNAPSHOT_MODE,
+ YB_CONSISTENT_SNAPSHOT,
SNAPSHOT_QUERY_MODE,
SNAPSHOT_QUERY_MODE_CUSTOM_NAME,
SNAPSHOT_LOCKING_MODE_CUSTOM_NAME,
@@ -1228,6 +1323,19 @@ private static int validateFlushLsnSource(Configuration config, Field field, Fie
return 0;
}
+ /**
+ * Method to get the connection factory depending on the provided hostname value.
+ * @param hostName the host(s) for the PostgreSQL/YugabyteDB instance
+ * @return a {@link io.debezium.jdbc.JdbcConnection.ConnectionFactory} instance
+ */
+ public static JdbcConnection.ConnectionFactory getConnectionFactory(String hostName) {
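+ // For example, "10.0.0.1:5433,10.0.0.2:5433" (a hypothetical host list) selects the
+ // multi-host URL pattern, while a plain "10.0.0.1" uses the single-host pattern;
+ // both default the port to 5433.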
+ return hostName.contains(":")
+ ? JdbcConnection.patternBasedFactory(PostgresConnection.MULTI_HOST_URL_PATTERN, com.yugabyte.Driver.class.getName(),
+ PostgresConnection.class.getClassLoader(), JdbcConfiguration.PORT.withDefault(PostgresConnectorConfig.PORT.defaultValueAsString()))
+ : JdbcConnection.patternBasedFactory(PostgresConnection.URL_PATTERN, com.yugabyte.Driver.class.getName(),
+ PostgresConnection.class.getClassLoader(), JdbcConfiguration.PORT.withDefault(PostgresConnectorConfig.PORT.defaultValueAsString()));
+ }
+
protected static int validateReplicaAutoSetField(Configuration config, Field field, Field.ValidationOutput problems) {
String replica_autoset_values = config.getString(PostgresConnectorConfig.REPLICA_IDENTITY_AUTOSET_VALUES);
int problemCount = 0;
@@ -1272,4 +1380,58 @@ public boolean isIncluded(TableId t) {
!t.schema().startsWith(TEMP_TABLE_SCHEMA_PREFIX);
}
}
+
+ @Override
+ public Heartbeat createHeartbeat(TopicNamingStrategy topicNamingStrategy,
+ SchemaNameAdjuster schemaNameAdjuster,
+ HeartbeatConnectionProvider connectionProvider,
+ HeartbeatErrorHandler errorHandler) {
+ if (YugabyteDBServer.isEnabled()) {
+ // We do not need any heartbeat when snapshot is never required.
+ if (snapshotMode.equals(SnapshotMode.NEVER)) {
+ return Heartbeat.DEFAULT_NOOP_HEARTBEAT;
+ }
+
+ return new YBHeartbeatImpl(getHeartbeatInterval(), topicNamingStrategy.heartbeatTopic(),
+ getLogicalName(), schemaNameAdjuster);
+ } else {
+ return super.createHeartbeat(topicNamingStrategy, schemaNameAdjuster, connectionProvider, errorHandler);
+ }
+ }
+
+ public Optional<String[]> parseSignallingMessage(Struct value) {
+ final Struct after = value.getStruct(Envelope.FieldName.AFTER);
+ if (after == null) {
+ LOGGER.warn("After part of signal '{}' is missing", value);
+ return Optional.empty();
+ }
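+ // The three extracted fields correspond to the id, type and data columns of the signal table.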
+ List<org.apache.kafka.connect.data.Field> fields = after.schema().fields();
+ return Optional.of(new String[]{
+ after.getString(fields.get(0).name()),
+ after.getString(fields.get(1).name()),
+ after.getString(fields.get(2).name())
+ });
+ }
+
+ protected static int validateYBHostname(Configuration config, Field field, Field.ValidationOutput problems) {
+ String hostName = config.getString(field);
+ int problemCount = 0;
+
+ if (!Strings.isNullOrBlank(hostName)) {
+ if (hostName.contains(",") && !hostName.contains(":")) {
+ // Basic validation for cases when a user has only specified comma separated IPs which is not the correct format.
+ problems.accept(field, hostName, hostName + " has invalid format (specify mutiple hosts in the format ip1:port1,ip2:port2,ip3:port3)");
+ ++problemCount;
+ }
+
+ if (!YB_HOSTNAME_PATTERN.asPredicate().test(hostName)) {
+ problems.accept(field, hostName, hostName + " has invalid format (only the underscore, hyphen, dot, comma, colon and alphanumeric characters are allowed)");
+ ++problemCount;
+ }
+ }
+
+ return problemCount;
+ }
+
}
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnectorTask.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnectorTask.java
index df1f1a467a6..8f214974f11 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnectorTask.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnectorTask.java
@@ -82,56 +82,55 @@ public class PostgresConnectorTask extends BaseSourceTask<PostgresPartition, PostgresOffsetContext> {
public ChangeEventSourceCoordinator<PostgresPartition, PostgresOffsetContext> start(Configuration config) {
- final PostgresConnectorConfig connectorConfig = new PostgresConnectorConfig(config);
- final TopicNamingStrategy<TableId> topicNamingStrategy = connectorConfig.getTopicNamingStrategy(CommonConnectorConfig.TOPIC_NAMING_STRATEGY);
- final SchemaNameAdjuster schemaNameAdjuster = connectorConfig.schemaNameAdjuster();
+ try {
+ final PostgresConnectorConfig connectorConfig = new PostgresConnectorConfig(config);
+ final TopicNamingStrategy<TableId> topicNamingStrategy = connectorConfig.getTopicNamingStrategy(CommonConnectorConfig.TOPIC_NAMING_STRATEGY);
+ final SchemaNameAdjuster schemaNameAdjuster = connectorConfig.schemaNameAdjuster();
+
+ final Charset databaseCharset;
+ try (PostgresConnection tempConnection = new PostgresConnection(connectorConfig.getJdbcConfig(), PostgresConnection.CONNECTION_GENERAL)) {
+ databaseCharset = tempConnection.getDatabaseCharset();
+ }
- final Charset databaseCharset;
- try (PostgresConnection tempConnection = new PostgresConnection(connectorConfig.getJdbcConfig(), PostgresConnection.CONNECTION_GENERAL)) {
- databaseCharset = tempConnection.getDatabaseCharset();
- }
+ final PostgresValueConverterBuilder valueConverterBuilder = (typeRegistry) -> PostgresValueConverter.of(
+ connectorConfig,
+ databaseCharset,
+ typeRegistry);
+
+ MainConnectionProvidingConnectionFactory<PostgresConnection> connectionFactory = new DefaultMainConnectionProvidingConnectionFactory<>(
+ () -> new PostgresConnection(connectorConfig.getJdbcConfig(), valueConverterBuilder, PostgresConnection.CONNECTION_GENERAL));
+ // Global JDBC connection used both for snapshotting and streaming.
+ // Must be able to resolve datatypes.
+ jdbcConnection = connectionFactory.mainConnection();
+ try {
+ jdbcConnection.setAutoCommit(false);
+ }
+ catch (SQLException e) {
+ throw new DebeziumException(e);
+ }
- final PostgresValueConverterBuilder valueConverterBuilder = (typeRegistry) -> PostgresValueConverter.of(
- connectorConfig,
- databaseCharset,
- typeRegistry);
+ final TypeRegistry typeRegistry = jdbcConnection.getTypeRegistry();
+ final PostgresDefaultValueConverter defaultValueConverter = jdbcConnection.getDefaultValueConverter();
+ final PostgresValueConverter valueConverter = valueConverterBuilder.build(typeRegistry);
- MainConnectionProvidingConnectionFactory<PostgresConnection> connectionFactory = new DefaultMainConnectionProvidingConnectionFactory<>(
- () -> new PostgresConnection(connectorConfig.getJdbcConfig(), valueConverterBuilder, PostgresConnection.CONNECTION_GENERAL));
- // Global JDBC connection used both for snapshotting and streaming.
- // Must be able to resolve datatypes.
- jdbcConnection = connectionFactory.mainConnection();
- try {
- jdbcConnection.setAutoCommit(false);
- }
- catch (SQLException e) {
- throw new DebeziumException(e);
- }
+ schema = new PostgresSchema(connectorConfig, defaultValueConverter, topicNamingStrategy, valueConverter);
+ this.taskContext = new PostgresTaskContext(connectorConfig, schema, topicNamingStrategy);
+ final Offsets<PostgresPartition, PostgresOffsetContext> previousOffsets = getPreviousOffsets(
+ new PostgresPartition.Provider(connectorConfig, config), new PostgresOffsetContext.Loader(connectorConfig));
+ final Clock clock = Clock.system();
+ final PostgresOffsetContext previousOffset = previousOffsets.getTheOnlyOffset();
+
+ // Manual Bean Registration
+ beanRegistryJdbcConnection = connectionFactory.newConnection();
+ connectorConfig.getBeanRegistry().add(StandardBeanNames.CONFIGURATION, config);
+ connectorConfig.getBeanRegistry().add(StandardBeanNames.CONNECTOR_CONFIG, connectorConfig);
+ connectorConfig.getBeanRegistry().add(StandardBeanNames.DATABASE_SCHEMA, schema);
+ connectorConfig.getBeanRegistry().add(StandardBeanNames.JDBC_CONNECTION, beanRegistryJdbcConnection);
+ connectorConfig.getBeanRegistry().add(StandardBeanNames.VALUE_CONVERTER, valueConverter);
- final TypeRegistry typeRegistry = jdbcConnection.getTypeRegistry();
- final PostgresDefaultValueConverter defaultValueConverter = jdbcConnection.getDefaultValueConverter();
- final PostgresValueConverter valueConverter = valueConverterBuilder.build(typeRegistry);
-
- schema = new PostgresSchema(connectorConfig, defaultValueConverter, topicNamingStrategy, valueConverter);
- this.taskContext = new PostgresTaskContext(connectorConfig, schema, topicNamingStrategy);
- this.partitionProvider = new PostgresPartition.Provider(connectorConfig, config);
- this.offsetContextLoader = new PostgresOffsetContext.Loader(connectorConfig);
- final Offsets<PostgresPartition, PostgresOffsetContext> previousOffsets = getPreviousOffsets(
- this.partitionProvider, this.offsetContextLoader);
- final Clock clock = Clock.system();
- final PostgresOffsetContext previousOffset = previousOffsets.getTheOnlyOffset();
-
- // Manual Bean Registration
- beanRegistryJdbcConnection = connectionFactory.newConnection();
- connectorConfig.getBeanRegistry().add(StandardBeanNames.CONFIGURATION, config);
- connectorConfig.getBeanRegistry().add(StandardBeanNames.CONNECTOR_CONFIG, connectorConfig);
- connectorConfig.getBeanRegistry().add(StandardBeanNames.DATABASE_SCHEMA, schema);
- connectorConfig.getBeanRegistry().add(StandardBeanNames.JDBC_CONNECTION, beanRegistryJdbcConnection);
- connectorConfig.getBeanRegistry().add(StandardBeanNames.VALUE_CONVERTER, valueConverter);
- connectorConfig.getBeanRegistry().add(StandardBeanNames.OFFSETS, previousOffsets);
-
- // Service providers
- registerServiceProviders(connectorConfig.getServiceRegistry());
+ // Service providers
+ registerServiceProviders(connectorConfig.getServiceRegistry());
final SnapshotterService snapshotterService = connectorConfig.getServiceRegistry().tryGetService(SnapshotterService.class);
final Snapshotter snapshotter = snapshotterService.getSnapshotter();
@@ -161,95 +160,101 @@ public ChangeEventSourceCoordinator<PostgresPartition, PostgresOffsetContext> start(Configuration config) {
SlotCreationResult slotCreatedInfo = tryToCreateSlot(snapshotter, connectorConfig, slotInfo);
- try {
- jdbcConnection.commit();
- }
- catch (SQLException e) {
- throw new DebeziumException(e);
- }
-
- queue = new ChangeEventQueue.Builder<DataChangeEvent>()
- .pollInterval(connectorConfig.getPollInterval())
- .maxBatchSize(connectorConfig.getMaxBatchSize())
- .maxQueueSize(connectorConfig.getMaxQueueSize())
- .maxQueueSizeInBytes(connectorConfig.getMaxQueueSizeInBytes())
- .loggingContextSupplier(() -> taskContext.configureLoggingContext(CONTEXT_NAME))
- .build();
-
- errorHandler = new PostgresErrorHandler(connectorConfig, queue, errorHandler);
-
- final PostgresEventMetadataProvider metadataProvider = new PostgresEventMetadataProvider();
-
- SignalProcessor<PostgresPartition, PostgresOffsetContext> signalProcessor = new SignalProcessor<>(
- PostgresConnector.class, connectorConfig, Map.of(),
- getAvailableSignalChannels(),
- DocumentReader.defaultReader(),
- previousOffsets);
+ try {
+ jdbcConnection.commit();
+ }
+ catch (SQLException e) {
+ throw new DebeziumException(e);
+ }
- final PostgresEventDispatcher<TableId> dispatcher = new PostgresEventDispatcher<>(
- connectorConfig,
- topicNamingStrategy,
- schema,
- queue,
- connectorConfig.getTableFilters().dataCollectionFilter(),
- DataChangeEvent::new,
- PostgresChangeRecordEmitter::updateSchema,
- metadataProvider,
- connectorConfig.createHeartbeat(
- topicNamingStrategy,
- schemaNameAdjuster,
- () -> new PostgresConnection(connectorConfig.getJdbcConfig(), PostgresConnection.CONNECTION_GENERAL),
- exception -> {
- String sqlErrorId = exception.getSQLState();
- switch (sqlErrorId) {
- case "57P01":
- // Postgres error admin_shutdown, see https://www.postgresql.org/docs/12/errcodes-appendix.html
- throw new DebeziumException("Could not execute heartbeat action query (Error: " + sqlErrorId + ")", exception);
- case "57P03":
- // Postgres error cannot_connect_now, see https://www.postgresql.org/docs/12/errcodes-appendix.html
- throw new RetriableException("Could not execute heartbeat action query (Error: " + sqlErrorId + ")", exception);
- default:
- break;
- }
- }),
- schemaNameAdjuster,
- signalProcessor);
-
- NotificationService<PostgresPartition, PostgresOffsetContext> notificationService = new NotificationService<>(getNotificationChannels(),
- connectorConfig, SchemaFactory.get(), dispatcher::enqueueNotification);
-
- ChangeEventSourceCoordinator<PostgresPartition, PostgresOffsetContext> coordinator = new PostgresChangeEventSourceCoordinator(
- previousOffsets,
- errorHandler,
- PostgresConnector.class,
- connectorConfig,
- new PostgresChangeEventSourceFactory(
- connectorConfig,
- snapshotterService,
- connectionFactory,
- errorHandler,
- dispatcher,
- clock,
- schema,
- taskContext,
- replicationConnection,
- slotCreatedInfo,
- slotInfo),
- new DefaultChangeEventSourceMetricsFactory<>(),
- dispatcher,
- schema,
- snapshotterService,
- slotInfo,
- signalProcessor,
- notificationService);
-
- coordinator.start(taskContext, this.queue, metadataProvider);
-
- return coordinator;
+ queue = new ChangeEventQueue.Builder<DataChangeEvent>()
+ .pollInterval(connectorConfig.getPollInterval())
+ .maxBatchSize(connectorConfig.getMaxBatchSize())
+ .maxQueueSize(connectorConfig.getMaxQueueSize())
+ .maxQueueSizeInBytes(connectorConfig.getMaxQueueSizeInBytes())
+ .loggingContextSupplier(() -> taskContext.configureLoggingContext(CONTEXT_NAME))
+ .build();
+
+ errorHandler = new PostgresErrorHandler(connectorConfig, queue, errorHandler);
+
+ final PostgresEventMetadataProvider metadataProvider = new PostgresEventMetadataProvider();
+
+ SignalProcessor<PostgresPartition, PostgresOffsetContext> signalProcessor = new SignalProcessor<>(
+ YugabyteDBConnector.class, connectorConfig, Map.of(),
+ getAvailableSignalChannels(),
+ DocumentReader.defaultReader(),
+ previousOffsets);
+
+ final PostgresEventDispatcher<TableId> dispatcher = new PostgresEventDispatcher<>(
+ connectorConfig,
+ topicNamingStrategy,
+ schema,
+ queue,
+ connectorConfig.getTableFilters().dataCollectionFilter(),
+ DataChangeEvent::new,
+ PostgresChangeRecordEmitter::updateSchema,
+ metadataProvider,
+ connectorConfig.createHeartbeat(
+ topicNamingStrategy,
+ schemaNameAdjuster,
+ () -> new PostgresConnection(connectorConfig.getJdbcConfig(), PostgresConnection.CONNECTION_GENERAL),
+ exception -> {
+ String sqlErrorId = exception.getSQLState();
+ switch (sqlErrorId) {
+ case "57P01":
+ // Postgres error admin_shutdown, see https://www.postgresql.org/docs/12/errcodes-appendix.html
+ throw new DebeziumException("Could not execute heartbeat action query (Error: " + sqlErrorId + ")", exception);
+ case "57P03":
+ // Postgres error cannot_connect_now, see https://www.postgresql.org/docs/12/errcodes-appendix.html
+ throw new RetriableException("Could not execute heartbeat action query (Error: " + sqlErrorId + ")", exception);
+ default:
+ break;
+ }
+ }),
+ schemaNameAdjuster,
+ signalProcessor);
+
+ NotificationService<PostgresPartition, PostgresOffsetContext> notificationService = new NotificationService<>(getNotificationChannels(),
+ connectorConfig, SchemaFactory.get(), dispatcher::enqueueNotification);
+
+ ChangeEventSourceCoordinator<PostgresPartition, PostgresOffsetContext> coordinator = new PostgresChangeEventSourceCoordinator(
+ previousOffsets,
+ errorHandler,
+ YugabyteDBConnector.class,
+ connectorConfig,
+ new PostgresChangeEventSourceFactory(
+ connectorConfig,
+ snapshotterService,
+ connectionFactory,
+ errorHandler,
+ dispatcher,
+ clock,
+ schema,
+ taskContext,
+ replicationConnection,
+ slotCreatedInfo,
+ slotInfo),
+ new DefaultChangeEventSourceMetricsFactory<>(),
+ dispatcher,
+ schema,
+ snapshotterService,
+ slotInfo,
+ signalProcessor,
+ notificationService);
+
+ coordinator.start(taskContext, this.queue, metadataProvider);
+
+ return coordinator;
+ } finally {
+ previousContext.restore();
+ }
}
- finally {
- previousContext.restore();
+ catch (Exception exception) {
+ // YB Note: Catch all the exceptions and retry.
+ LOGGER.warn("Received exception, will be retrying", exception);
+ throw new RetriableException(exception);
}
+
}
private SlotCreationResult tryToCreateSlot(Snapshotter snapshotter, PostgresConnectorConfig connectorConfig, SlotState slotInfo) {
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresErrorHandler.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresErrorHandler.java
index b0e72e451d0..cedccbf43c7 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresErrorHandler.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresErrorHandler.java
@@ -21,7 +21,7 @@
public class PostgresErrorHandler extends ErrorHandler {
public PostgresErrorHandler(PostgresConnectorConfig connectorConfig, ChangeEventQueue<DataChangeEvent> queue, ErrorHandler replacedErrorHandler) {
- super(PostgresConnector.class, connectorConfig, queue, replacedErrorHandler);
+ super(YugabyteDBConnector.class, connectorConfig, queue, replacedErrorHandler);
}
@Override
@@ -34,4 +34,10 @@ protected Set<Class<? extends Exception>> communicationExceptions() {
protected boolean isRetriable(Throwable throwable) {
return super.isRetriable(throwable);
}
+
+ @Override
+ protected boolean isCustomRetriable(Throwable throwable) {
+ // YB Note: Yes, all the errors are custom retriable.
+ return true;
+ }
}
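
Since isCustomRetriable() now returns true unconditionally, every failure reaching the error handler is classified as retriable, which, combined with the RetriableException wrapping in start() above, keeps the task restarting rather than failing. A toy sketch of the resulting decision chain (hypothetical method names, not Debezium's ErrorHandler API):

    public class RetryClassificationSketch {
        // Base classification: only communication errors are retriable.
        static boolean isCommunicationException(Throwable t) {
            return t instanceof java.io.IOException;
        }

        // YB override: everything is custom retriable.
        static boolean isCustomRetriable(Throwable t) {
            return true;
        }

        static boolean isRetriable(Throwable t) {
            return isCommunicationException(t) || isCustomRetriable(t);
        }

        public static void main(String[] args) {
            System.out.println(isRetriable(new IllegalStateException("schema mismatch"))); // true
            System.out.println(isRetriable(new java.io.IOException("connection reset")));  // true
        }
    }
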
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresSchema.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresSchema.java
index 6a064fc9469..b3f5f701265 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresSchema.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresSchema.java
@@ -12,6 +12,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.errors.ConnectException;
@@ -32,7 +33,7 @@
import io.debezium.spi.topic.TopicNamingStrategy;
/**
- * Component that records the schema information for the {@link PostgresConnector}. The schema information contains
+ * Component that records the schema information for the {@link YugabyteDBConnector}. The schema information contains
* the {@link Tables table definitions} and the Kafka Connect {@link #schemaFor(TableId) Schema}s for each table, where the
* {@link Schema} excludes any columns that have been {@link PostgresConnectorConfig#COLUMN_EXCLUDE_LIST specified} in the
* configuration.
@@ -47,6 +48,7 @@ public class PostgresSchema extends RelationalDatabaseSchema {
private final Map<TableId, List<String>> tableIdToToastableColumns;
private final Map<Integer, TableId> relationIdToTableId;
+ private final Map<TableId, ReplicaIdentityInfo.ReplicaIdentity> tableIdToReplicaIdentity;
private final boolean readToastableColumns;
private final PostgresConnectorConfig connectorConfig;
@@ -65,13 +67,17 @@ protected PostgresSchema(PostgresConnectorConfig config, PostgresDefaultValueCon
this.tableIdToToastableColumns = new HashMap<>();
this.relationIdToTableId = new HashMap<>();
this.readToastableColumns = config.skipRefreshSchemaOnMissingToastableData();
+ this.tableIdToReplicaIdentity = new HashMap<>();
}
private static TableSchemaBuilder getTableSchemaBuilder(PostgresConnectorConfig config, PostgresValueConverter valueConverter,
PostgresDefaultValueConverter defaultValueConverter) {
- return new TableSchemaBuilder(valueConverter, defaultValueConverter, config.schemaNameAdjuster(),
- config.customConverterRegistry(), config.getSourceInfoStructMaker().schema(),
- config.getFieldNamer(), false);
+ if (!config.plugin().isYBOutput()) {
+ return new TableSchemaBuilder(valueConverter, defaultValueConverter, config.schemaNameAdjuster(),
+ config.customConverterRegistry(), config.getSourceInfoStructMaker().schema(), config.getFieldNamer(), false);
+ }
+
+ return new PGTableSchemaBuilder(valueConverter, defaultValueConverter, config, false /* multiPartitionMode */);
}
/**
@@ -97,6 +103,13 @@ protected PostgresSchema refresh(PostgresConnection connection, boolean printRep
return this;
}
+ public ReplicaIdentityInfo.ReplicaIdentity getReplicaIdentity(TableId tableId) {
+ ReplicaIdentityInfo.ReplicaIdentity replicaIdentity = tableIdToReplicaIdentity.get(tableId);
+ Objects.requireNonNull(replicaIdentity, "No replica identity is stored for table " + tableId);
+
+ return replicaIdentity;
+ }
+
private void printReplicaIdentityInfo(PostgresConnection connection, TableId tableId) {
try {
ReplicaIdentityInfo replicaIdentity = connection.readReplicaIdentityInfo(tableId);
@@ -264,6 +277,23 @@ public void applySchemaChangesForTable(int relationId, Table table) {
refresh(table);
}
+ /**
+ * YugabyteDB specific. Applies schema changes for the specified table, also stores the replica
+ * identity information.
+ *
+ * @param relationId the postgres relation unique identifier for the table
+ * @param table externally constructed table, typically from the decoder; must not be null
+ * @param replicaIdentityId the integer ID for replica identity
+ */
+ public void applySchemaChangesForTableWithReplicaIdentity(int relationId, Table table, int replicaIdentityId) {
+ applySchemaChangesForTable(relationId, table);
+
+ tableIdToReplicaIdentity.put(table.id(),
+ ReplicaIdentityInfo.ReplicaIdentity.parseFromDB(String.valueOf((char) replicaIdentityId)));
+
+ LOGGER.info("Replica identity being stored for table {} is {}", table.id(), getReplicaIdentity(table.id()));
+ }
+
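
The replica identity arrives from the decoder as a single byte, which applySchemaChangesForTableWithReplicaIdentity converts to an enum via parseFromDB. A self-contained sketch of that character-to-identity mapping, including the YugabyteDB-specific 'c' (CHANGE) code defined later in this patch:

    public class ReplicaIdentitySketch {
        enum ReplicaIdentity { NOTHING, DEFAULT, INDEX, FULL, CHANGE, UNKNOWN }

        // Mirrors ReplicaIdentityInfo.ReplicaIdentity.parseFromDB in this patch.
        static ReplicaIdentity parseFromDB(String s) {
            switch (s) {
                case "n": return ReplicaIdentity.NOTHING;
                case "d": return ReplicaIdentity.DEFAULT;
                case "i": return ReplicaIdentity.INDEX;
                case "f": return ReplicaIdentity.FULL;
                case "c": return ReplicaIdentity.CHANGE; // YugabyteDB-specific
                default: return ReplicaIdentity.UNKNOWN;
            }
        }

        public static void main(String[] args) {
            int replicaIdentityId = 'c'; // as received from the decoder
            System.out.println(parseFromDB(String.valueOf((char) replicaIdentityId))); // CHANGE
        }
    }
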
/**
* Resolve a {@link Table} based on a supplied table relation unique identifier.
*
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresSnapshotChangeEventSource.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresSnapshotChangeEventSource.java
index 65a5af63c67..97b7dec18bf 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresSnapshotChangeEventSource.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresSnapshotChangeEventSource.java
@@ -7,12 +7,14 @@
import java.sql.SQLException;
import java.time.Duration;
+import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
+import io.debezium.pipeline.spi.ChangeRecordEmitter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -94,16 +96,41 @@ protected SnapshotContext prepare(Post
return new PostgresSnapshotContext(partition, connectorConfig.databaseName(), onDemand);
}
+ @Override
+ protected ChangeRecordEmitter<PostgresPartition> getChangeRecordEmitter(
+ PostgresPartition partition, PostgresOffsetContext offset, TableId tableId, Object[] row,
+ Instant timestamp) {
+ if (YugabyteDBServer.isEnabled() && connectorConfig.plugin().isYBOutput()) {
+ offset.event(tableId, timestamp);
+
+ return new YBSnapshotChangeRecordEmitter<>(partition, offset, row, getClock(),
+ connectorConfig);
+ } else {
+ return super.getChangeRecordEmitter(partition, offset, tableId, row, timestamp);
+ }
+ }
+
@Override
protected void connectionCreated(RelationalSnapshotContext<PostgresPartition, PostgresOffsetContext> snapshotContext)
throws Exception {
- // If using catch up streaming, the connector opens the transaction that the snapshot will eventually use
- // before the catch up streaming starts. By looking at the current wal location, the transaction can determine
- // where the catch up streaming should stop. The transaction is held open throughout the catch up
- // streaming phase so that the snapshot is performed from a consistent view of the data. Since the isolation
- // level on the transaction used in catch up streaming has already set the isolation level and executed
- // statements, the transaction does not need to get set the level again here.
- if (snapshotterService.getSnapshotter().shouldStreamEventsStartingFromSnapshot() && startingSlotInfo == null) {
+ if (YugabyteDBServer.isEnabled()) {
+ // In case of YB, the consistent snapshot is performed as follows -
+ // 1) If connector created the slot, then the snapshotName returned as part of the CREATE_REPLICATION_SLOT
+ // command will have the hybrid time as of which the snapshot query is to be run
+ // 2) If slot already exists, then the snapshot query will be run as of the hybrid time corresponding to the
+ // restart_lsn. This information is available in the pg_replication_slots view
+ // In either case, setSnapshotTransactionIsolationLevel must be called so that the preparatory
+ // commands are run on the snapshot connection, allowing the snapshot query to execute as of the
+ // appropriate hybrid time
+ setSnapshotTransactionIsolationLevel(snapshotContext.onDemand);
+ }
+ else if (snapshotterService.getSnapshotter().shouldStreamEventsStartingFromSnapshot() && startingSlotInfo == null) {
+ // If using catch up streaming, the connector opens the transaction that the snapshot will eventually use
+ // before the catch up streaming starts. By looking at the current wal location, the transaction can determine
+ // where the catch up streaming should stop. The transaction is held open throughout the catch up
+ // streaming phase so that the snapshot is performed from a consistent view of the data. Since the isolation
+ // level on the transaction used in catch up streaming has already set the isolation level and executed
+ // statements, the transaction does not need to get set the level again here.
setSnapshotTransactionIsolationLevel(snapshotContext.onDemand);
}
schema.refresh(jdbcConnection, false);
@@ -195,6 +222,14 @@ private Lsn getTransactionStartLsn() throws SQLException {
// they'll be lost.
return slotCreatedInfo.startLsn();
}
+ else if (YugabyteDBServer.isEnabled()) {
+ // For YB, there are only 2 cases -
+ // 1) Connector creates the slot - in this case (slotCreatedInfo != null) will hold
+ // 2) Slot already exists - in this case, the streaming should start from the confirmed_flush_lsn
+ SlotState currentSlotState = jdbcConnection.getReplicationSlotState(connectorConfig.slotName(),
+ connectorConfig.plugin().getPostgresPluginName());
+ return currentSlotState.slotLastFlushedLsn();
+ }
else if (!snapshotterService.getSnapshotter().shouldStreamEventsStartingFromSnapshot() && startingSlotInfo != null) {
// Allow streaming to resume from where streaming stopped last rather than where the current snapshot starts.
SlotState currentSlotState = jdbcConnection.getReplicationSlotState(connectorConfig.slotName(),
@@ -263,15 +298,18 @@ protected void aborted(SnapshotContext
@Override
protected Optional<String> getSnapshotSelect(RelationalSnapshotContext<PostgresPartition, PostgresOffsetContext> snapshotContext,
TableId tableId, List<String> columns) {
-
return snapshotterService.getSnapshotQuery().snapshotQuery(tableId.toDoubleQuotedString(), columns);
}
protected void setSnapshotTransactionIsolationLevel(boolean isOnDemand) throws SQLException {
- LOGGER.info("Setting isolation level");
- String transactionStatement = snapshotTransactionIsolationLevelStatement(slotCreatedInfo, isOnDemand);
- LOGGER.info("Opening transaction with statement {}", transactionStatement);
- jdbcConnection.executeWithoutCommitting(transactionStatement);
+ if (!YugabyteDBServer.isEnabled() || connectorConfig.isYbConsistentSnapshotEnabled()) {
+ LOGGER.info("Setting isolation level");
+ String transactionStatement = snapshotTransactionIsolationLevelStatement(slotCreatedInfo, isOnDemand);
+ LOGGER.info("Opening transaction with statement {}", transactionStatement);
+ jdbcConnection.executeWithoutCommitting(transactionStatement);
+ } else {
+ LOGGER.info("Skipping setting snapshot time, snapshot data will not be consistent");
+ }
}
private String snapshotTransactionIsolationLevelStatement(SlotCreationResult newSlotInfo, boolean isOnDemand) {
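
For YB, getTransactionStartLsn therefore reduces to two cases: a connector-created slot streams from the LSN returned by CREATE_REPLICATION_SLOT, and a pre-existing slot streams from its confirmed_flush_lsn. A compact sketch of that decision, with stand-in record types rather than the connector's own classes:

    import java.util.Optional;

    public class StartLsnSketch {
        record SlotCreationResult(long startLsn) { }
        record SlotState(long slotLastFlushedLsn) { }

        static long transactionStartLsn(Optional<SlotCreationResult> created, SlotState existing) {
            // 1) connector created the slot: stream from the LSN returned by CREATE_REPLICATION_SLOT
            // 2) slot pre-existed: stream from confirmed_flush_lsn reported by pg_replication_slots
            return created.map(SlotCreationResult::startLsn)
                          .orElse(existing.slotLastFlushedLsn());
        }

        public static void main(String[] args) {
            System.out.println(transactionStartLsn(Optional.empty(), new SlotState(42L))); // 42
        }
    }
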
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresStreamingChangeEventSource.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresStreamingChangeEventSource.java
index 75a28d28b3e..a2fae7069ed 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresStreamingChangeEventSource.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresStreamingChangeEventSource.java
@@ -12,7 +12,7 @@
import java.util.concurrent.atomic.AtomicReference;
import org.apache.kafka.connect.errors.ConnectException;
-import org.postgresql.core.BaseConnection;
+import com.yugabyte.core.BaseConnection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -20,6 +20,7 @@
import io.debezium.connector.postgresql.connection.LogicalDecodingMessage;
import io.debezium.connector.postgresql.connection.Lsn;
import io.debezium.connector.postgresql.connection.PostgresConnection;
+import io.debezium.connector.postgresql.connection.PostgresReplicationConnection;
import io.debezium.connector.postgresql.connection.ReplicationConnection;
import io.debezium.connector.postgresql.connection.ReplicationMessage;
import io.debezium.connector.postgresql.connection.ReplicationMessage.Operation;
@@ -48,7 +49,6 @@ public class PostgresStreamingChangeEventSource implements StreamingChangeEventS
* trigger a "WAL backlog growing" warning.
*/
private static final int GROWING_WAL_WARNING_LOG_INTERVAL = 10_000;
-
private static final Logger LOGGER = LoggerFactory.getLogger(PostgresStreamingChangeEventSource.class);
// PGOUTPUT decoder sends the messages with larger time gaps than other decoders
@@ -62,7 +62,7 @@ public class PostgresStreamingChangeEventSource implements StreamingChangeEventS
private final PostgresSchema schema;
private final PostgresConnectorConfig connectorConfig;
private final PostgresTaskContext taskContext;
- private final ReplicationConnection replicationConnection;
+ private final PostgresReplicationConnection replicationConnection;
private final AtomicReference<ReplicationStream> replicationStream = new AtomicReference<>();
private final SnapshotterService snapshotterService;
private final DelayStrategy pauseNoMessage;
@@ -82,6 +82,11 @@ public class PostgresStreamingChangeEventSource implements StreamingChangeEventS
private Lsn lastCompletelyProcessedLsn;
private PostgresOffsetContext effectiveOffset;
+ /**
+ * Tracks the transaction id of the last COMMIT seen; used for debug-logging the gaps
+ * left by skipped empty transactions.
+ */
+ private OptionalLong lastTxnidForWhichCommitSeen = OptionalLong.empty();
+
public PostgresStreamingChangeEventSource(PostgresConnectorConfig connectorConfig, SnapshotterService snapshotterService,
PostgresConnection connection, PostgresEventDispatcher<TableId> dispatcher, ErrorHandler errorHandler, Clock clock,
PostgresSchema schema, PostgresTaskContext taskContext, ReplicationConnection replicationConnection) {
@@ -94,7 +99,7 @@ public PostgresStreamingChangeEventSource(PostgresConnectorConfig connectorConfi
pauseNoMessage = DelayStrategy.constant(taskContext.getConfig().getPollInterval());
this.taskContext = taskContext;
this.snapshotterService = snapshotterService;
- this.replicationConnection = replicationConnection;
+ this.replicationConnection = (PostgresReplicationConnection) replicationConnection;
this.connectionProbeTimer = ElapsedTimeStrategy.constant(Clock.system(), connectorConfig.statusUpdateInterval());
}
@@ -129,6 +134,15 @@ public void execute(ChangeEventSourceContext context, PostgresPartition partitio
try {
final WalPositionLocator walPosition;
+ // This log line is printed either once or twice:
+ // once - the WAL position is not being searched for
+ // twice - the WAL position locator is searching for a WAL position
+ if (YugabyteDBServer.isEnabled()) {
+ LOGGER.info("PID for replication connection: {} on node {}",
+ replicationConnection.getBackendPid(),
+ replicationConnection.getConnectedNodeIp());
+ }
+
if (hasStartLsnStoredInContext) {
// start streaming from the last recorded position in the offset
final Lsn lsn = this.effectiveOffset.hasCompletelyProcessedPosition() ? this.effectiveOffset.lastCompletelyProcessedLsn()
@@ -147,7 +161,7 @@ public void execute(ChangeEventSourceContext context, PostgresPartition partitio
// such that the connection times out. We must enable keep
// alive to ensure that it doesn't time out
ReplicationStream stream = this.replicationStream.get();
- stream.startKeepAlive(Threads.newSingleThreadExecutor(PostgresConnector.class, connectorConfig.getLogicalName(), KEEP_ALIVE_THREAD_NAME));
+ stream.startKeepAlive(Threads.newSingleThreadExecutor(YugabyteDBConnector.class, connectorConfig.getLogicalName(), KEEP_ALIVE_THREAD_NAME));
initSchema();
@@ -159,23 +173,35 @@ public void execute(ChangeEventSourceContext context, PostgresPartition partitio
this.lastCompletelyProcessedLsn = replicationStream.get().startLsn();
- if (walPosition.searchingEnabled() && this.effectiveOffset.hasCompletelyProcessedPosition()) {
- searchWalPosition(context, partition, this.effectiveOffset, stream, walPosition);
- try {
- if (!isInPreSnapshotCatchUpStreaming(this.effectiveOffset)) {
- connection.commit();
+ // Against YB, filtering of records based on WAL position is only enabled when the connector
+ // config provide.transaction.metadata is set to false.
+ if (!YugabyteDBServer.isEnabled() || !connectorConfig.shouldProvideTransactionMetadata()) {
+ if (walPosition.searchingEnabled()) {
+ searchWalPosition(context, partition, this.effectiveOffset, stream, walPosition);
+ try {
+ if (!isInPreSnapshotCatchUpStreaming(this.effectiveOffset)) {
+ connection.commit();
+ }
+ } catch (Exception e) {
+ LOGGER.info("Commit failed while preparing for reconnect", e);
}
+ walPosition.enableFiltering();
+ stream.stopKeepAlive();
+ replicationConnection.reconnect();
+
+ if (YugabyteDBServer.isEnabled()) {
+ LOGGER.info("PID for replication connection: {} on node {}",
+ replicationConnection.getBackendPid(),
+ replicationConnection.getConnectedNodeIp());
+ }
+
+ replicationStream.set(replicationConnection.startStreaming(walPosition.getLastEventStoredLsn(), walPosition));
+ stream = this.replicationStream.get();
+ stream.startKeepAlive(Threads.newSingleThreadExecutor(YugabyteDBConnector.class, connectorConfig.getLogicalName(), KEEP_ALIVE_THREAD_NAME));
}
- catch (Exception e) {
- LOGGER.info("Commit failed while preparing for reconnect", e);
- }
- walPosition.enableFiltering();
- stream.stopKeepAlive();
- replicationConnection.reconnect();
- replicationStream.set(replicationConnection.startStreaming(walPosition.getLastEventStoredLsn(), walPosition));
- stream = this.replicationStream.get();
- stream.startKeepAlive(Threads.newSingleThreadExecutor(PostgresConnector.class, connectorConfig.getLogicalName(), KEEP_ALIVE_THREAD_NAME));
+ } else {
+ LOGGER.info("Connector config provide.transaction.metadata is set to true. Therefore, skip records filtering in order to ship entire transactions.");
}
+
processMessages(context, partition, this.effectiveOffset, stream);
}
catch (Throwable e) {
@@ -258,6 +284,20 @@ private void processReplicationMessages(PostgresPartition partition, PostgresOff
// Tx BEGIN/END event
if (message.isTransactionalMessage()) {
+ if (message.getOperation() == Operation.BEGIN) {
+ LOGGER.debug("Processing BEGIN with end LSN {} and txnid {}", lsn, message.getTransactionId());
+ } else {
+ LOGGER.debug("Processing COMMIT with end LSN {} and txnid {}", lsn, message.getTransactionId());
+ }
+
+ OptionalLong currentTxnid = message.getTransactionId();
+ if (lastTxnidForWhichCommitSeen.isPresent() && currentTxnid.isPresent()) {
+ long delta = currentTxnid.getAsLong() - lastTxnidForWhichCommitSeen.getAsLong() - 1;
+ if (delta > 0) {
+ LOGGER.debug("Skipped {} empty transactions between {} and {}", delta, lastTxnidForWhichCommitSeen, currentTxnid);
+ }
+ }
+ lastTxnidForWhichCommitSeen = currentTxnid;
offsetContext.updateWalPosition(lsn, lastCompletelyProcessedLsn, message.getCommitTime(), toLong(message.getTransactionId()),
taskContext.getSlotXmin(connection),
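
The COMMIT bookkeeping above debug-logs gaps in the transaction id sequence, which correspond to empty transactions that produced no events. A stripped-down sketch of the delta computation:

    import java.util.OptionalLong;

    public class TxnGapSketch {
        private static OptionalLong lastCommitTxnid = OptionalLong.empty();

        // Returns the number of txnids skipped between consecutive commits, 0 if none/unknown.
        static long recordCommit(OptionalLong currentTxnid) {
            long delta = 0;
            if (lastCommitTxnid.isPresent() && currentTxnid.isPresent()) {
                delta = currentTxnid.getAsLong() - lastCommitTxnid.getAsLong() - 1;
            }
            lastCommitTxnid = currentTxnid;
            return Math.max(delta, 0);
        }

        public static void main(String[] args) {
            recordCommit(OptionalLong.of(10));
            System.out.println(recordCommit(OptionalLong.of(14))); // 3 empty transactions skipped
        }
    }
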
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresType.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresType.java
index da039284b4c..2a7f3155350 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresType.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresType.java
@@ -8,8 +8,8 @@
import java.util.List;
import java.util.Objects;
-import org.postgresql.core.Oid;
-import org.postgresql.core.TypeInfo;
+import com.yugabyte.core.Oid;
+import com.yugabyte.core.TypeInfo;
/**
* A class that binds together a PostgreSQL OID, JDBC type id and the string name of the type.
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresValueConverter.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresValueConverter.java
index 17f1bb7f6bd..3bd1fad3139 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresValueConverter.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresValueConverter.java
@@ -41,12 +41,12 @@
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.errors.ConnectException;
-import org.postgresql.PGStatement;
-import org.postgresql.geometric.PGpoint;
-import org.postgresql.jdbc.PgArray;
-import org.postgresql.util.HStoreConverter;
-import org.postgresql.util.PGInterval;
-import org.postgresql.util.PGobject;
+import com.yugabyte.PGStatement;
+import com.yugabyte.geometric.PGpoint;
+import com.yugabyte.jdbc.PgArray;
+import com.yugabyte.util.HStoreConverter;
+import com.yugabyte.util.PGInterval;
+import com.yugabyte.util.PGobject;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
@@ -713,7 +713,7 @@ else if (data instanceof Map) {
/**
* Returns an Hstore field as string in the form of {@code "key 1"=>"value1", "key_2"=>"val 1"}; i.e. the given byte
* array is NOT the byte representation returned by {@link HStoreConverter#toBytes(Map,
- * org.postgresql.core.Encoding))}, but the String based representation
+ * com.yugabyte.core.Encoding)}, but the String-based representation
*/
private String asHstoreString(byte[] data) {
return new String(data, databaseCharset);
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/ReplicaIdentityMapper.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/ReplicaIdentityMapper.java
index 0f7ab45e540..85d1a2bee27 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/ReplicaIdentityMapper.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/ReplicaIdentityMapper.java
@@ -21,7 +21,7 @@
import io.debezium.relational.TableId;
/**
- * Class that records Replica Identity information for the {@link PostgresConnector}
+ * Class that records Replica Identity information for the {@link YugabyteDBConnector}
* @author Ben White, Miguel Sotomayor
*/
@Immutable
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/TypeRegistry.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/TypeRegistry.java
index 21097e128a9..a37f139a1a2 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/TypeRegistry.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/TypeRegistry.java
@@ -19,9 +19,9 @@
import java.util.Set;
import org.apache.kafka.connect.errors.ConnectException;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.TypeInfo;
-import org.postgresql.jdbc.PgDatabaseMetaData;
+import com.yugabyte.core.BaseConnection;
+import com.yugabyte.core.TypeInfo;
+import com.yugabyte.jdbc.PgDatabaseMetaData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -436,7 +436,7 @@ public int isbn() {
private static class SqlTypeMapper {
/**
- * Based on org.postgresql.jdbc.TypeInfoCache.getSQLType(String). To emulate the original statement's behavior
+ * Based on com.yugabyte.jdbc.TypeInfoCache.getSQLType(String). To emulate the original statement's behavior
* (which works for single types only), PG's DISTINCT ON extension is used to just return the first entry should a
* type exist in multiple schemas.
*/
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YBHeartbeatImpl.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YBHeartbeatImpl.java
new file mode 100644
index 00000000000..dd87850c9fc
--- /dev/null
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YBHeartbeatImpl.java
@@ -0,0 +1,33 @@
+package io.debezium.connector.postgresql;
+
+import io.debezium.function.BlockingConsumer;
+import io.debezium.heartbeat.HeartbeatImpl;
+import io.debezium.schema.SchemaNameAdjuster;
+import org.apache.kafka.connect.source.SourceRecord;
+
+import java.time.Duration;
+import java.util.Map;
+
+/**
+ * YugabyteDB specific heartbeat implementation to only allow the forcedHeartbeat method which
+ * will be called in the transition phase when we are waiting for transitioning from snapshot to
+ * streaming.
+ */
+public class YBHeartbeatImpl extends HeartbeatImpl {
+ public YBHeartbeatImpl(Duration heartbeatInterval, String topicName, String key, SchemaNameAdjuster schemaNameAdjuster) {
+ super(heartbeatInterval, topicName, key, schemaNameAdjuster);
+ }
+
+ @Override
+ public void heartbeat(Map<String, ?> partition, Map<String, ?> offset, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
+ }
+
+ @Override
+ public void heartbeat(Map<String, ?> partition, OffsetProducer offsetProducer, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
+ }
+
+ @Override
+ public void forcedBeat(Map<String, ?> partition, Map<String, ?> offset, BlockingConsumer<SourceRecord> consumer) throws InterruptedException {
+ super.forcedBeat(partition, offset, consumer);
+ }
+}
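
The effect is that scheduled heartbeats become no-ops and only forcedBeat(), invoked while waiting to transition from snapshot to streaming, emits a record. A toy illustration of that contract (hypothetical interface, not Debezium's Heartbeat API):

    import java.util.Map;
    import java.util.function.Consumer;

    public class HeartbeatContractSketch {
        interface Heartbeat {
            default void heartbeat(Map<String, ?> partition, Map<String, ?> offset, Consumer<String> sink) {
                // no-op: scheduled heartbeats are suppressed
            }
            default void forcedBeat(Map<String, ?> partition, Map<String, ?> offset, Consumer<String> sink) {
                sink.accept("heartbeat record at offset " + offset);
            }
        }

        public static void main(String[] args) {
            Heartbeat hb = new Heartbeat() { };
            hb.heartbeat(Map.of(), Map.of("lsn", 1L), System.out::println);  // prints nothing
            hb.forcedBeat(Map.of(), Map.of("lsn", 1L), System.out::println); // emits one record
        }
    }
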
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YBSnapshotChangeRecordEmitter.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YBSnapshotChangeRecordEmitter.java
new file mode 100644
index 00000000000..0c339a08354
--- /dev/null
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YBSnapshotChangeRecordEmitter.java
@@ -0,0 +1,52 @@
+package io.debezium.connector.postgresql;
+
+import io.debezium.connector.postgresql.connection.ReplicaIdentityInfo;
+import io.debezium.data.Envelope;
+import io.debezium.pipeline.spi.OffsetContext;
+import io.debezium.relational.RelationalChangeRecordEmitter;
+import io.debezium.relational.RelationalDatabaseConnectorConfig;
+import io.debezium.util.Clock;
+
+/**
+ * Custom snapshot change record emitter for YugabyteDB which forms column values object based on
+ * the replica identity type
+ * @param <P> instance of {@link io.debezium.pipeline.spi.Partition}
+ * @author Vaibhav Kushwaha (vkushwaha@yugabyte.com)
+ */
+public class YBSnapshotChangeRecordEmitter<P extends PostgresPartition> extends RelationalChangeRecordEmitter<P> {
+ private final Object[] row;
+ private final PostgresConnectorConfig connectorConfig;
+
+ public YBSnapshotChangeRecordEmitter(P partition, OffsetContext offset, Object[] row, Clock clock,
+ PostgresConnectorConfig connectorConfig) {
+ super(partition, offset, clock, connectorConfig);
+
+ this.row = row;
+ this.connectorConfig = connectorConfig;
+ }
+
+ @Override
+ public Envelope.Operation getOperation() {
+ return Envelope.Operation.READ;
+ }
+
+ @Override
+ protected Object[] getOldColumnValues() {
+ throw new UnsupportedOperationException("Can't get old row values for READ record");
+ }
+
+ @Override
+ protected Object[] getNewColumnValues() {
+ Object[] values = new Object[row.length];
+
+ for (int position = 0; position < values.length; ++position) {
+ if (connectorConfig.plugin().isYBOutput()) {
+ values[position] = new Object[]{row[position], Boolean.TRUE};
+ } else {
+ values[position] = row[position];
+ }
+ }
+
+ return values;
+ }
+}
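
When the yboutput plugin is active, getNewColumnValues() wraps each snapshot cell as a {value, set} pair so that downstream conversion can tell a present column from an omitted one, which matters under replica identity CHANGE. A sketch of the wrapping:

    import java.util.Arrays;

    public class ValueWrapSketch {
        static Object[] wrapRow(Object[] row, boolean ybOutput) {
            Object[] values = new Object[row.length];
            for (int i = 0; i < row.length; i++) {
                // With yboutput, each cell carries the value plus a "set" flag.
                values[i] = ybOutput ? new Object[]{ row[i], Boolean.TRUE } : row[i];
            }
            return values;
        }

        public static void main(String[] args) {
            System.out.println(Arrays.deepToString(wrapRow(new Object[]{ 1, "a" }, true)));
            // [[1, true], [a, true]]
        }
    }
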
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnector.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YugabyteDBConnector.java
similarity index 98%
rename from debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnector.java
rename to debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YugabyteDBConnector.java
index f22626ab128..fdf680cf35a 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/PostgresConnector.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YugabyteDBConnector.java
@@ -37,14 +37,14 @@
*
* @author Horia Chiorean
*/
-public class PostgresConnector extends RelationalBaseSourceConnector {
+public class YugabyteDBConnector extends RelationalBaseSourceConnector {
- private static final Logger LOGGER = LoggerFactory.getLogger(PostgresConnector.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(YugabyteDBConnector.class);
public static final int READ_ONLY_SUPPORTED_VERSION = 13;
private Map props;
- public PostgresConnector() {
+ public YugabyteDBConnector() {
}
@Override
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YugabyteDBServer.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YugabyteDBServer.java
new file mode 100644
index 00000000000..dcaa291f54e
--- /dev/null
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/YugabyteDBServer.java
@@ -0,0 +1,12 @@
+package io.debezium.connector.postgresql;
+
+/**
+ * Helper class to add server related methods to aid in code execution for YugabyteDB specific flow.
+ *
+ * @author Vaibhav Kushwaha (vkushwaha@yugabyte.com)
+ */
+public class YugabyteDBServer {
+ public static boolean isEnabled() {
+ return true;
+ }
+}
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/AbstractColumnValue.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/AbstractColumnValue.java
index 91f354c3cca..576f1bc713f 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/AbstractColumnValue.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/AbstractColumnValue.java
@@ -14,16 +14,16 @@
import java.time.ZoneOffset;
import org.apache.kafka.connect.errors.ConnectException;
-import org.postgresql.geometric.PGbox;
-import org.postgresql.geometric.PGcircle;
-import org.postgresql.geometric.PGline;
-import org.postgresql.geometric.PGlseg;
-import org.postgresql.geometric.PGpath;
-import org.postgresql.geometric.PGpoint;
-import org.postgresql.geometric.PGpolygon;
-import org.postgresql.jdbc.PgArray;
-import org.postgresql.util.PGInterval;
-import org.postgresql.util.PGtokenizer;
+import com.yugabyte.geometric.PGbox;
+import com.yugabyte.geometric.PGcircle;
+import com.yugabyte.geometric.PGline;
+import com.yugabyte.geometric.PGlseg;
+import com.yugabyte.geometric.PGpath;
+import com.yugabyte.geometric.PGpoint;
+import com.yugabyte.geometric.PGpolygon;
+import com.yugabyte.jdbc.PgArray;
+import com.yugabyte.util.PGInterval;
+import com.yugabyte.util.PGtokenizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/Lsn.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/Lsn.java
index 55b7223fd51..b9f4b7dc8fe 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/Lsn.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/Lsn.java
@@ -7,11 +7,11 @@
import java.nio.ByteBuffer;
-import org.postgresql.replication.LogSequenceNumber;
+import com.yugabyte.replication.LogSequenceNumber;
/**
* Abstraction of PostgreSQL log sequence number, adapted from
- * {@link org.postgresql.replication.LogSequenceNumber}.
+ * {@link com.yugabyte.replication.LogSequenceNumber}.
*
* @author Jiri Pechanec
*
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/MessageDecoder.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/MessageDecoder.java
index 984f28ddd1f..8406c9b3ab7 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/MessageDecoder.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/MessageDecoder.java
@@ -10,7 +10,7 @@
import java.sql.SQLException;
import java.util.function.Function;
-import org.postgresql.replication.fluent.logical.ChainedLogicalStreamBuilder;
+import com.yugabyte.replication.fluent.logical.ChainedLogicalStreamBuilder;
import io.debezium.connector.postgresql.TypeRegistry;
import io.debezium.connector.postgresql.connection.ReplicationStream.ReplicationMessageProcessor;
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresConnection.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresConnection.java
index 3bb9070697f..dd3d37501b1 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresConnection.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresConnection.java
@@ -21,13 +21,14 @@
import java.util.concurrent.atomic.AtomicLong;
import java.util.regex.Pattern;
+import com.yugabyte.core.ConnectionFactory;
import org.apache.kafka.connect.errors.ConnectException;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.jdbc.PgConnection;
-import org.postgresql.jdbc.TimestampUtils;
-import org.postgresql.replication.LogSequenceNumber;
-import org.postgresql.util.PGmoney;
-import org.postgresql.util.PSQLState;
+import com.yugabyte.core.BaseConnection;
+import com.yugabyte.jdbc.PgConnection;
+import com.yugabyte.jdbc.TimestampUtils;
+import com.yugabyte.replication.LogSequenceNumber;
+import com.yugabyte.util.PGmoney;
+import com.yugabyte.util.PSQLState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -41,6 +42,7 @@
import io.debezium.connector.postgresql.PostgresType;
import io.debezium.connector.postgresql.PostgresValueConverter;
import io.debezium.connector.postgresql.TypeRegistry;
+import io.debezium.connector.postgresql.YugabyteDBServer;
import io.debezium.connector.postgresql.spi.SlotState;
import io.debezium.data.SpecialValueDecimal;
import io.debezium.jdbc.JdbcConfiguration;
@@ -72,26 +74,29 @@ public class PostgresConnection extends JdbcConnection {
public static final String CONNECTION_VALIDATE_CONNECTION = "Debezium Validate Connection";
public static final String CONNECTION_HEARTBEAT = "Debezium Heartbeat";
public static final String CONNECTION_GENERAL = "Debezium General";
+ public static final String CONNECTION_FETCH_REPLICA_IDENTITY = "Debezium YB Fetch Replica Identity";
private static final Pattern FUNCTION_DEFAULT_PATTERN = Pattern.compile("^[(]?[A-Za-z0-9_.]+\\((?:.+(?:, ?.+)*)?\\)");
private static final Pattern EXPRESSION_DEFAULT_PATTERN = Pattern.compile("\\(+(?:.+(?:[+ - * / < > = ~ ! @ # % ^ & | ` ?] ?.+)+)+\\)");
private static Logger LOGGER = LoggerFactory.getLogger(PostgresConnection.class);
- private static final String URL_PATTERN = "jdbc:postgresql://${" + JdbcConfiguration.HOSTNAME + "}:${"
+ public static final String MULTI_HOST_URL_PATTERN = "jdbc:yugabytedb://${" + JdbcConfiguration.HOSTNAME + "}/${" + JdbcConfiguration.DATABASE + "}?load-balance=true";
+ public static final String URL_PATTERN = "jdbc:yugabytedb://${" + JdbcConfiguration.HOSTNAME + "}:${"
+ JdbcConfiguration.PORT + "}/${" + JdbcConfiguration.DATABASE + "}";
- protected static final ConnectionFactory FACTORY = JdbcConnection.patternBasedFactory(URL_PATTERN,
- org.postgresql.Driver.class.getName(),
+ protected static ConnectionFactory CONNECTION_FACTORY = JdbcConnection.patternBasedFactory(URL_PATTERN,
+ com.yugabyte.Driver.class.getName(),
PostgresConnection.class.getClassLoader(), JdbcConfiguration.PORT.withDefault(PostgresConnectorConfig.PORT.defaultValueAsString()));
/**
- * Obtaining a replication slot may fail if there's a pending transaction. We're retrying to get a slot for 30 min.
+ * Obtaining a replication slot may fail if there's a pending transaction. We retry for up to 3 minutes (90 attempts, 2 seconds apart).
*/
- private static final int MAX_ATTEMPTS_FOR_OBTAINING_REPLICATION_SLOT = 900;
+ private static final int MAX_ATTEMPTS_FOR_OBTAINING_REPLICATION_SLOT = 90;
private static final Duration PAUSE_BETWEEN_REPLICATION_SLOT_RETRIEVAL_ATTEMPTS = Duration.ofSeconds(2);
private final TypeRegistry typeRegistry;
private final PostgresDefaultValueConverter defaultValueConverter;
+ private final JdbcConfiguration jdbcConfig;
/**
* Creates a Postgres connection using the supplied configuration.
@@ -102,9 +107,12 @@ public class PostgresConnection extends JdbcConnection {
* @param config {@link Configuration} instance, may not be null.
* @param valueConverterBuilder supplies a configured {@link PostgresValueConverter} for a given {@link TypeRegistry}
* @param connectionUsage a symbolic name of the connection to be tracked in monitoring tools
+ * @param factory a {@link io.debezium.jdbc.JdbcConnection.ConnectionFactory} instance
*/
- public PostgresConnection(JdbcConfiguration config, PostgresValueConverterBuilder valueConverterBuilder, String connectionUsage) {
- super(addDefaultSettings(config, connectionUsage), FACTORY, PostgresConnection::validateServerVersion, "\"", "\"");
+ public PostgresConnection(JdbcConfiguration config, PostgresValueConverterBuilder valueConverterBuilder, String connectionUsage, ConnectionFactory factory) {
+ super(addDefaultSettings(config, connectionUsage), factory, PostgresConnection::validateServerVersion, "\"", "\"");
+ this.jdbcConfig = config;
+ PostgresConnection.CONNECTION_FACTORY = factory;
if (Objects.isNull(valueConverterBuilder)) {
this.typeRegistry = null;
@@ -118,15 +126,19 @@ public PostgresConnection(JdbcConfiguration config, PostgresValueConverterBuilde
}
}
+ public PostgresConnection(JdbcConfiguration config, PostgresValueConverterBuilder valueConverterBuilder, String connectionUsage) {
+ this(config, valueConverterBuilder, connectionUsage, PostgresConnectorConfig.getConnectionFactory(config.getHostname()));
+ }
+
/**
* Create a Postgres connection using the supplied configuration and {@link TypeRegistry}
* @param config {@link Configuration} instance, may not be null.
* @param typeRegistry an existing/already-primed {@link TypeRegistry} instance
* @param connectionUsage a symbolic name of the connection to be tracked in monitoring tools
*/
- public PostgresConnection(PostgresConnectorConfig config, TypeRegistry typeRegistry, String connectionUsage) {
+ public PostgresConnection(PostgresConnectorConfig config, TypeRegistry typeRegistry, String connectionUsage, ConnectionFactory factory) {
super(addDefaultSettings(config.getJdbcConfig(), connectionUsage),
- FACTORY,
+ factory,
PostgresConnection::validateServerVersion,
"\"", "\"");
@@ -139,6 +151,13 @@ public PostgresConnection(PostgresConnectorConfig config, TypeRegistry typeRegis
final PostgresValueConverter valueConverter = PostgresValueConverter.of(config, this.getDatabaseCharset(), typeRegistry);
this.defaultValueConverter = new PostgresDefaultValueConverter(valueConverter, this.getTimestampUtils(), typeRegistry);
}
+
+ PostgresConnection.CONNECTION_FACTORY = factory;
+ this.jdbcConfig = config.getJdbcConfig();
+ }
+
+ public PostgresConnection(PostgresConnectorConfig config, TypeRegistry typeRegistry, String connectionUsage) {
+ this(config, typeRegistry, connectionUsage, PostgresConnectorConfig.getConnectionFactory(config.getJdbcConfig().getHostname()));
}
/**
@@ -166,7 +185,12 @@ static JdbcConfiguration addDefaultSettings(JdbcConfiguration configuration, Str
* @return a {@code String} where the variables in {@code urlPattern} are replaced with values from the configuration
*/
public String connectionString() {
- return connectionString(URL_PATTERN);
+ String hostName = jdbcConfig.getHostname();
+ if (hostName.contains(":")) {
+ return connectionString(MULTI_HOST_URL_PATTERN);
+ } else {
+ return connectionString(URL_PATTERN);
+ }
}
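
connectionString() treats a hostname containing ':' as a multi-host list (host1:port1,host2:port2,...) and switches to the load-balanced URL pattern; a single host keeps the host:port pattern. A sketch of the selection, with plain string formatting standing in for the connector's pattern substitution:

    public class UrlPatternSketch {
        static final String MULTI = "jdbc:yugabytedb://%s/%s?load-balance=true";
        static final String SINGLE = "jdbc:yugabytedb://%s:%d/%s";

        static String connectionString(String hostname, int port, String db) {
            // A ':' in the hostname implies an embedded host:port list.
            return hostname.contains(":")
                    ? String.format(MULTI, hostname, db)
                    : String.format(SINGLE, hostname, port, db);
        }

        public static void main(String[] args) {
            System.out.println(connectionString("node1:5433,node2:5433", 5433, "yugabyte"));
            System.out.println(connectionString("localhost", 5433, "yugabyte"));
        }
    }
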
/**
@@ -303,7 +327,8 @@ private ServerInfo.ReplicationSlot fetchReplicationSlotInfo(String slotName, Str
return null;
}
final Long xmin = rs.getLong("catalog_xmin");
- return new ServerInfo.ReplicationSlot(active, confirmedFlushedLsn, restartLsn, xmin);
+ final Long restartCommitHT = rs.getLong("yb_restart_commit_ht");
+ return new ServerInfo.ReplicationSlot(active, confirmedFlushedLsn, restartLsn, xmin, restartCommitHT);
}
else {
LOGGER.debug("No replication slot '{}' is present for plugin '{}' and database '{}'", slotName,
@@ -522,6 +547,12 @@ public synchronized void close() {
* @throws SQLException if anything fails.
*/
public Long currentTransactionId() throws SQLException {
+ // YB Note: Returning a dummy value since the txid information is not being used to make
+ // any difference.
+ if (YugabyteDBServer.isEnabled()) {
+ return 2L;
+ }
+
AtomicLong txId = new AtomicLong(0);
query("select (case pg_is_in_recovery() when 't' then 0 else txid_current() end) AS pg_current_txid", rs -> {
if (rs.next()) {
@@ -598,7 +629,7 @@ public Charset getDatabaseCharset() {
public TimestampUtils getTimestampUtils() {
try {
- return ((PgConnection) this.connection()).getTimestampUtils();
+ return ((com.yugabyte.jdbc.PgConnection) this.connection()).getTimestampUtils();
}
catch (SQLException e) {
throw new DebeziumException("Couldn't get timestamp utils from underlying connection", e);
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresDefaultValueConverter.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresDefaultValueConverter.java
index 8ff2fef140f..78a801786d6 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresDefaultValueConverter.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresDefaultValueConverter.java
@@ -21,8 +21,8 @@
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
-import org.postgresql.jdbc.TimestampUtils;
-import org.postgresql.util.PGInterval;
+import com.yugabyte.jdbc.TimestampUtils;
+import com.yugabyte.util.PGInterval;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresReplicationConnection.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresReplicationConnection.java
index 6e8cf63f0dd..66b502c500a 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresReplicationConnection.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/PostgresReplicationConnection.java
@@ -28,12 +28,12 @@
import java.util.stream.Collectors;
import org.apache.kafka.connect.errors.ConnectException;
-import org.postgresql.core.BaseConnection;
-import org.postgresql.core.ServerVersion;
-import org.postgresql.replication.PGReplicationStream;
-import org.postgresql.replication.fluent.logical.ChainedLogicalStreamBuilder;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
+import com.yugabyte.core.BaseConnection;
+import com.yugabyte.core.ServerVersion;
+import com.yugabyte.replication.PGReplicationStream;
+import com.yugabyte.replication.fluent.logical.ChainedLogicalStreamBuilder;
+import com.yugabyte.util.PSQLException;
+import com.yugabyte.util.PSQLState;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -110,7 +110,7 @@ private PostgresReplicationConnection(PostgresConnectorConfig config,
TypeRegistry typeRegistry,
Properties streamParams,
PostgresSchema schema) {
- super(addDefaultSettings(config.getJdbcConfig()), PostgresConnection.FACTORY, "\"", "\"");
+ super(addDefaultSettings(config.getJdbcConfig()), PostgresConnection.CONNECTION_FACTORY, "\"", "\"");
this.connectorConfig = config;
this.slotName = slotName;
@@ -146,7 +146,7 @@ private ServerInfo.ReplicationSlot getSlotInfo() throws SQLException, Interrupte
}
protected void initPublication() {
- if (PostgresConnectorConfig.LogicalDecoder.PGOUTPUT.equals(plugin)) {
+ if (PostgresConnectorConfig.LogicalDecoder.PGOUTPUT.equals(plugin) || PostgresConnectorConfig.LogicalDecoder.YBOUTPUT.equals(plugin)) {
LOGGER.info("Initializing PgOutput logical decoder publication");
try {
// Unless the autocommit is disabled the SELECT publication query will stay running
@@ -542,6 +542,34 @@ protected BaseConnection pgConnection() throws SQLException {
return (BaseConnection) connection(false);
}
+ public String getBackendPid() {
+ try (Statement stmt = pgConnection().createStatement()) {
+ ResultSet rs = stmt.executeQuery("SELECT pg_backend_pid() backend_pid;");
+
+ if (rs.next()) {
+ return rs.getString("backend_pid");
+ }
+ } catch (SQLException sqle) {
+ LOGGER.warn("Unable to get the backend PID", sqle);
+ }
+
+ return "FAILED_TO_GET_BACKEND_PID";
+ }
+
+ public String getConnectedNodeIp() {
+ try (Statement stmt = pgConnection().createStatement()) {
+ ResultSet rs = stmt.executeQuery("SELECT inet_server_addr() connected_to_host;");
+
+ if (rs.next()) {
+ return rs.getString("connected_to_host");
+ }
+ } catch (SQLException sqle) {
+ LOGGER.warn("Unable to get the connected host node", sqle);
+ }
+
+ return "FAILED_TO_GET_CONNECTED_NODE";
+ }
+
private SlotCreationResult parseSlotCreation(ResultSet rs) {
try {
if (rs.next()) {
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicaIdentityInfo.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicaIdentityInfo.java
index 8738db7eb06..db8e4cf10da 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicaIdentityInfo.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicaIdentityInfo.java
@@ -46,11 +46,15 @@ public String toString() {
* Table REPLICA IDENTITY information.
*/
public enum ReplicaIdentity {
+ // YB Note: CHANGE is a YugabyteDB specific replica identity.
NOTHING("UPDATE and DELETE events will not contain any old values"),
FULL("UPDATE AND DELETE events will contain the previous values of all the columns"),
- DEFAULT("UPDATE and DELETE events will contain previous values only for PK columns"),
+ // YB Note: In case of primary key updates, YugabyteDB sends a delete and an insert event
+ // unlike Postgres which sends a single update event with old primary key set in old tuple.
+ DEFAULT("Only DELETE events will contain previous values only for PK columns"),
INDEX("UPDATE and DELETE events will contain previous values only for columns present in the REPLICA IDENTITY index"),
- UNKNOWN("Unknown REPLICA IDENTITY");
+ UNKNOWN("Unknown REPLICA IDENTITY"),
+ CHANGE("UPDATE events will contain values only for changed columns");
private final String description;
@@ -67,7 +71,8 @@ public String description() {
this.description = description;
}
- static ReplicaIdentityInfo.ReplicaIdentity parseFromDB(String s) {
+ // YB Note: CHANGE is a YugabyteDB specific replica identity.
+ public static ReplicaIdentityInfo.ReplicaIdentity parseFromDB(String s) {
switch (s) {
case "n":
return NOTHING;
@@ -77,6 +82,8 @@ static ReplicaIdentityInfo.ReplicaIdentity parseFromDB(String s) {
return INDEX;
case "f":
return FULL;
+ case "c":
+ return CHANGE;
default:
return UNKNOWN;
}
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationConnection.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationConnection.java
index 34738c958c3..d4fa5460b0e 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationConnection.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationConnection.java
@@ -10,7 +10,7 @@
import java.time.Duration;
import java.util.Optional;
-import org.postgresql.replication.PGReplicationStream;
+import com.yugabyte.replication.PGReplicationStream;
import io.debezium.annotation.NotThreadSafe;
import io.debezium.connector.postgresql.PostgresConnectorConfig;
@@ -52,7 +52,7 @@ public interface ReplicationConnection extends AutoCloseable {
* @param offset a value representing the WAL sequence number where replication should start from; if the value
* is {@code null} or negative, this behaves exactly like {@link #startStreaming()}.
* @return a {@link PGReplicationStream} from which data is read; never null
- * @see org.postgresql.replication.LogSequenceNumber
+ * @see com.yugabyte.replication.LogSequenceNumber
* @throws SQLException if anything fails
*/
ReplicationStream startStreaming(Lsn offset, WalPositionLocator walPosition) throws SQLException, InterruptedException;
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationMessage.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationMessage.java
index cd1070e21b7..c0a951a8622 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationMessage.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationMessage.java
@@ -13,12 +13,12 @@
import java.util.List;
import java.util.OptionalLong;
-import org.postgresql.geometric.PGbox;
-import org.postgresql.geometric.PGcircle;
-import org.postgresql.geometric.PGline;
-import org.postgresql.geometric.PGpath;
-import org.postgresql.geometric.PGpoint;
-import org.postgresql.geometric.PGpolygon;
+import com.yugabyte.geometric.PGbox;
+import com.yugabyte.geometric.PGcircle;
+import com.yugabyte.geometric.PGline;
+import com.yugabyte.geometric.PGpath;
+import com.yugabyte.geometric.PGpoint;
+import com.yugabyte.geometric.PGpolygon;
import io.debezium.connector.postgresql.PostgresStreamingChangeEventSource;
import io.debezium.connector.postgresql.PostgresStreamingChangeEventSource.PgConnectionSupplier;
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationStream.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationStream.java
index 6db31594df1..beaa90c8c7d 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationStream.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ReplicationStream.java
@@ -9,7 +9,7 @@
import java.sql.SQLException;
import java.util.concurrent.ExecutorService;
-import org.postgresql.replication.PGReplicationStream;
+import com.yugabyte.replication.PGReplicationStream;
/**
* A stream from which messages sent by a logical decoding plugin can be consumed over a replication connection.
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ServerInfo.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ServerInfo.java
index c56f3255c86..45ca1033a48 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ServerInfo.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/ServerInfo.java
@@ -114,18 +114,20 @@ public String toString() {
* Information about a server replication slot
*/
protected static class ReplicationSlot {
- protected static final ReplicationSlot INVALID = new ReplicationSlot(false, null, null, null);
+ protected static final ReplicationSlot INVALID = new ReplicationSlot(false, null, null, null, null);
private boolean active;
private Lsn latestFlushedLsn;
private Lsn restartLsn;
private Long catalogXmin;
+ private Long restartCommitHT;
- protected ReplicationSlot(boolean active, Lsn latestFlushedLsn, Lsn restartLsn, Long catalogXmin) {
+ protected ReplicationSlot(boolean active, Lsn latestFlushedLsn, Lsn restartLsn, Long catalogXmin, Long restartCommitHT) {
this.active = active;
this.latestFlushedLsn = latestFlushedLsn;
this.restartLsn = restartLsn;
this.catalogXmin = catalogXmin;
+ this.restartCommitHT = restartCommitHT;
}
protected boolean active() {
@@ -160,12 +162,16 @@ protected Long catalogXmin() {
return catalogXmin;
}
+ protected Long restartCommitHT() {
+ return restartCommitHT;
+ }
+
protected boolean hasValidFlushedLsn() {
return latestFlushedLsn != null;
}
protected SlotState asSlotState() {
- return new SlotState(latestFlushedLsn, restartLsn, catalogXmin, active);
+ return new SlotState(latestFlushedLsn, restartLsn, catalogXmin, active, restartCommitHT);
}
@Override
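
ReplicationSlot thus gains a fifth field, restartCommitHT, read from yb_restart_commit_ht in pg_replication_slots; for a pre-existing slot this hybrid time determines the point as of which the snapshot query runs. A stand-in record sketch of the extended slot state (hypothetical shape, not the connector's SlotState class):

    public class SlotStateSketch {
        record SlotState(String latestFlushedLsn, String restartLsn, Long catalogXmin,
                         boolean active, Long restartCommitHT) { }

        public static void main(String[] args) {
            SlotState slot = new SlotState("0/16B3748", "0/16B3710", 742L, true, 6812345678000L);
            System.out.println(slot.restartCommitHT()); // hybrid time backing the snapshot read
        }
    }
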
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgoutput/PgOutputMessageDecoder.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgoutput/PgOutputMessageDecoder.java
index 6e18e0a2fe1..a92f4c1d789 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgoutput/PgOutputMessageDecoder.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgoutput/PgOutputMessageDecoder.java
@@ -26,7 +26,9 @@
import java.util.Set;
import java.util.function.Function;
-import org.postgresql.replication.fluent.logical.ChainedLogicalStreamBuilder;
+import com.yugabyte.replication.fluent.logical.ChainedLogicalStreamBuilder;
+import io.debezium.connector.postgresql.YugabyteDBServer;
+import io.debezium.connector.postgresql.connection.ReplicaIdentityInfo;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -335,6 +337,11 @@ private void handleRelationMessage(ByteBuffer buffer, TypeRegistry typeRegistry)
optional = true;
}
+ if (YugabyteDBServer.isEnabled() && !key && isReplicaIdentityChange(replicaIdentityId)) {
+ LOGGER.trace("Marking column {} optional for replica identity CHANGE", columnName);
+ optional = true;
+ }
+
final boolean hasDefault = columnDefaults.containsKey(columnName);
final String defaultValueExpression = columnDefaults.getOrDefault(columnName, Optional.empty()).orElse(null);
@@ -360,7 +367,20 @@ private void handleRelationMessage(ByteBuffer buffer, TypeRegistry typeRegistry)
primaryKeyColumns.retainAll(columnNames);
Table table = resolveRelationFromMetadata(new PgOutputRelationMetaData(relationId, schemaName, tableName, columns, primaryKeyColumns));
- decoderContext.getSchema().applySchemaChangesForTable(relationId, table);
+ if (YugabyteDBServer.isEnabled()) {
+ decoderContext.getSchema().applySchemaChangesForTableWithReplicaIdentity(relationId, table, replicaIdentityId);
+ } else {
+ decoderContext.getSchema().applySchemaChangesForTable(relationId, table);
+ }
+ }
+
+ /**
+ * @param replicaIdentityId the integer value of the character denoting the replica identity.
+ * @return true if the replica identity is CHANGE, false otherwise.
+ */
+ private boolean isReplicaIdentityChange(int replicaIdentityId) {
+ return ReplicaIdentityInfo.ReplicaIdentity.CHANGE
+ == ReplicaIdentityInfo.ReplicaIdentity.parseFromDB(String.valueOf((char) replicaIdentityId));
}
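
Under replica identity CHANGE an UPDATE carries only the columns that changed, so handleRelationMessage marks every non-key column optional even when it is NOT NULL in the database; otherwise schema validation would reject events with omitted columns. A condensed sketch of the rule:

    public class ColumnOptionalitySketch {
        static boolean isOptional(boolean dbNullable, boolean isKey, boolean identityIsChange) {
            if (dbNullable) {
                return true;
            }
            // Non-key columns may be absent from the event payload under CHANGE.
            return identityIsChange && !isKey;
        }

        public static void main(String[] args) {
            System.out.println(isOptional(false, false, true)); // true: unchanged columns are omitted
            System.out.println(isOptional(false, true, true));  // false: key columns are always present
        }
    }
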
private List<Column> getTableColumnsFromDatabase(PostgresConnection connection, DatabaseMetaData databaseMetadata, TableId tableId)
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgproto/PgProtoColumnValue.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgproto/PgProtoColumnValue.java
index 4ba61008fa1..7710eb5a648 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgproto/PgProtoColumnValue.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgproto/PgProtoColumnValue.java
@@ -17,8 +17,8 @@
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
-import org.postgresql.geometric.PGpoint;
-import org.postgresql.jdbc.PgArray;
+import com.yugabyte.geometric.PGpoint;
+import com.yugabyte.jdbc.PgArray;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgproto/PgProtoMessageDecoder.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgproto/PgProtoMessageDecoder.java
index d958b8d9343..d246fe3f86e 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgproto/PgProtoMessageDecoder.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/connection/pgproto/PgProtoMessageDecoder.java
@@ -12,7 +12,7 @@
import java.util.function.Function;
import org.apache.kafka.connect.errors.ConnectException;
-import org.postgresql.replication.fluent.logical.ChainedLogicalStreamBuilder;
+import com.yugabyte.replication.fluent.logical.ChainedLogicalStreamBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/metadata/PostgresConnectorMetadata.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/metadata/PostgresConnectorMetadata.java
index 93b5f746794..f9774e03a54 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/metadata/PostgresConnectorMetadata.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/metadata/PostgresConnectorMetadata.java
@@ -7,7 +7,7 @@
import io.debezium.config.Field;
import io.debezium.connector.postgresql.Module;
-import io.debezium.connector.postgresql.PostgresConnector;
+import io.debezium.connector.postgresql.YugabyteDBConnector;
import io.debezium.connector.postgresql.PostgresConnectorConfig;
import io.debezium.metadata.ConnectorDescriptor;
import io.debezium.metadata.ConnectorMetadata;
@@ -16,7 +16,7 @@ public class PostgresConnectorMetadata implements ConnectorMetadata {
@Override
public ConnectorDescriptor getConnectorDescriptor() {
- return new ConnectorDescriptor(PostgresConnector.class.getName(), Module.version());
+ return new ConnectorDescriptor(YugabyteDBConnector.class.getName(), Module.version());
}
@Override
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResource.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResource.java
index c171d6b3428..c5dd3ced458 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResource.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResource.java
@@ -16,11 +16,11 @@
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;
+import io.debezium.connector.postgresql.YugabyteDBConnector;
import org.apache.kafka.connect.health.ConnectClusterState;
import io.debezium.config.Configuration;
import io.debezium.connector.postgresql.Module;
-import io.debezium.connector.postgresql.PostgresConnector;
import io.debezium.rest.ConnectionValidationResource;
import io.debezium.rest.FilterValidationResource;
import io.debezium.rest.MetricsResource;
@@ -52,8 +52,8 @@ public String getConnectorVersion() {
}
@Override
- public PostgresConnector getConnector() {
- return new PostgresConnector();
+ public YugabyteDBConnector getConnector() {
+ return new YugabyteDBConnector();
}
@Override
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/lock/NoSnapshotLock.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/lock/NoSnapshotLock.java
index c44c5baf41d..35f8ad377f4 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/lock/NoSnapshotLock.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/lock/NoSnapshotLock.java
@@ -10,11 +10,11 @@
import java.util.Optional;
import io.debezium.annotation.ConnectorSpecific;
-import io.debezium.connector.postgresql.PostgresConnector;
+import io.debezium.connector.postgresql.YugabyteDBConnector;
import io.debezium.connector.postgresql.PostgresConnectorConfig;
import io.debezium.snapshot.spi.SnapshotLock;
-@ConnectorSpecific(connector = PostgresConnector.class)
+@ConnectorSpecific(connector = YugabyteDBConnector.class)
public class NoSnapshotLock implements SnapshotLock {
@Override
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/lock/SharedSnapshotLock.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/lock/SharedSnapshotLock.java
index 6c9c52b14b2..68a18854ee4 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/lock/SharedSnapshotLock.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/lock/SharedSnapshotLock.java
@@ -10,11 +10,11 @@
import java.util.Optional;
import io.debezium.annotation.ConnectorSpecific;
-import io.debezium.connector.postgresql.PostgresConnector;
+import io.debezium.connector.postgresql.YugabyteDBConnector;
import io.debezium.connector.postgresql.PostgresConnectorConfig;
import io.debezium.snapshot.spi.SnapshotLock;
-@ConnectorSpecific(connector = PostgresConnector.class)
+@ConnectorSpecific(connector = YugabyteDBConnector.class)
public class SharedSnapshotLock implements SnapshotLock {
@Override
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/query/SelectAllSnapshotQuery.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/query/SelectAllSnapshotQuery.java
index 0a5732c84e2..ad873aeecdb 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/query/SelectAllSnapshotQuery.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/snapshot/query/SelectAllSnapshotQuery.java
@@ -12,10 +12,10 @@
import io.debezium.annotation.ConnectorSpecific;
import io.debezium.config.CommonConnectorConfig;
-import io.debezium.connector.postgresql.PostgresConnector;
+import io.debezium.connector.postgresql.YugabyteDBConnector;
import io.debezium.snapshot.spi.SnapshotQuery;
-@ConnectorSpecific(connector = PostgresConnector.class)
+@ConnectorSpecific(connector = YugabyteDBConnector.class)
public class SelectAllSnapshotQuery implements SnapshotQuery {
@Override
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/spi/SlotState.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/spi/SlotState.java
index 56485323d0b..f34af961777 100644
--- a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/spi/SlotState.java
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/spi/SlotState.java
@@ -17,12 +17,14 @@ public class SlotState {
private final Lsn restartLsn;
private final Long catalogXmin;
private final boolean active;
+ private final Long restartCommitHT;
- public SlotState(Lsn lastFlushLsn, Lsn restartLsn, Long catXmin, boolean active) {
+ public SlotState(Lsn lastFlushLsn, Lsn restartLsn, Long catXmin, boolean active, Long restartCommitHT) {
this.active = active;
this.latestFlushedLsn = lastFlushLsn;
this.restartLsn = restartLsn;
this.catalogXmin = catXmin;
+ this.restartCommitHT = restartCommitHT;
}
/**
@@ -52,4 +54,11 @@ public Long slotCatalogXmin() {
public boolean slotIsActive() {
return active;
}
+
+ /**
+ * @return the slot's `yb_restart_commit_ht` value
+ */
+ public Long slotRestartCommitHT() {
+ return restartCommitHT;
+ }
}
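The extended `SlotState` simply carries the extra slot column through to callers. A usage sketch under the new five-argument signature — all values are hypothetical, and `Lsn.valueOf` is assumed to accept the usual textual LSN form:

```java
import io.debezium.connector.postgresql.connection.Lsn;
import io.debezium.connector.postgresql.spi.SlotState;

class SlotStateSketch {
    public static void main(String[] args) {
        SlotState state = new SlotState(
                Lsn.valueOf("0/15D68B0"),   // confirmed_flush_lsn (hypothetical)
                Lsn.valueOf("0/15D6878"),   // restart_lsn (hypothetical)
                1234L,                      // catalog_xmin (hypothetical)
                true,                       // slot is active
                6822178296495501312L);      // yb_restart_commit_ht (hypothetical HybridTime)

        // The new accessor exposes the YugabyteDB-specific restart commit time.
        System.out.println(state.slotRestartCommitHT());
    }
}
```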
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/transforms/yugabytedb/Pair.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/transforms/yugabytedb/Pair.java
new file mode 100644
index 00000000000..8de3f6ceca7
--- /dev/null
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/transforms/yugabytedb/Pair.java
@@ -0,0 +1,54 @@
+package io.debezium.connector.postgresql.transforms.yugabytedb;
+
+import java.util.Objects;
+
+/**
+ * Helper structure to denote a pair of objects.
+ * @param <A> the type of the first element
+ * @param <B> the type of the second element
+ * @author Vaibhav Kushwaha (vkushwaha@yugabyte.com)
+ */
+public class Pair<A, B> {
+ private final A first;
+ private final B second;
+
+ public Pair(A first, B second) {
+ this.first = first;
+ this.second = second;
+ }
+
+ public A getFirst() {
+ return this.first;
+ }
+
+ public B getSecond() {
+ return this.second;
+ }
+
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ } else if (o != null && this.getClass() == o.getClass()) {
+ Pair<?, ?> pair = (Pair<?, ?>) o;
+ if (this.first != null) {
+ if (!this.first.equals(pair.first)) {
+ return false;
+ }
+ } else if (pair.first != null) {
+ return false;
+ }
+
+ if (this.second != null) {
+ return this.second.equals(pair.second);
+ } else {
+ return pair.second == null;
+ }
+ } else {
+ return false;
+ }
+ }
+
+ public int hashCode() {
+ return Objects.hash(this.first, this.second);
+ }
+}
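With the element-wise `equals` and the content-based `hashCode` above (`Objects.hash` rather than the identity hash of a fresh array), `Pair` instances compare by their contents. A quick demonstration:

```java
import io.debezium.connector.postgresql.transforms.yugabytedb.Pair;

class PairSketch {
    public static void main(String[] args) {
        Pair<String, Integer> a = new Pair<>("pk", 1);
        Pair<String, Integer> b = new Pair<>("pk", 1);

        System.out.println(a.equals(b));                   // true: element-wise comparison
        System.out.println(a.hashCode() == b.hashCode());  // true: consistent with equals
        System.out.println(a.equals(new Pair<>("pk", 2))); // false: second element differs
    }
}
```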
diff --git a/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/transforms/yugabytedb/YBExtractNewRecordState.java b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/transforms/yugabytedb/YBExtractNewRecordState.java
new file mode 100644
index 00000000000..17ed72e903b
--- /dev/null
+++ b/debezium-connector-postgres/src/main/java/io/debezium/connector/postgresql/transforms/yugabytedb/YBExtractNewRecordState.java
@@ -0,0 +1,123 @@
+package io.debezium.connector.postgresql.transforms.yugabytedb;
+
+import java.util.Map;
+import java.util.Objects;
+
+import org.apache.kafka.connect.connector.ConnectRecord;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.Schema.Type;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.apache.kafka.connect.data.Struct;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.debezium.transforms.ExtractNewRecordState;
+
+/**
+ * Custom extractor for YugabyteDB to be used when replica identity is CHANGE; this will be used
+ * to transform records from the format {@code fieldName:{value:"someValue",set:true}}
+ * to {@code fieldName:"someValue"} and omit the columns from the message which were not updated
+ * in the given change event.
+ * @param <R> the type of {@link ConnectRecord} the transform operates on
+ *
+ * @author Vaibhav Kushwaha (vkushwaha@yugabyte.com)
+ */
+public class YBExtractNewRecordState<R extends ConnectRecord<R>> extends ExtractNewRecordState<R> {
+ private static final Logger LOGGER = LoggerFactory.getLogger(YBExtractNewRecordState.class);
+
+ @Override
+ public R apply(final R record) {
+ final R ret = super.apply(record);
+ if (ret == null || (ret.value() != null && !(ret.value() instanceof Struct))) {
+ return ret;
+ }
+
+ Pair<Schema, Struct> p = getUpdatedValueAndSchema((Struct) ret.key());
+ Schema updatedSchemaForKey = p.getFirst();
+ Struct updatedValueForKey = p.getSecond();
+
+ Schema updatedSchemaForValue = null;
+ Struct updatedValueForValue = null;
+ if (ret.value() != null) {
+ Pair<Schema, Struct> val = getUpdatedValueAndSchema((Struct) ret.value());
+ updatedSchemaForValue = val.getFirst();
+ updatedValueForValue = val.getSecond();
+ }
+
+ return ret.newRecord(ret.topic(), ret.kafkaPartition(), updatedSchemaForKey, updatedValueForKey, updatedSchemaForValue, updatedValueForValue, ret.timestamp());
+ }
+
+ @Override
+ public void close() {
+ super.close();
+ }
+
+ private boolean isSimplifiableField(Field field) {
+ if (field.schema().type() != Type.STRUCT) {
+ return false;
+ }
+
+ return field.schema().fields().size() == 2
+ && (Objects.equals(field.schema().fields().get(0).name(), "value")
+ && Objects.equals(field.schema().fields().get(1).name(), "set"));
+ }
+
+ private Schema makeUpdatedSchema(Schema schema, Struct value) {
+ final SchemaBuilder builder = SchemaUtil.copySchemaBasics(schema, SchemaBuilder.struct());
+
+ for (Field field : schema.fields()) {
+ if (isSimplifiableField(field)) {
+ if (value.get(field.name()) != null) {
+ builder.field(field.name(), field.schema().field("value").schema());
+ }
+ }
+ else {
+ builder.field(field.name(), field.schema());
+ }
+ }
+
+ return builder.build();
+ }
+
+ private Pair<Schema, Struct> getUpdatedValueAndSchema(Struct obj) {
+ final Struct value = obj;
+ Schema updatedSchema = makeUpdatedSchema(value.schema(), value);
+
+ LOGGER.debug("Updated schema as json: {}", io.debezium.data.SchemaUtil.asString(value.schema()));
+
+ final Struct updatedValue = new Struct(updatedSchema);
+
+ for (Field field : value.schema().fields()) {
+ if (isSimplifiableField(field)) {
+ Struct fieldValue = (Struct) value.get(field);
+ if (fieldValue != null) {
+ updatedValue.put(field.name(), fieldValue.get("value"));
+ }
+ }
+ else {
+ updatedValue.put(field.name(), value.get(field));
+ }
+ }
+
+ return new Pair<>(updatedSchema, updatedValue);
+ }
+}
+
+class SchemaUtil {
+
+ public static SchemaBuilder copySchemaBasics(Schema source, SchemaBuilder builder) {
+ builder.name(source.name());
+ builder.version(source.version());
+ builder.doc(source.doc());
+
+ final Map<String, String> params = source.parameters();
+ if (params != null) {
+ builder.parameters(params);
+ }
+
+ return builder;
+ }
+
+}
+
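As a rough sketch of how the transform could be exercised programmatically — the configuration key shown is an inherited `ExtractNewRecordState` option, and a real record would need a full Debezium envelope for `apply` to do anything useful:

```java
import java.util.Map;

import org.apache.kafka.connect.source.SourceRecord;

import io.debezium.connector.postgresql.transforms.yugabytedb.YBExtractNewRecordState;

class YBTransformSketch {
    static SourceRecord flatten(SourceRecord record) {
        // Transformation extends Closeable, so try-with-resources works here.
        try (YBExtractNewRecordState<SourceRecord> smt = new YBExtractNewRecordState<>()) {
            smt.configure(Map.of("drop.tombstones", "false")); // inherited option (assumption)
            // fieldName:{value:"someValue",set:true} -> fieldName:"someValue";
            // columns whose wrapper struct is null (i.e. not updated under replica
            // identity CHANGE) are dropped from the rebuilt schema and value.
            return smt.apply(record);
        }
    }
}
```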
diff --git a/debezium-connector-postgres/src/main/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector b/debezium-connector-postgres/src/main/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector
index 58de09f1bbf..6c2385bd9e1 100644
--- a/debezium-connector-postgres/src/main/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector
+++ b/debezium-connector-postgres/src/main/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector
@@ -1 +1 @@
-io.debezium.connector.postgresql.PostgresConnector
\ No newline at end of file
+io.debezium.connector.postgresql.YugabyteDBConnector
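The service-registration change above is what makes Kafka Connect's plugin discovery resolve the YugabyteDB class; a plain `ServiceLoader` lookup shows the effect:

```java
import java.util.ServiceLoader;

import org.apache.kafka.connect.source.SourceConnector;

class DiscoverySketch {
    public static void main(String[] args) {
        // With the edited META-INF/services file on the classpath, this now prints
        // io.debezium.connector.postgresql.YugabyteDBConnector rather than PostgresConnector.
        for (SourceConnector connector : ServiceLoader.load(SourceConnector.class)) {
            System.out.println(connector.getClass().getName());
        }
    }
}
```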
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/AbstractRecordsProducerTest.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/AbstractRecordsProducerTest.java
index d223fc03d4c..b41d0459eb6 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/AbstractRecordsProducerTest.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/AbstractRecordsProducerTest.java
@@ -54,7 +54,7 @@
import org.apache.kafka.connect.source.SourceTask;
import org.junit.Rule;
import org.junit.rules.TestRule;
-import org.postgresql.jdbc.PgStatement;
+import com.yugabyte.jdbc.PgStatement;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -964,13 +964,6 @@ protected List<SchemaAndValueField> schemasAndValuesForCustomConverterTypes() {
}
protected List<SchemaAndValueField> schemasAndValuesForDomainAliasTypes(boolean streaming) {
- final ByteBuffer boxByteBuffer = ByteBuffer.wrap("(1.0,1.0),(0.0,0.0)".getBytes());
- final ByteBuffer circleByteBuffer = ByteBuffer.wrap("<(10.0,4.0),10.0>".getBytes());
- final ByteBuffer lineByteBuffer = ByteBuffer.wrap("{-1.0,0.0,0.0}".getBytes());
- final ByteBuffer lsegByteBuffer = ByteBuffer.wrap("[(0.0,0.0),(0.0,1.0)]".getBytes());
- final ByteBuffer pathByteBuffer = ByteBuffer.wrap("((0.0,0.0),(0.0,1.0),(0.0,2.0))".getBytes());
- final ByteBuffer polygonByteBuffer = ByteBuffer.wrap("((0.0,0.0),(0.0,1.0),(1.0,0.0),(0.0,0.0))".getBytes());
-
return Arrays.asList(
new SchemaAndValueField(PK_FIELD, SchemaBuilder.int32().defaultValue(0).build(), 1),
new SchemaAndValueField("bit_base", Bits.builder(3).build(), new byte[]{ 5 }),
@@ -1007,28 +1000,12 @@ protected List<SchemaAndValueField> schemasAndValuesForDomainAliasTypes(boolean
MicroDuration.durationMicros(1, 2, 3, 4, 5, 6, MicroDuration.DAYS_PER_MONTH_AVG)),
new SchemaAndValueField("interval_alias", MicroDuration.builder().build(),
MicroDuration.durationMicros(1, 2, 3, 4, 5, 6, MicroDuration.DAYS_PER_MONTH_AVG)),
- new SchemaAndValueField("box_base", SchemaBuilder.BYTES_SCHEMA, boxByteBuffer),
- new SchemaAndValueField("box_alias", SchemaBuilder.BYTES_SCHEMA, boxByteBuffer),
- new SchemaAndValueField("circle_base", SchemaBuilder.BYTES_SCHEMA, circleByteBuffer),
- new SchemaAndValueField("circle_alias", SchemaBuilder.BYTES_SCHEMA, circleByteBuffer),
- new SchemaAndValueField("line_base", SchemaBuilder.BYTES_SCHEMA, lineByteBuffer),
- new SchemaAndValueField("line_alias", SchemaBuilder.BYTES_SCHEMA, lineByteBuffer),
- new SchemaAndValueField("lseg_base", SchemaBuilder.BYTES_SCHEMA, lsegByteBuffer),
- new SchemaAndValueField("lseg_alias", SchemaBuilder.BYTES_SCHEMA, lsegByteBuffer),
- new SchemaAndValueField("path_base", SchemaBuilder.BYTES_SCHEMA, pathByteBuffer),
- new SchemaAndValueField("path_alias", SchemaBuilder.BYTES_SCHEMA, pathByteBuffer),
- new SchemaAndValueField("point_base", Point.builder().build(), Point.createValue(Point.builder().build(), 1, 1)),
- new SchemaAndValueField("point_alias", Point.builder().build(), Point.createValue(Point.builder().build(), 1, 1)),
- new SchemaAndValueField("polygon_base", SchemaBuilder.BYTES_SCHEMA, polygonByteBuffer),
- new SchemaAndValueField("polygon_alias", SchemaBuilder.BYTES_SCHEMA, polygonByteBuffer),
new SchemaAndValueField("char_base", SchemaBuilder.STRING_SCHEMA, "a"),
new SchemaAndValueField("char_alias", SchemaBuilder.STRING_SCHEMA, "a"),
new SchemaAndValueField("text_base", SchemaBuilder.STRING_SCHEMA, "Hello World"),
new SchemaAndValueField("text_alias", SchemaBuilder.STRING_SCHEMA, "Hello World"),
new SchemaAndValueField("json_base", Json.builder().build(), "{\"key\": \"value\"}"),
new SchemaAndValueField("json_alias", Json.builder().build(), "{\"key\": \"value\"}"),
- new SchemaAndValueField("xml_base", Xml.builder().build(), "Hello"),
- new SchemaAndValueField("xml_alias", Xml.builder().build(), "Hello"),
new SchemaAndValueField("uuid_base", Uuid.builder().build(), "40e6215d-b5c6-4896-987c-f30f3678f608"),
new SchemaAndValueField("uuid_alias", Uuid.builder().build(), "40e6215d-b5c6-4896-987c-f30f3678f608"),
new SchemaAndValueField("varbit_base", Bits.builder(3).build(), new byte[]{ 5 }),
@@ -1226,7 +1203,8 @@ public SchemaAndValueField assertWithCondition(final Condition valueCondition) {
}
protected void assertFor(Struct content) {
- assertSchema(content);
+ // YB Note: Not asserting the schema for every record in tests.
+ // assertSchema(content);
assertValue(content);
}
@@ -1235,11 +1213,12 @@ private void assertValue(Struct content) {
return;
}
+ Object actualValue = (content.get(fieldName) == null) ? null : content.getStruct(fieldName).get("value");
+
if (value == null) {
- assertNull(fieldName + " is present in the actual content", content.get(fieldName));
+ assertNull(fieldName + " is present in the actual content", actualValue);
return;
}
- Object actualValue = content.get(fieldName);
// assert the value type; for List all implementation types (e.g. immutable ones) are acceptable
if (actualValue instanceof List) {
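The assertion changes above reflect the YugabyteDB payload shape, where each column arrives wrapped as a `{value, set}` struct instead of a bare value. A self-contained sketch of building and unwrapping such a field, with the wrapper layout assumed from the tests:

```java
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

class WrappedValueSketch {
    public static void main(String[] args) {
        // Wrapper schema assumed from the tests: {value: optional int32, set: boolean}
        Schema wrapper = SchemaBuilder.struct()
                .field("value", Schema.OPTIONAL_INT32_SCHEMA)
                .field("set", Schema.BOOLEAN_SCHEMA)
                .optional()
                .build();
        Schema row = SchemaBuilder.struct().field("aa", wrapper).build();

        Struct content = new Struct(row)
                .put("aa", new Struct(wrapper).put("value", 1).put("set", true));

        // Mirrors the unwrapping the test now performs:
        Object actualValue = content.getStruct("aa").get("value");
        System.out.println(actualValue); // 1
    }
}
```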
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/BlockingSnapshotIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/BlockingSnapshotIT.java
index 06d1be97719..1032c5bcdbf 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/BlockingSnapshotIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/BlockingSnapshotIT.java
@@ -84,8 +84,8 @@ protected Configuration.Builder mutableConfig(boolean signalTableOnly, boolean s
}
@Override
- protected Class<PostgresConnector> connectorClass() {
- return PostgresConnector.class;
+ protected Class<YugabyteDBConnector> connectorClass() {
+ return YugabyteDBConnector.class;
}
@Override
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/CloudEventsConverterIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/CloudEventsConverterIT.java
index d7084f272a4..e95a7913cfe 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/CloudEventsConverterIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/CloudEventsConverterIT.java
@@ -16,11 +16,11 @@
import io.debezium.jdbc.JdbcConnection;
/**
- * Integration test for {@link io.debezium.converters.CloudEventsConverter} with {@link PostgresConnector}
+ * Integration test for {@link io.debezium.converters.CloudEventsConverter} with {@link YugabyteDBConnector}
*
* @author Roman Kudryashov
*/
-public class CloudEventsConverterIT extends AbstractCloudEventsConverterTest<PostgresConnector> {
+public class CloudEventsConverterIT extends AbstractCloudEventsConverterTest<YugabyteDBConnector> {
private static final String SETUP_SCHEMA = "DROP SCHEMA IF EXISTS s1 CASCADE;" +
"CREATE SCHEMA s1;";
@@ -51,8 +51,8 @@ public void beforeEach() throws Exception {
}
@Override
- protected Class<PostgresConnector> getConnectorClass() {
- return PostgresConnector.class;
+ protected Class<YugabyteDBConnector> getConnectorClass() {
+ return YugabyteDBConnector.class;
}
@Override
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/CustomSnapshotterIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/CustomSnapshotterIT.java
index 055c36c0ece..17f42548e2d 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/CustomSnapshotterIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/CustomSnapshotterIT.java
@@ -74,7 +74,7 @@ public void shouldAllowForCustomSnapshot() throws InterruptedException {
.with(PostgresConnectorConfig.SNAPSHOT_QUERY_MODE_CUSTOM_NAME, CustomTestSnapshot.class.getName())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
SourceRecords actualRecords = consumeRecordsByTopic(1);
@@ -107,7 +107,7 @@ record = s2recs.get(0);
.with(PostgresConnectorConfig.SNAPSHOT_QUERY_MODE_CUSTOM_NAME, CustomTestSnapshot.class.getName())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
actualRecords = consumeRecordsByTopic(4);
@@ -133,7 +133,7 @@ public void shouldAllowStreamOnlyByConfigurationBasedSnapshot() throws Interrupt
.with(PostgresConnectorConfig.SNAPSHOT_QUERY_MODE_CUSTOM_NAME, CustomTestSnapshot.class.getName())
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
assertNoRecordsToConsume();
@@ -166,7 +166,7 @@ public void shouldNotAllowStreamByConfigurationBasedSnapshot() {
.with(PostgresConnectorConfig.SNAPSHOT_QUERY_MODE_CUSTOM_NAME, CustomTestSnapshot.class.getName())
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
assertNoRecordsToConsume();
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/DebeziumEngineIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/DebeziumEngineIT.java
index 84b62a93612..d0bcc6dc565 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/DebeziumEngineIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/DebeziumEngineIT.java
@@ -84,7 +84,7 @@ public void shouldSerializeToJson() throws Exception {
final Properties props = new Properties();
props.putAll(TestHelper.defaultConfig().build().asMap());
props.setProperty("name", "debezium-engine");
- props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
+ props.setProperty("connector.class", "io.debezium.connector.postgresql.YugabyteDBConnector");
props.setProperty(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
props.setProperty("offset.flush.interval.ms", "0");
@@ -97,14 +97,18 @@ public void shouldSerializeToJson() throws Exception {
.notifying((records, committer) -> {
for (ChangeEvent<String, String> r : records) {
+ if (r.destination().equals(TestHelper.getDefaultHeartbeatTopic())) {
+ continue;
+ }
+
assertThat(r.key()).isNotNull();
assertThat(r.value()).isNotNull();
try {
final Document key = DocumentReader.defaultReader().read(r.key());
final Document value = DocumentReader.defaultReader().read(r.value());
- assertThat(key.getInteger("id")).isEqualTo(1);
- assertThat(value.getDocument("after").getInteger("id")).isEqualTo(1);
- assertThat(value.getDocument("after").getString("val")).isEqualTo("value1");
+ assertThat(key.getDocument("id").getInteger("value")).isEqualTo(1);
+ assertThat(value.getDocument("after").getDocument("id").getInteger("value")).isEqualTo(1);
+ assertThat(value.getDocument("after").getDocument("val").getString("value")).isEqualTo("value1");
}
catch (IOException e) {
throw new IllegalStateException(e);
@@ -119,7 +123,7 @@ public void shouldSerializeToJson() throws Exception {
LoggingContext.forConnector(getClass().getSimpleName(), "debezium-engine", "engine");
engine.run();
});
- allLatch.await(5000, TimeUnit.MILLISECONDS);
+ allLatch.await(35000, TimeUnit.MILLISECONDS);
assertThat(allLatch.getCount()).isEqualTo(0);
}
}
@@ -131,7 +135,7 @@ public void shouldSerializeToAvro() throws Exception {
final Properties props = new Properties();
props.putAll(TestHelper.defaultConfig().build().asMap());
props.setProperty("name", "debezium-engine");
- props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
+ props.setProperty("connector.class", "io.debezium.connector.postgresql.YugabyteDBConnector");
props.setProperty(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
props.setProperty("offset.flush.interval.ms", "0");
@@ -160,7 +164,7 @@ public void handle(boolean success, String message, Throwable error) {
LoggingContext.forConnector(getClass().getSimpleName(), "debezium-engine", "engine");
engine.run();
});
- allLatch.await(5000, TimeUnit.MILLISECONDS);
+ allLatch.await(35000, TimeUnit.MILLISECONDS);
assertThat(allLatch.getCount()).isEqualTo(0);
}
}
@@ -171,7 +175,7 @@ public void shouldSerializeToCloudEvents() throws Exception {
final Properties props = new Properties();
props.putAll(TestHelper.defaultConfig().build().asMap());
props.setProperty("name", "debezium-engine");
- props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
+ props.setProperty("connector.class", "io.debezium.connector.postgresql.YugabyteDBConnector");
props.setProperty(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
props.setProperty("offset.flush.interval.ms", "0");
@@ -185,14 +189,18 @@ public void shouldSerializeToCloudEvents() throws Exception {
for (ChangeEvent<String, String> r : records) {
try {
+ if (r.destination().equals(TestHelper.getDefaultHeartbeatTopic())) {
+ continue;
+ }
+
final Document key = DocumentReader.defaultReader().read(r.key());
- assertThat(key.getInteger("id")).isEqualTo(1);
+ assertThat(key.getDocument("id").getInteger("value")).isEqualTo(1);
assertThat(r.value()).isNotNull();
final Document value = DocumentReader.defaultReader().read(r.value());
assertThat(value.getString("id")).contains("txId");
- assertThat(value.getDocument("data").getDocument("payload").getDocument("after").getInteger("id")).isEqualTo(1);
- assertThat(value.getDocument("data").getDocument("payload").getDocument("after").getString("val")).isEqualTo("value1");
+ assertThat(value.getDocument("data").getDocument("payload").getDocument("after").getDocument("id").getInteger("value")).isEqualTo(1);
+ assertThat(value.getDocument("data").getDocument("payload").getDocument("after").getDocument("val").getString("value")).isEqualTo("value1");
}
catch (IOException e) {
throw new IllegalStateException(e);
@@ -207,7 +215,7 @@ public void shouldSerializeToCloudEvents() throws Exception {
LoggingContext.forConnector(getClass().getSimpleName(), "debezium-engine", "engine");
engine.run();
});
- allLatch.await(5000, TimeUnit.MILLISECONDS);
+ allLatch.await(35000, TimeUnit.MILLISECONDS);
assertThat(allLatch.getCount()).isEqualTo(0);
}
}
@@ -288,7 +296,7 @@ public void testOffsetsCommitAfterStop() throws Exception {
final Properties props = new Properties();
props.putAll(TestHelper.defaultConfig().build().asMap());
props.setProperty("name", "debezium-engine");
- props.setProperty("connector.class", "io.debezium.connector.postgresql.PostgresConnector");
+ props.setProperty("connector.class", "io.debezium.connector.postgresql.YugabyteDBConnector");
props.setProperty(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG,
OFFSET_STORE_PATH.toAbsolutePath().toString());
props.setProperty("offset.flush.interval.ms", "3000");
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/DomainTypesIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/DomainTypesIT.java
index 4e02c7f298a..9e245d0f6c3 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/DomainTypesIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/DomainTypesIT.java
@@ -43,7 +43,7 @@ public void before() throws SQLException {
@Test
@FixFor("DBZ-3657")
public void shouldNotChokeOnDomainTypeInArray() throws Exception {
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "domaintypes")
.build());
@@ -63,7 +63,7 @@ public void shouldNotChokeOnDomainTypeInArray() throws Exception {
@Test
@FixFor("DBZ-3657")
public void shouldExportDomainTypeInArrayAsUnknown() throws Exception {
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "domaintypes")
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/IncrementalSnapshotIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/IncrementalSnapshotIT.java
index 15a85688da6..22f957b0ce4 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/IncrementalSnapshotIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/IncrementalSnapshotIT.java
@@ -12,6 +12,7 @@
import java.io.File;
import java.sql.SQLException;
+import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -25,6 +26,8 @@
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import io.debezium.config.CommonConnectorConfig;
import io.debezium.config.Configuration;
@@ -43,7 +46,9 @@
import io.debezium.util.Collect;
import io.debezium.util.Testing;
-public class IncrementalSnapshotIT extends AbstractIncrementalSnapshotTest<PostgresConnector> {
+public class IncrementalSnapshotIT extends AbstractIncrementalSnapshotTest<YugabyteDBConnector> {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(IncrementalSnapshotIT.class);
private static final String TOPIC_NAME = "test_server.s1.a";
@@ -53,8 +58,8 @@ public class IncrementalSnapshotIT extends AbstractIncrementalSnapshotTest<PostgresConnector>
- protected Class<PostgresConnector> connectorClass() {
- return PostgresConnector.class;
+ protected Class<YugabyteDBConnector> connectorClass() {
+ return YugabyteDBConnector.class;
}
@Override
@@ -182,7 +186,9 @@ protected String signalTableName() {
@Override
protected void waitForConnectorToStart() {
super.waitForConnectorToStart();
- TestHelper.waitForDefaultReplicationSlotBeActive();
+ if (!YugabyteDBServer.isEnabled()) {
+ TestHelper.waitForDefaultReplicationSlotBeActive();
+ }
}
@Override
@@ -232,10 +238,13 @@ public void inserts4Pks() throws Exception {
populate4PkTable();
startConnector();
+ TestHelper.waitFor(Duration.ofMinutes(1));
+ LOGGER.info("Sending signal to table s1.a4");
sendAdHocSnapshotSignal("s1.a4");
Thread.sleep(5000);
+ LOGGER.info("Inserting more records into the table s1.a4");
try (JdbcConnection connection = databaseConnection()) {
connection.setAutoCommit(false);
for (int i = 0; i < ROW_COUNT; i++) {
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/LogicalDecodingMessageIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/LogicalDecodingMessageIT.java
index f5a28ac5622..587c08b8010 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/LogicalDecodingMessageIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/LogicalDecodingMessageIT.java
@@ -76,7 +76,7 @@ public void shouldNotConsumeLogicalDecodingMessagesWhenAllPrefixesAreInTheExclud
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.LOGICAL_DECODING_MESSAGE_PREFIX_EXCLUDE_LIST, ".*");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -99,7 +99,7 @@ public void shouldConsumeNonTransactionalLogicalDecodingMessages() throws Except
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig();
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -138,7 +138,7 @@ public void shouldConsumeTransactionalLogicalDecodingMessages() throws Exception
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig();
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -180,7 +180,7 @@ public void shouldApplyBinaryHandlingMode() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.BINARY_HANDLING_MODE, "base64");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -206,7 +206,7 @@ public void shouldNotConsumeLogicalDecodingMessagesWithExcludedPrefixes() throws
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.LOGICAL_DECODING_MESSAGE_PREFIX_EXCLUDE_LIST, "excluded_prefix, prefix:excluded");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -232,7 +232,7 @@ public void shouldOnlyConsumeLogicalDecodingMessagesWithIncludedPrefixes() throw
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.LOGICAL_DECODING_MESSAGE_PREFIX_INCLUDE_LIST, "included_prefix, prefix:included, ano.*er_included");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/NotificationsIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/NotificationsIT.java
index 8c6d01c4167..7c443c4f5fa 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/NotificationsIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/NotificationsIT.java
@@ -14,7 +14,7 @@
import io.debezium.config.Configuration;
import io.debezium.pipeline.notification.AbstractNotificationsIT;
-public class NotificationsIT extends AbstractNotificationsIT<PostgresConnector> {
+public class NotificationsIT extends AbstractNotificationsIT<YugabyteDBConnector> {
@Before
public void before() throws SQLException {
@@ -31,8 +31,8 @@ public void after() {
}
@Override
- protected Class<PostgresConnector> connectorClass() {
- return PostgresConnector.class;
+ protected Class<YugabyteDBConnector> connectorClass() {
+ return YugabyteDBConnector.class;
}
@Override
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/OutboxEventRouterIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/OutboxEventRouterIT.java
index 66458595a82..666b2aba902 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/OutboxEventRouterIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/OutboxEventRouterIT.java
@@ -19,11 +19,11 @@
import io.debezium.transforms.outbox.AbstractEventRouterTest;
/**
- * Integration test for {@link io.debezium.transforms.outbox.EventRouter} with {@link PostgresConnector}
+ * Integration test for {@link io.debezium.transforms.outbox.EventRouter} with {@link YugabyteDBConnector}
*
* @author Renato Mefi (gh@mefi.in)
*/
-public class OutboxEventRouterIT extends AbstractEventRouterTest<PostgresConnector> {
+public class OutboxEventRouterIT extends AbstractEventRouterTest<YugabyteDBConnector> {
private static final String SETUP_OUTBOX_SCHEMA = "DROP SCHEMA IF EXISTS outboxsmtit CASCADE;" +
"CREATE SCHEMA outboxsmtit;";
@@ -47,8 +47,8 @@ public void beforeEach() throws Exception {
}
@Override
- protected Class<PostgresConnector> getConnectorClass() {
- return PostgresConnector.class;
+ protected Class<YugabyteDBConnector> getConnectorClass() {
+ return YugabyteDBConnector.class;
}
@Override
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresConnectorConfigDefTest.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresConnectorConfigDefTest.java
index debbe9b8227..c4e9453fc19 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresConnectorConfigDefTest.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresConnectorConfigDefTest.java
@@ -15,7 +15,7 @@
public class PostgresConnectorConfigDefTest extends ConfigDefinitionMetadataTest {
public PostgresConnectorConfigDefTest() {
- super(new PostgresConnector());
+ super(new YugabyteDBConnector());
}
@Test
@@ -53,4 +53,57 @@ public void shouldSetReplicaAutoSetRegExValue() {
assertThat((problemCount == 0)).isTrue();
}
+
+ @Test
+ public void shouldValidateWithCorrectSingleHostnamePattern() {
+ validateCorrectHostname(false);
+ }
+
+ @Test
+ public void shouldValidateWithCorrectMultiHostnamePattern() {
+ validateCorrectHostname(true);
+ }
+
+ @Test
+ public void shouldFailWithInvalidCharacterInHostname() {
+ Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.HOSTNAME, "*invalidCharacter");
+
+ int problemCount = PostgresConnectorConfig.validateYBHostname(
+ configBuilder.build(), PostgresConnectorConfig.HOSTNAME, (field, value, problemMessage) -> System.out.println(problemMessage));
+
+ assertThat((problemCount == 1)).isTrue();
+ }
+
+ @Test
+ public void shouldFailIfInvalidMultiHostFormatSpecified() {
+ Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.HOSTNAME, "127.0.0.1,127.0.0.2,127.0.0.3");
+
+ int problemCount = PostgresConnectorConfig.validateYBHostname(
+ configBuilder.build(), PostgresConnectorConfig.HOSTNAME, (field, value, problemMessage) -> System.out.println(problemMessage));
+
+ assertThat((problemCount == 1)).isTrue();
+ }
+
+ @Test
+ public void shouldFailIfInvalidMultiHostFormatSpecifiedWithInvalidCharacter() {
+ Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.HOSTNAME, "127.0.0.1,127.0.0.2,127.0.0.3+");
+
+ int problemCount = PostgresConnectorConfig.validateYBHostname(
+ configBuilder.build(), PostgresConnectorConfig.HOSTNAME, (field, value, problemMessage) -> System.out.println(problemMessage));
+
+ assertThat((problemCount == 2)).isTrue();
+ }
+
+ public void validateCorrectHostname(boolean multiNode) {
+ Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.HOSTNAME, multiNode ? "127.0.0.1:5433,127.0.0.2:5433,127.0.0.3:5433" : "127.0.0.1");
+
+ int problemCount = PostgresConnectorConfig.validateYBHostname(
+ configBuilder.build(), PostgresConnectorConfig.HOSTNAME, (field, value, problemMessage) -> System.out.println(problemMessage));
+
+ assertThat((problemCount == 0)).isTrue();
+ }
}
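The expected problem counts in these tests imply two independent checks: a character allow-list, and, when a comma-separated list is given, a `host:port` shape for every entry. A hedged sketch consistent with those counts — the real `validateYBHostname` implementation may differ:

```java
import java.util.regex.Pattern;

class HostnameValidationSketch {
    // Allow-list of hostname characters; trailing '-' keeps the dash literal.
    private static final Pattern VALID_CHARS = Pattern.compile("^[a-zA-Z0-9_.:,-]+$");

    // One problem for an invalid character, one more when a multi-host entry lacks a port.
    static int countProblems(String hostname) {
        int problems = 0;
        if (!VALID_CHARS.matcher(hostname).matches()) {
            problems++;
        }
        if (hostname.contains(",")) {
            for (String node : hostname.split(",")) {
                if (!node.contains(":")) {
                    problems++; // multi-host entries must be host:port
                    break;
                }
            }
        }
        return problems;
    }

    public static void main(String[] args) {
        System.out.println(countProblems("127.0.0.1"));                                    // 0
        System.out.println(countProblems("127.0.0.1:5433,127.0.0.2:5433,127.0.0.3:5433")); // 0
        System.out.println(countProblems("*invalidCharacter"));                            // 1
        System.out.println(countProblems("127.0.0.1,127.0.0.2,127.0.0.3"));                // 1
        System.out.println(countProblems("127.0.0.1,127.0.0.2,127.0.0.3+"));               // 2
    }
}
```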
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresConnectorIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresConnectorIT.java
index e23a081e368..5ff44f3176b 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresConnectorIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresConnectorIT.java
@@ -37,11 +37,16 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiFunction;
+import java.util.function.BiPredicate;
+import java.util.function.Consumer;
import java.util.function.Predicate;
+import java.util.stream.Collectors;
import java.util.stream.IntStream;
import javax.management.InstanceNotFoundException;
+import io.debezium.custom.snapshotter.CustomTestSnapshot;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.kafka.common.config.Config;
import org.apache.kafka.common.config.ConfigDef;
@@ -60,9 +65,11 @@
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
-import org.postgresql.util.PSQLState;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.yugabyte.util.PSQLState;
import io.debezium.DebeziumException;
import io.debezium.config.CommonConnectorConfig;
@@ -85,7 +92,6 @@
import io.debezium.connector.postgresql.spi.SlotState;
import io.debezium.converters.CloudEventsConverterTest;
import io.debezium.data.Envelope;
-import io.debezium.data.VerifyRecord;
import io.debezium.doc.FixFor;
import io.debezium.embedded.AbstractConnectorTest;
import io.debezium.engine.DebeziumEngine;
@@ -105,26 +111,27 @@
import io.debezium.util.Strings;
/**
- * Integration test for {@link PostgresConnector} using an {@link io.debezium.engine.DebeziumEngine}
+ * Integration test for {@link YugabyteDBConnector} using an {@link io.debezium.engine.DebeziumEngine}
*
* @author Horia Chiorean (hchiorea@redhat.com)
*/
public class PostgresConnectorIT extends AbstractConnectorTest {
+ private static final Logger LOGGER = LoggerFactory.getLogger(PostgresConnectorIT.class);
/*
* Specific tests that need to extend the initial DDL set should do it in a form of
* TestHelper.execute(SETUP_TABLES_STMT + ADDITIONAL_STATEMENTS)
*/
- private static final String INSERT_STMT = "INSERT INTO s1.a (aa) VALUES (1);" +
+ protected static final String INSERT_STMT = "INSERT INTO s1.a (aa) VALUES (1);" +
"INSERT INTO s2.a (aa) VALUES (1);";
- private static final String CREATE_TABLES_STMT = "DROP SCHEMA IF EXISTS s1 CASCADE;" +
+ protected static final String CREATE_TABLES_STMT = "DROP SCHEMA IF EXISTS s1 CASCADE;" +
"DROP SCHEMA IF EXISTS s2 CASCADE;" +
"CREATE SCHEMA s1; " +
"CREATE SCHEMA s2; " +
"CREATE TABLE s1.a (pk SERIAL, aa integer, PRIMARY KEY(pk));" +
"CREATE TABLE s2.a (pk SERIAL, aa integer, bb varchar(20), PRIMARY KEY(pk));";
- private static final String SETUP_TABLES_STMT = CREATE_TABLES_STMT + INSERT_STMT;
- private PostgresConnector connector;
+ protected static final String SETUP_TABLES_STMT = CREATE_TABLES_STMT + INSERT_STMT;
+ private YugabyteDBConnector connector;
@Rule
public final TestRule skipName = new SkipTestDependingOnDecoderPluginNameRule();
@@ -148,7 +155,7 @@ public void after() {
@Test
public void shouldValidateConnectorConfigDef() {
- connector = new PostgresConnector();
+ connector = new YugabyteDBConnector();
ConfigDef configDef = connector.config();
assertThat(configDef).isNotNull();
PostgresConnectorConfig.ALL_FIELDS.forEach(this::validateFieldDef);
@@ -161,7 +168,7 @@ public void shouldNotStartWithInvalidConfiguration() throws Exception {
// we expect the engine will log at least one error, so preface it ...
logger.info("Attempting to start the connector with an INVALID configuration, so MULTIPLE error messages & one exceptions will appear in the log");
- start(PostgresConnector.class, config, (success, msg, error) -> {
+ start(YugabyteDBConnector.class, config, (success, msg, error) -> {
assertThat(success).isFalse();
assertThat(error).isNotNull();
});
@@ -171,11 +178,12 @@ public void shouldNotStartWithInvalidConfiguration() throws Exception {
@Test
public void shouldValidateMinimalConfiguration() throws Exception {
Configuration config = TestHelper.defaultConfig().build();
- Config validateConfig = new PostgresConnector().validate(config.asMap());
+ Config validateConfig = new YugabyteDBConnector().validate(config.asMap());
validateConfig.configValues().forEach(configValue -> assertTrue("Unexpected error for: " + configValue.name(),
configValue.errorMessages().isEmpty()));
}
+ @Ignore("Requires postgis")
@Test
public void shouldNotStartWithInvalidSlotConfigAndUserRoles() throws Exception {
// Start with a clean slate and create database objects
@@ -190,7 +198,7 @@ public void shouldNotStartWithInvalidSlotConfigAndUserRoles() throws Exception {
.with(PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning();
Configuration failingConfig = TestHelper.defaultConfig()
@@ -200,7 +208,7 @@ public void shouldNotStartWithInvalidSlotConfigAndUserRoles() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA)
.with(PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME)
.build();
- List<ConfigValue> validatedConfig = new PostgresConnector().validate(failingConfig.asMap()).configValues();
+ List<ConfigValue> validatedConfig = new YugabyteDBConnector().validate(failingConfig.asMap()).configValues();
final List<String> invalidProperties = Collections.singletonList("database.user");
validatedConfig.forEach(
@@ -215,7 +223,7 @@ public void shouldNotStartWithInvalidSlotConfigAndUserRoles() throws Exception {
public void shouldValidateConfiguration() throws Exception {
// use an empty configuration which should be invalid because of the lack of DB connection details
Configuration config = Configuration.create().build();
- PostgresConnector connector = new PostgresConnector();
+ YugabyteDBConnector connector = new YugabyteDBConnector();
Config validatedConfig = connector.validate(config.asMap());
// validate that the required fields have errors
assertConfigurationErrors(validatedConfig, PostgresConnectorConfig.HOSTNAME, 1);
@@ -224,7 +232,7 @@ public void shouldValidateConfiguration() throws Exception {
assertConfigurationErrors(validatedConfig, CommonConnectorConfig.TOPIC_PREFIX, 1);
// validate the non required fields
- validateConfigField(validatedConfig, PostgresConnectorConfig.PLUGIN_NAME, LogicalDecoder.DECODERBUFS.getValue());
+ validateConfigField(validatedConfig, PostgresConnectorConfig.PLUGIN_NAME, LogicalDecoder.YBOUTPUT.getValue());
validateConfigField(validatedConfig, PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME);
validateConfigField(validatedConfig, PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
validateConfigField(validatedConfig, PostgresConnectorConfig.PORT, PostgresConnectorConfig.DEFAULT_PORT);
@@ -253,6 +261,18 @@ public void shouldValidateConfiguration() throws Exception {
validateConfigField(validatedConfig, PostgresConnectorConfig.TCP_KEEPALIVE, true);
validateConfigField(validatedConfig, PostgresConnectorConfig.LOGICAL_DECODING_MESSAGE_PREFIX_EXCLUDE_LIST, null);
validateConfigField(validatedConfig, PostgresConnectorConfig.LOGICAL_DECODING_MESSAGE_PREFIX_INCLUDE_LIST, null);
+ validateConfigField(validatedConfig, PostgresConnectorConfig.YB_CONSISTENT_SNAPSHOT, Boolean.TRUE);
+ }
+
+ @Test
+ public void shouldThrowErrorIfDecimalHandlingModePreciseIsUsed() throws Exception {
+ Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, RelationalDatabaseConnectorConfig.DecimalHandlingMode.PRECISE);
+
+ start(YugabyteDBConnector.class, configBuilder.build(), (success, msg, error) -> {
+ assertFalse(success);
+ assertThat(error.getMessage().contains("Decimal handling mode PRECISE is unsupported, please use DOUBLE or STRING")).isTrue();
+ });
}
@Test
@@ -260,7 +280,7 @@ public void shouldValidateReplicationSlotName() throws Exception {
Configuration config = Configuration.create()
.with(PostgresConnectorConfig.SLOT_NAME, "xx-aa")
.build();
- PostgresConnector connector = new PostgresConnector();
+ YugabyteDBConnector connector = new YugabyteDBConnector();
Config validatedConfig = connector.validate(config.asMap());
assertConfigurationErrors(validatedConfig, PostgresConnectorConfig.SLOT_NAME, 1);
@@ -272,7 +292,7 @@ public void shouldSupportSSLParameters() throws Exception {
// SSL is enabled
Configuration config = TestHelper.defaultConfig().with(PostgresConnectorConfig.SSL_MODE,
PostgresConnectorConfig.SecureConnectionMode.REQUIRED).build();
- start(PostgresConnector.class, config, (success, msg, error) -> {
+ start(YugabyteDBConnector.class, config, (success, msg, error) -> {
if (TestHelper.shouldSSLConnectionFail()) {
// we expect the task to fail at startup when we're printing the server info
assertThat(success).isFalse();
@@ -298,7 +318,7 @@ public void shouldProduceEventsWithInitialSnapshot() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
// check the records from the snapshot
@@ -317,17 +337,51 @@ public void shouldProduceEventsWithInitialSnapshot() throws Exception {
// start the connector back up and check that a new snapshot has not been performed (we're running initial only mode)
// but the 2 records that we were inserted while we were down will be retrieved
- start(PostgresConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
+ start(YugabyteDBConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
assertConnectorIsRunning();
assertRecordsAfterInsert(2, 3, 3);
}
+ @Test
+ public void initialSnapshotWithExistingSlot() throws Exception {
+ TestHelper.execute(SETUP_TABLES_STMT);
+ Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
+
+ start(YugabyteDBConnector.class, configBuilder.build());
+ assertConnectorIsRunning();
+
+ waitForStreamingRunning();
+
+ // now stop the connector
+ stopConnector();
+ assertNoRecordsToConsume();
+
+ // insert some more records
+ TestHelper.execute(INSERT_STMT);
+
+ // check the records from the snapshot
+ // start the connector back up and perform snapshot with an existing slot
+ // but the 2 records that were inserted while we were down will NOT be retrieved
+ // as part of the snapshot. These records will be retrieved as part of streaming
+ Configuration.Builder configBuilderInitial = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE);
+
+ start(YugabyteDBConnector.class, configBuilderInitial.build());
+ assertConnectorIsRunning();
+
+ assertRecordsFromSnapshot(2, 1, 1);
+ assertRecordsAfterInsert(2, 2, 2);
+ }
+
@Test
@FixFor("DBZ-1235")
public void shouldUseMillisecondsForTransactionCommitTime() throws InterruptedException {
TestHelper.execute(SETUP_TABLES_STMT);
- start(PostgresConnector.class, TestHelper.defaultConfig().build());
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig().build());
assertConnectorIsRunning();
// check records from snapshot
@@ -361,7 +415,7 @@ public void shouldConsumeMessagesFromSnapshot() throws Exception {
.with(PostgresConnectorConfig.MAX_QUEUE_SIZE, recordCount / 2)
.with(PostgresConnectorConfig.MAX_BATCH_SIZE, 10)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -382,7 +436,7 @@ public void shouldConsumeMessagesFromSnapshotOld() throws Exception {
.with(PostgresConnectorConfig.MAX_QUEUE_SIZE, recordCount / 2)
.with(PostgresConnectorConfig.MAX_BATCH_SIZE, 10)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -413,7 +467,7 @@ public void shouldReceiveChangesForChangePKColumnDefinition() throws Exception {
"CREATE TABLE changepk.test_table (pk SERIAL, text TEXT, PRIMARY KEY(pk));",
"INSERT INTO changepk.test_table(text) VALUES ('insert');");
- start(PostgresConnector.class, config.getConfig());
+ start(YugabyteDBConnector.class, config.getConfig());
assertConnectorIsRunning();
@@ -429,7 +483,7 @@ public void shouldReceiveChangesForChangePKColumnDefinition() throws Exception {
SourceRecord insertRecord = records.recordsForTopic(topicName).get(0);
assertEquals(topicName, insertRecord.topic());
- VerifyRecord.isValidInsert(insertRecord, "newpk", 2);
+ YBVerifyRecord.isValidInsert(insertRecord, "newpk", 2);
TestHelper.execute(
"ALTER TABLE changepk.test_table ADD COLUMN pk2 SERIAL;"
@@ -440,8 +494,8 @@ public void shouldReceiveChangesForChangePKColumnDefinition() throws Exception {
insertRecord = records.recordsForTopic(topicName).get(0);
assertEquals(topicName, insertRecord.topic());
- VerifyRecord.isValidInsert(insertRecord, newPkField, 3);
- VerifyRecord.isValidInsert(insertRecord, "pk2", 8);
+ YBVerifyRecord.isValidInsert(insertRecord, newPkField, 3);
+ YBVerifyRecord.isValidInsert(insertRecord, "pk2", 8);
stopConnector();
@@ -454,13 +508,13 @@ public void shouldReceiveChangesForChangePKColumnDefinition() throws Exception {
+ "ALTER TABLE changepk.test_table ADD PRIMARY KEY(newpk,pk3);"
+ "INSERT INTO changepk.test_table VALUES(5, 'dropandaddpkcol',10)");
- start(PostgresConnector.class, config.getConfig());
+ start(YugabyteDBConnector.class, config.getConfig());
records = consumeRecordsByTopic(2);
insertRecord = records.recordsForTopic(topicName).get(0);
assertEquals(topicName, insertRecord.topic());
- VerifyRecord.isValidInsert(insertRecord, newPkField, 4);
+ YBVerifyRecord.isValidInsert(insertRecord, newPkField, 4);
Struct key = (Struct) insertRecord.key();
// The problematic record PK info is temporarily desynced
assertThat(key.schema().field("pk2")).isNull();
@@ -468,8 +522,8 @@ public void shouldReceiveChangesForChangePKColumnDefinition() throws Exception {
insertRecord = records.recordsForTopic(topicName).get(1);
assertEquals(topicName, insertRecord.topic());
- VerifyRecord.isValidInsert(insertRecord, newPkField, 5);
- VerifyRecord.isValidInsert(insertRecord, "pk3", 10);
+ YBVerifyRecord.isValidInsert(insertRecord, newPkField, 5);
+ YBVerifyRecord.isValidInsert(insertRecord, "pk3", 10);
key = (Struct) insertRecord.key();
assertThat(key.schema().field("pk2")).isNull();
@@ -489,11 +543,22 @@ public void shouldReceiveChangesForChangePKColumnDefinition() throws Exception {
}
}
+ @Ignore("Will require a complete test refactor")
@Test
public void shouldReceiveChangesForChangeColumnDefault() throws Exception {
// Testing.Print.enable();
final String slotName = "default_change" + new Random().nextInt(100);
TestHelper.create().dropReplicationSlot(slotName);
+
+ // YB Note: Creating a table before deploying the connector since dynamic table addition is
+ // not supported yet.
+ TestHelper.execute(
+ "CREATE SCHEMA IF NOT EXISTS default_change;",
+ "DROP TABLE IF EXISTS default_change.test_table;",
+ "CREATE TABLE default_change.test_table (pk SERIAL, i INT DEFAULT 1, text TEXT DEFAULT 'foo', PRIMARY KEY(pk));");
+
+ TestHelper.execute("INSERT INTO default_change.test_table(i, text) VALUES (DEFAULT, DEFAULT);");
+
try {
final PostgresConnectorConfig config = new PostgresConnectorConfig(TestHelper.defaultConfig()
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, Boolean.FALSE)
@@ -504,13 +569,7 @@ public void shouldReceiveChangesForChangeColumnDefault() throws Exception {
final String topicName = topicName("default_change.test_table");
- TestHelper.execute(
- "CREATE SCHEMA IF NOT EXISTS default_change;",
- "DROP TABLE IF EXISTS default_change.test_table;",
- "CREATE TABLE default_change.test_table (pk SERIAL, i INT DEFAULT 1, text TEXT DEFAULT 'foo', PRIMARY KEY(pk));",
- "INSERT INTO default_change.test_table(i, text) VALUES (DEFAULT, DEFAULT);");
-
- start(PostgresConnector.class, config.getConfig());
+ start(YugabyteDBConnector.class, config.getConfig());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -522,9 +581,9 @@ public void shouldReceiveChangesForChangeColumnDefault() throws Exception {
final Integer snapshotIntDefault = 1;
final String snapshotTextDefault = "foo";
snapshotRecords.recordsForTopic(topicName).forEach(snapshotRecord -> {
- assertValueField(snapshotRecord, "after/pk", 1);
- assertValueField(snapshotRecord, "after/i", snapshotIntDefault);
- assertValueField(snapshotRecord, "after/text", snapshotTextDefault);
+ assertValueField(snapshotRecord, "after/pk/value", 1);
+ assertValueField(snapshotRecord, "after/i/value", snapshotIntDefault);
+ assertValueField(snapshotRecord, "after/text/value", snapshotTextDefault);
assertThat(readRecordFieldDefault(snapshotRecord, "pk")).isEqualTo(pkExpectedDefault);
assertThat(readRecordFieldDefault(snapshotRecord, "i")).isEqualTo(snapshotIntDefault);
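Worth noting for readers of the `after/i/value` paths above: in this fork every column of a change event is wrapped in a struct that carries the actual datum under a `value` field, which is why the assertion paths gain an extra `/value` segment. A minimal helper sketch under that assumption; `unwrapColumn` is a hypothetical name, not part of the test suite:

    // Hypothetical helper, assuming the wrapped column layout used in these
    // tests, where each column struct exposes its datum under a "value" field.
    // Requires org.apache.kafka.connect.data.Struct.
    private static Object unwrapColumn(Struct section, String columnName) {
        Struct column = section.getStruct(columnName);      // e.g. the "i" column wrapper
        return column == null ? null : column.get("value"); // the actual datum
    }

With such a helper, the wrapped reads scattered through the tests below (for example `getStruct("bb").getString("value")`) could collapse to a single call.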
@@ -592,7 +651,7 @@ public void shouldReceiveChangesForChangeColumnDefault() throws Exception {
TestHelper.execute("INSERT INTO default_change.test_table(i, text) VALUES (DEFAULT, DEFAULT);");
- start(PostgresConnector.class, config.getConfig());
+ start(YugabyteDBConnector.class, config.getConfig());
assertConnectorIsRunning();
@@ -645,6 +704,7 @@ public void shouldReceiveChangesForChangeColumnDefault() throws Exception {
}
}
+ @Ignore("Complete test refactor required")
@Test
public void showThatSchemaColumnDefaultMayApplyRetroactively() throws Exception {
// Testing.Print.enable();
@@ -668,7 +728,7 @@ public void showThatSchemaColumnDefaultMayApplyRetroactively() throws Exception
"CREATE TABLE default_change.test_table (pk SERIAL, i INT DEFAULT 1, text TEXT DEFAULT 'foo', PRIMARY KEY(pk));",
"INSERT INTO default_change.test_table(i, text) VALUES (DEFAULT, DEFAULT);");
- start(PostgresConnector.class, config.getConfig());
+ start(YugabyteDBConnector.class, config.getConfig());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -700,7 +760,7 @@ public void showThatSchemaColumnDefaultMayApplyRetroactively() throws Exception
"ALTER TABLE default_change.test_table ADD COLUMN tstz TIMESTAMPTZ DEFAULT '2021-03-20 14:44:28 +1'::TIMESTAMPTZ;",
"INSERT INTO default_change.test_table(i, text, bi, tstz) VALUES (DEFAULT, DEFAULT, DEFAULT, DEFAULT);");
- start(PostgresConnector.class, config.getConfig());
+ start(YugabyteDBConnector.class, config.getConfig());
assertConnectorIsRunning();
@@ -772,7 +832,7 @@ public void shouldIgnoreEventsForDeletedTable() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -792,7 +852,7 @@ public void shouldIgnoreEventsForDeletedTable() throws Exception {
TestHelper.execute(INSERT_STMT);
TestHelper.execute("DROP TABLE s1.a");
- start(PostgresConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
+ start(YugabyteDBConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -801,6 +861,7 @@ public void shouldIgnoreEventsForDeletedTable() throws Exception {
assertThat(actualRecords.recordsForTopic(topicName("s2.a"))).hasSize(1);
}
+ @Ignore("YB Note: This behaviour is not yet implemented, see https://github.com/yugabyte/yugabyte-db/issues/21573")
@Test
@FixFor("DBZ-1021")
@SkipWhenDecoderPluginNameIsNot(value = SkipWhenDecoderPluginNameIsNot.DecoderPluginName.PGOUTPUT, reason = "Pgoutput will generate insert statements even for dropped tables, column optionality will default to true however")
@@ -809,7 +870,7 @@ public void shouldNotIgnoreEventsForDeletedTable() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -829,7 +890,7 @@ public void shouldNotIgnoreEventsForDeletedTable() throws Exception {
TestHelper.execute(INSERT_STMT);
TestHelper.execute("DROP TABLE s1.a");
- start(PostgresConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
+ start(YugabyteDBConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -847,7 +908,7 @@ public void shouldIgnoreViews() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -868,7 +929,7 @@ public void shouldIgnoreViews() throws Exception {
// start the connector back up and check that a new snapshot has not been performed (we're running initial only mode)
// but the 2 records that were inserted while we were down will be retrieved
- start(PostgresConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
+ start(YugabyteDBConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -884,7 +945,7 @@ public void shouldLimitDecoderLog() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -905,7 +966,7 @@ public void shouldLimitDecoderLog() throws Exception {
// start the connector back up and check that a new snapshot has not been performed (we're running initial only mode)
// but the 2 records that were inserted while we were down will be retrieved
- start(PostgresConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
+ start(YugabyteDBConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -915,6 +976,7 @@ public void shouldLimitDecoderLog() throws Exception {
1, interceptor.countOccurrences("identified as already processed"));
}
+ @Ignore("We are receiving records out of a certain order, can't control")
@Test
@FixFor("DBZ-693")
public void shouldExecuteOnConnectStatements() throws Exception {
@@ -923,11 +985,15 @@ public void shouldExecuteOnConnectStatements() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.ON_CONNECT_STATEMENTS, "INSERT INTO s1.a (aa) VALUES (2); INSERT INTO s2.a (aa, bb) VALUES (2, 'hello;; world');")
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForStreamingRunning();
SourceRecords actualRecords = consumeRecordsByTopic(6);
+
+ // YB Note: This test fails because the records arrive out of order - all of the records
+ // are received, but the jumbled ordering causes the second assertKey assertion below to
+ // fail.
assertKey(actualRecords.allRecordsInOrder().get(0), "pk", 1);
assertKey(actualRecords.allRecordsInOrder().get(1), "pk", 2);
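Given the ordering caveat in the YB Note above, an order-insensitive variant of these key assertions would avoid the failure; a sketch assuming each record key is a `Struct` with an Int32 `pk` field, as `assertKey` itself assumes:

    // Collect the observed primary keys and compare as a set, so the check no
    // longer depends on arrival order. Requires java.util.Set,
    // java.util.stream.Collectors and org.apache.kafka.connect.data.Struct.
    Set<Integer> observedPks = actualRecords.allRecordsInOrder().stream()
            .map(record -> ((Struct) record.key()).getInt32("pk"))
            .collect(Collectors.toSet());
    assertThat(observedPks).contains(1, 2);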
@@ -946,11 +1012,13 @@ public void shouldProduceEventsWhenSnapshotsAreNeverAllowed() throws Interrupted
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
- TestHelper.waitForDefaultReplicationSlotBeActive();
- waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
+ // YB Note: Added a wait for the replication slot to become active.
+ TestHelper.waitFor(Duration.ofSeconds(15));
+
+ waitForAvailableRecords(10_000, TimeUnit.MILLISECONDS);
// there shouldn't be any snapshot records
assertNoRecordsToConsume();
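The fixed 15-second wait above could, in principle, be replaced by polling the slot state with Awaitility (already used elsewhere in this class); a sketch where `TestHelper.isSlotActive(...)` is a hypothetical helper that would query `pg_replication_slots`:

    // Poll until the replication slot reports active instead of sleeping a
    // fixed 15 seconds; isSlotActive(...) is hypothetical, not an existing
    // TestHelper method. Requires org.awaitility.Awaitility, java.time.Duration.
    Awaitility.await("replication slot becomes active")
            .atMost(Duration.ofSeconds(15))
            .pollInterval(Duration.ofMillis(500))
            .until(() -> TestHelper.isSlotActive(ReplicationConnection.Builder.DEFAULT_SLOT_NAME));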
@@ -967,7 +1035,7 @@ public void shouldNotProduceEventsWithInitialOnlySnapshot() throws InterruptedEx
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
// check the records from the snapshot
@@ -980,13 +1048,14 @@ public void shouldNotProduceEventsWithInitialOnlySnapshot() throws InterruptedEx
assertNoRecordsToConsume();
}
+ @Ignore("Snapshot mode ALWAYS is unsupported")
@Test
public void shouldProduceEventsWhenAlwaysTakingSnapshots() throws InterruptedException {
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.ALWAYS.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -1001,13 +1070,52 @@ public void shouldProduceEventsWhenAlwaysTakingSnapshots() throws InterruptedExc
assertNoRecordsToConsume();
// start the connector back up and check that a new snapshot has been performed
- start(PostgresConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
+ start(YugabyteDBConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
assertRecordsFromSnapshot(4, 1, 2, 1, 2);
}
+ @Test
+ public void shouldHaveBeforeImageOfUpdatedRow() throws InterruptedException {
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.execute(SETUP_TABLES_STMT);
+ TestHelper.execute("ALTER TABLE s1.a REPLICA IDENTITY FULL;");
+ Configuration config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ // YB Note: Added a wait for the replication slot to become active.
+ TestHelper.waitFor(Duration.ofSeconds(15));
+
+ waitForAvailableRecords(10_000, TimeUnit.MILLISECONDS);
+ // there shouldn't be any snapshot records
+ assertNoRecordsToConsume();
+
+ // insert 2 new records and update one of them
+ TestHelper.execute(INSERT_STMT);
+ TestHelper.execute("UPDATE s1.a SET aa = 404 WHERE pk = 2;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(3);
+ List<SourceRecord> records = actualRecords.recordsForTopic(topicName("s1.a"));
+
+ SourceRecord insertRecord = records.get(0);
+ SourceRecord updateRecord = records.get(1);
+
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 2);
+ YBVerifyRecord.isValidUpdate(updateRecord, PK_FIELD, 2);
+
+ Struct updateRecordValue = (Struct) updateRecord.value();
+ assertThat(updateRecordValue.get(Envelope.FieldName.AFTER)).isNotNull();
+ assertThat(updateRecordValue.get(Envelope.FieldName.BEFORE)).isNotNull();
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("aa").getInt32("value")).isEqualTo(1);
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).getStruct("aa").getInt32("value")).isEqualTo(404);
+ }
+
@Test
public void shouldResumeSnapshotIfFailingMidstream() throws Exception {
// insert another set of rows so we can stop at certain point
@@ -1025,9 +1133,10 @@ public void shouldResumeSnapshotIfFailingMidstream() throws Exception {
fail("A controlled exception was expected....");
}
};
- start(PostgresConnector.class, configBuilder.build(), completionCallback, stopOnPKPredicate(2));
+ start(YugabyteDBConnector.class, configBuilder.build(), completionCallback, stopOnPKPredicate(2));
+ // YB Note: Increasing the wait time since the connector takes slightly longer to initialize.
// wait until we know we've raised the exception at startup AND the engine has been shutdown
- if (!latch.await(TestHelper.waitTimeForRecords() * 5, TimeUnit.SECONDS)) {
+ if (!latch.await(TestHelper.waitTimeForRecords() * 15, TimeUnit.SECONDS)) {
fail("did not reach stop condition in time");
}
// wait until we know we've raised the exception at startup AND the engine has been shutdown
@@ -1040,7 +1149,7 @@ public void shouldResumeSnapshotIfFailingMidstream() throws Exception {
// make sure there are no records to consume
assertNoRecordsToConsume();
// start the connector back up and check that it took another full snapshot since previously it was stopped midstream
- start(PostgresConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
+ start(YugabyteDBConnector.class, configBuilder.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
assertConnectorIsRunning();
// check that the snapshot was recreated
@@ -1064,7 +1173,7 @@ public void shouldRecoverFromRetriableException() throws Exception {
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
@@ -1077,6 +1186,8 @@ public void shouldRecoverFromRetriableException() throws Exception {
// kill all opened connections to the database
TestHelper.execute("SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE backend_type='walsender'");
TestHelper.execute(INSERT_STMT);
+ // TODO Vaibhav: Revisit this later to see if this wait can be removed or reduced.
+ TestHelper.waitFor(Duration.ofSeconds(10));
assertRecordsAfterInsert(2, 3, 3);
}
@@ -1094,7 +1205,7 @@ public void shouldUpdateReplicaIdentity() throws Exception {
.with(PostgresConnectorConfig.REPLICA_IDENTITY_AUTOSET_VALUES, "s1.a:FULL,s2.a:DEFAULT")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -1108,7 +1219,11 @@ public void shouldUpdateReplicaIdentity() throws Exception {
assertEquals(ReplicaIdentityInfo.ReplicaIdentity.FULL, connection.readReplicaIdentityInfo(tableIds1).getReplicaIdentity());
assertEquals(ReplicaIdentityInfo.ReplicaIdentity.DEFAULT, connection.readReplicaIdentityInfo(tableIds2).getReplicaIdentity());
assertThat(logInterceptor.containsMessage(String.format("Replica identity set to FULL for table '%s'", tableIds1))).isTrue();
- assertThat(logInterceptor.containsMessage(String.format("Replica identity for table '%s' is already DEFAULT", tableIds2))).isTrue();
+
+ // YB Note: Fails because we do not get this message when the replica identity is already set.
+ // assertThat(logInterceptor.containsMessage(String.format("Replica identity for table '%s' is already DEFAULT", tableIds2))).isTrue();
+ // YB Note: Adding an alternate log message.
+ assertThat(logInterceptor.containsMessage(String.format("Replica identity set to DEFAULT for table '%s'", tableIds2))).isTrue();
}
}
@@ -1123,7 +1238,7 @@ public void shouldUpdateReplicaIdentityWithRegExp() throws Exception {
.with(PostgresConnectorConfig.REPLICA_IDENTITY_AUTOSET_VALUES, "(.*).a:FULL,s2.*:NOTHING")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -1157,7 +1272,7 @@ public void shouldNotUpdateReplicaIdentityWithRegExpDuplicated() throws Exceptio
.with(PostgresConnectorConfig.REPLICA_IDENTITY_AUTOSET_VALUES, "s.*:FULL,s2.*:NOTHING")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -1185,7 +1300,7 @@ public void shouldUpdateReplicaIdentityWithOneTable() throws Exception {
.with(PostgresConnectorConfig.REPLICA_IDENTITY_AUTOSET_VALUES, "s1.a:FULL")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -1200,6 +1315,7 @@ public void shouldUpdateReplicaIdentityWithOneTable() throws Exception {
}
}
+ @Ignore("YB Note: alter replica identity INDEX is unsupported")
@Test
public void shouldUpdateReplicaIdentityUsingIndex() throws Exception {
@@ -1214,7 +1330,7 @@ public void shouldUpdateReplicaIdentityUsingIndex() throws Exception {
.with(PostgresConnectorConfig.REPLICA_IDENTITY_AUTOSET_VALUES, "s1.a:FULL,s2.a:INDEX a_pkey")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -1254,7 +1370,7 @@ public void shouldLogOwnershipErrorForReplicaIdentityUpdate() throws Exception {
.with("database.password", "role_2_pass")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -1279,7 +1395,7 @@ public void shouldCheckTablesToUpdateReplicaIdentityAreCaptured() throws Excepti
.with(PostgresConnectorConfig.REPLICA_IDENTITY_AUTOSET_VALUES, "s1.a:FULL,s2.b:DEFAULT")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -1287,6 +1403,9 @@ public void shouldCheckTablesToUpdateReplicaIdentityAreCaptured() throws Excepti
// Waiting until the replica identity is updated
waitForAvailableRecords(5, TimeUnit.SECONDS);
+ // YB Note: The following block only checks whether a certain log message has appeared.
+ // In our case we can alter the replica identity, but the effective replica identity for a
+ // table remains whatever was set at the time of replication slot creation.
try (PostgresConnection connection = TestHelper.create()) {
TableId tableIds1 = new TableId("", "s1", "a");
assertEquals(ReplicaIdentityInfo.ReplicaIdentity.FULL.toString(), connection.readReplicaIdentityInfo(tableIds1).toString());
@@ -1308,7 +1427,17 @@ public void shouldTakeExcludeListFiltersIntoAccount() throws Exception {
"INSERT INTO s1.a (aa, bb) VALUES (3, 3);" +
"INSERT INTO s1.b (aa, bb) VALUES (4, 4);" +
"INSERT INTO s2.a (aa) VALUES (5);";
- TestHelper.execute(setupStmt);
+
+ // YB Note: Separating the ALTER commands as they were causing transaction aborts in YB
+ // when run together, the error being:
+ // java.lang.RuntimeException: com.yugabyte.util.PSQLException: ERROR: Unknown transaction, could be recently aborted: 3273ed66-13c6-4d73-8c6e-014389e5081e
+ TestHelper.execute(SETUP_TABLES_STMT);
+ TestHelper.execute("CREATE TABLE s1.b (pk SERIAL, aa integer, bb integer, PRIMARY KEY(pk));");
+ TestHelper.execute("ALTER TABLE s1.a ADD COLUMN bb integer;");
+ TestHelper.execute("INSERT INTO s1.a (aa, bb) VALUES (2, 2); "
+ + "INSERT INTO s1.a (aa, bb) VALUES (3, 3); "
+ + "INSERT INTO s1.b (aa, bb) VALUES (4, 4); "
+ + "INSERT INTO s2.a (aa) VALUES (5);");
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
@@ -1316,7 +1445,7 @@ public void shouldTakeExcludeListFiltersIntoAccount() throws Exception {
.with(PostgresConnectorConfig.TABLE_EXCLUDE_LIST, ".+b")
.with(PostgresConnectorConfig.COLUMN_EXCLUDE_LIST, ".+bb");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
// check the records from the snapshot take the filters into account
@@ -1328,7 +1457,7 @@ public void shouldTakeExcludeListFiltersIntoAccount() throws Exception {
assertThat(recordsForS1a.size()).isEqualTo(3);
AtomicInteger pkValue = new AtomicInteger(1);
recordsForS1a.forEach(record -> {
- VerifyRecord.isValidRead(record, PK_FIELD, pkValue.getAndIncrement());
+ YBVerifyRecord.isValidRead(record, PK_FIELD, pkValue.getAndIncrement());
assertFieldAbsent(record, "bb");
});
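The DDL-splitting workaround described in the YB Note above boils down to issuing each statement through its own `TestHelper.execute(...)` call, so one aborted ALTER cannot take the rest of the batch down with it; a minimal sketch of that pattern, assuming each call runs in its own implicit transaction:

    // Run each DDL statement in isolation rather than as one batched string.
    // Requires java.util.List (Java 9+ for List.of).
    for (String statement : List.of(
            "CREATE TABLE s1.b (pk SERIAL, aa integer, bb integer, PRIMARY KEY(pk));",
            "ALTER TABLE s1.a ADD COLUMN bb integer;")) {
        TestHelper.execute(statement);
    }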
@@ -1341,14 +1470,21 @@ public void shouldTakeExcludeListFiltersIntoAccount() throws Exception {
@Test
public void shouldTakeBlacklistFiltersIntoAccount() throws Exception {
+ // YB Note: Separating the ALTER commands as they were causing transaction aborts in YB
+ // when run together, the error being:
+ // java.lang.RuntimeException: com.yugabyte.util.PSQLException: ERROR: Unknown transaction, could be recently aborted: 3273ed66-13c6-4d73-8c6e-014389e5081e
String setupStmt = SETUP_TABLES_STMT +
- "CREATE TABLE s1.b (pk SERIAL, aa integer, bb integer, PRIMARY KEY(pk));" +
- "ALTER TABLE s1.a ADD COLUMN bb integer;" +
- "INSERT INTO s1.a (aa, bb) VALUES (2, 2);" +
+ "CREATE TABLE s1.b (pk SERIAL, aa integer, bb integer, PRIMARY KEY(pk));";
+
+ TestHelper.execute(setupStmt);
+
+ TestHelper.execute("ALTER TABLE s1.a ADD COLUMN bb integer;");
+
+ String initInsertStmt = "INSERT INTO s1.a (aa, bb) VALUES (2, 2);" +
"INSERT INTO s1.a (aa, bb) VALUES (3, 3);" +
"INSERT INTO s1.b (aa, bb) VALUES (4, 4);" +
"INSERT INTO s2.a (aa) VALUES (5);";
- TestHelper.execute(setupStmt);
+ TestHelper.execute(initInsertStmt);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
@@ -1356,7 +1492,7 @@ public void shouldTakeBlacklistFiltersIntoAccount() throws Exception {
.with(PostgresConnectorConfig.TABLE_EXCLUDE_LIST, ".+b")
.with(PostgresConnectorConfig.COLUMN_EXCLUDE_LIST, ".+bb");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
// check the records from the snapshot take the filters into account
@@ -1368,7 +1504,7 @@ public void shouldTakeBlacklistFiltersIntoAccount() throws Exception {
assertThat(recordsForS1a.size()).isEqualTo(3);
AtomicInteger pkValue = new AtomicInteger(1);
recordsForS1a.forEach(record -> {
- VerifyRecord.isValidRead(record, PK_FIELD, pkValue.getAndIncrement());
+ YBVerifyRecord.isValidRead(record, PK_FIELD, pkValue.getAndIncrement());
assertFieldAbsent(record, "bb");
});
@@ -1387,13 +1523,19 @@ public void shouldTakeColumnIncludeListFilterIntoAccount() throws Exception {
"ALTER TABLE s1.a ADD COLUMN cc char(12);" +
"INSERT INTO s1.a (aa, bb) VALUES (2, 2);";
- TestHelper.execute(setupStmt);
+ // YB Note: Separating the ALTER commands as they were causing transaction aborts in YB
+ // when run together, the error being:
+ // java.lang.RuntimeException: com.yugabyte.util.PSQLException: ERROR: Unknown transaction, could be recently aborted: 3273ed66-13c6-4d73-8c6e-014389e5081e
+ TestHelper.execute(SETUP_TABLES_STMT);
+ TestHelper.execute("ALTER TABLE s1.a ADD COLUMN bb integer;");
+ TestHelper.execute("ALTER TABLE s1.a ADD COLUMN cc char(12);");
+ TestHelper.execute("INSERT INTO s1.a (aa, bb) VALUES (2, 2);");
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with("column.mask.with.5.chars", ".+cc")
.with(PostgresConnectorConfig.COLUMN_INCLUDE_LIST, ".+aa,.+cc");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
SourceRecords actualRecords = consumeRecordsByTopic(1);
@@ -1402,7 +1544,7 @@ public void shouldTakeColumnIncludeListFilterIntoAccount() throws Exception {
assertFieldAbsent(record, "bb");
Struct recordValue = ((Struct) record.value());
- assertThat(recordValue.getStruct("after").getString("cc")).isEqualTo("*****");
+ assertThat(recordValue.getStruct("after").getStruct("cc").getString("value")).isEqualTo("*****");
});
}
@@ -1422,7 +1564,7 @@ public void shouldRemoveWhiteSpaceChars() throws Exception {
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, tableWhitelistWithWhitespace);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
SourceRecords actualRecords = consumeRecordsByTopic(2);
@@ -1431,7 +1573,7 @@ public void shouldRemoveWhiteSpaceChars() throws Exception {
assertThat(records.size()).isEqualTo(1);
SourceRecord record = records.get(0);
- VerifyRecord.isValidRead(record, PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(record, PK_FIELD, 1);
String sourceTable = ((Struct) record.value()).getStruct("source").getString("table");
assertThat(sourceTable).isEqualTo("b");
@@ -1452,7 +1594,7 @@ public void shouldRemoveWhiteSpaceCharsOld() throws Exception {
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, tableWhitelistWithWhitespace);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
SourceRecords actualRecords = consumeRecordsByTopic(2);
@@ -1461,7 +1603,7 @@ public void shouldRemoveWhiteSpaceCharsOld() throws Exception {
assertThat(records.size()).isEqualTo(1);
SourceRecord record = records.get(0);
- VerifyRecord.isValidRead(record, PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(record, PK_FIELD, 1);
String sourceTable = ((Struct) record.value()).getStruct("source").getString("table");
assertThat(sourceTable).isEqualTo("b");
@@ -1480,10 +1622,12 @@ public void shouldCloseTxAfterTypeQuery() throws Exception {
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.b")
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
+ // YB Note: This test will fail since it creates a table after the connector has started
+ // and dynamic table addition is not supported in YB yet.
TestHelper.execute("CREATE TABLE s1.b (pk SERIAL, aa isbn, PRIMARY KEY(pk));", "INSERT INTO s1.b (aa) VALUES ('978-0-393-04002-9')");
SourceRecords actualRecords = consumeRecordsByTopic(1);
@@ -1491,8 +1635,8 @@ public void shouldCloseTxAfterTypeQuery() throws Exception {
assertThat(records.size()).isEqualTo(1);
SourceRecord record = records.get(0);
- VerifyRecord.isValidInsert(record, PK_FIELD, 1);
- final String isbn = new String(((Struct) record.value()).getStruct("after").getString("aa"));
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, 1);
+ final String isbn = new String(((Struct) record.value()).getStruct("after").getStruct("aa").getString("value"));
assertThat(isbn).isEqualTo("0-393-04002-X");
TestHelper.assertNoOpenTransactions();
@@ -1512,7 +1656,7 @@ public void shouldReplaceInvalidTopicNameCharacters() throws Exception {
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1")
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1\\.dbz_878_some\\|test@data");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
SourceRecords actualRecords = consumeRecordsByTopic(1);
@@ -1521,7 +1665,7 @@ public void shouldReplaceInvalidTopicNameCharacters() throws Exception {
assertThat(records.size()).isEqualTo(1);
SourceRecord record = records.get(0);
- VerifyRecord.isValidRead(record, PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(record, PK_FIELD, 1);
String sourceTable = ((Struct) record.value()).getStruct("source").getString("table");
assertThat(sourceTable).isEqualTo("dbz_878_some|test@data");
@@ -1539,7 +1683,7 @@ public void shouldNotSendEmptyOffset() throws InterruptedException, SQLException
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.a")
.with(Heartbeat.HEARTBEAT_INTERVAL, 10)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
// Generate empty logical decoding message
@@ -1560,9 +1704,13 @@ public void shouldRegularlyFlushLsn() throws InterruptedException, SQLException
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.a")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
+
+ // YB Note: Waiting for 15 seconds for streaming to initialize properly.
+ TestHelper.waitFor(Duration.ofSeconds(15));
+
// there shouldn't be any snapshot records
assertNoRecordsToConsume();
@@ -1601,7 +1749,7 @@ public void shouldRegularlyFlushLsnWithTxMonitoring() throws InterruptedExceptio
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.a")
.with(PostgresConnectorConfig.PROVIDE_TRANSACTION_METADATA, true)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
// there shouldn't be any snapshot records
@@ -1616,7 +1764,7 @@ public void shouldRegularlyFlushLsnWithTxMonitoring() throws InterruptedExceptio
stopConnector();
assertConnectorNotRunning();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
// there shouldn't be any snapshot records, only potentially transaction messages
@@ -1647,6 +1795,7 @@ public void shouldRegularlyFlushLsnWithTxMonitoring() throws InterruptedExceptio
assertThat(flushLsn.size()).isGreaterThanOrEqualTo((recordCount * 3) / 4);
}
+ @Ignore("Snapshot mode ALWAYS is unsupported")
@Test
@FixFor("DBZ-2456")
public void shouldAllowForSelectiveSnapshot() throws InterruptedException {
@@ -1656,7 +1805,7 @@ public void shouldAllowForSelectiveSnapshot() throws InterruptedException {
.with(CommonConnectorConfig.SNAPSHOT_MODE_TABLES, "s1.a")
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
/* Snapshot must be taken only for the listed tables */
@@ -1666,7 +1815,7 @@ public void shouldAllowForSelectiveSnapshot() throws InterruptedException {
assertThat(s1recs.size()).isEqualTo(1);
assertThat(s2recs).isNull();
- VerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
/* streaming should work normally */
TestHelper.execute(INSERT_STMT);
@@ -1677,13 +1826,13 @@ public void shouldAllowForSelectiveSnapshot() throws InterruptedException {
assertThat(s1recs.size()).isEqualTo(1);
assertThat(s2recs.size()).isEqualTo(1);
- VerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
- VerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
stopConnector();
/* start the connector back up and make sure snapshot is being taken */
- start(PostgresConnector.class, configBuilder
+ start(YugabyteDBConnector.class, configBuilder
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.SNAPSHOT_MODE_TABLES, "s2.a")
.build());
@@ -1695,8 +1844,8 @@ public void shouldAllowForSelectiveSnapshot() throws InterruptedException {
assertThat(s2recs.size()).isEqualTo(2);
assertThat(s1recs).isNull();
- VerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
- VerifyRecord.isValidRead(s2recs.get(1), PK_FIELD, 2);
+ YBVerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s2recs.get(1), PK_FIELD, 2);
}
@Test
@@ -1712,7 +1861,7 @@ public void shouldAllowForExportedSnapshot() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
// Consume records from the snapshot
@@ -1723,8 +1872,8 @@ public void shouldAllowForExportedSnapshot() throws Exception {
assertThat(s1recs.size()).isEqualTo(1);
assertThat(s2recs.size()).isEqualTo(1);
- VerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
- VerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
// Insert 2 more rows
// These are captured by the stream
@@ -1738,15 +1887,15 @@ public void shouldAllowForExportedSnapshot() throws Exception {
assertThat(s1recs.size()).isEqualTo(1);
assertThat(s2recs.size()).isEqualTo(1);
- VerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
- VerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
stopConnector();
config = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
TestHelper.execute(INSERT_STMT);
@@ -1757,8 +1906,8 @@ public void shouldAllowForExportedSnapshot() throws Exception {
s2recs = actualRecords.recordsForTopic(topicName("s2.a"));
assertThat(s1recs.size()).isEqualTo(1);
assertThat(s2recs.size()).isEqualTo(1);
- VerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 3);
- VerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 3);
+ YBVerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 3);
+ YBVerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 3);
}
@Test
@@ -1782,7 +1931,7 @@ public void exportedSnapshotShouldNotSkipRecordOfParallelTx() throws Exception {
pgConnection.setAutoCommit(false);
pgConnection.executeWithoutCommitting(INSERT_STMT);
final AtomicBoolean inserted = new AtomicBoolean();
- start(PostgresConnector.class, config, loggingCompletion(), x -> false, x -> {
+ start(YugabyteDBConnector.class, config, loggingCompletion(), x -> false, x -> {
if (!inserted.get()) {
TestHelper.execute(INSERT_STMT);
try {
@@ -1814,13 +1963,13 @@ public void exportedSnapshotShouldNotSkipRecordOfParallelTx() throws Exception {
@SkipWhenDecoderPluginNameIsNot(value = SkipWhenDecoderPluginNameIsNot.DecoderPluginName.PGOUTPUT, reason = "Publication not supported")
public void exportedSnapshotShouldNotSkipRecordOfParallelTxPgoutput() throws Exception {
TestHelper.dropDefaultReplicationSlot();
+ TestHelper.execute(SETUP_TABLES_STMT);
+ TestHelper.execute(INSERT_STMT);
+
TestHelper.createDefaultReplicationSlot();
TestHelper.execute("CREATE PUBLICATION dbz_publication FOR ALL TABLES;");
// Testing.Print.enable();
- TestHelper.execute(SETUP_TABLES_STMT);
- TestHelper.execute(INSERT_STMT);
-
Configuration config = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
@@ -1831,7 +1980,7 @@ public void exportedSnapshotShouldNotSkipRecordOfParallelTxPgoutput() throws Exc
pgConnection.setAutoCommit(false);
pgConnection.executeWithoutCommitting(INSERT_STMT);
final AtomicBoolean inserted = new AtomicBoolean();
- start(PostgresConnector.class, config, loggingCompletion(), x -> false, x -> {
+ start(YugabyteDBConnector.class, config, loggingCompletion(), x -> false, x -> {
if (!inserted.get()) {
TestHelper.execute(INSERT_STMT);
try {
@@ -1871,7 +2020,7 @@ public void shouldPerformSnapshotOnceForInitialOnlySnapshotMode() throws Excepti
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
// Let's wait for the snapshot to finish before proceeding
@@ -1889,8 +2038,8 @@ public void shouldPerformSnapshotOnceForInitialOnlySnapshotMode() throws Excepti
List<SourceRecord> s2recs = actualRecords.recordsForTopic(topicName("s2.a"));
assertThat(s1recs.size()).isEqualTo(1);
assertThat(s2recs.size()).isEqualTo(1);
- VerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
- VerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
// Stop the connector
stopConnector();
@@ -1902,7 +2051,7 @@ public void shouldPerformSnapshotOnceForInitialOnlySnapshotMode() throws Excepti
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForConnectorShutdown("postgres", TestHelper.TEST_SERVER);
@@ -1912,6 +2061,55 @@ public void shouldPerformSnapshotOnceForInitialOnlySnapshotMode() throws Excepti
.isTrue();
}
+ @Test
+ public void snapshotInitialOnlyFollowedByNever() throws Exception {
+ TestHelper.dropDefaultReplicationSlot();
+
+ TestHelper.execute(SETUP_TABLES_STMT);
+ // Start connector in NEVER mode to get the slot and publication created
+ Configuration config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+ // now stop the connector
+ stopConnector();
+ assertNoRecordsToConsume();
+
+ // These INSERT events should not be part of the snapshot
+ TestHelper.execute(INSERT_STMT);
+
+ // Now start the connector in INITIAL_ONLY mode
+ config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ // Let's wait for the snapshot to finish before proceeding
+ waitForSnapshotToBeCompleted("postgres", "test_server");
+ waitForAvailableRecords(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
+ assertRecordsFromSnapshot(2, 1, 1);
+
+ // Stop the connector
+ stopConnector();
+ assertConnectorNotRunning();
+
+ // Restart the connector, this time in NEVER mode
+ // Streaming should continue from where the INITIAL_ONLY connector finished its snapshot
+ config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ assertRecordsAfterInsert(2, 2, 2);
+ }
+
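The INITIAL_ONLY-to-NEVER handoff in the test above works because the replication slot retains its confirmed flush position between connector runs. A hedged sanity check of that handoff point, reusing the private `getConfirmedFlushLsn(...)` helper defined further down in this class:

    // The LSN left behind by the INITIAL_ONLY run is what the NEVER-mode
    // restart resumes from; it should therefore be present on the slot.
    try (PostgresConnection connection = TestHelper.create()) {
        assertThat(getConfirmedFlushLsn(connection)).isNotNull();
    }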
+ @Ignore("YB: Custom snapshotter not supported")
@Test
@FixFor("DBZ-2094")
public void shouldResumeStreamingFromSlotPositionForCustomSnapshot() throws Exception {
@@ -1922,7 +2120,7 @@ public void shouldResumeStreamingFromSlotPositionForCustomSnapshot() throws Exce
.with(PostgresConnectorConfig.SNAPSHOT_MODE_CUSTOM_NAME, CustomStartFromStreamingTestSnapshot.class.getName())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -1931,8 +2129,8 @@ public void shouldResumeStreamingFromSlotPositionForCustomSnapshot() throws Exce
List<SourceRecord> s2recs = actualRecords.recordsForTopic(topicName("s2.a"));
assertThat(s1recs.size()).isEqualTo(1);
assertThat(s2recs.size()).isEqualTo(1);
- VerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
- VerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
stopConnector();
@@ -1945,7 +2143,7 @@ public void shouldResumeStreamingFromSlotPositionForCustomSnapshot() throws Exce
.with(PostgresConnectorConfig.SNAPSHOT_MODE_CUSTOM_NAME, CustomStartFromStreamingTestSnapshot.class.getName())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -1959,18 +2157,19 @@ public void shouldResumeStreamingFromSlotPositionForCustomSnapshot() throws Exce
assertThat(s2recs.size()).isEqualTo(3);
// Validate the first record is from streaming
- VerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
- VerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
// Validate the rest of the records are from the snapshot
- VerifyRecord.isValidRead(s1recs.get(1), PK_FIELD, 1);
- VerifyRecord.isValidRead(s1recs.get(2), PK_FIELD, 2);
- VerifyRecord.isValidRead(s2recs.get(1), PK_FIELD, 1);
- VerifyRecord.isValidRead(s2recs.get(2), PK_FIELD, 2);
+ YBVerifyRecord.isValidRead(s1recs.get(1), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s1recs.get(2), PK_FIELD, 2);
+ YBVerifyRecord.isValidRead(s2recs.get(1), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s2recs.get(2), PK_FIELD, 2);
TestHelper.assertNoOpenTransactions();
}
+ @Ignore("YB: Custom snapshotter not supported")
@Test
@FixFor("DBZ-2094")
public void customSnapshotterSkipsTablesOnRestart() throws Exception {
@@ -1982,7 +2181,7 @@ public void customSnapshotterSkipsTablesOnRestart() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.ALWAYS.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -1991,8 +2190,8 @@ public void customSnapshotterSkipsTablesOnRestart() throws Exception {
List<SourceRecord> s2recs = actualRecords.recordsForTopic(topicName("s2.a"));
assertThat(s1recs.size()).isEqualTo(1);
assertThat(s2recs.size()).isEqualTo(1);
- VerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
- VerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
stopConnector();
@@ -2007,7 +2206,7 @@ public void customSnapshotterSkipsTablesOnRestart() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_QUERY_MODE_CUSTOM_NAME, CustomPartialTableTestSnapshot.class.getName())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -2023,12 +2222,12 @@ public void customSnapshotterSkipsTablesOnRestart() throws Exception {
assertThat(s2recs.size()).isEqualTo(1);
// streaming records
- VerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
- VerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
// snapshot records
- VerifyRecord.isValidRead(s1recs.get(1), PK_FIELD, 1);
- VerifyRecord.isValidRead(s1recs.get(2), PK_FIELD, 2);
+ YBVerifyRecord.isValidRead(s1recs.get(1), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s1recs.get(2), PK_FIELD, 2);
assertNoRecordsToConsume();
@@ -2037,6 +2236,7 @@ public void customSnapshotterSkipsTablesOnRestart() throws Exception {
stopConnector(value -> assertThat(logInterceptor.containsMessage("For table 's2.a' the select statement was not provided, skipping table")).isTrue());
}
+ @Ignore("YB: Custom snapshotter not supported")
@Test
@FixFor("DBZ-2094")
public void customSnapshotterSkipsTablesOnRestartWithConcurrentTx() throws Exception {
@@ -2049,7 +2249,7 @@ public void customSnapshotterSkipsTablesOnRestartWithConcurrentTx() throws Excep
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.ALWAYS.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -2058,8 +2258,8 @@ public void customSnapshotterSkipsTablesOnRestartWithConcurrentTx() throws Excep
List<SourceRecord> s2recs = actualRecords.recordsForTopic(topicName("s2.a"));
assertThat(s1recs.size()).isEqualTo(1);
assertThat(s2recs.size()).isEqualTo(1);
- VerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
- VerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s1recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s2recs.get(0), PK_FIELD, 1);
stopConnector();
@@ -2074,7 +2274,7 @@ public void customSnapshotterSkipsTablesOnRestartWithConcurrentTx() throws Excep
.with(PostgresConnectorConfig.SNAPSHOT_QUERY_MODE_CUSTOM_NAME, CustomPartialTableTestSnapshot.class.getName())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
Awaitility.await()
@@ -2100,12 +2300,12 @@ public void customSnapshotterSkipsTablesOnRestartWithConcurrentTx() throws Excep
assertThat(s2recs.size()).isEqualTo(1);
// streaming records
- VerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
- VerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s1recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
// snapshot records
- VerifyRecord.isValidRead(s1recs.get(1), PK_FIELD, 1);
- VerifyRecord.isValidRead(s1recs.get(2), PK_FIELD, 2);
+ YBVerifyRecord.isValidRead(s1recs.get(1), PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(s1recs.get(2), PK_FIELD, 2);
assertNoRecordsToConsume();
@@ -2124,7 +2324,7 @@ public void testCustomSnapshotterSnapshotCompleteLifecycleHook() throws Exceptio
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.CUSTOM.getValue())
.with(PostgresConnectorConfig.SNAPSHOT_MODE_CUSTOM_NAME, CustomLifecycleHookTestSnapshot.class.getName())
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -2147,7 +2347,7 @@ private String getConfirmedFlushLsn(PostgresConnection connection) throws SQLExc
final String lsn = connection.prepareQueryAndMap(
"select * from pg_replication_slots where slot_name = ? and database = ? and plugin = ?", statement -> {
statement.setString(1, ReplicationConnection.Builder.DEFAULT_SLOT_NAME);
- statement.setString(2, "postgres");
+ statement.setString(2, "yugabyte");
statement.setString(3, TestHelper.decoderPlugin().getPostgresPluginName());
},
rs -> {
@@ -2174,6 +2374,17 @@ private void assertFieldAbsent(SourceRecord record, String fieldName) {
}
}
+ private void assertFieldAbsentInBeforeImage(SourceRecord record, String fieldName) {
+ Struct value = (Struct) ((Struct) record.value()).get(Envelope.FieldName.BEFORE);
+ try {
+ value.get(fieldName);
+ fail("field should not be present");
+ }
+ catch (DataException e) {
+ // expected
+ }
+ }
+
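A usage sketch for the helper above; it assumes the consumed record is an update event captured with REPLICA IDENTITY FULL (so a `before` image exists) and that column `bb` was excluded via the column exclude list - both assumptions, not fixtures of this class:

    // Consume one update event and verify the excluded column is absent from
    // its before image, complementing the assertFieldAbsent check on the after image.
    SourceRecord updateRecord = consumeRecordsByTopic(1)
            .recordsForTopic(topicName("s1.a")).get(0);
    assertFieldAbsentInBeforeImage(updateRecord, "bb");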
@Test
@Ignore
public void testStreamingPerformance() throws Exception {
@@ -2182,7 +2393,7 @@ public void testStreamingPerformance() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
final long recordsCount = 1000000;
final int batchSize = 1000;
@@ -2209,8 +2420,8 @@ private void consumeRecords(long recordsCount) {
Strings.duration(System.currentTimeMillis() - start));
}
+ @Ignore("YB: YB doesn't support the way of initial_only snapshot this connector uses, see https://github.com/yugabyte/yugabyte-db/issues/21425")
@Test
- @Ignore
public void testSnapshotPerformance() throws Exception {
TestHelper.dropAllSchemas();
TestHelper.executeDDL("postgres_create_tables.ddl");
@@ -2223,7 +2434,7 @@ public void testSnapshotPerformance() throws Exception {
batchInsertRecords(recordsCount, batchSize).get();
// start the connector only after we've finished inserting all the records
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
CompletableFuture.runAsync(() -> consumeRecords(recordsCount))
@@ -2232,6 +2443,7 @@ public void testSnapshotPerformance() throws Exception {
}).get();
}
+ @Ignore("YB: YB doesn't support the way of initial_only snapshot this connector uses, see https://github.com/yugabyte/yugabyte-db/issues/21425")
@Test
public void shouldProcessPurgedLogsWhenDownAndSnapshotNeeded() throws InterruptedException {
@@ -2240,7 +2452,7 @@ public void shouldProcessPurgedLogsWhenDownAndSnapshotNeeded() throws Interrupte
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -2255,7 +2467,7 @@ public void shouldProcessPurgedLogsWhenDownAndSnapshotNeeded() throws Interrupte
assertNoRecordsToConsume();
// start the connector back up and check that a new snapshot has been performed
- start(PostgresConnector.class, configBuilder
+ start(YugabyteDBConnector.class, configBuilder
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.WHEN_NEEDED.getValue())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE).build());
@@ -2278,13 +2490,14 @@ public void testEmptySchemaWarningAfterApplyingFilters() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY.getValue())
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "my_products");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForAvailableRecords(10 * (TestHelper.waitTimeForRecords() * 5), TimeUnit.MILLISECONDS);
stopConnector(value -> assertThat(logInterceptor.containsWarnMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isTrue());
}
+ @Ignore("YB: YB doesn't support the way of initial_only snapshot this connector uses, see https://github.com/yugabyte/yugabyte-db/issues/21425")
@Test
@FixFor("DBZ-1242")
public void testNoEmptySchemaWarningAfterApplyingFilters() throws Exception {
@@ -2297,7 +2510,7 @@ public void testNoEmptySchemaWarningAfterApplyingFilters() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY.getValue());
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
@@ -2318,7 +2531,7 @@ public void testCustomPublicationNameUsed() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.PUBLICATION_NAME, "cdc");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
@@ -2336,7 +2549,7 @@ public void shouldRewriteIdentityKey() throws InterruptedException {
// rewrite key from table 'a': from {pk} to {pk, aa}
.with(PostgresConnectorConfig.MSG_KEY_COLUMNS, "(.*)1.a:pk,aa");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
waitForSnapshotToBeCompleted();
SourceRecords records = consumeRecordsByTopic(2);
records.recordsForTopic("test_server.s1.a").forEach(record -> {
@@ -2368,18 +2581,19 @@ public void shouldNotIssueWarningForNoMonitoredTablesAfterApplyingFilters() thro
.build();
// Start connector, verify that it does not log no captured tables warning
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForSnapshotToBeCompleted();
SourceRecords records = consumeRecordsByTopic(1);
assertThat(logInterceptor.containsMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isFalse();
stopConnector();
// Restart connector, verify it does not log no captured tables warning
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning();
assertThat(logInterceptor.containsMessage(DatabaseSchema.NO_CAPTURED_DATA_COLLECTIONS_WARNING)).isFalse();
}
+ @Ignore("YB: decoderbufs unsupported")
@Test
@FixFor("DBZ-2865")
@SkipWhenDecoderPluginNameIsNot(value = SkipWhenDecoderPluginNameIsNot.DecoderPluginName.DECODERBUFS, reason = "Expected warning message is emitted by protobuf decoder")
@@ -2392,7 +2606,7 @@ public void shouldClearDatabaseWarnings() throws Exception {
.with(PostgresConnectorConfig.POLL_INTERVAL_MS, "10")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForSnapshotToBeCompleted();
Awaitility.await().atMost(Duration.ofSeconds(TestHelper.waitTimeForRecords() * 6))
.until(() -> logInterceptor.containsMessage("Server-side message: 'Exiting startup callback'"));
@@ -2415,7 +2629,7 @@ public void shouldCreatePublicationWhenReplicationSlotExists() throws Exception
// Start connector with no snapshot; by default replication slot and publication should be created
// Wait until streaming mode begins to proceed
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning();
// Check that publication was created
@@ -2428,11 +2642,12 @@ public void shouldCreatePublicationWhenReplicationSlotExists() throws Exception
// Create log interceptor and restart the connector, should observe publication gets re-created
final LogInterceptor interceptor = new LogInterceptor(PostgresReplicationConnection.class);
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning();
+ // YB Note: Increasing the wait time.
// Check that publication was created
- Awaitility.await("Wait until publication is created").atMost(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS)
+ Awaitility.await("Wait until publication is created").atMost(TestHelper.waitTimeForRecords() * 10, TimeUnit.SECONDS)
.until(TestHelper::publicationExists);
// Stop Connector and check log messages
@@ -2447,7 +2662,7 @@ public void shouldConsumeEventsWithMaskedColumns() throws Exception {
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with("column.mask.with.5.chars", "s2.a.bb");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
SourceRecords actualRecords = consumeRecordsByTopic(2);
@@ -2457,11 +2672,11 @@ public void shouldConsumeEventsWithMaskedColumns() throws Exception {
assertThat(recordsForTopicS2.size()).isEqualTo(1);
SourceRecord record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidRead(record, PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(record, PK_FIELD, 1);
Struct value = (Struct) record.value();
if (value.getStruct("after") != null) {
- assertThat(value.getStruct("after").getString("bb")).isEqualTo("*****");
+ assertThat(value.getStruct("after").getStruct("bb").getString("value")).isEqualTo("*****");
}
// insert and verify inserts
@@ -2474,11 +2689,11 @@ public void shouldConsumeEventsWithMaskedColumns() throws Exception {
assertThat(recordsForTopicS2.size()).isEqualTo(1);
record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidInsert(record, PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, 2);
value = (Struct) record.value();
if (value.getStruct("after") != null) {
- assertThat(value.getStruct("after").getString("bb")).isEqualTo("*****");
+ assertThat(value.getStruct("after").getStruct("bb").getString("value")).isEqualTo("*****");
}
// update and verify update
@@ -2491,14 +2706,15 @@ record = recordsForTopicS2.remove(0);
assertThat(recordsForTopicS2.size()).isEqualTo(1);
record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidUpdate(record, PK_FIELD, 2);
+ YBVerifyRecord.isValidUpdate(record, PK_FIELD, 2);
value = (Struct) record.value();
+ // TODO Vaibhav: Note to self - the following assertion is only valid when before image is enabled.
if (value.getStruct("before") != null) {
- assertThat(value.getStruct("before").getString("bb")).isEqualTo("*****");
+ assertThat(value.getStruct("before").getStruct("bb").getString("value")).isEqualTo("*****");
}
if (value.getStruct("after") != null) {
- assertThat(value.getStruct("after").getString("bb")).isEqualTo("*****");
+ assertThat(value.getStruct("after").getStruct("bb").getString("value")).isEqualTo("*****");
}
}
@@ -2509,7 +2725,7 @@ public void shouldConsumeEventsWithMaskedHashedColumns() throws Exception {
"CREATE TABLE s2.b (pk SERIAL, bb varchar(255), PRIMARY KEY(pk));");
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with("column.mask.hash.SHA-256.with.salt.CzQMA0cB5K", "s2.a.bb, s2.b.bb");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
SourceRecords actualRecords = consumeRecordsByTopic(2);
@@ -2519,11 +2735,11 @@ public void shouldConsumeEventsWithMaskedHashedColumns() throws Exception {
assertThat(recordsForTopicS2.size()).isEqualTo(1);
SourceRecord record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidRead(record, PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(record, PK_FIELD, 1);
Struct value = (Struct) record.value();
if (value.getStruct("after") != null) {
- assertThat(value.getStruct("after").getString("bb")).isNull();
+ assertThat(value.getStruct("after").getStruct("bb").getString("value")).isNull();
}
// insert and verify inserts
@@ -2536,11 +2752,11 @@ public void shouldConsumeEventsWithMaskedHashedColumns() throws Exception {
assertThat(recordsForTopicS2.size()).isEqualTo(1);
record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidInsert(record, PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, 2);
value = (Struct) record.value();
if (value.getStruct("after") != null) {
- assertThat(value.getStruct("after").getString("bb")).isEqualTo("8e68c68edbbac316dfe2");
+ assertThat(value.getStruct("after").getStruct("bb").getString("value")).isEqualTo("8e68c68edbbac316dfe2");
}
// update and verify update
@@ -2553,11 +2769,11 @@ record = recordsForTopicS2.remove(0);
assertThat(recordsForTopicS2.size()).isEqualTo(1);
record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidUpdate(record, PK_FIELD, 2);
+ YBVerifyRecord.isValidUpdate(record, PK_FIELD, 2);
value = (Struct) record.value();
if (value.getStruct("after") != null) {
- assertThat(value.getStruct("after").getString("bb")).isEqualTo("b4d39ab0d198fb4cac8b");
+ assertThat(value.getStruct("after").getStruct("bb").getString("value")).isEqualTo("b4d39ab0d198fb4cac8b");
}
// insert and verify inserts
@@ -2570,14 +2786,15 @@ record = recordsForTopicS2.remove(0);
assertThat(recordsForTopicS2.size()).isEqualTo(1);
record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidInsert(record, PK_FIELD, 1);
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, 1);
value = (Struct) record.value();
+ // TODO Vaibhav: Note to self - the following assertion is only valid when before image is enabled.
if (value.getStruct("before") != null) {
- assertThat(value.getStruct("before").getString("bb")).isNull();
+ assertThat(value.getStruct("before").getStruct("bb").getString("value")).isNull();
}
if (value.getStruct("after") != null) {
- assertThat(value.getStruct("after").getString("bb")).isEqualTo("b4d39ab0d198fb4cac8b2f023da74f670bcaf192dcc79b5d6361b7ae6b2fafdf");
+ assertThat(value.getStruct("after").getStruct("bb").getString("value")).isEqualTo("b4d39ab0d198fb4cac8b2f023da74f670bcaf192dcc79b5d6361b7ae6b2fafdf");
}
}
@@ -2587,7 +2804,7 @@ public void shouldConsumeEventsWithTruncatedColumns() throws Exception {
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with("column.truncate.to.3.chars", "s2.a.bb");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
SourceRecords actualRecords = consumeRecordsByTopic(2);
@@ -2597,7 +2814,7 @@ public void shouldConsumeEventsWithTruncatedColumns() throws Exception {
assertThat(recordsForTopicS2.size()).isEqualTo(1);
SourceRecord record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidRead(record, PK_FIELD, 1);
+ YBVerifyRecord.isValidRead(record, PK_FIELD, 1);
// insert and verify inserts
TestHelper.execute("INSERT INTO s2.a (aa,bb) VALUES (1, 'test');");
@@ -2609,11 +2826,11 @@ public void shouldConsumeEventsWithTruncatedColumns() throws Exception {
assertThat(recordsForTopicS2.size()).isEqualTo(1);
record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidInsert(record, PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, 2);
Struct value = (Struct) record.value();
if (value.getStruct("after") != null) {
- assertThat(value.getStruct("after").getString("bb")).isEqualTo("tes");
+ assertThat(value.getStruct("after").getStruct("bb").getString("value")).isEqualTo("tes");
}
// update and verify update
@@ -2626,14 +2843,15 @@ record = recordsForTopicS2.remove(0);
assertThat(recordsForTopicS2.size()).isEqualTo(1);
record = recordsForTopicS2.remove(0);
- VerifyRecord.isValidUpdate(record, PK_FIELD, 2);
+ YBVerifyRecord.isValidUpdate(record, PK_FIELD, 2);
value = (Struct) record.value();
- if (value.getStruct("before") != null && value.getStruct("before").getString("bb") != null) {
- assertThat(value.getStruct("before").getString("bb")).isEqualTo("tes");
+ // TODO Vaibhav: Note to self - the following before-image assertion is only valid when before image is enabled.
+ if (value.getStruct("before") != null && value.getStruct("before").getStruct("bb").getString("value") != null) {
+ assertThat(value.getStruct("before").getStruct("bb").getString("value")).isEqualTo("tes");
}
if (value.getStruct("after") != null) {
- assertThat(value.getStruct("after").getString("bb")).isEqualTo("hel");
+ assertThat(value.getStruct("after").getStruct("bb").getString("value")).isEqualTo("hel");
}
}
@@ -2641,14 +2859,14 @@ record = recordsForTopicS2.remove(0);
@FixFor("DBZ-5811")
public void shouldAckLsnOnSourceByDefault() throws Exception {
TestHelper.dropDefaultReplicationSlot();
- TestHelper.createDefaultReplicationSlot();
TestHelper.execute(SETUP_TABLES_STMT);
+ TestHelper.createDefaultReplicationSlot();
final Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME)
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, "false");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -2661,7 +2879,7 @@ public void shouldAckLsnOnSourceByDefault() throws Exception {
TestHelper.execute("INSERT INTO s2.a (aa,bb) VALUES (1, 'test');");
TestHelper.execute("UPDATE s2.a SET aa=2, bb='hello' WHERE pk=2;");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForStreamingRunning();
@@ -2674,12 +2892,300 @@ public void shouldAckLsnOnSourceByDefault() throws Exception {
Assert.assertEquals(1, slotAfterIncremental.slotLastFlushedLsn().compareTo(slotAfterSnapshot.slotLastFlushedLsn()));
}
+ // YB Note: This test is only applicable when replica identity is CHANGE.
+ @Test
+ public void testYBCustomChangesForUpdate() throws Exception {
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.execute(CREATE_TABLES_STMT);
+ TestHelper.createDefaultReplicationSlot();
+
+ final Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s2.a");
+
+ start(YugabyteDBConnector.class, configBuilder.build());
+ assertConnectorIsRunning();
+ waitForStreamingRunning();
+ TestHelper.waitFor(Duration.ofSeconds(5));
+
+ TestHelper.execute(INSERT_STMT);
+ TestHelper.execute("UPDATE s2.a SET aa=2 WHERE pk=1;");
+ TestHelper.execute("UPDATE s2.a SET aa=NULL WHERE pk=1;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(3);
+
+ assertValueField(actualRecords.allRecordsInOrder().get(0), "after/pk/value", 1);
+ assertValueField(actualRecords.allRecordsInOrder().get(0), "after/aa/value", 1);
+ assertValueField(actualRecords.allRecordsInOrder().get(0), "after/bb/value", null);
+
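+ // YB Note: an unchanged column is omitted entirely (its whole struct is null), while a
+ // column explicitly set to NULL still carries a struct whose "value" field is null.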
+ assertValueField(actualRecords.allRecordsInOrder().get(1), "after/pk/value", 1);
+ assertValueField(actualRecords.allRecordsInOrder().get(1), "after/aa/value", 2);
+ assertValueField(actualRecords.allRecordsInOrder().get(1), "after/bb", null);
+
+ assertValueField(actualRecords.allRecordsInOrder().get(2), "after/pk/value", 1);
+ assertValueField(actualRecords.allRecordsInOrder().get(2), "after/aa/value", null);
+ assertValueField(actualRecords.allRecordsInOrder().get(2), "after/bb", null);
+ }
+
+ @Test
+ public void testTableWithCompositePrimaryKey() throws Exception {
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.execute(CREATE_TABLES_STMT);
+ TestHelper.execute("CREATE TABLE s1.test_composite_pk (id INT, text_col TEXT, first_name VARCHAR(60), age INT, PRIMARY KEY(id, text_col));");
+
+ final Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.test_composite_pk");
+
+ start(YugabyteDBConnector.class, configBuilder.build());
+ assertConnectorIsRunning();
+ waitForStreamingRunning();
+
+ TestHelper.execute("INSERT INTO s1.test_composite_pk VALUES (1, 'ffff-ffff', 'Vaibhav', 25);");
+ TestHelper.execute("UPDATE s1.test_composite_pk SET first_name='Vaibhav K' WHERE id = 1 AND text_col='ffff-ffff';");
+ TestHelper.execute("DELETE FROM s1.test_composite_pk;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(4 /* 1 + 1 + 1 + tombstone */);
+ List<SourceRecord> records = actualRecords.allRecordsInOrder();
+
+ assertThat(records.size()).isEqualTo(4);
+
+ // Assert insert record.
+ assertValueField(records.get(0), "after/id/value", 1);
+ assertValueField(records.get(0), "after/text_col/value", "ffff-ffff");
+ assertValueField(records.get(0), "after/first_name/value", "Vaibhav");
+ assertValueField(records.get(0), "after/age/value", 25);
+
+ // Assert update record.
+ assertValueField(records.get(1), "after/id/value", 1);
+ assertValueField(records.get(1), "after/text_col/value", "ffff-ffff");
+ assertValueField(records.get(1), "after/first_name/value", "Vaibhav K");
+ assertValueField(records.get(1), "after/age", null);
+
+ // Assert delete record.
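+ // With replica identity CHANGE, the before image appears to carry only the primary key
+ // columns; non-key columns come through as null.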
+ assertValueField(records.get(2), "before/id/value", 1);
+ assertValueField(records.get(2), "before/text_col/value", "ffff-ffff");
+ assertValueField(records.get(2), "before/first_name/value", null);
+ assertValueField(records.get(2), "before/age/value", null);
+ assertValueField(records.get(2), "after", null);
+
+ // Validate tombstone record.
+ assertTombstone(records.get(3));
+ }
+
+ @Test
+ public void shouldNotWorkWithReplicaIdentityChangeAndPgOutput() throws Exception {
+ final Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME)
+ .with(PostgresConnectorConfig.PLUGIN_NAME, LogicalDecoder.PGOUTPUT)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s2.a");
+
+ start(YugabyteDBConnector.class, configBuilder.build(), (success, message, error) -> {
+ assertFalse(success);
+ });
+ }
+
+ @Test
+ public void foreignKeyOnTheTableShouldNotCauseIssues() throws Exception {
+ TestHelper.execute(CREATE_TABLES_STMT);
+ TestHelper.execute("CREATE TABLE s1.department (dept_id INT PRIMARY KEY, dept_name TEXT);");
+ TestHelper.execute("CREATE TABLE s1.users (id SERIAL PRIMARY KEY, name TEXT, dept_id INT, FOREIGN KEY (dept_id) REFERENCES s1.department(dept_id));");
+
+ final Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SLOT_NAME, "slot_for_fk")
+ .with(PostgresConnectorConfig.PUBLICATION_NAME, "publication_for_fk")
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.users");
+
+ start(YugabyteDBConnector.class, configBuilder.build());
+ assertConnectorIsRunning();
+ waitForStreamingRunning();
+
+ TestHelper.execute("INSERT INTO s1.department VALUES (11, 'Industrial equipments');");
+ TestHelper.execute("INSERT INTO s1.users VALUES (1, 'Vaibhav', 11);");
+
+ SourceRecords records = consumeRecordsByTopic(1);
+ assertThat(records.allRecordsInOrder().size()).isEqualTo(1);
+
+ SourceRecord record = records.allRecordsInOrder().get(0);
+ YBVerifyRecord.isValidInsert(record, "id", 1);
+ assertValueField(record, "after/id/value", 1);
+ assertValueField(record, "after/name/value", "Vaibhav");
+ assertValueField(record, "after/dept_id/value", 11);
+ }
+
+ @Test
+ public void shouldNotSkipMessagesWithoutChangeWithReplicaIdentityChange() throws Exception {
+ testSkipMessagesWithoutChange(ReplicaIdentityInfo.ReplicaIdentity.CHANGE);
+ }
+
+ @Test
+ public void shouldSkipMessagesWithoutChangeWithReplicaIdentityFull() throws Exception {
+ testSkipMessagesWithoutChange(ReplicaIdentityInfo.ReplicaIdentity.FULL);
+ }
+
+ public void testSkipMessagesWithoutChange(ReplicaIdentityInfo.ReplicaIdentity replicaIdentity) throws Exception {
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.execute(CREATE_TABLES_STMT);
+
+ boolean isReplicaIdentityFull = (replicaIdentity == ReplicaIdentityInfo.ReplicaIdentity.FULL);
+
+ if (isReplicaIdentityFull) {
+ TestHelper.execute("ALTER TABLE s2.a REPLICA IDENTITY FULL;");
+ TestHelper.waitFor(Duration.ofSeconds(10));
+ }
+
+ TestHelper.createDefaultReplicationSlot();
+
+ final Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s2.a")
+ .with(PostgresConnectorConfig.SKIP_MESSAGES_WITHOUT_CHANGE, true)
+ .with(PostgresConnectorConfig.COLUMN_INCLUDE_LIST, "s2.a.pk,s2.a.aa");
+
+ start(YugabyteDBConnector.class, configBuilder.build());
+ assertConnectorIsRunning();
+ waitForStreamingRunning();
+ TestHelper.waitFor(Duration.ofSeconds(5));
+
+ TestHelper.execute(INSERT_STMT);
+ // This update will not be propagated if replica identity is FULL.
+ TestHelper.execute("UPDATE s2.a SET bb = 'random_value' WHERE pk=1;");
+ TestHelper.execute("UPDATE s2.a SET aa = 12345 WHERE pk=1;");
+
+ // YB Note: With replica identity CHANGE we receive all the records, including the update
+ // that only touches column bb, which is not in the column include list.
+ SourceRecords actualRecords = consumeRecordsByTopic(isReplicaIdentityFull ? 2 : 3);
+
+ assertValueField(actualRecords.allRecordsInOrder().get(0), "after/pk/value", 1);
+ assertValueField(actualRecords.allRecordsInOrder().get(0), "after/aa/value", 1);
+
+ if (isReplicaIdentityFull) {
+ // In this case the second record we get is the operation where one of the monitored columns
+ // is changed.
+ assertThat(actualRecords.allRecordsInOrder().size()).isEqualTo(2);
+
+ assertValueField(actualRecords.allRecordsInOrder().get(1), "after/pk/value", 1);
+ assertValueField(actualRecords.allRecordsInOrder().get(1), "after/aa/value", 12345);
+
+ assertValueField(actualRecords.allRecordsInOrder().get(1), "before/pk/value", 1);
+ assertValueField(actualRecords.allRecordsInOrder().get(1), "before/aa/value", 1);
+ assertFieldAbsentInBeforeImage(actualRecords.allRecordsInOrder().get(1), "bb");
+ } else {
+ assertThat(actualRecords.allRecordsInOrder().size()).isEqualTo(3);
+
+ assertValueField(actualRecords.allRecordsInOrder().get(1), "after/pk/value", 1);
+ // Column aa will not be present since it is an unchanged column.
+ assertThat(((Struct) actualRecords.allRecordsInOrder().get(1).value()).getStruct("after").get("aa")).isNull();
+
+ assertThat(((Struct) actualRecords.allRecordsInOrder().get(1).value()).getStruct("before")).isNull();
+
+ assertValueField(actualRecords.allRecordsInOrder().get(2), "after/pk/value", 1);
+ assertValueField(actualRecords.allRecordsInOrder().get(2), "after/aa/value", 12345);
+ assertFieldAbsent(actualRecords.allRecordsInOrder().get(2), "bb");
+
+ assertThat(((Struct) actualRecords.allRecordsInOrder().get(2).value()).getStruct("before")).isNull();
+
+ }
+ }
+
+ // YB Note: This test is only applicable when replica identity is CHANGE.
+ @Test
+ public void customYBStructureShouldBePresentInSnapshotRecords() throws Exception {
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.execute(CREATE_TABLES_STMT);
+
+ // Insert 5 records to be included in snapshot.
+ for (int i = 0; i < 5; ++i) {
+ TestHelper.execute(String.format("INSERT INTO s2.a (aa) VALUES (%d);", i));
+ }
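+ // pk is a SERIAL column, so these five inserts should yield pk values 1 through 5.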
+
+ TestHelper.createDefaultReplicationSlot();
+
+ final Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s2.a");
+
+ start(YugabyteDBConnector.class, configBuilder.build());
+ assertConnectorIsRunning();
+ waitForSnapshotToBeCompleted();
+
+ SourceRecords actualRecords = consumeRecordsByTopic(5);
+ assertThat(actualRecords.allRecordsInOrder().size()).isEqualTo(5);
+
+ Set<Integer> expectedPKValues = new HashSet<>(Arrays.asList(1, 2, 3, 4, 5));
+ Set<Integer> actualPKValues = new HashSet<>();
+
+ for (SourceRecord record : actualRecords.allRecordsInOrder()) {
+ Struct value = (Struct) record.value();
+
+ actualPKValues.add(value.getStruct("after").getStruct("pk").getInt32("value"));
+ }
+
+ assertEquals(expectedPKValues, actualPKValues);
+ }
+
+ @Test
+ public void streamColumnsWithNotNullConstraintsForReplicaIdentityChange() throws Exception {
+ testStreamColumnsWithNotNullConstraints(ReplicaIdentityInfo.ReplicaIdentity.CHANGE);
+ }
+
+ @Test
+ public void streamColumnsWithNotNullConstraintsForReplicaIdentityFull() throws Exception {
+ testStreamColumnsWithNotNullConstraints(ReplicaIdentityInfo.ReplicaIdentity.FULL);
+ }
+
+ @Test
+ public void streamColumnsWithNotNullConstraintsForReplicaIdentityDefault() throws Exception {
+ testStreamColumnsWithNotNullConstraints(ReplicaIdentityInfo.ReplicaIdentity.DEFAULT);
+ }
+
+ public void testStreamColumnsWithNotNullConstraints(
+ ReplicaIdentityInfo.ReplicaIdentity replicaIdentity) throws Exception {
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.execute(CREATE_TABLES_STMT);
+ TestHelper.execute("CREATE TABLE s1.test_table (id INT PRIMARY KEY, name TEXT NOT NULL, age INT);");
+
+ if (replicaIdentity != ReplicaIdentityInfo.ReplicaIdentity.CHANGE) {
+ final String replicaIdentityName = replicaIdentity.name();
+ TestHelper.execute("ALTER TABLE s1.test_table REPLICA IDENTITY " + replicaIdentityName + ";");
+ }
+
+ final Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.test_table");
+
+ start(YugabyteDBConnector.class, configBuilder.build());
+ assertConnectorIsRunning();
+ waitForStreamingRunning();
+
+ TestHelper.execute("INSERT INTO s1.test_table VALUES (1, 'Vaibhav', 25);");
+ TestHelper.execute("UPDATE s1.test_table SET age = 30 WHERE id = 1;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(2);
+ List<SourceRecord> records = actualRecords.allRecordsInOrder();
+ assertThat(records.size()).isEqualTo(2);
+
+ YBVerifyRecord.isValidInsert(records.get(0), "id", 1);
+ YBVerifyRecord.isValidUpdate(records.get(1), "id", 1);
+
+ // Also verify that the update record does/doesn't contain the non-updated column depending
+ // on replica identity.
+ if (replicaIdentity.equals(ReplicaIdentityInfo.ReplicaIdentity.CHANGE)) {
+ assertValueField(records.get(1), "after/name", null);
+ } else {
+ assertValueField(records.get(1), "after/name/value", "Vaibhav");
+ }
+ }
+
@Test
@FixFor("DBZ-5811")
public void shouldNotAckLsnOnSource() throws Exception {
TestHelper.dropDefaultReplicationSlot();
- TestHelper.createDefaultReplicationSlot();
TestHelper.execute(SETUP_TABLES_STMT);
+ TestHelper.createDefaultReplicationSlot();
final SlotState slotAtTheBeginning = getDefaultReplicationSlot();
@@ -2688,7 +3194,7 @@ public void shouldNotAckLsnOnSource() throws Exception {
.with(PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME)
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, "false");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -2703,12 +3209,15 @@ public void shouldNotAckLsnOnSource() throws Exception {
TestHelper.execute("INSERT INTO s2.a (aa,bb) VALUES (1, 'test');");
TestHelper.execute("UPDATE s2.a SET aa=2, bb='hello' WHERE pk=2;");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForStreamingRunning();
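+ // YB Note: allow extra time for streaming records to arrive before consuming.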
+ TestHelper.waitFor(Duration.ofSeconds(15));
+
actualRecords = consumeRecordsByTopic(2);
+
assertThat(actualRecords.allRecordsInOrder().size()).isEqualTo(2);
stopConnector();
@@ -2726,7 +3235,7 @@ public void shouldOutputRecordsInCloudEventsFormat() throws Exception {
.with(CommonConnectorConfig.PROVIDE_TRANSACTION_METADATA, true)
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -2745,8 +3254,9 @@ public void shouldOutputRecordsInCloudEventsFormat() throws Exception {
// Testing.Print.enable();
+ // YB Note: Increasing the wait time for records.
final List<SourceRecord> streaming = new ArrayList<>();
- Awaitility.await().atMost(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS).until(() -> {
+ Awaitility.await().atMost(TestHelper.waitTimeForRecords() * 15, TimeUnit.SECONDS).until(() -> {
// Should be BEGIN + END in case of empty tx or BEGIN + data in case of our TX
final SourceRecords streamingRecords = consumeRecordsByTopic(2);
final SourceRecord second = streamingRecords.allRecordsInOrder().get(1);
@@ -2768,6 +3278,44 @@ public void shouldOutputRecordsInCloudEventsFormat() throws Exception {
}
}
+ // This test is meant for manual runs. Before running it, change the method TestHelper#defaultJdbcConfig
+ // to include all three nodes "127.0.0.1:5433,127.0.0.2:5433,127.0.0.3:5433".
+ //
+ // While the test is running, as soon as you see "Take a node down now" in the logs,
+ // take down the node at IP 127.0.0.1 to simulate a node going down.
+ @Ignore("This test should not be run in the complete suite without making above mentioned changes")
+ @Test
+ public void testYBChangesForMultiHostConfiguration() throws Exception {
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.execute(CREATE_TABLES_STMT);
+ TestHelper.createDefaultReplicationSlot();
+
+ final Configuration.Builder configBuilder = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.HOSTNAME, "127.0.0.1:5433,127.0.0.2:5433,127.0.0.3:5433")
+ .with(PostgresConnectorConfig.SLOT_NAME, ReplicationConnection.Builder.DEFAULT_SLOT_NAME)
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, false)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s2.a");
+
+ start(YugabyteDBConnector.class, configBuilder.build());
+ assertConnectorIsRunning();
+ waitForStreamingRunning();
+ TestHelper.waitFor(Duration.ofSeconds(5));
+
+ TestHelper.execute(INSERT_STMT);
+
+ LOGGER.info("Take a node down now");
+ TestHelper.waitFor(Duration.ofMinutes(1));
+
+ LOGGER.info("Inserting and waiting for another 30s");
+ TestHelper.execute("INSERT INTO s2.a (aa) VALUES (11);");
+
+ TestHelper.waitFor(Duration.ofMinutes(2));
+ SourceRecords actualRecords = consumeRecordsByTopic(2);
+
+ assertThat(actualRecords.allRecordsInOrder().size()).isEqualTo(2);
+ }
+
@Test
@FixFor("DBZ-1813")
@SkipWhenDecoderPluginNameIsNot(value = SkipWhenDecoderPluginNameIsNot.DecoderPluginName.PGOUTPUT, reason = "Publication configuration only valid for PGOUTPUT decoder")
@@ -2782,7 +3330,7 @@ public void shouldConfigureSubscriptionsForAllTablesByDefault() throws Exception
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.PUBLICATION_NAME, "cdc");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
@@ -2809,7 +3357,7 @@ public void shouldConfigureSubscriptionsFromTableFilters() throws Exception {
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.numeric_table,public.text_table,s1.a,s2.a")
.with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, PostgresConnectorConfig.AutoCreateMode.FILTERED.getValue());
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
@@ -2845,7 +3393,7 @@ public void shouldThrowWhenAutocreationIsDisabled() throws Exception {
assertEquals(error.getMessage(), "Publication autocreation is disabled, please create one and restart the connector.");
};
- start(PostgresConnector.class, configBuilder.build(), cb);
+ start(YugabyteDBConnector.class, configBuilder.build(), cb);
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
stopConnector();
@@ -2866,7 +3414,7 @@ public void shouldProduceMessagesOnlyForConfiguredTables() throws Exception {
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s2.a")
.with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, PostgresConnectorConfig.AutoCreateMode.FILTERED.getValue());
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -2883,7 +3431,7 @@ public void shouldProduceMessagesOnlyForConfiguredTables() throws Exception {
assertThat(s1recs).isNull();
assertThat(s2recs).hasSize(1);
- VerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(s2recs.get(0), PK_FIELD, 2);
}
@Test
@@ -2902,7 +3450,7 @@ public void shouldThrowWhenTableFiltersIsEmpty() throws Exception {
.with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, PostgresConnectorConfig.AutoCreateMode.FILTERED.getValue())
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "nonexistent.table");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorNotRunning();
assertTrue(logInterceptor.containsStacktraceElement("No table filters found for filtered publication cdc"));
}
@@ -2922,7 +3470,7 @@ public void shouldUpdatePublicationForConfiguredTables() throws Exception {
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
.with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, PostgresConnectorConfig.AutoCreateMode.FILTERED.getValue());
- start(PostgresConnector.class, initalConfigBuilder.build());
+ start(YugabyteDBConnector.class, initalConfigBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -2939,7 +3487,7 @@ public void shouldUpdatePublicationForConfiguredTables() throws Exception {
assertThat(initalS1recs).isNull();
assertThat(initalS2recs).hasSize(1);
- VerifyRecord.isValidInsert(initalS2recs.get(0), PK_FIELD, 2);
+ YBVerifyRecord.isValidInsert(initalS2recs.get(0), PK_FIELD, 2);
stopConnector();
@@ -2951,13 +3499,14 @@ public void shouldUpdatePublicationForConfiguredTables() throws Exception {
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.a")
.with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, PostgresConnectorConfig.AutoCreateMode.FILTERED.getValue());
- start(PostgresConnector.class, updatedConfigBuilder.build());
+ start(YugabyteDBConnector.class, updatedConfigBuilder.build());
assertConnectorIsRunning();
// snapshot record s1.a
consumeRecordsByTopic(1);
TestHelper.execute(INSERT_STMT);
+ TestHelper.waitFor(Duration.ofSeconds(10));
SourceRecords actualRecordsAfterUpdate = consumeRecordsByTopic(1);
assertThat(actualRecordsAfterUpdate.topics()).hasSize(1);
@@ -2989,7 +3538,7 @@ public void shouldUpdateExistingPublicationForConfiguredPartitionedTables() thro
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.part")
.with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, PostgresConnectorConfig.AutoCreateMode.FILTERED.getValue());
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -3010,8 +3559,8 @@ public void shouldUpdateExistingPublicationForConfiguredPartitionedTables() thro
assertThat(part1recs).isNull();
assertThat(part2recs).isNull();
- VerifyRecord.isValidInsert(recs.get(0), PK_FIELD, 1);
- VerifyRecord.isValidInsert(recs.get(1), PK_FIELD, 501);
+ YBVerifyRecord.isValidInsert(recs.get(0), PK_FIELD, 1);
+ YBVerifyRecord.isValidInsert(recs.get(1), PK_FIELD, 501);
}
@Test
@@ -3024,7 +3573,7 @@ public void shouldEmitNoEventsForSkippedCreateOperations() throws Exception {
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.SKIPPED_OPERATIONS, Envelope.Operation.UPDATE.code())
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
assertNoRecordsToConsume();
@@ -3053,6 +3602,36 @@ public void shouldEmitNoEventsForSkippedCreateOperations() throws Exception {
}
+ @Test
+ public void nonSuperUserSnapshotAndStreaming() throws Exception {
+ TestHelper.executeDDL("replication_role_user.ddl");
+ TestHelper.execute(SETUP_TABLES_STMT);
+
+ // Only tables owned by the connector user can be added to the publication
+ TestHelper.execute("GRANT USAGE ON SCHEMA s1 to ybpgconn");
+ TestHelper.execute("GRANT USAGE ON SCHEMA s2 to ybpgconn");
+ TestHelper.execute("ALTER TABLE s1.a OWNER TO ybpgconn");
+ TestHelper.execute("ALTER TABLE s2.a OWNER TO ybpgconn");
+
+ // Start the connector with the non super user
+ Configuration.Builder configBuilderInitial = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.USER, "ybpgconn")
+ .with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, PostgresConnectorConfig.AutoCreateMode.FILTERED)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE);
+
+ start(YugabyteDBConnector.class, configBuilderInitial.build());
+ assertConnectorIsRunning();
+
+ // insert some more records - these should not be part of the snapshot
+ TestHelper.execute(INSERT_STMT);
+
+ assertRecordsFromSnapshot(2, 1, 1);
+ assertRecordsAfterInsert(2, 2, 2);
+
+ TestHelper.execute("REVOKE CREATE ON DATABASE yugabyte FROM ybpgconn");
+ }
+
private CompletableFuture<Void> batchInsertRecords(long recordsCount, int batchSize) {
String insertStmt = "INSERT INTO text_table(j, jb, x, u) " +
"VALUES ('{\"bar\": \"baz\"}'::json, '{\"bar\": \"baz\"}'::jsonb, " +
@@ -3094,7 +3673,7 @@ private List getSequence(SourceRecord record) {
@FixFor("DBZ-2911")
public void shouldHaveLastCommitLsn() throws InterruptedException {
TestHelper.execute(SETUP_TABLES_STMT);
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.PROVIDE_TRANSACTION_METADATA, true)
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA.getValue())
.build());
@@ -3147,7 +3726,7 @@ public void testCreateNumericReplicationSlotName() throws Exception {
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SLOT_NAME, "12345");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
waitForStreamingRunning();
assertConnectorIsRunning();
}
@@ -3158,7 +3737,7 @@ public void testStreamingWithNumericReplicationSlotName() throws Exception {
TestHelper.execute(SETUP_TABLES_STMT);
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SLOT_NAME, "12345");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
waitForStreamingRunning();
assertConnectorIsRunning();
@@ -3176,12 +3755,13 @@ public void testStreamingWithNumericReplicationSlotName() throws Exception {
assertInsert(recordsForTopic.get(3), PK_FIELD, 203);
}
+ @Ignore("Enum datatype not supported yet")
@Test
@FixFor("DBZ-5204")
public void testShouldNotCloseConnectionFetchingMetadataWithNewDataTypes() throws Exception {
TestHelper.execute(CREATE_TABLES_STMT);
Configuration config = TestHelper.defaultConfig().build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning();
assertConnectorIsRunning();
@@ -3212,7 +3792,7 @@ public void shouldReselectToastColumnsOnPrimaryKeyChange() throws Exception {
TestHelper.execute("INSERT INTO s1.dbz5295 (pk,data,data2) values (1,'" + toastValue1 + "','" + toastValue2 + "');");
Configuration config = TestHelper.defaultConfig().build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning();
SourceRecords records = consumeRecordsByTopic(1);
@@ -3221,9 +3801,9 @@ public void shouldReselectToastColumnsOnPrimaryKeyChange() throws Exception {
SourceRecord record = recordsForTopic.get(0);
Struct after = ((Struct) record.value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(after.get("pk")).isEqualTo(1);
- assertThat(after.get("data")).isEqualTo(toastValue1);
- assertThat(after.get("data2")).isEqualTo(toastValue2);
+ assertThat(after.getStruct("pk").get("value")).isEqualTo(1);
+ assertThat(after.getStruct("data").get("value")).isEqualTo(toastValue1);
+ assertThat(after.getStruct("data2").get("value")).isEqualTo(toastValue2);
TestHelper.execute("UPDATE s1.dbz5295 SET pk = 2 WHERE pk = 1;");
@@ -3234,21 +3814,21 @@ public void shouldReselectToastColumnsOnPrimaryKeyChange() throws Exception {
// First event: DELETE
record = recordsForTopic.get(0);
- VerifyRecord.isValidDelete(record, "pk", 1);
+ YBVerifyRecord.isValidDelete(record, "pk", 1);
after = ((Struct) record.value()).getStruct(Envelope.FieldName.AFTER);
assertThat(after).isNull();
// Second event: TOMBSTONE
record = recordsForTopic.get(1);
- VerifyRecord.isValidTombstone(record);
+ YBVerifyRecord.isValidTombstone(record);
// Third event: CREATE
record = recordsForTopic.get(2);
- VerifyRecord.isValidInsert(record, "pk", 2);
+ YBVerifyRecord.isValidInsert(record, "pk", 2);
after = ((Struct) record.value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(after.get("pk")).isEqualTo(2);
- assertThat(after.get("data")).isEqualTo(toastValue1);
- assertThat(after.get("data2")).isEqualTo(toastValue2);
+ assertThat(after.getStruct("pk").get("value")).isEqualTo(2);
+ assertThat(after.getStruct("data").get("value")).isEqualTo(toastValue1);
+ assertThat(after.getStruct("data2").get("value")).isEqualTo(toastValue2);
}
@Test
@@ -3262,7 +3842,7 @@ public void shouldSuppressLoggingOptionalOfExcludedColumns() throws Exception {
Configuration config = TestHelper.defaultConfig()
.with("column.exclude.list", "s1.dbz5783.data")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
@@ -3276,6 +3856,7 @@ public void shouldSuppressLoggingOptionalOfExcludedColumns() throws Exception {
assertThat(logInterceptor.containsMessage("Column 'data' optionality could not be determined, defaulting to true")).isFalse();
}
+ @Ignore("YB: YB doesn't support the way of initial_only snapshot this connector uses, see https://github.com/yugabyte/yugabyte-db/issues/21425")
@Test
@FixFor("DBZ-5739")
@SkipWhenDatabaseVersion(check = LESS_THAN, major = 11, reason = "This needs pg_replication_slot_advance which is supported only on Postgres 11+")
@@ -3288,7 +3869,7 @@ public void shouldStopConnectorOnSlotRecreation() throws InterruptedException {
.with(CommonConnectorConfig.SNAPSHOT_MODE_TABLES, "s1.a")
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
consumeRecordsByTopic(1);
@@ -3303,7 +3884,7 @@ public void shouldStopConnectorOnSlotRecreation() throws InterruptedException {
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.SLOT_SEEK_TO_KNOWN_OFFSET, Boolean.TRUE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
Awaitility.await().atMost(TestHelper.waitTimeForRecords() * 5, TimeUnit.SECONDS)
.until(() -> logInterceptor.containsStacktraceElement("Cannot seek to the last known offset "));
assertConnectorNotRunning();
@@ -3321,7 +3902,7 @@ public void shouldSeekToCorrectOffset() throws InterruptedException {
.with(CommonConnectorConfig.SNAPSHOT_MODE_TABLES, "s1.a")
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
consumeRecordsByTopic(1);
@@ -3336,7 +3917,7 @@ public void shouldSeekToCorrectOffset() throws InterruptedException {
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.SLOT_SEEK_TO_KNOWN_OFFSET, Boolean.TRUE);
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
consumeRecordsByTopic(1);
assertConnectorIsRunning();
@@ -3375,7 +3956,7 @@ public void shouldInvokeSnapshotterAbortedMethod() throws Exception {
}
};
- start(PostgresConnector.class, configBuilder.build(), completionCallback, stopOnPKPredicate(1));
+ start(YugabyteDBConnector.class, configBuilder.build(), completionCallback, stopOnPKPredicate(1));
// wait until we know we've raised the exception at startup AND the engine has been shutdown
if (!latch.await(TestHelper.waitTimeForRecords() * 5, TimeUnit.SECONDS)) {
@@ -3408,7 +3989,7 @@ public void shouldThrowRightExceptionWhenNoCustomSnapshotClassProvided() {
Configuration config = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.CUSTOM.getValue())
.build();
- start(PostgresConnector.class, config, (success, msg, err) -> {
+ start(YugabyteDBConnector.class, config, (success, msg, err) -> {
error.set(err);
message.set(msg);
status.set(success);
@@ -3438,7 +4019,7 @@ public void shouldIncludeTableWithBackSlashInName() throws Exception {
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.back\\\\slash");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
TestHelper.execute("INSERT INTO s1.\"back\\slash\" (aa, bb) VALUES (3, 3);");
@@ -3450,10 +4031,10 @@ public void shouldIncludeTableWithBackSlashInName() throws Exception {
AtomicInteger pkValue = new AtomicInteger(1);
records.forEach(record -> {
if (pkValue.get() <= 2) {
- VerifyRecord.isValidRead(record, PK_FIELD, pkValue.getAndIncrement());
+ YBVerifyRecord.isValidRead(record, PK_FIELD, pkValue.getAndIncrement());
}
else {
- VerifyRecord.isValidInsert(record, PK_FIELD, pkValue.getAndIncrement());
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, pkValue.getAndIncrement());
}
});
}
@@ -3466,7 +4047,7 @@ public void shouldAddNewFieldToSourceInfo() throws InterruptedException {
"CREATE SCHEMA IF NOT EXISTS s1;",
"CREATE TABLE s1.DBZ6076 (pk SERIAL, aa integer, PRIMARY KEY(pk));",
"INSERT INTO s1.DBZ6076 (aa) VALUES (1);");
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.name())
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.DBZ6076")
.with(PostgresConnectorConfig.SOURCE_INFO_STRUCT_MAKER, CustomPostgresSourceInfoStructMaker.class.getName())
@@ -3492,11 +4073,23 @@ public void shouldAddNewFieldToSourceInfo() throws InterruptedException {
});
}
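+ // YB Note: exclude heartbeat topic records so that tests only count data change events.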
+ @Override
+ protected int consumeAvailableRecords(Consumer<SourceRecord> recordConsumer) {
+ List<SourceRecord> records = consumedLines
+ .stream()
+ .filter(r -> !r.topic().equals(TestHelper.getDefaultHeartbeatTopic()))
+ .collect(Collectors.toList());
+ if (recordConsumer != null) {
+ records.forEach(recordConsumer);
+ }
+ return records.size();
+ }
+
@Test
@FixFor("DBZ-6076")
public void shouldUseDefaultSourceInfoStructMaker() throws InterruptedException {
TestHelper.execute(SETUP_TABLES_STMT);
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.build());
assertConnectorIsRunning();
@@ -3526,7 +4119,7 @@ public void shouldFailWhenReadOnlyIsNotSupported() {
PostgresDatabaseVersionResolver databaseVersionResolver = new PostgresDatabaseVersionResolver();
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.READ_ONLY_CONNECTION, true)
.build(), (success, message, error) -> {
@@ -3558,12 +4151,12 @@ private void assertRecordsFromSnapshot(int expectedCount, int... pks) throws Int
List recordsForTopicS1 = actualRecords.recordsForTopic(topicName("s1.a"));
assertThat(recordsForTopicS1.size()).isEqualTo(expectedCountPerSchema);
IntStream.range(0, expectedCountPerSchema)
- .forEach(i -> VerifyRecord.isValidRead(recordsForTopicS1.remove(0), PK_FIELD, pks[i]));
+ .forEach(i -> YBVerifyRecord.isValidRead(recordsForTopicS1.remove(0), PK_FIELD, pks[i]));
List recordsForTopicS2 = actualRecords.recordsForTopic(topicName("s2.a"));
assertThat(recordsForTopicS2.size()).isEqualTo(expectedCountPerSchema);
IntStream.range(0, expectedCountPerSchema)
- .forEach(i -> VerifyRecord.isValidRead(recordsForTopicS2.remove(0), PK_FIELD, pks[i + expectedCountPerSchema]));
+ .forEach(i -> YBVerifyRecord.isValidRead(recordsForTopicS2.remove(0), PK_FIELD, pks[i + expectedCountPerSchema]));
}
private void assertRecordsAfterInsert(int expectedCount, int... pks) throws InterruptedException {
@@ -3572,14 +4165,15 @@ private void assertRecordsAfterInsert(int expectedCount, int... pks) throws Inte
// we have 2 schemas
int expectedCountPerSchema = expectedCount / 2;
+ LOGGER.info("Expected count per schema: {}", expectedCountPerSchema);
List recordsForTopicS1 = actualRecords.recordsForTopic(topicName("s1.a"));
assertThat(recordsForTopicS1.size()).isEqualTo(expectedCountPerSchema);
- IntStream.range(0, expectedCountPerSchema).forEach(i -> VerifyRecord.isValidInsert(recordsForTopicS1.remove(0), PK_FIELD, pks[i]));
+ IntStream.range(0, expectedCountPerSchema).forEach(i -> YBVerifyRecord.isValidInsert(recordsForTopicS1.remove(0), PK_FIELD, pks[i]));
List recordsForTopicS2 = actualRecords.recordsForTopic(topicName("s2.a"));
assertThat(recordsForTopicS2.size()).isEqualTo(expectedCountPerSchema);
- IntStream.range(0, expectedCountPerSchema).forEach(i -> VerifyRecord.isValidInsert(recordsForTopicS2.remove(0), PK_FIELD, pks[i]));
+ IntStream.range(0, expectedCountPerSchema).forEach(i -> YBVerifyRecord.isValidInsert(recordsForTopicS2.remove(0), PK_FIELD, pks[i]));
}
protected void assertSourceInfoMillisecondTransactionTimestamp(SourceRecord record, long ts_ms, long tolerance_ms) {
@@ -3609,6 +4203,43 @@ private void validateConfigField(Config config, Field field, T expectedValue
}
}
+ @Override
+ protected int consumeRecordsUntil(BiPredicate<Integer, SourceRecord> condition,
+ BiFunction<Integer, SourceRecord, String> logMessage,
+ int breakAfterNulls, Consumer<SourceRecord> recordConsumer,
+ boolean assertRecords)
+ throws InterruptedException {
+ int recordsConsumed = 0;
+ int nullReturn = 0;
+ boolean isLastRecord = false;
+ while (!isLastRecord && isEngineRunning.get()) {
+ SourceRecord record = consumedLines.poll(pollTimeoutInMs, TimeUnit.MILLISECONDS);
+
+ // YB Note: Ignore heartbeat records while consuming.
+ if (record != null && record.topic().equals(TestHelper.getDefaultHeartbeatTopic())) {
+ continue;
+ }
+
+ if (record != null) {
+ nullReturn = 0;
+ ++recordsConsumed;
+ if (recordConsumer != null) {
+ recordConsumer.accept(record);
+ }
+ if (assertRecords) {
+ YBVerifyRecord.isValid(record, /* skipAvroValidation */ false);
+ }
+ isLastRecord = condition.test(recordsConsumed, record);
+ }
+ else {
+ if (++nullReturn >= breakAfterNulls) {
+ return recordsConsumed;
+ }
+ }
+ }
+ return recordsConsumed;
+ }
+
private void validateFieldDef(Field expected) {
ConfigDef configDef = connector.config();
assertThat(configDef.names()).contains(expected.name());
@@ -3635,4 +4266,21 @@ private void waitForSnapshotToBeCompleted() throws InterruptedException {
private void waitForStreamingRunning() throws InterruptedException {
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
}
+
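+ // YB Note: sleep for a fixed 10 seconds before asserting, presumably to give the
+ // connector enough time to finish starting up.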
+ @Override
+ protected void assertConnectorIsRunning() {
+ try {
+ Thread.sleep(10_000);
+ }
+ catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+
+ super.assertConnectorIsRunning();
+ }
+
+ @Override
+ protected void assertInsert(SourceRecord record, String pkField, int pk) {
+ YBVerifyRecord.isValidInsert(record, pkField, pk);
+ }
}
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresDefaultValueConverterIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresDefaultValueConverterIT.java
index 6d40c29deb7..c9ede8bf2b3 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresDefaultValueConverterIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresDefaultValueConverterIT.java
@@ -34,6 +34,8 @@
import io.debezium.junit.SkipWhenDatabaseVersion;
import io.debezium.junit.logging.LogInterceptor;
+// TODO Vaibhav: Enabling this test doesn't make sense unless we populate the default values of the
+// columns in the schema.
public class PostgresDefaultValueConverterIT extends AbstractConnectorTest {
@Before
@@ -58,7 +60,7 @@ public void shouldSetTheNullValueInSnapshot() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted("postgres", TestHelper.TEST_SERVER);
@@ -75,7 +77,7 @@ public void shouldSetTheNullValueInStreaming() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL.getValue())
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "s1");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted("postgres", TestHelper.TEST_SERVER);
@@ -105,7 +107,7 @@ public void testShouldHandleDefaultValueFunctionsWithSchemaPrefixes() throws Exc
TestHelper.execute(ddl);
Configuration config = TestHelper.defaultConfig().build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
@@ -142,7 +144,7 @@ public void shouldTruncateDefaultValuePrecisionToMatchColumnMaxPrecision() throw
TestHelper.execute(ddl);
Configuration config = TestHelper.defaultConfig().build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
@@ -181,7 +183,7 @@ public void shouldSerializeHstoreDefaultAsJsonStringWhenUnavailable() throws Exc
TestHelper.execute(ddl);
Configuration config = TestHelper.defaultConfig().build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
@@ -223,7 +225,7 @@ public void shouldSerializeHstoreDefaultAsMapWhenUnavailable() throws Exception
Configuration config = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.HSTORE_HANDLING_MODE, PostgresConnectorConfig.HStoreHandlingMode.MAP.getValue())
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
@@ -272,17 +274,18 @@ private void createTableAndInsertData() {
private void assertDefaultValueChangeRecord(SourceRecord sourceRecord) {
final Schema valueSchema = sourceRecord.valueSchema();
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt32("dint")).isNull();
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc1")).isNull();
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc2")).isEqualTo("NULL");
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc3")).isEqualTo("MYVALUE");
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc4")).isEqualTo("NULL");
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc5")).isEqualTo("NULL::character varying");
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getString("dvc6")).isNull();
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt64("dt1")).isNotNull();
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt32("dt2")).isNotNull();
- assertThat(((Struct) sourceRecord.value()).getStruct("after").getInt64("dt3")).isNotNull();
-
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dint").get("value")).isNull();
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dvc1").get("value")).isNull();
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dvc2").get("value")).isEqualTo("NULL");
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dvc3").get("value")).isEqualTo("MYVALUE");
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dvc4").get("value")).isEqualTo("NULL");
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dvc5").get("value")).isEqualTo("NULL::character varying");
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dvc6").get("value")).isNull();
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dt1").get("value")).isNotNull();
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dt2").get("value")).isNotNull();
+ assertThat(((Struct) sourceRecord.value()).getStruct("after").getStruct("dt3").get("value")).isNotNull();
+
+ // YB Note: We do not populate the default value while sending replication messages.
assertThat(valueSchema.field("after").schema().field("dint").schema().defaultValue()).isNull();
assertThat(valueSchema.field("after").schema().field("dvc1").schema().defaultValue()).isNull();
assertThat(valueSchema.field("after").schema().field("dvc2").schema().defaultValue()).isEqualTo("NULL");
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresErrorHandlerTest.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresErrorHandlerTest.java
index de8d35bf0d2..1c812fc10c8 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresErrorHandlerTest.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresErrorHandlerTest.java
@@ -8,8 +8,8 @@
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.Test;
-import org.postgresql.util.PSQLException;
-import org.postgresql.util.PSQLState;
+import com.yugabyte.util.PSQLException;
+import com.yugabyte.util.PSQLState;
import io.debezium.DebeziumException;
import io.debezium.config.CommonConnectorConfig;
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresMetricsIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresMetricsIT.java
index 72e2f92d872..9b6fe09811d 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresMetricsIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresMetricsIT.java
@@ -59,9 +59,9 @@ public void after() throws Exception {
@Test
public void testLifecycle() throws Exception {
// start connector
- start(PostgresConnector.class,
+ start(YugabyteDBConnector.class,
TestHelper.defaultConfig()
- .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.ALWAYS)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.build());
@@ -100,7 +100,7 @@ public void testSnapshotOnlyMetrics() throws Exception {
TestHelper.execute(INIT_STATEMENTS, INSERT_STATEMENTS);
// start connector
- start(PostgresConnector.class,
+ start(YugabyteDBConnector.class,
TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY)
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
@@ -115,9 +115,9 @@ public void testSnapshotAndStreamingMetrics() throws Exception {
TestHelper.execute(INIT_STATEMENTS, INSERT_STATEMENTS);
// start connector
- start(PostgresConnector.class,
+ start(YugabyteDBConnector.class,
TestHelper.defaultConfig()
- .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.ALWAYS)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.build());
@@ -133,12 +133,12 @@ public void testSnapshotAndStreamingWithCustomMetrics() throws Exception {
// start connector
Configuration config = TestHelper.defaultConfig()
- .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.ALWAYS)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.CUSTOM_METRIC_TAGS, "env=test,bu=bigdata")
.build();
Map<String, String> customMetricTags = new PostgresConnectorConfig(config).getCustomMetricTags();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertSnapshotWithCustomMetrics(customMetricTags);
assertStreamingWithCustomMetrics(customMetricTags);
@@ -150,7 +150,7 @@ public void testStreamingOnlyMetrics() throws Exception {
TestHelper.execute(INIT_STATEMENTS);
// start connector
- start(PostgresConnector.class,
+ start(YugabyteDBConnector.class,
TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA)
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
@@ -275,7 +275,7 @@ public void oneRecordInQueue() throws Exception {
.with(PostgresConnectorConfig.MAX_BATCH_SIZE, 1)
.with(PostgresConnectorConfig.POLL_INTERVAL_MS, 100L)
.with(PostgresConnectorConfig.MAX_QUEUE_SIZE_IN_BYTES, 10000L);
- start(PostgresConnector.class, configBuilder.build(), loggingCompletion(), null, x -> {
+ start(YugabyteDBConnector.class, configBuilder.build(), loggingCompletion(), null, x -> {
LOGGER.info("Record '{}' arrived", x);
step1.countDown();
try {
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresMoneyIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresMoneyIT.java
index 353799c9c99..6667d46ac8e 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresMoneyIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresMoneyIT.java
@@ -16,6 +16,7 @@
import org.apache.kafka.connect.source.SourceRecord;
import org.junit.After;
import org.junit.Before;
+import org.junit.Ignore;
import org.junit.Test;
import io.debezium.config.Configuration;
@@ -44,6 +45,7 @@ public void after() {
TestHelper.dropPublication();
}
+ @Ignore("YB Note: Decimal handling mode precise unsupported")
@Test
@FixFor("DBZ-5991")
public void shouldReceiveChangesForInsertsWithPreciseMode() throws Exception {
@@ -52,7 +54,7 @@ public void shouldReceiveChangesForInsertsWithPreciseMode() throws Exception {
Configuration config = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NO_DATA)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
// insert 2 records for testing
@@ -78,7 +80,7 @@ public void shouldReceiveChangesForInsertsWithStringMode() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NO_DATA)
.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, "string")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
// insert 2 records for testing
@@ -90,9 +92,9 @@ public void shouldReceiveChangesForInsertsWithStringMode() throws Exception {
assertThat(recordsForTopic).hasSize(2);
Struct after = ((Struct) recordsForTopic.get(0).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(after.get("m")).isEqualTo("-92233720368547758.08");
+ assertThat(after.getStruct("m").get("value")).isEqualTo("-92233720368547758.08");
after = ((Struct) recordsForTopic.get(1).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(after.get("m")).isEqualTo("92233720368547758.07");
+ assertThat(after.getStruct("m").get("value")).isEqualTo("92233720368547758.07");
}
@Test
@@ -104,7 +106,7 @@ public void shouldReceiveChangesForInsertsWithDoubleMode() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NO_DATA)
.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, "double")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
// insert 2 records for testing
@@ -116,11 +118,12 @@ public void shouldReceiveChangesForInsertsWithDoubleMode() throws Exception {
assertThat(recordsForTopic).hasSize(2);
Struct after = ((Struct) recordsForTopic.get(0).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(after.get("m")).isEqualTo(-92233720368547758.00);
+ assertThat(after.getStruct("m").get("value")).isEqualTo(-92233720368547758.00);
after = ((Struct) recordsForTopic.get(1).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(after.get("m")).isEqualTo(92233720368547758.00);
+ assertThat(after.getStruct("m").get("value")).isEqualTo(92233720368547758.00);
}
+ @Ignore("YB Note: Decimal handling mode precise unsupported")
@Test
@FixFor("DBZ-6001")
public void shouldReceiveChangesForInsertNullAndZeroMoney() throws Exception {
@@ -129,7 +132,7 @@ public void shouldReceiveChangesForInsertNullAndZeroMoney() throws Exception {
Configuration config = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NO_DATA)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
// insert 2 records for testing
@@ -158,7 +161,7 @@ public void shouldReceiveCorrectDefaultValueForHandlingMode() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_SELECT_STATEMENT_OVERRIDES_BY_TABLE + ".post_money.debezium_test",
"SELECT id, null AS m FROM post_money.debezium_test")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
var records = consumeRecordsByTopic(1);
var recordsForTopic = records.recordsForTopic(topicName("post_money.debezium_test"));
@@ -175,7 +178,7 @@ public void shouldReceiveCorrectDefaultValueForHandlingMode() throws Exception {
.with(PostgresConnectorConfig.SNAPSHOT_SELECT_STATEMENT_OVERRIDES_BY_TABLE + ".post_money.debezium_test",
"SELECT id, null AS m FROM post_money.debezium_test")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
records = consumeRecordsByTopic(1);
recordsForTopic = records.recordsForTopic(topicName("post_money.debezium_test"));
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresReselectColumnsProcessorIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresReselectColumnsProcessorIT.java
index 7bcfdf5d8f8..bee02bac636 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresReselectColumnsProcessorIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresReselectColumnsProcessorIT.java
@@ -36,7 +36,7 @@
*
* @author Chris Cranford
*/
-public class PostgresReselectColumnsProcessorIT extends AbstractReselectProcessorTest<PostgresConnector> {
+public class PostgresReselectColumnsProcessorIT extends AbstractReselectProcessorTest<YugabyteDBConnector> {
public static final String CREATE_STMT = "DROP SCHEMA IF EXISTS s1 CASCADE;" +
"CREATE SCHEMA s1; ";
@@ -59,8 +59,8 @@ public void afterEach() throws Exception {
}
@Override
- protected Class<PostgresConnector> getConnectorClass() {
- return PostgresConnector.class;
+ protected Class<YugabyteDBConnector> getConnectorClass() {
+ return YugabyteDBConnector.class;
}
@Override
@@ -127,7 +127,7 @@ public void testToastColumnReselectedWhenJsonbValueIsUnavailable() throws Except
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1\\.dbz8168_toast")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingStarted();
final String json = "{\"key\": \"" + RandomStringUtils.randomAlphabetic(10000) + "\"}";
@@ -168,7 +168,7 @@ public void testToastColumnReselectedWhenValueIsUnavailable() throws Exception {
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1\\.dbz4321_toast")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingStarted();
final String text = RandomStringUtils.randomAlphabetic(10000);
@@ -212,7 +212,7 @@ public void testToastColumnHstoreAsMapReselectedWhenValueIsUnavailable() throws
.with(PostgresConnectorConfig.HSTORE_HANDLING_MODE, MAP.getValue())
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingStarted();
TestHelper.execute(
@@ -256,7 +256,7 @@ public void testToastColumnHstoreAsJsonReselectedWhenValueIsUnavailable() throws
.with(PostgresConnectorConfig.HSTORE_HANDLING_MODE, JSON.getValue())
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingStarted();
TestHelper.execute(
@@ -303,7 +303,7 @@ public void testToastColumnArrayReselectedWhenValueIsUnavailable() throws Except
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, "true")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingStarted();
TestHelper.execute(
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresShutdownIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresShutdownIT.java
index 97c4cf9bdd8..b99fd872b70 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresShutdownIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresShutdownIT.java
@@ -33,7 +33,7 @@
import io.debezium.testing.testcontainers.util.ContainerImageVersions;
/**
- * Integration test for {@link PostgresConnector} using an {@link EmbeddedEngine} and Testcontainers infrastructure for when Postgres is shutdown during streaming
+ * Integration test for {@link YugabyteDBConnector} using an {@link EmbeddedEngine} and Testcontainers infrastructure, covering the case where Postgres is shut down during streaming
*/
public class PostgresShutdownIT extends AbstractConnectorTest {
@@ -113,7 +113,7 @@ public void shouldStopOnPostgresFastShutdown() throws Exception {
String initialHeartbeat = postgresConnection.queryAndMap(
"SELECT ts FROM s1.heartbeat;",
postgresConnection.singleResultMapper(rs -> rs.getString("ts"), "Could not fetch keepalive info"));
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted("postgres", TestHelper.TEST_SERVER);
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresSkipMessagesWithoutChangeConfigIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresSkipMessagesWithoutChangeConfigIT.java
index 5804d7d46fb..92bfd56f64a 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresSkipMessagesWithoutChangeConfigIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PostgresSkipMessagesWithoutChangeConfigIT.java
@@ -58,7 +58,7 @@ public void shouldSkipEventsWithNoChangeInIncludedColumnsWhenSkipEnabled() throw
.with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NO_DATA)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
TestHelper.execute("INSERT INTO updates_test.debezium_test (id,white,black) VALUES (1,1,1);");
@@ -77,9 +77,9 @@ public void shouldSkipEventsWithNoChangeInIncludedColumnsWhenSkipEnabled() throw
final List<SourceRecord> recordsForTopic = records.recordsForTopic(topicName("updates_test.debezium_test"));
assertThat(recordsForTopic).hasSize(3);
Struct secondMessage = ((Struct) recordsForTopic.get(1).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(secondMessage.get("white")).isEqualTo(2);
+ assertThat(secondMessage.getStruct("white").getInt32("value")).isEqualTo(2);
Struct thirdMessage = ((Struct) recordsForTopic.get(2).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(thirdMessage.get("white")).isEqualTo(3);
+ assertThat(thirdMessage.getStruct("white").getInt32("value")).isEqualTo(3);
}
@Test
@@ -98,7 +98,7 @@ public void shouldSkipEventsWithNoChangeInIncludedColumnsWhenSkipEnabledWithExcl
.with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NO_DATA)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
TestHelper.execute("INSERT INTO updates_test.debezium_test (id,white,black) VALUES (1,1,1);");
@@ -117,9 +117,9 @@ public void shouldSkipEventsWithNoChangeInIncludedColumnsWhenSkipEnabledWithExcl
final List<SourceRecord> recordsForTopic = records.recordsForTopic(topicName("updates_test.debezium_test"));
assertThat(recordsForTopic).hasSize(3);
Struct secondMessage = ((Struct) recordsForTopic.get(1).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(secondMessage.get("white")).isEqualTo(2);
+ assertThat(secondMessage.getStruct("white").getInt32("value")).isEqualTo(2);
Struct thirdMessage = ((Struct) recordsForTopic.get(2).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(thirdMessage.get("white")).isEqualTo(3);
+ assertThat(thirdMessage.getStruct("white").getInt32("value")).isEqualTo(3);
}
@Test
@@ -136,7 +136,7 @@ public void shouldNotSkipEventsWithNoChangeInIncludedColumnsWhenSkipEnabledButTa
.with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NO_DATA)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
TestHelper.execute("INSERT INTO updates_test.debezium_test (id,white,black) VALUES (1,1,1);");
@@ -154,11 +154,11 @@ public void shouldNotSkipEventsWithNoChangeInIncludedColumnsWhenSkipEnabledButTa
final List<SourceRecord> recordsForTopic = records.recordsForTopic(topicName("updates_test.debezium_test"));
assertThat(recordsForTopic).hasSize(4);
Struct secondMessage = ((Struct) recordsForTopic.get(1).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(secondMessage.get("white")).isEqualTo(1);
+ assertThat(secondMessage.getStruct("white").getInt32("value")).isEqualTo(1);
Struct thirdMessage = ((Struct) recordsForTopic.get(2).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(thirdMessage.get("white")).isEqualTo(2);
+ assertThat(thirdMessage.getStruct("white").getInt32("value")).isEqualTo(2);
Struct forthMessage = ((Struct) recordsForTopic.get(3).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(forthMessage.get("white")).isEqualTo(3);
+ assertThat(forthMessage.getStruct("white").getInt32("value")).isEqualTo(3);
}
@Test
@@ -177,7 +177,7 @@ public void shouldNotSkipEventsWithNoChangeInIncludedColumnsWhenSkipDisabled() t
.with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NO_DATA)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
TestHelper.execute("INSERT INTO updates_test.debezium_test (id,white,black) VALUES (1,1,1);");
@@ -196,11 +196,11 @@ public void shouldNotSkipEventsWithNoChangeInIncludedColumnsWhenSkipDisabled() t
assertThat(recordsForTopic).hasSize(4);
Struct secondMessage = ((Struct) recordsForTopic.get(1).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(secondMessage.get("white")).isEqualTo(1);
+ assertThat(secondMessage.getStruct("white").getInt32("value")).isEqualTo(1);
Struct thirdMessage = ((Struct) recordsForTopic.get(2).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(thirdMessage.get("white")).isEqualTo(2);
+ assertThat(thirdMessage.getStruct("white").getInt32("value")).isEqualTo(2);
Struct forthMessage = ((Struct) recordsForTopic.get(3).value()).getStruct(Envelope.FieldName.AFTER);
- assertThat(forthMessage.get("white")).isEqualTo(3);
+ assertThat(forthMessage.getStruct("white").getInt32("value")).isEqualTo(3);
}
}
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PublicGeometryIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PublicGeometryIT.java
index 03db2a616f0..648947f8e9e 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PublicGeometryIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/PublicGeometryIT.java
@@ -90,7 +90,7 @@ public void shouldReceiveChangesForInsertsWithPostgisTypes() throws Exception {
}
private void setupRecordsProducer(Configuration.Builder config) {
- start(PostgresConnector.class, config
+ start(YugabyteDBConnector.class, config
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA)
.build());
assertConnectorIsRunning();
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/RecordsSnapshotProducerIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/RecordsSnapshotProducerIT.java
index 7aa793a89f7..77d5483ab63 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/RecordsSnapshotProducerIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/RecordsSnapshotProducerIT.java
@@ -1266,7 +1266,7 @@ public void shouldIncludePartitionedTableIntoSnapshot() throws Exception {
Configuration.Builder configBuilder = TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY.getValue())
.with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "s1.part");
- start(PostgresConnector.class, configBuilder.build());
+ start(YugabyteDBConnector.class, configBuilder.build());
assertConnectorIsRunning();
waitForSnapshotToBeCompleted();
@@ -1320,7 +1320,7 @@ public void shouldGenerateSnapshotForGisDataTypes() throws Exception {
private void buildNoStreamProducer(Configuration.Builder config) {
alterConfig(config);
- start(PostgresConnector.class, config
+ start(YugabyteDBConnector.class, config
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY)
.with(PostgresConnectorConfig.SNAPSHOT_MODE_CUSTOM_NAME, CustomTestSnapshot.class.getName())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
@@ -1330,7 +1330,7 @@ private void buildNoStreamProducer(Configuration.Builder config) {
private void buildWithStreamProducer(Configuration.Builder config) {
alterConfig(config);
- start(PostgresConnector.class, config
+ start(YugabyteDBConnector.class, config
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.ALWAYS)
.with(PostgresConnectorConfig.SNAPSHOT_MODE_CUSTOM_NAME, CustomTestSnapshot.class.getName())
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/RecordsStreamProducerIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/RecordsStreamProducerIT.java
index 33a1cd8a113..4dbcabb7083 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/RecordsStreamProducerIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/RecordsStreamProducerIT.java
@@ -62,7 +62,7 @@
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
-import org.postgresql.util.PSQLException;
+import com.yugabyte.util.PSQLException;
import io.debezium.config.CommonConnectorConfig;
import io.debezium.config.CommonConnectorConfig.BinaryHandlingMode;
@@ -152,7 +152,7 @@ public void before() throws Exception {
private void startConnector(Function<Configuration.Builder, Configuration.Builder> customConfig, boolean waitForSnapshot, Predicate<SourceRecord> isStopRecord)
throws InterruptedException {
- start(PostgresConnector.class, new PostgresConnectorConfig(customConfig.apply(TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, new PostgresConnectorConfig(customConfig.apply(TestHelper.defaultConfig()
.with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
.with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
.with(PostgresConnectorConfig.SNAPSHOT_MODE, waitForSnapshot ? SnapshotMode.INITIAL : SnapshotMode.NO_DATA))
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/SignalsIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/SignalsIT.java
index d5fd953957d..c79875fa170 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/SignalsIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/SignalsIT.java
@@ -82,7 +82,7 @@ private void signalLog(boolean includingEscapedCharacter) throws InterruptedExce
.with(PostgresConnectorConfig.SIGNAL_DATA_COLLECTION, includingEscapedCharacter ? signalTableWithEscapedCharacter : signalTable)
.with(CommonConnectorConfig.SIGNAL_POLL_INTERVAL_MS, "500")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
TestHelper.waitForDefaultReplicationSlotBeActive();
@@ -119,7 +119,7 @@ public void signalingDisabled() throws InterruptedException {
.with(CommonConnectorConfig.SIGNAL_POLL_INTERVAL_MS, "500")
.with(CommonConnectorConfig.SIGNAL_ENABLED_CHANNELS, "")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
TestHelper.waitForDefaultReplicationSlotBeActive();
@@ -152,7 +152,7 @@ public void signalSchemaChange() throws InterruptedException {
.with(PostgresConnectorConfig.SIGNAL_DATA_COLLECTION, "s1.debezium_signal")
.with(CommonConnectorConfig.SIGNAL_POLL_INTERVAL_MS, "500")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
TestHelper.waitForDefaultReplicationSlotBeActive();
@@ -232,7 +232,7 @@ public void jmxSignals() throws Exception {
.with(CommonConnectorConfig.SIGNAL_POLL_INTERVAL_MS, "500")
.with(CommonConnectorConfig.SIGNAL_ENABLED_CHANNELS, "jmx")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
TestHelper.waitForDefaultReplicationSlotBeActive();
@@ -258,7 +258,7 @@ public void customAction() throws Exception {
.with(CommonConnectorConfig.SIGNAL_POLL_INTERVAL_MS, "500")
.with(CommonConnectorConfig.SIGNAL_ENABLED_CHANNELS, "jmx")
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
TestHelper.waitForDefaultReplicationSlotBeActive();
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/SnapshotWithOverridesProducerIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/SnapshotWithOverridesProducerIT.java
index 60b654e387b..0257196eff4 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/SnapshotWithOverridesProducerIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/SnapshotWithOverridesProducerIT.java
@@ -86,7 +86,7 @@ public void shouldUseMultipleOverriddenSelectStatementsDuringSnapshotting() thro
}
private void buildProducer(Configuration.Builder config) {
- start(PostgresConnector.class, config
+ start(YugabyteDBConnector.class, config
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY)
.build());
assertConnectorIsRunning();
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TablesWithoutPrimaryKeyIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TablesWithoutPrimaryKeyIT.java
index b501749e93d..e4ea530ddd8 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TablesWithoutPrimaryKeyIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TablesWithoutPrimaryKeyIT.java
@@ -44,7 +44,7 @@ public void before() throws SQLException {
public void shouldProcessFromSnapshot() throws Exception {
TestHelper.execute(STATEMENTS);
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "nopk")
.build());
@@ -66,7 +66,7 @@ public void shouldProcessFromSnapshot() throws Exception {
public void shouldProcessFromSnapshotOld() throws Exception {
TestHelper.execute(STATEMENTS);
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL_ONLY)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "nopk")
.build());
@@ -86,7 +86,7 @@ public void shouldProcessFromSnapshotOld() throws Exception {
@Test
public void shouldProcessFromStreaming() throws Exception {
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "nopk")
.build());
@@ -125,7 +125,7 @@ public void shouldProcessFromStreaming() throws Exception {
@Test
public void shouldProcessFromStreamingOld() throws Exception {
- start(PostgresConnector.class, TestHelper.defaultConfig()
+ start(YugabyteDBConnector.class, TestHelper.defaultConfig()
.with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NO_DATA)
.with(PostgresConnectorConfig.SCHEMA_INCLUDE_LIST, "nopk")
.build());
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TestHelper.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TestHelper.java
index 4c23e87281e..a7fe8156248 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TestHelper.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TestHelper.java
@@ -26,9 +26,11 @@
import java.util.function.Predicate;
import java.util.stream.Collectors;
+import io.debezium.heartbeat.Heartbeat;
+import io.debezium.junit.logging.LogInterceptor;
import org.awaitility.Awaitility;
import org.awaitility.core.ConditionTimeoutException;
-import org.postgresql.jdbc.PgConnection;
+import com.yugabyte.jdbc.PgConnection;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -55,7 +57,7 @@ public final class TestHelper {
public static final String CONNECTION_TEST = "Debezium Test";
public static final String TEST_SERVER = "test_server";
- protected static final String TEST_DATABASE = "postgres";
+ protected static final String TEST_DATABASE = "yugabyte";
protected static final String PK_FIELD = "pk";
private static final String TEST_PROPERTY_PREFIX = "debezium.test.";
private static final Logger LOGGER = LoggerFactory.getLogger(TestHelper.class);
@@ -129,7 +131,7 @@ public static ReplicationConnection createForReplication(String slotName, boolea
*/
public static PostgresConnectorConfig.LogicalDecoder decoderPlugin() {
final String s = System.getProperty(PostgresConnectorConfig.PLUGIN_NAME.name());
- return (s == null || s.length() == 0) ? PostgresConnectorConfig.LogicalDecoder.DECODERBUFS : PostgresConnectorConfig.LogicalDecoder.parse(s);
+ return (s == null || s.length() == 0) ? PostgresConnectorConfig.LogicalDecoder.YBOUTPUT : PostgresConnectorConfig.LogicalDecoder.parse(s);
}
/**
@@ -219,11 +221,13 @@ public static void dropAllSchemas() throws SQLException {
if (!schemaNames.contains(PostgresSchema.PUBLIC_SCHEMA_NAME)) {
schemaNames.add(PostgresSchema.PUBLIC_SCHEMA_NAME);
}
+ LOGGER.info("Schemas to drop: {}", schemaNames);
String dropStmts = schemaNames.stream()
.map(schema -> "\"" + schema.replaceAll("\"", "\"\"") + "\"")
.map(schema -> "DROP SCHEMA IF EXISTS " + schema + " CASCADE;")
.collect(Collectors.joining(lineSeparator));
TestHelper.execute(dropStmts);
+
try {
TestHelper.executeDDL("init_database.ddl");
}
@@ -279,29 +283,37 @@ public static JdbcConfiguration defaultJdbcConfig(String hostname, int port) {
public static JdbcConfiguration.Builder defaultJdbcConfigBuilder(String hostname, int port) {
return JdbcConfiguration.copy(Configuration.fromSystemProperties("database."))
.with(CommonConnectorConfig.TOPIC_PREFIX, "dbserver1")
- .withDefault(JdbcConfiguration.DATABASE, "postgres")
+ .withDefault(JdbcConfiguration.DATABASE, "yugabyte")
.withDefault(JdbcConfiguration.HOSTNAME, hostname)
.withDefault(JdbcConfiguration.PORT, port)
- .withDefault(JdbcConfiguration.USER, "postgres")
- .withDefault(JdbcConfiguration.PASSWORD, "postgres");
+ .withDefault(JdbcConfiguration.USER, "yugabyte")
+ .withDefault(JdbcConfiguration.PASSWORD, "yugabyte");
}
public static JdbcConfiguration defaultJdbcConfig() {
- return defaultJdbcConfig("localhost", 5432);
+ return defaultJdbcConfig("127.0.0.1", 5433);
+ }
+
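+ /**
+ * Returns the heartbeat topic name for the test server, built from the default heartbeat topic prefix.
+ */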
+ public static String getDefaultHeartbeatTopic() {
+ return Heartbeat.HEARTBEAT_TOPICS_PREFIX.defaultValueAsString() + "." + TEST_SERVER;
}
public static JdbcConfiguration.Builder defaultJdbcConfigBuilder() {
- return defaultJdbcConfigBuilder("localhost", 5432);
+ return defaultJdbcConfigBuilder("localhost", 5433);
}
public static Configuration.Builder defaultConfig() {
+ return defaultConfig("YBOUTPUT");
+ }
+
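+ /**
+ * Same as {@link #defaultConfig()} but lets the caller pick the logical decoding plugin, e.g. "YBOUTPUT" or "PGOUTPUT".
+ */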
+ public static Configuration.Builder defaultConfig(String pluginName) {
JdbcConfiguration jdbcConfiguration = defaultJdbcConfig();
Configuration.Builder builder = Configuration.create();
jdbcConfiguration.forEach((field, value) -> builder.with(PostgresConnectorConfig.DATABASE_CONFIG_PREFIX + field, value));
builder.with(CommonConnectorConfig.TOPIC_PREFIX, TEST_SERVER)
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, true)
.with(PostgresConnectorConfig.STATUS_UPDATE_INTERVAL_MS, 100)
- .with(PostgresConnectorConfig.PLUGIN_NAME, decoderPlugin())
+ .with(PostgresConnectorConfig.PLUGIN_NAME, pluginName)
.with(PostgresConnectorConfig.SSL_MODE, SecureConnectionMode.DISABLED)
.with(PostgresConnectorConfig.MAX_RETRIES, 2)
.with(PostgresConnectorConfig.RETRY_DELAY_MS, 2000);
@@ -385,7 +397,7 @@ protected static void createPublicationForAllTables() {
}
protected static void dropPublication(String publicationName) {
- if (decoderPlugin().equals(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT)) {
+ if (decoderPlugin().equals(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT) || decoderPlugin().equals(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT)) {
try {
execute("DROP PUBLICATION " + publicationName);
}
@@ -396,7 +408,7 @@ protected static void dropPublication(String publicationName) {
}
protected static void createPublicationForAllTables(String publicationName) {
- if (decoderPlugin().equals(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT)) {
+ if (decoderPlugin().equals(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT) || decoderPlugin().equals(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT)) {
execute("CREATE PUBLICATION " + publicationName + " FOR ALL TABLES");
}
}
@@ -406,7 +418,7 @@ protected static boolean publicationExists() {
}
protected static boolean publicationExists(String publicationName) {
- if (decoderPlugin().equals(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT)) {
+ if (decoderPlugin().equals(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT) || decoderPlugin().equals(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT)) {
try (PostgresConnection connection = create()) {
String query = String.format("SELECT pubname FROM pg_catalog.pg_publication WHERE pubname = '%s'", publicationName);
try {
@@ -462,6 +474,20 @@ protected static void assertNoOpenTransactions() throws SQLException {
}
}
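+ /**
+ * Blocks the caller for the given duration; implemented with Awaitility's pollDelay rather than Thread.sleep.
+ */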
+ protected static void waitFor(Duration duration) throws InterruptedException {
+ Awaitility.await()
+ .pollDelay(duration)
+ .atMost(duration.plusSeconds(1))
+ .until(() -> true);
+ }
+
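+ /**
+ * Waits up to 30 seconds, polling every second, until the interceptor has captured the given log message.
+ */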
+ protected static void waitForLogMessage(LogInterceptor logInterceptor, String message) {
+ Awaitility.await()
+ .atMost(Duration.ofSeconds(30))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> logInterceptor.containsMessage(message));
+ }
+
private static List<String> getOpenIdleTransactions(PostgresConnection connection) throws SQLException {
int connectionPID = ((PgConnection) connection.connection()).getBackendPID();
return connection.queryAndMap(
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TransactionMetadataIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TransactionMetadataIT.java
index bedbe6c162c..7702d80d223 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TransactionMetadataIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/TransactionMetadataIT.java
@@ -80,14 +80,15 @@ public void transactionMetadata() throws InterruptedException {
.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
.with(PostgresConnectorConfig.PROVIDE_TRANSACTION_METADATA, true)
.build();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
assertConnectorIsRunning();
- TestHelper.waitForDefaultReplicationSlotBeActive();
waitForAvailableRecords(100, TimeUnit.MILLISECONDS);
// there shouldn't be any snapshot records
assertNoRecordsToConsume();
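+ // YB Note: the replication-slot activity check above was removed; wait a fixed period instead
+ // so the stream is fully active before changes are produced.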
+ TestHelper.waitFor(Duration.ofSeconds(15));
+
// insert and verify 2 new records
TestHelper.execute(INSERT_STMT);
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/YBRecordsStreamProducerIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/YBRecordsStreamProducerIT.java
new file mode 100644
index 00000000000..bfffd5925a4
--- /dev/null
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/YBRecordsStreamProducerIT.java
@@ -0,0 +1,3573 @@
+/*
+ * Copyright Debezium Authors.
+ *
+ * Licensed under the Apache Software License version 2.0, available at http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+package io.debezium.connector.postgresql;
+
+import com.yugabyte.util.PSQLException;
+import io.debezium.config.CommonConnectorConfig;
+import io.debezium.config.CommonConnectorConfig.BinaryHandlingMode;
+import io.debezium.config.Configuration;
+import io.debezium.connector.SnapshotRecord;
+import io.debezium.connector.postgresql.PostgresConnectorConfig.IntervalHandlingMode;
+import io.debezium.connector.postgresql.PostgresConnectorConfig.SchemaRefreshMode;
+import io.debezium.connector.postgresql.PostgresConnectorConfig.SnapshotMode;
+import io.debezium.connector.postgresql.connection.PostgresConnection;
+import io.debezium.connector.postgresql.connection.ReplicationConnection;
+import io.debezium.connector.postgresql.junit.SkipTestDependingOnDecoderPluginNameRule;
+import io.debezium.connector.postgresql.junit.SkipWhenDecoderPluginNameIs;
+import io.debezium.connector.postgresql.junit.SkipWhenDecoderPluginNameIsNot;
+import io.debezium.data.Bits;
+import io.debezium.data.Enum;
+import io.debezium.data.Envelope;
+import io.debezium.data.SpecialValueDecimal;
+import io.debezium.data.VariableScaleDecimal;
+import io.debezium.data.VerifyRecord;
+import io.debezium.data.geometry.Point;
+import io.debezium.doc.FixFor;
+import io.debezium.embedded.EmbeddedEngineConfig;
+import io.debezium.heartbeat.DatabaseHeartbeatImpl;
+import io.debezium.heartbeat.Heartbeat;
+import io.debezium.jdbc.JdbcConnection;
+import io.debezium.jdbc.JdbcValueConverters.DecimalMode;
+import io.debezium.jdbc.TemporalPrecisionMode;
+import io.debezium.junit.ConditionalFail;
+import io.debezium.junit.EqualityCheck;
+import io.debezium.junit.SkipWhenDatabaseVersion;
+import io.debezium.junit.logging.LogInterceptor;
+import io.debezium.relational.RelationalChangeRecordEmitter;
+import io.debezium.relational.RelationalDatabaseConnectorConfig.DecimalHandlingMode;
+import io.debezium.relational.Table;
+import io.debezium.relational.TableId;
+import io.debezium.relational.Tables;
+import io.debezium.relational.Tables.TableFilter;
+import io.debezium.time.MicroTime;
+import io.debezium.time.MicroTimestamp;
+import io.debezium.time.ZonedTime;
+import io.debezium.time.ZonedTimestamp;
+import io.debezium.util.HexConverter;
+import io.debezium.util.Stopwatch;
+import io.debezium.util.Testing;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.kafka.connect.data.Decimal;
+import org.apache.kafka.connect.data.Field;
+import org.apache.kafka.connect.data.Schema;
+import org.apache.kafka.connect.data.SchemaBuilder;
+import org.apache.kafka.connect.data.Struct;
+import org.apache.kafka.connect.errors.ConnectException;
+import org.apache.kafka.connect.header.Header;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.apache.kafka.connect.storage.MemoryOffsetBackingStore;
+import org.assertj.core.api.Assertions;
+import org.awaitility.Awaitility;
+import org.awaitility.core.ConditionTimeoutException;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.time.Instant;
+import java.time.LocalTime;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.LongStream;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+
+import static io.debezium.connector.postgresql.TestHelper.*;
+import static io.debezium.connector.postgresql.junit.SkipWhenDecoderPluginNameIs.DecoderPluginName.PGOUTPUT;
+import static io.debezium.junit.EqualityCheck.LESS_THAN;
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertTrue;
+import static org.assertj.core.api.Assertions.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Integration test for the {@link RecordsStreamProducer} class. It also indirectly tests the PG plugin
+ * functionality for different use cases. This class is a copy of {@link RecordsStreamProducerIT} with
+ * YugabyteDB as the source database. The rewrite is needed because the {@code yboutput} plugin changes
+ * the structure of the emitted records, so the assertions had to be adapted accordingly.
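+ * <p>
+ * For example, where the vanilla connector exposes a scalar column directly via {@code after.get("col")},
+ * {@code yboutput} wraps each column in a nested struct carrying a {@code value} field, so the same
+ * value is read as {@code after.getStruct("col").get("value")}.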
+ *
+ * @author Vaibhav Kushwaha (hchiorea@redhat.com)
+ */
+public class YBRecordsStreamProducerIT extends AbstractRecordsProducerTest {
+ private static final Logger LOGGER = LoggerFactory.getLogger(YBRecordsStreamProducerIT.class);
+
+ private TestConsumer consumer;
+
+ @Rule
+ public final TestRule skip = new SkipTestDependingOnDecoderPluginNameRule();
+
+ @Rule
+ public TestRule conditionalFail = new ConditionalFail();
+
+ @Before
+ public void before() throws Exception {
+ // ensure the slot is deleted for each test
+ TestHelper.dropAllSchemas();
+ TestHelper.dropPublication();
+// TestHelper.executeDDL("init_postgis.ddl");
+ String statements = "CREATE SCHEMA IF NOT EXISTS public;" +
+ "DROP TABLE IF EXISTS test_table;" +
+ "CREATE TABLE test_table (pk SERIAL, text TEXT, PRIMARY KEY(pk));" +
+ "CREATE TABLE table_with_interval (id SERIAL PRIMARY KEY, title VARCHAR(512) NOT NULL, time_limit INTERVAL DEFAULT '60 days'::INTERVAL NOT NULL);" +
+ "INSERT INTO test_table(text) VALUES ('insert');";
+ TestHelper.execute(statements);
+
+ PostgresConnectorConfig config = new PostgresConnectorConfig(TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
+ .with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
+ .build());
+ }
+
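+ /**
+ * Starts a {@link YugabyteDBConnector} with the default test configuration plus the given customizations,
+ * optionally waiting for the initial snapshot record before returning.
+ */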
+ private void startConnector(Function<Configuration.Builder, Configuration.Builder> customConfig, boolean waitForSnapshot, Predicate<SourceRecord> isStopRecord)
+ throws InterruptedException {
+ start(YugabyteDBConnector.class, new PostgresConnectorConfig(customConfig.apply(TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
+ .with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, waitForSnapshot ? SnapshotMode.INITIAL : SnapshotMode.NO_DATA))
+ .build()).getConfig(), isStopRecord);
+ assertConnectorIsRunning();
+ waitForStreamingToStart();
+
+ if (waitForSnapshot) {
+ // Wait for snapshot to be in progress
+ consumer = testConsumer(1);
+ consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
+ consumer.remove();
+ }
+ }
+
+ private void startConnector(Function<Configuration.Builder, Configuration.Builder> customConfig, boolean waitForSnapshot) throws InterruptedException {
+ startConnector(customConfig, waitForSnapshot, (x) -> false);
+ }
+
+ private void startConnector(Function<Configuration.Builder, Configuration.Builder> customConfig) throws InterruptedException {
+ startConnector(customConfig, true);
+ }
+
+ private void startConnector() throws InterruptedException {
+ startConnector(Function.identity(), true);
+ }
+
+ @Test
+ @FixFor("DBZ-766")
+ public void shouldReceiveChangesAfterConnectionRestart() throws Exception {
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.dropPublication();
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.FALSE)
+ .with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis"));
+
+ TestHelper.execute("CREATE TABLE t0 (pk SERIAL, d INTEGER, PRIMARY KEY(pk));");
+
+ consumer = testConsumer(1);
+ waitForStreamingToStart();
+
+ // Insert new row and verify inserted
+ executeAndWait("INSERT INTO t0 (pk,d) VALUES(1,1);");
+ assertRecordInserted("public.t0", PK_FIELD, 1);
+
+ // simulate the connector is stopped
+ stopConnector();
+
+ // Alter schema offline
+ TestHelper.execute("ALTER TABLE t0 ADD COLUMN d2 INTEGER;");
+ TestHelper.execute("ALTER TABLE t0 ALTER COLUMN d SET NOT NULL;");
+
+ // Start the producer and wait; the wait is to guarantee the stream thread is polling
+ // This appears to be a potential race condition problem
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis"),
+ false);
+ consumer = testConsumer(1);
+ waitForStreamingToStart();
+
+ // Insert new row and verify inserted
+ executeAndWait("INSERT INTO t0 (pk,d,d2) VALUES (2,1,3);");
+ assertRecordInserted("public.t0", PK_FIELD, 2);
+ }
+
+ @Test
+ @FixFor("DBZ-1698")
+ public void shouldReceiveUpdateSchemaAfterConnectionRestart() throws Exception {
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.dropPublication();
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, false)
+ .with(PostgresConnectorConfig.SCHEMA_REFRESH_MODE, SchemaRefreshMode.COLUMNS_DIFF_EXCLUDE_UNCHANGED_TOAST));
+
+ TestHelper.execute("CREATE TABLE t0 (pk SERIAL, d INTEGER, PRIMARY KEY(pk));");
+
+ consumer = testConsumer(1);
+ waitForStreamingToStart();
+
+ // Insert new row and verify inserted
+ executeAndWait("INSERT INTO t0 (pk,d) VALUES(1,1);");
+ assertRecordInserted("public.t0", PK_FIELD, 1);
+
+ // simulate the connector is stopped
+ stopConnector();
+ Thread.sleep(3000);
+
+ // Add record offline
+ TestHelper.execute("INSERT INTO t0 (pk,d) VALUES(2,2);");
+
+ // Alter schema offline
+ TestHelper.execute("ALTER TABLE t0 ADD COLUMN d2 NUMERIC(10,6) DEFAULT 0 NOT NULL;");
+ TestHelper.execute("ALTER TABLE t0 ALTER COLUMN d SET NOT NULL;");
+
+ // Start the producer and wait; the wait is to guarantee the stream thread is polling
+ // This appears to be a potential race condition problem
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, false)
+ .with(PostgresConnectorConfig.SCHEMA_REFRESH_MODE, SchemaRefreshMode.COLUMNS_DIFF_EXCLUDE_UNCHANGED_TOAST),
+ false);
+ consumer = testConsumer(2);
+ waitForStreamingToStart();
+
+ // Insert a new row and verify that both the offline insert and the new insert are received
+ executeAndWait("INSERT INTO t0 (pk,d,d2) VALUES (3,1,3);");
+ assertRecordInserted("public.t0", PK_FIELD, 2);
+ assertRecordInserted("public.t0", PK_FIELD, 3);
+
+ stopConnector();
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.dropPublication();
+ }
+
+ private Struct testProcessNotNullColumns(TemporalPrecisionMode temporalMode) throws Exception {
+ TestHelper.executeDDL("postgres_create_tables.ddl");
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SCHEMA_EXCLUDE_LIST, "postgis")
+ .with(PostgresConnectorConfig.TIME_PRECISION_MODE, temporalMode));
+
+ consumer.expects(1);
+ executeAndWait("INSERT INTO not_null_table VALUES (default, 30, '2019-02-10 11:34:58', '2019-02-10 11:35:00', "
+ + "'10:20:11', '10:20:12', '2019-02-01', '$20', B'101', 32766, 2147483646, 9223372036854775806, 3.14, "
+ + "true, 3.14768, 1234.56, 'Test', '(0,0),(1,1)', '<(0,0),1>', '01:02:03', '{0,1,2}', '((0,0),(1,1))', "
+ + "'((0,0),(0,1),(0,2))', '(1,1)', '((0,0),(0,1),(1,1))', 'a', 'hello world', '{\"key\": 123}', "
+ + "'- abc
', 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11', B'101', '192.168.1.100', "
+ + "'192.168.1', '08:00:2b:01:02:03');");
+
+ consumer.remove();
+
+ consumer.expects(1);
+ executeAndWait("UPDATE not_null_table SET val=40");
+ final SourceRecord record = consumer.remove();
+ YBVerifyRecord.isValidUpdate(record, "pk", 1);
+ YBVerifyRecord.isValid(record);
+ return ((Struct) record.value()).getStruct("before");
+ }
+
+ @Ignore("YB Note: Replica identity cannot be changed at runtime")
+ @Test
+ @FixFor("DBZ-1029")
+ @SkipWhenDecoderPluginNameIs(value = PGOUTPUT, reason = "Decoder synchronizes all schema columns when processing relation messages")
+ public void shouldReceiveChangesForInsertsIndependentOfReplicaIdentity() throws Exception {
+ // insert statement should not be affected by replica identity settings in any way
+
+ startConnector();
+
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ String statement = "INSERT INTO test_table (text) VALUES ('pk_and_default');";
+ assertInsert(statement, 2, Collections.singletonList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "pk_and_default")));
+
+ consumer.expects(1);
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY FULL;");
+ statement = "INSERT INTO test_table (text) VALUES ('pk_and_full');";
+ assertInsert(statement, 3, Collections.singletonList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "pk_and_full")));
+
+ consumer.expects(1);
+ TestHelper.execute("ALTER TABLE test_table DROP CONSTRAINT test_table_pkey CASCADE;");
+ statement = "INSERT INTO test_table (pk, text) VALUES (4, 'no_pk_and_full');";
+ assertInsert(statement, 4, Collections.singletonList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "no_pk_and_full")));
+
+ consumer.expects(1);
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ statement = "INSERT INTO test_table (pk, text) VALUES (5, 'no_pk_and_default');";
+ assertInsert(statement, 5, Collections.singletonList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "no_pk_and_default")));
+ }
+
+ @Ignore("YB Note: Replica identity cannot be changed at runtime")
+ @Test
+ @FixFor("DBZ-1029")
+ public void shouldReceiveChangesForInsertsIndependentOfReplicaIdentityWhenSchemaChanged() throws Exception {
+ // insert statement should not be affected by replica identity settings in any way
+
+ startConnector();
+
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ String statement = "INSERT INTO test_table (text) VALUES ('pk_and_default');";
+ assertInsert(statement, 2, Collections.singletonList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "pk_and_default")));
+
+ consumer.expects(1);
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY FULL;");
+ statement = "INSERT INTO test_table (text) VALUES ('pk_and_full');";
+ assertInsert(statement, 3, Collections.singletonList(new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "pk_and_full")));
+
+ consumer.expects(1);
+ // YB Note: Table cannot be altered if it's a part of CDC replication - https://github.com/yugabyte/yugabyte-db/issues/16625
+ TestHelper.execute("ALTER TABLE test_table DROP CONSTRAINT test_table_pkey CASCADE;");
+ statement = "INSERT INTO test_table (pk, text) VALUES (4, 'no_pk_and_full');";
+ assertInsert(statement, Arrays.asList(new SchemaAndValueField("pk", SchemaBuilder.int32().defaultValue(0).build(), 4),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "no_pk_and_full")));
+
+ consumer.expects(1);
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ statement = "INSERT INTO test_table (pk, text) VALUES (5, 'no_pk_and_default');";
+ assertInsert(statement, Arrays.asList(new SchemaAndValueField("pk", SchemaBuilder.int32().defaultValue(0).build(), 5),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "no_pk_and_default")));
+ }
+
+ @Test
+ public void shouldReceiveChangesForNewTable() throws Exception {
+ String statement = "CREATE SCHEMA s1;" +
+ "CREATE TABLE s1.a (pk SERIAL, aa integer, PRIMARY KEY(pk));" +
+ "INSERT INTO s1.a (aa) VALUES (11);";
+
+ startConnector();
+
+ executeAndWait(statement);
+ assertRecordInserted("s1.a", PK_FIELD, 1);
+ }
+
+ @Test
+ public void shouldReceiveChangesForRenamedTable() throws Exception {
+ String statement = "DROP TABLE IF EXISTS renamed_test_table;" +
+ "ALTER TABLE test_table RENAME TO renamed_test_table;" +
+ "INSERT INTO renamed_test_table (text) VALUES ('new');";
+ startConnector();
+
+ executeAndWait(statement);
+ assertRecordInserted("public.renamed_test_table", PK_FIELD, 2);
+ }
+
+ @Test
+ public void shouldReceiveChangesForUpdatesWithColumnChanges() throws Exception {
+ // add a new column
+ String statements = "ALTER TABLE test_table ADD COLUMN uvc VARCHAR(2);" +
+ "ALTER TABLE test_table REPLICA IDENTITY FULL;";
+
+ execute(statements);
+
+ startConnector();
+
+ // Wait after starting connector.
+ consumer = testConsumer(1);
+
+ // Execute the update after starting the connector.
+ executeAndWait("UPDATE test_table SET uvc ='aa' WHERE pk = 1;");
+
+ // the update should be the last record
+ SourceRecord updatedRecord = consumer.remove();
+ String topicName = topicName("public.test_table");
+ assertEquals(topicName, updatedRecord.topic());
+ YBVerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 1);
+
+ // now check we got the updated value (the old value should be null, the new one whatever we set)
+ List<SchemaAndValueField> expectedBefore = Collections.singletonList(new SchemaAndValueField("uvc", null, null));
+ assertRecordSchemaAndValues(expectedBefore, updatedRecord, Envelope.FieldName.BEFORE);
+
+ List<SchemaAndValueField> expectedAfter = Collections.singletonList(new SchemaAndValueField("uvc", SchemaBuilder.OPTIONAL_STRING_SCHEMA,
+ "aa"));
+ assertRecordSchemaAndValues(expectedAfter, updatedRecord, Envelope.FieldName.AFTER);
+
+ // rename a column
+ statements = "ALTER TABLE test_table RENAME COLUMN uvc to xvc;" +
+ "UPDATE test_table SET xvc ='bb' WHERE pk = 1;";
+
+ consumer.expects(1);
+ executeAndWait(statements);
+
+ updatedRecord = consumer.remove();
+ YBVerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 1);
+
+ // now check we got the updated value (the old value should be null, the new one whatever we set)
+ expectedBefore = Collections.singletonList(new SchemaAndValueField("xvc", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "aa"));
+ assertRecordSchemaAndValues(expectedBefore, updatedRecord, Envelope.FieldName.BEFORE);
+
+ expectedAfter = Collections.singletonList(new SchemaAndValueField("xvc", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "bb"));
+ assertRecordSchemaAndValues(expectedAfter, updatedRecord, Envelope.FieldName.AFTER);
+
+ // drop a column
+ statements = "ALTER TABLE test_table DROP COLUMN xvc;" +
+ "UPDATE test_table SET text ='update' WHERE pk = 1;";
+
+ consumer.expects(1);
+ executeAndWait(statements);
+ updatedRecord = consumer.remove();
+ YBVerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 1);
+
+ // change a column type
+ statements = "ALTER TABLE test_table ADD COLUMN modtype INTEGER;" +
+ "INSERT INTO test_table (pk,modtype) VALUES (2,1);";
+
+ consumer.expects(1);
+ executeAndWait(statements);
+ updatedRecord = consumer.remove();
+
+ YBVerifyRecord.isValidInsert(updatedRecord, PK_FIELD, 2);
+ assertRecordSchemaAndValues(
+ Collections.singletonList(new SchemaAndValueField("modtype", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 1)), updatedRecord, Envelope.FieldName.AFTER);
+ }
+
+ private Header getPKUpdateNewKeyHeader(SourceRecord record) {
+ return this.getHeaderField(record, RelationalChangeRecordEmitter.PK_UPDATE_NEWKEY_FIELD);
+ }
+
+ private Header getPKUpdateOldKeyHeader(SourceRecord record) {
+ return this.getHeaderField(record, RelationalChangeRecordEmitter.PK_UPDATE_OLDKEY_FIELD);
+ }
+
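+ /**
+ * Returns the first header on the record whose key matches the given field name.
+ */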
+ private Header getHeaderField(SourceRecord record, String fieldName) {
+ return StreamSupport.stream(record.headers().spliterator(), false)
+ .filter(header -> fieldName.equals(header.key()))
+ .collect(Collectors.toList()).get(0);
+ }
+
+ @Test
+ public void shouldReceiveChangesForUpdatesWithPKChanges() throws Exception {
+ startConnector();
+ consumer = testConsumer(3);
+ executeAndWait("UPDATE test_table SET text = 'update', pk = 2");
+
+ String topicName = topicName("public.test_table");
+
+ // first should be a delete of the old pk
+ SourceRecord deleteRecord = consumer.remove();
+ assertEquals(topicName, deleteRecord.topic());
+ YBVerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1);
+
+ // followed by a tombstone of the old pk
+ SourceRecord tombstoneRecord = consumer.remove();
+ assertEquals(topicName, tombstoneRecord.topic());
+ YBVerifyRecord.isValidTombstone(tombstoneRecord, PK_FIELD, 1);
+
+ // and finally insert of the new value
+ SourceRecord insertRecord = consumer.remove();
+ assertEquals(topicName, insertRecord.topic());
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 2);
+ }
+
+ @Test
+ @FixFor("DBZ-582")
+ public void shouldReceiveChangesForUpdatesWithPKChangesWithoutTombstone() throws Exception {
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false));
+ consumer = testConsumer(2);
+
+ executeAndWait("UPDATE test_table SET text = 'update', pk = 2");
+
+ String topicName = topicName("public.test_table");
+
+ // first should be a delete of the old pk
+ SourceRecord deleteRecord = consumer.remove();
+ assertEquals(topicName, deleteRecord.topic());
+ YBVerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1);
+
+ // followed by insert of the new value
+ SourceRecord insertRecord = consumer.remove();
+ assertEquals(topicName, insertRecord.topic());
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 2);
+ }
+
+ @Test
+ public void shouldReceiveChangesForDefaultValues() throws Exception {
+ String statements = "ALTER TABLE test_table REPLICA IDENTITY FULL;" +
+ "ALTER TABLE test_table ADD COLUMN default_column TEXT DEFAULT 'default';" +
+ "INSERT INTO test_table (text) VALUES ('update');";
+ startConnector();
+ consumer = testConsumer(1);
+ executeAndWait(statements);
+
+ SourceRecord insertRecord = consumer.remove();
+ assertEquals(topicName("public.test_table"), insertRecord.topic());
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 2);
+ List<SchemaAndValueField> expectedSchemaAndValues = Arrays.asList(
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "update"),
+ new SchemaAndValueField("default_column", SchemaBuilder.string().optional().defaultValue("default").build(), "default"));
+ assertRecordSchemaAndValues(expectedSchemaAndValues, insertRecord, Envelope.FieldName.AFTER);
+ }
+
+ @Test
+ public void shouldReceiveChangesForTypeConstraints() throws Exception {
+ // add a new column
+ String statements = "ALTER TABLE test_table ADD COLUMN num_val NUMERIC(5,2);" +
+ "ALTER TABLE test_table REPLICA IDENTITY FULL;";
+
+ // Alter the replica identity before starting connector.
+ execute(statements);
+
+ startConnector();
+ consumer = testConsumer(1);
+ executeAndWait("UPDATE test_table SET num_val = 123.45 WHERE pk = 1;");
+
+ // the update should be the last record
+ SourceRecord updatedRecord = consumer.remove();
+ String topicName = topicName("public.test_table");
+ assertEquals(topicName, updatedRecord.topic());
+ YBVerifyRecord.isValidUpdate(updatedRecord, PK_FIELD, 1);
+
+ // now check we got the updated value (the old value should be null, the new one whatever we set)
+ List<SchemaAndValueField> expectedBefore = Collections.singletonList(new SchemaAndValueField("num_val", null, null));
+ assertRecordSchemaAndValues(expectedBefore, updatedRecord, Envelope.FieldName.BEFORE);
+
+ List<SchemaAndValueField> expectedAfter = Collections.singletonList(
+ new SchemaAndValueField("num_val", Decimal.builder(2).parameter(TestHelper.PRECISION_PARAMETER_KEY, "5").optional().build(), new BigDecimal("123.45")));
+ assertRecordSchemaAndValues(expectedAfter, updatedRecord, Envelope.FieldName.AFTER);
+
+ if (YugabyteDBServer.isEnabled()) {
+ // YB Note: Altering column for table part of CDC replication is not allowed, see https://github.com/yugabyte/yugabyte-db/issues/16625
+ return;
+ }
+
+ // change a constraint
+ statements = "ALTER TABLE test_table ALTER COLUMN num_val TYPE NUMERIC(6,1);" +
+ "INSERT INTO test_table (pk,num_val) VALUES (2,123.41);";
+
+ consumer.expects(1);
+ executeAndWait(statements);
+ updatedRecord = consumer.remove();
+
+ YBVerifyRecord.isValidInsert(updatedRecord, PK_FIELD, 2);
+ assertRecordSchemaAndValues(
+ Collections.singletonList(new SchemaAndValueField("num_val", Decimal.builder(1).parameter(TestHelper.PRECISION_PARAMETER_KEY, "6").optional().build(),
+ new BigDecimal("123.4"))),
+ updatedRecord, Envelope.FieldName.AFTER);
+
+ statements = "ALTER TABLE test_table ALTER COLUMN num_val TYPE NUMERIC;" +
+ "INSERT INTO test_table (pk,num_val) VALUES (3,123.4567);";
+
+ consumer.expects(1);
+ executeAndWait(statements);
+ updatedRecord = consumer.remove();
+
+ final Struct dvs = new Struct(VariableScaleDecimal.schema());
+ dvs.put("scale", 4).put("value", new BigDecimal("123.4567").unscaledValue().toByteArray());
+ YBVerifyRecord.isValidInsert(updatedRecord, PK_FIELD, 3);
+ assertRecordSchemaAndValues(
+ Collections.singletonList(new SchemaAndValueField("num_val", VariableScaleDecimal.builder().optional().build(), dvs)), updatedRecord,
+ Envelope.FieldName.AFTER);
+
+ statements = "ALTER TABLE test_table ALTER COLUMN num_val TYPE DECIMAL(12,4);" +
+ "INSERT INTO test_table (pk,num_val) VALUES (4,2.48);";
+
+ consumer.expects(1);
+ executeAndWait(statements);
+ updatedRecord = consumer.remove();
+
+ YBVerifyRecord.isValidInsert(updatedRecord, PK_FIELD, 4);
+ assertRecordSchemaAndValues(
+ Collections.singletonList(new SchemaAndValueField("num_val", Decimal.builder(4).parameter(TestHelper.PRECISION_PARAMETER_KEY, "12").optional().build(),
+ new BigDecimal("2.4800"))),
+ updatedRecord, Envelope.FieldName.AFTER);
+
+ statements = "ALTER TABLE test_table ALTER COLUMN num_val TYPE DECIMAL(12);" +
+ "INSERT INTO test_table (pk,num_val) VALUES (5,1238);";
+
+ consumer.expects(1);
+ executeAndWait(statements);
+ updatedRecord = consumer.remove();
+
+ YBVerifyRecord.isValidInsert(updatedRecord, PK_FIELD, 5);
+ assertRecordSchemaAndValues(
+ Collections.singletonList(new SchemaAndValueField("num_val", Decimal.builder(0).parameter(TestHelper.PRECISION_PARAMETER_KEY, "12").optional().build(),
+ new BigDecimal("1238"))),
+ updatedRecord, Envelope.FieldName.AFTER);
+
+ statements = "ALTER TABLE test_table ALTER COLUMN num_val TYPE DECIMAL;" +
+ "INSERT INTO test_table (pk,num_val) VALUES (6,1225.1);";
+
+ consumer.expects(1);
+ executeAndWait(statements);
+ updatedRecord = consumer.remove();
+
+ final Struct dvs2 = new Struct(VariableScaleDecimal.schema());
+ dvs2.put("scale", 1).put("value", new BigDecimal("1225.1").unscaledValue().toByteArray());
+ YBVerifyRecord.isValidInsert(updatedRecord, PK_FIELD, 6);
+ assertRecordSchemaAndValues(
+ Collections.singletonList(new SchemaAndValueField("num_val", VariableScaleDecimal.builder().optional().build(), dvs2)), updatedRecord,
+ Envelope.FieldName.AFTER);
+
+ statements = "ALTER TABLE test_table ALTER COLUMN num_val SET NOT NULL;" +
+ "INSERT INTO test_table (pk,num_val) VALUES (7,1976);";
+
+ consumer.expects(1);
+ executeAndWait(statements);
+ updatedRecord = consumer.remove();
+
+ dvs2.put("scale", 0).put("value", new BigDecimal("1976").unscaledValue().toByteArray());
+ YBVerifyRecord.isValidInsert(updatedRecord, PK_FIELD, 7);
+ assertRecordSchemaAndValues(
+ Collections.singletonList(new SchemaAndValueField("num_val", VariableScaleDecimal.builder().build(), dvs2)), updatedRecord, Envelope.FieldName.AFTER);
+ }
+
+ @Test
+ public void verifyAllWorkingTypesInATableWithYbOutput() throws Exception {
+ verifyAllWorkingTypesInATable(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT);
+ }
+
+ @Test
+ public void verifyAllWorkingTypesInATableWithPgOutput() throws Exception {
+ verifyAllWorkingTypesInATable(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT);
+ }
+
+ public void verifyAllWorkingTypesInATable(PostgresConnectorConfig.LogicalDecoder logicalDecoder) throws Exception {
+ String createStmt = "CREATE TABLE all_types (id serial PRIMARY KEY, bigintcol bigint, " +
+ "bitcol bit(5), varbitcol varbit(5), booleanval boolean, " +
+ "byteaval bytea, ch char(5), vchar varchar(25), cidrval cidr, " +
+ "dt date, dp double precision, inetval inet, intervalval interval, " +
+ "jsonval json, jsonbval jsonb, mc macaddr, mc8 macaddr8, mn money, " +
+ "rl real, si smallint, i4r int4range, i8r int8range, " +
+ "nr numrange, tsr tsrange, tstzr tstzrange, dr daterange, txt text, " +
+ "tm time, tmtz timetz, ts timestamp, tstz timestamptz, uuidval uuid)";
+
+ execute(createStmt);
+
+ if (logicalDecoder == PostgresConnectorConfig.LogicalDecoder.PGOUTPUT) {
+ LOGGER.info("Changing replica identity of the table to default");
+ TestHelper.execute("ALTER TABLE all_types REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE table_with_interval REPLICA IDENTITY DEFAULT;");
+ TestHelper.waitFor(Duration.ofSeconds(10));
+ }
+
+ TestHelper.dropPublication();
+
+ start(YugabyteDBConnector.class,
+ TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.all_types")
+ .with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, "filtered")
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, "never")
+ .with(PostgresConnectorConfig.PLUGIN_NAME, logicalDecoder.getPostgresPluginName())
+ .build());
+ assertConnectorIsRunning();
+ waitForStreamingToStart();
+ consumer = testConsumer(1);
+
+ String insertStmt =
+ "INSERT INTO all_types (bigintcol, bitcol, varbitcol, booleanval, byteaval, ch, vchar, cidrval, dt, " +
+ "dp, inetval, intervalval, jsonval, jsonbval, mc, mc8, mn, rl, si, i4r, i8r, nr, tsr, tstzr, dr, " +
+ "txt, tm, tmtz, ts, tstz, uuidval) VALUES (123456, '11011', '10101', FALSE, E'\\\\001', 'five5', " +
+ "'sample_text', '10.1.0.0/16', '2022-02-24', 12.345, '127.0.0.1', " +
+ "'2020-03-10 00:00:00'::timestamp-'2020-02-10 00:00:00'::timestamp, '{\"a\":\"b\"}', " +
+ "'{\"a\":\"b\"}', '2C:54:91:88:C9:E3', '22:00:5c:03:55:08:01:02', '$100.5', " +
+ "32.145, 12, '(1, 10)', '(100, 200)', '(10.45, 21.32)', " +
+ "'(1970-01-01 00:00:00, 2000-01-01 12:00:00)', '(2017-07-04 12:30:30 UTC, 2021-07-04 12:30:30+05:30)', " +
+ "'(2019-10-07, 2021-10-07)', 'text to verify behaviour', '12:47:32', '12:00:00+05:30', " +
+ "'2021-11-25 12:00:00.123456', '2021-11-25 12:00:00+05:30', 'ffffffff-ffff-ffff-ffff-ffffffffffff');";
+
+ consumer.expects(1);
+ executeAndWait(insertStmt);
+
+ SourceRecord record = consumer.remove();
+
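+ // Expected values follow the connector's type mappings: bit(5)/varbit(5) arrive as byte
+ // arrays ('11011' = 0b11011 = 27, '10101' = 0b10101 = 21), dates as days since the epoch
+ // (2022-02-24 = 19047), time as microseconds since midnight, timestamp as microseconds
+ // since the epoch, and timetz/timestamptz normalized to UTC strings.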
+ assertValueField(record, getResolvedColumnName("after/bigintcol", logicalDecoder), 123456);
+ assertValueField(record, getResolvedColumnName("after/bitcol", logicalDecoder), new byte[]{27});
+ assertValueField(record, getResolvedColumnName("after/varbitcol", logicalDecoder), new byte[]{21});
+ assertValueField(record, getResolvedColumnName("after/booleanval", logicalDecoder), false);
+ assertValueField(record, getResolvedColumnName("after/byteaval", logicalDecoder), ByteBuffer.wrap(HexConverter.convertFromHex("01")));
+ assertValueField(record, getResolvedColumnName("after/ch", logicalDecoder), "five5");
+ assertValueField(record, getResolvedColumnName("after/vchar", logicalDecoder), "sample_text");
+ assertValueField(record, getResolvedColumnName("after/cidrval", logicalDecoder), "10.1.0.0/16");
+ assertValueField(record, getResolvedColumnName("after/dt", logicalDecoder), 19047);
+ assertValueField(record, getResolvedColumnName("after/dp", logicalDecoder), 12.345);
+ assertValueField(record, getResolvedColumnName("after/inetval", logicalDecoder), "127.0.0.1");
+ assertValueField(record, getResolvedColumnName("after/intervalval", logicalDecoder), 2505600000000L);
+ assertValueField(record, getResolvedColumnName("after/jsonval", logicalDecoder), "{\"a\":\"b\"}");
+ assertValueField(record, getResolvedColumnName("after/jsonbval", logicalDecoder), "{\"a\": \"b\"}");
+ assertValueField(record, getResolvedColumnName("after/mc", logicalDecoder), "2c:54:91:88:c9:e3");
+ assertValueField(record, getResolvedColumnName("after/mc8", logicalDecoder), "22:00:5c:03:55:08:01:02");
+ assertValueField(record, getResolvedColumnName("after/mn", logicalDecoder), 100.50);
+ assertValueField(record, getResolvedColumnName("after/rl", logicalDecoder), 32.145);
+ assertValueField(record, getResolvedColumnName("after/si", logicalDecoder), 12);
+ assertValueField(record, getResolvedColumnName("after/i4r", logicalDecoder), "[2,10)");
+ assertValueField(record, getResolvedColumnName("after/i8r", logicalDecoder), "[101,200)");
+ assertValueField(record, getResolvedColumnName("after/nr", logicalDecoder), "(10.45,21.32)");
+ assertValueField(record, getResolvedColumnName("after/tsr", logicalDecoder), "(\"1970-01-01 00:00:00\",\"2000-01-01 12:00:00\")");
+ assertValueField(record, getResolvedColumnName("after/tstzr", logicalDecoder), "(\"2017-07-04 18:00:30+05:30\",\"2021-07-04 12:30:30+05:30\")");
+ assertValueField(record, getResolvedColumnName("after/dr", logicalDecoder), "[2019-10-08,2021-10-07)");
+ assertValueField(record, getResolvedColumnName("after/txt", logicalDecoder), "text to verify behaviour");
+ assertValueField(record, getResolvedColumnName("after/tm", logicalDecoder), 46052000000L);
+ assertValueField(record, getResolvedColumnName("after/tmtz", logicalDecoder), "06:30:00Z");
+ assertValueField(record, getResolvedColumnName("after/ts", logicalDecoder), 1637841600123456L);
+ assertValueField(record, getResolvedColumnName("after/tstz", logicalDecoder), "2021-11-25T06:30:00.000000Z");
+ assertValueField(record, getResolvedColumnName("after/uuidval", logicalDecoder), "ffffffff-ffff-ffff-ffff-ffffffffffff");
+ }
+
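+ /**
+ * Resolves a record field path for the given decoder: yboutput wraps each column value in a
+ * struct with a {@code value} field, so the path gets a {@code /value} suffix, while pgoutput
+ * uses the plain path.
+ */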
+ private String getResolvedColumnName(String columnName, PostgresConnectorConfig.LogicalDecoder logicalDecoder) {
+ if (logicalDecoder == PostgresConnectorConfig.LogicalDecoder.PGOUTPUT) {
+ return columnName;
+ } else if (logicalDecoder == PostgresConnectorConfig.LogicalDecoder.YBOUTPUT) {
+ return columnName + "/value";
+ } else {
+ throw new RuntimeException("Unrecognized logical decoder '" + logicalDecoder + "', check the plugin.name configuration");
+ }
+ }
+
+ @Test
+ public void verifyUpdatesForColumnsOfAllTypesForYbOutput() throws Exception {
+ verifyUpdatesForColumnsOfAllTypes(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT);
+ }
+
+ @Test
+ public void verifyUpdatesForColumnsOfAllTypesForPgOutput() throws Exception {
+ verifyUpdatesForColumnsOfAllTypes(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT);
+ }
+
+ public void verifyUpdatesForColumnsOfAllTypes(PostgresConnectorConfig.LogicalDecoder logicalDecoder) throws Exception {
+ String createStmt = "CREATE TABLE all_types (id serial PRIMARY KEY, bigintcol bigint, " +
+ "bitcol bit(5), varbitcol varbit(5), booleanval boolean, " +
+ "byteaval bytea, ch char(5), vchar varchar(25), cidrval cidr, " +
+ "dt date, dp double precision, inetval inet, intervalval interval, " +
+ "jsonval json, jsonbval jsonb, mc macaddr, mc8 macaddr8, mn money, " +
+ "rl real, si smallint, i4r int4range, i8r int8range, " +
+ "nr numrange, tsr tsrange, tstzr tstzrange, dr daterange, txt text, " +
+ "tm time, tmtz timetz, ts timestamp, tstz timestamptz, uuidval uuid)";
+
+ execute(createStmt);
+
+ if (logicalDecoder == PostgresConnectorConfig.LogicalDecoder.PGOUTPUT) {
+ LOGGER.info("Changing replica identity of all the tables to default");
+ TestHelper.execute("ALTER TABLE all_types REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE table_with_interval REPLICA IDENTITY DEFAULT;");
+ TestHelper.waitFor(Duration.ofSeconds(10));
+ }
+
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.dropPublication();
+
+ start(YugabyteDBConnector.class,
+ TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.all_types")
+ .with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, "filtered")
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, "never")
+ .with(PostgresConnectorConfig.PLUGIN_NAME, logicalDecoder.getPostgresPluginName())
+ .build());
+ assertConnectorIsRunning();
+ waitForStreamingToStart();
+ consumer = testConsumer(1);
+
+ String insertStmt =
+ "INSERT INTO all_types (bigintcol, bitcol, varbitcol, booleanval, byteaval, ch, vchar, cidrval, dt, " +
+ "dp, inetval, intervalval, jsonval, jsonbval, mc, mc8, mn, rl, si, i4r, i8r, nr, tsr, tstzr, dr, " +
+ "txt, tm, tmtz, ts, tstz, uuidval) VALUES (123456, '11011', '10101', FALSE, E'\\\\001', 'five5', " +
+ "'sample_text', '10.1.0.0/16', '2022-02-24', 12.345, '127.0.0.1', " +
+ "'2020-03-10 00:00:00'::timestamp-'2020-02-10 00:00:00'::timestamp, '{\"a\":\"b\"}', " +
+ "'{\"a\":\"b\"}', '2C:54:91:88:C9:E3', '22:00:5c:03:55:08:01:02', '$100.5', " +
+ "32.145, 12, '(1, 10)', '(100, 200)', '(10.45, 21.32)', " +
+ "'(1970-01-01 00:00:00, 2000-01-01 12:00:00)', '(2017-07-04 12:30:30 UTC, 2021-07-04 12:30:30+05:30)', " +
+ "'(2019-10-07, 2021-10-07)', 'text to verify behaviour', '12:47:32', '12:00:00+05:30', " +
+ "'2021-11-25 12:00:00.123456', '2021-11-25 12:00:00+05:30', 'ffffffff-ffff-ffff-ffff-ffffffffffff');";
+
+ consumer.expects(1);
+ executeAndWait(insertStmt);
+
+ SourceRecord record = consumer.remove();
+
+ assertValueField(record, getResolvedColumnName("after/bigintcol", logicalDecoder), 123456);
+ assertValueField(record, getResolvedColumnName("after/bitcol", logicalDecoder), new byte[]{27});
+ assertValueField(record, getResolvedColumnName("after/varbitcol", logicalDecoder), new byte[]{21});
+ assertValueField(record, getResolvedColumnName("after/booleanval", logicalDecoder), false);
+ assertValueField(record, getResolvedColumnName("after/byteaval", logicalDecoder), ByteBuffer.wrap(HexConverter.convertFromHex("01")));
+ assertValueField(record, getResolvedColumnName("after/ch", logicalDecoder), "five5");
+ assertValueField(record, getResolvedColumnName("after/vchar", logicalDecoder), "sample_text");
+ assertValueField(record, getResolvedColumnName("after/cidrval", logicalDecoder), "10.1.0.0/16");
+ assertValueField(record, getResolvedColumnName("after/dt", logicalDecoder), 19047);
+ assertValueField(record, getResolvedColumnName("after/dp", logicalDecoder), 12.345);
+ assertValueField(record, getResolvedColumnName("after/inetval", logicalDecoder), "127.0.0.1");
+ assertValueField(record, getResolvedColumnName("after/intervalval", logicalDecoder), 2505600000000L);
+ assertValueField(record, getResolvedColumnName("after/jsonval", logicalDecoder), "{\"a\":\"b\"}");
+ assertValueField(record, getResolvedColumnName("after/jsonbval", logicalDecoder), "{\"a\": \"b\"}");
+ assertValueField(record, getResolvedColumnName("after/mc", logicalDecoder), "2c:54:91:88:c9:e3");
+ assertValueField(record, getResolvedColumnName("after/mc8", logicalDecoder), "22:00:5c:03:55:08:01:02");
+ assertValueField(record, getResolvedColumnName("after/mn", logicalDecoder), 100.50);
+ assertValueField(record, getResolvedColumnName("after/rl", logicalDecoder), 32.145);
+ assertValueField(record, getResolvedColumnName("after/si", logicalDecoder), 12);
+ assertValueField(record, getResolvedColumnName("after/i4r", logicalDecoder), "[2,10)");
+ assertValueField(record, getResolvedColumnName("after/i8r", logicalDecoder), "[101,200)");
+ assertValueField(record, getResolvedColumnName("after/nr", logicalDecoder), "(10.45,21.32)");
+ assertValueField(record, getResolvedColumnName("after/tsr", logicalDecoder), "(\"1970-01-01 00:00:00\",\"2000-01-01 12:00:00\")");
+ assertValueField(record, getResolvedColumnName("after/tstzr", logicalDecoder), "(\"2017-07-04 18:00:30+05:30\",\"2021-07-04 12:30:30+05:30\")");
+ assertValueField(record, getResolvedColumnName("after/dr", logicalDecoder), "[2019-10-08,2021-10-07)");
+ assertValueField(record, getResolvedColumnName("after/txt", logicalDecoder), "text to verify behaviour");
+ assertValueField(record, getResolvedColumnName("after/tm", logicalDecoder), 46052000000L);
+ assertValueField(record, getResolvedColumnName("after/tmtz", logicalDecoder), "06:30:00Z");
+ assertValueField(record, getResolvedColumnName("after/ts", logicalDecoder), 1637841600123456L);
+ assertValueField(record, getResolvedColumnName("after/tstz", logicalDecoder), "2021-11-25T06:30:00.000000Z");
+ assertValueField(record, getResolvedColumnName("after/uuidval", logicalDecoder), "ffffffff-ffff-ffff-ffff-ffffffffffff");
+
+ // Update each column one by one.
+ TestHelper.execute("UPDATE all_types SET bigintcol = 234567 WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET bitcol = '11111' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET varbitcol = '00011' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET booleanval = TRUE WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET byteaval = null WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET ch = 'four4' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET vchar = 'sample_text_updated' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET cidrval = '192.0.2.0/24' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET dt = '2024-08-06' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET dp = 23.456 WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET inetval = '192.168.1.1' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET intervalval = '2020-03-11 00:00:00'::timestamp-'2020-02-10 00:00:00'::timestamp WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET jsonval = '{\"c\":\"d\",\"e\":123}' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET jsonbval = '{\"c\":\"d\",\"e\":123}' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET mc = '2c:54:91:99:c9:e3' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET mc8 = '22:00:5c:3d:55:08:01:02' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET mn = '$200.5' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET rl = 44.556 WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET si = 11 WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET i4r = '(10, 100)' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET i8r = '(200, 10000)' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET nr = '(12.35, 56.78)' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET tsr = '(2000-01-01 00:00:00, 2000-01-02 00:00:00)' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET tstzr = '(2000-01-01 00:05:30+05:30, 2000-01-02 00:00:00 UTC)' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET dr = '(2000-01-01, 2000-01-03)' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET txt = 'updated text to verify behaviour' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET tm = '14:15:16' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET tmtz = '05:30:00+05:30' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET ts = '2024-08-06 12:00:00.123456' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET tstz = '2024-08-06 12:00:00+05:30' WHERE id = 1;");
+ TestHelper.execute("UPDATE all_types SET uuidval = 'ffffffff-ffff-ffff-ffff-123456789abc' WHERE id = 1;");
+
+ // This excludes the primary key column.
+ List<String> columnsInAllTypes = List.of("bigintcol", "bitcol", "varbitcol", "booleanval", "byteaval", "ch", "vchar", "cidrval", "dt", "dp", "inetval",
+ "intervalval", "jsonval", "jsonbval", "mc", "mc8", "mn", "rl", "si", "i4r", "i8r", "nr", "tsr", "tstzr", "dr", "txt", "tm", "tmtz", "ts", "tstz",
+ "uuidval");
+
+ SourceRecords allRecords = consumeRecordsByTopic(31 /* one update record per column */);
+ List<SourceRecord> records = allRecords.allRecordsInOrder();
+
+ assertThat(records.size()).isEqualTo(31);
+
+ assertColumnInUpdate(columnsInAllTypes, records.get(0), "after/bigintcol", 234567, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(1), "after/bitcol", new byte[]{31}, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(2), "after/varbitcol", new byte[]{3}, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(3), "after/booleanval", true, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(4), "after/byteaval", null, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(5), "after/ch", "four4", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(6), "after/vchar", "sample_text_updated", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(7), "after/cidrval", "192.0.2.0/24", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(8), "after/dt", 19941, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(9), "after/dp", 23.456, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(10), "after/inetval", "192.168.1.1", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(11), "after/intervalval", 2592000000000L, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(12), "after/jsonval", "{\"c\":\"d\",\"e\":123}", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(13), "after/jsonbval", "{\"c\": \"d\", \"e\": 123}", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(14), "after/mc", "2c:54:91:99:c9:e3", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(15), "after/mc8", "22:00:5c:3d:55:08:01:02", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(16), "after/mn", 200.50, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(17), "after/rl", 44.556, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(18), "after/si", 11, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(19), "after/i4r", "[11,100)", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(20), "after/i8r", "[201,10000)", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(21), "after/nr", "(12.35,56.78)", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(22), "after/tsr", "(\"2000-01-01 00:00:00\",\"2000-01-02 00:00:00\")", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(23), "after/tstzr", "(\"2000-01-01 00:05:30+05:30\",\"2000-01-02 05:30:00+05:30\")", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(24), "after/dr", "[2000-01-02,2000-01-03)", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(25), "after/txt", "updated text to verify behaviour", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(26), "after/tm", 51316000000L, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(27), "after/tmtz", "00:00:00Z", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(28), "after/ts", 1722945600123456L, logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(29), "after/tstz", "2024-08-06T06:30:00.000000Z", logicalDecoder);
+ assertColumnInUpdate(columnsInAllTypes, records.get(30), "after/uuidval", "ffffffff-ffff-ffff-ffff-123456789abc", logicalDecoder);
+ }
+
+ @Test
+ public void verifyOperationsForTableWithMixedColumnsYbOutput() throws Exception {
+ verifyOperationsInTableWithMixedColumns(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT);
+ }
+
+ @Test
+ public void verifyOperationsForTableWithMixedColumnsPgOutput() throws Exception {
+ verifyOperationsInTableWithMixedColumns(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT);
+ }
+
+ public void verifyOperationsInTableWithMixedColumns(PostgresConnectorConfig.LogicalDecoder logicalDecoder) throws Exception {
+ String createStmt = "create table public.test_mixed (" +
+ " id character varying not null," +
+ " i_key character varying not null," +
+ " c_id character varying not null," +
+ " p_type character varying not null," +
+ " p_id character varying," +
+ " tx_id character varying," +
+ " status character varying not null," +
+ " amount numeric not null," +
+ " currency character varying not null," +
+ " loc character varying," +
+ " quantity numeric," +
+ " o_type character varying," +
+ " o_created_at timestamp without time zone," +
+ " o_updated_at timestamp without time zone," +
+ " dis_details jsonb," +
+ " o_metadata jsonb," +
+ " tx_data jsonb," +
+ " tx_ref_d jsonb," +
+ " rw_d jsonb," +
+ " meta jsonb," +
+ " created_at timestamp without time zone," +
+ " updated_at timestamp without time zone not null," +
+ " deleted_at timestamp without time zone," +
+ " version integer not null default 0," +
+ " primary key (updated_at, id, c_id)" +
+ "); " +
+ "create unique index orders_i_key_key on test_mixed using lsm (i_key); " +
+ "create index idx_updated_at on test_mixed using lsm (updated_at);";
+
+ execute(createStmt);
+
+ if (logicalDecoder == PostgresConnectorConfig.LogicalDecoder.PGOUTPUT) {
+ LOGGER.info("Changing replica identity of all the tables to default");
+ TestHelper.execute("ALTER TABLE test_mixed REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE table_with_interval REPLICA IDENTITY DEFAULT;");
+ TestHelper.waitFor(Duration.ofSeconds(10));
+ }
+
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.dropPublication();
+
+ start(YugabyteDBConnector.class,
+ TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.test_mixed")
+ .with(PostgresConnectorConfig.PUBLICATION_AUTOCREATE_MODE, "filtered")
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, "never")
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
+ .with(PostgresConnectorConfig.PLUGIN_NAME, logicalDecoder.getPostgresPluginName())
+ .build());
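+ // Note: with decimal.handling.mode=double (configured above), numeric columns such as amount and quantity arrive as Java doubles.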
+ assertConnectorIsRunning();
+ waitForStreamingToStart();
+
+ String insertStmt = "insert into test_mixed values ('id_val', 'i_key', 'c_id', 'p_type_val', " +
+ "'p_id_val', 'ffffff-ffff', 'DONE', 30.5, 'INR', 'JJXX+HR8', 10.0, null, '2024-08-06 17:30:00', " +
+ "'2024-08-06 17:30:00', '{\"name\":\"Something for display\"}', '{\"year\": 2000}', '{\"tx_seq\":500}', " +
+ "null, null, null, '2024-08-06 17:30:00', '2024-08-06 17:30:00', null, 1)";
+
+ execute(insertStmt);
+ execute("UPDATE test_mixed SET status = 'NOT AVAILABLE', version = 2 WHERE id = 'id_val' AND c_id = 'c_id' AND updated_at = '2024-08-06 17:30:00';");
+
+ SourceRecords allRecords = consumeRecordsByTopic(2);
+ SourceRecord insertRecord = allRecords.allRecordsInOrder().get(0);
+ assertValueField(insertRecord, getResolvedColumnName("after/id", logicalDecoder), "id_val");
+ assertValueField(insertRecord, getResolvedColumnName("after/i_key", logicalDecoder), "i_key");
+ assertValueField(insertRecord, getResolvedColumnName("after/c_id", logicalDecoder), "c_id");
+ assertValueField(insertRecord, getResolvedColumnName("after/p_type", logicalDecoder), "p_type_val");
+ assertValueField(insertRecord, getResolvedColumnName("after/p_id", logicalDecoder), "p_id_val");
+ assertValueField(insertRecord, getResolvedColumnName("after/tx_id", logicalDecoder), "ffffff-ffff");
+ assertValueField(insertRecord, getResolvedColumnName("after/status", logicalDecoder), "DONE");
+ assertValueField(insertRecord, getResolvedColumnName("after/amount", logicalDecoder), 30.5);
+ assertValueField(insertRecord, getResolvedColumnName("after/currency", logicalDecoder), "INR");
+ assertValueField(insertRecord, getResolvedColumnName("after/loc", logicalDecoder), "JJXX+HR8");
+ assertValueField(insertRecord, getResolvedColumnName("after/quantity", logicalDecoder), 10.0);
+ assertValueField(insertRecord, getResolvedColumnName("after/o_type", logicalDecoder), null);
+ assertValueField(insertRecord, getResolvedColumnName("after/o_created_at", logicalDecoder), 1722965400000000L);
+ assertValueField(insertRecord, getResolvedColumnName("after/o_updated_at", logicalDecoder), 1722965400000000L);
+ assertValueField(insertRecord, getResolvedColumnName("after/dis_details", logicalDecoder), "{\"name\": \"Something for display\"}");
+ assertValueField(insertRecord, getResolvedColumnName("after/o_metadata", logicalDecoder), "{\"year\": 2000}");
+ assertValueField(insertRecord, getResolvedColumnName("after/tx_data", logicalDecoder), "{\"tx_seq\": 500}");
+ assertValueField(insertRecord, getResolvedColumnName("after/tx_ref_d", logicalDecoder), null);
+ assertValueField(insertRecord, getResolvedColumnName("after/rw_d", logicalDecoder), null);
+ assertValueField(insertRecord, getResolvedColumnName("after/meta", logicalDecoder), null);
+ assertValueField(insertRecord, getResolvedColumnName("after/created_at", logicalDecoder), 1722965400000000L);
+ assertValueField(insertRecord, getResolvedColumnName("after/updated_at", logicalDecoder), 1722965400000000L);
+ assertValueField(insertRecord, getResolvedColumnName("after/deleted_at", logicalDecoder), null);
+ assertValueField(insertRecord, getResolvedColumnName("after/version", logicalDecoder), 1);
+
+ SourceRecord updateRecord = allRecords.allRecordsInOrder().get(1);
+
+ assertValueField(updateRecord, getResolvedColumnName("after/id", logicalDecoder), "id_val");
+ assertValueField(updateRecord, getResolvedColumnName("after/c_id", logicalDecoder), "c_id");
+ assertValueField(updateRecord, getResolvedColumnName("after/updated_at", logicalDecoder), 1722965400000000L);
+ assertValueField(updateRecord, getResolvedColumnName("after/status", logicalDecoder), "NOT AVAILABLE");
+ assertValueField(updateRecord, getResolvedColumnName("after/version", logicalDecoder), 2);
+
+ if (logicalDecoder.isYBOutput()) {
+ // With yboutput, unchanged columns are absent from the update record and therefore resolve to null.
+ assertValueField(updateRecord, "after/i_key", null);
+ assertValueField(updateRecord, "after/p_type", null);
+ assertValueField(updateRecord, "after/p_id", null);
+ assertValueField(updateRecord, "after/tx_id", null);
+ assertValueField(updateRecord, "after/amount", null);
+ assertValueField(updateRecord, "after/currency", null);
+ assertValueField(updateRecord, "after/loc", null);
+ assertValueField(updateRecord, "after/quantity", null);
+ assertValueField(updateRecord, "after/o_type", null);
+ assertValueField(updateRecord, "after/o_created_at", null);
+ assertValueField(updateRecord, "after/o_updated_at", null);
+ assertValueField(updateRecord, "after/dis_details", null);
+ assertValueField(updateRecord, "after/o_metadata", null);
+ assertValueField(updateRecord, "after/tx_data", null);
+ assertValueField(updateRecord, "after/tx_ref_d", null);
+ assertValueField(updateRecord, "after/rw_d", null);
+ assertValueField(updateRecord, "after/meta", null);
+ assertValueField(updateRecord, "after/created_at", null);
+ assertValueField(updateRecord, "after/deleted_at", null);
+ }
+ else {
+ // If decoder is not yboutput then all the other columns will be present as well.
+ assertValueField(updateRecord, getResolvedColumnName("after/i_key", logicalDecoder), "i_key");
+ assertValueField(updateRecord, getResolvedColumnName("after/p_type", logicalDecoder), "p_type_val");
+ assertValueField(updateRecord, getResolvedColumnName("after/p_id", logicalDecoder), "p_id_val");
+ assertValueField(updateRecord, getResolvedColumnName("after/tx_id", logicalDecoder), "ffffff-ffff");
+ assertValueField(updateRecord, getResolvedColumnName("after/amount", logicalDecoder), 30.5);
+ assertValueField(updateRecord, getResolvedColumnName("after/currency", logicalDecoder), "INR");
+ assertValueField(updateRecord, getResolvedColumnName("after/loc", logicalDecoder), "JJXX+HR8");
+ assertValueField(updateRecord, getResolvedColumnName("after/quantity", logicalDecoder), 10.0);
+ assertValueField(updateRecord, getResolvedColumnName("after/o_type", logicalDecoder), null);
+ assertValueField(updateRecord, getResolvedColumnName("after/o_created_at", logicalDecoder), 1722965400000000L);
+ assertValueField(updateRecord, getResolvedColumnName("after/o_updated_at", logicalDecoder), 1722965400000000L);
+ assertValueField(updateRecord, getResolvedColumnName("after/dis_details", logicalDecoder), "{\"name\": \"Something for display\"}");
+ assertValueField(updateRecord, getResolvedColumnName("after/o_metadata", logicalDecoder), "{\"year\": 2000}");
+ assertValueField(updateRecord, getResolvedColumnName("after/tx_data", logicalDecoder), "{\"tx_seq\": 500}");
+ assertValueField(updateRecord, getResolvedColumnName("after/tx_ref_d", logicalDecoder), null);
+ assertValueField(updateRecord, getResolvedColumnName("after/rw_d", logicalDecoder), null);
+ assertValueField(updateRecord, getResolvedColumnName("after/meta", logicalDecoder), null);
+ assertValueField(updateRecord, getResolvedColumnName("after/created_at", logicalDecoder), 1722965400000000L);
+ assertValueField(updateRecord, getResolvedColumnName("after/updated_at", logicalDecoder), 1722965400000000L);
+ assertValueField(updateRecord, getResolvedColumnName("after/deleted_at", logicalDecoder), null);
+ }
+ }
+
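+ /**
+ * Asserts an UPDATE record that changed a single column. With yboutput only the changed column
+ * is populated, so every other column in {@code allColumns} is asserted to be null; with
+ * pgoutput the full row image is present and only the changed column's value is checked.
+ */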
+ public void assertColumnInUpdate(List<String> allColumns, SourceRecord record, String column, Object expectedValue,
+ PostgresConnectorConfig.LogicalDecoder logicalDecoder) {
+ if (logicalDecoder.isYBOutput()) {
+ YBVerifyRecord.isValidUpdate(record, "id", 1);
+
+ // Assert that the other columns are null - note that this only works with replica identity CHANGE.
+ for (String columnName : allColumns) {
+ if (!column.equals("after/" + columnName)) {
+ assertValueField(record, "after/" + columnName, null);
+ }
+ }
+ } else {
+ VerifyRecord.isValidUpdate(record, "id", 1);
+ }
+
+ assertValueField(record, getResolvedColumnName(column, logicalDecoder), expectedValue);
+ }
+
+ /*
+ * For reference, the unabbreviated production-style schema that test_mixed above is modeled on:
+ *
+ * create table public.orders (
+ * order_reference_id character varying not null,
+ * idempotency_key character varying not null,
+ * customer_id character varying not null,
+ * product_type character varying not null,
+ * product_id character varying,
+ * txn_reference_id character varying,
+ * order_status character varying not null,
+ * order_amount numeric not null,
+ * order_currency character varying not null,
+ * location character varying,
+ * order_quantity numeric,
+ * order_type character varying,
+ * order_created_at timestamp without time zone,
+ * order_updated_at timestamp without time zone,
+ * order_display_details jsonb,
+ * order_metadata jsonb,
+ * txn_data jsonb,
+ * txn_refund_data jsonb,
+ * reward_data jsonb,
+ * metadata jsonb,
+ * created_at timestamp without time zone,
+ * updated_at timestamp without time zone not null,
+ * deleted_at timestamp without time zone,
+ * version integer not null default 0,
+ * primary key (updated_at, order_reference_id, customer_id)
+ * );
+ * create unique index orders_idempotency_key_key on orders using lsm (idempotency_key);
+ * create index idx_updated_at on orders using lsm (updated_at);
+ */
+
+ @Ignore
+ @Test
+ public void shouldWorkForNumericTypesWithoutLengthAndScale() throws Exception {
+ /*
+ Fails with exception -
+
+ org.apache.kafka.connect.errors.DataException: Invalid Java object for schema
+ "io.debezium.data.VariableScaleDecimal" with type STRUCT: class [B for field: "value"
+ */
+ String createStmt = "CREATE TABLE numeric_type (id serial PRIMARY KEY, nm numeric);";
+
+ execute(createStmt);
+
+ start(YugabyteDBConnector.class,
+ TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.numeric_type")
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, "never")
+ .build());
+ assertConnectorIsRunning();
+ waitForStreamingToStart();
+ consumer = testConsumer(1);
+
+ consumer.expects(1);
+ executeAndWait("INSERT INTO numeric_type VALUES (1, 12.34);");
+
+ SourceRecord record = consumer.remove();
+ assertValueField(record, "after/nm/value", 12.34);
+ }
+
+ @Test
+ public void shouldReceiveChangesForDeletes() throws Exception {
+ // add a new entry and remove both
+ String statements = "INSERT INTO test_table (text) VALUES ('insert2');" +
+ "DELETE FROM test_table WHERE pk > 0;";
+
+ startConnector();
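+ // Expect 5 records: 1 insert, then 2 deletes, each followed by a tombstone.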
+ consumer = testConsumer(5);
+ executeAndWait(statements);
+
+ String topicPrefix = "public.test_table";
+ String topicName = topicName(topicPrefix);
+ assertRecordInserted(topicPrefix, PK_FIELD, 2);
+
+ // first entry removed
+ SourceRecord record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidDelete(record, PK_FIELD, 1);
+
+ // followed by a tombstone
+ record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidTombstone(record, PK_FIELD, 1);
+
+ // second entry removed
+ record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidDelete(record, PK_FIELD, 2);
+
+ // followed by a tombstone
+ record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidTombstone(record, PK_FIELD, 2);
+ }
+
+ @Test
+ @FixFor("DBZ-582")
+ public void shouldReceiveChangesForDeletesWithoutTombstone() throws Exception {
+ // add a new entry and remove both
+ String statements = "INSERT INTO test_table (text) VALUES ('insert2');" +
+ "DELETE FROM test_table WHERE pk > 0;";
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(CommonConnectorConfig.TOMBSTONES_ON_DELETE, false));
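+ // With tombstones disabled, expect only 3 records: 1 insert and 2 deletes.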
+ consumer = testConsumer(3);
+ executeAndWait(statements);
+
+ String topicPrefix = "public.test_table";
+ String topicName = topicName(topicPrefix);
+ assertRecordInserted(topicPrefix, PK_FIELD, 2);
+
+ // first entry removed
+ SourceRecord record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidDelete(record, PK_FIELD, 1);
+
+ // second entry removed
+ record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidDelete(record, PK_FIELD, 2);
+ }
+
+ @Test
+ @FixFor("DBZ-4137")
+ public void shouldReceiveNumericTypeAsDoubleWithNullDefaults() throws Exception {
+ LogInterceptor logInterceptor = new LogInterceptor(PostgresStreamingChangeEventSource.class);
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS numeric_table_with_n_defaults;",
+ "CREATE TABLE numeric_table_with_n_defaults (\n" +
+ " pk int4 PRIMARY KEY NOT NULL,\n" +
+ " r_numeric numeric(19, 4) NULL DEFAULT NULL,\n" +
+ " r_int int4 NULL DEFAULT NULL);",
+ "ALTER TABLE numeric_table_with_n_defaults REPLICA IDENTITY FULL");
+
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ startConnector(config -> config.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE),
+ false);
+
+ consumer = testConsumer(1);
+
+ // INSERT
+ String statement = "INSERT INTO numeric_table_with_n_defaults (pk) VALUES (1);";
+
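+ // Wait until the streaming source reports that it is processing messages before asserting the insert.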
+ Awaitility.await()
+ .atMost(Duration.ofSeconds(50))
+ .pollInterval(Duration.ofSeconds(1))
+ .until(() -> logInterceptor.containsMessage("Processing messages"));
+
+ assertInsert(
+ statement,
+ 1,
+ Arrays.asList(
+ new SchemaAndValueField("pk", Schema.INT32_SCHEMA, 1),
+ new SchemaAndValueField("r_numeric",
+ new SchemaBuilder(Schema.Type.FLOAT64)
+ .name(Schema.FLOAT64_SCHEMA.name())
+ .version(Schema.FLOAT64_SCHEMA.version())
+ .optional()
+ .defaultValue(null)
+ .build(),
+ null),
+ new SchemaAndValueField("r_int",
+ new SchemaBuilder(Schema.Type.INT32)
+ .name(Schema.INT32_SCHEMA.name())
+ .version(Schema.INT32_SCHEMA.version())
+ .optional()
+ .defaultValue(null)
+ .build(),
+ null)));
+ }
+
+ @Test
+ @FixFor("DBZ-4137")
+ public void shouldReceiveNumericTypeAsDoubleWithDefaults() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS numeric_table_with_defaults;",
+ "CREATE TABLE numeric_table_with_defaults (\n" +
+ " pk int4 PRIMARY KEY NOT NULL,\n" +
+ " r_numeric numeric(19, 4) NOT NULL DEFAULT 1,\n" +
+ " r_int int4 NOT NULL DEFAULT 2);",
+ "ALTER TABLE numeric_table_with_defaults REPLICA IDENTITY FULL");
+
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ startConnector(config -> config.with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE),
+ false);
+ consumer = testConsumer(1);
+
+ // INSERT
+ String statement = "INSERT INTO numeric_table_with_defaults (pk) VALUES (1);";
+ assertInsert(
+ statement,
+ 1,
+ Arrays.asList(
+ new SchemaAndValueField("pk", Schema.INT32_SCHEMA, 1),
+ new SchemaAndValueField("r_numeric",
+ new SchemaBuilder(Schema.Type.FLOAT64)
+ .name(Schema.FLOAT64_SCHEMA.name())
+ .version(Schema.FLOAT64_SCHEMA.version())
+ .defaultValue(1.0d)
+ .build(),
+ 1.0d),
+ new SchemaAndValueField("r_int",
+ new SchemaBuilder(Schema.Type.INT32)
+ .name(Schema.INT32_SCHEMA.name())
+ .version(Schema.INT32_SCHEMA.version())
+ .defaultValue(2)
+ .build(),
+ 2)));
+ }
+
+ @Test
+ @FixFor("DBZ-259")
+ public void shouldProcessIntervalDelete() throws Exception {
+ final String statements = "INSERT INTO table_with_interval VALUES (default, 'Foo', default);" +
+ "INSERT INTO table_with_interval VALUES (default, 'Bar', default);" +
+ "DELETE FROM table_with_interval WHERE id = 1;";
+
+ startConnector();
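+ // Expect 4 records: 2 inserts, then 1 delete followed by a tombstone.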
+ consumer.expects(4);
+ executeAndWait(statements);
+
+ final String topicPrefix = "public.table_with_interval";
+ final String topicName = topicName(topicPrefix);
+ final String pk = "id";
+ assertRecordInserted(topicPrefix, pk, 1);
+ assertRecordInserted(topicPrefix, pk, 2);
+
+ // first entry removed
+ SourceRecord record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidDelete(record, pk, 1);
+
+ // followed by a tombstone
+ record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidTombstone(record, pk, 1);
+ }
+
+ @Test
+ @FixFor("DBZ-911")
+ public void shouldRefreshSchemaOnUnchangedToastedDataWhenSchemaChanged() throws Exception {
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.SCHEMA_REFRESH_MODE, SchemaRefreshMode.COLUMNS_DIFF_EXCLUDE_UNCHANGED_TOAST));
+
+ String toastedValue = RandomStringUtils.randomAlphanumeric(10000);
+
+ // inserting a toasted value should /always/ produce a correct record
+ String statement = "ALTER TABLE test_table ADD COLUMN not_toast integer; INSERT INTO test_table (not_toast, text) values (10, '" + toastedValue + "')";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ SourceRecord record = consumer.remove();
+
+ // after record should contain the toasted value
+ List<SchemaAndValueField> expectedAfter = Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, toastedValue));
+ assertRecordSchemaAndValues(expectedAfter, record, Envelope.FieldName.AFTER);
+
+ // now we remove the toast column and update the not_toast column to see that our unchanged toast data
+ // does trigger a table schema refresh; the after schema should reflect the changes
+ statement = "ALTER TABLE test_table DROP COLUMN text; update test_table set not_toast = 5 where not_toast = 10";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_table", false));
+ assertEquals(Arrays.asList("pk", "not_toast"), tbl.retrieveColumnNames());
+ });
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-842")
+ public void shouldNotPropagateUnchangedToastedData() throws Exception {
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.SCHEMA_REFRESH_MODE, SchemaRefreshMode.COLUMNS_DIFF_EXCLUDE_UNCHANGED_TOAST));
+
+ final String toastedValue1 = RandomStringUtils.randomAlphanumeric(10000);
+ final String toastedValue2 = RandomStringUtils.randomAlphanumeric(10000);
+ final String toastedValue3 = RandomStringUtils.randomAlphanumeric(10000);
+
+ // inserting a toasted value should /always/ produce a correct record
+ String statement = "ALTER TABLE test_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_table ADD COLUMN mandatory_text TEXT NOT NULL DEFAULT '';"
+ + "ALTER TABLE test_table ALTER COLUMN mandatory_text SET STORAGE EXTENDED;"
+ + "ALTER TABLE test_table ALTER COLUMN mandatory_text SET DEFAULT '" + toastedValue3 + "';"
+ + "INSERT INTO test_table (not_toast, text, mandatory_text) values (10, '" + toastedValue1 + "', '" + toastedValue1 + "');"
+ + "INSERT INTO test_table (not_toast, text, mandatory_text) values (10, '" + toastedValue2 + "', '" + toastedValue2 + "');";
+ consumer = testConsumer(2);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, toastedValue1),
+ new SchemaAndValueField("mandatory_text", SchemaBuilder.string().defaultValue(toastedValue3).build(), toastedValue1)), consumer.remove(),
+ Envelope.FieldName.AFTER);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, toastedValue2),
+ new SchemaAndValueField("mandatory_text", SchemaBuilder.string().defaultValue(toastedValue3).build(), toastedValue2)), consumer.remove(),
+ Envelope.FieldName.AFTER);
+
+ statement = "UPDATE test_table SET not_toast = 2;"
+ + "UPDATE test_table SET not_toast = 3;";
+
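+ // Each UPDATE touches all 3 rows, so the two statements yield 6 records; unchanged TOASTed columns carry placeholder values.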
+ consumer.expects(6);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_table", false));
+ assertEquals(Arrays.asList("pk", "text", "not_toast", "mandatory_text"), tbl.retrieveColumnNames());
+ });
+ });
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "insert"),
+ new SchemaAndValueField("mandatory_text", SchemaBuilder.string().defaultValue(toastedValue3).build(), "")), consumer.remove(), Envelope.FieldName.AFTER);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, DecoderDifferences.optionalToastedValuePlaceholder()),
+ new SchemaAndValueField("mandatory_text", SchemaBuilder.string().defaultValue(toastedValue3).build(),
+ DecoderDifferences.mandatoryToastedValuePlaceholder())),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, DecoderDifferences.optionalToastedValuePlaceholder()),
+ new SchemaAndValueField("mandatory_text", SchemaBuilder.string().defaultValue(toastedValue3).build(),
+ DecoderDifferences.mandatoryToastedValuePlaceholder())),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 3),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "insert"),
+ new SchemaAndValueField("mandatory_text", SchemaBuilder.string().defaultValue(toastedValue3).build(), "")), consumer.remove(), Envelope.FieldName.AFTER);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 3),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, DecoderDifferences.optionalToastedValuePlaceholder()),
+ new SchemaAndValueField("mandatory_text", SchemaBuilder.string().defaultValue(toastedValue3).build(),
+ DecoderDifferences.mandatoryToastedValuePlaceholder())),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 3),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, DecoderDifferences.optionalToastedValuePlaceholder()),
+ new SchemaAndValueField("mandatory_text", SchemaBuilder.string().defaultValue(toastedValue3).build(),
+ DecoderDifferences.mandatoryToastedValuePlaceholder())),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-4941")
+ public void shouldHandleToastedArrayColumn() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY, text TEXT);");
+ startConnector(Function.identity(), false);
+ final String toastedValue = RandomStringUtils.randomAlphanumeric(10000);
+
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN mandatory_text_array TEXT[] NOT NULL;"
+ + "ALTER TABLE test_toast_table ALTER COLUMN mandatory_text_array SET STORAGE EXTENDED;"
+ + "INSERT INTO test_toast_table (not_toast, text, mandatory_text_array) values (10, 'text', ARRAY ['" + toastedValue + "']);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "text"),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(), Arrays.asList(toastedValue))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ statement = "UPDATE test_toast_table SET not_toast = 2;";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "text", "not_toast", "mandatory_text_array"), tbl.retrieveColumnNames());
+ });
+ });
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "text"),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(),
+ Arrays.asList(DecoderDifferences.mandatoryToastedValuePlaceholder()))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-6122")
+ public void shouldHandleToastedArrayColumnCharacterVarying() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY, text character varying(255));");
+ startConnector(Function.identity(), false);
+ final String toastedValue = RandomStringUtils.randomAlphanumeric(10000);
+
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN mandatory_text_array character varying(20000)[] NOT NULL;"
+ + "ALTER TABLE test_toast_table ALTER COLUMN mandatory_text_array SET STORAGE EXTENDED;"
+ + "INSERT INTO test_toast_table (not_toast, text, mandatory_text_array) values (10, 'text', ARRAY ['" + toastedValue + "']);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "text"),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(), Arrays.asList(toastedValue))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ statement = "UPDATE test_toast_table SET not_toast = 2;";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "text", "not_toast", "mandatory_text_array"), tbl.retrieveColumnNames());
+ });
+ });
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "text"),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(),
+ Arrays.asList(DecoderDifferences.mandatoryToastedValuePlaceholder()))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-6122")
+ public void shouldHandleToastedDateArrayColumn() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+ startConnector(Function.identity(), false);
+ List<Integer> intList = IntStream.range(1, 100000).boxed().map((x) -> 19338).collect(Collectors.toList());
+ final String toastedValue = intList.stream().map((x) -> "'2022-12-12'::date").collect(Collectors.joining(","));
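+ // 19338 is the epoch-day value of 2022-12-12; ~100k elements force the array to be TOASTed.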
+
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN date_array date[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN date_array SET STORAGE EXTENDED;"
+ + "INSERT INTO test_toast_table (not_toast, date_array) values (10, ARRAY [" + toastedValue + "]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("date_array",
+ SchemaBuilder.array(SchemaBuilder.int32().name("io.debezium.time.Date").optional().version(1).build()).optional().build(),
+ intList)),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ statement = "UPDATE test_toast_table SET not_toast = 2;";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "date_array"), tbl.retrieveColumnNames());
+ });
+ });
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("date_array",
+ SchemaBuilder.array(SchemaBuilder.int32().name("io.debezium.time.Date").optional().version(1).build()).optional().build(),
+ DecoderDifferences.toastedValueIntPlaceholder())),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-6122")
+ public void shouldHandleToastedByteArrayColumn() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+ startConnector(Function.identity(), false);
+ final String toastedValue = RandomStringUtils.randomNumeric(10000);
+
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN bytea_array bytea[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN bytea_array SET STORAGE EXTENDED;"
+ + "INSERT INTO test_toast_table (not_toast, bytea_array) values (10, ARRAY ['" + toastedValue + "'::bytea]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("bytea_array",
+ SchemaBuilder.array(Schema.OPTIONAL_BYTES_SCHEMA).optional().build(), Arrays.asList(ByteBuffer.wrap(toastedValue.getBytes())))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ statement = "UPDATE test_toast_table SET not_toast = 2;";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "bytea_array"), tbl.retrieveColumnNames());
+ });
+ });
+ final var record = consumer.remove();
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2)),
+ record,
+ Envelope.FieldName.AFTER);
+ final var after = ((Struct) record.value()).getStruct(Envelope.FieldName.AFTER);
+ final var byteaArray = after.getArray("bytea_array");
+ Assertions.assertThat(byteaArray).hasSize(1);
+ Assertions.assertThat(byteaArray.get(0)).isEqualTo(DecoderDifferences.mandatoryToastedValueBinaryPlaceholder());
+ Assertions.assertThat(after.schema().field("bytea_array").schema())
+ .isEqualTo(SchemaBuilder.array(Schema.OPTIONAL_BYTES_SCHEMA).optional().build());
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-5936")
+ public void shouldHandleToastedIntegerArrayColumn() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+ startConnector(Function.identity(), false);
+ List<Integer> intList = IntStream.range(1, 10000).boxed().collect(Collectors.toList());
+ final String toastedValue = intList.stream().map(String::valueOf)
+ .collect(Collectors.joining(","));
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN int_array int[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN int_array SET STORAGE EXTENDED;"
+ + "INSERT INTO test_toast_table (not_toast, int_array) values (10, ARRAY [" + toastedValue + "]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("int_array", SchemaBuilder.array(Schema.OPTIONAL_INT32_SCHEMA).optional().build(), intList)),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ statement = "UPDATE test_toast_table SET not_toast = 2;";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "int_array"), tbl.retrieveColumnNames());
+ });
+ });
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("int_array", SchemaBuilder.array(Schema.OPTIONAL_INT32_SCHEMA).optional().build(),
+ DecoderDifferences.toastedValueIntPlaceholder())),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-5936")
+ public void shouldHandleToastedBigIntArrayColumn() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+ startConnector(Function.identity(), false);
+ List<Long> bigintList = LongStream.range(1, 10000).boxed().collect(Collectors.toList());
+ final String toastedValue = bigintList.stream().map(String::valueOf)
+ .collect(Collectors.joining(","));
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN bigint_array bigint[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN bigint_array SET STORAGE EXTENDED;"
+ + "INSERT INTO test_toast_table (not_toast, bigint_array) values (10, ARRAY [" + toastedValue + "]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("bigint_array", SchemaBuilder.array(Schema.OPTIONAL_INT64_SCHEMA).optional().build(), bigintList)),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ statement = "UPDATE test_toast_table SET not_toast = 2;";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "bigint_array"), tbl.retrieveColumnNames());
+ });
+ });
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("bigint_array", SchemaBuilder.array(Schema.OPTIONAL_INT64_SCHEMA).optional().build(),
+ DecoderDifferences.toastedValueBigintPlaceholder())),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-5936")
+ public void shouldHandleToastedJsonArrayColumn() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY, text TEXT);");
+ startConnector(Function.identity(), false);
+ final String toastedValue = RandomStringUtils.randomAlphanumeric(10000);
+
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN json_array json[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN json_array SET STORAGE EXTENDED;"
+ + "INSERT INTO test_toast_table (not_toast, text, json_array) "
+ + "VALUES (10, 'text', ARRAY [ '{\"key\": \"" + toastedValue + "\" }'::json ]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "text"),
+ new SchemaAndValueField("json_array", SchemaBuilder.array(
+ io.debezium.data.Json.builder().optional().build()).optional().build(),
+ Arrays.asList("{\"key\": \"" + toastedValue + "\" }"))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ statement = "UPDATE test_toast_table SET not_toast = 2;";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "text", "not_toast", "json_array"), tbl.retrieveColumnNames());
+ });
+ });
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "text"),
+ new SchemaAndValueField("json_array", SchemaBuilder.array(
+ io.debezium.data.Json.builder().optional().build()).optional().build(),
+ Arrays.asList(DecoderDifferences.mandatoryToastedValuePlaceholder()))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ }
+
+ @Test
+ @FixFor("DBZ-6379")
+ public void shouldHandleToastedHstoreInHstoreMapMode() throws Exception {
+ TestHelper.execute("CREATE EXTENSION IF NOT EXISTS hstore SCHEMA public;");
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY, text TEXT, col hstore);");
+ startConnector(config -> config.with(PostgresConnectorConfig.HSTORE_HANDLING_MODE, PostgresConnectorConfig.HStoreHandlingMode.MAP));
+ final String toastedValue = RandomStringUtils.randomAlphanumeric(100000);
+ String statement = "INSERT INTO test_toast_table (id, col) values (10, 'a=>" + toastedValue + "');";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ HashMap<String, String> colValue = new HashMap<>();
+ colValue.put("a", toastedValue);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("col", SchemaBuilder.map(SchemaBuilder.STRING_SCHEMA,
+ SchemaBuilder.OPTIONAL_STRING_SCHEMA).optional().build(), colValue)),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ statement = "UPDATE test_toast_table SET text = 'text';";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "text", "col"), tbl.retrieveColumnNames());
+ });
+ });
+
+ // YB Note: Value for 'col' will not be present since replica identity is CHANGE.
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "text")),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+
+ colValue.clear();
+ colValue.put("a", "123456");
+ consumer.expects(1);
+ executeAndWait("UPDATE test_toast_table SET col = col || 'a=>\"123456\"'::hstore;");
+
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("col", SchemaBuilder.map(SchemaBuilder.STRING_SCHEMA,
+ SchemaBuilder.OPTIONAL_STRING_SCHEMA).optional().build(), colValue)),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ }
+
+ @Test
+ public void shouldHandleHstoreWithPgOutput() throws Exception {
+ TestHelper.execute("CREATE EXTENSION IF NOT EXISTS hstore SCHEMA public;");
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_hstore;",
+ "CREATE TABLE test_hstore (id SERIAL PRIMARY KEY, text TEXT, col hstore);");
+
+ // We need to change the replica identity of all the tables so that the service
+ // doesn't throw an error.
+ TestHelper.execute("ALTER TABLE test_hstore REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE table_with_interval REPLICA IDENTITY DEFAULT;");
+
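+ // With DEFAULT replica identity and the pgoutput plugin, the updated hstore value is
+ // expected in every after image, unlike the CHANGE-identity test above.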
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.HSTORE_HANDLING_MODE, PostgresConnectorConfig.HStoreHandlingMode.MAP)
+ .with(PostgresConnectorConfig.PLUGIN_NAME, PostgresConnectorConfig.LogicalDecoder.PGOUTPUT));
+ waitForStreamingToStart();
+
+ HashMap<String, String> colValue = new HashMap<>();
+ TestHelper.execute("INSERT INTO test_hstore values (1, 'text_val', 'a=>\"hstoreValue\"');");
+ TestHelper.execute("UPDATE test_hstore SET text = 'text';");
+ TestHelper.execute("UPDATE test_hstore SET col = col || 'a=>\"123456\"'::hstore;");
+
+ SourceRecords allRecords = consumeRecordsByTopic(3);
+ List<SourceRecord> records = allRecords.allRecordsInOrder();
+
+ assertThat(records.size()).isEqualTo(3);
+
+ colValue.put("a", "hstoreValue");
+
+ // Assert insert record.
+ VerifyRecord.isValidInsert(records.get(0), "id", 1);
+ assertValueField(records.get(0), "after/id", 1);
+ assertValueField(records.get(0), "after/text", "text_val");
+ assertValueField(records.get(0), "after/col", colValue);
+
+ // Assert update record.
+ VerifyRecord.isValidUpdate(records.get(1), "id", 1);
+ assertValueField(records.get(1), "after/id", 1);
+ assertValueField(records.get(1), "after/text", "text");
+ assertValueField(records.get(1), "after/col", colValue);
+
+ colValue.clear();
+ colValue.put("a", "123456");
+
+ // Assert update record.
+ VerifyRecord.isValidUpdate(records.get(2), "id", 1);
+ assertValueField(records.get(2), "after/id", 1);
+ assertValueField(records.get(2), "after/text", "text");
+ assertValueField(records.get(2), "after/col", colValue);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-6720")
+ public void shouldHandleToastedUuidArrayColumn() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY, text TEXT);");
+ startConnector(Function.identity(), false);
+ final List<String> toastedValueList = Stream.generate(UUID::randomUUID).map(String::valueOf).limit(10000).collect(Collectors.toList());
+ final String[] toastedValueArray = toastedValueList.toArray(new String[toastedValueList.size()]);
+ final String toastedValueQuotedString = toastedValueList.stream().map(uuid_str -> ("'" + uuid_str + "'")).collect(Collectors.joining(","));
+
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN uuid_array uuid[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN uuid_array SET STORAGE EXTENDED;"
+ + "INSERT INTO test_toast_table (not_toast, text, uuid_array) "
+ + "VALUES (10, 'text', ARRAY [" + toastedValueQuotedString + "]::uuid[]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "text"),
+ new SchemaAndValueField("uuid_array", SchemaBuilder.array(
+ io.debezium.data.Uuid.builder().optional().build()).optional().build(),
+ Arrays.asList(toastedValueArray))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ statement = "UPDATE test_toast_table SET not_toast = 2;";
+
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "text", "not_toast", "uuid_array"), tbl.retrieveColumnNames());
+ });
+ });
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 2),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "text"),
+ new SchemaAndValueField("uuid_array", SchemaBuilder.array(
+ io.debezium.data.Uuid.builder().optional().build()).optional().build(),
+ Arrays.asList(DecoderDifferences.mandatoryToastedValueUuidPlaceholder()))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-7193")
+ public void shouldHandleToastedArrayColumnForReplicaIdentityFullTable() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+
+ startConnector(Function.identity(), false);
+ assertConnectorIsRunning();
+ final String toastedValue = RandomStringUtils.randomAlphanumeric(10000);
+
+ // INSERT
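+ // With REPLICA IDENTITY FULL, the WAL carries the complete old row image, so the
+ // connector can emit the unchanged toasted value in both the before and after images
+ // instead of a placeholder.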
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN mandatory_text_array TEXT[] NOT NULL;"
+ + "ALTER TABLE test_toast_table ALTER COLUMN mandatory_text_array SET STORAGE EXTENDED;"
+ + "ALTER TABLE test_toast_table REPLICA IDENTITY FULL;"
+ + "INSERT INTO test_toast_table (not_toast, mandatory_text_array) values (10, ARRAY ['" + toastedValue + "']);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(), Arrays.asList(toastedValue))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+
+ // UPDATE
+ statement = "UPDATE test_toast_table SET not_toast = 20;";
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "mandatory_text_array"), tbl.retrieveColumnNames());
+ });
+ });
+ SourceRecord updatedRecord = consumer.remove();
+
+ // before and after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(), Arrays.asList(toastedValue))),
+ updatedRecord, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 20),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(), Arrays.asList(toastedValue))),
+ updatedRecord, Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-7193")
+ public void shouldHandleToastedArrayColumnCharacterVaryingForReplicaIdentityFullTable() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+
+ startConnector(Function.identity(), false);
+ assertConnectorIsRunning();
+ final String toastedValue = RandomStringUtils.randomAlphanumeric(10000);
+
+ // INSERT
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN mandatory_text_array character varying(20000)[] NOT NULL;"
+ + "ALTER TABLE test_toast_table ALTER COLUMN mandatory_text_array SET STORAGE EXTENDED;"
+ + "ALTER TABLE test_toast_table REPLICA IDENTITY FULL;"
+ + "INSERT INTO test_toast_table (not_toast, mandatory_text_array) values (10, ARRAY ['" + toastedValue + "']);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(), Arrays.asList(toastedValue))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+
+ // UPDATE
+ statement = "UPDATE test_toast_table SET not_toast = 20;";
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "mandatory_text_array"), tbl.retrieveColumnNames());
+ });
+ });
+ SourceRecord updatedRecord = consumer.remove();
+
+ // before and after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(), Arrays.asList(toastedValue))),
+ updatedRecord, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 20),
+ new SchemaAndValueField("mandatory_text_array", SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA).build(), Arrays.asList(toastedValue))),
+ updatedRecord, Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-7193")
+ public void shouldHandleToastedDateArrayColumnForReplicaIdentityFullTable() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+
+ startConnector(Function.identity(), false);
+ assertConnectorIsRunning();
+ List<Integer> intList = IntStream.range(1, 100000).boxed().map((x) -> 19338).collect(Collectors.toList());
+ final String toastedValue = intList.stream().map((x) -> "'2022-12-12'::date").collect(Collectors.joining(","));
+
+ // INSERT
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN date_array date[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN date_array SET STORAGE EXTENDED;"
+ + "ALTER TABLE test_toast_table REPLICA IDENTITY FULL;"
+ + "INSERT INTO test_toast_table (not_toast, date_array) values (10, ARRAY [" + toastedValue + "]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("date_array",
+ SchemaBuilder.array(SchemaBuilder.int32().name("io.debezium.time.Date").optional().version(1).build()).optional().build(),
+ intList)),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+
+ // UPDATE
+ statement = "UPDATE test_toast_table SET not_toast = 20;";
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "date_array"), tbl.retrieveColumnNames());
+ });
+ });
+ SourceRecord updatedRecord = consumer.remove();
+
+ // before and after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("date_array",
+ SchemaBuilder.array(SchemaBuilder.int32().name("io.debezium.time.Date").optional().version(1).build()).optional().build(),
+ intList)),
+ updatedRecord, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 20),
+ new SchemaAndValueField("date_array",
+ SchemaBuilder.array(SchemaBuilder.int32().name("io.debezium.time.Date").optional().version(1).build()).optional().build(),
+ intList)),
+ updatedRecord, Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-7193")
+ public void shouldHandleToastedByteArrayColumnForReplicaIdentityFullTable() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+
+ startConnector(Function.identity(), false);
+ assertConnectorIsRunning();
+ List<Integer> intList = IntStream.range(1, 100000).boxed().map((x) -> 19338).collect(Collectors.toList());
+ final String toastedValue = RandomStringUtils.randomNumeric(10000);
+
+ // INSERT
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN bytea_array bytea[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN bytea_array SET STORAGE EXTENDED;"
+ + "ALTER TABLE test_toast_table REPLICA IDENTITY FULL;"
+ + "INSERT INTO test_toast_table (not_toast, bytea_array) values (10, ARRAY ['" + toastedValue + "'::bytea]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("bytea_array",
+ SchemaBuilder.array(Schema.OPTIONAL_BYTES_SCHEMA).optional().build(), Arrays.asList(ByteBuffer.wrap(toastedValue.getBytes())))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+
+ // UPDATE
+ statement = "UPDATE test_toast_table SET not_toast = 20;";
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "bytea_array"), tbl.retrieveColumnNames());
+ });
+ });
+ SourceRecord updatedRecord = consumer.remove();
+
+ // before and after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("bytea_array",
+ SchemaBuilder.array(Schema.OPTIONAL_BYTES_SCHEMA).optional().build(),
+ Arrays.asList(ByteBuffer.wrap(toastedValue.getBytes())))),
+ updatedRecord, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 20),
+ new SchemaAndValueField("bytea_array",
+ SchemaBuilder.array(Schema.OPTIONAL_BYTES_SCHEMA).optional().build(),
+ Arrays.asList(ByteBuffer.wrap(toastedValue.getBytes())))),
+ updatedRecord, Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-7193")
+ public void shouldHandleToastedIntegerArrayColumnForReplicaIdentityFullTable() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+
+ startConnector(Function.identity(), false);
+ assertConnectorIsRunning();
+ List<Integer> intList = IntStream.range(1, 10000).boxed().collect(Collectors.toList());
+ final String toastedValue = intList.stream().map(String::valueOf)
+ .collect(Collectors.joining(","));
+
+ // INSERT
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN int_array int[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN int_array SET STORAGE EXTENDED;"
+ + "ALTER TABLE test_toast_table REPLICA IDENTITY FULL;"
+ + "INSERT INTO test_toast_table (not_toast, int_array) values (10, ARRAY [" + toastedValue + "]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("int_array", SchemaBuilder.array(Schema.OPTIONAL_INT32_SCHEMA).optional().build(), intList)),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+
+ // UPDATE
+ statement = "UPDATE test_toast_table SET not_toast = 20;";
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "int_array"), tbl.retrieveColumnNames());
+ });
+ });
+ SourceRecord updatedRecord = consumer.remove();
+
+ // before and after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("int_array", SchemaBuilder.array(Schema.OPTIONAL_INT32_SCHEMA).optional().build(), intList)),
+ updatedRecord, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 20),
+ new SchemaAndValueField("int_array", SchemaBuilder.array(Schema.OPTIONAL_INT32_SCHEMA).optional().build(), intList)),
+ updatedRecord, Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-7193")
+ public void shouldHandleToastedBigIntArrayColumnForReplicaIdentityFullTable() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+
+ startConnector(Function.identity(), false);
+ assertConnectorIsRunning();
+ List<Long> bigintList = LongStream.range(1, 10000).boxed().collect(Collectors.toList());
+ final String toastedValue = bigintList.stream().map(String::valueOf)
+ .collect(Collectors.joining(","));
+
+ // INSERT
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN bigint_array bigint[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN bigint_array SET STORAGE EXTENDED;"
+ + "ALTER TABLE test_toast_table REPLICA IDENTITY FULL;"
+ + "INSERT INTO test_toast_table (not_toast, bigint_array) values (10, ARRAY [" + toastedValue + "]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("bigint_array", SchemaBuilder.array(Schema.OPTIONAL_INT64_SCHEMA).optional().build(), bigintList)),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+
+ // UPDATE
+ statement = "UPDATE test_toast_table SET not_toast = 20;";
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "bigint_array"), tbl.retrieveColumnNames());
+ });
+ });
+ SourceRecord updatedRecord = consumer.remove();
+
+ // before and after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("bigint_array", SchemaBuilder.array(Schema.OPTIONAL_INT64_SCHEMA).optional().build(), bigintList)),
+ updatedRecord, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 20),
+ new SchemaAndValueField("bigint_array", SchemaBuilder.array(Schema.OPTIONAL_INT64_SCHEMA).optional().build(), bigintList)),
+ updatedRecord, Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Altering column not allowed while in replication, see https://github.com/yugabyte/yugabyte-db/issues/16625")
+ @Test
+ @FixFor("DBZ-7193")
+ public void shouldHandleToastedUuidArrayColumnForReplicaIdentityFullTable() throws Exception {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_toast_table;",
+ "CREATE TABLE test_toast_table (id SERIAL PRIMARY KEY);");
+
+ startConnector(Function.identity(), false);
+ assertConnectorIsRunning();
+ final List<String> toastedValueList = Stream.generate(UUID::randomUUID).map(String::valueOf).limit(10000).collect(Collectors.toList());
+ final String[] toastedValueArray = toastedValueList.toArray(new String[toastedValueList.size()]);
+ final String toastedValueQuotedString = toastedValueList.stream().map(uuid_str -> ("'" + uuid_str + "'")).collect(Collectors.joining(","));
+
+ // INSERT
+ String statement = "ALTER TABLE test_toast_table ADD COLUMN not_toast integer;"
+ + "ALTER TABLE test_toast_table ADD COLUMN uuid_array uuid[];"
+ + "ALTER TABLE test_toast_table ALTER COLUMN uuid_array SET STORAGE EXTENDED;"
+ + "ALTER TABLE test_toast_table REPLICA IDENTITY FULL;"
+ + "INSERT INTO test_toast_table (not_toast, uuid_array) "
+ + "VALUES (10, ARRAY [" + toastedValueQuotedString + "]::uuid[]);";
+ consumer = testConsumer(1);
+ executeAndWait(statement);
+
+ // after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("uuid_array",
+ SchemaBuilder.array(io.debezium.data.Uuid.builder().optional().build()).optional().build(),
+ Arrays.asList(toastedValueArray))),
+ consumer.remove(),
+ Envelope.FieldName.AFTER);
+
+ // UPDATE
+ statement = "UPDATE test_toast_table SET not_toast = 20;";
+ consumer.expects(1);
+ executeAndWait(statement);
+ consumer.process(record -> {
+ assertWithTask(task -> {
+ Table tbl = ((PostgresConnectorTask) task).getTaskContext().schema().tableFor(TableId.parse("public.test_toast_table", false));
+ assertEquals(Arrays.asList("id", "not_toast", "uuid_array"), tbl.retrieveColumnNames());
+ });
+ });
+ SourceRecord updatedRecord = consumer.remove();
+
+ // before and after record should contain the toasted value
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("uuid_array",
+ SchemaBuilder.array(io.debezium.data.Uuid.builder().optional().build()).optional().build(),
+ Arrays.asList(toastedValueArray))),
+ updatedRecord, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 20),
+ new SchemaAndValueField("uuid_array",
+ SchemaBuilder.array(io.debezium.data.Uuid.builder().optional().build()).optional().build(),
+ Arrays.asList(toastedValueArray))),
+ updatedRecord, Envelope.FieldName.AFTER);
+ }
+
+ @Ignore("Replica identity cannot be altered at runtime")
+ @Test
+ @FixFor("DBZ-1146")
+ public void shouldReceiveChangesForReplicaIdentityFullTableWithToastedValueTableFromSnapshot() throws Exception {
+ testReceiveChangesForReplicaIdentityFullTableWithToastedValue(SchemaRefreshMode.COLUMNS_DIFF_EXCLUDE_UNCHANGED_TOAST, true);
+ }
+
+ @Ignore("Replica identity cannot be altered at runtime")
+ @Test
+ @FixFor("DBZ-1146")
+ public void shouldReceiveChangesForReplicaIdentityFullTableWithToastedValueTableFromStreaming() throws Exception {
+ testReceiveChangesForReplicaIdentityFullTableWithToastedValue(SchemaRefreshMode.COLUMNS_DIFF_EXCLUDE_UNCHANGED_TOAST, false);
+ }
+
+ @Ignore("Replica identity cannot be altered at runtime")
+ @Test
+ @FixFor("DBZ-1146")
+ public void shouldReceiveChangesForReplicaIdentityFullTableWithToastedValueTableFromSnapshotFullDiff() throws Exception {
+ testReceiveChangesForReplicaIdentityFullTableWithToastedValue(SchemaRefreshMode.COLUMNS_DIFF, true);
+ }
+
+ @Ignore("Replica identity cannot be altered at runtime")
+ @Test
+ @FixFor("DBZ-1146")
+ public void shouldReceiveChangesForReplicaIdentityFullTableWithToastedValueTableFromStreamingFullDiff() throws Exception {
+ testReceiveChangesForReplicaIdentityFullTableWithToastedValue(SchemaRefreshMode.COLUMNS_DIFF, false);
+ }
+
+ @Test
+ @FixFor("DBZ-1082")
+ public void shouldHaveNoXminWhenNotEnabled() throws Exception {
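+ // An xmin fetch interval of 0 disables periodic xmin tracking, so the source block
+ // should carry a null xmin.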
+ startConnector(config -> config.with(PostgresConnectorConfig.XMIN_FETCH_INTERVAL, "0"));
+
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ String statement = "INSERT INTO test_table (text) VALUES ('no_xmin');";
+ executeAndWait(statement);
+
+ // Verify the record that made it does not have an xmin
+ SourceRecord rec = assertRecordInserted("public.test_table", PK_FIELD, 2);
+ assertSourceInfo(rec, "yugabyte", "public", "test_table");
+
+ Struct source = ((Struct) rec.value()).getStruct("source");
+ assertThat(source.getInt64("xmin")).isNull();
+
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-1082")
+ public void shouldHaveXminWhenEnabled() throws Exception {
+ startConnector(config -> config.with(PostgresConnectorConfig.XMIN_FETCH_INTERVAL, "10"));
+
+ TestHelper.execute("ALTER TABLE test_table REPLICA IDENTITY DEFAULT;");
+ String statement = "INSERT INTO test_table (text) VALUES ('with_xmin');";
+ executeAndWait(statement);
+
+ // Verify the record that made it through does have an xmin
+ SourceRecord rec = assertRecordInserted("public.test_table", PK_FIELD, 2);
+ assertSourceInfo(rec, "yugabyte", "public", "test_table");
+
+ Struct source = ((Struct) rec.value()).getStruct("source");
+ assertThat(source.getInt64("xmin")).isGreaterThan(0L);
+
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ public void shouldProcessLargerTx() throws Exception {
+ Print.disable();
+ final int numberOfEvents = 1000;
+
+ startConnector();
+ waitForStreamingToStart();
+
+ final String topicPrefix = "public.test_table";
+ final String topicName = topicName(topicPrefix);
+
+ final Stopwatch stopwatch = Stopwatch.reusable();
+ consumer = testConsumer(numberOfEvents);
+ // This measurement is not precise, as it also includes the data load, but
+ // it is sufficient to confirm there is no large difference
+ // in runtime between the cases
+ stopwatch.start();
+ executeAndWait(IntStream.rangeClosed(2, numberOfEvents + 1)
+ .boxed()
+ .map(x -> "INSERT INTO test_table (text) VALUES ('insert" + x + "')")
+ .collect(Collectors.joining(";")));
+ stopwatch.stop();
+ final long firstRun = stopwatch.durations().statistics().getTotal().toMillis();
+ logger.info("Single tx duration = {} ms", firstRun);
+ for (int i = 0; i < numberOfEvents; i++) {
+ SourceRecord record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, i + 2);
+ }
+
+ consumer.expects(numberOfEvents);
+ IntStream.rangeClosed(2, numberOfEvents + 1).forEach(x -> TestHelper.execute("INSERT INTO test_table (text) VALUES ('insert" + x + "')"));
+ stopwatch.start();
+ // There should be no significant difference between the runtime of many TXes and a single large TX.
+ // We still allow generous limits, as the runtime is measured in seconds and we cannot
+ // provide a stable scheduling environment
+ consumer.await(3 * firstRun, TimeUnit.MILLISECONDS);
+ stopwatch.stop();
+ for (int i = 0; i < numberOfEvents; i++) {
+ SourceRecord record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, i + 1002);
+ }
+ logger.info("Many tx duration = {} ms", stopwatch.durations().statistics().getTotal().toMillis());
+ }
+
+ @Test
+ @FixFor("DBZ-1824")
+ public void stopInTheMiddleOfTxAndResume() throws Exception {
+// Print.enable();
+ final int numberOfEvents = 50;
+ final int STOP_ID = 20;
+
+ startConnector(config -> config.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, false), true, record -> {
+ if (!"test_server.public.test_table.Envelope".equals(record.valueSchema().name())) {
+ return false;
+ }
+ final Struct envelope = (Struct) record.value();
+ final Struct after = envelope.getStruct("after");
+ final Integer pk = after.getInt32("pk");
+ return pk == STOP_ID;
+ });
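+ // The predicate supplied above acts as a stop condition: the test harness halts the
+ // engine once it sees the record with pk == STOP_ID, simulating a crash mid-stream.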
+ waitForStreamingToStart();
+
+ final String topicPrefix = "public.test_table";
+ final String topicName = topicName(topicPrefix);
+
+ final int expectFirstRun = STOP_ID - 2;
+ final int expectSecondRun = numberOfEvents - STOP_ID;
+ consumer = testConsumer(expectFirstRun);
+ executeAndWait(IntStream.rangeClosed(2, numberOfEvents + 1)
+ .boxed()
+ .map(x -> "INSERT INTO test_table (text) VALUES ('insert" + x + "')")
+ .collect(Collectors.joining(";")));
+
+ // 2..19, 1 is from snapshot
+ for (int i = 0; i < expectFirstRun; i++) {
+ SourceRecord record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, i + 2);
+ }
+
+ stopConnector();
+
+ startConnector(Function.identity(), false);
+ consumer.expects(expectSecondRun);
+ consumer.await(TestHelper.waitTimeForRecords() * 30, TimeUnit.SECONDS);
+
+ // 20..51
+ for (int i = 0; i < expectSecondRun; i++) {
+ SourceRecord record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, STOP_ID + i);
+ }
+ }
+
+ @Test
+ @FixFor("DBZ-2397")
+ public void restartConnectorInTheMiddleOfUncommittedTx() throws Exception {
+// Print.enable();
+
+ final PostgresConnection tx1Connection = TestHelper.create();
+ tx1Connection.setAutoCommit(false);
+
+ final PostgresConnection tx2Connection = TestHelper.create();
+ tx2Connection.setAutoCommit(true);
+
+ startConnector(config -> config.with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, false), true);
+ waitForStreamingToStart();
+
+ tx1Connection.executeWithoutCommitting("INSERT INTO test_table (text) VALUES ('tx-1-1')");
+ tx2Connection.execute("INSERT INTO test_table (text) VALUES ('tx-2-1')");
+ consumer = testConsumer(1);
+ consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
+ assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("tx-2-1");
+
+ stopConnector();
+ startConnector(Function.identity(), false);
+ waitForStreamingToStart();
+
+ tx1Connection.executeWithoutCommitting("INSERT INTO test_table (text) VALUES ('tx-1-2')");
+ tx2Connection.execute("INSERT INTO test_table (text) VALUES ('tx-2-2')");
+
+ tx1Connection.executeWithoutCommitting("INSERT INTO test_table (text) VALUES ('tx-1-3')");
+ tx2Connection.execute("INSERT INTO test_table (text) VALUES ('tx-2-3')");
+
+ tx1Connection.commit();
+
+ consumer = testConsumer(5);
+ consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
+
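+ // Logical decoding emits transactions in commit order: tx2's autocommitted rows are
+ // decoded first, followed by tx1's rows once that transaction finally commits.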
+ assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("tx-2-2");
+ assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("tx-2-3");
+
+ assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("tx-1-1");
+ assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("tx-1-2");
+ assertThat(((Struct) consumer.remove().value()).getStruct("after").getString("text")).isEqualTo("tx-1-3");
+ }
+
+ @Test
+ @FixFor("DBZ-1730")
+ public void shouldStartConsumingFromSlotLocation() throws Exception {
+// Print.enable();
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, false)
+ .with(EmbeddedEngineConfig.OFFSET_STORAGE, MemoryOffsetBackingStore.class), true);
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO test_table (text) VALUES ('insert2')");
+ consumer.remove();
+
+ stopConnector();
+ TestHelper.execute(
+ "INSERT INTO test_table (text) VALUES ('insert3');",
+ "INSERT INTO test_table (text) VALUES ('insert4')");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, true)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(EmbeddedEngineConfig.OFFSET_STORAGE, MemoryOffsetBackingStore.class), false);
+
+ consumer.expects(3);
+ consumer.await(TestHelper.waitTimeForRecords() * 5, TimeUnit.SECONDS);
+
+ // After losing the offset and skipping the snapshot, we always stream the first record available in the replication slot,
+ // even if we have already seen it, because this case is indistinguishable from plain snapshot-never mode
+ assertThat(((Struct) consumer.remove().value()).getStruct("after").getStruct("text").getString("value")).isEqualTo("insert2");
+
+ assertThat(((Struct) consumer.remove().value()).getStruct("after").getStruct("text").getString("value")).isEqualTo("insert3");
+ assertThat(((Struct) consumer.remove().value()).getStruct("after").getStruct("text").getString("value")).isEqualTo("insert4");
+
+ stopConnector();
+ }
+
+ @Ignore("YB Note: Truncate events are unsupported")
+ @Test
+ @SkipWhenDatabaseVersion(check = EqualityCheck.LESS_THAN, major = 11, reason = "TRUNCATE events only supported in PG11+ PGOUTPUT Plugin")
+ @SkipWhenDecoderPluginNameIsNot(value = SkipWhenDecoderPluginNameIsNot.DecoderPluginName.PGOUTPUT, reason = "Tests specifically that pgoutput handles TRUNCATE messages")
+ public void shouldProcessTruncateMessages() throws Exception {
+ startConnector(builder -> builder
+ .with(PostgresConnectorConfig.SKIPPED_OPERATIONS, "none"));
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO test_table (text) values ('TRUNCATE TEST');");
+
+ SourceRecord record = consumer.remove();
+ assertEquals(TestHelper.topicName("public.test_table"), record.topic());
+ YBVerifyRecord.isValidInsert(record, PK_FIELD, 2);
+
+ consumer.expects(1);
+ TestHelper.execute("TRUNCATE TABLE public.test_table RESTART IDENTITY CASCADE;");
+ consumer.await(TestHelper.waitTimeForRecords(), TimeUnit.SECONDS);
+
+ assertFalse(consumer.isEmpty());
+ SourceRecord truncateRecord = consumer.remove();
+ assertNotNull(truncateRecord);
+ YBVerifyRecord.isValidTruncate(truncateRecord);
+ assertTrue(consumer.isEmpty());
+ }
+
+ @Ignore("Decimal handling mode precise is unsupported")
+ @Test
+ @FixFor("DBZ-1413")
+ public void shouldStreamChangesForDataTypeAlias() throws Exception {
+ TestHelper.execute("CREATE DOMAIN money2 AS money DEFAULT 0.0;");
+ TestHelper.execute("CREATE TABLE alias_table (pk SERIAL, data VARCHAR(50), salary money, salary2 money2, PRIMARY KEY(pk));");
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.PRECISE)
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table"),
+ false);
+
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO alias_table (data, salary, salary2) values ('hello', 7.25, 8.25);");
+
+ SourceRecord rec = assertRecordInserted("public.alias_table", PK_FIELD, 1);
+ assertSourceInfo(rec, "postgres", "public", "alias_table");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField("pk", SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("data", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "hello"),
+ new SchemaAndValueField("salary", Decimal.builder(2).optional().build(), new BigDecimal(7.25)),
+ new SchemaAndValueField("salary2", Decimal.builder(2).optional().build(), new BigDecimal(8.25)));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-1413")
+ public void shouldStreamChangesForDomainAliasAlterTable() throws Exception {
+ TestHelper.execute("CREATE TABLE alias_table (pk SERIAL, data VARCHAR(50), salary money, PRIMARY KEY(pk));");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table")
+ .with("column.propagate.source.type", "public.alias_table.salary3"),
+ false);
+
+ waitForStreamingToStart();
+
+ // Now that streaming has started, alter the table schema
+ TestHelper.execute("CREATE DOMAIN money2 AS money DEFAULT 0.0;");
+ TestHelper.execute("CREATE DOMAIN money3 AS numeric(8,3) DEFAULT 0.0;");
+ TestHelper.execute("ALTER TABLE alias_table ADD COLUMN salary2 money2 NOT NULL;");
+ TestHelper.execute("ALTER TABLE alias_table ADD COLUMN salary3 money3 NOT NULL;");
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO alias_table (data, salary, salary2, salary3) values ('hello', 7.25, 8.25, 123.456);");
+
+ SourceRecord rec = assertRecordInserted("public.alias_table", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "alias_table");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField("pk", SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("data", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "hello"),
+ new SchemaAndValueField("salary", SchemaBuilder.OPTIONAL_FLOAT64_SCHEMA, 7.25),
+ new SchemaAndValueField("salary2", SchemaBuilder.FLOAT64_SCHEMA, 8.25),
+ new SchemaAndValueField("salary3", SchemaBuilder.float64()
+ .parameter(TestHelper.TYPE_NAME_PARAMETER_KEY, "MONEY3")
+ .parameter(TestHelper.TYPE_LENGTH_PARAMETER_KEY, "8")
+ .parameter(TestHelper.TYPE_SCALE_PARAMETER_KEY, "3")
+ .parameter(TestHelper.COLUMN_NAME_PARAMETER_KEY, "salary3")
+ .build(), 123.456));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-1413")
+ public void shouldStreamDomainAliasWithProperModifiers() throws Exception {
+ TestHelper.execute("CREATE TABLE alias_table (pk SERIAL, PRIMARY KEY(pk));");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table"),
+ false);
+
+ waitForStreamingToStart();
+
+ TestHelper.execute("CREATE DOMAIN varbit2 AS varbit(3);");
+ TestHelper.execute("ALTER TABLE public.alias_table ADD COLUMN value varbit2 NOT NULL;");
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO public.alias_table (value) VALUES (B'101');");
+
+ SourceRecord rec = assertRecordInserted("public.alias_table", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "alias_table");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField(PK_FIELD, SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("value", Bits.builder(3).build(), new byte[]{ 5 }));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-1413")
+ public void shouldStreamValuesForDomainTypeOfDomainType() throws Exception {
+ TestHelper.execute("CREATE DOMAIN numeric82 as numeric(8,2);");
+ TestHelper.execute("CREATE DOMAIN numericex as numeric82;");
+ TestHelper.execute("CREATE TABLE alias_table (pk SERIAL, value numericex, PRIMARY KEY (pk));");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table")
+ .with("column.propagate.source.type", "public.alias_table.value"), false);
+
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO alias_table (value) values (123.45);");
+
+ SourceRecord rec = assertRecordInserted("public.alias_table", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "alias_table");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField(PK_FIELD, SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("value", SpecialValueDecimal.builder(DecimalMode.DOUBLE, 8, 2)
+ .optional()
+ .parameter(TestHelper.TYPE_NAME_PARAMETER_KEY, "NUMERICEX")
+ .parameter(TestHelper.TYPE_LENGTH_PARAMETER_KEY, "8")
+ .parameter(TestHelper.TYPE_SCALE_PARAMETER_KEY, "2")
+ .parameter(TestHelper.COLUMN_NAME_PARAMETER_KEY, "value")
+ .build(), 123.45));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ public void shouldStreamValueForAliasLikeIntegerType() throws Exception {
+ TestHelper.execute("CREATE DOMAIN integer_alias AS integer;");
+ TestHelper.execute("CREATE TABLE test_alias_table (pk SERIAL PRIMARY KEY, alias_col integer_alias);");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.test_alias_table"),
+ false);
+
+ waitForStreamingToStart();
+ TestHelper.waitFor(Duration.ofSeconds(30));
+
+ TestHelper.execute("INSERT INTO test_alias_table (alias_col) VALUES (1234);");
+
+ SourceRecords allRecords = consumeRecordsByTopic(1);
+ assertEquals(1, allRecords.allRecordsInOrder().size());
+
+ SourceRecord r = allRecords.recordsForTopic(topicName("public.test_alias_table")).get(0);
+
+ assertValueField(r, "after/pk/value", 1);
+ assertValueField(r, "after/alias_col/value", 1234);
+ }
+
+ @Test
+ @FixFor("DBZ-1413")
+ public void shouldStreamValuesForAliasLikeBaseTypes() throws Exception {
+ TestHelper.execute("CREATE TABLE alias_table (pk SERIAL, PRIMARY KEY (pk));");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.alias_table"),
+ false);
+
+ waitForStreamingToStart();
+
+ TestHelper.execute("CREATE DOMAIN bit2 AS BIT(3);");
+ TestHelper.execute("CREATE DOMAIN smallint2 AS smallint;");
+ TestHelper.execute("CREATE DOMAIN integer2 as integer;");
+ TestHelper.execute("CREATE DOMAIN bigint2 as bigint;");
+ TestHelper.execute("CREATE DOMAIN real2 as real;");
+ TestHelper.execute("CREATE DOMAIN bool2 AS BOOL DEFAULT false;");
+ TestHelper.execute("CREATE DOMAIN float82 as float8;");
+ TestHelper.execute("CREATE DOMAIN numeric2 as numeric(6,2);");
+ TestHelper.execute("CREATE DOMAIN string2 AS varchar(25) DEFAULT NULL;");
+ TestHelper.execute("CREATE DOMAIN date2 AS date;");
+ TestHelper.execute("CREATE DOMAIN time2 as time;");
+ TestHelper.execute("CREATE DOMAIN timetz2 as timetz;");
+ TestHelper.execute("CREATE DOMAIN timestamp2 as timestamp;");
+ TestHelper.execute("CREATE DOMAIN timestamptz2 AS timestamptz;");
+ TestHelper.execute("CREATE DOMAIN timewotz2 as time without time zone;");
+ TestHelper.execute("CREATE DOMAIN interval2 as interval;");
+ TestHelper.execute("CREATE DOMAIN char2 as char;");
+ TestHelper.execute("CREATE DOMAIN text2 as text;");
+ TestHelper.execute("CREATE DOMAIN json2 as json;");
+ TestHelper.execute("CREATE DOMAIN uuid2 as uuid;");
+ TestHelper.execute("CREATE DOMAIN varbit2 as varbit(3);");
+ TestHelper.execute("CREATE DOMAIN inet2 as inet;");
+ TestHelper.execute("CREATE DOMAIN cidr2 as cidr;");
+ TestHelper.execute("CREATE DOMAIN macaddr2 as macaddr;");
+
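+ // One DOMAIN alias per base type: the connector must resolve each alias back to its
+ // underlying type when building the record schema.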
+ TestHelper.execute("ALTER TABLE alias_table "
+ + "ADD COLUMN bit_base bit(3) NOT NULL, ADD COLUMN bit_alias bit2 NOT NULL, "
+ + "ADD COLUMN smallint_base smallint NOT NULL, ADD COLUMN smallint_alias smallint2 NOT NULL, "
+ + "ADD COLUMN integer_base integer NOT NULL, ADD COLUMN integer_alias integer2 NOT NULL, "
+ + "ADD COLUMN bigint_base bigint NOT NULL, ADD COLUMN bigint_alias bigint2 NOT NULL, "
+ + "ADD COLUMN real_base real NOT NULL, ADD COLUMN real_alias real2 NOT NULL, "
+ + "ADD COLUMN float8_base float8 NOT NULL, ADD COLUMN float8_alias float82 NOT NULL, "
+ + "ADD COLUMN numeric_base numeric(6,2) NOT NULL, ADD COLUMN numeric_alias numeric2 NOT NULL, "
+ + "ADD COLUMN bool_base bool NOT NULL, ADD COLUMN bool_alias bool2 NOT NULL, "
+ + "ADD COLUMN string_base varchar(25) NOT NULL, ADD COLUMN string_alias string2 NOT NULL, "
+ + "ADD COLUMN date_base date NOT NULL, ADD COLUMN date_alias date2 NOT NULL, "
+ + "ADD COLUMN time_base time NOT NULL, ADD COLUMN time_alias time2 NOT NULL, "
+ + "ADD COLUMN timetz_base timetz NOT NULL, ADD COLUMN timetz_alias timetz2 NOT NULL, "
+ + "ADD COLUMN timestamp_base timestamp NOT NULL, ADD COLUMN timestamp_alias timestamp2 NOT NULL, "
+ + "ADD COLUMN timestamptz_base timestamptz NOT NULL, ADD COLUMN timestamptz_alias timestamptz2 NOT NULL, "
+ + "ADD COLUMN timewottz_base time without time zone NOT NULL, ADD COLUMN timewottz_alias timewotz2 NOT NULL, "
+ + "ADD COLUMN interval_base interval NOT NULL, ADD COLUMN interval_alias interval2 NOT NULL, "
+ + "ADD COLUMN char_base char NOT NULL, ADD COLUMN char_alias char2 NOT NULL, "
+ + "ADD COLUMN text_base text NOT NULL, ADD COLUMN text_alias text2 NOT NULL, "
+ + "ADD COLUMN json_base json NOT NULL, ADD COLUMN json_alias json2 NOT NULL, "
+ + "ADD COLUMN uuid_base UUID NOT NULL, ADD COLUMN uuid_alias uuid2 NOT NULL, "
+ + "ADD COLUMN varbit_base varbit(3) NOT NULL, ADD COLUMN varbit_alias varbit2 NOT NULL,"
+ + "ADD COLUMN inet_base inet NOT NULL, ADD COLUMN inet_alias inet2 NOT NULL, "
+ + "ADD COLUMN cidr_base cidr NOT NULL, ADD COLUMN cidr_alias cidr2 NOT NULL, "
+ + "ADD COLUMN macaddr_base macaddr NOT NULL, ADD COLUMN macaddr_alias macaddr2 NOT NULL");
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO alias_table ("
+ + "bit_base, bit_alias, "
+ + "smallint_base, smallint_alias, "
+ + "integer_base, integer_alias, "
+ + "bigint_base, bigint_alias, "
+ + "real_base, real_alias, "
+ + "float8_base, float8_alias, "
+ + "numeric_base, numeric_alias, "
+ + "bool_base, bool_alias, "
+ + "string_base, string_alias, "
+ + "date_base, date_alias, "
+ + "time_base, time_alias, "
+ + "timetz_base, timetz_alias, "
+ + "timestamp_base, timestamp_alias, "
+ + "timestamptz_base, timestamptz_alias, "
+ + "timewottz_base, timewottz_alias, "
+ + "interval_base, interval_alias, "
+ + "char_base, char_alias, "
+ + "text_base, text_alias, "
+ + "json_base, json_alias, "
+ + "uuid_base, uuid_alias, "
+ + "varbit_base, varbit_alias, "
+ + "inet_base, inet_alias, "
+ + "cidr_base, cidr_alias, "
+ + "macaddr_base, macaddr_alias "
+ + ") VALUES ("
+ + "B'101', B'101', "
+ + "1, 1, "
+ + "1, 1, "
+ + "1000, 1000, "
+ + "3.14, 3.14, "
+ + "3.14, 3.14, "
+ + "1234.12, 1234.12, "
+ + "true, true, "
+ + "'hello', 'hello', "
+ + "'2019-10-02', '2019-10-02', "
+ + "'01:02:03', '01:02:03', "
+ + "'01:02:03.123789Z', '01:02:03.123789Z', "
+ + "'2019-10-02T01:02:03.123456', '2019-10-02T01:02:03.123456', "
+ + "'2019-10-02T13:51:30.123456+02:00'::TIMESTAMPTZ, '2019-10-02T13:51:30.123456+02:00'::TIMESTAMPTZ, "
+ + "'01:02:03', '01:02:03', "
+ + "'1 year 2 months 3 days 4 hours 5 minutes 6 seconds', '1 year 2 months 3 days 4 hours 5 minutes 6 seconds', "
+ + "'a', 'a', "
+ + "'Hello World', 'Hello World', "
+ + "'{\"key\": \"value\"}', '{\"key\": \"value\"}', "
+ + "'40e6215d-b5c6-4896-987c-f30f3678f608', '40e6215d-b5c6-4896-987c-f30f3678f608', "
+ + "B'101', B'101', "
+ + "'192.168.0.1', '192.168.0.1', "
+ + "'192.168/24', '192.168/24', "
+ + "'08:00:2b:01:02:03', '08:00:2b:01:02:03' "
+ + ");");
+
+ SourceRecord rec = assertRecordInserted("public.alias_table", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "alias_table");
+
+ assertRecordSchemaAndValues(schemasAndValuesForDomainAliasTypes(true), rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-920")
+ public void shouldStreamEnumAsKnownType() throws Exception {
+ // Specifically enable `column.propagate.source.type` here to validate later that the actual
+ // type, length, and scale values are resolved correctly when paired with Enum types.
+ TestHelper.execute("CREATE TABLE enum_table (pk SERIAL, PRIMARY KEY (pk));");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with("column.propagate.source.type", "public.enum_table.value")
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.enum_table"), false);
+
+ waitForStreamingToStart();
+
+ // We create the enum type after streaming started to simulate some future schema change
+ TestHelper.execute("CREATE TYPE test_type AS ENUM ('V1','V2');");
+ TestHelper.execute("ALTER TABLE enum_table ADD COLUMN value test_type NOT NULL");
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO enum_table (value) VALUES ('V1');");
+
+ SourceRecord rec = assertRecordInserted("public.enum_table", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "enum_table");
+
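+ // Enum columns have no declared length, so the propagated type length is
+ // expected to resolve to Integer.MAX_VALUE.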
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField(PK_FIELD, SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("value", Enum.builder("V1,V2")
+ .parameter(TestHelper.TYPE_NAME_PARAMETER_KEY, "TEST_TYPE")
+ .parameter(TestHelper.TYPE_LENGTH_PARAMETER_KEY, String.valueOf(Integer.MAX_VALUE))
+ .parameter(TestHelper.TYPE_SCALE_PARAMETER_KEY, "0")
+ .parameter(TestHelper.COLUMN_NAME_PARAMETER_KEY, "value")
+ .build(), "V1"));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-5038")
+ public void shouldEmitEnumColumnDefaultValuesInSchema() throws Exception {
+ // Specifically enable `column.propagate.source.type` here to validate later that the actual
+ // type, length, and scale values are resolved correctly when paired with Enum types.
+ TestHelper.execute("CREATE TABLE enum_table (pk SERIAL, PRIMARY KEY (pk));");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, true)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with("column.propagate.source.type", "public.enum_table.value")
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.enum_table"), false);
+
+ waitForStreamingToStart();
+
+ // We create the enum type after streaming started to simulate some future schema change
+ TestHelper.execute("CREATE TYPE test_type AS ENUM ('V1','V2');");
+ TestHelper.execute("ALTER TABLE enum_table ADD COLUMN data varchar(50) NOT NULL");
+ TestHelper.execute("ALTER TABLE enum_table ADD COLUMN value test_type NOT NULL DEFAULT 'V2'::test_type");
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO enum_table (data) VALUES ('V1');");
+
+ SourceRecord rec = assertRecordInserted("public.enum_table", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "enum_table");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField(PK_FIELD, SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("data", SchemaBuilder.string().build(), "V1"),
+ new SchemaAndValueField("value", Enum.builder("V1,V2")
+ .parameter(TestHelper.TYPE_NAME_PARAMETER_KEY, "TEST_TYPE")
+ .parameter(TestHelper.TYPE_LENGTH_PARAMETER_KEY, String.valueOf(Integer.MAX_VALUE))
+ .parameter(TestHelper.TYPE_SCALE_PARAMETER_KEY, "0")
+ .parameter(TestHelper.COLUMN_NAME_PARAMETER_KEY, "value")
+ .defaultValue("V2")
+ .build(), "V2"));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Ignore("YB Note: Enum array unsupported")
+ @Test
+ public void shouldStreamEnumArrayAsKnownType() throws Exception {
+ // Specifically enable `column.propagate.source.type` here to validate later that the actual
+ // type, length, and scale values are resolved correctly when paired with Enum types.
+ TestHelper.execute("CREATE TABLE enum_array_table (pk SERIAL, PRIMARY KEY (pk));");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with("column.propagate.source.type", "public.enum_array_table.value")
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.enum_array_table"), false);
+
+ waitForStreamingToStart();
+
+ // We create the enum type after streaming started to simulate some future schema change
+ TestHelper.execute("CREATE TYPE test_type AS ENUM ('V1','V2');");
+ TestHelper.execute("ALTER TABLE enum_array_table ADD COLUMN value test_type[] NOT NULL;");
+
+ consumer = testConsumer(1);
+
+ // INSERT
+ executeAndWait("INSERT INTO enum_array_table (value) VALUES ('{V1, V2}');");
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ SourceRecord insertRec = assertRecordInserted("public.enum_array_table", PK_FIELD, 1);
+ assertSourceInfo(insertRec, "yugabyte", "public", "enum_array_table");
+
+ List<SchemaAndValueField> expectedInsert = Arrays.asList(
+ new SchemaAndValueField(PK_FIELD, SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("value", SchemaBuilder.array(Enum.builder("V1,V2"))
+ .parameter(TestHelper.TYPE_NAME_PARAMETER_KEY, "_TEST_TYPE")
+ .parameter(TestHelper.TYPE_LENGTH_PARAMETER_KEY, String.valueOf(Integer.MAX_VALUE))
+ .parameter(TestHelper.TYPE_SCALE_PARAMETER_KEY, "0")
+ .parameter(TestHelper.COLUMN_NAME_PARAMETER_KEY, "value")
+ .build(), Arrays.asList("V1", "V2")));
+ assertRecordSchemaAndValues(expectedInsert, insertRec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+
+ // UPDATE
+ executeAndWait("UPDATE enum_array_table set value = '{V1}';");
+ SourceRecord updateRec = consumer.remove();
+ assertSourceInfo(updateRec, "yugabyte", "public", "enum_array_table");
+
+ List<SchemaAndValueField> expectedUpdate = Arrays.asList(
+ new SchemaAndValueField(PK_FIELD, SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("value", SchemaBuilder.array(Enum.builder("V1,V2"))
+ .parameter(TestHelper.TYPE_NAME_PARAMETER_KEY, "_TEST_TYPE")
+ .parameter(TestHelper.TYPE_LENGTH_PARAMETER_KEY, String.valueOf(Integer.MAX_VALUE))
+ .parameter(TestHelper.TYPE_SCALE_PARAMETER_KEY, "0")
+ .parameter(TestHelper.COLUMN_NAME_PARAMETER_KEY, "value")
+ .build(), Arrays.asList("V1")));
+ assertRecordSchemaAndValues(expectedUpdate, updateRec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+
+ // DELETE
+ executeAndWait("DELETE FROM enum_array_table;");
+ SourceRecord deleteRec = consumer.remove();
+ YBVerifyRecord.isValidDelete(deleteRec, PK_FIELD, 1);
+ assertSourceInfo(deleteRec, "yugabyte", "public", "enum_array_table");
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-1969")
+ public void shouldStreamTimeArrayTypesAsKnownTypes() throws Exception {
+ TestHelper.execute("CREATE TABLE time_array_table (pk SERIAL, "
+ + "timea time[] NOT NULL, "
+ + "timetza timetz[] NOT NULL, "
+ + "timestampa timestamp[] NOT NULL, "
+ + "timestamptza timestamptz[] NOT NULL, primary key(pk));");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.time_array_table"), false);
+
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+
+ // INSERT
+ executeAndWait("INSERT INTO time_array_table (timea, timetza, timestampa, timestamptza) "
+ + "values ("
+ + "'{00:01:02,01:02:03}', "
+ + "'{13:51:02+0200,14:51:03+0200}', "
+ + "'{2020-04-01 00:01:02,2020-04-01 01:02:03}', "
+ + "'{2020-04-01 13:51:02+02,2020-04-01 14:51:03+02}')");
+
+ SourceRecord insert = assertRecordInserted("public.time_array_table", PK_FIELD, 1);
+ assertSourceInfo(insert, "yugabyte", "public", "time_array_table");
+ assertRecordSchemaAndValues(schemaAndValuesForTimeArrayTypes(), insert, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+
+ // UPDATE
+ executeAndWait("UPDATE time_array_table SET "
+ + "timea = '{00:01:02,02:03:04}', "
+ + "timetza = '{00:01:02-0400,01:03:04-0400}', "
+ + "timestampa = '{2020-04-01 00:01:02,2020-04-25 03:04:05}', "
+ + "timestamptza = '{2020-04-01 00:01:02-04,2020-04-25 03:04:05-04}'");
+
+ SourceRecord update = consumer.remove();
+ assertSourceInfo(update, "yugabyte", "public", "time_array_table");
+
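+ // MicroTime values are microseconds past midnight, while ZonedTime and
+ // ZonedTimestamp values are normalized to UTC strings.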
+ List<SchemaAndValueField> expectedUpdate = Arrays.asList(
+ new SchemaAndValueField("timea",
+ SchemaBuilder.array(MicroTime.builder().optional().build()).build(),
+ Arrays.asList(LocalTime.parse("00:01:02").toNanoOfDay() / 1_000,
+ LocalTime.parse("02:03:04").toNanoOfDay() / 1_000)),
+ new SchemaAndValueField("timetza",
+ SchemaBuilder.array(ZonedTime.builder().optional().build()).build(),
+ Arrays.asList("04:01:02Z", "05:03:04Z")),
+ new SchemaAndValueField("timestampa",
+ SchemaBuilder.array(MicroTimestamp.builder().optional().build()).build(),
+ Arrays.asList(OffsetDateTime.of(2020, 4, 1, 0, 1, 2, 0, ZoneOffset.UTC).toInstant().toEpochMilli() * 1_000,
+ OffsetDateTime.of(2020, 4, 25, 3, 4, 5, 0, ZoneOffset.UTC).toInstant().toEpochMilli() * 1_000)),
+ new SchemaAndValueField("timestamptza",
+ SchemaBuilder.array(ZonedTimestamp.builder().optional().build()).build(),
+ Arrays.asList("2020-04-01T04:01:02.000000Z", "2020-04-25T07:04:05.000000Z")));
+ assertRecordSchemaAndValues(expectedUpdate, update, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+
+ // DELETE
+ executeAndWait("DELETE FROM time_array_table;");
+ SourceRecord deleteRec = consumer.remove();
+ YBVerifyRecord.isValidDelete(deleteRec, PK_FIELD, 1);
+ assertSourceInfo(deleteRec, "yugabyte", "public", "time_array_table");
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor({ "DBZ-1680", "DBZ-5038" })
+ public void shouldStreamEnumsWhenIncludeUnknownDataTypesDisabled() throws Exception {
+ // Specifically enable `column.propagate.source.type` here to validate later that the actual
+ // type, length, and scale values are resolved correctly when paired with Enum types.
+ TestHelper.execute("CREATE TYPE test_type AS ENUM ('V1','V2');");
+ TestHelper.execute("CREATE TABLE enum_table (pk SERIAL, data varchar(25) NOT NULL, value test_type NOT NULL DEFAULT 'V1', PRIMARY KEY (pk));");
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.INCLUDE_UNKNOWN_DATATYPES, false)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with("column.propagate.source.type", "public.enum_table.value")
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.enum_table"), false);
+
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO enum_table (data) VALUES ('hello');");
+
+ SourceRecord rec = assertRecordInserted("public.enum_table", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "enum_table");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField(PK_FIELD, SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("data", Schema.STRING_SCHEMA, "hello"),
+ new SchemaAndValueField("value", Enum.builder("V1,V2")
+ .parameter(TestHelper.TYPE_NAME_PARAMETER_KEY, "TEST_TYPE")
+ .parameter(TestHelper.TYPE_LENGTH_PARAMETER_KEY, String.valueOf(Integer.MAX_VALUE))
+ .parameter(TestHelper.TYPE_SCALE_PARAMETER_KEY, "0")
+ .parameter(TestHelper.COLUMN_NAME_PARAMETER_KEY, "value")
+ .defaultValue("V1")
+ .build(), "V1"));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
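+ /**
+ * Exercises REPLICA IDENTITY FULL on a table holding a value large enough to be
+ * TOASTed (stored out-of-line by PostgreSQL). With a full replica identity, the
+ * before image of UPDATE and DELETE events must still carry the toasted value.
+ */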
+ private void testReceiveChangesForReplicaIdentityFullTableWithToastedValue(SchemaRefreshMode mode, boolean tablesBeforeStart)
+ throws Exception {
+ if (tablesBeforeStart) {
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_table;",
+ "CREATE TABLE test_table (id SERIAL, not_toast int, text TEXT);",
+ "ALTER TABLE test_table REPLICA IDENTITY FULL");
+
+ awaitTableMetaDataIsQueryable(new TableId(null, "public", "test_table"));
+ }
+
+ startConnector(config -> config.with(PostgresConnectorConfig.SCHEMA_REFRESH_MODE, mode), false);
+ assertConnectorIsRunning();
+ consumer = testConsumer(1);
+
+ final String toastedValue = RandomStringUtils.randomAlphanumeric(10000);
+
+ if (!tablesBeforeStart) {
+ waitForStreamingToStart();
+ TestHelper.execute(
+ "DROP TABLE IF EXISTS test_table;",
+ "CREATE TABLE test_table (id SERIAL, not_toast int, text TEXT);",
+ "ALTER TABLE test_table REPLICA IDENTITY FULL");
+
+ awaitTableMetaDataIsQueryable(new TableId(null, "public", "test_table"));
+
+ }
+
+ // INSERT
+ String statement = "INSERT INTO test_table (not_toast, text) VALUES (10,'" + toastedValue + "');";
+ assertInsert(
+ statement,
+ Arrays.asList(
+ new SchemaAndValueField("id", SchemaBuilder.int32().defaultValue(0).build(), 1), // SERIAL is NOT NULL implicitly
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, toastedValue)));
+
+ // UPDATE
+ consumer.expects(1);
+ executeAndWait("UPDATE test_table set not_toast = 20");
+ SourceRecord updatedRecord = consumer.remove();
+
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("id", SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 10),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, toastedValue)), updatedRecord, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("id", SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 20),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, toastedValue)), updatedRecord, Envelope.FieldName.AFTER);
+
+ // DELETE
+ consumer.expects(2);
+ executeAndWait("DELETE FROM test_table");
+ SourceRecord deletedRecord = consumer.remove();
+ SourceRecord tombstoneRecord = consumer.remove();
+ assertThat(tombstoneRecord.value()).isNull();
+ assertThat(tombstoneRecord.valueSchema()).isNull();
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("id", SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 20),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, toastedValue)), deletedRecord, Envelope.FieldName.BEFORE);
+
+ // INSERT null
+ consumer.expects(1);
+ statement = "INSERT INTO test_table (not_toast, text) VALUES (100, null);";
+ assertInsert(
+ statement,
+ Arrays.asList(
+ new SchemaAndValueField("id", SchemaBuilder.int32().defaultValue(0).build(), 2), // SERIAL is NOT NULL implicitly
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 100),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, null)));
+
+ // UPDATE null
+ consumer.expects(1);
+ executeAndWait("UPDATE test_table set not_toast = 200 WHERE id=2");
+ updatedRecord = consumer.remove();
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("id", SchemaBuilder.int32().defaultValue(0).build(), 2),
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 100),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, null)), updatedRecord, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("id", SchemaBuilder.int32().defaultValue(0).build(), 2),
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 200),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, null)), updatedRecord, Envelope.FieldName.AFTER);
+
+ // DELETE null
+ consumer.expects(2);
+ executeAndWait("DELETE FROM test_table WHERE id=2");
+ deletedRecord = consumer.remove();
+ tombstoneRecord = consumer.remove();
+ assertThat(tombstoneRecord.value()).isNull();
+ assertThat(tombstoneRecord.valueSchema()).isNull();
+ assertRecordSchemaAndValues(Arrays.asList(
+ new SchemaAndValueField("id", SchemaBuilder.int32().defaultValue(0).build(), 2),
+ new SchemaAndValueField("not_toast", SchemaBuilder.OPTIONAL_INT32_SCHEMA, 200),
+ new SchemaAndValueField("text", SchemaBuilder.OPTIONAL_STRING_SCHEMA, null)), deletedRecord, Envelope.FieldName.BEFORE);
+ }
+
+ /**
+ * It appears in some cases retrieving column metadata "too quickly" raises
+ * a PSQLException: ERROR: could not open relation with OID xyz.
+ * This causes intermittent failures during schema refresh.
+ * This is an attempt to avoid that situation by making sure the metadata can be retrieved
+ * before proceeding.
+ */
+ private void awaitTableMetaDataIsQueryable(TableId tableId) {
+ Awaitility.await()
+ .atMost(TestHelper.waitTimeForRecords() * 10, TimeUnit.SECONDS)
+ .ignoreException(PSQLException.class)
+ .until(() -> {
+ try (PostgresConnection connection = TestHelper.createWithTypeRegistry()) {
+ Tables tables = new Tables();
+ connection.readSchema(tables, null, "public", TableFilter.fromPredicate(t -> t.equals(tableId)), null, false);
+ return tables.forTable(tableId) != null;
+ }
+ });
+ }
+
+ @Ignore("YB Note: We do not populate length and scale")
+ @Test
+ @FixFor({ "DBZ-1916", "DBZ-1830" })
+ public void shouldPropagateSourceTypeByDatatype() throws Exception {
+ TestHelper.execute("DROP TABLE IF EXISTS test_table;");
+ TestHelper.execute("CREATE TABLE test_table (id SERIAL, c1 INT, c2 INT, c3a NUMERIC(5,2), c3b VARCHAR(128), f1 float(10), f2 decimal(8,4), primary key (id));");
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with("datatype.propagate.source.type", ".+\\.NUMERIC,.+\\.VARCHAR,.+\\.FLOAT4"), false);
+
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO test_table (id,c1,c2,c3a,c3b,f1,f2) values (1, 123, 456, 789.01, 'test', 1.228, 234.56);");
+
+ final SourceRecord record = assertRecordInserted("public.test_table", "id", 1);
+ final Field before = record.valueSchema().field("before");
+
+ // no type info requested as per given data types
+ assertThat(before.schema().field("id").schema().parameters()).isNull();
+ assertThat(before.schema().field("c1").schema().parameters()).isNull();
+ assertThat(before.schema().field("c2").schema().parameters()).isNull();
+
+ assertThat(before.schema().field("c3a").schema().parameters()).contains(
+ entry(TYPE_NAME_PARAMETER_KEY, "NUMERIC"),
+ entry(TYPE_LENGTH_PARAMETER_KEY, "5"),
+ entry(TYPE_SCALE_PARAMETER_KEY, "2"));
+
+ // variable width, name and length info
+ assertThat(before.schema().field("c3b").schema().parameters()).contains(
+ entry(TYPE_NAME_PARAMETER_KEY, "VARCHAR"),
+ entry(TYPE_LENGTH_PARAMETER_KEY, "128"));
+
+ assertThat(before.schema().field("f2").schema().parameters()).contains(
+ entry(TYPE_NAME_PARAMETER_KEY, "NUMERIC"),
+ entry(TYPE_LENGTH_PARAMETER_KEY, "8"),
+ entry(TYPE_SCALE_PARAMETER_KEY, "4"));
+
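+ // float(10) has precision in the 1..24 range, which PostgreSQL stores as FLOAT4 (real).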
+ assertThat(before.schema().field("f1").schema().parameters()).contains(
+ entry(TYPE_NAME_PARAMETER_KEY, "FLOAT4"),
+ entry(TYPE_LENGTH_PARAMETER_KEY, "8"),
+ entry(TYPE_SCALE_PARAMETER_KEY, "8"));
+ }
+
+ @Test
+ @FixFor({ "DBZ-3074" })
+ public void shouldMaintainPrimaryKeyOrderOnSchemaChange() throws Exception {
+ startConnector();
+ consumer = testConsumer(1);
+ executeAndWait("CREATE TABLE test_should_maintain_primary_key_order(b INTEGER, d INTEGER, c INTEGER, a INTEGER, val INTEGER, PRIMARY KEY (b, d, c, a));" +
+ "INSERT INTO test_should_maintain_primary_key_order VALUES (1, 2, 3, 4, 5);");
+
+ SourceRecord record = consumer.remove();
+ assertEquals(1, ((Struct) record.value()).getStruct("after").getStruct("b").getInt32("value").intValue());
+
+ List<Field> fields = record.keySchema().fields();
+ String[] expectedFieldOrder = new String[]{ "b", "d", "c", "a" };
+
+ for (int i = 0; i < fields.size(); i++) {
+ assertEquals("Key field names should in order", expectedFieldOrder[i], fields.get(i).name());
+ }
+
+ // Alter the table to trigger a schema change event. Validate that the new schema maintains the primary key order.
+ consumer.expects(1);
+ executeAndWait("ALTER TABLE test_should_maintain_primary_key_order ADD COLUMN val2 INTEGER;" +
+ "INSERT INTO test_should_maintain_primary_key_order VALUES (10, 11, 12, 13, 14, 15);");
+
+ record = consumer.remove();
+ assertEquals(10, ((Struct) record.value()).getStruct("after").getStruct("b").getInt32("value").intValue());
+
+ fields = record.keySchema().fields();
+ for (int i = 0; i < fields.size(); i++) {
+ assertEquals("Key field names should in order", expectedFieldOrder[i], fields.get(i).name());
+ }
+ }
+
+ @Ignore("Decimal handling mode precise unsupported")
+ @Test
+ @FixFor("DBZ-1931")
+ public void testStreamMoneyAsDefaultPrecise() throws Exception {
+ TestHelper.execute("CREATE TABLE salary (pk SERIAL, name VARCHAR(50), salary money, PRIMARY KEY(pk));");
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.salary"),
+ false);
+
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO salary (name, salary) values ('Joe', 123.45);");
+
+ SourceRecord rec = assertRecordInserted("public.salary", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "salary");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField("pk", SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("name", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "Joe"),
+ new SchemaAndValueField("salary", Decimal.builder(2).optional().build(), BigDecimal.valueOf(123.45)));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-1931")
+ public void testStreamMoneyAsString() throws Exception {
+ TestHelper.execute("CREATE TABLE salary (pk SERIAL, name VARCHAR(50), salary money, PRIMARY KEY(pk));");
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.STRING)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.salary"),
+ false);
+
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO salary (name, salary) values ('Joe', 123.45);");
+
+ SourceRecord rec = assertRecordInserted("public.salary", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "salary");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField("pk", SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("name", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "Joe"),
+ new SchemaAndValueField("salary", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "123.45"));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-1931")
+ public void testStreamMoneyAsDouble() throws Exception {
+ TestHelper.execute("CREATE TABLE salary (pk SERIAL, name VARCHAR(50), salary money, PRIMARY KEY(pk));");
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.DOUBLE)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.NEVER)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.salary"),
+ false);
+
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO salary (name, salary) values ('Joe', 123.45);");
+
+ SourceRecord rec = assertRecordInserted("public.salary", PK_FIELD, 1);
+ assertSourceInfo(rec, "yugabyte", "public", "salary");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField("pk", SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("name", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "Joe"),
+ new SchemaAndValueField("salary", SchemaBuilder.OPTIONAL_FLOAT64_SCHEMA, 123.45));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Ignore("Decimal handling mode precise unsupported")
+ @Test
+ @FixFor("DBZ-1931")
+ public void testStreamMoneyPreciseDecimalFraction() throws Exception {
+ TestHelper.execute("CREATE TABLE salary (pk SERIAL, name VARCHAR(50), salary money, PRIMARY KEY(pk));");
+
+ startConnector(config -> config
+ .with(PostgresConnectorConfig.DECIMAL_HANDLING_MODE, DecimalHandlingMode.PRECISE)
+ .with(PostgresConnectorConfig.MONEY_FRACTION_DIGITS, 1)
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, SnapshotMode.INITIAL)
+ .with(PostgresConnectorConfig.TABLE_INCLUDE_LIST, "public.salary"),
+ false);
+
+ waitForStreamingToStart();
+
+ consumer = testConsumer(1);
+ executeAndWait("INSERT INTO salary (name, salary) values ('Joe', 123.4567);");
+
+ SourceRecord rec = assertRecordInserted("public.salary", PK_FIELD, 1);
+ assertSourceInfo(rec, "postgres", "public", "salary");
+
+ List<SchemaAndValueField> expected = Arrays.asList(
+ new SchemaAndValueField("pk", SchemaBuilder.int32().defaultValue(0).build(), 1),
+ new SchemaAndValueField("name", SchemaBuilder.OPTIONAL_STRING_SCHEMA, "Joe"),
+ new SchemaAndValueField("salary", Decimal.builder(1).optional().build(), BigDecimal.valueOf(123.5)));
+
+ assertRecordSchemaAndValues(expected, rec, Envelope.FieldName.AFTER);
+ assertThat(consumer.isEmpty()).isTrue();
+ }
+
+ @Test
+ @FixFor("DBZ-6648")
+ public void shouldHandleNonNullIntervalFieldDelete() throws Exception {
+ TestHelper.execute("CREATE TABLE test_interval (pk SERIAL, i interval NOT NULL, PRIMARY KEY(pk));");
+ // add a new entry and remove both
+ String statements = "INSERT INTO test_interval (pk, i) VALUES (1, '2 Months 3 Days');" +
+ "DELETE FROM test_interval WHERE pk = 1;";
+
+ startConnector(config -> config.with(PostgresConnectorConfig.INTERVAL_HANDLING_MODE, IntervalHandlingMode.STRING));
+ waitForStreamingToStart();
+
+ consumer = testConsumer(3);
+ executeAndWait(statements);
+
+ String topicPrefix = "public.test_interval";
+ String topicName = topicName(topicPrefix);
+ assertRecordInserted(topicPrefix, PK_FIELD, 1);
+
+ // entry removed
+ SourceRecord record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidDelete(record, PK_FIELD, 1);
+
+ // followed by a tombstone
+ record = consumer.remove();
+ assertEquals(topicName, record.topic());
+ YBVerifyRecord.isValidTombstone(record, PK_FIELD, 1);
+ }
+
+ private void assertHeartBeatRecord(SourceRecord heartbeat) {
+ assertEquals("__debezium-heartbeat." + TestHelper.TEST_SERVER, heartbeat.topic());
+
+ Struct key = (Struct) heartbeat.key();
+ assertThat(key.get("serverName")).isEqualTo(TestHelper.TEST_SERVER);
+
+ Struct value = (Struct) heartbeat.value();
+ assertThat(value.getInt64("ts_ms")).isLessThanOrEqualTo(Instant.now().toEpochMilli());
+ }
+
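+ // Heartbeat records are published to "__debezium-heartbeat.<topic.prefix>";
+ // keep consuming until more than ten of them have been observed.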
+ private void waitForSeveralHeartbeats() {
+ final AtomicInteger heartbeatCount = new AtomicInteger();
+ Awaitility.await().atMost(10, TimeUnit.SECONDS).until(() -> {
+ final SourceRecord record = consumeRecord();
+ if (record != null) {
+ if (record.topic().equalsIgnoreCase("__debezium-heartbeat.test_server")) {
+ assertHeartBeatRecord(record);
+ heartbeatCount.incrementAndGet();
+ }
+ }
+ return heartbeatCount.get() > 10;
+ });
+ }
+
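+ // pgoutput requires the 'proto_version' and 'publication_names' options when
+ // reading changes from a slot, whereas decoderbufs takes no extra options.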
+ private String getReplicationSlotChangesQuery() {
+ switch (TestHelper.decoderPlugin()) {
+ case DECODERBUFS:
+ return "SELECT pg_logical_slot_get_binary_changes('" + ReplicationConnection.Builder.DEFAULT_SLOT_NAME + "', " +
+ "NULL, NULL)";
+ case PGOUTPUT:
+ return "SELECT pg_logical_slot_get_binary_changes('" + ReplicationConnection.Builder.DEFAULT_SLOT_NAME + "', " +
+ "NULL, NULL, 'proto_version', '1', 'publication_names', '" + ReplicationConnection.Builder.DEFAULT_PUBLICATION_NAME + "')";
+ }
+ throw new UnsupportedOperationException("Test must be updated for new logical decoder type.");
+ }
+
+ private void assertInsert(String statement, List<SchemaAndValueField> expectedSchemaAndValuesByColumn) {
+ assertInsert(statement, null, expectedSchemaAndValuesByColumn);
+ }
+
+ private void assertInsert(String statement, Integer pk, List<SchemaAndValueField> expectedSchemaAndValuesByColumn) {
+ TableId table = tableIdFromInsertStmt(statement);
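+ // Topic names are derived from "<schema>.<table>", with spaces and quotes
+ // replaced by underscores.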
+ String expectedTopicName = table.schema() + "." + table.table();
+ expectedTopicName = expectedTopicName.replaceAll("[ \"]", "_");
+
+ try {
+ executeAndWait(statement);
+ SourceRecord record = assertRecordInserted(expectedTopicName, pk != null ? PK_FIELD : null, pk);
+ assertRecordOffsetAndSnapshotSource(record, SnapshotRecord.FALSE);
+ assertSourceInfo(record, "yugabyte", table.schema(), table.table());
+ assertRecordSchemaAndValues(expectedSchemaAndValuesByColumn, record, Envelope.FieldName.AFTER);
+ }
+ catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void assertDelete(String statement, Integer pk,
+ List<SchemaAndValueField> expectedSchemaAndValuesByColumn) {
+ TableId table = tableIdFromDeleteStmt(statement);
+ String expectedTopicName = table.schema() + "." + table.table();
+ expectedTopicName = expectedTopicName.replaceAll("[ \"]", "_");
+
+ try {
+ executeAndWait(statement);
+ SourceRecord record = assertRecordDeleted(expectedTopicName, pk != null ? PK_FIELD : null, pk);
+ assertRecordOffsetAndSnapshotSource(record, SnapshotRecord.FALSE);
+ assertSourceInfo(record, "postgres", table.schema(), table.table());
+ assertRecordSchemaAndValues(expectedSchemaAndValuesByColumn, record, Envelope.FieldName.BEFORE);
+ assertRecordSchemaAndValues(null, record, Envelope.FieldName.AFTER);
+ }
+ catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ }
+
+ private SourceRecord assertRecordInserted(SourceRecord insertedRecord, String expectedTopicName, String pkColumn, Integer pk) throws InterruptedException {
+ assertEquals(topicName(expectedTopicName), insertedRecord.topic());
+
+ if (pk != null) {
+ YBVerifyRecord.isValidInsert(insertedRecord, pkColumn, pk);
+ }
+ else {
+ YBVerifyRecord.isValidInsert(insertedRecord);
+ }
+
+ return insertedRecord;
+ }
+
+ private SourceRecord assertRecordDeleted(String expectedTopicName, String pkColumn, Integer pk) throws InterruptedException {
+ assertFalse("records not generated", consumer.isEmpty());
+ SourceRecord deletedRecord = consumer.remove();
+
+ return assertRecordDeleted(deletedRecord, expectedTopicName, pkColumn, pk);
+ }
+
+ private SourceRecord assertRecordDeleted(SourceRecord deletedRecord, String expectedTopicName, String pkColumn, Integer pk) throws InterruptedException {
+ assertEquals(topicName(expectedTopicName), deletedRecord.topic());
+
+ if (pk != null) {
+ YBVerifyRecord.isValidDelete(deletedRecord, pkColumn, pk);
+ }
+ else {
+ YBVerifyRecord.isValidDelete(deletedRecord);
+ }
+
+ return deletedRecord;
+ }
+
+ private SourceRecord assertRecordInserted(String expectedTopicName, String pkColumn, Integer pk) throws InterruptedException {
+ assertFalse("records not generated", consumer.isEmpty());
+ SourceRecord insertedRecord = consumer.remove();
+
+ return assertRecordInserted(insertedRecord, expectedTopicName, pkColumn, pk);
+ }
+
+ private void executeAndWait(String statements) throws Exception {
+ TestHelper.execute(statements);
+ consumer.await(TestHelper.waitTimeForRecords() * 30, TimeUnit.SECONDS);
+ }
+
+ private void executeAndWaitForNoRecords(String statements) throws Exception {
+ TestHelper.execute(statements);
+ consumer.await(5, TimeUnit.SECONDS);
+ }
+
+ @Override
+ protected Consumer<SourceRecord> getConsumer(Predicate<SourceRecord> isStopRecord, Consumer<SourceRecord> recordArrivedListener, boolean ignoreRecordsAfterStop) {
+ return (record) -> {
+ // YB Note: Do not consume heartbeat record.
+ if (record.topic().equals(TestHelper.getDefaultHeartbeatTopic())) {
+ return;
+ }
+
+ if (isStopRecord != null && isStopRecord.test(record)) {
+ logger.error("Stopping connector after record as requested");
+ throw new ConnectException("Stopping connector after record as requested");
+ }
+ // Test stopped the connector, remaining records are ignored
+ if (ignoreRecordsAfterStop && (!isEngineRunning.get() || Thread.currentThread().isInterrupted())) {
+ return;
+ }
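+ // Spin until the bounded queue accepts the record, bailing out if the
+ // engine stops or the thread is interrupted in the meantime.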
+ while (!consumedLines.offer(record)) {
+ if (ignoreRecordsAfterStop && (!isEngineRunning.get() || Thread.currentThread().isInterrupted())) {
+ return;
+ }
+ }
+ recordArrivedListener.accept(record);
+ };
+ }
+}
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/YBVerifyRecord.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/YBVerifyRecord.java
new file mode 100644
index 00000000000..03e30331584
--- /dev/null
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/YBVerifyRecord.java
@@ -0,0 +1,40 @@
+package io.debezium.connector.postgresql;
+
+import io.debezium.data.Envelope;
+import io.debezium.data.VerifyRecord;
+import org.apache.kafka.connect.data.Struct;
+import org.apache.kafka.connect.source.SourceRecord;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
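+ /**
+ * {@link VerifyRecord} variant for YugabyteDB change records, where each key
+ * column is wrapped in a struct and the primary key is read from its "value" field.
+ */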
+public class YBVerifyRecord extends VerifyRecord {
+ public static void hasValidKey(SourceRecord record, String pkField, int pk) {
+ Struct key = (Struct) record.key();
+ assertThat(key.getStruct(pkField).get("value")).isEqualTo(pk);
+ }
+
+ public static void isValidRead(SourceRecord record, String pkField, int pk) {
+ hasValidKey(record, pkField, pk);
+ isValidRead(record);
+ }
+
+ public static void isValidInsert(SourceRecord record, String pkField, int pk) {
+ hasValidKey(record, pkField, pk);
+ isValidInsert(record, true);
+ }
+
+ public static void isValidUpdate(SourceRecord record, String pkField, int pk) {
+ hasValidKey(record, pkField, pk);
+ isValidUpdate(record, true);
+ }
+
+ public static void isValidDelete(SourceRecord record, String pkField, int pk) {
+ hasValidKey(record, pkField, pk);
+ isValidDelete(record, true);
+ }
+
+ public static void isValidTombstone(SourceRecord record, String pkField, int pk) {
+ hasValidKey(record, pkField, pk);
+ isValidTombstone(record);
+ }
+}
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/YugabyteReplicaIdentityIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/YugabyteReplicaIdentityIT.java
new file mode 100644
index 00000000000..8bd777c81ed
--- /dev/null
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/YugabyteReplicaIdentityIT.java
@@ -0,0 +1,449 @@
+package io.debezium.connector.postgresql;
+
+import io.debezium.config.Configuration;
+import io.debezium.data.Envelope;
+import io.debezium.data.VerifyRecord;
+import io.debezium.embedded.AbstractConnectorTest;
+import org.apache.kafka.connect.data.Struct;
+import org.apache.kafka.connect.source.SourceRecord;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.SQLException;
+import java.time.Duration;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static io.debezium.connector.postgresql.TestHelper.PK_FIELD;
+import static io.debezium.connector.postgresql.TestHelper.topicName;
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * Tests to validate the functionality of replica identities with YugabyteDB.
+ *
+ * @author Vaibhav Kushwaha (vkushwaha@yugabyte.com)
+ */
+public class YugabyteReplicaIdentityIT extends AbstractConnectorTest {
+ private static final Logger LOGGER = LoggerFactory.getLogger(YugabyteReplicaIdentityIT.class);
+
+ private static final String CREATE_TABLES_STMT = "DROP SCHEMA IF EXISTS s1 CASCADE;" +
+ "DROP SCHEMA IF EXISTS s2 CASCADE;" +
+ "CREATE SCHEMA s1; " +
+ "CREATE SCHEMA s2; " +
+ "CREATE TABLE s1.a (pk SERIAL, aa integer, PRIMARY KEY(pk));" +
+ "CREATE TABLE s2.a (pk SERIAL, aa integer, bb varchar(20), PRIMARY KEY(pk));";
+
+ private static final String INSERT_STMT = "INSERT INTO s1.a (aa) VALUES (1);" +
+ "INSERT INTO s2.a (aa) VALUES (1);";
+
+ private YugabyteDBConnector connector;
+
+ @BeforeClass
+ public static void beforeClass() throws SQLException {
+ TestHelper.dropAllSchemas();
+ }
+
+ @Before
+ public void before() {
+ initializeConnectorTestFramework();
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.execute(CREATE_TABLES_STMT);
+ }
+
+ @After
+ public void after() {
+ stopConnector();
+ TestHelper.dropDefaultReplicationSlot();
+ TestHelper.dropPublication();
+ }
+
+ @Test
+ public void oldValuesWithReplicaIdentityFullForPgOutput() throws Exception {
+ shouldProduceOldValuesWithReplicaIdentityFull(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT);
+ }
+
+ @Test
+ public void oldValuesWithReplicaIdentityFullForYbOutput() throws Exception {
+ shouldProduceOldValuesWithReplicaIdentityFull(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT);
+ }
+
+ public void shouldProduceOldValuesWithReplicaIdentityFull(PostgresConnectorConfig.LogicalDecoder logicalDecoder) throws Exception {
+ TestHelper.execute("ALTER TABLE s1.a REPLICA IDENTITY FULL;");
+ TestHelper.execute("ALTER TABLE s2.a REPLICA IDENTITY FULL;");
+
+ Configuration config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.PLUGIN_NAME, logicalDecoder.getPostgresPluginName())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ // YB Note: Added a wait for replication slot to be active.
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ waitForAvailableRecords(10_000, TimeUnit.MILLISECONDS);
+ // there shouldn't be any snapshot records
+ assertNoRecordsToConsume();
+
+ // insert two records and update one of them, verifying all three change events
+ TestHelper.execute(INSERT_STMT);
+ TestHelper.execute("UPDATE s1.a SET aa = 12345 WHERE pk = 1;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(3);
+ List records = actualRecords.recordsForTopic(topicName("s1.a"));
+
+ SourceRecord insertRecord = records.get(0);
+ SourceRecord updateRecord = records.get(1);
+
+ if (logicalDecoder.isYBOutput()) {
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ YBVerifyRecord.isValidUpdate(updateRecord, PK_FIELD, 1);
+ } else {
+ VerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ VerifyRecord.isValidUpdate(updateRecord, PK_FIELD, 1);
+ }
+
+ Struct updateRecordValue = (Struct) updateRecord.value();
+ assertThat(updateRecordValue.get(Envelope.FieldName.AFTER)).isNotNull();
+ assertThat(updateRecordValue.get(Envelope.FieldName.BEFORE)).isNotNull();
+
+ if (logicalDecoder.isYBOutput()) {
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("aa").getInt32("value")).isEqualTo(1);
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).getStruct("aa").getInt32("value")).isEqualTo(12345);
+ } else {
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.BEFORE).get("aa")).isEqualTo(1);
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).get("aa")).isEqualTo(12345);
+ }
+ }
+
+ @Test
+ public void replicaIdentityDefaultWithPgOutput() throws Exception {
+ shouldProduceExpectedValuesWithReplicaIdentityDefault(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT);
+ }
+
+ @Test
+ public void replicaIdentityDefaultWithYbOutput() throws Exception {
+ shouldProduceExpectedValuesWithReplicaIdentityDefault(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT);
+ }
+
+ public void shouldProduceExpectedValuesWithReplicaIdentityDefault(PostgresConnectorConfig.LogicalDecoder logicalDecoder) throws Exception {
+ TestHelper.execute("ALTER TABLE s1.a REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE s2.a REPLICA IDENTITY DEFAULT;");
+
+ Configuration config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.PLUGIN_NAME, logicalDecoder.getPostgresPluginName())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ // YB Note: Added a wait for replication slot to be active.
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ waitForAvailableRecords(10_000, TimeUnit.MILLISECONDS);
+ // there shouldn't be any snapshot records
+ assertNoRecordsToConsume();
+
+ // insert a record and then update it
+ TestHelper.execute("INSERT INTO s2.a VALUES (1, 22, 'random text value');");
+ TestHelper.execute("UPDATE s2.a SET aa = 12345 WHERE pk = 1;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(2);
+ List records = actualRecords.recordsForTopic(topicName("s2.a"));
+
+ SourceRecord insertRecord = records.get(0);
+ SourceRecord updateRecord = records.get(1);
+
+ if (logicalDecoder.isYBOutput()) {
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ YBVerifyRecord.isValidUpdate(updateRecord, PK_FIELD, 1);
+ } else {
+ VerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ VerifyRecord.isValidUpdate(updateRecord, PK_FIELD, 1);
+ }
+
+ Struct updateRecordValue = (Struct) updateRecord.value();
+ assertThat(updateRecordValue.get(Envelope.FieldName.AFTER)).isNotNull();
+ assertThat(updateRecordValue.get(Envelope.FieldName.BEFORE)).isNull();
+
+ // After field will have entries for all the columns.
+ if (logicalDecoder.isYBOutput()) {
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).getStruct("pk").getInt32("value")).isEqualTo(1);
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).getStruct("aa").getInt32("value")).isEqualTo(12345);
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).getStruct("bb").getString("value")).isEqualTo("random text value");
+ } else {
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).get("pk")).isEqualTo(1);
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).get("aa")).isEqualTo(12345);
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).get("bb")).isEqualTo("random text value");
+ }
+ }
+
+ @Test
+ public void shouldProduceEventsWithValuesForChangedColumnWithReplicaIdentityChange() throws Exception {
+ // YB Note: Even if we do not alter it, the default replica identity on the service is CHANGE.
+ TestHelper.execute("ALTER TABLE s2.a REPLICA IDENTITY CHANGE;");
+
+ Configuration config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ // YB Note: Added a wait for replication slot to be active.
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ waitForAvailableRecords(10_000, TimeUnit.MILLISECONDS);
+ // there shouldn't be any snapshot records
+ assertNoRecordsToConsume();
+
+ // insert a record, then update it twice (the second update sets a column to null)
+ TestHelper.execute("INSERT INTO s2.a VALUES (1, 22, 'random text value');");
+ TestHelper.execute("UPDATE s2.a SET aa = 12345 WHERE pk = 1;");
+ TestHelper.execute("UPDATE s2.a SET aa = null WHERE pk = 1;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(3);
+ List records = actualRecords.recordsForTopic(topicName("s2.a"));
+
+ SourceRecord insertRecord = records.get(0);
+ SourceRecord updateRecord = records.get(1);
+ SourceRecord updateRecordWithNullCol = records.get(2);
+
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ YBVerifyRecord.isValidUpdate(updateRecord, PK_FIELD, 1);
+ YBVerifyRecord.isValidUpdate(updateRecordWithNullCol, PK_FIELD, 1);
+
+ Struct updateRecordValue = (Struct) updateRecord.value();
+ assertThat(updateRecordValue.get(Envelope.FieldName.AFTER)).isNotNull();
+ assertThat(updateRecordValue.get(Envelope.FieldName.BEFORE)).isNull();
+
+ // After field will have entries for all the changed columns.
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).getStruct("pk").getInt32("value")).isEqualTo(1);
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).getStruct("aa").getInt32("value")).isEqualTo(12345);
+ assertThat(updateRecordValue.getStruct(Envelope.FieldName.AFTER).getStruct("bb")).isNull();
+
+ // After field will have a null value in place of the column explicitly set as null.
+ Struct updateRecordWithNullColValue = (Struct) updateRecordWithNullCol.value();
+ assertThat(updateRecordWithNullColValue.getStruct(Envelope.FieldName.AFTER).getStruct("pk").getInt32("value")).isEqualTo(1);
+ assertThat(updateRecordWithNullColValue.getStruct(Envelope.FieldName.AFTER).getStruct("aa").getInt32("value")).isNull();
+ assertThat(updateRecordWithNullColValue.getStruct(Envelope.FieldName.AFTER).getStruct("bb")).isNull();
+ }
+
+ @Test
+ public void shouldThrowExceptionWithReplicaIdentityNothingOnUpdatesAndDeletes() throws Exception {
+ /*
+ According to Postgres docs:
+ If a table without a replica identity is added to a publication that replicates
+ UPDATE or DELETE operations then subsequent UPDATE or DELETE operations will cause
+ an error on the publisher.
+
+ Details: https://www.postgresql.org/docs/current/logical-replication-publication.html
+ */
+ TestHelper.execute("ALTER TABLE s1.a REPLICA IDENTITY NOTHING;");
+ TestHelper.execute("ALTER TABLE s2.a REPLICA IDENTITY NOTHING;");
+
+ Configuration config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ // YB Note: Added a wait for replication slot to be active.
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ waitForAvailableRecords(10_000, TimeUnit.MILLISECONDS);
+ // there shouldn't be any snapshot records
+ assertNoRecordsToConsume();
+
+ // insert a record; the subsequent UPDATE and DELETE statements should fail
+ TestHelper.execute("INSERT INTO s2.a VALUES (1, 22, 'random text value');");
+
+ try {
+ TestHelper.execute("UPDATE s2.a SET aa = 12345 WHERE pk = 1;");
+ } catch (Exception sqle) {
+ assertThat(sqle.getMessage()).contains("ERROR: cannot update table \"a\" because it does "
+ + "not have a replica identity and publishes updates");
+ }
+
+ try {
+ TestHelper.execute("DELETE FROM s2.a WHERE pk = 1;");
+ } catch (Exception sqle) {
+ assertThat(sqle.getMessage()).contains("ERROR: cannot delete from table \"a\" because it "
+ + "does not have a replica identity and publishes deletes");
+ }
+ }
+
+ @Test
+ public void beforeImageForDeleteWithReplicaIdentityFullAndPgOutput() throws Exception {
+ shouldHaveBeforeImageForDeletesForReplicaIdentityFull(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT);
+ }
+
+ @Test
+ public void beforeImageForDeleteWithReplicaIdentityFullAndYbOutput() throws Exception {
+ shouldHaveBeforeImageForDeletesForReplicaIdentityFull(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT);
+ }
+
+ public void shouldHaveBeforeImageForDeletesForReplicaIdentityFull(PostgresConnectorConfig.LogicalDecoder logicalDecoder) throws Exception {
+ TestHelper.execute("ALTER TABLE s1.a REPLICA IDENTITY FULL;");
+ TestHelper.execute("ALTER TABLE s2.a REPLICA IDENTITY FULL;");
+ Configuration config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.PLUGIN_NAME, logicalDecoder.getPostgresPluginName())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ // YB Note: Added a wait for replication slot to be active.
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ waitForAvailableRecords(10_000, TimeUnit.MILLISECONDS);
+ // there shouldn't be any snapshot records
+ assertNoRecordsToConsume();
+
+ // insert a record and then delete it
+ TestHelper.execute("INSERT INTO s2.a VALUES (1, 22, 'random text value');");
+ TestHelper.execute("DELETE FROM s2.a WHERE pk = 1;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(2);
+ List records = actualRecords.recordsForTopic(topicName("s2.a"));
+
+ SourceRecord insertRecord = records.get(0);
+ SourceRecord deleteRecord = records.get(1);
+
+ if (logicalDecoder.isYBOutput()) {
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ YBVerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1);
+ } else {
+ VerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ VerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1);
+ }
+
+ Struct deleteRecordValue = (Struct) deleteRecord.value();
+ assertThat(deleteRecordValue.get(Envelope.FieldName.AFTER)).isNull();
+ assertThat(deleteRecordValue.get(Envelope.FieldName.BEFORE)).isNotNull();
+
+ // Before field will have entries for all the columns.
+ if (logicalDecoder.isYBOutput()) {
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("pk").getInt32("value")).isEqualTo(1);
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("aa").getInt32("value")).isEqualTo(22);
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("bb").getString("value")).isEqualTo("random text value");
+ } else {
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).get("pk")).isEqualTo(1);
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).get("aa")).isEqualTo(22);
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).get("bb")).isEqualTo("random text value");
+ }
+ }
+
+ @Test
+ public void beforeImageForDeleteWithReplicaIdentityDefaultAndPgOutput() throws Exception {
+ shouldHaveBeforeImageForDeletesForReplicaIdentityDefault(PostgresConnectorConfig.LogicalDecoder.PGOUTPUT);
+ }
+
+ @Test
+ public void beforeImageForDeleteWithReplicaIdentityDefaultAndYbOutput() throws Exception {
+ shouldHaveBeforeImageForDeletesForReplicaIdentityDefault(PostgresConnectorConfig.LogicalDecoder.YBOUTPUT);
+ }
+
+ public void shouldHaveBeforeImageForDeletesForReplicaIdentityDefault(PostgresConnectorConfig.LogicalDecoder logicalDecoder) throws Exception {
+ TestHelper.execute("ALTER TABLE s1.a REPLICA IDENTITY DEFAULT;");
+ TestHelper.execute("ALTER TABLE s2.a REPLICA IDENTITY DEFAULT;");
+
+ Configuration config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.PLUGIN_NAME, logicalDecoder.getPostgresPluginName())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ // YB Note: Added a wait for replication slot to be active.
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ waitForAvailableRecords(10_000, TimeUnit.MILLISECONDS);
+ // there shouldn't be any snapshot records
+ assertNoRecordsToConsume();
+
+ // insert a record and then delete it
+ TestHelper.execute("INSERT INTO s2.a VALUES (1, 22, 'random text value');");
+ TestHelper.execute("DELETE FROM s2.a WHERE pk = 1;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(2);
+ List records = actualRecords.recordsForTopic(topicName("s2.a"));
+
+ SourceRecord insertRecord = records.get(0);
+ SourceRecord deleteRecord = records.get(1);
+
+ if (logicalDecoder.isYBOutput()) {
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ YBVerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1);
+ } else {
+ VerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ VerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1);
+ }
+
+ Struct deleteRecordValue = (Struct) deleteRecord.value();
+ assertThat(deleteRecordValue.get(Envelope.FieldName.AFTER)).isNull();
+ assertThat(deleteRecordValue.get(Envelope.FieldName.BEFORE)).isNotNull();
+
+ // Before field will have entries only for the primary key columns.
+ if (logicalDecoder.isYBOutput()) {
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("pk").getInt32("value")).isEqualTo(1);
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("aa").getInt32("value")).isNull();
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("bb").getString("value")).isNull();
+ } else {
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).get("pk")).isEqualTo(1);
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).get("aa")).isNull();
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).get("bb")).isNull();
+ }
+ }
+
+ @Test
+ public void shouldHaveBeforeImageForDeletesForReplicaIdentityChange() throws Exception {
+ // YB Note: Even if we do not alter it, the default replica identity on the service is CHANGE.
+ TestHelper.execute("ALTER TABLE s2.a REPLICA IDENTITY CHANGE;");
+
+ Configuration config = TestHelper.defaultConfig()
+ .with(PostgresConnectorConfig.SNAPSHOT_MODE, PostgresConnectorConfig.SnapshotMode.NEVER.getValue())
+ .with(PostgresConnectorConfig.DROP_SLOT_ON_STOP, Boolean.TRUE)
+ .build();
+ start(YugabyteDBConnector.class, config);
+ assertConnectorIsRunning();
+
+ // YB Note: Added a wait for replication slot to be active.
+ TestHelper.waitFor(Duration.ofSeconds(10));
+
+ waitForAvailableRecords(10_000, TimeUnit.MILLISECONDS);
+ // there shouldn't be any snapshot records
+ assertNoRecordsToConsume();
+
+ // insert a record and then delete it
+ TestHelper.execute("INSERT INTO s2.a VALUES (1, 22, 'random text value');");
+ TestHelper.execute("DELETE FROM s2.a WHERE pk = 1;");
+
+ SourceRecords actualRecords = consumeRecordsByTopic(2);
+ List records = actualRecords.recordsForTopic(topicName("s2.a"));
+
+ SourceRecord insertRecord = records.get(0);
+ SourceRecord deleteRecord = records.get(1);
+
+ YBVerifyRecord.isValidInsert(insertRecord, PK_FIELD, 1);
+ YBVerifyRecord.isValidDelete(deleteRecord, PK_FIELD, 1);
+
+ Struct deleteRecordValue = (Struct) deleteRecord.value();
+ assertThat(deleteRecordValue.get(Envelope.FieldName.AFTER)).isNull();
+ assertThat(deleteRecordValue.get(Envelope.FieldName.BEFORE)).isNotNull();
+
+ // Before field will have entries only for the primary key columns.
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("pk").getInt32("value")).isEqualTo(1);
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("aa").getInt32("value")).isNull();
+ assertThat(deleteRecordValue.getStruct(Envelope.FieldName.BEFORE).getStruct("bb").getString("value")).isNull();
+ }
+}
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/connection/PostgresConnectionIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/connection/PostgresConnectionIT.java
index aad869a86eb..e4c9e10ebc3 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/connection/PostgresConnectionIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/connection/PostgresConnectionIT.java
@@ -23,7 +23,7 @@
import org.junit.After;
import org.junit.Ignore;
import org.junit.Test;
-import org.postgresql.jdbc.PgConnection;
+import com.yugabyte.jdbc.PgConnection;
import io.debezium.connector.postgresql.TestHelper;
import io.debezium.doc.FixFor;
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/junit/SkipWhenDecoderPluginNameIs.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/junit/SkipWhenDecoderPluginNameIs.java
index 888bead0a56..2613beeab0d 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/junit/SkipWhenDecoderPluginNameIs.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/junit/SkipWhenDecoderPluginNameIs.java
@@ -36,6 +36,12 @@ boolean isEqualTo(String pluginName) {
boolean isEqualTo(String pluginName) {
return pluginName.equals("pgoutput");
}
+ },
+ YBOUTPUT {
+ @Override
+ boolean isEqualTo(String pluginName) {
+ return pluginName.equals("yboutput");
+ }
};
abstract boolean isEqualTo(String pluginName);
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/junit/SkipWhenDecoderPluginNameIsNot.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/junit/SkipWhenDecoderPluginNameIsNot.java
index 7dda3b61371..1ac27cc7b9e 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/junit/SkipWhenDecoderPluginNameIsNot.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/junit/SkipWhenDecoderPluginNameIsNot.java
@@ -32,9 +32,16 @@ boolean isNotEqualTo(String pluginName) {
}
},
PGOUTPUT {
+ @Override
+ boolean isNotEqualTo(String pluginName) {
+ // YB Note: Making a change here so that the tests verifying the pgoutput
+ // plugin also run with yboutput.
+ return !pluginName.equals("yboutput") && !pluginName.equals("pgoutput");
+ }
+ },
+ YBOUTPUT {
@Override
boolean isNotEqualTo(String pluginName) {
- return !pluginName.equals("pgoutput");
+ return !pluginName.equals("yboutput");
}
};
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResourceIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResourceIT.java
index 5b0f17e4520..22d8b089216 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResourceIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResourceIT.java
@@ -21,7 +21,7 @@
import org.junit.Test;
import io.debezium.connector.postgresql.Module;
-import io.debezium.connector.postgresql.PostgresConnector;
+import io.debezium.connector.postgresql.YugabyteDBConnector;
import io.debezium.connector.postgresql.PostgresConnectorConfig;
import io.debezium.testing.testcontainers.Connector;
import io.debezium.testing.testcontainers.ConnectorConfiguration;
@@ -84,7 +84,7 @@ public void testInvalidHostnameConnection() {
public void testInvalidConnection() {
given()
.port(RestExtensionTestInfrastructure.getDebeziumContainer().getFirstMappedPort())
- .when().contentType(ContentType.JSON).accept(ContentType.JSON).body("{\"connector.class\": \"" + PostgresConnector.class.getName() + "\"}")
+ .when().contentType(ContentType.JSON).accept(ContentType.JSON).body("{\"connector.class\": \"" + YugabyteDBConnector.class.getName() + "\"}")
.put(DebeziumPostgresConnectorResource.BASE_PATH + DebeziumPostgresConnectorResource.VALIDATE_CONNECTION_ENDPOINT)
.then().log().all()
.statusCode(200)
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResourceNoDatabaseIT.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResourceNoDatabaseIT.java
index c9d7a8bc7c6..3f23e69b652 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResourceNoDatabaseIT.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/rest/DebeziumPostgresConnectorResourceNoDatabaseIT.java
@@ -17,7 +17,7 @@
import org.junit.Test;
import io.debezium.connector.postgresql.Module;
-import io.debezium.connector.postgresql.PostgresConnector;
+import io.debezium.connector.postgresql.YugabyteDBConnector;
import io.debezium.testing.testcontainers.testhelper.RestExtensionTestInfrastructure;
public class DebeziumPostgresConnectorResourceNoDatabaseIT {
@@ -65,7 +65,7 @@ public void testSchemaEndpoint() {
.body("properties.isEmpty()", is(false))
.body("x-connector-id", is("postgres"))
.body("x-version", is(Module.version()))
- .body("x-className", is(PostgresConnector.class.getName()))
+ .body("x-className", is(YugabyteDBConnector.class.getName()))
.body("properties", hasKey("topic.prefix"))
.body("properties", hasKey("plugin.name"))
.body("properties", hasKey("slot.name"))
diff --git a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/transforms/timescaledb/TimescaleDbDatabaseTest.java b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/transforms/timescaledb/TimescaleDbDatabaseTest.java
index 4fa989fcdff..5c65479c875 100644
--- a/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/transforms/timescaledb/TimescaleDbDatabaseTest.java
+++ b/debezium-connector-postgres/src/test/java/io/debezium/connector/postgresql/transforms/timescaledb/TimescaleDbDatabaseTest.java
@@ -9,6 +9,7 @@
import java.sql.SQLException;
+import io.debezium.connector.postgresql.YugabyteDBConnector;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
@@ -19,7 +20,6 @@
import org.testcontainers.lifecycle.Startables;
import io.debezium.config.Configuration;
-import io.debezium.connector.postgresql.PostgresConnector;
import io.debezium.connector.postgresql.PostgresConnectorConfig;
import io.debezium.connector.postgresql.PostgresConnectorConfig.SnapshotMode;
import io.debezium.connector.postgresql.TestHelper;
@@ -100,7 +100,7 @@ protected void insertData() throws SQLException {
public void shouldTransformChunks() throws Exception {
// Testing.Print.enable();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
insertData();
@@ -118,7 +118,7 @@ public void shouldTransformChunks() throws Exception {
public void shouldTransformAggregates() throws Exception {
// Testing.Print.enable();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
insertData();
@@ -146,9 +146,9 @@ public void shouldTransformAggregates() throws Exception {
@Test
public void shouldTransformCompressedChunks() throws Exception {
- Testing.Print.enable();
+// Testing.Print.enable();
- start(PostgresConnector.class, config);
+ start(YugabyteDBConnector.class, config);
waitForStreamingRunning("postgres", TestHelper.TEST_SERVER);
insertData();
diff --git a/debezium-connector-postgres/src/test/resources/init_database.ddl b/debezium-connector-postgres/src/test/resources/init_database.ddl
index c41f73521c2..fdde0e4ce8f 100644
--- a/debezium-connector-postgres/src/test/resources/init_database.ddl
+++ b/debezium-connector-postgres/src/test/resources/init_database.ddl
@@ -5,7 +5,7 @@ DROP SCHEMA IF EXISTS public CASCADE;
CREATE SCHEMA public;
-- load contrib extensions for testing non-builtin types
-CREATE EXTENSION IF NOT EXISTS ltree SCHEMA public;
-CREATE EXTENSION IF NOT EXISTS isn SCHEMA public;
-CREATE EXTENSION IF NOT EXISTS citext SCHEMA public;
-CREATE EXTENSION IF NOT EXISTS hstore SCHEMA public;
+-- CREATE EXTENSION IF NOT EXISTS ltree SCHEMA public;
+-- CREATE EXTENSION IF NOT EXISTS isn SCHEMA public;
+-- CREATE EXTENSION IF NOT EXISTS citext SCHEMA public;
+-- CREATE EXTENSION IF NOT EXISTS hstore SCHEMA public;
diff --git a/debezium-connector-postgres/src/test/resources/postgres_create_role_specific_tables.ddl b/debezium-connector-postgres/src/test/resources/postgres_create_role_specific_tables.ddl
index d5c9d0d62e0..36df28962a0 100644
--- a/debezium-connector-postgres/src/test/resources/postgres_create_role_specific_tables.ddl
+++ b/debezium-connector-postgres/src/test/resources/postgres_create_role_specific_tables.ddl
@@ -11,7 +11,7 @@ BEGIN
SELECT FROM pg_catalog.pg_roles
WHERE rolname = 'role_1') THEN
- REASSIGN OWNED BY role_1 TO postgres;
+ REASSIGN OWNED BY role_1 TO yugabyte;
DROP OWNED BY role_1;
DROP ROLE IF EXISTS role_1;
END IF;
@@ -27,7 +27,7 @@ BEGIN
SELECT FROM pg_catalog.pg_roles
WHERE rolname = 'role_2') THEN
- REASSIGN OWNED BY role_2 TO postgres;
+ REASSIGN OWNED BY role_2 TO yugabyte;
DROP OWNED BY role_2;
DROP ROLE IF EXISTS role_2;
END IF;
@@ -43,12 +43,12 @@ CREATE SCHEMA s2;
CREATE ROLE role_1;
GRANT ALL ON SCHEMA s1 TO role_1;
GRANT ALL ON SCHEMA s2 TO role_1;
-GRANT CREATE ON DATABASE postgres TO role_1;
+GRANT CREATE ON DATABASE yugabyte TO role_1;
CREATE ROLE role_2 WITH REPLICATION LOGIN PASSWORD 'role_2_pass';
GRANT ALL ON SCHEMA s1 TO role_2;
GRANT ALL ON SCHEMA s2 TO role_2;
-GRANT CONNECT ON DATABASE postgres TO role_2;
+GRANT CONNECT ON DATABASE yugabyte TO role_2;
-- Create tables using r1
SET ROLE role_1;
diff --git a/debezium-connector-postgres/src/test/resources/replication_role_user.ddl b/debezium-connector-postgres/src/test/resources/replication_role_user.ddl
new file mode 100644
index 00000000000..1b69a59367b
--- /dev/null
+++ b/debezium-connector-postgres/src/test/resources/replication_role_user.ddl
@@ -0,0 +1,20 @@
+DROP ROLE IF EXISTS ybpgconn;
+
+CREATE ROLE ybpgconn WITH LOGIN REPLICATION;
+CREATE SCHEMA ybpgconn AUTHORIZATION ybpgconn;
+
+GRANT CREATE ON DATABASE yugabyte TO ybpgconn;
+
+BEGIN;
+ CREATE OR REPLACE PROCEDURE ybpgconn.set_yb_read_time(value TEXT)
+ LANGUAGE plpgsql
+ AS $$
+ BEGIN
+ EXECUTE 'SET LOCAL yb_read_time = ' || quote_literal(value);
+ END;
+ $$
+ SECURITY DEFINER;
+
+ REVOKE EXECUTE ON PROCEDURE ybpgconn.set_yb_read_time FROM PUBLIC;
+ GRANT EXECUTE ON PROCEDURE ybpgconn.set_yb_read_time TO ybpgconn;
+COMMIT;
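As a hedged sketch (not part of this patch), a client granted EXECUTE could invoke the procedure from JDBC; the connection URL, empty password, and read-time literal below are illustrative assumptions, and the call must run inside a transaction because SET LOCAL is transaction-scoped:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class SetYbReadTimeSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical connection details for a local YugabyteDB node.
            String url = "jdbc:yugabytedb://localhost:5433/yugabyte";
            try (Connection conn = DriverManager.getConnection(url, "ybpgconn", "")) {
                conn.setAutoCommit(false); // keep SET LOCAL effective for the queries below
                try (Statement stmt = conn.createStatement()) {
                    stmt.execute("CALL ybpgconn.set_yb_read_time('1700000000000000')");
                    // ... run queries pinned to that read time in the same transaction ...
                }
                conn.commit();
            }
        }
    }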
diff --git a/debezium-core/src/main/java/io/debezium/metadata/ConnectorDescriptor.java b/debezium-core/src/main/java/io/debezium/metadata/ConnectorDescriptor.java
index ce8fdcbf8f8..1b3128283a9 100644
--- a/debezium-core/src/main/java/io/debezium/metadata/ConnectorDescriptor.java
+++ b/debezium-core/src/main/java/io/debezium/metadata/ConnectorDescriptor.java
@@ -50,6 +50,7 @@ public static String getIdForConnectorClass(String className) {
case "io.debezium.connector.oracle.OracleConnector":
return "oracle";
case "io.debezium.connector.postgresql.PostgresConnector":
+ case "io.debezium.connector.postgresql.YugabyteDBConnector":
return "postgres";
case "io.debezium.connector.sqlserver.SqlServerConnector":
return "sqlserver";
@@ -70,6 +71,8 @@ public static String getDisplayNameForConnectorClass(String className) {
return "Debezium Oracle Connector";
case "io.debezium.connector.postgresql.PostgresConnector":
return "Debezium PostgreSQL Connector";
+ case "io.debezium.connector.postgresql.YugabyteDBConnector":
+ return "Debezium YugabyteDB Connector";
case "io.debezium.connector.sqlserver.SqlServerConnector":
return "Debezium SQLServer Connector";
case "io.debezium.connector.mariadb.MariaDbConnector":
diff --git a/jenkins-jobs/scripts/config/Aliases.txt b/jenkins-jobs/scripts/config/Aliases.txt
index 71af1c44097..8b190bcfa7a 100644
--- a/jenkins-jobs/scripts/config/Aliases.txt
+++ b/jenkins-jobs/scripts/config/Aliases.txt
@@ -279,4 +279,4 @@ TimoWilhelm,Timo Wilhelm
ashishbinu,Ashish Binu
wltmlx,Lukas Langegger
GitHubSergei,Sergey Kazakov
-shaer,Mohamed El Shaer
\ No newline at end of file
+shaer,Mohamed El Shaer
diff --git a/log4j.properties b/log4j.properties
new file mode 100644
index 00000000000..b0bbe4d9855
--- /dev/null
+++ b/log4j.properties
@@ -0,0 +1,25 @@
+kafka.logs.dir=logs
+
+log4j.rootLogger=INFO, stdout, appender
+
+# Disable excessive reflection warnings - KAFKA-5229
+log4j.logger.org.reflections=ERROR
+
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.threshold=INFO
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n
+
+
+log4j.appender.appender=org.apache.log4j.RollingFileAppender
+# Size-based rolling for the Log4j 1.x RollingFileAppender is configured via
+# MaxFileSize and MaxBackupIndex (Log4j 2 style policy/strategy keys are not
+# supported by this appender).
+log4j.appender.appender.File=${kafka.logs.dir}/connect-service.log
+log4j.appender.appender.ImmediateFlush=true
+log4j.appender.appender.MaxFileSize=100MB
+log4j.appender.appender.MaxBackupIndex=10000
+log4j.appender.appender.layout=org.apache.log4j.PatternLayout
+# TODO: find out how to generate files in a specific pattern
+# log4j.appender.appender.filePattern=${kafka.logs.dir}/connect-service-%d{yyyy-MM-dd}-%i.log
+log4j.appender.appender.layout.ConversionPattern=%d{ISO8601} %-5p %X{dbz.connectorType}|%X{dbz.connectorName}|%X{dbz.connectorContext} %m [%c]%n
diff --git a/metrics.yml b/metrics.yml
new file mode 100644
index 00000000000..2d8a724c424
--- /dev/null
+++ b/metrics.yml
@@ -0,0 +1,62 @@
+startDelaySeconds: 0
+ssl: false
+lowercaseOutputName: false
+lowercaseOutputLabelNames: false
+rules:
+ - pattern: 'kafka.(.+)<type=app-info, client-id=(.+)><>start-time-ms'
+ name: kafka_$1_start_time_seconds
+ labels:
+ clientId: "$2"
+ help: "Kafka $1 JMX metric start time seconds"
+ type: GAUGE
+ valueFactor: 0.001
+ - pattern: 'kafka.(.+)<type=app-info, client-id=(.+)><>(commit-id|version): (.+)'
+ name: kafka_$1_$3_info
+ value: 1
+ labels:
+ clientId: "$2"
+ $3: "$4"
+ help: "Kafka $1 JMX metric info version and commit-id"
+ type: GAUGE
+ - pattern: kafka.(.+)<type=(.+), client-id=(.+), topic=(.+), partition=(.*)><>(.+-total|.+-rate|.+-avg|.+-replica|.+-lag|.+-lead)
+ name: kafka_$2_$6
+ labels:
+ clientId: "$3"
+ topic: "$4"
+ partition: "$5"
+ help: "Kafka $1 JMX metric type $2"
+ type: GAUGE
+ # Add other rules similarly...
+ - pattern: "debezium.([^:]+)]+)>([^:]+)"
+ name: "debezium_metrics_$6"
+ labels:
+ plugin: "$1"
+ name: "$2"
+ task: "$3"
+ context: "$4"
+ database: "$5"
+ - pattern: "debezium.([^:]+)]+)>([^:]+)"
+ name: "debezium_metrics_$5"
+ labels:
+ plugin: "$1"
+ name: "$2"
+ task: "$3"
+ context: "$4"
+ - pattern: "debezium.([^:]+)]+)>([^:]+)"
+ name: "debezium_metrics_$4"
+ labels:
+ plugin: "$1"
+ name: "$3"
+ context: "$2"
+
+ #kafka.connect:type=task-error-metrics,connector="{connector}",task="{task}"
+ #kafka.connect:type=source-task-metrics,connector="{connector}",task="{task}"
+ #kafka.connect:type=sink-task-metrics,connector="{connector}",task="{task}"
+ #kafka.connect:type=connector-task-metrics,connector="{connector}",task="{task}"
+ - pattern: kafka.connect<type=(.+)-metrics, connector=(.+), task=(.+)><>(.+-total|.+-count|.+-ms|.+-ratio|.+-rate|.+-avg|.+-failures|.+-requests|.+-timestamp|.+-logged|.+-errors|.+-retries|.+-skipped)
+ name: kafka_connect_$1_$4
+ labels:
+ connector: "$2"
+ task: "$3"
+ help: "Kafka Connect JMX metric type $1"
+ type: GAUGE
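For reference, a short sketch of the MBean name shape the debezium_* rules above are written against; "dbserver1" stands in for the connector's topic.prefix, and the context value is one example of several:

    import javax.management.ObjectName;

    class DebeziumMBeanNameSketch {
        public static void main(String[] args) throws Exception {
            // Matches the 4-group debezium rule above: plugin=postgres,
            // context=streaming, server (exported as the "name" label)=dbserver1.
            ObjectName streaming = new ObjectName(
                    "debezium.postgres:type=connector-metrics,context=streaming,server=dbserver1");
            System.out.println(streaming.getCanonicalName());
        }
    }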