From f0e1edd94eaddd23b6fe29ea704e507300f131aa Mon Sep 17 00:00:00 2001
From: hasher
Date: Fri, 10 Jul 2020 13:20:25 +0530
Subject: [PATCH] clean pom and readme file

---
 README.md                                                     | 4 ++--
 pom.xml                                                       | 5 ++++-
 .../microsoft/azure/kusto/kafka/connect/sink/FileWriter.java  | 1 -
 .../microsoft/azure/kusto/kafka/connect/sink/SourceFile.java  | 1 -
 4 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 37d4493..575283f 100644
--- a/README.md
+++ b/README.md
@@ -112,9 +112,9 @@ KafkaTest | count
 
 #### Supported formats
 
-`csv`, `json`, `avro`, `apacheAvro`, `parquet`, `orc`, `tsv`, `scsv`, `sohsv`, `psv`, `txt`.
+`csv`, `json`, `avro`, `apacheAvro`, `tsv`, `scsv`, `sohsv`, `psv`, `txt`.
 
-> Note - `avro`, `apacheAvro`, `parquet` and `orc` files are sent each record (file) separately without aggregation, and are expected to be sent as a byte array containing the full file.
+> Note - `avro` and `apacheAvro` files are sent each record (file) separately without aggregation, and are expected to be sent as a byte array containing the full file.
 >
 >Use `value.converter=org.apache.kafka.connect.converters.ByteArrayConverter`
 
diff --git a/pom.xml b/pom.xml
index bd02dc9..f039ae2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -99,11 +99,13 @@
             <groupId>org.apache.kafka</groupId>
             <artifactId>connect-api</artifactId>
             <version>${kafka.version}</version>
+            <scope>provided</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.kafka</groupId>
             <artifactId>connect-json</artifactId>
             <version>${kafka.version}</version>
+            <scope>provided</scope>
         </dependency>
         <dependency>
             <groupId>org.json</groupId>
@@ -148,12 +150,13 @@
             <groupId>io.confluent</groupId>
             <artifactId>kafka-connect-avro-converter</artifactId>
             <version>5.2.1</version>
+            <scope>provided</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.kafka</groupId>
             <artifactId>connect-runtime</artifactId>
             <version>5.3.0-ccs</version>
-            <scope>compile</scope>
+            <scope>provided</scope>
         </dependency>
     </dependencies>
 
diff --git a/src/main/java/com/microsoft/azure/kusto/kafka/connect/sink/FileWriter.java b/src/main/java/com/microsoft/azure/kusto/kafka/connect/sink/FileWriter.java
index 416cb0b..67471c3 100644
--- a/src/main/java/com/microsoft/azure/kusto/kafka/connect/sink/FileWriter.java
+++ b/src/main/java/com/microsoft/azure/kusto/kafka/connect/sink/FileWriter.java
@@ -256,7 +256,6 @@ public class FileWriter implements Closeable {
             recordWriter.write(record);
             currentFile.records.add(record);
             currentFile.rawBytes = recordWriter.getDataSize();
-            currentFile.zippedBytes += countingStream.numBytes;
             currentFile.numRecords++;
             if (this.flushInterval == 0 || currentFile.rawBytes > fileThreshold || shouldWriteAvroAsBytes) {
                 rotate(record.kafkaOffset());
diff --git a/src/main/java/com/microsoft/azure/kusto/kafka/connect/sink/SourceFile.java b/src/main/java/com/microsoft/azure/kusto/kafka/connect/sink/SourceFile.java
index e395776..457a606 100644
--- a/src/main/java/com/microsoft/azure/kusto/kafka/connect/sink/SourceFile.java
+++ b/src/main/java/com/microsoft/azure/kusto/kafka/connect/sink/SourceFile.java
@@ -8,7 +8,6 @@ import org.apache.kafka.connect.sink.SinkRecord;
 
 public class SourceFile {
     long rawBytes = 0;
-    long zippedBytes = 0;
     long numRecords = 0;
     public String path;
     public File file;
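
Note on the `provided` scope changes above: marking the Kafka and Confluent dependencies `provided` keeps them on the compile classpath but excludes them from the packaged connector artifact, since the Kafka Connect worker already supplies these classes at runtime. A minimal sketch of one affected dependency block as it reads after this patch (coordinates and version taken from the hunks above):

```xml
<!-- connect-api is compiled against, but not bundled into the connector
     JAR; the Connect worker provides it on the classpath at runtime -->
<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>connect-api</artifactId>
    <version>${kafka.version}</version>
    <scope>provided</scope>
</dependency>
```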