clean pom and readme file
Parent: 77fce2ac4e
Commit: f0e1edd94e
README.md

@@ -112,9 +112,9 @@ KafkaTest | count
 
 #### Supported formats
 
-`csv`, `json`, `avro`, `apacheAvro`, `parquet`, `orc`, `tsv`, `scsv`, `sohsv`, `psv`, `txt`.
+`csv`, `json`, `avro`, `apacheAvro`, `tsv`, `scsv`, `sohsv`, `psv`, `txt`.
 
-> Note - `avro`, `apacheAvro`, `parquet` and `orc` files are sent each record (file) separately without aggregation, and are expected to be sent as a byte array containing the full file.
+> Note - `avro` and `apacheAvro` files are sent each record (file) separately without aggregation, and are expected to be sent as a byte array containing the full file.
 >
 >Use `value.converter=org.apache.kafka.connect.converters.ByteArrayConverter`
 
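The converter requirement in that note means the producing application must publish each `avro`/`apacheAvro` file as a single byte-array record. A minimal producer sketch under that assumption; the broker address, topic name, and file path below are placeholders, not values taken from this repository:

```java
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Properties;

public class AvroFileProducer {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        // The whole file travels as one record, so the value is a plain
        // byte array -- the producer-side mirror of ByteArrayConverter.
        props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

        try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
            // placeholder path; one record == one complete avro file,
            // since the sink does no aggregation for these formats
            byte[] fileBytes = Files.readAllBytes(Paths.get("data/events.avro"));
            producer.send(new ProducerRecord<>("kusto-ingest", fileBytes)).get();
        }
    }
}
```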
pom.xml (5 lines changed)
@@ -99,11 +99,13 @@
             <groupId>org.apache.kafka</groupId>
             <artifactId>connect-api</artifactId>
             <version>${kafka.version}</version>
+            <scope>provided</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.kafka</groupId>
             <artifactId>connect-json</artifactId>
             <version>${kafka.version}</version>
+            <scope>provided</scope>
         </dependency>
         <dependency>
             <groupId>org.json</groupId>
@@ -148,12 +150,13 @@
             <groupId>io.confluent</groupId>
             <artifactId>kafka-connect-avro-converter</artifactId>
             <version>5.2.1</version>
+            <scope>provided</scope>
         </dependency>
         <dependency>
             <groupId>org.apache.kafka</groupId>
             <artifactId>connect-runtime</artifactId>
             <version>5.3.0-ccs</version>
-            <scope>compile</scope>
+            <scope>provided</scope>
         </dependency>
     </dependencies>
     <repositories>

With `provided` scope these artifacts are still compiled against, but are no longer bundled into the connector package; the Kafka Connect runtime supplies them at run time.
FileWriter.java

@@ -256,7 +256,6 @@ public class FileWriter implements Closeable {
         recordWriter.write(record);
         currentFile.records.add(record);
         currentFile.rawBytes = recordWriter.getDataSize();
-        currentFile.zippedBytes += countingStream.numBytes;
         currentFile.numRecords++;
         if (this.flushInterval == 0 || currentFile.rawBytes > fileThreshold || shouldWriteAvroAsBytes) {
             rotate(record.kafkaOffset());
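Read outside the diff, the surviving lines implement a simple size-triggered file rotation. A condensed sketch of that pattern; field and parameter names mirror the diff, but the class itself and its signatures are simplified stand-ins for illustration, not the connector's real `FileWriter`:

```java
import java.io.Closeable;
import java.io.IOException;

// Illustrative stand-in: every record is appended, counters are updated,
// and the file rotates once the accumulated raw size crosses the threshold.
class RotatingFileWriter implements Closeable {
    private final long fileThreshold;  // rotate once rawBytes exceeds this
    private final long flushInterval;  // 0 disables batching: rotate on every record
    private long rawBytes = 0;
    private long numRecords = 0;

    RotatingFileWriter(long fileThreshold, long flushInterval) {
        this.fileThreshold = fileThreshold;
        this.flushInterval = flushInterval;
    }

    void write(byte[] record, long kafkaOffset) throws IOException {
        // (actual append to the on-disk file elided; size tracking simplified)
        rawBytes += record.length;
        numRecords++;
        if (flushInterval == 0 || rawBytes > fileThreshold) {
            rotate(kafkaOffset);
        }
    }

    private void rotate(long kafkaOffset) throws IOException {
        // Close the current file, hand it off for ingestion together with
        // the offset to commit, then reset counters for a fresh file (elided).
        rawBytes = 0;
        numRecords = 0;
    }

    @Override
    public void close() throws IOException {
        rotate(-1); // flush whatever is pending
    }
}
```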
SourceFile.java

@@ -8,7 +8,6 @@ import org.apache.kafka.connect.sink.SinkRecord;
 
 public class SourceFile {
     long rawBytes = 0;
-    long zippedBytes = 0;
     long numRecords = 0;
     public String path;
     public File file;
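For reference, the class as it reads after this commit, reconstructed from the hunk's context lines. The `records` field is an assumption inferred from the `currentFile.records.add(record)` call and the `SinkRecord` import above; it sits outside the hunk, so its exact declaration is not shown in this diff:

```java
import org.apache.kafka.connect.sink.SinkRecord;

import java.io.File;
import java.util.ArrayList;

public class SourceFile {
    long rawBytes = 0;
    long numRecords = 0;
    public String path;
    public File file;
    // Assumed, not visible in the hunk: the list FileWriter appends to.
    public ArrayList<SinkRecord> records = new ArrayList<>();
}
```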