add flush check for avro as bytes
Parent: 354b4514c5
Commit: 64578e2226
@@ -256,12 +256,17 @@ public class FileWriter implements Closeable {
         currentFile.rawBytes = recordWriter.getDataSize();
         currentFile.zippedBytes += countingStream.numBytes;
         currentFile.numRecords++;
-        if (this.flushInterval == 0 || currentFile.rawBytes > fileThreshold) {
+        if (this.flushInterval == 0 || currentFile.rawBytes > fileThreshold || shouldWriteAvroAsBytes(record)) {
             rotate(record.kafkaOffset());
             resetFlushTimer(true);
         }
     }
 
+    private boolean shouldWriteAvroAsBytes(SinkRecord record) {
+        return ((record.valueSchema().type() == Schema.Type.BYTES) &&
+                (ingestionProps.getDataFormat().equals(IngestionProperties.DATA_FORMAT.avro.toString())));
+    }
+
     public void initializeRecordWriter(SinkRecord record) {
         if (record.value() instanceof Map) {
             recordWriterProvider = new JsonRecordWriterProvider();
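As I read the change, the new condition forces a file rotation on every record whenever the record value schema is BYTES and the configured Kusto ingestion format is Avro, the likely intent being that each such value is already a self-contained Avro payload that should not be appended to an open file. Below is a minimal standalone sketch of that predicate, assuming only the Kafka Connect SinkRecord/Schema API; the helper and the plain "avro" string (standing in for IngestionProperties.DATA_FORMAT.avro.toString()) are hypothetical simplifications for illustration, not the connector's actual code path.

// Hypothetical illustration of the flush check added in this commit.
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.sink.SinkRecord;

public class AvroAsBytesCheckDemo {

    // Mirrors the shape of shouldWriteAvroAsBytes() above; the dataFormat string
    // replaces the Kusto IngestionProperties dependency for this sketch.
    static boolean shouldWriteAvroAsBytes(SinkRecord record, String dataFormat) {
        return record.valueSchema() != null
                && record.valueSchema().type() == Schema.Type.BYTES
                && "avro".equals(dataFormat);
    }

    public static void main(String[] args) {
        // A BYTES-schema value, e.g. a pre-serialized Avro container ("Obj" + 0x01 magic).
        byte[] avroContainerBytes = new byte[]{0x4F, 0x62, 0x6A, 0x01};
        SinkRecord record = new SinkRecord(
                "topic", 0, null, null, Schema.BYTES_SCHEMA, avroContainerBytes, 42L);

        System.out.println(shouldWriteAvroAsBytes(record, "avro")); // true  -> rotate immediately
        System.out.println(shouldWriteAvroAsBytes(record, "json")); // false -> wait for size/interval flush
    }
}

Running the main() prints true only for the Avro-as-bytes combination, matching the branch in the diff that triggers rotate(record.kafkaOffset()) regardless of the size threshold or flush interval.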