Automatic style fixes using 'make style-fix'
Parent: 18452517dc
Commit: 2f331f7c2e
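All of the hunks below are mechanical re-formatting; no logic changes. The recurring rules, shown here as a constructed before/after sketch rather than code taken from the commit (the print_offset functions are hypothetical): no space between a function name and its opening parenthesis, no spaces before the semicolons in a for header, spaces around binary operators, and a space on each side of the <inttypes.h> format macros such as PRId32 and PRId64. The macros expand to plain string literals, so the added spaces are purely stylistic; adjacent literals still concatenate at compile time.

#include <inttypes.h>
#include <stdio.h>

/* Old style, as removed throughout the diff below. */
static void print_offset_old (int64_t base) {
        int i;
        for (i = 0 ; i < 3 ; i++)
                printf("offset %"PRId64"\n", base+i);
}

/* New style, as produced by the formatter. */
static void print_offset_new(int64_t base) {
        int i;
        for (i = 0; i < 3; i++)
                printf("offset %" PRId64 "\n", base + i);
}

int main(void) {
        print_offset_old(100);
        print_offset_new(100);
        return 0;
}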
@@ -49,7 +49,7 @@ static volatile sig_atomic_t run = 1;
 /**
  * @brief Signal termination of program
  */
-static void stop (int sig) {
+static void stop(int sig) {
         run = 0;
 }

@@ -58,10 +58,10 @@ static void stop (int sig) {
 /**
  * @returns 1 if all bytes are printable, else 0.
  */
-static int is_printable (const char *buf, size_t size) {
+static int is_printable(const char *buf, size_t size) {
         size_t i;

-        for (i = 0 ; i < size ; i++)
+        for (i = 0; i < size; i++)
                 if (!isprint((int)buf[i]))
                         return 0;

@@ -69,7 +69,7 @@ static int is_printable (const char *buf, size_t size) {
 }


-int main (int argc, char **argv) {
+int main(int argc, char **argv) {
         rd_kafka_t *rk;          /* Consumer instance handle */
         rd_kafka_conf_t *conf;   /* Temporary configuration object */
         rd_kafka_resp_err_t err; /* librdkafka API error code */

@@ -107,8 +107,8 @@ int main (int argc, char **argv) {
          * host or host:port (default port 9092).
          * librdkafka will use the bootstrap brokers to acquire the full
          * set of brokers from the cluster. */
-        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
-                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                 fprintf(stderr, "%s\n", errstr);
                 rd_kafka_conf_destroy(conf);
                 return 1;

@@ -119,8 +119,8 @@ int main (int argc, char **argv) {
          * group, and the subscribed topic' partitions will be assigned
          * according to the partition.assignment.strategy
          * (consumer config property) to the consumers in the group. */
-        if (rd_kafka_conf_set(conf, "group.id", groupid,
-                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+        if (rd_kafka_conf_set(conf, "group.id", groupid, errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                 fprintf(stderr, "%s\n", errstr);
                 rd_kafka_conf_destroy(conf);
                 return 1;

@@ -131,8 +131,8 @@ int main (int argc, char **argv) {
          * in the partition to start fetching messages.
          * By setting this to earliest the consumer will read all messages
          * in the partition if there was no previously committed offset. */
-        if (rd_kafka_conf_set(conf, "auto.offset.reset", "earliest",
-                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+        if (rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                 fprintf(stderr, "%s\n", errstr);
                 rd_kafka_conf_destroy(conf);
                 return 1;

@@ -147,8 +147,8 @@ int main (int argc, char **argv) {
          */
         rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
         if (!rk) {
-                fprintf(stderr,
-                        "%% Failed to create new consumer: %s\n", errstr);
+                fprintf(stderr, "%% Failed to create new consumer: %s\n",
+                        errstr);
                 return 1;
         }

@@ -169,9 +169,8 @@ int main (int argc, char **argv) {

         /* Convert the list of topics to a format suitable for librdkafka */
         subscription = rd_kafka_topic_partition_list_new(topic_cnt);
-        for (i = 0 ; i < topic_cnt ; i++)
-                rd_kafka_topic_partition_list_add(subscription,
-                                                  topics[i],
+        for (i = 0; i < topic_cnt; i++)
+                rd_kafka_topic_partition_list_add(subscription, topics[i],
                                                   /* the partition is ignored
                                                    * by subscribe() */
                                                   RD_KAFKA_PARTITION_UA);

@@ -179,8 +178,7 @@ int main (int argc, char **argv) {
         /* Subscribe to the list of topics */
         err = rd_kafka_subscribe(rk, subscription);
         if (err) {
-                fprintf(stderr,
-                        "%% Failed to subscribe to %d topics: %s\n",
+                fprintf(stderr, "%% Failed to subscribe to %d topics: %s\n",
                         subscription->cnt, rd_kafka_err2str(err));
                 rd_kafka_topic_partition_list_destroy(subscription);
                 rd_kafka_destroy(rk);

@@ -220,29 +218,28 @@ int main (int argc, char **argv) {
                         /* Consumer errors are generally to be considered
                          * informational as the consumer will automatically
                          * try to recover from all types of errors. */
-                        fprintf(stderr,
-                                "%% Consumer error: %s\n",
+                        fprintf(stderr, "%% Consumer error: %s\n",
                                 rd_kafka_message_errstr(rkm));
                         rd_kafka_message_destroy(rkm);
                         continue;
                 }

                 /* Proper message. */
-                printf("Message on %s [%"PRId32"] at offset %"PRId64":\n",
+                printf("Message on %s [%" PRId32 "] at offset %" PRId64 ":\n",
                        rd_kafka_topic_name(rkm->rkt), rkm->partition,
                        rkm->offset);

                 /* Print the message key. */
                 if (rkm->key && is_printable(rkm->key, rkm->key_len))
-                        printf(" Key: %.*s\n",
-                               (int)rkm->key_len, (const char *)rkm->key);
+                        printf(" Key: %.*s\n", (int)rkm->key_len,
+                               (const char *)rkm->key);
                 else if (rkm->key)
                         printf(" Key: (%d bytes)\n", (int)rkm->key_len);

                 /* Print the message value/payload. */
                 if (rkm->payload && is_printable(rkm->payload, rkm->len))
-                        printf(" Value: %.*s\n",
-                               (int)rkm->len, (const char *)rkm->payload);
+                        printf(" Value: %.*s\n", (int)rkm->len,
+                               (const char *)rkm->payload);
                 else if (rkm->payload)
                         printf(" Value: (%d bytes)\n", (int)rkm->len);

@@ -43,15 +43,15 @@
 #include "rdkafka.h"


-static rd_kafka_queue_t *queue; /** Admin result queue.
-                                 *  This is a global so we can
-                                 *  yield in stop() */
+static rd_kafka_queue_t *queue; /** Admin result queue.
+                                 *  This is a global so we can
+                                 *  yield in stop() */
 static volatile sig_atomic_t run = 1;

 /**
  * @brief Signal termination of program
  */
-static void stop (int sig) {
+static void stop(int sig) {
         if (!run) {
                 fprintf(stderr, "%% Forced termination\n");
                 exit(2);

@@ -64,11 +64,11 @@ static void stop (int sig) {
 /**
  * @brief Parse an integer or fail.
  */
-int64_t parse_int (const char *what, const char *str) {
+int64_t parse_int(const char *what, const char *str) {
         char *end;
         unsigned long n = strtoull(str, &end, 0);

-        if (end != str+strlen(str)) {
+        if (end != str + strlen(str)) {
                 fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
                         what, str);
                 exit(1);

@@ -78,27 +78,28 @@ int64_t parse_int (const char *what, const char *str) {
 }


-int main (int argc, char **argv) {
-        rd_kafka_conf_t *conf; /* Temporary configuration object */
-        char errstr[512];      /* librdkafka API error reporting buffer */
-        const char *brokers;   /* Argument: broker list */
-        rd_kafka_t *rk;        /* Admin client instance */
+int main(int argc, char **argv) {
+        rd_kafka_conf_t *conf; /* Temporary configuration object */
+        char errstr[512];      /* librdkafka API error reporting buffer */
+        const char *brokers;   /* Argument: broker list */
+        rd_kafka_t *rk;        /* Admin client instance */
         rd_kafka_topic_partition_list_t *offsets_before; /* Delete messages up
                                                           * to but not
                                                           * including these
                                                           * offsets */
         rd_kafka_DeleteRecords_t *del_records; /* Container for offsets_before*/
-        rd_kafka_AdminOptions_t *options;      /* (Optional) Options for
-                                                * DeleteRecords() */
-        rd_kafka_event_t *event;               /* DeleteRecords result event */
+        rd_kafka_AdminOptions_t *options;      /* (Optional) Options for
+                                                * DeleteRecords() */
+        rd_kafka_event_t *event;               /* DeleteRecords result event */
         int exitcode = 0;
         int i;

         /*
          * Argument validation
          */
-        if (argc < 5 || (argc-2) % 3 != 0) {
-                fprintf(stderr, "%% Usage: %s <broker> "
+        if (argc < 5 || (argc - 2) % 3 != 0) {
+                fprintf(stderr,
+                        "%% Usage: %s <broker> "
                         "<topic> <partition> <offset_before> "
                         "<topic2> <partition2> <offset_before2> ...\n"
                         "\n"

@@ -112,15 +113,15 @@ int main (int argc, char **argv) {
         brokers = argv[1];

         /* Parse topic partition offset tuples and add to offsets list */
-        offsets_before = rd_kafka_topic_partition_list_new((argc-2) / 3);
-        for (i = 2 ; i < argc ; i += 3) {
+        offsets_before = rd_kafka_topic_partition_list_new((argc - 2) / 3);
+        for (i = 2; i < argc; i += 3) {
                 const char *topic = argv[i];
-                int partition = parse_int("partition", argv[i+1]);
-                int64_t offset = parse_int("offset_before", argv[i+2]);
+                int partition     = parse_int("partition", argv[i + 1]);
+                int64_t offset    = parse_int("offset_before", argv[i + 2]);

-                rd_kafka_topic_partition_list_add(offsets_before,
-                                                  topic,
-                                                  partition)->offset = offset;
+                rd_kafka_topic_partition_list_add(offsets_before, topic,
+                                                  partition)
+                    ->offset = offset;
         }

         /*

@@ -132,8 +133,8 @@ int main (int argc, char **argv) {
          * host or host:port (default port 9092).
          * librdkafka will use the bootstrap brokers to acquire the full
          * set of brokers from the cluster. */
-        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
-                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                 fprintf(stderr, "%s\n", errstr);
                 return 1;
         }

@@ -150,8 +151,8 @@ int main (int argc, char **argv) {
          */
         rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
         if (!rk) {
-                fprintf(stderr,
-                        "%% Failed to create new producer: %s\n", errstr);
+                fprintf(stderr, "%% Failed to create new producer: %s\n",
+                        errstr);
                 return 1;
         }

@@ -163,11 +164,10 @@ int main (int argc, char **argv) {
         signal(SIGINT, stop);

         /* Set timeout (optional) */
-        options = rd_kafka_AdminOptions_new(rk,
-                                            RD_KAFKA_ADMIN_OP_DELETERECORDS);
-        if (rd_kafka_AdminOptions_set_request_timeout(options,
-                                                      30 * 1000 /* 30s */,
-                                                      errstr, sizeof(errstr))) {
+        options =
+            rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETERECORDS);
+        if (rd_kafka_AdminOptions_set_request_timeout(
+                options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) {
                 fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
                 return 1;
         }

@@ -186,7 +186,7 @@ int main (int argc, char **argv) {


         /* Wait for results */
-        event = rd_kafka_queue_poll(queue, -1/*indefinitely*/);
+        event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/);

         if (!event) {
                 /* User hit Ctrl-C */

@@ -205,12 +205,12 @@ int main (int argc, char **argv) {
                 const rd_kafka_topic_partition_list_t *offsets;
                 int i;

-                result = rd_kafka_event_DeleteRecords_result(event);
+                result  = rd_kafka_event_DeleteRecords_result(event);
                 offsets = rd_kafka_DeleteRecords_result_offsets(result);

                 printf("DeleteRecords results:\n");
-                for (i = 0 ; i < offsets->cnt ; i++)
-                        printf(" %s [%"PRId32"] offset %"PRId64": %s\n",
+                for (i = 0; i < offsets->cnt; i++)
+                        printf(" %s [%" PRId32 "] offset %" PRId64 ": %s\n",
                                offsets->elems[i].topic,
                                offsets->elems[i].partition,
                                offsets->elems[i].offset,

@@ -57,13 +57,13 @@ static volatile sig_atomic_t run = 1;
 /**
  * @brief Signal termination of program
  */
-static void stop (int sig) {
+static void stop(int sig) {
         run = 0;
 }


 static int deliveredcnt = 0;
-static int msgerrcnt = 0;
+static int msgerrcnt    = 0;

 /**
  * @brief Message delivery report callback.

@@ -76,8 +76,8 @@ static int msgerrcnt = 0;
  * The callback is triggered from rd_kafka_poll() or rd_kafka_flush() and
  * executes on the application's thread.
  */
-static void dr_msg_cb (rd_kafka_t *rk,
-                       const rd_kafka_message_t *rkmessage, void *opaque) {
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
         if (rkmessage->err) {
                 fprintf(stderr, "%% Message delivery failed: %s\n",
                         rd_kafka_err2str(rkmessage->err));

@@ -85,9 +85,8 @@ static void dr_msg_cb (rd_kafka_t *rk,
         } else {
                 fprintf(stderr,
                         "%% Message delivered (%zd bytes, topic %s, "
-                        "partition %"PRId32", offset %"PRId64")\n",
-                        rkmessage->len,
-                        rd_kafka_topic_name(rkmessage->rkt),
+                        "partition %" PRId32 ", offset %" PRId64 ")\n",
+                        rkmessage->len, rd_kafka_topic_name(rkmessage->rkt),
                         rkmessage->partition, rkmessage->offset);
                 deliveredcnt++;
         }

@@ -112,8 +111,8 @@ static void dr_msg_cb (rd_kafka_t *rk,
  * the idempotence guarantees can't be satisfied, these errors
  * are identified by a the `RD_KAFKA_RESP_ERR__FATAL` error code.
  */
-static void error_cb (rd_kafka_t *rk, int err, const
-                      char *reason, void *opaque) {
+static void
+error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
         rd_kafka_resp_err_t orig_err;
         char errstr[512];


@@ -143,8 +142,8 @@ static void error_cb (rd_kafka_t *rk, int err, const
          */

         orig_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
-        fprintf(stderr, "%% FATAL ERROR: %s: %s\n",
-                rd_kafka_err2name(orig_err), errstr);
+        fprintf(stderr, "%% FATAL ERROR: %s: %s\n", rd_kafka_err2name(orig_err),
+                errstr);

         /* Clean termination to get delivery results (from rd_kafka_flush())
          * for all outstanding/in-transit/queued messages. */

@@ -153,7 +152,7 @@ static void error_cb (rd_kafka_t *rk, int err, const
 }


-int main (int argc, char **argv) {
+int main(int argc, char **argv) {
         rd_kafka_t *rk;        /* Producer instance handle */
         rd_kafka_conf_t *conf; /* Temporary configuration object */
         char errstr[512];      /* librdkafka API error reporting buffer */

@@ -183,16 +182,16 @@ int main (int argc, char **argv) {
          * host or host:port (default port 9092).
          * librdkafka will use the bootstrap brokers to acquire the full
          * set of brokers from the cluster. */
-        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
-                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                 fprintf(stderr, "%s\n", errstr);
                 rd_kafka_conf_destroy(conf);
                 return 1;
         }

         /* Enable the idempotent producer */
-        if (rd_kafka_conf_set(conf, "enable.idempotence", "true",
-                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+        if (rd_kafka_conf_set(conf, "enable.idempotence", "true", errstr,
+                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                 fprintf(stderr, "%s\n", errstr);
                 rd_kafka_conf_destroy(conf);
                 return 1;

@@ -222,8 +221,8 @@ int main (int argc, char **argv) {
          */
         rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
         if (!rk) {
-                fprintf(stderr,
-                        "%% Failed to create new producer: %s\n", errstr);
+                fprintf(stderr, "%% Failed to create new producer: %s\n",
+                        errstr);
                 return 1;
         }

@@ -252,21 +251,19 @@ int main (int argc, char **argv) {
                  */
         retry:
                 err = rd_kafka_producev(
-                        rk,
-                        RD_KAFKA_V_TOPIC(topic),
-                        RD_KAFKA_V_VALUE(buf, strlen(buf)),
-                        /* Copy the message payload so the `buf` can
-                         * be reused for the next message. */
-                        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
-                        RD_KAFKA_V_END);
+                    rk, RD_KAFKA_V_TOPIC(topic),
+                    RD_KAFKA_V_VALUE(buf, strlen(buf)),
+                    /* Copy the message payload so the `buf` can
+                     * be reused for the next message. */
+                    RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);

                 if (err) {
                         /**
                          * Failed to *enqueue* message for producing.
                          */
                         fprintf(stderr,
-                                "%% Failed to produce to topic %s: %s\n",
-                                topic, rd_kafka_err2str(err));
+                                "%% Failed to produce to topic %s: %s\n", topic,
+                                rd_kafka_err2str(err));

                         if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
                                 /* If the internal queue is full, wait for

@@ -279,7 +276,8 @@ int main (int argc, char **argv) {
                                  * The internal queue is limited by the
                                  * configuration property
                                  * queue.buffering.max.messages */
-                                rd_kafka_poll(rk, 1000/*block for max 1000ms*/);
+                                rd_kafka_poll(rk,
+                                              1000 /*block for max 1000ms*/);
                                 goto retry;
                         } else {
                                 /* Produce failed, most likely due to a

@@ -304,7 +302,7 @@ int main (int argc, char **argv) {
                  * to make sure previously produced messages have their
                  * delivery report callback served (and any other callbacks
                  * you register). */
-                rd_kafka_poll(rk, 0/*non-blocking*/);
+                rd_kafka_poll(rk, 0 /*non-blocking*/);

                 msgcnt++;

@@ -313,10 +311,9 @@ int main (int argc, char **argv) {
                  * some time. */
                 if (msgcnt == 13)
                         rd_kafka_test_fatal_error(
-                                rk,
-                                RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
-                                "This is a fabricated error to test the "
-                                "fatal error handling");
+                            rk, RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
+                            "This is a fabricated error to test the "
+                            "fatal error handling");

                 /* Short sleep to rate-limit this example.
                  * A real application should not do this. */

@@ -328,9 +325,8 @@ int main (int argc, char **argv) {
          * rd_kafka_flush() is an abstraction over rd_kafka_poll() which
          * waits for all messages to be delivered. */
         fprintf(stderr, "%% Flushing outstanding messages..\n");
-        rd_kafka_flush(rk, 10*1000 /* wait for max 10 seconds */);
-        fprintf(stderr,
-                "%% %d message(s) produced, %d delivered, %d failed\n",
+        rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */);
+        fprintf(stderr, "%% %d message(s) produced, %d delivered, %d failed\n",
                 msgcnt, deliveredcnt, msgerrcnt);

         /* Save fatal error prior for using with exit status below. */

@@ -344,4 +340,4 @@ int main (int argc, char **argv) {
                 return 1;
         else
                 return 0;
-}
+}

@@ -61,56 +61,60 @@
 #include "rdkafkacpp.h"

 static volatile sig_atomic_t run = 1;
-static bool exit_eof = false;
-static int verbosity = 1;
+static bool exit_eof             = false;
+static int verbosity             = 1;
 static std::string value_prefix;

 class Assignment {

  public:
-  static std::string name (const std::string &t, int partition) {
+  static std::string name(const std::string &t, int partition) {
    std::stringstream stm;
    stm << t << "." << partition;
    return stm.str();
  }

-  Assignment(): topic(""), partition(-1), consumedMessages(0),
-                minOffset(-1), maxOffset(0) {
+  Assignment() :
+      topic(""),
+      partition(-1),
+      consumedMessages(0),
+      minOffset(-1),
+      maxOffset(0) {
    printf("Created assignment\n");
  }
  Assignment(const Assignment &a) {
-    topic = a.topic;
-    partition = a.partition;
+    topic            = a.topic;
+    partition        = a.partition;
    consumedMessages = a.consumedMessages;
-    minOffset = a.minOffset;
-    maxOffset = a.maxOffset;
+    minOffset        = a.minOffset;
+    maxOffset        = a.maxOffset;
  }

  Assignment &operator=(const Assignment &a) {
-    this->topic = a.topic;
-    this->partition = a.partition;
+    this->topic            = a.topic;
+    this->partition        = a.partition;
    this->consumedMessages = a.consumedMessages;
-    this->minOffset = a.minOffset;
-    this->maxOffset = a.maxOffset;
+    this->minOffset        = a.minOffset;
+    this->maxOffset        = a.maxOffset;
    return *this;
  }

  int operator==(const Assignment &a) const {
-    return !(this->topic == a.topic &&
-             this->partition == a.partition);
+    return !(this->topic == a.topic && this->partition == a.partition);
  }

  int operator<(const Assignment &a) const {
-    if (this->topic < a.topic) return 1;
-    if (this->topic >= a.topic) return 0;
+    if (this->topic < a.topic)
+      return 1;
+    if (this->topic >= a.topic)
+      return 0;
    return (this->partition < a.partition);
  }

-  void setup (std::string t, int32_t p) {
+  void setup(std::string t, int32_t p) {
    assert(!t.empty());
    assert(topic.empty() || topic == t);
    assert(partition == -1 || partition == p);
-    topic = t;
+    topic     = t;
    partition = p;
  }

@@ -123,7 +127,6 @@ class Assignment {



-
 static struct {
  int maxMessages;

@@ -141,14 +144,13 @@ static struct {
    std::map<std::string, Assignment> assignments;
  } consumer;
 } state = {
-  /* .maxMessages = */ -1
-};
+    /* .maxMessages = */ -1};


 static RdKafka::KafkaConsumer *consumer;

-static std::string now () {
+static std::string now() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  time_t t = tv.tv_sec;

@@ -157,7 +159,7 @@ static std::string now () {

  localtime_r(&t, &tm);
  strftime(buf, sizeof(buf), "%H:%M:%S", &tm);
-  snprintf(buf+strlen(buf), sizeof(buf)-strlen(buf), ".%03d",
+  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ".%03d",
           (int)(tv.tv_usec / 1000));

  return buf;

@@ -166,18 +168,19 @@ static std::string now () {

 static time_t watchdog_last_kick;
 static const int watchdog_timeout = 20; /* Must be > socket.timeout.ms */
-static void sigwatchdog (int sig) {
+static void sigwatchdog(int sig) {
  time_t t = time(NULL);
  if (watchdog_last_kick + watchdog_timeout <= t) {
-    std::cerr << now() << ": WATCHDOG TIMEOUT (" <<
-        (int)(t - watchdog_last_kick) << "s): TERMINATING" << std::endl;
+    std::cerr << now() << ": WATCHDOG TIMEOUT ("
+              << (int)(t - watchdog_last_kick) << "s): TERMINATING"
+              << std::endl;
    int *i = NULL;
-    *i = 100;
+    *i     = 100;
    abort();
  }
 }

-static void watchdog_kick () {
+static void watchdog_kick() {
  watchdog_last_kick = time(NULL);

  /* Safe guard against hangs-on-exit */

@@ -186,13 +189,11 @@ static void watchdog_kick () {



-
-
-static void errorString (const std::string &name,
-                         const std::string &errmsg,
-                         const std::string &topic,
-                         const std::string *key,
-                         const std::string &value) {
+static void errorString(const std::string &name,
+                        const std::string &errmsg,
+                        const std::string &topic,
+                        const std::string *key,
+                        const std::string &value) {
  std::cout << "{ "
            << "\"name\": \"" << name << "\", "
            << "\"_time\": \"" << now() << "\", "

@@ -204,12 +205,12 @@ static void errorString (const std::string &name,
 }


-static void successString (const std::string &name,
-                           const std::string &topic,
-                           int partition,
-                           int64_t offset,
-                           const std::string *key,
-                           const std::string &value) {
+static void successString(const std::string &name,
+                          const std::string &topic,
+                          int partition,
+                          int64_t offset,
+                          const std::string *key,
+                          const std::string &value) {
  std::cout << "{ "
            << "\"name\": \"" << name << "\", "
            << "\"_time\": \"" << now() << "\", "

@@ -223,29 +224,27 @@ static void successString (const std::string &name,


 #if FIXME
-static void offsetStatus (bool success,
-                          const std::string &topic,
-                          int partition,
-                          int64_t offset,
-                          const std::string &errstr) {
+static void offsetStatus(bool success,
+                         const std::string &topic,
+                         int partition,
+                         int64_t offset,
+                         const std::string &errstr) {
  std::cout << "{ "
-      "\"name\": \"offsets_committed\", " <<
-      "\"success\": " << success << ", " <<
-      "\"offsets\": [ " <<
-      " { " <<
-      " \"topic\": \"" << topic << "\", " <<
-      " \"partition\": " << partition << ", " <<
-      " \"offset\": " << (int)offset << ", " <<
-      " \"error\": \"" << errstr << "\" " <<
-      " } " <<
-      "] }" << std::endl;
-
+               "\"name\": \"offsets_committed\", "
+            << "\"success\": " << success << ", "
+            << "\"offsets\": [ "
+            << " { "
+            << " \"topic\": \"" << topic << "\", "
+            << " \"partition\": " << partition << ", "
+            << " \"offset\": " << (int)offset << ", "
+            << " \"error\": \"" << errstr << "\" "
+            << " } "
+            << "] }" << std::endl;
 }
 #endif


-static void sigterm (int sig) {
+static void sigterm(int sig) {
  std::cerr << now() << ": Terminating because of signal " << sig << std::endl;

  if (!run) {

@@ -258,21 +257,17 @@ static void sigterm (int sig) {

 class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
  public:
-  void dr_cb (RdKafka::Message &message) {
+  void dr_cb(RdKafka::Message &message) {
    if (message.err()) {
      state.producer.numErr++;
-      errorString("producer_send_error", message.errstr(),
-                  message.topic_name(),
+      errorString("producer_send_error", message.errstr(), message.topic_name(),
                  message.key(),
-                  std::string(static_cast<const char*>(message.payload()),
+                  std::string(static_cast<const char *>(message.payload()),
                              message.len()));
    } else {
-      successString("producer_send_success",
-                    message.topic_name(),
-                    (int)message.partition(),
-                    message.offset(),
-                    message.key(),
-                    std::string(static_cast<const char*>(message.payload()),
+      successString("producer_send_success", message.topic_name(),
+                    (int)message.partition(), message.offset(), message.key(),
+                    std::string(static_cast<const char *>(message.payload()),
                                message.len()));
      state.producer.numAcked++;
    }

@@ -282,28 +277,27 @@ class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {

 class ExampleEventCb : public RdKafka::EventCb {
  public:
-  void event_cb (RdKafka::Event &event) {
-    switch (event.type())
-    {
-      case RdKafka::Event::EVENT_ERROR:
-        std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
+  void event_cb(RdKafka::Event &event) {
+    switch (event.type()) {
+    case RdKafka::Event::EVENT_ERROR:
+      std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err())
+                << "): " << event.str() << std::endl;
+      break;

-      case RdKafka::Event::EVENT_STATS:
-        std::cerr << now() << ": \"STATS\": " << event.str() << std::endl;
-        break;
+    case RdKafka::Event::EVENT_STATS:
+      std::cerr << now() << ": \"STATS\": " << event.str() << std::endl;
+      break;

-      case RdKafka::Event::EVENT_LOG:
-        std::cerr << now() << ": LOG-" << event.severity() << "-"
-                  << event.fac() << ": " << event.str() << std::endl;
-        break;
+    case RdKafka::Event::EVENT_LOG:
+      std::cerr << now() << ": LOG-" << event.severity() << "-" << event.fac()
+                << ": " << event.str() << std::endl;
+      break;

-      default:
-        std::cerr << now() << ": EVENT " << event.type() <<
-            " (" << RdKafka::err2str(event.err()) << "): " <<
-            event.str() << std::endl;
-        break;
+    default:
+      std::cerr << now() << ": EVENT " << event.type() << " ("
+                << RdKafka::err2str(event.err()) << "): " << event.str()
+                << std::endl;
+      break;
    }
  }
 };

@@ -313,15 +307,17 @@ class ExampleEventCb : public RdKafka::EventCb {
  * in the produce() call. */
 class MyHashPartitionerCb : public RdKafka::PartitionerCb {
  public:
-  int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key,
-                          int32_t partition_cnt, void *msg_opaque) {
+  int32_t partitioner_cb(const RdKafka::Topic *topic,
+                         const std::string *key,
+                         int32_t partition_cnt,
+                         void *msg_opaque) {
    return djb_hash(key->c_str(), key->size()) % partition_cnt;
  }
- private:
-
-  static inline unsigned int djb_hash (const char *str, size_t len) {
+
+ private:
+  static inline unsigned int djb_hash(const char *str, size_t len) {
    unsigned int hash = 5381;
-    for (size_t i = 0 ; i < len ; i++)
+    for (size_t i = 0; i < len; i++)
      hash = ((hash << 5) + hash) + str[i];
    return hash;
  }

@@ -329,35 +325,35 @@ class MyHashPartitionerCb : public RdKafka::PartitionerCb {



 /**
  * Print number of records consumed, every 100 messages or on timeout.
  */
-static void report_records_consumed (int immediate) {
-  std::map<std::string,Assignment> *assignments = &state.consumer.assignments;
+static void report_records_consumed(int immediate) {
+  std::map<std::string, Assignment> *assignments = &state.consumer.assignments;

  if (state.consumer.consumedMessages <=
      state.consumer.consumedMessagesLastReported + (immediate ? 0 : 999))
    return;

  std::cout << "{ "
-      "\"name\": \"records_consumed\", " <<
-      "\"_totcount\": " << state.consumer.consumedMessages << ", " <<
-      "\"count\": " << (state.consumer.consumedMessages -
-                        state.consumer.consumedMessagesLastReported) << ", " <<
-      "\"partitions\": [ ";
+               "\"name\": \"records_consumed\", "
+            << "\"_totcount\": " << state.consumer.consumedMessages << ", "
+            << "\"count\": "
+            << (state.consumer.consumedMessages -
+                state.consumer.consumedMessagesLastReported)
+            << ", "
+            << "\"partitions\": [ ";

-  for (std::map<std::string,Assignment>::iterator ii = assignments->begin() ;
-       ii != assignments->end() ; ii++) {
+  for (std::map<std::string, Assignment>::iterator ii = assignments->begin();
+       ii != assignments->end(); ii++) {
    Assignment *a = &(*ii).second;
    assert(!a->topic.empty());
-    std::cout << (ii == assignments->begin() ? "": ", ") << " { " <<
-        " \"topic\": \"" << a->topic << "\", " <<
-        " \"partition\": " << a->partition << ", " <<
-        " \"minOffset\": " << a->minOffset << ", " <<
-        " \"maxOffset\": " << a->maxOffset << " " <<
-        " } ";
+    std::cout << (ii == assignments->begin() ? "" : ", ") << " { "
+              << " \"topic\": \"" << a->topic << "\", "
+              << " \"partition\": " << a->partition << ", "
+              << " \"minOffset\": " << a->minOffset << ", "
+              << " \"maxOffset\": " << a->maxOffset << " "
+              << " } ";
    a->minOffset = -1;
  }

@@ -369,36 +365,39 @@ static void report_records_consumed (int immediate) {

 class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb {
  public:
-  void offset_commit_cb (RdKafka::ErrorCode err,
-                         std::vector<RdKafka::TopicPartition*> &offsets) {
-    std::cerr << now() << ": Propagate offset for " << offsets.size() << " partitions, error: " << RdKafka::err2str(err) << std::endl;
+  void offset_commit_cb(RdKafka::ErrorCode err,
+                        std::vector<RdKafka::TopicPartition *> &offsets) {
+    std::cerr << now() << ": Propagate offset for " << offsets.size()
+              << " partitions, error: " << RdKafka::err2str(err) << std::endl;

    /* No offsets to commit, dont report anything. */
    if (err == RdKafka::ERR__NO_OFFSET)
      return;

-    /* Send up-to-date records_consumed report to make sure consumed > committed */
+    /* Send up-to-date records_consumed report to make sure consumed > committed
+     */
    report_records_consumed(1);

-    std::cout << "{ " <<
-        "\"name\": \"offsets_committed\", " <<
-        "\"success\": " << (err ? "false" : "true") << ", " <<
-        "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", " <<
-        "\"_autocommit\": " << (state.consumer.useAutoCommit ? "true":"false") << ", " <<
-        "\"offsets\": [ ";
+    std::cout << "{ "
+              << "\"name\": \"offsets_committed\", "
+              << "\"success\": " << (err ? "false" : "true") << ", "
+              << "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", "
+              << "\"_autocommit\": "
+              << (state.consumer.useAutoCommit ? "true" : "false") << ", "
+              << "\"offsets\": [ ";
    assert(offsets.size() > 0);
-    for (unsigned int i = 0 ; i < offsets.size() ; i++) {
-      std::cout << (i == 0 ? "" : ", ") << "{ " <<
-          " \"topic\": \"" << offsets[i]->topic() << "\", " <<
-          " \"partition\": " << offsets[i]->partition() << ", " <<
-          " \"offset\": " << (int)offsets[i]->offset() << ", " <<
-          " \"error\": \"" <<
-          (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err()) : "") <<
-          "\" " <<
-          " }";
+    for (unsigned int i = 0; i < offsets.size(); i++) {
+      std::cout << (i == 0 ? "" : ", ") << "{ "
+                << " \"topic\": \"" << offsets[i]->topic() << "\", "
+                << " \"partition\": " << offsets[i]->partition() << ", "
+                << " \"offset\": " << (int)offsets[i]->offset() << ", "
+                << " \"error\": \""
+                << (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err())
+                                      : "")
+                << "\" "
+                << " }";
    }
    std::cout << " ] }" << std::endl;
  }
 };

@@ -408,12 +407,10 @@ static ExampleOffsetCommitCb ex_offset_commit_cb;
 /**
  * Commit every 1000 messages or whenever there is a consume timeout.
  */
-static void do_commit (RdKafka::KafkaConsumer *consumer,
-                       int immediate) {
-  if (!immediate &&
-      (state.consumer.useAutoCommit ||
-       state.consumer.consumedMessagesAtLastCommit + 1000 >
-       state.consumer.consumedMessages))
+static void do_commit(RdKafka::KafkaConsumer *consumer, int immediate) {
+  if (!immediate && (state.consumer.useAutoCommit ||
+                     state.consumer.consumedMessagesAtLastCommit + 1000 >
+                         state.consumer.consumedMessages))
    return;

  /* Make sure we report consumption before commit,

@@ -422,106 +419,102 @@ static void do_commit (RdKafka::KafkaConsumer *consumer,
      state.consumer.consumedMessages)
    report_records_consumed(1);

-  std::cerr << now() << ": committing " <<
-      (state.consumer.consumedMessages -
-       state.consumer.consumedMessagesAtLastCommit) << " messages" << std::endl;
+  std::cerr << now() << ": committing "
+            << (state.consumer.consumedMessages -
+                state.consumer.consumedMessagesAtLastCommit)
+            << " messages" << std::endl;

  RdKafka::ErrorCode err;
  err = consumer->commitSync(&ex_offset_commit_cb);

-  std::cerr << now() << ": " <<
-      "sync commit returned " << RdKafka::err2str(err) << std::endl;
+  std::cerr << now() << ": "
+            << "sync commit returned " << RdKafka::err2str(err) << std::endl;

-  state.consumer.consumedMessagesAtLastCommit =
-      state.consumer.consumedMessages;
+  state.consumer.consumedMessagesAtLastCommit = state.consumer.consumedMessages;
 }


 void msg_consume(RdKafka::KafkaConsumer *consumer,
-                 RdKafka::Message* msg, void* opaque) {
+                 RdKafka::Message *msg,
+                 void *opaque) {
  switch (msg->err()) {
-    case RdKafka::ERR__TIMED_OUT:
-      /* Try reporting consumed messages */
-      report_records_consumed(1);
-      /* Commit one every consume() timeout instead of on every message.
-       * Also commit on every 1000 messages, whichever comes first. */
-      do_commit(consumer, 1);
-      break;
+  case RdKafka::ERR__TIMED_OUT:
+    /* Try reporting consumed messages */
+    report_records_consumed(1);
+    /* Commit one every consume() timeout instead of on every message.
+     * Also commit on every 1000 messages, whichever comes first. */
+    do_commit(consumer, 1);
+    break;


-    case RdKafka::ERR_NO_ERROR:
-      {
-        /* Real message */
-        if (verbosity > 2)
-          std::cerr << now() << ": Read msg from " << msg->topic_name() <<
-              " [" << (int)msg->partition() << "] at offset " <<
-              msg->offset() << std::endl;
+  case RdKafka::ERR_NO_ERROR: {
+    /* Real message */
+    if (verbosity > 2)
+      std::cerr << now() << ": Read msg from " << msg->topic_name() << " ["
+                << (int)msg->partition() << "] at offset " << msg->offset()
+                << std::endl;

-        if (state.maxMessages >= 0 &&
-            state.consumer.consumedMessages >= state.maxMessages)
-          return;
+    if (state.maxMessages >= 0 &&
+        state.consumer.consumedMessages >= state.maxMessages)
+      return;


-        Assignment *a =
-            &state.consumer.assignments[Assignment::name(msg->topic_name(),
-                                                         msg->partition())];
-        a->setup(msg->topic_name(), msg->partition());
+    Assignment *a = &state.consumer.assignments[Assignment::name(
+        msg->topic_name(), msg->partition())];
+    a->setup(msg->topic_name(), msg->partition());

-        a->consumedMessages++;
-        if (a->minOffset == -1)
-          a->minOffset = msg->offset();
-        if (a->maxOffset < msg->offset())
-          a->maxOffset = msg->offset();
+    a->consumedMessages++;
+    if (a->minOffset == -1)
+      a->minOffset = msg->offset();
+    if (a->maxOffset < msg->offset())
+      a->maxOffset = msg->offset();

-        if (msg->key()) {
-          if (verbosity >= 3)
-            std::cerr << now() << ": Key: " << *msg->key() << std::endl;
-        }
+    if (msg->key()) {
+      if (verbosity >= 3)
+        std::cerr << now() << ": Key: " << *msg->key() << std::endl;
+    }

-        if (verbosity >= 3)
-          fprintf(stderr, "%.*s\n",
-                  static_cast<int>(msg->len()),
-                  static_cast<const char *>(msg->payload()));
+    if (verbosity >= 3)
+      fprintf(stderr, "%.*s\n", static_cast<int>(msg->len()),
+              static_cast<const char *>(msg->payload()));

-        state.consumer.consumedMessages++;
+    state.consumer.consumedMessages++;

-        report_records_consumed(0);
+    report_records_consumed(0);

-        do_commit(consumer, 0);
-      }
-      break;
+    do_commit(consumer, 0);
+  } break;

-    case RdKafka::ERR__PARTITION_EOF:
-      /* Last message */
-      if (exit_eof) {
-        std::cerr << now() << ": Terminate: exit on EOF" << std::endl;
-        run = 0;
-      }
-      break;
+  case RdKafka::ERR__PARTITION_EOF:
+    /* Last message */
+    if (exit_eof) {
+      std::cerr << now() << ": Terminate: exit on EOF" << std::endl;
+      run = 0;
+    }
+    break;

-    case RdKafka::ERR__UNKNOWN_TOPIC:
-    case RdKafka::ERR__UNKNOWN_PARTITION:
-      std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
-      run = 0;
-      break;
+  case RdKafka::ERR__UNKNOWN_TOPIC:
+  case RdKafka::ERR__UNKNOWN_PARTITION:
+    std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
+    run = 0;
+    break;

-    case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
-      std::cerr << now() << ": Warning: " << msg->errstr() << std::endl;
-      break;
+  case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
+    std::cerr << now() << ": Warning: " << msg->errstr() << std::endl;
+    break;

-    default:
-      /* Errors */
-      std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
-      run = 0;
+  default:
+    /* Errors */
+    std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
+    run = 0;
  }
 }



 class ExampleConsumeCb : public RdKafka::ConsumeCb {
  public:
-  void consume_cb (RdKafka::Message &msg, void *opaque) {
+  void consume_cb(RdKafka::Message &msg, void *opaque) {
    msg_consume(consumer_, &msg, opaque);
  }
  RdKafka::KafkaConsumer *consumer_;

@@ -529,22 +522,22 @@ class ExampleConsumeCb : public RdKafka::ConsumeCb {

 class ExampleRebalanceCb : public RdKafka::RebalanceCb {
  private:
-  static std::string part_list_json (const std::vector<RdKafka::TopicPartition*> &partitions) {
+  static std::string part_list_json(
+      const std::vector<RdKafka::TopicPartition *> &partitions) {
    std::ostringstream out;
-    for (unsigned int i = 0 ; i < partitions.size() ; i++)
-      out << (i==0?"":", ") << "{ " <<
-          " \"topic\": \"" << partitions[i]->topic() << "\", " <<
-          " \"partition\": " << partitions[i]->partition() <<
-          " }";
+    for (unsigned int i = 0; i < partitions.size(); i++)
+      out << (i == 0 ? "" : ", ") << "{ "
+          << " \"topic\": \"" << partitions[i]->topic() << "\", "
+          << " \"partition\": " << partitions[i]->partition() << " }";
    return out.str();
  }
- public:
-  void rebalance_cb (RdKafka::KafkaConsumer *consumer,
-                     RdKafka::ErrorCode err,
-                     std::vector<RdKafka::TopicPartition*> &partitions) {
-
-    std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) <<
-        " for " << partitions.size() << " partitions" << std::endl;
+
+ public:
+  void rebalance_cb(RdKafka::KafkaConsumer *consumer,
+                    RdKafka::ErrorCode err,
+                    std::vector<RdKafka::TopicPartition *> &partitions) {
+    std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) << " for "
+              << partitions.size() << " partitions" << std::endl;
    /* Send message report prior to rebalancing event to make sure they
     * are accounted for on the "right side" of the rebalance. */
    report_records_consumed(1);

@@ -556,12 +549,13 @@ class ExampleRebalanceCb : public RdKafka::RebalanceCb {
      consumer->unassign();
    }

-    std::cout <<
-      "{ " <<
-      "\"name\": \"partitions_" << (err == RdKafka::ERR__ASSIGN_PARTITIONS ?
-                                    "assigned" : "revoked") << "\", " <<
-      "\"partitions\": [ " << part_list_json(partitions) << "] }" << std::endl;
-
+    std::cout << "{ "
+              << "\"name\": \"partitions_"
+              << (err == RdKafka::ERR__ASSIGN_PARTITIONS ? "assigned"
+                                                         : "revoked")
+              << "\", "
+              << "\"partitions\": [ " << part_list_json(partitions) << "] }"
+              << std::endl;
  }
 };

@@ -570,11 +564,12 @@ class ExampleRebalanceCb : public RdKafka::RebalanceCb {
 /**
  * @brief Read (Java client) configuration file
  */
-static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) {
+static void read_conf_file(RdKafka::Conf *conf, const std::string &conf_file) {
  std::ifstream inf(conf_file.c_str());

  if (!inf) {
-    std::cerr << now() << ": " << conf_file << ": could not open file" << std::endl;
+    std::cerr << now() << ": " << conf_file << ": could not open file"
+              << std::endl;
    exit(1);
  }

@@ -593,18 +588,23 @@ static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) {
    // Match on key=value..
    size_t d = line.find("=");
    if (d == 0 || d == std::string::npos) {
-      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line << ": ignoring invalid line (expect key=value): " << ::std::endl;
+      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line
+                << ": ignoring invalid line (expect key=value): "
+                << ::std::endl;
      continue;
    }

    std::string key = line.substr(0, d);
-    std::string val = line.substr(d+1);
+    std::string val = line.substr(d + 1);

    std::string errstr;
    if (conf->set(key, val, errstr)) {
-      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": " << errstr << ": ignoring error" << std::endl;
+      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key
+                << "=" << val << ": " << errstr << ": ignoring error"
+                << std::endl;
    } else {
-      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key << "=" << val << ": applied to configuration" << std::endl;
+      std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key
+                << "=" << val << ": applied to configuration" << std::endl;
    }
  }

@@ -613,19 +613,18 @@ static void read_conf_file (RdKafka::Conf *conf, const std::string &conf_file) {



-int main (int argc, char **argv) {
+int main(int argc, char **argv) {
  std::string brokers = "localhost";
  std::string errstr;
  std::vector<std::string> topics;
-  std::string mode = "P";
-  int throughput = 0;
+  std::string mode    = "P";
+  int throughput      = 0;
  int32_t partition = RdKafka::Topic::PARTITION_UA;
  MyHashPartitionerCb hash_partitioner;
  int64_t create_time = -1;

-  std::cerr << now() << ": librdkafka version " << RdKafka::version_str() <<
-    " (" << RdKafka::version() << ")" << std::endl;
+  std::cerr << now() << ": librdkafka version " << RdKafka::version_str()
+            << " (" << RdKafka::version() << ")" << std::endl;

  /*
   * Create configuration objects

@@ -646,7 +645,7 @@ int main (int argc, char **argv) {

  {
    char hostname[128];
-    gethostname(hostname, sizeof(hostname)-1);
+    gethostname(hostname, sizeof(hostname) - 1);
    conf->set("client.id", std::string("rdkafka@") + hostname, errstr);
  }

@@ -664,15 +663,15 @@ int main (int argc, char **argv) {

  conf->set("enable.partition.eof", "true", errstr);

-  for (int i = 1 ; i < argc ; i++) {
+  for (int i = 1; i < argc; i++) {
    const char *name = argv[i];
-    const char *val = i+1 < argc ? argv[i+1] : NULL;
+    const char *val  = i + 1 < argc ? argv[i + 1] : NULL;

    if (val && !strncmp(val, "-", 1))
      val = NULL;

-    std::cout << now() << ": argument: " << name << " " <<
-        (val?val:"") << std::endl;
+    std::cout << now() << ": argument: " << name << " " << (val ? val : "")
+              << std::endl;

    if (val) {
      if (!strcmp(name, "--topic"))

@@ -712,22 +711,22 @@ int main (int argc, char **argv) {

        std::transform(s.begin(), s.end(), s.begin(), tolower);

-        std::cerr << now() << ": converted " << name << " "
-                  << val << " to " << s << std::endl;
+        std::cerr << now() << ": converted " << name << " " << val << " to "
+                  << s << std::endl;

-        if (conf->set("partition.assignment.strategy", s.c_str(), errstr)) {
+        if (conf->set("partition.assignment.strategy", s.c_str(), errstr)) {
          std::cerr << now() << ": " << errstr << std::endl;
          exit(1);
        }
      } else if (!strcmp(name, "--value-prefix")) {
        value_prefix = std::string(val) + ".";
      } else if (!strcmp(name, "--acks")) {
-        if (conf->set("acks", val, errstr)) {
-          std::cerr << now() << ": " << errstr << std::endl;
-          exit(1);
-        }
+        if (conf->set("acks", val, errstr)) {
+          std::cerr << now() << ": " << errstr << std::endl;
+          exit(1);
+        }
      } else if (!strcmp(name, "--message-create-time")) {
-        create_time = (int64_t)atoi(val);
+        create_time = (int64_t)atoi(val);
      } else if (!strcmp(name, "--debug")) {
        conf->set("debug", val, errstr);
      } else if (!strcmp(name, "-X")) {

@@ -764,7 +763,8 @@ int main (int argc, char **argv) {
    else if (!strcmp(name, "-q"))
      verbosity--;
    else {
-      std::cerr << now() << ": Unknown option or missing argument to " << name << std::endl;
+      std::cerr << now() << ": Unknown option or missing argument to " << name
+                << std::endl;
      exit(1);
    }
  }

@@ -786,7 +786,7 @@ int main (int argc, char **argv) {

  signal(SIGINT, sigterm);
  signal(SIGTERM, sigterm);
-  signal(SIGALRM,  sigwatchdog);
+  signal(SIGALRM, sigwatchdog);


  if (mode == "P") {

@@ -804,28 +804,30 @@ int main (int argc, char **argv) {
     */
    RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
    if (!producer) {
-      std::cerr << now() << ": Failed to create producer: " << errstr << std::endl;
+      std::cerr << now() << ": Failed to create producer: " << errstr
+                << std::endl;
      exit(1);
    }

-    std::cerr << now() << ": % Created producer " << producer->name() << std::endl;
+    std::cerr << now() << ": % Created producer " << producer->name()
+              << std::endl;

    /*
     * Create topic handle.
     */
-    RdKafka::Topic *topic = RdKafka::Topic::create(producer, topics[0],
-                                                   NULL, errstr);
+    RdKafka::Topic *topic =
+        RdKafka::Topic::create(producer, topics[0], NULL, errstr);
    if (!topic) {
      std::cerr << now() << ": Failed to create topic: " << errstr << std::endl;
      exit(1);
    }

-    static const int delay_us = throughput ? 1000000/throughput : 10;
+    static const int delay_us = throughput ? 1000000 / throughput : 10;

    if (state.maxMessages == -1)
      state.maxMessages = 1000000; /* Avoid infinite produce */

-    for (int i = 0 ; run && i < state.maxMessages ; i++) {
+    for (int i = 0; run && i < state.maxMessages; i++) {
      /*
       * Produce message
       */

@@ -833,27 +835,26 @@ int main (int argc, char **argv) {
      msg << value_prefix << i;
      while (true) {
        RdKafka::ErrorCode resp;
-        if (create_time == -1) {
-          resp = producer->produce(topic, partition,
-                                   RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
-                                   const_cast<char *>(msg.str().c_str()),
-                                   msg.str().size(), NULL, NULL);
-        } else {
-          resp = producer->produce(topics[0], partition,
-                                   RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
-                                   const_cast<char *>(msg.str().c_str()),
-                                   msg.str().size(),
-                                   NULL, 0,
-                                   create_time,
-                                   NULL);
-        }
+        if (create_time == -1) {
+          resp = producer->produce(
+              topic, partition,
+              RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+              const_cast<char *>(msg.str().c_str()), msg.str().size(), NULL,
+              NULL);
+        } else {
+          resp = producer->produce(
+              topics[0], partition,
+              RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+              const_cast<char *>(msg.str().c_str()), msg.str().size(), NULL, 0,
+              create_time, NULL);
+        }

        if (resp == RdKafka::ERR__QUEUE_FULL) {
          producer->poll(100);
          continue;
        } else if (resp != RdKafka::ERR_NO_ERROR) {
-          errorString("producer_send_error",
-                      RdKafka::err2str(resp), topic->name(), NULL, msg.str());
+          errorString("producer_send_error", RdKafka::err2str(resp),
+                      topic->name(), NULL, msg.str());
          state.producer.numErr++;
        } else {
          state.producer.numSent++;

@@ -868,15 +869,16 @@ int main (int argc, char **argv) {
    run = 1;

    while (run && producer->outq_len() > 0) {
-      std::cerr << now() << ": Waiting for " << producer->outq_len() << std::endl;
+      std::cerr << now() << ": Waiting for " << producer->outq_len()
+                << std::endl;
      producer->poll(1000);
      watchdog_kick();
    }

-    std::cerr << now() << ": " << state.producer.numAcked << "/" <<
-        state.producer.numSent << "/" << state.maxMessages <<
-        " msgs acked/sent/max, " << state.producer.numErr <<
-        " errored" << std::endl;
+    std::cerr << now() << ": " << state.producer.numAcked << "/"
+              << state.producer.numSent << "/" << state.maxMessages
+              << " msgs acked/sent/max, " << state.producer.numErr << " errored"
+              << std::endl;

    delete topic;
    delete producer;

@@ -900,21 +902,21 @@ int main (int argc, char **argv) {
     */
    consumer = RdKafka::KafkaConsumer::create(conf, errstr);
    if (!consumer) {
-      std::cerr << now() << ": Failed to create consumer: " <<
-          errstr << std::endl;
+      std::cerr << now() << ": Failed to create consumer: " << errstr
+                << std::endl;
      exit(1);
    }

-    std::cerr << now() << ": % Created consumer " << consumer->name() <<
-      std::endl;
+    std::cerr << now() << ": % Created consumer " << consumer->name()
+              << std::endl;

    /*
     * Subscribe to topic(s)
     */
    RdKafka::ErrorCode resp = consumer->subscribe(topics);
    if (resp != RdKafka::ERR_NO_ERROR) {
-      std::cerr << now() << ": Failed to subscribe to " << topics.size() << " topics: "
-                << RdKafka::err2str(resp) << std::endl;
+      std::cerr << now() << ": Failed to subscribe to " << topics.size()
+                << " topics: " << RdKafka::err2str(resp) << std::endl;
      exit(1);
    }
|
|
@@ -52,116 +52,108 @@
 */
#include "rdkafkacpp.h"

static void metadata_print (const RdKafka::Metadata *metadata) {
  std::cout << "Number of topics: " << metadata->topics()->size()
            << std::endl;
static void metadata_print(const RdKafka::Metadata *metadata) {
  std::cout << "Number of topics: " << metadata->topics()->size() << std::endl;

  /* Iterate topics */
  RdKafka::Metadata::TopicMetadataIterator it;
  for (it = metadata->topics()->begin();
       it != metadata->topics()->end();
       ++it)
    std::cout << "  " << (*it)->topic() << " has "
              << (*it)->partitions()->size() << " partitions." << std::endl;
  /* Iterate topics */
  RdKafka::Metadata::TopicMetadataIterator it;
  for (it = metadata->topics()->begin(); it != metadata->topics()->end(); ++it)
    std::cout << "  " << (*it)->topic() << " has "
              << (*it)->partitions()->size() << " partitions." << std::endl;
}


class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb {
  /* This SSL cert verification callback simply prints the incoming
   * parameters. It provides no validation, everything is ok. */
 public:
  bool ssl_cert_verify_cb (const std::string &broker_name,
                           int32_t broker_id,
                           int *x509_error,
                           int depth,
                           const char *buf,
                           size_t size,
                           std::string &errstr) {
    std::cout << "ssl_cert_verify_cb :" <<
      ": broker_name=" << broker_name <<
      ", broker_id=" << broker_id <<
      ", x509_error=" << *x509_error <<
      ", depth=" << depth <<
      ", buf size=" << size << std::endl;
  /* This SSL cert verification callback simply prints the incoming
   * parameters. It provides no validation, everything is ok. */
 public:
  bool ssl_cert_verify_cb(const std::string &broker_name,
                          int32_t broker_id,
                          int *x509_error,
                          int depth,
                          const char *buf,
                          size_t size,
                          std::string &errstr) {
    std::cout << "ssl_cert_verify_cb :"
              << ": broker_name=" << broker_name << ", broker_id=" << broker_id
              << ", x509_error=" << *x509_error << ", depth=" << depth
              << ", buf size=" << size << std::endl;

    return true;
  }
    return true;
  }
};


int main (int argc, char **argv) {
  std::string brokers;
  std::string errstr;
  std::string engine_path;
  std::string ca_location;
int main(int argc, char **argv) {
  std::string brokers;
  std::string errstr;
  std::string engine_path;
  std::string ca_location;

  /*
   * Create configuration objects
   */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  std::string engine_id;
  std::string engine_callback_data;
  int opt;
  /*
   * Create configuration objects
   */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  std::string engine_id;
  std::string engine_callback_data;
  int opt;

  if (conf->set("security.protocol", "ssl", errstr) !=
      RdKafka::Conf::CONF_OK) {
  if (conf->set("security.protocol", "ssl", errstr) != RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }

  while ((opt = getopt(argc, argv, "b:p:c:t:d:i:e:X:")) != -1) {
    switch (opt) {
    case 'b':
      brokers = optarg;
      break;
    case 'p':
      engine_path = optarg;
      break;
    case 'c':
      ca_location = optarg;
      break;
    case 'i':
      engine_id = optarg;
      break;
    case 'e':
      engine_callback_data = optarg;
      break;
    case 'd':
      if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'X': {
      char *name, *val;

      name = optarg;
      if (!(val = strchr(name, '='))) {
        std::cerr << "%% Expected -X property=value, not " << name << std::endl;
        exit(1);
      }

      *val = '\0';
      val++;

      if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
    } break;

    default:
      goto usage;
    }
  }

  while ((opt = getopt(argc, argv, "b:p:c:t:d:i:e:X:")) != -1) {
    switch (opt) {
    case 'b':
      brokers = optarg;
      break;
    case 'p':
      engine_path = optarg;
      break;
    case 'c':
      ca_location = optarg;
      break;
    case 'i':
      engine_id = optarg;
      break;
    case 'e':
      engine_callback_data = optarg;
      break;
    case 'd':
      if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'X': {
      char *name, *val;

      name = optarg;
      if (!(val = strchr(name, '='))) {
        std::cerr << "%% Expected -X property=value, not " <<
          name << std::endl;
        exit(1);
      }

      *val = '\0';
      val++;

      if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
    }
    break;

    default:
      goto usage;
    }
  }

  if (brokers.empty() || engine_path.empty() || optind != argc) {
  usage:
    std::string features;
    conf->get("builtin.features", features);
    fprintf(stderr,
  if (brokers.empty() || engine_path.empty() || optind != argc) {
  usage:
    std::string features;
    conf->get("builtin.features", features);
    fprintf(stderr,
            "Usage: %s [options] -b <brokers> -p <engine-path> \n"
            "\n"
            "OpenSSL engine integration example. This example fetches\n"

@@ -180,83 +172,78 @@ int main (int argc, char **argv) {
            " -X <prop=name>       Set arbitrary librdkafka configuration"
            " property\n"
            "\n",
            argv[0],
            RdKafka::version_str().c_str(), RdKafka::version(),
            features.c_str(),
            RdKafka::get_debug_contexts().c_str());
    exit(1);
  }
            argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
            features.c_str(), RdKafka::get_debug_contexts().c_str());
    exit(1);
  }

  if (conf->set("bootstrap.servers", brokers, errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }
  if (conf->set("bootstrap.servers", brokers, errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }

  if (conf->set("ssl.engine.location", engine_path, errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }
  if (conf->set("ssl.engine.location", engine_path, errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }

  if (ca_location.length() > 0 &&
      conf->set("ssl.ca.location", ca_location, errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }
  if (ca_location.length() > 0 && conf->set("ssl.ca.location", ca_location,
                                            errstr) != RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }

  if (engine_id.length() > 0 &&
      conf->set("ssl.engine.id", engine_id, errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }
  if (engine_id.length() > 0 &&
      conf->set("ssl.engine.id", engine_id, errstr) != RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }

  /* engine_callback_data needs to be persistent
   * and outlive the lifetime of the Kafka client handle. */
  if (engine_callback_data.length() > 0 &&
      conf->set_engine_callback_data((void *) engine_callback_data.c_str(),
                                     errstr) != RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }
  /* engine_callback_data needs to be persistent
   * and outlive the lifetime of the Kafka client handle. */
  if (engine_callback_data.length() > 0 &&
      conf->set_engine_callback_data((void *)engine_callback_data.c_str(),
                                     errstr) != RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }

  /* We use the Certificiate verification callback to print the
   * certificate name being used. */
  PrintingSSLVerifyCb ssl_verify_cb;
  /* We use the Certificiate verification callback to print the
   * certificate name being used. */
  PrintingSSLVerifyCb ssl_verify_cb;

  if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }
  if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }

  /*
   * Create producer using accumulated global configuration.
   */
  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  if (!producer) {
    std::cerr << "Failed to create producer: " << errstr << std::endl;
    exit(1);
  }
  /*
   * Create producer using accumulated global configuration.
   */
  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  if (!producer) {
    std::cerr << "Failed to create producer: " << errstr << std::endl;
    exit(1);
  }

  std::cout << "% Created producer " << producer->name() << std::endl;
  std::cout << "% Created producer " << producer->name() << std::endl;

  class RdKafka::Metadata *metadata;
  class RdKafka::Metadata *metadata;

  /* Fetch metadata */
  RdKafka::ErrorCode err = producer->metadata(true, NULL,
                                              &metadata, 5000);
  if (err != RdKafka::ERR_NO_ERROR)
    std::cerr << "%% Failed to acquire metadata: " <<
      RdKafka::err2str(err) << std::endl;
  /* Fetch metadata */
  RdKafka::ErrorCode err = producer->metadata(true, NULL, &metadata, 5000);
  if (err != RdKafka::ERR_NO_ERROR)
    std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err)
              << std::endl;

  metadata_print(metadata);
  metadata_print(metadata);

  delete metadata;
  delete producer;
  delete conf;
  delete metadata;
  delete producer;
  delete conf;

  return 0;
  return 0;
}

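The hunk above configures the OpenSSL engine through the C++ Conf API. For readers following the C examples later in this same commit, a minimal sketch of the equivalent C-side configuration (not part of this commit; the engine path and id are placeholder values, and the ssl.engine.* properties require a librdkafka build with OpenSSL support):

    #include <stdio.h>
    #include <librdkafka/rdkafka.h>

    /* Sketch: point a C client at an OpenSSL engine. The .so path and
     * the engine id below are hypothetical example values. */
    static rd_kafka_conf_t *make_ssl_engine_conf(void) {
            rd_kafka_conf_t *conf = rd_kafka_conf_new();
            char errstr[512];

            if (rd_kafka_conf_set(conf, "security.protocol", "ssl", errstr,
                                  sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                rd_kafka_conf_set(conf, "ssl.engine.location",
                                  "/usr/lib/engines/pkcs11.so", errstr,
                                  sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                rd_kafka_conf_set(conf, "ssl.engine.id", "pkcs11", errstr,
                                  sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                    fprintf(stderr, "%s\n", errstr);
                    rd_kafka_conf_destroy(conf);
                    return NULL;
            }
            return conf;
    }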
@@ -47,7 +47,7 @@ static volatile sig_atomic_t run = 1;
/**
 * @brief Signal termination of program
 */
static void stop (int sig) {
static void stop(int sig) {
        run = 0;
        fclose(stdin); /* abort fgets() */
}

@@ -64,15 +64,15 @@ static void stop (int sig) {
 * The callback is triggered from rd_kafka_poll() and executes on
 * the application's thread.
 */
static void dr_msg_cb (rd_kafka_t *rk,
                       const rd_kafka_message_t *rkmessage, void *opaque) {
static void
dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
        if (rkmessage->err)
                fprintf(stderr, "%% Message delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));
        else
                fprintf(stderr,
                        "%% Message delivered (%zd bytes, "
                        "partition %"PRId32")\n",
                        "partition %" PRId32 ")\n",
                        rkmessage->len, rkmessage->partition);

        /* The rkmessage is destroyed automatically by librdkafka */

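dr_msg_cb above only ever runs from inside rd_kafka_poll(), on the calling thread. A minimal sketch of how the callback is wired up and served (assumes the conf/rk/errstr variables of this example; error handling trimmed):

    /* Sketch: register the delivery report callback on the conf object
     * *before* rd_kafka_new(), then serve it by polling regularly. */
    rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);

    rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));

    /* ... rd_kafka_producev() calls ... */

    while (run)
            rd_kafka_poll(rk, 100 /* block for max 100ms */);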
@@ -80,13 +80,13 @@ static void dr_msg_cb (rd_kafka_t *rk,



int main (int argc, char **argv) {
        rd_kafka_t *rk;        /* Producer instance handle */
        rd_kafka_conf_t *conf; /* Temporary configuration object */
        char errstr[512];      /* librdkafka API error reporting buffer */
        char buf[512];         /* Message value temporary buffer */
        const char *brokers;   /* Argument: broker list */
        const char *topic;     /* Argument: topic to produce to */
int main(int argc, char **argv) {
        rd_kafka_t *rk;        /* Producer instance handle */
        rd_kafka_conf_t *conf; /* Temporary configuration object */
        char errstr[512];      /* librdkafka API error reporting buffer */
        char buf[512];         /* Message value temporary buffer */
        const char *brokers;   /* Argument: broker list */
        const char *topic;     /* Argument: topic to produce to */

        /*
         * Argument validation

@@ -109,8 +109,8 @@ int main (int argc, char **argv) {
         * host or host:port (default port 9092).
         * librdkafka will use the bootstrap brokers to acquire the full
         * set of brokers from the cluster. */
        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                return 1;
        }

@@ -132,8 +132,8 @@ int main (int argc, char **argv) {
         */
        rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
        if (!rk) {
                fprintf(stderr,
                        "%% Failed to create new producer: %s\n", errstr);
                fprintf(stderr, "%% Failed to create new producer: %s\n",
                        errstr);
                return 1;
        }


@@ -149,12 +149,12 @@ int main (int argc, char **argv) {
                size_t len = strlen(buf);
                rd_kafka_resp_err_t err;

                if (buf[len-1] == '\n') /* Remove newline */
                if (buf[len - 1] == '\n') /* Remove newline */
                        buf[--len] = '\0';

                if (len == 0) {
                        /* Empty line: only serve delivery reports */
                        rd_kafka_poll(rk, 0/*non-blocking */);
                        rd_kafka_poll(rk, 0 /*non-blocking */);
                        continue;
                }


@@ -170,28 +170,28 @@ int main (int argc, char **argv) {
                 */
        retry:
                err = rd_kafka_producev(
                        /* Producer handle */
                        rk,
                        /* Topic name */
                        RD_KAFKA_V_TOPIC(topic),
                        /* Make a copy of the payload. */
                        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                        /* Message value and length */
                        RD_KAFKA_V_VALUE(buf, len),
                        /* Per-Message opaque, provided in
                         * delivery report callback as
                         * msg_opaque. */
                        RD_KAFKA_V_OPAQUE(NULL),
                        /* End sentinel */
                        RD_KAFKA_V_END);
                    /* Producer handle */
                    rk,
                    /* Topic name */
                    RD_KAFKA_V_TOPIC(topic),
                    /* Make a copy of the payload. */
                    RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                    /* Message value and length */
                    RD_KAFKA_V_VALUE(buf, len),
                    /* Per-Message opaque, provided in
                     * delivery report callback as
                     * msg_opaque. */
                    RD_KAFKA_V_OPAQUE(NULL),
                    /* End sentinel */
                    RD_KAFKA_V_END);

                if (err) {
                        /*
                         * Failed to *enqueue* message for producing.
                         */
                        fprintf(stderr,
                                "%% Failed to produce to topic %s: %s\n",
                                topic, rd_kafka_err2str(err));
                                "%% Failed to produce to topic %s: %s\n", topic,
                                rd_kafka_err2str(err));

                        if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
                                /* If the internal queue is full, wait for

@@ -204,11 +204,13 @@ int main (int argc, char **argv) {
                                 * The internal queue is limited by the
                                 * configuration property
                                 * queue.buffering.max.messages */
                                rd_kafka_poll(rk, 1000/*block for max 1000ms*/);
                                rd_kafka_poll(rk,
                                              1000 /*block for max 1000ms*/);
                                goto retry;
                        }
                } else {
                        fprintf(stderr, "%% Enqueued message (%zd bytes) "
                        fprintf(stderr,
                                "%% Enqueued message (%zd bytes) "
                                "for topic %s\n",
                                len, topic);
                }

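The retry branch above backs off with a blocking poll whenever the local queue fills up. A complementary option is to size the queue itself; a sketch (the value is an arbitrary example, not taken from this commit):

    /* Sketch: enlarge the producer's local queue before rd_kafka_new()
     * so bursts hit RD_KAFKA_RESP_ERR__QUEUE_FULL less often. */
    if (rd_kafka_conf_set(conf, "queue.buffering.max.messages", "500000",
                          errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
            fprintf(stderr, "%s\n", errstr);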
@@ -225,7 +227,7 @@ int main (int argc, char **argv) {
                 * to make sure previously produced messages have their
                 * delivery report callback served (and any other callbacks
                 * you register). */
                rd_kafka_poll(rk, 0/*non-blocking*/);
                rd_kafka_poll(rk, 0 /*non-blocking*/);
        }


@@ -233,7 +235,7 @@ int main (int argc, char **argv) {
         * rd_kafka_flush() is an abstraction over rd_kafka_poll() which
         * waits for all messages to be delivered. */
        fprintf(stderr, "%% Flushing final messages..\n");
        rd_kafka_flush(rk, 10*1000 /* wait for max 10 seconds */);
        rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */);

        /* If the output queue is still not empty there is an issue
         * with producing messages to the clusters. */

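Since flushing is the last chance for delivery reports to fire, a typical teardown checks the outbound queue length afterwards; a sketch of the full sequence (it mirrors this example, not additional commit content):

    /* Sketch: producer teardown. rd_kafka_flush() serves delivery
     * reports while waiting; rd_kafka_outq_len() reports leftovers. */
    rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */);
    if (rd_kafka_outq_len(rk) > 0)
            fprintf(stderr, "%% %d message(s) were not delivered\n",
                    rd_kafka_outq_len(rk));
    rd_kafka_destroy(rk);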
@@ -52,34 +52,34 @@

static volatile sig_atomic_t run = 1;

static void sigterm (int sig) {
static void sigterm(int sig) {
  run = 0;
}


class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
 public:
  void dr_cb (RdKafka::Message &message) {
 public:
  void dr_cb(RdKafka::Message &message) {
    /* If message.err() is non-zero the message delivery failed permanently
     * for the message. */
    if (message.err())
      std::cerr << "% Message delivery failed: " << message.errstr() << std::endl;
      std::cerr << "% Message delivery failed: " << message.errstr()
                << std::endl;
    else
      std::cerr << "% Message delivered to topic " << message.topic_name() <<
        " [" << message.partition() << "] at offset " <<
        message.offset() << std::endl;
      std::cerr << "% Message delivered to topic " << message.topic_name()
                << " [" << message.partition() << "] at offset "
                << message.offset() << std::endl;
  }
};

int main (int argc, char **argv) {

int main(int argc, char **argv) {
  if (argc != 3) {
    std::cerr << "Usage: " << argv[0] << " <brokers> <topic>\n";
    exit(1);
  }

  std::string brokers = argv[1];
  std::string topic = argv[2];
  std::string topic = argv[2];

  /*
   * Create configuration object

@@ -133,8 +133,8 @@ int main (int argc, char **argv) {
  /*
   * Read messages from stdin and produce to broker.
   */
  std::cout << "% Type message value and hit enter " <<
    "to produce message." << std::endl;
  std::cout << "% Type message value and hit enter "
            << "to produce message." << std::endl;

  for (std::string line; run && std::getline(std::cin, line);) {
    if (line.empty()) {

@@ -153,32 +153,31 @@ int main (int argc, char **argv) {
     * has been delivered (or failed permanently after retries).
     */
  retry:
    RdKafka::ErrorCode err =
      producer->produce(
                        /* Topic name */
                        topic,
                        /* Any Partition: the builtin partitioner will be
                         * used to assign the message to a topic based
                         * on the message key, or random partition if
                         * the key is not set. */
                        RdKafka::Topic::PARTITION_UA,
                        /* Make a copy of the value */
                        RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
                        /* Value */
                        const_cast<char *>(line.c_str()), line.size(),
                        /* Key */
                        NULL, 0,
                        /* Timestamp (defaults to current time) */
                        0,
                        /* Message headers, if any */
                        NULL,
                        /* Per-message opaque value passed to
                         * delivery report */
                        NULL);
    RdKafka::ErrorCode err = producer->produce(
        /* Topic name */
        topic,
        /* Any Partition: the builtin partitioner will be
         * used to assign the message to a topic based
         * on the message key, or random partition if
         * the key is not set. */
        RdKafka::Topic::PARTITION_UA,
        /* Make a copy of the value */
        RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
        /* Value */
        const_cast<char *>(line.c_str()), line.size(),
        /* Key */
        NULL, 0,
        /* Timestamp (defaults to current time) */
        0,
        /* Message headers, if any */
        NULL,
        /* Per-message opaque value passed to
         * delivery report */
        NULL);

    if (err != RdKafka::ERR_NO_ERROR) {
      std::cerr << "% Failed to produce to topic " << topic << ": " <<
        RdKafka::err2str(err) << std::endl;
      std::cerr << "% Failed to produce to topic " << topic << ": "
                << RdKafka::err2str(err) << std::endl;

      if (err == RdKafka::ERR__QUEUE_FULL) {
        /* If the internal queue is full, wait for

@@ -191,13 +190,13 @@ int main (int argc, char **argv) {
         * The internal queue is limited by the
         * configuration property
         * queue.buffering.max.messages */
        producer->poll(1000/*block for max 1000ms*/);
        producer->poll(1000 /*block for max 1000ms*/);
        goto retry;
      }

    } else {
      std::cerr << "% Enqueued message (" << line.size() << " bytes) " <<
        "for topic " << topic << std::endl;
      std::cerr << "% Enqueued message (" << line.size() << " bytes) "
                << "for topic " << topic << std::endl;
    }

    /* A producer application should continually serve

@@ -217,11 +216,11 @@ int main (int argc, char **argv) {
   * flush() is an abstraction over poll() which
   * waits for all messages to be delivered. */
  std::cerr << "% Flushing final messages..." << std::endl;
  producer->flush(10*1000 /* wait for max 10 seconds */);
  producer->flush(10 * 1000 /* wait for max 10 seconds */);

  if (producer->outq_len() > 0)
    std::cerr << "% " << producer->outq_len() <<
      " message(s) were not delivered" << std::endl;
    std::cerr << "% " << producer->outq_len()
              << " message(s) were not delivered" << std::endl;

  delete producer;

@@ -3,24 +3,24 @@
 *
 * Copyright (c) 2015, Magnus Edenhill
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.

@@ -44,63 +44,61 @@

/* Typical include path would be <librdkafka/rdkafka.h>, but this program
 * is builtin from within the librdkafka source tree and thus differs. */
#include "rdkafka.h"  /* for Kafka driver */
#include "rdkafka.h" /* for Kafka driver */


static volatile sig_atomic_t run = 1;
static rd_kafka_t *rk;
static int exit_eof = 0;
static int wait_eof = 0;  /* number of partitions awaiting EOF */
static int quiet = 0;
static enum {
        OUTPUT_HEXDUMP,
        OUTPUT_RAW,
static int wait_eof = 0; /* number of partitions awaiting EOF */
static int quiet = 0;
static enum {
        OUTPUT_HEXDUMP,
        OUTPUT_RAW,
} output = OUTPUT_HEXDUMP;

static void stop (int sig) {
static void stop(int sig) {
        if (!run)
                exit(1);
        run = 0;
        fclose(stdin); /* abort fgets() */
        run = 0;
        fclose(stdin); /* abort fgets() */
}


static void hexdump (FILE *fp, const char *name, const void *ptr, size_t len) {
        const char *p = (const char *)ptr;
        unsigned int of = 0;
static void hexdump(FILE *fp, const char *name, const void *ptr, size_t len) {
        const char *p = (const char *)ptr;
        unsigned int of = 0;


        if (name)
                fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
        if (name)
                fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);

        for (of = 0 ; of < len ; of += 16) {
                char hexen[16*3+1];
                char charen[16+1];
                int hof = 0;
        for (of = 0; of < len; of += 16) {
                char hexen[16 * 3 + 1];
                char charen[16 + 1];
                int hof = 0;

                int cof = 0;
                int i;
                int cof = 0;
                int i;

                for (i = of ; i < (int)of + 16 && i < (int)len ; i++) {
                        hof += sprintf(hexen+hof, "%02x ", p[i] & 0xff);
                        cof += sprintf(charen+cof, "%c",
                                       isprint((int)p[i]) ? p[i] : '.');
                }
                fprintf(fp, "%08x: %-48s %-16s\n",
                        of, hexen, charen);
        }
                for (i = of; i < (int)of + 16 && i < (int)len; i++) {
                        hof += sprintf(hexen + hof, "%02x ", p[i] & 0xff);
                        cof += sprintf(charen + cof, "%c",
                                       isprint((int)p[i]) ? p[i] : '.');
                }
                fprintf(fp, "%08x: %-48s %-16s\n", of, hexen, charen);
        }
}

/**
 * Kafka logger callback (optional)
 */
static void logger (const rd_kafka_t *rk, int level,
                    const char *fac, const char *buf) {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n",
                (int)tv.tv_sec, (int)(tv.tv_usec / 1000),
                level, fac, rd_kafka_name(rk), buf);
static void
logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", (int)tv.tv_sec,
                (int)(tv.tv_usec / 1000), level, fac, rd_kafka_name(rk), buf);
}



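The logger above is only used once it is installed on the configuration object. A sketch of the hookup, together with the log_level property that caps verbosity (syslog levels 0..7; the chosen value here is an example):

    /* Sketch: route librdkafka logs through the custom logger and cap
     * verbosity at informational (6). */
    rd_kafka_conf_set_log_cb(conf, logger);
    rd_kafka_conf_set(conf, "log_level", "6", NULL, 0);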
@@ -111,32 +109,34 @@ static void logger (const rd_kafka_t *rk, int level,
 * librdkafka to the application. The application needs to check
 * the `rkmessage->err` field for this purpose.
 */
static void msg_consume (rd_kafka_message_t *rkmessage) {
        if (rkmessage->err) {
                if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                        fprintf(stderr,
                                "%% Consumer reached end of %s [%"PRId32"] "
                                "message queue at offset %"PRId64"\n",
                                rd_kafka_topic_name(rkmessage->rkt),
                                rkmessage->partition, rkmessage->offset);
static void msg_consume(rd_kafka_message_t *rkmessage) {
        if (rkmessage->err) {
                if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                        fprintf(stderr,
                                "%% Consumer reached end of %s [%" PRId32
                                "] "
                                "message queue at offset %" PRId64 "\n",
                                rd_kafka_topic_name(rkmessage->rkt),
                                rkmessage->partition, rkmessage->offset);

                        if (exit_eof && --wait_eof == 0) {
                        if (exit_eof && --wait_eof == 0) {
                                fprintf(stderr,
                                        "%% All partition(s) reached EOF: "
                                        "exiting\n");
                                run = 0;
                                run = 0;
                        }

                        return;
                }
                        return;
                }

                if (rkmessage->rkt)
                        fprintf(stderr, "%% Consume error for "
                                "topic \"%s\" [%"PRId32"] "
                                "offset %"PRId64": %s\n",
                        fprintf(stderr,
                                "%% Consume error for "
                                "topic \"%s\" [%" PRId32
                                "] "
                                "offset %" PRId64 ": %s\n",
                                rd_kafka_topic_name(rkmessage->rkt),
                                rkmessage->partition,
                                rkmessage->offset,
                                rkmessage->partition, rkmessage->offset,
                                rd_kafka_message_errstr(rkmessage));
                else
                        fprintf(stderr, "%% Consumer error: %s: %s\n",

@@ -146,59 +146,58 @@ static void msg_consume (rd_kafka_message_t *rkmessage) {
                if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
                    rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
                        run = 0;
                return;
        }
                        run = 0;
                return;
        }

        if (!quiet)
                fprintf(stdout, "%% Message (topic %s [%"PRId32"], "
                        "offset %"PRId64", %zd bytes):\n",
        if (!quiet)
                fprintf(stdout,
                        "%% Message (topic %s [%" PRId32
                        "], "
                        "offset %" PRId64 ", %zd bytes):\n",
                        rd_kafka_topic_name(rkmessage->rkt),
                        rkmessage->partition,
                        rkmessage->offset, rkmessage->len);
                        rkmessage->partition, rkmessage->offset,
                        rkmessage->len);

        if (rkmessage->key_len) {
                if (output == OUTPUT_HEXDUMP)
                        hexdump(stdout, "Message Key",
                                rkmessage->key, rkmessage->key_len);
                else
                        printf("Key: %.*s\n",
                               (int)rkmessage->key_len, (char *)rkmessage->key);
        }
        if (rkmessage->key_len) {
                if (output == OUTPUT_HEXDUMP)
                        hexdump(stdout, "Message Key", rkmessage->key,
                                rkmessage->key_len);
                else
                        printf("Key: %.*s\n", (int)rkmessage->key_len,
                               (char *)rkmessage->key);
        }

        if (output == OUTPUT_HEXDUMP)
                hexdump(stdout, "Message Payload",
                        rkmessage->payload, rkmessage->len);
        else
                printf("%.*s\n",
                       (int)rkmessage->len, (char *)rkmessage->payload);
        if (output == OUTPUT_HEXDUMP)
                hexdump(stdout, "Message Payload", rkmessage->payload,
                        rkmessage->len);
        else
                printf("%.*s\n", (int)rkmessage->len,
                       (char *)rkmessage->payload);
}


static void print_partition_list (FILE *fp,
                                  const rd_kafka_topic_partition_list_t
                                  *partitions) {
static void
print_partition_list(FILE *fp,
                     const rd_kafka_topic_partition_list_t *partitions) {
        int i;
        for (i = 0 ; i < partitions->cnt ; i++) {
                fprintf(fp, "%s %s [%"PRId32"] offset %"PRId64,
                        i > 0 ? ",":"",
                        partitions->elems[i].topic,
        for (i = 0; i < partitions->cnt; i++) {
                fprintf(fp, "%s %s [%" PRId32 "] offset %" PRId64,
                        i > 0 ? "," : "", partitions->elems[i].topic,
                        partitions->elems[i].partition,
                        partitions->elems[i].offset);
        }
        fprintf(fp, "\n");

}
static void rebalance_cb (rd_kafka_t *rk,
                          rd_kafka_resp_err_t err,
                          rd_kafka_topic_partition_list_t *partitions,
                          void *opaque) {
        rd_kafka_error_t *error = NULL;
static void rebalance_cb(rd_kafka_t *rk,
                         rd_kafka_resp_err_t err,
                         rd_kafka_topic_partition_list_t *partitions,
                         void *opaque) {
        rd_kafka_error_t *error = NULL;
        rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;

        fprintf(stderr, "%% Consumer group rebalanced: ");

        switch (err)
        {
        switch (err) {
        case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
                fprintf(stderr, "assigned (%s):\n",
                        rd_kafka_rebalance_protocol(rk));

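rebalance_cb above must use the incremental assignment API when the group runs the cooperative protocol, and the classic whole-set API otherwise. A trimmed sketch of the dispatch shape this example follows (not new commit content):

    /* Sketch: protocol-aware (un)assignment inside a rebalance callback.
     * Incremental calls return an rd_kafka_error_t that must be freed. */
    rd_kafka_error_t *error = NULL;

    if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
            if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
                    error = rd_kafka_incremental_assign(rk, partitions);
            else
                    rd_kafka_assign(rk, partitions);
    } else {
            if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
                    error = rd_kafka_incremental_unassign(rk, partitions);
            else
                    rd_kafka_assign(rk, NULL);
    }

    if (error) {
            fprintf(stderr, "incremental (un)assign failed: %s\n",
                    rd_kafka_error_string(error));
            rd_kafka_error_destroy(error);
    }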
@@ -220,14 +219,13 @@ static void rebalance_cb (rd_kafka_t *rk,
                        error = rd_kafka_incremental_unassign(rk, partitions);
                        wait_eof -= partitions->cnt;
                } else {
                        ret_err = rd_kafka_assign(rk, NULL);
                        ret_err = rd_kafka_assign(rk, NULL);
                        wait_eof = 0;
                }
                break;

        default:
                fprintf(stderr, "failed: %s\n",
                        rd_kafka_err2str(err));
                fprintf(stderr, "failed: %s\n", rd_kafka_err2str(err));
                rd_kafka_assign(rk, NULL);
                break;
        }

@@ -243,7 +241,7 @@ static void rebalance_cb (rd_kafka_t *rk,
}


static int describe_groups (rd_kafka_t *rk, const char *group) {
static int describe_groups(rd_kafka_t *rk, const char *group) {
        rd_kafka_resp_err_t err;
        const struct rd_kafka_group_list *grplist;
        int i;

@@ -256,20 +254,21 @@ static int describe_groups (rd_kafka_t *rk, const char *group) {
                return -1;
        }

        for (i = 0 ; i < grplist->group_cnt ; i++) {
        for (i = 0; i < grplist->group_cnt; i++) {
                const struct rd_kafka_group_info *gi = &grplist->groups[i];
                int j;

                printf("Group \"%s\" in state %s on broker %d (%s:%d)\n",
                       gi->group, gi->state,
                       gi->broker.id, gi->broker.host, gi->broker.port);
                       gi->group, gi->state, gi->broker.id, gi->broker.host,
                       gi->broker.port);
                if (gi->err)
                        printf(" Error: %s\n", rd_kafka_err2str(gi->err));
                printf(" Protocol type \"%s\", protocol \"%s\", "
                       "with %d member(s):\n",
                       gi->protocol_type, gi->protocol, gi->member_cnt);
                printf(
                    " Protocol type \"%s\", protocol \"%s\", "
                    "with %d member(s):\n",
                    gi->protocol_type, gi->protocol, gi->member_cnt);

                for (j = 0 ; j < gi->member_cnt ; j++) {
                for (j = 0; j < gi->member_cnt; j++) {
                        const struct rd_kafka_group_member_info *mi;
                        mi = &gi->members[j];


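describe_groups() above is driven by rd_kafka_list_groups(); the call-and-release shape, as a sketch (the timeout value is an arbitrary example):

    /* Sketch: fetch consumer group metadata and release it. Passing
     * NULL as the group name lists all groups in the cluster. */
    const struct rd_kafka_group_list *grplist;
    rd_kafka_resp_err_t err =
            rd_kafka_list_groups(rk, group /* or NULL */, &grplist, 10000);
    if (!err) {
            /* ... inspect grplist->groups[0 .. group_cnt-1] ... */
            rd_kafka_group_list_destroy(grplist);
    }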
@@ -293,187 +292,182 @@ static int describe_groups (rd_kafka_t *rk, const char *group) {



static void sig_usr1 (int sig) {
        rd_kafka_dump(stdout, rk);
static void sig_usr1(int sig) {
        rd_kafka_dump(stdout, rk);
}

int main (int argc, char **argv) {
        char mode = 'C';
        char *brokers = "localhost:9092";
        int opt;
        rd_kafka_conf_t *conf;
        char errstr[512];
        const char *debug = NULL;
        int do_conf_dump = 0;
        char tmp[16];
int main(int argc, char **argv) {
        char mode = 'C';
        char *brokers = "localhost:9092";
        int opt;
        rd_kafka_conf_t *conf;
        char errstr[512];
        const char *debug = NULL;
        int do_conf_dump = 0;
        char tmp[16];
        rd_kafka_resp_err_t err;
        char *group = NULL;
        rd_kafka_topic_partition_list_t *topics;
        int is_subscription;
        int i;

        quiet = !isatty(STDIN_FILENO);
        quiet = !isatty(STDIN_FILENO);

        /* Kafka configuration */
        conf = rd_kafka_conf_new();
        /* Kafka configuration */
        conf = rd_kafka_conf_new();

        /* Set logger */
        rd_kafka_conf_set_log_cb(conf, logger);

        /* Quick termination */
        snprintf(tmp, sizeof(tmp), "%i", SIGIO);
        rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
        /* Quick termination */
        snprintf(tmp, sizeof(tmp), "%i", SIGIO);
        rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);

        while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) {
                switch (opt) {
                case 'b':
                        brokers = optarg;
                        break;
        while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) {
                switch (opt) {
                case 'b':
                        brokers = optarg;
                        break;
                case 'g':
                        group = optarg;
                        break;
                case 'e':
                        exit_eof = 1;
                        break;
                case 'd':
                        debug = optarg;
                        break;
                case 'q':
                        quiet = 1;
                        break;
                case 'A':
                        output = OUTPUT_RAW;
                        break;
                case 'X':
                {
                        char *name, *val;
                        rd_kafka_conf_res_t res;
                case 'e':
                        exit_eof = 1;
                        break;
                case 'd':
                        debug = optarg;
                        break;
                case 'q':
                        quiet = 1;
                        break;
                case 'A':
                        output = OUTPUT_RAW;
                        break;
                case 'X': {
                        char *name, *val;
                        rd_kafka_conf_res_t res;

                        if (!strcmp(optarg, "list") ||
                            !strcmp(optarg, "help")) {
                                rd_kafka_conf_properties_show(stdout);
                                exit(0);
                        }
                        if (!strcmp(optarg, "list") ||
                            !strcmp(optarg, "help")) {
                                rd_kafka_conf_properties_show(stdout);
                                exit(0);
                        }

                        if (!strcmp(optarg, "dump")) {
                                do_conf_dump = 1;
                                continue;
                        }
                        if (!strcmp(optarg, "dump")) {
                                do_conf_dump = 1;
                                continue;
                        }

                        name = optarg;
                        if (!(val = strchr(name, '='))) {
                                fprintf(stderr, "%% Expected "
                                        "-X property=value, not %s\n", name);
                                exit(1);
                        }
                        name = optarg;
                        if (!(val = strchr(name, '='))) {
                                fprintf(stderr,
                                        "%% Expected "
                                        "-X property=value, not %s\n",
                                        name);
                                exit(1);
                        }

                        *val = '\0';
                        val++;
                        *val = '\0';
                        val++;

                        res = rd_kafka_conf_set(conf, name, val,
                                                errstr, sizeof(errstr));
                        res = rd_kafka_conf_set(conf, name, val, errstr,
                                                sizeof(errstr));

                        if (res != RD_KAFKA_CONF_OK) {
                                fprintf(stderr, "%% %s\n", errstr);
                                exit(1);
                        }
                }
                break;
                        if (res != RD_KAFKA_CONF_OK) {
                                fprintf(stderr, "%% %s\n", errstr);
                                exit(1);
                        }
                } break;

                case 'D':
                case 'O':
                        mode = opt;
                        break;

                default:
                        goto usage;
                }
        }
                default:
                        goto usage;
                }
        }


        if (do_conf_dump) {
                const char **arr;
                size_t cnt;
                int pass;
        if (do_conf_dump) {
                const char **arr;
                size_t cnt;
                int pass;

                for (pass = 0 ; pass < 2 ; pass++) {
                        if (pass == 0) {
                                arr = rd_kafka_conf_dump(conf, &cnt);
                                printf("# Global config\n");
                        } else {
                for (pass = 0; pass < 2; pass++) {
                        if (pass == 0) {
                                arr = rd_kafka_conf_dump(conf, &cnt);
                                printf("# Global config\n");
                        } else {
                                rd_kafka_topic_conf_t *topic_conf =
                                        rd_kafka_conf_get_default_topic_conf(
                                                conf);
                                    rd_kafka_conf_get_default_topic_conf(conf);
                                if (topic_conf) {
                                        printf("# Topic config\n");
                                        arr = rd_kafka_topic_conf_dump(
                                                topic_conf, &cnt);
                                            topic_conf, &cnt);
                                } else {
                                        arr = NULL;
                                }
                        }
                        }

                        if (!arr)
                                continue;

                        for (i = 0 ; i < (int)cnt ; i += 2)
                                printf("%s = %s\n",
                                       arr[i], arr[i+1]);
                        for (i = 0; i < (int)cnt; i += 2)
                                printf("%s = %s\n", arr[i], arr[i + 1]);

                        printf("\n");
                        rd_kafka_conf_dump_free(arr, cnt);
                }
                        rd_kafka_conf_dump_free(arr, cnt);
                }

                exit(0);
        }
                exit(0);
        }


        if (strchr("OC", mode) && optind == argc) {
        usage:
                fprintf(stderr,
                        "Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
                        "\n"
                        "librdkafka version %s (0x%08x)\n"
                        "\n"
                        " Options:\n"
        if (strchr("OC", mode) && optind == argc) {
        usage:
                fprintf(stderr,
                        "Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
                        "\n"
                        "librdkafka version %s (0x%08x)\n"
                        "\n"
                        " Options:\n"
                        "  -g <group>      Consumer group (%s)\n"
                        "  -b <brokers>    Broker address (%s)\n"
                        "  -e              Exit consumer when last message\n"
                        "                  in partition has been received.\n"
                        "  -b <brokers>    Broker address (%s)\n"
                        "  -e              Exit consumer when last message\n"
                        "                  in partition has been received.\n"
                        "  -D              Describe group.\n"
                        "  -O              Get commmitted offset(s)\n"
                        "  -d [facs..]     Enable debugging contexts:\n"
                        "                  %s\n"
                        "  -q              Be quiet\n"
                        "  -A              Raw payload output (consumer)\n"
                        "  -X <prop=name>  Set arbitrary librdkafka "
                        "configuration property\n"
                        "                  Use '-X list' to see the full list\n"
                        "                  of supported properties.\n"
                        "\n"
                        "  -d [facs..]     Enable debugging contexts:\n"
                        "                  %s\n"
                        "  -q              Be quiet\n"
                        "  -A              Raw payload output (consumer)\n"
                        "  -X <prop=name>  Set arbitrary librdkafka "
                        "configuration property\n"
                        "                  Use '-X list' to see the full list\n"
                        "                  of supported properties.\n"
                        "\n"
                        "For balanced consumer groups use the 'topic1 topic2..'"
                        " format\n"
                        "and for static assignment use "
                        "'topic1:part1 topic1:part2 topic2:part1..'\n"
                        "\n",
                        argv[0],
                        rd_kafka_version_str(), rd_kafka_version(),
                        group, brokers,
                        RD_KAFKA_DEBUG_CONTEXTS);
                exit(1);
        }
                        "\n",
                        argv[0], rd_kafka_version_str(), rd_kafka_version(),
                        group, brokers, RD_KAFKA_DEBUG_CONTEXTS);
                exit(1);
        }


        signal(SIGINT, stop);
        signal(SIGUSR1, sig_usr1);
        signal(SIGINT, stop);
        signal(SIGUSR1, sig_usr1);

        if (debug &&
            rd_kafka_conf_set(conf, "debug", debug, errstr, sizeof(errstr)) !=
            RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
                        errstr, debug);
                exit(1);
        }
        if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr,
                                       sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
                        errstr, debug);
                exit(1);
        }

        /*
         * Client/Consumer group

@@ -483,9 +477,8 @@ int main (int argc, char **argv) {
                /* Consumer groups require a group id */
                if (!group)
                        group = "rdkafka_consumer_example";
                if (rd_kafka_conf_set(conf, "group.id", group,
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK) {
                if (rd_kafka_conf_set(conf, "group.id", group, errstr,
                                      sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                        fprintf(stderr, "%% %s\n", errstr);
                        exit(1);
                }

@@ -493,22 +486,21 @@ int main (int argc, char **argv) {
                /* Callback called on partition assignment changes */
                rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);

                rd_kafka_conf_set(conf, "enable.partition.eof", "true",
                                  NULL, 0);
                rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL,
                                  0);
        }

        /* Set bootstrap servers */
        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% %s\n", errstr);
                exit(1);
        }

        /* Create Kafka handle */
        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf,
                                errstr, sizeof(errstr)))) {
                fprintf(stderr,
                        "%% Failed to create new consumer: %s\n",
        if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
                                sizeof(errstr)))) {
                fprintf(stderr, "%% Failed to create new consumer: %s\n",
                        errstr);
                exit(1);
        }

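With enable.partition.eof set above, end-of-partition shows up in the message stream as a synthetic message whose err field is RD_KAFKA_RESP_ERR__PARTITION_EOF. A minimal poll loop sketch around this file's msg_consume() (assumes this file's includes; not commit content):

    /* Sketch: high-level consumer loop; every returned message,
     * including EOF events, must be destroyed by the application. */
    while (run) {
            rd_kafka_message_t *rkmessage = rd_kafka_consumer_poll(rk, 500);
            if (!rkmessage)
                    continue; /* poll timed out */
            msg_consume(rkmessage);
            rd_kafka_message_destroy(rkmessage);
    }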
@@ -525,17 +517,17 @@ int main (int argc, char **argv) {
        /* Redirect rd_kafka_poll() to consumer_poll() */
        rd_kafka_poll_set_consumer(rk);

        topics = rd_kafka_topic_partition_list_new(argc - optind);
        topics = rd_kafka_topic_partition_list_new(argc - optind);
        is_subscription = 1;
        for (i = optind ; i < argc ; i++) {
        for (i = optind; i < argc; i++) {
                /* Parse "topic[:part] */
                char *topic = argv[i];
                char *t;
                int32_t partition = -1;

                if ((t = strstr(topic, ":"))) {
                        *t = '\0';
                        partition = atoi(t+1);
                        *t = '\0';
                        partition = atoi(t + 1);
                        is_subscription = 0; /* is assignment */
                        wait_eof++;
                }

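For the static-assignment branch parsed above, a start offset can be seeded on each list element before rd_kafka_assign(); a sketch using the built-in logical offsets:

    /* Sketch: begin a static assignment at the start (or end) of the
     * partition instead of the last committed offset. */
    rd_kafka_topic_partition_t *rktpar =
            rd_kafka_topic_partition_list_add(topics, topic, partition);
    rktpar->offset = RD_KAFKA_OFFSET_BEGINNING; /* or RD_KAFKA_OFFSET_END */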
@@ -553,16 +545,14 @@ int main (int argc, char **argv) {
                exit(1);
        }

        for (i = 0 ; i < topics->cnt ; i++) {
        for (i = 0; i < topics->cnt; i++) {
                rd_kafka_topic_partition_t *p = &topics->elems[i];
                printf("Topic \"%s\" partition %"PRId32,
                       p->topic, p->partition);
                printf("Topic \"%s\" partition %" PRId32, p->topic,
                       p->partition);
                if (p->err)
                        printf(" error %s",
                               rd_kafka_err2str(p->err));
                        printf(" error %s", rd_kafka_err2str(p->err));
                else {
                        printf(" offset %"PRId64"",
                               p->offset);
                        printf(" offset %" PRId64 "", p->offset);

                        if (p->metadata_size)
                                printf(" (%d bytes of metadata)",

@@ -588,8 +578,7 @@ int main (int argc, char **argv) {
                fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt);

                if ((err = rd_kafka_assign(rk, topics))) {
                        fprintf(stderr,
                                "%% Failed to assign partitions: %s\n",
                        fprintf(stderr, "%% Failed to assign partitions: %s\n",
                                rd_kafka_err2str(err));
                }
        }

@@ -617,12 +606,12 @@ done:
        /* Destroy handle */
        rd_kafka_destroy(rk);

        /* Let background threads clean up and terminate cleanly. */
        run = 5;
        while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
                printf("Waiting for librdkafka to decommission\n");
        if (run <= 0)
                rd_kafka_dump(stdout, rk);
        /* Let background threads clean up and terminate cleanly. */
        run = 5;
        while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
                printf("Waiting for librdkafka to decommission\n");
        if (run <= 0)
                rd_kafka_dump(stdout, rk);

        return 0;
        return 0;
}

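The teardown above destroys the handle and then waits for librdkafka's background threads. For the high-level consumer path, the graceful order is to leave the group first; a sketch (this mirrors the shutdown librdkafka documents, not new commit content):

    /* Sketch: graceful consumer shutdown. rd_kafka_consumer_close()
     * commits final offsets and leaves the group before destruction. */
    err = rd_kafka_consumer_close(rk);
    if (err)
            fprintf(stderr, "%% Failed to close consumer: %s\n",
                    rd_kafka_err2str(err));
    rd_kafka_destroy(rk);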
@@ -3,24 +3,24 @@
 *
 * Copyright (c) 2014, Magnus Edenhill
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.

@@ -63,13 +63,13 @@


static volatile sig_atomic_t run = 1;
static bool exit_eof = false;
static int eof_cnt = 0;
static int partition_cnt = 0;
static int verbosity = 1;
static long msg_cnt = 0;
static int64_t msg_bytes = 0;
static void sigterm (int sig) {
static bool exit_eof = false;
static int eof_cnt = 0;
static int partition_cnt = 0;
static int verbosity = 1;
static long msg_cnt = 0;
static int64_t msg_bytes = 0;
static void sigterm(int sig) {
  run = 0;
}


@@ -77,81 +77,80 @@ static void sigterm (int sig) {
/**
 * @brief format a string timestamp from the current time
 */
static void print_time () {
static void print_time() {
#ifndef _WIN32
  struct timeval tv;
  char buf[64];
  gettimeofday(&tv, NULL);
  strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec));
  fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000));
  struct timeval tv;
  char buf[64];
  gettimeofday(&tv, NULL);
  strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec));
  fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000));
#else
  SYSTEMTIME lt = {0};
  GetLocalTime(&lt);
  // %Y-%m-%d %H:%M:%S.xxx:
  fprintf(stderr, "%04d-%02d-%02d %02d:%02d:%02d.%03d: ",
          lt.wYear, lt.wMonth, lt.wDay,
          lt.wHour, lt.wMinute, lt.wSecond, lt.wMilliseconds);
  SYSTEMTIME lt = {0};
  GetLocalTime(&lt);
  // %Y-%m-%d %H:%M:%S.xxx:
  fprintf(stderr, "%04d-%02d-%02d %02d:%02d:%02d.%03d: ", lt.wYear, lt.wMonth,
          lt.wDay, lt.wHour, lt.wMinute, lt.wSecond, lt.wMilliseconds);
#endif
}
class ExampleEventCb : public RdKafka::EventCb {
 public:
  void event_cb (RdKafka::Event &event) {

  void event_cb(RdKafka::Event &event) {
    print_time();

    switch (event.type())
    {
      case RdKafka::Event::EVENT_ERROR:
        if (event.fatal()) {
          std::cerr << "FATAL ";
          run = 0;
        }
        std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
          event.str() << std::endl;
        break;
    switch (event.type()) {
    case RdKafka::Event::EVENT_ERROR:
      if (event.fatal()) {
        std::cerr << "FATAL ";
        run = 0;
      }
      std::cerr << "ERROR (" << RdKafka::err2str(event.err())
                << "): " << event.str() << std::endl;
      break;

      case RdKafka::Event::EVENT_STATS:
        std::cerr << "\"STATS\": " << event.str() << std::endl;
        break;
    case RdKafka::Event::EVENT_STATS:
      std::cerr << "\"STATS\": " << event.str() << std::endl;
      break;

      case RdKafka::Event::EVENT_LOG:
        fprintf(stderr, "LOG-%i-%s: %s\n",
                event.severity(), event.fac().c_str(), event.str().c_str());
        break;
    case RdKafka::Event::EVENT_LOG:
      fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(),
              event.str().c_str());
      break;

      case RdKafka::Event::EVENT_THROTTLE:
        std::cerr << "THROTTLED: " << event.throttle_time() << "ms by " <<
          event.broker_name() << " id " << (int)event.broker_id() << std::endl;
        break;
    case RdKafka::Event::EVENT_THROTTLE:
      std::cerr << "THROTTLED: " << event.throttle_time() << "ms by "
                << event.broker_name() << " id " << (int)event.broker_id()
                << std::endl;
      break;

      default:
        std::cerr << "EVENT " << event.type() <<
          " (" << RdKafka::err2str(event.err()) << "): " <<
          event.str() << std::endl;
        break;
    default:
      std::cerr << "EVENT " << event.type() << " ("
                << RdKafka::err2str(event.err()) << "): " << event.str()
                << std::endl;
      break;
    }
  }
};


class ExampleRebalanceCb : public RdKafka::RebalanceCb {
 private:
  static void part_list_print (const std::vector<RdKafka::TopicPartition*>&partitions){
    for (unsigned int i = 0 ; i < partitions.size() ; i++)
      std::cerr << partitions[i]->topic() <<
        "[" << partitions[i]->partition() << "], ";
 private:
  static void part_list_print(
      const std::vector<RdKafka::TopicPartition *> &partitions) {
    for (unsigned int i = 0; i < partitions.size(); i++)
      std::cerr << partitions[i]->topic() << "[" << partitions[i]->partition()
                << "], ";
    std::cerr << "\n";
  }

 public:
  void rebalance_cb (RdKafka::KafkaConsumer *consumer,
                     RdKafka::ErrorCode err,
                     std::vector<RdKafka::TopicPartition*> &partitions) {
 public:
  void rebalance_cb(RdKafka::KafkaConsumer *consumer,
                    RdKafka::ErrorCode err,
                    std::vector<RdKafka::TopicPartition *> &partitions) {
    std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": ";

    part_list_print(partitions);

    RdKafka::Error *error = NULL;
    RdKafka::Error *error = NULL;
    RdKafka::ErrorCode ret_err = RdKafka::ERR_NO_ERROR;

    if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {

@@ -165,7 +164,7 @@ public:
      error = consumer->incremental_unassign(partitions);
      partition_cnt -= (int)partitions.size();
    } else {
      ret_err = consumer->unassign();
      ret_err = consumer->unassign();
      partition_cnt = 0;
    }
  }

@@ -176,66 +175,65 @@ public:
      delete error;
    } else if (ret_err)
      std::cerr << "assign failed: " << RdKafka::err2str(ret_err) << "\n";

  }
};


void msg_consume(RdKafka::Message* message, void* opaque) {
void msg_consume(RdKafka::Message *message, void *opaque) {
  switch (message->err()) {
    case RdKafka::ERR__TIMED_OUT:
      break;
  case RdKafka::ERR__TIMED_OUT:
    break;

    case RdKafka::ERR_NO_ERROR:
      /* Real message */
      msg_cnt++;
      msg_bytes += message->len();
      if (verbosity >= 3)
        std::cerr << "Read msg at offset " << message->offset() << std::endl;
      RdKafka::MessageTimestamp ts;
      ts = message->timestamp();
      if (verbosity >= 2 &&
          ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
        std::string tsname = "?";
        if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
          tsname = "create time";
        else if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME)
          tsname = "log append time";
        std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl;
      }
      if (verbosity >= 2 && message->key()) {
        std::cout << "Key: " << *message->key() << std::endl;
      }
      if (verbosity >= 1) {
        printf("%.*s\n",
               static_cast<int>(message->len()),
               static_cast<const char *>(message->payload()));
      }
      break;
  case RdKafka::ERR_NO_ERROR:
    /* Real message */
    msg_cnt++;
    msg_bytes += message->len();
    if (verbosity >= 3)
      std::cerr << "Read msg at offset " << message->offset() << std::endl;
    RdKafka::MessageTimestamp ts;
    ts = message->timestamp();
    if (verbosity >= 2 &&
        ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
      std::string tsname = "?";
      if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
        tsname = "create time";
      else if (ts.type ==
               RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME)
        tsname = "log append time";
      std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl;
    }
    if (verbosity >= 2 && message->key()) {
      std::cout << "Key: " << *message->key() << std::endl;
    }
    if (verbosity >= 1) {
      printf("%.*s\n", static_cast<int>(message->len()),
             static_cast<const char *>(message->payload()));
    }
    break;

    case RdKafka::ERR__PARTITION_EOF:
      /* Last message */
      if (exit_eof && ++eof_cnt == partition_cnt) {
        std::cerr << "%% EOF reached for all " << partition_cnt <<
          " partition(s)" << std::endl;
        run = 0;
      }
      break;

    case RdKafka::ERR__UNKNOWN_TOPIC:
    case RdKafka::ERR__UNKNOWN_PARTITION:
      std::cerr << "Consume failed: " << message->errstr() << std::endl;
  case RdKafka::ERR__PARTITION_EOF:
    /* Last message */
    if (exit_eof && ++eof_cnt == partition_cnt) {
      std::cerr << "%% EOF reached for all " << partition_cnt << " partition(s)"
                << std::endl;
      run = 0;
    }
    break;

    default:
      /* Errors */
      std::cerr << "Consume failed: " << message->errstr() << std::endl;
      run = 0;
  case RdKafka::ERR__UNKNOWN_TOPIC:
  case RdKafka::ERR__UNKNOWN_PARTITION:
    std::cerr << "Consume failed: " << message->errstr() << std::endl;
    run = 0;
    break;

  default:
    /* Errors */
    std::cerr << "Consume failed: " << message->errstr() << std::endl;
    run = 0;
  }
}

int main (int argc, char **argv) {
int main(int argc, char **argv) {
  std::string brokers = "localhost";
  std::string errstr;
  std::string topic_str;

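msg_consume() above reads the message timestamp through the C++ MessageTimestamp type; the C equivalent, as a sketch:

    /* Sketch: read a consumed message's timestamp via the C API. */
    rd_kafka_timestamp_type_t tstype;
    int64_t ts = rd_kafka_message_timestamp(rkmessage, &tstype);

    if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME)
            printf("Timestamp: create time %" PRId64 "\n", ts);
    else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
            printf("Timestamp: log append time %" PRId64 "\n", ts);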
@@ -258,7 +256,7 @@ int main (int argc, char **argv) {
  while ((opt = getopt(argc, argv, "g:b:z:qd:eX:AM:qv")) != -1) {
    switch (opt) {
    case 'g':
      if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) {
      if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }

@@ -268,9 +266,9 @@ int main (int argc, char **argv) {
      break;
    case 'z':
      if (conf->set("compression.codec", optarg, errstr) !=
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'e':

@@ -286,47 +284,44 @@ int main (int argc, char **argv) {
        exit(1);
      }
      break;
    case 'X':
    {
      char *name, *val;
    case 'X': {
      char *name, *val;

      if (!strcmp(optarg, "dump")) {
        do_conf_dump = true;
        continue;
      }

      name = optarg;
      if (!(val = strchr(name, '='))) {
        std::cerr << "%% Expected -X property=value, not " <<
          name << std::endl;
        exit(1);
      }

      *val = '\0';
      val++;

      RdKafka::Conf::ConfResult res = conf->set(name, val, errstr);
      if (res != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      if (!strcmp(optarg, "dump")) {
        do_conf_dump = true;
        continue;
      }

      name = optarg;
      if (!(val = strchr(name, '='))) {
        std::cerr << "%% Expected -X property=value, not " << name << std::endl;
        exit(1);
      }

      *val = '\0';
      val++;

      RdKafka::Conf::ConfResult res = conf->set(name, val, errstr);
      if (res != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
    } break;

    case 'q':
      verbosity--;
      break;

    case 'q':
      verbosity--;
      break;

    case 'v':
      verbosity++;
      break;
    case 'v':
      verbosity++;
      break;

    default:
      goto usage;
    }
  }

  for (; optind < argc ; optind++)
  for (; optind < argc; optind++)
    topics.push_back(std::string(argv[optind]));

  if (topics.empty() || optind != argc) {

@ -354,19 +349,20 @@ int main (int argc, char **argv) {
|
|||
" -v Increase verbosity\n"
|
||||
"\n"
|
||||
"\n",
|
||||
argv[0],
|
||||
RdKafka::version_str().c_str(), RdKafka::version(),
|
||||
RdKafka::get_debug_contexts().c_str());
|
||||
exit(1);
|
||||
argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
|
||||
RdKafka::get_debug_contexts().c_str());
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (exit_eof) {
|
||||
std::string strategy;
|
||||
if (conf->get("partition.assignment.strategy", strategy) ==
|
||||
RdKafka::Conf::CONF_OK && strategy == "cooperative-sticky") {
|
||||
std::cerr << "Error: this example has not been modified to " <<
|
||||
"support -e (exit on EOF) when the partition.assignment.strategy " <<
|
||||
"is set to " << strategy << ": remove -e from the command line\n";
|
||||
RdKafka::Conf::CONF_OK &&
|
||||
strategy == "cooperative-sticky") {
|
||||
std::cerr
|
||||
<< "Error: this example has not been modified to "
|
||||
<< "support -e (exit on EOF) when the partition.assignment.strategy "
|
||||
<< "is set to " << strategy << ": remove -e from the command line\n";
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
@ -392,7 +388,7 @@ int main (int argc, char **argv) {
|
|||
std::cout << "# Global config" << std::endl;
|
||||
|
||||
for (std::list<std::string>::iterator it = dump->begin();
|
||||
it != dump->end(); ) {
|
||||
it != dump->end();) {
|
||||
std::cout << *it << " = ";
|
||||
it++;
|
||||
std::cout << *it << std::endl;
|
||||
|
@ -414,7 +410,8 @@ int main (int argc, char **argv) {
|
|||
/*
|
||||
* Create consumer using accumulated global configuration.
|
||||
*/
|
||||
RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
|
||||
RdKafka::KafkaConsumer *consumer =
|
||||
RdKafka::KafkaConsumer::create(conf, errstr);
|
||||
if (!consumer) {
|
||||
std::cerr << "Failed to create consumer: " << errstr << std::endl;
|
||||
exit(1);
|
||||
|
@ -430,8 +427,8 @@ int main (int argc, char **argv) {
|
|||
*/
|
||||
RdKafka::ErrorCode err = consumer->subscribe(topics);
|
||||
if (err) {
|
||||
std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
|
||||
<< RdKafka::err2str(err) << std::endl;
|
||||
std::cerr << "Failed to subscribe to " << topics.size()
|
||||
<< " topics: " << RdKafka::err2str(err) << std::endl;
|
||||
exit(1);
|
||||
}
|
||||
|
||||
|
@ -454,8 +451,8 @@ int main (int argc, char **argv) {
|
|||
consumer->close();
|
||||
delete consumer;
|
||||
|
||||
std::cerr << "% Consumed " << msg_cnt << " messages ("
|
||||
<< msg_bytes << " bytes)" << std::endl;
|
||||
std::cerr << "% Consumed " << msg_cnt << " messages (" << msg_bytes
|
||||
<< " bytes)" << std::endl;
|
||||
|
||||
/*
|
||||
* Wait for RdKafka to decommission.
|
||||
|
|
|
@ -68,7 +68,7 @@

static volatile sig_atomic_t run = 1;

static void sigterm (int sig) {
static void sigterm(int sig) {
run = 0;
}

@ -77,11 +77,11 @@ static void sigterm (int sig) {
/**
* @returns the current wall-clock time in milliseconds
*/
static int64_t now () {
static int64_t now() {
#ifndef _WIN32
struct timeval tv;
gettimeofday(&tv, NULL);
return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000);
struct timeval tv;
gettimeofday(&tv, NULL);
return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000);
#else
#error "now() not implemented for Windows, please submit a PR"
#endif

@ -93,13 +93,14 @@ static int64_t now () {
* @brief Accumulate a batch of \p batch_size messages, but wait
* no longer than \p batch_tmout milliseconds.
*/
static std::vector<RdKafka::Message *>
consume_batch (RdKafka::KafkaConsumer *consumer, size_t batch_size, int batch_tmout) {

static std::vector<RdKafka::Message *> consume_batch(
    RdKafka::KafkaConsumer *consumer,
    size_t batch_size,
    int batch_tmout) {
std::vector<RdKafka::Message *> msgs;
msgs.reserve(batch_size);

int64_t end = now() + batch_tmout;
int64_t end = now() + batch_tmout;
int remaining_timeout = batch_tmout;

while (msgs.size() < batch_size) {

@ -130,17 +131,18 @@ consume_batch (RdKafka::KafkaConsumer *consumer, size_t batch_size, int batch_tm
}


int main (int argc, char **argv) {
int main(int argc, char **argv) {
std::string errstr;
std::string topic_str;
std::vector<std::string> topics;
int batch_size = 100;
int batch_size = 100;
int batch_tmout = 1000;

/* Create configuration objects */
RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

if (conf->set("enable.partition.eof", "false", errstr) != RdKafka::Conf::CONF_OK) {
if (conf->set("enable.partition.eof", "false", errstr) !=
RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}

@ -150,7 +152,7 @@ int main (int argc, char **argv) {
while ((opt = getopt(argc, argv, "g:B:T:b:X:")) != -1) {
switch (opt) {
case 'g':
if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) {
if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}

@ -165,32 +167,30 @@ int main (int argc, char **argv) {
break;

case 'b':
if (conf->set("bootstrap.servers", optarg, errstr) != RdKafka::Conf::CONF_OK) {
if (conf->set("bootstrap.servers", optarg, errstr) !=
RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}
break;

case 'X':
{
char *name, *val;
case 'X': {
char *name, *val;

name = optarg;
if (!(val = strchr(name, '='))) {
std::cerr << "%% Expected -X property=value, not " <<
name << std::endl;
exit(1);
}

*val = '\0';
val++;

if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}
name = optarg;
if (!(val = strchr(name, '='))) {
std::cerr << "%% Expected -X property=value, not " << name << std::endl;
exit(1);
}
break;

*val = '\0';
val++;

if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}
} break;

default:
goto usage;

@ -198,26 +198,27 @@ int main (int argc, char **argv) {
}

/* Topics to consume */
for (; optind < argc ; optind++)
for (; optind < argc; optind++)
topics.push_back(std::string(argv[optind]));

if (topics.empty() || optind != argc) {
usage:
fprintf(stderr,
"Usage: %s -g <group-id> -B <batch-size> [options] topic1 topic2..\n"
"\n"
"librdkafka version %s (0x%08x)\n"
"\n"
" Options:\n"
" -g <group-id> Consumer group id\n"
" -B <batch-size> How many messages to batch (default: 100).\n"
" -T <batch-tmout> How long to wait for batch-size to accumulate in milliseconds. (default 1000 ms)\n"
" -b <brokers> Broker address (localhost:9092)\n"
" -X <prop=name> Set arbitrary librdkafka configuration property\n"
"\n",
argv[0],
RdKafka::version_str().c_str(), RdKafka::version());
exit(1);
fprintf(
stderr,
"Usage: %s -g <group-id> -B <batch-size> [options] topic1 topic2..\n"
"\n"
"librdkafka version %s (0x%08x)\n"
"\n"
" Options:\n"
" -g <group-id> Consumer group id\n"
" -B <batch-size> How many messages to batch (default: 100).\n"
" -T <batch-tmout> How long to wait for batch-size to accumulate in "
"milliseconds. (default 1000 ms)\n"
" -b <brokers> Broker address (localhost:9092)\n"
" -X <prop=name> Set arbitrary librdkafka configuration property\n"
"\n",
argv[0], RdKafka::version_str().c_str(), RdKafka::version());
exit(1);
}


@ -225,7 +226,8 @@ int main (int argc, char **argv) {
signal(SIGTERM, sigterm);

/* Create consumer */
RdKafka::KafkaConsumer *consumer = RdKafka::KafkaConsumer::create(conf, errstr);
RdKafka::KafkaConsumer *consumer =
RdKafka::KafkaConsumer::create(conf, errstr);
if (!consumer) {
std::cerr << "Failed to create consumer: " << errstr << std::endl;
exit(1);

@ -236,8 +238,8 @@ int main (int argc, char **argv) {
/* Subscribe to topics */
RdKafka::ErrorCode err = consumer->subscribe(topics);
if (err) {
std::cerr << "Failed to subscribe to " << topics.size() << " topics: "
<< RdKafka::err2str(err) << std::endl;
std::cerr << "Failed to subscribe to " << topics.size()
<< " topics: " << RdKafka::err2str(err) << std::endl;
exit(1);
}

@ -247,7 +249,9 @@ int main (int argc, char **argv) {
std::cout << "Accumulated " << msgs.size() << " messages:" << std::endl;

for (auto &msg : msgs) {
std::cout << " Message in " << msg->topic_name() << " [" << msg->partition() << "] at offset " << msg->offset() << std::endl;
std::cout << " Message in " << msg->topic_name() << " ["
<< msg->partition() << "] at offset " << msg->offset()
<< std::endl;
delete msg;
}
}
Diff between files not shown because of its large size
@ -3,24 +3,24 @@
*
* Copyright (c) 2014, Magnus Edenhill
* All rights reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.

@ -54,28 +54,26 @@
#include "rdkafkacpp.h"


static void metadata_print (const std::string &topic,
const RdKafka::Metadata *metadata) {
static void metadata_print(const std::string &topic,
const RdKafka::Metadata *metadata) {
std::cout << "Metadata for " << (topic.empty() ? "" : "all topics")
<< "(from broker " << metadata->orig_broker_id()
<< ":" << metadata->orig_broker_name() << std::endl;
<< "(from broker " << metadata->orig_broker_id() << ":"
<< metadata->orig_broker_name() << std::endl;

/* Iterate brokers */
std::cout << " " << metadata->brokers()->size() << " brokers:" << std::endl;
RdKafka::Metadata::BrokerMetadataIterator ib;
for (ib = metadata->brokers()->begin();
ib != metadata->brokers()->end();
for (ib = metadata->brokers()->begin(); ib != metadata->brokers()->end();
++ib) {
std::cout << " broker " << (*ib)->id() << " at "
<< (*ib)->host() << ":" << (*ib)->port() << std::endl;
std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":"
<< (*ib)->port() << std::endl;
}
/* Iterate topics */
std::cout << metadata->topics()->size() << " topics:" << std::endl;
RdKafka::Metadata::TopicMetadataIterator it;
for (it = metadata->topics()->begin();
it != metadata->topics()->end();
for (it = metadata->topics()->begin(); it != metadata->topics()->end();
++it) {
std::cout << " topic \""<< (*it)->topic() << "\" with "
std::cout << " topic \"" << (*it)->topic() << "\" with "
<< (*it)->partitions()->size() << " partitions:";

if ((*it)->err() != RdKafka::ERR_NO_ERROR) {

@ -87,26 +85,23 @@ static void metadata_print (const std::string &topic,

/* Iterate topic's partitions */
RdKafka::TopicMetadata::PartitionMetadataIterator ip;
for (ip = (*it)->partitions()->begin();
ip != (*it)->partitions()->end();
for (ip = (*it)->partitions()->begin(); ip != (*it)->partitions()->end();
++ip) {
std::cout << " partition " << (*ip)->id()
<< ", leader " << (*ip)->leader()
<< ", replicas: ";
std::cout << " partition " << (*ip)->id() << ", leader "
<< (*ip)->leader() << ", replicas: ";

/* Iterate partition's replicas */
RdKafka::PartitionMetadata::ReplicasIterator ir;
for (ir = (*ip)->replicas()->begin();
ir != (*ip)->replicas()->end();
for (ir = (*ip)->replicas()->begin(); ir != (*ip)->replicas()->end();
++ir) {
std::cout << (ir == (*ip)->replicas()->begin() ? "":",") << *ir;
std::cout << (ir == (*ip)->replicas()->begin() ? "" : ",") << *ir;
}

/* Iterate partition's ISRs */
std::cout << ", isrs: ";
RdKafka::PartitionMetadata::ISRSIterator iis;
for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end() ; ++iis)
std::cout << (iis == (*ip)->isrs()->begin() ? "":",") << *iis;
for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end(); ++iis)
std::cout << (iis == (*ip)->isrs()->begin() ? "" : ",") << *iis;

if ((*ip)->err() != RdKafka::ERR_NO_ERROR)
std::cout << ", " << RdKafka::err2str((*ip)->err()) << std::endl;

@ -117,34 +112,34 @@ static void metadata_print (const std::string &topic,
}

static volatile sig_atomic_t run = 1;
static bool exit_eof = false;
static bool exit_eof = false;

static void sigterm (int sig) {
static void sigterm(int sig) {
run = 0;
}


class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
public:
void dr_cb (RdKafka::Message &message) {
void dr_cb(RdKafka::Message &message) {
std::string status_name;
switch (message.status())
{
case RdKafka::Message::MSG_STATUS_NOT_PERSISTED:
status_name = "NotPersisted";
break;
case RdKafka::Message::MSG_STATUS_POSSIBLY_PERSISTED:
status_name = "PossiblyPersisted";
break;
case RdKafka::Message::MSG_STATUS_PERSISTED:
status_name = "Persisted";
break;
default:
status_name = "Unknown?";
break;
}
std::cout << "Message delivery for (" << message.len() << " bytes): " <<
status_name << ": " << message.errstr() << std::endl;
switch (message.status()) {
case RdKafka::Message::MSG_STATUS_NOT_PERSISTED:
status_name = "NotPersisted";
break;
case RdKafka::Message::MSG_STATUS_POSSIBLY_PERSISTED:
status_name = "PossiblyPersisted";
break;
case RdKafka::Message::MSG_STATUS_PERSISTED:
status_name = "Persisted";
break;
default:
status_name = "Unknown?";
break;
}
std::cout << "Message delivery for (" << message.len()
<< " bytes): " << status_name << ": " << message.errstr()
<< std::endl;
if (message.key())
std::cout << "Key: " << *(message.key()) << ";" << std::endl;
}

@ -153,32 +148,31 @@ class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {

class ExampleEventCb : public RdKafka::EventCb {
public:
void event_cb (RdKafka::Event &event) {
switch (event.type())
{
case RdKafka::Event::EVENT_ERROR:
if (event.fatal()) {
std::cerr << "FATAL ";
run = 0;
}
std::cerr << "ERROR (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
void event_cb(RdKafka::Event &event) {
switch (event.type()) {
case RdKafka::Event::EVENT_ERROR:
if (event.fatal()) {
std::cerr << "FATAL ";
run = 0;
}
std::cerr << "ERROR (" << RdKafka::err2str(event.err())
<< "): " << event.str() << std::endl;
break;

case RdKafka::Event::EVENT_STATS:
std::cerr << "\"STATS\": " << event.str() << std::endl;
break;
case RdKafka::Event::EVENT_STATS:
std::cerr << "\"STATS\": " << event.str() << std::endl;
break;

case RdKafka::Event::EVENT_LOG:
fprintf(stderr, "LOG-%i-%s: %s\n",
event.severity(), event.fac().c_str(), event.str().c_str());
break;
case RdKafka::Event::EVENT_LOG:
fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(),
event.str().c_str());
break;

default:
std::cerr << "EVENT " << event.type() <<
" (" << RdKafka::err2str(event.err()) << "): " <<
event.str() << std::endl;
break;
default:
std::cerr << "EVENT " << event.type() << " ("
<< RdKafka::err2str(event.err()) << "): " << event.str()
<< std::endl;
break;
}
}
};

@ -188,91 +182,91 @@ class ExampleEventCb : public RdKafka::EventCb {
* in the produce() call. */
class MyHashPartitionerCb : public RdKafka::PartitionerCb {
public:
int32_t partitioner_cb (const RdKafka::Topic *topic, const std::string *key,
int32_t partition_cnt, void *msg_opaque) {
int32_t partitioner_cb(const RdKafka::Topic *topic,
const std::string *key,
int32_t partition_cnt,
void *msg_opaque) {
return djb_hash(key->c_str(), key->size()) % partition_cnt;
}
private:

static inline unsigned int djb_hash (const char *str, size_t len) {
private:
static inline unsigned int djb_hash(const char *str, size_t len) {
unsigned int hash = 5381;
for (size_t i = 0 ; i < len ; i++)
for (size_t i = 0; i < len; i++)
hash = ((hash << 5) + hash) + str[i];
return hash;
}
};

void msg_consume(RdKafka::Message* message, void* opaque) {
void msg_consume(RdKafka::Message *message, void *opaque) {
const RdKafka::Headers *headers;

switch (message->err()) {
case RdKafka::ERR__TIMED_OUT:
break;
case RdKafka::ERR__TIMED_OUT:
break;

case RdKafka::ERR_NO_ERROR:
/* Real message */
std::cout << "Read msg at offset " << message->offset() << std::endl;
if (message->key()) {
std::cout << "Key: " << *message->key() << std::endl;
case RdKafka::ERR_NO_ERROR:
/* Real message */
std::cout << "Read msg at offset " << message->offset() << std::endl;
if (message->key()) {
std::cout << "Key: " << *message->key() << std::endl;
}
headers = message->headers();
if (headers) {
std::vector<RdKafka::Headers::Header> hdrs = headers->get_all();
for (size_t i = 0; i < hdrs.size(); i++) {
const RdKafka::Headers::Header hdr = hdrs[i];

if (hdr.value() != NULL)
printf(" Header: %s = \"%.*s\"\n", hdr.key().c_str(),
(int)hdr.value_size(), (const char *)hdr.value());
else
printf(" Header: %s = NULL\n", hdr.key().c_str());
}
headers = message->headers();
if (headers) {
std::vector<RdKafka::Headers::Header> hdrs = headers->get_all();
for (size_t i = 0 ; i < hdrs.size() ; i++) {
const RdKafka::Headers::Header hdr = hdrs[i];
}
printf("%.*s\n", static_cast<int>(message->len()),
static_cast<const char *>(message->payload()));
break;

if (hdr.value() != NULL)
printf(" Header: %s = \"%.*s\"\n",
hdr.key().c_str(),
(int)hdr.value_size(), (const char *)hdr.value());
else
printf(" Header: %s = NULL\n", hdr.key().c_str());
}
}
printf("%.*s\n",
static_cast<int>(message->len()),
static_cast<const char *>(message->payload()));
break;

case RdKafka::ERR__PARTITION_EOF:
/* Last message */
if (exit_eof) {
run = 0;
}
break;

case RdKafka::ERR__UNKNOWN_TOPIC:
case RdKafka::ERR__UNKNOWN_PARTITION:
std::cerr << "Consume failed: " << message->errstr() << std::endl;
case RdKafka::ERR__PARTITION_EOF:
/* Last message */
if (exit_eof) {
run = 0;
break;
}
break;

default:
/* Errors */
std::cerr << "Consume failed: " << message->errstr() << std::endl;
run = 0;
case RdKafka::ERR__UNKNOWN_TOPIC:
case RdKafka::ERR__UNKNOWN_PARTITION:
std::cerr << "Consume failed: " << message->errstr() << std::endl;
run = 0;
break;

default:
/* Errors */
std::cerr << "Consume failed: " << message->errstr() << std::endl;
run = 0;
}
}


class ExampleConsumeCb : public RdKafka::ConsumeCb {
public:
void consume_cb (RdKafka::Message &msg, void *opaque) {
void consume_cb(RdKafka::Message &msg, void *opaque) {
msg_consume(&msg, opaque);
}
};



int main (int argc, char **argv) {
int main(int argc, char **argv) {
std::string brokers = "localhost";
std::string errstr;
std::string topic_str;
std::string mode;
std::string debug;
int32_t partition = RdKafka::Topic::PARTITION_UA;
int32_t partition = RdKafka::Topic::PARTITION_UA;
int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;
bool do_conf_dump = false;
bool do_conf_dump = false;
int opt;
MyHashPartitionerCb hash_partitioner;
int use_ccb = 0;

@ -280,7 +274,7 @@ int main (int argc, char **argv) {
/*
* Create configuration objects
*/
RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);


@ -311,20 +305,20 @@ int main (int argc, char **argv) {
break;
case 'z':
if (conf->set("compression.codec", optarg, errstr) !=
RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}
break;
case 'o':
if (!strcmp(optarg, "end"))
start_offset = RdKafka::Topic::OFFSET_END;
start_offset = RdKafka::Topic::OFFSET_END;
else if (!strcmp(optarg, "beginning"))
start_offset = RdKafka::Topic::OFFSET_BEGINNING;
start_offset = RdKafka::Topic::OFFSET_BEGINNING;
else if (!strcmp(optarg, "stored"))
start_offset = RdKafka::Topic::OFFSET_STORED;
start_offset = RdKafka::Topic::OFFSET_STORED;
else
start_offset = strtoll(optarg, NULL, 10);
start_offset = strtoll(optarg, NULL, 10);
break;
case 'e':
exit_eof = true;

@ -339,50 +333,47 @@ int main (int argc, char **argv) {
exit(1);
}
break;
case 'X':
{
char *name, *val;
case 'X': {
char *name, *val;

if (!strcmp(optarg, "dump")) {
do_conf_dump = true;
continue;
}
if (!strcmp(optarg, "dump")) {
do_conf_dump = true;
continue;
}

name = optarg;
if (!(val = strchr(name, '='))) {
std::cerr << "%% Expected -X property=value, not " <<
name << std::endl;
exit(1);
}
name = optarg;
if (!(val = strchr(name, '='))) {
std::cerr << "%% Expected -X property=value, not " << name << std::endl;
exit(1);
}

*val = '\0';
val++;
*val = '\0';
val++;

/* Try "topic." prefixed properties on topic
* conf first, and then fall through to global if
* it didnt match a topic configuration property. */
RdKafka::Conf::ConfResult res;
if (!strncmp(name, "topic.", strlen("topic.")))
res = tconf->set(name+strlen("topic."), val, errstr);
else
res = conf->set(name, val, errstr);
/* Try "topic." prefixed properties on topic
* conf first, and then fall through to global if
* it didnt match a topic configuration property. */
RdKafka::Conf::ConfResult res;
if (!strncmp(name, "topic.", strlen("topic.")))
res = tconf->set(name + strlen("topic."), val, errstr);
else
res = conf->set(name, val, errstr);

if (res != RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}
if (res != RdKafka::Conf::CONF_OK) {
std::cerr << errstr << std::endl;
exit(1);
}
} break;

case 'f':
if (!strcmp(optarg, "ccb"))
use_ccb = 1;
else {
std::cerr << "Unknown option: " << optarg << std::endl;
exit(1);
}
break;

case 'f':
if (!strcmp(optarg, "ccb"))
use_ccb = 1;
else {
std::cerr << "Unknown option: " << optarg << std::endl;
exit(1);
}
break;

default:
goto usage;
}

@ -390,8 +381,8 @@ int main (int argc, char **argv) {

if (mode.empty() || (topic_str.empty() && mode != "L") || optind != argc) {
usage:
std::string features;
conf->get("builtin.features", features);
std::string features;
conf->get("builtin.features", features);
fprintf(stderr,
"Usage: %s [-C|-P] -t <topic> "
"[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"

@ -430,11 +421,9 @@ int main (int argc, char **argv) {
"\n"
"\n"
"\n",
argv[0],
RdKafka::version_str().c_str(), RdKafka::version(),
features.c_str(),
RdKafka::get_debug_contexts().c_str());
exit(1);
argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
features.c_str(), RdKafka::get_debug_contexts().c_str());
exit(1);
}


@ -456,7 +445,7 @@ int main (int argc, char **argv) {
if (do_conf_dump) {
int pass;

for (pass = 0 ; pass < 2 ; pass++) {
for (pass = 0; pass < 2; pass++) {
std::list<std::string> *dump;
if (pass == 0) {
dump = conf->dump();

@ -467,7 +456,7 @@ int main (int argc, char **argv) {
}

for (std::list<std::string>::iterator it = dump->begin();
it != dump->end(); ) {
it != dump->end();) {
std::cout << *it << " = ";
it++;
std::cout << *it << std::endl;

@ -487,7 +476,7 @@ int main (int argc, char **argv) {
* Producer mode
*/

if(topic_str.empty())
if (topic_str.empty())
goto usage;

ExampleDeliveryReportCb ex_dr_cb;

@ -515,7 +504,7 @@ int main (int argc, char **argv) {
for (std::string line; run && std::getline(std::cin, line);) {
if (line.empty()) {
producer->poll(0);
continue;
continue;
}

RdKafka::Headers *headers = RdKafka::Headers::create();

@ -526,27 +515,27 @@ int main (int argc, char **argv) {
* Produce message
*/
RdKafka::ErrorCode resp =
producer->produce(topic_str, partition,
RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
/* Value */
const_cast<char *>(line.c_str()), line.size(),
/* Key */
NULL, 0,
/* Timestamp (defaults to now) */
0,
/* Message headers, if any */
headers,
/* Per-message opaque value passed to
* delivery report */
NULL);
producer->produce(topic_str, partition,
RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
/* Value */
const_cast<char *>(line.c_str()), line.size(),
/* Key */
NULL, 0,
/* Timestamp (defaults to now) */
0,
/* Message headers, if any */
headers,
/* Per-message opaque value passed to
* delivery report */
NULL);
if (resp != RdKafka::ERR_NO_ERROR) {
std::cerr << "% Produce failed: " <<
RdKafka::err2str(resp) << std::endl;
std::cerr << "% Produce failed: " << RdKafka::err2str(resp)
<< std::endl;
delete headers; /* Headers are automatically deleted on produce()
* success. */
} else {
std::cerr << "% Produced message (" << line.size() << " bytes)" <<
std::endl;
std::cerr << "% Produced message (" << line.size() << " bytes)"
<< std::endl;
}

producer->poll(0);

@ -568,7 +557,7 @@ int main (int argc, char **argv) {

conf->set("enable.partition.eof", "true", errstr);

if(topic_str.empty())
if (topic_str.empty())
goto usage;

/*

@ -585,8 +574,8 @@ int main (int argc, char **argv) {
/*
* Create topic handle.
*/
RdKafka::Topic *topic = RdKafka::Topic::create(consumer, topic_str,
tconf, errstr);
RdKafka::Topic *topic =
RdKafka::Topic::create(consumer, topic_str, tconf, errstr);
if (!topic) {
std::cerr << "Failed to create topic: " << errstr << std::endl;
exit(1);

@ -597,8 +586,8 @@ int main (int argc, char **argv) {
*/
RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
if (resp != RdKafka::ERR_NO_ERROR) {
std::cerr << "Failed to start consumer: " <<
RdKafka::err2str(resp) << std::endl;
std::cerr << "Failed to start consumer: " << RdKafka::err2str(resp)
<< std::endl;
exit(1);
}


@ -609,8 +598,8 @@ int main (int argc, char **argv) {
*/
while (run) {
if (use_ccb) {
consumer->consume_callback(topic, partition, 1000,
&ex_consume_cb, &use_ccb);
consumer->consume_callback(topic, partition, 1000, &ex_consume_cb,
&use_ccb);
} else {
RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
msg_consume(msg, NULL);

@ -646,7 +635,7 @@ int main (int argc, char **argv) {
* Create topic handle.
*/
RdKafka::Topic *topic = NULL;
if(!topic_str.empty()) {
if (!topic_str.empty()) {
topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
if (!topic) {
std::cerr << "Failed to create topic: " << errstr << std::endl;

@ -658,13 +647,13 @@ int main (int argc, char **argv) {
class RdKafka::Metadata *metadata;

/* Fetch metadata */
RdKafka::ErrorCode err = producer->metadata(!topic, topic,
&metadata, 5000);
RdKafka::ErrorCode err =
producer->metadata(!topic, topic, &metadata, 5000);
if (err != RdKafka::ERR_NO_ERROR) {
std::cerr << "%% Failed to acquire metadata: "
<< RdKafka::err2str(err) << std::endl;
run = 0;
break;
std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err)
<< std::endl;
run = 0;
break;
}

metadata_print(topic_str, metadata);

@ -672,7 +661,6 @@ int main (int argc, char **argv) {
delete metadata;
run = 0;
}

}

delete conf;
Diff between files not shown because of its large size
@ -72,8 +72,8 @@ struct state {
rd_kafka_t *producer; /**< Per-input partition output producer */
rd_kafka_topic_partition_t *rktpar; /**< Back-pointer to the
* input partition. */
time_t last_commit; /**< Last transaction commit */
int msgcnt; /**< Number of messages processed in current txn */
time_t last_commit; /**< Last transaction commit */
int msgcnt; /**< Number of messages processed in current txn */
};
/* Current assignment for the input consumer.
* The .opaque field of each partition points to an allocated 'struct state'.

@ -85,29 +85,31 @@ static rd_kafka_topic_partition_list_t *assigned_partitions;
/**
* @brief A fatal error has occurred, immediately exit the application.
*/
#define fatal(...) do { \
fprintf(stderr, "FATAL ERROR: "); \
fprintf(stderr, __VA_ARGS__); \
fprintf(stderr, "\n"); \
exit(1); \
#define fatal(...) \
do { \
fprintf(stderr, "FATAL ERROR: "); \
fprintf(stderr, __VA_ARGS__); \
fprintf(stderr, "\n"); \
exit(1); \
} while (0)

/**
* @brief Same as fatal() but takes an rd_kafka_error_t object, prints its
* error message, destroys the object and then exits fatally.
*/
#define fatal_error(what,error) do { \
fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", \
what, rd_kafka_error_name(error), \
rd_kafka_error_string(error)); \
rd_kafka_error_destroy(error); \
exit(1); \
#define fatal_error(what, error) \
do { \
fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what, \
rd_kafka_error_name(error), \
rd_kafka_error_string(error)); \
rd_kafka_error_destroy(error); \
exit(1); \
} while (0)

/**
* @brief Signal termination of program
*/
static void stop (int sig) {
static void stop(int sig) {
run = 0;
}


@ -133,11 +135,10 @@ static void stop (int sig) {
* In the case of transactional producing the delivery report callback is
* mostly useful for logging the produce failures.
*/
static void dr_msg_cb (rd_kafka_t *rk,
const rd_kafka_message_t *rkmessage, void *opaque) {
static void
dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
if (rkmessage->err)
fprintf(stderr,
"%% Message delivery failed: %s\n",
fprintf(stderr, "%% Message delivery failed: %s\n",
rd_kafka_err2str(rkmessage->err));

/* The rkmessage is destroyed automatically by librdkafka */

@ -150,7 +151,7 @@ static void dr_msg_cb (rd_kafka_t *rk,
* and begin a new transaction.
*/
static rd_kafka_t *
create_transactional_producer (const rd_kafka_topic_partition_t *rktpar) {
create_transactional_producer(const rd_kafka_topic_partition_t *rktpar) {
rd_kafka_conf_t *conf = rd_kafka_conf_new();
rd_kafka_t *rk;
char errstr[256];

@ -158,15 +159,15 @@ create_transactional_producer (const rd_kafka_topic_partition_t *rktpar) {
char transactional_id[256];

snprintf(transactional_id, sizeof(transactional_id),
"librdkafka_transactions_older_example_%s-%d",
rktpar->topic, rktpar->partition);
"librdkafka_transactions_older_example_%s-%d", rktpar->topic,
rktpar->partition);

if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
sizeof(errstr)) != RD_KAFKA_CONF_OK ||
rd_kafka_conf_set(conf, "transactional.id", transactional_id,
errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
rd_kafka_conf_set(conf, "transaction.timeout.ms", "60000",
errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
rd_kafka_conf_set(conf, "transaction.timeout.ms", "60000", errstr,
sizeof(errstr)) != RD_KAFKA_CONF_OK)
fatal("Failed to configure producer: %s", errstr);

/* This callback will be called once per message to indicate

@ -199,7 +200,7 @@ create_transactional_producer (const rd_kafka_topic_partition_t *rktpar) {
/**
* @brief Abort the current transaction and destroy the producer.
*/
static void destroy_transactional_producer (rd_kafka_t *rk) {
static void destroy_transactional_producer(rd_kafka_t *rk) {
rd_kafka_error_t *error;

fprintf(stdout, "%s: aborting transaction and terminating producer\n",

@ -226,9 +227,9 @@ static void destroy_transactional_producer (rd_kafka_t *rk) {
* position where the transaction last started, i.e., the committed
* consumer offset.
*/
static void abort_transaction_and_rewind (struct state *state) {
rd_kafka_topic_t *rkt = rd_kafka_topic_new(consumer,
state->rktpar->topic, NULL);
static void abort_transaction_and_rewind(struct state *state) {
rd_kafka_topic_t *rkt =
rd_kafka_topic_new(consumer, state->rktpar->topic, NULL);
rd_kafka_topic_partition_list_t *offset;
rd_kafka_resp_err_t err;
rd_kafka_error_t *error;

@ -249,12 +250,11 @@ static void abort_transaction_and_rewind (struct state *state) {

/* Get committed offset for this partition */
offset = rd_kafka_topic_partition_list_new(1);
rd_kafka_topic_partition_list_add(offset,
state->rktpar->topic,
rd_kafka_topic_partition_list_add(offset, state->rktpar->topic,
state->rktpar->partition);

/* Note: Timeout must be lower than max.poll.interval.ms */
err = rd_kafka_committed(consumer, offset, 10*1000);
err = rd_kafka_committed(consumer, offset, 10 * 1000);
if (err)
fatal("Failed to acquire committed offset for %s [%d]: %s",
state->rktpar->topic, (int)state->rktpar->partition,

@ -263,17 +263,18 @@ static void abort_transaction_and_rewind (struct state *state) {
/* Seek to committed offset, or start of partition if no
* no committed offset is available. */
err = rd_kafka_seek(rkt, state->rktpar->partition,
offset->elems[0].offset < 0 ?
/* No committed offset, start from beginning */
RD_KAFKA_OFFSET_BEGINNING :
/* Use committed offset */
offset->elems[0].offset,
offset->elems[0].offset < 0
?
/* No committed offset, start from beginning */
RD_KAFKA_OFFSET_BEGINNING
:
/* Use committed offset */
offset->elems[0].offset,
0);

if (err)
fatal("Failed to seek %s [%d]: %s",
state->rktpar->topic, (int)state->rktpar->partition,
rd_kafka_err2str(err));
fatal("Failed to seek %s [%d]: %s", state->rktpar->topic,
(int)state->rktpar->partition, rd_kafka_err2str(err));

rd_kafka_topic_destroy(rkt);
}

@ -282,7 +283,7 @@ static void abort_transaction_and_rewind (struct state *state) {
/**
* @brief Commit the current transaction and start a new transaction.
*/
static void commit_transaction_and_start_new (struct state *state) {
static void commit_transaction_and_start_new(struct state *state) {
rd_kafka_error_t *error;
rd_kafka_resp_err_t err;
rd_kafka_consumer_group_metadata_t *cgmd;

@ -301,8 +302,7 @@ static void commit_transaction_and_start_new (struct state *state) {

/* Get consumer's current position for this partition */
offset = rd_kafka_topic_partition_list_new(1);
rd_kafka_topic_partition_list_add(offset,
state->rktpar->topic,
rd_kafka_topic_partition_list_add(offset, state->rktpar->topic,
state->rktpar->partition);
err = rd_kafka_position(consumer, offset);
if (err)

@ -311,8 +311,8 @@ static void commit_transaction_and_start_new (struct state *state) {
rd_kafka_err2str(err));

/* Send offsets to transaction coordinator */
error = rd_kafka_send_offsets_to_transaction(state->producer,
offset, cgmd, -1);
error = rd_kafka_send_offsets_to_transaction(state->producer, offset,
cgmd, -1);
rd_kafka_consumer_group_metadata_destroy(cgmd);
rd_kafka_topic_partition_list_destroy(offset);
if (error) {

@ -363,36 +363,36 @@ static void commit_transaction_and_start_new (struct state *state) {
* these producer's from this callback.
*/
static void
consumer_group_rebalance_cb (rd_kafka_t *rk,
rd_kafka_resp_err_t err,
rd_kafka_topic_partition_list_t *partitions,
void *opaque) {
consumer_group_rebalance_cb(rd_kafka_t *rk,
rd_kafka_resp_err_t err,
rd_kafka_topic_partition_list_t *partitions,
void *opaque) {
int i;

if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
fatal("This example has not yet been modified to work with "
"cooperative incremental rebalancing "
"(partition.assignment.strategy=cooperative-sticky)");
fatal(
"This example has not yet been modified to work with "
"cooperative incremental rebalancing "
"(partition.assignment.strategy=cooperative-sticky)");

switch (err)
{
switch (err) {
case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
assigned_partitions =
rd_kafka_topic_partition_list_copy(partitions);
rd_kafka_topic_partition_list_copy(partitions);

fprintf(stdout, "Consumer group rebalanced, new assignment:\n");

/* Create a transactional producer for each input partition */
for (i = 0 ; i < assigned_partitions->cnt ; i++) {
for (i = 0; i < assigned_partitions->cnt; i++) {
/* Store the partition-to-producer mapping
* in the partition's opaque field. */
rd_kafka_topic_partition_t *rktpar =
&assigned_partitions->elems[i];
&assigned_partitions->elems[i];
struct state *state = calloc(1, sizeof(*state));

state->producer = create_transactional_producer(rktpar);
state->rktpar = rktpar;
rktpar->opaque = state;
state->rktpar = rktpar;
rktpar->opaque = state;
state->last_commit = time(NULL);

fprintf(stdout,

@ -413,11 +413,12 @@ consumer_group_rebalance_cb (rd_kafka_t *rk,
"Consumer group rebalanced, assignment revoked\n");

/* Abort the current transactions and destroy all producers */
for (i = 0 ; i < assigned_partitions->cnt ; i++) {
for (i = 0; i < assigned_partitions->cnt; i++) {
/* Store the partition-to-producer mapping
* in the partition's opaque field. */
struct state *state = (struct state *)
assigned_partitions->elems[i].opaque;
struct state *state =
(struct state *)assigned_partitions->elems[i]
.opaque;

destroy_transactional_producer(state->producer);
free(state);

@ -441,16 +442,16 @@ consumer_group_rebalance_cb (rd_kafka_t *rk,
/**
* @brief Create the input consumer.
*/
static rd_kafka_t *create_input_consumer (const char *brokers,
const char *input_topic) {
static rd_kafka_t *create_input_consumer(const char *brokers,
const char *input_topic) {
rd_kafka_conf_t *conf = rd_kafka_conf_new();
rd_kafka_t *rk;
char errstr[256];
rd_kafka_resp_err_t err;
rd_kafka_topic_partition_list_t *topics;

if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
sizeof(errstr)) != RD_KAFKA_CONF_OK ||
rd_kafka_conf_set(conf, "group.id",
"librdkafka_transactions_older_example_group",
errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||

@ -458,8 +459,8 @@ static rd_kafka_t *create_input_consumer (const char *brokers,
* output producer's transaction using
* rd_kafka_send_offsets_to_transaction(), so auto commits
* must be disabled. */
rd_kafka_conf_set(conf, "enable.auto.commit", "false",
errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr,
sizeof(errstr)) != RD_KAFKA_CONF_OK) {
fatal("Failed to configure consumer: %s", errstr);
}


@ -488,8 +489,8 @@ static rd_kafka_t *create_input_consumer (const char *brokers,
rd_kafka_topic_partition_list_destroy(topics);
if (err) {
rd_kafka_destroy(rk);
fatal("Failed to subscribe to %s: %s\n",
input_topic, rd_kafka_err2str(err));
fatal("Failed to subscribe to %s: %s\n", input_topic,
rd_kafka_err2str(err));
}

return rk;

@ -500,16 +501,16 @@ static rd_kafka_t *create_input_consumer (const char *brokers,
* @brief Find and parse next integer string in \p start.
* @returns Pointer after found integer string, or NULL if not found.
*/
static const void *find_next_int (const void *start, const void *end,
int *intp) {
static const void *
find_next_int(const void *start, const void *end, int *intp) {
const char *p;
int collecting = 0;
int num = 0;
int num = 0;

for (p = (const char *)start ; p < (const char *)end ; p++) {
for (p = (const char *)start; p < (const char *)end; p++) {
if (isdigit((int)(*p))) {
collecting = 1;
num = (num * 10) + ((int)*p - ((int)'0'));
num = (num * 10) + ((int)*p - ((int)'0'));
} else if (collecting)
break;
}

@ -529,8 +530,8 @@ static const void *find_next_int (const void *start, const void *end,
* the output topic using the transactional producer for the given
* inut partition.
*/
static void process_message (struct state *state,
const rd_kafka_message_t *rkmessage) {
static void process_message(struct state *state,
const rd_kafka_message_t *rkmessage) {
int num;
long unsigned sum = 0;
const void *p, *end;

@ -540,7 +541,7 @@ static void process_message (struct state *state,
if (rkmessage->len == 0)
return; /* Ignore empty messages */

p = rkmessage->payload;
p = rkmessage->payload;
end = ((const char *)rkmessage->payload) + rkmessage->len;

/* Find and sum all numbers in the message */

@ -555,17 +556,14 @@ static void process_message (struct state *state,
/* Emit output message on transactional producer */
while (1) {
err = rd_kafka_producev(
state->producer,
RD_KAFKA_V_TOPIC(output_topic),
/* Use same key as input message */
RD_KAFKA_V_KEY(rkmessage->key,
rkmessage->key_len),
/* Value is the current sum of this
* transaction. */
RD_KAFKA_V_VALUE(value, strlen(value)),
/* Copy value since it is allocated on the stack */
RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
RD_KAFKA_V_END);
state->producer, RD_KAFKA_V_TOPIC(output_topic),
/* Use same key as input message */
RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len),
/* Value is the current sum of this
* transaction. */
RD_KAFKA_V_VALUE(value, strlen(value)),
/* Copy value since it is allocated on the stack */
RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);

if (!err)
break;

@ -586,7 +584,7 @@ static void process_message (struct state *state,
}


int main (int argc, char **argv) {
int main(int argc, char **argv) {
/*
* Argument validation
*/

@ -597,8 +595,8 @@ int main (int argc, char **argv) {
return 1;
}

brokers = argv[1];
input_topic = argv[2];
brokers = argv[1];
input_topic = argv[2];
output_topic = argv[3];

/* Signal handler for clean shutdown */

@ -618,7 +616,7 @@ int main (int argc, char **argv) {
rd_kafka_topic_partition_t *rktpar;

/* Wait for new mesages or error events */
msg = rd_kafka_consumer_poll(consumer, 1000/*1 second*/);
msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/);
if (!msg)
continue;


@ -636,13 +634,13 @@ int main (int argc, char **argv) {

/* Find output producer for this input partition */
rktpar = rd_kafka_topic_partition_list_find(
assigned_partitions,
rd_kafka_topic_name(msg->rkt), msg->partition);
assigned_partitions, rd_kafka_topic_name(msg->rkt),
msg->partition);
if (!rktpar)
fatal("BUG: No output producer for assigned "
"partition %s [%d]",
rd_kafka_topic_name(msg->rkt),
(int)msg->partition);
fatal(
"BUG: No output producer for assigned "
"partition %s [%d]",
rd_kafka_topic_name(msg->rkt), (int)msg->partition);

/* Get state struct for this partition */
state = (struct state *)rktpar->opaque;

@ -656,7 +654,7 @@ int main (int argc, char **argv) {
if (++state->msgcnt > 100 ||
state->last_commit + 5 <= time(NULL)) {
commit_transaction_and_start_new(state);
state->msgcnt = 0;
state->msgcnt = 0;
state->last_commit = time(NULL);
}
}
@ -60,29 +60,31 @@ static volatile sig_atomic_t run = 1;
|
|||
/**
|
||||
* @brief A fatal error has occurred, immediately exit the application.
|
||||
*/
|
||||
#define fatal(...) do { \
|
||||
fprintf(stderr, "FATAL ERROR: "); \
|
||||
fprintf(stderr, __VA_ARGS__); \
|
||||
fprintf(stderr, "\n"); \
|
||||
exit(1); \
|
||||
#define fatal(...) \
|
||||
do { \
|
||||
fprintf(stderr, "FATAL ERROR: "); \
|
||||
fprintf(stderr, __VA_ARGS__); \
|
||||
fprintf(stderr, "\n"); \
|
||||
exit(1); \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* @brief Same as fatal() but takes an rd_kafka_error_t object, prints its
|
||||
* error message, destroys the object and then exits fatally.
|
||||
*/
|
||||
#define fatal_error(what,error) do { \
|
||||
fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", \
|
||||
what, rd_kafka_error_name(error), \
|
||||
rd_kafka_error_string(error)); \
|
||||
rd_kafka_error_destroy(error); \
|
||||
exit(1); \
|
||||
#define fatal_error(what, error) \
|
||||
do { \
|
||||
fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what, \
|
||||
rd_kafka_error_name(error), \
|
||||
rd_kafka_error_string(error)); \
|
||||
rd_kafka_error_destroy(error); \
|
||||
exit(1); \
|
||||
} while (0)
|
||||
|
||||
/**
|
||||
* @brief Signal termination of program
|
||||
*/
|
||||
static void stop (int sig) {
|
||||
static void stop(int sig) {
|
||||
run = 0;
|
||||
}
|
||||
|
||||
|
@@ -108,11 +110,10 @@ static void stop (int sig) {
 * In the case of transactional producing the delivery report callback is
 * mostly useful for logging the produce failures.
 */
static void dr_msg_cb (rd_kafka_t *rk,
                       const rd_kafka_message_t *rkmessage, void *opaque) {
static void
dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
        if (rkmessage->err)
                fprintf(stderr,
                        "%% Message delivery failed: %s\n",
                fprintf(stderr, "%% Message delivery failed: %s\n",
                        rd_kafka_err2str(rkmessage->err));

        /* The rkmessage is destroyed automatically by librdkafka */

@@ -123,18 +124,18 @@ static void dr_msg_cb (rd_kafka_t *rk,
/**
 * @brief Create a transactional producer.
 */
static rd_kafka_t *
create_transactional_producer (const char *brokers, const char *output_topic) {
static rd_kafka_t *create_transactional_producer(const char *brokers,
                                                 const char *output_topic) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        char errstr[256];
        rd_kafka_error_t *error;

        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "transactional.id",
                              "librdkafka_transactions_example",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
                              "librdkafka_transactions_example", errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK)
                fatal("Failed to configure producer: %s", errstr);

        /* This callback will be called once per message to indicate

@@ -162,7 +163,7 @@ create_transactional_producer (const char *brokers, const char *output_topic) {
 * @brief Rewind consumer's consume position to the last committed offsets
 * for the current assignment.
 */
static void rewind_consumer (rd_kafka_t *consumer) {
static void rewind_consumer(rd_kafka_t *consumer) {
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_resp_err_t err;
        rd_kafka_error_t *error;

@@ -184,18 +185,17 @@ static void rewind_consumer (rd_kafka_t *consumer) {
        }

        /* Note: Timeout must be lower than max.poll.interval.ms */
        err = rd_kafka_committed(consumer, offsets, 10*1000);
        err = rd_kafka_committed(consumer, offsets, 10 * 1000);
        if (err)
                fatal("Failed to acquire committed offsets: %s",
                      rd_kafka_err2str(err));

        /* Seek to committed offset, or start of partition if no
         * committed offset is available. */
        for (i = 0 ; i < offsets->cnt ; i++) {
        for (i = 0; i < offsets->cnt; i++) {
                /* No committed offset, start from beginning */
                if (offsets->elems[i].offset < 0)
                        offsets->elems[i].offset =
                                RD_KAFKA_OFFSET_BEGINNING;
                        offsets->elems[i].offset = RD_KAFKA_OFFSET_BEGINNING;
        }

        /* Perform seek */

@@ -211,8 +211,8 @@ static void rewind_consumer (rd_kafka_t *consumer) {
 * position where the transaction last started, i.e., the committed
 * consumer offset, then begin a new transaction.
 */
static void abort_transaction_and_rewind (rd_kafka_t *consumer,
                                          rd_kafka_t *producer) {
static void abort_transaction_and_rewind(rd_kafka_t *consumer,
                                         rd_kafka_t *producer) {
        rd_kafka_error_t *error;

        fprintf(stdout, "Aborting transaction and rewinding offsets\n");

@@ -238,8 +238,7 @@ static void abort_transaction_and_rewind (rd_kafka_t *consumer,
 * @returns 1 if transaction was successfully committed, or 0
 * if the current transaction was aborted.
 */
static int commit_transaction (rd_kafka_t *consumer,
                               rd_kafka_t *producer) {
static int commit_transaction(rd_kafka_t *consumer, rd_kafka_t *producer) {
        rd_kafka_error_t *error;
        rd_kafka_resp_err_t err;
        rd_kafka_consumer_group_metadata_t *cgmd;

@@ -263,7 +262,8 @@ static int commit_transaction (rd_kafka_t *consumer,
        if (err)
                fprintf(stderr,
                        "Failed to get consumer assignment to commit: "
                        "%s\n", rd_kafka_err2str(err));
                        "%s\n",
                        rd_kafka_err2str(err));
        else
                rd_kafka_topic_partition_list_destroy(offsets);


@@ -281,8 +281,8 @@ static int commit_transaction (rd_kafka_t *consumer,
                      rd_kafka_err2str(err));

        /* Send offsets to transaction coordinator */
        error = rd_kafka_send_offsets_to_transaction(producer,
                                                     offsets, cgmd, -1);
        error =
            rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd, -1);
        rd_kafka_consumer_group_metadata_destroy(cgmd);
        rd_kafka_topic_partition_list_destroy(offsets);
        if (error) {

@@ -334,8 +334,8 @@ static int commit_transaction (rd_kafka_t *consumer,
/**
 * @brief Commit the current transaction and start a new transaction.
 */
static void commit_transaction_and_start_new (rd_kafka_t *consumer,
                                              rd_kafka_t *producer) {
static void commit_transaction_and_start_new(rd_kafka_t *consumer,
                                             rd_kafka_t *producer) {
        rd_kafka_error_t *error;

        /* Commit transaction.

@@ -355,15 +355,14 @@ static void commit_transaction_and_start_new (rd_kafka_t *consumer,
 * when the consumer's partition assignment is assigned or revoked.
 */
static void
consumer_group_rebalance_cb (rd_kafka_t *consumer,
                             rd_kafka_resp_err_t err,
                             rd_kafka_topic_partition_list_t *partitions,
                             void *opaque) {
consumer_group_rebalance_cb(rd_kafka_t *consumer,
                            rd_kafka_resp_err_t err,
                            rd_kafka_topic_partition_list_t *partitions,
                            void *opaque) {
        rd_kafka_t *producer = (rd_kafka_t *)opaque;
        rd_kafka_error_t *error;

        switch (err)
        {
        switch (err) {
        case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
                fprintf(stdout,
                        "Consumer group rebalanced: "

@@ -418,31 +417,31 @@ consumer_group_rebalance_cb (rd_kafka_t *consumer,
/**
 * @brief Create the input consumer.
 */
static rd_kafka_t *create_input_consumer (const char *brokers,
                                          const char *input_topic,
                                          rd_kafka_t *producer) {
static rd_kafka_t *create_input_consumer(const char *brokers,
                                         const char *input_topic,
                                         rd_kafka_t *producer) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        rd_kafka_t *rk;
        char errstr[256];
        rd_kafka_resp_err_t err;
        rd_kafka_topic_partition_list_t *topics;

        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
        if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "group.id",
                              "librdkafka_transactions_example_group",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                              "librdkafka_transactions_example_group", errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "partition.assignment.strategy",
                              "cooperative-sticky",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "auto.offset.reset", "earliest",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
                              "cooperative-sticky", errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            /* The input consumer's offsets are explicitly committed with the
             * output producer's transaction using
             * rd_kafka_send_offsets_to_transaction(), so auto commits
             * must be disabled. */
            rd_kafka_conf_set(conf, "enable.auto.commit", "false",
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
            rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fatal("Failed to configure consumer: %s", errstr);
        }


@@ -476,8 +475,8 @@ static rd_kafka_t *create_input_consumer (const char *brokers,
        rd_kafka_topic_partition_list_destroy(topics);
        if (err) {
                rd_kafka_destroy(rk);
                fatal("Failed to subscribe to %s: %s\n",
                      input_topic, rd_kafka_err2str(err));
                fatal("Failed to subscribe to %s: %s\n", input_topic,
                      rd_kafka_err2str(err));
        }

        return rk;

@@ -488,16 +487,16 @@ static rd_kafka_t *create_input_consumer (const char *brokers,
 * @brief Find and parse next integer string in \p start.
 * @returns Pointer after found integer string, or NULL if not found.
 */
static const void *find_next_int (const void *start, const void *end,
                                  int *intp) {
static const void *
find_next_int(const void *start, const void *end, int *intp) {
        const char *p;
        int collecting = 0;
        int num = 0;
        int num = 0;

        for (p = (const char *)start ; p < (const char *)end ; p++) {
        for (p = (const char *)start; p < (const char *)end; p++) {
                if (isdigit((int)(*p))) {
                        collecting = 1;
                        num = (num * 10) + ((int)*p - ((int)'0'));
                        num = (num * 10) + ((int)*p - ((int)'0'));
                } else if (collecting)
                        break;
        }

@@ -517,10 +516,10 @@ static const void *find_next_int (const void *start, const void *end,
 * the output topic using the transactional producer for the given
 * inut partition.
 */
static void process_message (rd_kafka_t *consumer,
                             rd_kafka_t *producer,
                             const char *output_topic,
                             const rd_kafka_message_t *rkmessage) {
static void process_message(rd_kafka_t *consumer,
                            rd_kafka_t *producer,
                            const char *output_topic,
                            const rd_kafka_message_t *rkmessage) {
        int num;
        long unsigned sum = 0;
        const void *p, *end;

@@ -530,7 +529,7 @@ static void process_message (rd_kafka_t *consumer,
        if (rkmessage->len == 0)
                return; /* Ignore empty messages */

        p = rkmessage->payload;
        p = rkmessage->payload;
        end = ((const char *)rkmessage->payload) + rkmessage->len;

        /* Find and sum all numbers in the message */

@@ -545,17 +544,14 @@ static void process_message (rd_kafka_t *consumer,
        /* Emit output message on transactional producer */
        while (1) {
                err = rd_kafka_producev(
                        producer,
                        RD_KAFKA_V_TOPIC(output_topic),
                        /* Use same key as input message */
                        RD_KAFKA_V_KEY(rkmessage->key,
                                       rkmessage->key_len),
                        /* Value is the current sum of this
                         * transaction. */
                        RD_KAFKA_V_VALUE(value, strlen(value)),
                        /* Copy value since it is allocated on the stack */
                        RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
                        RD_KAFKA_V_END);
                    producer, RD_KAFKA_V_TOPIC(output_topic),
                    /* Use same key as input message */
                    RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len),
                    /* Value is the current sum of this
                     * transaction. */
                    RD_KAFKA_V_VALUE(value, strlen(value)),
                    /* Copy value since it is allocated on the stack */
                    RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);

                if (!err)
                        break;

@@ -576,9 +572,9 @@ static void process_message (rd_kafka_t *consumer,
}


int main (int argc, char **argv) {
int main(int argc, char **argv) {
        rd_kafka_t *producer, *consumer;
        int msgcnt = 0;
        int msgcnt = 0;
        time_t last_commit = 0;
        const char *brokers, *input_topic, *output_topic;
        rd_kafka_error_t *error;

@@ -593,8 +589,8 @@ int main (int argc, char **argv) {
                return 1;
        }

        brokers = argv[1];
        input_topic = argv[2];
        brokers = argv[1];
        input_topic = argv[2];
        output_topic = argv[3];

        /* Signal handler for clean shutdown */

@@ -611,8 +607,8 @@ int main (int argc, char **argv) {
                "Observe summed integers on output topic %s:\n"
                " $ examples/consumer %s just-watching %s\n"
                "\n",
                input_topic, brokers, input_topic,
                output_topic, brokers, output_topic);
                input_topic, brokers, input_topic, output_topic, brokers,
                output_topic);

        /* Begin transaction and start waiting for messages */
        error = rd_kafka_begin_transaction(producer);

@@ -628,12 +624,12 @@ int main (int argc, char **argv) {
                        printf("msgcnt %d, elapsed %d\n", msgcnt,
                               (int)(time(NULL) - last_commit));
                        commit_transaction_and_start_new(consumer, producer);
                        msgcnt = 0;
                        msgcnt = 0;
                        last_commit = time(NULL);
                }

                /* Wait for new mesages or error events */
                msg = rd_kafka_consumer_poll(consumer, 1000/*1 second*/);
                msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/);
                if (!msg)
                        continue; /* Poll timeout */

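The file restyled above is librdkafka's transactional consume-transform-produce example. As a reading aid, not part of this commit, here is a condensed sketch of one transaction cycle using only public librdkafka calls; error paths are trimmed and the helper name is illustrative:

/* Sketch: one consume-transform-produce transaction cycle.
 * Assumes `consumer` and `producer` were created as in
 * create_input_consumer()/create_transactional_producer(). */
static void run_one_transaction(rd_kafka_t *consumer, rd_kafka_t *producer,
                                const char *output_topic) {
        rd_kafka_error_t *error;
        rd_kafka_message_t *msg;
        rd_kafka_topic_partition_list_t *offsets;
        rd_kafka_consumer_group_metadata_t *cgmd;

        error = rd_kafka_begin_transaction(producer);
        if (error)
                fatal_error("begin_transaction", error);

        msg = rd_kafka_consumer_poll(consumer, 1000);
        if (msg && !msg->err) {
                /* ... transform and rd_kafka_producev() to output_topic ... */
                rd_kafka_message_destroy(msg);
        }

        /* Commit the consumed offsets atomically with the produced
         * messages as part of the transaction. */
        rd_kafka_assignment(consumer, &offsets);
        rd_kafka_position(consumer, offsets);
        cgmd = rd_kafka_consumer_group_metadata(consumer);
        error = rd_kafka_send_offsets_to_transaction(producer, offsets,
                                                     cgmd, -1);
        rd_kafka_consumer_group_metadata_destroy(cgmd);
        rd_kafka_topic_partition_list_destroy(offsets);
        if (error)
                fatal_error("send_offsets_to_transaction", error);

        error = rd_kafka_commit_transaction(producer, -1);
        if (error)
                fatal_error("commit_transaction", error);
}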
@@ -26,9 +26,9 @@
 * POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * Example of utilizing the Windows Certificate store with SSL.
 */
/**
 * Example of utilizing the Windows Certificate store with SSL.
 */

#include <iostream>
#include <string>

@@ -42,383 +42,354 @@
#include <windows.h>
#include <wincrypt.h>

/*
 * Typically include path in a real application would be
 * #include <librdkafka/rdkafkacpp.h>
 */
/*
 * Typically include path in a real application would be
 * #include <librdkafka/rdkafkacpp.h>
 */
#include "rdkafkacpp.h"


class ExampleStoreRetriever {
 public:
  ExampleStoreRetriever (std::string const &subject, std::string const &pass)
      : m_cert_subject(subject), m_password(pass),
        m_cert_store(NULL), m_cert_ctx(NULL) {
    load_certificate();
  }
 public:
  ExampleStoreRetriever(std::string const &subject, std::string const &pass) :
      m_cert_subject(subject),
      m_password(pass),
      m_cert_store(NULL),
      m_cert_ctx(NULL) {
    load_certificate();
  }

  ~ExampleStoreRetriever() {
    if (m_cert_ctx)
      CertFreeCertificateContext(m_cert_ctx);
  ~ExampleStoreRetriever() {
    if (m_cert_ctx)
      CertFreeCertificateContext(m_cert_ctx);

    if (m_cert_store)
      CertCloseStore(m_cert_store, 0);
  }
    if (m_cert_store)
      CertCloseStore(m_cert_store, 0);
  }

  /* @returns the public key in DER format */
  const std::vector<unsigned char> get_public_key () {
    std::vector<unsigned char> buf((size_t)m_cert_ctx->cbCertEncoded);
    buf.assign((const char *)m_cert_ctx->pbCertEncoded,
               (const char *)m_cert_ctx->pbCertEncoded +
               (size_t)m_cert_ctx->cbCertEncoded);
    return buf;
  }
  /* @returns the public key in DER format */
  const std::vector<unsigned char> get_public_key() {
    std::vector<unsigned char> buf((size_t)m_cert_ctx->cbCertEncoded);
    buf.assign((const char *)m_cert_ctx->pbCertEncoded,
               (const char *)m_cert_ctx->pbCertEncoded +
                   (size_t)m_cert_ctx->cbCertEncoded);
    return buf;
  }

  /* @returns the private key in PCKS#12 format */
  const std::vector<unsigned char> get_private_key () {
    ssize_t ret = 0;
    /*
     * In order to export the private key the certificate
     * must first be marked as exportable.
     *
     * Steps to export the certificate
     * 1) Create an in-memory cert store
     * 2) Add the certificate to the store
     * 3) Export the private key from the in-memory store
     */
  /* @returns the private key in PCKS#12 format */
  const std::vector<unsigned char> get_private_key() {
    ssize_t ret = 0;
    /*
     * In order to export the private key the certificate
     * must first be marked as exportable.
     *
     * Steps to export the certificate
     * 1) Create an in-memory cert store
     * 2) Add the certificate to the store
     * 3) Export the private key from the in-memory store
     */

    /* Create an in-memory cert store */
    HCERTSTORE hMemStore = CertOpenStore(CERT_STORE_PROV_MEMORY,
                                         0, NULL, 0, NULL);
    if (!hMemStore)
      throw "Failed to create in-memory cert store: " +
          GetErrorMsg(GetLastError());
    /* Create an in-memory cert store */
    HCERTSTORE hMemStore =
        CertOpenStore(CERT_STORE_PROV_MEMORY, 0, NULL, 0, NULL);
    if (!hMemStore)
      throw "Failed to create in-memory cert store: " +
          GetErrorMsg(GetLastError());

    /* Add certificate to store */
    if (!CertAddCertificateContextToStore(hMemStore,
                                          m_cert_ctx,
                                          CERT_STORE_ADD_USE_EXISTING,
                                          NULL))
      throw "Failed to add certificate to store: " +
          GetErrorMsg(GetLastError());
    /* Add certificate to store */
    if (!CertAddCertificateContextToStore(hMemStore, m_cert_ctx,
                                          CERT_STORE_ADD_USE_EXISTING, NULL))
      throw "Failed to add certificate to store: " +
          GetErrorMsg(GetLastError());

    /*
     * Export private key from cert
     */
    CRYPT_DATA_BLOB db = { NULL };
    /*
     * Export private key from cert
     */
    CRYPT_DATA_BLOB db = {NULL};

    std::wstring w_password(m_password.begin(), m_password.end());
    std::wstring w_password(m_password.begin(), m_password.end());

    /* Acquire output size */
    if (!PFXExportCertStoreEx(hMemStore,
                              &db,
                              w_password.c_str(),
                              NULL,
                              EXPORT_PRIVATE_KEYS |
                              REPORT_NO_PRIVATE_KEY |
                              REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
      throw "Failed to export private key: " + GetErrorMsg(GetLastError());
    /* Acquire output size */
    if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL,
                              EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY |
                                  REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
      throw "Failed to export private key: " + GetErrorMsg(GetLastError());

    std::vector<unsigned char> buf;
    std::vector<unsigned char> buf;

    buf.resize(db.cbData);
    db.pbData = &buf[0];
    buf.resize(db.cbData);
    db.pbData = &buf[0];

    /* Extract key */
    if (!PFXExportCertStoreEx(hMemStore,
                              &db,
                              w_password.c_str(),
                              NULL,
                              EXPORT_PRIVATE_KEYS |
                              REPORT_NO_PRIVATE_KEY |
                              REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
      throw "Failed to export private key (PFX): " + GetErrorMsg(GetLastError());
    /* Extract key */
    if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL,
                              EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY |
                                  REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
      throw "Failed to export private key (PFX): " +
          GetErrorMsg(GetLastError());

    CertCloseStore(hMemStore, 0);
    CertCloseStore(hMemStore, 0);

    buf.resize(db.cbData);
    buf.resize(db.cbData);

    return buf;
  }
    return buf;
  }

 private:
  void load_certificate () {
    if (m_cert_ctx)
      return;
  void load_certificate() {
    if (m_cert_ctx)
      return;

    m_cert_store = CertOpenStore(CERT_STORE_PROV_SYSTEM,
                                 0,
                                 NULL,
                                 CERT_SYSTEM_STORE_CURRENT_USER,
                                 L"My");
    if (!m_cert_store)
      throw "Failed to open cert store: " + GetErrorMsg(GetLastError());
    m_cert_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, NULL,
                                 CERT_SYSTEM_STORE_CURRENT_USER, L"My");
    if (!m_cert_store)
      throw "Failed to open cert store: " + GetErrorMsg(GetLastError());

    m_cert_ctx = CertFindCertificateInStore(m_cert_store,
                                            X509_ASN_ENCODING,
                                            0,
                                            CERT_FIND_SUBJECT_STR,
        /* should probally do a better std::string to std::wstring conversion */
                                            std::wstring(m_cert_subject.begin(),
                                                         m_cert_subject.end()).c_str(),
                                            NULL);
    if (!m_cert_ctx) {
      CertCloseStore(m_cert_store, 0);
      m_cert_store = NULL;
      throw "Certificate " + m_cert_subject + " not found in cert store: " + GetErrorMsg(GetLastError());
    }
  }
    m_cert_ctx = CertFindCertificateInStore(
        m_cert_store, X509_ASN_ENCODING, 0, CERT_FIND_SUBJECT_STR,
        /* should probally do a better std::string to std::wstring conversion */
        std::wstring(m_cert_subject.begin(), m_cert_subject.end()).c_str(),
        NULL);
    if (!m_cert_ctx) {
      CertCloseStore(m_cert_store, 0);
      m_cert_store = NULL;
      throw "Certificate " + m_cert_subject +
          " not found in cert store: " + GetErrorMsg(GetLastError());
    }
  }

  std::string GetErrorMsg (unsigned long error) {
    char *message = NULL;
    size_t ret = FormatMessageA(
        FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
        nullptr,
        error,
        0,
        (char*)&message,
        0,
        nullptr);
    if (ret == 0) {
      std::stringstream ss;
  std::string GetErrorMsg(unsigned long error) {
    char *message = NULL;
    size_t ret = FormatMessageA(
        FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, nullptr,
        error, 0, (char *)&message, 0, nullptr);
    if (ret == 0) {
      std::stringstream ss;

      ss << std::string("could not format message for ") << error;
      return ss.str();
    } else {
      std::string result(message, ret);
      LocalFree(message);
      return result;
    }
  }
      ss << std::string("could not format message for ") << error;
      return ss.str();
    } else {
      std::string result(message, ret);
      LocalFree(message);
      return result;
    }
  }

 private:
  std::string m_cert_subject;
  std::string m_password;
  PCCERT_CONTEXT m_cert_ctx;
  HCERTSTORE m_cert_store;
  std::string m_cert_subject;
  std::string m_password;
  PCCERT_CONTEXT m_cert_ctx;
  HCERTSTORE m_cert_store;
};


class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb {
  /* This SSL cert verification callback simply prints the certificates
   * in the certificate chain.
   * It provides no validation, everything is ok. */
 public:
  bool ssl_cert_verify_cb (const std::string &broker_name,
                           int32_t broker_id,
                           int *x509_error,
                           int depth,
                           const char *buf, size_t size,
                           std::string &errstr) {
    PCCERT_CONTEXT ctx = CertCreateCertificateContext(
        X509_ASN_ENCODING | PKCS_7_ASN_ENCODING,
        (const uint8_t*)buf, static_cast<unsigned long>(size));
  /* This SSL cert verification callback simply prints the certificates
   * in the certificate chain.
   * It provides no validation, everything is ok. */
 public:
  bool ssl_cert_verify_cb(const std::string &broker_name,
                          int32_t broker_id,
                          int *x509_error,
                          int depth,
                          const char *buf,
                          size_t size,
                          std::string &errstr) {
    PCCERT_CONTEXT ctx = CertCreateCertificateContext(
        X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, (const uint8_t *)buf,
        static_cast<unsigned long>(size));

    if (!ctx)
      std::cerr << "Failed to parse certificate" << std::endl;
    if (!ctx)
      std::cerr << "Failed to parse certificate" << std::endl;

    char subject[256] = "n/a";
    char issuer[256] = "n/a";
    char subject[256] = "n/a";
    char issuer[256] = "n/a";

    CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE,
                       0, NULL,
                       subject, sizeof(subject));
    CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, 0, NULL, subject,
                       sizeof(subject));

    CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE,
                       CERT_NAME_ISSUER_FLAG, NULL,
                       issuer, sizeof(issuer));
    CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE,
                       CERT_NAME_ISSUER_FLAG, NULL, issuer, sizeof(issuer));

    std::cerr << "Broker " << broker_name <<
        " (" << broker_id << "): " <<
        "certificate depth " << depth <<
        ", X509 error " << *x509_error <<
        ", subject " << subject <<
        ", issuer " << issuer << std::endl;
    std::cerr << "Broker " << broker_name << " (" << broker_id << "): "
              << "certificate depth " << depth << ", X509 error " << *x509_error
              << ", subject " << subject << ", issuer " << issuer << std::endl;

    if (ctx)
      CertFreeCertificateContext(ctx);
    if (ctx)
      CertFreeCertificateContext(ctx);

    return true;
  }
    return true;
  }
};


/**
 * @brief Print the brokers in the cluster.
 */
static void print_brokers (RdKafka::Handle *handle,
                           const RdKafka::Metadata *md) {
  std::cout << md->brokers()->size() << " broker(s) in cluster " <<
      handle->clusterid(0) << std::endl;

  /* Iterate brokers */
  RdKafka::Metadata::BrokerMetadataIterator ib;
  for (ib = md->brokers()->begin(); ib != md->brokers()->end(); ++ib)
    std::cout << " broker " << (*ib)->id() << " at "
              << (*ib)->host() << ":" << (*ib)->port() << std::endl;
 * @brief Print the brokers in the cluster.
 */
static void print_brokers(RdKafka::Handle *handle,
                          const RdKafka::Metadata *md) {
  std::cout << md->brokers()->size() << " broker(s) in cluster "
            << handle->clusterid(0) << std::endl;

  /* Iterate brokers */
  RdKafka::Metadata::BrokerMetadataIterator ib;
  for (ib = md->brokers()->begin(); ib != md->brokers()->end(); ++ib)
    std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":"
              << (*ib)->port() << std::endl;
}


int main (int argc, char **argv) {
  std::string brokers;
  std::string errstr;
  std::string cert_subject;
  std::string priv_key_pass;
int main(int argc, char **argv) {
  std::string brokers;
  std::string errstr;
  std::string cert_subject;
  std::string priv_key_pass;

  /*
   * Create configuration objects
   */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
  /*
   * Create configuration objects
   */
  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
  RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);

  int opt;
  while ((opt = getopt(argc, argv, "b:d:X:s:p:")) != -1) {
    switch (opt) {
    case 'b':
      brokers = optarg;
      break;
    case 'd':
      if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'X':
    {
      char *name, *val;
  int opt;
  while ((opt = getopt(argc, argv, "b:d:X:s:p:")) != -1) {
    switch (opt) {
    case 'b':
      brokers = optarg;
      break;
    case 'd':
      if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
      break;
    case 'X': {
      char *name, *val;

      name = optarg;
      if (!(val = strchr(name, '='))) {
        std::cerr << "%% Expected -X property=value, not " <<
            name << std::endl;
        exit(1);
      }
      name = optarg;
      if (!(val = strchr(name, '='))) {
        std::cerr << "%% Expected -X property=value, not " << name << std::endl;
        exit(1);
      }

      *val = '\0';
      val++;
      *val = '\0';
      val++;

      if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
    }
    break;
      if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
    } break;

    case 's':
      cert_subject = optarg;
      break;
    case 's':
      cert_subject = optarg;
      break;

    case 'p':
      priv_key_pass = optarg;
      if (conf->set("ssl.key.password", optarg, errstr) !=
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }
    case 'p':
      priv_key_pass = optarg;
      if (conf->set("ssl.key.password", optarg, errstr) !=
          RdKafka::Conf::CONF_OK) {
        std::cerr << errstr << std::endl;
        exit(1);
      }

      break;
      break;

    default:
      goto usage;
    }
  }
    default:
      goto usage;
    }
  }

  if (brokers.empty() || optind != argc) {
  usage:
    std::string features;
    conf->get("builtin.features", features);
    fprintf(stderr,
            "Usage: %s [options] -b <brokers> -s <cert-subject> -p <priv-key-password>\n"
            "\n"
            "Windows Certificate Store integration example.\n"
            "Use certlm.msc or mmc to view your certificates.\n"
            "\n"
            "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
            "\n"
            " Options:\n"
            " -b <brokers> Broker address\n"
            " -s <cert> The subject name of the client's SSL certificate to use\n"
            " -p <pass> The private key password\n"
            " -d [facs..] Enable debugging contexts: %s\n"
            " -X <prop=name> Set arbitrary librdkafka "
            "configuration property\n"
            "\n",
            argv[0],
            RdKafka::version_str().c_str(), RdKafka::version(),
            features.c_str(),
            RdKafka::get_debug_contexts().c_str());
    exit(1);
  }
  if (brokers.empty() || optind != argc) {
  usage:
    std::string features;
    conf->get("builtin.features", features);
    fprintf(stderr,
            "Usage: %s [options] -b <brokers> -s <cert-subject> -p "
            "<priv-key-password>\n"
            "\n"
            "Windows Certificate Store integration example.\n"
            "Use certlm.msc or mmc to view your certificates.\n"
            "\n"
            "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
            "\n"
            " Options:\n"
            " -b <brokers> Broker address\n"
            " -s <cert> The subject name of the client's SSL "
            "certificate to use\n"
            " -p <pass> The private key password\n"
            " -d [facs..] Enable debugging contexts: %s\n"
            " -X <prop=name> Set arbitrary librdkafka "
            "configuration property\n"
            "\n",
            argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
            features.c_str(), RdKafka::get_debug_contexts().c_str());
    exit(1);
  }

  if (!cert_subject.empty()) {
  if (!cert_subject.empty()) {
    try {
      /* Load certificates from the Windows store */
      ExampleStoreRetriever certStore(cert_subject, priv_key_pass);

    try {
      /* Load certificates from the Windows store */
      ExampleStoreRetriever certStore(cert_subject, priv_key_pass);
      std::vector<unsigned char> pubkey, privkey;

      std::vector<unsigned char> pubkey, privkey;
      pubkey = certStore.get_public_key();
      privkey = certStore.get_private_key();

      pubkey = certStore.get_public_key();
      privkey = certStore.get_private_key();
      if (conf->set_ssl_cert(RdKafka::CERT_PUBLIC_KEY, RdKafka::CERT_ENC_DER,
                             &pubkey[0], pubkey.size(),
                             errstr) != RdKafka::Conf::CONF_OK)
        throw "Failed to set public key: " + errstr;

      if (conf->set_ssl_cert(RdKafka::CERT_PUBLIC_KEY,
                             RdKafka::CERT_ENC_DER,
                             &pubkey[0], pubkey.size(),
                             errstr) !=
          RdKafka::Conf::CONF_OK)
        throw "Failed to set public key: " + errstr;
      if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY,
                             RdKafka::CERT_ENC_PKCS12, &privkey[0],
                             privkey.size(), errstr) != RdKafka::Conf::CONF_OK)
        throw "Failed to set private key: " + errstr;

      if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY,
                             RdKafka::CERT_ENC_PKCS12,
                             &privkey[0], privkey.size(),
                             errstr) !=
          RdKafka::Conf::CONF_OK)
        throw "Failed to set private key: " + errstr;

    } catch (const std::string &ex) {
      std::cerr << ex << std::endl;
      exit(1);
    }
  }
    } catch (const std::string &ex) {
      std::cerr << ex << std::endl;
      exit(1);
    }
  }


  /*
   * Set configuration properties
   */
  conf->set("bootstrap.servers", brokers, errstr);
  /*
   * Set configuration properties
   */
  conf->set("bootstrap.servers", brokers, errstr);

  /* We use the Certificiate verification callback to print the
   * certificate chains being used. */
  PrintingSSLVerifyCb ssl_verify_cb;
  /* We use the Certificiate verification callback to print the
   * certificate chains being used. */
  PrintingSSLVerifyCb ssl_verify_cb;

  if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) != RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }
  if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) !=
      RdKafka::Conf::CONF_OK) {
    std::cerr << errstr << std::endl;
    exit(1);
  }

  /* Create any type of client, producering being the cheapest. */
  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  if (!producer) {
    std::cerr << "Failed to create producer: " << errstr << std::endl;
    exit(1);
  }
  /* Create any type of client, producering being the cheapest. */
  RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
  if (!producer) {
    std::cerr << "Failed to create producer: " << errstr << std::endl;
    exit(1);
  }

  RdKafka::Metadata *metadata;
  RdKafka::Metadata *metadata;

  /* Fetch metadata */
  RdKafka::ErrorCode err = producer->metadata(false, NULL, &metadata, 5000);
  if (err != RdKafka::ERR_NO_ERROR) {
    std::cerr << "%% Failed to acquire metadata: "
              << RdKafka::err2str(err) << std::endl;
    exit(1);
  }
  /* Fetch metadata */
  RdKafka::ErrorCode err = producer->metadata(false, NULL, &metadata, 5000);
  if (err != RdKafka::ERR_NO_ERROR) {
    std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err)
              << std::endl;
    exit(1);
  }

  print_brokers(producer, metadata);
  print_brokers(producer, metadata);

  delete metadata;
  delete producer;
  delete metadata;
  delete producer;

  return 0;
  return 0;
}

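For readers using the C API instead of this C++ example: the same in-memory certificate setup is available through rd_kafka_conf_set_ssl_cert(). A hedged sketch, not part of this commit, assuming der_buf/der_size and pfx_buf/pfx_size hold the DER public key and PKCS#12 private key retrieved as above:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: feed store-retrieved keys to the C configuration object. */
static int set_certs(rd_kafka_conf_t *conf,
                     const void *der_buf, size_t der_size,
                     const void *pfx_buf, size_t pfx_size) {
        char errstr[512];

        if (rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_PUBLIC_KEY,
                                       RD_KAFKA_CERT_ENC_DER, der_buf,
                                       der_size, errstr,
                                       sizeof(errstr)) != RD_KAFKA_CONF_OK ||
            rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_PRIVATE_KEY,
                                       RD_KAFKA_CERT_ENC_PKCS12, pfx_buf,
                                       pfx_size, errstr,
                                       sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "Failed to set certificate: %s\n", errstr);
                return -1;
        }
        return 0;
}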
@@ -47,7 +47,9 @@ if __name__ == '__main__':
        if m:
            sym = m.group(2)
            # Ignore static (unused) functions
            m2 = re.match(r'(RD_UNUSED|__attribute__\(\(unused\)\))', last_line)
            m2 = re.match(
                r'(RD_UNUSED|__attribute__\(\(unused\)\))',
                last_line)
            if not m2:
                funcs.append(sym)
            last_line = ''

@@ -1,7 +1,7 @@
#include <inttypes.h>

int32_t foo (int32_t i) {
        return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
int32_t foo(int32_t i) {
        return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
}

int main() {

@@ -1,7 +1,7 @@
#include <inttypes.h>

int64_t foo (int64_t i) {
        return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
int64_t foo(int64_t i) {
        return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
}

int main() {

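These two snippets are configure-time compile probes: if they build, the toolchain supports the 32- and 64-bit __atomic intrinsics. A sketch of how such a probe result is typically consumed; the HAVE_ATOMICS_32 define and the wrapper name are assumptions for illustration, not part of this commit:

#include <inttypes.h>

/* Illustrative wrapper: picks the probed implementation at compile time.
 * HAVE_ATOMICS_32 stands in for the config.h define the probe produces. */
static inline int32_t example_atomic_add32(int32_t *valp, int32_t by) {
#ifdef HAVE_ATOMICS_32
        return __atomic_add_fetch(valp, by, __ATOMIC_SEQ_CST);
#else
        /* Legacy fallback; the __sync builtins are probed separately below. */
        return __sync_add_and_fetch(valp, by);
#endif
}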
@@ -1,14 +1,14 @@
#include <threads.h>

static int start_func (void *arg) {
        int iarg = *(int *)arg;
        return iarg;
static int start_func(void *arg) {
        int iarg = *(int *)arg;
        return iarg;
}

void main (void) {
        thrd_t thr;
        int arg = 1;
        if (thrd_create(&thr, start_func, (void *)&arg) != thrd_success) {
                ;
        }
void main(void) {
        thrd_t thr;
        int arg = 1;
        if (thrd_create(&thr, start_func, (void *)&arg) != thrd_success) {
                ;
        }
}

@@ -3,22 +3,25 @@
#define LONGx1 "8192"
#define LONGx2 "16384"
void main(void) {
        const char *n = "abcdefghijklmnopqrstuvwxyz0123456789";
        uint64_t c0 = 0, c1 = 1, c2 = 2;
        uint64_t s;
        uint32_t eax = 1, ecx;
        __asm__("cpuid"
                : "=c"(ecx)
                : "a"(eax)
                : "%ebx", "%edx");
        __asm__("crc32b\t" "(%1), %0"
                : "=r"(c0)
                : "r"(n), "0"(c0));
        __asm__("crc32q\t" "(%3), %0\n\t"
                "crc32q\t" LONGx1 "(%3), %1\n\t"
                "crc32q\t" LONGx2 "(%3), %2"
                : "=r"(c0), "=r"(c1), "=r"(c2)
                : "r"(n), "0"(c0), "1"(c1), "2"(c2));
        s = c0 + c1 + c2;
        printf("avoiding unused code removal by printing %d, %d, %d\n", (int)s, (int)eax, (int)ecx);
        const char *n = "abcdefghijklmnopqrstuvwxyz0123456789";
        uint64_t c0 = 0, c1 = 1, c2 = 2;
        uint64_t s;
        uint32_t eax = 1, ecx;
        __asm__("cpuid" : "=c"(ecx) : "a"(eax) : "%ebx", "%edx");
        __asm__(
            "crc32b\t"
            "(%1), %0"
            : "=r"(c0)
            : "r"(n), "0"(c0));
        __asm__(
            "crc32q\t"
            "(%3), %0\n\t"
            "crc32q\t" LONGx1
            "(%3), %1\n\t"
            "crc32q\t" LONGx2 "(%3), %2"
            : "=r"(c0), "=r"(c1), "=r"(c2)
            : "r"(n), "0"(c0), "1"(c1), "2"(c2));
        s = c0 + c1 + c2;
        printf("avoiding unused code removal by printing %d, %d, %d\n", (int)s,
               (int)eax, (int)ecx);
}

@@ -4,7 +4,7 @@
int main() {
        void *h;
        /* Try loading anything, we don't care if it works */
        h = dlopen("__nothing_rdkafka.so", RTLD_NOW|RTLD_LOCAL);
        h = dlopen("__nothing_rdkafka.so", RTLD_NOW | RTLD_LOCAL);
        if (h)
                dlclose(h);
        return 0;

@@ -1,6 +1,6 @@
#include <pthread.h>

int main() {
        pthread_setname_np("abc");
        return 0;
        pthread_setname_np("abc");
        return 0;
}

@@ -2,6 +2,6 @@
#include <pthread_np.h>

int main() {
        pthread_set_name_np(pthread_self(), "abc");
        return 0;
        pthread_set_name_np(pthread_self(), "abc");
        return 0;
}

@@ -1,5 +1,5 @@
#include <pthread.h>

int main() {
        return pthread_setname_np(pthread_self(), "abc");
        return pthread_setname_np(pthread_self(), "abc");
}

@@ -1,7 +1,7 @@
#include <stdlib.h>

int main() {
        unsigned int seed = 0xbeaf;
        (void)rand_r(&seed);
        return 0;
        unsigned int seed = 0xbeaf;
        (void)rand_r(&seed);
        return 0;
}

@@ -2,9 +2,9 @@
#include <regex.h>

int main() {
        regcomp(NULL, NULL, 0);
        regexec(NULL, NULL, 0, NULL, 0);
        regerror(0, NULL, NULL, 0);
        regfree(NULL);
        return 0;
        regcomp(NULL, NULL, 0);
        regexec(NULL, NULL, 0, NULL, 0);
        regerror(0, NULL, NULL, 0);
        regfree(NULL);
        return 0;
}

@@ -1,5 +1,5 @@
#include <string.h>

int main() {
        return strndup("hi", 2) ? 0 : 1;
        return strndup("hi", 2) ? 0 : 1;
}

@@ -1,7 +1,7 @@
#include <inttypes.h>

int32_t foo (int32_t i) {
        return __sync_add_and_fetch(&i, 1);
int32_t foo(int32_t i) {
        return __sync_add_and_fetch(&i, 1);
}

int main() {

@@ -1,7 +1,7 @@
#include <inttypes.h>

int64_t foo (int64_t i) {
        return __sync_add_and_fetch(&i, 1);
int64_t foo(int64_t i) {
        return __sync_add_and_fetch(&i, 1);
}

int main() {

@@ -2,7 +2,7 @@
#include <string.h>
#include <librdkafka/rdkafka.h>

int main (int argc, char **argv) {
int main(int argc, char **argv) {
        rd_kafka_conf_t *conf;
        char buf[512];
        size_t sz = sizeof(buf);

@@ -12,8 +12,8 @@ int main (int argc, char **argv) {
        int i;
        int failures = 0;

        printf("librdkafka %s (0x%x, define: 0x%x)\n",
               rd_kafka_version_str(), rd_kafka_version(), RD_KAFKA_VERSION);
        printf("librdkafka %s (0x%x, define: 0x%x)\n", rd_kafka_version_str(),
               rd_kafka_version(), RD_KAFKA_VERSION);

        if (argc > 1 && !(argc & 1)) {
                printf("Usage: %s [config.property config-value ..]\n",

@@ -22,7 +22,7 @@ int main (int argc, char **argv) {
        }

        conf = rd_kafka_conf_new();
        res = rd_kafka_conf_get(conf, "builtin.features", buf, &sz);
        res = rd_kafka_conf_get(conf, "builtin.features", buf, &sz);

        if (res != RD_KAFKA_CONF_OK) {
                printf("ERROR: conf_get failed: %d\n", res);

@@ -36,22 +36,22 @@ int main (int argc, char **argv) {
         * which will return an error if one or more flags are not enabled. */
        if (rd_kafka_conf_set(conf, "builtin.features", expected_features,
                              errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                printf("ERROR: expected at least features: %s\n"
                       "got error: %s\n",
                       expected_features, errstr);
                printf(
                    "ERROR: expected at least features: %s\n"
                    "got error: %s\n",
                    expected_features, errstr);
                failures++;
        }

        printf("all expected features matched: %s\n", expected_features);

        /* Apply config from argv key value pairs */
        for (i = 1 ; i+1 < argc ; i += 2) {
                printf("verifying config %s=%s\n", argv[i], argv[i+1]);
                if (rd_kafka_conf_set(conf, argv[i], argv[i+1],
                                      errstr, sizeof(errstr)) !=
                    RD_KAFKA_CONF_OK) {
                        printf("ERROR: failed to set %s=%s: %s\n",
                               argv[i], argv[i+1], errstr);
        for (i = 1; i + 1 < argc; i += 2) {
                printf("verifying config %s=%s\n", argv[i], argv[i + 1]);
                if (rd_kafka_conf_set(conf, argv[i], argv[i + 1], errstr,
                                      sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                        printf("ERROR: failed to set %s=%s: %s\n", argv[i],
                               argv[i + 1], errstr);
                        failures++;
                }
        }

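As the comment in the feature test above notes, setting builtin.features to a subset of flags acts as an assertion that those features were compiled in. Applications can use the same trick at runtime; a minimal sketch, not part of this commit:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: require that the linked librdkafka was built with SSL support. */
static int require_ssl_feature(void) {
        rd_kafka_conf_t *conf = rd_kafka_conf_new();
        char errstr[256];
        int ok = rd_kafka_conf_set(conf, "builtin.features", "ssl", errstr,
                                   sizeof(errstr)) == RD_KAFKA_CONF_OK;

        if (!ok)
                fprintf(stderr, "librdkafka built without ssl: %s\n", errstr);
        rd_kafka_conf_destroy(conf);
        return ok;
}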
@@ -30,6 +30,7 @@ import boto3
s3_bucket = 'librdkafka-ci-packages'
dry_run = False


class Artifact (object):
    def __init__(self, arts, path, info=None):
        self.path = path

@@ -49,7 +50,7 @@ class Artifact (object):
            # Assign the map and convert all keys to lower case
            self.info = {k.lower(): v for k, v in info.items()}
            # Rename values, e.g., 'plat':'linux' to 'plat':'debian'
            for k,v in self.info.items():
            for k, v in self.info.items():
                rdict = packaging.rename_vals.get(k, None)
                if rdict is not None:
                    self.info[k] = rdict.get(v, v)

@@ -64,11 +65,10 @@ class Artifact (object):
        self.arts = arts
        arts.artifacts.append(self)


    def __repr__(self):
        return self.path

    def __lt__ (self, other):
    def __lt__(self, other):
        return self.score < other.score

    def download(self):

@@ -136,7 +136,7 @@ class Artifacts (object):

        # Match tag or sha to gitref
        unmatched = list()
        for m,v in self.match.items():
        for m, v in self.match.items():
            if m not in info or info[m] != v:
                unmatched.append(m)


@@ -144,19 +144,22 @@ class Artifacts (object):
        #       common artifact.
        if info.get('p', '') != 'common' and len(unmatched) > 0:
            print(info)
            print('%s: %s did not match %s' % (info.get('p', None), folder, unmatched))
            print('%s: %s did not match %s' %
                  (info.get('p', None), folder, unmatched))
            return None

        return Artifact(self, path, info)


    def collect_s3(self):
        """ Collect and download build-artifacts from S3 based on git reference """
        print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket))
        print(
            'Collecting artifacts matching %s from S3 bucket %s' %
            (self.match, s3_bucket))
        self.s3 = boto3.resource('s3')
        self.s3_bucket = self.s3.Bucket(s3_bucket)
        self.s3_client = boto3.client('s3')
        for item in self.s3_client.list_objects(Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):
        for item in self.s3_client.list_objects(
                Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):
            self.collect_single(item.get('Key'))

        for a in self.artifacts:

@@ -165,9 +168,8 @@ class Artifacts (object):
    def collect_local(self, path, req_tag=True):
        """ Collect artifacts from a local directory possibly previously
            collected from s3 """
        for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:
        for f in [os.path.join(dp, f) for dp, dn,
                  filenames in os.walk(path) for f in filenames]:
            if not os.path.isfile(f):
                continue
            self.collect_single(f, req_tag)


@@ -59,7 +59,8 @@ def may_delete(path):
    if tag is None:
        return True

    if re.match(r'^v?\d+\.\d+\.\d+(-?RC\d+)?$', tag, flags=re.IGNORECASE) is None:
    if re.match(r'^v?\d+\.\d+\.\d+(-?RC\d+)?$', tag,
                flags=re.IGNORECASE) is None:
        return True

    return False

@@ -101,6 +102,7 @@ def chunk_list(lst, cnt):
    for i in range(0, len(lst), cnt):
        yield lst[i:i + cnt]


if __name__ == '__main__':

    parser = argparse.ArgumentParser()


@@ -42,12 +42,13 @@ magic_patterns = {
    ('win', 'x86', '.lib'): re.compile('current ar archive'),
    ('linux', 'x64', '.so'): re.compile('ELF 64.* x86-64'),
    ('linux', 'arm64', '.so'): re.compile('ELF 64.* ARM aarch64'),
    ('osx', 'x64', '.dylib'): re.compile('Mach-O 64.* x86_64') }
    ('osx', 'x64', '.dylib'): re.compile('Mach-O 64.* x86_64')}

magic = magic.Magic()


def magic_mismatch(path, a):
    """ Verify that the filemagic for \p path matches for artifact \p a.
    """ Verify that the filemagic for \\p path matches for artifact \\p a.
        Returns True if the magic file info does NOT match.
        Returns False if no matching is needed or the magic matches. """
    k = (a.info.get('plat', None), a.info.get('arch', None),

@@ -58,7 +59,8 @@ def magic_mismatch(path, a):

    minfo = magic.id_filename(path)
    if not pattern.match(minfo):
        print(f"Warning: {path} magic \"{minfo}\" does not match expected {pattern} for key {k}")
        print(
            f"Warning: {path} magic \"{minfo}\" does not match expected {pattern} for key {k}")
        return True

    return False

@@ -94,6 +96,7 @@ class MissingArtifactError(Exception):
s3_bucket = 'librdkafka-ci-packages'
dry_run = False


class Artifact (object):
    def __init__(self, arts, path, info=None):
        self.path = path

@@ -113,7 +116,7 @@ class Artifact (object):
            # Assign the map and convert all keys to lower case
            self.info = {k.lower(): v for k, v in info.items()}
            # Rename values, e.g., 'plat':'linux' to 'plat':'debian'
            for k,v in self.info.items():
            for k, v in self.info.items():
                rdict = rename_vals.get(k, None)
                if rdict is not None:
                    self.info[k] = rdict.get(v, v)

@@ -128,11 +131,10 @@ class Artifact (object):
        self.arts = arts
        arts.artifacts.append(self)


    def __repr__(self):
        return self.path

    def __lt__ (self, other):
    def __lt__(self, other):
        return self.score < other.score

    def download(self):

@@ -162,7 +164,6 @@ class Artifacts (object):
            if not dry_run:
                os.makedirs(self.dlpath, 0o755)


    def collect_single(self, path, req_tag=True):
        """ Collect single artifact, be it in S3 or locally.
        :param: path string: S3 or local (relative) path

@@ -201,7 +202,7 @@ class Artifacts (object):

        # Perform matching
        unmatched = list()
        for m,v in self.match.items():
        for m, v in self.match.items():
            if m not in info or info[m] != v:
                unmatched.append(m)


@@ -213,10 +214,11 @@ class Artifacts (object):

        return Artifact(self, path, info)


    def collect_s3(self):
        """ Collect and download build-artifacts from S3 based on git reference """
        print('Collecting artifacts matching %s from S3 bucket %s' % (self.match, s3_bucket))
        print(
            'Collecting artifacts matching %s from S3 bucket %s' %
            (self.match, s3_bucket))
        self.s3 = boto3.resource('s3')
        self.s3_bucket = self.s3.Bucket(s3_bucket)
        self.s3_client = boto3.client('s3')

@@ -248,7 +250,8 @@ class Artifacts (object):
    def collect_local(self, path, req_tag=True):
        """ Collect artifacts from a local directory possibly previously
            collected from s3 """
        for f in [os.path.join(dp, f) for dp, dn, filenames in os.walk(path) for f in filenames]:
        for f in [os.path.join(dp, f) for dp, dn,
                  filenames in os.walk(path) for f in filenames]:
            if not os.path.isfile(f):
                continue
            self.collect_single(f, req_tag)

@@ -259,7 +262,7 @@ class Package (object):
        A Package is a working container for one or more output
        packages for a specific package type (e.g., nuget) """

    def __init__ (self, version, arts, ptype):
    def __init__(self, version, arts, ptype):
        super(Package, self).__init__()
        self.version = version
        self.arts = arts

@@ -271,22 +274,22 @@ class Package (object):
        self.kv = {'version': version}
        self.files = dict()

    def add_file (self, file):
    def add_file(self, file):
        self.files[file] = True

    def build (self):
    def build(self):
        """ Build package output(s), return a list of paths to built packages """
        raise NotImplementedError

    def cleanup (self):
    def cleanup(self):
        """ Optional cleanup routine for removing temporary files, etc. """
        pass

    def verify (self, path):
    def verify(self, path):
        """ Optional post-build package verifier """
        pass

    def render (self, fname, destpath='.'):
    def render(self, fname, destpath='.'):
        """ Render template in file fname and save to destpath/fname,
            where destpath is relative to stpath """


@@ -302,8 +305,7 @@ class Package (object):

        self.add_file(outf)


    def copy_template (self, fname, target_fname=None, destpath='.'):
    def copy_template(self, fname, target_fname=None, destpath='.'):
        """ Copy template file to destpath/fname
            where destpath is relative to stpath """


@@ -322,16 +324,17 @@ class Package (object):
class NugetPackage (Package):
    """ All platforms, archs, et.al, are bundled into one set of
        NuGet output packages: "main", redist and symbols """
    def __init__ (self, version, arts):

    def __init__(self, version, arts):
        if version.startswith('v'):
            version = version[1:] # Strip v prefix
            version = version[1:]  # Strip v prefix
        super(NugetPackage, self).__init__(version, arts, "nuget")

    def cleanup(self):
        if os.path.isdir(self.stpath):
            shutil.rmtree(self.stpath)

    def build (self, buildtype):
    def build(self, buildtype):
        """ Build single NuGet package for all its artifacts. """

        # NuGet removes the prefixing v from the version.

@@ -339,7 +342,6 @@ class NugetPackage (Package):
        if vless_version[0] == 'v':
            vless_version = vless_version[1:]


        self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype,
                                       dir=".")

@@ -361,61 +363,159 @@ class NugetPackage (Package):
                a.info['toolset'] = 'v140'

        mappings = [
            [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafka.h', 'build/native/include/librdkafka/rdkafka.h'],
            [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafkacpp.h', 'build/native/include/librdkafka/rdkafkacpp.h'],
            [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './include/librdkafka/rdkafka_mock.h', 'build/native/include/librdkafka/rdkafka_mock.h'],
            [{'arch': 'x64',
              'plat': 'linux',
              'lnk': 'std',
              'fname_glob': 'librdkafka-gcc.tar.gz'},
             './include/librdkafka/rdkafka.h',
             'build/native/include/librdkafka/rdkafka.h'],
            [{'arch': 'x64',
              'plat': 'linux',
              'lnk': 'std',
              'fname_glob': 'librdkafka-gcc.tar.gz'},
             './include/librdkafka/rdkafkacpp.h',
             'build/native/include/librdkafka/rdkafkacpp.h'],
            [{'arch': 'x64',
              'plat': 'linux',
              'lnk': 'std',
              'fname_glob': 'librdkafka-gcc.tar.gz'},
             './include/librdkafka/rdkafka_mock.h',
             'build/native/include/librdkafka/rdkafka_mock.h'],

            [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './share/doc/librdkafka/README.md', 'README.md'],
            [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './share/doc/librdkafka/CONFIGURATION.md', 'CONFIGURATION.md'],
            [{'arch': 'x64',
              'plat': 'linux',
              'lnk': 'std',
              'fname_glob': 'librdkafka-gcc.tar.gz'},
             './share/doc/librdkafka/README.md',
             'README.md'],
            [{'arch': 'x64',
              'plat': 'linux',
              'lnk': 'std',
              'fname_glob': 'librdkafka-gcc.tar.gz'},
             './share/doc/librdkafka/CONFIGURATION.md',
             'CONFIGURATION.md'],
            # The above x64-linux gcc job generates a bad LICENSES.txt file,
            # so we use the one from the osx job instead.
            [{'arch': 'x64', 'plat': 'osx', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './share/doc/librdkafka/LICENSES.txt', 'LICENSES.txt'],
            [{'arch': 'x64',
              'plat': 'osx',
              'lnk': 'std',
              'fname_glob': 'librdkafka-gcc.tar.gz'},
             './share/doc/librdkafka/LICENSES.txt',
             'LICENSES.txt'],

            # Travis OSX build
            [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka.dylib', 'runtimes/osx-x64/native/librdkafka.dylib'],
            [{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'},
             './lib/librdkafka.dylib', 'runtimes/osx-x64/native/librdkafka.dylib'],
            # Travis Manylinux build
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-manylinux*x86_64.tgz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/centos6-librdkafka.so'],
            [{'arch': 'x64',
              'plat': 'linux',
              'fname_glob': 'librdkafka-manylinux*x86_64.tgz'},
             './lib/librdkafka.so.1',
             'runtimes/linux-x64/native/centos6-librdkafka.so'],
            # Travis Ubuntu 14.04 build
            [{'arch': 'x64', 'plat': 'linux', 'lnk': 'std', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/librdkafka.so.1', 'runtimes/linux-x64/native/librdkafka.so'],
            [{'arch': 'x64',
              'plat': 'linux',
              'lnk': 'std',
              'fname_glob': 'librdkafka-gcc.tar.gz'},
             './lib/librdkafka.so.1',
             'runtimes/linux-x64/native/librdkafka.so'],
            # Travis CentOS 7 RPM build
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka1*el7.x86_64.rpm'}, './usr/lib64/librdkafka.so.1', 'runtimes/linux-x64/native/centos7-librdkafka.so'],
            [{'arch': 'x64',
              'plat': 'linux',
              'fname_glob': 'librdkafka1*el7.x86_64.rpm'},
             './usr/lib64/librdkafka.so.1',
             'runtimes/linux-x64/native/centos7-librdkafka.so'],
            # Travis Alpine build
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'}, 'librdkafka.so.1', 'runtimes/linux-x64/native/alpine-librdkafka.so'],
            [{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'},
             'librdkafka.so.1', 'runtimes/linux-x64/native/alpine-librdkafka.so'],
            # Travis arm64 Linux build
            [{'arch': 'arm64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/librdkafka.so.1', 'runtimes/linux-arm64/native/librdkafka.so'],
            [{'arch': 'arm64', 'plat': 'linux', 'fname_glob': 'librdkafka-gcc.tar.gz'},
             './lib/librdkafka.so.1', 'runtimes/linux-arm64/native/librdkafka.so'],

            # Common Win runtime
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'vcruntime140.dll', 'runtimes/win-x64/native/vcruntime140.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'msvcp140.dll', 'runtimes/win-x64/native/msvcp140.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'},
             'vcruntime140.dll', 'runtimes/win-x64/native/vcruntime140.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'msvcr140.zip'},
             'msvcp140.dll', 'runtimes/win-x64/native/msvcp140.dll'],
            # matches librdkafka.redist.{VER}.nupkg
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/librdkafka.dll', 'runtimes/win-x64/native/librdkafka.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/librdkafkacpp.dll', 'runtimes/win-x64/native/librdkafkacpp.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/libcrypto-1_1-x64.dll', 'runtimes/win-x64/native/libcrypto-1_1-x64.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/libssl-1_1-x64.dll', 'runtimes/win-x64/native/libssl-1_1-x64.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/zlib1.dll', 'runtimes/win-x64/native/zlib1.dll'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/x64/Release/zstd.dll', 'runtimes/win-x64/native/zstd.dll'],
            [{'arch': 'x64',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/x64/Release/librdkafka.dll',
             'runtimes/win-x64/native/librdkafka.dll'],
            [{'arch': 'x64',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/x64/Release/librdkafkacpp.dll',
             'runtimes/win-x64/native/librdkafkacpp.dll'],
            [{'arch': 'x64',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/x64/Release/libcrypto-1_1-x64.dll',
             'runtimes/win-x64/native/libcrypto-1_1-x64.dll'],
            [{'arch': 'x64',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/x64/Release/libssl-1_1-x64.dll',
             'runtimes/win-x64/native/libssl-1_1-x64.dll'],
            [{'arch': 'x64',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/x64/Release/zlib1.dll',
             'runtimes/win-x64/native/zlib1.dll'],
            [{'arch': 'x64',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/x64/Release/zstd.dll',
             'runtimes/win-x64/native/zstd.dll'],
            # matches librdkafka.{VER}.nupkg
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']},
             'build/native/lib/v140/x64/Release/librdkafka.lib', 'build/native/lib/win/x64/win-x64-Release/v140/librdkafka.lib'],
            [{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']},
             'build/native/lib/v140/x64/Release/librdkafkacpp.lib', 'build/native/lib/win/x64/win-x64-Release/v140/librdkafkacpp.lib'],

            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'vcruntime140.dll', 'runtimes/win-x86/native/vcruntime140.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'}, 'msvcp140.dll', 'runtimes/win-x86/native/msvcp140.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'},
             'vcruntime140.dll', 'runtimes/win-x86/native/vcruntime140.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'msvcr140.zip'},
             'msvcp140.dll', 'runtimes/win-x86/native/msvcp140.dll'],
            # matches librdkafka.redist.{VER}.nupkg
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/librdkafka.dll', 'runtimes/win-x86/native/librdkafka.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/librdkafkacpp.dll', 'runtimes/win-x86/native/librdkafkacpp.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/libcrypto-1_1.dll', 'runtimes/win-x86/native/libcrypto-1_1.dll'],
            [{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/libssl-1_1.dll', 'runtimes/win-x86/native/libssl-1_1.dll'],
            [{'arch': 'x86',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/Win32/Release/librdkafka.dll',
             'runtimes/win-x86/native/librdkafka.dll'],
            [{'arch': 'x86',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/Win32/Release/librdkafkacpp.dll',
             'runtimes/win-x86/native/librdkafkacpp.dll'],
            [{'arch': 'x86',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/Win32/Release/libcrypto-1_1.dll',
             'runtimes/win-x86/native/libcrypto-1_1.dll'],
            [{'arch': 'x86',
              'plat': 'win',
              'fname_glob': 'librdkafka.redist*'},
             'build/native/bin/v140/Win32/Release/libssl-1_1.dll',
             'runtimes/win-x86/native/libssl-1_1.dll'],
|
||||
|
||||
[{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/zlib1.dll', 'runtimes/win-x86/native/zlib1.dll'],
|
||||
[{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka.redist*'}, 'build/native/bin/v140/Win32/Release/zstd.dll', 'runtimes/win-x86/native/zstd.dll'],
|
||||
[{'arch': 'x86',
|
||||
'plat': 'win',
|
||||
'fname_glob': 'librdkafka.redist*'},
|
||||
'build/native/bin/v140/Win32/Release/zlib1.dll',
|
||||
'runtimes/win-x86/native/zlib1.dll'],
|
||||
[{'arch': 'x86',
|
||||
'plat': 'win',
|
||||
'fname_glob': 'librdkafka.redist*'},
|
||||
'build/native/bin/v140/Win32/Release/zstd.dll',
|
||||
'runtimes/win-x86/native/zstd.dll'],
|
||||
|
||||
# matches librdkafka.{VER}.nupkg
|
||||
[{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']},
|
||||
'build/native/lib/v140/Win32/Release/librdkafka.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafka.lib'],
|
||||
'build/native/lib/v140/Win32/Release/librdkafka.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafka.lib'],
|
||||
[{'arch': 'x86', 'plat': 'win', 'fname_glob': 'librdkafka*.nupkg', 'fname_excludes': ['redist', 'symbols']},
|
||||
'build/native/lib/v140/Win32/Release/librdkafkacpp.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafkacpp.lib']
|
||||
'build/native/lib/v140/Win32/Release/librdkafkacpp.lib', 'build/native/lib/win/x86/win-x86-Release/v140/librdkafkacpp.lib']
|
||||
]
|
||||
|
||||
for m in mappings:
|
||||
|
@ -454,7 +554,10 @@ class NugetPackage (Package):
|
|||
except KeyError as e:
|
||||
continue
|
||||
except Exception as e:
|
||||
raise Exception('file not found in archive %s: %s. Files in archive are: %s' % (a.lpath, e, zfile.ZFile(a.lpath).getnames()))
|
||||
raise Exception(
|
||||
'file not found in archive %s: %s. Files in archive are: %s' %
|
||||
(a.lpath, e, zfile.ZFile(
|
||||
a.lpath).getnames()))
|
||||
|
||||
# Check that the file type matches.
|
||||
if magic_mismatch(outf, a):
|
||||
|
@ -465,21 +568,22 @@ class NugetPackage (Package):
|
|||
break
|
||||
|
||||
if not found:
|
||||
raise MissingArtifactError('unable to find artifact with tags %s matching "%s" for file "%s"' % (str(attributes), fname_glob, member))
|
||||
|
||||
raise MissingArtifactError(
|
||||
'unable to find artifact with tags %s matching "%s" for file "%s"' %
|
||||
(str(attributes), fname_glob, member))
|
||||
|
||||
print('Tree extracted to %s' % self.stpath)
|
||||
|
||||
# After creating a bare-bone nupkg layout containing the artifacts
|
||||
# and some spec and props files, call the 'nuget' utility to
|
||||
# make a proper nupkg of it (with all the metadata files).
|
||||
subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % \
|
||||
subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" %
|
||||
(os.path.join(self.stpath, 'librdkafka.redist.nuspec'),
|
||||
self.stpath), shell=True)
|
||||
|
||||
return 'librdkafka.redist.%s.nupkg' % vless_version
|
||||
|
||||
def verify (self, path):
|
||||
def verify(self, path):
|
||||
""" Verify package """
|
||||
expect = [
|
||||
"librdkafka.redist.nuspec",
|
||||
|
@ -529,7 +633,9 @@ class NugetPackage (Package):
|
|||
missing = [x for x in expect if x not in pkgd]
|
||||
|
||||
if len(missing) > 0:
|
||||
print('Missing files in package %s:\n%s' % (path, '\n'.join(missing)))
|
||||
print(
|
||||
'Missing files in package %s:\n%s' %
|
||||
(path, '\n'.join(missing)))
|
||||
return False
|
||||
|
||||
print('OK - %d expected files found' % len(expect))
|
||||
|
@ -542,40 +648,71 @@ class StaticPackage (Package):
|
|||
# Only match statically linked artifacts
|
||||
match = {'lnk': 'static'}
|
||||
|
||||
def __init__ (self, version, arts):
|
||||
def __init__(self, version, arts):
|
||||
super(StaticPackage, self).__init__(version, arts, "static")
|
||||
|
||||
def cleanup(self):
|
||||
if os.path.isdir(self.stpath):
|
||||
shutil.rmtree(self.stpath)
|
||||
|
||||
def build (self, buildtype):
|
||||
def build(self, buildtype):
|
||||
""" Build single package for all artifacts. """
|
||||
|
||||
self.stpath = tempfile.mkdtemp(prefix="out-", dir=".")
|
||||
|
||||
mappings = [
|
||||
# rdkafka.h
|
||||
[{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-clang.tar.gz'}, './include/librdkafka/rdkafka.h', 'rdkafka.h'],
|
||||
[{'arch': 'x64',
|
||||
'plat': 'linux',
|
||||
'fname_glob': 'librdkafka-clang.tar.gz'},
|
||||
'./include/librdkafka/rdkafka.h',
|
||||
'rdkafka.h'],
|
||||
|
||||
# LICENSES.txt
|
||||
[{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './share/doc/librdkafka/LICENSES.txt', 'LICENSES.txt'],
|
||||
[{'arch': 'x64',
|
||||
'plat': 'osx',
|
||||
'fname_glob': 'librdkafka-clang.tar.gz'},
|
||||
'./share/doc/librdkafka/LICENSES.txt',
|
||||
'LICENSES.txt'],
|
||||
|
||||
# glibc linux static lib and pkg-config file
|
||||
[{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka-static.a', 'librdkafka_glibc_linux.a'],
|
||||
[{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_glibc_linux.pc'],
|
||||
[{'arch': 'x64',
|
||||
'plat': 'linux',
|
||||
'fname_glob': 'librdkafka-clang.tar.gz'},
|
||||
'./lib/librdkafka-static.a',
|
||||
'librdkafka_glibc_linux.a'],
|
||||
[{'arch': 'x64',
|
||||
'plat': 'linux',
|
||||
'fname_glob': 'librdkafka-clang.tar.gz'},
|
||||
'./lib/pkgconfig/rdkafka-static.pc',
|
||||
'librdkafka_glibc_linux.pc'],
|
||||
|
||||
# musl linux static lib and pkg-config file
|
||||
[{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'}, 'librdkafka-static.a', 'librdkafka_musl_linux.a'],
|
||||
[{'arch': 'x64', 'plat': 'linux', 'fname_glob': 'alpine-librdkafka.tgz'}, 'rdkafka-static.pc', 'librdkafka_musl_linux.pc'],
|
||||
[{'arch': 'x64',
|
||||
'plat': 'linux',
|
||||
'fname_glob': 'alpine-librdkafka.tgz'},
|
||||
'librdkafka-static.a',
|
||||
'librdkafka_musl_linux.a'],
|
||||
[{'arch': 'x64',
|
||||
'plat': 'linux',
|
||||
'fname_glob': 'alpine-librdkafka.tgz'},
|
||||
'rdkafka-static.pc',
|
||||
'librdkafka_musl_linux.pc'],
|
||||
|
||||
# osx static lib and pkg-config file
|
||||
[{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/librdkafka-static.a', 'librdkafka_darwin.a'],
|
||||
[{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'}, './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_darwin.pc'],
|
||||
[{'arch': 'x64',
|
||||
'plat': 'osx',
|
||||
'fname_glob': 'librdkafka-clang.tar.gz'},
|
||||
'./lib/librdkafka-static.a',
|
||||
'librdkafka_darwin.a'],
|
||||
[{'arch': 'x64', 'plat': 'osx', 'fname_glob': 'librdkafka-clang.tar.gz'},
|
||||
'./lib/pkgconfig/rdkafka-static.pc', 'librdkafka_darwin.pc'],
|
||||
|
||||
# win static lib and pkg-config file (mingw)
|
||||
[{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/librdkafka-static.a', 'librdkafka_windows.a'],
|
||||
[{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'}, './lib/pkgconfig/rdkafka-static.pc', 'librdkafka_windows.pc'],
|
||||
[{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'},
|
||||
'./lib/librdkafka-static.a', 'librdkafka_windows.a'],
|
||||
[{'arch': 'x64', 'plat': 'win', 'fname_glob': 'librdkafka-gcc.tar.gz'},
|
||||
'./lib/pkgconfig/rdkafka-static.pc', 'librdkafka_windows.pc'],
|
||||
]
|
||||
|
||||
for m in mappings:
|
||||
|
@ -610,28 +747,32 @@ class StaticPackage (Package):
|
|||
break
|
||||
|
||||
if artifact is None:
|
||||
raise MissingArtifactError('unable to find artifact with tags %s matching "%s"' % (str(attributes), fname_glob))
|
||||
raise MissingArtifactError(
|
||||
'unable to find artifact with tags %s matching "%s"' %
|
||||
(str(attributes), fname_glob))
|
||||
|
||||
outf = os.path.join(self.stpath, m[2])
|
||||
member = m[1]
|
||||
try:
|
||||
zfile.ZFile.extract(artifact.lpath, member, outf)
|
||||
except KeyError as e:
|
||||
raise Exception('file not found in archive %s: %s. Files in archive are: %s' % (artifact.lpath, e, zfile.ZFile(artifact.lpath).getnames()))
|
||||
raise Exception(
|
||||
'file not found in archive %s: %s. Files in archive are: %s' %
|
||||
(artifact.lpath, e, zfile.ZFile(
|
||||
artifact.lpath).getnames()))
|
||||
|
||||
print('Tree extracted to %s' % self.stpath)
|
||||
|
||||
# After creating a bare-bone layout, create a tarball.
|
||||
outname = "librdkafka-static-bundle-%s.tgz" % self.version
|
||||
print('Writing to %s' % outname)
|
||||
subprocess.check_call("(cd %s && tar cvzf ../%s .)" % \
|
||||
subprocess.check_call("(cd %s && tar cvzf ../%s .)" %
|
||||
(self.stpath, outname),
|
||||
shell=True)
|
||||
|
||||
return outname
|
||||
|
||||
|
||||
def verify (self, path):
|
||||
def verify(self, path):
|
||||
""" Verify package """
|
||||
expect = [
|
||||
"./rdkafka.h",
|
||||
|
@ -654,7 +795,9 @@ class StaticPackage (Package):
|
|||
missing = [x for x in expect if x not in pkgd]
|
||||
|
||||
if len(missing) > 0:
|
||||
print('Missing files in package %s:\n%s' % (path, '\n'.join(missing)))
|
||||
print(
|
||||
'Missing files in package %s:\n%s' %
|
||||
(path, '\n'.join(missing)))
|
||||
return False
|
||||
else:
|
||||
print('OK - %d expected files found' % len(expect))
|
||||
|
|
|
@@ -16,22 +16,44 @@ import packaging
dry_run = False



if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument("--no-s3", help="Don't collect from S3", action="store_true")
    parser.add_argument(
        "--no-s3",
        help="Don't collect from S3",
        action="store_true")
    parser.add_argument("--dry-run",
                        help="Locate artifacts but don't actually download or do anything",
                        action="store_true")
    parser.add_argument("--directory", help="Download directory (default: dl-<tag>)", default=None)
    parser.add_argument("--no-cleanup", help="Don't clean up temporary folders", action="store_true")
    parser.add_argument("--sha", help="Also match on this git sha1", default=None)
    parser.add_argument("--nuget-version", help="The nuget package version (defaults to same as tag)", default=None)
    parser.add_argument(
        "--directory",
        help="Download directory (default: dl-<tag>)",
        default=None)
    parser.add_argument(
        "--no-cleanup",
        help="Don't clean up temporary folders",
        action="store_true")
    parser.add_argument(
        "--sha",
        help="Also match on this git sha1",
        default=None)
    parser.add_argument(
        "--nuget-version",
        help="The nuget package version (defaults to same as tag)",
        default=None)
    parser.add_argument("--upload", help="Upload package to after building, using provided NuGet API key (either file or the key itself)", default=None,
                        type=str)
    parser.add_argument("--class", help="Packaging class (see packaging.py)", default="NugetPackage", dest="pkgclass")
    parser.add_argument("--retries", help="Number of retries to collect artifacts", default=0, type=int)
    parser.add_argument(
        "--class",
        help="Packaging class (see packaging.py)",
        default="NugetPackage",
        dest="pkgclass")
    parser.add_argument(
        "--retries",
        help="Number of retries to collect artifacts",
        default=0,
        type=int)
    parser.add_argument("tag", help="Git tag to collect")

    args = parser.parse_args()

@@ -48,7 +70,7 @@ if __name__ == '__main__':

    try:
        match.update(getattr(pkgclass, 'match'))
    except:
    except BaseException:
        pass

    arts = packaging.Artifacts(match, args.directory)

@@ -119,5 +141,6 @@ if __name__ == '__main__':

    print('Uploading %s to NuGet' % pkgfile)
    r = os.system("./push-to-nuget.sh '%s' %s" % (nuget_key, pkgfile))
    assert int(r) == 0, "NuGet upload failed with exit code {}, see previous errors".format(r)
    assert int(
        r) == 0, "NuGet upload failed with exit code {}, see previous errors".format(r)
    print('%s successfully uploaded to NuGet' % pkgfile)

@@ -5,6 +5,7 @@ import tarfile
import zipfile
import rpmfile


class ZFile (object):
    def __init__(self, path, mode='r', ext=None):
        super(ZFile, self).__init__()

@@ -49,8 +50,8 @@ class ZFile (object):
        return dict()

    def extract_to(self, member, path):
        """ Extract compress file's \p member to \p path
        If \p path is a directory the member's basename will used as
        """ Extract compress file's \\p member to \\p path
        If \\p path is a directory the member's basename will used as
        filename, otherwise path is considered the full file path name. """

        if not os.path.isdir(os.path.dirname(path)):

@@ -66,7 +67,7 @@ class ZFile (object):
        zf = self.f.extractfile(member)

        while True:
            b = zf.read(1024*100)
            b = zf.read(1024 * 100)
            if b:
                of.write(b)
            else:

@@ -74,9 +75,8 @@ class ZFile (object):

        zf.close()


    @classmethod
    def extract (cls, zpath, member, outpath):
    def extract(cls, zpath, member, outpath):
        """
        Extract file member (full internal path) to output from
        archive zpath.

@@ -85,11 +85,10 @@ class ZFile (object):
        with ZFile(zpath) as zf:
            zf.extract_to(member, outpath)


    @classmethod
    def compress (cls, zpath, paths, stripcnt=0, ext=None):
    def compress(cls, zpath, paths, stripcnt=0, ext=None):
        """
        Create new compressed file \p zpath containing files in \p paths
        Create new compressed file \\p zpath containing files in \\p paths
        """

        with ZFile(zpath, 'w', ext=ext) as zf:

@@ -97,4 +96,3 @@ class ZFile (object):
            outp = os.path.sep.join(p.split(os.path.sep)[stripcnt:])
            print('zip %s to %s (stripcnt %d)' % (p, outp, stripcnt))
            zf.f.write(p, outp)

@@ -2,26 +2,16 @@
#include <string.h>
#include <librdkafka/rdkafka.h>

int main (int argc, char **argv) {
int main(int argc, char **argv) {
        rd_kafka_conf_t *conf;
        rd_kafka_t *rk;
        char features[256];
        size_t fsize = sizeof(features);
        char errstr[512];
        const char *exp_features[] = {
                "gzip",
                "snappy",
                "ssl",
                "sasl",
                "regex",
                "lz4",
                "sasl_gssapi",
                "sasl_plain",
                "sasl_scram",
                "plugins",
                "zstd",
                "sasl_oauthbearer",
                NULL,
                "gzip", "snappy", "ssl", "sasl", "regex",
                "lz4", "sasl_gssapi", "sasl_plain", "sasl_scram", "plugins",
                "zstd", "sasl_oauthbearer", NULL,
        };
        const char **exp;
        int missing = 0;

@@ -39,14 +29,13 @@ int main (int argc, char **argv) {
        printf("builtin.features %s\n", features);

        /* Verify that expected features are enabled. */
        for (exp = exp_features ; *exp ; exp++) {
        for (exp = exp_features; *exp; exp++) {
                const char *t = features;
                size_t elen = strlen(*exp);
                int match = 0;
                size_t elen = strlen(*exp);
                int match = 0;

                while ((t = strstr(t, *exp))) {
                        if (t[elen] == ',' ||
                            t[elen] == '\0') {
                        if (t[elen] == ',' || t[elen] == '\0') {
                                match = 1;
                                break;
                        }

@@ -60,16 +49,16 @@ int main (int argc, char **argv) {
                missing++;
        }

        if (rd_kafka_conf_set(conf, "security.protocol", "SASL_SSL",
                              errstr, sizeof(errstr)) ||
            rd_kafka_conf_set(conf, "sasl.mechanism", "PLAIN",
                              errstr, sizeof(errstr)) ||
            rd_kafka_conf_set(conf, "sasl.username", "username",
                              errstr, sizeof(errstr)) ||
            rd_kafka_conf_set(conf, "sasl.password", "password",
                              errstr, sizeof(errstr)) ||
            rd_kafka_conf_set(conf, "debug", "security",
                              errstr, sizeof(errstr))) {
        if (rd_kafka_conf_set(conf, "security.protocol", "SASL_SSL", errstr,
                              sizeof(errstr)) ||
            rd_kafka_conf_set(conf, "sasl.mechanism", "PLAIN", errstr,
                              sizeof(errstr)) ||
            rd_kafka_conf_set(conf, "sasl.username", "username", errstr,
                              sizeof(errstr)) ||
            rd_kafka_conf_set(conf, "sasl.password", "password", errstr,
                              sizeof(errstr)) ||
            rd_kafka_conf_set(conf, "debug", "security", errstr,
                              sizeof(errstr))) {
                fprintf(stderr, "conf_set failed: %s\n", errstr);
                return 1;
        }

@@ -2,7 +2,7 @@
#include <librdkafka/rdkafkacpp.h>


int main () {
int main() {
  std::cout << "librdkafka++ " << RdKafka::version_str() << std::endl;

  RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);

@@ -24,13 +24,14 @@ if __name__ == '__main__':
    print("Release asset checksums:")

    for ftype in ["zip", "tar.gz"]:
        url = "https://github.com/edenhill/librdkafka/archive/{}.{}".format(tag, ftype)
        url = "https://github.com/edenhill/librdkafka/archive/{}.{}".format(
            tag, ftype)

        h = hashlib.sha256()

        r = requests.get(url, stream=True)
        while True:
            buf = r.raw.read(100*1000)
            buf = r.raw.read(100 * 1000)
            if len(buf) == 0:
                break
            h.update(buf)

@@ -35,18 +35,16 @@


RdKafka::ConfImpl::ConfResult RdKafka::ConfImpl::set(const std::string &name,
                                                      const std::string &value,
                                                      std::string &errstr) {
                                                     const std::string &value,
                                                     std::string &errstr) {
  rd_kafka_conf_res_t res;
  char errbuf[512];

  if (this->conf_type_ == CONF_GLOBAL)
    res = rd_kafka_conf_set(this->rk_conf_,
                            name.c_str(), value.c_str(),
                            errbuf, sizeof(errbuf));
    res = rd_kafka_conf_set(this->rk_conf_, name.c_str(), value.c_str(), errbuf,
                            sizeof(errbuf));
  else
    res = rd_kafka_topic_conf_set(this->rkt_conf_,
                                  name.c_str(), value.c_str(),
    res = rd_kafka_topic_conf_set(this->rkt_conf_, name.c_str(), value.c_str(),
                                  errbuf, sizeof(errbuf));

  if (res != RD_KAFKA_CONF_OK)

@@ -56,8 +54,7 @@ RdKafka::ConfImpl::ConfResult RdKafka::ConfImpl::set(const std::string &name,
}


std::list<std::string> *RdKafka::ConfImpl::dump () {

std::list<std::string> *RdKafka::ConfImpl::dump() {
  const char **arrc;
  size_t cnt;
  std::list<std::string> *arr;

@@ -68,14 +65,14 @@ std::list<std::string> *RdKafka::ConfImpl::dump () {
    arrc = rd_kafka_topic_conf_dump(rkt_conf_, &cnt);

  arr = new std::list<std::string>();
  for (int i = 0 ; i < static_cast<int>(cnt) ; i++)
  for (int i = 0; i < static_cast<int>(cnt); i++)
    arr->push_back(std::string(arrc[i]));

  rd_kafka_conf_dump_free(arrc, cnt);
  return arr;
}

RdKafka::Conf *RdKafka::Conf::create (ConfType type) {
RdKafka::Conf *RdKafka::Conf::create(ConfType type) {
  ConfImpl *conf = new ConfImpl(type);

  if (type == CONF_GLOBAL)

@@ -33,14 +33,16 @@

#include "rdkafkacpp_int.h"

RdKafka::Consumer::~Consumer () {}
RdKafka::Consumer::~Consumer() {
}

RdKafka::Consumer *RdKafka::Consumer::create (const RdKafka::Conf *conf,
                                              std::string &errstr) {
RdKafka::Consumer *RdKafka::Consumer::create(const RdKafka::Conf *conf,
                                             std::string &errstr) {
  char errbuf[512];
  const RdKafka::ConfImpl *confimpl = dynamic_cast<const RdKafka::ConfImpl *>(conf);
  const RdKafka::ConfImpl *confimpl =
      dynamic_cast<const RdKafka::ConfImpl *>(conf);
  RdKafka::ConsumerImpl *rkc = new RdKafka::ConsumerImpl();
  rd_kafka_conf_t *rk_conf = NULL;
  rd_kafka_conf_t *rk_conf = NULL;

  if (confimpl) {
    if (!confimpl->rk_conf_) {

@@ -55,8 +57,8 @@ RdKafka::Consumer *RdKafka::Consumer::create (const RdKafka::Conf *conf,
  }

  rd_kafka_t *rk;
  if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
                          errbuf, sizeof(errbuf)))) {
  if (!(rk =
            rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, errbuf, sizeof(errbuf)))) {
    errstr = errbuf;
    // rd_kafka_new() takes ownership only if succeeds
    if (rk_conf)

@@ -71,13 +73,13 @@ RdKafka::Consumer *RdKafka::Consumer::create (const RdKafka::Conf *conf,
  return rkc;
}

int64_t RdKafka::Consumer::OffsetTail (int64_t offset) {
int64_t RdKafka::Consumer::OffsetTail(int64_t offset) {
  return RD_KAFKA_OFFSET_TAIL(offset);
}

RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic,
                                                 int32_t partition,
                                                 int64_t offset) {
RdKafka::ErrorCode RdKafka::ConsumerImpl::start(Topic *topic,
                                                int32_t partition,
                                                int64_t offset) {
  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);

  if (rd_kafka_consume_start(topicimpl->rkt_, partition, offset) == -1)

@@ -87,10 +89,10 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic,
}


RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic,
                                                 int32_t partition,
                                                 int64_t offset,
                                                 Queue *queue) {
RdKafka::ErrorCode RdKafka::ConsumerImpl::start(Topic *topic,
                                                int32_t partition,
                                                int64_t offset,
                                                Queue *queue) {
  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
  RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);

@@ -102,8 +104,8 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::start (Topic *topic,
}


RdKafka::ErrorCode RdKafka::ConsumerImpl::stop (Topic *topic,
                                                int32_t partition) {
RdKafka::ErrorCode RdKafka::ConsumerImpl::stop(Topic *topic,
                                               int32_t partition) {
  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);

  if (rd_kafka_consume_stop(topicimpl->rkt_, partition) == -1)

@@ -112,10 +114,10 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::stop (Topic *topic,
  return RdKafka::ERR_NO_ERROR;
}

RdKafka::ErrorCode RdKafka::ConsumerImpl::seek (Topic *topic,
                                                int32_t partition,
                                                int64_t offset,
                                                int timeout_ms) {
RdKafka::ErrorCode RdKafka::ConsumerImpl::seek(Topic *topic,
                                               int32_t partition,
                                               int64_t offset,
                                               int timeout_ms) {
  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);

  if (rd_kafka_seek(topicimpl->rkt_, partition, offset, timeout_ms) == -1)

@@ -124,68 +126,71 @@ RdKafka::ErrorCode RdKafka::ConsumerImpl::seek (Topic *topic,
  return RdKafka::ERR_NO_ERROR;
}

RdKafka::Message *RdKafka::ConsumerImpl::consume (Topic *topic,
                                                  int32_t partition,
                                                  int timeout_ms) {
RdKafka::Message *RdKafka::ConsumerImpl::consume(Topic *topic,
                                                 int32_t partition,
                                                 int timeout_ms) {
  RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
  rd_kafka_message_t *rkmessage;

  rkmessage = rd_kafka_consume(topicimpl->rkt_, partition, timeout_ms);
  if (!rkmessage)
    return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, topic,
                                    static_cast<RdKafka::ErrorCode>
                                    (rd_kafka_last_error()));
    return new RdKafka::MessageImpl(
        RD_KAFKA_CONSUMER, topic,
        static_cast<RdKafka::ErrorCode>(rd_kafka_last_error()));

  return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, topic, rkmessage);
}

namespace {
/* Helper struct for `consume_callback'.
 * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
 * and keep track of the C++ callback function and `opaque' value.
/* Helper struct for `consume_callback'.
 * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
 * and keep track of the C++ callback function and `opaque' value.
 */
struct ConsumerImplCallback {
  ConsumerImplCallback(RdKafka::Topic *topic,
                       RdKafka::ConsumeCb *cb,
                       void *data) :
      topic(topic), cb_cls(cb), cb_data(data) {
  }
  /* This function is the one we give to `rd_kafka_consume_callback', with
   * the `opaque' pointer pointing to an instance of this struct, in which
   * we can find the C++ callback and `cb_data'.
   */
struct ConsumerImplCallback {
  ConsumerImplCallback(RdKafka::Topic* topic, RdKafka::ConsumeCb* cb, void* data)
      : topic(topic), cb_cls(cb), cb_data(data) {
  }
  /* This function is the one we give to `rd_kafka_consume_callback', with
   * the `opaque' pointer pointing to an instance of this struct, in which
   * we can find the C++ callback and `cb_data'.
   */
  static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
    ConsumerImplCallback *instance = static_cast<ConsumerImplCallback*>(opaque);
    RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, instance->topic,
                                 msg, false /*don't free*/);
    instance->cb_cls->consume_cb(message, instance->cb_data);
  }
  RdKafka::Topic *topic;
  RdKafka::ConsumeCb *cb_cls;
  void *cb_data;
};
}
  static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
    ConsumerImplCallback *instance =
        static_cast<ConsumerImplCallback *>(opaque);
    RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, instance->topic, msg,
                                 false /*don't free*/);
    instance->cb_cls->consume_cb(message, instance->cb_data);
  }
  RdKafka::Topic *topic;
  RdKafka::ConsumeCb *cb_cls;
  void *cb_data;
};
}  // namespace

int RdKafka::ConsumerImpl::consume_callback (RdKafka::Topic* topic,
                                             int32_t partition,
                                             int timeout_ms,
                                             RdKafka::ConsumeCb *consume_cb,
                                             void *opaque) {
int RdKafka::ConsumerImpl::consume_callback(RdKafka::Topic *topic,
                                            int32_t partition,
                                            int timeout_ms,
                                            RdKafka::ConsumeCb *consume_cb,
                                            void *opaque) {
  RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(topic);
  ConsumerImplCallback context(topic, consume_cb, opaque);
  return rd_kafka_consume_callback(topicimpl->rkt_, partition, timeout_ms,
                                   &ConsumerImplCallback::consume_cb_trampoline, &context);
                                   &ConsumerImplCallback::consume_cb_trampoline,
                                   &context);
}


RdKafka::Message *RdKafka::ConsumerImpl::consume (Queue *queue,
                                                  int timeout_ms) {
RdKafka::Message *RdKafka::ConsumerImpl::consume(Queue *queue, int timeout_ms) {
  RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
  rd_kafka_message_t *rkmessage;

  rkmessage = rd_kafka_consume_queue(queueimpl->queue_, timeout_ms);
  if (!rkmessage)
    return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, NULL,
                                    static_cast<RdKafka::ErrorCode>
                                    (rd_kafka_last_error()));
    return new RdKafka::MessageImpl(
        RD_KAFKA_CONSUMER, NULL,
        static_cast<RdKafka::ErrorCode>(rd_kafka_last_error()));
  /*
   * Recover our Topic * from the topic conf's opaque field, which we
   * set in RdKafka::Topic::create() for just this kind of situation.

@@ -197,42 +202,43 @@ RdKafka::Message *RdKafka::ConsumerImpl::consume (Queue *queue,
}

namespace {
/* Helper struct for `consume_callback' with a Queue.
 * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
 * and keep track of the C++ callback function and `opaque' value.
/* Helper struct for `consume_callback' with a Queue.
 * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
 * and keep track of the C++ callback function and `opaque' value.
 */
struct ConsumerImplQueueCallback {
  ConsumerImplQueueCallback(RdKafka::ConsumeCb *cb, void *data) :
      cb_cls(cb), cb_data(data) {
  }
  /* This function is the one we give to `rd_kafka_consume_callback', with
   * the `opaque' pointer pointing to an instance of this struct, in which
   * we can find the C++ callback and `cb_data'.
   */
struct ConsumerImplQueueCallback {
  ConsumerImplQueueCallback(RdKafka::ConsumeCb *cb, void *data)
      : cb_cls(cb), cb_data(data) {
  }
  /* This function is the one we give to `rd_kafka_consume_callback', with
   * the `opaque' pointer pointing to an instance of this struct, in which
   * we can find the C++ callback and `cb_data'.
  static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
    ConsumerImplQueueCallback *instance =
        static_cast<ConsumerImplQueueCallback *>(opaque);
    /*
     * Recover our Topic * from the topic conf's opaque field, which we
     * set in RdKafka::Topic::create() for just this kind of situation.
     */
  static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
    ConsumerImplQueueCallback *instance = static_cast<ConsumerImplQueueCallback *>(opaque);
    /*
     * Recover our Topic * from the topic conf's opaque field, which we
     * set in RdKafka::Topic::create() for just this kind of situation.
     */
    void *topic_opaque = rd_kafka_topic_opaque(msg->rkt);
    RdKafka::Topic *topic = static_cast<RdKafka::Topic *>(topic_opaque);
    RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg,
                                 false /*don't free*/);
    instance->cb_cls->consume_cb(message, instance->cb_data);
  }
  RdKafka::ConsumeCb *cb_cls;
  void *cb_data;
};
}
    void *topic_opaque = rd_kafka_topic_opaque(msg->rkt);
    RdKafka::Topic *topic = static_cast<RdKafka::Topic *>(topic_opaque);
    RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg,
                                 false /*don't free*/);
    instance->cb_cls->consume_cb(message, instance->cb_data);
  }
  RdKafka::ConsumeCb *cb_cls;
  void *cb_data;
};
}  // namespace

int RdKafka::ConsumerImpl::consume_callback (Queue *queue,
                                             int timeout_ms,
                                             RdKafka::ConsumeCb *consume_cb,
                                             void *opaque) {
int RdKafka::ConsumerImpl::consume_callback(Queue *queue,
                                            int timeout_ms,
                                            RdKafka::ConsumeCb *consume_cb,
                                            void *opaque) {
  RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
  ConsumerImplQueueCallback context(consume_cb, opaque);
  return rd_kafka_consume_callback_queue(queueimpl->queue_, timeout_ms,
                                         &ConsumerImplQueueCallback::consume_cb_trampoline,
                                         &context);
  return rd_kafka_consume_callback_queue(
      queueimpl->queue_, timeout_ms,
      &ConsumerImplQueueCallback::consume_cb_trampoline, &context);
}

@@ -34,7 +34,7 @@

void RdKafka::consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
  RdKafka::Topic* topic = static_cast<Topic *>(rd_kafka_topic_opaque(msg->rkt));
  RdKafka::Topic *topic = static_cast<Topic *>(rd_kafka_topic_opaque(msg->rkt));

  RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg,
                               false /*don't free*/);

@@ -42,14 +42,16 @@ void RdKafka::consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
  handle->consume_cb_->consume_cb(message, opaque);
}

void RdKafka::log_cb_trampoline (const rd_kafka_t *rk, int level,
                                 const char *fac, const char *buf) {
void RdKafka::log_cb_trampoline(const rd_kafka_t *rk,
                                int level,
                                const char *fac,
                                const char *buf) {
  if (!rk) {
    rd_kafka_log_print(rk, level, fac, buf);
    return;
  }

  void *opaque = rd_kafka_opaque(rk);
  void *opaque = rd_kafka_opaque(rk);
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);

  if (!handle->event_cb_) {

@@ -57,17 +59,18 @@ void RdKafka::log_cb_trampoline (const rd_kafka_t *rk, int level,
    return;
  }

  RdKafka::EventImpl event(RdKafka::Event::EVENT_LOG,
                           RdKafka::ERR_NO_ERROR,
                           static_cast<RdKafka::Event::Severity>(level),
                           fac, buf);
  RdKafka::EventImpl event(RdKafka::Event::EVENT_LOG, RdKafka::ERR_NO_ERROR,
                           static_cast<RdKafka::Event::Severity>(level), fac,
                           buf);

  handle->event_cb_->event_cb(event);
}


void RdKafka::error_cb_trampoline (rd_kafka_t *rk, int err,
                                   const char *reason, void *opaque) {
void RdKafka::error_cb_trampoline(rd_kafka_t *rk,
                                  int err,
                                  const char *reason,
                                  void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
  char errstr[512];
  bool is_fatal = false;

@@ -81,37 +84,36 @@ void RdKafka::error_cb_trampoline (rd_kafka_t *rk, int err,
  }
  RdKafka::EventImpl event(RdKafka::Event::EVENT_ERROR,
                           static_cast<RdKafka::ErrorCode>(err),
                           RdKafka::Event::EVENT_SEVERITY_ERROR,
                           NULL,
                           reason);
                           RdKafka::Event::EVENT_SEVERITY_ERROR, NULL, reason);
  event.fatal_ = is_fatal;
  handle->event_cb_->event_cb(event);
}


void RdKafka::throttle_cb_trampoline (rd_kafka_t *rk, const char *broker_name,
                                      int32_t broker_id,
                                      int throttle_time_ms,
                                      void *opaque) {
void RdKafka::throttle_cb_trampoline(rd_kafka_t *rk,
                                     const char *broker_name,
                                     int32_t broker_id,
                                     int throttle_time_ms,
                                     void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);

  RdKafka::EventImpl event(RdKafka::Event::EVENT_THROTTLE);
  event.str_ = broker_name;
  event.id_ = broker_id;
  event.str_ = broker_name;
  event.id_ = broker_id;
  event.throttle_time_ = throttle_time_ms;

  handle->event_cb_->event_cb(event);
}


int RdKafka::stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len,
                                  void *opaque) {
int RdKafka::stats_cb_trampoline(rd_kafka_t *rk,
                                 char *json,
                                 size_t json_len,
                                 void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);

  RdKafka::EventImpl event(RdKafka::Event::EVENT_STATS,
                           RdKafka::ERR_NO_ERROR,
                           RdKafka::Event::EVENT_SEVERITY_INFO,
                           NULL, json);
  RdKafka::EventImpl event(RdKafka::Event::EVENT_STATS, RdKafka::ERR_NO_ERROR,
                           RdKafka::Event::EVENT_SEVERITY_INFO, NULL, json);

  handle->event_cb_->event_cb(event);

@@ -119,56 +121,57 @@ int RdKafka::stats_cb_trampoline (rd_kafka_t *rk, char *json, size_t json_len,
}


int RdKafka::socket_cb_trampoline (int domain, int type, int protocol,
                                   void *opaque) {
int RdKafka::socket_cb_trampoline(int domain,
                                  int type,
                                  int protocol,
                                  void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);

  return handle->socket_cb_->socket_cb(domain, type, protocol);
}

int RdKafka::open_cb_trampoline (const char *pathname, int flags, mode_t mode,
                                 void *opaque) {
int RdKafka::open_cb_trampoline(const char *pathname,
                                int flags,
                                mode_t mode,
                                void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);

  return handle->open_cb_->open_cb(pathname, flags, static_cast<int>(mode));
}

void
RdKafka::oauthbearer_token_refresh_cb_trampoline (rd_kafka_t *rk,
                                                  const char *oauthbearer_config,
                                                  void *opaque) {
void RdKafka::oauthbearer_token_refresh_cb_trampoline(
    rd_kafka_t *rk,
    const char *oauthbearer_config,
    void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);

  handle->oauthbearer_token_refresh_cb_->
      oauthbearer_token_refresh_cb(handle,
                                   std::string(oauthbearer_config ?
                                               oauthbearer_config : ""));
  handle->oauthbearer_token_refresh_cb_->oauthbearer_token_refresh_cb(
      handle, std::string(oauthbearer_config ? oauthbearer_config : ""));
}


int RdKafka::ssl_cert_verify_cb_trampoline (rd_kafka_t *rk,
                                            const char *broker_name,
                                            int32_t broker_id,
                                            int *x509_error,
                                            int depth,
                                            const char *buf, size_t size,
                                            char *errstr, size_t errstr_size,
                                            void *opaque) {
int RdKafka::ssl_cert_verify_cb_trampoline(rd_kafka_t *rk,
                                           const char *broker_name,
                                           int32_t broker_id,
                                           int *x509_error,
                                           int depth,
                                           const char *buf,
                                           size_t size,
                                           char *errstr,
                                           size_t errstr_size,
                                           void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
  std::string errbuf;

  bool res = 0 != handle->ssl_cert_verify_cb_->
      ssl_cert_verify_cb(std::string(broker_name), broker_id,
                         x509_error,
                         depth,
                         buf, size,
                         errbuf);
  bool res = 0 != handle->ssl_cert_verify_cb_->ssl_cert_verify_cb(
                      std::string(broker_name), broker_id, x509_error, depth,
                      buf, size, errbuf);

  if (res)
    return (int)res;

  size_t errlen = errbuf.size() > errstr_size - 1 ?
      errstr_size - 1 : errbuf.size();
  size_t errlen =
      errbuf.size() > errstr_size - 1 ? errstr_size - 1 : errbuf.size();

  memcpy(errstr, errbuf.c_str(), errlen);
  if (errstr_size > 0)

@@ -178,21 +181,21 @@ int RdKafka::ssl_cert_verify_cb_trampoline (rd_kafka_t *rk,
}


RdKafka::ErrorCode RdKafka::HandleImpl::metadata (bool all_topics,
                                                  const Topic *only_rkt,
                                                  Metadata **metadatap,
                                                  int timeout_ms) {
RdKafka::ErrorCode RdKafka::HandleImpl::metadata(bool all_topics,
                                                 const Topic *only_rkt,
                                                 Metadata **metadatap,
                                                 int timeout_ms) {
  const rd_kafka_metadata_t *cmetadatap = NULL;

  const rd_kafka_metadata_t *cmetadatap=NULL;
  rd_kafka_topic_t *topic =
      only_rkt ? static_cast<const TopicImpl *>(only_rkt)->rkt_ : NULL;

  rd_kafka_topic_t *topic = only_rkt ?
      static_cast<const TopicImpl *>(only_rkt)->rkt_ : NULL;
  const rd_kafka_resp_err_t rc =
      rd_kafka_metadata(rk_, all_topics, topic, &cmetadatap, timeout_ms);

  const rd_kafka_resp_err_t rc = rd_kafka_metadata(rk_, all_topics, topic,
                                                   &cmetadatap,timeout_ms);

  *metadatap = (rc == RD_KAFKA_RESP_ERR_NO_ERROR) ?
      new RdKafka::MetadataImpl(cmetadatap) : NULL;
  *metadatap = (rc == RD_KAFKA_RESP_ERR_NO_ERROR)
                   ? new RdKafka::MetadataImpl(cmetadatap)
                   : NULL;

  return static_cast<RdKafka::ErrorCode>(rc);
}

@@ -200,47 +203,45 @@ RdKafka::ErrorCode RdKafka::HandleImpl::metadata (bool all_topics,
/**
 * Convert a list of C partitions to C++ partitions
 */
static void c_parts_to_partitions (const rd_kafka_topic_partition_list_t
                                   *c_parts,
                                   std::vector<RdKafka::TopicPartition*>
                                   &partitions) {
static void c_parts_to_partitions(
    const rd_kafka_topic_partition_list_t *c_parts,
    std::vector<RdKafka::TopicPartition *> &partitions) {
  partitions.resize(c_parts->cnt);
  for (int i = 0 ; i < c_parts->cnt ; i++)
  for (int i = 0; i < c_parts->cnt; i++)
    partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]);
}

static void free_partition_vector (std::vector<RdKafka::TopicPartition*> &v) {
  for (unsigned int i = 0 ; i < v.size() ; i++)
static void free_partition_vector(std::vector<RdKafka::TopicPartition *> &v) {
  for (unsigned int i = 0; i < v.size(); i++)
    delete v[i];
  v.clear();
}

void
RdKafka::rebalance_cb_trampoline (rd_kafka_t *rk,
                                  rd_kafka_resp_err_t err,
                                  rd_kafka_topic_partition_list_t *c_partitions,
                                  void *opaque) {
void RdKafka::rebalance_cb_trampoline(
    rd_kafka_t *rk,
    rd_kafka_resp_err_t err,
    rd_kafka_topic_partition_list_t *c_partitions,
    void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
  std::vector<RdKafka::TopicPartition*> partitions;
  std::vector<RdKafka::TopicPartition *> partitions;

  c_parts_to_partitions(c_partitions, partitions);

  handle->rebalance_cb_->rebalance_cb(
      dynamic_cast<RdKafka::KafkaConsumer*>(handle),
      static_cast<RdKafka::ErrorCode>(err),
      partitions);
      dynamic_cast<RdKafka::KafkaConsumer *>(handle),
      static_cast<RdKafka::ErrorCode>(err), partitions);

  free_partition_vector(partitions);
}


void
RdKafka::offset_commit_cb_trampoline0 (
void RdKafka::offset_commit_cb_trampoline0(
    rd_kafka_t *rk,
    rd_kafka_resp_err_t err,
    rd_kafka_topic_partition_list_t *c_offsets, void *opaque) {
    rd_kafka_topic_partition_list_t *c_offsets,
    void *opaque) {
  OffsetCommitCb *cb = static_cast<RdKafka::OffsetCommitCb *>(opaque);
  std::vector<RdKafka::TopicPartition*> offsets;
  std::vector<RdKafka::TopicPartition *> offsets;

  if (c_offsets)
    c_parts_to_partitions(c_offsets, offsets);

@@ -250,28 +251,26 @@ RdKafka::offset_commit_cb_trampoline0 (
  free_partition_vector(offsets);
}

static void
offset_commit_cb_trampoline (
static void offset_commit_cb_trampoline(
    rd_kafka_t *rk,
    rd_kafka_resp_err_t err,
    rd_kafka_topic_partition_list_t *c_offsets, void *opaque) {
    rd_kafka_topic_partition_list_t *c_offsets,
    void *opaque) {
  RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
  RdKafka::offset_commit_cb_trampoline0(rk, err, c_offsets,
                                        handle->offset_commit_cb_);
}


void RdKafka::HandleImpl::set_common_config (const RdKafka::ConfImpl *confimpl) {

void RdKafka::HandleImpl::set_common_config(const RdKafka::ConfImpl *confimpl) {
  rd_kafka_conf_set_opaque(confimpl->rk_conf_, this);

  if (confimpl->event_cb_) {
    rd_kafka_conf_set_log_cb(confimpl->rk_conf_,
                             RdKafka::log_cb_trampoline);
    rd_kafka_conf_set_log_cb(confimpl->rk_conf_, RdKafka::log_cb_trampoline);
    rd_kafka_conf_set_error_cb(confimpl->rk_conf_,
                               RdKafka::error_cb_trampoline);
    rd_kafka_conf_set_throttle_cb(confimpl->rk_conf_,
                                  RdKafka::throttle_cb_trampoline);
                                  RdKafka::throttle_cb_trampoline);
    rd_kafka_conf_set_stats_cb(confimpl->rk_conf_,
                               RdKafka::stats_cb_trampoline);
    event_cb_ = confimpl->event_cb_;

@@ -279,9 +278,8 @@ void RdKafka::HandleImpl::set_common_config (const RdKafka::ConfImpl *confimpl)

  if (confimpl->oauthbearer_token_refresh_cb_) {
    rd_kafka_conf_set_oauthbearer_token_refresh_cb(
        confimpl->rk_conf_,
        RdKafka::oauthbearer_token_refresh_cb_trampoline);
    oauthbearer_token_refresh_cb_ = confimpl->oauthbearer_token_refresh_cb_;
        confimpl->rk_conf_, RdKafka::oauthbearer_token_refresh_cb_trampoline);
    oauthbearer_token_refresh_cb_ = confimpl->oauthbearer_token_refresh_cb_;
  }

  if (confimpl->socket_cb_) {

@@ -291,9 +289,9 @@ void RdKafka::HandleImpl::set_common_config (const RdKafka::ConfImpl *confimpl)
  }

  if (confimpl->ssl_cert_verify_cb_) {
    rd_kafka_conf_set_ssl_cert_verify_cb(confimpl->rk_conf_,
                                         RdKafka::ssl_cert_verify_cb_trampoline);
    ssl_cert_verify_cb_ = confimpl->ssl_cert_verify_cb_;
    rd_kafka_conf_set_ssl_cert_verify_cb(
        confimpl->rk_conf_, RdKafka::ssl_cert_verify_cb_trampoline);
    ssl_cert_verify_cb_ = confimpl->ssl_cert_verify_cb_;
  }

  if (confimpl->open_cb_) {

@@ -320,12 +318,11 @@ void RdKafka::HandleImpl::set_common_config (const RdKafka::ConfImpl *confimpl)
                                 RdKafka::consume_cb_trampoline);
    consume_cb_ = confimpl->consume_cb_;
  }

}


RdKafka::ErrorCode
RdKafka::HandleImpl::pause (std::vector<RdKafka::TopicPartition*> &partitions) {
RdKafka::ErrorCode RdKafka::HandleImpl::pause(
    std::vector<RdKafka::TopicPartition *> &partitions) {
  rd_kafka_topic_partition_list_t *c_parts;
  rd_kafka_resp_err_t err;

@@ -342,8 +339,8 @@ RdKafka::HandleImpl::pause (std::vector<RdKafka::TopicPartition*> &partitions) {
}


RdKafka::ErrorCode
RdKafka::HandleImpl::resume (std::vector<RdKafka::TopicPartition*> &partitions) {
RdKafka::ErrorCode RdKafka::HandleImpl::resume(
    std::vector<RdKafka::TopicPartition *> &partitions) {
  rd_kafka_topic_partition_list_t *c_parts;
  rd_kafka_resp_err_t err;

@@ -359,11 +356,10 @@ RdKafka::HandleImpl::resume (std::vector<RdKafka::TopicPartition*> &partitions)
  return static_cast<RdKafka::ErrorCode>(err);
}

RdKafka::Queue *
RdKafka::HandleImpl::get_partition_queue (const TopicPartition *part) {
RdKafka::Queue *RdKafka::HandleImpl::get_partition_queue(
    const TopicPartition *part) {
  rd_kafka_queue_t *rkqu;
  rkqu = rd_kafka_queue_get_partition(rk_,
                                      part->topic().c_str(),
  rkqu = rd_kafka_queue_get_partition(rk_, part->topic().c_str(),
                                      part->partition());

  if (rkqu == NULL)

@@ -372,31 +368,28 @@ RdKafka::HandleImpl::get_partition_queue (const TopicPartition *part) {
  return new QueueImpl(rkqu);
}

RdKafka::ErrorCode
RdKafka::HandleImpl::set_log_queue (RdKafka::Queue *queue) {
  rd_kafka_queue_t *rkqu = NULL;
  if (queue) {
    QueueImpl *queueimpl = dynamic_cast<QueueImpl *>(queue);
    rkqu = queueimpl->queue_;
  }
  return static_cast<RdKafka::ErrorCode>(
      rd_kafka_set_log_queue(rk_, rkqu));
RdKafka::ErrorCode RdKafka::HandleImpl::set_log_queue(RdKafka::Queue *queue) {
  rd_kafka_queue_t *rkqu = NULL;
  if (queue) {
    QueueImpl *queueimpl = dynamic_cast<QueueImpl *>(queue);
    rkqu = queueimpl->queue_;
  }
  return static_cast<RdKafka::ErrorCode>(rd_kafka_set_log_queue(rk_, rkqu));
}

namespace RdKafka {

rd_kafka_topic_partition_list_t *
partitions_to_c_parts (const std::vector<RdKafka::TopicPartition*> &partitions){
rd_kafka_topic_partition_list_t *partitions_to_c_parts(
    const std::vector<RdKafka::TopicPartition *> &partitions) {
  rd_kafka_topic_partition_list_t *c_parts;

  c_parts = rd_kafka_topic_partition_list_new((int)partitions.size());

  for (unsigned int i = 0 ; i < partitions.size() ; i++) {
  for (unsigned int i = 0; i < partitions.size(); i++) {
    const RdKafka::TopicPartitionImpl *tpi =
        dynamic_cast<const RdKafka::TopicPartitionImpl*>(partitions[i]);
    rd_kafka_topic_partition_t *rktpar =
        rd_kafka_topic_partition_list_add(c_parts,
                                          tpi->topic_.c_str(), tpi->partition_);
        dynamic_cast<const RdKafka::TopicPartitionImpl *>(partitions[i]);
    rd_kafka_topic_partition_t *rktpar = rd_kafka_topic_partition_list_add(
        c_parts, tpi->topic_.c_str(), tpi->partition_);
    rktpar->offset = tpi->offset_;
  }

@@ -407,24 +400,23 @@ partitions_to_c_parts (const std::vector<RdKafka::TopicPartition*> &partitions){
/**
 * @brief Update the application provided 'partitions' with info from 'c_parts'
 */
void
update_partitions_from_c_parts (std::vector<RdKafka::TopicPartition*> &partitions,
                                const rd_kafka_topic_partition_list_t *c_parts) {
  for (int i = 0 ; i < c_parts->cnt ; i++) {
void update_partitions_from_c_parts(
    std::vector<RdKafka::TopicPartition *> &partitions,
    const rd_kafka_topic_partition_list_t *c_parts) {
  for (int i = 0; i < c_parts->cnt; i++) {
    rd_kafka_topic_partition_t *p = &c_parts->elems[i];

    /* Find corresponding C++ entry */
    for (unsigned int j = 0 ; j < partitions.size() ; j++) {
    for (unsigned int j = 0; j < partitions.size(); j++) {
      RdKafka::TopicPartitionImpl *pp =
          dynamic_cast<RdKafka::TopicPartitionImpl*>(partitions[j]);
          dynamic_cast<RdKafka::TopicPartitionImpl *>(partitions[j]);
      if (!strcmp(p->topic, pp->topic_.c_str()) &&
          p->partition == pp->partition_) {
        pp->offset_ = p->offset;
        pp->err_ = static_cast<RdKafka::ErrorCode>(p->err);
          p->partition == pp->partition_) {
        pp->offset_ = p->offset;
        pp->err_ = static_cast<RdKafka::ErrorCode>(p->err);
      }
    }
  }
}

};

};  // namespace RdKafka

@@ -34,7 +34,7 @@
#include "rdkafkacpp_int.h"

RdKafka::Headers *RdKafka::Headers::create() {
  return new RdKafka::HeadersImpl();
  return new RdKafka::HeadersImpl();
}

RdKafka::Headers *RdKafka::Headers::create(const std::vector<Header> &headers) {

@@ -44,4 +44,5 @@ RdKafka::Headers *RdKafka::Headers::create(const std::vector<Header> &headers) {
  return new RdKafka::HeadersImpl();
}

RdKafka::Headers::~Headers() {}
RdKafka::Headers::~Headers() {
}

@ -31,14 +31,17 @@
|
|||
|
||||
#include "rdkafkacpp_int.h"
|
||||
|
||||
RdKafka::KafkaConsumer::~KafkaConsumer () {}
|
||||
RdKafka::KafkaConsumer::~KafkaConsumer() {
|
||||
}
|
||||
|
||||
RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (const RdKafka::Conf *conf,
|
||||
std::string &errstr) {
|
||||
RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create(
|
||||
const RdKafka::Conf *conf,
|
||||
std::string &errstr) {
|
||||
char errbuf[512];
|
||||
const RdKafka::ConfImpl *confimpl = dynamic_cast<const RdKafka::ConfImpl *>(conf);
|
||||
const RdKafka::ConfImpl *confimpl =
|
||||
dynamic_cast<const RdKafka::ConfImpl *>(conf);
|
||||
RdKafka::KafkaConsumerImpl *rkc = new RdKafka::KafkaConsumerImpl();
|
||||
rd_kafka_conf_t *rk_conf = NULL;
|
||||
rd_kafka_conf_t *rk_conf = NULL;
|
||||
size_t grlen;
|
||||
|
||||
if (!confimpl || !confimpl->rk_conf_) {
|
||||
|
@ -47,8 +50,8 @@ RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (const RdKafka::Conf *con
|
|||
return NULL;
|
||||
}
|
||||
|
||||
if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id",
|
||||
NULL, &grlen) != RD_KAFKA_CONF_OK ||
|
||||
if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id", NULL, &grlen) !=
|
||||
RD_KAFKA_CONF_OK ||
|
||||
grlen <= 1 /* terminating null only */) {
|
||||
errstr = "\"group.id\" must be configured";
|
||||
delete rkc;
|
||||
|
@ -60,8 +63,8 @@ RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (const RdKafka::Conf *con
|
|||
rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
|
||||
|
||||
rd_kafka_t *rk;
|
||||
if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf,
|
||||
errbuf, sizeof(errbuf)))) {
|
||||
if (!(rk =
|
||||
rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, errbuf, sizeof(errbuf)))) {
|
||||
errstr = errbuf;
|
||||
// rd_kafka_new() takes ownership only if succeeds
|
||||
rd_kafka_conf_destroy(rk_conf);
|
||||
|
@ -79,18 +82,14 @@ RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create (const RdKafka::Conf *con
|
|||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
RdKafka::ErrorCode
|
||||
RdKafka::KafkaConsumerImpl::subscribe (const std::vector<std::string> &topics) {
|
||||
RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::subscribe(
|
||||
const std::vector<std::string> &topics) {
|
||||
rd_kafka_topic_partition_list_t *c_topics;
|
||||
rd_kafka_resp_err_t err;
|
||||
|
||||
c_topics = rd_kafka_topic_partition_list_new((int)topics.size());
|
||||
|
||||
for (unsigned int i = 0 ; i < topics.size() ; i++)
|
||||
for (unsigned int i = 0; i < topics.size(); i++)
|
||||
rd_kafka_topic_partition_list_add(c_topics, topics[i].c_str(),
|
||||
RD_KAFKA_PARTITION_UA);
|
||||
|
||||
|
@@ -103,12 +102,11 @@ RdKafka::KafkaConsumerImpl::subscribe (const std::vector<std::string> &topics) {

-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::unsubscribe () {
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::unsubscribe() {
   return static_cast<RdKafka::ErrorCode>(rd_kafka_unsubscribe(this->rk_));
 }

-RdKafka::Message *RdKafka::KafkaConsumerImpl::consume (int timeout_ms) {
+RdKafka::Message *RdKafka::KafkaConsumerImpl::consume(int timeout_ms) {
   rd_kafka_message_t *rkmessage;

   rkmessage = rd_kafka_consumer_poll(this->rk_, timeout_ms);

@@ -118,13 +116,12 @@ RdKafka::Message *RdKafka::KafkaConsumerImpl::consume (int timeout_ms) {
                                     RdKafka::ERR__TIMED_OUT);

   return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, rkmessage);
-
 }

-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::assignment (std::vector<RdKafka::TopicPartition*> &partitions) {
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::assignment(
+    std::vector<RdKafka::TopicPartition *> &partitions) {
   rd_kafka_topic_partition_list_t *c_parts;
   rd_kafka_resp_err_t err;
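
Note that consume() always returns a Message object, even on timeout, so a poll loop must check the error code and delete every returned message. A hedged sketch (handle() is a hypothetical helper):

    while (run) {
      RdKafka::Message *msg = consumer->consume(1000 /* ms */);
      if (msg->err() == RdKafka::ERR_NO_ERROR)
        handle(msg->payload(), msg->len());           // hypothetical handler
      else if (msg->err() != RdKafka::ERR__TIMED_OUT)
        std::cerr << "consume error: " << msg->errstr() << std::endl;
      delete msg;  // each consume() allocates a new Message
    }
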
@@ -133,7 +130,7 @@ RdKafka::KafkaConsumerImpl::assignment (std::vector<RdKafka::TopicPartition*> &p

   partitions.resize(c_parts->cnt);

-  for (int i = 0 ; i < c_parts->cnt ; i++)
+  for (int i = 0; i < c_parts->cnt; i++)
     partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]);

   rd_kafka_topic_partition_list_destroy(c_parts);

@@ -143,15 +140,14 @@ RdKafka::KafkaConsumerImpl::assignment (std::vector<RdKafka::TopicPartition*> &p

-bool
-RdKafka::KafkaConsumerImpl::assignment_lost () {
+bool RdKafka::KafkaConsumerImpl::assignment_lost() {
   return rd_kafka_assignment_lost(rk_) ? true : false;
 }

-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::subscription (std::vector<std::string> &topics) {
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::subscription(
+    std::vector<std::string> &topics) {
   rd_kafka_topic_partition_list_t *c_topics;
   rd_kafka_resp_err_t err;

@@ -159,7 +155,7 @@ RdKafka::KafkaConsumerImpl::subscription (std::vector<std::string> &topics) {
     return static_cast<RdKafka::ErrorCode>(err);

   topics.resize(c_topics->cnt);
-  for (int i = 0 ; i < c_topics->cnt ; i++)
+  for (int i = 0; i < c_topics->cnt; i++)
     topics[i] = std::string(c_topics->elems[i].topic);

   rd_kafka_topic_partition_list_destroy(c_topics);

@@ -168,8 +164,8 @@ RdKafka::KafkaConsumerImpl::subscription (std::vector<std::string> &topics) {
 }

-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::assign (const std::vector<TopicPartition*> &partitions) {
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::assign(
+    const std::vector<TopicPartition *> &partitions) {
   rd_kafka_topic_partition_list_t *c_parts;
   rd_kafka_resp_err_t err;

@@ -182,14 +178,13 @@ RdKafka::KafkaConsumerImpl::assign (const std::vector<TopicPartition*> &partitio
 }

-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::unassign () {
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::unassign() {
   return static_cast<RdKafka::ErrorCode>(rd_kafka_assign(rk_, NULL));
 }

-RdKafka::Error *
-RdKafka::KafkaConsumerImpl::incremental_assign (const std::vector<TopicPartition*> &partitions) {
+RdKafka::Error *RdKafka::KafkaConsumerImpl::incremental_assign(
+    const std::vector<TopicPartition *> &partitions) {
   rd_kafka_topic_partition_list_t *c_parts;
   rd_kafka_error_t *c_error;

@@ -204,8 +199,8 @@ RdKafka::KafkaConsumerImpl::incremental_assign (const std::vector<TopicPartition
 }

-RdKafka::Error *
-RdKafka::KafkaConsumerImpl::incremental_unassign (const std::vector<TopicPartition*> &partitions) {
+RdKafka::Error *RdKafka::KafkaConsumerImpl::incremental_unassign(
+    const std::vector<TopicPartition *> &partitions) {
   rd_kafka_topic_partition_list_t *c_parts;
   rd_kafka_error_t *c_error;

@@ -220,8 +215,9 @@ RdKafka::KafkaConsumerImpl::incremental_unassign (const std::vector<TopicPartiti
 }

-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::committed (std::vector<RdKafka::TopicPartition*> &partitions, int timeout_ms) {
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::committed(
+    std::vector<RdKafka::TopicPartition *> &partitions,
+    int timeout_ms) {
   rd_kafka_topic_partition_list_t *c_parts;
   rd_kafka_resp_err_t err;

@@ -239,8 +235,8 @@ RdKafka::KafkaConsumerImpl::committed (std::vector<RdKafka::TopicPartition*> &pa
 }

-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::position (std::vector<RdKafka::TopicPartition*> &partitions) {
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::position(
+    std::vector<RdKafka::TopicPartition *> &partitions) {
   rd_kafka_topic_partition_list_t *c_parts;
   rd_kafka_resp_err_t err;

@@ -258,20 +254,19 @@ RdKafka::KafkaConsumerImpl::position (std::vector<RdKafka::TopicPartition*> &par
 }

-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::seek (const RdKafka::TopicPartition &partition,
-                                  int timeout_ms) {
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::seek(
+    const RdKafka::TopicPartition &partition,
+    int timeout_ms) {
   const RdKafka::TopicPartitionImpl *p =
-      dynamic_cast<const RdKafka::TopicPartitionImpl*>(&partition);
+      dynamic_cast<const RdKafka::TopicPartitionImpl *>(&partition);
   rd_kafka_topic_t *rkt;

   if (!(rkt = rd_kafka_topic_new(rk_, p->topic_.c_str(), NULL)))
     return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());

   /* FIXME: Use a C API that takes a topic_partition_list_t instead */
-  RdKafka::ErrorCode err =
-      static_cast<RdKafka::ErrorCode>
-      (rd_kafka_seek(rkt, p->partition_, p->offset_, timeout_ms));
+  RdKafka::ErrorCode err = static_cast<RdKafka::ErrorCode>(
+      rd_kafka_seek(rkt, p->partition_, p->offset_, timeout_ms));

   rd_kafka_topic_destroy(rkt);
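
A usage sketch for the seek() wrapper above; topic, partition and offset are placeholder values:

    RdKafka::TopicPartition *tp =
        RdKafka::TopicPartition::create("example-topic", 0, 1234 /* offset */);
    RdKafka::ErrorCode err = consumer->seek(*tp, 5000 /* timeout_ms */);
    delete tp;  // seek() does not take ownership of the TopicPartition
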
@@ -280,15 +275,11 @@ RdKafka::KafkaConsumerImpl::seek (const RdKafka::TopicPartition &partition,

-RdKafka::ErrorCode
-RdKafka::KafkaConsumerImpl::close () {
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::close() {
   return static_cast<RdKafka::ErrorCode>(rd_kafka_consumer_close(rk_));
 }

-RdKafka::ConsumerGroupMetadata::~ConsumerGroupMetadata () {}
+RdKafka::ConsumerGroupMetadata::~ConsumerGroupMetadata() {
+}

src-cpp/MessageImpl.cpp
@@ -34,5 +34,5 @@
 #include "rdkafkacpp_int.h"

-RdKafka::Message::~Message() {}
+RdKafka::Message::~Message() {
+}

src-cpp/MetadataImpl.cpp
@@ -41,15 +41,23 @@ Metadata::~Metadata() {};
  */
 class BrokerMetadataImpl : public BrokerMetadata {
  public:
-  BrokerMetadataImpl(const rd_kafka_metadata_broker_t *broker_metadata)
-  :broker_metadata_(broker_metadata),host_(broker_metadata->host) {}
+  BrokerMetadataImpl(const rd_kafka_metadata_broker_t *broker_metadata) :
+      broker_metadata_(broker_metadata), host_(broker_metadata->host) {
+  }

-  int32_t id() const{return broker_metadata_->id;}
+  int32_t id() const {
+    return broker_metadata_->id;
+  }

-  const std::string host() const {return host_;}
-  int port() const {return broker_metadata_->port;}
+  const std::string host() const {
+    return host_;
+  }
+  int port() const {
+    return broker_metadata_->port;
+  }

-  virtual ~BrokerMetadataImpl() {}
+  virtual ~BrokerMetadataImpl() {
+  }

  private:
   const rd_kafka_metadata_broker_t *broker_metadata_;
@@ -61,91 +69,97 @@ class BrokerMetadataImpl : public BrokerMetadata {
  */
 class PartitionMetadataImpl : public PartitionMetadata {
  public:
-  // @TODO too much memory copy? maybe we should create a new vector class that read directly from C arrays?
+  // @TODO too much memory copy? maybe we should create a new vector class that
+  // read directly from C arrays?
   // @TODO use auto_ptr?
-  PartitionMetadataImpl(const rd_kafka_metadata_partition_t *partition_metadata)
-  :partition_metadata_(partition_metadata) {
+  PartitionMetadataImpl(
+      const rd_kafka_metadata_partition_t *partition_metadata) :
+      partition_metadata_(partition_metadata) {
     replicas_.reserve(partition_metadata->replica_cnt);
-    for(int i=0;i<partition_metadata->replica_cnt;++i)
+    for (int i = 0; i < partition_metadata->replica_cnt; ++i)
       replicas_.push_back(partition_metadata->replicas[i]);

     isrs_.reserve(partition_metadata->isr_cnt);
-    for(int i=0;i<partition_metadata->isr_cnt;++i)
+    for (int i = 0; i < partition_metadata->isr_cnt; ++i)
       isrs_.push_back(partition_metadata->isrs[i]);
   }

   int32_t id() const {
     return partition_metadata_->id;
   }
   int32_t leader() const {
     return partition_metadata_->leader;
   }
   ErrorCode err() const {
     return static_cast<ErrorCode>(partition_metadata_->err);
   }

-  const std::vector<int32_t> *replicas() const {return &replicas_;}
-  const std::vector<int32_t> *isrs() const {return &isrs_;}
+  const std::vector<int32_t> *replicas() const {
+    return &replicas_;
+  }
+  const std::vector<int32_t> *isrs() const {
+    return &isrs_;
+  }

   ~PartitionMetadataImpl() {};

  private:
   const rd_kafka_metadata_partition_t *partition_metadata_;
-  std::vector<int32_t> replicas_,isrs_;
+  std::vector<int32_t> replicas_, isrs_;
 };

 /**
  * Metadata: Topic information handler
  */
-class TopicMetadataImpl : public TopicMetadata{
+class TopicMetadataImpl : public TopicMetadata {
  public:
-  TopicMetadataImpl(const rd_kafka_metadata_topic_t *topic_metadata)
-  :topic_metadata_(topic_metadata),topic_(topic_metadata->topic) {
+  TopicMetadataImpl(const rd_kafka_metadata_topic_t *topic_metadata) :
+      topic_metadata_(topic_metadata), topic_(topic_metadata->topic) {
     partitions_.reserve(topic_metadata->partition_cnt);
-    for(int i=0;i<topic_metadata->partition_cnt;++i)
+    for (int i = 0; i < topic_metadata->partition_cnt; ++i)
       partitions_.push_back(
-        new PartitionMetadataImpl(&topic_metadata->partitions[i])
-      );
+          new PartitionMetadataImpl(&topic_metadata->partitions[i]));
   }

-  ~TopicMetadataImpl(){
-    for(size_t i=0;i<partitions_.size();++i)
+  ~TopicMetadataImpl() {
+    for (size_t i = 0; i < partitions_.size(); ++i)
       delete partitions_[i];
   }

-  const std::string topic() const {return topic_;}
+  const std::string topic() const {
+    return topic_;
+  }
   const std::vector<const PartitionMetadata *> *partitions() const {
     return &partitions_;
   }
-  ErrorCode err() const {return static_cast<ErrorCode>(topic_metadata_->err);}
+  ErrorCode err() const {
+    return static_cast<ErrorCode>(topic_metadata_->err);
+  }

  private:
   const rd_kafka_metadata_topic_t *topic_metadata_;
   const std::string topic_;
   std::vector<const PartitionMetadata *> partitions_;
 };

-MetadataImpl::MetadataImpl(const rd_kafka_metadata_t *metadata)
-:metadata_(metadata)
-{
+MetadataImpl::MetadataImpl(const rd_kafka_metadata_t *metadata) :
+    metadata_(metadata) {
   brokers_.reserve(metadata->broker_cnt);
-  for(int i=0;i<metadata->broker_cnt;++i)
+  for (int i = 0; i < metadata->broker_cnt; ++i)
     brokers_.push_back(new BrokerMetadataImpl(&metadata->brokers[i]));

   topics_.reserve(metadata->topic_cnt);
-  for(int i=0;i<metadata->topic_cnt;++i)
+  for (int i = 0; i < metadata->topic_cnt; ++i)
     topics_.push_back(new TopicMetadataImpl(&metadata->topics[i]));
 }

 MetadataImpl::~MetadataImpl() {
-  for(size_t i=0;i<brokers_.size();++i)
+  for (size_t i = 0; i < brokers_.size(); ++i)
     delete brokers_[i];
-  for(size_t i=0;i<topics_.size();++i)
+  for (size_t i = 0; i < topics_.size(); ++i)
     delete topics_[i];

-  if(metadata_)
+  if (metadata_)
     rd_kafka_metadata_destroy(metadata_);
 }
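
These wrappers back Handle::metadata(); a hedged sketch of walking the result (timeout is a placeholder, and the caller owns the returned object):

    RdKafka::Metadata *md = NULL;
    if (consumer->metadata(true /* all topics */, NULL, &md, 5000) ==
        RdKafka::ERR_NO_ERROR) {
      const RdKafka::Metadata::TopicMetadataVector *topics = md->topics();
      for (size_t i = 0; i < topics->size(); i++)
        std::cout << (*topics)[i]->topic() << std::endl;
      delete md;  // frees the wrappers and the underlying C metadata
    }
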
src-cpp/ProducerImpl.cpp
@@ -34,14 +34,12 @@
 #include "rdkafkacpp_int.h"

-RdKafka::Producer::~Producer () {
+RdKafka::Producer::~Producer() {
 }

-static void dr_msg_cb_trampoline (rd_kafka_t *rk,
-                                  const rd_kafka_message_t *
-                                  rkmessage,
-                                  void *opaque) {
+static void dr_msg_cb_trampoline(rd_kafka_t *rk,
+                                 const rd_kafka_message_t *rkmessage,
+                                 void *opaque) {
   RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
   RdKafka::MessageImpl message(RD_KAFKA_PRODUCER, NULL,
                                (rd_kafka_message_t *)rkmessage, false);

@@ -50,12 +48,13 @@ static void dr_msg_cb_trampoline (rd_kafka_t *rk,

-RdKafka::Producer *RdKafka::Producer::create (const RdKafka::Conf *conf,
-                                              std::string &errstr) {
+RdKafka::Producer *RdKafka::Producer::create(const RdKafka::Conf *conf,
+                                             std::string &errstr) {
   char errbuf[512];
-  const RdKafka::ConfImpl *confimpl = dynamic_cast<const RdKafka::ConfImpl *>(conf);
+  const RdKafka::ConfImpl *confimpl =
+      dynamic_cast<const RdKafka::ConfImpl *>(conf);
   RdKafka::ProducerImpl *rkp = new RdKafka::ProducerImpl();
   rd_kafka_conf_t *rk_conf = NULL;

   if (confimpl) {
     if (!confimpl->rk_conf_) {

@@ -76,8 +75,8 @@ RdKafka::Producer *RdKafka::Producer::create (const RdKafka::Conf *conf,

   rd_kafka_t *rk;
-  if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, rk_conf,
-                          errbuf, sizeof(errbuf)))) {
+  if (!(rk =
+            rd_kafka_new(RD_KAFKA_PRODUCER, rk_conf, errbuf, sizeof(errbuf)))) {
     errstr = errbuf;
     // rd_kafka_new() takes ownership only if succeeds
     if (rk_conf)
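
The trampoline above is how a user-supplied DeliveryReportCb is invoked from the C layer. A hedged sketch of wiring one up (the class name is illustrative):

    class ExampleDrCb : public RdKafka::DeliveryReportCb {
     public:
      void dr_cb(RdKafka::Message &message) {
        if (message.err())
          std::cerr << "delivery failed: " << message.errstr() << std::endl;
      }
    };

    ExampleDrCb dr_cb;                   // must outlive the producer
    conf->set("dr_cb", &dr_cb, errstr);  // set before Producer::create()
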
@@ -92,16 +91,16 @@ RdKafka::Producer *RdKafka::Producer::create (const RdKafka::Conf *conf,
 }

-RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
-                                                   int32_t partition,
-                                                   int msgflags,
-                                                   void *payload, size_t len,
-                                                   const std::string *key,
-                                                   void *msg_opaque) {
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(RdKafka::Topic *topic,
+                                                  int32_t partition,
+                                                  int msgflags,
+                                                  void *payload,
+                                                  size_t len,
+                                                  const std::string *key,
+                                                  void *msg_opaque) {
   RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);

-  if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags,
-                       payload, len,
+  if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, payload, len,
                        key ? key->c_str() : NULL, key ? key->size() : 0,
                        msg_opaque) == -1)
     return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
@@ -110,91 +109,83 @@ RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
 }

-RdKafka::ErrorCode RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
-                                                   int32_t partition,
-                                                   int msgflags,
-                                                   void *payload, size_t len,
-                                                   const void *key,
-                                                   size_t key_len,
-                                                   void *msg_opaque) {
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(RdKafka::Topic *topic,
+                                                  int32_t partition,
+                                                  int msgflags,
+                                                  void *payload,
+                                                  size_t len,
+                                                  const void *key,
+                                                  size_t key_len,
+                                                  void *msg_opaque) {
   RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);

-  if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags,
-                       payload, len, key, key_len,
-                       msg_opaque) == -1)
+  if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, payload, len, key,
+                       key_len, msg_opaque) == -1)
     return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());

   return RdKafka::ERR_NO_ERROR;
 }

-RdKafka::ErrorCode
-RdKafka::ProducerImpl::produce (RdKafka::Topic *topic,
-                                int32_t partition,
-                                const std::vector<char> *payload,
-                                const std::vector<char> *key,
-                                void *msg_opaque) {
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(
+    RdKafka::Topic *topic,
+    int32_t partition,
+    const std::vector<char> *payload,
+    const std::vector<char> *key,
+    void *msg_opaque) {
   RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);

   if (rd_kafka_produce(topicimpl->rkt_, partition, RD_KAFKA_MSG_F_COPY,
                        payload ? (void *)&(*payload)[0] : NULL,
-                       payload ? payload->size() : 0,
-                       key ? &(*key)[0] : NULL, key ? key->size() : 0,
-                       msg_opaque) == -1)
+                       payload ? payload->size() : 0, key ? &(*key)[0] : NULL,
+                       key ? key->size() : 0, msg_opaque) == -1)
     return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());

   return RdKafka::ERR_NO_ERROR;
 }

-RdKafka::ErrorCode
-RdKafka::ProducerImpl::produce (const std::string topic_name,
-                                int32_t partition, int msgflags,
-                                void *payload, size_t len,
-                                const void *key, size_t key_len,
-                                int64_t timestamp, void *msg_opaque) {
-  return
-      static_cast<RdKafka::ErrorCode>
-      (
-          rd_kafka_producev(rk_,
-                            RD_KAFKA_V_TOPIC(topic_name.c_str()),
-                            RD_KAFKA_V_PARTITION(partition),
-                            RD_KAFKA_V_MSGFLAGS(msgflags),
-                            RD_KAFKA_V_VALUE(payload, len),
-                            RD_KAFKA_V_KEY(key, key_len),
-                            RD_KAFKA_V_TIMESTAMP(timestamp),
-                            RD_KAFKA_V_OPAQUE(msg_opaque),
-                            RD_KAFKA_V_END)
-      );
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(const std::string topic_name,
+                                                  int32_t partition,
+                                                  int msgflags,
+                                                  void *payload,
+                                                  size_t len,
+                                                  const void *key,
+                                                  size_t key_len,
+                                                  int64_t timestamp,
+                                                  void *msg_opaque) {
+  return static_cast<RdKafka::ErrorCode>(rd_kafka_producev(
+      rk_, RD_KAFKA_V_TOPIC(topic_name.c_str()),
+      RD_KAFKA_V_PARTITION(partition), RD_KAFKA_V_MSGFLAGS(msgflags),
+      RD_KAFKA_V_VALUE(payload, len), RD_KAFKA_V_KEY(key, key_len),
+      RD_KAFKA_V_TIMESTAMP(timestamp), RD_KAFKA_V_OPAQUE(msg_opaque),
+      RD_KAFKA_V_END));
 }

-RdKafka::ErrorCode
-RdKafka::ProducerImpl::produce (const std::string topic_name,
-                                int32_t partition, int msgflags,
-                                void *payload, size_t len,
-                                const void *key, size_t key_len,
-                                int64_t timestamp,
-                                RdKafka::Headers *headers,
-                                void *msg_opaque) {
-  rd_kafka_headers_t *hdrs = NULL;
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(const std::string topic_name,
+                                                  int32_t partition,
+                                                  int msgflags,
+                                                  void *payload,
+                                                  size_t len,
+                                                  const void *key,
+                                                  size_t key_len,
+                                                  int64_t timestamp,
+                                                  RdKafka::Headers *headers,
+                                                  void *msg_opaque) {
+  rd_kafka_headers_t *hdrs          = NULL;
   RdKafka::HeadersImpl *headersimpl = NULL;
   rd_kafka_resp_err_t err;

   if (headers) {
-    headersimpl = static_cast<RdKafka::HeadersImpl*>(headers);
-    hdrs = headersimpl->c_ptr();
+    headersimpl = static_cast<RdKafka::HeadersImpl *>(headers);
+    hdrs        = headersimpl->c_ptr();
   }

-  err = rd_kafka_producev(rk_,
-                          RD_KAFKA_V_TOPIC(topic_name.c_str()),
-                          RD_KAFKA_V_PARTITION(partition),
-                          RD_KAFKA_V_MSGFLAGS(msgflags),
-                          RD_KAFKA_V_VALUE(payload, len),
-                          RD_KAFKA_V_KEY(key, key_len),
-                          RD_KAFKA_V_TIMESTAMP(timestamp),
-                          RD_KAFKA_V_OPAQUE(msg_opaque),
-                          RD_KAFKA_V_HEADERS(hdrs),
-                          RD_KAFKA_V_END);
+  err = rd_kafka_producev(
+      rk_, RD_KAFKA_V_TOPIC(topic_name.c_str()),
+      RD_KAFKA_V_PARTITION(partition), RD_KAFKA_V_MSGFLAGS(msgflags),
+      RD_KAFKA_V_VALUE(payload, len), RD_KAFKA_V_KEY(key, key_len),
+      RD_KAFKA_V_TIMESTAMP(timestamp), RD_KAFKA_V_OPAQUE(msg_opaque),
+      RD_KAFKA_V_HEADERS(hdrs), RD_KAFKA_V_END);

   if (!err && headersimpl) {
     /* A successful producev() call will destroy the C headers. */
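
Because a successful producev() consumes the C headers, callers of the headers variant only delete the Headers object themselves on failure. A hedged sketch with placeholder topic, payload and header values:

    RdKafka::Headers *headers = RdKafka::Headers::create();
    headers->add("trace-id", "abc123");  // placeholder header
    RdKafka::ErrorCode err = producer->produce(
        "example-topic", RdKafka::Topic::PARTITION_UA,
        RdKafka::Producer::RK_MSG_COPY, (void *)"hello", 5, NULL, 0,
        0 /* timestamp: use current time */, headers, NULL);
    if (err != RdKafka::ERR_NO_ERROR)
      delete headers;  // on success the library took ownership
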
src-cpp/QueueImpl.cpp
@@ -30,16 +30,15 @@
 #include "rdkafkacpp_int.h"

-RdKafka::Queue::~Queue () {
+RdKafka::Queue::~Queue() {
 }

-RdKafka::Queue *RdKafka::Queue::create (Handle *base) {
-  return new RdKafka::QueueImpl(rd_kafka_queue_new(dynamic_cast<HandleImpl*>(base)->rk_));
+RdKafka::Queue *RdKafka::Queue::create(Handle *base) {
+  return new RdKafka::QueueImpl(
+      rd_kafka_queue_new(dynamic_cast<HandleImpl *>(base)->rk_));
 }

-RdKafka::ErrorCode
-RdKafka::QueueImpl::forward (Queue *queue) {
+RdKafka::ErrorCode RdKafka::QueueImpl::forward(Queue *queue) {
   if (!queue) {
     rd_kafka_queue_forward(queue_, NULL);
   } else {
@@ -49,7 +48,7 @@ RdKafka::QueueImpl::forward (Queue *queue) {
   return RdKafka::ERR_NO_ERROR;
 }

-RdKafka::Message *RdKafka::QueueImpl::consume (int timeout_ms) {
+RdKafka::Message *RdKafka::QueueImpl::consume(int timeout_ms) {
   rd_kafka_message_t *rkmessage;
   rkmessage = rd_kafka_consume_queue(queue_, timeout_ms);
@@ -60,11 +59,12 @@ RdKafka::Message *RdKafka::QueueImpl::consume (int timeout_ms) {
   return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, rkmessage);
 }

-int RdKafka::QueueImpl::poll (int timeout_ms) {
+int RdKafka::QueueImpl::poll(int timeout_ms) {
   return rd_kafka_queue_poll_callback(queue_, timeout_ms);
 }

-void RdKafka::QueueImpl::io_event_enable (int fd, const void *payload,
-                                          size_t size) {
+void RdKafka::QueueImpl::io_event_enable(int fd,
+                                         const void *payload,
+                                         size_t size) {
   rd_kafka_queue_io_event_enable(queue_, fd, payload, size);
 }
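
io_event_enable() makes the queue write a caller-chosen payload to a file descriptor whenever an event becomes ready, so the queue can be plugged into an existing poll()/epoll loop. A hedged sketch using a POSIX pipe:

    int fds[2];
    if (pipe(fds) == 0)                        // POSIX pipe(), <unistd.h>
      queue->io_event_enable(fds[1], "1", 1);  // one byte written per wakeup
    // elsewhere: poll fds[0]; when readable, drain it and call queue->poll(0)
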
src-cpp/RdKafka.cpp
@@ -30,30 +30,30 @@
 #include "rdkafkacpp_int.h"

-int RdKafka::version () {
+int RdKafka::version() {
   return rd_kafka_version();
 }

-std::string RdKafka::version_str () {
+std::string RdKafka::version_str() {
   return std::string(rd_kafka_version_str());
 }

 std::string RdKafka::get_debug_contexts() {
   return std::string(RD_KAFKA_DEBUG_CONTEXTS);
 }

-std::string RdKafka::err2str (RdKafka::ErrorCode err) {
+std::string RdKafka::err2str(RdKafka::ErrorCode err) {
   return std::string(rd_kafka_err2str(static_cast<rd_kafka_resp_err_t>(err)));
 }

-int RdKafka::wait_destroyed (int timeout_ms) {
+int RdKafka::wait_destroyed(int timeout_ms) {
   return rd_kafka_wait_destroyed(timeout_ms);
 }

-void *RdKafka::mem_malloc (size_t size) {
+void *RdKafka::mem_malloc(size_t size) {
   return rd_kafka_mem_malloc(NULL, size);
 }

-void RdKafka::mem_free (void *ptr) {
+void RdKafka::mem_free(void *ptr) {
   rd_kafka_mem_free(NULL, ptr);
 }

src-cpp/TopicImpl.cpp
@@ -43,45 +43,43 @@ const int64_t RdKafka::Topic::OFFSET_STORED = RD_KAFKA_OFFSET_STORED;
 const int64_t RdKafka::Topic::OFFSET_INVALID = RD_KAFKA_OFFSET_INVALID;

-RdKafka::Topic::~Topic () {
+RdKafka::Topic::~Topic() {
 }

-static int32_t partitioner_cb_trampoline (const rd_kafka_topic_t *rkt,
-                                          const void *keydata,
-                                          size_t keylen,
-                                          int32_t partition_cnt,
-                                          void *rkt_opaque,
-                                          void *msg_opaque) {
+static int32_t partitioner_cb_trampoline(const rd_kafka_topic_t *rkt,
+                                         const void *keydata,
+                                         size_t keylen,
+                                         int32_t partition_cnt,
+                                         void *rkt_opaque,
+                                         void *msg_opaque) {
   RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(rkt_opaque);
   std::string key(static_cast<const char *>(keydata), keylen);
   return topicimpl->partitioner_cb_->partitioner_cb(topicimpl, &key,
                                                     partition_cnt, msg_opaque);
 }

-static int32_t partitioner_kp_cb_trampoline (const rd_kafka_topic_t *rkt,
-                                             const void *keydata,
-                                             size_t keylen,
-                                             int32_t partition_cnt,
-                                             void *rkt_opaque,
-                                             void *msg_opaque) {
+static int32_t partitioner_kp_cb_trampoline(const rd_kafka_topic_t *rkt,
+                                            const void *keydata,
+                                            size_t keylen,
+                                            int32_t partition_cnt,
+                                            void *rkt_opaque,
+                                            void *msg_opaque) {
   RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(rkt_opaque);
-  return topicimpl->partitioner_kp_cb_->partitioner_cb(topicimpl,
-                                                       keydata, keylen,
-                                                       partition_cnt,
-                                                       msg_opaque);
+  return topicimpl->partitioner_kp_cb_->partitioner_cb(
+      topicimpl, keydata, keylen, partition_cnt, msg_opaque);
 }

-RdKafka::Topic *RdKafka::Topic::create (Handle *base,
-                                        const std::string &topic_str,
-                                        const Conf *conf,
-                                        std::string &errstr) {
-  const RdKafka::ConfImpl *confimpl = static_cast<const RdKafka::ConfImpl *>(conf);
+RdKafka::Topic *RdKafka::Topic::create(Handle *base,
+                                       const std::string &topic_str,
+                                       const Conf *conf,
+                                       std::string &errstr) {
+  const RdKafka::ConfImpl *confimpl =
+      static_cast<const RdKafka::ConfImpl *>(conf);
   rd_kafka_topic_t *rkt;
   rd_kafka_topic_conf_t *rkt_conf;
-  rd_kafka_t *rk = dynamic_cast<HandleImpl*>(base)->rk_;
+  rd_kafka_t *rk = dynamic_cast<HandleImpl *>(base)->rk_;

   RdKafka::TopicImpl *topic = new RdKafka::TopicImpl();
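
The trampolines above dispatch to a user-supplied PartitionerCb; a hedged sketch of a custom partitioner (the hash is illustrative, not librdkafka's default):

    class ExamplePartitionerCb : public RdKafka::PartitionerCb {
     public:
      int32_t partitioner_cb(const RdKafka::Topic *topic, const std::string *key,
                             int32_t partition_cnt, void *msg_opaque) {
        unsigned int h = 5381;  // djb2-style hash of the key
        for (size_t i = 0; key && i < key->size(); i++)
          h = ((h << 5) + h) + (unsigned char)(*key)[i];
        return (int32_t)(h % (unsigned int)partition_cnt);
      }
    };
    // registered on the topic Conf: tconf->set("partitioner_cb", &part_cb, errstr);
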
@@ -123,6 +121,4 @@ RdKafka::Topic *RdKafka::Topic::create (Handle *base,
   topic->rkt_ = rkt;

   return topic;
-
 }
-

src-cpp/TopicPartitionImpl.cpp
@@ -32,24 +32,26 @@
 #include "rdkafkacpp_int.h"

-RdKafka::TopicPartition::~TopicPartition () {
+RdKafka::TopicPartition::~TopicPartition() {
 }

-RdKafka::TopicPartition *
-RdKafka::TopicPartition::create (const std::string &topic, int partition) {
+RdKafka::TopicPartition *RdKafka::TopicPartition::create(
+    const std::string &topic,
+    int partition) {
   return new TopicPartitionImpl(topic, partition);
 }

-RdKafka::TopicPartition *
-RdKafka::TopicPartition::create (const std::string &topic, int partition,
-                                 int64_t offset) {
+RdKafka::TopicPartition *RdKafka::TopicPartition::create(
+    const std::string &topic,
+    int partition,
+    int64_t offset) {
   return new TopicPartitionImpl(topic, partition, offset);
 }

-void
-RdKafka::TopicPartition::destroy (std::vector<TopicPartition*> &partitions) {
-  for (std::vector<TopicPartition*>::iterator it = partitions.begin() ;
+void RdKafka::TopicPartition::destroy(
+    std::vector<TopicPartition *> &partitions) {
+  for (std::vector<TopicPartition *>::iterator it = partitions.begin();
        it != partitions.end(); ++it)
-    delete(*it);
+    delete (*it);
   partitions.clear();
 }
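
destroy() frees every element and clears the vector, so it pairs with the create() factories above. A hedged usage sketch with placeholder topic and offset:

    std::vector<RdKafka::TopicPartition *> parts;
    parts.push_back(RdKafka::TopicPartition::create("example-topic", 0));
    parts.push_back(RdKafka::TopicPartition::create("example-topic", 1, 42));
    // ... pass to assign()/committed()/position() ...
    RdKafka::TopicPartition::destroy(parts);  // deletes elements, clears vector
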
1649  src-cpp/rdkafkacpp.h
(Diff not shown because the file is too large.)

4647  src/cJSON.c
(Diff not shown because the file is too large.)

355  src/cJSON.h
@@ -24,23 +24,27 @@
 #define cJSON__h

 #ifdef __cplusplus
-extern "C"
-{
+extern "C" {
 #endif

-#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
+#if !defined(__WINDOWS__) && \
+    (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
 #define __WINDOWS__
 #endif

 #ifdef __WINDOWS__

-/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. For windows you have 3 define options:
+/* When compiling for windows, we specify a specific calling convention to avoid
+issues where we are being called from a project with a different default calling
+convention. For windows you have 3 define options:

-CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols
-CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default)
-CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol
+CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever
+dllexport symbols CJSON_EXPORT_SYMBOLS - Define this on library build when you
+want to dllexport symbols (default) CJSON_IMPORT_SYMBOLS - Define this if you
+want to dllimport symbol

-For *nix builds that support visibility attribute, you can define similar behavior by
+For *nix builds that support visibility attribute, you can define similar
+behavior by

 setting default visibility to hidden by adding
 -fvisibility=hidden (for gcc)
@@ -48,31 +52,35 @@ or
 -xldscope=hidden (for sun cc)
 to CFLAGS

-then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does
+then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way
+CJSON_EXPORT_SYMBOLS does

 */

 #define CJSON_CDECL __cdecl
 #define CJSON_STDCALL __stdcall

-/* export symbols by default, this is necessary for copy pasting the C and header file */
-#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS)
+/* export symbols by default, this is necessary for copy pasting the C and
+ * header file */
+#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && \
+    !defined(CJSON_EXPORT_SYMBOLS)
 #define CJSON_EXPORT_SYMBOLS
 #endif

 #if defined(CJSON_HIDE_SYMBOLS)
 #define CJSON_PUBLIC(type) type CJSON_STDCALL
 #elif defined(CJSON_EXPORT_SYMBOLS)
 #define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
 #elif defined(CJSON_IMPORT_SYMBOLS)
 #define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
 #endif
 #else /* !__WINDOWS__ */
 #define CJSON_CDECL
 #define CJSON_STDCALL

-#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY)
+#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined(__SUNPRO_C)) && \
+    defined(CJSON_API_VISIBILITY)
 #define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
 #else
 #define CJSON_PUBLIC(type) type
 #endif
@@ -87,109 +95,145 @@ then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJ

 /* cJSON Types: */
 #define cJSON_Invalid (0)
 #define cJSON_False (1 << 0)
 #define cJSON_True (1 << 1)
 #define cJSON_NULL (1 << 2)
 #define cJSON_Number (1 << 3)
 #define cJSON_String (1 << 4)
 #define cJSON_Array (1 << 5)
 #define cJSON_Object (1 << 6)
 #define cJSON_Raw (1 << 7) /* raw json */

 #define cJSON_IsReference 256
 #define cJSON_StringIsConst 512

 /* The cJSON structure: */
-typedef struct cJSON
-{
-    /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */
-    struct cJSON *next;
-    struct cJSON *prev;
-    /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */
-    struct cJSON *child;
+typedef struct cJSON {
+        /* next/prev allow you to walk array/object chains. Alternatively, use
+         * GetArraySize/GetArrayItem/GetObjectItem */
+        struct cJSON *next;
+        struct cJSON *prev;
+        /* An array or object item will have a child pointer pointing to a chain
+         * of the items in the array/object. */
+        struct cJSON *child;

-    /* The type of the item, as above. */
-    int type;
+        /* The type of the item, as above. */
+        int type;

-    /* The item's string, if type==cJSON_String and type == cJSON_Raw */
-    char *valuestring;
-    /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */
-    int valueint;
-    /* The item's number, if type==cJSON_Number */
-    double valuedouble;
+        /* The item's string, if type==cJSON_String and type == cJSON_Raw */
+        char *valuestring;
+        /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead
+         */
+        int valueint;
+        /* The item's number, if type==cJSON_Number */
+        double valuedouble;

-    /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */
-    char *string;
+        /* The item's name string, if this item is the child of, or is in the
+         * list of subitems of an object. */
+        char *string;
 } cJSON;

-typedef struct cJSON_Hooks
-{
-    /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */
-    void *(CJSON_CDECL *malloc_fn)(size_t sz);
-    void (CJSON_CDECL *free_fn)(void *ptr);
+typedef struct cJSON_Hooks {
+        /* malloc/free are CDECL on Windows regardless of the default calling
+         * convention of the compiler, so ensure the hooks allow passing those
+         * functions directly. */
+        void *(CJSON_CDECL *malloc_fn)(size_t sz);
+        void(CJSON_CDECL *free_fn)(void *ptr);
 } cJSON_Hooks;

 typedef int cJSON_bool;

-/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them.
- * This is to prevent stack overflows. */
+/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse
+ * them. This is to prevent stack overflows. */
 #ifndef CJSON_NESTING_LIMIT
 #define CJSON_NESTING_LIMIT 1000
 #endif

 /* returns the version of cJSON as a string */
-CJSON_PUBLIC(const char*) cJSON_Version(void);
+CJSON_PUBLIC(const char *) cJSON_Version(void);

 /* Supply malloc, realloc and free functions to cJSON */
-CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks);
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks);

-/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */
-/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */
+/* Memory Management: the caller is always responsible to free the results from
+ * all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib
+ * free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is
+ * cJSON_PrintPreallocated, where the caller has full responsibility of the
+ * buffer. */
+/* Supply a block of JSON, and this returns a cJSON object you can interrogate.
+ */
 CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value);
-CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length);
-/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */
-/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */
-CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated);
-CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated);
+CJSON_PUBLIC(cJSON *)
+cJSON_ParseWithLength(const char *value, size_t buffer_length);
+/* ParseWithOpts allows you to require (and check) that the JSON is null
+ * terminated, and to retrieve the pointer to the final byte parsed. */
+/* If you supply a ptr in return_parse_end and parsing fails, then
+ * return_parse_end will contain a pointer to the error so will match
+ * cJSON_GetErrorPtr(). */
+CJSON_PUBLIC(cJSON *)
+cJSON_ParseWithOpts(const char *value,
+                    const char **return_parse_end,
+                    cJSON_bool require_null_terminated);
+CJSON_PUBLIC(cJSON *)
+cJSON_ParseWithLengthOpts(const char *value,
+                          size_t buffer_length,
+                          const char **return_parse_end,
+                          cJSON_bool require_null_terminated);

 /* Render a cJSON entity to text for transfer/storage. */
 CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item);
 /* Render a cJSON entity to text for transfer/storage without any formatting. */
 CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item);
-/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */
-CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
-/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */
-/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */
-CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format);
+/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess
+ * at the final size. guessing well reduces reallocation. fmt=0 gives
+ * unformatted, =1 gives formatted */
+CJSON_PUBLIC(char *)
+cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
+/* Render a cJSON entity to text using a buffer already allocated in memory with
+ * given length. Returns 1 on success and 0 on failure. */
+/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will
+ * use, so to be safe allocate 5 bytes more than you actually need */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_PrintPreallocated(cJSON *item,
+                        char *buffer,
+                        const int length,
+                        const cJSON_bool format);
 /* Delete a cJSON entity and all subentities. */
 CJSON_PUBLIC(void) cJSON_Delete(cJSON *item);

 /* Returns the number of items in an array (or object). */
 CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array);
-/* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */
+/* Retrieve item number "index" from array "array". Returns NULL if
+ * unsuccessful. */
 CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
 /* Get item "string" from object. Case insensitive. */
-CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string);
-CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string);
-CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string);
-/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
+CJSON_PUBLIC(cJSON *)
+cJSON_GetObjectItem(const cJSON *const object, const char *const string);
+CJSON_PUBLIC(cJSON *)
+cJSON_GetObjectItemCaseSensitive(const cJSON *const object,
+                                 const char *const string);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_HasObjectItem(const cJSON *object, const char *string);
+/* For analysing failed parses. This returns a pointer to the parse error.
+ * You'll probably need to look a few chars back to make sense of it. Defined
+ * when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
 CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);

 /* Check item type and return its value */
-CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item);
-CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item);
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item);
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item);

 /* These functions check the type of an item */
-CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item);
-CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item);

 /* These calls create a cJSON item of the appropriate type. */
 CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void);
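
The memory-management rule above in one hedged sketch: everything returned by the parse and print families must be released by the caller.

    cJSON *root = cJSON_Parse("{\"answer\": 42}");
    if (root) {
      char *text = cJSON_Print(root);  /* heap-allocated string */
      if (text) {
        puts(text);
        cJSON_free(text);  /* frees via the configured hooks */
      }
      cJSON_Delete(root);  /* frees the whole tree */
    }
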
@@ -212,77 +256,138 @@ CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child);
 CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child);

 /* These utilities create an Array of count items.
- * The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/
+ * The parameter count cannot be greater than the number of elements in the
+ * number array, otherwise array access will be out of bounds.*/
 CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count);
 CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count);
 CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count);
-CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count);
+CJSON_PUBLIC(cJSON *)
+cJSON_CreateStringArray(const char *const *strings, int count);

 /* Append item to the specified array/object. */
 CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item);
-CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
-/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object.
- * WARNING: When this function was used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before
- * writing to `item->string` */
-CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
-/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */
-CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
-CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
+/* Use this when string is definitely const (i.e. a literal, or as good as), and
+ * will definitely survive the cJSON object. WARNING: When this function was
+ * used, make sure to always check that (item->type & cJSON_StringIsConst) is
+ * zero before writing to `item->string` */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
+/* Append reference to item to the specified array/object. Use this when you
+ * want to add an existing cJSON to a new cJSON, but don't want to corrupt your
+ * existing cJSON. */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);

 /* Remove/Detach items from Arrays/Objects. */
-CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item);
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item);
 CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
 CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
-CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string);
-CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
-CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string);
-CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
+CJSON_PUBLIC(void)
+cJSON_DeleteItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(void)
+cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);

 /* Update array items. */
-CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */
-CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement);
-CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
-CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem);
-CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_InsertItemInArray(
+    cJSON *array,
+    int which,
+    cJSON *newitem); /* Shifts pre-existing items to the right. */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemViaPointer(cJSON *const parent,
+                            cJSON *const item,
+                            cJSON *replacement);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,
+                                       const char *string,
+                                       cJSON *newitem);

 /* Duplicate a cJSON item */
 CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
-/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will
- * need to be released. With recurse!=0, it will duplicate any children connected to the item.
- * The item->next and ->prev pointers are always zero on return from Duplicate. */
-/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal.
- * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */
-CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive);
+/* Duplicate will create a new, identical cJSON item to the one you pass, in new
+ * memory that will need to be released. With recurse!=0, it will duplicate any
+ * children connected to the item.
+ * The item->next and ->prev pointers are always zero on return from Duplicate.
+ */
+/* Recursively compare two cJSON items for equality. If either a or b is NULL or
+ * invalid, they will be considered unequal.
+ * case_sensitive determines if object keys are treated case sensitive (1) or
+ * case insensitive (0) */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_Compare(const cJSON *const a,
+              const cJSON *const b,
+              const cJSON_bool case_sensitive);

-/* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from strings.
- * The input pointer json cannot point to a read-only address area, such as a string constant,
+/* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from
+ * strings. The input pointer json cannot point to a read-only address area,
+ * such as a string constant,
  * but should point to a readable and writable adress area. */
 CJSON_PUBLIC(void) cJSON_Minify(char *json);

 /* Helper functions for creating and adding items to an object at the same time.
  * They return the added item or NULL on failure. */
-CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name);
-CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name);
-CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name);
-CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean);
-CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number);
-CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string);
-CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw);
-CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name);
-CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddNullToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddTrueToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddFalseToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddBoolToObject(cJSON *const object,
+                      const char *const name,
+                      const cJSON_bool boolean);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddNumberToObject(cJSON *const object,
+                        const char *const name,
+                        const double number);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddStringToObject(cJSON *const object,
+                        const char *const name,
+                        const char *const string);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddRawToObject(cJSON *const object,
+                     const char *const name,
+                     const char *const raw);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddObjectToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddArrayToObject(cJSON *const object, const char *const name);

-/* When assigning an integer value, it needs to be propagated to valuedouble too. */
-#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number))
+/* When assigning an integer value, it needs to be propagated to valuedouble
+ * too. */
+#define cJSON_SetIntValue(object, number) \
+        ((object) ? (object)->valueint = (object)->valuedouble = (number) \
+                  : (number))
 /* helper for the cJSON_SetNumberValue macro */
 CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number);
-#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number))
-/* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */
-CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring);
+#define cJSON_SetNumberValue(object, number) \
+        ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) \
+                          : (number))
+/* Change the valuestring of a cJSON_String object, only takes effect when type
+ * of object is cJSON_String */
+CJSON_PUBLIC(char *)
+cJSON_SetValuestring(cJSON *object, const char *valuestring);

 /* Macro for iterating over an array or object */
-#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next)
+#define cJSON_ArrayForEach(element, array) \
+        for (element = (array != NULL) ? (array)->child : NULL; \
+             element != NULL; element = element->next)

-/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */
+/* malloc/free objects using the malloc/free functions that have been set with
+ * cJSON_InitHooks */
 CJSON_PUBLIC(void *) cJSON_malloc(size_t size);
 CJSON_PUBLIC(void) cJSON_free(void *object);
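
cJSON_ArrayForEach walks the child chain directly, matching the struct layout described earlier; a hedged usage sketch:

    cJSON *item  = NULL;
    cJSON *array = cJSON_Parse("[1, 2, 3]");
    cJSON_ArrayForEach(item, array) {
        if (cJSON_IsNumber(item))
            printf("%g\n", item->valuedouble);
    }
    cJSON_Delete(array);
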
256  src/rd.h
@@ -3,24 +3,24 @@
  *
  * Copyright (c) 2012, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
@@ -32,7 +32,7 @@

#ifndef _WIN32
#ifndef _GNU_SOURCE
#define _GNU_SOURCE /* for strndup() */
#endif

#if defined(__APPLE__) && !defined(_DARWIN_C_SOURCE)

@@ -41,7 +41,7 @@

#define __need_IOV_MAX
#ifndef _POSIX_C_SOURCE
#define _POSIX_C_SOURCE 200809L /* for timespec on solaris */
#endif
#endif
@@ -93,7 +93,9 @@
#if ENABLE_DEVEL == 1
#define rd_dassert(cond) rd_assert(cond)
#else
#define rd_dassert(cond) do {} while (0)
#define rd_dassert(cond) \
        do { \
        } while (0)
#endif
@@ -101,65 +103,67 @@
#define RD_NOTREACHED() rd_assert(!*"/* NOTREACHED */ violated")

/** Assert if reached */
#define RD_BUG(...) do { \
        fprintf(stderr, "INTERNAL ERROR: librdkafka %s:%d: ", \
                __FUNCTION__, __LINE__); \
        fprintf(stderr, __VA_ARGS__); \
        fprintf(stderr, "\n"); \
        rd_assert(!*"INTERNAL ERROR IN LIBRDKAFKA"); \
#define RD_BUG(...) \
        do { \
                fprintf(stderr, \
                        "INTERNAL ERROR: librdkafka %s:%d: ", __FUNCTION__, \
                        __LINE__); \
                fprintf(stderr, __VA_ARGS__); \
                fprintf(stderr, "\n"); \
                rd_assert(!*"INTERNAL ERROR IN LIBRDKAFKA"); \
        } while (0)


/**
 * Allocator wrappers.
 * We serve under the premise that if a (small) memory
 * allocation fails all hope is lost and the application
 * will fail anyway, so no need to handle it handsomely.
 */
static RD_INLINE RD_UNUSED void *rd_calloc(size_t num, size_t sz) {
        void *p = calloc(num, sz);
        rd_assert(p);
        return p;
}

static RD_INLINE RD_UNUSED void *rd_malloc(size_t sz) {
        void *p = malloc(sz);
        rd_assert(p);
        return p;
}

static RD_INLINE RD_UNUSED void *rd_realloc(void *ptr, size_t sz) {
        void *p = realloc(ptr, sz);
        rd_assert(p);
        return p;
}

static RD_INLINE RD_UNUSED void rd_free(void *ptr) {
        free(ptr);
}

static RD_INLINE RD_UNUSED char *rd_strdup(const char *s) {
#ifndef _WIN32
        char *n = strdup(s);
#else
        char *n = _strdup(s);
#endif
        rd_assert(n);
        return n;
}

static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) {
#if HAVE_STRNDUP
        char *n = strndup(s, len);
        rd_assert(n);
#else
        char *n = (char *)rd_malloc(len + 1);
        rd_assert(n);
        memcpy(n, s, len);
        n[len] = '\0';
#endif
        return n;
}
@@ -169,25 +173,27 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) {
 */

#ifdef strndupa
#define rd_strndupa(DESTPTR,PTR,LEN) (*(DESTPTR) = strndupa(PTR,LEN))
#define rd_strndupa(DESTPTR, PTR, LEN) (*(DESTPTR) = strndupa(PTR, LEN))
#else
#define rd_strndupa(DESTPTR,PTR,LEN) do { \
        const char *_src = (PTR); \
        size_t _srclen = (LEN); \
        char *_dst = rd_alloca(_srclen + 1); \
        memcpy(_dst, _src, _srclen); \
        _dst[_srclen] = '\0'; \
        *(DESTPTR) = _dst; \
#define rd_strndupa(DESTPTR, PTR, LEN) \
        do { \
                const char *_src = (PTR); \
                size_t _srclen = (LEN); \
                char *_dst = rd_alloca(_srclen + 1); \
                memcpy(_dst, _src, _srclen); \
                _dst[_srclen] = '\0'; \
                *(DESTPTR) = _dst; \
        } while (0)
#endif

#ifdef strdupa
#define rd_strdupa(DESTPTR,PTR) (*(DESTPTR) = strdupa(PTR))
#define rd_strdupa(DESTPTR, PTR) (*(DESTPTR) = strdupa(PTR))
#else
#define rd_strdupa(DESTPTR,PTR) do { \
        const char *_src1 = (PTR); \
        size_t _srclen1 = strlen(_src1); \
        rd_strndupa(DESTPTR, _src1, _srclen1); \
#define rd_strdupa(DESTPTR, PTR) \
        do { \
                const char *_src1 = (PTR); \
                size_t _srclen1 = strlen(_src1); \
                rd_strndupa(DESTPTR, _src1, _srclen1); \
        } while (0)
#endif
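A usage sketch for the alloca-based duplication macros above (function and variable names are hypothetical). Because the copy lives on the stack via rd_alloca(), it is only valid inside the calling scope and must never be returned or freed:

static void log_key(const char *buf, size_t keylen) {
        char *key;

        /* Stack-scoped, NUL-terminated copy of the first keylen bytes;
         * released automatically when this function returns. */
        rd_strndupa(&key, buf, keylen);
        printf("key = %s\n", key);
}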
@@ -205,35 +211,35 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) {

/* Round/align X upwards to STRIDE, which must be power of 2. */
#define RD_ROUNDUP(X,STRIDE) (((X) + ((STRIDE) - 1)) & ~(STRIDE-1))
#define RD_ROUNDUP(X, STRIDE) (((X) + ((STRIDE)-1)) & ~(STRIDE - 1))

#define RD_ARRAY_SIZE(A) (sizeof((A)) / sizeof(*(A)))
#define RD_ARRAYSIZE(A) RD_ARRAY_SIZE(A)
#define RD_SIZEOF(TYPE,MEMBER) sizeof(((TYPE *)NULL)->MEMBER)
#define RD_OFFSETOF(TYPE,MEMBER) ((size_t) &(((TYPE *)NULL)->MEMBER))
#define RD_SIZEOF(TYPE, MEMBER) sizeof(((TYPE *)NULL)->MEMBER)
#define RD_OFFSETOF(TYPE, MEMBER) ((size_t) & (((TYPE *)NULL)->MEMBER))

/**
 * Returns the 'I'th array element from static sized array 'A'
 * or NULL if 'I' is out of range.
 * var-args is an optional prefix to provide the correct return type.
 */
#define RD_ARRAY_ELEM(A,I,...) \
        ((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__ (A)[(I)] : NULL)
#define RD_ARRAY_ELEM(A, I, ...) \
        ((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__(A)[(I)] : NULL)


#define RD_STRINGIFY(X) # X
#define RD_STRINGIFY(X) #X


#define RD_MIN(a,b) ((a) < (b) ? (a) : (b))
#define RD_MAX(a,b) ((a) > (b) ? (a) : (b))
#define RD_MIN(a, b) ((a) < (b) ? (a) : (b))
#define RD_MAX(a, b) ((a) > (b) ? (a) : (b))


/**
 * Cap an integer (of any type) to reside within the defined limit.
 */
#define RD_INT_CAP(val,low,hi) \
        ((val) < (low) ? low : ((val) > (hi) ? (hi) : (val)))
#define RD_INT_CAP(val, low, hi) \
        ((val) < (low) ? low : ((val) > (hi) ? (hi) : (val)))
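A couple of hand-checked values for the alignment macro above, to make the bit trick concrete:

/* RD_ROUNDUP(13, 8): (13 + 7) & ~7 = 20 & ~7 = 16
 * RD_ROUNDUP(16, 8): (16 + 7) & ~7 = 23 & ~7 = 16  (already aligned)
 * The mask ~(STRIDE - 1) only clears exactly the low bits when STRIDE
 * is a power of 2, hence the comment's requirement. */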
@@ -241,11 +247,11 @@ static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) {
 * Allocate 'size' bytes, copy 'src', return pointer to new memory.
 *
 * Use rd_free() to free the returned pointer.
 */
static RD_INLINE RD_UNUSED void *rd_memdup (const void *src, size_t size) {
static RD_INLINE RD_UNUSED void *rd_memdup(const void *src, size_t size) {
        void *dst = rd_malloc(size);
        memcpy(dst, src, size);
        return dst;
}

/**
@@ -272,7 +278,7 @@ typedef rd_atomic32_t rd_refcnt_t;
#endif

#ifdef RD_REFCNT_USE_LOCKS
static RD_INLINE RD_UNUSED int rd_refcnt_init (rd_refcnt_t *R, int v) {
static RD_INLINE RD_UNUSED int rd_refcnt_init(rd_refcnt_t *R, int v) {
        int r;
        mtx_init(&R->lock, mtx_plain);
        mtx_lock(&R->lock);

@@ -281,11 +287,11 @@ static RD_INLINE RD_UNUSED int rd_refcnt_init (rd_refcnt_t *R, int v) {
        return r;
}
#else
#define rd_refcnt_init(R,v) rd_atomic32_init(R, v)
#define rd_refcnt_init(R, v) rd_atomic32_init(R, v)
#endif

#ifdef RD_REFCNT_USE_LOCKS
static RD_INLINE RD_UNUSED void rd_refcnt_destroy (rd_refcnt_t *R) {
static RD_INLINE RD_UNUSED void rd_refcnt_destroy(rd_refcnt_t *R) {
        mtx_lock(&R->lock);
        rd_assert(R->v == 0);
        mtx_unlock(&R->lock);

@@ -293,12 +299,14 @@ static RD_INLINE RD_UNUSED void rd_refcnt_destroy (rd_refcnt_t *R) {
        mtx_destroy(&R->lock);
}
#else
#define rd_refcnt_destroy(R) do { } while (0)
#define rd_refcnt_destroy(R) \
        do { \
        } while (0)
#endif


#ifdef RD_REFCNT_USE_LOCKS
static RD_INLINE RD_UNUSED int rd_refcnt_set (rd_refcnt_t *R, int v) {
static RD_INLINE RD_UNUSED int rd_refcnt_set(rd_refcnt_t *R, int v) {
        int r;
        mtx_lock(&R->lock);
        r = R->v = v;

@@ -306,12 +314,12 @@ static RD_INLINE RD_UNUSED int rd_refcnt_set (rd_refcnt_t *R, int v) {
        return r;
}
#else
#define rd_refcnt_set(R,v) rd_atomic32_set(R, v)
#define rd_refcnt_set(R, v) rd_atomic32_set(R, v)
#endif


#ifdef RD_REFCNT_USE_LOCKS
static RD_INLINE RD_UNUSED int rd_refcnt_add0 (rd_refcnt_t *R) {
static RD_INLINE RD_UNUSED int rd_refcnt_add0(rd_refcnt_t *R) {
        int r;
        mtx_lock(&R->lock);
        r = ++(R->v);

@@ -319,10 +327,10 @@ static RD_INLINE RD_UNUSED int rd_refcnt_add0 (rd_refcnt_t *R) {
        return r;
}
#else
#define rd_refcnt_add0(R) rd_atomic32_add(R, 1)
#endif

static RD_INLINE RD_UNUSED int rd_refcnt_sub0 (rd_refcnt_t *R) {
static RD_INLINE RD_UNUSED int rd_refcnt_sub0(rd_refcnt_t *R) {
        int r;
#ifdef RD_REFCNT_USE_LOCKS
        mtx_lock(&R->lock);

@@ -337,7 +345,7 @@ static RD_INLINE RD_UNUSED int rd_refcnt_sub0 (rd_refcnt_t *R) {
}

#ifdef RD_REFCNT_USE_LOCKS
static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) {
static RD_INLINE RD_UNUSED int rd_refcnt_get(rd_refcnt_t *R) {
        int r;
        mtx_lock(&R->lock);
        r = R->v;

@@ -345,67 +353,67 @@ static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) {
        return r;
}
#else
#define rd_refcnt_get(R) rd_atomic32_get(R)
#endif

/**
 * A wrapper for decreasing refcount and calling a destroy function
 * when refcnt reaches 0.
 */
#define rd_refcnt_destroywrapper(REFCNT,DESTROY_CALL) do { \
        if (rd_refcnt_sub(REFCNT) > 0) \
                break; \
        DESTROY_CALL; \
#define rd_refcnt_destroywrapper(REFCNT, DESTROY_CALL) \
        do { \
                if (rd_refcnt_sub(REFCNT) > 0) \
                        break; \
                DESTROY_CALL; \
        } while (0)


#define rd_refcnt_destroywrapper2(REFCNT,WHAT,DESTROY_CALL) do { \
        if (rd_refcnt_sub2(REFCNT,WHAT) > 0) \
                break; \
        DESTROY_CALL; \
#define rd_refcnt_destroywrapper2(REFCNT, WHAT, DESTROY_CALL) \
        do { \
                if (rd_refcnt_sub2(REFCNT, WHAT) > 0) \
                        break; \
                DESTROY_CALL; \
        } while (0)
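A usage sketch for the destroy-wrapper above; the object type and its destroy function are hypothetical, the macro is as defined in the hunk:

typedef struct my_obj_s {
        rd_refcnt_t myo_refcnt;
        /* ... payload ... */
} my_obj_t;

static void my_obj_destroy(my_obj_t *myo);

static void my_obj_release(my_obj_t *myo) {
        /* Drops one reference; my_obj_destroy() is only invoked by the
         * caller that drops the count to zero. */
        rd_refcnt_destroywrapper(&myo->myo_refcnt, my_obj_destroy(myo));
}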
#if ENABLE_REFCNT_DEBUG
#define rd_refcnt_add_fl(FUNC,LINE,R) \
        ( \
                fprintf(stderr, "REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n", \
                        #R, rd_refcnt_get(R), (R), (FUNC), (LINE)), \
                rd_refcnt_add0(R) \
        )
#define rd_refcnt_add_fl(FUNC, LINE, R) \
        (fprintf(stderr, "REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n", #R, \
                 rd_refcnt_get(R), (R), (FUNC), (LINE)), \
         rd_refcnt_add0(R))

#define rd_refcnt_add(R) rd_refcnt_add_fl(__FUNCTION__, __LINE__, (R))

#define rd_refcnt_add2(R,WHAT) do { \
        fprintf(stderr, \
                "REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n", \
                #R, rd_refcnt_get(R), (R), WHAT, \
                __FUNCTION__,__LINE__), \
        rd_refcnt_add0(R); \
#define rd_refcnt_add2(R, WHAT) \
        do { \
                fprintf(stderr, \
                        "REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n", #R, \
                        rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \
                rd_refcnt_add0(R); \
        } while (0)

#define rd_refcnt_sub2(R,WHAT) ( \
        fprintf(stderr, \
                "REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n", \
                #R, rd_refcnt_get(R), (R), WHAT, \
                __FUNCTION__,__LINE__), \
        rd_refcnt_sub0(R) )
#define rd_refcnt_sub2(R, WHAT) \
        (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n", #R, \
                 rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \
         rd_refcnt_sub0(R))

#define rd_refcnt_sub(R) ( \
        fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n", \
                #R, rd_refcnt_get(R), (R), __FUNCTION__,__LINE__), \
        rd_refcnt_sub0(R) )
#define rd_refcnt_sub(R) \
        (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n", #R, \
                 rd_refcnt_get(R), (R), __FUNCTION__, __LINE__), \
         rd_refcnt_sub0(R))

#else
#define rd_refcnt_add_fl(FUNC,LINE,R) rd_refcnt_add0(R)
#define rd_refcnt_add(R) rd_refcnt_add0(R)
#define rd_refcnt_sub(R) rd_refcnt_sub0(R)
#define rd_refcnt_add_fl(FUNC, LINE, R) rd_refcnt_add0(R)
#define rd_refcnt_add(R)                rd_refcnt_add0(R)
#define rd_refcnt_sub(R)                rd_refcnt_sub0(R)
#endif


#define RD_IF_FREE(PTR,FUNC) do { if ((PTR)) FUNC(PTR); } while (0)
#define RD_IF_FREE(PTR, FUNC) \
        do { \
                if ((PTR)) \
                        FUNC(PTR); \
        } while (0)


/**

@@ -413,7 +421,7 @@ static RD_INLINE RD_UNUSED int rd_refcnt_get (rd_refcnt_t *R) {
 */

typedef struct rd_chariov_s {
        char *ptr;
        size_t size;
} rd_chariov_t;
311  src/rdaddr.c
@@ -3,24 +3,24 @@
 *
 * Copyright (c) 2012, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
@@ -36,46 +36,43 @@
#include <ws2tcpip.h>
#endif

const char *rd_sockaddr2str (const void *addr, int flags) {
const char *rd_sockaddr2str(const void *addr, int flags) {
        const rd_sockaddr_inx_t *a = (const rd_sockaddr_inx_t *)addr;
        static RD_TLS char ret[32][256];
        static RD_TLS int reti = 0;
        char portstr[32];
        int of = 0;
        int niflags = NI_NUMERICSERV;
        int r;

        reti = (reti + 1) % 32;

        switch (a->sinx_family)
        {
        switch (a->sinx_family) {
        case AF_INET:
        case AF_INET6:
                if (flags & RD_SOCKADDR2STR_F_FAMILY)
                        of += rd_snprintf(&ret[reti][of], sizeof(ret[reti])-of, "ipv%i#",
                        of += rd_snprintf(&ret[reti][of],
                                          sizeof(ret[reti]) - of, "ipv%i#",
                                          a->sinx_family == AF_INET ? 4 : 6);

                if ((flags & RD_SOCKADDR2STR_F_PORT) &&
                    a->sinx_family == AF_INET6)
                        ret[reti][of++] = '[';

                if (!(flags & RD_SOCKADDR2STR_F_RESOLVE))
                        niflags |= NI_NUMERICHOST;

        retry:
                if ((r = getnameinfo(
                             (const struct sockaddr *)a,
                             RD_SOCKADDR_INX_LEN(a),
                             (const struct sockaddr *)a, RD_SOCKADDR_INX_LEN(a),
                             ret[reti]+of, sizeof(ret[reti])-of,
                             ret[reti] + of, sizeof(ret[reti]) - of,
                             (flags & RD_SOCKADDR2STR_F_PORT) ?
                             portstr : NULL,
                             (flags & RD_SOCKADDR2STR_F_PORT) ? portstr : NULL,
                             (flags & RD_SOCKADDR2STR_F_PORT) ?
                             sizeof(portstr) : 0,
                             (flags & RD_SOCKADDR2STR_F_PORT) ? sizeof(portstr) : 0,
                             niflags))) {

                        if (r == EAI_AGAIN && !(niflags & NI_NUMERICHOST)) {
                                /* If unable to resolve name, retry without

@@ -86,154 +83,154 @@ const char *rd_sockaddr2str (const void *addr, int flags) {
                        break;
                }

                if (flags & RD_SOCKADDR2STR_F_PORT) {
                        size_t len = strlen(ret[reti]);
                        rd_snprintf(ret[reti]+len, sizeof(ret[reti])-len,
                                    "%s:%s",
                                    a->sinx_family == AF_INET6 ? "]" : "",
                                    portstr);
                        rd_snprintf(
                            ret[reti] + len, sizeof(ret[reti]) - len, "%s:%s",
                            a->sinx_family == AF_INET6 ? "]" : "", portstr);
                }

                return ret[reti];
        }

        /* Error-case */
        rd_snprintf(ret[reti], sizeof(ret[reti]), "<unsupported:%s>",
                    rd_family2str(a->sinx_family));

        return ret[reti];
}
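An illustrative call, assuming 'sa' points at a valid IPv4/IPv6 address. The return value references one of the 32 rotating thread-local buffers above, so it needs no freeing but is overwritten after 32 more calls on the same thread:

const char *str = rd_sockaddr2str(sa, RD_SOCKADDR2STR_F_NICE);
printf("peer: %s\n", str); /* e.g. "example.com:9092" or "[::1]:9092" */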
const char *rd_addrinfo_prepare (const char *nodesvc,
                                 char **node, char **svc) {
const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc) {
        static RD_TLS char snode[256];
        static RD_TLS char ssvc[64];
        const char *t;
        const char *svct = NULL;
        size_t nodelen = 0;

        *snode = '\0';
        *ssvc  = '\0';

        if (*nodesvc == '[') {
                /* "[host]".. (enveloped node name) */
                if (!(t = strchr(nodesvc, ']')))
                        return "Missing close-']'";
                nodesvc++;
                nodelen = t-nodesvc;
                svct = t+1;
                nodelen = t - nodesvc;
                svct    = t + 1;

        } else if (*nodesvc == ':' && *(nodesvc+1) != ':') {
        } else if (*nodesvc == ':' && *(nodesvc + 1) != ':') {
                /* ":".. (port only) */
                nodelen = 0;
                svct = nodesvc;
        }

        if ((svct = strrchr(svct ? svct : nodesvc, ':')) && (*(svct-1) != ':') &&
            *(++svct)) {
        if ((svct = strrchr(svct ? svct : nodesvc, ':')) &&
            (*(svct - 1) != ':') && *(++svct)) {
                /* Optional ":service" definition. */
                if (strlen(svct) >= sizeof(ssvc))
                        return "Service name too long";
                strcpy(ssvc, svct);
                if (!nodelen)
                        nodelen = svct - nodesvc - 1;

        } else if (!nodelen)
                nodelen = strlen(nodesvc);

        if (nodelen) {
                /* Truncate nodename if necessary. */
                nodelen = RD_MIN(nodelen, sizeof(snode)-1);
                nodelen = RD_MIN(nodelen, sizeof(snode) - 1);
                memcpy(snode, nodesvc, nodelen);
                snode[nodelen] = '\0';
        }

        *node = snode;
        *svc  = ssvc;

        return NULL;
}
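A sketch of how the parser above splits a node/service string; the input literal is illustrative:

char *node, *svc;
const char *err;

if ((err = rd_addrinfo_prepare("[::1]:9092", &node, &svc)))
        fprintf(stderr, "bad address: %s\n", err);
else
        printf("node=%s svc=%s\n", node, svc); /* node=::1 svc=9092 */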
rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc,
                                    int flags, int family,
                                    int socktype, int protocol,
                                    const char **errstr) {
rd_sockaddr_list_t *rd_getaddrinfo(const char *nodesvc,
                                   const char *defsvc,
                                   int flags,
                                   int family,
                                   int socktype,
                                   int protocol,
                                   const char **errstr) {
        struct addrinfo hints;
        memset(&hints, 0, sizeof(hints));
        hints.ai_family   = family;
        hints.ai_socktype = socktype;
        hints.ai_protocol = protocol;
        hints.ai_flags    = flags;

        struct addrinfo *ais, *ai;
        char *node, *svc;
        int r;
        int cnt = 0;
        rd_sockaddr_list_t *rsal;

        if ((*errstr = rd_addrinfo_prepare(nodesvc, &node, &svc))) {
                errno = EINVAL;
                return NULL;
        }

        if (*svc)
                defsvc = svc;

        if ((r = getaddrinfo(node, defsvc, &hints, &ais))) {
#ifdef EAI_SYSTEM
                if (r == EAI_SYSTEM)
#else
                if (0)
#endif
                        *errstr = rd_strerror(errno);
                else {
#ifdef _WIN32
                        *errstr = gai_strerrorA(r);
#else
                        *errstr = gai_strerror(r);
#endif
                        errno = EFAULT;
                }
                return NULL;
        }

        /* Count number of addresses */
        for (ai = ais ; ai != NULL ; ai = ai->ai_next)
        for (ai = ais; ai != NULL; ai = ai->ai_next)
                cnt++;

        if (cnt == 0) {
                /* unlikely? */
                freeaddrinfo(ais);
                errno = ENOENT;
                *errstr = "No addresses";
                return NULL;
        }

        rsal = rd_calloc(1, sizeof(*rsal) + (sizeof(*rsal->rsal_addr) * cnt));

        for (ai = ais ; ai != NULL ; ai = ai->ai_next)
                memcpy(&rsal->rsal_addr[rsal->rsal_cnt++],
                       ai->ai_addr, ai->ai_addrlen);
        for (ai = ais; ai != NULL; ai = ai->ai_next)
                memcpy(&rsal->rsal_addr[rsal->rsal_cnt++], ai->ai_addr,
                       ai->ai_addrlen);

        freeaddrinfo(ais);

        /* Shuffle address list for proper round-robin */
        if (!(flags & RD_AI_NOSHUFFLE))
                rd_array_shuffle(rsal->rsal_addr, rsal->rsal_cnt,
                                 sizeof(*rsal->rsal_addr));

        return rsal;
}


void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal) {
void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal) {
        rd_free(rsal);
}
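Putting the pieces together, a hedged end-to-end sketch; the broker name, default service and the AI_ADDRCONFIG hint flag are illustrative choices, not taken from this commit:

static void resolve_example(void) {
        const char *errstr;
        rd_sockaddr_list_t *rsal;
        rd_sockaddr_inx_t *sinx;

        rsal = rd_getaddrinfo("broker1:9092", "9092", AI_ADDRCONFIG,
                              AF_UNSPEC, SOCK_STREAM, IPPROTO_TCP, &errstr);
        if (!rsal) {
                fprintf(stderr, "resolve failed: %s\n", errstr);
                return;
        }

        /* The list is shuffled (unless RD_AI_NOSHUFFLE was passed) and
         * rd_sockaddr_list_next() rotates through it round-robin. */
        sinx = rd_sockaddr_list_next(rsal);
        printf("trying %s\n", rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_NICE));

        rd_sockaddr_list_destroy(rsal);
}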
141  src/rdaddr.h
@@ -3,24 +3,24 @@
 *
 * Copyright (c) 2012, Magnus Edenhill
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
@@ -48,25 +48,28 @@
 * It provides convenient abstraction of AF_INET* agnostic operations.
 */
typedef union {
        struct sockaddr_in in;
        struct sockaddr_in6 in6;
} rd_sockaddr_inx_t;
#define sinx_family in.sin_family
#define sinx_addr   in.sin_addr
#define RD_SOCKADDR_INX_LEN(sinx) \
        ((sinx)->sinx_family == AF_INET ? sizeof(struct sockaddr_in) : \
         (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6): \
         sizeof(rd_sockaddr_inx_t))
#define RD_SOCKADDR_INX_PORT(sinx) \
        ((sinx)->sinx_family == AF_INET ? (sinx)->in.sin_port : \
         (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0)
#define RD_SOCKADDR_INX_LEN(sinx) \
        ((sinx)->sinx_family == AF_INET \
             ? sizeof(struct sockaddr_in) \
             : (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6) \
                                               : sizeof(rd_sockaddr_inx_t))
#define RD_SOCKADDR_INX_PORT(sinx) \
        ((sinx)->sinx_family == AF_INET \
             ? (sinx)->in.sin_port \
             : (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0)

#define RD_SOCKADDR_INX_PORT_SET(sinx,port) do { \
        if ((sinx)->sinx_family == AF_INET) \
                (sinx)->in.sin_port = port; \
        else if ((sinx)->sinx_family == AF_INET6) \
                (sinx)->in6.sin6_port = port; \
        } while (0)
#define RD_SOCKADDR_INX_PORT_SET(sinx, port) \
        do { \
                if ((sinx)->sinx_family == AF_INET) \
                        (sinx)->in.sin_port = port; \
                else if ((sinx)->sinx_family == AF_INET6) \
                        (sinx)->in6.sin6_port = port; \
        } while (0)
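A quick sketch of the family-agnostic macros above (port value illustrative). Ports are stored in network byte order in both sockaddr_in and sockaddr_in6, hence the htons()/ntohs():

rd_sockaddr_inx_t sinx;

memset(&sinx, 0, sizeof(sinx));
sinx.sinx_family = AF_INET;

RD_SOCKADDR_INX_PORT_SET(&sinx, htons(9092));
printf("len=%u port=%u\n",
       (unsigned int)RD_SOCKADDR_INX_LEN(&sinx),
       (unsigned int)ntohs(RD_SOCKADDR_INX_PORT(&sinx)));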
@@ -79,12 +82,14 @@ typedef union {
 * IPv6 address enveloping ("[addr]:port") will also be performed
 * if .._F_PORT is set.
 */
#define RD_SOCKADDR2STR_F_PORT    0x1 /* Append the port. */
#define RD_SOCKADDR2STR_F_RESOLVE 0x2 /* Try to resolve address to hostname. */
#define RD_SOCKADDR2STR_F_FAMILY  0x4 /* Prepend address family. */
#define RD_SOCKADDR2STR_F_NICE /* Nice and friendly output */ \
        (RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_RESOLVE)
const char *rd_sockaddr2str (const void *addr, int flags);
const char *rd_sockaddr2str(const void *addr, int flags);

/**

@@ -96,15 +101,14 @@ const char *rd_sockaddr2str (const void *addr, int flags);
 * Thread-safe but returned buffers in '*node' and '*svc' are only
 * usable until the next call to rd_addrinfo_prepare() in the same thread.
 */
const char *rd_addrinfo_prepare (const char *nodesvc,
                                 char **node, char **svc);
const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc);


typedef struct rd_sockaddr_list_s {
        int rsal_cnt;
        int rsal_curr;
        rd_sockaddr_inx_t rsal_addr[];
} rd_sockaddr_list_t;


@@ -121,22 +125,21 @@ typedef struct rd_sockaddr_list_s {
 * }
 * ...
 * }
 *
 */

static RD_INLINE rd_sockaddr_inx_t *
rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) RD_UNUSED;
rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) RD_UNUSED;
static RD_INLINE rd_sockaddr_inx_t *
rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) {
rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) {
        rsal->rsal_curr = (rsal->rsal_curr + 1) % rsal->rsal_cnt;
        return &rsal->rsal_addr[rsal->rsal_curr];
}


#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \
        for ((sinx) = &(rsal)->rsal_addr[0] ; \
             (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_len] ; \
             (sinx)++)
#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \
        for ((sinx) = &(rsal)->rsal_addr[0]; \
             (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_len]; (sinx)++)

/**
 * Wrapper for getaddrinfo(3) that performs these additional tasks:

@@ -149,14 +152,18 @@ rd_sockaddr_list_next (rd_sockaddr_list_t *rsal) {
 *
 * Thread-safe.
 */
#define RD_AI_NOSHUFFLE 0x10000000 /* Don't shuffle returned address list.
                                    * FIXME: Guessing non-used bits like this
                                    * is a bad idea. */
#define RD_AI_NOSHUFFLE \
        0x10000000 /* Don't shuffle returned address list. \
                    * FIXME: Guessing non-used bits like this \
                    * is a bad idea. */

rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc,
                                    int flags, int family,
                                    int socktype, int protocol,
                                    const char **errstr);
rd_sockaddr_list_t *rd_getaddrinfo(const char *nodesvc,
                                   const char *defsvc,
                                   int flags,
                                   int family,
                                   int socktype,
                                   int protocol,
                                   const char **errstr);


@@ -165,23 +172,23 @@ rd_sockaddr_list_t *rd_getaddrinfo (const char *nodesvc, const char *defsvc,
 *
 * Thread-safe.
 */
void rd_sockaddr_list_destroy (rd_sockaddr_list_t *rsal);
void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal);


/**
 * Returns the human readable name of a socket family.
 */
static const char *rd_family2str (int af) RD_UNUSED;
static const char *rd_family2str (int af) {
static const char *rd_family2str(int af) RD_UNUSED;
static const char *rd_family2str(int af) {
        switch(af){
        switch (af) {
        case AF_INET:
                return "inet";
        case AF_INET6:
                return "inet6";
        default:
                return "af?";
        };
}

#endif /* _RDADDR_H_ */
168  src/rdatomic.h
@@ -31,59 +31,61 @@
#include "tinycthread.h"

typedef struct {
        int32_t val;
#if !defined(_WIN32) && !HAVE_ATOMICS_32
        mtx_t lock;
#endif
} rd_atomic32_t;

typedef struct {
        int64_t val;
#if !defined(_WIN32) && !HAVE_ATOMICS_64
        mtx_t lock;
#endif
} rd_atomic64_t;


static RD_INLINE RD_UNUSED void rd_atomic32_init (rd_atomic32_t *ra, int32_t v) {
static RD_INLINE RD_UNUSED void rd_atomic32_init(rd_atomic32_t *ra, int32_t v) {
        ra->val = v;
#if !defined(_WIN32) && !HAVE_ATOMICS_32
        mtx_init(&ra->lock, mtx_plain);
#endif
}


static RD_INLINE int32_t RD_UNUSED rd_atomic32_add (rd_atomic32_t *ra, int32_t v) {
static RD_INLINE int32_t RD_UNUSED rd_atomic32_add(rd_atomic32_t *ra,
                                                   int32_t v) {
#ifdef __SUNPRO_C
        return atomic_add_32_nv(&ra->val, v);
#elif defined(_WIN32)
        return InterlockedAdd((LONG *)&ra->val, v);
#elif !HAVE_ATOMICS_32
        int32_t r;
        mtx_lock(&ra->lock);
        ra->val += v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP32(add, fetch, &ra->val, v);
#endif
}

static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, int32_t v) {
static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra,
                                                   int32_t v) {
#ifdef __SUNPRO_C
        return atomic_add_32_nv(&ra->val, -v);
#elif defined(_WIN32)
        return InterlockedAdd((LONG *)&ra->val, -v);
#elif !HAVE_ATOMICS_32
        int32_t r;
        mtx_lock(&ra->lock);
        ra->val -= v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP32(sub, fetch, &ra->val, v);
#endif
}

@@ -97,27 +99,28 @@ static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra, int32_t v)
 */
static RD_INLINE int32_t RD_UNUSED rd_atomic32_get(rd_atomic32_t *ra) {
#if defined(_WIN32) || defined(__SUNPRO_C)
        return ra->val;
#elif !HAVE_ATOMICS_32
        int32_t r;
        mtx_lock(&ra->lock);
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP32(fetch, add, &ra->val, 0);
#endif
}

static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, int32_t v) {
static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra,
                                                   int32_t v) {
#ifdef _WIN32
        return InterlockedExchange((LONG *)&ra->val, v);
#elif !HAVE_ATOMICS_32
        int32_t r;
        mtx_lock(&ra->lock);
        r = ra->val = v;
        mtx_unlock(&ra->lock);
        return r;
#elif HAVE_ATOMICS_32_ATOMIC
        __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST);
        return v;

@@ -125,50 +128,52 @@ static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra, int32_t v)
        (void)__sync_lock_test_and_set(&ra->val, v);
        return v;
#else
        return ra->val = v; // FIXME
#endif
}
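A minimal sketch of the 32-bit API above used as a shared counter (the counter and function names are illustrative). Since add/sub return the post-operation value, the decrement doubles as a race-free "last one out" check:

static rd_atomic32_t active_cnt; /* rd_atomic32_init(&active_cnt, 0)
                                  * is assumed to run once at startup. */

static void worker_enter(void) {
        rd_atomic32_add(&active_cnt, 1);
}

static void worker_exit(void) {
        if (rd_atomic32_sub(&active_cnt, 1) == 0)
                printf("no workers left\n");
}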
static RD_INLINE RD_UNUSED void rd_atomic64_init (rd_atomic64_t *ra, int64_t v) {
static RD_INLINE RD_UNUSED void rd_atomic64_init(rd_atomic64_t *ra, int64_t v) {
        ra->val = v;
#if !defined(_WIN32) && !HAVE_ATOMICS_64
        mtx_init(&ra->lock, mtx_plain);
#endif
}

static RD_INLINE int64_t RD_UNUSED rd_atomic64_add (rd_atomic64_t *ra, int64_t v) {
static RD_INLINE int64_t RD_UNUSED rd_atomic64_add(rd_atomic64_t *ra,
                                                   int64_t v) {
#ifdef __SUNPRO_C
        return atomic_add_64_nv(&ra->val, v);
#elif defined(_WIN32)
        return InterlockedAdd64(&ra->val, v);
#elif !HAVE_ATOMICS_64
        int64_t r;
        mtx_lock(&ra->lock);
        ra->val += v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP64(add, fetch, &ra->val, v);
#endif
}

static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, int64_t v) {
static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra,
                                                   int64_t v) {
#ifdef __SUNPRO_C
        return atomic_add_64_nv(&ra->val, -v);
#elif defined(_WIN32)
        return InterlockedAdd64(&ra->val, -v);
#elif !HAVE_ATOMICS_64
        int64_t r;
        mtx_lock(&ra->lock);
        ra->val -= v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP64(sub, fetch, &ra->val, v);
#endif
}

@@ -183,29 +188,30 @@ static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra, int64_t v)
 */
static RD_INLINE int64_t RD_UNUSED rd_atomic64_get(rd_atomic64_t *ra) {
#if defined(_WIN32) || defined(__SUNPRO_C)
        return ra->val;
#elif !HAVE_ATOMICS_64
        int64_t r;
        mtx_lock(&ra->lock);
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#else
        return ATOMIC_OP64(fetch, add, &ra->val, 0);
#endif
}


static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, int64_t v) {
static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra,
                                                   int64_t v) {
#ifdef _WIN32
        return InterlockedExchange64(&ra->val, v);
#elif !HAVE_ATOMICS_64
        int64_t r;
        mtx_lock(&ra->lock);
        ra->val = v;
        r = ra->val;
        mtx_unlock(&ra->lock);
        return r;
#elif HAVE_ATOMICS_64_ATOMIC
        __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST);
        return v;

@@ -213,7 +219,7 @@ static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra, int64_t v)
        (void)__sync_lock_test_and_set(&ra->val, v);
        return v;
#else
        return ra->val = v; // FIXME
#endif
}
75  src/rdavg.h
@@ -40,14 +40,13 @@ typedef struct rd_avg_s {
                int64_t minv;
                int64_t avg;
                int64_t sum;
                int cnt;
                rd_ts_t start;
        } ra_v;
        mtx_t ra_lock;
        int ra_enabled;
        enum {
                RD_AVG_GAUGE,
                RD_AVG_COUNTER,
        enum { RD_AVG_GAUGE,
               RD_AVG_COUNTER,
        } ra_type;
#if WITH_HDRHISTOGRAM
        rd_hdr_histogram_t *ra_hdr;

@@ -74,18 +73,18 @@ typedef struct rd_avg_s {
/**
 * @brief Add value \p v to averager \p ra.
 */
static RD_UNUSED void rd_avg_add (rd_avg_t *ra, int64_t v) {
static RD_UNUSED void rd_avg_add(rd_avg_t *ra, int64_t v) {
        mtx_lock(&ra->ra_lock);
        if (!ra->ra_enabled) {
                mtx_unlock(&ra->ra_lock);
                return;
        }
        if (v > ra->ra_v.maxv)
                ra->ra_v.maxv = v;
        if (ra->ra_v.minv == 0 || v < ra->ra_v.minv)
                ra->ra_v.minv = v;
        ra->ra_v.sum += v;
        ra->ra_v.cnt++;
#if WITH_HDRHISTOGRAM
        rd_hdr_histogram_record(ra->ra_hdr, v);
#endif

@@ -96,7 +95,7 @@ static RD_UNUSED void rd_avg_add (rd_avg_t *ra, int64_t v) {
/**
 * @brief Calculate the average
 */
static RD_UNUSED void rd_avg_calc (rd_avg_t *ra, rd_ts_t now) {
static RD_UNUSED void rd_avg_calc(rd_avg_t *ra, rd_ts_t now) {
        if (ra->ra_type == RD_AVG_GAUGE) {
                if (ra->ra_v.cnt)
                        ra->ra_v.avg = ra->ra_v.sum / ra->ra_v.cnt;

@@ -121,8 +120,7 @@ static RD_UNUSED void rd_avg_calc (rd_avg_t *ra, rd_ts_t now) {
 *
 * @remark ra will not be locked by this function.
 */
static RD_UNUSED int64_t
rd_avg_quantile (const rd_avg_t *ra, double q) {
static RD_UNUSED int64_t rd_avg_quantile(const rd_avg_t *ra, double q) {
#if WITH_HDRHISTOGRAM
        return rd_hdr_histogram_quantile(ra->ra_hdr, q);
#else

@@ -137,7 +135,7 @@ rd_avg_quantile (const rd_avg_t *ra, double q) {
 * Caller must free avg internal members by calling rd_avg_destroy()
 * on the \p dst.
 */
static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) {
static RD_UNUSED void rd_avg_rollover(rd_avg_t *dst, rd_avg_t *src) {
        rd_ts_t now;

        mtx_lock(&src->ra_lock);

@@ -150,26 +148,26 @@ static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) {

        mtx_init(&dst->ra_lock, mtx_plain);
        dst->ra_type = src->ra_type;
        dst->ra_v    = src->ra_v;
#if WITH_HDRHISTOGRAM
        dst->ra_hdr = NULL;

        dst->ra_hist.stddev  = rd_hdr_histogram_stddev(src->ra_hdr);
        dst->ra_hist.mean    = rd_hdr_histogram_mean(src->ra_hdr);
        dst->ra_hist.oor     = src->ra_hdr->outOfRangeCount;
        dst->ra_hist.hdrsize = src->ra_hdr->allocatedSize;
        dst->ra_hist.p50     = rd_hdr_histogram_quantile(src->ra_hdr, 50.0);
        dst->ra_hist.p75     = rd_hdr_histogram_quantile(src->ra_hdr, 75.0);
        dst->ra_hist.p90     = rd_hdr_histogram_quantile(src->ra_hdr, 90.0);
        dst->ra_hist.p95     = rd_hdr_histogram_quantile(src->ra_hdr, 95.0);
        dst->ra_hist.p99     = rd_hdr_histogram_quantile(src->ra_hdr, 99.0);
        dst->ra_hist.p99_99  = rd_hdr_histogram_quantile(src->ra_hdr, 99.99);
#else
        memset(&dst->ra_hist, 0, sizeof(dst->ra_hist));
#endif
        memset(&src->ra_v, 0, sizeof(src->ra_v));

        now             = rd_clock();
        src->ra_v.start = now;

#if WITH_HDRHISTOGRAM

@@ -181,23 +179,23 @@ static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) {
        int64_t mindiff, maxdiff;

        mindiff = src->ra_hdr->lowestTrackableValue -
                  src->ra_hdr->lowestOutOfRange;

        if (mindiff > 0) {
                /* There were low out of range values, grow lower
                 * span to fit lowest out of range value + 20%. */
                vmin = src->ra_hdr->lowestOutOfRange +
                       (int64_t)((double)mindiff * 0.2);
        }

        maxdiff = src->ra_hdr->highestOutOfRange -
                  src->ra_hdr->highestTrackableValue;

        if (maxdiff > 0) {
                /* There were high out of range values, grow higher
                 * span to fit highest out of range value + 20%. */
                vmax = src->ra_hdr->highestOutOfRange +
                       (int64_t)((double)maxdiff * 0.2);
        }

        if (vmin == src->ra_hdr->lowestTrackableValue &&

@@ -226,15 +224,18 @@ static RD_UNUSED void rd_avg_rollover (rd_avg_t *dst, rd_avg_t *src) {
/**
 * Initialize an averager
 */
static RD_UNUSED void rd_avg_init (rd_avg_t *ra, int type,
                                   int64_t exp_min, int64_t exp_max,
                                   int sigfigs, int enable) {
static RD_UNUSED void rd_avg_init(rd_avg_t *ra,
                                  int type,
                                  int64_t exp_min,
                                  int64_t exp_max,
                                  int sigfigs,
                                  int enable) {
        memset(ra, 0, sizeof(*ra));
        mtx_init(&ra->ra_lock, 0);
        ra->ra_enabled = enable;
        if (!enable)
                return;
        ra->ra_type    = type;
        ra->ra_v.start = rd_clock();
#if WITH_HDRHISTOGRAM
        /* Start off the histogram with expected min,max span,

@@ -247,7 +248,7 @@ static RD_UNUSED void rd_avg_init (rd_avg_t *ra, int type,
/**
 * Destroy averager
 */
static RD_UNUSED void rd_avg_destroy (rd_avg_t *ra) {
static RD_UNUSED void rd_avg_destroy(rd_avg_t *ra) {
#if WITH_HDRHISTOGRAM
        if (ra->ra_hdr)
                rd_hdr_histogram_destroy(ra->ra_hdr);
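A usage sketch of the averager lifecycle (sample values illustrative; <inttypes.h> assumed for PRId64, and the extra histogram fields only exist when WITH_HDRHISTOGRAM is compiled in):

rd_avg_t avg, snap;

rd_avg_init(&avg, RD_AVG_GAUGE, 0, 1000 * 1000, 2, 1 /*enable*/);

rd_avg_add(&avg, 120); /* e.g. two latency samples in microseconds */
rd_avg_add(&avg, 80);

/* Snapshot the current window into 'snap' and reset 'avg'. */
rd_avg_rollover(&snap, &avg);
printf("cnt=%d min=%" PRId64 " max=%" PRId64 "\n",
       snap.ra_v.cnt, snap.ra_v.minv, snap.ra_v.maxv);

rd_avg_destroy(&snap);
rd_avg_destroy(&avg);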
82  src/rdavl.c
@ -36,46 +36,43 @@
|
|||
|
||||
#define RD_AVL_NODE_HEIGHT(ran) ((ran) ? (ran)->ran_height : 0)
|
||||
|
||||
#define RD_AVL_NODE_DELTA(ran) \
|
||||
(RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \
|
||||
#define RD_AVL_NODE_DELTA(ran) \
|
||||
(RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \
|
||||
RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_RIGHT]))
|
||||
|
||||
#define RD_DELTA_MAX 1
|
||||
|
||||
|
||||
static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran);
|
||||
static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran);
|
||||
|
||||
static rd_avl_node_t *rd_avl_rotate (rd_avl_node_t *ran, rd_avl_dir_t dir) {
|
||||
static rd_avl_node_t *rd_avl_rotate(rd_avl_node_t *ran, rd_avl_dir_t dir) {
|
||||
rd_avl_node_t *n;
|
||||
static const rd_avl_dir_t odirmap[] = { /* opposite direction map */
|
||||
[RD_AVL_RIGHT] = RD_AVL_LEFT,
|
||||
[RD_AVL_LEFT] = RD_AVL_RIGHT
|
||||
};
|
||||
const int odir = odirmap[dir];
|
||||
static const rd_avl_dir_t odirmap[] = {/* opposite direction map */
|
||||
[RD_AVL_RIGHT] = RD_AVL_LEFT,
|
||||
[RD_AVL_LEFT] = RD_AVL_RIGHT};
|
||||
const int odir = odirmap[dir];
|
||||
|
||||
n = ran->ran_p[odir];
|
||||
n = ran->ran_p[odir];
|
||||
ran->ran_p[odir] = n->ran_p[dir];
|
||||
n->ran_p[dir] = rd_avl_balance_node(ran);
|
||||
n->ran_p[dir] = rd_avl_balance_node(ran);
|
||||
|
||||
return rd_avl_balance_node(n);
|
||||
}
|
||||
|
||||
static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) {
|
||||
static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran) {
|
||||
const int d = RD_AVL_NODE_DELTA(ran);
|
||||
int h;
|
||||
|
||||
if (d < -RD_DELTA_MAX) {
|
||||
if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_RIGHT]) > 0)
|
||||
ran->ran_p[RD_AVL_RIGHT] =
|
||||
rd_avl_rotate(ran->ran_p[RD_AVL_RIGHT],
|
||||
RD_AVL_RIGHT);
|
||||
ran->ran_p[RD_AVL_RIGHT] = rd_avl_rotate(
|
||||
ran->ran_p[RD_AVL_RIGHT], RD_AVL_RIGHT);
|
||||
return rd_avl_rotate(ran, RD_AVL_LEFT);
|
||||
|
||||
         } else if (d > RD_DELTA_MAX) {
                 if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_LEFT]) < 0)
                         ran->ran_p[RD_AVL_LEFT] =
-                                rd_avl_rotate(ran->ran_p[RD_AVL_LEFT],
-                                              RD_AVL_LEFT);
+                                rd_avl_rotate(ran->ran_p[RD_AVL_LEFT], RD_AVL_LEFT);

                 return rd_avl_rotate(ran, RD_AVL_RIGHT);
         }

@@ -85,7 +82,8 @@ static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) {
         if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_LEFT])) > ran->ran_height)
                 ran->ran_height = h;

-        if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) >ran->ran_height)
+        if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) >
+            ran->ran_height)
                 ran->ran_height = h;

         ran->ran_height++;
@@ -93,10 +91,10 @@ static rd_avl_node_t *rd_avl_balance_node (rd_avl_node_t *ran) {
         return ran;
 }

-rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl,
-                                   rd_avl_node_t *parent,
-                                   rd_avl_node_t *ran,
-                                   rd_avl_node_t **existing) {
+rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl,
+                                  rd_avl_node_t *parent,
+                                  rd_avl_node_t *ran,
+                                  rd_avl_node_t **existing) {
         rd_avl_dir_t dir;
         int r;

@@ -105,10 +103,10 @@ rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl,

         if ((r = ravl->ravl_cmp(ran->ran_elm, parent->ran_elm)) == 0) {
                 /* Replace existing node with new one. */
-                ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT];
+                ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT];
                 ran->ran_p[RD_AVL_RIGHT] = parent->ran_p[RD_AVL_RIGHT];
-                ran->ran_height = parent->ran_height;
-                *existing = parent;
+                ran->ran_height = parent->ran_height;
+                *existing = parent;
                 return ran;
         }

@@ -117,14 +115,14 @@ rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl,
         else
                 dir = RD_AVL_RIGHT;

-        parent->ran_p[dir] = rd_avl_insert_node(ravl, parent->ran_p[dir],
-                                                ran, existing);
+        parent->ran_p[dir] =
+            rd_avl_insert_node(ravl, parent->ran_p[dir], ran, existing);
         return rd_avl_balance_node(parent);
 }

-static rd_avl_node_t *rd_avl_move (rd_avl_node_t *dst, rd_avl_node_t *src,
-                                   rd_avl_dir_t dir) {
+static rd_avl_node_t *
+rd_avl_move(rd_avl_node_t *dst, rd_avl_node_t *src, rd_avl_dir_t dir) {

         if (!dst)
                 return src;

@@ -134,11 +132,10 @@ static rd_avl_node_t *rd_avl_move (rd_avl_node_t *dst, rd_avl_node_t *src,
         return rd_avl_balance_node(dst);
 }

-static rd_avl_node_t *rd_avl_remove_node0 (rd_avl_node_t *ran) {
+static rd_avl_node_t *rd_avl_remove_node0(rd_avl_node_t *ran) {
         rd_avl_node_t *tmp;

-        tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT],
-                          ran->ran_p[RD_AVL_RIGHT],
+        tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT], ran->ran_p[RD_AVL_RIGHT],
                           RD_AVL_RIGHT);

         ran->ran_p[RD_AVL_LEFT] = ran->ran_p[RD_AVL_RIGHT] = NULL;

@@ -146,8 +143,8 @@ static rd_avl_node_t *rd_avl_remove_node0 (rd_avl_node_t *ran) {
 }

-rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent,
-                                   const void *elm) {
+rd_avl_node_t *
+rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm) {
         rd_avl_dir_t dir;
         int r;

@@ -157,22 +154,21 @@ rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent,

         if ((r = ravl->ravl_cmp(elm, parent->ran_elm)) == 0)
                 return rd_avl_remove_node0(parent);
-        else if (r < 0)
+        else if (r < 0)
                 dir = RD_AVL_LEFT;
         else /* > 0 */
                 dir = RD_AVL_RIGHT;

-        parent->ran_p[dir] =
-                rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm);
+        parent->ran_p[dir] = rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm);

         return rd_avl_balance_node(parent);
 }

-rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl,
-                                 const rd_avl_node_t *begin,
-                                 const void *elm) {
+rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl,
+                                const rd_avl_node_t *begin,
+                                const void *elm) {
         int r;

         if (!begin)

@@ -187,7 +183,7 @@ rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl,

-void rd_avl_destroy (rd_avl_t *ravl) {
+void rd_avl_destroy(rd_avl_t *ravl) {
         if (ravl->ravl_flags & RD_AVL_F_LOCKS)
                 rwlock_destroy(&ravl->ravl_rwlock);

@@ -195,7 +191,7 @@ void rd_avl_destroy (rd_avl_t *ravl) {
         rd_free(ravl);
 }

-rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) {
+rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) {

         if (!ravl) {
                 ravl = rd_calloc(1, sizeof(*ravl));

@@ -205,7 +201,7 @@ rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) {
         }

         ravl->ravl_flags = flags;
-        ravl->ravl_cmp = cmp;
+        ravl->ravl_cmp = cmp;

         if (flags & RD_AVL_F_LOCKS)
                 rwlock_init(&ravl->ravl_rwlock);
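
For orientation, here is a minimal standalone sketch (not librdkafka code) of the height bookkeeping that rd_avl_balance_node performs in the hunks above: a node's height is one more than its taller sub-tree, and the left/right height difference ("delta") decides when a rotation is needed.

/* Sketch of the AVL height/delta invariant; names are invented. */
#include <stddef.h>

struct node {
        struct node *child[2]; /* left, right */
        int height;            /* sub-tree height */
};

static int node_height(const struct node *n) {
        return n ? n->height : 0;
}

/* Recompute n->height from the children, as the diff above does with
 * RD_AVL_NODE_HEIGHT() for both RD_AVL_LEFT and RD_AVL_RIGHT. */
static void update_height(struct node *n) {
        int h = 0;
        if (node_height(n->child[0]) > h)
                h = node_height(n->child[0]);
        if (node_height(n->child[1]) > h)
                h = node_height(n->child[1]);
        n->height = h + 1;
}

/* A delta outside [-1, +1] means the sub-tree must be rotated. */
static int node_delta(const struct node *n) {
        return node_height(n->child[0]) - node_height(n->child[1]);
}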
src/rdavl.h (98 changed lines)
@@ -49,13 +49,13 @@ typedef enum {
  * provide it as the 'field' argument in the API below.
  */
 typedef struct rd_avl_node_s {
-        struct rd_avl_node_s *ran_p[2]; /* RD_AVL_LEFT and RD_AVL_RIGHT */
-        int ran_height;                 /* Sub-tree height */
-        void *ran_elm;                  /* Backpointer to the containing
-                                         * element. This could be considered
-                                         * costly but is convenient for the
-                                         * caller: RAM is cheap,
-                                         * development time isn't*/
+        struct rd_avl_node_s *ran_p[2]; /* RD_AVL_LEFT and RD_AVL_RIGHT */
+        int ran_height;                 /* Sub-tree height */
+        void *ran_elm;                  /* Backpointer to the containing
+                                         * element. This could be considered
+                                         * costly but is convenient for the
+                                         * caller: RAM is cheap,
+                                         * development time isn't*/
 } rd_avl_node_t;

@@ -63,24 +63,23 @@ typedef struct rd_avl_node_s {
 /**
  * Per-AVL application-provided element comparator.
  */
-typedef int (*rd_avl_cmp_t) (const void *, const void *);
+typedef int (*rd_avl_cmp_t)(const void *, const void *);

 /**
  * AVL tree
  */
 typedef struct rd_avl_s {
-        rd_avl_node_t *ravl_root; /* Root node */
-        rd_avl_cmp_t ravl_cmp;    /* Comparator */
-        int ravl_flags;           /* Flags */
-#define RD_AVL_F_LOCKS 0x1 /* Enable thread-safeness */
-#define RD_AVL_F_OWNER 0x2 /* internal: rd_avl_init() allocated ravl */
-        rwlock_t ravl_rwlock; /* Mutex when .._F_LOCKS is set. */
+        rd_avl_node_t *ravl_root; /* Root node */
+        rd_avl_cmp_t ravl_cmp;    /* Comparator */
+        int ravl_flags;           /* Flags */
+#define RD_AVL_F_LOCKS 0x1 /* Enable thread-safeness */
+#define RD_AVL_F_OWNER 0x2 /* internal: rd_avl_init() allocated ravl */
+        rwlock_t ravl_rwlock; /* Mutex when .._F_LOCKS is set. */
 } rd_avl_t;

 /**
  *
@@ -94,21 +93,18 @@ typedef struct rd_avl_s {
  * In case of collision the previous entry is overwritten by the
  * new one and the previous element is returned, else NULL.
  */
-#define RD_AVL_INSERT(ravl,elm,field) \
-        rd_avl_insert(ravl, elm, &(elm)->field)
+#define RD_AVL_INSERT(ravl, elm, field) rd_avl_insert(ravl, elm, &(elm)->field)

 /**
  * Remove element by matching value 'elm' using compare function.
  */
-#define RD_AVL_REMOVE_ELM(ravl,elm) \
-        rd_avl_remove_elm(ravl, elm)
+#define RD_AVL_REMOVE_ELM(ravl, elm) rd_avl_remove_elm(ravl, elm)

 /**
  * Search for (by value using compare function) and return matching elm.
  */
-#define RD_AVL_FIND(ravl,elm) \
-        rd_avl_find(ravl, elm, 1)
+#define RD_AVL_FIND(ravl, elm) rd_avl_find(ravl, elm, 1)

 /**
@@ -118,7 +114,7 @@ typedef struct rd_avl_s {
  *
  * NOTE: rd_avl_wrlock() must be held.
  */
-#define RD_AVL_FIND_NL(ravl,elm) \
+#define RD_AVL_FIND_NL(ravl, elm)                                              \
         rd_avl_find_node(ravl, (ravl)->ravl_root, elm, 0)

@@ -127,32 +123,31 @@ typedef struct rd_avl_s {
  *
  * NOTE: rd_avl_wrlock() must be held.
  */
-#define RD_AVL_FIND_NODE_NL(ravl,elm) \
-        rd_avl_find(ravl, elm, 0)
+#define RD_AVL_FIND_NODE_NL(ravl, elm) rd_avl_find(ravl, elm, 0)

 /**
  * Changes the element pointer for an existing AVL node in the tree.
- * The new element must be identical (according to the comparator)
+ * The new element must be identical (according to the comparator)
  * to the previous element.
  *
  * NOTE: rd_avl_wrlock() must be held.
  */
-#define RD_AVL_ELM_SET_NL(ran,elm) ((ran)->ran_elm = (elm))
+#define RD_AVL_ELM_SET_NL(ran, elm) ((ran)->ran_elm = (elm))

 /**
  * Returns the current element pointer for an existing AVL node in the tree
- *
+ *
  * NOTE: rd_avl_*lock() must be held.
  */
-#define RD_AVL_ELM_GET_NL(ran) ((ran)->ran_elm)
+#define RD_AVL_ELM_GET_NL(ran) ((ran)->ran_elm)

 /**
  * Destroy previously initialized (by rd_avl_init()) AVL tree.
  */
-void rd_avl_destroy (rd_avl_t *ravl);
+void rd_avl_destroy(rd_avl_t *ravl);

 /**
  * Initialize (and optionally allocate if 'ravl' is NULL) AVL tree.

@@ -162,7 +157,7 @@ void rd_avl_destroy (rd_avl_t *ravl);
  *
  * For thread-safe AVL trees supply RD_AVL_F_LOCKS in 'flags'.
  */
-rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags);
+rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags);

 /**
@@ -173,71 +168,70 @@ rd_avl_t *rd_avl_init (rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags);
  *
  * rdavl utilizes rwlocks to allow multiple concurrent read threads.
  */
-static RD_INLINE RD_UNUSED void rd_avl_rdlock (rd_avl_t *ravl) {
+static RD_INLINE RD_UNUSED void rd_avl_rdlock(rd_avl_t *ravl) {
         if (ravl->ravl_flags & RD_AVL_F_LOCKS)
                 rwlock_rdlock(&ravl->ravl_rwlock);
 }

-static RD_INLINE RD_UNUSED void rd_avl_wrlock (rd_avl_t *ravl) {
+static RD_INLINE RD_UNUSED void rd_avl_wrlock(rd_avl_t *ravl) {
         if (ravl->ravl_flags & RD_AVL_F_LOCKS)
                 rwlock_wrlock(&ravl->ravl_rwlock);
 }

-static RD_INLINE RD_UNUSED void rd_avl_rdunlock (rd_avl_t *ravl) {
+static RD_INLINE RD_UNUSED void rd_avl_rdunlock(rd_avl_t *ravl) {
         if (ravl->ravl_flags & RD_AVL_F_LOCKS)
                 rwlock_rdunlock(&ravl->ravl_rwlock);
 }

-static RD_INLINE RD_UNUSED void rd_avl_wrunlock (rd_avl_t *ravl) {
+static RD_INLINE RD_UNUSED void rd_avl_wrunlock(rd_avl_t *ravl) {
         if (ravl->ravl_flags & RD_AVL_F_LOCKS)
                 rwlock_wrunlock(&ravl->ravl_rwlock);
 }

 /**
  * Private API, dont use directly.
  */

-rd_avl_node_t *rd_avl_insert_node (rd_avl_t *ravl,
-                                   rd_avl_node_t *parent,
-                                   rd_avl_node_t *ran,
-                                   rd_avl_node_t **existing);
+rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl,
+                                  rd_avl_node_t *parent,
+                                  rd_avl_node_t *ran,
+                                  rd_avl_node_t **existing);

-static RD_UNUSED void *rd_avl_insert (rd_avl_t *ravl, void *elm,
-                                      rd_avl_node_t *ran) {
+static RD_UNUSED void *
+rd_avl_insert(rd_avl_t *ravl, void *elm, rd_avl_node_t *ran) {
         rd_avl_node_t *existing = NULL;

         memset(ran, 0, sizeof(*ran));
         ran->ran_elm = elm;

         rd_avl_wrlock(ravl);
-        ravl->ravl_root = rd_avl_insert_node(ravl, ravl->ravl_root,
-                                             ran, &existing);
+        ravl->ravl_root =
+            rd_avl_insert_node(ravl, ravl->ravl_root, ran, &existing);
         rd_avl_wrunlock(ravl);

         return existing ? existing->ran_elm : NULL;
 }

-rd_avl_node_t *rd_avl_remove_elm0 (rd_avl_t *ravl, rd_avl_node_t *parent,
-                                   const void *elm);
+rd_avl_node_t *
+rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm);

-static RD_INLINE RD_UNUSED
-void rd_avl_remove_elm (rd_avl_t *ravl, const void *elm) {
+static RD_INLINE RD_UNUSED void rd_avl_remove_elm(rd_avl_t *ravl,
+                                                  const void *elm) {
         rd_avl_wrlock(ravl);
         ravl->ravl_root = rd_avl_remove_elm0(ravl, ravl->ravl_root, elm);
         rd_avl_wrunlock(ravl);
 }

-rd_avl_node_t *rd_avl_find_node (const rd_avl_t *ravl,
-                                 const rd_avl_node_t *begin,
-                                 const void *elm);
+rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl,
+                                const rd_avl_node_t *begin,
+                                const void *elm);

-static RD_INLINE RD_UNUSED void *rd_avl_find (rd_avl_t *ravl, const void *elm,
-                                              int dolock) {
+static RD_INLINE RD_UNUSED void *
+rd_avl_find(rd_avl_t *ravl, const void *elm, int dolock) {
         const rd_avl_node_t *ran;
         void *ret;
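
The header above is the whole public rdavl surface. A hypothetical usage sketch, with the element struct and comparator invented for illustration (the rd_avl_*/RD_AVL_* names come from the header):

/* Hypothetical rdavl usage sketch; 'struct my_elm' and 'my_cmp' are
 * invented for illustration. */
struct my_elm {
        int key;
        rd_avl_node_t avl_node; /* embedded node, passed as 'field' */
};

static int my_cmp(const void *_a, const void *_b) {
        const struct my_elm *a = _a, *b = _b;
        return a->key - b->key;
}

static void avl_example(void) {
        rd_avl_t avl;
        struct my_elm e1 = {1, {{NULL, NULL}, 0, NULL}};
        struct my_elm *found;

        rd_avl_init(&avl, my_cmp, RD_AVL_F_LOCKS);
        RD_AVL_INSERT(&avl, &e1, avl_node); /* returns replaced elm or NULL */
        found = RD_AVL_FIND(&avl, &e1);     /* find by value, with locking */
        (void)found;
        rd_avl_destroy(&avl);
}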
src/rdbuf.c (806 changed lines)
Diff not shown because of its large size.
src/rdbuf.h (232 changed lines)
@@ -61,50 +61,50 @@
  * @brief Buffer segment
  */
 typedef struct rd_segment_s {
-        TAILQ_ENTRY(rd_segment_s) seg_link; /*<< rbuf_segments Link */
-        char *seg_p;      /**< Backing-store memory */
-        size_t seg_of;    /**< Current relative write-position
-                           *   (length of payload in this segment) */
-        size_t seg_size;  /**< Allocated size of seg_p */
-        size_t seg_absof; /**< Absolute offset of this segment's
-                           *   beginning in the grand rd_buf_t */
-        void (*seg_free) (void *p); /**< Optional free function for seg_p */
-        int seg_flags;    /**< Segment flags */
-#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */
-#define RD_SEGMENT_F_FREE 0x2   /**< Free segment on destroy,
-                                 *   e.g, not a fixed segment. */
+        TAILQ_ENTRY(rd_segment_s) seg_link; /*<< rbuf_segments Link */
+        char *seg_p;      /**< Backing-store memory */
+        size_t seg_of;    /**< Current relative write-position
+                           *   (length of payload in this segment) */
+        size_t seg_size;  /**< Allocated size of seg_p */
+        size_t seg_absof; /**< Absolute offset of this segment's
+                           *   beginning in the grand rd_buf_t */
+        void (*seg_free)(void *p); /**< Optional free function for seg_p */
+        int seg_flags;    /**< Segment flags */
+#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */
+#define RD_SEGMENT_F_FREE                                                      \
+        0x2 /**< Free segment on destroy,                                     \
+             *   e.g, not a fixed segment. */
 } rd_segment_t;

-TAILQ_HEAD(rd_segment_head,rd_segment_s);
+TAILQ_HEAD(rd_segment_head, rd_segment_s);

 /**
  * @brief Buffer, containing a list of segments.
  */
 typedef struct rd_buf_s {
         struct rd_segment_head rbuf_segments; /**< TAILQ list of segments */
-        size_t rbuf_segment_cnt;              /**< Number of segments */
+        size_t rbuf_segment_cnt;              /**< Number of segments */

-        rd_segment_t *rbuf_wpos; /**< Current write position seg */
-        size_t rbuf_len;         /**< Current (written) length */
-        size_t rbuf_erased;      /**< Total number of bytes
-                                  *   erased from segments.
-                                  *   This amount is taken into
-                                  *   account when checking for
-                                  *   writable space which is
-                                  *   always at the end of the
-                                  *   buffer and thus can't make
-                                  *   use of the erased parts. */
-        size_t rbuf_size;        /**< Total allocated size of
-                                  *   all segments. */
+        rd_segment_t *rbuf_wpos; /**< Current write position seg */
+        size_t rbuf_len;         /**< Current (written) length */
+        size_t rbuf_erased;      /**< Total number of bytes
+                                  *   erased from segments.
+                                  *   This amount is taken into
+                                  *   account when checking for
+                                  *   writable space which is
+                                  *   always at the end of the
+                                  *   buffer and thus can't make
+                                  *   use of the erased parts. */
+        size_t rbuf_size;        /**< Total allocated size of
+                                  *   all segments. */

-        char *rbuf_extra;       /* Extra memory allocated for
-                                 * use by segment structs,
-                                 * buffer memory, etc. */
-        size_t rbuf_extra_len;  /* Current extra memory used */
-        size_t rbuf_extra_size; /* Total size of extra memory */
+        char *rbuf_extra;       /* Extra memory allocated for
+                                 * use by segment structs,
+                                 * buffer memory, etc. */
+        size_t rbuf_extra_len;  /* Current extra memory used */
+        size_t rbuf_extra_size; /* Total size of extra memory */
 } rd_buf_t;

@@ -113,13 +113,13 @@ typedef struct rd_buf_s {
  * @brief A read-only slice of a buffer.
  */
 typedef struct rd_slice_s {
-        const rd_buf_t *buf;     /**< Pointer to buffer */
-        const rd_segment_t *seg; /**< Current read position segment.
-                                  *   Will point to NULL when end of
-                                  *   slice is reached. */
-        size_t rof;              /**< Relative read offset in segment */
-        size_t start;            /**< Slice start offset in buffer */
-        size_t end;              /**< Slice end offset in buffer+1 */
+        const rd_buf_t *buf;     /**< Pointer to buffer */
+        const rd_segment_t *seg; /**< Current read position segment.
+                                  *   Will point to NULL when end of
+                                  *   slice is reached. */
+        size_t rof;              /**< Relative read offset in segment */
+        size_t start;            /**< Slice start offset in buffer */
+        size_t end;              /**< Slice end offset in buffer+1 */
 } rd_slice_t;

@@ -127,7 +127,7 @@ typedef struct rd_slice_s {
 /**
  * @returns the current write position (absolute offset)
  */
-static RD_INLINE RD_UNUSED size_t rd_buf_write_pos (const rd_buf_t *rbuf) {
+static RD_INLINE RD_UNUSED size_t rd_buf_write_pos(const rd_buf_t *rbuf) {
         const rd_segment_t *seg = rbuf->rbuf_wpos;

         if (unlikely(!seg)) {

@@ -146,20 +146,19 @@ static RD_INLINE RD_UNUSED size_t rd_buf_write_pos (const rd_buf_t *rbuf) {
 /**
  * @returns the number of bytes available for writing (before growing).
  */
-static RD_INLINE RD_UNUSED size_t rd_buf_write_remains (const rd_buf_t *rbuf) {
+static RD_INLINE RD_UNUSED size_t rd_buf_write_remains(const rd_buf_t *rbuf) {
         return rbuf->rbuf_size - (rbuf->rbuf_len + rbuf->rbuf_erased);
 }

 /**
  * @returns the number of bytes remaining to write to the given segment,
  *          and sets the \p *p pointer (unless NULL) to the start of
  *          the contiguous memory.
  */
 static RD_INLINE RD_UNUSED size_t
-rd_segment_write_remains (const rd_segment_t *seg, void **p) {
+rd_segment_write_remains(const rd_segment_t *seg, void **p) {
         if (unlikely((seg->seg_flags & RD_SEGMENT_F_RDONLY)))
                 return 0;
         if (p)

@@ -172,7 +171,7 @@ rd_segment_write_remains (const rd_segment_t *seg, void **p) {
 /**
  * @returns the last segment for the buffer.
  */
-static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last (const rd_buf_t *rbuf) {
+static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last(const rd_buf_t *rbuf) {
         return TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head);
 }

@@ -180,53 +179,59 @@ static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last (const rd_buf_t *rbuf) {
 /**
  * @returns the total written buffer length
  */
-static RD_INLINE RD_UNUSED size_t rd_buf_len (const rd_buf_t *rbuf) {
+static RD_INLINE RD_UNUSED size_t rd_buf_len(const rd_buf_t *rbuf) {
         return rbuf->rbuf_len;
 }

-int rd_buf_write_seek (rd_buf_t *rbuf, size_t absof);
+int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof);

-size_t rd_buf_write (rd_buf_t *rbuf, const void *payload, size_t size);
-size_t rd_buf_write_slice (rd_buf_t *rbuf, rd_slice_t *slice);
-size_t rd_buf_write_update (rd_buf_t *rbuf, size_t absof,
-                            const void *payload, size_t size);
-void rd_buf_push0 (rd_buf_t *rbuf, const void *payload, size_t size,
-                   void (*free_cb)(void *), rd_bool_t writable);
-#define rd_buf_push(rbuf,payload,size,free_cb) \
-        rd_buf_push0(rbuf,payload,size,free_cb,rd_false/*not-writable*/)
-#define rd_buf_push_writable(rbuf,payload,size,free_cb) \
-        rd_buf_push0(rbuf,payload,size,free_cb,rd_true/*writable*/)
+size_t rd_buf_write(rd_buf_t *rbuf, const void *payload, size_t size);
+size_t rd_buf_write_slice(rd_buf_t *rbuf, rd_slice_t *slice);
+size_t rd_buf_write_update(rd_buf_t *rbuf,
+                           size_t absof,
+                           const void *payload,
+                           size_t size);
+void rd_buf_push0(rd_buf_t *rbuf,
+                  const void *payload,
+                  size_t size,
+                  void (*free_cb)(void *),
+                  rd_bool_t writable);
+#define rd_buf_push(rbuf, payload, size, free_cb)                              \
+        rd_buf_push0(rbuf, payload, size, free_cb, rd_false /*not-writable*/)
+#define rd_buf_push_writable(rbuf, payload, size, free_cb)                     \
+        rd_buf_push0(rbuf, payload, size, free_cb, rd_true /*writable*/)

-size_t rd_buf_erase (rd_buf_t *rbuf, size_t absof, size_t size);
+size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size);

-size_t rd_buf_get_writable (rd_buf_t *rbuf, void **p);
+size_t rd_buf_get_writable(rd_buf_t *rbuf, void **p);

-void rd_buf_write_ensure_contig (rd_buf_t *rbuf, size_t size);
+void rd_buf_write_ensure_contig(rd_buf_t *rbuf, size_t size);

-void rd_buf_write_ensure (rd_buf_t *rbuf, size_t min_size, size_t max_size);
+void rd_buf_write_ensure(rd_buf_t *rbuf, size_t min_size, size_t max_size);

-size_t rd_buf_get_write_iov (const rd_buf_t *rbuf,
-                             struct iovec *iovs, size_t *iovcntp,
-                             size_t iov_max, size_t size_max);
+size_t rd_buf_get_write_iov(const rd_buf_t *rbuf,
+                            struct iovec *iovs,
+                            size_t *iovcntp,
+                            size_t iov_max,
+                            size_t size_max);

-void rd_buf_init (rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size);
-rd_buf_t *rd_buf_new (size_t fixed_seg_cnt, size_t buf_size);
+void rd_buf_init(rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size);
+rd_buf_t *rd_buf_new(size_t fixed_seg_cnt, size_t buf_size);

-void rd_buf_destroy (rd_buf_t *rbuf);
-void rd_buf_destroy_free (rd_buf_t *rbuf);
+void rd_buf_destroy(rd_buf_t *rbuf);
+void rd_buf_destroy_free(rd_buf_t *rbuf);

-void rd_buf_dump (const rd_buf_t *rbuf, int do_hexdump);
+void rd_buf_dump(const rd_buf_t *rbuf, int do_hexdump);

-int unittest_rdbuf (void);
+int unittest_rdbuf(void);

 /**@}*/

 /**
  * @name Buffer reads operate on slices of an rd_buf_t and does not
  *       modify the underlying rd_buf_t itself.

@@ -251,7 +256,7 @@ int unittest_rdbuf (void);
 /**
  * @returns the read position in the slice as a new slice.
  */
-static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos (const rd_slice_t *slice) {
+static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos(const rd_slice_t *slice) {
         rd_slice_t newslice = *slice;

         if (!slice->seg)

@@ -266,8 +271,7 @@ static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos (const rd_slice_t *slice) {
  * @returns the read position as an absolute buffer byte offset.
  * @remark this is the buffer offset, not the slice's local offset.
  */
-static RD_INLINE RD_UNUSED size_t
-rd_slice_abs_offset (const rd_slice_t *slice) {
+static RD_INLINE RD_UNUSED size_t rd_slice_abs_offset(const rd_slice_t *slice) {
         if (unlikely(!slice->seg)) /* reader has reached the end */
                 return slice->end;

@@ -278,7 +282,7 @@ rd_slice_abs_offset (const rd_slice_t *slice) {
  * @returns the read position as a byte offset.
  * @remark this is the slice-local offset, not the backing buffer's offset.
  */
-static RD_INLINE RD_UNUSED size_t rd_slice_offset (const rd_slice_t *slice) {
+static RD_INLINE RD_UNUSED size_t rd_slice_offset(const rd_slice_t *slice) {
         if (unlikely(!slice->seg)) /* reader has reached the end */
                 return rd_slice_size(slice);

@@ -287,21 +291,25 @@ static RD_INLINE RD_UNUSED size_t rd_slice_offset (const rd_slice_t *slice) {

-int rd_slice_init_seg (rd_slice_t *slice, const rd_buf_t *rbuf,
-                       const rd_segment_t *seg, size_t rof, size_t size);
-int rd_slice_init (rd_slice_t *slice, const rd_buf_t *rbuf,
-                   size_t absof, size_t size);
-void rd_slice_init_full (rd_slice_t *slice, const rd_buf_t *rbuf);
-
-size_t rd_slice_reader (rd_slice_t *slice, const void **p);
-size_t rd_slice_peeker (const rd_slice_t *slice, const void **p);
-
-size_t rd_slice_read (rd_slice_t *slice, void *dst, size_t size);
-size_t rd_slice_peek (const rd_slice_t *slice, size_t offset,
-                      void *dst, size_t size);
-
-size_t rd_slice_read_uvarint (rd_slice_t *slice, uint64_t *nump);
+int rd_slice_init_seg(rd_slice_t *slice,
+                      const rd_buf_t *rbuf,
+                      const rd_segment_t *seg,
+                      size_t rof,
+                      size_t size);
+int rd_slice_init(rd_slice_t *slice,
+                  const rd_buf_t *rbuf,
+                  size_t absof,
+                  size_t size);
+void rd_slice_init_full(rd_slice_t *slice, const rd_buf_t *rbuf);
+
+size_t rd_slice_reader(rd_slice_t *slice, const void **p);
+size_t rd_slice_peeker(const rd_slice_t *slice, const void **p);
+
+size_t rd_slice_read(rd_slice_t *slice, void *dst, size_t size);
+size_t
+rd_slice_peek(const rd_slice_t *slice, size_t offset, void *dst, size_t size);
+
+size_t rd_slice_read_uvarint(rd_slice_t *slice, uint64_t *nump);

 /**
  * @brief Read a zig-zag varint-encoded signed integer from \p slice,

@@ -310,16 +318,15 @@ size_t rd_slice_read_uvarint (rd_slice_t *slice, uint64_t *nump);
  * @returns the number of bytes read on success or 0 in case of
  *          buffer underflow.
  */
-static RD_UNUSED RD_INLINE
-size_t rd_slice_read_varint (rd_slice_t *slice, int64_t *nump) {
+static RD_UNUSED RD_INLINE size_t rd_slice_read_varint(rd_slice_t *slice,
+                                                       int64_t *nump) {
         size_t r;
         uint64_t unum;

         r = rd_slice_read_uvarint(slice, &unum);
         if (likely(r > 0)) {
                 /* Zig-zag decoding */
-                *nump = (int64_t)((unum >> 1) ^
-                                  -(int64_t)(unum & 1));
+                *nump = (int64_t)((unum >> 1) ^ -(int64_t)(unum & 1));
         }

         return r;
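
The zig-zag decode expression in the hunk above maps an unsigned varint back to a signed integer (0, 1, 2, 3, ... become 0, -1, 1, -2, ...). A standalone round-trip sketch of both directions, not library code:

/* Zig-zag encode/decode sketch mirroring the decode expression above. */
#include <assert.h>
#include <stdint.h>

static uint64_t zigzag_encode(int64_t n) {
        /* Arithmetic right shift of the sign bit spreads it to all bits. */
        return ((uint64_t)n << 1) ^ (uint64_t)(n >> 63);
}

static int64_t zigzag_decode(uint64_t u) {
        return (int64_t)((u >> 1) ^ -(int64_t)(u & 1));
}

int main(void) {
        assert(zigzag_encode(0) == 0);
        assert(zigzag_encode(-1) == 1);
        assert(zigzag_encode(1) == 2);
        assert(zigzag_decode(zigzag_encode(-123456789)) == -123456789);
        return 0;
}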
@@ -327,35 +334,36 @@ size_t rd_slice_read_varint (rd_slice_t *slice, int64_t *nump) {

-const void *rd_slice_ensure_contig (rd_slice_t *slice, size_t size);
+const void *rd_slice_ensure_contig(rd_slice_t *slice, size_t size);

-int rd_slice_seek (rd_slice_t *slice, size_t offset);
+int rd_slice_seek(rd_slice_t *slice, size_t offset);

-size_t rd_slice_get_iov (const rd_slice_t *slice,
-                         struct iovec *iovs, size_t *iovcntp,
-                         size_t iov_max, size_t size_max);
+size_t rd_slice_get_iov(const rd_slice_t *slice,
+                        struct iovec *iovs,
+                        size_t *iovcntp,
+                        size_t iov_max,
+                        size_t size_max);

-uint32_t rd_slice_crc32 (rd_slice_t *slice);
-uint32_t rd_slice_crc32c (rd_slice_t *slice);
+uint32_t rd_slice_crc32(rd_slice_t *slice);
+uint32_t rd_slice_crc32c(rd_slice_t *slice);

-int rd_slice_narrow (rd_slice_t *slice, rd_slice_t *save_slice, size_t size)
-        RD_WARN_UNUSED_RESULT;
-int rd_slice_narrow_relative (rd_slice_t *slice, rd_slice_t *save_slice,
-                              size_t relsize)
-        RD_WARN_UNUSED_RESULT;
-void rd_slice_widen (rd_slice_t *slice, const rd_slice_t *save_slice);
-int rd_slice_narrow_copy (const rd_slice_t *orig, rd_slice_t *new_slice,
-                          size_t size)
-        RD_WARN_UNUSED_RESULT;
-int rd_slice_narrow_copy_relative (const rd_slice_t *orig,
-                                   rd_slice_t *new_slice,
-                                   size_t relsize)
-        RD_WARN_UNUSED_RESULT;
+int rd_slice_narrow(rd_slice_t *slice,
+                    rd_slice_t *save_slice,
+                    size_t size) RD_WARN_UNUSED_RESULT;
+int rd_slice_narrow_relative(rd_slice_t *slice,
+                             rd_slice_t *save_slice,
+                             size_t relsize) RD_WARN_UNUSED_RESULT;
+void rd_slice_widen(rd_slice_t *slice, const rd_slice_t *save_slice);
+int rd_slice_narrow_copy(const rd_slice_t *orig,
+                         rd_slice_t *new_slice,
+                         size_t size) RD_WARN_UNUSED_RESULT;
+int rd_slice_narrow_copy_relative(const rd_slice_t *orig,
+                                  rd_slice_t *new_slice,
+                                  size_t relsize) RD_WARN_UNUSED_RESULT;

-void rd_slice_dump (const rd_slice_t *slice, int do_hexdump);
+void rd_slice_dump(const rd_slice_t *slice, int do_hexdump);

 /**@}*/
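
A hypothetical write-then-read round trip using only declarations from this header; a sketch under the assumption that rd_buf_init()'s second and third arguments are a fixed-segment count and an initial size hint, as their names suggest:

/* rd_buf/rd_slice usage sketch (not a test from the library). */
static void rdbuf_example(void) {
        rd_buf_t b;
        rd_slice_t slice;
        char out[5];

        rd_buf_init(&b, 0 /*no fixed segments*/, 64 /*initial size hint*/);
        rd_buf_write(&b, "hello", 5);

        rd_slice_init_full(&slice, &b); /* slice covers the whole buffer */
        rd_slice_read(&slice, out, sizeof(out));

        rd_buf_destroy(&b);
}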
src/rdcrc32.c (136 changed lines)
@@ -29,7 +29,7 @@
  * \file rdcrc32.c
  * Functions and types for CRC checks.
  *
- *
+ *
  *
  * Generated on Tue May 8 17:37:04 2012,
  * by pycrc v0.7.10, http://www.tty1.net/pycrc/

@@ -42,7 +42,7 @@
  * ReflectOut = True
  * Algorithm = table-driven
  *****************************************************************************/
-#include "rdcrc32.h" /* include the header file generated with pycrc */
+#include "rdcrc32.h" /* include the header file generated with pycrc */
 #include <stdlib.h>
 #include <stdint.h>

@@ -50,71 +50,49 @@
  * Static table used for the table_driven implementation.
  *****************************************************************************/
 const rd_crc32_t crc_table[256] = {
-        0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
-        0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
-        0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
-        0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
-        0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
-        0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
-        0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
-        0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
-        0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
-        0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
-        0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
-        0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
-        0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
-        0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
-        0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
-        0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
-        0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
-        0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
-        0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
-        0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
-        0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
-        0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
-        0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
-        0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
-        0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
-        0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
-        0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
-        0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
-        0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
-        0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
-        0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
-        0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
-        0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
-        0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
-        0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
-        0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
-        0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
-        0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
-        0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
-        0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
-        0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
-        0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
-        0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
-        0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
-        0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
-        0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
-        0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
-        0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
-        0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
-        0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
-        0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
-        0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
-        0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
-        0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
-        0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
-        0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
-        0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
-        0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
-        0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
-        0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
-        0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
-        0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
-        0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
-        0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
-};
+    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+    0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+    0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+    0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+    0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+    0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+    0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+    0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+    0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+    0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+    0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+    0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+    0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+    0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+    0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+    0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+    0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+    0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+    0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+    0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+    0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+    0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+    0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+    0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+    0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+    0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+    0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+    0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+    0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+    0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+    0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+    0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+    0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+    0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+    0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+    0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+    0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+    0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+    0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+    0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d};

 /**
  * Reflect all bits of a \a data word of \a data_len bytes.

@@ -123,20 +101,14 @@ const rd_crc32_t crc_table[256] = {
  * \param data_len The width of \a data expressed in number of bits.
  * \return The reflected data.
  *****************************************************************************/
-rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len)
-{
-        unsigned int i;
-        rd_crc32_t ret;
+rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len) {
+        unsigned int i;
+        rd_crc32_t ret;

-        ret = data & 0x01;
-        for (i = 1; i < data_len; i++) {
-                data >>= 1;
-                ret = (ret << 1) | (data & 0x01);
-        }
-        return ret;
+        ret = data & 0x01;
+        for (i = 1; i < data_len; i++) {
+                data >>= 1;
+                ret = (ret << 1) | (data & 0x01);
+        }
+        return ret;
 }
src/rdcrc32.h
@@ -76,7 +76,7 @@ extern "C" {
 typedef uint32_t rd_crc32_t;

 #if !WITH_ZLIB
-extern const rd_crc32_t crc_table[256];
+extern const rd_crc32_t crc_table[256];
 #endif

@@ -95,12 +95,11 @@ rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len);
  *
  * \return The initial crc value.
  *****************************************************************************/
-static RD_INLINE rd_crc32_t rd_crc32_init(void)
-{
+static RD_INLINE rd_crc32_t rd_crc32_init(void) {
 #if WITH_ZLIB
         return crc32(0, NULL, 0);
 #else
-        return 0xffffffff;
+        return 0xffffffff;
 #endif
 }

@@ -113,7 +112,7 @@ static RD_INLINE rd_crc32_t rd_crc32_init(void)
  * \param data_len Number of bytes in the \a data buffer.
  * \return The updated crc value.
  *****************************************************************************/
-/**
+/**
  * Update the crc value with new data.
  *
  * \param crc The current crc value.

@@ -121,22 +120,22 @@ static RD_INLINE rd_crc32_t rd_crc32_init(void)
  * \param data_len Number of bytes in the \a data buffer.
  * \return The updated crc value.
  *****************************************************************************/
-static RD_INLINE RD_UNUSED
-rd_crc32_t rd_crc32_update(rd_crc32_t crc, const unsigned char *data, size_t data_len)
-{
+static RD_INLINE RD_UNUSED rd_crc32_t rd_crc32_update(rd_crc32_t crc,
+                                                      const unsigned char *data,
+                                                      size_t data_len) {
 #if WITH_ZLIB
         rd_assert(data_len <= UINT_MAX);
-        return crc32(crc, data, (uInt) data_len);
+        return crc32(crc, data, (uInt)data_len);
 #else
-        unsigned int tbl_idx;
+        unsigned int tbl_idx;

-        while (data_len--) {
-                tbl_idx = (crc ^ *data) & 0xff;
-                crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff;
+        while (data_len--) {
+                tbl_idx = (crc ^ *data) & 0xff;
+                crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff;

-                data++;
-        }
-        return crc & 0xffffffff;
+                data++;
+        }
+        return crc & 0xffffffff;
 #endif
 }

@@ -147,12 +146,11 @@ rd_crc32_t rd_crc32_update(rd_crc32_t crc, const unsigned char *data, size_t dat
  * \param crc The current crc value.
  * \return The final crc value.
  *****************************************************************************/
-static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc)
-{
+static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc) {
 #if WITH_ZLIB
         return crc;
 #else
-        return crc ^ 0xffffffff;
+        return crc ^ 0xffffffff;
 #endif
 }

@@ -160,14 +158,13 @@ static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc)
 /**
  * Wrapper for performing CRC32 on the provided buffer.
  */
-static RD_INLINE rd_crc32_t rd_crc32 (const char *data, size_t data_len) {
-        return rd_crc32_finalize(rd_crc32_update(rd_crc32_init(),
-                                                 (const unsigned char *)data,
-                                                 data_len));
+static RD_INLINE rd_crc32_t rd_crc32(const char *data, size_t data_len) {
+        return rd_crc32_finalize(rd_crc32_update(
+            rd_crc32_init(), (const unsigned char *)data, data_len));
 }

 #ifdef __cplusplus
-} /* closing brace for extern "C" */
+} /* closing brace for extern "C" */
 #endif

-#endif /* __RDCRC32___H__ */
+#endif /* __RDCRC32___H__ */
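
A short sketch of the init/update/finalize flow that rd_crc32() wraps above, e.g. for checksumming two non-contiguous parts in one pass; with WITH_ZLIB the same calls delegate to zlib's crc32():

/* Incremental CRC32 usage sketch built on the declarations above. */
static rd_crc32_t crc_of_two_parts(const char *a, size_t alen,
                                   const char *b, size_t blen) {
        rd_crc32_t crc = rd_crc32_init();
        crc = rd_crc32_update(crc, (const unsigned char *)a, alen);
        crc = rd_crc32_update(crc, (const unsigned char *)b, blen);
        return rd_crc32_finalize(crc);
}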
src/rddl.c (30 changed lines)
@@ -44,7 +44,7 @@
  * @brief Latest thread-local dl error, normalized to suit our logging.
  * @returns a newly allocated string that must be freed
  */
-static char *rd_dl_error (void) {
+static char *rd_dl_error(void) {
 #if WITH_LIBDL
         char *errstr;
         char *s;

@@ -72,20 +72,20 @@ static char *rd_dl_error (void) {
  *          else NULL.
  */
 static rd_dl_hnd_t *
-rd_dl_open0 (const char *path, char *errstr, size_t errstr_size) {
+rd_dl_open0(const char *path, char *errstr, size_t errstr_size) {
         void *handle;
         const char *loadfunc;
 #if WITH_LIBDL
         loadfunc = "dlopen()";
-        handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
+        handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
 #elif defined(_WIN32)
         loadfunc = "LoadLibrary()";
-        handle = (void *)LoadLibraryA(path);
+        handle = (void *)LoadLibraryA(path);
 #endif
         if (!handle) {
                 char *dlerrstr = rd_dl_error();
-                rd_snprintf(errstr, errstr_size, "%s failed: %s",
-                            loadfunc, dlerrstr);
+                rd_snprintf(errstr, errstr_size, "%s failed: %s", loadfunc,
+                            dlerrstr);
                 rd_free(dlerrstr);
         }
         return (rd_dl_hnd_t *)handle;

@@ -98,7 +98,7 @@ rd_dl_open0 (const char *path, char *errstr, size_t errstr_size) {
  * @returns the library handle (platform dependent, thus opaque) on success,
  *          else NULL.
  */
-rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) {
+rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size) {
         rd_dl_hnd_t *handle;
         char *extpath;
         size_t pathlen;

@@ -135,7 +135,7 @@ rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) {
         pathlen = strlen(path);
         extpath = rd_alloca(pathlen + strlen(solib_ext) + 1);
         memcpy(extpath, path, pathlen);
-        memcpy(extpath+pathlen, solib_ext, strlen(solib_ext) + 1);
+        memcpy(extpath + pathlen, solib_ext, strlen(solib_ext) + 1);

         /* Try again with extension */
         return rd_dl_open0(extpath, errstr, errstr_size);

@@ -146,7 +146,7 @@ rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size) {
  * @brief Close handle previously returned by rd_dl_open()
  * @remark errors are ignored (what can we do anyway?)
  */
-void rd_dl_close (rd_dl_hnd_t *handle) {
+void rd_dl_close(rd_dl_hnd_t *handle) {
 #if WITH_LIBDL
         dlclose((void *)handle);
 #elif defined(_WIN32)

@@ -158,9 +158,10 @@ void rd_dl_close (rd_dl_hnd_t *handle) {
  * @brief look up address of \p symbol in library handle \p handle
  * @returns the function pointer on success or NULL on error.
  */
-void *
-rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol,
-           char *errstr, size_t errstr_size) {
+void *rd_dl_sym(rd_dl_hnd_t *handle,
+                const char *symbol,
+                char *errstr,
+                size_t errstr_size) {
         void *func;
 #if WITH_LIBDL
         func = dlsym((void *)handle, symbol);

@@ -170,10 +171,9 @@ rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol,
         if (!func) {
                 char *dlerrstr = rd_dl_error();
                 rd_snprintf(errstr, errstr_size,
-                            "Failed to load symbol \"%s\": %s",
-                            symbol, dlerrstr);
+                            "Failed to load symbol \"%s\": %s", symbol,
+                            dlerrstr);
                 rd_free(dlerrstr);
         }
         return func;
 }
src/rddl.h (10 changed lines)
@@ -33,9 +33,11 @@

 typedef void rd_dl_hnd_t;

-rd_dl_hnd_t *rd_dl_open (const char *path, char *errstr, size_t errstr_size);
-void rd_dl_close (rd_dl_hnd_t *handle);
-void *rd_dl_sym (rd_dl_hnd_t *handle, const char *symbol,
-                 char *errstr, size_t errstr_size);
+rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size);
+void rd_dl_close(rd_dl_hnd_t *handle);
+void *rd_dl_sym(rd_dl_hnd_t *handle,
+                const char *symbol,
+                char *errstr,
+                size_t errstr_size);

 #endif /* _RDDL_H */
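
A hypothetical plugin-loading sketch built on these three declarations; the library path and symbol name are invented. Note that rd_dl_open() retries with the platform shared-library extension, per the rddl.c hunk above:

/* rd_dl usage sketch; "/usr/lib/myplugin" and "plugin_init" are invented. */
static void load_plugin_example(void) {
        char errstr[256];
        rd_dl_hnd_t *hnd;
        void (*init_fn)(void);

        hnd = rd_dl_open("/usr/lib/myplugin", errstr, sizeof(errstr));
        if (!hnd)
                return; /* errstr holds the dlopen()/LoadLibrary() error */

        init_fn = (void (*)(void))rd_dl_sym(hnd, "plugin_init", errstr,
                                            sizeof(errstr));
        if (init_fn)
                init_fn();

        rd_dl_close(hnd);
}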
src/rdendian.h (111 changed lines)
@@ -41,59 +41,59 @@
  */

 #ifdef __FreeBSD__
-#include <sys/endian.h>
+#include <sys/endian.h>
 #elif defined __GLIBC__
-#include <endian.h>
-#ifndef be64toh
-/* Support older glibc (<2.9) which lack be64toh */
-#include <byteswap.h>
-#if __BYTE_ORDER == __BIG_ENDIAN
-#define be16toh(x) (x)
-#define be32toh(x) (x)
-#define be64toh(x) (x)
-#define le64toh(x) __bswap_64 (x)
-#define le32toh(x) __bswap_32 (x)
-#else
-#define be16toh(x) __bswap_16 (x)
-#define be32toh(x) __bswap_32 (x)
-#define be64toh(x) __bswap_64 (x)
-#define le64toh(x) (x)
-#define le32toh(x) (x)
-#endif
-#endif
+#include <endian.h>
+#ifndef be64toh
+/* Support older glibc (<2.9) which lack be64toh */
+#include <byteswap.h>
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define be16toh(x) (x)
+#define be32toh(x) (x)
+#define be64toh(x) (x)
+#define le64toh(x) __bswap_64(x)
+#define le32toh(x) __bswap_32(x)
+#else
+#define be16toh(x) __bswap_16(x)
+#define be32toh(x) __bswap_32(x)
+#define be64toh(x) __bswap_64(x)
+#define le64toh(x) (x)
+#define le32toh(x) (x)
+#endif
+#endif

 #elif defined __CYGWIN__
-#include <endian.h>
+#include <endian.h>
 #elif defined __BSD__
-#include <sys/endian.h>
+#include <sys/endian.h>
 #elif defined __sun
-#include <sys/byteorder.h>
-#include <sys/isa_defs.h>
+#include <sys/byteorder.h>
+#include <sys/isa_defs.h>
 #define __LITTLE_ENDIAN 1234
-#define __BIG_ENDIAN 4321
+#define __BIG_ENDIAN    4321
 #ifdef _BIG_ENDIAN
 #define __BYTE_ORDER __BIG_ENDIAN
-#define be64toh(x) (x)
-#define be32toh(x) (x)
-#define be16toh(x) (x)
-#define le16toh(x) ((uint16_t)BSWAP_16(x))
-#define le32toh(x) BSWAP_32(x)
-#define le64toh(x) BSWAP_64(x)
-# else
+#define be64toh(x) (x)
+#define be32toh(x) (x)
+#define be16toh(x) (x)
+#define le16toh(x) ((uint16_t)BSWAP_16(x))
+#define le32toh(x) BSWAP_32(x)
+#define le64toh(x) BSWAP_64(x)
+#else
 #define __BYTE_ORDER __LITTLE_ENDIAN
-#define be64toh(x) BSWAP_64(x)
-#define be32toh(x) ntohl(x)
-#define be16toh(x) ntohs(x)
-#define le16toh(x) (x)
-#define le32toh(x) (x)
-#define le64toh(x) (x)
-#define htole16(x) (x)
-#define htole64(x) (x)
+#define be64toh(x) BSWAP_64(x)
+#define be32toh(x) ntohl(x)
+#define be16toh(x) ntohs(x)
+#define le16toh(x) (x)
+#define le32toh(x) (x)
+#define le64toh(x) (x)
+#define htole16(x) (x)
+#define htole64(x) (x)
 #endif /* __sun */

 #elif defined __APPLE__
-#include <machine/endian.h>
-#include <libkern/OSByteOrder.h>
+#include <machine/endian.h>
+#include <libkern/OSByteOrder.h>
 #if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN
 #define be64toh(x) (x)
 #define be32toh(x) (x)

@@ -120,26 +120,23 @@
 #define le32toh(x) (x)
 #define le64toh(x) (x)

-#elif defined _AIX /* AIX is always big endian */
+#elif defined _AIX /* AIX is always big endian */
 #define be64toh(x) (x)
 #define be32toh(x) (x)
 #define be16toh(x) (x)
-#define le32toh(x) \
-        ((((x) & 0xff) << 24) | \
-         (((x) & 0xff00) << 8) | \
-         (((x) & 0xff0000) >> 8) | \
-         (((x) & 0xff000000) >> 24))
-#define le64toh(x) \
-        ((((x) & 0x00000000000000ffL) << 56) | \
-         (((x) & 0x000000000000ff00L) << 40) | \
-         (((x) & 0x0000000000ff0000L) << 24) | \
-         (((x) & 0x00000000ff000000L) << 8) | \
-         (((x) & 0x000000ff00000000L) >> 8) | \
-         (((x) & 0x0000ff0000000000L) >> 24) | \
-         (((x) & 0x00ff000000000000L) >> 40) | \
-         (((x) & 0xff00000000000000L) >> 56))
+#define le32toh(x)                                                             \
+        ((((x)&0xff) << 24) | (((x)&0xff00) << 8) | (((x)&0xff0000) >> 8) |    \
+         (((x)&0xff000000) >> 24))
+#define le64toh(x)                                                             \
+        ((((x)&0x00000000000000ffL) << 56) |                                   \
+         (((x)&0x000000000000ff00L) << 40) |                                   \
+         (((x)&0x0000000000ff0000L) << 24) |                                   \
+         (((x)&0x00000000ff000000L) << 8) | (((x)&0x000000ff00000000L) >> 8) | \
+         (((x)&0x0000ff0000000000L) >> 24) |                                   \
+         (((x)&0x00ff000000000000L) >> 40) |                                   \
+         (((x)&0xff00000000000000L) >> 56))
 #else
-#include <endian.h>
+#include <endian.h>
 #endif
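
As a usage sketch, parsing a big-endian (network order) 32-bit field with the be32toh() mapping this header selects per platform:

/* Endianness usage sketch; read_be32 is an invented helper name. */
#include <stdint.h>
#include <string.h>

static uint32_t read_be32(const unsigned char *wire) {
        uint32_t v;
        memcpy(&v, wire, sizeof(v)); /* avoid unaligned access */
        return be32toh(v);           /* a no-op on big-endian hosts */
}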
src/rdfloat.h
@@ -37,9 +37,8 @@
  * More info:
  *  http://docs.sun.com/source/806-3568/ncg_goldberg.html
  */
-static RD_INLINE RD_UNUSED
-int rd_dbl_eq0 (double a, double b, double prec) {
-        return fabs(a - b) < prec;
+static RD_INLINE RD_UNUSED int rd_dbl_eq0(double a, double b, double prec) {
+        return fabs(a - b) < prec;
 }

 /* A default 'good' double-equality precision value.

@@ -53,16 +52,16 @@ int rd_dbl_eq0 (double a, double b, double prec) {
  * rd_dbl_eq(a,b)
  * Same as rd_dbl_eq0() above but with a predefined 'good' precision.
  */
-#define rd_dbl_eq(a,b) rd_dbl_eq0(a,b,RD_DBL_EPSILON)
+#define rd_dbl_eq(a, b) rd_dbl_eq0(a, b, RD_DBL_EPSILON)

 /**
  * rd_dbl_ne(a,b)
  * Same as rd_dbl_eq() above but with reversed logic: not-equal.
  */
-#define rd_dbl_ne(a,b) (!rd_dbl_eq0(a,b,RD_DBL_EPSILON))
+#define rd_dbl_ne(a, b) (!rd_dbl_eq0(a, b, RD_DBL_EPSILON))

 /**
  * rd_dbl_zero(a)
  * Checks if the double `a' is zero (or close enough).
  */
-#define rd_dbl_zero(a) rd_dbl_eq0(a,0.0,RD_DBL_EPSILON)
+#define rd_dbl_zero(a) rd_dbl_eq0(a, 0.0, RD_DBL_EPSILON)
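
Why the epsilon comparison exists: 0.1 + 0.2 is not bitwise equal to 0.3 in binary floating point. A sketch, assuming the default RD_DBL_EPSILON is coarser than that rounding error (roughly 5.5e-17):

/* Epsilon-comparison sketch using the macros above. */
#include <assert.h>

static void dbl_eq_example(void) {
        double sum = 0.1 + 0.2;     /* == 0.30000000000000004 in a double */
        assert(sum != 0.3);         /* bitwise comparison fails */
        assert(rd_dbl_eq(sum, 0.3)); /* epsilon comparison succeeds */
        assert(rd_dbl_zero(sum - 0.3));
}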
src/rdfnv1a.c
@@ -34,14 +34,15 @@
 /* FNV-1a by Glenn Fowler, Landon Curt Noll, and Kiem-Phong Vo
  *
  * Based on http://www.isthe.com/chongo/src/fnv/hash_32a.c
- * with librdkafka modifications to match the Sarama default Producer implementation,
- * as seen here: https://github.com/Shopify/sarama/blob/master/partitioner.go#L203
- * Note that this implementation is only compatible with Sarama's default
+ * with librdkafka modifications to match the Sarama default Producer
+ * implementation, as seen here:
+ * https://github.com/Shopify/sarama/blob/master/partitioner.go#L203 Note that
+ * this implementation is only compatible with Sarama's default
  * NewHashPartitioner and not NewReferenceHashPartitioner.
  */
-uint32_t rd_fnv1a (const void *key, size_t len) {
-        const uint32_t prime = 0x01000193; // 16777619
-        const uint32_t offset = 0x811C9DC5; // 2166136261
+uint32_t rd_fnv1a(const void *key, size_t len) {
+        const uint32_t prime  = 0x01000193; // 16777619
+        const uint32_t offset = 0x811C9DC5; // 2166136261
         size_t i;
         int32_t h = offset;

@@ -52,7 +53,8 @@ uint32_t rd_fnv1a (const void *key, size_t len) {
                 h *= prime;
         }

-        /* Take absolute value to match the Sarama NewHashPartitioner implementation */
+        /* Take absolute value to match the Sarama NewHashPartitioner
+         * implementation */
         if (h < 0) {
                 h = -h;
         }

@@ -64,45 +66,44 @@ uint32_t rd_fnv1a (const void *key, size_t len) {
 /**
  * @brief Unittest for rd_fnv1a()
  */
-int unittest_fnv1a (void) {
+int unittest_fnv1a(void) {
         const char *short_unaligned = "1234";
-        const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs";
-        const char *keysToTest[] = {
-                "kafka",
-                "giberish123456789",
-                short_unaligned,
-                short_unaligned+1,
-                short_unaligned+2,
-                short_unaligned+3,
-                unaligned,
-                unaligned+1,
-                unaligned+2,
-                unaligned+3,
-                "",
-                NULL,
+        const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs";
+        const char *keysToTest[] = {
+            "kafka",
+            "giberish123456789",
+            short_unaligned,
+            short_unaligned + 1,
+            short_unaligned + 2,
+            short_unaligned + 3,
+            unaligned,
+            unaligned + 1,
+            unaligned + 2,
+            unaligned + 3,
+            "",
+            NULL,
         };

         // Acquired via https://play.golang.org/p/vWIhw3zJINA
         const int32_t golang_hashfnv_results[] = {
-                0xd33c4e1,  // kafka
-                0x77a58295, // giberish123456789
-                0x23bdd03,  // short_unaligned
-                0x2dea3cd2, // short_unaligned+1
-                0x740fa83e, // short_unaligned+2
-                0x310ca263, // short_unaligned+3
-                0x65cbd69c, // unaligned
-                0x6e49c79a, // unaligned+1
-                0x69eed356, // unaligned+2
-                0x6abcc023, // unaligned+3
-                0x7ee3623b, // ""
-                0x7ee3623b, // NULL
+            0xd33c4e1,  // kafka
+            0x77a58295, // giberish123456789
+            0x23bdd03,  // short_unaligned
+            0x2dea3cd2, // short_unaligned+1
+            0x740fa83e, // short_unaligned+2
+            0x310ca263, // short_unaligned+3
+            0x65cbd69c, // unaligned
+            0x6e49c79a, // unaligned+1
+            0x69eed356, // unaligned+2
+            0x6abcc023, // unaligned+3
+            0x7ee3623b, // ""
+            0x7ee3623b, // NULL
         };

         size_t i;
         for (i = 0; i < RD_ARRAYSIZE(keysToTest); i++) {
-                uint32_t h = rd_fnv1a(keysToTest[i],
-                                      keysToTest[i] ?
-                                      strlen(keysToTest[i]) : 0);
+                uint32_t h = rd_fnv1a(
+                    keysToTest[i], keysToTest[i] ? strlen(keysToTest[i]) : 0);
                 RD_UT_ASSERT((int32_t)h == golang_hashfnv_results[i],
                              "Calculated FNV-1a hash 0x%x for \"%s\", "
                              "expected 0x%x",
src/rdfnv1a.h
|
|||
#ifndef __RDFNV1A___H__
|
||||
#define __RDFNV1A___H__
|
||||
|
||||
uint32_t rd_fnv1a (const void *key, size_t len);
|
||||
int unittest_fnv1a (void);
|
||||
uint32_t rd_fnv1a(const void *key, size_t len);
|
||||
int unittest_fnv1a(void);
|
||||
|
||||
#endif // __RDFNV1A___H__
|
||||
#endif // __RDFNV1A___H__
|
||||
|
|
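
A standalone sketch of the FNV-1a core defined in rdfnv1a.c above: xor each input byte into the hash, then multiply by the 32-bit FNV prime. (The library version additionally takes the absolute value of the signed result for Sarama compatibility.)

/* Plain 32-bit FNV-1a sketch; not the librdkafka function itself. */
#include <stddef.h>
#include <stdint.h>

static uint32_t fnv1a_32(const void *key, size_t len) {
        const unsigned char *p = key;
        uint32_t h = 0x811C9DC5; /* offset basis, 2166136261 */
        size_t i;

        for (i = 0; i < len; i++) {
                h ^= p[i];
                h *= 0x01000193; /* FNV prime, 16777619 */
        }
        return h;
}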
src/rdgz.c (162 changed lines)
@ -3,24 +3,24 @@
|
|||
*
|
||||
* Copyright (c) 2012, Magnus Edenhill
|
||||
* All rights reserved.
|
||||
*
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
* this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.

@ -32,87 +32,89 @@

#include <zlib.h>

#define RD_GZ_CHUNK 262144

void *rd_gz_decompress (const void *compressed, int compressed_len,
uint64_t *decompressed_lenp) {
int pass = 1;
char *decompressed = NULL;
void *rd_gz_decompress(const void *compressed,
int compressed_len,
uint64_t *decompressed_lenp) {
int pass = 1;
char *decompressed = NULL;

/* First pass (1): calculate decompressed size.
* (pass-1 is skipped if *decompressed_lenp is
* non-zero).
* Second pass (2): perform actual decompression.
*/

if (*decompressed_lenp != 0LLU)
pass++;

for (; pass <= 2 ; pass++) {
z_stream strm = RD_ZERO_INIT;
char buf[512];
char *p;
int len;
int r;

if ((r = inflateInit2(&strm, 15+32)) != Z_OK)
goto fail;
for (; pass <= 2; pass++) {
z_stream strm = RD_ZERO_INIT;
char buf[512];
char *p;
int len;
int r;

strm.next_in = (void *)compressed;
strm.avail_in = compressed_len;
if ((r = inflateInit2(&strm, 15 + 32)) != Z_OK)
goto fail;

if (pass == 1) {
/* Use dummy output buffer */
p = buf;
len = sizeof(buf);
} else {
/* Use real output buffer */
p = decompressed;
len = (int)*decompressed_lenp;
}
strm.next_in = (void *)compressed;
strm.avail_in = compressed_len;

do {
strm.next_out = (unsigned char *)p;
strm.avail_out = len;
if (pass == 1) {
/* Use dummy output buffer */
p = buf;
len = sizeof(buf);
} else {
/* Use real output buffer */
p = decompressed;
len = (int)*decompressed_lenp;
}

r = inflate(&strm, Z_NO_FLUSH);
switch (r) {
case Z_STREAM_ERROR:
case Z_NEED_DICT:
case Z_DATA_ERROR:
case Z_MEM_ERROR:
inflateEnd(&strm);
goto fail;
}
do {
strm.next_out = (unsigned char *)p;
strm.avail_out = len;

if (pass == 2) {
/* Advance output pointer (in pass 2). */
p += len - strm.avail_out;
len -= len - strm.avail_out;
}
r = inflate(&strm, Z_NO_FLUSH);
switch (r) {
case Z_STREAM_ERROR:
case Z_NEED_DICT:
case Z_DATA_ERROR:
case Z_MEM_ERROR:
inflateEnd(&strm);
goto fail;
}

} while (strm.avail_out == 0 && r != Z_STREAM_END);
if (pass == 2) {
/* Advance output pointer (in pass 2). */
p += len - strm.avail_out;
len -= len - strm.avail_out;
}

} while (strm.avail_out == 0 && r != Z_STREAM_END);

if (pass == 1) {
*decompressed_lenp = strm.total_out;
if (!(decompressed = rd_malloc((size_t)(*decompressed_lenp)+1))) {
inflateEnd(&strm);
return NULL;
}
/* For convenience of the caller we nul-terminate
* the buffer. If it happens to be a string there
* is no need for extra copies. */
decompressed[*decompressed_lenp] = '\0';
}
if (pass == 1) {
*decompressed_lenp = strm.total_out;
if (!(decompressed = rd_malloc(
(size_t)(*decompressed_lenp) + 1))) {
inflateEnd(&strm);
return NULL;
}
/* For convenience of the caller we nul-terminate
* the buffer. If it happens to be a string there
* is no need for extra copies. */
decompressed[*decompressed_lenp] = '\0';
}

inflateEnd(&strm);
}

return decompressed;

fail:
if (decompressed)
rd_free(decompressed);
return NULL;
}
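The two-pass contract above is driven entirely by the value the caller leaves in *decompressed_lenp: zero requests the size-calculating first pass, non-zero skips straight to decompression. A minimal caller sketch, not part of this commit; gzbuf and gzbuf_len are placeholder inputs:

/* Pass a zero length to request both passes. */
uint64_t dlen = 0;
void *out = rd_gz_decompress(gzbuf, (int)gzbuf_len, &dlen);
if (!out) {
        fprintf(stderr, "decompression failed\n");
} else {
        /* The buffer is nul-terminated for convenience, see above. */
        printf("decompressed %llu bytes\n", (unsigned long long)dlen);
        rd_free(out); /* allocated with rd_malloc() above */
}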
src/rdgz.h
@ -3,24 +3,24 @@
*
* Copyright (c) 2012, Magnus Edenhill
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.

@ -39,7 +39,8 @@
*
* The decompressed length is returned in '*decompressed_lenp'.
*/
void *rd_gz_decompress (const void *compressed, int compressed_len,
uint64_t *decompressed_lenp);
void *rd_gz_decompress(const void *compressed,
int compressed_len,
uint64_t *decompressed_lenp);

#endif /* _RDGZ_H_ */
@ -26,7 +26,7 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/

/*
* librdkafka - Apache Kafka C library

@ -78,12 +78,13 @@
#include "rdunittest.h"
#include "rdfloat.h"

void rd_hdr_histogram_destroy (rd_hdr_histogram_t *hdr) {
void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr) {
rd_free(hdr);
}

rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue,
int significantFigures) {
rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue,
int64_t maxValue,
int significantFigures) {
rd_hdr_histogram_t *hdr;
int64_t largestValueWithSingleUnitResolution;
int32_t subBucketCountMagnitude;

@ -101,22 +102,21 @@ rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue,
return NULL;

largestValueWithSingleUnitResolution =
(int64_t)(2.0 * pow(10.0, (double)significantFigures));

subBucketCountMagnitude =
(int32_t)ceil(
log2((double)largestValueWithSingleUnitResolution));
(int32_t)ceil(log2((double)largestValueWithSingleUnitResolution));

subBucketHalfCountMagnitude = RD_MAX(subBucketCountMagnitude, 1) - 1;

unitMagnitude = (int32_t)RD_MAX(floor(log2((double)minValue)), 0);

subBucketCount = (int32_t)pow(2,
(double)subBucketHalfCountMagnitude+1.0);
subBucketCount =
(int32_t)pow(2, (double)subBucketHalfCountMagnitude + 1.0);

subBucketHalfCount = subBucketCount / 2;

subBucketMask = (int64_t)(subBucketCount-1) << unitMagnitude;
subBucketMask = (int64_t)(subBucketCount - 1) << unitMagnitude;

/* Determine exponent range needed to support the trackable
* value with no overflow: */

@ -127,24 +127,24 @@ rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue,
}

bucketCount = bucketsNeeded;
countsLen = (bucketCount + 1) * (subBucketCount / 2);
hdr = rd_calloc(1, sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen));
hdr->counts = (int64_t *)(hdr+1);
hdr->counts = (int64_t *)(hdr + 1);
hdr->allocatedSize = sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen);

hdr->lowestTrackableValue = minValue;
hdr->highestTrackableValue = maxValue;
hdr->unitMagnitude = unitMagnitude;
hdr->significantFigures = significantFigures;
hdr->subBucketHalfCountMagnitude = subBucketHalfCountMagnitude;
hdr->subBucketHalfCount = subBucketHalfCount;
hdr->subBucketMask = subBucketMask;
hdr->subBucketCount = subBucketCount;
hdr->bucketCount = bucketCount;
hdr->countsLen = countsLen;
hdr->totalCount = 0;
hdr->lowestOutOfRange = minValue;
hdr->highestOutOfRange = maxValue;

return hdr;
}
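To make the sizing arithmetic above concrete, here is the worked math for the rd_hdr_histogram_new(1, 10000000, 3) call the unit tests below rely on (values derived by hand from the formulas above, not stated in the commit):

rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
/* significantFigures = 3:
 * largestValueWithSingleUnitResolution = 2.0 * 10^3 = 2000
 * subBucketCountMagnitude     = ceil(log2(2000)) = 11
 * subBucketHalfCountMagnitude = 11 - 1 = 10
 * subBucketCount = 2^(10+1) = 2048, subBucketHalfCount = 1024
 * minValue = 1: unitMagnitude = max(floor(log2(1)), 0) = 0
 * subBucketMask = (2048 - 1) << 0 = 2047
 */
rd_hdr_histogram_destroy(hdr);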
@ -152,32 +152,32 @@ rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue,
/**
* @brief Deletes all recorded values and resets histogram.
*/
void rd_hdr_histogram_reset (rd_hdr_histogram_t *hdr) {
void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr) {
int32_t i;
hdr->totalCount = 0;
for (i = 0 ; i < hdr->countsLen ; i++)
for (i = 0; i < hdr->countsLen; i++)
hdr->counts[i] = 0;
}


static RD_INLINE int32_t
rd_hdr_countsIndex (const rd_hdr_histogram_t *hdr,
int32_t bucketIdx, int32_t subBucketIdx) {
int32_t bucketBaseIdx = (bucketIdx + 1) <<
hdr->subBucketHalfCountMagnitude;
static RD_INLINE int32_t rd_hdr_countsIndex(const rd_hdr_histogram_t *hdr,
int32_t bucketIdx,
int32_t subBucketIdx) {
int32_t bucketBaseIdx = (bucketIdx + 1)
<< hdr->subBucketHalfCountMagnitude;
int32_t offsetInBucket = subBucketIdx - hdr->subBucketHalfCount;
return bucketBaseIdx + offsetInBucket;
}

static RD_INLINE int64_t
rd_hdr_getCountAtIndex (const rd_hdr_histogram_t *hdr,
int32_t bucketIdx, int32_t subBucketIdx) {
static RD_INLINE int64_t rd_hdr_getCountAtIndex(const rd_hdr_histogram_t *hdr,
int32_t bucketIdx,
int32_t subBucketIdx) {
return hdr->counts[rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx)];
}


static RD_INLINE int64_t bitLen (int64_t x) {
static RD_INLINE int64_t bitLen(int64_t x) {
int64_t n = 0;
for (; x >= 0x8000; x >>= 16)
n += 16;

@ -199,29 +199,30 @@ static RD_INLINE int64_t bitLen (int64_t x) {
}


static RD_INLINE int32_t
rd_hdr_getBucketIndex (const rd_hdr_histogram_t *hdr, int64_t v) {
static RD_INLINE int32_t rd_hdr_getBucketIndex(const rd_hdr_histogram_t *hdr,
int64_t v) {
int64_t pow2Ceiling = bitLen(v | hdr->subBucketMask);
return (int32_t)(pow2Ceiling - (int64_t)hdr->unitMagnitude -
(int64_t)(hdr->subBucketHalfCountMagnitude+1));
(int64_t)(hdr->subBucketHalfCountMagnitude + 1));
}

static RD_INLINE int32_t
rd_hdr_getSubBucketIdx (const rd_hdr_histogram_t *hdr, int64_t v, int32_t idx) {
static RD_INLINE int32_t rd_hdr_getSubBucketIdx(const rd_hdr_histogram_t *hdr,
int64_t v,
int32_t idx) {
return (int32_t)(v >> ((int64_t)idx + (int64_t)hdr->unitMagnitude));
}

static RD_INLINE int64_t
rd_hdr_valueFromIndex (const rd_hdr_histogram_t *hdr,
int32_t bucketIdx, int32_t subBucketIdx) {
return (int64_t)subBucketIdx <<
((int64_t)bucketIdx + hdr->unitMagnitude);
static RD_INLINE int64_t rd_hdr_valueFromIndex(const rd_hdr_histogram_t *hdr,
int32_t bucketIdx,
int32_t subBucketIdx) {
return (int64_t)subBucketIdx
<< ((int64_t)bucketIdx + hdr->unitMagnitude);
}

static RD_INLINE int64_t
rd_hdr_sizeOfEquivalentValueRange (const rd_hdr_histogram_t *hdr, int64_t v) {
int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
rd_hdr_sizeOfEquivalentValueRange(const rd_hdr_histogram_t *hdr, int64_t v) {
int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
int32_t adjustedBucket = bucketIdx;
if (unlikely(subBucketIdx >= hdr->subBucketCount))
adjustedBucket++;

@ -229,35 +230,35 @@ rd_hdr_sizeOfEquivalentValueRange (const rd_hdr_histogram_t *hdr, int64_t v) {
}

static RD_INLINE int64_t
rd_hdr_lowestEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) {
int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
rd_hdr_lowestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
return rd_hdr_valueFromIndex(hdr, bucketIdx, subBucketIdx);
}


static RD_INLINE int64_t
rd_hdr_nextNonEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) {
rd_hdr_nextNonEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
return rd_hdr_lowestEquivalentValue(hdr, v) +
rd_hdr_sizeOfEquivalentValueRange(hdr, v);
}


static RD_INLINE int64_t
rd_hdr_highestEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) {
rd_hdr_highestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
return rd_hdr_nextNonEquivalentValue(hdr, v) - 1;
}

static RD_INLINE int64_t
rd_hdr_medianEquivalentValue (const rd_hdr_histogram_t *hdr, int64_t v) {
rd_hdr_medianEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
return rd_hdr_lowestEquivalentValue(hdr, v) +
(rd_hdr_sizeOfEquivalentValueRange(hdr, v) >> 1);
}


static RD_INLINE int32_t
rd_hdr_countsIndexFor (const rd_hdr_histogram_t *hdr, int64_t v) {
int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
static RD_INLINE int32_t rd_hdr_countsIndexFor(const rd_hdr_histogram_t *hdr,
int64_t v) {
int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
return rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx);
}

@ -274,9 +275,10 @@ typedef struct rd_hdr_iter_s {
int64_t highestEquivalentValue;
} rd_hdr_iter_t;

#define RD_HDR_ITER_INIT(hdr) { .hdr = hdr, .subBucketIdx = -1 }
#define RD_HDR_ITER_INIT(hdr) \
{ .hdr = hdr, .subBucketIdx = -1 }

static int rd_hdr_iter_next (rd_hdr_iter_t *it) {
static int rd_hdr_iter_next(rd_hdr_iter_t *it) {
const rd_hdr_histogram_t *hdr = it->hdr;

if (unlikely(it->countToIdx >= hdr->totalCount))

@ -291,24 +293,22 @@ static int rd_hdr_iter_next (rd_hdr_iter_t *it) {
if (unlikely(it->bucketIdx >= hdr->bucketCount))
return 0;

it->countAtIdx = rd_hdr_getCountAtIndex(hdr,
it->bucketIdx,
it->subBucketIdx);
it->countAtIdx =
rd_hdr_getCountAtIndex(hdr, it->bucketIdx, it->subBucketIdx);
it->countToIdx += it->countAtIdx;
it->valueFromIdx = rd_hdr_valueFromIndex(hdr,
it->bucketIdx,
it->subBucketIdx);
it->valueFromIdx =
rd_hdr_valueFromIndex(hdr, it->bucketIdx, it->subBucketIdx);
it->highestEquivalentValue =
rd_hdr_highestEquivalentValue(hdr, it->valueFromIdx);

return 1;
}


double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr) {
double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr) {
double mean;
double geometricDevTotal = 0.0;
rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);

if (hdr->totalCount == 0)
return 0;

@ -322,8 +322,9 @@ double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr) {
if (it.countAtIdx == 0)
continue;

dev = (double)rd_hdr_medianEquivalentValue(
hdr, it.valueFromIdx) - mean;
dev =
(double)rd_hdr_medianEquivalentValue(hdr, it.valueFromIdx) -
mean;
geometricDevTotal += (dev * dev) * (double)it.countAtIdx;
}
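Every statistic in this file (stddev above, max/min/mean/quantile below) is built on the same rd_hdr_iter_next() walk: visit each (bucketIdx, subBucketIdx) cell, skip empty cells, and weight the cell's representative value by its count. A condensed sketch of that pattern (the iterator type and RD_HDR_ITER_INIT are file-local, so this only applies inside rdhdrhistogram.c):

rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
int64_t seen = 0;
while (rd_hdr_iter_next(&it)) {
        if (it.countAtIdx == 0)
                continue; /* empty cell */
        /* it.valueFromIdx .. it.highestEquivalentValue is the value
         * range this cell stands for. */
        seen += it.countAtIdx;
}
/* After a full walk, seen == hdr->totalCount. */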
@ -334,8 +335,8 @@ double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr) {
/**
* @returns the approximate maximum recorded value.
*/
int64_t rd_hdr_histogram_max (const rd_hdr_histogram_t *hdr) {
int64_t vmax = 0;
int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr) {
int64_t vmax = 0;
rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);

while (rd_hdr_iter_next(&it)) {

@ -348,8 +349,8 @@ int64_t rd_hdr_histogram_max (const rd_hdr_histogram_t *hdr) {
/**
* @returns the approximate minimum recorded value.
*/
int64_t rd_hdr_histogram_min (const rd_hdr_histogram_t *hdr) {
int64_t vmin = 0;
int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr) {
int64_t vmin = 0;
rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);

while (rd_hdr_iter_next(&it)) {

@ -364,8 +365,8 @@ int64_t rd_hdr_histogram_min (const rd_hdr_histogram_t *hdr) {
/**
* @returns the approximate arithmetic mean of the recorded values.
*/
double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr) {
int64_t total = 0;
double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr) {
int64_t total = 0;
rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);

if (hdr->totalCount == 0)

@ -373,9 +374,8 @@ double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr) {

while (rd_hdr_iter_next(&it)) {
if (it.countAtIdx != 0)
total += it.countAtIdx *
rd_hdr_medianEquivalentValue(hdr,
it.valueFromIdx);
total += it.countAtIdx * rd_hdr_medianEquivalentValue(
hdr, it.valueFromIdx);
}
return (double)total / (double)hdr->totalCount;
}

@ -388,7 +388,7 @@ double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr) {
* @returns 1 if value was recorded or 0 if value is out of range.
*/

int rd_hdr_histogram_record (rd_hdr_histogram_t *hdr, int64_t v) {
int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v) {
int32_t idx = rd_hdr_countsIndexFor(hdr, v);

if (idx < 0 || hdr->countsLen <= idx) {

@ -410,7 +410,7 @@ int rd_hdr_histogram_record (rd_hdr_histogram_t *hdr, int64_t v) {
/**
* @returns the recorded value at the given quantile (0..100).
*/
int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q) {
int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q) {
int64_t total = 0;
int64_t countAtPercentile;
rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);

@ -419,13 +419,13 @@ int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q) {
q = 100.0;

countAtPercentile =
(int64_t)(((q / 100.0) * (double)hdr->totalCount) + 0.5);

while (rd_hdr_iter_next(&it)) {
total += it.countAtIdx;
if (total >= countAtPercentile)
return rd_hdr_highestEquivalentValue(
hdr, it.valueFromIdx);
return rd_hdr_highestEquivalentValue(hdr,
it.valueFromIdx);
}

return 0;
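record() and quantile() together are the whole external workflow of this histogram. A minimal hedged usage sketch (the latency values are made up):

rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 1000000, 3);

/* record() returns 0 when the value is out of the trackable range. */
rd_hdr_histogram_record(hdr, 250);
rd_hdr_histogram_record(hdr, 900);
rd_hdr_histogram_record(hdr, 4300);

/* The quantile argument is a percentage in 0..100. */
printf("p99 = %" PRId64 "\n", rd_hdr_histogram_quantile(hdr, 99.0));

rd_hdr_histogram_destroy(hdr);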
@ -444,55 +444,50 @@ int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q) {
/**
* @returns 0 on success or 1 on failure.
*/
static int ut_high_sigfig (void) {
static int ut_high_sigfig(void) {
rd_hdr_histogram_t *hdr;
const int64_t input[] = {
459876, 669187, 711612, 816326, 931423,
1033197, 1131895, 2477317, 3964974, 12718782,
};
size_t i;
int64_t v;
const int64_t exp = 1048575;

hdr = rd_hdr_histogram_new(459876, 12718782, 5);
for (i = 0 ; i < RD_ARRAYSIZE(input) ; i++) {
for (i = 0; i < RD_ARRAYSIZE(input); i++) {
/* Ignore errors (some should fail) */
rd_hdr_histogram_record(hdr, input[i]);
}

v = rd_hdr_histogram_quantile(hdr, 50);
RD_UT_ASSERT(v == exp, "Median is %"PRId64", expected %"PRId64,
v, exp);
RD_UT_ASSERT(v == exp, "Median is %" PRId64 ", expected %" PRId64, v,
exp);

rd_hdr_histogram_destroy(hdr);
RD_UT_PASS();
}

static int ut_quantile (void) {
static int ut_quantile(void) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
size_t i;
const struct {
double q;
int64_t v;
} exp[] = {
{ 50, 500223 },
{ 75, 750079 },
{ 90, 900095 },
{ 95, 950271 },
{ 99, 990207 },
{ 99.9, 999423 },
{ 99.99, 999935 },
{50, 500223}, {75, 750079}, {90, 900095}, {95, 950271},
{99, 990207}, {99.9, 999423}, {99.99, 999935},
};

for (i = 0 ; i < 1000000 ; i++) {
for (i = 0; i < 1000000; i++) {
int r = rd_hdr_histogram_record(hdr, (int64_t)i);
RD_UT_ASSERT(r, "record(%"PRId64") failed\n", (int64_t)i);
RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i);
}

for (i = 0 ; i < RD_ARRAYSIZE(exp) ; i++) {
for (i = 0; i < RD_ARRAYSIZE(exp); i++) {
int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q);
RD_UT_ASSERT(v == exp[i].v,
"P%.2f is %"PRId64", expected %"PRId64,
"P%.2f is %" PRId64 ", expected %" PRId64,
exp[i].q, v, exp[i].v);
}

@ -500,36 +495,36 @@ static int ut_quantile (void) {
RD_UT_PASS();
}

static int ut_mean (void) {
static int ut_mean(void) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
size_t i;
const double exp = 500000.013312;
double v;

for (i = 0 ; i < 1000000 ; i++) {
for (i = 0; i < 1000000; i++) {
int r = rd_hdr_histogram_record(hdr, (int64_t)i);
RD_UT_ASSERT(r, "record(%"PRId64") failed\n", (int64_t)i);
RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i);
}

v = rd_hdr_histogram_mean(hdr);
RD_UT_ASSERT(rd_dbl_eq0(v, exp, 0.0000001),
"Mean is %f, expected %f", v, exp);
RD_UT_ASSERT(rd_dbl_eq0(v, exp, 0.0000001), "Mean is %f, expected %f",
v, exp);

rd_hdr_histogram_destroy(hdr);
RD_UT_PASS();
}


static int ut_stddev (void) {
static int ut_stddev(void) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
size_t i;
const double exp = 288675.140368;
const double epsilon = 0.000001;
double v;

for (i = 0 ; i < 1000000 ; i++) {
for (i = 0; i < 1000000; i++) {
int r = rd_hdr_histogram_record(hdr, (int64_t)i);
RD_UT_ASSERT(r, "record(%"PRId64") failed\n", (int64_t)i);
RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i);
}

v = rd_hdr_histogram_stddev(hdr);

@ -541,19 +536,19 @@ static int ut_stddev (void) {
RD_UT_PASS();
}

static int ut_totalcount (void) {
static int ut_totalcount(void) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
int64_t i;

for (i = 0 ; i < 1000000 ; i++) {
for (i = 0; i < 1000000; i++) {
int64_t v;
int r = rd_hdr_histogram_record(hdr, i);
RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i);
RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);

v = hdr->totalCount;
RD_UT_ASSERT(v == i+1,
"total_count is %"PRId64", expected %"PRId64,
v, i+1);
RD_UT_ASSERT(v == i + 1,
"total_count is %" PRId64 ", expected %" PRId64, v,
i + 1);
}

rd_hdr_histogram_destroy(hdr);

@ -561,64 +556,61 @@ static int ut_totalcount (void) {
}


static int ut_max (void) {
static int ut_max(void) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
int64_t i, v;
const int64_t exp = 1000447;

for (i = 0 ; i < 1000000 ; i++) {
for (i = 0; i < 1000000; i++) {
int r = rd_hdr_histogram_record(hdr, i);
RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i);
RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
}

v = rd_hdr_histogram_max(hdr);
RD_UT_ASSERT(v == exp,
"Max is %"PRId64", expected %"PRId64, v, exp);
RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp);

rd_hdr_histogram_destroy(hdr);
RD_UT_PASS();
}

static int ut_min (void) {
static int ut_min(void) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
int64_t i, v;
const int64_t exp = 0;

for (i = 0 ; i < 1000000 ; i++) {
for (i = 0; i < 1000000; i++) {
int r = rd_hdr_histogram_record(hdr, i);
RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i);
RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
}

v = rd_hdr_histogram_min(hdr);
RD_UT_ASSERT(v == exp,
"Min is %"PRId64", expected %"PRId64, v, exp);
RD_UT_ASSERT(v == exp, "Min is %" PRId64 ", expected %" PRId64, v, exp);

rd_hdr_histogram_destroy(hdr);
RD_UT_PASS();
}

static int ut_reset (void) {
static int ut_reset(void) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
int64_t i, v;
const int64_t exp = 0;

for (i = 0 ; i < 1000000 ; i++) {
for (i = 0; i < 1000000; i++) {
int r = rd_hdr_histogram_record(hdr, i);
RD_UT_ASSERT(r, "record(%"PRId64") failed\n", i);
RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
}

rd_hdr_histogram_reset(hdr);

v = rd_hdr_histogram_max(hdr);
RD_UT_ASSERT(v == exp,
"Max is %"PRId64", expected %"PRId64, v, exp);
RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp);

rd_hdr_histogram_destroy(hdr);
RD_UT_PASS();
}


static int ut_nan (void) {
static int ut_nan(void) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 100000, 3);
double v;

@ -632,13 +624,13 @@ static int ut_nan (void) {
}


static int ut_sigfigs (void) {
static int ut_sigfigs(void) {
int sigfigs;

for (sigfigs = 1 ; sigfigs <= 5 ; sigfigs++) {
for (sigfigs = 1; sigfigs <= 5; sigfigs++) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10, sigfigs);
RD_UT_ASSERT(hdr->significantFigures == sigfigs,
"Significant figures is %"PRId64", expected %d",
"Significant figures is %" PRId64 ", expected %d",
hdr->significantFigures, sigfigs);
rd_hdr_histogram_destroy(hdr);
}

@ -646,16 +638,16 @@ static int ut_sigfigs (void) {
RD_UT_PASS();
}

static int ut_minmax_trackable (void) {
const int64_t minval = 2;
const int64_t maxval = 11;
static int ut_minmax_trackable(void) {
const int64_t minval = 2;
const int64_t maxval = 11;
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(minval, maxval, 3);

RD_UT_ASSERT(hdr->lowestTrackableValue == minval,
"lowestTrackableValue is %"PRId64", expected %"PRId64,
"lowestTrackableValue is %" PRId64 ", expected %" PRId64,
hdr->lowestTrackableValue, minval);
RD_UT_ASSERT(hdr->highestTrackableValue == maxval,
"highestTrackableValue is %"PRId64", expected %"PRId64,
"highestTrackableValue is %" PRId64 ", expected %" PRId64,
hdr->highestTrackableValue, maxval);

rd_hdr_histogram_destroy(hdr);

@ -663,41 +655,41 @@ static int ut_minmax_trackable (void) {
}


static int ut_unitmagnitude_overflow (void) {
static int ut_unitmagnitude_overflow(void) {
rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(0, 200, 4);
int r = rd_hdr_histogram_record(hdr, 11);
RD_UT_ASSERT(r, "record(11) failed\n");

rd_hdr_histogram_destroy(hdr);
RD_UT_PASS();
}

static int ut_subbucketmask_overflow (void) {
static int ut_subbucketmask_overflow(void) {
rd_hdr_histogram_t *hdr;
const int64_t input[] = { (int64_t)1e8, (int64_t)2e7, (int64_t)3e7 };
const int64_t input[] = {(int64_t)1e8, (int64_t)2e7, (int64_t)3e7};
const struct {
double q;
int64_t v;
} exp[] = {
{ 50, 33554431 },
{ 83.33, 33554431 },
{ 83.34, 100663295 },
{ 99, 100663295 },
{50, 33554431},
{83.33, 33554431},
{83.34, 100663295},
{99, 100663295},
};
size_t i;

hdr = rd_hdr_histogram_new((int64_t)2e7, (int64_t)1e8, 5);

for (i = 0 ; i < RD_ARRAYSIZE(input) ; i++) {
for (i = 0; i < RD_ARRAYSIZE(input); i++) {
/* Ignore errors (some should fail) */
int r = rd_hdr_histogram_record(hdr, input[i]);
RD_UT_ASSERT(r, "record(%"PRId64") failed\n", input[i]);
RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", input[i]);
}

for (i = 0 ; i < RD_ARRAYSIZE(exp) ; i++) {
for (i = 0; i < RD_ARRAYSIZE(exp); i++) {
int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q);
RD_UT_ASSERT(v == exp[i].v,
"P%.2f is %"PRId64", expected %"PRId64,
"P%.2f is %" PRId64 ", expected %" PRId64,
exp[i].q, v, exp[i].v);
}

@ -706,7 +698,7 @@ static int ut_subbucketmask_overflow (void) {
}


int unittest_rdhdrhistogram (void) {
int unittest_rdhdrhistogram(void) {
int fails = 0;

fails += ut_high_sigfig();
@ -32,33 +32,33 @@

typedef struct rd_hdr_histogram_s {
int64_t lowestTrackableValue;
int64_t highestTrackableValue;
int64_t unitMagnitude;
int64_t significantFigures;
int32_t subBucketHalfCountMagnitude;
int32_t subBucketHalfCount;
int64_t subBucketMask;
int32_t subBucketCount;
int32_t bucketCount;
int32_t countsLen;
int64_t totalCount;
int64_t *counts;
int64_t outOfRangeCount; /**< Number of rejected records due to
* value being out of range. */
int64_t lowestOutOfRange; /**< Lowest value that was out of range.
* Initialized to lowestTrackableValue */
int64_t highestOutOfRange; /**< Highest value that was out of range.
* Initialized to highestTrackableValue */
int32_t allocatedSize; /**< Allocated size of histogram, for
* sigfigs tuning. */
} rd_hdr_histogram_t;


#endif /* !_RDHDR_HISTOGRAM_H_ */

void rd_hdr_histogram_destroy (rd_hdr_histogram_t *hdr);
void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr);

/**
* @brief Create a new Hdr_Histogram.

@ -69,18 +69,19 @@ void rd_hdr_histogram_destroy (rd_hdr_histogram_t *hdr);
*
* @sa rd_hdr_histogram_destroy()
*/
rd_hdr_histogram_t *rd_hdr_histogram_new (int64_t minValue, int64_t maxValue,
int significantFigures);
rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue,
int64_t maxValue,
int significantFigures);

void rd_hdr_histogram_reset (rd_hdr_histogram_t *hdr);
void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr);

int rd_hdr_histogram_record (rd_hdr_histogram_t *hdr, int64_t v);
int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v);

double rd_hdr_histogram_stddev (rd_hdr_histogram_t *hdr);
double rd_hdr_histogram_mean (const rd_hdr_histogram_t *hdr);
int64_t rd_hdr_histogram_max (const rd_hdr_histogram_t *hdr);
int64_t rd_hdr_histogram_min (const rd_hdr_histogram_t *hdr);
int64_t rd_hdr_histogram_quantile (const rd_hdr_histogram_t *hdr, double q);
double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr);
double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr);
int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr);
int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr);
int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q);


int unittest_rdhdrhistogram (void);
int unittest_rdhdrhistogram(void);
src/rdhttp.c
@ -41,16 +41,16 @@
#include "rdhttp.h"

/** Maximum response size, increase as necessary. */
#define RD_HTTP_RESPONSE_SIZE_MAX 1024*1024*500 /* 500kb */
#define RD_HTTP_RESPONSE_SIZE_MAX 1024 * 1024 * 500 /* 500kb */


void rd_http_error_destroy (rd_http_error_t *herr) {
void rd_http_error_destroy(rd_http_error_t *herr) {
rd_free(herr);
}

static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...)
RD_FORMAT(printf, 2, 3);
static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...) {
static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...)
RD_FORMAT(printf, 2, 3);
static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...) {
size_t len = 0;
rd_http_error_t *herr;
va_list ap;

@ -65,8 +65,8 @@ static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...) {
}

/* Use single allocation for both herr and the error string */
herr = rd_malloc(sizeof(*herr) + len + 1);
herr->code = code;
herr->errstr = herr->data;

if (len > 0)

@ -83,21 +83,20 @@ static rd_http_error_t *rd_http_error_new (int code, const char *fmt, ...) {
* @brief Same as rd_http_error_new() but reads the error string from the
* provided buffer.
*/
static rd_http_error_t *rd_http_error_new_from_buf (int code,
const rd_buf_t *rbuf) {
static rd_http_error_t *rd_http_error_new_from_buf(int code,
const rd_buf_t *rbuf) {
rd_http_error_t *herr;
rd_slice_t slice;
size_t len = rd_buf_len(rbuf);

if (len == 0)
return rd_http_error_new(
code,
"Server did not provide an error string");
code, "Server did not provide an error string");


/* Use single allocation for both herr and the error string */
herr = rd_malloc(sizeof(*herr) + len + 1);
herr->code = code;
herr->errstr = herr->data;
rd_slice_init_full(&slice, rbuf);
rd_slice_read(&slice, herr->errstr, len);

@ -106,7 +105,7 @@ static rd_http_error_t *rd_http_error_new_from_buf (int code,
return herr;
}

void rd_http_req_destroy (rd_http_req_t *hreq) {
void rd_http_req_destroy(rd_http_req_t *hreq) {
RD_IF_FREE(hreq->hreq_curl, curl_easy_cleanup);
RD_IF_FREE(hreq->hreq_buf, rd_buf_destroy);
}

@ -116,8 +115,8 @@ void rd_http_req_destroy (rd_http_req_t *hreq) {
* @brief Curl writefunction. Writes the bytes passed from curl
* to the hreq's buffer.
*/
static size_t rd_http_req_write_cb (char *ptr, size_t size, size_t nmemb,
void *userdata) {
static size_t
rd_http_req_write_cb(char *ptr, size_t size, size_t nmemb, void *userdata) {
rd_http_req_t *hreq = (rd_http_req_t *)userdata;

if (unlikely(rd_buf_len(hreq->hreq_buf) + nmemb >
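rd_http_req_write_cb() follows libcurl's standard CURLOPT_WRITEFUNCTION contract: curl hands the callback size * nmemb bytes and treats any return value other than that byte count as an error. A sketch of how such a callback is wired up (rd_http_req_init() in this file does the equivalent internally; this is plain libcurl, and &hreq stands for a prepared rd_http_req_t):

CURL *curl = curl_easy_init();
curl_easy_setopt(curl, CURLOPT_URL, "https://example.com");
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, rd_http_req_write_cb);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &hreq); /* arrives as userdata */
/* curl_easy_perform(curl) then streams the body through the callback. */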
@ -129,7 +128,7 @@ static size_t rd_http_req_write_cb (char *ptr, size_t size, size_t nmemb,
return nmemb;
}

rd_http_error_t *rd_http_req_init (rd_http_req_t *hreq, const char *url) {
rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url) {

memset(hreq, 0, sizeof(*hreq));


@ -157,7 +156,7 @@ rd_http_error_t *rd_http_req_init (rd_http_req_t *hreq, const char *url) {
/**
* @brief Synchronously (blockingly) perform the HTTP operation.
*/
rd_http_error_t *rd_http_req_perform_sync (rd_http_req_t *hreq) {
rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq) {
CURLcode res;
long code = 0;


@ -175,11 +174,11 @@ rd_http_error_t *rd_http_req_perform_sync (rd_http_req_t *hreq) {
}


int rd_http_req_get_code (const rd_http_req_t *hreq) {
int rd_http_req_get_code(const rd_http_req_t *hreq) {
return hreq->hreq_code;
}

const char *rd_http_req_get_content_type (rd_http_req_t *hreq) {
const char *rd_http_req_get_content_type(rd_http_req_t *hreq) {
const char *content_type = NULL;

if (curl_easy_getinfo(hreq->hreq_curl, CURLINFO_CONTENT_TYPE,

@ -201,7 +200,7 @@ const char *rd_http_req_get_content_type (rd_http_req_t *hreq) {
* by calling rd_http_error_destroy(). In case of HTTP error the \p *rbufp
* may be filled with the error response.
*/
rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp) {
rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp) {
rd_http_req_t hreq;
rd_http_error_t *herr;


@ -217,7 +216,7 @@ rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp) {
return herr;
}

*rbufp = hreq.hreq_buf;
hreq.hreq_buf = NULL;

return NULL;
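A hedged caller sketch for rd_http_get(); the URL is illustrative, and the exact deallocation for the returned buffer is an assumption based on rd_buf_destroy() being the destructor used elsewhere in this file:

rd_buf_t *rbuf = NULL;
rd_http_error_t *herr = rd_http_get("https://example.com/data", &rbuf);
if (herr) {
        fprintf(stderr, "GET failed (%d): %s\n", herr->code, herr->errstr);
        rd_http_error_destroy(herr);
} else {
        /* ... read rbuf with the rd_slice API ... */
        rd_buf_destroy(rbuf); /* assumption: plus freeing rbuf itself
                               * if it was heap-allocated */
}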
@ -230,7 +229,7 @@ rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp) {
*
* Same error semantics as rd_http_get().
*/
rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) {
rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp) {
rd_http_req_t hreq;
rd_http_error_t *herr;
rd_slice_t slice;

@ -248,7 +247,7 @@ rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) {
// FIXME: send Accept: json.. header?

herr = rd_http_req_perform_sync(&hreq);
len = rd_buf_len(hreq.hreq_buf);
if (herr && len == 0) {
rd_http_req_destroy(&hreq);
return herr;

@ -263,14 +262,12 @@ rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) {

content_type = rd_http_req_get_content_type(&hreq);

if (!content_type ||
rd_strncasecmp(content_type,
"application/json", strlen("application/json"))) {
if (!content_type || rd_strncasecmp(content_type, "application/json",
strlen("application/json"))) {
if (!herr)
herr = rd_http_error_new(
hreq.hreq_code,
"Response is not JSON encoded: %s",
content_type ? content_type : "(n/a)");
hreq.hreq_code, "Response is not JSON encoded: %s",
content_type ? content_type : "(n/a)");
rd_http_req_destroy(&hreq);
return herr;
}

@ -282,12 +279,12 @@ rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) {
raw_json[len] = '\0';

/* Parse JSON */
end = NULL;
*jsonp = cJSON_ParseWithOpts(raw_json, &end, 0);
if (!*jsonp && !herr)
herr = rd_http_error_new(hreq.hreq_code,
"Failed to parse JSON response "
"at %"PRIusz"/%"PRIusz,
"at %" PRIusz "/%" PRIusz,
(size_t)(end - raw_json), len);

rd_free(raw_json);
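And the JSON variant; everything here except the URL follows the documented contract above (cJSON_Print() is standard cJSON and returns a malloc'd string):

cJSON *json = NULL;
rd_http_error_t *herr =
        rd_http_get_json("https://example.com/config.json", &json);
if (herr) {
        fprintf(stderr, "HTTP %d: %s\n", herr->code, herr->errstr);
        rd_http_error_destroy(herr);
}
if (json) { /* may be set even on HTTP error, per the code above */
        char *str = cJSON_Print(json);
        printf("%s\n", str);
        free(str);
        cJSON_Delete(json);
}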
@ -297,7 +294,7 @@ rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp) {
}


void rd_http_global_init (void) {
void rd_http_global_init(void) {
curl_global_init(CURL_GLOBAL_DEFAULT);
}


@ -311,7 +308,7 @@ void rd_http_global_init (void) {
* and 4xx response on $RD_UT_HTTP_URL/error (with whatever type of body).
*/

int unittest_http (void) {
int unittest_http(void) {
const char *base_url = rd_getenv("RD_UT_HTTP_URL", NULL);
char *error_url;
size_t error_url_size;

@ -325,7 +322,7 @@ int unittest_http (void) {
RD_UT_BEGIN();

error_url_size = strlen(base_url) + strlen("/error") + 1;
error_url = rd_alloca(error_url_size);
rd_snprintf(error_url, error_url_size, "%s/error", base_url);

/* Try the base url first, parse its JSON and extract a key-value. */

@ -341,9 +338,10 @@ int unittest_http (void) {
}
RD_UT_ASSERT(!empty, "Expected non-empty JSON response from %s",
base_url);
RD_UT_SAY("URL %s returned no error and a non-empty "
"JSON object/array as expected",
base_url);
RD_UT_SAY(
"URL %s returned no error and a non-empty "
"JSON object/array as expected",
base_url);
cJSON_Delete(json);


@ -351,12 +349,14 @@ int unittest_http (void) {
json = NULL;
herr = rd_http_get_json(error_url, &json);
RD_UT_ASSERT(herr != NULL, "Expected get_json(%s) to fail", error_url);
RD_UT_ASSERT(herr->code >= 400, "Expected get_json(%s) error code >= "
"400, got %d", error_url, herr->code);
RD_UT_SAY("Error URL %s returned code %d, errstr \"%s\" "
"and %s JSON object as expected",
error_url, herr->code, herr->errstr,
json ? "a" : "no");
RD_UT_ASSERT(herr->code >= 400,
"Expected get_json(%s) error code >= "
"400, got %d",
error_url, herr->code);
RD_UT_SAY(
"Error URL %s returned code %d, errstr \"%s\" "
"and %s JSON object as expected",
error_url, herr->code, herr->errstr, json ? "a" : "no");
/* Check if there's a JSON document returned */
if (json)
cJSON_Delete(json);
src/rdhttp.h
@ -37,16 +37,15 @@
typedef struct rd_http_error_s {
int code;
char *errstr;
char data[1]; /**< This is where the error string begins. */
} rd_http_error_t;

void rd_http_error_destroy (rd_http_error_t *herr);
void rd_http_error_destroy(rd_http_error_t *herr);

rd_http_error_t *rd_http_get (const char *url, rd_buf_t **rbufp);
rd_http_error_t *rd_http_get_json (const char *url, cJSON **jsonp);

void rd_http_global_init (void);
rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp);
rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp);

void rd_http_global_init(void);


@ -56,16 +55,16 @@ void rd_http_global_init (void);


typedef struct rd_http_req_s {
CURL *hreq_curl; /**< CURL handle */
rd_buf_t *hreq_buf; /**< Response buffer */
int hreq_code; /**< HTTP response code */
char hreq_curl_errstr[CURL_ERROR_SIZE]; /**< Error string for curl to
* write to. */
} rd_http_req_t;

static void rd_http_req_destroy (rd_http_req_t *hreq);
rd_http_error_t *rd_http_req_init (rd_http_req_t *hreq, const char *url);
rd_http_error_t *rd_http_req_perform_sync (rd_http_req_t *hreq);
static void rd_http_req_destroy(rd_http_req_t *hreq);
rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url);
rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq);
#endif
@ -32,13 +32,13 @@
#include "rd.h"

typedef struct rd_interval_s {
rd_ts_t ri_ts_last; /* last interval timestamp */
rd_ts_t ri_fixed; /* fixed interval if provided interval is 0 */
int ri_backoff; /* back off the next interval by this much */
} rd_interval_t;


static RD_INLINE RD_UNUSED void rd_interval_init (rd_interval_t *ri) {
static RD_INLINE RD_UNUSED void rd_interval_init(rd_interval_t *ri) {
memset(ri, 0, sizeof(*ri));
}


@ -60,13 +60,13 @@ static RD_INLINE RD_UNUSED void rd_interval_init (rd_interval_t *ri) {
* will be returned immediately even though the initial interval has not
* passed.
*/
#define rd_interval(ri,interval_us,now) rd_interval0(ri,interval_us,now,0)
#define rd_interval_immediate(ri,interval_us,now) \
rd_interval0(ri,interval_us,now,1)
static RD_INLINE RD_UNUSED rd_ts_t rd_interval0 (rd_interval_t *ri,
rd_ts_t interval_us,
rd_ts_t now,
int immediate) {
#define rd_interval(ri, interval_us, now) rd_interval0(ri, interval_us, now, 0)
#define rd_interval_immediate(ri, interval_us, now) \
rd_interval0(ri, interval_us, now, 1)
static RD_INLINE RD_UNUSED rd_ts_t rd_interval0(rd_interval_t *ri,
rd_ts_t interval_us,
rd_ts_t now,
int immediate) {
rd_ts_t diff;

if (!now)
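rd_interval() is librdkafka's rate-limiting primitive: it returns a positive value once the interval has elapsed (re-arming itself for the next period), and a non-positive value otherwise (an assumption based on the diff-based implementation above). A minimal hedged usage sketch:

rd_interval_t ri;
rd_interval_init(&ri);

for (;;) {
        /* At most one run per second (the interval is in microseconds);
         * passing now=0 lets rd_interval0() fetch the clock itself. */
        if (rd_interval(&ri, 1000 * 1000, 0) > 0) {
                /* ... rate-limited work ... */
        }
        /* ... other event-loop work ... */
}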
@ -91,7 +91,7 @@ static RD_INLINE RD_UNUSED rd_ts_t rd_interval0 (rd_interval_t *ri,
* Reset the interval to zero, i.e., the next call to rd_interval()
* will be immediate.
*/
static RD_INLINE RD_UNUSED void rd_interval_reset (rd_interval_t *ri) {
static RD_INLINE RD_UNUSED void rd_interval_reset(rd_interval_t *ri) {
ri->ri_ts_last = 0;
ri->ri_backoff = 0;
}

@ -100,8 +100,8 @@ static RD_INLINE RD_UNUSED void rd_interval_reset (rd_interval_t *ri) {
* Reset the interval to 'now'. If now is 0, the time will be gathered
* automatically.
*/
static RD_INLINE RD_UNUSED void rd_interval_reset_to_now (rd_interval_t *ri,
rd_ts_t now) {
static RD_INLINE RD_UNUSED void rd_interval_reset_to_now(rd_interval_t *ri,
rd_ts_t now) {
if (!now)
now = rd_clock();


@ -112,7 +112,7 @@ static RD_INLINE RD_UNUSED void rd_interval_reset_to_now (rd_interval_t *ri,
/**
* Back off the next interval by `backoff_us` microseconds.
*/
static RD_INLINE RD_UNUSED void rd_interval_backoff (rd_interval_t *ri,
static RD_INLINE RD_UNUSED void rd_interval_backoff(rd_interval_t *ri,
int backoff_us) {
ri->ri_backoff = backoff_us;
}

@ -122,19 +122,19 @@ static RD_INLINE RD_UNUSED void rd_interval_backoff (rd_interval_t *ri,
* If `expedite_us` is 0 the interval will be set to trigger
* immediately on the next rd_interval() call.
*/
static RD_INLINE RD_UNUSED void rd_interval_expedite (rd_interval_t *ri,
int expedite_us) {
if (!expedite_us)
ri->ri_ts_last = 0;
else
ri->ri_backoff = -expedite_us;
static RD_INLINE RD_UNUSED void rd_interval_expedite(rd_interval_t *ri,
int expedite_us) {
if (!expedite_us)
ri->ri_ts_last = 0;
else
ri->ri_backoff = -expedite_us;
}

/**
* Specifies a fixed interval to use if rd_interval() is called with
* `interval_us` set to 0.
*/
static RD_INLINE RD_UNUSED void rd_interval_fixed (rd_interval_t *ri,
static RD_INLINE RD_UNUSED void rd_interval_fixed(rd_interval_t *ri,
rd_ts_t fixed_us) {
ri->ri_fixed = fixed_us;
}

@ -144,7 +144,7 @@ static RD_INLINE RD_UNUSED void rd_interval_fixed (rd_interval_t *ri,
* A disabled interval will never return a positive value from
* rd_interval().
*/
static RD_INLINE RD_UNUSED void rd_interval_disable (rd_interval_t *ri) {
static RD_INLINE RD_UNUSED void rd_interval_disable(rd_interval_t *ri) {
/* Set last beat to a large value a long time in the future. */
ri->ri_ts_last = 6000000000000000000LL; /* in about 190000 years */
}

@ -152,7 +152,7 @@ static RD_INLINE RD_UNUSED void rd_interval_disable (rd_interval_t *ri) {
/**
* Returns true if the interval is disabled.
*/
static RD_INLINE RD_UNUSED int rd_interval_disabled (const rd_interval_t *ri) {
static RD_INLINE RD_UNUSED int rd_interval_disabled(const rd_interval_t *ri) {
return ri->ri_ts_last == 6000000000000000000LL;
}
src/rdkafka.c: diff not shown because of its large size (3689 lines changed)
src/rdkafka.h: diff not shown because of its large size (2389 lines changed)
src/rdkafka_admin.c: diff not shown because of its large size (1780 lines changed)
@ -42,17 +42,17 @@
* to make sure it is copied properly.
*/
struct rd_kafka_AdminOptions_s {
rd_kafka_admin_op_t for_api; /**< Limit allowed options to
* this API (optional) */

/* Generic */
rd_kafka_confval_t request_timeout;/**< I32: Full request timeout,
* includes looking up leader
* broker,
* waiting for req/response,
* etc. */
rd_ts_t abs_timeout; /**< Absolute timeout calculated
* from .timeout */
rd_kafka_confval_t request_timeout; /**< I32: Full request timeout,
* includes looking up leader
* broker,
* waiting for req/response,
* etc. */
rd_ts_t abs_timeout; /**< Absolute timeout calculated
* from .timeout */

/* Specific for one or more APIs */
rd_kafka_confval_t operation_timeout; /**< I32: Timeout on broker.

@ -62,30 +62,30 @@ struct rd_kafka_AdminOptions_s {
* DeleteRecords
* DeleteTopics
*/
rd_kafka_confval_t validate_only; /**< BOOL: Only validate (on broker),
* but don't perform action.
* Valid for:
* CreateTopics
* CreatePartitions
* AlterConfigs
*/

rd_kafka_confval_t incremental; /**< BOOL: Incremental rather than
* absolute application
* of config.
* Valid for:
* AlterConfigs
*/

rd_kafka_confval_t broker; /**< INT: Explicitly override
* broker id to send
* requests to.
* Valid for:
* all
*/

rd_kafka_confval_t opaque; /**< PTR: Application opaque.
* Valid for all. */
};
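These confvals are not set directly; applications go through the public AdminOptions setters declared in rdkafka.h. A hedged sketch (rk stands for an assumed existing rd_kafka_t handle):

char errstr[512];
rd_kafka_AdminOptions_t *options =
        rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

/* Sets the request_timeout confval (I32) described above. */
rd_kafka_AdminOptions_set_request_timeout(options, 10 * 1000, errstr,
                                          sizeof(errstr));
/* Sets the validate_only confval (BOOL): broker-side dry run. */
rd_kafka_AdminOptions_set_validate_only(options, 1, errstr,
                                        sizeof(errstr));

/* ... pass to rd_kafka_CreateTopics() ... */
rd_kafka_AdminOptions_destroy(options);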
@ -104,11 +104,11 @@ struct rd_kafka_NewTopic_s {
int replication_factor; /**< Replication factor */

/* Optional */
rd_list_t replicas; /**< Type (rd_list_t (int32_t)):
* Array of replica lists indexed by
* partition, size num_partitions. */
rd_list_t config; /**< Type (rd_kafka_ConfigEntry_t *):
* List of configuration entries */
};

/**@}*/
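The replicas and config lists above are likewise populated through the public NewTopic API rather than written to directly. A hedged sketch ("my_topic" and the settings are illustrative):

char errstr[512];
rd_kafka_NewTopic_t *newt = rd_kafka_NewTopic_new(
        "my_topic", 3 /* partitions */, 1 /* replication factor */,
        errstr, sizeof(errstr));
if (newt) {
        /* Appends to the config list (rd_kafka_ConfigEntry_t *) above. */
        rd_kafka_NewTopic_set_config(newt, "cleanup.policy", "compact");
        /* ... rd_kafka_CreateTopics(rk, &newt, 1, options, rkqu); ... */
        rd_kafka_NewTopic_destroy(newt);
}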
@ -123,13 +123,13 @@ struct rd_kafka_NewTopic_s {
* @brief DeleteTopics result
*/
struct rd_kafka_DeleteTopics_result_s {
rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */
};

struct rd_kafka_DeleteTopic_s {
char *topic; /**< Points to data */
char data[1]; /**< The topic name is allocated along with
* the struct here. */
};

/**@}*/

@ -146,7 +146,7 @@ struct rd_kafka_DeleteTopic_s {
* @brief CreatePartitions result
*/
struct rd_kafka_CreatePartitions_result_s {
rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */
};

struct rd_kafka_NewPartitions_s {

@ -154,15 +154,15 @@ struct rd_kafka_NewPartitions_s {
size_t total_cnt; /**< New total partition count */

/* Optional */
rd_list_t replicas; /**< Type (rd_list_t (int32_t)):
* Array of replica lists indexed by
* new partition relative index.
* Size is dynamic since we don't
* know how many partitions are actually
* being added by total_cnt */

char data[1]; /**< The topic name is allocated along with
* the struct here. */
};

/**@}*/

@ -176,27 +176,27 @@ struct rd_kafka_NewPartitions_s {

/* KIP-248 */
typedef enum rd_kafka_AlterOperation_t {
RD_KAFKA_ALTER_OP_ADD = 0,
RD_KAFKA_ALTER_OP_SET = 1,
RD_KAFKA_ALTER_OP_DELETE = 2,
} rd_kafka_AlterOperation_t;

struct rd_kafka_ConfigEntry_s {
rd_strtup_t *kv; /**< Name/Value pair */

/* Response */

/* Attributes: this is a struct for easy copying */
struct {
rd_kafka_AlterOperation_t operation; /**< Operation */
rd_kafka_ConfigSource_t source; /**< Config source */
rd_bool_t is_readonly; /**< Value is read-only (on broker) */
rd_bool_t is_default; /**< Value is at its default */
rd_bool_t is_sensitive; /**< Value is sensitive */
rd_bool_t is_synonym; /**< Value is synonym */
} a;

rd_list_t synonyms; /**< Type (rd_kafka_configEntry *) */
};

/**

@ -214,16 +214,15 @@ struct rd_kafka_ConfigResource_s {
* List of config props */

/* Response */
rd_kafka_resp_err_t err; /**< Response error code */
char *errstr; /**< Response error string */

char data[1]; /**< The name is allocated along with
* the struct here. */
};


/**@}*/

/**

@ -233,15 +232,14 @@ struct rd_kafka_ConfigResource_s {

struct rd_kafka_AlterConfigs_result_s {
rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */
};

struct rd_kafka_ConfigResource_result_s {
rd_list_t resources; /**< Type (struct rd_kafka_ConfigResource *):
* List of config resources, sans config
* but with response error values. */
};

/**@}*/

@ -254,7 +252,7 @@ struct rd_kafka_ConfigResource_result_s {
*/

struct rd_kafka_DescribeConfigs_result_s {
rd_list_t configs; /**< Type (rd_kafka_ConfigResource_t *) */
};

/**@}*/

@ -267,9 +265,9 @@ struct rd_kafka_DescribeConfigs_result_s {


struct rd_kafka_DeleteGroup_s {
char *group; /**< Points to data */
char data[1]; /**< The group name is allocated along with
* the struct here. */
};

/**@}*/

@ -296,14 +294,14 @@ struct rd_kafka_DeleteRecords_s {
* @brief DeleteConsumerGroupOffsets result
*/
struct rd_kafka_DeleteConsumerGroupOffsets_result_s {
rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
};

struct rd_kafka_DeleteConsumerGroupOffsets_s {
char *group; /**< Points to data */
rd_kafka_topic_partition_list_t *partitions;
char data[1]; /**< The group name is allocated along with
* the struct here. */
};

/**@}*/
@@ -99,27 +99,23 @@
#include "rdkafka_request.h"


static void rd_kafka_assignment_dump (rd_kafka_t *rk) {
static void rd_kafka_assignment_dump(rd_kafka_t *rk) {
rd_kafka_dbg(rk, CGRP, "DUMP",
"Assignment dump (started_cnt=%d, wait_stop_cnt=%d)",
rk->rk_consumer.assignment.started_cnt,
rk->rk_consumer.assignment.wait_stop_cnt);

rd_kafka_topic_partition_list_log(
rk, "DUMP_ALL", RD_KAFKA_DBG_CGRP,
rk->rk_consumer.assignment.all);
rd_kafka_topic_partition_list_log(rk, "DUMP_ALL", RD_KAFKA_DBG_CGRP,
rk->rk_consumer.assignment.all);

rd_kafka_topic_partition_list_log(
rk, "DUMP_PND", RD_KAFKA_DBG_CGRP,
rk->rk_consumer.assignment.pending);
rd_kafka_topic_partition_list_log(rk, "DUMP_PND", RD_KAFKA_DBG_CGRP,
rk->rk_consumer.assignment.pending);

rd_kafka_topic_partition_list_log(
rk, "DUMP_QRY", RD_KAFKA_DBG_CGRP,
rk->rk_consumer.assignment.queried);
rd_kafka_topic_partition_list_log(rk, "DUMP_QRY", RD_KAFKA_DBG_CGRP,
rk->rk_consumer.assignment.queried);

rd_kafka_topic_partition_list_log(
rk, "DUMP_REM", RD_KAFKA_DBG_CGRP,
rk->rk_consumer.assignment.removed);
rd_kafka_topic_partition_list_log(rk, "DUMP_REM", RD_KAFKA_DBG_CGRP,
rk->rk_consumer.assignment.removed);
}

/**

@@ -133,20 +129,21 @@ static void rd_kafka_assignment_dump (rd_kafka_t *rk) {
* Called from the FetchOffsets response handler below.
*/
static void
rd_kafka_assignment_apply_offsets (rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *offsets,
rd_kafka_resp_err_t err) {
rd_kafka_assignment_apply_offsets(rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *offsets,
rd_kafka_resp_err_t err) {
rd_kafka_topic_partition_t *rktpar;

RD_KAFKA_TPLIST_FOREACH(rktpar, offsets) {
rd_kafka_toppar_t *rktp = rktpar->_private; /* May be NULL */

if (!rd_kafka_topic_partition_list_del(
rk->rk_consumer.assignment.queried,
rktpar->topic, rktpar->partition)) {
rk->rk_consumer.assignment.queried, rktpar->topic,
rktpar->partition)) {
rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
"Ignoring OffsetFetch "
"response for %s [%"PRId32"] which is no "
"response for %s [%" PRId32
"] which is no "
"longer in the queried list "
"(possibly unassigned?)",
rktpar->topic, rktpar->partition);

@@ -164,27 +161,24 @@ rd_kafka_assignment_apply_offsets (rd_kafka_t *rk,
* later handling by the assignment state machine. */

rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
"Adding %s [%"PRId32"] back to pending "
"Adding %s [%" PRId32
"] back to pending "
"list because on-going transaction is "
"blocking offset retrieval",
rktpar->topic,
rktpar->partition);
rktpar->topic, rktpar->partition);

rd_kafka_topic_partition_list_add_copy(
rk->rk_consumer.assignment.pending, rktpar);
rk->rk_consumer.assignment.pending, rktpar);

} else if (rktpar->err) {
/* Partition-level error */
rd_kafka_consumer_err(
rk->rk_consumer.q, RD_KAFKA_NODEID_UA,
rktpar->err, 0,
rktpar->topic, rktp,
RD_KAFKA_OFFSET_INVALID,
"Failed to fetch committed offset for "
"group \"%s\" topic %s [%"PRId32"]: %s",
rk->rk_group_id->str,
rktpar->topic, rktpar->partition,
rd_kafka_err2str(rktpar->err));
rk->rk_consumer.q, RD_KAFKA_NODEID_UA, rktpar->err,
0, rktpar->topic, rktp, RD_KAFKA_OFFSET_INVALID,
"Failed to fetch committed offset for "
"group \"%s\" topic %s [%" PRId32 "]: %s",
rk->rk_group_id->str, rktpar->topic,
rktpar->partition, rd_kafka_err2str(rktpar->err));

/* The partition will not be added back to .pending
* and thus only reside on .all until the application

@@ -201,17 +195,16 @@ rd_kafka_assignment_apply_offsets (rd_kafka_t *rk,
/* Add partition to pending list where serve()
* will start the fetcher. */
rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
"Adding %s [%"PRId32"] back to pending "
"Adding %s [%" PRId32
"] back to pending "
"list with offset %s",
rktpar->topic,
rktpar->partition,
rktpar->topic, rktpar->partition,
rd_kafka_offset2str(rktpar->offset));

rd_kafka_topic_partition_list_add_copy(
rk->rk_consumer.assignment.pending, rktpar);
rk->rk_consumer.assignment.pending, rktpar);
}
/* Do nothing for request-level errors (err is set). */

}

if (offsets->cnt > 0)

@@ -228,19 +221,18 @@ rd_kafka_assignment_apply_offsets (rd_kafka_t *rk,
*
* @locality rdkafka main thread
*/
static void
rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk,
rd_kafka_broker_t *rkb,
rd_kafka_resp_err_t err,
rd_kafka_buf_t *reply,
rd_kafka_buf_t *request,
void *opaque) {
static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk,
rd_kafka_broker_t *rkb,
rd_kafka_resp_err_t err,
rd_kafka_buf_t *reply,
rd_kafka_buf_t *request,
void *opaque) {
rd_kafka_topic_partition_list_t *offsets = NULL;
int64_t *req_assignment_version = (int64_t *)opaque;
int64_t *req_assignment_version = (int64_t *)opaque;
/* Only allow retries if there's been no change to the assignment,
* otherwise rely on assignment state machine to retry. */
rd_bool_t allow_retry = *req_assignment_version ==
rk->rk_consumer.assignment.version;
rd_bool_t allow_retry =
*req_assignment_version == rk->rk_consumer.assignment.version;

if (err == RD_KAFKA_RESP_ERR__DESTROY) {
/* Termination, quick cleanup. */

@@ -248,11 +240,9 @@ rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk,
return;
}

err = rd_kafka_handle_OffsetFetch(rk, rkb, err,
reply, request, &offsets,
rd_true/* Update toppars */,
rd_true/* Add parts */,
allow_retry);
err = rd_kafka_handle_OffsetFetch(
rk, rkb, err, reply, request, &offsets,
rd_true /* Update toppars */, rd_true /* Add parts */, allow_retry);
if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
if (offsets)
rd_kafka_topic_partition_list_destroy(offsets);

@@ -268,18 +258,15 @@ rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk,
if (!err)
err = RD_KAFKA_RESP_ERR__NO_OFFSET;

rd_kafka_dbg(rk, CGRP, "OFFSET",
"Offset fetch error: %s",
rd_kafka_dbg(rk, CGRP, "OFFSET", "Offset fetch error: %s",
rd_kafka_err2str(err));
rd_kafka_consumer_err(rk->rk_consumer.q,
rd_kafka_broker_id(rkb),
err, 0, NULL, NULL,
RD_KAFKA_OFFSET_INVALID,
"Failed to fetch committed "
"offsets for partitions "
"in group \"%s\": %s",
rk->rk_group_id->str,
rd_kafka_err2str(err));
rd_kafka_consumer_err(
rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL,
NULL, RD_KAFKA_OFFSET_INVALID,
"Failed to fetch committed "
"offsets for partitions "
"in group \"%s\": %s",
rk->rk_group_id->str, rd_kafka_err2str(err));

return;
}

@@ -290,15 +277,12 @@ rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk,
rd_kafka_dbg(rk, CGRP, "OFFSET",
"Offset fetch error for %d partition(s): %s",
offsets->cnt, rd_kafka_err2str(err));
rd_kafka_consumer_err(rk->rk_consumer.q,
rd_kafka_broker_id(rkb),
err, 0, NULL, NULL,
RD_KAFKA_OFFSET_INVALID,
"Failed to fetch committed offsets for "
"%d partition(s) in group \"%s\": %s",
offsets->cnt,
rk->rk_group_id->str,
rd_kafka_err2str(err));
rd_kafka_consumer_err(
rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL,
NULL, RD_KAFKA_OFFSET_INVALID,
"Failed to fetch committed offsets for "
"%d partition(s) in group \"%s\": %s",
offsets->cnt, rk->rk_group_id->str, rd_kafka_err2str(err));
}

/* Apply the fetched offsets to the assignment */

@@ -313,8 +297,7 @@ rd_kafka_assignment_handle_OffsetFetch (rd_kafka_t *rk,
*
* @returns >0 if there are removal operations in progress, else 0.
*/
static int
rd_kafka_assignment_serve_removals (rd_kafka_t *rk) {
static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) {
rd_kafka_topic_partition_t *rktpar;
int valid_offsets = 0;

@@ -327,25 +310,24 @@ rd_kafka_assignment_serve_removals (rd_kafka_t *rk) {
* Outstanding OffsetFetch query results will be ignored
* for partitions that are no longer on the .queried list. */
was_pending = rd_kafka_topic_partition_list_del(
rk->rk_consumer.assignment.pending,
rktpar->topic, rktpar->partition);
rk->rk_consumer.assignment.pending, rktpar->topic,
rktpar->partition);
was_queried = rd_kafka_topic_partition_list_del(
rk->rk_consumer.assignment.queried,
rktpar->topic, rktpar->partition);
rk->rk_consumer.assignment.queried, rktpar->topic,
rktpar->partition);

if (rktp->rktp_started) {
/* Partition was started, stop the fetcher. */
rd_assert(rk->rk_consumer.assignment.started_cnt > 0);

rd_kafka_toppar_op_fetch_stop(
rktp, RD_KAFKA_REPLYQ(rk->rk_ops, 0));
rktp, RD_KAFKA_REPLYQ(rk->rk_ops, 0));
rk->rk_consumer.assignment.wait_stop_cnt++;
}

/* Reset the (lib) pause flag which may have been set by
* the cgrp when scheduling the rebalance callback. */
rd_kafka_toppar_op_pause_resume(rktp,
rd_false/*resume*/,
rd_kafka_toppar_op_pause_resume(rktp, rd_false /*resume*/,
RD_KAFKA_TOPPAR_F_LIB_PAUSE,
RD_KAFKA_NO_REPLYQ);

@@ -368,17 +350,17 @@ rd_kafka_assignment_serve_removals (rd_kafka_t *rk) {
rd_kafka_toppar_unlock(rktp);

rd_kafka_dbg(rk, CGRP, "REMOVE",
"Removing %s [%"PRId32"] from assignment "
"Removing %s [%" PRId32
"] from assignment "
"(started=%s, pending=%s, queried=%s, "
"stored offset=%s)",
rktpar->topic, rktpar->partition,
RD_STR_ToF(rktp->rktp_started),
RD_STR_ToF(was_pending),
RD_STR_ToF(was_queried),
RD_STR_ToF(was_pending), RD_STR_ToF(was_queried),
rd_kafka_offset2str(rktpar->offset));
}

rd_kafka_dbg(rk, CONSUMER|RD_KAFKA_DBG_CGRP, "REMOVE",
rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REMOVE",
"Served %d removed partition(s), "
"with %d offset(s) to commit",
rk->rk_consumer.assignment.removed->cnt, valid_offsets);

@@ -387,21 +369,18 @@ rd_kafka_assignment_serve_removals (rd_kafka_t *rk) {
* Commit final offsets to broker for the removed partitions,
* unless this is a consumer destruction with a close() call. */
if (valid_offsets > 0 &&
rk->rk_conf.offset_store_method ==
RD_KAFKA_OFFSET_METHOD_BROKER &&
rk->rk_cgrp &&
rk->rk_conf.enable_auto_commit &&
rk->rk_conf.offset_store_method == RD_KAFKA_OFFSET_METHOD_BROKER &&
rk->rk_cgrp && rk->rk_conf.enable_auto_commit &&
!rd_kafka_destroy_flags_no_consumer_close(rk))
rd_kafka_cgrp_assigned_offsets_commit(
rk->rk_cgrp,
rk->rk_consumer.assignment.removed,
rd_false /* use offsets from .removed */,
"unassigned partitions");
rk->rk_cgrp, rk->rk_consumer.assignment.removed,
rd_false /* use offsets from .removed */,
"unassigned partitions");

rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.removed);

return rk->rk_consumer.assignment.wait_stop_cnt +
rk->rk_consumer.wait_commit_cnt;
rk->rk_consumer.wait_commit_cnt;
}


@@ -414,8 +393,7 @@ rd_kafka_assignment_serve_removals (rd_kafka_t *rk) {
* @returns >0 if there are pending operations in progress for the current
* assignment, else 0.
*/
static int
rd_kafka_assignment_serve_pending (rd_kafka_t *rk) {
static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {
rd_kafka_topic_partition_list_t *partitions_to_query = NULL;
/* We can query committed offsets only if all of the following are true:
* - We have a group coordinator.

@@ -426,21 +404,20 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) {
* is unassigned and then assigned again).
*/
rd_kafka_broker_t *coord =
rk->rk_cgrp ? rd_kafka_cgrp_get_coord(rk->rk_cgrp) : NULL;
rk->rk_cgrp ? rd_kafka_cgrp_get_coord(rk->rk_cgrp) : NULL;
rd_bool_t can_query_offsets =
coord &&
rk->rk_consumer.wait_commit_cnt == 0 &&
rk->rk_consumer.assignment.queried->cnt == 0;
coord && rk->rk_consumer.wait_commit_cnt == 0 &&
rk->rk_consumer.assignment.queried->cnt == 0;
int i;

if (can_query_offsets)
partitions_to_query = rd_kafka_topic_partition_list_new(
rk->rk_consumer.assignment.pending->cnt);
rk->rk_consumer.assignment.pending->cnt);

/* Scan the list backwards so removals are cheap (no array shuffle) */
for (i = rk->rk_consumer.assignment.pending->cnt - 1 ; i >= 0 ; i--) {
for (i = rk->rk_consumer.assignment.pending->cnt - 1; i >= 0; i--) {
rd_kafka_topic_partition_t *rktpar =
&rk->rk_consumer.assignment.pending->elems[i];
&rk->rk_consumer.assignment.pending->elems[i];
rd_kafka_toppar_t *rktp = rktpar->_private; /* Borrow ref */

rd_assert(!rktp->rktp_started);

@@ -462,25 +439,23 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) {

rd_kafka_dbg(rk, CGRP, "SRVPEND",
"Starting pending assigned partition "
"%s [%"PRId32"] at offset %s",
"%s [%" PRId32 "] at offset %s",
rktpar->topic, rktpar->partition,
rd_kafka_offset2str(rktpar->offset));

/* Reset the (lib) pause flag which may have been set by
* the cgrp when scheduling the rebalance callback. */
rd_kafka_toppar_op_pause_resume(
rktp,
rd_false/*resume*/,
RD_KAFKA_TOPPAR_F_LIB_PAUSE,
RD_KAFKA_NO_REPLYQ);
rktp, rd_false /*resume*/,
RD_KAFKA_TOPPAR_F_LIB_PAUSE, RD_KAFKA_NO_REPLYQ);

/* Start the fetcher */
rktp->rktp_started = rd_true;
rk->rk_consumer.assignment.started_cnt++;

rd_kafka_toppar_op_fetch_start(
rktp, rktpar->offset,
rk->rk_consumer.q, RD_KAFKA_NO_REPLYQ);
rd_kafka_toppar_op_fetch_start(rktp, rktpar->offset,
rk->rk_consumer.q,
RD_KAFKA_NO_REPLYQ);


} else if (can_query_offsets) {

@@ -491,42 +466,44 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) {
* to the group coordinator. */

rd_dassert(!rd_kafka_topic_partition_list_find(
rk->rk_consumer.assignment.queried,
rktpar->topic, rktpar->partition));
rk->rk_consumer.assignment.queried, rktpar->topic,
rktpar->partition));

rd_kafka_topic_partition_list_add_copy(
partitions_to_query, rktpar);
partitions_to_query, rktpar);

rd_kafka_topic_partition_list_add_copy(
rk->rk_consumer.assignment.queried, rktpar);
rk->rk_consumer.assignment.queried, rktpar);

rd_kafka_dbg(rk, CGRP, "SRVPEND",
"Querying committed offset for pending "
"assigned partition %s [%"PRId32"]",
"assigned partition %s [%" PRId32 "]",
rktpar->topic, rktpar->partition);


} else {
rd_kafka_dbg(rk, CGRP, "SRVPEND",
"Pending assignment partition "
"%s [%"PRId32"] can't fetch committed "
"offset yet "
"(cgrp state %s, awaiting %d commits, "
"%d partition(s) already being queried)",
rktpar->topic, rktpar->partition,
rk->rk_cgrp ?
rd_kafka_cgrp_state_names[
rk->rk_cgrp->rkcg_state] :
"n/a",
rk->rk_consumer.wait_commit_cnt,
rk->rk_consumer.assignment.queried->cnt);
rd_kafka_dbg(
rk, CGRP, "SRVPEND",
"Pending assignment partition "
"%s [%" PRId32
"] can't fetch committed "
"offset yet "
"(cgrp state %s, awaiting %d commits, "
"%d partition(s) already being queried)",
rktpar->topic, rktpar->partition,
rk->rk_cgrp
? rd_kafka_cgrp_state_names[rk->rk_cgrp
->rkcg_state]
: "n/a",
rk->rk_consumer.wait_commit_cnt,
rk->rk_consumer.assignment.queried->cnt);

continue; /* Keep rktpar on pending list */
}

/* Remove rktpar from the pending list */
rd_kafka_topic_partition_list_del_by_idx(
rk->rk_consumer.assignment.pending, i);
rk->rk_consumer.assignment.pending, i);
}

@@ -534,7 +511,7 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) {
if (coord)
rd_kafka_broker_destroy(coord);
return rk->rk_consumer.assignment.pending->cnt +
rk->rk_consumer.assignment.queried->cnt;
rk->rk_consumer.assignment.queried->cnt;
}

@@ -548,14 +525,13 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) {
partitions_to_query->cnt);

rd_kafka_OffsetFetchRequest(
coord,
partitions_to_query,
rk->rk_conf.isolation_level ==
RD_KAFKA_READ_COMMITTED/*require_stable*/,
RD_KAFKA_REPLYQ(rk->rk_ops, 0),
rd_kafka_assignment_handle_OffsetFetch,
/* Must be freed by handler */
(void *)req_assignment_version);
coord, partitions_to_query,
rk->rk_conf.isolation_level ==
RD_KAFKA_READ_COMMITTED /*require_stable*/,
RD_KAFKA_REPLYQ(rk->rk_ops, 0),
rd_kafka_assignment_handle_OffsetFetch,
/* Must be freed by handler */
(void *)req_assignment_version);
}

if (coord)

@@ -564,7 +540,7 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) {
rd_kafka_topic_partition_list_destroy(partitions_to_query);

return rk->rk_consumer.assignment.pending->cnt +
rk->rk_consumer.assignment.queried->cnt;
rk->rk_consumer.assignment.queried->cnt;
}


@@ -577,9 +553,9 @@ rd_kafka_assignment_serve_pending (rd_kafka_t *rk) {
* - wait_commit_cnt reaches 0
* - partition fetcher is stopped
*/
void rd_kafka_assignment_serve (rd_kafka_t *rk) {
void rd_kafka_assignment_serve(rd_kafka_t *rk) {
int inp_removals = 0;
int inp_pending = 0;
int inp_pending = 0;

rd_kafka_assignment_dump(rk);

@@ -593,15 +569,15 @@ void rd_kafka_assignment_serve (rd_kafka_t *rk) {
* to finish (since we might need the committed offsets as start
* offsets). */
if (rk->rk_consumer.assignment.wait_stop_cnt == 0 &&
rk->rk_consumer.wait_commit_cnt == 0 &&
inp_removals == 0 &&
rk->rk_consumer.wait_commit_cnt == 0 && inp_removals == 0 &&
rk->rk_consumer.assignment.pending->cnt > 0)
inp_pending = rd_kafka_assignment_serve_pending(rk);

if (inp_removals + inp_pending +
rk->rk_consumer.assignment.queried->cnt +
rk->rk_consumer.assignment.wait_stop_cnt +
rk->rk_consumer.wait_commit_cnt == 0) {
rk->rk_consumer.assignment.queried->cnt +
rk->rk_consumer.assignment.wait_stop_cnt +
rk->rk_consumer.wait_commit_cnt ==
0) {
/* No assignment operations in progress,
* signal assignment done back to cgrp to let it
* transition to its next state if necessary.

@@ -615,8 +591,7 @@ void rd_kafka_assignment_serve (rd_kafka_t *rk) {
"with %d pending adds, %d offset queries, "
"%d partitions awaiting stop and "
"%d offset commits in progress",
rk->rk_consumer.assignment.all->cnt,
inp_pending,
rk->rk_consumer.assignment.all->cnt, inp_pending,
rk->rk_consumer.assignment.queried->cnt,
rk->rk_consumer.assignment.wait_stop_cnt,
rk->rk_consumer.wait_commit_cnt);

@@ -628,12 +603,12 @@ void rd_kafka_assignment_serve (rd_kafka_t *rk) {
* @returns true if the current or previous assignment has operations in
* progress, such as waiting for partition fetchers to stop.
*/
rd_bool_t rd_kafka_assignment_in_progress (rd_kafka_t *rk) {
rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk) {
return rk->rk_consumer.wait_commit_cnt > 0 ||
rk->rk_consumer.assignment.wait_stop_cnt > 0 ||
rk->rk_consumer.assignment.pending->cnt > 0 ||
rk->rk_consumer.assignment.queried->cnt > 0 ||
rk->rk_consumer.assignment.removed->cnt > 0;
rk->rk_consumer.assignment.wait_stop_cnt > 0 ||
rk->rk_consumer.assignment.pending->cnt > 0 ||
rk->rk_consumer.assignment.queried->cnt > 0 ||
rk->rk_consumer.assignment.removed->cnt > 0;
}


@@ -645,17 +620,16 @@ rd_bool_t rd_kafka_assignment_in_progress (rd_kafka_t *rk) {
*
* @returns the number of partitions removed.
*/
int rd_kafka_assignment_clear (rd_kafka_t *rk) {
int rd_kafka_assignment_clear(rd_kafka_t *rk) {
int cnt = rk->rk_consumer.assignment.all->cnt;

if (cnt == 0) {
rd_kafka_dbg(rk, CONSUMER|RD_KAFKA_DBG_CGRP,
"CLEARASSIGN",
rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN",
"No current assignment to clear");
return 0;
}

rd_kafka_dbg(rk, CONSUMER|RD_KAFKA_DBG_CGRP, "CLEARASSIGN",
rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN",
"Clearing current assignment of %d partition(s)",
rk->rk_consumer.assignment.all->cnt);

@@ -663,8 +637,7 @@ int rd_kafka_assignment_clear (rd_kafka_t *rk) {
rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.queried);

rd_kafka_topic_partition_list_add_list(
rk->rk_consumer.assignment.removed,
rk->rk_consumer.assignment.all);
rk->rk_consumer.assignment.removed, rk->rk_consumer.assignment.all);
rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.all);

rk->rk_consumer.assignment.version++;

@@ -683,8 +656,8 @@ int rd_kafka_assignment_clear (rd_kafka_t *rk) {
* return from this function.
*/
rd_kafka_error_t *
rd_kafka_assignment_add (rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *partitions) {
rd_kafka_assignment_add(rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *partitions) {
rd_bool_t was_empty = rk->rk_consumer.assignment.all->cnt == 0;
int i;

@@ -692,10 +665,10 @@ rd_kafka_assignment_add (rd_kafka_t *rk,
* invalid offsets in the input partitions. */
rd_kafka_topic_partition_list_sort(partitions, NULL, NULL);

for (i = 0 ; i < partitions->cnt ; i++) {
for (i = 0; i < partitions->cnt; i++) {
rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
const rd_kafka_topic_partition_t *prev =
i > 0 ? &partitions->elems[i-1] : NULL;
i > 0 ? &partitions->elems[i - 1] : NULL;

if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset) &&
rktpar->offset != RD_KAFKA_OFFSET_BEGINNING &&

@@ -704,26 +677,26 @@ rd_kafka_assignment_add (rd_kafka_t *rk,
rktpar->offset != RD_KAFKA_OFFSET_INVALID &&
rktpar->offset > RD_KAFKA_OFFSET_TAIL_BASE)
return rd_kafka_error_new(
RD_KAFKA_RESP_ERR__INVALID_ARG,
"%s [%"PRId32"] has invalid start offset %"
PRId64,
rktpar->topic, rktpar->partition,
rktpar->offset);
RD_KAFKA_RESP_ERR__INVALID_ARG,
"%s [%" PRId32
"] has invalid start offset %" PRId64,
rktpar->topic, rktpar->partition, rktpar->offset);

if (prev && !rd_kafka_topic_partition_cmp(rktpar, prev))
return rd_kafka_error_new(
RD_KAFKA_RESP_ERR__INVALID_ARG,
"Duplicate %s [%"PRId32"] in input list",
rktpar->topic, rktpar->partition);
RD_KAFKA_RESP_ERR__INVALID_ARG,
"Duplicate %s [%" PRId32 "] in input list",
rktpar->topic, rktpar->partition);

if (rd_kafka_topic_partition_list_find(
rk->rk_consumer.assignment.all,
rktpar->topic, rktpar->partition))
return rd_kafka_error_new(
RD_KAFKA_RESP_ERR__CONFLICT,
"%s [%"PRId32"] is already part of the "
"current assignment",
rktpar->topic, rktpar->partition);
rk->rk_consumer.assignment.all, rktpar->topic,
rktpar->partition))
return rd_kafka_error_new(RD_KAFKA_RESP_ERR__CONFLICT,
"%s [%" PRId32
"] is already part of the "
"current assignment",
rktpar->topic,
rktpar->partition);

/* Translate RD_KAFKA_OFFSET_INVALID to RD_KAFKA_OFFSET_STORED,
* i.e., read from committed offset, since we use INVALID

@@ -737,8 +710,7 @@ rd_kafka_assignment_add (rd_kafka_t *rk,
* This is to make sure the rktp stays alive while unassigning
* any previous assignment in the call to
* assignment_clear() below. */
rd_kafka_topic_partition_ensure_toppar(rk, rktpar,
rd_true);
rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
}

/* Add the new list of partitions to the current assignment.

@@ -747,22 +719,19 @@ rd_kafka_assignment_add (rd_kafka_t *rk,
rd_kafka_topic_partition_list_add_list(rk->rk_consumer.assignment.all,
partitions);
if (!was_empty)
rd_kafka_topic_partition_list_sort(rk->rk_consumer.
assignment.all,
NULL, NULL);
rd_kafka_topic_partition_list_sort(
rk->rk_consumer.assignment.all, NULL, NULL);

/* And add to .pending for serve_pending() to handle. */
rd_kafka_topic_partition_list_add_list(rk->rk_consumer.
assignment.pending,
partitions);
rd_kafka_topic_partition_list_add_list(
rk->rk_consumer.assignment.pending, partitions);


rd_kafka_dbg(rk, CONSUMER|RD_KAFKA_DBG_CGRP, "ASSIGNMENT",
rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "ASSIGNMENT",
"Added %d partition(s) to assignment which "
"now consists of %d partition(s) where of %d are in "
"pending state and %d are being queried",
partitions->cnt,
rk->rk_consumer.assignment.all->cnt,
partitions->cnt, rk->rk_consumer.assignment.all->cnt,
rk->rk_consumer.assignment.pending->cnt,
rk->rk_consumer.assignment.queried->cnt);

@@ -782,35 +751,35 @@ rd_kafka_assignment_add (rd_kafka_t *rk,
* return from this function.
*/
rd_kafka_error_t *
rd_kafka_assignment_subtract (rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *partitions) {
rd_kafka_assignment_subtract(rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *partitions) {
int i;
int matched_queried_partitions = 0;
int assignment_pre_cnt;

if (rk->rk_consumer.assignment.all->cnt == 0 && partitions->cnt > 0)
return rd_kafka_error_new(
RD_KAFKA_RESP_ERR__INVALID_ARG,
"Can't subtract from empty assignment");
RD_KAFKA_RESP_ERR__INVALID_ARG,
"Can't subtract from empty assignment");

/* Verify that all partitions in \p partitions are in the assignment
* before starting to modify the assignment. */
rd_kafka_topic_partition_list_sort(partitions, NULL, NULL);

for (i = 0 ; i < partitions->cnt ; i++) {
for (i = 0; i < partitions->cnt; i++) {
rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];

if (!rd_kafka_topic_partition_list_find(
rk->rk_consumer.assignment.all,
rktpar->topic, rktpar->partition))
rk->rk_consumer.assignment.all, rktpar->topic,
rktpar->partition))
return rd_kafka_error_new(
RD_KAFKA_RESP_ERR__INVALID_ARG,
"%s [%"PRId32"] can't be unassigned since "
"it is not in the current assignment",
rktpar->topic, rktpar->partition);
RD_KAFKA_RESP_ERR__INVALID_ARG,
"%s [%" PRId32
"] can't be unassigned since "
"it is not in the current assignment",
rktpar->topic, rktpar->partition);

rd_kafka_topic_partition_ensure_toppar(rk, rktpar,
rd_true);
rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
}


@@ -819,38 +788,39 @@ rd_kafka_assignment_subtract (rd_kafka_t *rk,
/* Remove partitions in reverse order to avoid excessive
* array shuffling of .all.
* Add the removed partitions to .pending for serve() to handle. */
for (i = partitions->cnt-1 ; i >= 0 ; i--) {
for (i = partitions->cnt - 1; i >= 0; i--) {
const rd_kafka_topic_partition_t *rktpar =
&partitions->elems[i];
&partitions->elems[i];

if (!rd_kafka_topic_partition_list_del(
rk->rk_consumer.assignment.all,
rktpar->topic, rktpar->partition))
RD_BUG("Removed partition %s [%"PRId32"] not found "
rk->rk_consumer.assignment.all, rktpar->topic,
rktpar->partition))
RD_BUG("Removed partition %s [%" PRId32
"] not found "
"in assignment.all",
rktpar->topic, rktpar->partition);

if (rd_kafka_topic_partition_list_del(
rk->rk_consumer.assignment.queried,
rktpar->topic, rktpar->partition))
rk->rk_consumer.assignment.queried, rktpar->topic,
rktpar->partition))
matched_queried_partitions++;
else
rd_kafka_topic_partition_list_del(
rk->rk_consumer.assignment.pending,
rktpar->topic, rktpar->partition);
rk->rk_consumer.assignment.pending, rktpar->topic,
rktpar->partition);

/* Add to .removed list which will be served by
* serve_removals(). */
rd_kafka_topic_partition_list_add_copy(
rk->rk_consumer.assignment.removed, rktpar);
rk->rk_consumer.assignment.removed, rktpar);
}

rd_kafka_dbg(rk, CGRP, "REMOVEASSIGN",
"Removed %d partition(s) "
"(%d with outstanding offset queries) from assignment "
"of %d partition(s)",
partitions->cnt,
matched_queried_partitions, assignment_pre_cnt);
partitions->cnt, matched_queried_partitions,
assignment_pre_cnt);

if (rk->rk_consumer.assignment.all->cnt == 0) {
/* Some safe checking */

@@ -867,8 +837,8 @@ rd_kafka_assignment_subtract (rd_kafka_t *rk,
/**
* @brief Call when partition fetcher has stopped.
*/
void rd_kafka_assignment_partition_stopped (rd_kafka_t *rk,
rd_kafka_toppar_t *rktp) {
void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk,
rd_kafka_toppar_t *rktp) {
rd_assert(rk->rk_consumer.assignment.wait_stop_cnt > 0);
rk->rk_consumer.assignment.wait_stop_cnt--;

@@ -895,7 +865,7 @@ void rd_kafka_assignment_partition_stopped (rd_kafka_t *rk,
* Partitions will be resumed by calling rd_kafka_assignment_resume() or
* from either serve_removals() or serve_pending() above.
*/
void rd_kafka_assignment_pause (rd_kafka_t *rk, const char *reason) {
void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason) {

if (rk->rk_consumer.assignment.all->cnt == 0)
return;

@@ -904,9 +874,7 @@ void rd_kafka_assignment_pause (rd_kafka_t *rk, const char *reason) {
"Pausing fetchers for %d assigned partition(s): %s",
rk->rk_consumer.assignment.all->cnt, reason);

rd_kafka_toppars_pause_resume(rk,
rd_true/*pause*/,
RD_ASYNC,
rd_kafka_toppars_pause_resume(rk, rd_true /*pause*/, RD_ASYNC,
RD_KAFKA_TOPPAR_F_LIB_PAUSE,
rk->rk_consumer.assignment.all);
}

@@ -915,7 +883,7 @@ void rd_kafka_assignment_pause (rd_kafka_t *rk, const char *reason) {
* @brief Resume fetching of the currently assigned partitions which have
* previously been paused by rd_kafka_assignment_pause().
*/
void rd_kafka_assignment_resume (rd_kafka_t *rk, const char *reason) {
void rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason) {

if (rk->rk_consumer.assignment.all->cnt == 0)
return;

@@ -924,9 +892,7 @@ void rd_kafka_assignment_resume (rd_kafka_t *rk, const char *reason) {
"Resuming fetchers for %d assigned partition(s): %s",
rk->rk_consumer.assignment.all->cnt, reason);

rd_kafka_toppars_pause_resume(rk,
rd_false/*resume*/,
RD_ASYNC,
rd_kafka_toppars_pause_resume(rk, rd_false /*resume*/, RD_ASYNC,
RD_KAFKA_TOPPAR_F_LIB_PAUSE,
rk->rk_consumer.assignment.all);
}

@@ -936,30 +902,28 @@ void rd_kafka_assignment_resume (rd_kafka_t *rk, const char *reason) {
/**
* @brief Destroy assignment state (but not \p assignment itself)
*/
void rd_kafka_assignment_destroy (rd_kafka_t *rk) {
void rd_kafka_assignment_destroy(rd_kafka_t *rk) {
if (!rk->rk_consumer.assignment.all)
return; /* rd_kafka_assignment_init() not called */
rd_kafka_topic_partition_list_destroy(rk->rk_consumer.assignment.all);
rd_kafka_topic_partition_list_destroy(
rk->rk_consumer.assignment.all);
rk->rk_consumer.assignment.pending);
rd_kafka_topic_partition_list_destroy(
rk->rk_consumer.assignment.pending);
rk->rk_consumer.assignment.queried);
rd_kafka_topic_partition_list_destroy(
rk->rk_consumer.assignment.queried);
rd_kafka_topic_partition_list_destroy(
rk->rk_consumer.assignment.removed);
rk->rk_consumer.assignment.removed);
}


/**
* @brief Initialize the assignment struct.
*/
void rd_kafka_assignment_init (rd_kafka_t *rk) {
rk->rk_consumer.assignment.all =
rd_kafka_topic_partition_list_new(100);
void rd_kafka_assignment_init(rd_kafka_t *rk) {
rk->rk_consumer.assignment.all = rd_kafka_topic_partition_list_new(100);
rk->rk_consumer.assignment.pending =
rd_kafka_topic_partition_list_new(100);
rd_kafka_topic_partition_list_new(100);
rk->rk_consumer.assignment.queried =
rd_kafka_topic_partition_list_new(100);
rd_kafka_topic_partition_list_new(100);
rk->rk_consumer.assignment.removed =
rd_kafka_topic_partition_list_new(100);
rd_kafka_topic_partition_list_new(100);
}

@@ -54,20 +54,20 @@ typedef struct rd_kafka_assignment_s {
} rd_kafka_assignment_t;


int rd_kafka_assignment_clear (rd_kafka_t *rk);
int rd_kafka_assignment_clear(rd_kafka_t *rk);
rd_kafka_error_t *
rd_kafka_assignment_add (rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *partitions);
rd_kafka_assignment_add(rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *partitions);
rd_kafka_error_t *
rd_kafka_assignment_subtract (rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *partitions);
void rd_kafka_assignment_partition_stopped (rd_kafka_t *rk,
rd_kafka_toppar_t *rktp);
void rd_kafka_assignment_pause (rd_kafka_t *rk, const char *reason);
void rd_kafka_assignment_resume (rd_kafka_t *rk, const char *reason);
void rd_kafka_assignment_serve (rd_kafka_t *rk);
rd_bool_t rd_kafka_assignment_in_progress (rd_kafka_t *rk);
void rd_kafka_assignment_destroy (rd_kafka_t *rk);
void rd_kafka_assignment_init (rd_kafka_t *rk);
rd_kafka_assignment_subtract(rd_kafka_t *rk,
rd_kafka_topic_partition_list_t *partitions);
void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk,
rd_kafka_toppar_t *rktp);
void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason);
void rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason);
void rd_kafka_assignment_serve(rd_kafka_t *rk);
rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk);
void rd_kafka_assignment_destroy(rd_kafka_t *rk);
void rd_kafka_assignment_init(rd_kafka_t *rk);

#endif /* _RDKAFKA_ASSIGNMENT_H_ */

The diff for this file is not shown because it is too large.

@@ -46,8 +46,6 @@ typedef enum rd_kafka_rebalance_protocol_t {


typedef struct rd_kafka_group_member_s {
/** Subscribed topics (partition field is ignored). */
rd_kafka_topic_partition_list_t *rkgm_subscription;

@@ -60,26 +58,25 @@ typedef struct rd_kafka_group_member_s {
rd_kafka_topic_partition_list_t *rkgm_owned;
/** List of eligible topics in subscription. E.g., subscribed topics
* that exist. */
rd_list_t rkgm_eligible;
rd_list_t rkgm_eligible;
/** Member id (e.g., client.id-some-uuid). */
rd_kafkap_str_t *rkgm_member_id;
rd_kafkap_str_t *rkgm_member_id;
/** Group instance id. */
rd_kafkap_str_t *rkgm_group_instance_id;
rd_kafkap_str_t *rkgm_group_instance_id;
/** Member-specific opaque userdata. */
rd_kafkap_bytes_t *rkgm_userdata;
rd_kafkap_bytes_t *rkgm_userdata;
/** Member metadata, e.g., the currently owned partitions. */
rd_kafkap_bytes_t *rkgm_member_metadata;
rd_kafkap_bytes_t *rkgm_member_metadata;
/** Group generation id. */
int rkgm_generation;
int rkgm_generation;
} rd_kafka_group_member_t;


int rd_kafka_group_member_cmp (const void *_a, const void *_b);
int rd_kafka_group_member_cmp(const void *_a, const void *_b);

int
rd_kafka_group_member_find_subscription (rd_kafka_t *rk,
const rd_kafka_group_member_t *rkgm,
const char *topic);
int rd_kafka_group_member_find_subscription(rd_kafka_t *rk,
const rd_kafka_group_member_t *rkgm,
const char *topic);


/**

@@ -88,126 +85,125 @@ rd_kafka_group_member_find_subscription (rd_kafka_t *rk,
*/
typedef struct rd_kafka_assignor_topic_s {
const rd_kafka_metadata_topic_t *metadata;
rd_list_t members; /* rd_kafka_group_member_t * */
rd_list_t members; /* rd_kafka_group_member_t * */
} rd_kafka_assignor_topic_t;


int rd_kafka_assignor_topic_cmp (const void *_a, const void *_b);
int rd_kafka_assignor_topic_cmp(const void *_a, const void *_b);


typedef struct rd_kafka_assignor_s {
rd_kafkap_str_t *rkas_protocol_type;
rd_kafkap_str_t *rkas_protocol_name;

int rkas_enabled;
int rkas_enabled;

rd_kafka_rebalance_protocol_t rkas_protocol;

rd_kafka_resp_err_t (*rkas_assign_cb) (
rd_kafka_t *rk,
const struct rd_kafka_assignor_s *rkas,
const char *member_id,
const rd_kafka_metadata_t *metadata,
rd_kafka_group_member_t *members,
size_t member_cnt,
rd_kafka_assignor_topic_t **eligible_topics,
size_t eligible_topic_cnt,
char *errstr,
size_t errstr_size,
void *opaque);
rd_kafka_resp_err_t (*rkas_assign_cb)(
rd_kafka_t *rk,
const struct rd_kafka_assignor_s *rkas,
const char *member_id,
const rd_kafka_metadata_t *metadata,
rd_kafka_group_member_t *members,
size_t member_cnt,
rd_kafka_assignor_topic_t **eligible_topics,
size_t eligible_topic_cnt,
char *errstr,
size_t errstr_size,
void *opaque);

rd_kafkap_bytes_t *(*rkas_get_metadata_cb) (
const struct rd_kafka_assignor_s *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t *owned_partitions);
rd_kafkap_bytes_t *(*rkas_get_metadata_cb)(
const struct rd_kafka_assignor_s *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t *owned_partitions);

void (*rkas_on_assignment_cb) (
const struct rd_kafka_assignor_s *rkas,
void **assignor_state,
const rd_kafka_topic_partition_list_t *assignment,
const rd_kafkap_bytes_t *assignment_userdata,
const rd_kafka_consumer_group_metadata_t *rkcgm);
void (*rkas_on_assignment_cb)(
const struct rd_kafka_assignor_s *rkas,
void **assignor_state,
const rd_kafka_topic_partition_list_t *assignment,
const rd_kafkap_bytes_t *assignment_userdata,
const rd_kafka_consumer_group_metadata_t *rkcgm);

void (*rkas_destroy_state_cb) (void *assignor_state);
void (*rkas_destroy_state_cb)(void *assignor_state);

int (*rkas_unittest) (void);
int (*rkas_unittest)(void);

void *rkas_opaque;
} rd_kafka_assignor_t;


rd_kafka_resp_err_t
rd_kafka_assignor_add (rd_kafka_t *rk,
const char *protocol_type,
const char *protocol_name,
rd_kafka_rebalance_protocol_t rebalance_protocol,
rd_kafka_resp_err_t (*assign_cb) (
rd_kafka_t *rk,
const struct rd_kafka_assignor_s *rkas,
const char *member_id,
const rd_kafka_metadata_t *metadata,
rd_kafka_group_member_t *members,
size_t member_cnt,
rd_kafka_assignor_topic_t **eligible_topics,
size_t eligible_topic_cnt,
char *errstr, size_t errstr_size, void *opaque),
rd_kafkap_bytes_t *(*get_metadata_cb) (
const struct rd_kafka_assignor_s *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t
*owned_partitions),
void (*on_assignment_cb) (
const struct rd_kafka_assignor_s *rkas,
void **assignor_state,
const rd_kafka_topic_partition_list_t *assignment,
const rd_kafkap_bytes_t *userdata,
const rd_kafka_consumer_group_metadata_t *rkcgm),
void (*destroy_state_cb) (void *assignor_state),
int (*unittest_cb) (void),
void *opaque);
rd_kafka_resp_err_t rd_kafka_assignor_add(
rd_kafka_t *rk,
const char *protocol_type,
const char *protocol_name,
rd_kafka_rebalance_protocol_t rebalance_protocol,
rd_kafka_resp_err_t (*assign_cb)(
rd_kafka_t *rk,
const struct rd_kafka_assignor_s *rkas,
const char *member_id,
const rd_kafka_metadata_t *metadata,
rd_kafka_group_member_t *members,
size_t member_cnt,
rd_kafka_assignor_topic_t **eligible_topics,
size_t eligible_topic_cnt,
char *errstr,
size_t errstr_size,
void *opaque),
rd_kafkap_bytes_t *(*get_metadata_cb)(
const struct rd_kafka_assignor_s *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t *owned_partitions),
void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas,
void **assignor_state,
const rd_kafka_topic_partition_list_t *assignment,
const rd_kafkap_bytes_t *userdata,
const rd_kafka_consumer_group_metadata_t *rkcgm),
void (*destroy_state_cb)(void *assignor_state),
int (*unittest_cb)(void),
void *opaque);

rd_kafkap_bytes_t *
rd_kafka_consumer_protocol_member_metadata_new (const rd_list_t *topics,
const void *userdata,
size_t userdata_size,
const rd_kafka_topic_partition_list_t
*owned_partitions);
rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
const rd_list_t *topics,
const void *userdata,
size_t userdata_size,
const rd_kafka_topic_partition_list_t *owned_partitions);

rd_kafkap_bytes_t *
rd_kafka_assignor_get_metadata_with_empty_userdata (const rd_kafka_assignor_t *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t
*owned_partitions);
rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata(
const rd_kafka_assignor_t *rkas,
void *assignor_state,
const rd_list_t *topics,
const rd_kafka_topic_partition_list_t *owned_partitions);


void rd_kafka_assignor_update_subscription (const rd_kafka_assignor_t *rkas,
const rd_kafka_topic_partition_list_t
*subscription);
void rd_kafka_assignor_update_subscription(
const rd_kafka_assignor_t *rkas,
const rd_kafka_topic_partition_list_t *subscription);


rd_kafka_resp_err_t
rd_kafka_assignor_run (struct rd_kafka_cgrp_s *rkcg,
const rd_kafka_assignor_t *rkas,
rd_kafka_metadata_t *metadata,
rd_kafka_group_member_t *members, int member_cnt,
char *errstr, size_t errstr_size);
rd_kafka_resp_err_t rd_kafka_assignor_run(struct rd_kafka_cgrp_s *rkcg,
const rd_kafka_assignor_t *rkas,
rd_kafka_metadata_t *metadata,
rd_kafka_group_member_t *members,
int member_cnt,
char *errstr,
size_t errstr_size);

rd_kafka_assignor_t *
rd_kafka_assignor_find (rd_kafka_t *rk, const char *protocol);
rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk,
const char *protocol);

int rd_kafka_assignors_init (rd_kafka_t *rk, char *errstr, size_t errstr_size);
void rd_kafka_assignors_term (rd_kafka_t *rk);
int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size);
void rd_kafka_assignors_term(rd_kafka_t *rk);


void rd_kafka_group_member_clear (rd_kafka_group_member_t *rkgm);
void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm);


rd_kafka_resp_err_t rd_kafka_range_assignor_init (rd_kafka_t *rk);
rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init (rd_kafka_t *rk);
rd_kafka_resp_err_t rd_kafka_sticky_assignor_init (rd_kafka_t *rk);
rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk);
rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk);
rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk);

#endif /* _RDKAFKA_ASSIGNOR_H_ */

@@ -32,17 +32,17 @@
#include "rdkafka_error.h"

rd_kafka_resp_err_t
rd_kafka_topic_result_error (const rd_kafka_topic_result_t *topicres) {
rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres) {
return topicres->err;
}

const char *
rd_kafka_topic_result_error_string (const rd_kafka_topic_result_t *topicres) {
rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres) {
return topicres->errstr;
}

const char *
rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres) {
rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres) {
return topicres->topic;
}

@@ -58,10 +58,10 @@ rd_kafka_topic_result_name (const rd_kafka_topic_result_t *topicres) {
* All input arguments are copied.
*/

rd_kafka_topic_result_t *
rd_kafka_topic_result_new (const char *topic, ssize_t topic_size,
rd_kafka_resp_err_t err,
const char *errstr) {
rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic,
ssize_t topic_size,
rd_kafka_resp_err_t err,
const char *errstr) {
size_t tlen = topic_size != -1 ? (size_t)topic_size : strlen(topic);
size_t elen = errstr ? strlen(errstr) + 1 : 0;
rd_kafka_topic_result_t *terr;

@@ -88,50 +88,46 @@ rd_kafka_topic_result_new (const char *topic, ssize_t topic_size,
/**
* @brief Destroy topic_result
*/
void rd_kafka_topic_result_destroy (rd_kafka_topic_result_t *terr) {
void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr) {
rd_free(terr);
}

/**
* @brief Destroy-variant suitable for rd_list free_cb use.
*/
void rd_kafka_topic_result_free (void *ptr) {
void rd_kafka_topic_result_free(void *ptr) {
rd_kafka_topic_result_destroy((rd_kafka_topic_result_t *)ptr);
}

const rd_kafka_error_t *
rd_kafka_group_result_error (const rd_kafka_group_result_t *groupres) {
rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres) {
return groupres->error;
}

const char *
rd_kafka_group_result_name (const rd_kafka_group_result_t *groupres) {
rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres) {
return groupres->group;
}

const rd_kafka_topic_partition_list_t *
rd_kafka_group_result_partitions (const rd_kafka_group_result_t *groupres) {
rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres) {
return groupres->partitions;
}

rd_kafka_group_result_t *
rd_kafka_group_result_copy (const rd_kafka_group_result_t *groupres) {
return rd_kafka_group_result_new(groupres->group,
-1,
groupres->partitions,
groupres->error ?
rd_kafka_error_copy(groupres->error) :
NULL);
rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres) {
return rd_kafka_group_result_new(
groupres->group, -1, groupres->partitions,
groupres->error ? rd_kafka_error_copy(groupres->error) : NULL);
}

/**
* @brief Same as rd_kafka_group_result_copy() but suitable for
* rd_list_copy(). The \p opaque is ignored.
*/
void *
rd_kafka_group_result_copy_opaque (const void *src_groupres,
void *opaque) {
return rd_kafka_group_result_copy(src_groupres);
void *rd_kafka_group_result_copy_opaque(const void *src_groupres,
void *opaque) {
return rd_kafka_group_result_copy(src_groupres);
}


@@ -147,9 +143,10 @@ rd_kafka_group_result_copy_opaque (const void *src_groupres,
*/

rd_kafka_group_result_t *
rd_kafka_group_result_new (const char *group, ssize_t group_size,
const rd_kafka_topic_partition_list_t *partitions,
rd_kafka_error_t *error) {
rd_kafka_group_result_new(const char *group,
ssize_t group_size,
const rd_kafka_topic_partition_list_t *partitions,
rd_kafka_error_t *error) {
size_t glen = group_size != -1 ? (size_t)group_size : strlen(group);
rd_kafka_group_result_t *groupres;


@@ -161,8 +158,8 @@ rd_kafka_group_result_new (const char *group, ssize_t group_size,
groupres->group[glen] = '\0';

if (partitions)
groupres->partitions = rd_kafka_topic_partition_list_copy(
partitions);
groupres->partitions =
rd_kafka_topic_partition_list_copy(partitions);

groupres->error = error;


@@ -170,10 +167,10 @@ rd_kafka_group_result_new (const char *group, ssize_t group_size,
}


/**
/**
* @brief Destroy group_result
*/
void rd_kafka_group_result_destroy (rd_kafka_group_result_t *groupres) {
void rd_kafka_group_result_destroy(rd_kafka_group_result_t *groupres) {
if (groupres->partitions)
rd_kafka_topic_partition_list_destroy(groupres->partitions);
if (groupres->error)

@@ -181,9 +178,9 @@ void rd_kafka_group_result_destroy (rd_kafka_group_result_t *groupres) {
rd_free(groupres);
}

/**
/**
* @brief Destroy-variant suitable for rd_list free_cb use.
*/
void rd_kafka_group_result_free (void *ptr) {
void rd_kafka_group_result_free(void *ptr) {
rd_kafka_group_result_destroy((rd_kafka_group_result_t *)ptr);
}

@@ -47,16 +47,16 @@ struct rd_kafka_topic_result_s {
char *topic; /**< Points to data */
rd_kafka_resp_err_t err; /**< Error code */
char *errstr; /**< Points to data after topic, unless NULL */
char data[1]; /**< topic followed by errstr */
char data[1]; /**< topic followed by errstr */
};

void rd_kafka_topic_result_destroy (rd_kafka_topic_result_t *terr);
void rd_kafka_topic_result_free (void *ptr);
void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr);
void rd_kafka_topic_result_free(void *ptr);

rd_kafka_topic_result_t *
rd_kafka_topic_result_new (const char *topic, ssize_t topic_size,
rd_kafka_resp_err_t err,
const char *errstr);
rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic,
ssize_t topic_size,
rd_kafka_resp_err_t err,
const char *errstr);

/**@}*/

@@ -71,22 +71,21 @@ struct rd_kafka_group_result_s {
rd_kafka_error_t *error; /**< Error object, or NULL on success */
/** Partitions, used by DeleteConsumerGroupOffsets. */
rd_kafka_topic_partition_list_t *partitions;
char data[1]; /**< Group name */
char data[1]; /**< Group name */
};

void rd_kafka_group_result_destroy (rd_kafka_group_result_t *terr);
void rd_kafka_group_result_free (void *ptr);
void rd_kafka_group_result_destroy(rd_kafka_group_result_t *terr);
void rd_kafka_group_result_free(void *ptr);

rd_kafka_group_result_t *
rd_kafka_group_result_new (const char *group, ssize_t group_size,
const rd_kafka_topic_partition_list_t *partitions,
rd_kafka_error_t *error);
rd_kafka_group_result_new(const char *group,
ssize_t group_size,
const rd_kafka_topic_partition_list_t *partitions,
rd_kafka_error_t *error);

rd_kafka_group_result_t *
rd_kafka_group_result_copy (const rd_kafka_group_result_t *groupres);
void *
rd_kafka_group_result_copy_opaque (const void *src_groupres,
void *opaque);
rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres);
void *rd_kafka_group_result_copy_opaque(const void *src_groupres, void *opaque);
/**@}*/

#endif /* _RDKAFKA_AUX_H_ */

src/rdkafka_background.c

@@ -43,8 +43,8 @@
 * @brief Call the registered background_event_cb.
 * @locality rdkafka background queue thread
 */
- static RD_INLINE void
- rd_kafka_call_background_event_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) {
+ static RD_INLINE void rd_kafka_call_background_event_cb(rd_kafka_t *rk,
+ rd_kafka_op_t *rko) {
rd_assert(!rk->rk_background.calling);
rk->rk_background.calling = 1;

@@ -64,11 +64,11 @@ rd_kafka_call_background_event_cb (rd_kafka_t *rk, rd_kafka_op_t *rko) {
 * APIs to the background queue.
 */
static rd_kafka_op_res_t
- rd_kafka_background_queue_serve (rd_kafka_t *rk,
- rd_kafka_q_t *rkq,
- rd_kafka_op_t *rko,
- rd_kafka_q_cb_type_t cb_type,
- void *opaque) {
+ rd_kafka_background_queue_serve(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque) {
rd_kafka_op_res_t res;

/*

@@ -109,7 +109,7 @@ rd_kafka_background_queue_serve (rd_kafka_t *rk,
/**
 * @brief Main loop for background queue thread.
 */
- int rd_kafka_background_thread_main (void *arg) {
+ int rd_kafka_background_thread_main(void *arg) {
rd_kafka_t *rk = arg;

rd_kafka_set_thread_name("background");

@@ -130,7 +130,7 @@ int rd_kafka_background_thread_main (void *arg) {
mtx_unlock(&rk->rk_init_lock);

while (likely(!rd_kafka_terminating(rk))) {
- rd_kafka_q_serve(rk->rk_background.q, 10*1000, 0,
+ rd_kafka_q_serve(rk->rk_background.q, 10 * 1000, 0,
RD_KAFKA_Q_CB_RETURN,
rd_kafka_background_queue_serve, NULL);
}

@@ -144,8 +144,7 @@ int rd_kafka_background_thread_main (void *arg) {
rd_kafka_q_disable(rk->rk_background.q);
rd_kafka_q_purge(rk->rk_background.q);

- rd_kafka_dbg(rk, GENERIC, "BGQUEUE",
- "Background queue thread exiting");
+ rd_kafka_dbg(rk, GENERIC, "BGQUEUE", "Background queue thread exiting");

rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_BACKGROUND);

@@ -161,9 +160,9 @@ int rd_kafka_background_thread_main (void *arg) {
 * @locks_acquired rk_init_lock
 * @locks_required rd_kafka_wrlock()
 */
- rd_kafka_resp_err_t rd_kafka_background_thread_create (rd_kafka_t *rk,
- char *errstr,
- size_t errstr_size) {
+ rd_kafka_resp_err_t rd_kafka_background_thread_create(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size) {
#ifndef _WIN32
sigset_t newset, oldset;
#endif

@@ -188,9 +187,8 @@ rd_kafka_resp_err_t rd_kafka_background_thread_create (rd_kafka_t *rk,
sigemptyset(&oldset);
sigfillset(&newset);
if (rk->rk_conf.term_sig) {
- struct sigaction sa_term = {
- .sa_handler = rd_kafka_term_sig_handler
- };
+ struct sigaction sa_term = {.sa_handler =
+ rd_kafka_term_sig_handler};
sigaction(rk->rk_conf.term_sig, &sa_term, NULL);
}
pthread_sigmask(SIG_SETMASK, &newset, &oldset);

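The last hunk above touches the signal handling around background-thread creation: all signals are blocked in the creating thread so the new thread inherits a fully blocked mask, and the caller's mask is restored afterwards. A self-contained POSIX sketch of that same pattern (generic pthreads, not librdkafka's thrd_create-based code):

    #include <pthread.h>
    #include <signal.h>

    /* Spawn a thread that will never receive asynchronous signals:
     * block everything, create (the mask is inherited), then restore. */
    static int spawn_with_signals_blocked(pthread_t *thr,
                                          void *(*fn)(void *), void *arg) {
            sigset_t all, old;
            int err;

            sigfillset(&all);
            pthread_sigmask(SIG_SETMASK, &all, &old);
            err = pthread_create(thr, NULL, fn, arg);
            pthread_sigmask(SIG_SETMASK, &old, NULL);
            return err;
    }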
src/rdkafka_broker.c — 4089 lines changed; the diff for this file is not shown because of its size.
src/rdkafka_broker.h

@@ -3,24 +3,24 @@
 *
 * Copyright (c) 2012,2013 Magnus Edenhill
 * All rights reserved.
- *
+ *
 * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
+ * modification, are permitted provided that the following conditions are met:
+ *
 * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
+ * this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
+ * and/or other materials provided with the distribution.
+ *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.

@@ -39,7 +39,7 @@ extern const char *rd_kafka_secproto_names[];
/**
 * @enum Broker states
 */
- typedef enum {
+ typedef enum {
RD_KAFKA_BROKER_STATE_INIT,
RD_KAFKA_BROKER_STATE_DOWN,
RD_KAFKA_BROKER_STATE_TRY_CONNECT,

@@ -70,7 +70,7 @@ typedef struct rd_kafka_broker_monitor_s {
/**< Callback triggered on the monitoree's op handler thread.
 * Do note that the callback might be triggered even after
 * it has been deleted due to the queueing nature of op queues. */
- void (*rkbmon_cb) (rd_kafka_broker_t *rkb);
+ void (*rkbmon_cb)(rd_kafka_broker_t *rkb);
} rd_kafka_broker_monitor_t;

@@ -78,35 +78,35 @@ typedef struct rd_kafka_broker_monitor_s {
 * @struct Broker instance
 */
struct rd_kafka_broker_s { /* rd_kafka_broker_t */
- TAILQ_ENTRY(rd_kafka_broker_s) rkb_link;
+ TAILQ_ENTRY(rd_kafka_broker_s) rkb_link;

- int32_t rkb_nodeid; /**< Broker Node Id.
- * @locks rkb_lock */
+ int32_t rkb_nodeid; /**< Broker Node Id.
+ * @locks rkb_lock */
#define RD_KAFKA_NODEID_UA -1

- rd_sockaddr_list_t *rkb_rsal;
- rd_ts_t rkb_ts_rsal_last;
- const rd_sockaddr_inx_t *rkb_addr_last; /* Last used connect address */
+ rd_sockaddr_list_t *rkb_rsal;
+ rd_ts_t rkb_ts_rsal_last;
+ const rd_sockaddr_inx_t *rkb_addr_last; /* Last used connect address */

- rd_kafka_transport_t *rkb_transport;
+ rd_kafka_transport_t *rkb_transport;

- uint32_t rkb_corrid;
- int rkb_connid; /* Connection id, increased by
- * one for each connection by
- * this broker. Used as a safe-guard
- * to help troubleshooting buffer
- * problems across disconnects. */
+ uint32_t rkb_corrid;
+ int rkb_connid; /* Connection id, increased by
+ * one for each connection by
+ * this broker. Used as a safe-guard
+ * to help troubleshooting buffer
+ * problems across disconnects. */

- rd_kafka_q_t *rkb_ops;
+ rd_kafka_q_t *rkb_ops;

- mtx_t rkb_lock;
+ mtx_t rkb_lock;

- int rkb_blocking_max_ms; /* Maximum IO poll blocking
- * time. */
+ int rkb_blocking_max_ms; /* Maximum IO poll blocking
+ * time. */

/* Toppars handled by this broker */
- TAILQ_HEAD(, rd_kafka_toppar_s) rkb_toppars;
- int rkb_toppar_cnt;
+ TAILQ_HEAD(, rd_kafka_toppar_s) rkb_toppars;
+ int rkb_toppar_cnt;

/* Active toppars that are eligible for:
 * - (consumer) fetching due to underflow

@@ -115,138 +115,139 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
 * The circleq provides round-robin scheduling for both cases.
 */
CIRCLEQ_HEAD(, rd_kafka_toppar_s) rkb_active_toppars;
- int rkb_active_toppar_cnt;
- rd_kafka_toppar_t *rkb_active_toppar_next; /* Next 'first' toppar
- * in fetch list.
- * This is used for
- * round-robin. */
+ int rkb_active_toppar_cnt;
+ rd_kafka_toppar_t *rkb_active_toppar_next; /* Next 'first' toppar
+ * in fetch list.
+ * This is used for
+ * round-robin. */

- rd_kafka_cgrp_t *rkb_cgrp;
+ rd_kafka_cgrp_t *rkb_cgrp;

- rd_ts_t rkb_ts_fetch_backoff;
- int rkb_fetching;
+ rd_ts_t rkb_ts_fetch_backoff;
+ int rkb_fetching;

- rd_kafka_broker_state_t rkb_state; /**< Current broker state */
+ rd_kafka_broker_state_t rkb_state; /**< Current broker state */

- rd_ts_t rkb_ts_state; /* Timestamp of last
- * state change */
- rd_interval_t rkb_timeout_scan_intvl; /* Waitresp timeout scan
- * interval. */
+ rd_ts_t rkb_ts_state; /* Timestamp of last
+ * state change */
+ rd_interval_t rkb_timeout_scan_intvl; /* Waitresp timeout scan
+ * interval. */

- rd_atomic32_t rkb_blocking_request_cnt; /* The number of
- * in-flight blocking
- * requests.
- * A blocking request is
- * one that is known to
- * possibly block on the
- * broker for longer than
- * the typical processing
- * time, e.g.:
- * JoinGroup, SyncGroup */
+ rd_atomic32_t rkb_blocking_request_cnt; /* The number of
+ * in-flight blocking
+ * requests.
+ * A blocking request is
+ * one that is known to
+ * possibly block on the
+ * broker for longer than
+ * the typical processing
+ * time, e.g.:
+ * JoinGroup, SyncGroup */

- int rkb_features; /* Protocol features supported
- * by this broker.
- * See RD_KAFKA_FEATURE_* in
- * rdkafka_proto.h */
+ int rkb_features; /* Protocol features supported
+ * by this broker.
+ * See RD_KAFKA_FEATURE_* in
+ * rdkafka_proto.h */

struct rd_kafka_ApiVersion *rkb_ApiVersions; /* Broker's supported APIs
 * (MUST be sorted) */
- size_t rkb_ApiVersions_cnt;
- rd_interval_t rkb_ApiVersion_fail_intvl; /* Controls how long
- * the fallback proto
- * will be used after
- * ApiVersionRequest
- * failure. */
+ size_t rkb_ApiVersions_cnt;
+ rd_interval_t rkb_ApiVersion_fail_intvl; /* Controls how long
+ * the fallback proto
+ * will be used after
+ * ApiVersionRequest
+ * failure. */

- rd_kafka_confsource_t rkb_source;
- struct {
- rd_atomic64_t tx_bytes;
- rd_atomic64_t tx; /**< Kafka requests */
- rd_atomic64_t tx_err;
- rd_atomic64_t tx_retries;
- rd_atomic64_t req_timeouts; /* Accumulated value */
+ rd_kafka_confsource_t rkb_source;
+ struct {
+ rd_atomic64_t tx_bytes;
+ rd_atomic64_t tx; /**< Kafka requests */
+ rd_atomic64_t tx_err;
+ rd_atomic64_t tx_retries;
+ rd_atomic64_t req_timeouts; /* Accumulated value */

- rd_atomic64_t rx_bytes;
- rd_atomic64_t rx; /**< Kafka responses */
- rd_atomic64_t rx_err;
- rd_atomic64_t rx_corrid_err; /* CorrId misses */
- rd_atomic64_t rx_partial; /* Partial messages received
+ rd_atomic64_t rx_bytes;
+ rd_atomic64_t rx; /**< Kafka responses */
+ rd_atomic64_t rx_err;
+ rd_atomic64_t rx_corrid_err; /* CorrId misses */
+ rd_atomic64_t rx_partial; /* Partial messages received
 * and dropped. */
- rd_atomic64_t zbuf_grow; /* Compression/decompression buffer grows needed */
+ rd_atomic64_t zbuf_grow; /* Compression/decompression buffer
+ grows needed */
rd_atomic64_t buf_grow; /* rkbuf grows needed */
rd_atomic64_t wakeups; /* Poll wakeups */

- rd_atomic32_t connects; /**< Connection attempts,
- * successful or not. */
+ rd_atomic32_t connects; /**< Connection attempts,
+ * successful or not. */

- rd_atomic32_t disconnects; /**< Disconnects.
- * Always peer-triggered. */
+ rd_atomic32_t disconnects; /**< Disconnects.
+ * Always peer-triggered. */

rd_atomic64_t reqtype[RD_KAFKAP__NUM]; /**< Per request-type
 * counter */

- rd_atomic64_t ts_send; /**< Timestamp of last send */
- rd_atomic64_t ts_recv; /**< Timestamp of last receive */
- } rkb_c;
+ rd_atomic64_t ts_send; /**< Timestamp of last send */
+ rd_atomic64_t ts_recv; /**< Timestamp of last receive */
+ } rkb_c;

- int rkb_req_timeouts; /* Current value */
+ int rkb_req_timeouts; /* Current value */

- thrd_t rkb_thread;
+ thrd_t rkb_thread;

- rd_refcnt_t rkb_refcnt;
+ rd_refcnt_t rkb_refcnt;

- rd_kafka_t *rkb_rk;
+ rd_kafka_t *rkb_rk;

- rd_kafka_buf_t *rkb_recv_buf;
+ rd_kafka_buf_t *rkb_recv_buf;

- int rkb_max_inflight; /* Maximum number of in-flight
- * requests to broker.
- * Compared to rkb_waitresps length.*/
- rd_kafka_bufq_t rkb_outbufs;
- rd_kafka_bufq_t rkb_waitresps;
- rd_kafka_bufq_t rkb_retrybufs;
+ int rkb_max_inflight; /* Maximum number of in-flight
+ * requests to broker.
+ * Compared to rkb_waitresps length.*/
+ rd_kafka_bufq_t rkb_outbufs;
+ rd_kafka_bufq_t rkb_waitresps;
+ rd_kafka_bufq_t rkb_retrybufs;

- rd_avg_t rkb_avg_int_latency;/* Current internal latency period*/
- rd_avg_t rkb_avg_outbuf_latency; /**< Current latency
- * between buf_enq0
- * and writing to socket
- */
- rd_avg_t rkb_avg_rtt; /* Current RTT period */
- rd_avg_t rkb_avg_throttle; /* Current throttle period */
+ rd_avg_t rkb_avg_int_latency; /* Current internal latency period*/
+ rd_avg_t rkb_avg_outbuf_latency; /**< Current latency
+ * between buf_enq0
+ * and writing to socket
+ */
+ rd_avg_t rkb_avg_rtt; /* Current RTT period */
+ rd_avg_t rkb_avg_throttle; /* Current throttle period */

/* These are all protected by rkb_lock */
- char rkb_name[RD_KAFKA_NODENAME_SIZE]; /* Displ name */
- char rkb_nodename[RD_KAFKA_NODENAME_SIZE]; /* host:port*/
- uint16_t rkb_port; /* TCP port */
- char *rkb_origname; /* Original
- * host name */
- int rkb_nodename_epoch; /**< Bumped each time
- * the nodename is changed.
- * Compared to
- * rkb_connect_epoch
- * to trigger a reconnect
- * for logical broker
- * when the nodename is
- * updated. */
- int rkb_connect_epoch; /**< The value of
- * rkb_nodename_epoch at the
- * last connection attempt.
- */
+ char rkb_name[RD_KAFKA_NODENAME_SIZE]; /* Displ name */
+ char rkb_nodename[RD_KAFKA_NODENAME_SIZE]; /* host:port*/
+ uint16_t rkb_port; /* TCP port */
+ char *rkb_origname; /* Original
+ * host name */
+ int rkb_nodename_epoch; /**< Bumped each time
+ * the nodename is changed.
+ * Compared to
+ * rkb_connect_epoch
+ * to trigger a reconnect
+ * for logical broker
+ * when the nodename is
+ * updated. */
+ int rkb_connect_epoch; /**< The value of
+ * rkb_nodename_epoch at the
+ * last connection attempt.
+ */

/* Logging name is a copy of rkb_name, protected by its own mutex */
- char *rkb_logname;
- mtx_t rkb_logname_lock;
+ char *rkb_logname;
+ mtx_t rkb_logname_lock;

- rd_socket_t rkb_wakeup_fd[2]; /* Wake-up fds (r/w) to wake
- * up from IO-wait when
- * queues have content. */
+ rd_socket_t rkb_wakeup_fd[2]; /* Wake-up fds (r/w) to wake
+ * up from IO-wait when
+ * queues have content. */

/**< Current, exponentially increased, reconnect backoff. */
- int rkb_reconnect_backoff_ms;
+ int rkb_reconnect_backoff_ms;

/**< Absolute timestamp of next allowed reconnect. */
- rd_ts_t rkb_ts_reconnect;
+ rd_ts_t rkb_ts_reconnect;

/**< Persistent connection demand is tracked by
 * an counter for each type of demand.

@@ -284,11 +285,11 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
 * Will trigger the coord_req fsm on broker state change. */
rd_kafka_broker_monitor_t rkb_coord_monitor;

- rd_kafka_secproto_t rkb_proto;
+ rd_kafka_secproto_t rkb_proto;

- int rkb_down_reported; /* Down event reported */
+ int rkb_down_reported; /* Down event reported */
#if WITH_SASL_CYRUS
- rd_kafka_timer_t rkb_sasl_kinit_refresh_tmr;
+ rd_kafka_timer_t rkb_sasl_kinit_refresh_tmr;
#endif

@@ -313,12 +314,12 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
struct {
char errstr[512]; /**< Last error string */
rd_kafka_resp_err_t err; /**< Last error code */
- int cnt; /**< Number of identical errors */
+ int cnt; /**< Number of identical errors */
} rkb_last_err;
};

- #define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt)
- #define rd_kafka_broker_keep_fl(FUNC,LINE,RKB) \
+ #define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt)
+ #define rd_kafka_broker_keep_fl(FUNC, LINE, RKB) \
rd_refcnt_add_fl(FUNC, LINE, &(RKB)->rkb_refcnt)
#define rd_kafka_broker_lock(rkb) mtx_lock(&(rkb)->rkb_lock)
#define rd_kafka_broker_unlock(rkb) mtx_unlock(&(rkb)->rkb_lock)

@@ -330,8 +331,8 @@ struct rd_kafka_broker_s { /* rd_kafka_broker_t */
 * @locks broker_lock MUST NOT be held.
 * @locality any
 */
- static RD_INLINE RD_UNUSED
- rd_kafka_broker_state_t rd_kafka_broker_get_state (rd_kafka_broker_t *rkb) {
+ static RD_INLINE RD_UNUSED rd_kafka_broker_state_t
+ rd_kafka_broker_get_state(rd_kafka_broker_t *rkb) {
rd_kafka_broker_state_t state;
rd_kafka_broker_lock(rkb);
state = rkb->rkb_state;

@@ -341,12 +342,11 @@ rd_kafka_broker_state_t rd_kafka_broker_get_state (rd_kafka_broker_t *rkb) {

/**
 * @returns true if the broker state is UP or UPDATE
 */
- #define rd_kafka_broker_state_is_up(state) \
- ((state) == RD_KAFKA_BROKER_STATE_UP || \
+ #define rd_kafka_broker_state_is_up(state) \
+ ((state) == RD_KAFKA_BROKER_STATE_UP || \
(state) == RD_KAFKA_BROKER_STATE_UPDATE)

@@ -356,7 +356,7 @@ rd_kafka_broker_state_t rd_kafka_broker_get_state (rd_kafka_broker_t *rkb) {
 * @locality any
 */
static RD_UNUSED RD_INLINE rd_bool_t
- rd_kafka_broker_is_up (rd_kafka_broker_t *rkb) {
+ rd_kafka_broker_is_up(rd_kafka_broker_t *rkb) {
rd_kafka_broker_state_t state = rd_kafka_broker_get_state(rkb);
return rd_kafka_broker_state_is_up(state);
}

@@ -365,8 +365,8 @@ rd_kafka_broker_is_up (rd_kafka_broker_t *rkb) {
/**
 * @brief Broker comparator
 */
- static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp (const void *_a,
- const void *_b) {
+ static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp(const void *_a,
+ const void *_b) {
const rd_kafka_broker_t *a = _a, *b = _b;
return RD_CMP(a, b);
}

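rd_kafka_broker_get_state() above is the usual lock-snapshot-unlock getter: the broker lock protects rkb_state and the caller receives a consistent snapshot without holding the lock afterwards. A stripped-down sketch of the same pattern with C11 threads (illustrative types, not the library's):

    #include <threads.h>

    typedef enum { STATE_INIT, STATE_UP, STATE_DOWN } state_t;

    struct obj {
            mtx_t lock;
            state_t state; /* protected by lock */
    };

    static state_t obj_get_state(struct obj *o) {
            state_t s;
            mtx_lock(&o->lock);
            s = o->state; /* snapshot under lock */
            mtx_unlock(&o->lock);
            return s;
    }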
@@ -375,174 +375,185 @@ static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp (const void *_a,
/**
 * @returns true if broker supports \p features, else false.
 */
- static RD_UNUSED
- int rd_kafka_broker_supports (rd_kafka_broker_t *rkb, int features) {
+ static RD_UNUSED int rd_kafka_broker_supports(rd_kafka_broker_t *rkb,
+ int features) {
const rd_bool_t do_lock = !thrd_is_current(rkb->rkb_thread);
- int r;
+ int r;

if (do_lock)
rd_kafka_broker_lock(rkb);

- r = (rkb->rkb_features & features) == features;
+ r = (rkb->rkb_features & features) == features;

if (do_lock)
rd_kafka_broker_unlock(rkb);
- return r;
+ return r;
}

- int16_t rd_kafka_broker_ApiVersion_supported (rd_kafka_broker_t *rkb,
- int16_t ApiKey,
- int16_t minver, int16_t maxver,
- int *featuresp);
+ int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb,
+ int16_t ApiKey,
+ int16_t minver,
+ int16_t maxver,
+ int *featuresp);

- rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl (const char *func,
- int line,
- rd_kafka_t *rk,
- int32_t nodeid,
- int state,
- rd_bool_t do_connect);
+ rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ int32_t nodeid,
+ int state,
+ rd_bool_t do_connect);

- #define rd_kafka_broker_find_by_nodeid0(rk,nodeid,state,do_connect) \
- rd_kafka_broker_find_by_nodeid0_fl(__FUNCTION__,__LINE__, \
- rk,nodeid,state,do_connect)
- #define rd_kafka_broker_find_by_nodeid(rk,nodeid) \
- rd_kafka_broker_find_by_nodeid0(rk,nodeid,-1,rd_false)
+ #define rd_kafka_broker_find_by_nodeid0(rk, nodeid, state, do_connect) \
+ rd_kafka_broker_find_by_nodeid0_fl(__FUNCTION__, __LINE__, rk, nodeid, \
+ state, do_connect)
+ #define rd_kafka_broker_find_by_nodeid(rk, nodeid) \
+ rd_kafka_broker_find_by_nodeid0(rk, nodeid, -1, rd_false)

/**
 * Filter out brokers that don't support Idempotent Producer.
 */
static RD_INLINE RD_UNUSED int
- rd_kafka_broker_filter_non_idempotent (rd_kafka_broker_t *rkb, void *opaque) {
+ rd_kafka_broker_filter_non_idempotent(rd_kafka_broker_t *rkb, void *opaque) {
return !(rkb->rkb_features & RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER);
}

- rd_kafka_broker_t *rd_kafka_broker_any (rd_kafka_t *rk, int state,
- int (*filter) (rd_kafka_broker_t *rkb,
- void *opaque),
- void *opaque, const char *reason);
- rd_kafka_broker_t *
- rd_kafka_broker_any_up (rd_kafka_t *rk,
- int *filtered_cnt,
- int (*filter) (rd_kafka_broker_t *rkb,
- void *opaque),
- void *opaque, const char *reason);
- rd_kafka_broker_t *rd_kafka_broker_any_usable (rd_kafka_t *rk, int timeout_ms,
- rd_dolock_t do_lock,
- int features,
- const char *reason);

- rd_kafka_broker_t *rd_kafka_broker_prefer (rd_kafka_t *rk, int32_t broker_id,
- int state);
+ rd_kafka_broker_t *rd_kafka_broker_any(rd_kafka_t *rk,
+ int state,
+ int (*filter)(rd_kafka_broker_t *rkb,
+ void *opaque),
+ void *opaque,
+ const char *reason);
+ rd_kafka_broker_t *rd_kafka_broker_any_up(rd_kafka_t *rk,
+ int *filtered_cnt,
+ int (*filter)(rd_kafka_broker_t *rkb,
+ void *opaque),
+ void *opaque,
+ const char *reason);
+ rd_kafka_broker_t *rd_kafka_broker_any_usable(rd_kafka_t *rk,
+ int timeout_ms,
+ rd_dolock_t do_lock,
+ int features,
+ const char *reason);

rd_kafka_broker_t *
- rd_kafka_broker_get_async (rd_kafka_t *rk, int32_t broker_id, int state,
- rd_kafka_enq_once_t *eonce);
+ rd_kafka_broker_prefer(rd_kafka_t *rk, int32_t broker_id, int state);

+ rd_kafka_broker_t *rd_kafka_broker_get_async(rd_kafka_t *rk,
+ int32_t broker_id,
+ int state,
+ rd_kafka_enq_once_t *eonce);

- rd_kafka_broker_t *rd_kafka_broker_controller (rd_kafka_t *rk, int state,
- rd_ts_t abs_timeout);
rd_kafka_broker_t *
- rd_kafka_broker_controller_async (rd_kafka_t *rk, int state,
- rd_kafka_enq_once_t *eonce);
+ rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout);
+ rd_kafka_broker_t *rd_kafka_broker_controller_async(rd_kafka_t *rk,
+ int state,
+ rd_kafka_enq_once_t *eonce);

- int rd_kafka_brokers_add0 (rd_kafka_t *rk, const char *brokerlist);
- void rd_kafka_broker_set_state (rd_kafka_broker_t *rkb, int state);
+ int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist);
+ void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state);

- void rd_kafka_broker_fail (rd_kafka_broker_t *rkb,
- int level, rd_kafka_resp_err_t err,
- const char *fmt, ...) RD_FORMAT(printf, 4, 5);
+ void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
+ int level,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 4, 5);

- void rd_kafka_broker_conn_closed (rd_kafka_broker_t *rkb,
- rd_kafka_resp_err_t err,
- const char *errstr);
+ void rd_kafka_broker_conn_closed(rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ const char *errstr);

- void rd_kafka_broker_destroy_final (rd_kafka_broker_t *rkb);
+ void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb);

- #define rd_kafka_broker_destroy(rkb) \
- rd_refcnt_destroywrapper(&(rkb)->rkb_refcnt, \
+ #define rd_kafka_broker_destroy(rkb) \
+ rd_refcnt_destroywrapper(&(rkb)->rkb_refcnt, \
rd_kafka_broker_destroy_final(rkb))

- void
- rd_kafka_broker_update (rd_kafka_t *rk, rd_kafka_secproto_t proto,
- const struct rd_kafka_metadata_broker *mdb,
- rd_kafka_broker_t **rkbp);
- rd_kafka_broker_t *rd_kafka_broker_add (rd_kafka_t *rk,
- rd_kafka_confsource_t source,
- rd_kafka_secproto_t proto,
- const char *name, uint16_t port,
- int32_t nodeid);
+ void rd_kafka_broker_update(rd_kafka_t *rk,
+ rd_kafka_secproto_t proto,
+ const struct rd_kafka_metadata_broker *mdb,
+ rd_kafka_broker_t **rkbp);
+ rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk,
+ rd_kafka_confsource_t source,
+ rd_kafka_secproto_t proto,
+ const char *name,
+ uint16_t port,
+ int32_t nodeid);

- rd_kafka_broker_t *rd_kafka_broker_add_logical (rd_kafka_t *rk,
- const char *name);
+ rd_kafka_broker_t *rd_kafka_broker_add_logical(rd_kafka_t *rk,
+ const char *name);

/** @define returns true if broker is logical. No locking is needed. */
#define RD_KAFKA_BROKER_IS_LOGICAL(rkb) ((rkb)->rkb_source == RD_KAFKA_LOGICAL)

- void rd_kafka_broker_set_nodename (rd_kafka_broker_t *rkb,
- rd_kafka_broker_t *from_rkb);
+ void rd_kafka_broker_set_nodename(rd_kafka_broker_t *rkb,
+ rd_kafka_broker_t *from_rkb);

- void rd_kafka_broker_connect_up (rd_kafka_broker_t *rkb);
- void rd_kafka_broker_connect_done (rd_kafka_broker_t *rkb, const char *errstr);
+ void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb);
+ void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr);

- int rd_kafka_send (rd_kafka_broker_t *rkb);
- int rd_kafka_recv (rd_kafka_broker_t *rkb);
+ int rd_kafka_send(rd_kafka_broker_t *rkb);
+ int rd_kafka_recv(rd_kafka_broker_t *rkb);

- void rd_kafka_dr_msgq (rd_kafka_topic_t *rkt,
- rd_kafka_msgq_t *rkmq, rd_kafka_resp_err_t err);
+ void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_resp_err_t err);

- void rd_kafka_dr_implicit_ack (rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- uint64_t last_msgid);
+ void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ uint64_t last_msgid);

- void rd_kafka_broker_buf_enq1 (rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
+ void rd_kafka_broker_buf_enq1(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);

- void rd_kafka_broker_buf_enq_replyq (rd_kafka_broker_t *rkb,
- rd_kafka_buf_t *rkbuf,
- rd_kafka_replyq_t replyq,
- rd_kafka_resp_cb_t *resp_cb,
- void *opaque);
+ void rd_kafka_broker_buf_enq_replyq(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);

- void rd_kafka_broker_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf);
+ void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf);

- rd_kafka_broker_t *rd_kafka_broker_internal (rd_kafka_t *rk);
+ rd_kafka_broker_t *rd_kafka_broker_internal(rd_kafka_t *rk);

- void msghdr_print (rd_kafka_t *rk,
- const char *what, const struct msghdr *msg,
- int hexdump);
+ void msghdr_print(rd_kafka_t *rk,
+ const char *what,
+ const struct msghdr *msg,
+ int hexdump);

- int32_t rd_kafka_broker_id (rd_kafka_broker_t *rkb);
- const char *rd_kafka_broker_name (rd_kafka_broker_t *rkb);
- void rd_kafka_broker_wakeup (rd_kafka_broker_t *rkb);
- int rd_kafka_all_brokers_wakeup (rd_kafka_t *rk,
- int min_state);
+ int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb);
+ const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb);
+ void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb);
+ int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk, int min_state);

- void rd_kafka_connect_any (rd_kafka_t *rk, const char *reason);
+ void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason);

- void rd_kafka_broker_purge_queues (rd_kafka_broker_t *rkb, int purge_flags,
- rd_kafka_replyq_t replyq);
+ void rd_kafka_broker_purge_queues(rd_kafka_broker_t *rkb,
+ int purge_flags,
+ rd_kafka_replyq_t replyq);

- int rd_kafka_brokers_get_state_version (rd_kafka_t *rk);
- int rd_kafka_brokers_wait_state_change (rd_kafka_t *rk, int stored_version,
- int timeout_ms);
- int rd_kafka_brokers_wait_state_change_async (rd_kafka_t *rk,
- int stored_version,
- rd_kafka_enq_once_t *eonce);
- void rd_kafka_brokers_broadcast_state_change (rd_kafka_t *rk);
+ int rd_kafka_brokers_get_state_version(rd_kafka_t *rk);
+ int rd_kafka_brokers_wait_state_change(rd_kafka_t *rk,
+ int stored_version,
+ int timeout_ms);
+ int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk,
+ int stored_version,
+ rd_kafka_enq_once_t *eonce);
+ void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk);

/**
 * Updates the current toppar active round-robin next pointer.
 */
- static RD_INLINE RD_UNUSED
- void rd_kafka_broker_active_toppar_next (rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *sugg_next) {
+ static RD_INLINE RD_UNUSED void
+ rd_kafka_broker_active_toppar_next(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *sugg_next) {
if (CIRCLEQ_EMPTY(&rkb->rkb_active_toppars) ||
(void *)sugg_next == CIRCLEQ_ENDC(&rkb->rkb_active_toppars))
rkb->rkb_active_toppar_next = NULL;

@@ -550,37 +561,35 @@ void rd_kafka_broker_active_toppar_next (rd_kafka_broker_t *rkb,
rkb->rkb_active_toppar_next = sugg_next;
else
rkb->rkb_active_toppar_next =
- CIRCLEQ_FIRST(&rkb->rkb_active_toppars);
+ CIRCLEQ_FIRST(&rkb->rkb_active_toppars);
}

- void rd_kafka_broker_active_toppar_add (rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const char *reason);
+ void rd_kafka_broker_active_toppar_add(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const char *reason);

- void rd_kafka_broker_active_toppar_del (rd_kafka_broker_t *rkb,
- rd_kafka_toppar_t *rktp,
- const char *reason);
+ void rd_kafka_broker_active_toppar_del(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const char *reason);

- void rd_kafka_broker_schedule_connection (rd_kafka_broker_t *rkb);
+ void rd_kafka_broker_schedule_connection(rd_kafka_broker_t *rkb);

- void
- rd_kafka_broker_persistent_connection_add (rd_kafka_broker_t *rkb,
- rd_atomic32_t *acntp);
+ void rd_kafka_broker_persistent_connection_add(rd_kafka_broker_t *rkb,
+ rd_atomic32_t *acntp);

- void
- rd_kafka_broker_persistent_connection_del (rd_kafka_broker_t *rkb,
- rd_atomic32_t *acntp);
+ void rd_kafka_broker_persistent_connection_del(rd_kafka_broker_t *rkb,
+ rd_atomic32_t *acntp);

- void rd_kafka_broker_monitor_add (rd_kafka_broker_monitor_t *rkbmon,
- rd_kafka_broker_t *rkb,
- rd_kafka_q_t *rkq,
- void (*callback) (rd_kafka_broker_t *rkb));
+ void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_q_t *rkq,
+ void (*callback)(rd_kafka_broker_t *rkb));

- void rd_kafka_broker_monitor_del (rd_kafka_broker_monitor_t *rkbmon);
+ void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon);

- int unittest_broker (void);
+ int unittest_broker(void);

#endif /* _RDKAFKA_BROKER_H_ */

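The rd_kafka_broker_keep()/rd_kafka_broker_destroy() macros in this header wrap a reference count around the broker so that rd_kafka_broker_destroy_final() runs only when the last reference is dropped. A generic C11 sketch of that destroy-wrapper shape (hypothetical refobj_t, not librdkafka's rd_refcnt implementation):

    #include <stdatomic.h>

    typedef struct { atomic_int refcnt; } refobj_t;

    #define refobj_keep(o) atomic_fetch_add(&(o)->refcnt, 1)

    /* Run final_fn only when the count drops from 1 to 0. */
    #define refobj_destroy(o, final_fn)                            \
            do {                                                   \
                    if (atomic_fetch_sub(&(o)->refcnt, 1) == 1)    \
                            final_fn(o);                           \
            } while (0)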
src/rdkafka_buf.c

@@ -3,24 +3,24 @@
 *
 * Copyright (c) 2012-2015, Magnus Edenhill
 * All rights reserved.
- *
+ *
 * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
+ * modification, are permitted provided that the following conditions are met:
+ *
 * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
+ * this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
+ * and/or other materials provided with the distribution.
+ *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.

@@ -31,10 +31,9 @@
#include "rdkafka_broker.h"
#include "rdkafka_interceptor.h"

- void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) {
+ void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf) {

- switch (rkbuf->rkbuf_reqhdr.ApiKey)
- {
+ switch (rkbuf->rkbuf_reqhdr.ApiKey) {
case RD_KAFKAP_Metadata:
if (rkbuf->rkbuf_u.Metadata.topics)
rd_list_destroy(rkbuf->rkbuf_u.Metadata.topics);

@@ -77,7 +76,7 @@ void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) {

rd_refcnt_destroy(&rkbuf->rkbuf_refcnt);

- rd_free(rkbuf);
+ rd_free(rkbuf);
}

@@ -87,8 +86,11 @@ void rd_kafka_buf_destroy_final (rd_kafka_buf_t *rkbuf) {
 *
 * \p buf will NOT be freed by the buffer.
 */
- void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len,
- int allow_crc_calc, void (*free_cb) (void *)) {
+ void rd_kafka_buf_push0(rd_kafka_buf_t *rkbuf,
+ const void *buf,
+ size_t len,
+ int allow_crc_calc,
+ void (*free_cb)(void *)) {
rd_buf_push(&rkbuf->rkbuf_buf, buf, len, free_cb);

if (allow_crc_calc && (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC))

@@ -105,7 +107,7 @@ void rd_kafka_buf_push0 (rd_kafka_buf_t *rkbuf, const void *buf, size_t len,
 * If \p rk is non-NULL (typical case):
 * Additional space for the Kafka protocol headers is inserted automatically.
 */
- rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags) {
+ rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags) {
rd_kafka_buf_t *rkbuf;

rkbuf = rd_calloc(1, sizeof(*rkbuf));

@@ -123,10 +125,11 @@ rd_kafka_buf_t *rd_kafka_buf_new0 (int segcnt, size_t size, int flags) {
 * @brief Create new request buffer with the request-header written (will
 *        need to be updated with Length, etc, later)
 */
- rd_kafka_buf_t *rd_kafka_buf_new_request0 (rd_kafka_broker_t *rkb,
- int16_t ApiKey,
- int segcnt, size_t size,
- rd_bool_t is_flexver) {
+ rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
+ int16_t ApiKey,
+ int segcnt,
+ size_t size,
+ rd_bool_t is_flexver) {
rd_kafka_buf_t *rkbuf;

/* Make room for common protocol request headers */

@@ -175,7 +178,6 @@ rd_kafka_buf_t *rd_kafka_buf_new_request0 (rd_kafka_broker_t *rkb,

/**
 * @brief Create new read-only rkbuf shadowing a memory region.
 *

@@ -183,60 +185,61 @@ rd_kafka_buf_t *rd_kafka_buf_new_request0 (rd_kafka_broker_t *rkb,
 * buffer refcount reaches 0.
 * @remark the buffer may only be read from, not written to.
 */
- rd_kafka_buf_t *rd_kafka_buf_new_shadow (const void *ptr, size_t size,
- void (*free_cb) (void *)) {
- rd_kafka_buf_t *rkbuf;
+ rd_kafka_buf_t *
+ rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *)) {
+ rd_kafka_buf_t *rkbuf;

- rkbuf = rd_calloc(1, sizeof(*rkbuf));
+ rkbuf = rd_calloc(1, sizeof(*rkbuf));

rkbuf->rkbuf_reqhdr.ApiKey = RD_KAFKAP_None;

rd_buf_init(&rkbuf->rkbuf_buf, 1, 0);
rd_buf_push(&rkbuf->rkbuf_buf, ptr, size, free_cb);

- rkbuf->rkbuf_totlen = size;
+ rkbuf->rkbuf_totlen = size;

/* Initialize reader slice */
rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);

rd_refcnt_init(&rkbuf->rkbuf_refcnt, 1);

- return rkbuf;
+ return rkbuf;
}

- void rd_kafka_bufq_enq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
- TAILQ_INSERT_TAIL(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
+ void rd_kafka_bufq_enq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
+ TAILQ_INSERT_TAIL(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
rd_atomic32_add(&rkbufq->rkbq_cnt, 1);
if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce)
rd_atomic32_add(&rkbufq->rkbq_msg_cnt,
rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq));
}

- void rd_kafka_bufq_deq (rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
- TAILQ_REMOVE(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
- rd_kafka_assert(NULL, rd_atomic32_get(&rkbufq->rkbq_cnt) > 0);
- rd_atomic32_sub(&rkbufq->rkbq_cnt, 1);
+ void rd_kafka_bufq_deq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
+ TAILQ_REMOVE(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
+ rd_kafka_assert(NULL, rd_atomic32_get(&rkbufq->rkbq_cnt) > 0);
+ rd_atomic32_sub(&rkbufq->rkbq_cnt, 1);
if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce)
rd_atomic32_sub(&rkbufq->rkbq_msg_cnt,
rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq));
}

void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq) {
- TAILQ_INIT(&rkbufq->rkbq_bufs);
- rd_atomic32_init(&rkbufq->rkbq_cnt, 0);
- rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0);
+ TAILQ_INIT(&rkbufq->rkbq_bufs);
+ rd_atomic32_init(&rkbufq->rkbq_cnt, 0);
+ rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0);
}

/**
 * Concat all buffers from 'src' to tail of 'dst'
 */
- void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) {
- TAILQ_CONCAT(&dst->rkbq_bufs, &src->rkbq_bufs, rkbuf_link);
- (void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt));
- (void)rd_atomic32_add(&dst->rkbq_msg_cnt, rd_atomic32_get(&src->rkbq_msg_cnt));
- rd_kafka_bufq_init(src);
+ void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) {
+ TAILQ_CONCAT(&dst->rkbq_bufs, &src->rkbq_bufs, rkbuf_link);
+ (void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt));
+ (void)rd_atomic32_add(&dst->rkbq_msg_cnt,
+ rd_atomic32_get(&src->rkbq_msg_cnt));
+ rd_kafka_bufq_init(src);
}

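rd_kafka_bufq_concat() above relies on TAILQ_CONCAT moving an entire tail queue in O(1). A bare sketch of that shape (TAILQ_CONCAT is a BSD <sys/queue.h> extension that is also present in glibc; the atomic counters of the real function are omitted):

    #include <sys/queue.h>

    struct buf { TAILQ_ENTRY(buf) link; };
    TAILQ_HEAD(bufhead, buf);

    static void bufq_concat(struct bufhead *dst, struct bufhead *src) {
            TAILQ_CONCAT(dst, src, link); /* splice src onto dst, empties src */
            TAILQ_INIT(src);              /* re-init for reuse, as bufq_init() does */
    }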
/**

@@ -245,17 +248,17 @@ void rd_kafka_bufq_concat (rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) {
 * or rkb_outbufs since buffers may be re-enqueued on those queues.
 * 'rkbufq' needs to be bufq_init():ed before reuse after this call.
 */
- void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb,
- rd_kafka_bufq_t *rkbufq,
- rd_kafka_resp_err_t err) {
- rd_kafka_buf_t *rkbuf, *tmp;
+ void rd_kafka_bufq_purge(rd_kafka_broker_t *rkb,
+ rd_kafka_bufq_t *rkbufq,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_buf_t *rkbuf, *tmp;

- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));

- rd_rkb_dbg(rkb, QUEUE, "BUFQ", "Purging bufq with %i buffers",
- rd_atomic32_get(&rkbufq->rkbq_cnt));
+ rd_rkb_dbg(rkb, QUEUE, "BUFQ", "Purging bufq with %i buffers",
+ rd_atomic32_get(&rkbufq->rkbq_cnt));

- TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
+ TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf);
}
}

@@ -271,27 +274,26 @@ void rd_kafka_bufq_purge (rd_kafka_broker_t *rkb,
 * ApiVersion
 * SaslHandshake
 */
- void rd_kafka_bufq_connection_reset (rd_kafka_broker_t *rkb,
- rd_kafka_bufq_t *rkbufq) {
- rd_kafka_buf_t *rkbuf, *tmp;
+ void rd_kafka_bufq_connection_reset(rd_kafka_broker_t *rkb,
+ rd_kafka_bufq_t *rkbufq) {
+ rd_kafka_buf_t *rkbuf, *tmp;
rd_ts_t now = rd_clock();

- rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));

- rd_rkb_dbg(rkb, QUEUE, "BUFQ",
- "Updating %d buffers on connection reset",
- rd_atomic32_get(&rkbufq->rkbq_cnt));
+ rd_rkb_dbg(rkb, QUEUE, "BUFQ",
+ "Updating %d buffers on connection reset",
+ rd_atomic32_get(&rkbufq->rkbq_cnt));

- TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
- switch (rkbuf->rkbuf_reqhdr.ApiKey)
- {
- case RD_KAFKAP_ApiVersion:
- case RD_KAFKAP_SaslHandshake:
- rd_kafka_bufq_deq(rkbufq, rkbuf);
- rd_kafka_buf_callback(rkb->rkb_rk, rkb,
- RD_KAFKA_RESP_ERR__DESTROY,
- NULL, rkbuf);
- break;
+ TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
+ switch (rkbuf->rkbuf_reqhdr.ApiKey) {
+ case RD_KAFKAP_ApiVersion:
+ case RD_KAFKAP_SaslHandshake:
+ rd_kafka_bufq_deq(rkbufq, rkbuf);
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb,
+ RD_KAFKA_RESP_ERR__DESTROY, NULL,
+ rkbuf);
+ break;
default:
/* Reset buffer send position and corrid */
rd_slice_seek(&rkbuf->rkbuf_reader, 0);

@@ -299,13 +301,14 @@ void rd_kafka_bufq_connection_reset (rd_kafka_broker_t *rkb,
/* Reset timeout */
rd_kafka_buf_calc_timeout(rkb->rkb_rk, rkbuf, now);
break;
}
}
}
}

- void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac,
- rd_kafka_bufq_t *rkbq) {
+ void rd_kafka_bufq_dump(rd_kafka_broker_t *rkb,
+ const char *fac,
+ rd_kafka_bufq_t *rkbq) {
rd_kafka_buf_t *rkbuf;
int cnt = rd_kafka_bufq_cnt(rkbq);
rd_ts_t now;

@@ -319,28 +322,31 @@ void rd_kafka_bufq_dump (rd_kafka_broker_t *rkb, const char *fac,

TAILQ_FOREACH(rkbuf, &rkbq->rkbq_bufs, rkbuf_link) {
rd_rkb_dbg(rkb, BROKER, fac,
- " Buffer %s (%"PRIusz" bytes, corrid %"PRId32", "
+ " Buffer %s (%" PRIusz " bytes, corrid %" PRId32
+ ", "
"connid %d, prio %d, retry %d in %lldms, "
"timeout in %lldms)",
rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
rkbuf->rkbuf_totlen, rkbuf->rkbuf_corrid,
rkbuf->rkbuf_connid, rkbuf->rkbuf_prio,
rkbuf->rkbuf_retries,
- rkbuf->rkbuf_ts_retry ?
- (rkbuf->rkbuf_ts_retry - now) / 1000LL : 0,
- rkbuf->rkbuf_ts_timeout ?
- (rkbuf->rkbuf_ts_timeout - now) / 1000LL : 0);
+ rkbuf->rkbuf_ts_retry
+ ? (rkbuf->rkbuf_ts_retry - now) / 1000LL
+ : 0,
+ rkbuf->rkbuf_ts_timeout
+ ? (rkbuf->rkbuf_ts_timeout - now) / 1000LL
+ : 0);
}
}

/**
 * @brief Calculate the effective timeout for a request attempt
 */
- void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf,
- rd_ts_t now) {
+ void rd_kafka_buf_calc_timeout(const rd_kafka_t *rk,
+ rd_kafka_buf_t *rkbuf,
+ rd_ts_t now) {
if (likely(rkbuf->rkbuf_rel_timeout)) {
/* Default:
 * Relative timeout, set request timeout to

@@ -348,11 +354,11 @@ void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf,
rkbuf->rkbuf_ts_timeout = now + rkbuf->rkbuf_rel_timeout * 1000;
} else if (!rkbuf->rkbuf_force_timeout) {
/* Use absolute timeout, limited by socket.timeout.ms */
- rd_ts_t sock_timeout = now +
- rk->rk_conf.socket_timeout_ms * 1000;
+ rd_ts_t sock_timeout =
+ now + rk->rk_conf.socket_timeout_ms * 1000;

rkbuf->rkbuf_ts_timeout =
- RD_MIN(sock_timeout, rkbuf->rkbuf_abs_timeout);
+ RD_MIN(sock_timeout, rkbuf->rkbuf_abs_timeout);
} else {
/* Use absolue timeout without limit. */
rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_abs_timeout;

@@ -367,64 +373,62 @@ void rd_kafka_buf_calc_timeout (const rd_kafka_t *rk, rd_kafka_buf_t *rkbuf,
 * (rkb_outbufs) then the retry counter is not increased.
 * Returns 1 if the request was scheduled for retry, else 0.
 */
- int rd_kafka_buf_retry (rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
+ int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
int incr_retry = rd_kafka_buf_was_sent(rkbuf) ? 1 : 0;

/* Don't allow retries of dummy/empty buffers */
rd_assert(rd_buf_len(&rkbuf->rkbuf_buf) > 0);

- if (unlikely(!rkb ||
- rkb->rkb_source == RD_KAFKA_INTERNAL ||
- rd_kafka_terminating(rkb->rkb_rk) ||
- rkbuf->rkbuf_retries + incr_retry >
- rkbuf->rkbuf_max_retries))
+ if (unlikely(!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL ||
+ rd_kafka_terminating(rkb->rkb_rk) ||
+ rkbuf->rkbuf_retries + incr_retry >
+ rkbuf->rkbuf_max_retries))
return 0;

/* Absolute timeout, check for expiry. */
- if (rkbuf->rkbuf_abs_timeout &&
- rkbuf->rkbuf_abs_timeout < rd_clock())
+ if (rkbuf->rkbuf_abs_timeout && rkbuf->rkbuf_abs_timeout < rd_clock())
return 0; /* Expired */

- /* Try again */
- rkbuf->rkbuf_ts_sent = 0;
+ /* Try again */
+ rkbuf->rkbuf_ts_sent = 0;
rkbuf->rkbuf_ts_timeout = 0; /* Will be updated in calc_timeout() */
- rkbuf->rkbuf_retries += incr_retry;
- rd_kafka_buf_keep(rkbuf);
- rd_kafka_broker_buf_retry(rkb, rkbuf);
- return 1;
+ rkbuf->rkbuf_retries += incr_retry;
+ rd_kafka_buf_keep(rkbuf);
+ rd_kafka_broker_buf_retry(rkb, rkbuf);
+ return 1;
}

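Per the contract documented above, rd_kafka_buf_retry() returns 1 when a retry was scheduled and 0 when the caller must fail the request. A hypothetical caller-side fragment (the error condition and the surrounding handler are invented for illustration):

    /* In a response handler: retry transient transport errors,
     * otherwise fall through and fail the request permanently. */
    if (err == RD_KAFKA_RESP_ERR__TRANSPORT &&
        rd_kafka_buf_retry(rkb, request))
            return; /* retry scheduled, buffer ownership passed on */
    /* ... propagate err to the original requester ... */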
/**
 * @brief Handle RD_KAFKA_OP_RECV_BUF.
 */
- void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) {
+ void rd_kafka_buf_handle_op(rd_kafka_op_t *rko, rd_kafka_resp_err_t err) {
rd_kafka_buf_t *request, *response;
rd_kafka_t *rk;

- request = rko->rko_u.xbuf.rkbuf;
+ request = rko->rko_u.xbuf.rkbuf;
rko->rko_u.xbuf.rkbuf = NULL;

/* NULL on op_destroy() */
- if (request->rkbuf_replyq.q) {
- int32_t version = request->rkbuf_replyq.version;
+ if (request->rkbuf_replyq.q) {
+ int32_t version = request->rkbuf_replyq.version;
/* Current queue usage is done, but retain original replyq for
 * future retries, stealing
 * the current reference. */
request->rkbuf_orig_replyq = request->rkbuf_replyq;
rd_kafka_replyq_clear(&request->rkbuf_replyq);
- /* Callback might need to version check so we retain the
- * version across the clear() call which clears it. */
- request->rkbuf_replyq.version = version;
- }
+ /* Callback might need to version check so we retain the
+ * version across the clear() call which clears it. */
+ request->rkbuf_replyq.version = version;
+ }

- if (!request->rkbuf_cb) {
- rd_kafka_buf_destroy(request);
- return;
- }
+ if (!request->rkbuf_cb) {
+ rd_kafka_buf_destroy(request);
+ return;
+ }

/* Let buf_callback() do destroy()s */
- response = request->rkbuf_response; /* May be NULL */
+ response = request->rkbuf_response; /* May be NULL */
request->rkbuf_response = NULL;

if (!(rk = rko->rko_rk)) {

@@ -450,27 +454,24 @@ void rd_kafka_buf_handle_op (rd_kafka_op_t *rko, rd_kafka_resp_err_t err) {
 * The decision to retry, and the call to buf_retry(), is delegated
 * to the buffer's response callback.
 */
- void rd_kafka_buf_callback (rd_kafka_t *rk,
- rd_kafka_broker_t *rkb, rd_kafka_resp_err_t err,
- rd_kafka_buf_t *response, rd_kafka_buf_t *request){
+ void rd_kafka_buf_callback(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *response,
+ rd_kafka_buf_t *request) {

rd_kafka_interceptors_on_response_received(
- rk,
- -1,
- rkb ? rd_kafka_broker_name(rkb) : "",
- rkb ? rd_kafka_broker_id(rkb) : -1,
- request->rkbuf_reqhdr.ApiKey,
- request->rkbuf_reqhdr.ApiVersion,
- request->rkbuf_reshdr.CorrId,
- response ? response->rkbuf_totlen : 0,
- response ? response->rkbuf_ts_sent : -1,
- err);
+ rk, -1, rkb ? rd_kafka_broker_name(rkb) : "",
+ rkb ? rd_kafka_broker_id(rkb) : -1, request->rkbuf_reqhdr.ApiKey,
+ request->rkbuf_reqhdr.ApiVersion, request->rkbuf_reshdr.CorrId,
+ response ? response->rkbuf_totlen : 0,
+ response ? response->rkbuf_ts_sent : -1, err);

if (err != RD_KAFKA_RESP_ERR__DESTROY && request->rkbuf_replyq.q) {
rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF);

- rd_kafka_assert(NULL, !request->rkbuf_response);
- request->rkbuf_response = response;
+ rd_kafka_assert(NULL, !request->rkbuf_response);
+ request->rkbuf_response = response;

/* Increment refcnt since rko_rkbuf will be decref:ed
 * if replyq_enq() fails and we dont want the rkbuf gone in that

@@ -485,10 +486,10 @@ void rd_kafka_buf_callback (rd_kafka_t *rk,
rd_kafka_replyq_copy(&request->rkbuf_orig_replyq,
&request->rkbuf_replyq);

- rd_kafka_replyq_enq(&request->rkbuf_replyq, rko, 0);
+ rd_kafka_replyq_enq(&request->rkbuf_replyq, rko, 0);

- rd_kafka_buf_destroy(request); /* from keep above */
- return;
+ rd_kafka_buf_destroy(request); /* from keep above */
+ return;
}

if (request->rkbuf_cb)

@@ -496,8 +497,8 @@ void rd_kafka_buf_callback (rd_kafka_t *rk,
request->rkbuf_opaque);

rd_kafka_buf_destroy(request);
- if (response)
- rd_kafka_buf_destroy(response);
+ if (response)
+ rd_kafka_buf_destroy(response);
}

@@ -511,15 +512,15 @@ void rd_kafka_buf_callback (rd_kafka_t *rk,
 *
 * See rd_kafka_make_req_cb_t documentation for more info.
 */
- void rd_kafka_buf_set_maker (rd_kafka_buf_t *rkbuf,
- rd_kafka_make_req_cb_t *make_cb,
- void *make_opaque,
- void (*free_make_opaque_cb) (void *make_opaque)) {
+ void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf,
+ rd_kafka_make_req_cb_t *make_cb,
+ void *make_opaque,
+ void (*free_make_opaque_cb)(void *make_opaque)) {
rd_assert(!rkbuf->rkbuf_make_req_cb &&
!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE));

- rkbuf->rkbuf_make_req_cb = make_cb;
- rkbuf->rkbuf_make_opaque = make_opaque;
+ rkbuf->rkbuf_make_req_cb = make_cb;
+ rkbuf->rkbuf_make_opaque = make_opaque;
rkbuf->rkbuf_free_make_opaque_cb = free_make_opaque_cb;

rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_NEED_MAKE;

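rd_kafka_buf_calc_timeout() above picks the effective deadline for a request attempt: a relative timeout counts from now, while an absolute timeout is capped by socket.timeout.ms unless explicitly forced. The same decision as a standalone sketch (rd_ts_t is microseconds as in librdkafka; MIN_TS is a local stand-in for the library's RD_MIN):

    typedef long long rd_ts_t; /* microseconds */
    #define MIN_TS(a, b) ((a) < (b) ? (a) : (b))

    static rd_ts_t effective_timeout(rd_ts_t now, int rel_timeout_ms,
                                     rd_ts_t abs_timeout, int force_abs,
                                     int socket_timeout_ms) {
            if (rel_timeout_ms) /* default: relative to now */
                    return now + (rd_ts_t)rel_timeout_ms * 1000;
            if (!force_abs)     /* absolute, capped by socket.timeout.ms */
                    return MIN_TS(now + (rd_ts_t)socket_timeout_ms * 1000,
                                  abs_timeout);
            return abs_timeout; /* absolute, uncapped */
    }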
src/rdkafka_buf.h — 1074 lines changed; the diff for this file is not shown because of its size.
src/rdkafka_cert.c

@@ -47,15 +47,15 @@
  *
  * @locality application thread
  */
-static int rd_kafka_conf_ssl_passwd_cb (char *buf, int size, int rwflag,
-                                        void *userdata) {
+static int
+rd_kafka_conf_ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata) {
         const rd_kafka_conf_t *conf = userdata;
         int pwlen;

         if (!conf->ssl.key_password)
                 return -1;

-        pwlen = (int) strlen(conf->ssl.key_password);
+        pwlen = (int)strlen(conf->ssl.key_password);
         memcpy(buf, conf->ssl.key_password, RD_MIN(pwlen, size));

         return pwlen;
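The reformatted callback above follows OpenSSL's pem_password_cb contract: copy at most size bytes of the password into buf and return the password length, or -1 when no password is available. A minimal standalone sketch of that contract, assuming the password string itself is passed as the userdata pointer (a stand-in for librdkafka's conf object):

#include <string.h>
#include <openssl/pem.h>

/* Minimal pem_password_cb sketch: "userdata" is assumed here to be the
 * password string itself (hypothetical; librdkafka passes its conf). */
static int example_passwd_cb(char *buf, int size, int rwflag, void *userdata) {
        const char *password = userdata;
        int pwlen;

        (void)rwflag; /* 1 when encrypting/writing, 0 when reading */

        if (!password)
                return -1; /* No password available */

        pwlen = (int)strlen(password);
        if (pwlen > size)
                pwlen = size; /* OpenSSL expects at most 'size' bytes */
        memcpy(buf, password, pwlen);

        return pwlen;
}

A callback of this shape is what PEM_read_bio_X509() and PEM_read_bio_PrivateKey() invoke further down in this file when they encounter an encrypted PEM block.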
@@ -63,23 +63,16 @@ static int rd_kafka_conf_ssl_passwd_cb (char *buf, int size, int rwflag,

-static const char *rd_kafka_cert_type_names[] = {
-        "public-key",
-        "private-key",
-        "CA"
-};
+static const char *rd_kafka_cert_type_names[] = {"public-key", "private-key",
+                                                 "CA"};

-static const char *rd_kafka_cert_enc_names[] = {
-        "PKCS#12",
-        "DER",
-        "PEM"
-};
+static const char *rd_kafka_cert_enc_names[] = {"PKCS#12", "DER", "PEM"};


 /**
  * @brief Destroy a certificate
  */
-static void rd_kafka_cert_destroy (rd_kafka_cert_t *cert) {
+static void rd_kafka_cert_destroy(rd_kafka_cert_t *cert) {
         if (rd_refcnt_sub(&cert->refcnt) > 0)
                 return;

@@ -97,7 +90,7 @@ static void rd_kafka_cert_destroy (rd_kafka_cert_t *cert) {
 /**
  * @brief Create a copy of a cert
  */
-static rd_kafka_cert_t *rd_kafka_cert_dup (rd_kafka_cert_t *src) {
+static rd_kafka_cert_t *rd_kafka_cert_dup(rd_kafka_cert_t *src) {
         rd_refcnt_add(&src->refcnt);
         return src;
 }

@@ -105,29 +98,27 @@ static rd_kafka_cert_t *rd_kafka_cert_dup (rd_kafka_cert_t *src) {
 /**
  * @brief Print the OpenSSL error stack do stdout, for development use.
  */
-static RD_UNUSED void rd_kafka_print_ssl_errors (void) {
+static RD_UNUSED void rd_kafka_print_ssl_errors(void) {
         unsigned long l;
         const char *file, *data;
         int line, flags;

-        while ((l = ERR_get_error_line_data(&file, &line,
-                                            &data, &flags)) != 0) {
+        while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) !=
+               0) {
                 char buf[256];

                 ERR_error_string_n(l, buf, sizeof(buf));

-                printf("ERR: %s:%d: %s: %s:\n",
-                       file, line, buf, (flags & ERR_TXT_STRING) ? data : "");
-                printf(" %lu:%s : %s : %s : %d : %s (%p, %d, fl 0x%x)\n",
-                       l,
-                       ERR_lib_error_string(l),
-                       ERR_func_error_string(l),
-                       file, line,
-                       (flags & ERR_TXT_STRING) && data && *data ?
-                       data : ERR_reason_error_string(l),
+                printf("ERR: %s:%d: %s: %s:\n", file, line, buf,
+                       (flags & ERR_TXT_STRING) ? data : "");
+                printf(" %lu:%s : %s : %s : %d : %s (%p, %d, fl 0x%x)\n", l,
+                       ERR_lib_error_string(l), ERR_func_error_string(l), file,
+                       line,
+                       (flags & ERR_TXT_STRING) && data && *data
+                           ? data
+                           : ERR_reason_error_string(l),
                        data, data ? (int)strlen(data) : -1,
                        flags & ERR_TXT_STRING);

         }
 }

@@ -136,38 +127,37 @@ static RD_UNUSED void rd_kafka_print_ssl_errors (void) {
  * or NULL on failure in which case errstr will have a human-readable
  * error string written to it.
  */
-static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf,
-                                           rd_kafka_cert_type_t type,
-                                           rd_kafka_cert_enc_t encoding,
-                                           const void *buffer, size_t size,
-                                           char *errstr, size_t errstr_size) {
+static rd_kafka_cert_t *rd_kafka_cert_new(const rd_kafka_conf_t *conf,
+                                          rd_kafka_cert_type_t type,
+                                          rd_kafka_cert_enc_t encoding,
+                                          const void *buffer,
+                                          size_t size,
+                                          char *errstr,
+                                          size_t errstr_size) {
         static const rd_bool_t
             valid[RD_KAFKA_CERT__CNT][RD_KAFKA_CERT_ENC__CNT] = {
                 /* Valid encodings per certificate type */
-                [RD_KAFKA_CERT_PUBLIC_KEY] = {
-                        [RD_KAFKA_CERT_ENC_PKCS12] = rd_true,
-                        [RD_KAFKA_CERT_ENC_DER] = rd_true,
-                        [RD_KAFKA_CERT_ENC_PEM] = rd_true
-                },
-                [RD_KAFKA_CERT_PRIVATE_KEY] = {
-                        [RD_KAFKA_CERT_ENC_PKCS12] = rd_true,
-                        [RD_KAFKA_CERT_ENC_DER] = rd_true,
-                        [RD_KAFKA_CERT_ENC_PEM] = rd_true
-                },
-                [RD_KAFKA_CERT_CA] = {
-                        [RD_KAFKA_CERT_ENC_PKCS12] = rd_true,
-                        [RD_KAFKA_CERT_ENC_DER] = rd_true,
-                        [RD_KAFKA_CERT_ENC_PEM] = rd_true
-                },
+                [RD_KAFKA_CERT_PUBLIC_KEY] = {[RD_KAFKA_CERT_ENC_PKCS12] =
+                                                  rd_true,
+                                              [RD_KAFKA_CERT_ENC_DER] = rd_true,
+                                              [RD_KAFKA_CERT_ENC_PEM] =
+                                                  rd_true},
+                [RD_KAFKA_CERT_PRIVATE_KEY] =
+                    {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true,
+                     [RD_KAFKA_CERT_ENC_DER] = rd_true,
+                     [RD_KAFKA_CERT_ENC_PEM] = rd_true},
+                [RD_KAFKA_CERT_CA] = {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true,
+                                      [RD_KAFKA_CERT_ENC_DER] = rd_true,
+                                      [RD_KAFKA_CERT_ENC_PEM] = rd_true},
         };
         const char *action = "";
         BIO *bio;
         rd_kafka_cert_t *cert = NULL;
         PKCS12 *p12           = NULL;

         if ((int)type < 0 || type >= RD_KAFKA_CERT__CNT) {
-                rd_snprintf(errstr, errstr_size,
-                            "Invalid certificate type %d", (int)type);
+                rd_snprintf(errstr, errstr_size, "Invalid certificate type %d",
+                            (int)type);
                 return NULL;
         }
@@ -186,148 +176,136 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf,
         }

         action = "read memory";
         bio    = BIO_new_mem_buf((void *)buffer, (long)size);
         if (!bio)
                 goto fail;

         if (encoding == RD_KAFKA_CERT_ENC_PKCS12) {
                 action = "read PKCS#12";
                 p12    = d2i_PKCS12_bio(bio, NULL);
                 if (!p12)
                         goto fail;
         }

         cert           = rd_calloc(1, sizeof(*cert));
         cert->type     = type;
         cert->encoding = encoding;

         rd_refcnt_init(&cert->refcnt, 1);

-        switch (type)
-        {
+        switch (type) {
         case RD_KAFKA_CERT_CA:
                 cert->store = X509_STORE_new();

-                switch (encoding)
-                {
-                case RD_KAFKA_CERT_ENC_PKCS12:
-                {
+                switch (encoding) {
+                case RD_KAFKA_CERT_ENC_PKCS12: {
                         EVP_PKEY *ign_pkey;
                         X509 *ign_cert;
                         STACK_OF(X509) *cas = NULL;
                         int i;

                         action = "parse PKCS#12";
                         if (!PKCS12_parse(p12, conf->ssl.key_password,
-                                          &ign_pkey, &ign_cert,
-                                          &cas))
+                                          &ign_pkey, &ign_cert, &cas))
                                 goto fail;

                         EVP_PKEY_free(ign_pkey);
                         X509_free(ign_cert);

                         if (!cas || sk_X509_num(cas) < 1) {
-                                action = "retrieve at least one CA "
-                                        "cert from PKCS#12";
+                                action =
+                                    "retrieve at least one CA "
+                                    "cert from PKCS#12";
                                 if (cas)
-                                        sk_X509_pop_free(cas,
-                                                         X509_free);
+                                        sk_X509_pop_free(cas, X509_free);
                                 goto fail;
                         }

-                        for (i = 0 ; i < sk_X509_num(cas) ; i++) {
+                        for (i = 0; i < sk_X509_num(cas); i++) {
                                 if (!X509_STORE_add_cert(
-                                            cert->store,
-                                            sk_X509_value(cas, i))) {
-                                        action = "add certificate to "
-                                                "X.509 store";
-                                        sk_X509_pop_free(cas,
-                                                         X509_free);
+                                        cert->store, sk_X509_value(cas, i))) {
+                                        action =
+                                            "add certificate to "
+                                            "X.509 store";
+                                        sk_X509_pop_free(cas, X509_free);
                                         goto fail;
                                 }
                         }

                         sk_X509_pop_free(cas, X509_free);
-                }
-                break;
+                } break;

-                case RD_KAFKA_CERT_ENC_DER:
-                {
+                case RD_KAFKA_CERT_ENC_DER: {
                         X509 *x509;

                         action = "read DER / X.509 ASN.1";
                         if (!(x509 = d2i_X509_bio(bio, NULL)))
                                 goto fail;

                         if (!X509_STORE_add_cert(cert->store, x509)) {
-                                action = "add certificate to "
-                                        "X.509 store";
+                                action =
+                                    "add certificate to "
+                                    "X.509 store";
                                 X509_free(x509);
                                 goto fail;
                         }
-                }
-                break;
+                } break;

-                case RD_KAFKA_CERT_ENC_PEM:
-                {
+                case RD_KAFKA_CERT_ENC_PEM: {
                         X509 *x509;
                         int cnt = 0;

                         action = "read PEM";

                         /* This will read one certificate per call
                          * until an error occurs or the end of the
                          * buffer is reached (which is an error
                          * we'll need to clear). */
-                        while ((x509 =
-                                PEM_read_bio_X509(
-                                        bio, NULL,
-                                        rd_kafka_conf_ssl_passwd_cb,
-                                        (void *)conf))) {
+                        while ((x509 = PEM_read_bio_X509(
+                                    bio, NULL, rd_kafka_conf_ssl_passwd_cb,
+                                    (void *)conf))) {

-                                if (!X509_STORE_add_cert(cert->store,
-                                                         x509)) {
-                                        action = "add certificate to "
-                                                "X.509 store";
+                                if (!X509_STORE_add_cert(cert->store, x509)) {
+                                        action =
+                                            "add certificate to "
+                                            "X.509 store";
                                         X509_free(x509);
                                         goto fail;
                                 }

                                 cnt++;
                         }

                         if (!BIO_eof(bio)) {
                                 /* Encountered parse error before
                                  * reaching end, propagate error and
                                  * fail. */
                                 goto fail;
                         }

                         if (!cnt) {
-                                action = "retrieve at least one "
-                                        "CA cert from PEM";
+                                action =
+                                    "retrieve at least one "
+                                    "CA cert from PEM";

                                 goto fail;
                         }

                         /* Reached end, which is raised as an error,
                          * so clear it since it is not. */
                         ERR_clear_error();
-                }
-                break;
+                } break;

                 default:
                         RD_NOTREACHED();
                         break;
                 }
                 break;


         case RD_KAFKA_CERT_PUBLIC_KEY:
-                switch (encoding)
-                {
-                case RD_KAFKA_CERT_ENC_PKCS12:
-                {
+                switch (encoding) {
+                case RD_KAFKA_CERT_ENC_PKCS12: {
                         EVP_PKEY *ign_pkey;

                         action = "parse PKCS#12";
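The PEM branch above leans on an OpenSSL quirk: PEM_read_bio_X509() returns one certificate per call and fails at end of buffer, so reaching BIO_eof() counts as success and the queued end-of-file error must be cleared. A self-contained sketch of the same pattern, assuming an in-memory PEM bundle (the helper name and inputs are illustrative, not librdkafka code):

#include <openssl/bio.h>
#include <openssl/pem.h>
#include <openssl/err.h>

/* Count the certificates in an in-memory PEM bundle.
 * Returns the certificate count, or -1 on a genuine parse error. */
static int count_pem_certs(const char *pem, int len) {
        BIO *bio = BIO_new_mem_buf((void *)pem, len);
        X509 *x509;
        int cnt = 0;

        if (!bio)
                return -1;

        /* One certificate per call; NULL on failure, including at EOF. */
        while ((x509 = PEM_read_bio_X509(bio, NULL, NULL, NULL))) {
                X509_free(x509);
                cnt++;
        }

        if (!BIO_eof(bio)) {
                BIO_free(bio); /* Real parse error before the end */
                return -1;
        }

        ERR_clear_error(); /* The EOF "failure" queued an error: clear it */
        BIO_free(bio);
        return cnt;
}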
@@ -340,21 +318,20 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf,
                         action = "retrieve public key";
                         if (!cert->x509)
                                 goto fail;
-                }
-                break;
+                } break;

                 case RD_KAFKA_CERT_ENC_DER:
-                        action = "read DER / X.509 ASN.1";
+                        action     = "read DER / X.509 ASN.1";
                         cert->x509 = d2i_X509_bio(bio, NULL);
                         if (!cert->x509)
                                 goto fail;
                         break;

                 case RD_KAFKA_CERT_ENC_PEM:
-                        action = "read PEM";
+                        action     = "read PEM";
                         cert->x509 = PEM_read_bio_X509(
-                                bio, NULL, rd_kafka_conf_ssl_passwd_cb,
-                                (void *)conf);
+                            bio, NULL, rd_kafka_conf_ssl_passwd_cb,
+                            (void *)conf);
                         if (!cert->x509)
                                 goto fail;
                         break;

@@ -367,10 +344,8 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf,

         case RD_KAFKA_CERT_PRIVATE_KEY:
-                switch (encoding)
-                {
-                case RD_KAFKA_CERT_ENC_PKCS12:
-                {
+                switch (encoding) {
+                case RD_KAFKA_CERT_ENC_PKCS12: {
                         X509 *x509;

                         action = "parse PKCS#12";

@@ -383,22 +358,22 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf,
                         action = "retrieve private key";
                         if (!cert->pkey)
                                 goto fail;
-                }
-                break;
+                } break;

                 case RD_KAFKA_CERT_ENC_DER:
-                        action = "read DER / X.509 ASN.1 and "
-                                "convert to EVP_PKEY";
+                        action =
+                            "read DER / X.509 ASN.1 and "
+                            "convert to EVP_PKEY";
                         cert->pkey = d2i_PrivateKey_bio(bio, NULL);
                         if (!cert->pkey)
                                 goto fail;
                         break;

                 case RD_KAFKA_CERT_ENC_PEM:
-                        action = "read PEM";
+                        action     = "read PEM";
                         cert->pkey = PEM_read_bio_PrivateKey(
-                                bio, NULL, rd_kafka_conf_ssl_passwd_cb,
-                                (void *)conf);
+                            bio, NULL, rd_kafka_conf_ssl_passwd_cb,
+                            (void *)conf);
                         if (!cert->pkey)
                                 goto fail;
                         break;

@@ -421,11 +396,9 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf,

         return cert;

-fail:
-        rd_snprintf(errstr, errstr_size,
-                    "Failed to %s %s (encoding %s): %s",
-                    action,
-                    rd_kafka_cert_type_names[type],
+fail:
+        rd_snprintf(errstr, errstr_size, "Failed to %s %s (encoding %s): %s",
+                    action, rd_kafka_cert_type_names[type],
                     rd_kafka_cert_enc_names[encoding],
                     rd_kafka_ssl_last_error_str());
@@ -448,12 +421,13 @@ static rd_kafka_cert_t *rd_kafka_cert_new (const rd_kafka_conf_t *conf,
  * @{
  */

-rd_kafka_conf_res_t
-rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf,
-                            rd_kafka_cert_type_t cert_type,
-                            rd_kafka_cert_enc_t cert_enc,
-                            const void *buffer, size_t size,
-                            char *errstr, size_t errstr_size) {
+rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf,
+                                               rd_kafka_cert_type_t cert_type,
+                                               rd_kafka_cert_enc_t cert_enc,
+                                               const void *buffer,
+                                               size_t size,
+                                               char *errstr,
+                                               size_t errstr_size) {
 #if !WITH_SSL
         rd_snprintf(errstr, errstr_size,
                     "librdkafka not built with OpenSSL support");

@@ -461,15 +435,14 @@ rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf,
 #else
         rd_kafka_cert_t *cert;
         rd_kafka_cert_t **cert_map[RD_KAFKA_CERT__CNT] = {
-                [RD_KAFKA_CERT_PUBLIC_KEY] = &conf->ssl.cert,
-                [RD_KAFKA_CERT_PRIVATE_KEY] = &conf->ssl.key,
-                [RD_KAFKA_CERT_CA] = &conf->ssl.ca
-        };
+            [RD_KAFKA_CERT_PUBLIC_KEY]  = &conf->ssl.cert,
+            [RD_KAFKA_CERT_PRIVATE_KEY] = &conf->ssl.key,
+            [RD_KAFKA_CERT_CA]          = &conf->ssl.ca};
         rd_kafka_cert_t **certp;

         if ((int)cert_type < 0 || cert_type >= RD_KAFKA_CERT__CNT) {
-                rd_snprintf(errstr, errstr_size,
-                            "Invalid certificate type %d", (int)cert_type);
+                rd_snprintf(errstr, errstr_size, "Invalid certificate type %d",
+                            (int)cert_type);
                 return RD_KAFKA_CONF_INVALID;
         }
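rd_kafka_conf_set_ssl_cert() is part of librdkafka's public API, so the rewrapped signature above can be exercised directly. A hedged usage sketch that installs an in-memory PEM CA bundle (ca_pem is a hypothetical buffer supplied by the caller):

#include <stdio.h>
#include <string.h>
#include <librdkafka/rdkafka.h>

/* Sketch: install an in-memory PEM CA bundle on a configuration object. */
static int set_ca_cert(rd_kafka_conf_t *conf, const char *ca_pem) {
        char errstr[512];

        if (rd_kafka_conf_set_ssl_cert(conf, RD_KAFKA_CERT_CA,
                                       RD_KAFKA_CERT_ENC_PEM, ca_pem,
                                       strlen(ca_pem), errstr,
                                       sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%% Failed to set CA cert: %s\n", errstr);
                return -1;
        }

        return 0;
}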
@@ -506,7 +479,7 @@ rd_kafka_conf_set_ssl_cert (rd_kafka_conf_t *conf,
 /**
  * @brief Destructor called when configuration object is destroyed.
  */
-void rd_kafka_conf_cert_dtor (int scope, void *pconf) {
+void rd_kafka_conf_cert_dtor(int scope, void *pconf) {
 #if WITH_SSL
         rd_kafka_conf_t *conf = pconf;
         assert(scope == _RK_GLOBAL);

@@ -529,11 +502,15 @@ void rd_kafka_conf_cert_dtor (int scope, void *pconf) {
  * @brief Copy-constructor called when configuration object \p psrcp is
  *        duplicated to \p dstp.
  */
-void rd_kafka_conf_cert_copy (int scope, void *pdst, const void *psrc,
-                              void *dstptr, const void *srcptr,
-                              size_t filter_cnt, const char **filter) {
+void rd_kafka_conf_cert_copy(int scope,
+                             void *pdst,
+                             const void *psrc,
+                             void *dstptr,
+                             const void *srcptr,
+                             size_t filter_cnt,
+                             const char **filter) {
 #if WITH_SSL
-        rd_kafka_conf_t *dconf = pdst;
+        rd_kafka_conf_t *dconf       = pdst;
         const rd_kafka_conf_t *sconf = psrc;

         assert(scope == _RK_GLOBAL);
src/rdkafka_cert.h

@@ -40,18 +40,22 @@
  */
 typedef struct rd_kafka_cert_s {
         rd_kafka_cert_type_t type;
         rd_kafka_cert_enc_t encoding;
         rd_refcnt_t refcnt;
 #if WITH_SSL
         X509 *x509;        /**< Certificate (public key) */
         EVP_PKEY *pkey;    /**< Private key */
         X509_STORE *store; /**< CA certificate chain store */
 #endif
 } rd_kafka_cert_t;

-void rd_kafka_conf_cert_dtor (int scope, void *pconf);
-void rd_kafka_conf_cert_copy (int scope, void *pdst, const void *psrc,
-                              void *dstptr, const void *srcptr,
-                              size_t filter_cnt, const char **filter);
+void rd_kafka_conf_cert_dtor(int scope, void *pconf);
+void rd_kafka_conf_cert_copy(int scope,
+                             void *pdst,
+                             const void *psrc,
+                             void *dstptr,
+                             const void *srcptr,
+                             size_t filter_cnt,
+                             const char **filter);

 #endif /* _RDKAFKA_CERT_H_ */
src/rdkafka_cgrp.c: 3182 lines changed (diff not shown because of its size).
src/rdkafka_cgrp.h

@@ -3,24 +3,24 @@
  *
  * Copyright (c) 2012-2015, Magnus Edenhill
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
  *
  * 1. Redistributions of source code must retain the above copyright notice,
  *    this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright notice,
  *    this list of conditions and the following disclaimer in the documentation
  *    and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  * POSSIBILITY OF SUCH DAMAGE.
@@ -52,10 +52,10 @@ extern const char *rd_kafka_cgrp_join_state_names[];
  * Client group
  */
 typedef struct rd_kafka_cgrp_s {
         const rd_kafkap_str_t *rkcg_group_id;
         rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */
         rd_kafkap_str_t *rkcg_group_instance_id;
         const rd_kafkap_str_t *rkcg_client_id;

         enum {
                 /* Init state */
@@ -79,8 +79,8 @@ typedef struct rd_kafka_cgrp_s {
                 /* Coordinator is up and manager is assigned. */
                 RD_KAFKA_CGRP_STATE_UP,
         } rkcg_state;
         rd_ts_t rkcg_ts_statechange; /* Timestamp of last
                                       * state change. */


         enum {
@@ -121,87 +121,93 @@ typedef struct rd_kafka_cgrp_s {
                 int member_cnt;
         } rkcg_group_leader;

         rd_kafka_q_t *rkcg_q;            /* Application poll queue */
         rd_kafka_q_t *rkcg_ops;          /* Manager ops queue */
         rd_kafka_q_t *rkcg_wait_coord_q; /* Ops awaiting coord */
         int rkcg_flags;
 #define RD_KAFKA_CGRP_F_TERMINATE  0x1 /* Terminate cgrp (async) */
 #define RD_KAFKA_CGRP_F_TERMINATED 0x2 /* Cgrp terminated */
-#define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE 0x8 /* Send LeaveGroup when
-                                                    * unassign is done */
-#define RD_KAFKA_CGRP_F_SUBSCRIPTION 0x10 /* If set:
-                                           *   subscription
-                                           * else:
-                                           *   static assignment */
-#define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT 0x20 /* A Heartbeat request
-                                                   * is in transit, dont
-                                                   * send a new one. */
-#define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION 0x40 /* Subscription contains
-                                                    * wildcards. */
-#define RD_KAFKA_CGRP_F_WAIT_LEAVE 0x80 /* Wait for LeaveGroup
-                                         * to be sent.
-                                         * This is used to stall
-                                         * termination until
-                                         * the LeaveGroupRequest
-                                         * is responded to,
-                                         * otherwise it risks
-                                         * being dropped in the
-                                         * output queue when
-                                         * the broker is destroyed.
-                                         */
-#define RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED 0x100 /**< max.poll.interval.ms
-                                                 * was exceeded and we
-                                                 * left the group.
-                                                 * Do not rejoin until
-                                                 * the application has
-                                                 * polled again. */
+#define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE \
+        0x8 /* Send LeaveGroup when \
+             * unassign is done */
+#define RD_KAFKA_CGRP_F_SUBSCRIPTION \
+        0x10 /* If set: \
+              *   subscription \
+              * else: \
+              *   static assignment */
+#define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT \
+        0x20 /* A Heartbeat request \
+              * is in transit, dont \
+              * send a new one. */
+#define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION \
+        0x40 /* Subscription contains \
+              * wildcards. */
+#define RD_KAFKA_CGRP_F_WAIT_LEAVE \
+        0x80 /* Wait for LeaveGroup \
+              * to be sent. \
+              * This is used to stall \
+              * termination until \
+              * the LeaveGroupRequest \
+              * is responded to, \
+              * otherwise it risks \
+              * being dropped in the \
+              * output queue when \
+              * the broker is destroyed. \
+              */
+#define RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED \
+        0x100 /**< max.poll.interval.ms \
+               * was exceeded and we \
+               * left the group. \
+               * Do not rejoin until \
+               * the application has \
+               * polled again. */

         rd_interval_t rkcg_coord_query_intvl;  /* Coordinator query intvl*/
         rd_interval_t rkcg_heartbeat_intvl;    /* Heartbeat intvl */
         rd_interval_t rkcg_join_intvl;         /* JoinGroup interval */
         rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */

         rd_ts_t rkcg_ts_session_timeout; /**< Absolute session
                                           * timeout enforced by
                                           * the consumer, this
                                           * value is updated on
                                           * Heartbeat success,
                                           * etc. */
         rd_kafka_resp_err_t rkcg_last_heartbeat_err; /**< Last Heartbeat error,
                                                       * used for logging. */

-        TAILQ_HEAD(, rd_kafka_topic_s) rkcg_topics;/* Topics subscribed to */
+        TAILQ_HEAD(, rd_kafka_topic_s) rkcg_topics; /* Topics subscribed to */

         rd_list_t rkcg_toppars; /* Toppars subscribed to*/

         int32_t rkcg_generation_id; /* Current generation id */

         rd_kafka_assignor_t *rkcg_assignor; /**< The current partition
                                              * assignor. used by both
                                              * leader and members. */
         void *rkcg_assignor_state;          /**< current partition
                                              * assignor state */

         int32_t rkcg_coord_id; /**< Current coordinator id,
                                 * or -1 if not known. */

         rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator
                                              * broker handle, or NULL.
                                              * rkcg_coord's nodename is
                                              * updated to this broker's
                                              * nodename when there is a
                                              * coordinator change. */
         rd_kafka_broker_t *rkcg_coord;      /**< The dedicated coordinator
                                              * broker handle.
                                              * Will be updated when the
                                              * coordinator changes. */

         int16_t rkcg_wait_resp; /**< Awaiting response for this
                                  * ApiKey.
                                  * Makes sure only one
                                  * JoinGroup or SyncGroup
                                  * request is outstanding.
                                  * Unset value is -1. */

         /** Current subscription */
         rd_kafka_topic_partition_list_t *rkcg_subscription;
@@ -254,119 +260,117 @@ typedef struct rd_kafka_cgrp_s {
                                              * incremental unassign. */
         rd_bool_t rkcg_rebalance_rejoin;

         rd_kafka_resp_err_t rkcg_last_err; /* Last error propagated to
                                             * application.
                                             * This is for silencing
                                             * same errors. */

         rd_kafka_timer_t rkcg_offset_commit_tmr;     /* Offset commit timer */
         rd_kafka_timer_t rkcg_max_poll_interval_tmr; /**< Enforce the max
                                                       * poll interval. */

         rd_kafka_t *rkcg_rk;

         rd_kafka_op_t *rkcg_reply_rko; /* Send reply for op
                                         * (OP_TERMINATE)
                                         * to this rko's queue. */

         rd_ts_t rkcg_ts_terminate; /* Timestamp of when
                                     * cgrp termination was
                                     * initiated. */

         /* Protected by rd_kafka_*lock() */
         struct {
                 rd_ts_t ts_rebalance;       /* Timestamp of
                                              * last rebalance */
                 int rebalance_cnt;          /* Number of
                                                rebalances */
                 char rebalance_reason[256]; /**< Last rebalance
                                              * reason */
                 int assignment_size;        /* Partition count
                                              * of last rebalance
                                              * assignment */
         } rkcg_c;

 } rd_kafka_cgrp_t;




 /* Check if broker is the coordinator */
-#define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg,rkb) \
-        ((rkcg)->rkcg_coord_id != -1 && \
+#define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb) \
+        ((rkcg)->rkcg_coord_id != -1 && \
          (rkcg)->rkcg_coord_id == (rkb)->rkb_nodeid)

 /**
  * @returns true if cgrp is using static group membership
  */
 #define RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) \
         !RD_KAFKAP_STR_IS_NULL((rkcg)->rkcg_group_instance_id)

 extern const char *rd_kafka_cgrp_state_names[];
 extern const char *rd_kafka_cgrp_join_state_names[];

-void rd_kafka_cgrp_destroy_final (rd_kafka_cgrp_t *rkcg);
-rd_kafka_cgrp_t *rd_kafka_cgrp_new (rd_kafka_t *rk,
-                                    const rd_kafkap_str_t *group_id,
-                                    const rd_kafkap_str_t *client_id);
-void rd_kafka_cgrp_serve (rd_kafka_cgrp_t *rkcg);
+void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg);
+rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk,
+                                   const rd_kafkap_str_t *group_id,
+                                   const rd_kafkap_str_t *client_id);
+void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg);

-void rd_kafka_cgrp_op (rd_kafka_cgrp_t *rkcg, rd_kafka_toppar_t *rktp,
-                       rd_kafka_replyq_t replyq, rd_kafka_op_type_t type,
-                       rd_kafka_resp_err_t err);
-void rd_kafka_cgrp_terminate0 (rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko);
-void rd_kafka_cgrp_terminate (rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq);
+void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg,
+                      rd_kafka_toppar_t *rktp,
+                      rd_kafka_replyq_t replyq,
+                      rd_kafka_op_type_t type,
+                      rd_kafka_resp_err_t err);
+void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko);
+void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq);


-rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del (rd_kafka_cgrp_t *rkcg,
-                                                     const char *pattern);
-rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add (rd_kafka_cgrp_t *rkcg,
-                                                     const char *pattern);
+rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del(rd_kafka_cgrp_t *rkcg,
+                                                    const char *pattern);
+rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add(rd_kafka_cgrp_t *rkcg,
+                                                    const char *pattern);

-int rd_kafka_cgrp_topic_check (rd_kafka_cgrp_t *rkcg, const char *topic);
+int rd_kafka_cgrp_topic_check(rd_kafka_cgrp_t *rkcg, const char *topic);

-void rd_kafka_cgrp_set_member_id (rd_kafka_cgrp_t *rkcg, const char *member_id);
+void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id);

-void rd_kafka_cgrp_set_join_state (rd_kafka_cgrp_t *rkcg, int join_state);
+void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state);

-rd_kafka_broker_t *rd_kafka_cgrp_get_coord (rd_kafka_cgrp_t *rkcg);
-void rd_kafka_cgrp_coord_query (rd_kafka_cgrp_t *rkcg,
-                                const char *reason);
-void rd_kafka_cgrp_coord_dead (rd_kafka_cgrp_t *rkcg, rd_kafka_resp_err_t err,
-                               const char *reason);
-void rd_kafka_cgrp_metadata_update_check (rd_kafka_cgrp_t *rkcg,
-                                          rd_bool_t do_join);
+rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg);
+void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason);
+void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg,
+                              rd_kafka_resp_err_t err,
+                              const char *reason);
+void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg,
+                                         rd_bool_t do_join);
 #define rd_kafka_cgrp_get(rk) ((rk)->rk_cgrp)


-void
-rd_kafka_cgrp_assigned_offsets_commit (rd_kafka_cgrp_t *rkcg,
-                                       const rd_kafka_topic_partition_list_t
-                                       *offsets, rd_bool_t set_offsets,
-                                       const char *reason);
+void rd_kafka_cgrp_assigned_offsets_commit(
+    rd_kafka_cgrp_t *rkcg,
+    const rd_kafka_topic_partition_list_t *offsets,
+    rd_bool_t set_offsets,
+    const char *reason);

-void rd_kafka_cgrp_assignment_done (rd_kafka_cgrp_t *rkcg);
+void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg);

-rd_bool_t rd_kafka_cgrp_assignment_is_lost (rd_kafka_cgrp_t *rkcg);
+rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg);


 struct rd_kafka_consumer_group_metadata_s {
         char *group_id;
         int32_t generation_id;
         char *member_id;
         char *group_instance_id; /**< Optional (NULL) */
 };

-rd_kafka_consumer_group_metadata_t *
-rd_kafka_consumer_group_metadata_dup (
-        const rd_kafka_consumer_group_metadata_t *cgmetadata);
+rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup(
+    const rd_kafka_consumer_group_metadata_t *cgmetadata);


 static RD_UNUSED const char *
-rd_kafka_rebalance_protocol2str (rd_kafka_rebalance_protocol_t protocol) {
-        switch (protocol)
-        {
+rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) {
+        switch (protocol) {
         case RD_KAFKA_REBALANCE_PROTOCOL_EAGER:
                 return "EAGER";
         case RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE:
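rd_kafka_rebalance_protocol2str() mirrors the public rd_kafka_rebalance_protocol() accessor, which reports the negotiated protocol as a string. A small sketch, assuming a librdkafka version that ships the accessor (introduced with incremental rebalancing support):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: log which rebalance protocol the consumer negotiated.
 * rd_kafka_rebalance_protocol() returns "NONE", "EAGER" or "COOPERATIVE". */
static void log_rebalance_protocol(rd_kafka_t *rk) {
        fprintf(stderr, "%% Rebalance protocol in use: %s\n",
                rd_kafka_rebalance_protocol(rk));
}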
src/rdkafka_conf.c: 4811 lines changed (diff not shown because of its size).
src/rdkafka_conf.h

@@ -46,30 +46,28 @@ struct rd_kafka_transport_s;
  * MessageSet compression codecs
  */
 typedef enum {
         RD_KAFKA_COMPRESSION_NONE,
         RD_KAFKA_COMPRESSION_GZIP   = RD_KAFKA_MSG_ATTR_GZIP,
         RD_KAFKA_COMPRESSION_SNAPPY = RD_KAFKA_MSG_ATTR_SNAPPY,
         RD_KAFKA_COMPRESSION_LZ4    = RD_KAFKA_MSG_ATTR_LZ4,
         RD_KAFKA_COMPRESSION_ZSTD   = RD_KAFKA_MSG_ATTR_ZSTD,
         RD_KAFKA_COMPRESSION_INHERIT, /* Inherit setting from global conf */
         RD_KAFKA_COMPRESSION_NUM
 } rd_kafka_compression_t;

 static RD_INLINE RD_UNUSED const char *
-rd_kafka_compression2str (rd_kafka_compression_t compr) {
+rd_kafka_compression2str(rd_kafka_compression_t compr) {
         static const char *names[RD_KAFKA_COMPRESSION_NUM] = {
-                [RD_KAFKA_COMPRESSION_NONE] = "none",
-                [RD_KAFKA_COMPRESSION_GZIP] = "gzip",
-                [RD_KAFKA_COMPRESSION_SNAPPY] = "snappy",
-                [RD_KAFKA_COMPRESSION_LZ4] = "lz4",
-                [RD_KAFKA_COMPRESSION_ZSTD] = "zstd",
-                [RD_KAFKA_COMPRESSION_INHERIT] = "inherit"
-        };
+            [RD_KAFKA_COMPRESSION_NONE]    = "none",
+            [RD_KAFKA_COMPRESSION_GZIP]    = "gzip",
+            [RD_KAFKA_COMPRESSION_SNAPPY]  = "snappy",
+            [RD_KAFKA_COMPRESSION_LZ4]     = "lz4",
+            [RD_KAFKA_COMPRESSION_ZSTD]    = "zstd",
+            [RD_KAFKA_COMPRESSION_INHERIT] = "inherit"};
         static RD_TLS char ret[32];

         if ((int)compr < 0 || compr >= RD_KAFKA_COMPRESSION_NUM) {
-                rd_snprintf(ret, sizeof(ret),
-                            "codec0x%x?", (int)compr);
+                rd_snprintf(ret, sizeof(ret), "codec0x%x?", (int)compr);
                 return ret;
         }
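These internal codec enums back the public compression.codec property; a short sketch of selecting a codec through the public configuration API:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Sketch: enable lz4 compression on a producer configuration.
 * Valid values mirror the enum above: none, gzip, snappy, lz4, zstd. */
static int enable_lz4(rd_kafka_conf_t *conf) {
        char errstr[512];

        if (rd_kafka_conf_set(conf, "compression.codec", "lz4", errstr,
                              sizeof(errstr)) != RD_KAFKA_CONF_OK) {
                fprintf(stderr, "%s\n", errstr);
                return -1;
        }
        return 0;
}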
@ -80,56 +78,52 @@ rd_kafka_compression2str (rd_kafka_compression_t compr) {
|
|||
* MessageSet compression levels
|
||||
*/
|
||||
typedef enum {
|
||||
RD_KAFKA_COMPLEVEL_DEFAULT = -1,
|
||||
RD_KAFKA_COMPLEVEL_MIN = -1,
|
||||
RD_KAFKA_COMPLEVEL_GZIP_MAX = 9,
|
||||
RD_KAFKA_COMPLEVEL_LZ4_MAX = 12,
|
||||
RD_KAFKA_COMPLEVEL_SNAPPY_MAX = 0,
|
||||
RD_KAFKA_COMPLEVEL_ZSTD_MAX = 22,
|
||||
RD_KAFKA_COMPLEVEL_MAX = 12
|
||||
RD_KAFKA_COMPLEVEL_DEFAULT = -1,
|
||||
RD_KAFKA_COMPLEVEL_MIN = -1,
|
||||
RD_KAFKA_COMPLEVEL_GZIP_MAX = 9,
|
||||
RD_KAFKA_COMPLEVEL_LZ4_MAX = 12,
|
||||
RD_KAFKA_COMPLEVEL_SNAPPY_MAX = 0,
|
||||
RD_KAFKA_COMPLEVEL_ZSTD_MAX = 22,
|
||||
RD_KAFKA_COMPLEVEL_MAX = 12
|
||||
} rd_kafka_complevel_t;
|
||||
|
||||
typedef enum {
|
||||
RD_KAFKA_PROTO_PLAINTEXT,
|
||||
RD_KAFKA_PROTO_SSL,
|
||||
RD_KAFKA_PROTO_SASL_PLAINTEXT,
|
||||
RD_KAFKA_PROTO_SASL_SSL,
|
||||
RD_KAFKA_PROTO_NUM,
|
||||
RD_KAFKA_PROTO_PLAINTEXT,
|
||||
RD_KAFKA_PROTO_SSL,
|
||||
RD_KAFKA_PROTO_SASL_PLAINTEXT,
|
||||
RD_KAFKA_PROTO_SASL_SSL,
|
||||
RD_KAFKA_PROTO_NUM,
|
||||
} rd_kafka_secproto_t;
|
||||
|
||||
|
||||
typedef enum {
|
||||
RD_KAFKA_CONFIGURED,
|
||||
RD_KAFKA_LEARNED,
|
||||
RD_KAFKA_INTERNAL,
|
||||
RD_KAFKA_CONFIGURED,
|
||||
RD_KAFKA_LEARNED,
|
||||
RD_KAFKA_INTERNAL,
|
||||
RD_KAFKA_LOGICAL
|
||||
} rd_kafka_confsource_t;
|
||||
|
||||
static RD_INLINE RD_UNUSED
|
||||
const char *rd_kafka_confsource2str (rd_kafka_confsource_t source) {
|
||||
static const char *names[] = {
|
||||
"configured",
|
||||
"learned",
|
||||
"internal",
|
||||
"logical"
|
||||
};
|
||||
static RD_INLINE RD_UNUSED const char *
|
||||
rd_kafka_confsource2str(rd_kafka_confsource_t source) {
|
||||
static const char *names[] = {"configured", "learned", "internal",
|
||||
"logical"};
|
||||
|
||||
return names[source];
|
||||
}
|
||||
|
||||
|
||||
typedef enum {
|
||||
_RK_GLOBAL = 0x1,
|
||||
_RK_PRODUCER = 0x2,
|
||||
_RK_CONSUMER = 0x4,
|
||||
_RK_TOPIC = 0x8,
|
||||
_RK_CGRP = 0x10,
|
||||
_RK_DEPRECATED = 0x20,
|
||||
_RK_HIDDEN = 0x40,
|
||||
_RK_HIGH = 0x80, /* High Importance */
|
||||
_RK_MED = 0x100, /* Medium Importance */
|
||||
typedef enum {
|
||||
_RK_GLOBAL = 0x1,
|
||||
_RK_PRODUCER = 0x2,
|
||||
_RK_CONSUMER = 0x4,
|
||||
_RK_TOPIC = 0x8,
|
||||
_RK_CGRP = 0x10,
|
||||
_RK_DEPRECATED = 0x20,
|
||||
_RK_HIDDEN = 0x40,
|
||||
_RK_HIGH = 0x80, /* High Importance */
|
||||
_RK_MED = 0x100, /* Medium Importance */
|
||||
_RK_EXPERIMENTAL = 0x200, /* Experimental (unsupported) property */
|
||||
_RK_SENSITIVE = 0x400 /* The configuration property's value
|
||||
_RK_SENSITIVE = 0x400 /* The configuration property's value
|
||||
* might contain sensitive information. */
|
||||
} rd_kafka_conf_scope_t;
|
||||
|
||||
|
@ -138,9 +132,9 @@ typedef enum {
|
|||
#define _RK_CGRP _RK_CONSUMER
|
||||
|
||||
typedef enum {
|
||||
_RK_CONF_PROP_SET_REPLACE, /* Replace current value (default) */
|
||||
_RK_CONF_PROP_SET_ADD, /* Add value (S2F) */
|
||||
_RK_CONF_PROP_SET_DEL /* Remove value (S2F) */
|
||||
_RK_CONF_PROP_SET_REPLACE, /* Replace current value (default) */
|
||||
_RK_CONF_PROP_SET_ADD, /* Add value (S2F) */
|
||||
_RK_CONF_PROP_SET_DEL /* Remove value (S2F) */
|
||||
} rd_kafka_conf_set_mode_t;
|
||||
|
||||
|
||||
|
@ -158,12 +152,12 @@ typedef enum {
|
|||
|
||||
typedef enum {
|
||||
RD_KAFKA_SSL_ENDPOINT_ID_NONE,
|
||||
RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, /**< RFC2818 */
|
||||
RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, /**< RFC2818 */
|
||||
} rd_kafka_ssl_endpoint_id_t;
|
||||
|
||||
/* Increase in steps of 64 as needed.
|
||||
* This must be larger than sizeof(rd_kafka_[topic_]conf_t) */
|
||||
#define RD_KAFKA_CONF_PROPS_IDX_MAX (64*30)
|
||||
#define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 30)
|
||||
|
||||
/**
|
||||
* @struct rd_kafka_anyconf_t
|
||||
|
@ -172,7 +166,7 @@ typedef enum {
|
|||
* It provides a way to track which property has been modified.
|
||||
*/
|
||||
struct rd_kafka_anyconf_hdr {
|
||||
uint64_t modified[RD_KAFKA_CONF_PROPS_IDX_MAX/64];
|
||||
uint64_t modified[RD_KAFKA_CONF_PROPS_IDX_MAX / 64];
|
||||
};
|
||||
|
||||
|
||||
|
@ -184,48 +178,48 @@ struct rd_kafka_anyconf_hdr {
|
|||
*
|
||||
*/
|
||||
struct rd_kafka_conf_s {
|
||||
struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */
|
||||
struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */
|
||||
|
||||
/*
|
||||
* Generic configuration
|
||||
*/
|
||||
int enabled_events;
|
||||
int max_msg_size;
|
||||
int msg_copy_max_size;
|
||||
int recv_max_msg_size;
|
||||
int max_inflight;
|
||||
int metadata_request_timeout_ms;
|
||||
int metadata_refresh_interval_ms;
|
||||
int metadata_refresh_fast_cnt;
|
||||
int metadata_refresh_fast_interval_ms;
|
||||
int metadata_refresh_sparse;
|
||||
int metadata_max_age_ms;
|
||||
int metadata_propagation_max_ms;
|
||||
int debug;
|
||||
int broker_addr_ttl;
|
||||
int broker_addr_family;
|
||||
int socket_timeout_ms;
|
||||
int socket_blocking_max_ms;
|
||||
int socket_sndbuf_size;
|
||||
int socket_rcvbuf_size;
|
||||
int socket_keepalive;
|
||||
int socket_nagle_disable;
|
||||
int socket_max_fails;
|
||||
char *client_id_str;
|
||||
char *brokerlist;
|
||||
int stats_interval_ms;
|
||||
int term_sig;
|
||||
int reconnect_backoff_ms;
|
||||
int reconnect_backoff_max_ms;
|
||||
int reconnect_jitter_ms;
|
||||
int connections_max_idle_ms;
|
||||
int sparse_connections;
|
||||
int sparse_connect_intvl;
|
||||
int api_version_request;
|
||||
int api_version_request_timeout_ms;
|
||||
int api_version_fallback_ms;
|
||||
char *broker_version_fallback;
|
||||
rd_kafka_secproto_t security_protocol;
|
||||
/*
|
||||
* Generic configuration
|
||||
*/
|
||||
int enabled_events;
|
||||
int max_msg_size;
|
||||
int msg_copy_max_size;
|
||||
int recv_max_msg_size;
|
||||
int max_inflight;
|
||||
int metadata_request_timeout_ms;
|
||||
int metadata_refresh_interval_ms;
|
||||
int metadata_refresh_fast_cnt;
|
||||
int metadata_refresh_fast_interval_ms;
|
||||
int metadata_refresh_sparse;
|
||||
int metadata_max_age_ms;
|
||||
int metadata_propagation_max_ms;
|
||||
int debug;
|
||||
int broker_addr_ttl;
|
||||
int broker_addr_family;
|
||||
int socket_timeout_ms;
|
||||
int socket_blocking_max_ms;
|
||||
int socket_sndbuf_size;
|
||||
int socket_rcvbuf_size;
|
||||
int socket_keepalive;
|
||||
int socket_nagle_disable;
|
||||
int socket_max_fails;
|
||||
char *client_id_str;
|
||||
char *brokerlist;
|
||||
int stats_interval_ms;
|
||||
int term_sig;
|
||||
int reconnect_backoff_ms;
|
||||
int reconnect_backoff_max_ms;
|
||||
int reconnect_jitter_ms;
|
||||
int connections_max_idle_ms;
|
||||
int sparse_connections;
|
||||
int sparse_connect_intvl;
|
||||
int api_version_request;
|
||||
int api_version_request_timeout_ms;
|
||||
int api_version_fallback_ms;
|
||||
char *broker_version_fallback;
|
||||
rd_kafka_secproto_t security_protocol;
|
||||
|
||||
struct {
|
||||
#if WITH_SSL
|
||||
|
@ -255,16 +249,18 @@ struct rd_kafka_conf_s {
|
|||
void *engine_callback_data;
|
||||
char *keystore_location;
|
||||
char *keystore_password;
|
||||
int endpoint_identification;
|
||||
int enable_verify;
|
||||
int (*cert_verify_cb) (rd_kafka_t *rk,
|
||||
const char *broker_name,
|
||||
int32_t broker_id,
|
||||
int *x509_error,
|
||||
int depth,
|
||||
const char *buf, size_t size,
|
||||
char *errstr, size_t errstr_size,
|
||||
void *opaque);
|
||||
int endpoint_identification;
|
||||
int enable_verify;
|
||||
int (*cert_verify_cb)(rd_kafka_t *rk,
|
||||
const char *broker_name,
|
||||
int32_t broker_id,
|
||||
int *x509_error,
|
||||
int depth,
|
||||
const char *buf,
|
||||
size_t size,
|
||||
char *errstr,
|
||||
size_t errstr_size,
|
||||
void *opaque);
|
||||
} ssl;
|
||||
|
||||
struct {
|
||||
|
@ -274,22 +270,23 @@ struct rd_kafka_conf_s {
|
|||
char *service_name;
|
||||
char *kinit_cmd;
|
||||
char *keytab;
|
||||
int relogin_min_time;
|
||||
int relogin_min_time;
|
||||
char *username;
|
||||
char *password;
|
||||
#if WITH_SASL_SCRAM
|
||||
/* SCRAM EVP-wrapped hash function
|
||||
* (return value from EVP_shaX()) */
|
||||
const void/*EVP_MD*/ *scram_evp;
|
||||
const void /*EVP_MD*/ *scram_evp;
|
||||
/* SCRAM direct hash function (e.g., SHA256()) */
|
||||
unsigned char *(*scram_H) (const unsigned char *d, size_t n,
|
||||
unsigned char *md);
|
||||
unsigned char *(*scram_H)(const unsigned char *d,
|
||||
size_t n,
|
||||
unsigned char *md);
|
||||
/* Hash size */
|
||||
size_t scram_H_size;
|
||||
size_t scram_H_size;
|
||||
#endif
|
||||
char *oauthbearer_config;
|
||||
int enable_oauthbearer_unsecure_jwt;
|
||||
int enable_callback_queue;
|
||||
int enable_oauthbearer_unsecure_jwt;
|
||||
int enable_callback_queue;
|
||||
struct {
|
||||
rd_kafka_oauthbearer_method_t method;
|
||||
char *token_endpoint_url;
|
||||
|
@ -298,10 +295,9 @@ struct rd_kafka_conf_s {
|
|||
char *scope;
|
||||
char *extensions_str;
|
||||
/* SASL/OAUTHBEARER token refresh event callback */
|
||||
void (*token_refresh_cb) (
|
||||
rd_kafka_t *rk,
|
||||
const char *oauthbearer_config,
|
||||
void *opaque);
|
||||
void (*token_refresh_cb)(rd_kafka_t *rk,
|
||||
const char *oauthbearer_config,
|
||||
void *opaque);
|
||||
} oauthbearer;
|
||||
} sasl;
|
||||
|
||||
|
@ -313,195 +309,207 @@ struct rd_kafka_conf_s {
|
|||
/* Interceptors */
|
||||
struct {
|
||||
/* rd_kafka_interceptor_method_t lists */
|
||||
rd_list_t on_conf_set; /* on_conf_set interceptors
|
||||
* (not copied on conf_dup()) */
|
||||
rd_list_t on_conf_dup; /* .. (not copied) */
|
||||
rd_list_t on_conf_destroy; /* .. (not copied) */
|
||||
rd_list_t on_new; /* .. (copied) */
|
||||
rd_list_t on_destroy; /* .. (copied) */
|
||||
rd_list_t on_send; /* .. (copied) */
|
||||
rd_list_t on_acknowledgement; /* .. (copied) */
|
||||
rd_list_t on_consume; /* .. (copied) */
|
||||
rd_list_t on_commit; /* .. (copied) */
|
||||
rd_list_t on_request_sent; /* .. (copied) */
|
||||
rd_list_t on_response_received;/* .. (copied) */
|
||||
rd_list_t on_thread_start; /* .. (copied) */
|
||||
rd_list_t on_thread_exit; /* .. (copied) */
|
||||
rd_list_t on_conf_set; /* on_conf_set interceptors
|
||||
* (not copied on conf_dup()) */
|
||||
rd_list_t on_conf_dup; /* .. (not copied) */
|
||||
rd_list_t on_conf_destroy; /* .. (not copied) */
|
||||
rd_list_t on_new; /* .. (copied) */
|
||||
rd_list_t on_destroy; /* .. (copied) */
|
||||
rd_list_t on_send; /* .. (copied) */
|
||||
rd_list_t on_acknowledgement; /* .. (copied) */
|
||||
rd_list_t on_consume; /* .. (copied) */
|
||||
rd_list_t on_commit; /* .. (copied) */
|
||||
rd_list_t on_request_sent; /* .. (copied) */
|
||||
rd_list_t on_response_received; /* .. (copied) */
|
||||
rd_list_t on_thread_start; /* .. (copied) */
|
||||
rd_list_t on_thread_exit; /* .. (copied) */
|
||||
|
||||
/* rd_strtup_t list */
|
||||
rd_list_t config; /* Configuration name=val's
|
||||
* handled by interceptors. */
|
||||
rd_list_t config; /* Configuration name=val's
|
||||
* handled by interceptors. */
|
||||
} interceptors;
|
||||
|
||||
/* Client group configuration */
|
||||
int coord_query_intvl_ms;
|
||||
int max_poll_interval_ms;
|
||||
int coord_query_intvl_ms;
|
||||
int max_poll_interval_ms;
|
||||
|
||||
int builtin_features;
|
||||
/*
|
||||
* Consumer configuration
|
||||
*/
|
||||
int check_crcs;
|
||||
int queued_min_msgs;
|
||||
int queued_max_msg_kbytes;
|
||||
int builtin_features;
|
||||
/*
|
||||
* Consumer configuration
|
||||
*/
|
||||
int check_crcs;
|
||||
int queued_min_msgs;
|
||||
int queued_max_msg_kbytes;
|
||||
int64_t queued_max_msg_bytes;
|
||||
int fetch_wait_max_ms;
|
||||
int fetch_msg_max_bytes;
|
||||
int fetch_max_bytes;
|
||||
int fetch_min_bytes;
|
||||
int fetch_error_backoff_ms;
|
||||
char *group_id_str;
|
||||
char *group_instance_id;
|
||||
int allow_auto_create_topics;
|
||||
int fetch_wait_max_ms;
|
||||
int fetch_msg_max_bytes;
|
||||
int fetch_max_bytes;
|
||||
int fetch_min_bytes;
|
||||
int fetch_error_backoff_ms;
|
||||
char *group_id_str;
|
||||
char *group_instance_id;
|
||||
int allow_auto_create_topics;
|
||||
|
||||
rd_kafka_pattern_list_t *topic_blacklist;
|
||||
struct rd_kafka_topic_conf_s *topic_conf; /* Default topic config
|
||||
* for automatically
|
||||
* subscribed topics. */
|
||||
int enable_auto_commit;
|
||||
int enable_auto_offset_store;
|
||||
int enable_auto_offset_store;
|
||||
int auto_commit_interval_ms;
|
||||
int group_session_timeout_ms;
|
||||
int group_heartbeat_intvl_ms;
|
||||
rd_kafkap_str_t *group_protocol_type;
|
||||
char *partition_assignment_strategy;
|
||||
rd_list_t partition_assignors;
|
||||
int enabled_assignor_cnt;
|
||||
int enabled_assignor_cnt;
|
||||
|
||||
void (*rebalance_cb) (rd_kafka_t *rk,
|
||||
rd_kafka_resp_err_t err,
|
||||
rd_kafka_topic_partition_list_t *partitions,
|
||||
void *opaque);
|
||||
void (*rebalance_cb)(rd_kafka_t *rk,
|
||||
rd_kafka_resp_err_t err,
|
||||
rd_kafka_topic_partition_list_t *partitions,
|
||||
void *opaque);
|
||||
|
||||
void (*offset_commit_cb) (rd_kafka_t *rk,
|
||||
rd_kafka_resp_err_t err,
|
||||
rd_kafka_topic_partition_list_t *offsets,
|
||||
void *opaque);
|
||||
void (*offset_commit_cb)(rd_kafka_t *rk,
|
||||
rd_kafka_resp_err_t err,
|
||||
rd_kafka_topic_partition_list_t *offsets,
|
||||
void *opaque);
|
||||
|
||||
rd_kafka_offset_method_t offset_store_method;
|
||||
|
||||
rd_kafka_isolation_level_t isolation_level;
|
||||
|
||||
int enable_partition_eof;
|
||||
int enable_partition_eof;
|
||||
|
||||
rd_kafkap_str_t *client_rack;
|
||||
rd_kafkap_str_t *client_rack;
|
||||
|
||||
/*
|
||||
* Producer configuration
|
||||
*/
|
||||
/*
|
||||
* Producer configuration
|
||||
*/
|
||||
struct {
|
||||
/*
|
||||
* Idempotence
|
||||
*/
|
||||
int idempotence; /**< Enable Idempotent Producer */
|
||||
rd_bool_t gapless; /**< Raise fatal error if
|
||||
* gapless guarantee can't be
|
||||
* satisfied. */
|
||||
int idempotence; /**< Enable Idempotent Producer */
|
||||
rd_bool_t gapless; /**< Raise fatal error if
|
||||
* gapless guarantee can't be
|
||||
* satisfied. */
|
||||
/*
|
||||
* Transactions
|
||||
*/
|
||||
char *transactional_id; /**< Transactional Id */
|
||||
int transaction_timeout_ms; /**< Transaction timeout */
|
||||
char *transactional_id; /**< Transactional Id */
|
||||
int transaction_timeout_ms; /**< Transaction timeout */
|
||||
} eos;
|
||||
int queue_buffering_max_msgs;
|
||||
int queue_buffering_max_kbytes;
|
||||
int queue_buffering_max_msgs;
|
||||
int queue_buffering_max_kbytes;
|
||||
double buffering_max_ms_dbl; /**< This is the configured value */
|
||||
rd_ts_t buffering_max_us; /**< This is the value used in the code */
|
||||
int queue_backpressure_thres;
|
||||
int max_retries;
|
||||
int retry_backoff_ms;
|
||||
int batch_num_messages;
|
||||
int batch_size;
|
||||
rd_kafka_compression_t compression_codec;
|
||||
int dr_err_only;
|
||||
int sticky_partition_linger_ms;
|
||||
rd_ts_t buffering_max_us; /**< This is the value used in the code */
|
||||
int queue_backpressure_thres;
|
||||
int max_retries;
|
||||
int retry_backoff_ms;
|
||||
int batch_num_messages;
|
||||
int batch_size;
|
||||
rd_kafka_compression_t compression_codec;
|
||||
int dr_err_only;
|
||||
int sticky_partition_linger_ms;
|
||||
|
||||
/* Message delivery report callback.
|
||||
* Called once for each produced message, either on
|
||||
* successful and acknowledged delivery to the broker in which
|
||||
* case 'err' is 0, or if the message could not be delivered
|
||||
* in which case 'err' is non-zero (use rd_kafka_err2str()
|
||||
* to obtain a human-readable error reason).
|
||||
*
|
||||
* If the message was produced with neither RD_KAFKA_MSG_F_FREE
|
||||
* or RD_KAFKA_MSG_F_COPY set then 'payload' is the original
|
||||
* pointer provided to rd_kafka_produce().
|
||||
* rdkafka will not perform any further actions on 'payload'
|
||||
* at this point and the application may rd_free the payload data
|
||||
* at this point.
|
||||
*
|
||||
* 'opaque' is 'conf.opaque', while 'msg_opaque' is
|
||||
* the opaque pointer provided in the rd_kafka_produce() call.
|
||||
*/
|
||||
void (*dr_cb) (rd_kafka_t *rk,
|
||||
void *payload, size_t len,
|
||||
rd_kafka_resp_err_t err,
|
||||
void *opaque, void *msg_opaque);
|
||||
/* Message delivery report callback.
|
||||
* Called once for each produced message, either on
|
||||
* successful and acknowledged delivery to the broker in which
|
||||
* case 'err' is 0, or if the message could not be delivered
|
||||
* in which case 'err' is non-zero (use rd_kafka_err2str()
|
||||
* to obtain a human-readable error reason).
|
||||
*
|
||||
* If the message was produced with neither RD_KAFKA_MSG_F_FREE
|
||||
* or RD_KAFKA_MSG_F_COPY set then 'payload' is the original
|
||||
* pointer provided to rd_kafka_produce().
|
||||
* rdkafka will not perform any further actions on 'payload'
|
||||
* at this point and the application may rd_free the payload data
|
||||
* at this point.
|
||||
*
|
||||
* 'opaque' is 'conf.opaque', while 'msg_opaque' is
|
||||
* the opaque pointer provided in the rd_kafka_produce() call.
|
||||
*/
|
||||
void (*dr_cb)(rd_kafka_t *rk,
|
||||
void *payload,
|
||||
size_t len,
|
||||
rd_kafka_resp_err_t err,
|
||||
void *opaque,
|
||||
void *msg_opaque);
|
||||
|
||||
void (*dr_msg_cb) (rd_kafka_t *rk, const rd_kafka_message_t *rkmessage,
|
||||
void *opaque);
|
||||
void (*dr_msg_cb)(rd_kafka_t *rk,
|
||||
const rd_kafka_message_t *rkmessage,
|
||||
void *opaque);
|
||||
|
||||
/* Consume callback */
|
||||
void (*consume_cb) (rd_kafka_message_t *rkmessage, void *opaque);
|
||||
void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque);
|
||||
|
||||
/* Log callback */
|
||||
void (*log_cb) (const rd_kafka_t *rk, int level,
|
||||
const char *fac, const char *buf);
|
||||
int log_level;
|
||||
int log_queue;
|
||||
int log_thread_name;
|
||||
int log_connection_close;
|
||||
void (*log_cb)(const rd_kafka_t *rk,
|
||||
int level,
|
||||
const char *fac,
|
||||
const char *buf);
|
||||
int log_level;
|
||||
int log_queue;
|
||||
int log_thread_name;
|
||||
int log_connection_close;
|
||||
|
||||
/* PRNG seeding */
|
||||
int enable_random_seed;
|
||||
int enable_random_seed;
|
||||
|
||||
/* Error callback */
|
||||
void (*error_cb) (rd_kafka_t *rk, int err,
|
||||
const char *reason, void *opaque);
|
||||
void (*error_cb)(rd_kafka_t *rk,
|
||||
int err,
|
||||
const char *reason,
|
||||
void *opaque);
|
||||
|
||||
/* Throttle callback */
|
||||
void (*throttle_cb) (rd_kafka_t *rk, const char *broker_name,
|
||||
int32_t broker_id, int throttle_time_ms,
|
||||
void *opaque);
|
||||
/* Throttle callback */
|
||||
void (*throttle_cb)(rd_kafka_t *rk,
|
||||
const char *broker_name,
|
||||
int32_t broker_id,
|
||||
int throttle_time_ms,
|
||||
void *opaque);
|
||||
|
||||
/* Stats callback */
|
||||
int (*stats_cb) (rd_kafka_t *rk,
|
||||
char *json,
|
||||
size_t json_len,
|
||||
void *opaque);
|
||||
|
||||
/* Socket creation callback */
|
||||
int (*socket_cb) (int domain, int type, int protocol, void *opaque);
|
||||
|
||||
/* Connect callback */
|
||||
int (*connect_cb) (int sockfd,
|
||||
const struct sockaddr *addr,
|
||||
int addrlen,
|
||||
const char *id,
|
||||
void *opaque);
|
||||
|
||||
/* Close socket callback */
|
||||
int (*closesocket_cb) (int sockfd, void *opaque);
|
||||
|
||||
/* File open callback */
|
||||
int (*open_cb) (const char *pathname, int flags, mode_t mode,
|
||||
/* Stats callback */
|
||||
int (*stats_cb)(rd_kafka_t *rk,
|
||||
char *json,
|
||||
size_t json_len,
|
||||
void *opaque);

        /* Socket creation callback */
        int (*socket_cb)(int domain, int type, int protocol, void *opaque);

        /* Connect callback */
        int (*connect_cb)(int sockfd,
                          const struct sockaddr *addr,
                          int addrlen,
                          const char *id,
                          void *opaque);

        /* Close socket callback */
        int (*closesocket_cb)(int sockfd, void *opaque);

        /* File open callback */
        int (*open_cb)(const char *pathname,
                       int flags,
                       mode_t mode,
                       void *opaque);
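
The socket/connect/open callbacks exist chiefly so applications can create file descriptors with CLOEXEC set race-free on platforms where librdkafka's built-in handlers (rd_kafka_socket_cb_linux() and friends, declared further below) do not apply. A POSIX-flavoured socket_cb sketch (name illustrative):

#include <fcntl.h>
#include <sys/socket.h>
#include <librdkafka/rdkafka.h>

/* Create sockets with FD_CLOEXEC set (POSIX assumed). */
static int my_socket_cb(int domain, int type, int protocol, void *opaque) {
        int fd = socket(domain, type, protocol);
        if (fd != -1)
                fcntl(fd, F_SETFD, FD_CLOEXEC);
        return fd;
}

/* During configuration: rd_kafka_conf_set_socket_cb(conf, my_socket_cb); */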

        /* Background queue event callback */
        void (*background_event_cb) (rd_kafka_t *rk, rd_kafka_event_t *rkev,
                                     void *opaque);
        void (*background_event_cb)(rd_kafka_t *rk,
                                    rd_kafka_event_t *rkev,
                                    void *opaque);
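
rd_kafka_conf_set_background_event_cb() routes events to librdkafka's internal background thread so the application need not run its own poll loop; the callback must not block or call blocking librdkafka APIs, and the application destroys the event. Sketch (name illustrative):

#include <stdio.h>
#include <librdkafka/rdkafka.h>

/* Runs on librdkafka's background thread. */
static void my_background_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev,
                                   void *opaque) {
        fprintf(stderr, "%% Background event: %s\n",
                rd_kafka_event_name(rkev));
        rd_kafka_event_destroy(rkev);
}

/* During configuration:
 *   rd_kafka_conf_set_background_event_cb(conf, my_background_event_cb); */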


        /* Opaque passed to callbacks. */
        void *opaque;
        /* Opaque passed to callbacks. */
        void *opaque;

        /* For use with value-less properties. */
        int dummy;
        int dummy;


        /* Admin client defaults */
        struct {
                int request_timeout_ms; /* AdminOptions.request_timeout */
                int request_timeout_ms; /* AdminOptions.request_timeout */
        } admin;
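
admin.request_timeout_ms above only seeds the default; individual admin requests override it through the public AdminOptions API. A sketch, with the helper name and the 30 s value purely illustrative:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

static rd_kafka_AdminOptions_t *make_admin_options(rd_kafka_t *rk) {
        char errstr[256];
        rd_kafka_AdminOptions_t *options =
            rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);

        if (rd_kafka_AdminOptions_set_request_timeout(
                options, 30 * 1000, errstr, sizeof(errstr)))
                fprintf(stderr, "%% %s\n", errstr);

        return options; /* pass to rd_kafka_CreateTopics(), then destroy */
}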


@ -509,7 +517,7 @@ struct rd_kafka_conf_s {
         * Test mocks
         */
        struct {
                int broker_cnt; /**< Number of mock brokers */
                int broker_cnt; /**< Number of mock brokers */
        } mock;

        /*

@ -517,11 +525,11 @@ struct rd_kafka_conf_s {
         */
        struct {
                /**< Inject errors in ProduceResponse handler */
                rd_kafka_resp_err_t (*handle_ProduceResponse) (
                        rd_kafka_t *rk,
                        int32_t brokerid,
                        uint64_t msgid,
                        rd_kafka_resp_err_t err);
                rd_kafka_resp_err_t (*handle_ProduceResponse)(
                    rd_kafka_t *rk,
                    int32_t brokerid,
                    uint64_t msgid,
                    rd_kafka_resp_err_t err);
        } ut;

        char *sw_name; /**< Software/client name */

@ -534,81 +542,90 @@ struct rd_kafka_conf_s {
        } warn;
};

int rd_kafka_socket_cb_linux (int domain, int type, int protocol, void *opaque);
int rd_kafka_socket_cb_generic (int domain, int type, int protocol,
                                void *opaque);
int rd_kafka_socket_cb_linux(int domain, int type, int protocol, void *opaque);
int rd_kafka_socket_cb_generic(int domain,
                               int type,
                               int protocol,
                               void *opaque);
#ifndef _WIN32
int rd_kafka_open_cb_linux (const char *pathname, int flags, mode_t mode,
                            void *opaque);
int rd_kafka_open_cb_linux(const char *pathname,
                           int flags,
                           mode_t mode,
                           void *opaque);
#endif
int rd_kafka_open_cb_generic (const char *pathname, int flags, mode_t mode,
                              void *opaque);
int rd_kafka_open_cb_generic(const char *pathname,
                             int flags,
                             mode_t mode,
                             void *opaque);


struct rd_kafka_topic_conf_s {
        struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */
        struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */

        int required_acks;
        int32_t request_timeout_ms;
        int message_timeout_ms;
        int required_acks;
        int32_t request_timeout_ms;
        int message_timeout_ms;

        int32_t (*partitioner) (const rd_kafka_topic_t *rkt,
                                const void *keydata, size_t keylen,
                                int32_t partition_cnt,
                                void *rkt_opaque,
                                void *msg_opaque);
        char *partitioner_str;
        int32_t (*partitioner)(const rd_kafka_topic_t *rkt,
                               const void *keydata,
                               size_t keylen,
                               int32_t partition_cnt,
                               void *rkt_opaque,
                               void *msg_opaque);
        char *partitioner_str;
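
Custom partitioners are registered with rd_kafka_topic_conf_set_partitioner_cb(). The callback must be deterministic and thread-safe, and must return a value below partition_cnt. A deliberately trivial byte-sum sketch that falls back to the built-in random partitioner for keyless messages:

#include <librdkafka/rdkafka.h>

static int32_t my_partitioner(const rd_kafka_topic_t *rkt,
                              const void *keydata, size_t keylen,
                              int32_t partition_cnt,
                              void *rkt_opaque, void *msg_opaque) {
        const unsigned char *key = keydata;
        unsigned int sum = 0;
        size_t i;

        if (!keydata) /* keyless messages: defer to built-in random */
                return rd_kafka_msg_partitioner_random(
                    rkt, keydata, keylen, partition_cnt, rkt_opaque,
                    msg_opaque);

        for (i = 0; i < keylen; i++)
                sum += key[i];
        return (int32_t)(sum % (unsigned int)partition_cnt);
}

/* During setup:
 *   rd_kafka_topic_conf_set_partitioner_cb(topic_conf, my_partitioner); */

A production partitioner would also check rd_kafka_topic_partition_available() before returning a partition.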

        rd_bool_t random_partitioner; /**< rd_true - random
                                       *   rd_false - sticky */
                                       *   rd_false - sticky */

        int queuing_strategy; /* RD_KAFKA_QUEUE_FIFO|LIFO */
        int (*msg_order_cmp) (const void *a, const void *b);
        int (*msg_order_cmp)(const void *a, const void *b);

        rd_kafka_compression_t compression_codec;
        rd_kafka_complevel_t compression_level;
        int produce_offset_report;
        rd_kafka_compression_t compression_codec;
        rd_kafka_complevel_t compression_level;
        int produce_offset_report;

        int consume_callback_max_msgs;
        int auto_commit;
        int auto_commit_interval_ms;
        int auto_offset_reset;
        char *offset_store_path;
        int offset_store_sync_interval_ms;
        int consume_callback_max_msgs;
        int auto_commit;
        int auto_commit_interval_ms;
        int auto_offset_reset;
        char *offset_store_path;
        int offset_store_sync_interval_ms;

        rd_kafka_offset_method_t offset_store_method;

        /* Application provided opaque pointer (this is rkt_opaque) */
        void *opaque;
        /* Application provided opaque pointer (this is rkt_opaque) */
        void *opaque;
};


void rd_kafka_anyconf_destroy (int scope, void *conf);
void rd_kafka_anyconf_destroy(int scope, void *conf);

rd_bool_t rd_kafka_conf_is_modified (const rd_kafka_conf_t *conf,
                                     const char *name);
rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf,
                                    const char *name);

void rd_kafka_desensitize_str (char *str);
void rd_kafka_desensitize_str(char *str);

void rd_kafka_conf_desensitize (rd_kafka_conf_t *conf);
void rd_kafka_topic_conf_desensitize (rd_kafka_topic_conf_t *tconf);
void rd_kafka_conf_desensitize(rd_kafka_conf_t *conf);
void rd_kafka_topic_conf_desensitize(rd_kafka_topic_conf_t *tconf);

const char *rd_kafka_conf_finalize (rd_kafka_type_t cltype,
                                    rd_kafka_conf_t *conf);
const char *rd_kafka_topic_conf_finalize (rd_kafka_type_t cltype,
                                          const rd_kafka_conf_t *conf,
                                          rd_kafka_topic_conf_t *tconf);
const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
                                   rd_kafka_conf_t *conf);
const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype,
                                         const rd_kafka_conf_t *conf,
                                         rd_kafka_topic_conf_t *tconf);


int rd_kafka_conf_warn (rd_kafka_t *rk);
int rd_kafka_conf_warn(rd_kafka_t *rk);

void rd_kafka_anyconf_dump_dbg (rd_kafka_t *rk, int scope, const void *conf,
                                const char *description);
void rd_kafka_anyconf_dump_dbg(rd_kafka_t *rk,
                               int scope,
                               const void *conf,
                               const char *description);

#include "rdkafka_confval.h"

int unittest_conf (void);
int unittest_conf(void);

#endif /* _RDKAFKA_CONF_H_ */