本文整理汇总了C++中TEST_SAY函数的典型用法代码示例。如果您正苦于以下问题:C++ TEST_SAY函数的具体用法?C++ TEST_SAY怎么用?C++ TEST_SAY使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了TEST_SAY函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main_0035_api_version
/**
 * @brief Issue a metadata request against a fresh producer handle and
 *        verify that it both succeeds and completes within the expected
 *        time budget (request timeout 10s, plus 1s of slack).
 */
int main_0035_api_version (int argc, char **argv) {
        rd_kafka_t *producer;
        rd_kafka_conf_t *conf;
        const struct rd_kafka_metadata *md;
        rd_kafka_resp_err_t err;
        test_timing_t timing;

        test_conf_init(&conf, NULL, 30);
        test_conf_set(conf, "socket.timeout.ms", "12000");
        producer = test_create_handle(RD_KAFKA_PRODUCER, conf);

        TEST_SAY("Querying for metadata\n");
        TIMING_START(&timing, "metadata()");
        err = rd_kafka_metadata(producer, 0, NULL, &md, 10*1000);
        TIMING_STOP(&timing);

        if (err)
                TEST_FAIL("metadata() failed: %s",
                          rd_kafka_err2str(err));

        /* The request timeout above is 10s: anything beyond 11s is a bug. */
        if (TIMING_DURATION(&timing) / 1000 > 11*1000)
                TEST_FAIL("metadata() took too long: %.3fms",
                          (float)TIMING_DURATION(&timing) / 1000.0f);

        rd_kafka_metadata_destroy(md);
        TEST_SAY("Metadata succeeded\n");

        rd_kafka_destroy(producer);

        return 0;
}
示例2: verify_groups
/**
* Verify that all groups in 'groups' are seen, if so returns group_cnt,
* else returns -1.
*/
static int verify_groups (const struct rd_kafka_group_list *grplist,
char **groups, int group_cnt) {
int i;
int seen = 0;
for (i = 0 ; i < grplist->group_cnt ; i++) {
const struct rd_kafka_group_info *gi = &grplist->groups[i];
int j;
for (j = 0 ; j < group_cnt ; j++) {
if (strcmp(gi->group, groups[j]))
continue;
if (gi->err)
TEST_SAY("Group %s has broker-reported "
"error: %s\n", gi->group,
rd_kafka_err2str(gi->err));
seen++;
}
}
TEST_SAY("Found %d/%d desired groups in list of %d groups\n",
seen, group_cnt, grplist->group_cnt);
if (seen != group_cnt)
return -1;
else
return seen;
}
示例3: verify_consumed_msg_check0
/**
 * Verify that the consumer recorded exactly the expected set of message
 * numbers 0..cons_msgs_size-1 in the module-global cons_msgs array,
 * then reset the tracking state.
 *
 * @param func Caller's function name (for error context).
 * @param line Caller's line number (for error context).
 */
static void verify_consumed_msg_check0 (const char *func, int line) {
        int i;
        int fails = 0;

        /* Fewer recorded messages than expected? */
        if (cons_msgs_cnt < cons_msgs_size) {
                TEST_SAY("Missing %i messages in consumer\n",
                         cons_msgs_size - cons_msgs_cnt);
                fails++;
        }

        /* NOTE(review): sorts cons_msgs_size entries even when only
         * cons_msgs_cnt were recorded — assumes the array is pre-filled
         * (e.g. zeroed/reset) up to cons_msgs_size; confirm against
         * verify_consumed_msg_reset(). */
        qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp);

        /* After sorting, slot i must hold message number i exactly once. */
        for (i = 0 ; i < cons_msgs_size ; i++) {
                if (cons_msgs[i] != i) {
                        TEST_SAY("Consumed message #%i is wrong, "
                                 "expected #%i\n",
                                 cons_msgs[i], i);
                        fails++;
                }
        }

        if (fails)
                TEST_FAIL("See above error(s)");

        /* Reset tracking state for the next verification round. */
        verify_consumed_msg_reset(0);
}
示例4: test_wait_exit
/**
 * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up.
 *
 * The single 'timeout' budget is shared between two phases:
 *  1. waiting for librdkafka's threads to terminate, and
 *  2. waiting for all internal librdkafka objects to be destroyed.
 *
 * Fix: the budget was previously double-counted — decremented once per
 * loop iteration (`timeout--`) AND then reduced again by the elapsed
 * wall-clock time, cutting phase 2 roughly in half. The loop is now
 * bounded by wall-clock time only.
 */
void test_wait_exit (int timeout) {
        int r;
        time_t start = time(NULL);

        /* Phase 1: wait for librdkafka threads to terminate. */
        while ((r = rd_kafka_thread_cnt()) > 0 &&
               (int)(time(NULL) - start) <= timeout) {
                TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r);
                rd_sleep(1);
        }

        TEST_SAY("%i thread(s) in use by librdkafka\n", r);

        if (r > 0) {
                TEST_FAIL("%i thread(s) still active in librdkafka", r);
        }

        /* Phase 2: spend whatever budget remains waiting for all
         * internal librdkafka objects to be destroyed. */
        timeout -= (int)(time(NULL) - start);
        if (timeout > 0) {
                TEST_SAY("Waiting %d seconds for all librdkafka memory "
                         "to be released\n", timeout);
                if (rd_kafka_wait_destroyed(timeout * 1000) == -1)
                        TEST_FAIL("Not all internal librdkafka "
                                  "objects destroyed\n");
        }
}
示例5: run_test0
/**
 * Run a single test with banner output and timing.
 *
 * @returns the test's own return value (0 on pass, non-zero on failure).
 */
static int run_test0 (struct run_args *run_args) {
        test_timing_t timing;
        int ret;

        /* Publish the running test's name for log/error context. */
        test_curr = run_args->testname;

        TEST_SAY("================= Running test %s =================\n",
                 run_args->testname);

        TIMING_START(&timing, run_args->testname);
        test_start = timing.ts_start;
        ret = run_args->test_main(run_args->argc, run_args->argv);
        TIMING_STOP(&timing);

        /* Red banner on failure, green banner on success. */
        if (ret)
                TEST_SAY("\033[31m"
                         "================= Test %s FAILED ================="
                         "\033[0m\n",
                         run_args->testname);
        else
                TEST_SAY("\033[32m"
                         "================= Test %s PASSED ================="
                         "\033[0m\n",
                         run_args->testname);

        return ret;
}
示例6: rebalance_cb
/**
 * Rebalance callback: on assignment, seed the first 'partitions'
 * partitions at msgcnt/2 and any extra partitions at the log end,
 * then assign; on revocation, unassign. Any other error is fatal.
 */
static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
                          rd_kafka_topic_partition_list_t *parts, void *opaque) {
        int p;

        TEST_SAY("rebalance_cb: %s:\n", rd_kafka_err2str(err));
        test_print_partition_list(parts);

        if (parts->cnt < partitions)
                TEST_FAIL("rebalance_cb: Expected %d partitions, not %d",
                          partitions, parts->cnt);

        if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
                /* First 'partitions' partitions start mid-stream,
                 * the rest start at the end of the log. */
                for (p = 0 ; p < parts->cnt ; p++) {
                        if (p < partitions)
                                parts->elems[p].offset = msgcnt / 2;
                        else
                                parts->elems[p].offset = RD_KAFKA_OFFSET_END;
                }

                TEST_SAY("Use these offsets:\n");
                test_print_partition_list(parts);
                test_consumer_assign("HL.REBALANCE", rk, parts);

        } else if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) {
                test_consumer_unassign("HL.REBALANCE", rk);

        } else {
                TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err));
        }
}
示例7: consume_messages
/**
 * Consume 'batch_cnt' messages from the tail of the given partition
 * using the legacy (simple consumer) API and verify each one against
 * the expected testid/partition/message-number.
 */
static void consume_messages (uint64_t testid, const char *topic,
                              int32_t partition, int msg_base, int batch_cnt,
                              int msgcnt) {
        rd_kafka_t *consumer;
        rd_kafka_topic_t *topic_h;
        rd_kafka_conf_t *conf;
        rd_kafka_topic_conf_t *topic_conf;
        int n;

        test_conf_init(&conf, &topic_conf, 20);

        /* Create kafka instance */
        consumer = test_create_handle(RD_KAFKA_CONSUMER, conf);

        topic_h = rd_kafka_topic_new(consumer, topic, topic_conf);
        if (!topic_h)
                TEST_FAIL("Failed to create topic: %s\n",
                          rd_kafka_err2str(rd_kafka_last_error()));

        TEST_SAY("Consuming %i messages from partition %i\n",
                 batch_cnt, partition);

        /* Start consuming 'batch_cnt' messages back from the tail. */
        if (rd_kafka_consume_start(topic_h, partition,
                                   RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1)
                TEST_FAIL("consume_start(%i, -%i) failed: %s",
                          (int)partition, batch_cnt,
                          rd_kafka_err2str(rd_kafka_last_error()));

        for (n = 0 ; n < batch_cnt ; n++) {
                rd_kafka_message_t *msg =
                        rd_kafka_consume(topic_h, partition,
                                         tmout_multip(5000));

                if (!msg)
                        TEST_FAIL("Failed to consume message %i/%i from "
                                  "partition %i: %s",
                                  n, batch_cnt, (int)partition,
                                  rd_kafka_err2str(rd_kafka_last_error()));

                if (msg->err)
                        TEST_FAIL("Consume message %i/%i from partition %i "
                                  "has error: %s",
                                  n, batch_cnt, (int)partition,
                                  rd_kafka_err2str(msg->err));

                verify_consumed_msg(testid, partition, msg_base + n, msg);
                rd_kafka_message_destroy(msg);
        }

        rd_kafka_consume_stop(topic_h, partition);

        /* Destroy topic */
        rd_kafka_topic_destroy(topic_h);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(consumer));
        rd_kafka_destroy(consumer);
}
示例8: verify_consumed_msg0
/**
 * Verify that a consumed message was produced by this test run:
 * the payload must be empty and the key must encode the expected
 * testid, partition and message number. On success the message number
 * is recorded in the module-global cons_msgs array.
 *
 * @param func      Caller's function name (for error context).
 * @param line      Caller's line number (for error context).
 * @param testid    Expected test run id.
 * @param partition Expected partition, or -1 to accept any.
 * @param msgnum    Expected message number, or -1 to accept any.
 * @param rkmessage The consumed message to verify.
 */
static void verify_consumed_msg0 (const char *func, int line,
                                  uint64_t testid, int32_t partition,
                                  int msgnum,
                                  rd_kafka_message_t *rkmessage) {
        uint64_t in_testid;
        int in_part;
        int in_msgnum;
        char buf[128];

        /* This test produces empty payloads; anything else is foreign. */
        if (rkmessage->len != 0)
                TEST_FAIL("Incoming message not NULL: %i bytes",
                          (int)rkmessage->len);

        if (rkmessage->key_len +1 >= sizeof(buf))
                TEST_FAIL("Incoming message key too large (%i): "
                          "not sourced by this test",
                          (int)rkmessage->key_len);

        /* The key is not necessarily NUL-terminated: copy it into a
         * bounded, terminated buffer before parsing. */
        rd_snprintf(buf, sizeof(buf), "%.*s",
                    (int)rkmessage->key_len, (char *)rkmessage->key);

        /* Key format: "testid=<u64>, partition=<int>, msg=<int>" */
        if (sscanf(buf, "testid=%"SCNu64", partition=%i, msg=%i",
                   &in_testid, &in_part, &in_msgnum) != 3)
                TEST_FAIL("Incorrect key format: %s", buf);

        /* -1 acts as a wildcard for partition and msgnum. */
        if (testid != in_testid ||
            (partition != -1 && partition != in_part) ||
            (msgnum != -1 && msgnum != in_msgnum) ||
            (in_msgnum < 0 || in_msgnum > cons_msgs_size))
                goto fail_match;

        if (test_level > 2) {
                TEST_SAY("%s:%i: Our testid %"PRIu64", part %i (%i), "
                         "msg %i/%i did "
                         ", key's: \"%s\"\n",
                         func, line,
                         testid, (int)partition, (int)rkmessage->partition,
                         msgnum, cons_msgs_size, buf);
        }

        /* Recording array full: report context before failing. */
        if (cons_msgs_cnt == cons_msgs_size) {
                TEST_SAY("Too many messages in cons_msgs (%i) while reading "
                         "message key \"%s\"\n",
                         cons_msgs_cnt, buf);
                verify_consumed_msg_check();
                TEST_FAIL("See above error(s)");
        }

        cons_msgs[cons_msgs_cnt++] = in_msgnum;
        return;

fail_match:
        TEST_FAIL("%s:%i: Our testid %"PRIu64", part %i, msg %i/%i did "
                  "not match message's key: \"%s\"\n",
                  func, line,
                  testid, (int)partition, msgnum, cons_msgs_size, buf);
}
示例9: test_consumer_poll
/**
 * Poll 'rk' until 'exp_cnt' messages have been consumed or
 * 'exp_eof_cnt' partition EOFs have been seen (pass -1 to ignore EOFs).
 * Each proper message is verified against 'testid'.
 *
 * @returns the number of proper messages consumed.
 */
int test_consumer_poll (const char *what, rd_kafka_t *rk, uint64_t testid,
                        int exp_eof_cnt, int exp_msg_base, int exp_cnt) {
        int eofs = 0;
        int consumed = 0;
        test_timing_t t_cons;

        TEST_SAY("%s: consume %d messages\n", what, exp_cnt);

        TIMING_START(&t_cons, "CONSUME");

        while ((exp_eof_cnt == -1 || eofs < exp_eof_cnt) &&
               consumed < exp_cnt) {
                rd_kafka_message_t *m = rd_kafka_consumer_poll(rk, 10*1000);

                if (!m) /* Shouldn't take this long to get a msg */
                        TEST_FAIL("%s: consumer_poll() timeout\n", what);

                if (m->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
                        TEST_SAY("%s [%"PRId32"] reached EOF at "
                                 "offset %"PRId64"\n",
                                 rd_kafka_topic_name(m->rkt),
                                 m->partition,
                                 m->offset);
                        eofs++;

                } else if (m->err) {
                        /* Errors are logged but non-fatal. */
                        TEST_SAY("%s [%"PRId32"] error (offset %"PRId64"): %s",
                                 m->rkt ?
                                 rd_kafka_topic_name(m->rkt) :
                                 "(no-topic)",
                                 m->partition,
                                 m->offset,
                                 rd_kafka_message_errstr(m));

                } else {
                        if (test_level > 2)
                                TEST_SAY("%s [%"PRId32"] "
                                         "message at offset %"PRId64"\n",
                                         rd_kafka_topic_name(m->rkt),
                                         m->partition,
                                         m->offset);

                        test_verify_rkmessage(m, testid, -1, -1);
                        consumed++;
                }

                rd_kafka_message_destroy(m);
        }

        TIMING_STOP(&t_cons);

        TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n",
                 what, consumed, exp_cnt, eofs, exp_eof_cnt);
        return consumed;
}
示例10: do_test_implicit_ack
/**
 * @brief Test handling of implicit acks.
 *
 * @param what Human-readable scenario name for test banners.
 * @param batch_cnt Total number of batches, ProduceRequests, sent.
 * @param initial_fail_batch_cnt How many of the initial batches should
 *                               fail with an emulated network timeout.
 */
static void do_test_implicit_ack (const char *what,
                                  int batch_cnt, int initial_fail_batch_cnt) {
        rd_kafka_t *rk;
        const char *topic = test_mk_topic_name("0090_idempotence_impl_ack", 1);
        const int32_t partition = 0;
        uint64_t testid;
        int msgcnt = 10*batch_cnt;  /* 10 msgs/batch, matches batch.num.messages */
        rd_kafka_conf_t *conf;
        rd_kafka_topic_t *rkt;
        test_msgver_t mv;

        TEST_SAY(_C_MAG "[ Test implicit ack: %s ]\n", what);

        /* Shared with the ProduceResponse handler via module-global state. */
        rd_atomic32_init(&state.produce_cnt, 0);
        state.batch_cnt = batch_cnt;
        state.initial_fail_batch_cnt = initial_fail_batch_cnt;

        testid = test_id_generate();

        test_conf_init(&conf, NULL, 60);
        rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
        test_conf_set(conf, "enable.idempotence", "true");
        test_conf_set(conf, "batch.num.messages", "10");
        test_conf_set(conf, "linger.ms", "500");
        test_conf_set(conf, "retry.backoff.ms", "2000");

        /* The ProduceResponse handler will inject timed-out-in-flight
         * errors for the first N ProduceRequests, which will trigger retries
         * that in turn will result in OutOfSequence errors.
         * NOTE(review): the callback pointer is smuggled through a string
         * conf property — a unit-test-only hook in librdkafka. */
        test_conf_set(conf, "ut_handle_ProduceResponse",
                      (char *)handle_ProduceResponse);

        test_create_topic(topic, 1, 1);

        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
        rkt = test_create_producer_topic(rk, topic, NULL);

        TEST_SAY("Producing %d messages\n", msgcnt);
        test_produce_msgs(rk, rkt, testid, -1, 0, msgcnt, NULL, 0);

        TEST_SAY("Flushing..\n");
        rd_kafka_flush(rk, 10000);

        rd_kafka_topic_destroy(rkt);
        rd_kafka_destroy(rk);

        /* Consume everything back and verify there was no message loss,
         * duplication or reordering despite the injected failures. */
        TEST_SAY("Verifying messages with consumer\n");
        test_msgver_init(&mv, testid);
        test_consume_msgs_easy_mv(NULL, topic, partition,
                                  testid, 1, msgcnt, NULL, &mv);
        test_msgver_verify("verify", &mv, TEST_MSGVER_ALL, 0, msgcnt);
        test_msgver_clear(&mv);

        TEST_SAY(_C_GRN "[ Test implicit ack: %s : PASS ]\n", what);
}
示例11: run_test0
/**
 * Run a single test: open its stats file, execute it with timing,
 * record pass/fail state under the test lock, and clean up the stats
 * file (deleting it if empty).
 *
 * @returns the test's own return value (0 on pass, non-zero on failure).
 */
static int run_test0 (struct run_args *run_args) {
        struct test *test = run_args->test;
        test_timing_t t_run;
        int ret;
        char stats_file[256];

        /* Per-test stats file; failure to open is non-fatal. */
        rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%"PRIu64".json",
                    test->name, test_id_generate());
        if (!(test->stats_fp = fopen(stats_file, "w+")))
                TEST_SAY("=== Failed to create stats file %s: %s ===\n",
                         stats_file, strerror(errno));

        test_curr = test;
        TEST_SAY("================= Running test %s =================\n",
                 test->name);
        if (test->stats_fp)
                TEST_SAY("==== Stats written to file %s ====\n", stats_file);

        TIMING_START(&t_run, test->name);
        test->start = t_run.ts_start;
        ret = test->mainfunc(run_args->argc, run_args->argv);
        TIMING_STOP(&t_run);

        /* Record outcome under the test lock. */
        TEST_LOCK();
        test->duration = TIMING_DURATION(&t_run);
        if (ret) {
                test->state = TEST_FAILED;
                TEST_SAY("\033[31m"
                         "================= Test %s FAILED ================="
                         "\033[0m\n",
                         run_args->test->name);
        } else {
                test->state = TEST_PASSED;
                TEST_SAY("\033[32m"
                         "================= Test %s PASSED ================="
                         "\033[0m\n",
                         run_args->test->name);
        }
        TEST_UNLOCK();

        if (test->stats_fp) {
                long fpos = ftell(test->stats_fp);

                fclose(test->stats_fp);
                test->stats_fp = NULL;

                /* Delete file if nothing was written */
                if (fpos == 0) {
#ifndef _MSC_VER
                        unlink(stats_file);
#else
                        _unlink(stats_file);
#endif
                }
        }

        return ret;
}
示例12: list_groups
/**
 * List groups by:
 * - List all groups, check that the groups in 'groups' are seen.
 * - List each group in 'groups', one by one.
 *
 * Returns 'group_cnt' if all groups in 'groups' were seen by both
 * methods, else 0, or -1 on error.
 */
static int list_groups (rd_kafka_t *rk, char **groups, int group_cnt,
                        const char *desc) {
        rd_kafka_resp_err_t err = 0;
        const struct rd_kafka_group_list *grplist;
        int i, r;
        int fails = 0;
        int seen = 0;
        int seen_all = 0;
        int retries = 5;

        TEST_SAY("List groups (expect %d): %s\n", group_cnt, desc);

        /* FIXME: Wait for broker to come up. This should really be abstracted
         * by librdkafka. */
        /* err is seeded to 0 so the retry message and 1s back-off are only
         * emitted on actual retries, not on the first attempt. */
        do {
                if (err) {
                        TEST_SAY("Retrying group list in 1s because of: %s\n",
                                 rd_kafka_err2str(err));
                        rd_sleep(1);
                }
                err = rd_kafka_list_groups(rk, NULL, &grplist, 5000);
        } while ((err == RD_KAFKA_RESP_ERR__TRANSPORT ||
                  err == RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) &&
                 retries-- > 0);

        if (err) {
                TEST_SAY("Failed to list all groups: %s\n",
                         rd_kafka_err2str(err));
                return -1;
        }

        /* Method 1: single listing of all groups. */
        seen_all = verify_groups(grplist, groups, group_cnt);
        rd_kafka_group_list_destroy(grplist);

        /* Method 2: list each expected group individually. */
        for (i = 0 ; i < group_cnt ; i++) {
                err = rd_kafka_list_groups(rk, groups[i], &grplist, 5000);
                if (err) {
                        TEST_SAY("Failed to list group %s: %s\n",
                                 groups[i], rd_kafka_err2str(err));
                        fails++;
                        continue;
                }

                r = verify_groups(grplist, &groups[i], 1);
                if (r == 1)
                        seen++;
                rd_kafka_group_list_destroy(grplist);
        }

        /* Both methods must agree, otherwise report 0 (partial success). */
        if (seen_all != seen)
                return 0;

        return seen;
}
示例13: do_test_stats_timer
/**
* Enable statistics with a set interval, make sure the stats callbacks are
* called within reasonable intervals.
*/
static void do_test_stats_timer (void) {
rd_kafka_t *rk;
rd_kafka_conf_t *conf;
const int exp_calls = 10;
char errstr[512];
struct state state;
test_timing_t t_new;
memset(&state, 0, sizeof(state));
state.interval = 600*1000;
test_conf_init(&conf, NULL, 200);
test_conf_set(conf, "statistics.interval.ms", "600");
rd_kafka_conf_set_stats_cb(conf, stats_cb);
rd_kafka_conf_set_opaque(conf, &state);
TIMING_START(&t_new, "rd_kafka_new()");
rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
TIMING_STOP(&t_new);
if (!rk)
TEST_FAIL("Failed to create instance: %s\n", errstr);
TEST_SAY("Starting wait loop for %d expected stats_cb calls "
"with an interval of %dms\n",
exp_calls, state.interval/1000);
while (state.calls < exp_calls) {
test_timing_t t_poll;
TIMING_START(&t_poll, "rd_kafka_poll()");
rd_kafka_poll(rk, 100);
TIMING_STOP(&t_poll);
if (TIMING_DURATION(&t_poll) > 150*1000)
TEST_WARN("rd_kafka_poll(rk,100) "
"took more than 50%% extra\n");
}
rd_kafka_destroy(rk);
if (state.calls > exp_calls)
TEST_SAY("Got more calls than expected: %d > %d\n",
state.calls, exp_calls);
if (state.fails)
TEST_FAIL("%d/%d intervals failed\n", state.fails, state.calls);
else
TEST_SAY("All %d intervals okay\n", state.calls);
}
示例14: main_0039_event
/**
 * @brief Local test: test event generation
 */
int main_0039_event (int argc, char **argv) {
        rd_kafka_t *rk;
        rd_kafka_conf_t *conf;
        rd_kafka_queue_t *eventq;
        int waiting = 1;

        /* Set up a config with ERROR events enabled and
         * configure an invalid broker so that _TRANSPORT or ALL_BROKERS_DOWN
         * is promptly generated. */
        conf = rd_kafka_conf_new();
        rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_ERROR);
        rd_kafka_conf_set(conf, "bootstrap.servers", "0:65534", NULL, 0);

        /* Create kafka instance */
        rk = test_create_handle(RD_KAFKA_PRODUCER, conf);

        eventq = rd_kafka_queue_get_main(rk);

        while (waiting) {
                rd_kafka_event_t *ev = rd_kafka_queue_poll(eventq, 1000);

                if (rd_kafka_event_type(ev) == RD_KAFKA_EVENT_ERROR) {
                        TEST_SAY("Got %s%s event: %s: %s\n",
                                 rd_kafka_event_error_is_fatal(ev) ?
                                 "FATAL " : "",
                                 rd_kafka_event_name(ev),
                                 rd_kafka_err2name(rd_kafka_event_error(ev)),
                                 rd_kafka_event_error_string(ev));
                        waiting = 0;
                } else {
                        TEST_SAY("Unhandled event: %s\n",
                                 rd_kafka_event_name(ev));
                }

                rd_kafka_event_destroy(ev);
        }

        rd_kafka_queue_destroy(eventq);

        /* Destroy rdkafka instance */
        TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
        rd_kafka_destroy(rk);

        return 0;
}
示例15: test_wait_exit
/**
 * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up.
 *
 * Fails the test (via TEST_FAIL) if any librdkafka thread is still
 * running when the timeout expires.
 */
void test_wait_exit (int timeout) {
        int r;

        while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) {
                TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r);
                sleep(1);
        }

        TEST_SAY("%i thread(s) in use by librdkafka\n", r);

        if (r > 0) {
                /* Fix: a preceding assert(0) aborted the process before
                 * TEST_FAIL could report which threads were still active;
                 * let TEST_FAIL do the reporting and failing. */
                TEST_FAIL("%i thread(s) still active in librdkafka", r);
        }
}