This article collects typical usage examples of GPR_ASSERT from C/C++ code (GPR_ASSERT is the assertion macro used throughout the gRPC C core). If you are wondering what GPR_ASSERT does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following 15 code examples of GPR_ASSERT are shown, sorted by popularity by default.
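Before diving into the examples, here is a minimal, hedged sketch of the basic pattern (not taken from the gRPC sources): GPR_ASSERT(cond) is declared in <grpc/support/log.h>, evaluates its condition, and aborts the process after logging the failing expression when the condition is false. The helper function, parameters, and checks below are hypothetical and only illustrate the idiom.
#include <stddef.h>
#include <grpc/support/log.h>
/* Hypothetical helper, for illustration only: check invariants up front and
   crash loudly (with a log line) if one of them is violated. */
static void process_buffer(int fd, const char *buf, size_t len) {
  GPR_ASSERT(fd >= 0);      /* an invalid fd here would be a programming error */
  GPR_ASSERT(buf != NULL);  /* callers must never pass a NULL buffer */
  GPR_ASSERT(len > 0);      /* zero-length buffers are not expected */
  /* ... real work would go here ... */
}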
Example 1: test_max_message_length_on_response
// Test with response larger than the limit.
// If send_limit is true, applies send limit on server; otherwise, applies
// recv limit on client.
static void test_max_message_length_on_response(grpc_end2end_test_config config,
bool send_limit,
bool use_service_config,
bool use_string_json_value) {
gpr_log(GPR_INFO,
"testing response with send_limit=%d use_service_config=%d "
"use_string_json_value=%d",
send_limit, use_service_config, use_string_json_value);
grpc_end2end_test_fixture f;
grpc_call *c = NULL;
grpc_call *s = NULL;
cq_verifier *cqv;
grpc_op ops[6];
grpc_op *op;
grpc_slice response_payload_slice =
grpc_slice_from_copied_string("hello world");
grpc_byte_buffer *response_payload =
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
grpc_byte_buffer *recv_payload = NULL;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_call_details call_details;
grpc_status_code status;
grpc_call_error error;
grpc_slice details;
int was_cancelled = 2;
grpc_channel_args *client_args = NULL;
grpc_channel_args *server_args = NULL;
if (use_service_config) {
// We don't currently support service configs on the server side.
GPR_ASSERT(!send_limit);
grpc_arg arg;
arg.type = GRPC_ARG_STRING;
arg.key = GRPC_ARG_SERVICE_CONFIG;
arg.value.string =
use_string_json_value
? "{\n"
" \"methodConfig\": [ {\n"
" \"name\": [\n"
" { \"service\": \"service\", \"method\": \"method\" }\n"
" ],\n"
" \"maxResponseMessageBytes\": \"5\"\n"
" } ]\n"
"}"
: "{\n"
" \"methodConfig\": [ {\n"
" \"name\": [\n"
" { \"service\": \"service\", \"method\": \"method\" }\n"
" ],\n"
" \"maxResponseMessageBytes\": 5\n"
" } ]\n"
"}";
client_args = grpc_channel_args_copy_and_add(NULL, &arg, 1);
} else {
// Set limit via channel args.
grpc_arg arg;
arg.key = send_limit ? GRPC_ARG_MAX_SEND_MESSAGE_LENGTH
: GRPC_ARG_MAX_RECEIVE_MESSAGE_LENGTH;
arg.type = GRPC_ARG_INTEGER;
arg.value.integer = 5;
grpc_channel_args *args = grpc_channel_args_copy_and_add(NULL, &arg, 1);
if (send_limit) {
server_args = args;
} else {
client_args = args;
}
}
f = begin_test(config, "test_max_response_message_length", client_args,
server_args);
{
grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
if (client_args != NULL) grpc_channel_args_destroy(&exec_ctx, client_args);
if (server_args != NULL) grpc_channel_args_destroy(&exec_ctx, server_args);
grpc_exec_ctx_finish(&exec_ctx);
}
cqv = cq_verifier_create(f.cq);
c = grpc_channel_create_call(
f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq,
grpc_slice_from_static_string("/service/method"),
get_host_override_slice("foo.test.google.fr:1234", config),
gpr_inf_future(GPR_CLOCK_REALTIME), NULL);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
//......... the remainder of this example has been omitted .........
Example 2: assert_log_empty
/* Asserts that the log is empty. */
static void assert_log_empty(void) {
size_t bytes_available;
census_log_init_reader();
GPR_ASSERT(census_log_read_next(&bytes_available) == NULL);
}
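As a hedged counterpart to the assertion above (this sketch is not taken from the gRPC tests), the same two reader calls can be used to walk a log that is not empty; only census_log_init_reader and census_log_read_next from the example above are assumed, plus gpr_log for output.
/* Illustrative sketch: iterate over whatever records the log contains. */
static void dump_log(void) {
  size_t bytes_available;
  const void *record;
  census_log_init_reader();
  while ((record = census_log_read_next(&bytes_available)) != NULL) {
    gpr_log(GPR_DEBUG, "census log record: %p, %d bytes", record,
            (int)bytes_available);
  }
}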
Example 3: setup_test
static void setup_test(int circular_log) {
census_log_initialize(LOG_SIZE_IN_MB, circular_log);
GPR_ASSERT(census_log_remaining_space() == LOG_SIZE_IN_BYTES);
}
Example 4: do_iocp_work
static int do_iocp_work() {
BOOL success;
DWORD bytes = 0;
DWORD flags = 0;
ULONG_PTR completion_key;
LPOVERLAPPED overlapped;
gpr_timespec wait_time = gpr_inf_future;
grpc_winsocket *socket;
grpc_winsocket_callback_info *info;
void(*f)(void *, int) = NULL;
void *opaque = NULL;
success = GetQueuedCompletionStatus(g_iocp, &bytes,
&completion_key, &overlapped,
gpr_time_to_millis(wait_time));
if (!success && !overlapped) {
/* The deadline was reached. */
return 0;
}
GPR_ASSERT(completion_key && overlapped);
if (overlapped == &g_iocp_custom_overlap) {
if (completion_key == (ULONG_PTR) &g_iocp_kick_token) {
/* We were awoken from a kick. */
gpr_log(GPR_DEBUG, "do_iocp_work - got a kick");
return 1;
}
gpr_log(GPR_ERROR, "Unknown custom completion key.");
abort();
}
socket = (grpc_winsocket*) completion_key;
if (overlapped == &socket->write_info.overlapped) {
gpr_log(GPR_DEBUG, "do_iocp_work - got write packet");
info = &socket->write_info;
} else if (overlapped == &socket->read_info.overlapped) {
gpr_log(GPR_DEBUG, "do_iocp_work - got read packet");
info = &socket->read_info;
} else {
gpr_log(GPR_ERROR, "Unknown IOCP operation");
abort();
}
success = WSAGetOverlappedResult(socket->socket, &info->overlapped, &bytes,
FALSE, &flags);
gpr_log(GPR_DEBUG, "bytes: %u, flags: %u - op %s", bytes, flags,
success ? "succeeded" : "failed");
info->bytes_transfered = bytes;
info->wsa_error = success ? 0 : WSAGetLastError();
GPR_ASSERT(overlapped == &info->overlapped);
gpr_mu_lock(&socket->state_mu);
GPR_ASSERT(!info->has_pending_iocp);
if (info->cb) {
f = info->cb;
opaque = info->opaque;
info->cb = NULL;
} else {
info->has_pending_iocp = 1;
}
gpr_mu_unlock(&socket->state_mu);
if (f) f(opaque, 1);
return 1;
}
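A hedged sketch of the matching "kick" side that do_iocp_work() above checks for: posting the custom overlapped structure with the kick token as the completion key wakes the poller without any socket I/O having completed. gRPC has a very similar helper; this version is written from the code above for illustration and reuses its g_iocp, g_iocp_kick_token and g_iocp_custom_overlap globals.
/* Illustrative kick: wake do_iocp_work() via a custom completion packet. */
static void kick_iocp_poller(void) {
  BOOL success = PostQueuedCompletionStatus(
      g_iocp, 0, (ULONG_PTR)&g_iocp_kick_token, &g_iocp_custom_overlap);
  GPR_ASSERT(success);
}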
Example 5: main
int main(int argc, char **argv) {
grpc_event *ev;
call_state *s;
char *addr_buf = NULL;
gpr_cmdline *cl;
int shutdown_started = 0;
int shutdown_finished = 0;
int secure = 0;
char *addr = NULL;
char *fake_argv[1];
#define MAX_ARGS 4
grpc_arg arge[MAX_ARGS];
grpc_arg *e;
grpc_channel_args args = {0, NULL};
grpc_http_server_page home_page = {"/", "text/html",
"<head>\n"
"<title>Echo Server</title>\n"
"</head>\n"
"<body>\n"
"Welcome to the world of the future!\n"
"</body>\n"};
GPR_ASSERT(argc >= 1);
fake_argv[0] = argv[0];
grpc_test_init(1, fake_argv);
grpc_init();
srand(clock());
memset(arge, 0, sizeof(arge));
args.args = arge;
cl = gpr_cmdline_create("echo server");
gpr_cmdline_add_string(cl, "bind", "Bind host:port", &addr);
gpr_cmdline_add_flag(cl, "secure", "Run with security?", &secure);
gpr_cmdline_parse(cl, argc, argv);
gpr_cmdline_destroy(cl);
e = &arge[args.num_args++];
e->type = GRPC_ARG_POINTER;
e->key = GRPC_ARG_SERVE_OVER_HTTP;
e->value.pointer.p = &home_page;
if (addr == NULL) {
gpr_join_host_port(&addr_buf, "::", grpc_pick_unused_port_or_die());
addr = addr_buf;
}
gpr_log(GPR_INFO, "creating server on: %s", addr);
cq = grpc_completion_queue_create();
if (secure) {
grpc_ssl_pem_key_cert_pair pem_key_cert_pair = {test_server1_key,
test_server1_cert};
grpc_server_credentials *ssl_creds =
grpc_ssl_server_credentials_create(NULL, &pem_key_cert_pair, 1);
server = grpc_server_create(cq, &args);
GPR_ASSERT(grpc_server_add_secure_http2_port(server, addr, ssl_creds));
grpc_server_credentials_release(ssl_creds);
} else {
server = grpc_server_create(cq, &args);
GPR_ASSERT(grpc_server_add_http2_port(server, addr));
}
grpc_server_start(server);
gpr_free(addr_buf);
addr = addr_buf = NULL;
request_call();
signal(SIGINT, sigint_handler);
while (!shutdown_finished) {
if (got_sigint && !shutdown_started) {
gpr_log(GPR_INFO, "Shutting down due to SIGINT");
grpc_server_shutdown(server);
grpc_completion_queue_shutdown(cq);
shutdown_started = 1;
}
ev = grpc_completion_queue_next(
cq, gpr_time_add(gpr_now(), gpr_time_from_seconds(1)));
if (!ev) continue;
s = ev->tag;
switch (ev->type) {
case GRPC_SERVER_RPC_NEW:
if (ev->call != NULL) {
/* initial ops are already started in request_call */
grpc_call_server_accept_old(ev->call, cq, s);
grpc_call_server_end_initial_metadata_old(ev->call,
GRPC_WRITE_BUFFER_HINT);
GPR_ASSERT(grpc_call_start_read_old(ev->call, s) == GRPC_CALL_OK);
request_call();
} else {
GPR_ASSERT(shutdown_started);
gpr_free(s);
}
break;
case GRPC_WRITE_ACCEPTED:
GPR_ASSERT(ev->data.write_accepted == GRPC_OP_OK);
//......... the remainder of this example has been omitted .........
Example 6: tcp_flush
static grpc_endpoint_op_status tcp_flush(grpc_tcp *tcp) {
struct msghdr msg;
struct iovec iov[MAX_WRITE_IOVEC];
int iov_size;
ssize_t sent_length;
ssize_t sending_length;
ssize_t trailing;
ssize_t unwind_slice_idx;
ssize_t unwind_byte_idx;
for (;;) {
sending_length = 0;
unwind_slice_idx = tcp->outgoing_slice_idx;
unwind_byte_idx = tcp->outgoing_byte_idx;
for (iov_size = 0; tcp->outgoing_slice_idx != tcp->outgoing_buffer->count &&
iov_size != MAX_WRITE_IOVEC;
iov_size++) {
iov[iov_size].iov_base =
GPR_SLICE_START_PTR(
tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) +
tcp->outgoing_byte_idx;
iov[iov_size].iov_len =
GPR_SLICE_LENGTH(
tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]) -
tcp->outgoing_byte_idx;
sending_length += iov[iov_size].iov_len;
tcp->outgoing_slice_idx++;
tcp->outgoing_byte_idx = 0;
}
GPR_ASSERT(iov_size > 0);
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_iov = iov;
msg.msg_iovlen = iov_size;
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_flags = 0;
GRPC_TIMER_BEGIN(GRPC_PTAG_SENDMSG, 0);
do {
/* TODO(klempner): Cork if this is a partial write */
sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS);
} while (sent_length < 0 && errno == EINTR);
GRPC_TIMER_END(GRPC_PTAG_SENDMSG, 0);
if (sent_length < 0) {
if (errno == EAGAIN) {
tcp->outgoing_slice_idx = unwind_slice_idx;
tcp->outgoing_byte_idx = unwind_byte_idx;
return GRPC_ENDPOINT_PENDING;
} else {
/* TODO(klempner): Log some of these */
return GRPC_ENDPOINT_ERROR;
}
}
GPR_ASSERT(tcp->outgoing_byte_idx == 0);
trailing = sending_length - sent_length;
while (trailing > 0) {
ssize_t slice_length;
tcp->outgoing_slice_idx--;
slice_length = GPR_SLICE_LENGTH(
tcp->outgoing_buffer->slices[tcp->outgoing_slice_idx]);
if (slice_length > trailing) {
tcp->outgoing_byte_idx = slice_length - trailing;
break;
} else {
trailing -= slice_length;
}
}
if (tcp->outgoing_slice_idx == tcp->outgoing_buffer->count) {
return GRPC_ENDPOINT_DONE;
}
};
}
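A hedged sketch (not from the gRPC sources) of how the tri-state grpc_endpoint_op_status returned by tcp_flush() is typically consumed; it assumes it lives in the same file as tcp_flush, so the static function and the grpc_tcp type are visible.
/* Illustrative caller of tcp_flush(). */
static void example_write_step(grpc_tcp *tcp) {
  switch (tcp_flush(tcp)) {
    case GRPC_ENDPOINT_DONE:
      gpr_log(GPR_DEBUG, "write complete: all slices were handed to the kernel");
      break;
    case GRPC_ENDPOINT_PENDING:
      /* sendmsg() hit EAGAIN; the unwind indices above make it safe to call
         tcp_flush() again once the fd becomes writable. */
      gpr_log(GPR_DEBUG, "write pending: retry when the socket is writable");
      break;
    case GRPC_ENDPOINT_ERROR:
      gpr_log(GPR_ERROR, "write failed permanently");
      break;
  }
}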
Example 7: win_write
/* Initiates a write. */
static grpc_endpoint_op_status win_write(grpc_endpoint *ep,
gpr_slice_buffer *slices,
grpc_iomgr_closure *cb) {
grpc_tcp *tcp = (grpc_tcp *)ep;
grpc_winsocket *socket = tcp->socket;
grpc_winsocket_callback_info *info = &socket->write_info;
unsigned i;
DWORD bytes_sent;
int status;
WSABUF local_buffers[16];
WSABUF *allocated = NULL;
WSABUF *buffers = local_buffers;
GPR_ASSERT(!tcp->socket->write_info.outstanding);
if (tcp->shutting_down) {
return GRPC_ENDPOINT_ERROR;
}
TCP_REF(tcp, "write");
tcp->socket->write_info.outstanding = 1;
tcp->write_cb = cb;
tcp->write_slices = slices;
if (tcp->write_slices->count > GPR_ARRAY_SIZE(local_buffers)) {
buffers = (WSABUF *)gpr_malloc(sizeof(WSABUF) * tcp->write_slices->count);
allocated = buffers;
}
for (i = 0; i < tcp->write_slices->count; i++) {
buffers[i].len = GPR_SLICE_LENGTH(tcp->write_slices->slices[i]);
buffers[i].buf = (char *)GPR_SLICE_START_PTR(tcp->write_slices->slices[i]);
}
/* First, let's try a synchronous, non-blocking write. */
status = WSASend(socket->socket, buffers, tcp->write_slices->count,
&bytes_sent, 0, NULL, NULL);
info->wsa_error = status == 0 ? 0 : WSAGetLastError();
/* We would kind of expect to get a WSAEWOULDBLOCK here, especially on a busy
connection that has its send queue filled up. But if we don't, then we can
avoid doing an async write operation at all. */
if (info->wsa_error != WSAEWOULDBLOCK) {
grpc_endpoint_op_status ret = GRPC_ENDPOINT_ERROR;
if (status == 0) {
ret = GRPC_ENDPOINT_DONE;
GPR_ASSERT(bytes_sent == tcp->write_slices->length);
} else {
if (socket->read_info.wsa_error != WSAECONNRESET) {
char *utf8_message = gpr_format_message(info->wsa_error);
gpr_log(GPR_ERROR, "WSASend error: %s", utf8_message);
gpr_free(utf8_message);
}
}
if (allocated) gpr_free(allocated);
tcp->socket->write_info.outstanding = 0;
TCP_UNREF(tcp, "write");
return ret;
}
/* If we got a WSAEWOULDBLOCK earlier, then we need to re-do the same
operation, this time asynchronously. */
memset(&socket->write_info.overlapped, 0, sizeof(OVERLAPPED));
status = WSASend(socket->socket, buffers, tcp->write_slices->count,
&bytes_sent, 0, &socket->write_info.overlapped, NULL);
if (allocated) gpr_free(allocated);
if (status != 0) {
int wsa_error = WSAGetLastError();
if (wsa_error != WSA_IO_PENDING) {
tcp->socket->write_info.outstanding = 0;
TCP_UNREF(tcp, "write");
return GRPC_ENDPOINT_ERROR;
}
}
/* Now that everything is set up, we can ask for the IOCP notification. It may
trigger the callback immediately, but that is fine. */
grpc_socket_notify_on_write(socket, on_write, tcp);
return GRPC_ENDPOINT_PENDING;
}
Example 8: grpc_pick_unused_port_or_die
int grpc_pick_unused_port_or_die(void) {
int port = grpc_pick_unused_port();
GPR_ASSERT(port > 0);
return port;
}
Example 9: grpc_recycle_unused_port
void grpc_recycle_unused_port(int port) { GPR_ASSERT(free_chosen_port(port)); }
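A hedged sketch of how tests typically combine the two helpers above; grpc_pick_unused_port_or_die, grpc_recycle_unused_port, gpr_join_host_port and gpr_free all appear in examples on this page, while the surrounding test scaffolding is made up for illustration.
/* Illustrative test scaffolding around the port helpers. */
static void run_test_on_fresh_port(void) {
  int port = grpc_pick_unused_port_or_die();    /* aborts if no port can be found */
  char *addr = NULL;
  gpr_join_host_port(&addr, "localhost", port); /* e.g. "localhost:43125" */
  /* ... start a test server on addr and exercise it ... */
  gpr_free(addr);
  grpc_recycle_unused_port(port);               /* hand the port back for later tests */
}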
Example 10: test_cancel_after_accept
/* Cancel after accept, no payload */
static void test_cancel_after_accept(grpc_end2end_test_config config,
cancellation_mode mode,
bool use_service_config) {
grpc_op ops[6];
grpc_op *op;
grpc_call *c;
grpc_call *s;
gpr_timespec deadline = use_service_config
? gpr_inf_future(GPR_CLOCK_MONOTONIC)
: five_seconds_time();
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_call_details call_details;
grpc_status_code status;
grpc_call_error error;
char *details = NULL;
size_t details_capacity = 0;
grpc_byte_buffer *request_payload_recv = NULL;
grpc_byte_buffer *response_payload_recv = NULL;
grpc_slice request_payload_slice =
grpc_slice_from_copied_string("hello world");
grpc_slice response_payload_slice =
grpc_slice_from_copied_string("hello you");
grpc_byte_buffer *request_payload =
grpc_raw_byte_buffer_create(&request_payload_slice, 1);
grpc_byte_buffer *response_payload =
grpc_raw_byte_buffer_create(&response_payload_slice, 1);
int was_cancelled = 2;
grpc_channel_args *args = NULL;
if (use_service_config) {
grpc_arg arg;
arg.type = GRPC_ARG_STRING;
arg.key = GRPC_ARG_SERVICE_CONFIG;
arg.value.string =
"{\n"
" \"methodConfig\": [ {\n"
" \"name\": [\n"
" { \"service\": \"service\", \"method\": \"method\" }\n"
" ],\n"
" \"timeout\": \"5s\"\n"
" } ]\n"
"}";
args = grpc_channel_args_copy_and_add(args, &arg, 1);
}
grpc_end2end_test_fixture f =
begin_test(config, "cancel_after_accept", args, NULL);
cq_verifier *cqv = cq_verifier_create(f.cq);
c = grpc_channel_create_call(
f.client, NULL, GRPC_PROPAGATE_DEFAULTS, f.cq, "/service/method",
get_host_override_string("foo.test.google.fr:1234", config), deadline,
NULL);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
memset(ops, 0, sizeof(ops));
op = ops;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_SEND_MESSAGE;
op->data.send_message = request_payload;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op->flags = 0;
op->reserved = NULL;
op++;
op->op = GRPC_OP_RECV_MESSAGE;
op->data.recv_message = &response_payload_recv;
op->flags = 0;
op->reserved = NULL;
op++;
error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), NULL);
GPR_ASSERT(GRPC_CALL_OK == error);
error = grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.cq, f.cq, tag(2));
GPR_ASSERT(GRPC_CALL_OK == error);
CQ_EXPECT_COMPLETION(cqv, tag(2), 1);
//......... the remainder of this example has been omitted .........
Example 11: grpc_pick_unused_port
int grpc_pick_unused_port(void) {
/* We repeatedly pick a port and then see whether or not it is
available for use both as a TCP socket and a UDP socket. First, we
pick a random large port number. For subsequent
iterations, we bind to an anonymous port and let the OS pick the
port number. The random port picking reduces the probability of
races with other processes on kernels that want to reuse the same
port numbers over and over. */
/* In alternating iterations we try UDP ports before TCP ports -- it could be
the case that this machine has been using up UDP ports and they are
scarcer. */
/* Type of port to first pick in next iteration */
int is_tcp = 1;
int trial = 0;
char *env = gpr_getenv("GRPC_TEST_PORT_SERVER");
if (env) {
int port = grpc_pick_port_using_server(env);
gpr_free(env);
if (port != 0) {
return port;
}
}
for (;;) {
int port;
trial++;
if (trial == 1) {
port = _getpid() % (65536 - 30000) + 30000;
} else if (trial <= NUM_RANDOM_PORTS_TO_PICK) {
port = rand() % (65536 - 30000) + 30000;
} else {
port = 0;
}
if (has_port_been_chosen(port)) {
continue;
}
if (!is_port_available(&port, is_tcp)) {
continue;
}
GPR_ASSERT(port > 0);
/* Check that the port # is free for the other type of socket also */
if (!is_port_available(&port, !is_tcp)) {
/* In the next iteration, try to bind to the other type first,
because perhaps it is scarcer. */
is_tcp = !is_tcp;
continue;
}
/* TODO(ctiller): consider caching this port in some structure, to avoid
handing it out again */
chose_port(port);
return port;
}
/* The port iterator reached the end without finding a suitable port. */
return 0;
}
Example 12: endpoint_write
static grpc_endpoint_write_status endpoint_write(grpc_endpoint *secure_ep,
gpr_slice *slices,
size_t nslices,
grpc_endpoint_write_cb cb,
void *user_data) {
unsigned i;
size_t output_buffer_count = 0;
tsi_result result = TSI_OK;
secure_endpoint *ep = (secure_endpoint *)secure_ep;
gpr_uint8 *cur = GPR_SLICE_START_PTR(ep->write_staging_buffer);
gpr_uint8 *end = GPR_SLICE_END_PTR(ep->write_staging_buffer);
grpc_endpoint_write_status status;
GPR_ASSERT(ep->output_buffer.count == 0);
if (grpc_trace_secure_endpoint) {
for (i = 0; i < nslices; i++) {
char *data = gpr_dump_slice(slices[i], GPR_DUMP_HEX | GPR_DUMP_ASCII);
gpr_log(GPR_DEBUG, "WRITE %p: %s", ep, data);
gpr_free(data);
}
}
for (i = 0; i < nslices; i++) {
gpr_slice plain = slices[i];
gpr_uint8 *message_bytes = GPR_SLICE_START_PTR(plain);
size_t message_size = GPR_SLICE_LENGTH(plain);
while (message_size > 0) {
size_t protected_buffer_size_to_send = (size_t)(end - cur);
size_t processed_message_size = message_size;
gpr_mu_lock(&ep->protector_mu);
result = tsi_frame_protector_protect(ep->protector, message_bytes,
&processed_message_size, cur,
&protected_buffer_size_to_send);
gpr_mu_unlock(&ep->protector_mu);
if (result != TSI_OK) {
gpr_log(GPR_ERROR, "Encryption error: %s",
tsi_result_to_string(result));
break;
}
message_bytes += processed_message_size;
message_size -= processed_message_size;
cur += protected_buffer_size_to_send;
if (cur == end) {
flush_write_staging_buffer(ep, &cur, &end);
}
}
if (result != TSI_OK) break;
}
if (result == TSI_OK) {
size_t still_pending_size;
do {
size_t protected_buffer_size_to_send = (size_t)(end - cur);
gpr_mu_lock(&ep->protector_mu);
result = tsi_frame_protector_protect_flush(ep->protector, cur,
&protected_buffer_size_to_send,
&still_pending_size);
gpr_mu_unlock(&ep->protector_mu);
if (result != TSI_OK) break;
cur += protected_buffer_size_to_send;
if (cur == end) {
flush_write_staging_buffer(ep, &cur, &end);
}
} while (still_pending_size > 0);
if (cur != GPR_SLICE_START_PTR(ep->write_staging_buffer)) {
gpr_slice_buffer_add(
&ep->output_buffer,
gpr_slice_split_head(
&ep->write_staging_buffer,
(size_t)(cur - GPR_SLICE_START_PTR(ep->write_staging_buffer))));
}
}
for (i = 0; i < nslices; i++) {
gpr_slice_unref(slices[i]);
}
if (result != TSI_OK) {
/* TODO(yangg) do different things according to the error type? */
gpr_slice_buffer_reset_and_unref(&ep->output_buffer);
return GRPC_ENDPOINT_WRITE_ERROR;
}
/* clear output_buffer and let the lower level handle its slices. */
output_buffer_count = ep->output_buffer.count;
ep->output_buffer.count = 0;
ep->write_cb = cb;
ep->write_user_data = user_data;
/* Need to keep the endpoint alive across a transport */
secure_endpoint_ref(ep);
status = grpc_endpoint_write(ep->wrapped_ep, ep->output_buffer.slices,
output_buffer_count, on_write, ep);
if (status != GRPC_ENDPOINT_WRITE_PENDING) {
secure_endpoint_unref(ep);
}
return status;
}
Example 13: shrink_test
static void shrink_test(void) {
gpr_log(GPR_INFO, "shrink_test");
grpc_timer_heap pq;
size_t i;
size_t expected_size;
/* A large random number to allow for multiple shrinkages, at least 512. */
const size_t num_elements = (size_t)rand() % 2000 + 512;
grpc_timer_heap_init(&pq);
/* Create a priority queue with many elements. Make sure the Size() is
correct. */
for (i = 0; i < num_elements; ++i) {
GPR_ASSERT(i == pq.timer_count);
grpc_timer_heap_add(&pq, create_test_elements(1));
}
GPR_ASSERT(num_elements == pq.timer_count);
/* Remove elements until the Size is 1/4 the original size. */
while (pq.timer_count > num_elements / 4) {
grpc_timer *const te = pq.timers[pq.timer_count - 1];
grpc_timer_heap_remove(&pq, te);
gpr_free(te);
}
GPR_ASSERT(num_elements / 4 == pq.timer_count);
/* Expect that Capacity is in the right range:
Size * 2 <= Capacity <= Size * 4 */
GPR_ASSERT(pq.timer_count * 2 <= pq.timer_capacity);
GPR_ASSERT(pq.timer_capacity <= pq.timer_count * 4);
check_valid(&pq);
/* Remove the rest of the elements. Check that the Capacity is not more than
4 times the Size and not less than 2 times, but never goes below 16. */
expected_size = pq.timer_count;
while (pq.timer_count > 0) {
const size_t which = (size_t)rand() % pq.timer_count;
grpc_timer *te = pq.timers[which];
grpc_timer_heap_remove(&pq, te);
gpr_free(te);
expected_size--;
GPR_ASSERT(expected_size == pq.timer_count);
GPR_ASSERT(pq.timer_count * 2 <= pq.timer_capacity);
if (pq.timer_count >= 8) {
GPR_ASSERT(pq.timer_capacity <= pq.timer_count * 4);
} else {
GPR_ASSERT(16 <= pq.timer_capacity);
}
check_valid(&pq);
}
GPR_ASSERT(0 == pq.timer_count);
GPR_ASSERT(pq.timer_capacity >= 16 && pq.timer_capacity < 32);
grpc_timer_heap_destroy(&pq);
}
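For readers unfamiliar with the internal grpc_timer_heap API exercised above, here is a minimal hedged sketch of the add/top/pop lifecycle. It only uses calls and fields that already appear in these tests (including the random_deadline() helper), but it is illustrative rather than part of the gRPC test suite.
/* Minimal illustrative lifecycle of the timer heap. */
static void timer_heap_smoke_test(void) {
  grpc_timer_heap pq;
  grpc_timer t;
  grpc_timer_heap_init(&pq);
  t.deadline = random_deadline();             /* the heap orders elements by deadline */
  grpc_timer_heap_add(&pq, &t);
  GPR_ASSERT(grpc_timer_heap_top(&pq) == &t); /* the only element is the minimum */
  grpc_timer_heap_pop(&pq);                   /* removes the minimum element */
  GPR_ASSERT(pq.timer_count == 0);
  grpc_timer_heap_destroy(&pq);
}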
Example 14: test2
static void test2(void) {
gpr_log(GPR_INFO, "test2");
grpc_timer_heap pq;
static const size_t elems_size = 1000;
elem_struct *elems = gpr_malloc(elems_size * sizeof(elem_struct));
size_t num_inserted = 0;
grpc_timer_heap_init(&pq);
memset(elems, 0, elems_size * sizeof(elem_struct));
for (size_t round = 0; round < 10000; round++) {
int r = rand() % 1000;
if (r <= 550) {
/* 55% of the time we try to add something */
elem_struct *el = search_elems(elems, elems_size, false);
if (el != NULL) {
el->elem.deadline = random_deadline();
grpc_timer_heap_add(&pq, &el->elem);
el->inserted = true;
num_inserted++;
check_valid(&pq);
}
} else if (r <= 650) {
/* 10% of the time we try to remove something */
elem_struct *el = search_elems(elems, elems_size, true);
if (el != NULL) {
grpc_timer_heap_remove(&pq, &el->elem);
el->inserted = false;
num_inserted--;
check_valid(&pq);
}
} else {
/* the remaining times we pop */
if (num_inserted > 0) {
grpc_timer *top = grpc_timer_heap_top(&pq);
grpc_timer_heap_pop(&pq);
for (size_t i = 0; i < elems_size; i++) {
if (top == &elems[i].elem) {
GPR_ASSERT(elems[i].inserted);
elems[i].inserted = false;
}
}
num_inserted--;
check_valid(&pq);
}
}
if (num_inserted) {
gpr_timespec *min_deadline = NULL;
for (size_t i = 0; i < elems_size; i++) {
if (elems[i].inserted) {
if (min_deadline == NULL) {
min_deadline = &elems[i].elem.deadline;
} else {
if (gpr_time_cmp(elems[i].elem.deadline, *min_deadline) < 0) {
min_deadline = &elems[i].elem.deadline;
}
}
}
}
GPR_ASSERT(
0 == gpr_time_cmp(grpc_timer_heap_top(&pq)->deadline, *min_deadline));
}
}
grpc_timer_heap_destroy(&pq);
gpr_free(elems);
}
Example 15: test_early_server_shutdown_finishes_inflight_calls
static void test_early_server_shutdown_finishes_inflight_calls(
grpc_end2end_test_config config) {
grpc_call *c;
grpc_call *s;
gpr_timespec deadline = five_seconds_time();
grpc_end2end_test_fixture f = begin_test(config, "test_early_server_shutdown_finishes_inflight_calls", NULL, NULL);
cq_verifier *v_client = cq_verifier_create(f.client_cq);
cq_verifier *v_server = cq_verifier_create(f.server_cq);
grpc_op ops[6];
grpc_op *op;
grpc_metadata_array initial_metadata_recv;
grpc_metadata_array trailing_metadata_recv;
grpc_metadata_array request_metadata_recv;
grpc_call_details call_details;
grpc_status_code status;
char *details = NULL;
size_t details_capacity = 0;
int was_cancelled = 2;
c = grpc_channel_create_call(f.client, f.client_cq, "/foo",
"foo.test.google.fr", deadline);
GPR_ASSERT(c);
grpc_metadata_array_init(&initial_metadata_recv);
grpc_metadata_array_init(&trailing_metadata_recv);
grpc_metadata_array_init(&request_metadata_recv);
grpc_call_details_init(&call_details);
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op->data.send_initial_metadata.metadata = NULL;
op++;
op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
op++;
op->op = GRPC_OP_RECV_INITIAL_METADATA;
op->data.recv_initial_metadata = &initial_metadata_recv;
op++;
op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
op->data.recv_status_on_client.status = &status;
op->data.recv_status_on_client.status_details = &details;
op->data.recv_status_on_client.status_details_capacity = &details_capacity;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(c, ops, op - ops, tag(1)));
GPR_ASSERT(GRPC_CALL_OK ==
grpc_server_request_call(f.server, &s, &call_details,
&request_metadata_recv, f.server_cq,
f.server_cq, tag(101)));
cq_expect_completion(v_server, tag(101), 1);
cq_verify(v_server);
/* shutdown and destroy the server */
grpc_server_shutdown_and_notify(f.server, tag(0xdead));
cq_verify_empty(v_server);
op = ops;
op->op = GRPC_OP_SEND_INITIAL_METADATA;
op->data.send_initial_metadata.count = 0;
op++;
op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
op->data.send_status_from_server.trailing_metadata_count = 0;
op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
op->data.send_status_from_server.status_details = "xyz";
op++;
op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
op->data.recv_close_on_server.cancelled = &was_cancelled;
op++;
GPR_ASSERT(GRPC_CALL_OK == grpc_call_start_batch(s, ops, op - ops, tag(102)));
cq_expect_completion(v_server, tag(102), 1);
cq_verify(v_server);
grpc_call_destroy(s);
cq_expect_completion(v_server, tag(0xdead), 1);
cq_verify(v_server);
cq_expect_completion(v_client, tag(1), 1);
cq_verify(v_client);
GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
GPR_ASSERT(0 == strcmp(call_details.method, "/foo"));
GPR_ASSERT(0 == strcmp(call_details.host, "foo.test.google.fr"));
GPR_ASSERT(was_cancelled == 0);
gpr_free(details);
grpc_metadata_array_destroy(&initial_metadata_recv);
grpc_metadata_array_destroy(&trailing_metadata_recv);
grpc_metadata_array_destroy(&request_metadata_recv);
grpc_call_details_destroy(&call_details);
grpc_call_destroy(c);
cq_verifier_destroy(v_client);
cq_verifier_destroy(v_server);
end_test(&f);
config.tear_down_data(&f);
}