本文整理汇总了C++中message_ptr::header方法的典型用法代码示例。如果您正苦于以下问题:C++ message_ptr::header方法的具体用法?C++ message_ptr::header怎么用?C++ message_ptr::header使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类message_ptr的用法示例。
在下文中一共展示了message_ptr::header方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: task
// Builds an RPC-request task from an incoming message: the task code and the
// partition hash are both taken from the message header, so the task is
// dispatched according to what the client put on the wire.
// NOTE(review): assumes request->header() stays valid for the task's lifetime
// because _request retains the message — confirm message_ptr is ref-counted.
rpc_request_task::rpc_request_task(message_ptr& request, service_node* node)
: task(task_code(request->header().local_rpc_code), request->header().client.hash, node),
_request(request)
{
// Guard against a mis-declared task code: RPC request codes must be
// registered with DEFINE_TASK_CODE_RPC so spec().type is RPC_REQUEST.
dbg_dassert (TASK_TYPE_RPC_REQUEST == spec().type, "task type must be RPC_REQUEST, please use DEFINE_TASK_CODE_RPC to define the task code");
}
示例2: send
// Delivers a message inside the simulator: resolves the destination node's
// simulated network provider, lazily creates the peer's server-side session,
// clones the message bytes, and hands the clone to the server side with a
// simulated network delay (zero for a loop-back send to ourselves).
void sim_client_session::send(message_ptr& msg)
{
    auto& hdr = msg->header();

    // Look up the destination's network provider on the channel this RPC uses.
    sim_network_provider* dest_net = nullptr;
    bool resolved = s_switch[task_spec::get(hdr.local_rpc_code)->rpc_call_channel].get(hdr.to_address, dest_net);
    if (!resolved)
    {
        dwarn("cannot find destination node %s:%d in simulator",
            hdr.to_address.name.c_str(),
            static_cast<int>(hdr.to_address.port)
            );
        return;
    }

    // First message to this peer: establish the server-side session for us.
    auto srv_session = dest_net->get_server_session(_net.address());
    if (srv_session == nullptr)
    {
        rpc_client_session_ptr self = this;
        srv_session.reset(new sim_server_session(*dest_net, _net.address(), self));
        dest_net->on_server_session_accepted(srv_session);
    }

    // Clone the serialized bytes so the receiver owns an independent message.
    message_ptr delivered(new message(msg->writer().get_buffer()));
    delivered->header().from_address = hdr.from_address;
    delivered->header().to_address = hdr.to_address;

    // Loop-back sends arrive immediately; remote sends incur simulated delay.
    int delay_ms = delivered->header().from_address == delivered->header().to_address
        ? 0
        : dest_net->net_delay_milliseconds();
    srv_session->on_recv_request(delivered, delay_ms);
}
示例3: on_recv_request
// Stamps simulator addressing onto an incoming request — the peer's address
// with the client-advertised port as the source, this node as the destination —
// records this session as the reply channel, then forwards to the network layer.
void rpc_server_session::on_recv_request(message_ptr& msg, int delay_ms)
{
    auto& hdr = msg->header();
    hdr.from_address = remote_address();
    hdr.from_address.port = hdr.client.port;
    hdr.to_address = _net.address();

    // Remember which session to reply through.
    msg->server_session().reset(this);

    _net.on_recv_request(msg, delay_ms);
}
示例4: on_recv_reply
bool rpc_client_session::on_recv_reply(uint64_t key, message_ptr& reply, int delay_ms)
{
if (reply != nullptr)
{
reply->header().from_address = remote_address();
reply->header().to_address = _net.address();
}
return _matcher->on_recv_reply(key, reply, delay_ms);
}
示例5: write
// Queues an outgoing message at its task-spec priority. Bytes are only pushed
// to the socket once the connection is established; messages enqueued earlier
// are flushed by the connect-completion path.
void client_net_io::write(message_ptr& msg)
{
    _sq.enqueue(msg, task_spec::get(msg->header().local_rpc_code)->priority);

    // Only drive the socket when connected; otherwise leave the message queued.
    if (_state == SS_CONNECTED)
    {
        do_write();
    }
}
示例6: response_client_message
// Sends an error response back to the client that issued `request`. This path
// is only used for failures (the assert enforces error != ERR_OK); success
// replies are produced elsewhere. A null request gets no reply — presumably a
// locally generated operation with no client waiting.
void replica::response_client_message(message_ptr& request, error_code error, decree d/* = invalid_decree*/)
{
    if (request == nullptr)
    {
        return;
    }

    message_ptr reply = request->create_response();
    reply->writer().write(error);

    dassert(error != ERR_OK, "");
    dinfo("handle replication request with rpc_id = %016llx failed, err = %s",
        request->header().rpc_id, error.to_string());

    rpc::reply(reply);
}
示例7: call
// Issues an RPC through a per-destination client session, creating and
// connecting the session on first use. Session lookup uses a read-lock fast
// path and a write-lock slow path (double-checked against the map) so
// concurrent callers to an existing destination never serialize on the
// exclusive lock.
void connection_oriented_network::call(message_ptr& request, rpc_response_task_ptr& call)
{
    end_point& dest = request->header().to_address;
    rpc_client_session_ptr session = nullptr;
    bool created_here = false;

    // TODO: thread-local client ptr cache
    // Fast path: shared lock, look for an existing session.
    {
        utils::auto_read_lock l(_clients_lock);
        auto found = _clients.find(dest);
        if (found != _clients.end())
        {
            session = found->second;
        }
    }

    // Slow path: re-check under the exclusive lock, create on miss.
    if (session.get() == nullptr)
    {
        utils::auto_write_lock l(_clients_lock);
        auto found = _clients.find(dest);
        if (found == _clients.end())
        {
            session = create_client_session(dest);
            _clients.insert(client_sessions::value_type(dest, session));
            created_here = true;
        }
        else
        {
            session = found->second;
        }
    }

    // Connect outside the map lock so other callers are not blocked on I/O.
    if (created_here)
    {
        session->connect();
    }

    session->call(request, call);
}
示例8: query_partition_configuration_reply
// Callback for a "query partition configuration by index" RPC.
// On success it refreshes the local partition-configuration cache (app id,
// partition count, and per-partition entries, keeping only configurations
// with a newer ballot), then replays any client requests that were parked
// waiting on this partition's configuration.
// Note: `err` reflects RPC transport failure; `resp.err` is the server-side
// result — both must indicate success before the cache is touched.
void replication_app_client_base::query_partition_configuration_reply(error_code err, message_ptr& request, message_ptr& response, int pidx)
{
if (!err)
{
configuration_query_by_index_response resp;
unmarshall(response->reader(), resp);
if (resp.err == ERR_SUCCESS)
{
zauto_write_lock l(_config_lock);
// remember which meta server answered so later queries can go there first
_last_contact_point = response->header().from_address;
if (resp.partitions.size() > 0)
{
// the app id must be stable for the lifetime of this client object;
// a mismatch means the app was dropped and recreated under the same name
if (_app_id != -1 && _app_id != resp.partitions[0].gpid.app_id)
{
dassert(false, "app id is changed (mostly the app was removed and created with the same name), local Vs remote: %u vs %u ",
_app_id, resp.partitions[0].gpid.app_id);
}
_app_id = resp.partitions[0].gpid.app_id;
_app_partition_count = resp.partition_count;
}
// merge the returned configurations into the cache: insert new entries,
// and overwrite existing ones only when the remote ballot is newer
for (auto it = resp.partitions.begin(); it != resp.partitions.end(); it++)
{
partition_configuration& new_config = *it;
auto it2 = _config_cache.find(new_config.gpid.pidx);
if (it2 == _config_cache.end())
{
_config_cache[new_config.gpid.pidx] = new_config;
}
else if (it2->second.ballot < new_config.ballot)
{
it2->second = new_config;
}
}
}
}
// send pending client msgs
// detach the pending-request context under the lock, then replay the
// requests after releasing it so call() does not re-enter while locked
partition_context* pc = nullptr;
{
zauto_lock l(_requests_lock);
auto it = _pending_requests.find(pidx);
if (it != _pending_requests.end())
{
pc = it->second;
_pending_requests.erase(pidx);
}
}
if (pc != nullptr)
{
// re-issue each parked request now that the config is (possibly) fresh
for (auto& req : pc->requests)
{
call(req, false);
}
pc->requests.clear();
delete pc;
}
}
示例9: on_prepare_reply
// Handles a prepare-acknowledgement for mutation `pr.first` sent to the node
// whose intended status at send time is `pr.second` (secondary or potential
// secondary). On success it decrements the corresponding outstanding-ack
// counter and, when it reaches zero, attempts to commit on the primary; on
// failure it still releases the potential-secondary ack slot (so the mutation
// is not blocked forever) and reports the remote failure.
// `err` is the RPC transport error; `resp.err` carries the remote result.
void replica::on_prepare_reply(std::pair<mutation_ptr, partition_status> pr, int err, message_ptr& request, message_ptr& reply)
{
check_hashed_access();
mutation_ptr& mu = pr.first;
partition_status targetStatus = pr.second;
// skip callback for old mutations: a stale ballot or a primary that has
// since stepped down means this ack is no longer relevant
if (mu->data.header.ballot < get_ballot() || PS_PRIMARY != status())
return;
dassert (mu->data.header.ballot == get_ballot(), "");
// identify the replying node from the original request's destination,
// then look up how the primary currently classifies it
end_point node = request->header().to_address;
partition_status st = _primary_states.GetNodeStatus(node);
// handle reply
prepare_ack resp;
// handle error: a transport failure is folded into resp.err so both error
// sources flow through the same success/failure branches below
if (err)
{
resp.err = err;
}
else
{
unmarshall(reply, resp);
ddebug(
"%s: mutation %s on_prepare_reply from %s:%d",
name(), mu->name(),
node.name.c_str(), static_cast<int>(node.port)
);
}
if (resp.err == ERR_SUCCESS)
{
dassert (resp.ballot == get_ballot(), "");
dassert (resp.decree == mu->data.header.decree, "");
// count the ack against the slot matching the node's intended role;
// the last outstanding ack triggers a commit attempt on the primary
switch (targetStatus)
{
case PS_SECONDARY:
dassert (_primary_states.check_exist(node, PS_SECONDARY), "");
dassert (mu->left_secondary_ack_count() > 0, "");
if (0 == mu->decrease_left_secondary_ack_count())
{
do_possible_commit_on_primary(mu);
}
break;
case PS_POTENTIAL_SECONDARY:
dassert (mu->left_potential_secondary_ack_count() > 0, "");
if (0 == mu->decrease_left_potential_secondary_ack_count())
{
do_possible_commit_on_primary(mu);
}
break;
default:
// the node changed role since the prepare was sent; its ack no
// longer counts toward quorum
ddebug(
"%s: mutation %s prepare ack skipped coz the node is now inactive", name(), mu->name()
);
break;
}
}
// failure handling
else
{
// note targetStatus and (current) status may diff
// a failed potential secondary must still release its ack slot, or the
// mutation would wait forever on an ack that can never arrive
if (targetStatus == PS_POTENTIAL_SECONDARY)
{
dassert (mu->left_potential_secondary_ack_count() > 0, "");
if (0 == mu->decrease_left_potential_secondary_ack_count())
{
do_possible_commit_on_primary(mu);
}
}
handle_remote_failure(st, node, resp.err);
}
}