本文整理汇总了C++中session::write_data方法的典型用法代码示例。如果您正苦于以下问题:C++ session::write_data方法的具体用法?C++ session::write_data怎么用?C++ session::write_data使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类session的用法示例。
在下文中一共展示了session::write_data方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: test_cache_timestamp
// Verifies that a cache write performed through the low-level
// dnet_io_control path preserves the timestamp supplied by the caller:
// the record read back must carry exactly the stored timestamp, even
// after some wall-clock time has passed.
static void test_cache_timestamp(session &sess)
{
	argument_data payload("this is a timestamp test");

	key record_key("this is a timestamp test key");
	sess.transform(record_key);

	// Build the raw write-control structure by hand, zeroed first.
	dnet_io_control request;
	memset(&request, 0, sizeof(request));

	request.data = payload.data();
	dnet_current_time(&request.io.timestamp);
	request.io.flags = DNET_IO_FLAGS_CACHE;
	request.io.start = 5;
	request.io.size = payload.size();
	request.fd = -1;
	memcpy(&request.id, &record_key.id(), sizeof(dnet_id));

	ELLIPTICS_REQUIRE(write_result, sess.write_data(request));

	// Let real time advance before reading back, so a timestamp that was
	// (incorrectly) regenerated at read time would differ from the stored one.
	sleep(request.io.start + 2);

	ELLIPTICS_REQUIRE(read_result, sess.read_data(record_key, 0, 0));
	auto io_attr = read_result.get_one().io_attribute();

	BOOST_REQUIRE_EQUAL(io_attr->timestamp.tsec, request.io.timestamp.tsec);
	BOOST_REQUIRE_EQUAL(io_attr->timestamp.tnsec, request.io.timestamp.tnsec);
}
示例2: test_oplock
/*
 * After writing a key to cache, the key's data will be synced to disk
 * cache_sync_timeout seconds later. Before syncing a key, dnet_oplock() is
 * taken for this key; after syncing, the key's oplock is released.
 *
 * The following test checks this mechanics by calling write_data(key, data)
 * multiple times with the same data, then writing cache data via
 * write_cache(key, cache_data), waiting cache_sync_timeout seconds until the
 * cache is synced back to disk (backend) — thereby taking the oplock — and
 * then calling write_data(key, result_data). If the last write_data()
 * operation times out, then dnet_opunlock() (after cache sync) did not
 * properly release the key's oplock.
 */
static void test_oplock(session &sess)
{
	const key id(std::string("oplock_key"));
	const std::string data = "some_data";
	const std::string cache_data = "cache_data";
	const std::string result_data = "result_data";

	const size_t num_writes = 10;
	std::unique_ptr<async_write_result[]> results(new async_write_result[num_writes]);

	// Issue all writes concurrently. write_data() returns a prvalue, so plain
	// assignment already moves; wrapping it in std::move() was redundant.
	for (size_t i = 0; i < num_writes; ++i) {
		results[i] = sess.write_data(id, data, 0);
	}
	for (size_t i = 0; i < num_writes; ++i) {
		results[i].wait();
	}
	ELLIPTICS_COMPARE_REQUIRE(read_data_result, sess.read_data(id, 0, 0), data);

	// Put newer data into the cache and wait for it to be synced back to the
	// backend — the sync path takes (and must release) the key's oplock.
	ELLIPTICS_REQUIRE(async_cache_write, sess.write_cache(id, cache_data, 0));
	sleep(cache_sync_timeout + 1);
	ELLIPTICS_COMPARE_REQUIRE(read_cache_result, sess.read_data(id, 0, 0), cache_data);

	// This final write would time out if the cache sync leaked the oplock.
	ELLIPTICS_REQUIRE(async_write, sess.write_data(id, result_data, 0));
	ELLIPTICS_COMPARE_REQUIRE(read_result, sess.read_data(id, 0, 0), result_data);
}
示例3: test_write_order_execution
/*
 * Multiple writes with the same key must be processed in the same order as
 * they were initiated by the client.
 *
 * The following test checks this mechanics by calling write_cas() with data
 * containing a counter that is incremented after every write_cas(), and
 * checking that the previously stored counter is one unit less than the
 * current counter. The test also writes multiple different keys (with
 * repetitions) in a different order, thereby modelling a real workload case.
 */
static void test_write_order_execution(session &sess)
{
	const int num_write_repetitions = 5;
	const int num_different_keys = 10;

	// Each key appears num_write_repetitions times; .second is the key index
	// used to address this key's slot in write_counter below.
	std::vector<std::pair<key, int>> keys;
	for (int i = 0; i < num_different_keys; ++i) {
		key id(std::to_string(static_cast<unsigned long long>(i)));
		for (int j = 0; j < num_write_repetitions; ++j) {
			keys.push_back(std::make_pair(id, i));
		}
	}

	std::unique_ptr<async_write_result[]> results(new async_write_result[keys.size()]);
	dnet_id old_csum;

	const int num_iterations = 30;
	for (int i = 0; i < num_iterations; ++i) {
		// Every key is associated with a counter, initialized to zero.
		std::vector<int> write_counter(num_different_keys, 0);

		// NOTE(review): std::random_shuffle was deprecated in C++14 and removed
		// in C++17; switch to std::shuffle with an explicit engine when the
		// project moves past C++14.
		std::random_shuffle(keys.begin(), keys.end());

		for (size_t j = 0; j < keys.size(); ++j) {
			// Increment the counter associated with the key identified by key_id.
			const int key_id = keys[j].second;
			const int new_value = write_counter[key_id]++;
			if (new_value > 0) {
				// Subsequent write: CAS against the checksum of the previous
				// value, so it only succeeds once the prior write has landed.
				const int prev_value = new_value - 1;
				memset(&old_csum, 0, sizeof(old_csum));
				sess.transform(std::to_string(static_cast<unsigned long long>(prev_value)), old_csum);
				// write_cas() returns a prvalue — plain assignment already
				// moves; the original std::move() here was redundant.
				results[j] = sess.write_cas(keys[j].first, std::to_string(static_cast<unsigned long long>(new_value)), old_csum, 0);
			} else {
				// First write of this key in the current iteration.
				results[j] = sess.write_data(keys[j].first, std::to_string(static_cast<unsigned long long>(new_value)), 0);
			}
		}

		for (size_t j = 0; j < keys.size(); ++j) {
			results[j].wait();
			const int err = results[j].error().code();
			BOOST_REQUIRE_MESSAGE(err == 0,
				"write_cas() failed (err=" + std::to_string(static_cast<unsigned long long>(err)) + "): "
				"multiple consecutive writes are executed out-of-order "
				"or overlapped. Oplock mechanism of backend's request queue is broken.");
		}
	}
}
示例4: test_backend_weights
// Writing keys to all groups updates backend weights for every backend they
// were written to. Writes to a slow backend lead to a significant reduction
// of that backend's weight compared to faster ones.
// read_data() uses backend weights to choose the fastest group via dnet_mix_states().
//
// The following test checks this mechanics by reading previously written keys
// and checking the read distribution among backends. A slow backend is
// simulated by setting an artificial delay. The expected outcome is that
// reads are rarely sent to that slow backend.
//
// We define "rarely" as no more than 1% of total reads. This value was found empirically.
static void test_backend_weights(session &sess)
{
	// Set backend delay to simulate slow-backend i/o behaviour for one group.
	set_backends_delay_for_group(sess, slow_group_id, backend_delay);

	const int num_keys = 10;
	for (int i = 0; i < num_keys; ++i) {
		const key id = std::string("key_") + std::to_string(static_cast<long long>(i));
		const std::string data = "some_data";
		ELLIPTICS_REQUIRE(async_write, sess.write_data(id, data, 0));
	}

	// Read the keys many times and count how often the slow group is chosen.
	const int num_reads = 1000;
	int num_slow_group_reads = 0;
	for (int i = 0; i < num_reads; ++i) {
		const key id = std::string("key_") + std::to_string(static_cast<long long>(i % num_keys));
		auto async_result = sess.read_data(id, 0, 0);
		async_result.wait();

		read_result_entry read_result;
		async_result.get(read_result);
		const dnet_cmd *cmd = read_result.command();
		const int group_id = cmd->id.group_id;
		if (group_id == slow_group_id)
			++num_slow_group_reads;
	}

	// Reset the artificial delay BEFORE the assertion: BOOST_REQUIRE_MESSAGE
	// aborts the test case on failure, and the original code would then leave
	// the backends of slow_group_id delayed for every subsequent test.
	set_backends_delay_for_group(sess, slow_group_id, 0);

	const int max_reads_from_slow_group = 10;
	BOOST_REQUIRE_MESSAGE(num_slow_group_reads < max_reads_from_slow_group,
		"Too much reads from slow group (it means that backend weights are not working or backend hardware is extremely slow): "
		"num_slow_group_reads: " + std::to_string(static_cast<long long>(num_slow_group_reads)) +
		", max_reads_from_slow_group: " + std::to_string(static_cast<long long>(max_reads_from_slow_group)));
}