This article collects and summarizes typical usage examples of the C++ Future::get method. If you have been wondering what exactly Future::get does, how to call it, or what it looks like in real code, the curated examples below may help. You can also read further about the Future class this method belongs to.
15 code examples of the Future::get method are shown below, sorted by popularity by default.
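Before the individual examples, here is a minimal sketch of the wait-then-get pattern almost all of them follow. It is illustrative only: it assumes the libprocess Future/Promise API from <process/future.hpp>, and the Promise is a stand-in for whatever asynchronous operation produces the Future in the real examples (the tests below usually wait with the AWAIT_READY helper rather than Future::await).
#include <process/future.hpp>

using process::Future;
using process::Promise;

void waitThenGet()
{
  // Stand-in for an asynchronous operation (an assumption, not taken
  // from the examples below).
  Promise<int> promise;
  Future<int> future = promise.future();

  promise.set(42);  // Normally completed asynchronously elsewhere.

  // Wait for the future to leave the pending state; the tests below
  // use AWAIT_READY(future) for this instead.
  if (future.await() && future.isReady()) {
    int value = future.get();  // get() is only safe on a ready future.
    (void) value;
  } else if (future.isFailed()) {
    // future.failure() describes the error; calling get() here would
    // be a fatal error in libprocess.
  }
}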
Example 1: containerizer
// This test verifies that the launch of a new executor results in the
// framework work directory created by an old executor being unscheduled
// from garbage collection.
TEST_F(GarbageCollectorIntegrationTest, Unschedule)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
Future<SlaveRegisteredMessage> slaveRegistered =
FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
ExecutorInfo executor1; // Bug in gcc 4.1.*, must assign on next line.
executor1 = CREATE_EXECUTOR_INFO("executor-1", "exit 1");
ExecutorInfo executor2; // Bug in gcc 4.1.*, must assign on next line.
executor2 = CREATE_EXECUTOR_INFO("executor-2", "exit 1");
MockExecutor exec1(executor1.executor_id());
MockExecutor exec2(executor2.executor_id());
hashmap<ExecutorID, Executor*> execs;
execs[executor1.executor_id()] = &exec1;
execs[executor2.executor_id()] = &exec2;
TestContainerizer containerizer(execs);
slave::Flags flags = CreateSlaveFlags();
Try<PID<Slave> > slave = StartSlave(&containerizer, flags);
ASSERT_SOME(slave);
AWAIT_READY(slaveRegistered);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(_, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
Resources resources = Resources::parse(flags.resources.get()).get();
double cpus = resources.get<Value::Scalar>("cpus").get().value();
double mem = resources.get<Value::Scalar>("mem").get().value();
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(LaunchTasks(executor1, 1, cpus, mem, "*"));
EXPECT_CALL(exec1, registered(_, _, _, _));
EXPECT_CALL(exec1, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(_, _))
.WillOnce(FutureArg<1>(&status));
driver.start();
AWAIT_READY(frameworkId);
AWAIT_READY(status);
EXPECT_EQ(TASK_RUNNING, status.get().state());
// TODO(benh/vinod): Would've been great to match the dispatch
// against arguments here.
// NOTE: Since Google Mock selects the last matching expectation
// that is still active, the order of the (un)schedule expectations
// below is the reverse of the actual (un)schedule call order.
// Schedule framework work directory.
Future<Nothing> scheduleFrameworkWork =
FUTURE_DISPATCH(_, &GarbageCollectorProcess::schedule);
// Schedule top level executor work directory.
Future<Nothing> scheduleExecutorWork =
FUTURE_DISPATCH(_, &GarbageCollectorProcess::schedule);
// Schedule executor run work directory.
Future<Nothing> scheduleExecutorRunWork =
FUTURE_DISPATCH(_, &GarbageCollectorProcess::schedule);
// Unschedule framework work directory.
Future<Nothing> unscheduleFrameworkWork =
FUTURE_DISPATCH(_, &GarbageCollectorProcess::unschedule);
// We ask the isolator to kill the first executor below.
EXPECT_CALL(exec1, shutdown(_))
.Times(AtMost(1));
EXPECT_CALL(sched, statusUpdate(_, _))
.Times(AtMost(2)); // Once for a TASK_LOST then once for TASK_RUNNING.
// We use the killed executor's/task's resources to run another task.
EXPECT_CALL(sched, resourceOffers(_, _))
.WillOnce(LaunchTasks(executor2, 1, cpus, mem, "*"));
EXPECT_CALL(exec2, registered(_, _, _, _));
//......... part of the code omitted here .........
Example 2: Group
// Tests whether a slave correctly detects the new master when its
// ZooKeeper session is expired and a new master is elected before the
// slave reconnects with ZooKeeper.
TEST_F(ZooKeeperMasterContenderDetectorTest,
MasterDetectorExpireSlaveZKSessionNewMaster)
{
Try<zookeeper::URL> url = zookeeper::URL::parse(
"zk://" + server->connectString() + "/mesos");
ASSERT_SOME(url);
// Simulate a leading master.
Owned<zookeeper::Group> leaderGroup(
new Group(url.get(), MASTER_CONTENDER_ZK_SESSION_TIMEOUT));
// 1. Simulate a leading contender.
ZooKeeperMasterContender leaderContender(leaderGroup);
ZooKeeperMasterDetector leaderDetector(leaderGroup);
PID<Master> pid;
pid.node.ip = 10000000;
pid.node.port = 10000;
MasterInfo leader = internal::protobuf::createMasterInfo(pid);
leaderContender.initialize(leader);
Future<Future<Nothing> > contended = leaderContender.contend();
AWAIT_READY(contended);
Future<Option<MasterInfo> > detected = leaderDetector.detect(None());
AWAIT_READY(detected);
EXPECT_SOME_EQ(leader, detected.get());
// 2. Simulate a non-leading contender.
Owned<zookeeper::Group> followerGroup(
new Group(url.get(), MASTER_CONTENDER_ZK_SESSION_TIMEOUT));
ZooKeeperMasterContender followerContender(followerGroup);
ZooKeeperMasterDetector followerDetector(followerGroup);
PID<Master> pid2;
pid2.node.ip = 10000001;
pid2.node.port = 10001;
MasterInfo follower = internal::protobuf::createMasterInfo(pid2);
followerContender.initialize(follower);
contended = followerContender.contend();
AWAIT_READY(contended);
detected = followerDetector.detect(None());
EXPECT_SOME_EQ(leader, detected.get());
// 3. Simulate a non-contender.
Owned<zookeeper::Group> nonContenderGroup(
new Group(url.get(), MASTER_DETECTOR_ZK_SESSION_TIMEOUT));
ZooKeeperMasterDetector nonContenderDetector(nonContenderGroup);
detected = nonContenderDetector.detect();
EXPECT_SOME_EQ(leader, detected.get());
detected = nonContenderDetector.detect(leader);
// Now expire the slave's and leading master's zk sessions.
// NOTE: Here we assume that the slave stays disconnected from
// ZooKeeper when the leading master loses its session.
Future<Option<int64_t> > slaveSession = nonContenderGroup->session();
AWAIT_READY(slaveSession);
Future<Option<int64_t> > masterSession = leaderGroup->session();
AWAIT_READY(masterSession);
server->expireSession(slaveSession.get().get());
server->expireSession(masterSession.get().get());
// Wait for the session expirations; the detector will first receive
// a "no master detected" event.
AWAIT_READY(detected);
EXPECT_NONE(detected.get());
// nonContenderDetector can now re-detect the new master.
detected = nonContenderDetector.detect(detected.get());
AWAIT_READY(detected);
EXPECT_SOME_EQ(follower, detected.get());
}
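A note on the nested get() calls in the example above (for instance slaveSession.get().get()): the outer get() unwraps the ready Future<Option<int64_t>> into an Option<int64_t>, and the inner get() unwraps that Option into the raw session id. The following is a minimal sketch under the same assumptions as before, using only the libprocess Future and stout Option types; the Promise is a hypothetical stand-in for Group::session().
#include <stdint.h>

#include <process/future.hpp>
#include <stout/option.hpp>

void nestedGet()
{
  // Hypothetical stand-in for the asynchronous Group::session() call.
  process::Promise<Option<int64_t>> promise;
  process::Future<Option<int64_t>> session = promise.future();

  promise.set(Option<int64_t>(12345));  // Hypothetical session id.

  if (session.isReady() && session.get().isSome()) {
    // First get(): Future -> Option; second get(): Option -> value.
    int64_t id = session.get().get();
    (void) id;
  }
}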
Example 3: exec
// This test does not set any Accept header for the subscribe call.
// The default response media type should be "application/json" in
// this case.
TEST_P(ExecutorHttpApiTest, NoAcceptHeader)
{
Try<PID<Master>> master = StartMaster();
ASSERT_SOME(master);
ExecutorID executorId = DEFAULT_EXECUTOR_ID;
MockExecutor exec(executorId);
Try<PID<Slave>> slave = StartSlave(&exec);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers));
Future<Nothing> statusUpdate;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureSatisfy(&statusUpdate));
driver.start();
AWAIT_READY(frameworkId);
AWAIT_READY(offers);
ASSERT_EQ(1u, offers.get().size());
EXPECT_CALL(exec, registered(_, _, _, _))
.Times(1);
EXPECT_CALL(exec, launchTask(_, _))
.WillOnce(SendStatusUpdateFromTask(TASK_RUNNING));
TaskInfo taskInfo = createTask(offers.get()[0], "", executorId);
driver.launchTasks(offers.get()[0].id(), {taskInfo});
// Wait until status update is received on the scheduler before sending
// an executor subscribe request.
AWAIT_READY(statusUpdate);
// Only subscribe needs to 'Accept' JSON or protobuf.
Call call;
call.mutable_framework_id()->CopyFrom(evolve(frameworkId.get()));
call.mutable_executor_id()->CopyFrom(evolve(executorId));
call.set_type(Call::SUBSCRIBE);
call.mutable_subscribe();
// Retrieve the parameter passed as content type to this test.
const ContentType contentType = GetParam();
// No 'Accept' header leads to all media types considered
// acceptable. JSON will be chosen by default.
process::http::Headers headers;
Future<Response> response = process::http::streaming::post(
slave.get(),
"api/v1/executor",
headers,
serialize(contentType, call),
stringify(contentType));
AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
EXPECT_SOME_EQ(APPLICATION_JSON, response.get().headers.get("Content-Type"));
Shutdown();
}
Example 4: _collect
void PosixDiskIsolatorProcess::_collect(
const ContainerID& containerId,
const string& path,
const Future<Bytes>& future)
{
if (future.isDiscarded()) {
LOG(INFO) << "Checking disk usage at '" << path << "' for container "
<< containerId << " has been cancelled";
} else if (future.isFailed()) {
LOG(ERROR) << "Checking disk usage at '" << path << "' for container "
<< containerId << " has failed: " << future.failure();
}
if (!infos.contains(containerId)) {
// The container might have just been destroyed.
return;
}
const Owned<Info>& info = infos[containerId];
if (!info->paths.contains(path)) {
// The path might have just been removed from this container's
// resources.
return;
}
// Check if the disk usage exceeds the quota. If yes, report the
// limitation. We keep collecting the disk usage for 'path' by
// initiating another round of disk usage check. The check will be
// throttled by DiskUsageCollector.
if (future.isReady()) {
// Save the last disk usage.
info->paths[path].lastUsage = future.get();
// We need to ignore the quota enforcement check for MOUNT type
// disk resources because its quota will be enforced by the
// underlying filesystem.
bool isDiskSourceMount = false;
foreach (const Resource& resource, info->paths[path].quota) {
if (resource.has_disk() &&
resource.disk().has_source() &&
resource.disk().source().type() ==
Resource::DiskInfo::Source::MOUNT) {
isDiskSourceMount = true;
}
}
if (flags.enforce_container_disk_quota && !isDiskSourceMount) {
Option<Bytes> quota = info->paths[path].quota.disk();
CHECK_SOME(quota);
if (future.get() > quota.get()) {
info->limitation.set(
protobuf::slave::createContainerLimitation(
Resources(info->paths[path].quota),
"Disk usage (" + stringify(future.get()) +
") exceeds quota (" + stringify(quota.get()) + ")",
TaskStatus::REASON_CONTAINER_LIMITATION_DISK));
}
}
}
Example 5: sessionTimeout
// Tests that detectors do not fail when we reach our ZooKeeper
// session timeout.
TEST_F(ZooKeeperMasterContenderDetectorTest, MasterDetectorTimedoutSession)
{
// Use an arbitrary timeout value.
Duration sessionTimeout(Seconds(10));
Try<zookeeper::URL> url = zookeeper::URL::parse(
"zk://" + server->connectString() + "/mesos");
ASSERT_SOME(url);
Owned<zookeeper::Group> leaderGroup(new Group(url.get(), sessionTimeout));
// First we bring up three master contenders/detectors:
// 1. A leading contender.
// 2. A non-leading contender.
// 3. A non-contender (detector).
// 1. Simulate a leading contender.
ZooKeeperMasterContender leaderContender(leaderGroup);
PID<Master> pid;
pid.node.ip = 10000000;
pid.node.port = 10000;
MasterInfo leader = internal::protobuf::createMasterInfo(pid);
leaderContender.initialize(leader);
Future<Future<Nothing> > contended = leaderContender.contend();
AWAIT_READY(contended);
Future<Nothing> leaderLostCandidacy = contended.get();
ZooKeeperMasterDetector leaderDetector(leaderGroup);
Future<Option<MasterInfo> > detected = leaderDetector.detect();
AWAIT_READY(detected);
EXPECT_SOME_EQ(leader, detected.get());
// 2. Simulate a non-leading contender.
Owned<zookeeper::Group> followerGroup(new Group(url.get(), sessionTimeout));
ZooKeeperMasterContender followerContender(followerGroup);
PID<Master> pid2;
pid2.node.ip = 10000001;
pid2.node.port = 10001;
MasterInfo follower = internal::protobuf::createMasterInfo(pid2);
followerContender.initialize(follower);
contended = followerContender.contend();
AWAIT_READY(contended);
Future<Nothing> followerLostCandidacy = contended.get();
ZooKeeperMasterDetector followerDetector(followerGroup);
detected = followerDetector.detect();
AWAIT_READY(detected);
EXPECT_SOME_EQ(leader, detected.get());
// 3. Simulate a non-contender.
Owned<zookeeper::Group> nonContenderGroup(
new Group(url.get(), sessionTimeout));
ZooKeeperMasterDetector nonContenderDetector(nonContenderGroup);
detected = nonContenderDetector.detect();
EXPECT_SOME_EQ(leader, detected.get());
// Expect the reconnecting events after we shut down ZooKeeper.
Future<Nothing> leaderReconnecting = FUTURE_DISPATCH(
leaderGroup->process->self(),
&GroupProcess::reconnecting);
Future<Nothing> followerReconnecting = FUTURE_DISPATCH(
followerGroup->process->self(),
&GroupProcess::reconnecting);
Future<Nothing> nonContenderReconnecting = FUTURE_DISPATCH(
nonContenderGroup->process->self(),
&GroupProcess::reconnecting);
server->shutdownNetwork();
AWAIT_READY(leaderReconnecting);
AWAIT_READY(followerReconnecting);
AWAIT_READY(nonContenderReconnecting);
// Now the detectors re-detect.
Future<Option<MasterInfo> > leaderDetected =
leaderDetector.detect(leader);
Future<Option<MasterInfo> > followerDetected =
followerDetector.detect(leader);
Future<Option<MasterInfo> > nonContenderDetected =
nonContenderDetector.detect(leader);
Clock::pause();
// We may need to advance multiple times because we could have
// advanced the clock before the timer in Group starts.
//......... part of the code omitted here .........
Example 6: driver
TEST_F(ExceptionTest, DisallowSchedulerCallbacksOnAbort)
{
Try<PID<Master> > master = StartMaster();
ASSERT_SOME(master);
Try<PID<Slave> > slave = StartSlave();
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _))
.Times(1);
Future<vector<Offer> > offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return());
Future<process::Message> message =
FUTURE_MESSAGE(Eq(FrameworkRegisteredMessage().GetTypeName()), _, _);
driver.start();
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
// None of these callbacks should be invoked.
EXPECT_CALL(sched, offerRescinded(&driver, _))
.Times(0);
EXPECT_CALL(sched, statusUpdate(&driver, _))
.Times(0);
EXPECT_CALL(sched, frameworkMessage(&driver, _, _, _))
.Times(0);
EXPECT_CALL(sched, slaveLost(&driver, _))
.Times(0);
EXPECT_CALL(sched, error(&driver, _))
.Times(0);
ASSERT_EQ(DRIVER_ABORTED, driver.abort());
Future<RescindResourceOfferMessage> rescindMsg =
FUTURE_PROTOBUF(RescindResourceOfferMessage(), _, _);
// Simulate a message from master to the scheduler.
RescindResourceOfferMessage rescindMessage;
rescindMessage.mutable_offer_id()->MergeFrom(offers.get()[0].id());
process::post(message.get().to, rescindMessage);
AWAIT_READY(rescindMsg);
Future<UnregisterFrameworkMessage> unregisterMsg =
FUTURE_PROTOBUF(UnregisterFrameworkMessage(), _, _);
driver.stop();
// Ensures reception of RescindResourceOfferMessage.
AWAIT_READY(unregisterMsg);
Shutdown();
}
Example 7: stringify
// This test verifies that the provisioner can provision a rootfs
// from an image that has already been put into the store directory.
TEST_F(ProvisionerAppcTest, ROOT_Provision)
{
// Create provisioner.
slave::Flags flags;
flags.image_providers = "APPC";
flags.appc_store_dir = path::join(os::getcwd(), "store");
flags.image_provisioner_backend = "bind";
flags.work_dir = "work_dir";
Fetcher fetcher;
Try<Owned<Provisioner>> provisioner = Provisioner::create(flags, &fetcher);
ASSERT_SOME(provisioner);
// Create a simple image in the store:
// <store>
// |--images
// |--<id>
// |--manifest
// |--rootfs/tmp/test
JSON::Value manifest = JSON::parse(
"{"
" \"acKind\": \"ImageManifest\","
" \"acVersion\": \"0.6.1\","
" \"name\": \"foo.com/bar\","
" \"labels\": ["
" {"
" \"name\": \"version\","
" \"value\": \"1.0.0\""
" },"
" {"
" \"name\": \"arch\","
" \"value\": \"amd64\""
" },"
" {"
" \"name\": \"os\","
" \"value\": \"linux\""
" }"
" ],"
" \"annotations\": ["
" {"
" \"name\": \"created\","
" \"value\": \"1438983392\""
" }"
" ]"
"}").get();
// The 'imageId' below has the correct format, but it is not computed
// by hashing the tarball of the image. That is fine here because we
// assume the images under 'images' have passed such a check when they
// were downloaded and validated.
string imageId =
"sha512-e77d96aa0240eedf134b8c90baeaf76dca8e78691836301d7498c84020446042e"
"797b296d6ab296e0954c2626bfb264322ebeb8f447dac4fac6511ea06bc61f0";
string imagePath = path::join(flags.appc_store_dir, "images", imageId);
ASSERT_SOME(os::mkdir(path::join(imagePath, "rootfs", "tmp")));
ASSERT_SOME(
os::write(path::join(imagePath, "rootfs", "tmp", "test"), "test"));
ASSERT_SOME(
os::write(path::join(imagePath, "manifest"), stringify(manifest)));
// Recover. This is when the image in the store is loaded.
AWAIT_READY(provisioner.get()->recover({}, {}));
// Simulate a task that requires an image.
Image image;
image.mutable_appc()->set_name("foo.com/bar");
ContainerID containerId;
containerId.set_value("12345");
Future<string> rootfs = provisioner.get()->provision(containerId, image);
AWAIT_READY(rootfs);
string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);
string containerDir =
slave::provisioner::paths::getContainerDir(
provisionerDir,
containerId);
Try<hashmap<string, hashset<string>>> rootfses =
slave::provisioner::paths::listContainerRootfses(
provisionerDir,
containerId);
ASSERT_SOME(rootfses);
// Verify that the rootfs is successfully provisioned.
ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size());
EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(),
Path(rootfs.get()).basename());
Future<bool> destroy = provisioner.get()->destroy(containerId);
AWAIT_READY(destroy);
//......... part of the code omitted here .........
Example 8: createTestImage
// This test verifies that the provisioner can provision a rootfs
// from an image that has already been put into the store directory.
TEST_F(ProvisionerAppcTest, ROOT_Provision)
{
// Create provisioner.
slave::Flags flags;
flags.image_providers = "APPC";
flags.appc_store_dir = path::join(os::getcwd(), "store");
flags.image_provisioner_backend = "bind";
flags.work_dir = "work_dir";
Try<Owned<Provisioner>> provisioner = Provisioner::create(flags);
ASSERT_SOME(provisioner);
Try<string> createImage = createTestImage(
flags.appc_store_dir,
getManifest());
ASSERT_SOME(createImage);
// Recover. This is when the image in the store is loaded.
AWAIT_READY(provisioner.get()->recover({}, {}));
// Simulate a task that requires an image.
Image image;
image.mutable_appc()->CopyFrom(getTestImage());
ContainerID containerId;
containerId.set_value("12345");
Future<slave::ProvisionInfo> provisionInfo =
provisioner.get()->provision(containerId, image);
AWAIT_READY(provisionInfo);
string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);
string containerDir =
slave::provisioner::paths::getContainerDir(
provisionerDir,
containerId);
Try<hashmap<string, hashset<string>>> rootfses =
slave::provisioner::paths::listContainerRootfses(
provisionerDir,
containerId);
ASSERT_SOME(rootfses);
// Verify that the rootfs is successfully provisioned.
ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size());
EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(),
Path(provisionInfo.get().rootfs).basename());
Future<bool> destroy = provisioner.get()->destroy(containerId);
AWAIT_READY(destroy);
// One rootfs is destroyed.
EXPECT_TRUE(destroy.get());
// The container directory is successfully cleaned up.
EXPECT_FALSE(os::exists(containerDir));
}
Example 9: monitor
TEST(MonitorTest, Statistics)
{
FrameworkID frameworkId;
frameworkId.set_value("framework");
ExecutorID executorId;
executorId.set_value("executor");
ExecutorInfo executorInfo;
executorInfo.mutable_executor_id()->CopyFrom(executorId);
executorInfo.mutable_framework_id()->CopyFrom(frameworkId);
executorInfo.set_name("name");
executorInfo.set_source("source");
ResourceStatistics statistics;
statistics.set_cpus_nr_periods(100);
statistics.set_cpus_nr_throttled(2);
statistics.set_cpus_user_time_secs(4);
statistics.set_cpus_system_time_secs(1);
statistics.set_cpus_throttled_time_secs(0.5);
statistics.set_cpus_limit(1.0);
statistics.set_mem_file_bytes(0);
statistics.set_mem_anon_bytes(0);
statistics.set_mem_mapped_file_bytes(0);
statistics.set_mem_rss_bytes(1024);
statistics.set_mem_limit_bytes(2048);
statistics.set_timestamp(0);
ResourceMonitor monitor([=]() -> Future<ResourceUsage> {
Resources resources = Resources::parse("cpus:1;mem:2").get();
ResourceUsage usage;
ResourceUsage::Executor* executor = usage.add_executors();
executor->mutable_executor_info()->CopyFrom(executorInfo);
executor->mutable_allocated()->CopyFrom(resources);
executor->mutable_statistics()->CopyFrom(statistics);
return usage;
});
UPID upid("monitor", process::address());
Future<http::Response> response = http::get(upid, "statistics");
AWAIT_READY(response);
AWAIT_EXPECT_RESPONSE_STATUS_EQ(http::OK().status, response);
AWAIT_EXPECT_RESPONSE_HEADER_EQ(
"application/json",
"Content-Type",
response);
JSON::Array expected;
JSON::Object usage;
usage.values["executor_id"] = "executor";
usage.values["executor_name"] = "name";
usage.values["framework_id"] = "framework";
usage.values["source"] = "source";
usage.values["statistics"] = JSON::Protobuf(statistics);
expected.values.push_back(usage);
Try<JSON::Array> result = JSON::parse<JSON::Array>(response.get().body);
ASSERT_SOME(result);
ASSERT_EQ(expected, result.get());
}
Example 10: parse
// Abstracts the manifest accessor for the test fixture. This provides
// the ability to customize manifests for fixtures.
virtual JSON::Value getManifest() const
{
return JSON::parse(
R"~(
{
"acKind": "ImageManifest",
"acVersion": "0.6.1",
"name": "foo.com/bar",
"labels": [
{
"name": "version",
"value": "1.0.0"
},
{
"name": "arch",
"value": "amd64"
},
{
"name": "os",
"value": "linux"
}
],
"annotations": [
{
"name": "created",
"value": "1438983392"
}
]
})~").get();
}
};
TEST_F(AppcStoreTest, Recover)
{
// Create store.
slave::Flags flags;
flags.appc_store_dir = path::join(os::getcwd(), "store");
Try<Owned<slave::Store>> store = Store::create(flags);
ASSERT_SOME(store);
Try<string> createImage = createTestImage(
flags.appc_store_dir,
getManifest());
ASSERT_SOME(createImage);
const string imagePath = createImage.get();
// Recover the image from disk.
AWAIT_READY(store.get()->recover());
Image image;
image.mutable_appc()->CopyFrom(getTestImage());
Future<slave::ImageInfo> ImageInfo = store.get()->get(image);
AWAIT_READY(ImageInfo);
EXPECT_EQ(1u, ImageInfo.get().layers.size());
ASSERT_SOME(os::realpath(imagePath));
EXPECT_EQ(
os::realpath(path::join(imagePath, "rootfs")).get(),
ImageInfo.get().layers.front());
}
Example 11: driver
// This is an end-to-end test that verifies that the slave returns the
// correct ResourceUsage based on the currently running executors, and
// that the values obtained from the statistics endpoint are as expected.
TEST_F(MonitorIntegrationTest, RunningExecutor)
{
Try<PID<Master>> master = StartMaster();
ASSERT_SOME(master);
Try<PID<Slave>> slave = StartSlave();
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
EXPECT_FALSE(offers.get().empty());
const Offer& offer = offers.get()[0];
// Launch a task and wait until it is in RUNNING status.
TaskInfo task = createTask(
offer.slave_id(),
Resources::parse("cpus:1;mem:32").get(),
"sleep 1000");
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status));
driver.launchTasks(offer.id(), {task});
AWAIT_READY(status);
EXPECT_EQ(task.task_id(), status.get().task_id());
EXPECT_EQ(TASK_RUNNING, status.get().state());
// Hit the statistics endpoint and expect the response contains the
// resource statistics for the running container.
UPID upid("monitor", process::address());
Future<http::Response> response = http::get(upid, "statistics");
AWAIT_READY(response);
AWAIT_EXPECT_RESPONSE_STATUS_EQ(http::OK().status, response);
AWAIT_EXPECT_RESPONSE_HEADER_EQ(
"application/json",
"Content-Type",
response);
// Verify that the statistics in the response contains the proper
// resource limits for the container.
Try<JSON::Value> value = JSON::parse(response.get().body);
ASSERT_SOME(value);
Try<JSON::Value> expected = JSON::parse(strings::format(
"[{"
"\"statistics\":{"
"\"cpus_limit\":%g,"
"\"mem_limit_bytes\":%lu"
"}"
"}]",
1 + slave::DEFAULT_EXECUTOR_CPUS,
(Megabytes(32) + slave::DEFAULT_EXECUTOR_MEM).bytes()).get());
ASSERT_SOME(expected);
EXPECT_TRUE(value.get().contains(expected.get()));
driver.stop();
driver.join();
Shutdown();
}
Example 12: upid
TEST(MetricsTest, SnapshotTimeout)
{
ASSERT_TRUE(GTEST_IS_THREADSAFE);
UPID upid("metrics", process::address());
Clock::pause();
// Advance the clock to avoid rate limit.
Clock::advance(Seconds(1));
// Ensure the timeout parameter is validated.
AWAIT_EXPECT_RESPONSE_STATUS_EQ(
BadRequest().status,
http::get(upid, "snapshot", "timeout=foobar"));
// Advance the clock to avoid rate limit.
Clock::advance(Seconds(1));
// Add gauges and a counter.
GaugeProcess process;
PID<GaugeProcess> pid = spawn(&process);
ASSERT_TRUE(pid);
Gauge gauge("test/gauge", defer(pid, &GaugeProcess::get));
Gauge gaugeFail("test/gauge_fail", defer(pid, &GaugeProcess::fail));
Gauge gaugeTimeout("test/gauge_timeout", defer(pid, &GaugeProcess::pending));
Counter counter("test/counter");
AWAIT_READY(metrics::add(gauge));
AWAIT_READY(metrics::add(gaugeFail));
AWAIT_READY(metrics::add(gaugeTimeout));
AWAIT_READY(metrics::add(counter));
// Advance the clock to avoid rate limit.
Clock::advance(Seconds(1));
// Get the snapshot.
Future<Response> response = http::get(upid, "snapshot", "timeout=2secs");
// Make sure the request is pending before the timeout is exceeded.
Clock::settle();
ASSERT_TRUE(response.isPending());
// Advance the clock to trigger the timeout.
Clock::advance(Seconds(2));
AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
// Parse the response.
Try<JSON::Object> responseJSON =
JSON::parse<JSON::Object>(response.get().body);
ASSERT_SOME(responseJSON);
// We can't use simple JSON equality testing here as initializing
// libprocess adds metrics to the system. We want to only check if
// the metrics from this test are correctly handled.
map<string, JSON::Value> values = responseJSON.get().values;
EXPECT_EQ(1u, values.count("test/counter"));
EXPECT_FLOAT_EQ(0.0, values["test/counter"].as<JSON::Number>().value);
EXPECT_EQ(1u, values.count("test/gauge"));
EXPECT_FLOAT_EQ(42.0, values["test/gauge"].as<JSON::Number>().value);
EXPECT_EQ(0u, values.count("test/gauge_fail"));
EXPECT_EQ(0u, values.count("test/gauge_timeout"));
// Remove the metrics and ensure they are no longer in the snapshot.
AWAIT_READY(metrics::remove(gauge));
AWAIT_READY(metrics::remove(gaugeFail));
AWAIT_READY(metrics::remove(gaugeTimeout));
AWAIT_READY(metrics::remove(counter));
// Advance the clock to avoid rate limit.
Clock::advance(Seconds(1));
// Ensure MetricsProcess has removed the metrics.
Clock::settle();
response = http::get(upid, "snapshot", "timeout=2secs");
AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
// Parse the response.
responseJSON = JSON::parse<JSON::Object>(response.get().body);
ASSERT_SOME(responseJSON);
values = responseJSON.get().values;
ASSERT_SOME(responseJSON);
EXPECT_EQ(0u, values.count("test/counter"));
EXPECT_EQ(0u, values.count("test/gauge"));
EXPECT_EQ(0u, values.count("test/gauge_fail"));
EXPECT_EQ(0u, values.count("test/gauge_timeout"));
terminate(process);
wait(process);
}
Example 13: reader
TEST_F(LogStateTest, Diff)
{
Future<Variable<Slaves>> future1 = state->fetch<Slaves>("slaves");
AWAIT_READY(future1);
Variable<Slaves> variable = future1.get();
Slaves slaves = variable.get();
ASSERT_EQ(0, slaves.slaves().size());
for (size_t i = 0; i < 1024; i++) {
Slave* slave = slaves.add_slaves();
slave->mutable_info()->set_hostname("localhost" + stringify(i));
}
variable = variable.mutate(slaves);
Future<Option<Variable<Slaves>>> future2 = state->store(variable);
AWAIT_READY(future2);
ASSERT_SOME(future2.get());
variable = future2.get().get();
Slave* slave = slaves.add_slaves();
slave->mutable_info()->set_hostname("localhost1024");
variable = variable.mutate(slaves);
future2 = state->store(variable);
AWAIT_READY(future2);
ASSERT_SOME(future2.get());
// It's possible that we're doing truncation asynchronously, which
// will cause the test to fail because we'll end up getting a
// pending position from Log::Reader::ending, which in turn will cause
// Log::Reader::read to fail. To remedy this, we pause the clock and
// wait for all executing processes to settle.
Clock::pause();
Clock::settle();
Clock::resume();
Log::Reader reader(log);
Future<Log::Position> beginning = reader.beginning();
Future<Log::Position> ending = reader.ending();
AWAIT_READY(beginning);
AWAIT_READY(ending);
Future<list<Log::Entry>> entries = reader.read(beginning.get(), ending.get());
AWAIT_READY(entries);
// Convert each Log::Entry to an Operation.
vector<Operation> operations;
foreach (const Log::Entry& entry, entries.get()) {
// Parse the Operation from the Log::Entry.
Operation operation;
google::protobuf::io::ArrayInputStream stream(
entry.data.data(),
entry.data.size());
ASSERT_TRUE(operation.ParseFromZeroCopyStream(&stream));
operations.push_back(operation);
}
ASSERT_EQ(2u, operations.size());
EXPECT_EQ(Operation::SNAPSHOT, operations[0].type());
EXPECT_EQ(Operation::DIFF, operations[1].type());
}
Example 14: exec
// The purpose of this test is to ensure that when slaves are removed
// from the master, and then attempt to send status updates, we send
// a ShutdownMessage to the slave. Why? Because during a network
// partition, the master will remove a partitioned slave, thus sending
// its tasks to LOST. At this point, when the partition is removed,
// the slave may attempt to send updates if it was unaware that the
// master removed it. We've already notified frameworks that these
// tasks were LOST, so we have to have the slave shut down.
TEST_F(PartitionTest, PartitionedSlaveStatusUpdates)
{
master::Flags masterFlags = CreateMasterFlags();
Try<Owned<cluster::Master>> master = StartMaster(masterFlags);
ASSERT_SOME(master);
// Allow the master to PING the slave, but drop all PONG messages
// from the slave. Note that we don't match on the master / slave
// PIDs because it's actually the SlaveObserver Process that sends
// the pings.
Future<Message> ping = FUTURE_MESSAGE(
Eq(PingSlaveMessage().GetTypeName()), _, _);
DROP_PROTOBUFS(PongSlaveMessage(), _, _);
Future<SlaveRegisteredMessage> slaveRegisteredMessage =
FUTURE_PROTOBUF(SlaveRegisteredMessage(), _, _);
MockExecutor exec(DEFAULT_EXECUTOR_ID);
TestContainerizer containerizer(&exec);
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), &containerizer);
ASSERT_SOME(slave);
AWAIT_READY(slaveRegisteredMessage);
SlaveID slaveId = slaveRegisteredMessage.get().slave_id();
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillRepeatedly(Return());
driver.start();
AWAIT_READY(frameworkId);
// Drop the first shutdown message from the master (simulated
// partition), allow the second shutdown message to pass when
// the slave sends an update.
Future<ShutdownMessage> shutdownMessage =
DROP_PROTOBUF(ShutdownMessage(), _, slave.get()->pid);
EXPECT_CALL(sched, offerRescinded(&driver, _))
.WillRepeatedly(Return());
Future<Nothing> slaveLost;
EXPECT_CALL(sched, slaveLost(&driver, _))
.WillOnce(FutureSatisfy(&slaveLost));
Clock::pause();
// Now, induce a partition of the slave by having the master
// timeout the slave.
size_t pings = 0;
while (true) {
AWAIT_READY(ping);
pings++;
if (pings == masterFlags.max_slave_ping_timeouts) {
break;
}
ping = FUTURE_MESSAGE(Eq(PingSlaveMessage().GetTypeName()), _, _);
Clock::advance(masterFlags.slave_ping_timeout);
Clock::settle();
}
Clock::advance(masterFlags.slave_ping_timeout);
Clock::settle();
// Wait for the master to attempt to shut down the slave.
AWAIT_READY(shutdownMessage);
// The master will notify the framework that the slave was lost.
AWAIT_READY(slaveLost);
shutdownMessage = FUTURE_PROTOBUF(ShutdownMessage(), _, slave.get()->pid);
// At this point, the slave still thinks it's registered, so we
// simulate a status update coming from the slave.
TaskID taskId;
taskId.set_value("task_id");
const StatusUpdate& update = protobuf::createStatusUpdate(
frameworkId.get(),
slaveId,
taskId,
TASK_RUNNING,
//......... part of the code omitted here .........
Example 15: Failure
Future<Nothing> CopyFetcherPlugin::fetch(
const URI& uri,
const string& directory) const
{
// TODO(jojy): Validate the given URI.
if (!uri.has_path()) {
return Failure("URI path is not specified");
}
// TODO(jojy): Verify that the path is a file.
Try<Nothing> mkdir = os::mkdir(directory);
if (mkdir.isError()) {
return Failure(
"Failed to create directory '" +
directory + "': " + mkdir.error());
}
VLOG(1) << "Copying '" << uri.path() << "' to '" << directory << "'";
const vector<string> argv = {"cp", "-a", uri.path(), directory};
Try<Subprocess> s = subprocess(
"cp",
argv,
Subprocess::PATH(os::DEV_NULL),
Subprocess::PIPE(),
Subprocess::PIPE());
if (s.isError()) {
return Failure("Failed to exec the copy subprocess: " + s.error());
}
return await(
s.get().status(),
io::read(s.get().out().get()),
io::read(s.get().err().get()))
.then([](const tuple<
Future<Option<int>>,
Future<string>,
Future<string>>& t) -> Future<Nothing> {
Future<Option<int>> status = std::get<0>(t);
if (!status.isReady()) {
return Failure(
"Failed to get the exit status of the copy subprocess: " +
(status.isFailed() ? status.failure() : "discarded"));
}
if (status->isNone()) {
return Failure("Failed to reap the copy subprocess");
}
if (status->get() != 0) {
Future<string> error = std::get<2>(t);
if (!error.isReady()) {
return Failure(
"Failed to perform 'copy'. Reading stderr failed: " +
(error.isFailed() ? error.failure() : "discarded"));
}
return Failure("Failed to perform 'copy': " + error.get());
}
return Nothing();
});
}