This page collects typical, real-world usage examples of the C++ method Try::get. If you are unsure what Try::get does or how to call it, the snippets below should help; you can also look further into the Try class itself for context.
A total of 15 Try::get code examples are shown below, sorted by popularity by default.
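Before the examples, here is a minimal, self-contained sketch of the pattern every snippet below relies on: a fallible operation returns a Try<T>, the caller checks isError() (or isSome()) first, and only then calls get(); error() carries the failure message. This sketch is not taken from any of the examples; it assumes the stout headers shipped with the Mesos source tree (<stout/try.hpp>, <stout/numify.hpp>) are on the include path, and the port string is just an illustrative value.
#include <iostream>
#include <string>

#include <stout/numify.hpp>
#include <stout/try.hpp>

int main()
{
  // numify<int> returns a Try<int>: either a parsed value or an error string.
  Try<int> port = numify<int>("5050");

  // error() is only meaningful when the Try holds an error, so check first.
  if (port.isError()) {
    std::cerr << "Failed to parse port: " << port.error() << std::endl;
    return 1;
  }

  // get() is only safe once we know the Try holds a value; the tests below
  // establish the same invariant via ASSERT_SOME/CHECK_SOME before calling it.
  std::cout << "Parsed port: " << port.get() << std::endl;
  return 0;
}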
Example 1: frameworkId
// This test verifies that executor API and operator API calls receive an
// unsuccessful response if the request contains a properly-signed
// authentication token with invalid claims.
TEST_F(ExecutorAuthorizationTest, FailedApiCalls)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
// Start an agent with permissive ACLs so that a task can be launched and the
// local authorizer's implicit executor authorization will be performed.
ACLs acls;
acls.set_permissive(true);
slave::Flags flags = CreateSlaveFlags();
flags.acls = acls;
Owned<MasterDetector> detector = master.get()->createDetector();
v1::Resources resources =
v1::Resources::parse("cpus:0.1;mem:32;disk:32").get();
v1::ExecutorInfo executorInfo;
executorInfo.set_type(v1::ExecutorInfo::DEFAULT);
executorInfo.mutable_executor_id()->CopyFrom(v1::DEFAULT_EXECUTOR_ID);
executorInfo.mutable_resources()->CopyFrom(resources);
auto executor = std::make_shared<v1::MockHTTPExecutor>();
Owned<TestContainerizer> containerizer(new TestContainerizer(
devolve(executorInfo.executor_id()), executor));
Try<Owned<cluster::Slave>> slave =
this->StartSlave(detector.get(), containerizer.get(), flags);
ASSERT_SOME(slave);
auto scheduler = std::make_shared<v1::MockHTTPScheduler>();
Future<Nothing> connected;
EXPECT_CALL(*scheduler, connected(_))
.WillOnce(FutureSatisfy(&connected));
v1::scheduler::TestMesos mesos(
master.get()->pid,
ContentType::PROTOBUF,
scheduler);
AWAIT_READY(connected);
Future<v1::scheduler::Event::Subscribed> frameworkSubscribed;
EXPECT_CALL(*scheduler, subscribed(_, _))
.WillOnce(FutureArg<1>(&frameworkSubscribed));
Future<v1::scheduler::Event::Offers> offers;
EXPECT_CALL(*scheduler, offers(_, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
EXPECT_CALL(*scheduler, heartbeat(_))
.WillRepeatedly(Return()); // Ignore heartbeats.
mesos.send(v1::createCallSubscribe(v1::DEFAULT_FRAMEWORK_INFO));
AWAIT_READY(frameworkSubscribed);
v1::FrameworkID frameworkId(frameworkSubscribed->framework_id());
executorInfo.mutable_framework_id()->CopyFrom(frameworkId);
AWAIT_READY(offers);
ASSERT_FALSE(offers->offers().empty());
Future<v1::executor::Mesos*> executorLib;
EXPECT_CALL(*executor, connected(_))
.WillOnce(FutureArg<0>(&executorLib));
const v1::Offer& offer = offers->offers(0);
const v1::AgentID& agentId = offer.agent_id();
{
v1::scheduler::Call call;
call.mutable_framework_id()->CopyFrom(frameworkId);
call.set_type(v1::scheduler::Call::ACCEPT);
v1::scheduler::Call::Accept* accept = call.mutable_accept();
accept->add_offer_ids()->CopyFrom(offer.id());
v1::Offer::Operation* operation = accept->add_operations();
operation->set_type(v1::Offer::Operation::LAUNCH_GROUP);
v1::TaskInfo taskInfo =
v1::createTask(agentId, resources, SLEEP_COMMAND(1000));
v1::TaskGroupInfo taskGroup;
taskGroup.add_tasks()->CopyFrom(taskInfo);
v1::Offer::Operation::LaunchGroup* launchGroup =
operation->mutable_launch_group();
launchGroup->mutable_executor()->CopyFrom(executorInfo);
launchGroup->mutable_task_group()->CopyFrom(taskGroup);
//......... the rest of this example's code has been omitted .........
Example 2: driver
// This test verifies that the sandbox environment variable
// (i.e., MESOS_SANDBOX) is set properly.
TEST_F(LinuxFilesystemIsolatorMesosTest, ROOT_SandboxEnvironmentVariable)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
string registry = path::join(sandbox.get(), "registry");
AWAIT_READY(DockerArchive::create(registry, "test_image"));
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "filesystem/linux,docker/runtime";
flags.docker_registry = registry;
flags.docker_store_dir = path::join(sandbox.get(), "store");
flags.image_providers = "docker";
Owned<MasterDetector> detector = master.get()->createDetector();
Try<Owned<cluster::Slave>> slave = StartSlave(detector.get(), flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched,
DEFAULT_FRAMEWORK_INFO,
master.get()->pid,
DEFAULT_CREDENTIAL);
EXPECT_CALL(sched, registered(&driver, _, _));
Future<vector<Offer>> offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(offers);
ASSERT_FALSE(offers->empty());
const Offer& offer = offers.get()[0];
TaskInfo task = createTask(
offer.slave_id(),
offer.resources(),
strings::format(
"if [ \"$MESOS_SANDBOX\" != \"%s\" ]; then exit 1; fi &&"
"if [ ! -d \"$MESOS_SANDBOX\" ]; then exit 1; fi",
flags.sandbox_directory).get());
task.mutable_container()->CopyFrom(createContainerInfo("test_image"));
driver.launchTasks(offer.id(), {task});
Future<TaskStatus> statusStarting;
Future<TaskStatus> statusRunning;
Future<TaskStatus> statusFinished;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&statusStarting))
.WillOnce(FutureArg<1>(&statusRunning))
.WillOnce(FutureArg<1>(&statusFinished));
AWAIT_READY(statusStarting);
EXPECT_EQ(TASK_STARTING, statusStarting->state());
AWAIT_READY(statusRunning);
EXPECT_EQ(TASK_RUNNING, statusRunning->state());
AWAIT_READY(statusFinished);
EXPECT_EQ(TASK_FINISHED, statusFinished->state());
driver.stop();
driver.join();
}
Example 3: Failure
Future<ExecutorInfo> ExternalContainerizerProcess::_launch(
const ContainerID& containerId,
const FrameworkID& frameworkId,
const ExecutorInfo executorInfo,
const SlaveID& slaveId,
bool checkpoint,
const Future<ResultFutures>& future)
{
VLOG(1) << "Launch callback triggered on container '" << containerId << "'";
if (!containers.contains(containerId)) {
return Failure("Container '" + containerId.value() + "' not running");
}
string result;
Try<bool> support = commandSupported(future, result);
if (support.isError()) {
terminate(containerId);
return Failure(support.error());
}
if (!support.get()) {
// We generally need to use an internal implementation in these
// cases.
// For the specific case of a launch, however, there cannot be an
// internal implementation for an external containerizer, hence
// we need to fail or even abort at this point.
// TODO(tillt): Consider using posix-isolator as a fall back.
terminate(containerId);
return Failure("External containerizer does not support launch");
}
VLOG(1) << "Launch supported by external containerizer";
ExternalStatus ps;
if (!ps.ParseFromString(result)) {
// TODO(tillt): Consider not terminating the containerizer due
// to a protocol breach but only failing the operation.
terminate(containerId);
return Failure("Could not parse launch result protobuf (error: "
+ protobufError(ps) + ")");
}
VLOG(2) << "Launch result: '" << ps.message() << "'";
VLOG(2) << "Executor pid: " << ps.pid();
containers[containerId]->pid = ps.pid();
// Observe the executor process and install a callback for status
// changes.
process::reap(ps.pid())
.onAny(defer(
PID<ExternalContainerizerProcess>(this),
&ExternalContainerizerProcess::reaped,
containerId,
lambda::_1));
// Checkpoint the container's pid if requested.
if (checkpoint) {
const string& path = slave::paths::getForkedPidPath(
slave::paths::getMetaRootDir(flags.work_dir),
slaveId,
frameworkId,
executorInfo.executor_id(),
containerId);
LOG(INFO) << "Checkpointing containerized executor '" << containerId
<< "' pid " << ps.pid() << " to '" << path << "'";
Try<Nothing> checkpointed =
slave::state::checkpoint(path, stringify(ps.pid()));
if (checkpointed.isError()) {
terminate(containerId);
return Failure("Failed to checkpoint containerized executor '"
+ containerId.value() + "' pid " + stringify(ps.pid()) + " to '" + path
+ "'");
}
}
VLOG(1) << "Launch finishing up for container '" << containerId << "'";
return executorInfo;
}
Example 4: Megabytes
TYPED_TEST(MemIsolatorTest, MemUsage)
{
Flags flags;
Try<Isolator*> isolator = TypeParam::create(flags);
CHECK_SOME(isolator);
// A PosixLauncher is sufficient even when testing a cgroups isolator.
Try<Launcher*> launcher = PosixLauncher::create(flags);
ExecutorInfo executorInfo;
executorInfo.mutable_resources()->CopyFrom(
Resources::parse("mem:1024").get());
ContainerID containerId;
containerId.set_value("memory_usage");
AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));
int pipes[2];
ASSERT_NE(-1, ::pipe(pipes));
lambda::function<int()> inChild = lambda::bind(
&consumeMemory,
Megabytes(256),
Seconds(10),
pipes);
Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
ASSERT_SOME(pid);
// Set up the reaper to wait on the forked child.
Future<Option<int> > status = process::reap(pid.get());
// Continue in the parent.
::close(pipes[0]);
// Isolate the forked child.
AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));
// Now signal the child to continue.
int buf;
ASSERT_LT(0, ::write(pipes[1], &buf, sizeof(buf)));
::close(pipes[1]);
// Wait up to 5 seconds for the child process to consume 256 MB of memory.
ResourceStatistics statistics;
Bytes threshold = Megabytes(256);
Duration waited = Duration::zero();
do {
Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
AWAIT_READY(usage);
statistics = usage.get();
// If we meet our usage expectations, we're done!
if (statistics.mem_rss_bytes() >= threshold.bytes()) {
break;
}
os::sleep(Seconds(1));
waited += Seconds(1);
} while (waited < Seconds(5));
EXPECT_LE(threshold.bytes(), statistics.mem_rss_bytes());
// Ensure all processes are killed.
AWAIT_READY(launcher.get()->destroy(containerId));
// Make sure the child was reaped.
AWAIT_READY(status);
// Let the isolator clean up.
AWAIT_READY(isolator.get()->cleanup(containerId));
delete isolator.get();
delete launcher.get();
}
Example 5: upid
TEST(Metrics, Snapshot)
{
ASSERT_TRUE(GTEST_IS_THREADSAFE);
UPID upid("metrics", process::address());
Clock::pause();
// Add a gauge and a counter.
GaugeProcess process;
PID<GaugeProcess> pid = spawn(&process);
ASSERT_TRUE(pid);
Gauge gauge("test/gauge", defer(pid, &GaugeProcess::get));
Gauge gaugeFail("test/gauge_fail", defer(pid, &GaugeProcess::fail));
Counter counter("test/counter");
AWAIT_READY(metrics::add(gauge));
AWAIT_READY(metrics::add(gaugeFail));
AWAIT_READY(metrics::add(counter));
// Advance the clock to avoid rate limit.
Clock::advance(Seconds(1));
// Get the snapshot.
Future<Response> response = http::get(upid, "snapshot");
AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
// Parse the response.
Try<JSON::Object> responseJSON =
JSON::parse<JSON::Object>(response.get().body);
ASSERT_SOME(responseJSON);
map<string, JSON::Value> values = responseJSON.get().values;
EXPECT_EQ(1u, values.count("test/counter"));
EXPECT_FLOAT_EQ(0.0, values["test/counter"].as<JSON::Number>().value);
EXPECT_EQ(1u, values.count("test/gauge"));
EXPECT_FLOAT_EQ(42.0, values["test/gauge"].as<JSON::Number>().value);
EXPECT_EQ(0u, values.count("test/gauge_fail"));
// Remove the metrics and ensure they are no longer in the snapshot.
AWAIT_READY(metrics::remove(gauge));
AWAIT_READY(metrics::remove(gaugeFail));
AWAIT_READY(metrics::remove(counter));
// Advance the clock to avoid rate limit.
Clock::advance(Seconds(1));
// Ensure MetricsProcess has removed the metrics.
Clock::settle();
response = http::get(upid, "snapshot");
AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
// Parse the response.
responseJSON = JSON::parse<JSON::Object>(response.get().body);
ASSERT_SOME(responseJSON);
values = responseJSON.get().values;
EXPECT_EQ(0u, values.count("test/counter"));
EXPECT_EQ(0u, values.count("test/gauge"));
EXPECT_EQ(0u, values.count("test/gauge_fail"));
terminate(process);
wait(process);
}
Example 6: driver
// Tests the teardown route with an authorization header and good credentials.
TEST_F(TeardownTest, Success)
{
Try<Owned<cluster::Master>> master = StartMaster();
ASSERT_SOME(master);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get()->pid, DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
ASSERT_EQ(DRIVER_RUNNING, driver.start());
AWAIT_READY(frameworkId);
{
Future<Response> response = process::http::post(
master.get()->pid,
"teardown",
createBasicAuthHeaders(DEFAULT_CREDENTIAL),
"frameworkId=" + frameworkId.get().value());
AWAIT_READY(response);
AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
}
// Check that the framework that was shut down appears in the
// "completed_frameworks" list in the master's "/state" endpoint.
{
Future<Response> response = process::http::get(
master.get()->pid,
"state",
None(),
createBasicAuthHeaders(DEFAULT_CREDENTIAL));
AWAIT_EXPECT_RESPONSE_STATUS_EQ(OK().status, response);
AWAIT_EXPECT_RESPONSE_HEADER_EQ(APPLICATION_JSON, "Content-Type", response);
Try<JSON::Object> parse = JSON::parse<JSON::Object>(response.get().body);
ASSERT_SOME(parse);
JSON::Array frameworks = parse->values["frameworks"].as<JSON::Array>();
EXPECT_TRUE(frameworks.values.empty());
JSON::Array completedFrameworks =
parse->values["completed_frameworks"].as<JSON::Array>();
ASSERT_EQ(1u, completedFrameworks.values.size());
JSON::Object completedFramework =
completedFrameworks.values.front().as<JSON::Object>();
JSON::String completedFrameworkId =
completedFramework.values["id"].as<JSON::String>();
EXPECT_EQ(frameworkId.get(), completedFrameworkId.value);
}
driver.stop();
driver.join();
}
Example 7: Error
Try<RunState> RunState::recover(
const string& rootDir,
const SlaveID& slaveId,
const FrameworkID& frameworkId,
const ExecutorID& executorId,
const UUID& uuid,
bool strict)
{
RunState state;
state.id = uuid;
string message;
// Find the tasks.
const Try<list<string> >& tasks = os::glob(strings::format(
paths::TASK_PATH,
rootDir,
slaveId,
frameworkId,
executorId,
uuid.toString(),
"*").get());
if (tasks.isError()) {
return Error("Failed to find tasks for executor run " + uuid.toString() +
": " + tasks.error());
}
// Recover tasks.
foreach (const string& path, tasks.get()) {
TaskID taskId;
taskId.set_value(os::basename(path).get());
const Try<TaskState>& task = TaskState::recover(
rootDir, slaveId, frameworkId, executorId, uuid, taskId, strict);
if (task.isError()) {
return Error(
"Failed to recover task " + taskId.value() + ": " + task.error());
}
state.tasks[taskId] = task.get();
}
// Read the forked pid.
string path = paths::getForkedPidPath(
rootDir, slaveId, frameworkId, executorId, uuid);
Try<string> pid = os::read(path);
if (pid.isError()) {
message = "Failed to read executor's forked pid from '" + path +
"': " + pid.error();
if (strict) {
return Error(message);
} else {
LOG(WARNING) << message;
return state;
}
}
Try<pid_t> forkedPid = numify<pid_t>(pid.get());
if (forkedPid.isError()) {
return Error("Failed to parse forked pid " + pid.get() +
": " + forkedPid.error());
}
state.forkedPid = forkedPid.get();
// Read the libprocess pid.
path = paths::getLibprocessPidPath(
rootDir, slaveId, frameworkId, executorId, uuid);
pid = os::read(path);
if (pid.isError()) {
message = "Failed to read executor's libprocess pid from '" + path +
"': " + pid.error();
if (strict) {
return Error(message);
} else {
LOG(WARNING) << message;
return state;
}
}
state.libprocessPid = process::UPID(pid.get());
return state;
}
Example 8: Error
Try<Manifest> Manifest::create(const string& jsonString)
{
Try<JSON::Object> manifestJSON = JSON::parse<JSON::Object>(jsonString);
if (manifestJSON.isError()) {
return Error(manifestJSON.error());
}
Result<JSON::String> name = manifestJSON.get().find<JSON::String>("name");
if (name.isNone()) {
return Error("Failed to find \"name\" in manifest response");
}
Result<JSON::Array> fsLayersJSON =
manifestJSON.get().find<JSON::Array>("fsLayers");
if (fsLayersJSON.isNone()) {
return Error("Failed to find \"fsLayers\" in manifest response");
}
Result<JSON::Array> historyArray =
manifestJSON.get().find<JSON::Array>("history");
if (historyArray.isNone()) {
return Error("Failed to find \"history\" in manifest response");
}
if (historyArray.get().values.size() != fsLayersJSON.get().values.size()) {
return Error(
"\"history\" and \"fsLayers\" array count mismatch"
"in manifest response");
}
vector<FileSystemLayerInfo> fsLayers;
// We add layers in reverse order because 'fsLayers' in the manifest
// response is ordered with the latest layer on the top. When we apply the
// layer changes, we want the filesystem modification order to be the same
// as its history (oldest layer applied first).
for (size_t index = fsLayersJSON.get().values.size(); index-- > 0; ) {
const JSON::Value& layer = fsLayersJSON.get().values[index];
if (!layer.is<JSON::Object>()) {
return Error(
"Failed to parse layer as a JSON object for index: " +
stringify(index));
}
const JSON::Object& layerInfoJSON = layer.as<JSON::Object>();
// Get blobsum for layer.
const Result<JSON::String> blobSumInfo =
layerInfoJSON.find<JSON::String>("blobSum");
if (blobSumInfo.isNone()) {
return Error("Failed to find \"blobSum\" in manifest response");
}
// Get history for layer.
if (!historyArray.get().values[index].is<JSON::Object>()) {
return Error(
"Failed to parse history as a JSON object for index: " +
stringify(index));
}
const JSON::Object& historyObj =
historyArray.get().values[index].as<JSON::Object>();
// Get layer id.
const Result<JSON::String> v1CompatibilityJSON =
historyObj.find<JSON::String>("v1Compatibility");
if (!v1CompatibilityJSON.isSome()) {
return Error(
"Failed to obtain layer v1 compability json in manifest for layer: "
+ stringify(index));
}
Try<JSON::Object> v1CompatibilityObj =
JSON::parse<JSON::Object>(v1CompatibilityJSON.get().value);
if (!v1CompatibilityObj.isSome()) {
return Error(
"Failed to parse v1 compability json in manifest for layer: "
+ stringify(index));
}
const Result<JSON::String> id =
v1CompatibilityObj.get().find<JSON::String>("id");
if (!id.isSome()) {
return Error(
"Failed to find \"id\" in manifest for layer: " + stringify(index));
}
fsLayers.emplace_back(
FileSystemLayerInfo{
blobSumInfo.get().value,
id.get().value,
});
}
//......... the rest of this example's code has been omitted .........
Example 9: driver
TYPED_TEST(IsolatorTest, Usage)
{
Try<PID<Master> > master = this->StartMaster();
ASSERT_SOME(master);
TypeParam isolator;
slave::Flags flags = this->CreateSlaveFlags();
Try<PID<Slave> > slave = this->StartSlave(&isolator, flags);
ASSERT_SOME(slave);
MockScheduler sched;
MesosSchedulerDriver driver(
&sched, DEFAULT_FRAMEWORK_INFO, master.get(), DEFAULT_CREDENTIAL);
Future<FrameworkID> frameworkId;
EXPECT_CALL(sched, registered(&driver, _, _))
.WillOnce(FutureArg<1>(&frameworkId));
Future<vector<Offer> > offers;
EXPECT_CALL(sched, resourceOffers(&driver, _))
.WillOnce(FutureArg<1>(&offers))
.WillRepeatedly(Return()); // Ignore subsequent offers.
driver.start();
AWAIT_READY(frameworkId);
AWAIT_READY(offers);
EXPECT_NE(0u, offers.get().size());
TaskInfo task;
task.set_name("isolator_test");
task.mutable_task_id()->set_value("1");
task.mutable_slave_id()->MergeFrom(offers.get()[0].slave_id());
task.mutable_resources()->MergeFrom(offers.get()[0].resources());
Resources resources(offers.get()[0].resources());
Option<Bytes> mem = resources.mem();
ASSERT_SOME(mem);
Option<double> cpus = resources.cpus();
ASSERT_SOME(cpus);
const std::string& file = path::join(flags.work_dir, "ready");
// This task induces user/system CPU load by running 'top' in a
// child process.
task.mutable_command()->set_value(
#ifdef __APPLE__
// Use logging mode with 30,000 samples with no interval.
"top -l 30000 -s 0 2>&1 > /dev/null & "
#else
// Batch mode, with 30,000 samples with no interval.
"top -b -d 0 -n 30000 2>&1 > /dev/null & "
#endif
"touch " + file + "; " // Signals that the top command is running.
"sleep 60");
vector<TaskInfo> tasks;
tasks.push_back(task);
Future<TaskStatus> status;
EXPECT_CALL(sched, statusUpdate(&driver, _))
.WillOnce(FutureArg<1>(&status));
driver.launchTasks(offers.get()[0].id(), tasks);
AWAIT_READY(status);
EXPECT_EQ(TASK_RUNNING, status.get().state());
// Wait for the task to begin inducing cpu time.
while (!os::exists(file));
ExecutorID executorId;
executorId.set_value(task.task_id().value());
// We'll wait up to 10 seconds for the child process to induce
// 1/8 of a second of user and system cpu time in total.
// TODO(bmahler): Also induce rss memory consumption, by re-using
// the balloon framework.
ResourceStatistics statistics;
Duration waited = Duration::zero();
do {
Future<ResourceStatistics> usage =
process::dispatch(
(Isolator*) &isolator, // TODO(benh): Fix after reaper changes.
&Isolator::usage,
frameworkId.get(),
executorId);
AWAIT_READY(usage);
statistics = usage.get();
// If we meet our usage expectations, we're done!
if (statistics.cpus_user_time_secs() >= 0.125 &&
statistics.cpus_system_time_secs() >= 0.125 &&
statistics.mem_rss_bytes() >= 1024u) {
//......... the rest of this example's code has been omitted .........
Example 10: while
TEST(IOTest, Redirect)
{
ASSERT_TRUE(GTEST_IS_THREADSAFE);
// Start by checking that using "invalid" file descriptors fails.
AWAIT_EXPECT_FAILED(io::redirect(-1, 0));
AWAIT_EXPECT_FAILED(io::redirect(0, -1));
// Create a temporary file for redirecting into.
Try<string> path = os::mktemp();
ASSERT_SOME(path);
Try<int> fd = os::open(
path.get(),
O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC,
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
ASSERT_SOME(fd);
ASSERT_SOME(os::nonblock(fd.get()));
// Use a nonblocking pipe for doing the redirection.
int pipes[2];
ASSERT_NE(-1, ::pipe(pipes));
ASSERT_SOME(os::nonblock(pipes[0]));
ASSERT_SOME(os::nonblock(pipes[1]));
// Now write data to the pipe and splice to the file.
string data =
"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do "
"eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim "
"ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut "
"aliquip ex ea commodo consequat. Duis aute irure dolor in "
"reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla "
"pariatur. Excepteur sint occaecat cupidatat non proident, sunt in "
"culpa qui officia deserunt mollit anim id est laborum.";
// Create more data!
while (Bytes(data.size()) < Megabytes(1)) {
data.append(data);
}
Future<Nothing> redirect = io::redirect(pipes[0], fd.get());
// Closing the read end of the pipe and the file should not have any
// impact as we dup the file descriptor.
ASSERT_SOME(os::close(pipes[0]));
ASSERT_SOME(os::close(fd.get()));
EXPECT_TRUE(redirect.isPending());
// Writing the data should keep the future pending as it hasn't seen
// EOF yet.
AWAIT_READY(io::write(pipes[1], data));
EXPECT_TRUE(redirect.isPending());
// Now closing the write pipe should cause an EOF on the read end,
// thus completing the underlying splice in io::redirect.
ASSERT_SOME(os::close(pipes[1]));
AWAIT_READY(redirect);
// Now make sure all the data is there!
Try<string> read = os::read(path.get());
ASSERT_SOME(read);
EXPECT_EQ(data, read.get());
}
Example 11: add
void FlagsBase::add(
Option<T> Flags::*option,
const std::string& name,
const std::string& help,
F validate)
{
// Don't bother adding anything if the pointer is NULL.
if (option == NULL) {
return;
}
Flags* flags = dynamic_cast<Flags*>(this);
if (flags == NULL) {
ABORT("Attempted to add flag '" + name + "' with incompatible type");
}
Flag flag;
flag.name = name;
flag.help = help;
flag.boolean = typeid(T) == typeid(bool);
// NOTE: See comment above in the Flags::T* overload of FlagsBase::add
// for why we need to pass FlagsBase* (or const FlagsBase&) as a
// parameter.
flag.load =
[option](FlagsBase* base, const std::string& value) -> Try<Nothing> {
Flags* flags = dynamic_cast<Flags*>(base);
if (flags != NULL) {
// NOTE: 'fetch' "retrieves" the value if necessary and then
// invokes 'parse'. See 'fetch' for more details.
Try<T> t = fetch<T>(value);
if (t.isSome()) {
flags->*option = Some(t.get());
} else {
return Error("Failed to load value '" + value + "': " + t.error());
}
}
return Nothing();
};
flag.stringify = [option](const FlagsBase& base) -> Option<std::string> {
const Flags* flags = dynamic_cast<const Flags*>(&base);
if (flags != NULL) {
if ((flags->*option).isSome()) {
return stringify((flags->*option).get());
}
}
return None();
};
flag.validate = [option, validate](const FlagsBase& base) -> Option<Error> {
const Flags* flags = dynamic_cast<const Flags*>(&base);
if (flags != NULL) {
return validate(flags->*option);
}
return None();
};
add(flag);
}
Example 12: main
int main(int argc, char** argv)
{
GOOGLE_PROTOBUF_VERIFY_VERSION;
master::Flags flags;
// The following flags are executable specific (e.g., since we only
// have one instance of libprocess per execution, we only want to
// advertise the IP and port option once, here).
Option<string> ip;
flags.add(&ip,
"ip",
"IP address to listen on. This cannot be used in conjunction\n"
"with `--ip_discovery_command`.");
uint16_t port;
flags.add(&port,
"port",
"Port to listen on.",
MasterInfo().port());
Option<string> advertise_ip;
flags.add(&advertise_ip,
"advertise_ip",
"IP address advertised to reach this Mesos master.\n"
"The master does not bind using this IP address.\n"
"However, this IP address may be used to access this master.");
Option<string> advertise_port;
flags.add(&advertise_port,
"advertise_port",
"Port advertised to reach Mesos master (along with\n"
"`advertise_ip`). The master does not bind to this port.\n"
"However, this port (along with `advertise_ip`) may be used to\n"
"access this master.");
Option<string> zk;
flags.add(&zk,
"zk",
"ZooKeeper URL (used for leader election amongst masters)\n"
"May be one of:\n"
" `zk://host1:port1,host2:port2,.../path`\n"
" `zk://username:[email protected]:port1,host2:port2,.../path`\n"
" `file:///path/to/file` (where file contains one of the above)\n"
"NOTE: Not required if master is run in standalone mode (non-HA).");
// Optional IP discovery script that will set the Master IP.
// If set, its output is expected to be a valid parseable IP string.
Option<string> ip_discovery_command;
flags.add(&ip_discovery_command,
"ip_discovery_command",
"Optional IP discovery binary: if set, it is expected to emit\n"
"the IP address which the master will try to bind to.\n"
"Cannot be used in conjunction with `--ip`.");
Try<Nothing> load = flags.load("MESOS_", argc, argv);
if (load.isError()) {
cerr << flags.usage(load.error()) << endl;
return EXIT_FAILURE;
}
if (flags.version) {
version();
return EXIT_SUCCESS;
}
if (flags.help) {
cout << flags.usage() << endl;
return EXIT_SUCCESS;
}
// Initialize modules. Note that since other subsystems may depend
// upon modules, we should initialize modules before anything else.
if (flags.modules.isSome()) {
Try<Nothing> result = ModuleManager::load(flags.modules.get());
if (result.isError()) {
EXIT(EXIT_FAILURE) << "Error loading modules: " << result.error();
}
}
// Initialize hooks.
if (flags.hooks.isSome()) {
Try<Nothing> result = HookManager::initialize(flags.hooks.get());
if (result.isError()) {
EXIT(EXIT_FAILURE) << "Error installing hooks: " << result.error();
}
}
if (ip_discovery_command.isSome() && ip.isSome()) {
EXIT(EXIT_FAILURE) << flags.usage(
"Only one of `--ip` or `--ip_discovery_command` should be specified");
}
if (ip_discovery_command.isSome()) {
Try<string> ipAddress = os::shell(ip_discovery_command.get());
if (ipAddress.isError()) {
EXIT(EXIT_FAILURE) << ipAddress.error();
}
//......... the rest of this example's code has been omitted .........
Example 13: Group
// Tests whether a slave correctly detects the new master when its
// ZooKeeper session is expired and a new master is elected before the
// slave reconnects with ZooKeeper.
TEST_F(ZooKeeperMasterContenderDetectorTest,
MasterDetectorExpireSlaveZKSessionNewMaster)
{
Try<zookeeper::URL> url = zookeeper::URL::parse(
"zk://" + server->connectString() + "/mesos");
ASSERT_SOME(url);
// Simulate a leading master.
Owned<zookeeper::Group> leaderGroup(
new Group(url.get(), MASTER_CONTENDER_ZK_SESSION_TIMEOUT));
// 1. Simulate a leading contender.
ZooKeeperMasterContender leaderContender(leaderGroup);
ZooKeeperMasterDetector leaderDetector(leaderGroup);
PID<Master> leader;
leader.ip = 10000000;
leader.port = 10000;
leaderContender.initialize(leader);
Future<Future<Nothing> > contended = leaderContender.contend();
AWAIT_READY(contended);
Future<Option<UPID> > detected = leaderDetector.detect(None());
AWAIT_READY(detected);
EXPECT_SOME_EQ(leader, detected.get());
// 2. Simulate a non-leading contender.
Owned<zookeeper::Group> followerGroup(
new Group(url.get(), MASTER_CONTENDER_ZK_SESSION_TIMEOUT));
ZooKeeperMasterContender followerContender(followerGroup);
ZooKeeperMasterDetector followerDetector(followerGroup);
PID<Master> follower;
follower.ip = 10000001;
follower.port = 10001;
followerContender.initialize(follower);
contended = followerContender.contend();
AWAIT_READY(contended);
detected = followerDetector.detect(None());
EXPECT_SOME_EQ(leader, detected.get());
// 3. Simulate a non-contender.
Owned<zookeeper::Group> nonContenderGroup(
new Group(url.get(), MASTER_DETECTOR_ZK_SESSION_TIMEOUT));
ZooKeeperMasterDetector nonContenderDetector(nonContenderGroup);
detected = nonContenderDetector.detect();
EXPECT_SOME_EQ(leader, detected.get());
detected = nonContenderDetector.detect(leader);
// Now expire the slave's and leading master's zk sessions.
// NOTE: Here we assume that the slave stays disconnected from
// ZooKeeper when the leading master loses its session.
Future<Option<int64_t> > slaveSession = nonContenderGroup->session();
AWAIT_READY(slaveSession);
Future<Option<int64_t> > masterSession = leaderGroup->session();
AWAIT_READY(masterSession);
server->expireSession(slaveSession.get().get());
server->expireSession(masterSession.get().get());
// Wait for session expiration and ensure a new master is detected.
AWAIT_READY(detected);
EXPECT_SOME_EQ(follower, detected.get());
}
Example 14: Failure
Future<size_t> RegistryClientProcess::getBlob(
const Image::Name& imageName,
const Option<string>& digest,
const Path& filePath)
{
Try<Nothing> mkdir = os::mkdir(filePath.dirname(), true);
if (mkdir.isError()) {
return Failure(
"Failed to create directory to download blob: " + mkdir.error());
}
const string blobURLPath = getRepositoryPath(imageName) + "/blobs/" +
digest.getOrElse("");
http::URL blobURL(registryServer_);
blobURL.path = blobURLPath;
return doHttpGet(blobURL, None(), true, true, None())
.then([this, blobURLPath, digest, filePath](
const http::Response& response) -> Future<size_t> {
Try<int> fd = os::open(
filePath.value,
O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC,
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd.isError()) {
return Failure("Failed to open file '" + filePath.value + "': " +
fd.error());
}
Try<Nothing> nonblock = os::nonblock(fd.get());
if (nonblock.isError()) {
Try<Nothing> close = os::close(fd.get());
if (close.isError()) {
LOG(WARNING) << "Failed to close the file descriptor for file '"
<< stringify(filePath) << "': " << close.error();
}
return Failure(
"Failed to set non-blocking mode for file: " + filePath.value);
}
// TODO(jojy): Add blob validation.
// TODO(jojy): Add check for max size.
Option<Pipe::Reader> reader = response.reader;
if (reader.isNone()) {
Try<Nothing> close = os::close(fd.get());
if (close.isError()) {
LOG(WARNING) << "Failed to close the file descriptor for file '"
<< stringify(filePath) << "': " << close.error();
}
return Failure("Failed to get streaming reader from blob response");
}
return saveBlob(fd.get(), reader.get())
.onAny([blobURLPath, digest, filePath, fd](
const Future<size_t>& future) {
Try<Nothing> close = os::close(fd.get());
if (close.isError()) {
LOG(WARNING) << "Failed to close the file descriptor for blob '"
<< stringify(filePath) << "': " << close.error();
}
if (future.isFailed()) {
LOG(WARNING) << "Failed to save blob requested from '"
<< blobURLPath << "' to path '"
<< stringify(filePath) << "': " << future.failure();
}
if (future.isDiscarded()) {
LOG(WARNING) << "Failed to save blob requested from '"
<< blobURLPath << "' to path '" << stringify(filePath)
<< "': future discarded";
}
});
});
}
Example 15: stringify
TEST(ProtobufTest, JSON)
{
tests::Message message;
message.set_b(true);
message.set_str("string");
message.set_bytes("bytes");
message.set_int32(-1);
message.set_int64(-1);
message.set_uint32(1);
message.set_uint64(1);
message.set_sint32(-1);
message.set_sint64(-1);
message.set_f(1.0);
message.set_d(1.0);
message.set_e(tests::ONE);
message.mutable_nested()->set_str("nested");
message.add_repeated_bool(true);
message.add_repeated_string("repeated_string");
message.add_repeated_bytes("repeated_bytes");
message.add_repeated_int32(-2);
message.add_repeated_int64(-2);
message.add_repeated_uint32(2);
message.add_repeated_uint64(2);
message.add_repeated_sint32(-2);
message.add_repeated_sint64(-2);
message.add_repeated_float(1.0);
message.add_repeated_double(1.0);
message.add_repeated_double(2.0);
message.add_repeated_enum(tests::TWO);
message.add_repeated_nested()->set_str("repeated_nested");
// TODO(bmahler): To dynamically generate a protobuf message,
// see the commented-out code below.
// DescriptorProto proto;
//
// proto.set_name("Message");
//
// FieldDescriptorProto* field = proto.add_field();
// field->set_name("str");
// field->set_type(FieldDescriptorProto::TYPE_STRING);
//
// const Descriptor* descriptor = proto.descriptor();
//
// DynamicMessageFactory factory;
// Message* message = factory.GetPrototype(descriptor);
//
// Reflection* message.getReflection();
// The keys are in alphabetical order.
string expected = strings::remove(
"{"
" \"b\": true,"
" \"bytes\": \"bytes\","
" \"d\": 1,"
" \"e\": \"ONE\","
" \"f\": 1,"
" \"int32\": -1,"
" \"int64\": -1,"
" \"nested\": { \"str\": \"nested\"},"
" \"optional_default\": 42,"
" \"repeated_bool\": [true],"
" \"repeated_bytes\": [\"repeated_bytes\"],"
" \"repeated_double\": [1, 2],"
" \"repeated_enum\": [\"TWO\"],"
" \"repeated_float\": [1],"
" \"repeated_int32\": [-2],"
" \"repeated_int64\": [-2],"
" \"repeated_nested\": [ { \"str\": \"repeated_nested\" } ],"
" \"repeated_sint32\": [-2],"
" \"repeated_sint64\": [-2],"
" \"repeated_string\": [\"repeated_string\"],"
" \"repeated_uint32\": [2],"
" \"repeated_uint64\": [2],"
" \"sint32\": -1,"
" \"sint64\": -1,"
" \"str\": \"string\","
" \"uint32\": 1,"
" \"uint64\": 1"
"}",
" ");
JSON::Object object = JSON::Protobuf(message);
EXPECT_EQ(expected, stringify(object));
// Test parsing too.
Try<tests::Message> parse = protobuf::parse<tests::Message>(object);
ASSERT_SOME(parse);
EXPECT_EQ(object, JSON::Protobuf(parse.get()));
// Modify the message to test (de-)serialization of random bytes generated
// by UUID.
message.set_bytes(UUID::random().toBytes());
object = JSON::Protobuf(message);
// Test parsing too.
parse = protobuf::parse<tests::Message>(object);
ASSERT_SOME(parse);
//......... the rest of this example's code has been omitted .........