This article collects typical usage examples of the C++ ContainerID class, all drawn from the Apache Mesos codebase. If you are wondering what the ContainerID class does, how to use it, or what real-world code that uses it looks like, the curated examples below may help.
The following presents 15 code examples of the ContainerID class, sorted by popularity by default.
Example 1: TEST_F
// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
// Create provisioner.
slave::Flags flags;
flags.image_providers = "APPC";
flags.appc_store_dir = path::join(os::getcwd(), "store");
flags.image_provisioner_backend = "copy";
flags.work_dir = "work_dir";
Try<Owned<Provisioner>> provisioner1 = Provisioner::create(flags);
ASSERT_SOME(provisioner1);
Try<string> createImage = createTestImage(
flags.appc_store_dir,
getManifest());
ASSERT_SOME(createImage);
// Recover. This is when the image in the store is loaded.
AWAIT_READY(provisioner1.get()->recover({}, {}));
Image image;
image.mutable_appc()->CopyFrom(getTestImage());
ContainerID containerId;
containerId.set_value(UUID::random().toString());
Future<slave::ProvisionInfo> provisionInfo =
provisioner1.get()->provision(containerId, image);
AWAIT_READY(provisionInfo);
// Create a new provisioner to recover the state from the container.
Try<Owned<Provisioner>> provisioner2 = Provisioner::create(flags);
ASSERT_SOME(provisioner2);
mesos::slave::ContainerState state;
// Here we are using an ExecutorInfo in the ContainerState without a
// ContainerInfo. This is the situation where the Image is specified
// via --default_container_info so it's not part of the recovered
// ExecutorInfo.
state.mutable_container_id()->CopyFrom(containerId);
AWAIT_READY(provisioner2.get()->recover({state}, {}));
// It's possible for the user to provision two different rootfses
// from the same image.
AWAIT_READY(provisioner2.get()->provision(containerId, image));
string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);
string containerDir =
slave::provisioner::paths::getContainerDir(
provisionerDir,
containerId);
Try<hashmap<string, hashset<string>>> rootfses =
slave::provisioner::paths::listContainerRootfses(
provisionerDir,
containerId);
ASSERT_SOME(rootfses);
// Verify that the rootfs is successfully provisioned.
ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());
Future<bool> destroy = provisioner2.get()->destroy(containerId);
AWAIT_READY(destroy);
EXPECT_TRUE(destroy.get());
// The container directory is successfully cleaned up.
EXPECT_FALSE(os::exists(containerDir));
}
Example 2: RunState::recover
Try<RunState> RunState::recover(
const string& rootDir,
const SlaveID& slaveId,
const FrameworkID& frameworkId,
const ExecutorID& executorId,
const ContainerID& containerId,
bool strict)
{
RunState state;
state.id = containerId;
string message;
// See if the sentinel file exists. This is done first so it is
// known even if partial state is returned, e.g., if the libprocess
// pid file is not recovered. It indicates the slave removed the
// executor.
string path = paths::getExecutorSentinelPath(
rootDir, slaveId, frameworkId, executorId, containerId);
state.completed = os::exists(path);
// Find the tasks.
Try<list<string> > tasks = paths::getTaskPaths(
rootDir,
slaveId,
frameworkId,
executorId,
containerId);
if (tasks.isError()) {
return Error(
"Failed to find tasks for executor run " + containerId.value() +
": " + tasks.error());
}
// Recover tasks.
foreach (const string& path, tasks.get()) {
TaskID taskId;
taskId.set_value(Path(path).basename());
Try<TaskState> task = TaskState::recover(
rootDir, slaveId, frameworkId, executorId, containerId, taskId, strict);
if (task.isError()) {
return Error(
"Failed to recover task " + taskId.value() + ": " + task.error());
}
state.tasks[taskId] = task.get();
state.errors += task.get().errors;
}
// Read the forked pid.
path = paths::getForkedPidPath(
rootDir, slaveId, frameworkId, executorId, containerId);
if (!os::exists(path)) {
// This could happen if the slave died before the isolator
// checkpointed the forked pid.
LOG(WARNING) << "Failed to find executor forked pid file '" << path << "'";
return state;
}
Try<string> pid = os::read(path);
if (pid.isError()) {
message = "Failed to read executor forked pid from '" + path +
"': " + pid.error();
if (strict) {
return Error(message);
} else {
LOG(WARNING) << message;
state.errors++;
return state;
}
}
if (pid.get().empty()) {
// This could happen if the slave died after opening the file for
// writing but before it checkpointed anything.
LOG(WARNING) << "Found empty executor forked pid file '" << path << "'";
return state;
}
Try<pid_t> forkedPid = numify<pid_t>(pid.get());
if (forkedPid.isError()) {
return Error("Failed to parse forked pid " + pid.get() +
": " + forkedPid.error());
}
state.forkedPid = forkedPid.get();
// Read the libprocess pid.
path = paths::getLibprocessPidPath(
rootDir, slaveId, frameworkId, executorId, containerId);
if (!os::exists(path)) {
// This could happen if the slave died before the executor
// registered with the slave.
LOG(WARNING)
//......... the rest of the code is omitted here .........
Example 3: TEST_F
// This test verifies that a provisioner can recover the rootfs
// provisioned by a previous provisioner and then destroy it. Note
// that we use the copy backend in this test so Linux is not required.
TEST_F(ProvisionerAppcTest, Recover)
{
// Create provisioner.
slave::Flags flags;
flags.image_providers = "APPC";
flags.appc_store_dir = path::join(os::getcwd(), "store");
flags.image_provisioner_backend = COPY_BACKEND;
flags.work_dir = path::join(sandbox.get(), "work_dir");
Try<Owned<Provisioner>> provisioner = Provisioner::create(flags);
ASSERT_SOME(provisioner);
Try<string> createImage = createTestImage(
flags.appc_store_dir,
getManifest());
ASSERT_SOME(createImage);
// Recover. This is when the image in the store is loaded.
AWAIT_READY(provisioner.get()->recover({}));
Image image;
image.mutable_appc()->CopyFrom(getTestImage());
ContainerID containerId;
containerId.set_value(UUID::random().toString());
Future<slave::ProvisionInfo> provisionInfo =
provisioner.get()->provision(containerId, image);
AWAIT_READY(provisionInfo);
provisioner->reset();
// Create a new provisioner to recover the state from the container.
provisioner = Provisioner::create(flags);
ASSERT_SOME(provisioner);
AWAIT_READY(provisioner.get()->recover({containerId}));
// It's possible for the user to provision two different rootfses
// from the same image.
AWAIT_READY(provisioner.get()->provision(containerId, image));
string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);
string containerDir =
slave::provisioner::paths::getContainerDir(
provisionerDir,
containerId);
Try<hashmap<string, hashset<string>>> rootfses =
slave::provisioner::paths::listContainerRootfses(
provisionerDir,
containerId);
ASSERT_SOME(rootfses);
// Verify that the rootfs is successfully provisioned.
ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
EXPECT_EQ(2u, rootfses->get(flags.image_provisioner_backend)->size());
Future<bool> destroy = provisioner.get()->destroy(containerId);
AWAIT_READY(destroy);
EXPECT_TRUE(destroy.get());
// The container directory is successfully cleaned up.
EXPECT_FALSE(os::exists(containerDir));
}
Example 4: TEST
TEST(AgentCallValidationTest, LaunchNestedContainerSession)
{
// Missing `launch_nested_container_session`.
agent::Call call;
call.set_type(agent::Call::LAUNCH_NESTED_CONTAINER_SESSION);
Option<Error> error = validation::agent::call::validate(call);
EXPECT_SOME(error);
// `container_id` is not valid.
ContainerID badContainerId;
badContainerId.set_value("no spaces allowed");
agent::Call::LaunchNestedContainerSession* launch =
call.mutable_launch_nested_container_session();
launch->mutable_container_id()->CopyFrom(badContainerId);
error = validation::agent::call::validate(call);
EXPECT_SOME(error);
// Valid `container_id` but missing `container_id.parent`.
ContainerID containerId;
containerId.set_value(UUID::random().toString());
launch->mutable_container_id()->CopyFrom(containerId);
error = validation::agent::call::validate(call);
EXPECT_SOME(error);
// Valid `container_id.parent` but invalid `command.environment`. Set
// an invalid environment variable to check that the common validation
// code for the command's environment is being executed.
ContainerID parentContainerId;
parentContainerId.set_value(UUID::random().toString());
launch->mutable_container_id()->mutable_parent()->CopyFrom(parentContainerId);
launch->mutable_command()->CopyFrom(createCommandInfo("exit 0"));
Environment::Variable* variable = launch
->mutable_command()
->mutable_environment()
->mutable_variables()
->Add();
variable->set_name("ENV_VAR_KEY");
variable->set_type(mesos::Environment::Variable::VALUE);
error = validation::agent::call::validate(call);
EXPECT_SOME(error);
EXPECT_EQ(
"'launch_nested_container_session.command' is invalid: Environment "
"variable 'ENV_VAR_KEY' of type 'VALUE' must have a value set",
error->message);
// Test the valid case.
variable->set_value("env_var_value");
error = validation::agent::call::validate(call);
EXPECT_NONE(error);
// Any number of parents is valid.
ContainerID grandparentContainerId;
grandparentContainerId.set_value(UUID::random().toString());
launch->mutable_container_id()->mutable_parent()->mutable_parent()->CopyFrom(
grandparentContainerId);
error = validation::agent::call::validate(call);
EXPECT_NONE(error);
}
Example 5: TEST_F
// This test verifies that sandbox path volume allows two containers
// nested under the same parent container to share data.
// TODO(jieyu): Parameterize this test to test both linux and posix
// launcher and filesystem isolator.
TEST_F(VolumeSandboxPathIsolatorTest, SharedVolume)
{
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "volume/sandbox_path";
Fetcher fetcher;
Try<MesosContainerizer*> create = MesosContainerizer::create(
flags,
true,
&fetcher);
ASSERT_SOME(create);
Owned<MesosContainerizer> containerizer(create.get());
SlaveState state;
state.id = SlaveID();
AWAIT_READY(containerizer->recover(state));
ContainerID containerId;
containerId.set_value(UUID::random().toString());
ExecutorInfo executor = createExecutorInfo("executor", "sleep 99", "cpus:1");
Try<string> directory = environment->mkdtemp();
ASSERT_SOME(directory);
Future<bool> launch = containerizer->launch(
containerId,
None(),
executor,
directory.get(),
None(),
state.id,
map<string, string>(),
true); // TODO(benh): Ever want to check not-checkpointing?
AWAIT_ASSERT_TRUE(launch);
ContainerID nestedContainerId1;
nestedContainerId1.mutable_parent()->CopyFrom(containerId);
nestedContainerId1.set_value(UUID::random().toString());
ContainerInfo containerInfo;
containerInfo.set_type(ContainerInfo::MESOS);
Volume* volume = containerInfo.add_volumes();
volume->set_mode(Volume::RW);
volume->set_container_path("parent");
Volume::Source* source = volume->mutable_source();
source->set_type(Volume::Source::SANDBOX_PATH);
Volume::Source::SandboxPath* sandboxPath = source->mutable_sandbox_path();
sandboxPath->set_type(Volume::Source::SandboxPath::PARENT);
sandboxPath->set_path("shared");
launch = containerizer->launch(
nestedContainerId1,
createCommandInfo("touch parent/file; sleep 1000"),
containerInfo,
None(),
state.id);
AWAIT_ASSERT_TRUE(launch);
ContainerID nestedContainerId2;
nestedContainerId2.mutable_parent()->CopyFrom(containerId);
nestedContainerId2.set_value(UUID::random().toString());
launch = containerizer->launch(
nestedContainerId2,
createCommandInfo(
"while true; do if [ -f parent/file ]; then exit 0; fi; done"),
containerInfo,
None(),
state.id);
AWAIT_ASSERT_TRUE(launch);
Future<Option<ContainerTermination>> wait =
containerizer->wait(nestedContainerId2);
AWAIT_READY(wait);
ASSERT_SOME(wait.get());
ASSERT_TRUE(wait.get()->has_status());
EXPECT_WEXITSTATUS_EQ(0, wait.get()->status());
wait = containerizer->wait(containerId);
containerizer->destroy(containerId);
AWAIT_READY(wait);
ASSERT_SOME(wait.get());
//......... the rest of the code is omitted here .........
Example 6: launch
Future<bool> launch(
const ContainerID& containerId,
const ContainerConfig& containerConfig,
const map<string, string>& environment,
const Option<string>& pidCheckpointPath)
{
CHECK(!terminatedContainers.contains(containerId))
<< "Failed to launch nested container " << containerId
<< " for executor '" << containerConfig.executor_info().executor_id()
<< "' of framework " << containerConfig.executor_info().framework_id()
<< " because this ContainerID is being re-used with"
<< " a previously terminated container";
CHECK(!containers_.contains(containerId))
<< "Failed to launch container " << containerId
<< " for executor '" << containerConfig.executor_info().executor_id()
<< "' of framework " << containerConfig.executor_info().framework_id()
<< " because it is already launched";
containers_[containerId] = Owned<ContainerData>(new ContainerData());
if (containerId.has_parent()) {
// Launching a nested container via the test containerizer is a
// no-op for now.
return true;
}
CHECK(executors.contains(containerConfig.executor_info().executor_id()))
<< "Failed to launch executor '"
<< containerConfig.executor_info().executor_id()
<< "' of framework " << containerConfig.executor_info().framework_id()
<< " because it is unknown to the containerizer";
containers_.at(containerId)->executorId =
containerConfig.executor_info().executor_id();
containers_.at(containerId)->frameworkId =
containerConfig.executor_info().framework_id();
// We need to synchronize all reads and writes to the environment
// as this is global state.
//
// TODO(jmlvanre): Even this is not sufficient, as other aspects
// of the code may read an environment variable while we are
// manipulating it. The better solution is to pass the environment
// variables into the fork, or to set them on the command line.
// See MESOS-3475.
static std::mutex mutex;
synchronized(mutex) {
// Since the constructor for `MesosExecutorDriver` reads
// environment variables to load flags, even it needs to
// be within this synchronization section.
//
// Prepare additional environment variables for the executor.
// TODO(benh): Need to get flags passed into the TestContainerizer
// in order to properly use here.
slave::Flags flags;
flags.recovery_timeout = Duration::zero();
// We need to save the original set of environment variables so we
// can reset the environment after calling 'driver->start()' below.
hashmap<string, string> original = os::environment();
foreachpair (const string& name, const string& variable, environment) {
os::setenv(name, variable);
}
// TODO(benh): Can this be removed and done exclusively in the
// 'executorEnvironment()' function? There are other places in the
// code where we do this as well and it's likely we can do this once
// in 'executorEnvironment()'.
foreach (const Environment::Variable& variable,
containerConfig.executor_info()
.command().environment().variables()) {
os::setenv(variable.name(), variable.value());
}
os::setenv("MESOS_LOCAL", "1");
const Owned<ExecutorData>& executorData =
executors.at(containerConfig.executor_info().executor_id());
if (executorData->executor != nullptr) {
executorData->driver = Owned<MesosExecutorDriver>(
new MesosExecutorDriver(executorData->executor));
executorData->driver->start();
} else {
shared_ptr<v1::MockHTTPExecutor> executor =
executorData->v1ExecutorMock;
executorData->v1Library = Owned<v1::executor::TestMesos>(
new v1::executor::TestMesos(ContentType::PROTOBUF, executor));
}
os::unsetenv("MESOS_LOCAL");
// Unset the environment variables we set by resetting them to their
// original values and also removing any that were not part of the
// original environment.
foreachpair (const string& name, const string& value, original) {
//......... the rest of the code is omitted here .........
Example 7: TEST_F
// This test verifies that the provisioner can provision an rootfs
// from an image that is already put into the store directory.
TEST_F(ProvisionerAppcTest, ROOT_Provision)
{
// Create provisioner.
slave::Flags flags;
flags.image_providers = "APPC";
flags.appc_store_dir = path::join(os::getcwd(), "store");
flags.image_provisioner_backend = "bind";
flags.work_dir = "work_dir";
Fetcher fetcher;
Try<Owned<Provisioner>> provisioner = Provisioner::create(flags, &fetcher);
ASSERT_SOME(provisioner);
// Create a simple image in the store:
// <store>
// |--images
// |--<id>
// |--manifest
// |--rootfs/tmp/test
JSON::Value manifest = JSON::parse(
"{"
" \"acKind\": \"ImageManifest\","
" \"acVersion\": \"0.6.1\","
" \"name\": \"foo.com/bar\","
" \"labels\": ["
" {"
" \"name\": \"version\","
" \"value\": \"1.0.0\""
" },"
" {"
" \"name\": \"arch\","
" \"value\": \"amd64\""
" },"
" {"
" \"name\": \"os\","
" \"value\": \"linux\""
" }"
" ],"
" \"annotations\": ["
" {"
" \"name\": \"created\","
" \"value\": \"1438983392\""
" }"
" ]"
"}").get();
// The 'imageId' below has the correct format but it's not computed
// by hashing the tarball of the image. It's OK here as we assume
// the images under 'images' have passed such check when they are
// downloaded and validated.
string imageId =
"sha512-e77d96aa0240eedf134b8c90baeaf76dca8e78691836301d7498c84020446042e"
"797b296d6ab296e0954c2626bfb264322ebeb8f447dac4fac6511ea06bc61f0";
string imagePath = path::join(flags.appc_store_dir, "images", imageId);
ASSERT_SOME(os::mkdir(path::join(imagePath, "rootfs", "tmp")));
ASSERT_SOME(
os::write(path::join(imagePath, "rootfs", "tmp", "test"), "test"));
ASSERT_SOME(
os::write(path::join(imagePath, "manifest"), stringify(manifest)));
// Recover. This is when the image in the store is loaded.
AWAIT_READY(provisioner.get()->recover({}, {}));
// Simulate a task that requires an image.
Image image;
image.mutable_appc()->set_name("foo.com/bar");
ContainerID containerId;
containerId.set_value("12345");
Future<string> rootfs = provisioner.get()->provision(containerId, image);
AWAIT_READY(rootfs);
string provisionerDir = slave::paths::getProvisionerDir(flags.work_dir);
string containerDir =
slave::provisioner::paths::getContainerDir(
provisionerDir,
containerId);
Try<hashmap<string, hashset<string>>> rootfses =
slave::provisioner::paths::listContainerRootfses(
provisionerDir,
containerId);
ASSERT_SOME(rootfses);
// Verify that the rootfs is successfully provisioned.
ASSERT_TRUE(rootfses->contains(flags.image_provisioner_backend));
ASSERT_EQ(1u, rootfses->get(flags.image_provisioner_backend)->size());
EXPECT_EQ(*rootfses->get(flags.image_provisioner_backend)->begin(),
Path(rootfs.get()).basename());
Future<bool> destroy = provisioner.get()->destroy(containerId);
AWAIT_READY(destroy);
//......... the rest of the code is omitted here .........
Example 8: NetworkCniIsolatorProcess::_attach
Future<Nothing> NetworkCniIsolatorProcess::_attach(
const ContainerID& containerId,
const string& networkName,
const string& plugin,
const tuple<Future<Option<int>>, Future<string>>& t)
{
CHECK(infos.contains(containerId));
CHECK(infos[containerId]->containerNetworks.contains(networkName));
Future<Option<int>> status = std::get<0>(t);
if (!status.isReady()) {
return Failure(
"Failed to get the exit status of the CNI plugin '" +
plugin + "' subprocess: " +
(status.isFailed() ? status.failure() : "discarded"));
}
if (status->isNone()) {
return Failure(
"Failed to reap the CNI plugin '" + plugin + "' subprocess");
}
// CNI plugin will print result (in case of success) or error (in
// case of failure) to stdout.
Future<string> output = std::get<1>(t);
if (!output.isReady()) {
return Failure(
"Failed to read stdout from the CNI plugin '" +
plugin + "' subprocess: " +
(output.isFailed() ? output.failure() : "discarded"));
}
if (status.get() != 0) {
return Failure(
"The CNI plugin '" + plugin + "' failed to attach container " +
containerId.value() + " to CNI network '" + networkName +
"': " + output.get());
}
// Parse the output of CNI plugin.
Try<spec::NetworkInfo> parse = spec::parseNetworkInfo(output.get());
if (parse.isError()) {
return Failure(
"Failed to parse the output of the CNI plugin '" +
plugin + "': " + parse.error());
}
if (parse.get().has_ip4()) {
LOG(INFO) << "Got assigned IPv4 address '" << parse.get().ip4().ip()
<< "' from CNI network '" << networkName
<< "' for container " << containerId;
}
if (parse.get().has_ip6()) {
LOG(INFO) << "Got assigned IPv6 address '" << parse.get().ip6().ip()
<< "' from CNI network '" << networkName
<< "' for container " << containerId;
}
// Checkpoint the output of CNI plugin.
// The destruction of the container cannot happen in the middle of
// 'attach()' and '_attach()' because the containerizer will wait
// for 'isolate()' to finish before destroying the container.
ContainerNetwork& containerNetwork =
infos[containerId]->containerNetworks[networkName];
const string networkInfoPath = paths::getNetworkInfoPath(
rootDir.get(),
containerId.value(),
networkName,
containerNetwork.ifName);
Try<Nothing> write = os::write(networkInfoPath, output.get());
if (write.isError()) {
return Failure(
"Failed to checkpoint the output of CNI plugin'" +
output.get() + "': " + write.error());
}
containerNetwork.cniNetworkInfo = parse.get();
return Nothing();
}
Example 9: ExternalContainerizerProcess::launch
Future<ExecutorInfo> ExternalContainerizerProcess::launch(
const ContainerID& containerId,
const TaskInfo& taskInfo,
const FrameworkID& frameworkId,
const std::string& directory,
const Option<std::string>& user,
const SlaveID& slaveId,
const PID<Slave>& slavePid,
bool checkpoint)
{
LOG(INFO) << "Launching container '" << containerId << "'";
// Get the executor from our task. If no executor is associated with
// the given task, this function renders an ExecutorInfo using the
// mesos-executor as its command.
ExecutorInfo executor = containerExecutorInfo(flags, taskInfo, frameworkId);
executor.mutable_resources()->MergeFrom(taskInfo.resources());
if (containers.contains(containerId)) {
return Failure("Cannot start already running container '"
+ containerId.value() + "'");
}
sandboxes.put(containerId, Owned<Sandbox>(new Sandbox(directory, user)));
map<string, string> environment = executorEnvironment(
executor,
directory,
slaveId,
slavePid,
checkpoint,
flags.recovery_timeout);
if (!flags.hadoop_home.empty()) {
environment["HADOOP_HOME"] = flags.hadoop_home;
}
TaskInfo task;
task.CopyFrom(taskInfo);
CommandInfo* command = task.has_executor()
? task.mutable_executor()->mutable_command()
: task.mutable_command();
// When the selected command has no container attached, use the
// default from the slave startup flags, if available.
if (!command->has_container()) {
if (flags.default_container_image.isSome()) {
command->mutable_container()->set_image(
flags.default_container_image.get());
} else {
LOG(INFO) << "No container specified in task and no default given. "
<< "The external containerizer will have to fill in "
<< "defaults.";
}
}
ExternalTask external;
external.mutable_task()->CopyFrom(task);
external.set_mesos_executor_path(
path::join(flags.launcher_dir, "mesos-executor"));
stringstream output;
external.SerializeToOstream(&output);
Try<Subprocess> invoked = invoke(
"launch",
containerId,
output.str(),
environment);
if (invoked.isError()) {
return Failure("Launch of container '" + containerId.value()
+ "' failed (error: " + invoked.error() + ")");
}
// Record the process.
containers.put(
containerId,
Owned<Container>(new Container(invoked.get().pid())));
VLOG(2) << "Now awaiting data from pipe...";
// Read from the result-pipe and invoke callbacks when reaching EOF.
return await(read(invoked.get().out()), invoked.get().status())
.then(defer(
PID<ExternalContainerizerProcess>(this),
&ExternalContainerizerProcess::_launch,
containerId,
frameworkId,
executor,
slaveId,
checkpoint,
lambda::_1));
}
Example 10: hash_value
inline std::size_t hash_value(const ContainerID& containerId)
{
size_t seed = 0;
boost::hash_combine(seed, containerId.value());
return seed;
}
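
This overload is what allows ContainerID to be used as a key in the stout hashmap/hashset containers seen in the examples above, because boost::hash finds hash_value() via argument-dependent lookup. Note that only value() is hashed while operator== (Example 13) also compares parents; that is still consistent (equal IDs always hash equally), it merely means nested IDs sharing a value land in the same bucket. Below is a minimal usage sketch, not taken from Mesos itself; the include paths and the trackSandbox() helper are assumptions for illustration.

#include <string>

#include <mesos/mesos.hpp>       // ContainerID (assumed include path).
#include <mesos/type_utils.hpp>  // hash_value(), operator== (assumed).

#include <stout/hashmap.hpp>     // hashmap, built on boost::hash.
#include <stout/uuid.hpp>        // UUID.

// Hypothetical bookkeeping keyed by ContainerID. This works only because the
// hash_value() overload above is picked up for the key type.
static hashmap<ContainerID, std::string> sandboxes;

static void trackSandbox()
{
  ContainerID containerId;
  containerId.set_value(UUID::random().toString());

  // Illustrative sandbox path only.
  sandboxes[containerId] = "/tmp/sandboxes/" + containerId.value();
}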
Example 11: TEST_P
// This test verifies that the image specified in the volume will be
// properly provisioned and mounted into the container if container
// root filesystem is not specified.
TEST_P(VolumeImageIsolatorTest, ROOT_ImageInVolumeWithoutRootFilesystem)
{
string registry = path::join(sandbox.get(), "registry");
AWAIT_READY(DockerArchive::create(registry, "test_image"));
slave::Flags flags = CreateSlaveFlags();
flags.isolation = "filesystem/linux,volume/image,docker/runtime";
flags.docker_registry = registry;
flags.docker_store_dir = path::join(sandbox.get(), "store");
flags.image_providers = "docker";
Try<MesosContainerizer*> create =
MesosContainerizer::create(flags, true, &fetcher);
ASSERT_SOME(create);
Owned<Containerizer> containerizer(create.get());
ContainerID containerId;
containerId.set_value(UUID::random().toString());
ContainerInfo container = createContainerInfo(
None(),
{createVolumeFromDockerImage("rootfs", "test_image", Volume::RW)});
CommandInfo command = createCommandInfo("test -d rootfs/bin");
ExecutorInfo executor = createExecutorInfo(
"test_executor",
nesting ? createCommandInfo("sleep 1000") : command);
if (!nesting) {
executor.mutable_container()->CopyFrom(container);
}
string directory = path::join(flags.work_dir, "sandbox");
ASSERT_SOME(os::mkdir(directory));
Future<bool> launch = containerizer->launch(
containerId,
None(),
executor,
directory,
None(),
SlaveID(),
map<string, string>(),
false);
AWAIT_ASSERT_TRUE(launch);
Future<Option<ContainerTermination>> wait = containerizer->wait(containerId);
if (nesting) {
ContainerID nestedContainerId;
nestedContainerId.mutable_parent()->CopyFrom(containerId);
nestedContainerId.set_value(UUID::random().toString());
launch = containerizer->launch(
nestedContainerId,
command,
container,
None(),
SlaveID());
AWAIT_ASSERT_TRUE(launch);
wait = containerizer->wait(nestedContainerId);
}
AWAIT_READY(wait);
ASSERT_SOME(wait.get());
ASSERT_TRUE(wait->get().has_status());
EXPECT_WEXITSTATUS_EQ(0, wait->get().status());
if (nesting) {
wait = containerizer->wait(containerId);
containerizer->destroy(containerId);
AWAIT_READY(wait);
ASSERT_SOME(wait.get());
ASSERT_TRUE(wait->get().has_status());
EXPECT_WTERMSIG_EQ(SIGKILL, wait.get()->status());
}
}
Example 12: LinuxFilesystemIsolatorProcess::update
Future<Nothing> LinuxFilesystemIsolatorProcess::update(
const ContainerID& containerId,
const Resources& resources)
{
if (containerId.has_parent()) {
return Failure("Not supported for nested containers");
}
// Mount persistent volumes. We do this in the host namespace and
// rely on mount propagation for them to be visible inside the
// container.
if (!infos.contains(containerId)) {
return Failure("Unknown container");
}
const Owned<Info>& info = infos[containerId];
Resources current = info->resources;
// We first remove unneeded persistent volumes.
foreach (const Resource& resource, current.persistentVolumes()) {
// This is enforced by the master.
CHECK(resource.disk().has_volume());
// Ignore absolute and nested paths.
const string& containerPath = resource.disk().volume().container_path();
if (strings::contains(containerPath, "/")) {
LOG(WARNING) << "Skipping updating mount for persistent volume "
<< resource << " of container " << containerId
<< " because the container path '" << containerPath
<< "' contains slash";
continue;
}
if (resources.contains(resource)) {
continue;
}
// Determine the target of the mount.
string target = path::join(info->directory, containerPath);
LOG(INFO) << "Removing mount '" << target << "' for persistent volume "
<< resource << " of container " << containerId;
// The unmount will fail if the task/executor is still using files
// or directories under 'target'.
Try<Nothing> unmount = fs::unmount(target);
if (unmount.isError()) {
return Failure(
"Failed to unmount unneeded persistent volume at '" +
target + "': " + unmount.error());
}
// NOTE: This is a non-recursive rmdir.
Try<Nothing> rmdir = os::rmdir(target, false);
if (rmdir.isError()) {
return Failure(
"Failed to remove persistent volume mount point at '" +
target + "': " + rmdir.error());
}
}
// Get user and group info for this task based on the task's sandbox.
struct stat s;
if (::stat(info->directory.c_str(), &s) < 0) {
return Failure("Failed to get ownership for '" + info->directory +
"': " + os::strerror(errno));
}
const uid_t uid = s.st_uid;
const gid_t gid = s.st_gid;
// We then mount new persistent volumes.
foreach (const Resource& resource, resources.persistentVolumes()) {
// This is enforced by the master.
CHECK(resource.disk().has_volume());
// Ignore absolute and nested paths.
const string& containerPath = resource.disk().volume().container_path();
if (strings::contains(containerPath, "/")) {
LOG(WARNING) << "Skipping updating mount for persistent volume "
<< resource << " of container " << containerId
<< " because the container path '" << containerPath
<< "' contains slash";
continue;
}
if (current.contains(resource)) {
continue;
}
// Determine the source of the mount.
string source = paths::getPersistentVolumePath(flags.work_dir, resource);
bool isVolumeInUse = false;
foreachvalue (const Owned<Info>& info, infos) {
if (info->resources.contains(resource)) {
isVolumeInUse = true;
break;
//......... the rest of the code is omitted here .........
Example 13: operator==
bool operator==(const ContainerID& left, const ContainerID& right)
{
return left.value() == right.value() &&
left.has_parent() == right.has_parent() &&
(!left.has_parent() || left.parent() == right.parent());
}
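
The comparison is recursive: two IDs are equal only if their values match, they agree on whether a parent exists, and, when present, the parent chains compare equal as well. The following short sketch (values are made up; CHECK is the glog assertion macro used elsewhere on this page) illustrates the resulting semantics for nested containers.

// Hypothetical illustration, assuming the same headers as the tests above.
static void illustrateContainerIdEquality()
{
  ContainerID parent;
  parent.set_value("parent");

  ContainerID childA;
  childA.set_value("child");
  childA.mutable_parent()->CopyFrom(parent);

  ContainerID childB;
  childB.set_value("child");
  childB.mutable_parent()->CopyFrom(parent);

  ContainerID topLevel;
  topLevel.set_value("child");  // Same value, but no parent set.

  // Equal: same value and identical parent chain.
  CHECK(childA == childB);

  // Not equal: has_parent() differs, so a top-level "child" is a
  // different container from a nested "child".
  CHECK(!(topLevel == childA));
}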
Example 14: NvidiaGpuIsolatorProcess::update
Future<Nothing> NvidiaGpuIsolatorProcess::update(
const ContainerID& containerId,
const Resources& resources)
{
if (containerId.has_parent()) {
return Failure("Not supported for nested containers");
}
if (!infos.contains(containerId)) {
return Failure("Unknown container");
}
Info* info = CHECK_NOTNULL(infos[containerId]);
Option<double> gpus = resources.gpus();
// Make sure that the `gpus` resource is not fractional.
// We rely on scalar resources only having 3 digits of precision.
if (static_cast<long long>(gpus.getOrElse(0.0) * 1000.0) % 1000 != 0) {
return Failure("The 'gpus' resource must be an unsigned integer");
}
size_t requested = static_cast<size_t>(resources.gpus().getOrElse(0.0));
// Update the GPU allocation to reflect the new total.
if (requested > info->allocated.size()) {
size_t additional = requested - info->allocated.size();
return allocator.allocate(additional)
.then(defer(PID<NvidiaGpuIsolatorProcess>(this),
&NvidiaGpuIsolatorProcess::_update,
containerId,
lambda::_1));
} else if (requested < info->allocated.size()) {
size_t fewer = info->allocated.size() - requested;
set<Gpu> deallocated;
for (size_t i = 0; i < fewer; i++) {
const auto gpu = info->allocated.begin();
cgroups::devices::Entry entry;
entry.selector.type = Entry::Selector::Type::CHARACTER;
entry.selector.major = gpu->major;
entry.selector.minor = gpu->minor;
entry.access.read = true;
entry.access.write = true;
entry.access.mknod = true;
Try<Nothing> deny = cgroups::devices::deny(
hierarchy, info->cgroup, entry);
if (deny.isError()) {
return Failure("Failed to deny cgroups access to GPU device"
" '" + stringify(entry) + "': " + deny.error());
}
deallocated.insert(*gpu);
info->allocated.erase(gpu);
}
return allocator.deallocate(deallocated);
}
return Nothing();
}
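
The fractional-GPU guard near the top of update() works because Mesos scalar resources carry at most three decimal digits of precision, so multiplying by 1000 yields an exact integer for any valid value; a nonzero remainder modulo 1000 therefore signals a fractional GPU request. The same test, pulled out into a hypothetical standalone helper purely for illustration:

// Mirrors the check in NvidiaGpuIsolatorProcess::update() above; this helper
// is not part of the isolator.
static bool isWholeGpuCount(double gpus)
{
  return static_cast<long long>(gpus * 1000.0) % 1000 == 0;
}

// isWholeGpuCount(2.0) evaluates to true  (2000 % 1000 == 0).
// isWholeGpuCount(0.5) evaluates to false ( 500 % 1000 != 0).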
Example 15: TYPED_TEST
TYPED_TEST(CpuIsolatorTest, SystemCpuUsage)
{
Flags flags;
Try<Isolator*> isolator = TypeParam::create(flags);
CHECK_SOME(isolator);
// A PosixLauncher is sufficient even when testing a cgroups isolator.
Try<Launcher*> launcher = PosixLauncher::create(flags);
ExecutorInfo executorInfo;
executorInfo.mutable_resources()->CopyFrom(
Resources::parse("cpus:1.0").get());
ContainerID containerId;
containerId.set_value("system_cpu_usage");
AWAIT_READY(isolator.get()->prepare(containerId, executorInfo));
Try<string> dir = os::mkdtemp();
ASSERT_SOME(dir);
const string& file = path::join(dir.get(), "mesos_isolator_test_ready");
// Generating random numbers is done by the kernel and will max out a single
// core and run almost exclusively in the kernel, i.e., system time.
string command = "cat /dev/urandom > /dev/null & "
"touch " + file + "; " // Signals the command is running.
"sleep 60";
int pipes[2];
ASSERT_NE(-1, ::pipe(pipes));
lambda::function<int()> inChild = lambda::bind(&execute, command, pipes);
Try<pid_t> pid = launcher.get()->fork(containerId, inChild);
ASSERT_SOME(pid);
// Reap the forked child.
Future<Option<int> > status = process::reap(pid.get());
// Continue in the parent.
::close(pipes[0]);
// Isolate the forked child.
AWAIT_READY(isolator.get()->isolate(containerId, pid.get()));
// Now signal the child to continue.
int buf;
ASSERT_LT(0, ::write(pipes[1], &buf, sizeof(buf)));
::close(pipes[1]);
// Wait for the command to start.
while (!os::exists(file));
// Wait up to 1 second for the child process to induce 1/8 of a second of
// system cpu time.
ResourceStatistics statistics;
Duration waited = Duration::zero();
do {
Future<ResourceStatistics> usage = isolator.get()->usage(containerId);
AWAIT_READY(usage);
statistics = usage.get();
// If we meet our usage expectations, we're done!
if (statistics.cpus_system_time_secs() >= 0.125) {
break;
}
os::sleep(Milliseconds(200));
waited += Milliseconds(200);
} while (waited < Seconds(1));
EXPECT_LE(0.125, statistics.cpus_system_time_secs());
// Shouldn't be any appreciable user time.
EXPECT_GT(0.025, statistics.cpus_user_time_secs());
// Ensure all processes are killed.
AWAIT_READY(launcher.get()->destroy(containerId));
// Make sure the child was reaped.
AWAIT_READY(status);
// Let the isolator clean up.
AWAIT_READY(isolator.get()->cleanup(containerId));
delete isolator.get();
delete launcher.get();
CHECK_SOME(os::rmdir(dir.get()));
}