This article collects and summarizes typical usage examples of the Java class org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy. If you have been wondering what exactly ContainerManagementProtocolProxy does, or how to use it in your own code, the curated class examples below may help.
ContainerManagementProtocolProxy belongs to the org.apache.hadoop.yarn.client.api.impl package. Nine code examples of the class are shown below, sorted by popularity.
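Before diving into the examples, here is a minimal sketch of the class's typical lifecycle, pieced together from the examples below: construct the proxy cache once from a Configuration, obtain a per-NodeManager proxy with getProxy, and talk to the node through the returned ContainerManagementProtocolProxyData. The talkToNodeManager wrapper and its arguments are hypothetical, and the getContainerManagementProtocol and mayBeCloseProxy calls are assumptions worth verifying against your Hadoop version; the constructor and getProxy appear verbatim in the examples below.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy;
import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData;

public class CMProxySketch {
  // Hypothetical helper showing the construct -> getProxy -> use -> release flow.
  public void talkToNodeManager(Configuration conf, String nmAddress,
      ContainerId containerId) throws IOException {
    // One cache per application master; the underlying RPC proxies are
    // created lazily, one per NodeManager address.
    ContainerManagementProtocolProxy cmProxy =
        new ContainerManagementProtocolProxy(conf);
    // Requires a valid NM token for this node in the NMTokenCache;
    // otherwise getProxy fails with an InvalidToken error.
    ContainerManagementProtocolProxyData proxyData =
        cmProxy.getProxy(nmAddress, containerId);
    try {
      ContainerManagementProtocol cm =
          proxyData.getContainerManagementProtocol();
      // ... issue startContainers / stopContainers / getContainerStatuses ...
    } finally {
      // Hand the proxy back so the cache can decide to keep or close it.
      cmProxy.mayBeCloseProxy(proxyData);
    }
  }
}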
Example 1: serviceInit

import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; // import the required package/class
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  this.limitOnPoolSize =
      conf.getInt(AngelConf.ANGEL_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
          AngelConf.DEFAULT_ANGEL_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
  LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize);
  cmProxy = new ContainerManagementProtocolProxy(conf);
}
Example 2: serviceInit

import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; // import the required package/class
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.limitOnPoolSize = conf.getInt(
      MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
      MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
  LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize);
  this.initialPoolSize = conf.getInt(
      MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE,
      MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE);
  LOG.info("The thread pool initial size is " + this.initialPoolSize);
  super.serviceInit(conf);
  cmProxy = new ContainerManagementProtocolProxy(conf);
}
Example 3: createContainerLauncher

import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; // import the required package/class
@Override
protected ContainerLauncher createContainerLauncher(final AppContext context) {
  return new ContainerLauncherImpl(context) {
    @Override
    public ContainerManagementProtocolProxyData getCMProxy(
        String containerMgrBindAddr, ContainerId containerId)
        throws IOException {
      InetSocketAddress addr = NetUtils.getConnectAddress(server);
      String containerManagerBindAddr =
          addr.getHostName() + ":" + addr.getPort();
      Token token =
          tokenSecretManager.createNMToken(
              containerId.getApplicationAttemptId(),
              NodeId.newInstance(addr.getHostName(), addr.getPort()), "user");
      ContainerManagementProtocolProxy cmProxy =
          new ContainerManagementProtocolProxy(conf);
      ContainerManagementProtocolProxyData proxy =
          cmProxy.new ContainerManagementProtocolProxyData(
              YarnRPC.create(conf), containerManagerBindAddr, containerId,
              token);
      return proxy;
    }
  };
}
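This example comes from test code: rather than going through the proxy cache with getProxy, it uses Java's qualified inner-class instantiation syntax (cmProxy.new ContainerManagementProtocolProxyData(...)) to build a proxy by hand, pointing it at a locally started test server and at an NM token minted by the test's tokenSecretManager.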
Example 4: serviceInit

import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; // import the required package/class
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.limitOnPoolSize = conf.getInt(
      MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
      MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
  LOG.info("Upper limit on the thread pool size is " + this.limitOnPoolSize);
  super.serviceInit(conf);
  cmProxy = new ContainerManagementProtocolProxy(conf);
}
Example 5: getCMProxy

import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; // import the required package/class
public ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData getCMProxy(
    String containerMgrBindAddr, ContainerId containerId) throws IOException {
  return cmProxy.getProxy(containerMgrBindAddr, containerId);
}
Example 6: getCMProxy

import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; // import the required package/class
public ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData
    getCMProxy(String containerMgrBindAddr, ContainerId containerId)
        throws IOException {
  return cmProxy.getProxy(containerMgrBindAddr, containerId);
}
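Examples 5, 6, and 8 are thin wrappers that delegate straight to cmProxy.getProxy(containerManagerBindAddr, containerId). The proxy object maintains a per-NodeManager connection cache behind this call; in stock Hadoop its size is controlled by yarn.client.max-cached-nodemanagers-proxies (caching is disabled when the value is 0), though the exact key and default are worth verifying for your Hadoop version.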
Example 7: serviceStart

import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; // import the required package/class
@Override
public void serviceStart() {
  cmProxy =
      new ContainerManagementProtocolProxy(getConfig());
  ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(
      "ContainerLauncher #%d").setDaemon(true).build();
  // Start with a default core-pool size of 10 and change it dynamically.
  launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE,
      Integer.MAX_VALUE, 1, TimeUnit.HOURS,
      new LinkedBlockingQueue<Runnable>(),
      tf);
  eventHandlingThread = new Thread() {
    @Override
    public void run() {
      NMCommunicatorEvent event = null;
      while (!Thread.currentThread().isInterrupted()) {
        try {
          event = eventQueue.take();
        } catch (InterruptedException e) {
          if (!serviceStopped.get()) {
            LOG.error("Returning, interrupted : " + e);
          }
          return;
        }
        int poolSize = launcherPool.getCorePoolSize();
        // See if we need to bump up the pool size, but only if we haven't
        // reached the maximum limit yet.
        if (poolSize != limitOnPoolSize) {
          // The number of nodes where containers will run at *this* point of
          // time. This is *not* the cluster size and doesn't need to be.
          int numNodes = context.getAllNodes().size();
          int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
          if (poolSize < idealPoolSize) {
            // Bump up the pool size to idealPoolSize + INITIAL_POOL_SIZE; the
            // latter is just a buffer so we are not always increasing the
            // pool-size.
            int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize
                + INITIAL_POOL_SIZE);
            LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
                + " as number-of-nodes to talk to is " + numNodes);
            launcherPool.setCorePoolSize(newPoolSize);
          }
        }
        // The events from the queue are handled in parallel using a thread pool.
        launcherPool.execute(createEventProcessor(event));
        // TODO: Group launching of multiple containers to a single
        // NodeManager into a single connection
      }
    }
  };
  eventHandlingThread.setName("ContainerLauncher Event Handler");
  eventHandlingThread.start();
}
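The pattern above is the standard MapReduce AM launcher loop: a single event-handling thread drains eventQueue and fans each NMCommunicatorEvent out to launcherPool, while the loop opportunistically grows the executor's core pool size toward the number of nodes currently in play, never exceeding limitOnPoolSize.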
Example 8: getCMProxy

import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; // import the required package/class
protected ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData getCMProxy(
    ContainerId containerID, final String containerManagerBindAddr,
    Token containerToken) throws IOException {
  return cmProxy.getProxy(containerManagerBindAddr, containerID);
}
Example 9: start

import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy; // import the required package/class
@Override
public void start() throws TezException {
  // pass a copy of config to ContainerManagementProtocolProxy until YARN-3497 is fixed
  cmProxy =
      new ContainerManagementProtocolProxy(conf);
  ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(
      "ContainerLauncher #%d").setDaemon(true).build();
  // Start with a default core-pool size of 10 and change it dynamically.
  launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE,
      Integer.MAX_VALUE, 1, TimeUnit.HOURS,
      new LinkedBlockingQueue<Runnable>(),
      tf, new CustomizedRejectedExecutionHandler());
  eventHandlingThread = new Thread() {
    @Override
    public void run() {
      ContainerOp event = null;
      while (!Thread.currentThread().isInterrupted()) {
        try {
          event = eventQueue.take();
        } catch (InterruptedException e) {
          if (!serviceStopped.get()) {
            LOG.error("Returning, interrupted : " + e);
          }
          return;
        }
        int poolSize = launcherPool.getCorePoolSize();
        // See if we need to bump up the pool size, but only if we haven't
        // reached the maximum limit yet.
        if (poolSize != limitOnPoolSize) {
          // The number of nodes where containers will run at *this* point of
          // time. This is *not* the cluster size and doesn't need to be.
          int numNodes =
              getContext().getNumNodes(TezConstants.getTezYarnServicePluginName());
          int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
          if (poolSize < idealPoolSize) {
            // Bump up the pool size to idealPoolSize + INITIAL_POOL_SIZE; the
            // latter is just a buffer so we are not always increasing the
            // pool-size.
            int newPoolSize = Math.min(limitOnPoolSize, idealPoolSize
                + INITIAL_POOL_SIZE);
            LOG.info("Setting ContainerLauncher pool size to " + newPoolSize
                + " as number-of-nodes to talk to is " + numNodes);
            launcherPool.setCorePoolSize(newPoolSize);
          }
        }
        // The events from the queue are handled in parallel using a thread pool.
        launcherPool.execute(createEventProcessor(event));
        // TODO: Group launching of multiple containers to a single
        // NodeManager into a single connection
      }
    }
  };
  eventHandlingThread.setName("ContainerLauncher Event Handler");
  eventHandlingThread.start();
  boolean cleanupDagDataOnComplete = ShuffleUtils.isTezShuffleHandler(conf)
      && conf.getBoolean(TezConfiguration.TEZ_AM_DAG_CLEANUP_ON_COMPLETION,
          TezConfiguration.TEZ_AM_DAG_CLEANUP_ON_COMPLETION_DEFAULT);
  if (cleanupDagDataOnComplete) {
    String deletionTrackerClassName = conf.get(TezConfiguration.TEZ_AM_DELETION_TRACKER_CLASS,
        TezConfiguration.TEZ_AM_DELETION_TRACKER_CLASS_DEFAULT);
    deletionTracker = ReflectionUtils.createClazzInstance(
        deletionTrackerClassName, new Class[]{Configuration.class}, new Object[]{conf});
  }
}
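Example 9 is Tez's variant of the same launcher loop as example 7. On top of it, Tez installs a CustomizedRejectedExecutionHandler on the pool, derives the node count from its service-plugin context instead of the AM's AppContext, and, when the Tez shuffle handler is configured, optionally instantiates a deletion tracker so per-DAG data can be cleaned up on completion.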