This article collects typical usage examples of the Java class org.apache.hadoop.mapreduce.protocol.ClientProtocol. If you are wondering what ClientProtocol is for, or how it is used in practice, the curated code samples below may help.
The ClientProtocol class belongs to the org.apache.hadoop.mapreduce.protocol package. Ten code examples of the class are shown below, sorted by popularity by default.
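ClientProtocol is the client-side RPC interface that org.apache.hadoop.mapreduce.Cluster uses to talk to the job-submission backend; application code normally reaches it only indirectly through Cluster and Job. As a hedged, minimal sketch of where the interface sits (assuming a Hadoop client configuration is on the classpath; the class name is illustrative), the following program lists known jobs through Cluster, which resolves a ClientProtocol internally via the registered ClientProtocolProvider implementations:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.JobStatus;

public class ClientProtocolSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Cluster selects a ClientProtocol implementation (LocalJobRunner,
    // YARNRunner, ...) based on mapreduce.framework.name.
    Cluster cluster = new Cluster(conf);
    try {
      for (JobStatus status : cluster.getAllJobStatuses()) {
        System.out.println(status.getJobID() + " -> " + status.getState());
      }
    } finally {
      cluster.close();
    }
  }
}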
Example 1: testJobToString
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
@Test
public void testJobToString() throws IOException, InterruptedException {
  Cluster cluster = mock(Cluster.class);
  ClientProtocol client = mock(ClientProtocol.class);
  when(cluster.getClient()).thenReturn(client);
  JobID jobid = new JobID("1014873536921", 6);
  JobStatus status = new JobStatus(jobid, 0.0f, 0.0f, 0.0f, 0.0f,
      State.FAILED, JobPriority.NORMAL, "root", "TestJobToString",
      "job file", "tracking url");
  when(client.getJobStatus(jobid)).thenReturn(status);
  when(client.getTaskReports(jobid, TaskType.MAP)).thenReturn(
      new TaskReport[0]);
  when(client.getTaskReports(jobid, TaskType.REDUCE)).thenReturn(
      new TaskReport[0]);
  when(client.getTaskCompletionEvents(jobid, 0, 10)).thenReturn(
      new TaskCompletionEvent[0]);
  Job job = Job.getInstance(cluster, status, new JobConf());
  Assert.assertNotNull(job.toString());
}
Example 2: testJobToString
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
@Test
public void testJobToString() throws IOException, InterruptedException {
  Cluster cluster = mock(Cluster.class);
  ClientProtocol client = mock(ClientProtocol.class);
  when(cluster.getClient()).thenReturn(client);
  JobID jobid = new JobID("1014873536921", 6);
  JobStatus status = new JobStatus(jobid, 0.0f, 0.0f, 0.0f, 0.0f,
      State.FAILED, JobPriority.DEFAULT, "root", "TestJobToString",
      "job file", "tracking url");
  when(client.getJobStatus(jobid)).thenReturn(status);
  when(client.getTaskReports(jobid, TaskType.MAP)).thenReturn(
      new TaskReport[0]);
  when(client.getTaskReports(jobid, TaskType.REDUCE)).thenReturn(
      new TaskReport[0]);
  when(client.getTaskCompletionEvents(jobid, 0, 10)).thenReturn(
      new TaskCompletionEvent[0]);
  Job job = Job.getInstance(cluster, status, new JobConf());
  Assert.assertNotNull(job.toString());
}
Example 3: getProtocolVersion
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
public long getProtocolVersion(String protocol,
                               long clientVersion) throws IOException {
  if (protocol.equals(InterTrackerProtocol.class.getName())) {
    return InterTrackerProtocol.versionID;
  } else if (protocol.equals(ClientProtocol.class.getName())) {
    return ClientProtocol.versionID;
  } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())) {
    return RefreshAuthorizationPolicyProtocol.versionID;
  } else if (protocol.equals(AdminOperationsProtocol.class.getName())) {
    return AdminOperationsProtocol.versionID;
  } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())) {
    return RefreshUserMappingsProtocol.versionID;
  } else if (protocol.equals(GetUserMappingsProtocol.class.getName())) {
    return GetUserMappingsProtocol.versionID;
  } else {
    throw new IOException("Unknown protocol to job tracker: " + protocol);
  }
}
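getProtocolVersion comes from Hadoop's old VersionedProtocol handshake: the server reports which version it implements for a named protocol, and the caller compares that against its own compile-time versionID. A hedged sketch of the client-side check (the server parameter is a hypothetical remote proxy, not part of the example above):

import java.io.IOException;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;

class ClientProtocolVersionCheck {
  static void ensureCompatible(VersionedProtocol server) throws IOException {
    long remote = server.getProtocolVersion(
        ClientProtocol.class.getName(), ClientProtocol.versionID);
    if (remote != ClientProtocol.versionID) {
      throw new IOException("ClientProtocol version mismatch: client="
          + ClientProtocol.versionID + ", server=" + remote);
    }
  }
}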
Example 4: create
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
@Override
public ClientProtocol create(Configuration conf) throws IOException {
  if (MRConfig.YARN_FRAMEWORK_NAME.equals(conf.get(MRConfig.FRAMEWORK_NAME))) {
    return new YARNRunner(conf);
  }
  return null;
}
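This provider only returns a non-null ClientProtocol when the configuration selects the YARN framework. A minimal sketch of the client-side setting that makes Cluster pick it (assuming a reachable ResourceManager; the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.MRConfig;

public class SelectYarnFramework {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME); // "yarn"
    // With this setting, the create(conf) shown above returns a YARNRunner.
    Cluster cluster = new Cluster(conf);
    cluster.close();
  }
}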
Example 5: create
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
@Override
public ClientProtocol create(Configuration conf) throws IOException {
  String framework =
      conf.get(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  if (!MRConfig.LOCAL_FRAMEWORK_NAME.equals(framework)) {
    return null;
  }
  conf.setInt(JobContext.NUM_MAPS, 1);
  return new LocalJobRunner(conf);
}
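By contrast, the local provider accepts the default framework name, which is why a bare client configuration falls back to LocalJobRunner. A hedged sketch (class name illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;
import org.apache.hadoop.mapreduce.MRConfig;

public class SelectLocalFramework {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "local" is also the default value, so this set() is only for clarity.
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
    Cluster cluster = new Cluster(conf); // backed by a LocalJobRunner
    cluster.close();
  }
}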
Example 6: setUp
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
@Before
public void setUp() throws IOException {
  conf = new Configuration();
  clientProtocol = mock(ClientProtocol.class);
  Cluster cluster = mock(Cluster.class);
  when(cluster.getConf()).thenReturn(conf);
  when(cluster.getClient()).thenReturn(clientProtocol);
  JobStatus jobStatus = new JobStatus(new JobID("job_000", 1), 0f, 0f, 0f, 0f,
      State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
      "tmp-jobfile", "tmp-url");
  job = Job.getInstance(cluster, jobStatus, conf);
  job = spy(job);
}
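Here conf, clientProtocol and job are evidently fields of the test class. As a hedged sketch of a test method that could follow this setUp (the stubbed status and the assertion are illustrative assumptions, not part of the original test class):

@Test
public void testGetJobStatus() throws Exception {
  JobStatus running = new JobStatus(job.getJobID(), 0.5f, 0.5f, 0.5f, 0.5f,
      State.RUNNING, JobPriority.HIGH, "tmp-user", "tmp-jobname",
      "tmp-jobfile", "tmp-url");
  when(clientProtocol.getJobStatus(job.getJobID())).thenReturn(running);
  // Job.getStatus() refreshes its state through the mocked ClientProtocol.
  Assert.assertEquals(State.RUNNING, job.getStatus().getState());
}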
Example 7: create
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
/** {@inheritDoc} */
@Override public ClientProtocol create(Configuration conf) throws IOException {
    if (FRAMEWORK_NAME.equals(conf.get(MRConfig.FRAMEWORK_NAME))) {
        Collection<String> addrs = conf.getTrimmedStringCollection(MRConfig.MASTER_ADDRESS);

        if (F.isEmpty(addrs))
            throw new IOException("Failed to create client protocol because Ignite node addresses are not " +
                "specified (did you set " + MRConfig.MASTER_ADDRESS + " property?).");

        if (F.contains(addrs, "local"))
            throw new IOException("Local execution mode is not supported, please point " +
                MRConfig.MASTER_ADDRESS + " to real Ignite nodes.");

        Collection<String> addrs0 = new ArrayList<>(addrs.size());

        // Append the default port if an address does not specify one.
        for (String addr : addrs) {
            if (!addr.contains(":"))
                addrs0.add(addr + ':' + ConnectorConfiguration.DFLT_TCP_PORT);
            else
                addrs0.add(addr);
        }

        return new HadoopClientProtocol(conf, client(conf.get(MRConfig.MASTER_ADDRESS), addrs0));
    }

    return null;
}
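To route submission through this provider, the client configuration must name the Ignite framework and list at least one node address; entries without a port get Ignite's default connector port appended, as the loop above shows. A hedged sketch, where the framework-name string and the host names are assumptions (the real value is whatever the provider's FRAMEWORK_NAME constant holds):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;

public class IgniteFrameworkConfig {
  public static Configuration build() {
    Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, "ignite"); // assumed constant value
    // MRConfig.MASTER_ADDRESS (mapreduce.jobtracker.address) is reused here
    // as a comma-separated list of Ignite node addresses.
    conf.set(MRConfig.MASTER_ADDRESS, "node1.example.com:11211,node2.example.com");
    return conf;
  }
}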
Example 8: close
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
/** {@inheritDoc} */
@Override public void close(ClientProtocol cliProto) throws IOException {
    if (cliProto instanceof HadoopClientProtocol) {
        MapReduceClient cli = ((HadoopClientProtocol)cliProto).client();

        if (cli.release())
            cliMap.remove(cli.cluster(), cli);
    }
}
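On the consuming side it is Cluster.close() that hands the ClientProtocol back to its provider, so this hook runs when a client disposes of its Cluster. A minimal sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;

public class ClusterLifecycle {
  public static void main(String[] args) throws Exception {
    Cluster cluster = new Cluster(new Configuration());
    try {
      // ... submit or monitor jobs ...
    } finally {
      cluster.close(); // delegates to ClientProtocolProvider.close(client)
    }
  }
}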
Example 9: create
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
@Override
public ClientProtocol create(Configuration configuration) throws IOException {
  if (HSERVER_FRAMEWORK_NAME.equals(configuration.get(MRConfig.FRAMEWORK_NAME))) {
    return new HServerClientProtocol(configuration);
  }
  return null;
}
Example 10: initialize
import org.apache.hadoop.mapreduce.protocol.ClientProtocol; // import the required package/class
private void initialize(InetSocketAddress jobTrackAddr, Configuration conf)
    throws IOException {
  initProviderList();
  for (ClientProtocolProvider provider : providerList) {
    LOG.debug("Trying ClientProtocolProvider : "
        + provider.getClass().getName());
    ClientProtocol clientProtocol = null;
    try {
      if (jobTrackAddr == null) {
        clientProtocol = provider.create(conf);
      } else {
        clientProtocol = provider.create(jobTrackAddr, conf);
      }

      if (clientProtocol != null) {
        clientProtocolProvider = provider;
        client = clientProtocol;
        LOG.debug("Picked " + provider.getClass().getName()
            + " as the ClientProtocolProvider");
        break;
      } else {
        LOG.debug("Cannot pick " + provider.getClass().getName()
            + " as the ClientProtocolProvider - returned null protocol");
      }
    } catch (Exception e) {
      LOG.info("Failed to use " + provider.getClass().getName()
          + " due to error: ", e);
    }
  }

  if (null == clientProtocolProvider || null == client) {
    throw new IOException(
        "Cannot initialize Cluster. Please check your configuration for "
            + MRConfig.FRAMEWORK_NAME
            + " and the correspond server addresses.");
  }
}
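This is the provider-discovery loop inside org.apache.hadoop.mapreduce.Cluster; it runs from both Cluster constructors, so the framework choice is fixed at construction time. A minimal sketch of the two entry points (the ResourceManager host and port are placeholders):

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Cluster;

public class ClusterConstruction {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // Equivalent to initialize(null, conf): each provider gets create(conf).
    Cluster byConf = new Cluster(conf);
    byConf.close();

    // Equivalent to initialize(addr, conf): each provider gets create(addr, conf).
    Cluster byAddr = new Cluster(new InetSocketAddress("rm.example.com", 8032), conf);
    byAddr.close();
  }
}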