This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.ClientProtocol. If you are unsure what ClientProtocol is for or how to use it, the curated examples below should help.
The ClientProtocol class belongs to the org.apache.hadoop.hdfs.protocol package. Fifteen code examples are shown below, sorted by popularity.
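For orientation, here is a minimal, self-contained sketch of obtaining a ClientProtocol proxy and issuing a single RPC. It assumes the Hadoop 2.x layout, where NameNodeProxies lives in the hadoop-hdfs module; the NameNode URI is a hypothetical placeholder.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class ClientProtocolDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI uri = URI.create("hdfs://nn-host:8020"); // hypothetical NameNode URI
    // createProxy returns a ProxyAndInfo wrapper; the same pattern appears
    // in several of the examples below
    ClientProtocol namenode =
        NameNodeProxies.createProxy(conf, uri, ClientProtocol.class).getProxy();
    // One RPC call: fetch the status of the root directory
    System.out.println(namenode.getFileInfo("/"));
  }
}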
Example 1: getNNProxy
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
private static ClientProtocol getNNProxy(
Token<DelegationTokenIdentifier> token, Configuration conf)
throws IOException {
URI uri = HAUtil.getServiceUriFromToken(HdfsConstants.HDFS_URI_SCHEME,
token);
if (HAUtil.isTokenForLogicalUri(token) &&
!HAUtil.isLogicalUri(conf, uri)) {
// If the token is for a logical nameservice, but the configuration
// we have disagrees about that, we can't actually renew it.
// This can be the case in MR, for example, if the RM doesn't
// have all of the HA clusters configured in its configuration.
throw new IOException("Unable to map logical nameservice URI '" +
uri + "' to a NameNode. Local configuration does not have " +
"a failover proxy provider configured.");
}
NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
assert info.getDelegationTokenService().equals(token.getService()) :
"Returned service '" + info.getDelegationTokenService().toString() +
"' doesn't match expected service '" +
token.getService().toString() + "'";
return info.getProxy();
}
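In Hadoop this helper backs delegation-token renewal. A minimal hedged sketch of the call that typically follows, assuming it sits in the same class as getNNProxy above:

static long renew(Token<DelegationTokenIdentifier> token, Configuration conf)
    throws IOException {
  ClientProtocol nn = getNNProxy(token, conf);
  // renewDelegationToken is part of ClientProtocol; it returns the token's
  // new expiration time in milliseconds
  return nn.renewDelegationToken(token);
}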
Example 2: createNameNodeProxy
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
/**
* Create a {@link NameNode} proxy from the current {@link ServletContext}.
*/
protected ClientProtocol createNameNodeProxy() throws IOException {
ServletContext context = getServletContext();
// if we are running in the Name Node, use it directly rather than via
// rpc
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
if (nn != null) {
return nn.getRpcServer();
}
InetSocketAddress nnAddr =
NameNodeHttpServer.getNameNodeAddressFromContext(context);
Configuration conf = new HdfsConfiguration(
NameNodeHttpServer.getConfFromContext(context));
return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
ClientProtocol.class).getProxy();
}
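A hedged sketch of consuming this method from a servlet handler; the doGet body and the "path" query parameter are illustrative assumptions, not part of the example above:

@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
    throws IOException {
  String path = request.getParameter("path"); // hypothetical parameter name
  ClientProtocol nn = createNameNodeProxy();
  HdfsFileStatus status = nn.getFileInfo(path);
  // Print the file length, or -1 if the path does not exist
  response.getWriter().println(status != null ? status.getLen() : -1);
}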
Example 3: getProtocolVersion
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(ClientProtocol.class.getName())) {
return ClientProtocol.versionID;
} else if (protocol.equals(DatanodeProtocol.class.getName())){
return DatanodeProtocol.versionID;
} else if (protocol.equals(NamenodeProtocol.class.getName())){
return NamenodeProtocol.versionID;
} else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
return RefreshAuthorizationPolicyProtocol.versionID;
} else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
return RefreshUserMappingsProtocol.versionID;
} else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) {
return RefreshCallQueueProtocol.versionID;
} else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
return GetUserMappingsProtocol.versionID;
} else if (protocol.equals(TraceAdminProtocol.class.getName())){
return TraceAdminProtocol.versionID;
} else {
throw new IOException("Unknown protocol to name node: " + protocol);
}
}
Example 4: convert
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
public static GetFsStatsResponseProto convert(long[] fsStats) {
GetFsStatsResponseProto.Builder result = GetFsStatsResponseProto
.newBuilder();
if (fsStats.length >= ClientProtocol.GET_STATS_CAPACITY_IDX + 1)
result.setCapacity(fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_USED_IDX + 1)
result.setUsed(fsStats[ClientProtocol.GET_STATS_USED_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_REMAINING_IDX + 1)
result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
result.setUnderReplicated(
fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
result.setCorruptBlocks(
fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
result.setMissingBlocks(
fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX + 1)
result.setMissingReplOneBlocks(
fsStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX]);
return result.build();
}
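The GET_STATS_*_IDX constants are fixed positions in the long[] returned by ClientProtocol.getStats(), which is why the converter above guards on array length: an older server may send fewer entries. A minimal hedged sketch of reading the array directly, assuming namenode is a connected ClientProtocol proxy:

long[] stats = namenode.getStats();
long capacity  = stats[ClientProtocol.GET_STATS_CAPACITY_IDX];  // total raw capacity
long used      = stats[ClientProtocol.GET_STATS_USED_IDX];      // raw bytes in use
long remaining = stats[ClientProtocol.GET_STATS_REMAINING_IDX]; // raw bytes free
System.out.printf("capacity=%d used=%d remaining=%d%n", capacity, used, remaining);

Example 11 below performs the inverse conversion, from the protobuf response back into this array.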
Example 5: metaSave
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
/**
* Dumps DFS data structures into the specified file.
* Usage: hdfs dfsadmin -metasave filename
* @param argv List of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException if an error occurred while accessing
* the file or path.
*/
public int metaSave(String[] argv, int idx) throws IOException {
String pathname = argv[idx];
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
proxy.getProxy().metaSave(pathname);
System.out.println("Created metasave file " + pathname + " in the log "
+ "directory of namenode " + proxy.getAddress());
}
} else {
dfs.metaSave(pathname);
System.out.println("Created metasave file " + pathname + " in the log " +
"directory of namenode " + dfs.getUri());
}
return 0;
}
Example 6: isAtLeastOneActive
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
/**
* Used to ensure that at least one of the given HA NNs is currently in the
* active state.
*
* @param namenodes list of RPC proxies for each NN to check.
* @return true if at least one NN is active, false if all are in the standby state.
* @throws IOException in the event of error.
*/
public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
throws IOException {
for (ClientProtocol namenode : namenodes) {
try {
namenode.getFileInfo("/");
return true;
} catch (RemoteException re) {
IOException cause = re.unwrapRemoteException();
if (cause instanceof StandbyException) {
// This is expected to happen for a standby NN.
} else {
throw re;
}
}
}
return false;
}
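A hedged usage sketch, assuming nn0 and nn1 are proxies already created for the two NameNodes of an HA pair (for instance via the helpers in Examples 1 and 2):

List<ClientProtocol> namenodes = Arrays.asList(nn0, nn1);
if (!isAtLeastOneActive(namenodes)) {
  throw new IOException("No active NameNode found in the nameservice");
}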
Example 7: genClientWithDummyHandler
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
private DFSClient genClientWithDummyHandler() throws IOException {
URI nnUri = dfs.getUri();
FailoverProxyProvider<ClientProtocol> failoverProxyProvider =
NameNodeProxies.createFailoverProxyProvider(conf,
nnUri, ClientProtocol.class, true, null);
InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
failoverProxyProvider, RetryPolicies
.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
Integer.MAX_VALUE,
DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
failoverProxyProvider.getInterface().getClassLoader(),
new Class[] { ClientProtocol.class }, dummyHandler);
DFSClient client = new DFSClient(null, proxy, conf, null);
return client;
}
Example 8: wait
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
static void wait(final ClientProtocol[] clients,
long expectedUsedSpace, long expectedTotalSpace) throws IOException {
LOG.info("WAIT expectedUsedSpace=" + expectedUsedSpace
+ ", expectedTotalSpace=" + expectedTotalSpace);
for(int n = 0; n < clients.length; n++) {
int i = 0;
for(boolean done = false; !done; ) {
final long[] s = clients[n].getStats();
done = s[0] == expectedTotalSpace && s[1] == expectedUsedSpace;
if (!done) {
sleep(100L);
if (++i % 100 == 0) {
LOG.warn("WAIT i=" + i + ", s=[" + s[0] + ", " + s[1] + "]");
}
}
}
}
}
Example 9: mockCreate
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
CipherSuite suite, CryptoProtocolVersion version) throws Exception {
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, new FileEncryptionInfo(suite,
version, new byte[suite.getAlgorithmBlockSize()],
new byte[suite.getAlgorithmBlockSize()],
"fakeKey", "fakeVersion"),
(byte) 0))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
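A hedged sketch of wiring the mock into a test. Mockito.mock and the enum constants CipherSuite.AES_CTR_NOPADDING and CryptoProtocolVersion.ENCRYPTION_ZONES exist in Mockito and Hadoop; choosing them as the values under test is this sketch's assumption:

ClientProtocol mcp = Mockito.mock(ClientProtocol.class);
mockCreate(mcp, CipherSuite.AES_CTR_NOPADDING,
    CryptoProtocolVersion.ENCRYPTION_ZONES);
// Any create() call on mcp now returns a status carrying a FileEncryptionInfo,
// so client-side encryption paths can be tested without a real NameNode.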
Example 10: getNNProxy
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
private static ClientProtocol getNNProxy(
Token<DelegationTokenIdentifier> token, Configuration conf)
throws IOException {
URI uri = HAUtilClient.getServiceUriFromToken(
HdfsConstants.HDFS_URI_SCHEME, token);
if (HAUtilClient.isTokenForLogicalUri(token) &&
!HAUtilClient.isLogicalUri(conf, uri)) {
// If the token is for a logical nameservice, but the configuration
// we have disagrees about that, we can't actually renew it.
// This can be the case in MR, for example, if the RM doesn't
// have all of the HA clusters configured in its configuration.
throw new IOException("Unable to map logical nameservice URI '" +
uri + "' to a NameNode. Local configuration does not have " +
"a failover proxy provider configured.");
}
ProxyAndInfo<ClientProtocol> info =
NameNodeProxiesClient.createProxyWithClientProtocol(conf, uri, null);
assert info.getDelegationTokenService().equals(token.getService()) :
"Returned service '" + info.getDelegationTokenService().toString() +
"' doesn't match expected service '" +
token.getService().toString() + "'";
return info.getProxy();
}
Example 11: convert
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
public static long[] convert(GetFsStatsResponseProto res) {
long[] result = new long[ClientProtocol.STATS_ARRAY_LENGTH];
result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] =
res.getUnderReplicated();
result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] =
res.getCorruptBlocks();
result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] =
res.getMissingBlocks();
result[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] =
res.getMissingReplOneBlocks();
result[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX] =
res.hasBlocksInFuture() ? res.getBlocksInFuture() : 0;
return result;
}
Example 12: createNameNodeProxy
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
/**
* Create a {@link NameNode} proxy from the current {@link ServletContext}.
*/
protected ClientProtocol createNameNodeProxy() throws IOException {
ServletContext context = getServletContext();
// if we are running in the Name Node, use it directly rather than via
// rpc
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
if (nn != null) {
return nn.getRpcServer();
}
InetSocketAddress nnAddr =
NameNodeHttpServer.getNameNodeAddressFromContext(context);
Configuration conf = new HdfsConfiguration(
NameNodeHttpServer.getConfFromContext(context));
return NameNodeProxies.createProxy(conf, DFSUtilClient.getNNUri(nnAddr),
ClientProtocol.class).getProxy();
}
Example 13: metaSave
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
/**
* Dumps DFS data structures into the specified file.
* Usage: hdfs dfsadmin -metasave filename
* @param argv List of command line parameters.
* @param idx The index of the command that is being processed.
* @exception IOException if an error occurred while accessing
* the file or path.
*/
public int metaSave(String[] argv, int idx) throws IOException {
String pathname = argv[idx];
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
if (isHaEnabled) {
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
proxy.getProxy().metaSave(pathname);
System.out.println("Created metasave file " + pathname + " in the log "
+ "directory of namenode " + proxy.getAddress());
}
} else {
dfs.metaSave(pathname);
System.out.println("Created metasave file " + pathname + " in the log " +
"directory of namenode " + dfs.getUri());
}
return 0;
}
Example 14: genClientWithDummyHandler
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
private DFSClient genClientWithDummyHandler() throws IOException {
URI nnUri = dfs.getUri();
FailoverProxyProvider<ClientProtocol> failoverProxyProvider =
NameNodeProxiesClient.createFailoverProxyProvider(conf,
nnUri, ClientProtocol.class, true, null);
InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
failoverProxyProvider, RetryPolicies
.failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
Integer.MAX_VALUE,
HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT,
HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT));
ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
failoverProxyProvider.getInterface().getClassLoader(),
new Class[] { ClientProtocol.class }, dummyHandler);
DFSClient client = new DFSClient(null, proxy, conf, null);
return client;
}
Example 15: mockCreate
import org.apache.hadoop.hdfs.protocol.ClientProtocol; //import the required package/class
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
CipherSuite suite, CryptoProtocolVersion version) throws Exception {
Mockito.doReturn(
new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
(short) 777), "owner", "group", new byte[0], new byte[0],
1010, 0, new FileEncryptionInfo(suite,
version, new byte[suite.getAlgorithmBlockSize()],
new byte[suite.getAlgorithmBlockSize()],
"fakeKey", "fakeVersion"),
(byte) 0, null))
.when(mcp)
.create(anyString(), (FsPermission) anyObject(), anyString(),
(EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}