当前位置: 首页>>代码示例>>Java>>正文


Java NameNodeProxies类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.NameNodeProxies的典型用法代码示例。如果您正苦于以下问题:Java NameNodeProxies类的具体用法?Java NameNodeProxies怎么用?Java NameNodeProxies使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。


NameNodeProxies类属于org.apache.hadoop.hdfs包,在下文中一共展示了NameNodeProxies类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getProxy

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
@Override
public synchronized ProxyInfo<T> getProxy() {
  // Hand back the cached proxy when it has already been built.
  if (nnProxyInfo != null) {
    return nnProxyInfo;
  }
  try {
    // First call: resolve the target address and build a plain proxy
    // (deliberately NOT wrapped in RetryProxy).
    InetSocketAddress address = NameNode.getAddress(nameNodeUri);
    nnProxyInfo = new ProxyInfo<T>(
        NameNodeProxies.createNonHAProxy(conf, address, xface,
            UserGroupInformation.getCurrentUser(), false).getProxy(),
        address.toString());
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  return nnProxyInfo;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:17,代码来源:IPFailoverProxyProvider.java

示例2: createNameNodeProxy

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext ctx = getServletContext();
  // Prefer the in-process NameNode when this servlet runs inside it —
  // no RPC hop needed in that case.
  NameNode localNameNode = NameNodeHttpServer.getNameNodeFromContext(ctx);
  if (localNameNode != null) {
    return localNameNode.getRpcServer();
  }
  // Otherwise build a remote RPC proxy from the servlet's configuration.
  Configuration proxyConf =
      new HdfsConfiguration(NameNodeHttpServer.getConfFromContext(ctx));
  InetSocketAddress rpcAddress =
      NameNodeHttpServer.getNameNodeAddressFromContext(ctx);
  return NameNodeProxies
      .createProxy(proxyConf, NameNode.getUri(rpcAddress), ClientProtocol.class)
      .getProxy();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:DfsServlet.java

示例3: handshake

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Performs the initial handshake with the active name-node: builds the RPC
 * proxy, records the name-node's RPC and HTTP addresses, then polls for the
 * namespace info until it is obtained or a stop is requested.
 *
 * @param conf configuration used to locate and connect to the name-node
 * @return the name-node's {@link NamespaceInfo}, or {@code null} if a stop
 *         was requested before the handshake succeeded
 * @throws IOException if proxy creation or address lookup fails
 */
private NamespaceInfo handshake(Configuration conf) throws IOException {
  // connect to name node
  InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
  this.nnHttpAddress = DFSUtil.getInfoServer(nnAddress, conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  // get version and id info from the name-node
  NamespaceInfo nsInfo = null;
  while(!isStopRequested()) {
    try {
      nsInfo = handshake(namenode);
      break;
    } catch(SocketTimeoutException e) {  // name-node is busy
      LOG.info("Problem connecting to server: " + nnAddress);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        // Bug fix: the original logged the SocketTimeoutException ("e")
        // here instead of the InterruptedException ("ie") actually caught.
        LOG.warn("Encountered exception ", ie);
        // NOTE(review): the interrupt status is swallowed and the loop
        // keeps retrying; confirm whether the thread should re-interrupt
        // itself and bail out instead.
      }
    }
  }
  return nsInfo;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:27,代码来源:BackupNode.java

示例4: EditLogBackupOutputStream

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Creates an output stream that forwards edit-log records to a backup node
 * over RPC, buffering them locally first.
 *
 * @param bnReg registration of the backup node; supplies its RPC address
 * @param journalInfo identifies the journal on the active name-node
 * @throws IOException if the RPC proxy to the backup node cannot be created
 */
EditLogBackupOutputStream(NamenodeRegistration bnReg, // backup node
                          JournalInfo journalInfo) // active name-node
throws IOException {
  super();
  this.bnRegistration = bnReg;
  this.journalInfo = journalInfo;
  InetSocketAddress bnAddress =
    NetUtils.createSocketAddr(bnRegistration.getAddress());
  try {
    // Single fixed endpoint, so a non-HA proxy suffices; errors are logged
    // with the target address and rethrown to the caller.
    this.backupNode = NameNodeProxies.createNonHAProxy(new HdfsConfiguration(),
        bnAddress, JournalProtocol.class, UserGroupInformation.getCurrentUser(),
        true).getProxy();
  } catch(IOException e) {
    Storage.LOG.error("Error connecting to: " + bnAddress, e);
    throw e;
  }
  // Double buffer plus an output buffer, both sized DEFAULT_BUFFER_SIZE —
  // presumably so edits accumulate while a previous batch is being sent.
  this.doubleBuf = new EditsDoubleBuffer(DEFAULT_BUFFER_SIZE);
  this.out = new DataOutputBuffer(DEFAULT_BUFFER_SIZE);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:20,代码来源:EditLogBackupOutputStream.java

示例5: genClientWithDummyHandler

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Builds a {@link DFSClient} whose ClientProtocol calls are routed through a
 * DummyRetryInvocationHandler layered on the configured failover proxy
 * provider, for exercising retry/failover behavior in tests.
 *
 * @return a DFSClient backed by the instrumented dynamic proxy
 * @throws IOException if the failover proxy provider cannot be created
 */
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          nnUri, ClientProtocol.class, true, null);
  // Policy: try once per target, fail over on network exceptions up to
  // Integer.MAX_VALUE times with the default sleep bounds.
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  // JDK dynamic proxy: every ClientProtocol call goes through dummyHandler.
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:19,代码来源:TestRetryCacheWithHA.java

示例6: getProxy

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
@Override
public synchronized ProxyInfo<T> getProxy() {
  // Return the cached proxy if one was already built.
  if (nnProxyInfo != null) {
    return nnProxyInfo;
  }
  try {
    // First call: resolve the name-node address and create a plain proxy
    // (deliberately NOT wrapped in RetryProxy).
    InetSocketAddress address = DFSUtilClient.getNNAddress(nameNodeUri);
    nnProxyInfo = new ProxyInfo<T>(
        NameNodeProxies.createNonHAProxy(conf, address, xface,
            UserGroupInformation.getCurrentUser(), false).getProxy(),
        address.toString());
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
  return nnProxyInfo;
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:17,代码来源:IPFailoverProxyProvider.java

示例7: createNameNodeProxy

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext ctx = getServletContext();
  // Prefer the in-process NameNode when this servlet runs inside it —
  // no RPC hop needed in that case.
  NameNode localNameNode = NameNodeHttpServer.getNameNodeFromContext(ctx);
  if (localNameNode != null) {
    return localNameNode.getRpcServer();
  }
  // Otherwise build a remote RPC proxy from the servlet's configuration.
  Configuration proxyConf =
      new HdfsConfiguration(NameNodeHttpServer.getConfFromContext(ctx));
  InetSocketAddress rpcAddress =
      NameNodeHttpServer.getNameNodeAddressFromContext(ctx);
  return NameNodeProxies
      .createProxy(proxyConf, DFSUtilClient.getNNUri(rpcAddress),
          ClientProtocol.class)
      .getProxy();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:19,代码来源:DfsServlet.java

示例8: getProxy

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Lazily initialize the RPC proxy object.
 */
@SuppressWarnings("unchecked")
@Override
public synchronized T getProxy() {
  AddressRpcProxyPair pair = proxies.get(currentProxyIndex);
  if (pair.namenode != null) {
    return (T) pair.namenode;
  }
  try {
    // First use of this name-node: build a plain non-HA proxy for it.
    pair.namenode = NameNodeProxies.createNonHAProxy(conf,
        pair.address, xface, ugi, false).getProxy();
  } catch (IOException e) {
    LOG.error("Failed to create RPC proxy to NameNode", e);
    throw new RuntimeException(e);
  }
  return (T) pair.namenode;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:19,代码来源:ConfiguredFailoverProxyProvider.java

示例9: handshake

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Performs the initial handshake with the active name-node: builds the RPC
 * proxy, records the name-node's RPC and HTTP addresses, then polls for the
 * namespace info until it is obtained or a stop is requested.
 *
 * @param conf configuration used to locate and connect to the name-node
 * @return the name-node's {@link NamespaceInfo}, or {@code null} if a stop
 *         was requested before the handshake succeeded
 * @throws IOException if proxy creation or address lookup fails
 */
private NamespaceInfo handshake(Configuration conf) throws IOException {
  // connect to name node
  InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
  this.nnHttpAddress = NetUtils.getHostPortString(super.getHttpServerAddress(conf));
  // get version and id info from the name-node
  NamespaceInfo nsInfo = null;
  while(!isStopRequested()) {
    try {
      nsInfo = handshake(namenode);
      break;
    } catch(SocketTimeoutException e) {  // name-node is busy
      LOG.info("Problem connecting to server: " + nnAddress);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        // Bug fix: the original logged the SocketTimeoutException ("e")
        // here instead of the InterruptedException ("ie") actually caught.
        LOG.warn("Encountered exception ", ie);
        // NOTE(review): the interrupt status is swallowed and the loop
        // keeps retrying; confirm whether the thread should re-interrupt
        // itself and bail out instead.
      }
    }
  }
  return nsInfo;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:26,代码来源:BackupNode.java

示例10: refreshServiceAcl

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Refresh the authorization policy on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  Configuration conf = getConf();
  // This RPC must authenticate against the NameNode's service principal,
  // so point the client-side security config at it.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));
  // Build a proxy for the refresh protocol on the default name-node
  // and issue the refresh call.
  RefreshAuthorizationPolicyProtocol refreshProtocol = NameNodeProxies
      .createProxy(conf, FileSystem.getDefaultUri(conf),
          RefreshAuthorizationPolicyProtocol.class)
      .getProxy();
  refreshProtocol.refreshServiceAcl();
  return 0;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:26,代码来源:DFSAdmin.java

示例11: refreshUserToGroupsMappings

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Refresh the user-to-groups mappings on the {@link NameNode}.
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshUserToGroupsMappings() throws IOException {
  Configuration conf = getConf();
  // This RPC must authenticate against the NameNode's service principal,
  // so point the client-side security config at it.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));
  // Build a proxy for the refresh protocol on the default name-node
  // and issue the refresh call.
  RefreshUserMappingsProtocol refreshProtocol = NameNodeProxies
      .createProxy(conf, FileSystem.getDefaultUri(conf),
          RefreshUserMappingsProtocol.class)
      .getProxy();
  refreshProtocol.refreshUserToGroupsMappings();
  return 0;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:26,代码来源:DFSAdmin.java

示例12: refreshSuperUserGroupsConfiguration

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Refresh the superuser proxy-groups configuration on the {@link NameNode}
 * (refreshSuperUserGroupsConfiguration).
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshSuperUserGroupsConfiguration() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();

  // for security authorization
  // server principal for this call 
  // should be NAMENODE's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));

  // Create the client
  RefreshUserMappingsProtocol refreshProtocol =
    NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
        RefreshUserMappingsProtocol.class).getProxy();

  // Refresh the superuser proxy-groups configuration (the original comment
  // here incorrectly said "user-to-groups mappings")
  refreshProtocol.refreshSuperUserGroupsConfiguration();

  return 0;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:26,代码来源:DFSAdmin.java

示例13: genClientWithDummyHandler

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Builds a {@link DFSClient} whose ClientProtocol calls are routed through a
 * DummyRetryInvocationHandler layered on the configured failover proxy
 * provider, for exercising retry/failover behavior in tests.
 *
 * @return a DFSClient backed by the instrumented dynamic proxy
 * @throws IOException if the failover proxy provider cannot be created
 */
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  // This older API resolves the provider class first, then instantiates it.
  Class<FailoverProxyProvider<ClientProtocol>> failoverProxyProviderClass = 
      NameNodeProxies.getFailoverProxyProviderClass(conf, nnUri, 
          ClientProtocol.class);
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          failoverProxyProviderClass, ClientProtocol.class, nnUri);
  // Policy: try once per target, fail over on network exceptions up to
  // Integer.MAX_VALUE times with the default sleep bounds.
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  // JDK dynamic proxy: every ClientProtocol call goes through dummyHandler.
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:22,代码来源:TestRetryCacheWithHA.java

示例14: createNameNodeProxy

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}.
 * Uses the in-process NameNode directly when available; otherwise builds a
 * ClientProtocol RPC proxy from the address and configuration stored in the
 * servlet context.
 *
 * @return a ClientProtocol handle for the NameNode
 * @throws IOException if the RPC proxy cannot be created
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // if we are running in the Name Node, use it directly rather than via 
  // rpc
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
      NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf =
      new HdfsConfiguration(NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies
      .createProxy(conf, NameNode.getUri(nnAddr), ClientProtocol.class)
      .getProxy();
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:20,代码来源:DfsServlet.java

示例15: refreshServiceAcl

import org.apache.hadoop.hdfs.NameNodeProxies; //导入依赖的package包/类
/**
 * Refresh the authorization policy on the {@link NameNode}.
 *
 * @return exitcode 0 on success, non-zero on failure
 * @throws IOException
 */
public int refreshServiceAcl() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, ""));

  // Create the client: a proxy for the refresh protocol addressed at the
  // default (fs.defaultFS) name-node.
  RefreshAuthorizationPolicyProtocol refreshProtocol = NameNodeProxies
      .createProxy(conf, FileSystem.getDefaultUri(conf),
          RefreshAuthorizationPolicyProtocol.class).getProxy();
  
  // Refresh the authorization policy in-effect
  refreshProtocol.refreshServiceAcl();
  
  return 0;
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:27,代码来源:DFSAdmin.java


注:本文中的org.apache.hadoop.hdfs.NameNodeProxies类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。