

Java NameNode.getAddress Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.server.namenode.NameNode.getAddress. If you are wondering what NameNode.getAddress does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.namenode.NameNode.


The sections below present 11 code examples of NameNode.getAddress, collected from open-source projects and sorted by popularity.
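Before the examples, here is a minimal sketch (not taken from any of the projects below; the host name is a placeholder) of the two overloads most of them rely on: NameNode.getAddress(Configuration), which resolves the NameNode RPC address from fs.defaultFS, and NameNode.getAddress(URI), which resolves it from an explicit hdfs:// URI.

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class GetAddressSketch {
  public static void main(String[] args) {
    // Loads core-site.xml / hdfs-site.xml from the classpath.
    Configuration conf = new HdfsConfiguration();
    // Overload 1: resolve the RPC address from fs.defaultFS in the configuration.
    InetSocketAddress fromConf = NameNode.getAddress(conf);
    // Overload 2: resolve it from an explicit, non-logical hdfs:// URI.
    InetSocketAddress fromUri =
        NameNode.getAddress(URI.create("hdfs://namenode.example.com:8020"));
    System.out.println(fromConf + " / " + fromUri);
  }
}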

Example 1: RpcProgramMountd

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
public RpcProgramMountd(NfsConfiguration config,
    DatagramSocket registrationSocket, boolean allowInsecurePorts)
    throws IOException {
  // Note that RPC cache is not enabled
  super("mountd", "localhost", config.getInt(
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY,
      NfsConfigKeys.DFS_NFS_MOUNTD_PORT_DEFAULT), PROGRAM, VERSION_1,
      VERSION_3, registrationSocket, allowInsecurePorts);
  exports = new ArrayList<String>();
  exports.add(config.get(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY,
      NfsConfigKeys.DFS_NFS_EXPORT_POINT_DEFAULT));
  this.hostsMatcher = NfsExports.getInstance(config);
  this.mounts = Collections.synchronizedList(new ArrayList<MountEntry>());
  UserGroupInformation.setConfiguration(config);
  SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
  this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source file: RpcProgramMountd.java
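Here the NFS mount daemon wires itself to HDFS by resolving the NameNode RPC address from the gateway configuration and handing it straight to a DFSClient. The same new DFSClient(NameNode.getAddress(conf), conf) pattern recurs in Examples 5, 6, 10, and 11.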

Example 2: getProxy

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
@Override
public synchronized ProxyInfo<T> getProxy() {
  // Create a non-ha proxy if not already created.
  if (nnProxyInfo == null) {
    try {
      // Create a proxy that is not wrapped in RetryProxy
      InetSocketAddress nnAddr = NameNode.getAddress(nameNodeUri);
      nnProxyInfo = new ProxyInfo<T>(NameNodeProxies.createNonHAProxy(
          conf, nnAddr, xface, UserGroupInformation.getCurrentUser(), 
          false).getProxy(), nnAddr.toString());
    } catch (IOException ioe) {
      throw new RuntimeException(ioe);
    }
  }
  return nnProxyInfo;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 17, Source file: IPFailoverProxyProvider.java
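Note the design choice: IPFailoverProxyProvider deliberately resolves the URI to a single physical address and builds a non-HA proxy around it. With IP failover the same address is expected to move between NameNodes, so failover happens below the RPC layer rather than by switching proxies.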

Example 3: run

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
@Override
public int run(String[] args) throws Exception {
  parseArgs(args);
  parseConfAndFindOtherNN();
  NameNode.checkAllowFormat(conf);

  InetSocketAddress myAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, myAddr.getHostName());

  return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
    @Override
    public Integer run() {
      try {
        return doRun();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
    }
  });
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source file: BootstrapStandby.java
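Two details worth noting here: the keytab login uses the local NameNode's host name (for _HOST substitution in the Kerberos principal, as in Example 8), and the actual bootstrap work in doRun() is wrapped in SecurityUtil.doAsLoginUserOrFatal so it executes under the identity just obtained from the keytab, exiting fatally rather than throwing if that identity cannot be assumed.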

Example 4: createProxy

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
/**
 * Creates the namenode proxy with the passed protocol. This will handle
 * creation of either HA- or non-HA-enabled proxy objects, depending on
 * whether the provided URI is a configured logical URI.
 *
 * @param conf the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param fallbackToSimpleAuth set to true or false during calls to indicate if
 *   a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException if there is an error creating the proxy
 **/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
    URI nameNodeUri, Class<T> xface, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(conf, nameNodeUri, xface, true,
        fallbackToSimpleAuth);

  if (failoverProxyProvider == null) {
    // Non-HA case
    return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
        UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
  } else {
    // HA case
    Conf config = new Conf(conf);
    T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
        RetryPolicies.failoverOnNetworkException(
            RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
            config.maxRetryAttempts, config.failoverSleepBaseMillis,
            config.failoverSleepMaxMillis));

    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 50, Source file: NameNodeProxies.java
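A hedged usage sketch for the factory above (the URI is a placeholder; the types and signatures are those shown in this example): obtain a ClientProtocol proxy that transparently handles both the HA and non-HA cases, then release it.

import java.net.URI;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.ipc.RPC;

public class CreateProxySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    URI nnUri = URI.create("hdfs://namenode.example.com:8020"); // placeholder
    NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
        NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class,
            new AtomicBoolean(false));
    try {
      // For a non-logical URI the token service is the NameNode's host:port.
      System.out.println("Token service: " + info.getDelegationTokenService());
    } finally {
      RPC.stopProxy(info.getProxy());
    }
  }
}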

Example 5: testSetUp

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
@BeforeClass
public static void testSetUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
  cluster = new MiniDFSCluster.Builder(conf).build();
  fs = cluster.getFileSystem();
  fc = FileContext.getFileContext(cluster.getURI(0), conf);
  hftpfs = cluster.getHftpFileSystem(0);
  dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
  file1 = new Path("filestatus.dat");
  writeFile(fs, file1, 1, fileSize, blockSize);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source file: TestFileStatus.java

Example 6: testOOOWrites

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
@Test
public void testOOOWrites() throws IOException, InterruptedException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;
  RpcProgramNfs3 nfsd;
  final int bufSize = 32;
  final int numOOO = 3;
  SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
  Mockito.when(securityHandler.getUser()).thenReturn(
      System.getProperty("user.name"));
  String currentUser = System.getProperty("user.name");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(currentUser),
      "*");
  config.set(
      DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserIpConfKey(currentUser),
      "*");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  // Use ephemeral ports in case tests are running in parallel
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();

    DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
    HdfsFileStatus status = dfsClient.getFileInfo("/");
    FileHandle rootHandle = new FileHandle(status.getFileId());

    CREATE3Request createReq = new CREATE3Request(rootHandle,
        "out-of-order-write" + System.currentTimeMillis(),
        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
    XDR createXdr = new XDR();
    createReq.serialize(createXdr);
    CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    FileHandle handle = createRsp.getObjHandle();

    byte[][] oooBuf = new byte[numOOO][bufSize];
    for (int i = 0; i < numOOO; i++) {
      Arrays.fill(oooBuf[i], (byte) i);
    }

    for (int i = 0; i < numOOO; i++) {
      final long offset = (numOOO - 1 - i) * bufSize;
      WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
          WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
      XDR writeXdr = new XDR();
      writeReq.serialize(writeXdr);
      nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
          new InetSocketAddress("localhost", 1234));
    }

    waitWrite(nfsd, handle, 60000);
    READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
    XDR readXdr = new XDR();
    readReq.serialize(readXdr);
    READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", config.getInt(
            NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
            NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
    assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 76, Source file: TestWrites.java
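What the test exercises: the three buffers are written back to front (offset (numOOO - 1 - i) * bufSize), so the write at offset 0 arrives last and the NFS gateway must buffer and reorder the requests. waitWrite blocks until reassembly completes, and the READ3 at offset bufSize must return the middle buffer (oooBuf[1]) for the ordering to be judged correct.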

Example 7: createProxyWithLossyRetryHandler

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
/**
 * Generate a dummy namenode proxy instance that utilizes our hacked
 * {@link LossyRetryInvocationHandler}. Proxy instances generated using this
 * method will proactively drop RPC responses. Currently this method only
 * supports an HA setup; null will be returned if the given configuration is
 * not for HA.
 * 
 * @param config the configuration containing the required IPC
 *        properties, client failover configurations, etc.
 * @param nameNodeUri the URI pointing either to a specific NameNode
 *        or to a logical nameservice.
 * @param xface the IPC interface which should be created
 * @param numResponseToDrop The number of responses to drop for each RPC call
 * @param fallbackToSimpleAuth set to true or false during calls to indicate if
 *   a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to. Will return null if the
 *         given configuration does not support HA.
 * @throws IOException if there is an error creating the proxy
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
    Configuration config, URI nameNodeUri, Class<T> xface,
    int numResponseToDrop, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  Preconditions.checkArgument(numResponseToDrop > 0);
  AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
      createFailoverProxyProvider(config, nameNodeUri, xface, true,
        fallbackToSimpleAuth);

  if (failoverProxyProvider != null) { // HA case
    int delay = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
    int maxCap = config.getInt(
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
        DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
    int maxFailoverAttempts = config.getInt(
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = config.getInt(
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
            numResponseToDrop, failoverProxyProvider,
            RetryPolicies.failoverOnNetworkException(
                RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts, 
                Math.max(numResponseToDrop + 1, maxRetryAttempts), delay, 
                maxCap));
    
    T proxy = (T) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[] { xface }, dummyHandler);
    Text dtService;
    if (failoverProxyProvider.useLogicalURI()) {
      dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
          HdfsConstants.HDFS_URI_SCHEME);
    } else {
      dtService = SecurityUtil.buildTokenService(
          NameNode.getAddress(nameNodeUri));
    }
    return new ProxyAndInfo<T>(proxy, dtService,
        NameNode.getAddress(nameNodeUri));
  } else {
    LOG.warn("Currently creating proxy using " +
    		"LossyRetryInvocationHandler requires NN HA setup");
    return null;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 70, Source file: NameNodeProxies.java
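Note the retry arithmetic: the policy uses Math.max(numResponseToDrop + 1, maxRetryAttempts) so that the client is guaranteed at least one more attempt than the number of responses the handler will deliberately drop; the dropped responses alone can never exhaust the retry budget.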

Example 8: loginAsFCUser

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
@Override
public void loginAsFCUser() throws IOException {
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
      DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 7, Source file: DFSZKFailoverController.java
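Passing socAddr.getHostName() into SecurityUtil.login matters in secure clusters: the host name is substituted for the _HOST token in the configured Kerberos principal (e.g. nn/_HOST@REALM), so the failover controller logs in as the principal of its local NameNode.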

Example 9: getProtocolAddress

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
@Override
protected InetSocketAddress getProtocolAddress(Configuration conf)
    throws IOException {
  return NameNode.getAddress(conf);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 6, Source file: GetGroups.java

Example 10: testDatanodePeersXceiver

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
@Test (timeout=600000)
// Tests that the DataNode's xceiver server keeps correct peer-to-xceiver pairs for sending OOB messages
public void testDatanodePeersXceiver() throws Exception {
  try {
    startCluster();

    // Create files in DFS.
    String testFile1 = "/" + GenericTestUtils.getMethodName() + ".01.dat";
    String testFile2 = "/" + GenericTestUtils.getMethodName() + ".02.dat";
    String testFile3 = "/" + GenericTestUtils.getMethodName() + ".03.dat";

    DFSClient client1 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client2 = new DFSClient(NameNode.getAddress(conf), conf);
    DFSClient client3 = new DFSClient(NameNode.getAddress(conf), conf);

    DFSOutputStream s1 = (DFSOutputStream) client1.create(testFile1, true);
    DFSOutputStream s2 = (DFSOutputStream) client2.create(testFile2, true);
    DFSOutputStream s3 = (DFSOutputStream) client3.create(testFile3, true);

    byte[] toWrite = new byte[1024*1024*8];
    Random rb = new Random(1111);
    rb.nextBytes(toWrite);
    s1.write(toWrite, 0, 1024*1024*8);
    s1.flush();
    s2.write(toWrite, 0, 1024*1024*8);
    s2.flush();
    s3.write(toWrite, 0, 1024*1024*8);
    s3.flush();       

    // The peer map and the peer-xceiver map should stay in sync.
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());
    s1.close();
    s2.close();
    s3.close();
    // After closing the streams, the two maps should still agree.
    assertTrue(dn0.getXferServer().getNumPeers() == dn0.getXferServer()
        .getNumPeersXceiver());
    client1.close();
    client2.close();
    client3.close();      
  } finally {
    shutdownCluster();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 44, Source file: TestDataNodeRollingUpgrade.java

Example 11: DFSClient

import org.apache.hadoop.hdfs.server.namenode.NameNode; // import the class that provides the method
/**
 * Same as this(NameNode.getAddress(conf), conf);
 * @see #DFSClient(InetSocketAddress, Configuration)
 * @deprecated Deprecated at 0.21
 */
@Deprecated
public DFSClient(Configuration conf) throws IOException {
  this(NameNode.getAddress(conf), conf);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 10, Source file: DFSClient.java
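Since that convenience constructor is deprecated, here is a minimal sketch of the equivalent non-deprecated call, resolving the address explicitly as the other examples do (assumes fs.defaultFS in the loaded configuration names the target NameNode):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class DfsClientSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    // Equivalent to the deprecated DFSClient(conf): resolve the address first.
    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    client.close();
  }
}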


Note: The org.apache.hadoop.hdfs.server.namenode.NameNode.getAddress examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not republish without permission.