

Java DFSUtil Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.DFSUtil. If you are wondering what the DFSUtil class does, how to use it, or where to find usage examples, the curated code examples below may help you.


The DFSUtil class belongs to the org.apache.hadoop.hdfs package. A total of 15 DFSUtil code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
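Before the individual examples, here is a minimal standalone sketch (the class name DFSUtilRoundTrip is hypothetical) of the two DFSUtil string/byte helpers that recur in Examples 4, 7, and 9:

import org.apache.hadoop.hdfs.DFSUtil;

public class DFSUtilRoundTrip {
  public static void main(String[] args) {
    // string2Bytes converts a path component to the byte[] form stored in INodes.
    byte[] name = DFSUtil.string2Bytes("file0");
    // bytes2String converts it back, e.g. for log and error messages.
    String restored = DFSUtil.bytes2String(name);
    System.out.println(restored); // prints "file0"
  }
}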

Example 1: start

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
void start() throws IOException {
  final InetSocketAddress httpAddr = getHttpAddress(conf);

  final String httpsAddrString = conf.get(
      NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY,
      NfsConfigKeys.NFS_HTTPS_ADDRESS_DEFAULT);
  InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

  HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
      httpAddr, httpsAddr, "nfs3",
      NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY,
      NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY);

  this.httpServer = builder.build();
  this.httpServer.start();
  
  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  int connIdx = 0;
  if (policy.isHttpEnabled()) {
    infoPort = httpServer.getConnectorAddress(connIdx++).getPort();
  }

  if (policy.isHttpsEnabled()) {
    infoSecurePort = httpServer.getConnectorAddress(connIdx).getPort();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: Nfs3HttpServer.java

Example 2: testGetReplWorkMultiplier

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
/**
 * Verifies that the value returned by
 * DFSUtil.getReplWorkMultiplier() is positive,
 * and that an IllegalArgumentException is thrown
 * when a non-positive value is configured.
 */
@Test
public void testGetReplWorkMultiplier() {
  Configuration conf = new Configuration();
  int blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  assertTrue(blocksReplWorkMultiplier > 0);

  conf.set(DFSConfigKeys.
      DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"3");
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
  assertEquals(3, blocksReplWorkMultiplier);
  
  conf.set(DFSConfigKeys.
      DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,"-1");
  exception.expect(IllegalArgumentException.class);
  blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source: TestReplicationPolicy.java
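To apply the same setting outside of a test, a minimal sketch might look like this (the class name ReplWorkMultiplierDemo is hypothetical; the config key constant is the one used in the test above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;

public class ReplWorkMultiplierDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Raise the per-iteration replication work multiplier above its default.
    conf.setInt(DFSConfigKeys.
        DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION, 4);
    // getReplWorkMultiplier validates the value; values <= 0 throw
    // IllegalArgumentException, as the test above demonstrates.
    System.out.println(DFSUtil.getReplWorkMultiplier(conf)); // prints 4
  }
}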

Example 3: bestNode

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
private static DatanodeInfo bestNode(DatanodeInfo[] nodes, boolean doRandom)
    throws IOException {
  if (nodes == null || nodes.length == 0) {
    throw new IOException("No nodes contain this block");
  }
  int l = 0;
  while (l < nodes.length && !nodes[l].isDecommissioned()) {
    ++l;
  }

  if (l == 0) {
    throw new IOException("No active nodes contain this block");
  }

  int index = doRandom ? DFSUtil.getRandom().nextInt(l) : 0;
  return nodes[index];
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: JspHelper.java

Example 4: createINodeFiles

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
/** 
 * Creates the required number of files with one block each.
 * @param nCount Number of INodes to create
 * @param fileNamePrefix Prefix used to build each file's local name
 * @return Array of INode files
 */
private INodeFile[] createINodeFiles(int nCount, String fileNamePrefix) {
  if(nCount <= 0)
    return new INodeFile[1];

  replication = 3;
  preferredBlockSize = 128 * 1024 * 1024;
  INodeFile[] iNodes = new INodeFile[nCount];
  for (int i = 0; i < nCount; i++) {
    iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
        preferredBlockSize, (byte)0);
    iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
    BlockInfoContiguous newblock = new BlockInfoContiguous(replication);
    iNodes[i].addBlock(newblock);
  }
  
  return iNodes;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 23, Source: TestINodeFile.java

Example 5: getFileInfo

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
/**
 * Get the file info for a specific file.
 *
 * @param srcArg The string representation of the path to the file
 * @param resolveLink whether to throw UnresolvedLinkException
 *        if src refers to a symlink
 *
 * @return object containing information regarding the file
 *         or null if file not found
 */
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String srcArg, boolean resolveLink)
    throws IOException {
  String src = srcArg;
  if (!DFSUtil.isValidName(src)) {
    throw new InvalidPathException("Invalid file name: " + src);
  }
  FSPermissionChecker pc = fsd.getPermissionChecker();
  byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
  src = fsd.resolvePath(pc, src, pathComponents);
  final INodesInPath iip = fsd.getINodesInPath(src, resolveLink);
  boolean isSuperUser = true;
  if (fsd.isPermissionEnabled()) {
    fsd.checkPermission(pc, iip, false, null, null, null, null, false);
    isSuperUser = pc.isSuperUser();
  }
  return getFileInfo(fsd, src, resolveLink,
      FSDirectory.isReservedRawName(srcArg), isSuperUser);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source: FSDirStatAndListingOp.java

Example 6: submitAllocate

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
Future<byte[]> submitAllocate() {
  count.incrementAndGet();

  final Future<byte[]> f = pool.submit(new Callable<byte[]>() {
    @Override
    public byte[] call() throws Exception {
      final int lower = maxArrayLength == ByteArrayManager.MIN_ARRAY_LENGTH?
          0: maxArrayLength >> 1;
      final int arrayLength = DFSUtil.getRandom().nextInt(
          maxArrayLength - lower) + lower + 1;
      final byte[] array = bam.newByteArray(arrayLength);
      try {
        Assert.assertEquals("arrayLength=" + arrayLength + ", lower=" + lower,
            maxArrayLength, array.length);
      } catch(AssertionError e) {
        assertionErrors.add(e);
      }
      return array;
    }
  });
  synchronized (arrays) {
    arrays.add(f);
  }
  return f;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: TestByteArrayManager.java

Example 7: testHdfsFileStatus

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
@Test
public void testHdfsFileStatus() throws IOException {
  final long now = Time.now();
  final String parent = "/dir";
  final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
      now, now + 10, new FsPermission((short) 0644), "user", "group",
      DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
      INodeId.GRANDFATHER_INODE_ID, 0, null, (byte) 0);
  final FileStatus fstatus = toFileStatus(status, parent);
  System.out.println("status  = " + status);
  System.out.println("fstatus = " + fstatus);
  final String json = JsonUtil.toJsonString(status, true);
  System.out.println("json    = " + json.replace(",", ",\n  "));
  ObjectReader reader = new ObjectMapper().reader(Map.class);
  final HdfsFileStatus s2 =
      JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
  final FileStatus fs2 = toFileStatus(s2, parent);
  System.out.println("s2      = " + s2);
  System.out.println("fs2     = " + fs2);
  Assert.assertEquals(fstatus, fs2);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source: TestJsonUtil.java

Example 8: handshake

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
private NamespaceInfo handshake(Configuration conf) throws IOException {
  // connect to name node
  InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
  this.namenode = NameNodeProxies.createNonHAProxy(conf, nnAddress,
      NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
      true).getProxy();
  this.nnRpcAddress = NetUtils.getHostPortString(nnAddress);
  this.nnHttpAddress = DFSUtil.getInfoServer(nnAddress, conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  // get version and id info from the name-node
  NamespaceInfo nsInfo = null;
  while(!isStopRequested()) {
    try {
      nsInfo = handshake(namenode);
      break;
    } catch(SocketTimeoutException e) {  // name-node is busy
      LOG.info("Problem connecting to server: " + nnAddress);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
        LOG.warn("Encountered exception ", ie);
      }
    }
  }
  return nsInfo;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: BackupNode.java

Example 9: verifyMaxComponentLength

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
/**
 * Verify child's name for fs limit.
 *
 * @param childName byte[] containing new child name
 * @param parentPath String containing parent path
 * @throws PathComponentTooLongException child's name is too long.
 */
void verifyMaxComponentLength(byte[] childName, String parentPath)
    throws PathComponentTooLongException {
  if (maxComponentLength == 0) {
    return;
  }

  final int length = childName.length;
  if (length > maxComponentLength) {
    final PathComponentTooLongException e = new PathComponentTooLongException(
        maxComponentLength, length, parentPath,
        DFSUtil.bytes2String(childName));
    if (namesystem.isImageLoaded()) {
      throw e;
    } else {
      // Do not throw if edits log is still being processed
      NameNode.LOG.error("ERROR in FSDirectory.verifyINodeName", e);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: FSDirectory.java

Example 10: convert

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
  if (entry == null) {
    return null;
  }
  ByteString sourcePath = ByteString
      .copyFrom(entry.getSourcePath() == null ? DFSUtil.EMPTY_BYTES : entry
          .getSourcePath());
  String modification = entry.getType().getLabel();
  SnapshotDiffReportEntryProto.Builder builder = SnapshotDiffReportEntryProto
      .newBuilder().setFullpath(sourcePath)
      .setModificationLabel(modification);
  if (entry.getType() == DiffType.RENAME) {
    ByteString targetPath = ByteString
        .copyFrom(entry.getTargetPath() == null ? DFSUtil.EMPTY_BYTES : entry
            .getTargetPath());
    builder.setTargetPath(targetPath);
  }
  return builder.build();
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: PBHelper.java

Example 11: scheduleNextBlockReport

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
/**
 * Schedule the next block report after the block report interval. If the
 * current block report was delayed then the next block report is sent per
 * the original schedule.
 * Numerical overflow is possible here.
 */
void scheduleNextBlockReport() {
  // If we have sent the first set of block reports, then wait a random
  // time before we start the periodic block reports.
  if (resetBlockReportTime) {
    nextBlockReportTime = monotonicNow() +
        DFSUtil.getRandom().nextInt((int)(blockReportIntervalMs));
    resetBlockReportTime = false;
  } else {
    /* say the last block report was at 8:20:14. The current report
     * should have started around 9:20:14 (default 1 hour interval).
     * If current time is :
     *   1) normal like 9:20:18, next report should be at 10:20:14
     *   2) unexpected like 11:35:43, next report should be at 12:20:14
     */
    nextBlockReportTime +=
          (((monotonicNow() - nextBlockReportTime + blockReportIntervalMs) /
              blockReportIntervalMs)) * blockReportIntervalMs;
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 26, Source: BPServiceActor.java
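The catch-up arithmetic in the else branch is easier to follow with concrete numbers. This standalone sketch (hypothetical class name, plain long values instead of monotonicNow()) replays the 8:20:14 scenario from the comment:

public class BlockReportScheduleDemo {
  public static void main(String[] args) {
    final long intervalMs = 60L * 60 * 1000;     // 1 hour, the default interval
    long nextBlockReportTime = 0L;               // the missed slot, i.e. "9:20:14"
    final long now = (2 * 60 + 15) * 60 * 1000L; // 2h15m late, i.e. "11:35:43"
    // Same formula as above: skip all fully missed intervals while
    // keeping the next report aligned with the original schedule.
    nextBlockReportTime +=
        ((now - nextBlockReportTime + intervalMs) / intervalMs) * intervalMs;
    System.out.println(nextBlockReportTime / intervalMs); // prints 3, i.e. "12:20:14"
  }
}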

Example 12: main

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
/**
 * Entry point: print usage for help arguments, otherwise create and run the NameNode.
 */
public static void main(String argv[]) throws Exception {
  if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
    System.exit(0);
  }

  try {
    StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
    NameNode namenode = createNameNode(argv, null);
    if (namenode != null) {
      namenode.join();
    }
  } catch (Throwable e) {
    LOG.error("Failed to start namenode.", e);
    terminate(1, e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: NameNode.java

Example 13: initHAConf

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
private Configuration initHAConf(URI journalURI, Configuration conf) {
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      journalURI.toString());
  
  String address1 = "127.0.0.1:" + basePort;
  String address2 = "127.0.0.1:" + (basePort + 2);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      NAMESERVICE, NN2), address2);
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE),
      NN1 + "," + NN2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + NAMESERVICE,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE);
  
  return conf;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 20, Source: MiniQJMHACluster.java
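For reference, DFSUtil.addKeySuffixes joins the base key and its suffixes with dots, which is how the calls above generate per-nameservice, per-namenode keys. A minimal sketch (hypothetical class name):

import org.apache.hadoop.hdfs.DFSUtil;

public class AddKeySuffixesDemo {
  public static void main(String[] args) {
    // Produces "dfs.namenode.rpc-address.ns1.nn1".
    System.out.println(
        DFSUtil.addKeySuffixes("dfs.namenode.rpc-address", "ns1", "nn1"));
  }
}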

Example 14: rollingRollback

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
/** rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 19, Source: FSImage.java

Example 15: run

import org.apache.hadoop.hdfs.DFSUtil; // import the required package/class
@Override
public void run() {
  for(int i = 0; i < n; i++) {
    final boolean isAllocate = DFSUtil.getRandom().nextInt(NUM_RUNNERS) < p;
    if (isAllocate) {
      submitAllocate();
    } else {
      try {
        final Future<byte[]> f = removeFirst();
        if (f != null) {
          submitRecycle(f.get());
        }
      } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(this + " has " + e);
      }
    }

    if ((i & 0xFF) == 0) {
      sleepMs(100);
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 24, Source: TestByteArrayManager.java


Note: The org.apache.hadoop.hdfs.DFSUtil class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code belongs to the original authors. Please consult the corresponding project's license before redistributing or using the code. Do not reproduce without permission.