

Java HdfsConstants Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.HdfsConstants. If you are wondering what HdfsConstants is for, or how and where to use it, the curated examples below should help.


The HdfsConstants class belongs to the org.apache.hadoop.hdfs.protocol package. The sections below present 15 code examples of the class, sorted by popularity.
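
Before working through the examples, here is a minimal usage sketch of a few of the most commonly used HdfsConstants constants from a client application. It is illustrative only: the /data path is hypothetical, and a cluster reachable through fs.defaultFS is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class HdfsConstantsUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at a running HDFS cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // Limit /data to 10 namespace entries while leaving the space quota untouched.
    dfs.setQuota(new Path("/data"), 10, HdfsConstants.QUOTA_DONT_SET);

    // Enter and then leave safe mode through the SafeModeAction enum.
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);

    // ".snapshot" is the metadata directory under every snapshottable path.
    Path snapshotDir = new Path("/data", HdfsConstants.DOT_SNAPSHOT_DIR);
    System.out.println("Snapshot metadata dir: " + snapshotDir);
  }
}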

Example 1: testContentSummaryWithoutQuotaByStorageType

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
@Test(timeout = 60000)
public void testContentSummaryWithoutQuotaByStorageType() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(!fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify getContentSummary without any quota set
  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestQuotaByStorageType.java
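
As a follow-up to this example, the same ContentSummary can be queried for every storage type rather than SSD and DISK individually. A short sketch, reusing the cs handle from the test above:

  // Print the consumed bytes for each storage type tracked by the summary.
  for (StorageType t : StorageType.values()) {
    System.out.println(t + " consumed: " + cs.getTypeConsumed(t));
  }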

Example 2: testEditsLogOldRename

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
/**
 * Perform operations such as setting quota, deletion of files, rename and
 * ensure system can apply edits log during startup.
 */
@Test
public void testEditsLogOldRename() throws Exception {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  createFile(src1);
  fs.mkdirs(dst1.getParent());
  createFile(dst1);
  
  // Set a namespace quota so that dst1's parent cannot accept new files/directories
  fs.setQuota(dst1.getParent(), 2, HdfsConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  oldRename(src1, dst1, true, false);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = cluster.getFileSystem();
  src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
  dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
  Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  Assert.assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: TestHDFSFileContextMainOperations.java

Example 3: createSnapshot

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
private Path createSnapshot() throws IOException {
  LOG.debug("Source table {}.{} has its data located at {}", sourceTable.getDbName(), sourceTable.getTableName(),
      sourceDataPath);

  FileSystem fileSystem = fileSystemFactory.get(sourceDataPath, sourceHiveConf);
  Path snapshotMetaDataPath = new Path(sourceDataPath, HdfsConstants.DOT_SNAPSHOT_DIR);
  Path resolvedLocation = sourceDataPath;
  if (fileSystem.exists(snapshotMetaDataPath)) {
    if (snapshotsDisabled) {
      LOG.info("Path {} can be snapshot, but feature has been disabled.", sourceDataPath);
    } else {
      LOG.debug("Creating source data snapshot: {}, {}", sourceDataPath, eventId);
      // fileSystem.createSnapshot does not return a fully qualified URI.
      resolvedLocation = fileSystem.makeQualified(fileSystem.createSnapshot(sourceDataPath, eventId));
      snapshotPath = resolvedLocation;
    }
  } else {
    LOG.debug("Snapshots not enabled on source location: {}", sourceDataPath);
  }
  return resolvedLocation;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 22, Source: HdfsSnapshotLocationManager.java
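
The existence check on the snapshot metadata directory seen above is also a useful standalone idiom for detecting whether a path is snapshottable. A sketch, assuming a FileSystem handle fs and a Path p:

  // A path is snapshottable iff its ".snapshot" metadata directory exists.
  boolean snapshottable = fs.exists(new Path(p, HdfsConstants.DOT_SNAPSHOT_DIR));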

Example 4: setUp

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
@Before
public void setUp() throws IOException {
  mockDnConf = mock(DNConf.class);
  doReturn(VersionInfo.getVersion()).when(mockDnConf).getMinimumNameNodeVersion();
  
  DataNode mockDN = mock(DataNode.class);
  doReturn(true).when(mockDN).shouldRun();
  doReturn(mockDnConf).when(mockDN).getDnConf();
  
  BPOfferService mockBPOS = mock(BPOfferService.class);
  doReturn(mockDN).when(mockBPOS).getDataNode();
  
  actor = new BPServiceActor(INVALID_ADDR, mockBPOS);

  fakeNsInfo = mock(NamespaceInfo.class);
  // Return a good software version.
  doReturn(VersionInfo.getVersion()).when(fakeNsInfo).getSoftwareVersion();
  // Return a good layout version for now.
  doReturn(HdfsConstants.NAMENODE_LAYOUT_VERSION).when(fakeNsInfo)
      .getLayoutVersion();
  
  DatanodeProtocolClientSideTranslatorPB fakeDnProt = 
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  when(fakeDnProt.versionRequest()).thenReturn(fakeNsInfo);
  actor.setNameNode(fakeDnProt);
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestDatanodeRegister.java

Example 5: createSocketForPipeline

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
/**
 * Create a socket for a write pipeline
 * @param first the first datanode 
 * @param length the pipeline length
 * @param client client
 * @return the socket connected to the first datanode
 */
static Socket createSocketForPipeline(final DatanodeInfo first,
    final int length, final DFSClient client) throws IOException {
  final String dnAddr = first.getXferAddr(
      client.getConf().connectToDnViaHostname);
  if (DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
  }
  final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
  final Socket sock = client.socketFactory.createSocket();
  final int timeout = client.getDatanodeReadTimeout(length);
  NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), client.getConf().socketTimeout);
  sock.setSoTimeout(timeout);
  sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  if(DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
  }
  return sock;
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: DFSOutputStream.java

Example 6: testIsClusterUpAfterShutdown

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
@Test(timeout=100000)
public void testIsClusterUpAfterShutdown() throws Throwable {
  Configuration conf = new HdfsConfiguration();
  File testDataCluster4 = new File(testDataPath, CLUSTER_4);
  String c4Path = testDataCluster4.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c4Path);
  MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build();
  try {
    DistributedFileSystem dfs = cluster4.getFileSystem();
    dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
    cluster4.shutdown();
  } finally {
    while(cluster4.isClusterUp()){
      Thread.sleep(1000);
    }  
  }
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: TestMiniDFSCluster.java
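
Besides SAFEMODE_ENTER, the SafeModeAction enum supports querying the current state without changing it. A minimal sketch, assuming a dfs handle like the one in the test:

  // Query (without changing) the safe-mode state; returns true when in safe mode.
  boolean inSafeMode = dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET);
  System.out.println("NameNode in safe mode: " + inSafeMode);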

Example 7: inferChecksumTypeByReading

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
/**
 * Infer the checksum type for a replica by sending an OP_READ_BLOCK
 * for the first byte of that replica. This is used for compatibility
 * with older HDFS versions which did not include the checksum type in
 * OpBlockChecksumResponseProto.
 *
 * @param lb the located block
 * @param dn the connected datanode
 * @return the inferred checksum type
 * @throws IOException if an error occurs
 */
private Type inferChecksumTypeByReading(LocatedBlock lb, DatanodeInfo dn)
    throws IOException {
  IOStreamPair pair = connectToDN(dn, dfsClientConf.socketTimeout, lb);

  try {
    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
        HdfsConstants.SMALL_BUFFER_SIZE));
    DataInputStream in = new DataInputStream(pair.in);

    new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName,
        0, 1, true, CachingStrategy.newDefaultStrategy());
    final BlockOpResponseProto reply =
        BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
    String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn;
    DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

    return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
  } finally {
    IOUtils.cleanup(null, pair.in, pair.out);
  }
}
 
Developer: naver, Project: hadoop, Lines: 33, Source: DFSClient.java

Example 8: getFileInfo

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
static HdfsFileStatus getFileInfo(
    FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath,
    boolean includeStoragePolicy)
  throws IOException {
  String srcs = FSDirectory.normalizePath(src);
  if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
    if (fsd.getINode4DotSnapshot(srcs) != null) {
      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
          HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
          BlockStoragePolicySuite.ID_UNSPECIFIED);
    }
    return null;
  }

  fsd.readLock();
  try {
    final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
    return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
  } finally {
    fsd.readUnlock();
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: FSDirStatAndListingOp.java

Example 9: testReadURL

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
@Test
public void testReadURL() throws Exception {
  HttpURLConnection conn = mock(HttpURLConnection.class);
  doReturn(new ByteArrayInputStream(FAKE_LOG_DATA)).when(conn).getInputStream();
  doReturn(HttpURLConnection.HTTP_OK).when(conn).getResponseCode();
  doReturn(Integer.toString(FAKE_LOG_DATA.length)).when(conn).getHeaderField("Content-Length");

  URLConnectionFactory factory = mock(URLConnectionFactory.class);
  doReturn(conn).when(factory).openConnection(Mockito.<URL> any(),
      anyBoolean());

  URL url = new URL("http://localhost/fakeLog");
  EditLogInputStream elis = EditLogFileInputStream.fromUrl(factory, url,
      HdfsConstants.INVALID_TXID, HdfsConstants.INVALID_TXID, false);
  // Read the edit log and verify that we got all of the data.
  EnumMap<FSEditLogOpCodes, Holder<Integer>> counts = FSImageTestUtil
      .countEditLogOpTypes(elis);
  assertThat(counts.get(FSEditLogOpCodes.OP_ADD).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_SET_GENSTAMP_V1).held, is(1));
  assertThat(counts.get(FSEditLogOpCodes.OP_CLOSE).held, is(1));

  // Check that length header was picked up.
  assertEquals(FAKE_LOG_DATA.length, elis.length());
  elis.close();
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: TestEditLogFileInputStream.java

Example 10: testQuotaByStorageTypeParentOnChildOn

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOn() throws Exception {
  final Path parent = new Path(dir, "parent");
  final Path child = new Path(parent, "child");
  dfs.mkdirs(parent);
  dfs.mkdirs(child);

  dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  dfs.setQuotaByStorageType(parent, StorageType.SSD, 2 * BLOCKSIZE);
  dfs.setQuotaByStorageType(child, StorageType.SSD, 3 * BLOCKSIZE);

  // Create file of size 2.5 * BLOCKSIZE under child directory
  // Verify parent Quota applies
  Path createdFile1 = new Path(child, "created_file1.data");
  long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  int bufLen = BLOCKSIZE / 16;
  try {
    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
        REPLICATION, seed);
    fail("Should have failed with QuotaByStorageTypeExceededException ");
  } catch (Throwable t) {
    LOG.info("Got expected exception ", t);
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestQuotaByStorageType.java

Example 11: testQuota

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
@Test(timeout=10000)
/** Test createSymlink(..) with quota. */
public void testQuota() throws IOException {
  final Path dir = new Path(testBaseDir1());
  dfs.setQuota(dir, 3, HdfsConstants.QUOTA_DONT_SET);

  final Path file = new Path(dir, "file");
  createAndWriteFile(file);

  //creating the first link should succeed
  final Path link1 = new Path(dir, "link1");
  wrapper.createSymlink(file, link1, false);

  try {
    //creating the second link should fail with QuotaExceededException.
    final Path link2 = new Path(dir, "link2");
    wrapper.createSymlink(file, link2, false);
    fail("Created symlink despite quota violation");
  } catch(QuotaExceededException qee) {
    //expected
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestSymlinkHdfs.java
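
A related constant not exercised by this test is HdfsConstants.QUOTA_RESET, which clears a previously set quota rather than leaving it untouched. A one-line sketch, assuming the same dfs and dir as above:

  // Clear both the namespace and space quotas on the directory.
  dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);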

Example 12: getAddress

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
/**
 * @return address of file system
 */
public static InetSocketAddress getAddress(URI filesystemURI) {
  String authority = filesystemURI.getAuthority();
  if (authority == null) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s has no authority.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
  }
  if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
      filesystemURI.getScheme())) {
    throw new IllegalArgumentException(String.format(
        "Invalid URI for NameNode address (check %s): %s is not of scheme '%s'.",
        FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString(),
        HdfsConstants.HDFS_URI_SCHEME));
  }
  return getAddress(authority);
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: NameNode.java
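
The scheme check performed in this method can also be factored out on its own. A hypothetical helper, shown as a sketch:

  // Returns true only for URIs whose scheme is "hdfs" (case-insensitive).
  static boolean isHdfsUri(java.net.URI uri) {
    return HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(uri.getScheme());
  }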

Example 13: validateEditLog

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
static FSEditLogLoader.EditLogValidation validateEditLog(File file)
    throws IOException {
  EditLogFileInputStream in;
  try {
    in = new EditLogFileInputStream(file);
    in.getVersion(true); // causes us to read the header
  } catch (LogHeaderCorruptException e) {
    // If the header is malformed or has the wrong value, this indicates corruption
    LOG.warn("Log file " + file + " has no valid header", e);
    return new FSEditLogLoader.EditLogValidation(0,
        HdfsConstants.INVALID_TXID, true);
  }
  
  try {
    return FSEditLogLoader.validateEditLog(in);
  } finally {
    IOUtils.closeStream(in);
  }
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: EditLogFileInputStream.java

Example 14: readLogVersion

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
/**
 * Read the header of the fsedit log
 * @param in fsedit stream
 * @return the edit log version number
 * @throws IOException if an error occurs
 */
@VisibleForTesting
static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
    throws IOException, LogHeaderCorruptException {
  int logVersion;
  try {
    logVersion = in.readInt();
  } catch (EOFException eofe) {
    throw new LogHeaderCorruptException(
        "Reached EOF when reading log header");
  }
  if (verifyLayoutVersion &&
      (logVersion < HdfsConstants.NAMENODE_LAYOUT_VERSION || // future version
       logVersion > Storage.LAST_UPGRADABLE_LAYOUT_VERSION)) { // unsupported
    throw new LogHeaderCorruptException(
        "Unexpected version of the file system log file: "
        + logVersion + ". Current version = "
        + HdfsConstants.NAMENODE_LAYOUT_VERSION + ".");
  }
  return logVersion;
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: EditLogFileInputStream.java

Example 15: testFallback

import org.apache.hadoop.hdfs.protocol.HdfsConstants; // import the required package/class
/**
 * Test the sync returns false in the following scenarios:
 * 1. the source/target dir are not snapshottable dir
 * 2. the source/target does not have the given snapshots
 * 3. changes have been made in target
 */
@Test
public void testFallback() throws Exception {
  // the source/target dir are not snapshottable dir
  Assert.assertFalse(DistCpSync.sync(options, conf));
  // make sure the source path has been updated to the snapshot path
  final Path spath = new Path(source,
      HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s2");
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source path in options
  options.setSourcePaths(Arrays.asList(source));
  // the source/target does not have the given snapshots
  dfs.allowSnapshot(source);
  dfs.allowSnapshot(target);
  Assert.assertFalse(DistCpSync.sync(options, conf));
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source path in options
  options.setSourcePaths(Arrays.asList(source));
  dfs.createSnapshot(source, "s1");
  dfs.createSnapshot(source, "s2");
  dfs.createSnapshot(target, "s1");
  Assert.assertTrue(DistCpSync.sync(options, conf));

  // reset source paths in options
  options.setSourcePaths(Arrays.asList(source));
  // changes have been made in target
  final Path subTarget = new Path(target, "sub");
  dfs.mkdirs(subTarget);
  Assert.assertFalse(DistCpSync.sync(options, conf));
  // make sure the source path has been updated to the snapshot path
  Assert.assertEquals(spath, options.getSourcePaths().get(0));

  // reset source paths in options
  options.setSourcePaths(Arrays.asList(source));
  dfs.delete(subTarget, true);
  Assert.assertTrue(DistCpSync.sync(options, conf));
}
 
Developer: naver, Project: hadoop, Lines: 45, Source: TestDistCpSync.java


Note: The org.apache.hadoop.hdfs.protocol.HdfsConstants examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors. Please consult each project's license before distributing or using the code, and do not repost without permission.