

Java SimulatedFSDataset Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset. If you are wondering what the SimulatedFSDataset class is for or how to use it, the curated examples below may help.


The SimulatedFSDataset class belongs to the org.apache.hadoop.hdfs.server.datanode package. 15 code examples of the class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
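Before the individual examples, here is a minimal sketch of the pattern most of them share: enable simulated storage on the test Configuration before building a MiniDFSCluster, so DataNodes keep only block metadata in memory instead of writing real data to disk. This sketch is assembled from the examples below rather than taken from any one project; the class name SimulatedStorageSketch is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

public class SimulatedStorageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    SimulatedFSDataset.setFactory(conf); // swap in the simulated dataset factory
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      FileSystem fs = cluster.getFileSystem();
      // Writes are simulated; reads return the fixed byte
      // SimulatedFSDataset.DEFAULT_DATABYTE instead of real file data.
      fs.create(new Path("/demo.dat")).close();
    } finally {
      cluster.shutdown();
    }
  }
}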

Example 1: injectBlocks

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
/**
 * This method is valid only if the data nodes have simulated data.
 * @param dataNodeIndex - index of the data node to inject into; the index is
 *             the same as for getDataNodes()
 * @param blocksToInject - the blocks to inject
 * @param bpid - (optional) the block pool id to use for injecting blocks.
 *             If not supplied, it is queried from the in-process NameNode.
 * @throws IOException if the dataset is not a SimulatedFSDataset, or if any
 *             of the blocks already exist in the data node
 */
public void injectBlocks(int dataNodeIndex,
    Iterable<Block> blocksToInject, String bpid) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  if (bpid == null) {
    bpid = getNamesystem().getBlockPoolId();
  }
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: MiniDFSCluster.java
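As a usage note, a hedged sketch of how a test might call this helper on a cluster started with simulated storage; the block id and sizes here are illustrative assumptions, not taken from the project.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;

// Assumption: 'cluster' is a running MiniDFSCluster built after
// SimulatedFSDataset.setFactory(conf), as in the sketch above.
List<Block> blocks = new ArrayList<Block>();
blocks.add(new Block(1001L, 4096L, 1L)); // blockId, numBytes, generationStamp
// Inject into data node 0; a null bpid is looked up from the in-process NameNode.
cluster.injectBlocks(0, blocks, null);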

Example 2: testFileNotFound

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
/**
 * FileNotFoundException is expected when appending to a non-existent file.
 *
 * @throws FileNotFoundException as the expected result
 */
@Test(expected = FileNotFoundException.class)
public void testFileNotFound() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path file1 = new Path("/nonexistingfile.dat");
    fs.append(file1);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestFileAppend.java

Example 3: checkFile

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Developer: naver, Project: hadoop, Lines: 21, Source: TestSmallBlock.java

Example 4: testSmallBlock

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
/**
 * Tests small block size in DFS.
 */
@Test
public void testSmallBlock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("smallblocktest.dat");
    writeFile(fileSys, file1);
    checkFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 23, Source: TestSmallBlock.java

Example 5: setUp

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
@Before
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestFileAppend4.java

Example 6: dfsPreadTest

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: TestPread.java
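For context, a hedged sketch of how a test method might drive this private helper; the method name and flag combinations are illustrative assumptions rather than code confirmed from the project.

// Assumption: this lives in the same test class as dfsPreadTest above.
@Test
public void testPreadDFS() throws IOException {
  Configuration conf = new Configuration();
  dfsPreadTest(conf, false, true); // default transferTo path, checksums verified
  dfsPreadTest(conf, true, true);  // transferTo disabled, checksums verified
}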

Example 7: testSmallBlock

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
/**
 * Tests small block size in DFS.
 */
@Test
public void testSmallBlock() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  DistributedFileSystem fileSys = cluster.getFileSystem();
  try {
    Path file1 = new Path("/smallblocktest.dat");
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
        (short) 1, seed);
    checkFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 24, Source: TestSmallBlock.java

Example 8: setUp

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
@Before
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 22, Source: TestFileAppend4.java

Example 9: testFileCreationNonRecursive

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
/**
 * Test file creation using createNonRecursive().
 */
@Test
public void testFileCreationNonRecursive() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();

  try {
    testFileCreationNonRecursive(fs);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 20, Source: TestFileCreation.java

Example 10: dfsPreadTest

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
private void dfsPreadTest(Configuration conf, boolean disableTransferTo, boolean verifyChecksum)
    throws IOException {
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
  conf.setLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY, 4096);
  // Set short retry timeouts so this test runs faster
  conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 0);
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  if (disableTransferTo) {
    conf.setBoolean("dfs.datanode.transferTo.allowed", false);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fileSys = cluster.getFileSystem();
  fileSys.setVerifyChecksum(verifyChecksum);
  try {
    Path file1 = new Path("/preadtest.dat");
    writeFile(fileSys, file1);
    pReadFile(fileSys, file1);
    datanodeRestartTest(cluster, fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 27, Source: TestPread.java

Example 11: injectBlocks

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
/**
 * Multiple-NameNode version of injectBlocks.
 */
public void injectBlocks(int nameNodeIndex, int dataNodeIndex,
    Iterable<Block> blocksToInject) throws IOException {
  if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
    throw new IndexOutOfBoundsException();
  }
  final DataNode dn = dataNodes.get(dataNodeIndex).datanode;
  final FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
  if (!(dataSet instanceof SimulatedFSDataset)) {
    throw new IOException("injectBlocks is valid only for SimulatedFSDataset");
  }
  String bpid = getNamesystem(nameNodeIndex).getBlockPoolId();
  SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
  sdataset.injectBlocks(bpid, blocksToInject);
  dataNodes.get(dataNodeIndex).datanode.scheduleAllBlockReport(0);
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 19, Source: MiniDFSCluster.java
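In the federated variant, the call shape is the same with the NameNode index supplied explicitly; a brief hedged illustration reusing the illustrative block list from the note under Example 1.

// Assumption: 'cluster' runs multiple NameNodes (federation) with simulated
// storage; inject into data node 0 using NameNode 0's block pool id.
cluster.injectBlocks(0, 0, blocks);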

Example 12: checkContent

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
private void checkContent(FileSystem fileSys, Path name, int length)
    throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[length];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = fileContents[i];
    }
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[length];
  stm.readFully(0, actual);
  checkData(actual, 0, expected, "Read 1");
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 19, Source: TestFileAppend.java

Example 13: setUp

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
@Override
public void setUp() throws Exception {
  this.conf = new Configuration();
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  conf.setBoolean("dfs.support.append", true);

  // lower heartbeat interval for fast recognition of DN death
  conf.setInt("heartbeat.recheck.interval", 1000);
  conf.setInt("dfs.heartbeat.interval", 1);
  conf.setInt("dfs.socket.timeout", 5000);
  // handle under-replicated blocks quickly (for replication asserts)
  conf.setInt("dfs.replication.pending.timeout.sec", 5);
  conf.setInt("dfs.replication.interval", 1);
  // handle failures in the DFSClient pipeline quickly
  // (for cluster.shutdown(); fs.close() idiom)
  conf.setInt("ipc.client.connect.max.retries", 1);
  conf.setInt("dfs.client.block.recovery.retries", 1);
  // Delay blockReceived calls from DNs to be more similar to a real
  // cluster. 10ms is enough so that client often gets there first.
  conf.setInt("dfs.datanode.artificialBlockReceivedDelay", 10);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 25, Source: TestFileAppend4.java

Example 14: addSomeBlocks

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
  int bytesAdded = 0;
  for (int i = startingBlockId; i < startingBlockId + NUMBLOCKS; ++i) {
    // expected length is passed as zero; fsdataset should use the size of
    // the data actually written
    Block b = new Block(i, 0, 0);
    BlockDataFile.Writer dataOut = ((SimulatedFSDataset.SimulatedBlockInlineChecksumFileWriter) fsdataset
        .writeToBlock(0, b, b, false, false, -1, -1)).getBlockDataFile()
        .getWriter(0);
    assertEquals(0, fsdataset.getFinalizedBlockLength(0, b));
    for (int j = 1; j <= blockIdToLen(i); ++j) {
      dataOut.write(new byte[] {(byte) j});
      // the length should be correct even while we are still writing
      assertEquals(j, fsdataset.getFinalizedBlockLength(0, b));
      bytesAdded++;
    }
    dataOut.close();
    b.setNumBytes(blockIdToLen(i));
    fsdataset.finalizeBlock(0, b);
    assertEquals(blockIdToLen(i), fsdataset.getFinalizedBlockLength(0, b));
  }
  return bytesAdded;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 21, Source: TestSimulatedFSDataset.java

Example 15: checkBlockDataAndSize

import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; // import the required package/class
void checkBlockDataAndSize(FSDatasetInterface fsdataset,
    Block b, long expectedLen) throws IOException {
  ReplicaToRead replica = fsdataset.getReplicaToRead(0, b);
  InputStream input = replica.getBlockInputStream(null, 0);
  long lengthRead = 0;
  int data;
  int count = 0;
  while ((data = input.read()) != -1) {
    // skip the inline checksum header that precedes the block data
    if (count++ < BlockInlineChecksumReader.getHeaderSize()) {
      continue;
    }
    // every simulated data byte has the same fixed value
    assertEquals(SimulatedFSDataset.DEFAULT_DATABYTE, data);
    lengthRead++;
  }
  assertEquals(expectedLen, lengthRead);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 17, Source: TestSimulatedFSDataset.java


Note: The org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright for the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.