

Java SimulatedFSDataset.DEFAULT_DATABYTE Field Code Examples

This article collects typical usage examples of the Java field org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset.DEFAULT_DATABYTE. If you are asking what SimulatedFSDataset.DEFAULT_DATABYTE is for, how to use it, or want concrete examples, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset.


The following presents 12 code examples of the SimulatedFSDataset.DEFAULT_DATABYTE field, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Java code examples. A sketch of the pattern they all share follows.
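Before the individual examples, here is a minimal, self-contained sketch of that shared pattern. SimulatedFSDataset is a test-only dataset implementation whose stored bytes all read back as the constant DEFAULT_DATABYTE, so a test that knows a file's length can predict its entire contents without retaining the written data. Note the hedges: the class SimulatedReadCheck, its FILE_SIZE constant, and the helper methods below are hypothetical names invented for this sketch, and SimulatedFSDataset.setFactory(conf) is shown on the assumption that, as in the quoted tests, it is what switches the cluster over to simulated storage.

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;

// Hypothetical helper distilling the pattern shared by the examples below.
public class SimulatedReadCheck {

  private static final int FILE_SIZE = 4096; // illustrative size only

  // Enable simulated storage, as the quoted tests do before starting a
  // MiniDFSCluster (assumption: setFactory installs the simulated dataset
  // factory into the configuration).
  static Configuration simulatedConf() {
    Configuration conf = new Configuration();
    SimulatedFSDataset.setFactory(conf);
    return conf;
  }

  // Build the expected contents: under simulated storage every byte the
  // DataNode serves is DEFAULT_DATABYTE; for real storage the tests use
  // a seeded Random or a saved fileContents array instead.
  static byte[] expectedBytes(boolean simulatedStorage) {
    byte[] expected = new byte[FILE_SIZE];
    if (simulatedStorage) {
      Arrays.fill(expected, SimulatedFSDataset.DEFAULT_DATABYTE);
    }
    return expected;
  }

  // Positional read of the whole file followed by a byte-for-byte compare.
  static void verify(FileSystem fs, Path name, byte[] expected)
      throws IOException {
    byte[] actual = new byte[FILE_SIZE];
    try (FSDataInputStream in = fs.open(name)) {
      in.readFully(0, actual);
    }
    if (!Arrays.equals(expected, actual)) {
      throw new AssertionError("file contents do not match expected bytes");
    }
  }
}

The design point every example exploits is determinism: filling the expected buffer with DEFAULT_DATABYTE replaces the seeded-Random or fileContents comparison used for real storage, which is why each snippet below branches on a simulatedStorage flag.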

Example 1: checkFile

private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {  
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source: TestSmallBlock.java

Example 2: checkContent

private void checkContent(FileSystem fileSys, Path name, int length)
    throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[length];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = fileContents[i];
    }
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[length];
  stm.readFully(0, actual);
  checkData(actual, 0, expected, "Read 1");
  stm.close(); // close the stream after verifying the contents
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 19 | Source: TestFileAppend.java

Example 3: checkFile

private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations =
      fileSys.getFileBlockLocations(fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
 
Developer: hopshadoop | Project: hops | Lines: 20 | Source: TestSmallBlock.java

Example 4: checkFile

private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) { /* ignored; the loop retries */ }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replciated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected = 
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  // do not check file status since the file is not yet closed.
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
      expected, "Read 1", false);
}
 
Developer: naver | Project: hadoop | Lines: 40 | Source: TestFileAppend.java

Example 5: checkFile

private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) { /* ignored; the loop retries */ }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replciated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected = 
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE,
      expected, "Read 1");
}
 
Developer: Nextzero | Project: hadoop-2.6.0-cdh5.4.3 | Lines: 39 | Source: TestFileAppend.java

Example 6: checkFile

private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  FSDataInputStream stm = fileSys.open(name);
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  System.out.println("Verifying file ");
  stm.readFully(0, actual);
  stm.close();
  checkData(actual, 0, expected, "Read 1");
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 40 | Source: TestFileLocalRead.java

Example 7: checkFile

public void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {}
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  FSDataInputStream stm = fileSys.open(name);
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  stm.readFully(0, actual);
  stm.close();
  checkData(actual, 0, expected, "Read 1");
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 39 | Source: TestFileCreation.java

Example 8: pReadFile

private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[12 * blockSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read first 4K bytes
  byte[] actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  // now do a pread for the first 8K bytes
  actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");
  // Now check to see if the normal read returns 4K-8K byte range
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 4096, expected, "Pread Test 2");
  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  stm.readFully(blockSize - 2048, actual, 0, 4096);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");
  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(blockSize - 2048, actual);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");
  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(10 * blockSize - 2048, actual);
  checkAndEraseData(actual, (10 * blockSize - 2048), expected, "Pread Test 5");
  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");
  // done
  stm.close();
  // check block location caching
  stm = fileSys.open(name);
  stm.readFully(1, actual, 0, 4096);
  stm.readFully(4*blockSize, actual, 0, 4096);
  stm.readFully(7*blockSize, actual, 0, 4096);
  actual = new byte[3*4096];
  stm.readFully(0*blockSize, actual, 0, 3*4096);
  checkAndEraseData(actual, 0, expected, "Pread Test 7");
  actual = new byte[8*4096];
  stm.readFully(3*blockSize, actual, 0, 8*4096);
  checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
  // read the tail
  stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
  IOException res = null;
  try { // read beyond the end of the file
    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
  } catch (IOException e) {
    // should throw an exception
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);
  
  stm.close();
}
 
Developer: naver | Project: hadoop | Lines: 68 | Source: TestPread.java

Example 9: pReadFile

private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[(int)(12*blockSize)];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. pread the first 4K bytes
  List<ByteBuffer> rlist = stm.readFullyScatterGather(0, 4096);
  checkAndEraseData(rlist, 4096, 0, expected, "Read Sanity Test");

  // now do a pread for the first 8K bytes
  byte[] actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");

  // Now check to see if the normal read returns 0K-8K byte range
  actual = new byte[8192];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Pread Test 2");

  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  rlist = stm.readFullyScatterGather(blockSize - 2048, 4096);
  checkAndEraseData(rlist, 4096, (int)(blockSize-2048), expected, "Pread Test 3");

  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  int size = (int)(blockSize+4096);
  rlist = stm.readFullyScatterGather(blockSize - 2048, size);
  checkAndEraseData(rlist, size, (int)(blockSize-2048), expected, "Pread Test 4");

  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  size = (int)(blockSize+4096);
  rlist = stm.readFullyScatterGather(10*blockSize - 2048, size);
  checkAndEraseData(rlist, size, (int)(10*blockSize-2048), expected, "Pread Test 5");

  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");

  // pread beyond the end of the file. It should return the last half block.
  size = blockSize/2;
  rlist = stm.readFullyScatterGather(11*blockSize+size, blockSize);
  checkAndEraseData(rlist, size, (int)(11*blockSize+size), expected, "Pread Test 5");

  IOException res = null;
  try { // normal read beyond the end of the file
    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
  } catch (IOException e) {
    // should throw an exception
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);
  
  stm.close();
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 64 | Source: TestScatterGather.java

Example 10: pReadFile

private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[(int)(12*blockSize)];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read first 4K bytes
  byte[] actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  // now do a pread for the first 8K bytes
  actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");
  // Now check to see if the normal read returns 4K-8K byte range
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 4096, expected, "Pread Test 2");
  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  stm.readFully(blockSize - 2048, actual, 0, 4096);
  checkAndEraseData(actual, (int)(blockSize-2048), expected, "Pread Test 3");
  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  actual = new byte[(int)(blockSize+4096)];
  stm.readFully(blockSize - 2048, actual);
  checkAndEraseData(actual, (int)(blockSize-2048), expected, "Pread Test 4");
  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  actual = new byte[(int)(blockSize+4096)];
  stm.readFully(10*blockSize - 2048, actual);
  checkAndEraseData(actual, (int)(10*blockSize-2048), expected, "Pread Test 5");
  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");
  // done
  stm.close();
  // check block location caching
  stm = fileSys.open(name);
  stm.readFully(1, actual, 0, 4096);
  stm.readFully(4*blockSize, actual, 0, 4096);
  stm.readFully(7*blockSize, actual, 0, 4096);
  actual = new byte[3*4096];
  stm.readFully(0*blockSize, actual, 0, 3*4096);
  checkAndEraseData(actual, 0, expected, "Pread Test 7");
  actual = new byte[8*4096];
  stm.readFully(3*blockSize, actual, 0, 8*4096);
  checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
  // read the tail
  stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
  IOException res = null;
  try { // read beyond the end of the file
    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
  } catch (IOException e) {
    // should throw an exception
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);
  
  stm.close();
}
 
Developer: rhli | Project: hadoop-EAR | Lines: 68 | Source: TestPread.java

Example 11: checkFile

private void checkFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // ignored; the loop retries the check
    }
    done = true;
    BlockLocation[] locations = fileSys
        .getFileBlockLocations(fileSys.getFileStatus(name), 0,
            AppendTestUtil.FILE_SIZE);
    if (locations.length < AppendTestUtil.NUM_BLOCKS) {
      System.out.println("Number of blocks found " + locations.length);
      done = false;
      continue;
    }
    for (int idx = 0; idx < AppendTestUtil.NUM_BLOCKS; idx++) {
      if (locations[idx].getHosts().length < repl) {
        System.out.println("Block index " + idx + " not yet replciated.");
        done = false;
        break;
      }
    }
  }
  byte[] expected =
      new byte[AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    System.arraycopy(fileContents, 0, expected, 0, expected.length);
  }
  // do a sanity check. Read the file
  AppendTestUtil.checkFullFile(fileSys, name,
      AppendTestUtil.NUM_BLOCKS * AppendTestUtil.BLOCK_SIZE, expected,
      "Read 1");
}
 
Developer: hopshadoop | Project: hops | Lines: 42 | Source: TestFileAppend.java

Example 12: pReadFile

private void pReadFile(FileSystem fileSys, Path name) throws IOException {
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[12 * blockSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read first 4K bytes
  byte[] actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  // now do a pread for the first 8K bytes
  actual = new byte[8192];
  doPread(stm, 0L, actual, 0, 8192);
  checkAndEraseData(actual, 0, expected, "Pread Test 1");
  // Now check to see if the normal read returns 4K-8K byte range
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 4096, expected, "Pread Test 2");
  // Now see if we can cross a single block boundary successfully
  // read 4K bytes from blockSize - 2K offset
  stm.readFully(blockSize - 2048, actual, 0, 4096);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");
  // now see if we can cross two block boundaries successfully
  // read blockSize + 4K bytes from blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(blockSize - 2048, actual);
  checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");
  // now see if we can cross two block boundaries that are not cached
  // read blockSize + 4K bytes from 10*blockSize - 2K offset
  actual = new byte[blockSize + 4096];
  stm.readFully(10 * blockSize - 2048, actual);
  checkAndEraseData(actual, (10 * blockSize - 2048), expected,
      "Pread Test 5");
  // now check that even after all these preads, we can still read
  // bytes 8K-12K
  actual = new byte[4096];
  stm.readFully(actual);
  checkAndEraseData(actual, 8192, expected, "Pread Test 6");
  // done
  stm.close();
  // check block location caching
  stm = fileSys.open(name);
  stm.readFully(1, actual, 0, 4096);
  stm.readFully(4 * blockSize, actual, 0, 4096);
  stm.readFully(7 * blockSize, actual, 0, 4096);
  actual = new byte[3 * 4096];
  stm.readFully(0 * blockSize, actual, 0, 3 * 4096);
  checkAndEraseData(actual, 0, expected, "Pread Test 7");
  actual = new byte[8 * 4096];
  stm.readFully(3 * blockSize, actual, 0, 8 * 4096);
  checkAndEraseData(actual, 3 * blockSize, expected, "Pread Test 8");
  // read the tail
  stm.readFully(11 * blockSize + blockSize / 2, actual, 0, blockSize / 2);
  IOException res = null;
  try { // read beyond the end of the file
    stm.readFully(11 * blockSize + blockSize / 2, actual, 0, blockSize);
  } catch (IOException e) {
    // should throw an exception
    res = e;
  }
  assertTrue("Error reading beyond file boundary.", res != null);
  
  stm.close();
}
 
Developer: hopshadoop | Project: hops | Lines: 69 | Source: TestPread.java


Note: The org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset.DEFAULT_DATABYTE field examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not repost without permission.