

Java AppendTestUtil.randomBytes Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hdfs.AppendTestUtil.randomBytes. If you are wondering what AppendTestUtil.randomBytes does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples of its containing class, org.apache.hadoop.hdfs.AppendTestUtil.


The following presents 9 code examples of AppendTestUtil.randomBytes, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
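Before the examples themselves, here is a minimal, self-contained sketch of the pattern they all share: AppendTestUtil.randomBytes(seed, size) produces a pseudo-random byte array that is deterministic for a given seed, so a test can write that data to a file and later verify a read path by regenerating the same bytes. The class RandomBytesSketch and the helper writeAndVerify below are illustrative assumptions, not part of Hadoop; only AppendTestUtil.randomBytes and the standard FileSystem API appear in the source examples.

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;

public class RandomBytesSketch {
  // Hypothetical helper: write seeded random data and verify it round-trips.
  static void writeAndVerify(FileSystem fs, Path file, long seed, int size)
      throws IOException {
    // randomBytes is deterministic for a given seed, so the expected
    // content can be regenerated later instead of being stored.
    byte[] expected = AppendTestUtil.randomBytes(seed, size);

    FSDataOutputStream out = fs.create(file, (short) 1); // replication = 1
    out.write(expected);
    out.close();

    byte[] actual = new byte[size];
    FSDataInputStream in = fs.open(file);
    in.readFully(0, actual); // positioned read of the whole file
    in.close();

    if (!Arrays.equals(expected, actual)) {
      throw new AssertionError("file content does not match seeded data");
    }
  }
}

The nine examples below all follow this shape; they differ mainly in cluster configuration and in which read path (short-circuit local read, legacy block reader, or RemoteBlockReader) is exercised.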

Example 1: doTestShortCircuitReadWithRemoteBlockReader

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
                                                        int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path)
              .isDirectory() == true);

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support read(ByteBuffer bf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Author: naver | Project: hadoop | Lines: 42 | Source: TestShortCircuitLocalRead.java

Example 2: doTestShortCircuitReadWithRemoteBlockReader

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Test that file data can be read by reading the block
 * through RemoteBlockReader
 * @throws IOException
 */
public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum,
    int size, String shortCircuitUser, int readOffset,
    boolean shortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  // check that / exists
  Path path = new Path("/");
  URI uri = cluster.getURI();
  assertTrue("/ should be a directory", fs.getFileStatus(path).isDirectory());

  byte[] fileData = AppendTestUtil.randomBytes(seed, size);
  Path file1 = new Path("filelocal.dat");
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(fileData);
  stm.close();
  try {
    checkFileContent(uri, file1, fileData, readOffset, shortCircuitUser, 
        conf, shortCircuitFails);
    // RemoteBlockReader does not support read(ByteBuffer bf)
    assertTrue("RemoteBlockReader unsupported method read(ByteBuffer bf) error",
                  checkUnsupportedMethod(fs, file1, fileData, readOffset));
  } catch(IOException e) {
    throw new IOException("doTestShortCircuitReadWithRemoteBlockReader ex error ", e);
  } catch(InterruptedException inEx) {
    throw inEx;
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Author: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 42 | Source: TestShortCircuitLocalRead.java
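The only substantive difference from Example 1 is the configuration surface: this version targets a newer Hadoop line where the legacy-block-reader and short-circuit flags moved from DFSConfigKeys to HdfsClientConfigKeys (for example, HdfsClientConfigKeys.Read.ShortCircuit.KEY). The test logic is otherwise identical.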

Example 3: checkFile

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
private void checkFile(FileSystem fileSys, Path name, int repl)
  throws IOException {
  boolean done = false;

  // wait till all full blocks are confirmed by the datanodes.
  while (!done) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // ignore the interruption and re-check the block locations
    }
    done = true;
    BlockLocation[] locations = fileSys.getFileBlockLocations(
        fileSys.getFileStatus(name), 0, fileSize);
    if (locations.length < numBlocks) {
      done = false;
      continue;
    }
    for (int idx = 0; idx < locations.length; idx++) {
      if (locations[idx].getHosts().length < repl) {
        done = false;
        break;
      }
    }
  }
  FSDataInputStream stm = fileSys.open(name);
  final byte[] expected;
  if (simulatedStorage) {
    expected = new byte[numBlocks * blockSize];
    for (int i = 0; i < expected.length; i++) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    expected = AppendTestUtil.randomBytes(seed, numBlocks * blockSize);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[numBlocks * blockSize];
  stm.readFully(0, actual);
  stm.close();
  checkData(actual, 0, expected, "Read 1");
}
 
Author: rhli | Project: hadoop-EAR | Lines: 40 | Source: TestAvatarDataNodeMultipleRegistrations.java
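Note the design choice above: instead of persisting the expected file contents, checkFile regenerates them by calling AppendTestUtil.randomBytes with the same seed the writer used. Because the method is deterministic for a given seed, the final checkData comparison needs no stored fixture data.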

Example 4: doTestShortCircuitReadImpl

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Test that file data can be read by reading the block file
 * directly from the local store.
 */
public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
    int readOffset, String shortCircuitUser, String readingUser,
    boolean legacyShortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      ignoreChecksum);
  // Set a random client context name so that we don't share a cache with
  // other invocations of this function.
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
      UUID.randomUUID().toString());
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
  if (shortCircuitUser != null) {
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        shortCircuitUser);
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
    Path file1 = fs.makeQualified(new Path("filelocal.dat"));
    FSDataOutputStream stm = createFile(fs, file1, 1);
    stm.write(fileData);
    stm.close();
    
    URI uri = cluster.getURI();
    checkFileContent(uri, file1, fileData, readOffset, readingUser, conf,
        legacyShortCircuitFails);
    checkFileContentDirect(uri, file1, fileData, readOffset, readingUser,
        conf, legacyShortCircuitFails);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Author: naver | Project: hadoop | Lines: 52 | Source: TestShortCircuitLocalRead.java

Example 5: testSkipWithVerifyChecksum

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
@Test(timeout=10000)
public void testSkipWithVerifyChecksum() throws IOException {
  int size = blockSize;
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "/tmp/testSkipWithVerifyChecksum._PORT");
  DomainSocket.disableBindPathValidation();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory", fs.getFileStatus(path)
        .isDirectory() == true);
    
    byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
    // create a new file in home directory. Do not close it.
    Path file1 = new Path("filelocal.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // write to file
    stm.write(fileData);
    stm.close();
    
    // now test the skip function
    FSDataInputStream instm = fs.open(file1);
    byte[] actual = new byte[fileData.length];
    // read something from the block first, otherwise BlockReaderLocal.skip()
    // will not be invoked
    int nread = instm.read(actual, 0, 3);
    long skipped = 2*size+3;
    instm.seek(skipped);
    nread = instm.read(actual, (int)(skipped + nread), 3);
    instm.close();
      
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Author: naver | Project: hadoop | Lines: 47 | Source: TestShortCircuitLocalRead.java

Example 6: main

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Test to run benchmarks between short circuit read vs regular read with
 * specified number of threads simultaneously reading.
 * <br>
 * Run this using the following command:
 * bin/hadoop --config confdir \
 * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
 * <shortcircuit on?> <checksum on?> <Number of threads>
 */
public static void main(String[] args) throws Exception {    
  if (args.length != 3) {
    System.out.println("Usage: test shortcircuit checksum threadCount");
    System.exit(1);
  }
  boolean shortcircuit = Boolean.valueOf(args[0]);
  boolean checksum = Boolean.valueOf(args[1]);
  int threadCount = Integer.parseInt(args[2]);

  // Setup create a file
  final Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, shortcircuit);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "/tmp/TestShortCircuitLocalRead._PORT");
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
      checksum);
  
  // Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
  int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
  final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);
  
  // create a new file in home directory. Do not close it.
  final Path file1 = new Path("filelocal.dat");
  final FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream stm = createFile(fs, file1, 1);
  
  stm.write(dataToWrite);
  stm.close();

  long start = Time.now();
  final int iteration = 20;
  Thread[] threads = new Thread[threadCount];
  for (int i = 0; i < threadCount; i++) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        for (int i = 0; i < iteration; i++) {
          try {
            String user = getCurrentUser();
            checkFileContent(fs.getUri(), file1, dataToWrite, 0, user, conf, true);
          } catch (IOException e) {
            e.printStackTrace();
          } catch (InterruptedException e) {
            e.printStackTrace();
          }
        }
      }
    };
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].start();
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].join();
  }
  long end = Time.now();
  System.out.println("Iteration " + iteration + " took " + (end - start));
  fs.delete(file1, false);
}
 
Author: naver | Project: hadoop | Lines: 69 | Source: TestShortCircuitLocalRead.java
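Following the usage string in the Javadoc above, a hypothetical invocation with short-circuit reads enabled and four reader threads might look like this (the confdir path is a placeholder):

bin/hadoop --config confdir org.apache.hadoop.hdfs.TestShortCircuitLocalRead true false 4

One caveat visible in the code: the second argument is written into DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, so despite the "<checksum on?>" usage text, passing true skips checksum verification rather than enabling it.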

Example 7: doTestShortCircuitReadImpl

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Test that file data can be read by reading the block file
 * directly from the local store.
 */
public void doTestShortCircuitReadImpl(boolean ignoreChecksum, int size,
    int readOffset, String shortCircuitUser, String readingUser,
    boolean legacyShortCircuitFails) throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
      ignoreChecksum);
  // Set a random client context name so that we don't share a cache with
  // other invocations of this function.
  conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
      UUID.randomUUID().toString());
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestShortCircuitLocalRead._PORT.sock").getAbsolutePath());
  if (shortCircuitUser != null) {
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        shortCircuitUser);
    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  }
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory",
        fs.getFileStatus(path).isDirectory());

    byte[] fileData = AppendTestUtil.randomBytes(seed, size);
    Path file1 = fs.makeQualified(new Path("filelocal.dat"));
    FSDataOutputStream stm = createFile(fs, file1, 1);
    stm.write(fileData);
    stm.close();

    URI uri = cluster.getURI();
    checkFileContent(uri, file1, fileData, readOffset, readingUser, conf,
        legacyShortCircuitFails);
    checkFileContentDirect(uri, file1, fileData, readOffset, readingUser,
        conf, legacyShortCircuitFails);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Author: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 52 | Source: TestShortCircuitLocalRead.java

Example 8: testSkipWithVerifyChecksum

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
@Test(timeout=10000)
public void testSkipWithVerifyChecksum() throws IOException {
  int size = blockSize;
  Configuration conf = new Configuration();
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
          "testSkipWithVerifyChecksum._PORT.sock").getAbsolutePath());
  DomainSocket.disableBindPathValidation();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    // check that / exists
    Path path = new Path("/");
    assertTrue("/ should be a directory",
        fs.getFileStatus(path).isDirectory());

    byte[] fileData = AppendTestUtil.randomBytes(seed, size*3);
    // create a new file in home directory. Do not close it.
    Path file1 = new Path("filelocal.dat");
    FSDataOutputStream stm = createFile(fs, file1, 1);

    // write to file
    stm.write(fileData);
    stm.close();

    // now test the skip function
    FSDataInputStream instm = fs.open(file1);
    byte[] actual = new byte[fileData.length];
    // read something from the block first, otherwise BlockReaderLocal.skip()
    // will not be invoked
    int nread = instm.read(actual, 0, 3);
    long skipped = 2*size+3;
    instm.seek(skipped);
    nread = instm.read(actual, (int)(skipped + nread), 3);
    instm.close();
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Author: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 47 | Source: TestShortCircuitLocalRead.java

Example 9: main

import org.apache.hadoop.hdfs.AppendTestUtil; // import the package/class this method depends on
/**
 * Test to run benchmarks between short circuit read vs regular read with
 * specified number of threads simultaneously reading.
 * <br>
 * Run this using the following command:
 * bin/hadoop --config confdir \
 * org.apache.hadoop.hdfs.TestShortCircuitLocalRead \
 * <shortcircuit on?> <checksum on?> <Number of threads>
 */
public static void main(String[] args) throws Exception {    
  if (args.length != 3) {
    System.out.println("Usage: test shortcircuit checksum threadCount");
    System.exit(1);
  }
  boolean shortcircuit = Boolean.valueOf(args[0]);
  boolean checksum = Boolean.valueOf(args[1]);
  int threadCount = Integer.parseInt(args[2]);

  // Setup create a file
  final Configuration conf = new Configuration();
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, shortcircuit);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      "/tmp/TestShortCircuitLocalRead._PORT");
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
      checksum);

  // Override fileSize and DATA_TO_WRITE to much larger values for benchmark test
  int fileSize = 1000 * blockSize + 100; // File with 1000 blocks
  final byte [] dataToWrite = AppendTestUtil.randomBytes(seed, fileSize);

  // create a new file in home directory. Do not close it.
  final Path file1 = new Path("filelocal.dat");
  final FileSystem fs = FileSystem.get(conf);
  FSDataOutputStream stm = createFile(fs, file1, 1);

  stm.write(dataToWrite);
  stm.close();

  long start = Time.now();
  final int iteration = 20;
  Thread[] threads = new Thread[threadCount];
  for (int i = 0; i < threadCount; i++) {
    threads[i] = new Thread() {
      @Override
      public void run() {
        for (int i = 0; i < iteration; i++) {
          try {
            String user = getCurrentUser();
            checkFileContent(fs.getUri(), file1, dataToWrite, 0, user, conf, true);
          } catch (IOException e) {
            e.printStackTrace();
          } catch (InterruptedException e) {
            e.printStackTrace();
          }
        }
      }
    };
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].start();
  }
  for (int i = 0; i < threadCount; i++) {
    threads[i].join();
  }
  long end = Time.now();
  System.out.println("Iteration " + iteration + " took " + (end - start));
  fs.delete(file1, false);
}
 
Author: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 69 | Source: TestShortCircuitLocalRead.java


Note: The org.apache.hadoop.hdfs.AppendTestUtil.randomBytes examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors; follow each project's License when distributing or using the code. Do not reproduce this article without permission.