

Java CreateOpts Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.fs.Options.CreateOpts. If you are wondering what the CreateOpts class is for, how to use it, or what working code that uses it looks like, the curated examples below should help.


The CreateOpts class belongs to the org.apache.hadoop.fs.Options package. Fifteen code examples of the CreateOpts class are shown below, sorted by popularity by default.
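Before the collected examples, here is a minimal, self-contained sketch of the pattern they all follow: CreateOpts factory values such as createParent(), bufferSize(), repFac(), and blockSize() are passed as trailing varargs to FileContext.create() together with a set of CreateFlag values. This sketch was written for this article rather than taken from any of the projects below; the class name, path, and sizes are placeholders.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;

public class CreateOptsSketch {
  public static void main(String[] args) throws IOException {
    FileContext fc = FileContext.getFileContext(new Configuration());
    // Placeholder path for this sketch.
    Path path = new Path("/tmp/createopts-demo");
    FSDataOutputStream out = fc.create(path,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(),               // create missing parent directories
        CreateOpts.bufferSize(4096),             // buffer size of the underlying stream
        CreateOpts.repFac((short) 1),            // replication factor
        CreateOpts.blockSize(64 * 1024 * 1024)); // block size in bytes
    try {
      out.write("hello".getBytes(StandardCharsets.UTF_8));
    } finally {
      out.close();
    }
  }
}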

Example 1: createWriter

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
/**
 * Construct the preferred type of SequenceFile Writer.
 * @param fs The configured filesystem.
 * @param conf The configuration.
 * @param name The name of the file.
 * @param keyClass The 'key' type.
 * @param valClass The 'value' type.
 * @param bufferSize buffer size for the underlying output stream.
 * @param replication replication factor for the file.
 * @param blockSize block size for the file.
 * @param createParent create parent directory if non-existent
 * @param compressionType The compression type.
 * @param codec The compression codec.
 * @param metadata The metadata of the file.
 * @return Returns the handle to the constructed SequenceFile Writer.
 * @throws IOException
 */
@Deprecated
public static Writer
createWriter(FileSystem fs, Configuration conf, Path name,
             Class keyClass, Class valClass, int bufferSize,
             short replication, long blockSize, boolean createParent,
             CompressionType compressionType, CompressionCodec codec,
             Metadata metadata) throws IOException {
  return createWriter(FileContext.getFileContext(fs.getUri(), conf),
      conf, name, keyClass, valClass, compressionType, codec,
      metadata, EnumSet.of(CreateFlag.CREATE,CreateFlag.OVERWRITE),
      CreateOpts.bufferSize(bufferSize),
      createParent ? CreateOpts.createParent()
                   : CreateOpts.donotCreateParent(),
      CreateOpts.repFac(replication),
      CreateOpts.blockSize(blockSize)
    );
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 35, Source: SequenceFile.java
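As a rough, hypothetical call site for the deprecated overload shown above (not part of the original example; the key/value types, path, and sizes are placeholders chosen for illustration), it could be invoked like this, using CompressionType.NONE so that no codec is required:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class CreateWriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Placeholder path and sizes for this sketch.
    SequenceFile.Writer writer = SequenceFile.createWriter(
        fs, conf, new Path("/tmp/demo.seq"),
        Text.class, Text.class,
        4096,                      // bufferSize for the underlying output stream
        (short) 1,                 // replication factor
        128L * 1024 * 1024,        // blockSize in bytes
        true,                      // createParent
        SequenceFile.CompressionType.NONE,
        null,                      // no codec needed with CompressionType.NONE
        new SequenceFile.Metadata());
    try {
      writer.append(new Text("key"), new Text("value"));
    } finally {
      writer.close();
    }
  }
}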

Example 2: testGetFileContext1

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
@Test
/*
 * Test method
 *  org.apache.hadoop.fs.FileContext.getFileContext(AbstractFileSystem)
 */
public void testGetFileContext1() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  AbstractFileSystem asf = fc.getDefaultFileSystem();
  // create FileContext using the protected #getFileContext(1) method:
  FileContext fc2 = FileContext.getFileContext(asf);
  // Now just check that this context can do something reasonable:
  final Path path = new Path(rootPath, "zoo");
  FSDataOutputStream out = fc2.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  out.close();
  Path pathResolved = fc2.resolvePath(path);
  assertEquals(pathResolved.toUri().getPath(), path.toUri().getPath());
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 19, Source: FileContextMainOperationsBaseTest.java

Example 3: testCreateFileViaDanglingLinkParent

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
@Test(timeout=10000)
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  wrapper.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    out = wrapper.create(file, EnumSet.of(CreateFlag.CREATE),
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 18, Source: SymlinkBaseTest.java

Example 4: genFile

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    numOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      long s = Math.min(fileSize, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    startTime = Time.now();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    numOfOps[WRITE_CLOSE]++;
  } finally {
    IOUtils.cleanup(LOG, out);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 29, Source: LoadGenerator.java

Example 5: genFile

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = null;
  try {
    out = fc.create(file,
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
        CreateOpts.createParent(), CreateOpts.bufferSize(4096),
        CreateOpts.repFac((short) 3));
    executionTime[CREATE] += (Time.now() - startTime);
    totalNumOfOps[CREATE]++;

    long i = fileSize;
    while (i > 0) {
      long s = Math.min(fileSize, WRITE_CONTENTS.length);
      out.write(WRITE_CONTENTS, 0, (int) s);
      i -= s;
    }

    startTime = Time.now();
    executionTime[WRITE_CLOSE] += (Time.now() - startTime);
    totalNumOfOps[WRITE_CLOSE]++;
  } finally {
    IOUtils.cleanup(LOG, out);
  }
}
 
Developer: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines: 29, Source: LoadGenerator.java

Example 6: testCreateFileViaDanglingLinkParent

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
@Test
/** Try to create a file with parent that is a dangling link */
public void testCreateFileViaDanglingLinkParent() throws IOException {
  Path dir  = new Path(testBaseDir1()+"/dangling");
  Path file = new Path(testBaseDir1()+"/dangling/file");
  fc.createSymlink(new Path("/doesNotExist"), dir, false);
  FSDataOutputStream out;
  try {
    out = fc.create(file, EnumSet.of(CreateFlag.CREATE), 
                    CreateOpts.repFac((short) 1),
                    CreateOpts.blockSize(blockSize));
    out.close();
    fail("Created a link with dangling link parent");
  } catch (FileNotFoundException e) {
    // Expected. The parent is dangling.
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 18, Source: FileContextSymlinkBaseTest.java

Example 7: genFile

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
/** Create a file with a length of <code>fileSize</code>.
 * The file is filled with 'a'.
 */
private void genFile(Path file, long fileSize) throws IOException {
  long startTime = Time.now();
  FSDataOutputStream out = fc.create(file,
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      CreateOpts.createParent(), CreateOpts.bufferSize(4096),
      CreateOpts.repFac((short) 3));
  executionTime[CREATE] += (Time.now()-startTime);
  totalNumOfOps[CREATE]++;

  for (long i=0; i<fileSize; i++) {
    out.writeByte('a');
  }
  startTime = Time.now();
  out.close();
  executionTime[WRITE_CLOSE] += (Time.now()-startTime);
  totalNumOfOps[WRITE_CLOSE]++;
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 21, Source: LoadGenerator.java

Example 8: createFile

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
public long createFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out =
    fc.create(path, EnumSet.of(CreateFlag.CREATE), options);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
  return data.length;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 13, Source: FileContextTestWrapper.java

Example 9: appendToFile

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
public void appendToFile(Path path, int numBlocks, CreateOpts... options)
    throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out;
  out = fc.create(path, EnumSet.of(CreateFlag.APPEND));
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 12, Source: FileContextTestWrapper.java

Example 10: create

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
@Override
public FSDataOutputStream create(Path f, EnumSet<CreateFlag> createFlag,
    CreateOpts... opts) throws AccessControlException,
    FileAlreadyExistsException, FileNotFoundException,
    ParentNotDirectoryException, UnsupportedFileSystemException, IOException {
  return fc.create(f, createFlag, opts);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 8, Source: FileContextTestWrapper.java

Example 11: writeReadAndDelete

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
private void writeReadAndDelete(int len) throws IOException {
  Path path = getTestRootPath(fc, "test/hadoop/file");
  
  fc.mkdir(path.getParent(), FsPermission.getDefault(), true);

  FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
      CreateOpts.repFac((short) 1), CreateOpts
          .blockSize(getDefaultBlockSize()));
  out.write(data, 0, len);
  out.close();

  Assert.assertTrue("Exists", exists(fc, path));
  Assert.assertEquals("Length", len, fc.getFileStatus(path).getLen());

  FSDataInputStream in = fc.open(path);
  byte[] buf = new byte[len];
  in.readFully(0, buf);
  in.close();

  Assert.assertEquals(len, buf.length);
  for (int i = 0; i < buf.length; i++) {
    Assert.assertEquals("Position " + i, data[i], buf[i]);
  }
  
  Assert.assertTrue("Deleted", fc.delete(path, false));
  
  Assert.assertFalse("No longer exists", exists(fc, path));

}
 
Developer: nucypher, Project: hadoop-oss, Lines: 30, Source: FileContextMainOperationsBaseTest.java

Example 12: testOutputStreamClosedTwice

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
@Test
public void testOutputStreamClosedTwice() throws IOException {
  //HADOOP-4760 according to Closeable#close() closing already-closed 
  //streams should have no effect. 
  Path src = getTestRootPath(fc, "test/hadoop/file");
  FSDataOutputStream out = fc.create(src, EnumSet.of(CREATE),
          Options.CreateOpts.createParent());
  
  out.writeChar('H'); //write some data
  out.close();
  out.close();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 13, Source: FileContextMainOperationsBaseTest.java

Example 13: testSetVerifyChecksum

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
@Test
public void testSetVerifyChecksum() throws IOException {
  final Path rootPath = getTestRootPath(fc, "test");
  final Path path = new Path(rootPath, "zoo");

  FSDataOutputStream out = fc.create(path, EnumSet.of(CREATE),
      Options.CreateOpts.createParent());
  try {
    // instruct FS to verify checksum through the FileContext:
    fc.setVerifyChecksum(true, path);
    out.write(data, 0, data.length);
  } finally {
    out.close();
  }

  // NB: underlying FS may be different (this is an abstract test),
  // so we cannot assert .zoo.crc existence.
  // Instead, we check that the file is read correctly:
  FileStatus fileStatus = fc.getFileStatus(path);
  final long len = fileStatus.getLen();
  assertTrue(len == data.length);
  byte[] bb = new byte[(int)len];
  FSDataInputStream fsdis = fc.open(path);
  try {
    fsdis.readFully(bb);
  } finally {
    fsdis.close();
  }
  assertArrayEquals(data, bb);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 31, Source: FileContextMainOperationsBaseTest.java

Example 14: createFile

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
public static long createFile(FileContext fc, Path path, int numBlocks,
    CreateOpts... options) throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out = 
    fc.create(path, EnumSet.of(CreateFlag.CREATE), options);
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
  return data.length;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 13, Source: FileContextTestHelper.java

Example 15: appendToFile

import org.apache.hadoop.fs.Options.CreateOpts; // import the required package/class
public static void appendToFile(FileContext fc, Path path, int numBlocks,
    CreateOpts... options) throws IOException {
  BlockSize blockSizeOpt = CreateOpts.getOpt(CreateOpts.BlockSize.class, options);
  long blockSize = blockSizeOpt != null ? blockSizeOpt.getValue()
      : DEFAULT_BLOCK_SIZE;
  FSDataOutputStream out;
  out = fc.create(path, EnumSet.of(CreateFlag.APPEND));
  byte[] data = getFileData(numBlocks, blockSize);
  out.write(data, 0, data.length);
  out.close();
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 12, Source: FileContextTestHelper.java


Note: the org.apache.hadoop.fs.Options.CreateOpts class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects and their copyright remains with the original authors; consult the corresponding projects' licenses before distributing or reusing the code, and do not reproduce this article without permission.