

Java FSDataOutputStream.write Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.write. If you are wondering what FSDataOutputStream.write does, how to call it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.


The following 15 code examples of the FSDataOutputStream.write method are drawn from open-source projects and are sorted by popularity by default.
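
Before the individual examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below; the path is hypothetical) showing the three write overloads that recur throughout this page:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataOutputStreamWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/write-sketch.dat"); // hypothetical path
    byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
    // try-with-resources closes the stream even if a write fails
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write('h');        // write(int): a single byte (low-order 8 bits)
      out.write(data);       // write(byte[]): a whole array
      out.write(data, 0, 3); // write(byte[], int, int): a slice of an array
    }
  }
}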

Example 1: writeDataset

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Write a file.
 * An optional flag controls whether overwriting an existing file is allowed.
 * @param fs filesystem
 * @param path path to write to
 * @param src source data buffer
 * @param len number of bytes from src to write
 * @param blocksize block size for the created file
 * @param overwrite should the create option allow overwrites?
 * @throws IOException IO problems
 */
public static void writeDataset(FileSystem fs,
                                 Path path,
                                 byte[] src,
                                 int len,
                                 int blocksize,
                                 boolean overwrite) throws IOException {
  assertTrue(
    "Not enough data in source array to write " + len + " bytes",
    src.length >= len);
  FSDataOutputStream out = fs.create(path,
                                     overwrite,
                                     fs.getConf()
                                       .getInt(IO_FILE_BUFFER_SIZE,
                                               4096),
                                     (short) 1,
                                     blocksize);
  out.write(src, 0, len);
  out.close();
  assertFileHasLength(fs, path, len);
}
 
Developer: naver | Project: hadoop | Lines: 31 | Source: SwiftTestUtils.java

Example 2: writeDataset

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Write a file.
 * An optional flag controls whether overwriting an existing file is allowed.
 * @param fs filesystem
 * @param path path to write to
 * @param src source data buffer
 * @param len number of bytes from src to write
 * @param buffersize buffer size
 * @param overwrite should the create option allow overwrites?
 * @throws IOException IO problems
 */
public static void writeDataset(FileSystem fs,
                                 Path path,
                                 byte[] src,
                                 int len,
                                 int buffersize,
                                 boolean overwrite) throws IOException {
  assertTrue(
    "Not enough data in source array to write " + len + " bytes",
    src.length >= len);
  FSDataOutputStream out = fs.create(path,
                                     overwrite,
                                     fs.getConf()
                                       .getInt(IO_FILE_BUFFER_SIZE,
                                               4096),
                                     (short) 1,
                                     buffersize);
  out.write(src, 0, len);
  out.close();
  assertFileHasLength(fs, path, len);
}
 
Developer: naver | Project: hadoop | Lines: 31 | Source: ContractTestUtils.java
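
Note on Examples 1 and 2: both call the five-argument overload create(Path, boolean overwrite, int bufferSize, short replication, long blockSize). Example 1 passes blocksize in the block-size position as expected; Example 2 is identical except that its last parameter is named buffersize, which therefore also lands in the block-size position of create().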

Example 3: createFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
public static void createFile(FileSystem fs, Path fileName, long fileLen, 
    short replFactor, long seed) throws IOException {
  if (!fs.mkdirs(fileName.getParent())) {
    throw new IOException("Mkdirs failed to create " + 
                          fileName.getParent().toString());
  }
  FSDataOutputStream out = null;
  try {
    out = fs.create(fileName, replFactor);
    byte[] toWrite = new byte[1024];
    Random rb = new Random(seed);
    long bytesToWrite = fileLen;
    while (bytesToWrite > 0) {
      rb.nextBytes(toWrite);
      int bytesToWriteNext = (1024 < bytesToWrite) ? 1024 : (int) bytesToWrite;
      out.write(toWrite, 0, bytesToWriteNext);
      bytesToWrite -= bytesToWriteNext;
    }
    out.close();
    out = null;
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Developer: naver | Project: hadoop | Lines: 26 | Source: DFSTestUtil.java

Example 4: write1byte

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * 1. Create a file with DFS.
 * 2. Write 1 byte.
 * 3. Close the file.
 * 4. Reopen the same file.
 * 5. Read the 1 byte back and compare the result.
 */
static void write1byte(String methodName) throws IOException {
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf
      ).numDataNodes(REPLICATION + 1).build();
  final FileSystem dfs = cluster.getFileSystem();
  try {
    final Path p = new Path("/" + methodName + "/foo");
    final FSDataOutputStream out = createFile(dfs, p);
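    // write(int) stores only the low-order 8 bits of its argument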
    out.write(1);
    out.close();
    
    final FSDataInputStream in = dfs.open(p);
    final int b = in.read();
    in.close();
    Assert.assertEquals(1, b);
  }
  finally {
    dfs.close();
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 28 | Source: TestFiDataTransferProtocol.java

Example 5: createFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private Path createFile(final String filestr, final int size,
    final boolean triggerLeaseRenewerInterrupt)
throws IOException, InterruptedException {
  AppendTestUtil.LOG.info("filestr=" + filestr);
  Path filepath = new Path(filestr);
  FSDataOutputStream stm = dfs.create(filepath, true, BUF_SIZE,
    REPLICATION_NUM, BLOCK_SIZE);
  assertTrue(dfs.dfs.exists(filestr));

  AppendTestUtil.LOG.info("size=" + size);
  stm.write(buffer, 0, size);

  // hflush file
  AppendTestUtil.LOG.info("hflush");
  stm.hflush();

  if (triggerLeaseRenewerInterrupt) {
    AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
    dfs.dfs.getLeaseRenewer().interruptAndJoin();
  }
  return filepath;
}
 
Developer: naver | Project: hadoop | Lines: 23 | Source: TestLeaseRecovery2.java

Example 6: testWriteReadFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testWriteReadFile() throws Exception {
  final Path f = new Path("/test/test");
  final FSDataOutputStream fsDataOutputStream = fs.create(f);
  final String message = "Test string";
  fsDataOutputStream.write(message.getBytes());
  fsDataOutputStream.close();
  assertExists("created file", f);
  FSDataInputStream open = null;
  try {
    open = fs.open(f);
    final byte[] bytes = new byte[512];
    final int read = open.read(bytes);
    final byte[] buffer = new byte[read];
    System.arraycopy(bytes, 0, buffer, 0, read);
    assertEquals(message, new String(buffer));
  } finally {
    fs.delete(f, false);
    IOUtils.closeStream(open);
  }
}
 
Developer: naver | Project: hadoop | Lines: 22 | Source: TestSwiftFileSystemExtendedContract.java

Example 7: writeFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
protected void writeFile(Path outputPath, byte[] data, boolean
        makeUnreadableByAdmin) throws Exception {
  Path tempPath =
      new Path(outputPath.getParent(), outputPath.getName() + ".tmp");
  FSDataOutputStream fsOut = null;
  // This file will be overwritten when app/attempt finishes for saving the
  // final status.
  try {
    fsOut = fs.create(tempPath, true);
    if (makeUnreadableByAdmin) {
      setUnreadableBySuperuserXattrib(tempPath);
    }
    fsOut.write(data);
    fsOut.close();
    fsOut = null;
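    // Rename only after a successful close; an HDFS rename is atomic, so
    // readers see either the old file or the complete new one, never a
    // partial write.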
    fs.rename(tempPath, outputPath);
  } finally {
    IOUtils.cleanup(LOG, fsOut);
  }
}
 
Developer: naver | Project: hadoop | Lines: 21 | Source: FileSystemRMStateStore.java

Example 8: testRBWFileCreationError

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@SuppressWarnings("unchecked")
@Test(timeout = 30000)
public void testRBWFileCreationError() throws Exception {

  final short replication = 1;
  startCluster(BLOCK_SIZE, replication, -1);

  final FsVolumeImpl fsVolumeImpl = (FsVolumeImpl) cluster.getDataNodes()
      .get(0).getFSDataset().getVolumes().get(0);
  final String methodName = GenericTestUtils.getMethodName();
  final Path file = new Path("/" + methodName + ".01.dat");

  // Mock BlockPoolSlice so that RBW file creation throws an IOException
  BlockPoolSlice blockPoolSlice = Mockito.mock(BlockPoolSlice.class);
  Mockito.when(blockPoolSlice.createRbwFile((Block) Mockito.any()))
      .thenThrow(new IOException("Synthetic IO Exception Through MOCK"));

  Field field = FsVolumeImpl.class.getDeclaredField("bpSlices");
  field.setAccessible(true);
  Map<String, BlockPoolSlice> bpSlices = (Map<String, BlockPoolSlice>) field
      .get(fsVolumeImpl);
  bpSlices.put(fsVolumeImpl.getBlockPoolList()[0], blockPoolSlice);

  try {
    // Write 1 byte to the file
    FSDataOutputStream os = fs.create(file, replication);
    os.write(new byte[1]);
    os.hsync();
    os.close();
    fail("Expected an IOException from the file creation failure");
  } catch (IOException e) {
    // Exception can be ignored (expected)
  }

  // Ensure RBW space reserved is released
  assertTrue("Expected ZERO but got " + fsVolumeImpl.getReservedForRbw(),
      fsVolumeImpl.getReservedForRbw() == 0);
}
 
Developer: naver | Project: hadoop | Lines: 39 | Source: TestRbwSpaceReservation.java
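
Examples 5 and 8 lean on the durability calls hflush() and hsync(). As a quick reference, here is a minimal sketch (written for this article; the path is hypothetical) of what each call guarantees:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DurabilitySketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    byte[] data = "payload".getBytes(StandardCharsets.UTF_8);
    try (FSDataOutputStream out = fs.create(new Path("/tmp/durability.dat"), true)) {
      out.write(data);
      out.hflush(); // flush to the datanode pipeline: new readers can see the
                    // data, but it is not necessarily on disk yet
      out.write(data);
      out.hsync();  // like hflush(), but also asks each datanode to sync the
                    // data to disk
    }
  }
}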

Example 9: writeFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void writeFile(FileSystem fileSys, Path name) throws IOException {
  // create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
      .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) 1, blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
 
Developer: naver | Project: hadoop | Lines: 12 | Source: TestSmallBlock.java

Example 10: testUpdateNeededReplicationsForAppendedFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Test the update of NeededReplications for the appended block.
 */
@Test(timeout = 60000)
public void testUpdateNeededReplicationsForAppendedFile() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file.
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Append to the file.
    FSDataOutputStream append = fileSystem.append(f);
    append.write("/testAppend".getBytes());
    append.close();

    // Start a new datanode
    cluster.startDataNodes(conf, 1, true, null, null);

    // Check for replications
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 35 | Source: TestFileAppend4.java

Example 11: generateData

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Generates data that can be used for Job Control tests.
 * 
 * @param fs FileSystem to create data in.
 * @param dirPath Path to create the data in.
 * @throws IOException If an error occurs creating the data.
 */
static void generateData(FileSystem fs, Path dirPath) throws IOException {
  FSDataOutputStream out = fs.create(new Path(dirPath, "data.txt"));
  for (int i = 0; i < 10000; i++) {
    String line = generateRandomLine();
    out.write(line.getBytes("UTF-8"));
  }
  out.close();
}
 
Developer: naver | Project: hadoop | Lines: 16 | Source: JobControlTestUtils.java

Example 12: testManyPartitionedFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Test uploads a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");

  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = fs.listStatus(path);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Developer: naver | Project: hadoop | Lines: 39 | Source: TestSwiftFileSystemPartitionedUploads.java

Example 13: doMROnTableTest

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Run an ImportTsv job and perform basic validation on the results. Returns
 * the ImportTsv <code>Tool</code> instance so that other tests can inspect it
 * for further validation as necessary. This method is static to ensure
 * it does not rely on the instance's util/conf facilities.
 * 
 * @param args
 *          Any arguments to pass BEFORE inputFile path is appended.
 * @param dataAvailable
 * @return The Tool instance used to run the test.
 */
private Tool doMROnTableTest(HBaseTestingUtility util, String family, String data, String[] args,
    int valueMultiplier, boolean dataAvailable) throws Exception {
  String table = args[args.length - 1];
  Configuration conf = new Configuration(util.getConfiguration());

  // populate input file
  FileSystem fs = FileSystem.get(conf);
  Path inputPath = fs.makeQualified(new Path(util.getDataTestDirOnTestFS(table), "input.dat"));
  FSDataOutputStream op = fs.create(inputPath, true);
  op.write(Bytes.toBytes(data));
  op.close();
  LOG.debug(String.format("Wrote test data to file: %s", inputPath));

  if (conf.getBoolean(FORCE_COMBINER_CONF, true)) {
    LOG.debug("Forcing combiner.");
    conf.setInt("mapreduce.map.combine.minspills", 1);
  }

  // run the import
  List<String> argv = new ArrayList<String>(Arrays.asList(args));
  argv.add(inputPath.toString());
  Tool tool = new ImportTsv();
  LOG.debug("Running ImportTsv with arguments: " + argv);
  assertEquals(0, ToolRunner.run(conf, tool, argv.toArray(args)));

  validateTable(conf, TableName.valueOf(table), family, valueMultiplier, dataAvailable);

  if (conf.getBoolean(DELETE_AFTER_LOAD_CONF, true)) {
    LOG.debug("Deleting test subdirectory");
    util.cleanupDataTestDirOnTestFS(table);
  }
  return tool;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 45 | Source: TestImportTSVWithOperationAttributes.java

Example 14: writeFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private static void writeFile(FileSystem fileSys, Path name, int fileSize)
throws IOException {
  // Create and write a file that contains three blocks of data
  FSDataOutputStream stm = fileSys.create(name);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
 
Developer: naver | Project: hadoop | Lines: 11 | Source: TestListFiles.java

Example 15: writePasswordToLocalFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void writePasswordToLocalFile(String localPasswordFile,
    byte[] password, JobConf conf) throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);
  Path localPath = new Path(localPasswordFile);
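  // permission "400" is octal: the file is readable only by its owner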
  FSDataOutputStream out = FileSystem.create(localFs, localPath,
      new FsPermission("400"));
  out.write(password);
  out.close();
}
 
Developer: naver | Project: hadoop | Lines: 10 | Source: Application.java

