

Java FSDataOutputStream.writeChars Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.writeChars. If you are unsure what FSDataOutputStream.writeChars does or how to call it, the curated examples below should help. You can also browse the other usage examples for org.apache.hadoop.fs.FSDataOutputStream.


Six code examples of the FSDataOutputStream.writeChars method are shown below, ordered by popularity by default.
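Before the examples, a note on semantics: FSDataOutputStream implements java.io.DataOutput, so writeChars follows the DataOutput contract and writes every char of the string as two bytes, high byte first (UTF-16BE), with no length prefix or terminator. The following minimal sketch demonstrates the same contract using the plain JDK DataOutputStream, so it runs without a Hadoop cluster:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WriteCharsDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    // Same DataOutput contract that FSDataOutputStream.writeChars follows:
    // each char becomes two bytes, high byte first.
    out.writeChars("hi");
    out.close();
    byte[] bytes = buf.toByteArray();
    // "hi" is written as 4 bytes: 00 68 00 69
    System.out.println(bytes.length); // prints 4
    for (byte b : bytes) {
      System.out.printf("%02x ", b);
    }
  }
}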

Example 1: writeFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void writeFile(final DistributedFileSystem dfs,
    Path dir, String fileName) throws IOException {
  Path filePath = new Path(dir.toString() + Path.SEPARATOR + fileName);
  final FSDataOutputStream out = dfs.create(filePath);
  out.writeChars("teststring");
  out.close();
}
 
Developer: naver | Project: hadoop | Lines: 8 | Source: TestFsck.java
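A file written this way must be read back char by char; a minimal read-back sketch, assuming the same dfs and filePath as in writeFile above:

import org.apache.hadoop.fs.FSDataInputStream;

// Hypothetical read-back for the file written above: FSDataInputStream
// implements java.io.DataInput, so readChar consumes the two bytes
// (high byte first) that each writeChars char produced.
try (FSDataInputStream in = dfs.open(filePath)) {
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < "teststring".length(); i++) {
    sb.append(in.readChar());
  }
  System.out.println(sb); // prints: teststring
}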

Example 2: createInputFile

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
private void createInputFile(Path path, int rowCount)
  throws IOException {
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
  FSDataOutputStream os = fs.create(path);
  for (int i = 0; i < rowCount; i++) {
    String s = i + "\n";
    os.writeChars(s);
  }
  os.close();
}
 
Developer: aliyun | Project: aliyun-maxcompute-data-collectors | Lines: 13 | Source: HCatalogTestUtils.java
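Caveat: because writeChars emits two bytes per char, each ASCII row above carries an interleaved NUL byte before every character, which confuses readers that expect single-byte text lines. If the intent is an ordinary UTF-8 text file, a variant like the following hypothetical sketch (the method name is ours; it assumes the same fs field as the example above) is safer:

import java.nio.charset.StandardCharsets;

// Hypothetical variant of createInputFile that writes plain UTF-8 rows
// instead of two-byte chars, so standard text readers see clean lines.
private void createUtf8InputFile(Path path, int rowCount) throws IOException {
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
  try (FSDataOutputStream os = fs.create(path)) {
    for (int i = 0; i < rowCount; i++) {
      os.write((i + "\n").getBytes(StandardCharsets.UTF_8));
    }
  }
}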

Example 3: testForNonDefaultFileSystem

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
/**
 * Test when input files are from non-default file systems
 */
@Test
public void testForNonDefaultFileSystem() throws Throwable {
  Configuration conf = new Configuration();

  // use a fake file system scheme as default
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DUMMY_FS_URI);

  // default fs path
  assertEquals(DUMMY_FS_URI, FileSystem.getDefaultUri(conf).toString());
  // add a local file
  Path localPath = new Path("testFile1");
  FileSystem lfs = FileSystem.getLocal(conf);
  FSDataOutputStream dos = lfs.create(localPath);
  dos.writeChars("Local file for CFIF");
  dos.close();

  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, lfs.makeQualified(localPath));
  DummyInputFormat inFormat = new DummyInputFormat();
  List<InputSplit> splits = inFormat.getSplits(job);
  assertTrue(splits.size() > 0);
  for (InputSplit s : splits) {
    CombineFileSplit cfs = (CombineFileSplit)s;
    for (Path p : cfs.getPaths()) {
assertEquals("file", p.toUri().getScheme());
    }
  }
}
 
Developer: naver | Project: hadoop | Lines: 32 | Source: TestCombineFileInputFormat.java

Example 4: testScheduleSameBlock

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test
public void testScheduleSameBlock() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(4).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleSameBlock/file";
    
    {
      final FSDataOutputStream out = dfs.create(new Path(file));
      out.writeChars("testScheduleSameBlock");
      out.close();
    }

    final Mover mover = newMover(conf);
    mover.init();
    final Mover.Processor processor = mover.new Processor();

    final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    final List<MLocation> locations = MLocation.toLocations(lb);
    final MLocation ml = locations.get(0);
    final DBlock db = mover.newDBlock(lb.getBlock().getLocalBlock(), locations);

    final List<StorageType> storageTypes = new ArrayList<StorageType>(
        Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
    Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
    Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 34 | Source: TestMover.java

Example 5: testMoverFailedRetry

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE}}).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();

    // Delete the block file so the block move fails with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file});
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.IO_EXCEPTION.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
 
Developer: naver | Project: hadoop | Lines: 34 | Source: TestMover.java

Example 6: testCompactionWithCorruptResult

import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class this method depends on
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = (HStore) r.getStore(COLUMN_FAMILY);

  Collection<StoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor) store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);

  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short) 3, (long) 1024, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath =
      store.getRegionFileSystem().commitStoreFile(Bytes.toString(COLUMN_FAMILY), dstPath);

  try {
    store.moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // Completing the compaction should fail and the corrupt file should
    // remain in the 'tmp' directory.
    assert (fs.exists(origPath));
    assert (!fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was"
      + "thrown while completing a corrupt file");
}
 
Developer: fengchen8086 | Project: ditb | Lines: 36 | Source: TestCompaction.java


Note: The org.apache.hadoop.fs.FSDataOutputStream.writeChars examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from community-contributed open-source projects. Copyright of the source code remains with the original authors; consult each project's license before redistributing or reusing the code. Do not republish without permission.