This article collects typical usage examples of the Java method org.apache.hadoop.fs.FSDataOutputStream.writeChars. If you have been wondering exactly how FSDataOutputStream.writeChars is used and what it is good for, the hand-picked code examples below should help. You may also want to look further into the enclosing class, org.apache.hadoop.fs.FSDataOutputStream.
Below are 6 code examples of the FSDataOutputStream.writeChars method, ordered by popularity.
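Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: get a FileSystem, create an FSDataOutputStream, call writeChars, and close the stream. FSDataOutputStream inherits writeChars from java.io.DataOutputStream, so each character is written as two bytes (high byte first), which means the output is not a plain UTF-8 text file. The class name and path below are illustrative placeholders, not taken from any of the examples.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteCharsDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Resolves to the local file system unless fs.defaultFS points at an HDFS cluster.
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/writechars-demo");  // placeholder path

    FSDataOutputStream out = fs.create(path, true);  // overwrite if it already exists
    try {
      // Each char is written as 2 bytes, high byte first (DataOutput contract).
      out.writeChars("teststring");
    } finally {
      out.close();
    }
  }
}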
Example 1: writeFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
private void writeFile(final DistributedFileSystem dfs,
    Path dir, String fileName) throws IOException {
  Path filePath = new Path(dir.toString() + Path.SEPARATOR + fileName);
  final FSDataOutputStream out = dfs.create(filePath);
  out.writeChars("teststring");
  out.close();
}
Example 2: createInputFile
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
private void createInputFile(Path path, int rowCount)
    throws IOException {
  if (fs.exists(path)) {
    fs.delete(path, true);
  }
  FSDataOutputStream os = fs.create(path);
  for (int i = 0; i < rowCount; i++) {
    String s = i + "\n";
    os.writeChars(s);
  }
  os.close();
}
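A caveat worth noting for this example: because writeChars stores two bytes per character, the rows written above cannot be read back with an ordinary text reader. The following sketch is a hypothetical helper, not part of the original test; it assumes the same fs and path values used in createInputFile and reads the content back with the matching FSDataInputStream.readChar calls. Splitting the returned string on "\n" then yields the rowCount rows.

// Hypothetical helper (assumption, not from the original test): reads back a
// file written with writeChars and returns its content as a single String.
private String readCharsFile(FileSystem fs, Path path) throws IOException {
  long numChars = fs.getFileStatus(path).getLen() / 2;  // writeChars: 2 bytes per char
  FSDataInputStream in = fs.open(path);
  StringBuilder sb = new StringBuilder();
  try {
    for (long i = 0; i < numChars; i++) {
      sb.append(in.readChar());  // readChar mirrors the writeChars encoding
    }
  } finally {
    in.close();
  }
  return sb.toString();
}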
Example 3: testForNonDefaultFileSystem
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
/**
 * Test when input files are from non-default file systems
 */
@Test
public void testForNonDefaultFileSystem() throws Throwable {
  Configuration conf = new Configuration();
  // use a fake file system scheme as default
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, DUMMY_FS_URI);
  // default fs path
  assertEquals(DUMMY_FS_URI, FileSystem.getDefaultUri(conf).toString());
  // add a local file
  Path localPath = new Path("testFile1");
  FileSystem lfs = FileSystem.getLocal(conf);
  FSDataOutputStream dos = lfs.create(localPath);
  dos.writeChars("Local file for CFIF");
  dos.close();
  Job job = Job.getInstance(conf);
  FileInputFormat.setInputPaths(job, lfs.makeQualified(localPath));
  DummyInputFormat inFormat = new DummyInputFormat();
  List<InputSplit> splits = inFormat.getSplits(job);
  assertTrue(splits.size() > 0);
  for (InputSplit s : splits) {
    CombineFileSplit cfs = (CombineFileSplit) s;
    for (Path p : cfs.getPaths()) {
      assertEquals(p.toUri().getScheme(), "file");
    }
  }
}
Example 4: testScheduleSameBlock
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
@Test
public void testScheduleSameBlock() throws IOException {
  final Configuration conf = new HdfsConfiguration();
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(4).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testScheduleSameBlock/file";
    {
      final FSDataOutputStream out = dfs.create(new Path(file));
      out.writeChars("testScheduleSameBlock");
      out.close();
    }
    final Mover mover = newMover(conf);
    mover.init();
    final Mover.Processor processor = mover.new Processor();
    final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    final List<MLocation> locations = MLocation.toLocations(lb);
    final MLocation ml = locations.get(0);
    final DBlock db = mover.newDBlock(lb.getBlock().getLocalBlock(), locations);
    final List<StorageType> storageTypes = new ArrayList<StorageType>(
        Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
    Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
    Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
  } finally {
    cluster.shutdown();
  }
}
Example 5: testMoverFailedRetry
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE}}).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();
    // Delete the block file so the block move will fail with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file.toString()});
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.IO_EXCEPTION.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
Example 6: testCompactionWithCorruptResult
import org.apache.hadoop.fs.FSDataOutputStream; // import the package/class the method depends on
@Test
public void testCompactionWithCorruptResult() throws Exception {
  int nfiles = 10;
  for (int i = 0; i < nfiles; i++) {
    createStoreFile(r);
  }
  HStore store = (HStore) r.getStore(COLUMN_FAMILY);
  Collection<StoreFile> storeFiles = store.getStorefiles();
  DefaultCompactor tool = (DefaultCompactor) store.storeEngine.getCompactor();
  tool.compactForTesting(storeFiles, false);
  // Now let's corrupt the compacted file.
  FileSystem fs = store.getFileSystem();
  // default compaction policy created one and only one new compacted file
  Path dstPath = store.getRegionFileSystem().createTempName();
  FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short) 3, (long) 1024, null);
  stream.writeChars("CORRUPT FILE!!!!");
  stream.close();
  Path origPath =
      store.getRegionFileSystem().commitStoreFile(Bytes.toString(COLUMN_FAMILY), dstPath);
  try {
    ((HStore) store).moveFileIntoPlace(origPath);
  } catch (Exception e) {
    // The complete compaction should fail and the corrupt file should remain
    // in the 'tmp' directory.
    assert (fs.exists(origPath));
    assert (!fs.exists(dstPath));
    System.out.println("testCompactionWithCorruptResult Passed");
    return;
  }
  fail("testCompactionWithCorruptResult failed since no exception was "
      + "thrown while completing a corrupt file");
}