This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.create. If you have been wondering what FileSystem.create does, how to call it, or where to find working examples, the curated snippets below should help. You can also read further about the enclosing class, org.apache.hadoop.fs.FileSystem.
The following shows 15 code examples of the FileSystem.create method, sorted by popularity by default.
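Before turning to the collected examples, here is a minimal sketch of the most basic usage. The file path and the written string are placeholders chosen for illustration; fs.create(Path) overwrites an existing file by default and returns an FSDataOutputStream. The examples further down also use richer overloads that take a permission, buffer size, replication factor, and block size, as well as the static helper FileSystem.create(FileSystem, Path, FsPermission).

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCreateSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // "/tmp/example.txt" is a placeholder path used only for illustration.
    Path file = new Path("/tmp/example.txt");
    // create(Path) overwrites an existing file by default and returns an FSDataOutputStream.
    try (FSDataOutputStream out = fs.create(file)) {
      out.write("hello hdfs".getBytes(StandardCharsets.UTF_8));
    }
  }
}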
Example 1: createExportFile
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
protected void createExportFile(ColumnGenerator... extraCols)
    throws IOException {
  String ext = ".txt";

  Path tablePath = getTablePath();
  Path filePath = new Path(tablePath, "part0" + ext);

  Configuration conf = new Configuration();
  if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
    conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
  }
  FileSystem fs = FileSystem.get(conf);
  fs.mkdirs(tablePath);
  OutputStream os = fs.create(filePath);
  BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
  for (int i = 0; i < 3; i++) {
    String line = getRecordLine(i, extraCols);
    w.write(line);
    LOG.debug("Create Export file - Writing line : " + line);
  }
  w.close();
  os.close();
}
Example 2: writeConfigFile
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private static void writeConfigFile(FileSystem fs, Path name,
    ArrayList<String> nodes) throws IOException {
  // delete if it already exists
  if (fs.exists(name)) {
    fs.delete(name, true);
  }

  FSDataOutputStream stm = fs.create(name);

  if (nodes != null) {
    for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
      String node = it.next();
      stm.writeBytes(node);
      stm.writeBytes("\n");
    }
  }
  stm.close();
}
Example 3: writeAndAppend
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private void writeAndAppend(FileSystem fs, Path p,
    int lengthForCreate, int lengthForAppend) throws IOException {
  // Creating a file with 4096 blockSize to write multiple blocks
  FSDataOutputStream stream = fs.create(
      p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
  try {
    AppendTestUtil.write(stream, 0, lengthForCreate);
    stream.close();

    stream = fs.append(p);
    AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
    stream.close();
  } finally {
    IOUtils.closeStream(stream);
  }

  int totalLength = lengthForCreate + lengthForAppend;
  assertEquals(totalLength, fs.getFileStatus(p).getLen());
}
Example 4: execute
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return null; the create operation produces no return value.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Void execute(FileSystem fs) throws IOException {
  if (replication == -1) {
    replication = fs.getDefaultReplication(path);
  }
  if (blockSize == -1) {
    blockSize = fs.getDefaultBlockSize(path);
  }
  FsPermission fsPermission = new FsPermission(permission);
  int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
  OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
  IOUtils.copyBytes(is, os, bufferSize, true);
  os.close();
  return null;
}
Example 5: testStatusLimit
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
@Test
public void testStatusLimit() throws IOException, InterruptedException,
    ClassNotFoundException {
  Path test = new Path(testRootTempDir, "testStatusLimit");

  Configuration conf = new Configuration();
  Path inDir = new Path(test, "in");
  Path outDir = new Path(test, "out");
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(inDir)) {
    fs.delete(inDir, true);
  }
  fs.mkdirs(inDir);
  DataOutputStream file = fs.create(new Path(inDir, "part-" + 0));
  file.writeBytes("testStatusLimit");
  file.close();

  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }

  Job job = Job.getInstance(conf, "testStatusLimit");
  job.setMapperClass(StatusLimitMapper.class);
  job.setNumReduceTasks(0);

  FileInputFormat.addInputPath(job, inDir);
  FileOutputFormat.setOutputPath(job, outDir);

  job.waitForCompletion(true);

  assertTrue("Job failed", job.isSuccessful());
}
Example 6: decompress
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private static void decompress(FileSystem fs, String in, String outpath) throws IOException {
  Configuration conf = new Configuration();
  CompressionCodecFactory factory = new CompressionCodecFactory(conf);
  // the correct codec is discovered from the file's extension
  CompressionCodec codec = factory.getCodec(new Path(in));
  // decompress the compressed input file
  InputStream is = codec.createInputStream(fs.open(new Path(in)));
  OutputStream out = fs.create(new Path(outpath));
  // write the decompressed bytes out
  IOUtils.copyBytes(is, out, conf);
  is.close();
  out.close();
}
Example 7: testAppendTwice
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/** Test two consecutive appends on a file with a full block. */
@Test
public void testAppendTwice() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs1 = cluster.getFileSystem();
  final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
  try {
    final Path p = new Path("/testAppendTwice/foo");
    final int len = 1 << 16;
    final byte[] fileContents = AppendTestUtil.initBuffer(len);

    {
      // create a new file with a full block.
      FSDataOutputStream out = fs2.create(p, true, 4096, (short) 1, len);
      out.write(fileContents, 0, len);
      out.close();
    }

    // 1st append does not add any data so that the last block remains full
    // and the last block in INodeFileUnderConstruction is a BlockInfo
    // but not BlockInfoUnderConstruction.
    fs2.append(p);

    // 2nd append should get AlreadyBeingCreatedException
    fs1.append(p);
    Assert.fail();
  } catch (RemoteException re) {
    AppendTestUtil.LOG.info("Got an exception:", re);
    Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
        re.getClassName());
  } finally {
    fs2.close();
    fs1.close();
    cluster.shutdown();
  }
}
Example 8: writeFile
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private void writeFile(FileSystem fileSys, Path name, int repl)
    throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short) repl, blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
Example 9: testRenameRace
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
@Test
public void testRenameRace() throws Exception {
  try {
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();

    Path dirPath1 = new Path("/testRenameRace1");
    Path dirPath2 = new Path("/testRenameRace2");
    Path filePath = new Path("/testRenameRace1/file1");

    fs.mkdirs(dirPath1);
    FSDataOutputStream out = fs.create(filePath);
    Thread renameThread = new RenameThread(fs, dirPath1, dirPath2);
    renameThread.start();

    // write data and close to make sure a block is allocated.
    out.write(new byte[32], 0, 32);
    out.close();

    // Restart name node so that it replays edit. If old path was
    // logged in edit, it will fail to come up.
    cluster.restartNameNode(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 10: writeFileToHadoop
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
public synchronized void writeFileToHadoop(List<ConsumerRecord<String, String>> buffer) {
  Configuration configuration = new Configuration();
  String str;
  StringBuffer stringBuffer = new StringBuffer();
  try {
    FileSystem fileSystem = FileSystem.get(configuration);
    Path path = new Path("/user/hive/output/data.dat");
    FSDataOutputStream fsDataOutputStream = fileSystem.create(path);
    for (int i = 0; i < buffer.size(); i++) {
      str = buffer.get(i).value() + "\t" + buffer.get(i).value() + "\n";
      stringBuffer.append(str);
    }
    fsDataOutputStream.write(stringBuffer.toString().getBytes(), 0, stringBuffer.toString().getBytes().length);
    fsDataOutputStream.flush();
    fsDataOutputStream.close();
    stringBuffer.delete(0, stringBuffer.length());
    insertIntoHive(); // load the written file into Hive
  } catch (IOException e) {
    e.printStackTrace(); // surface the error instead of silently swallowing it
  }
}
Example 11: writeHTD
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
    throws IOException {
  FSDataOutputStream out = fs.create(p, false);
  try {
    // We used to write this file out as a serialized HTD Writable followed by two '\n's and then
    // the toString version of HTD. Now we just write out the pb serialization.
    out.write(htd.toByteArray());
  } finally {
    out.close();
  }
}
Example 12: createEmptyHarArchive
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
/**
 * Create an empty Har archive in the FileSystem fs at the Path p.
 *
 * @param fs the file system to create the Har archive in
 * @param p the path to create the Har archive at
 * @throws IOException in the event of error
 */
private static void createEmptyHarArchive(FileSystem fs, Path p)
    throws IOException {
  fs.mkdirs(p);
  OutputStream out = fs.create(new Path(p, "_masterindex"));
  out.write(Integer.toString(HarFileSystem.VERSION).getBytes());
  out.close();
  fs.create(new Path(p, "_index")).close();
}
Example 13: addToLocalResources
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
private void addToLocalResources(FileSystem fs, String fileSrcPath,
    String fileDstPath, String appId, Map<String, LocalResource> localResources,
    String resources) throws IOException {
  String suffix = "prkeyrotation" + "/" + appId + "/" + fileDstPath;
  Path dst = new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem
          .create(fs, dst, new FsPermission((short) 0710));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc =
      LocalResource.newInstance(
          ConverterUtils.getYarnUrlFromPath(dst),
          LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
          scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}
Example 14: configure
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
public void configure(JobConf conf) {
  try {
    FileSystem fs = FileSystem.get(conf);
    OutputStream os =
        fs.create(FileOutputFormat.getPathForCustomFile(conf, "test"));
    os.write(1);
    os.close();
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
}
Example 15: testWriteAfterClose
import org.apache.hadoop.fs.FileSystem; // import the dependent package/class
@Test
public void testWriteAfterClose() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .build();
  try {
    final byte[] data = "foo".getBytes();

    FileSystem fs = FileSystem.get(conf);
    OutputStream out = fs.create(new Path("/test"));
    out.write(data);
    out.close();

    try {
      // Should fail.
      out.write(data);
      fail("Should not have been able to write more data after file is closed.");
    } catch (ClosedChannelException cce) {
      // We got the correct exception. Ignoring.
    }

    // Should succeed. Double closes are OK.
    out.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}