

Java FileSystem.create Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.create. If you are wondering what FileSystem.create does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.fs.FileSystem.


Below are 15 code examples of the FileSystem.create method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
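Before the collected examples, here is a minimal sketch of the pattern they all share: obtain a FileSystem from a Configuration, call create to open an output stream, write, and close. The path and payload below are illustrative assumptions, not taken from any of the projects cited later.

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemCreateSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Resolves to whatever fs.defaultFS points at (HDFS on a cluster, the local FS otherwise).
    FileSystem fs = FileSystem.get(conf);
    Path target = new Path("/tmp/filesystem-create-demo.txt"); // hypothetical path
    // create() returns an FSDataOutputStream and overwrites an existing file by default.
    try (OutputStream os = fs.create(target)) {
      os.write("hello, FileSystem.create".getBytes(StandardCharsets.UTF_8));
    }
  }
}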

Example 1: createExportFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
protected void createExportFile(ColumnGenerator... extraCols)
  throws IOException {
  String ext = ".txt";

  Path tablePath = getTablePath();
  Path filePath = new Path(tablePath, "part0" + ext);

  Configuration conf = new Configuration();
  if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
    conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
  }
  FileSystem fs = FileSystem.get(conf);
  fs.mkdirs(tablePath);
  OutputStream os = fs.create(filePath);

  BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
  for (int i = 0; i < 3; i++) {
    String line = getRecordLine(i, extraCols);
    w.write(line);
    LOG.debug("Create Export file - Writing line : " + line);
  }
  w.close();
  os.close();
}
 
Developer: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 25, Source: NetezzaExportManualTest.java

Example 2: writeConfigFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private static void writeConfigFile(FileSystem fs, Path name,
    ArrayList<String> nodes) throws IOException {

  // delete if it already exists
  if (fs.exists(name)) {
    fs.delete(name, true);
  }

  FSDataOutputStream stm = fs.create(name);

  if (nodes != null) {
    for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
      String node = it.next();
      stm.writeBytes(node);
      stm.writeBytes("\n");
    }
  }
  stm.close();
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestDecommissioningStatus.java

Example 3: writeAndAppend

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void writeAndAppend(FileSystem fs, Path p,
    int lengthForCreate, int lengthForAppend) throws IOException {
  // Creating a file with 4096 blockSize to write multiple blocks
  FSDataOutputStream stream = fs.create(
      p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
  try {
    AppendTestUtil.write(stream, 0, lengthForCreate);
    stream.close();
    
    stream = fs.append(p);
    AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
    stream.close();
  } finally {
    IOUtils.closeStream(stream);
  }
  
  int totalLength = lengthForCreate + lengthForAppend; 
  assertEquals(totalLength, fs.getFileStatus(p).getLen());
}
 
Developer: naver, Project: hadoop, Lines: 20, Source: TestFileAppendRestart.java

Example 4: execute

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return {@code null}; the method's result is the file written from the input stream.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Void execute(FileSystem fs) throws IOException {
  if (replication == -1) {
    replication = fs.getDefaultReplication(path);
  }
  if (blockSize == -1) {
    blockSize = fs.getDefaultBlockSize(path);
  }
  FsPermission fsPermission = new FsPermission(permission);
  int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
  OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
  IOUtils.copyBytes(is, os, bufferSize, true);
  os.close();
  return null;
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: FSOperations.java

Example 5: testStatusLimit

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testStatusLimit() throws IOException, InterruptedException,
    ClassNotFoundException {
  Path test = new Path(testRootTempDir, "testStatusLimit");

  Configuration conf = new Configuration();
  Path inDir = new Path(test, "in");
  Path outDir = new Path(test, "out");
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(inDir)) {
    fs.delete(inDir, true);
  }
  fs.mkdirs(inDir);
  DataOutputStream file = fs.create(new Path(inDir, "part-" + 0));
  file.writeBytes("testStatusLimit");
  file.close();

  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }

  Job job = Job.getInstance(conf, "testStatusLimit");

  job.setMapperClass(StatusLimitMapper.class);
  job.setNumReduceTasks(0);

  FileInputFormat.addInputPath(job, inDir);
  FileOutputFormat.setOutputPath(job, outDir);

  job.waitForCompletion(true);

  assertTrue("Job failed", job.isSuccessful());
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: TestReporter.java

Example 6: decompress

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private static void decompress(FileSystem fs, String in, String outpath) throws IOException {
  Configuration conf = new Configuration();
  CompressionCodecFactory factory = new CompressionCodecFactory(conf);
  // The correct codec is discovered from the input file's extension.
  CompressionCodec codec = factory.getCodec(new Path(in));
  // Open a decompressing stream over the compressed input file.
  InputStream is = codec.createInputStream(fs.open(new Path(in)));
  OutputStream out = fs.create(new Path(outpath));
  // Copy the decompressed bytes to the output path.
  IOUtils.copyBytes(is, out, conf);
  is.close();
  out.close();
}
 
Developer: NGSeq, Project: ViraPipe, Lines: 15, Source: Decompress.java
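As a hedged usage sketch for the helper above: since CompressionCodecFactory matches a codec by file extension, a caller might invoke it like this (the paths, the .gz suffix, and access to the private decompress method are assumptions for illustration only):

public static void main(String[] args) throws IOException {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  // The .gz suffix resolves to GzipCodec; other registered codecs
  // (e.g. .bz2) are matched by their extensions the same way.
  decompress(fs, "/data/reads.fastq.gz", "/data/reads.fastq");
}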

Example 7: testAppendTwice

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/** Test two consecutive appends on a file with a full block. */
@Test
public void testAppendTwice() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs1 = cluster.getFileSystem();
  final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
  try {

    final Path p = new Path("/testAppendTwice/foo");
    final int len = 1 << 16;
    final byte[] fileContents = AppendTestUtil.initBuffer(len);

    {
      // create a new file with a full block.
      FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
      out.write(fileContents, 0, len);
      out.close();
    }

    //1st append does not add any data so that the last block remains full
    //and the last block in INodeFileUnderConstruction is a BlockInfo
    //but not BlockInfoUnderConstruction. 
    fs2.append(p);
    
    //2nd append should get AlreadyBeingCreatedException
    fs1.append(p);
    Assert.fail();
  } catch(RemoteException re) {
    AppendTestUtil.LOG.info("Got an exception:", re);
    Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
        re.getClassName());
  } finally {
    fs2.close();
    fs1.close();
    cluster.shutdown();
  }
}
 
Developer: naver, Project: hadoop, Lines: 39, Source: TestFileAppend.java

Example 8: writeFile

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short)repl, blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: TestJMXGet.java

Example 9: testRenameRace

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testRenameRace() throws Exception {
  try {
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    Path dirPath1 = new Path("/testRenameRace1");
    Path dirPath2 = new Path("/testRenameRace2");
    Path filePath = new Path("/testRenameRace1/file1");
    

    fs.mkdirs(dirPath1);
    FSDataOutputStream out = fs.create(filePath);
    Thread renameThread = new RenameThread(fs, dirPath1, dirPath2);
    renameThread.start();

    // write data and close to make sure a block is allocated.
    out.write(new byte[32], 0, 32);
    out.close();

    // Restart name node so that it replays edit. If old path was
    // logged in edit, it will fail to come up.
    cluster.restartNameNode(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 31, Source: TestDeleteRace.java

Example 10: writeFileToHadoop

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public synchronized void writeFileToHadoop(List<ConsumerRecord<String, String>> buffer) {
  Configuration configuration = new Configuration();
  StringBuilder stringBuilder = new StringBuilder();
  try {
    FileSystem fileSystem = FileSystem.get(configuration);
    Path path = new Path("/user/hive/output/data.dat");
    FSDataOutputStream fsDataOutputStream = fileSystem.create(path);

    // Buffer all records as tab-separated lines, then write once.
    // Note: the original code writes record.value() twice per line.
    for (ConsumerRecord<String, String> record : buffer) {
      stringBuilder.append(record.value()).append("\t").append(record.value()).append("\n");
    }
    byte[] bytes = stringBuilder.toString().getBytes();
    fsDataOutputStream.write(bytes, 0, bytes.length);
    fsDataOutputStream.flush();
    fsDataOutputStream.close();
    insertIntoHive(); // load the written file into Hive
  } catch (IOException e) {
    // The original swallowed this silently; at minimum, surface the failure.
    e.printStackTrace();
  }
}
 
Developer: wanghan0501, Project: WiFiProbeAnalysis, Lines: 32, Source: KafkaConsumerForHive.java

Example 11: writeHTD

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
throws IOException {
  FSDataOutputStream out = fs.create(p, false);
  try {
    // We used to write this file out as a serialized HTD Writable followed by two '\n's and then
    // the toString version of HTD.  Now we just write out the pb serialization.
    out.write(htd.toByteArray());
  } finally {
    out.close();
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 12, Source: FSTableDescriptors.java

Example 12: createEmptyHarArchive

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Create an empty Har archive in the FileSystem fs at the Path p.
 * 
 * @param fs the file system to create the Har archive in
 * @param p the path to create the Har archive at
 * @throws IOException in the event of error
 */
private static void createEmptyHarArchive(FileSystem fs, Path p)
    throws IOException {
  fs.mkdirs(p);
  OutputStream out = fs.create(new Path(p, "_masterindex"));
  out.write(Integer.toString(HarFileSystem.VERSION).getBytes());
  out.close();
  fs.create(new Path(p, "_index")).close();
}
 
Developer: naver, Project: hadoop, Lines: 16, Source: TestHarFileSystemWithHA.java

Example 13: addToLocalResources

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void addToLocalResources(FileSystem fs, String fileSrcPath,
    String fileDstPath, String appId, Map<String, LocalResource> localResources,
    String resources) throws IOException {
  String suffix = "prkeyrotation" + "/" + appId + "/" + fileDstPath;
  Path dst = new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem.create(fs, dst, new FsPermission((short) 0710));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc = LocalResource.newInstance(
      ConverterUtils.getYarnUrlFromPath(dst),
      LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
      scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 28, Source: Client.java

Example 14: configure

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public void configure(JobConf conf) {
  try {
    FileSystem fs = FileSystem.get(conf);
    OutputStream os =
      fs.create(FileOutputFormat.getPathForCustomFile(conf, "test"));
    os.write(1);
    os.close();
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestFileOutputFormat.java

Example 15: testWriteAfterClose

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
@Test
public void testWriteAfterClose() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .build();
  
  try {
    final byte[] data = "foo".getBytes();
    
    FileSystem fs = FileSystem.get(conf);
    OutputStream out = fs.create(new Path("/test"));
    
    out.write(data);
    out.close();
    try {
      // Should fail.
      out.write(data);
      fail("Should not have been able to write more data after file is closed.");
    } catch (ClosedChannelException cce) {
      // We got the correct exception. Ignoring.
    }
    // Should succeed. Double closes are OK.
    out.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestClose.java


Note: The org.apache.hadoop.fs.FileSystem.create examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, who retain copyright. Consult each project's license before distributing or reusing the code; do not reproduce without permission.