

Java FileSystem.open Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.open. If you are wondering what FileSystem.open does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following presents 15 code examples of the FileSystem.open method, sorted by popularity by default.
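Before the harvested examples, here is a minimal, self-contained sketch of the basic call pattern. It is not taken from any of the projects below; the path is an illustrative assumption, and try-with-resources is used so the stream is closed even if the copy fails.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class FileSystemOpenSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.txt");   // hypothetical path
    FileSystem fs = path.getFileSystem(conf);   // resolve the FileSystem for this path's scheme
    // fs.open returns an FSDataInputStream; try-with-resources closes it even on error
    try (FSDataInputStream in = fs.open(path)) {
      IOUtils.copyBytes(in, System.out, 4096, false); // copy the file contents to stdout
    }
  }
}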

Example 1: initBloomFilter

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void initBloomFilter(Path dirName, 
                             Configuration conf) {
  
  DataInputStream in = null;
  try {
    FileSystem fs = dirName.getFileSystem(conf);
    in = fs.open(new Path(dirName, BLOOM_FILE_NAME));
    bloomFilter = new DynamicBloomFilter();
    bloomFilter.readFields(in);
    in.close();
    in = null;
  } catch (IOException ioe) {
    LOG.warn("Can't open BloomFilter: " + ioe + " - fallback to MapFile.");
    bloomFilter = null;
  } finally {
    IOUtils.closeStream(in);
  }
}
 
Author: nucypher, Project: hadoop-oss, Lines: 19, Source: BloomMapFile.java

Example 2: Main

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void Main() throws Exception {
    String inputFile = "/user/root/flinkwordcount/input/resultTweets.txt";
    FileSystem fs = HdfsOperationUtil.getFs();
    FSDataInputStream dataInputStream = fs.open(new Path(inputFile));
    BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(dataInputStream));
    long startTimeSystemTime = System.currentTimeMillis();
    String text = null;
    while ((text = bufferedReader.readLine()) != null) {
        predictorHotKeyUtil.simpleComputPredictorHotKey(text);
    }
    long endTimeSystemTime = System.currentTimeMillis();
    LOG.info("startTime:" + new Timestamp(startTimeSystemTime));
    LOG.info("endTime:" + new Timestamp(endTimeSystemTime));
    long timelong = (endTimeSystemTime - startTimeSystemTime) / 1000;
    LOG.info("totalTime:" + timelong + " s" + "------or------" + timelong / 60 + " min");
    System.exit(0);
}
 
Author: DStream-Storm, Project: DStream, Lines: 19, Source: PredictorHotKeyUtilTest.java
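The test above depends on a project-specific helper, HdfsOperationUtil.getFs(), whose source is not shown on this page. As a rough assumption, such a helper usually just wraps FileSystem.get; a hypothetical sketch (the NameNode URI is made up, and the real DStream implementation may differ) could look like this:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

// Hypothetical reconstruction of HdfsOperationUtil, for illustration only.
public class HdfsOperationUtil {
    private static final String DEFAULT_FS = "hdfs://localhost:9000"; // assumed NameNode URI

    public static FileSystem getFs() throws IOException {
        Configuration conf = new Configuration();
        return FileSystem.get(URI.create(DEFAULT_FS), conf);
    }
}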

Example 3: testAuditDenied

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/** test that denied operation puts proper entry in audit log */
@Test
public void testAuditDenied() throws Exception {
  final Path file = new Path(fnames[0]);
  FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);

  fs.setPermission(file, new FsPermission((short)0600));
  fs.setOwner(file, "root", null);

  setupAuditLogs();

  try {
    userfs.open(file);
    fail("open must not succeed");
  } catch(AccessControlException e) {
    System.out.println("got access denied, as expected.");
  }
  verifyAuditLogs(false);
}
 
Author: naver, Project: hadoop, Lines: 20, Source: TestAuditLogs.java

Example 4: testEOF

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void testEOF(MiniDFSCluster cluster, int fileLength) throws IOException {
  FileSystem fs = cluster.getFileSystem();
  Path path = new Path("testEOF." + fileLength);
  DFSTestUtil.createFile(fs, path, fileLength, (short)1, 0xBEEFBEEF);
  FSDataInputStream fis = fs.open(path);
  ByteBuffer empty = ByteBuffer.allocate(0);
  // A read into an empty bytebuffer at the beginning of the file gives 0.
  Assert.assertEquals(0, fis.read(empty));
  fis.seek(fileLength);
  // A read into an empty bytebuffer at the end of the file gives -1.
  Assert.assertEquals(-1, fis.read(empty));
  if (fileLength > BLOCK_SIZE) {
    fis.seek(fileLength - BLOCK_SIZE + 1);
    ByteBuffer dbb = ByteBuffer.allocateDirect(BLOCK_SIZE);
    Assert.assertEquals(BLOCK_SIZE - 1, fis.read(dbb));
  }
  fis.close();
}
 
Author: naver, Project: hadoop, Lines: 19, Source: TestRead.java

Example 5: verifyOutput

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void verifyOutput(RunningJob submittedJob, FileSystem fileSystem, int numMappers, int numLines)
  throws Exception {
  FSDataInputStream dis = null;
  long numValidRecords = 0;
  long numInvalidRecords = 0;
  String prevKeyValue = "000000000";
  Path[] fileList =
    FileUtil.stat2Paths(fileSystem.listStatus(OUTPUT,
        new Utils.OutputFileUtils.OutputFilesFilter()));
  for (Path outFile : fileList) {
    try {
      dis = fileSystem.open(outFile);
      String record;
      while((record = dis.readLine()) != null) {
        // Split the line into key and value.
        int blankPos = record.indexOf(" ");
        String keyString = record.substring(0, blankPos);
        String valueString = record.substring(blankPos+1);
        // Check for sorted output and correctness of record.
        if (keyString.compareTo(prevKeyValue) >= 0
            && keyString.equals(valueString)) {
          prevKeyValue = keyString;
          numValidRecords++;
        } else {
          numInvalidRecords++;
        }
      }
    } finally {
      if (dis != null) {
        dis.close();
        dis = null;
      }
    }
  }
  // Make sure we got all input records in the output in sorted order.
  assertEquals((long)(numMappers * numLines), numValidRecords);
  // Make sure there is no extraneous invalid record.
  assertEquals(0, numInvalidRecords);
}
 
Author: naver, Project: hadoop, Lines: 40, Source: TestMRIntermediateDataEncryption.java

Example 6: readBytesToString

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Read in "length" bytes, convert to an ascii string
 * @param fs filesystem
 * @param path path to read
 * @param length #of bytes to read.
 * @return the bytes read and converted to a string
 * @throws IOException IO problems
 */
public static String readBytesToString(FileSystem fs,
                                Path path,
                                int length) throws IOException {
  FSDataInputStream in = fs.open(path);
  try {
    byte[] buf = new byte[length];
    in.readFully(0, buf);
    return toChar(buf);
  } finally {
    in.close();
  }
}
 
Author: nucypher, Project: hadoop-oss, Lines: 21, Source: ContractTestUtils.java

Example 7: testFailoverAfterOpen

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +
      "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
            .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);

    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);

    out.write(data);
    out.close();
    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Author: naver, Project: hadoop, Lines: 39, Source: TestWebHDFSForHA.java

Example 8: datanodeRestartTest

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void datanodeRestartTest(MiniDFSCluster cluster, FileSystem fileSys,
    Path name) throws IOException {
  // skip this test if using simulated storage since simulated blocks
  // don't survive datanode restarts.
  if (simulatedStorage) {
    return;
  }
  int numBlocks = 1;
  assertTrue(numBlocks <= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
  byte[] expected = new byte[numBlocks * blockSize];
  Random rand = new Random(seed);
  rand.nextBytes(expected);
  byte[] actual = new byte[numBlocks * blockSize];
  FSDataInputStream stm = fileSys.open(name);
  // read a block and get block locations cached as a result
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Setup");
  // restart all datanodes. it is expected that they will
  // restart on different ports, hence, cached block locations
  // will no longer work.
  assertTrue(cluster.restartDataNodes());
  cluster.waitActive();
  // verify the block can be read again using the same InputStream 
  // (via re-fetching of block locations from namenode). there is a 
  // 3 sec sleep in chooseDataNode(), which can be shortened for 
  // this test if configurable.
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Pread Datanode Restart Test");
}
 
Author: naver, Project: hadoop, Lines: 30, Source: TestPread.java

Example 9: printTextFile

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private static void printTextFile(FileSystem fs, Path p) throws IOException {
  BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(p)));
  String line;
  while ((line = in.readLine()) != null) {
    System.out.println("  Row: " + line);
  }
  in.close();
}
 
Author: naver, Project: hadoop, Lines: 9, Source: TestMapRed.java

Example 10: getMeta

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Get model meta
 *
 * @param modelDir model save directory path
 * @return model meta
 */
public static ModelFilesMeta getMeta(String modelDir, Configuration conf) throws IOException {
  Path modelPath = new Path(modelDir);
  Path meteFilePath = new Path(modelPath, ModelFilesConstent.modelMetaFileName);
  ModelFilesMeta meta = new ModelFilesMeta();
  FileSystem fs = meteFilePath.getFileSystem(conf);
  if (!fs.exists(meteFilePath)) {
    throw new IOException("matrix meta file does not exist ");
  }
  FSDataInputStream input = fs.open(meteFilePath);
  meta.read(input);
  input.close();
  return meta;
}
 
Author: Tencent, Project: angel, Lines: 20, Source: ModelLoader.java

Example 11: readFromHdfs

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/** Read a file from HDFS */
public static DataInputStream readFromHdfs(String fileName) throws IOException {

    String dst = NodeConfig.HDFS_PATH+fileName;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    return fs.open(new Path(dst));
}
 
Author: cuiods, Project: WIFIProbe, Lines: 9, Source: HDFSTool.java
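Since this helper returns an open stream, closing it is the caller's responsibility. A hypothetical usage sketch (the file name is made up and the method is not part of the WIFIProbe project) could look like this:

import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStreamReader;

// Illustrative caller: read the file line by line and close the returned
// stream via try-with-resources.
public static void printHdfsFile() throws IOException {
    DataInputStream in = HDFSTool.readFromHdfs("/probe/data.txt"); // hypothetical file name
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(in))) {
        String line;
        while ((line = reader.readLine()) != null) {
            System.out.println(line);
        }
    }
}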

Example 12: testGzipCodecRead

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
@Test
public void testGzipCodecRead() throws IOException {
  // Create a gzipped file and try to read it back, using a decompressor
  // from the CodecPool.

  // Don't use native libs for this test.
  Configuration conf = new Configuration();
  ZlibFactory.setNativeZlibLoaded(false);
  // Ensure that the CodecPool has a BuiltInZlibInflater in it.
  Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
  assertNotNull("zlibDecompressor is null!", zlibDecompressor);
  assertTrue("ZlibFactory returned unexpected inflator",
      zlibDecompressor instanceof BuiltInZlibInflater);
  CodecPool.returnDecompressor(zlibDecompressor);

  // Now create a GZip text file.
  String tmpDir = System.getProperty("test.build.data", "/tmp/");
  Path f = new Path(new Path(tmpDir), "testGzipCodecRead.txt.gz");
  BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(
    new GZIPOutputStream(new FileOutputStream(f.toString()))));
  final String msg = "This is the message in the file!";
  bw.write(msg);
  bw.close();

  // Now read it back, using the CodecPool to establish the
  // decompressor to use.
  CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
  CompressionCodec codec = ccf.getCodec(f);
  Decompressor decompressor = CodecPool.getDecompressor(codec);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream is = fs.open(f);
  is = codec.createInputStream(is, decompressor);
  BufferedReader br = new BufferedReader(new InputStreamReader(is));
  String line = br.readLine();
  assertEquals("Didn't get the same message back!", msg, line);
  br.close();
}
 
Author: nucypher, Project: hadoop-oss, Lines: 38, Source: TestCodec.java

Example 13: initialize

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public void initialize(InputSplit split, TaskAttemptContext context) 
    throws IOException, InterruptedException {
  Path p = ((FileSplit)split).getPath();
  FileSystem fs = p.getFileSystem(context.getConfiguration());
  in = fs.open(p);
  long start = ((FileSplit)split).getStart();
  // find the offset to start at a record boundary
  offset = (RECORD_LENGTH - (start % RECORD_LENGTH)) % RECORD_LENGTH;
  in.seek(start + offset);
  length = ((FileSplit)split).getLength();
}
 
Author: naver, Project: hadoop, Lines: 12, Source: TeraInputFormat.java

Example 14: txt2dat

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
public static void txt2dat(Path dir, String inputFile, String outputFile)
        throws IOException {

    FileSystem fileSystem = dir.getFileSystem(new Configuration());

    Path in = new Path(dir, inputFile);
    Path out = new Path(dir, outputFile);

    FSDataInputStream fsDataInputStream = fileSystem.open(in);
    InputStreamReader inputStreamReader = new InputStreamReader(fsDataInputStream);
    BufferedReader reader = new BufferedReader(inputStreamReader);

    FSDataOutputStream writer = fileSystem.create(out);

    try {
        String line;
        line = reader.readLine();
        while (line != null){

            String[] keyVal = line.split("\\t");
            writer.writeLong(Long.parseLong(keyVal[0]));

            for (String aij : keyVal[1].split(",")) {
                writer.writeDouble(Double.parseDouble(aij));
            }

            line = reader.readLine();
        }
    } finally {
        reader.close();
        inputStreamReader.close();
        fsDataInputStream.close();
        writer.flush();
        writer.close();
    }
}
 
Author: Romm17, Project: MRNMF, Lines: 37, Source: MatrixByteConverter.java

Example 15: testFileCreationSyncOnClose

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Test creating a file whose data gets sync when closed
 */
@Test
public void testFileCreationSyncOnClose() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, true);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    FileSystem fs = cluster.getFileSystem();
    
    Path[] p = {new Path("/foo"), new Path("/bar")};
    
    //write 2 files at the same time
    FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
    int i = 0;
    for(; i < 100; i++) {
      out[0].write(i);
      out[1].write(i);
    }
    out[0].close();
    for(; i < 200; i++) {out[1].write(i);}
    out[1].close();

    //verify
    FSDataInputStream[] in = {fs.open(p[0]), fs.open(p[1])};  
    for(i = 0; i < 100; i++) {assertEquals(i, in[0].read());}
    for(i = 0; i < 200; i++) {assertEquals(i, in[1].read());}
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Author: naver, Project: hadoop, Lines: 34, Source: TestFileCreation.java


Note: The org.apache.hadoop.fs.FileSystem.open examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors. When distributing or using the code, please follow the license of the corresponding project; do not reproduce this article without permission.