

Java FileSystem.getLocal Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.getLocal. If you are wondering how FileSystem.getLocal works, how to call it, or what real-world uses of it look like, the curated code examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following presents 15 code examples of the FileSystem.getLocal method, sorted by popularity by default.
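Basic usage: before the project examples below, here is a minimal, self-contained sketch of the typical FileSystem.getLocal call pattern (the class name LocalFsExample and the temp-file path are hypothetical, chosen only for illustration). FileSystem.getLocal(conf) returns a LocalFileSystem backed by the local disk regardless of the fs.defaultFS setting, which is why the test code below uses it for scratch files.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalFsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // getLocal always resolves to the local file system, independent of fs.defaultFS
    LocalFileSystem fs = FileSystem.getLocal(conf);
    Path tmp = new Path(System.getProperty("java.io.tmpdir"), "filesystem-getlocal-demo.txt");
    try (FSDataOutputStream out = fs.create(tmp, true)) { // overwrite if it already exists
      out.writeUTF("hello local filesystem");
    }
    System.out.println("exists=" + fs.exists(tmp) + ", size=" + fs.getFileStatus(tmp).getLen());
    fs.delete(tmp, false); // clean up the scratch file
  }
}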

Example 1: testDeleteFile

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * test {@code BloomMapFile.delete()} method
 */
public void testDeleteFile() {
  BloomMapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    writer = new BloomMapFile.Writer(conf, TEST_FILE,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    assertNotNull("testDeleteFile error !!!", writer);
    writer.close();
    BloomMapFile.delete(fs, TEST_FILE.toString());
  } catch (Exception ex) {
    fail("unexpect ex in testDeleteFile !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 20, Source: TestBloomMapFile.java

Example 2: testRename

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * test {@code MapFile.Writer.rename()} method
 */
@Test
public void testRename() {
  final String NEW_FILE_NAME = "test-new.mapfile";
  final String OLD_FILE_NAME = "test-old.mapfile";
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
    writer.close();
    MapFile.rename(fs, new Path(TEST_DIR, OLD_FILE_NAME).toString(), 
        new Path(TEST_DIR, NEW_FILE_NAME).toString());
    MapFile.delete(fs, new Path(TEST_DIR, NEW_FILE_NAME).toString());
  } catch (IOException ex) {
    fail("testRename error " + ex);
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: TestMapFile.java

Example 3: testMidKeyEmpty

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Test
@SuppressWarnings("deprecation")
public void testMidKeyEmpty() throws Exception {
  // Write a mapfile of simple data: keys are
  Path dirName = new Path(TEST_DIR, "testMidKeyEmpty.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);

  MapFile.Writer writer = new MapFile.Writer(conf, fs,
      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
  writer.close();
  // Now do getClosest on created mapfile.
  MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
  try {
    assertEquals(null, reader.midKey()); 
  } finally {
    reader.close();
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 20, Source: TestMapFile.java

Example 4: setUp

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  Configuration conf = new Configuration();
  fSysTarget = FileSystem.getLocal(conf);
  fileSystemTestHelper = new FileSystemTestHelper();
  chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);
  // In case previous test was killed before cleanup
  fSysTarget.delete(chrootedTo, true);
  
  fSysTarget.mkdirs(chrootedTo);


  // ChRoot to the root of the testDirectory
  fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf);
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: TestChRootedFileSystem.java

Example 5: setConf

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public void setConf(Configuration conf) {
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    this.conf = conf;
    Path partFile = new Path(TeraInputFormat.PARTITION_FILENAME);
    splitPoints = readPartitions(fs, partFile, conf);
    trie = buildTrie(splitPoints, 0, splitPoints.length, new Text(), 2);
  } catch (IOException ie) {
    throw new IllegalArgumentException("can't read partitions file", ie);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 12, Source: TeraSort.java

Example 6: createMetaDataFile

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private void createMetaDataFile(StoreBuilderConfig builderConfig) throws IOException {
  final KVStoreInfo kvStoreInfo = DataStoreUtils.toInfo(builderConfig);
  final Path metadataFile = new Path(metaDataFilesDir.getAbsolutePath(), format("%s%s", builderConfig.getName(), METADATA_FILE_SUFFIX));
  final FileSystem fs = FileSystem.getLocal(new Configuration());
  try (FSDataOutputStream metaDataOut = fs.create(metadataFile, true)) {
    ProtostuffUtil.toJSON(metaDataOut, kvStoreInfo, KVStoreInfo.getSchema(), false);
  }
}
 
Developer ID: dremio, Project: dremio-oss, Lines: 9, Source: CoreStoreProviderImpl.java

Example 7: testDefaultRecordDelimiters

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Test the default behavior when the textinputformat.record.delimiter
 * configuration property is not specified
 * 
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test
public void testDefaultRecordDelimiters() throws IOException,
    InterruptedException, ClassNotFoundException {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  // cleanup
  localFs.delete(workDir, true);
  // creating input test file
  createInputFile(conf);
  createAndRunJob(conf);
  String expected = "0\tabc\n4\tdef\t\n9\tghi\n13\tjkl\n";
  assertEquals(expected, readOutputFile(conf));
}
 
Developer ID: naver, Project: hadoop, Lines: 22, Source: TestLineRecordReaderJobs.java

Example 8: testHistograms

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * @throws IOException
 * 
 *           There should be files in the directory named by
 *           ${test.build.data}/rumen/histogram-test .
 * 
 *           There will be pairs of files, inputXxx.json and goldXxx.json .
 * 
 *           We read the input file as a HistogramRawTestData in json. Then we
 *           create a Histogram using the data field, and then a
 *           LoggedDiscreteCDF using the percentiles and scale field. Finally,
 *           we read the corresponding goldXxx.json as a LoggedDiscreteCDF and
 *           deepCompare them.
 */
@Test
public void testHistograms() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir = new Path(
      System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
  final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");


  FileStatus[] tests = lfs.listStatus(rootInputFile);

  for (int i = 0; i < tests.length; ++i) {
    Path filePath = tests[i].getPath();
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(rootInputFile, "gold"+testName);
      assertTrue("Gold file dies not exist", lfs.exists(goldFilePath));
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      System.out.println("Testing a Histogram for " + fileName);
      FSDataInputStream goldStream = lfs.open(goldFilePath);
      JsonObjectMapperParser<LoggedDiscreteCDF> parser = new JsonObjectMapperParser<LoggedDiscreteCDF>(
          goldStream, LoggedDiscreteCDF.class); 
      try {
        LoggedDiscreteCDF dcdf = parser.getNext();
        dcdf.deepCompare(newResult, new TreePath(null, "<root>"));
      } catch (DeepInequalityException e) {
        fail(e.path.toString());
      }
      finally {
          parser.close();
      }
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 50, Source: TestHistograms.java

Example 9: testSeekBugLocalFS

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Tests if the seek bug exists in FSDataInputStream in LocalFS.
 */
@Test
public void testSeekBugLocalFS() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem fileSys = FileSystem.getLocal(conf);
  try {
    Path file1 = new Path("build/test/data", "seektest.dat");
    writeFile(fileSys, file1);
    seekReadFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 17, Source: TestSeekBug.java

Example 10: setConf

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Read in the partition file and build indexing data structures.
 * If the keytype is {@link org.apache.hadoop.io.BinaryComparable} and
 * <tt>total.order.partitioner.natural.order</tt> is not false, a trie
 * of the first <tt>total.order.partitioner.max.trie.depth</tt>(2) + 1 bytes
 * will be built. Otherwise, keys will be located using a binary search of
 * the partition keyset using the {@link org.apache.hadoop.io.RawComparator}
 * defined for this job. The input file must be sorted with the same
 * comparator and contain {@link Job#getNumReduceTasks()} - 1 keys.
 */
@SuppressWarnings("unchecked") // keytype from conf not static
public void setConf(Configuration conf) {
  try {
    this.conf = conf;
    String parts = getPartitionFile(conf);
    final Path partFile = new Path(parts);
    final FileSystem fs = (DEFAULT_PATH.equals(parts))
      ? FileSystem.getLocal(conf)     // assume in DistributedCache
      : partFile.getFileSystem(conf);

    Job job = Job.getInstance(conf);
    Class<K> keyClass = (Class<K>)job.getMapOutputKeyClass();
    K[] splitPoints = readPartitions(fs, partFile, keyClass, conf);
    if (splitPoints.length != job.getNumReduceTasks() - 1) {
      throw new IOException("Wrong number of partitions in keyset");
    }
    RawComparator<K> comparator =
      (RawComparator<K>) job.getSortComparator();
    for (int i = 0; i < splitPoints.length - 1; ++i) {
      if (comparator.compare(splitPoints[i], splitPoints[i+1]) >= 0) {
        throw new IOException("Split points are out of order");
      }
    }
    boolean natOrder =
      conf.getBoolean(NATURAL_ORDER, true);
    if (natOrder && BinaryComparable.class.isAssignableFrom(keyClass)) {
      partitions = buildTrie((BinaryComparable[])splitPoints, 0,
          splitPoints.length, new byte[0],
          // Now that blocks of identical splitless trie nodes are 
          // represented reentrantly, and we develop a leaf for any trie
          // node with only one split point, the only reason for a depth
          // limit is to refute stack overflow or bloat in the pathological
          // case where the split points are long and mostly look like bytes 
          // iii...iixii...iii   .  Therefore, we make the default depth
          // limit large but not huge.
          conf.getInt(MAX_TRIE_DEPTH, 200));
    } else {
      partitions = new BinarySearchNode(splitPoints, comparator);
    }
  } catch (IOException e) {
    throw new IllegalArgumentException("Can't read partitions file", e);
  }
}
 
Developer ID: naver, Project: hadoop, Lines: 54, Source: TotalOrderPartitioner.java

Example 11: testCompressibleGridmixRecord

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Test compressible {@link GridmixRecord}.
 */
@Test
public void testCompressibleGridmixRecord() throws IOException {
  JobConf conf = new JobConf();
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  
  FileSystem lfs = FileSystem.getLocal(conf);
  int dataSize = 1024 * 1024 * 10; // 10 MB
  float ratio = 0.357F;
  
  // define the test's root temp directory
  Path rootTempDir =
      new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
          lfs.getUri(), lfs.getWorkingDirectory());

  Path tempDir = new Path(rootTempDir, 
                          "TestPossiblyCompressibleGridmixRecord");
  lfs.delete(tempDir, true);
  
  // define a compressible GridmixRecord
  GridmixRecord record = new GridmixRecord(dataSize, 0);
  record.setCompressibility(true, ratio); // enable compression
  
  conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class, 
                CompressionCodec.class);
  org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
  
  // write the record to a file
  Path recordFile = new Path(tempDir, "record");
  OutputStream outStream = CompressionEmulationUtil
                             .getPossiblyCompressedOutputStream(recordFile, 
                                                                conf);    
  DataOutputStream out = new DataOutputStream(outStream);
  record.write(out);
  out.close();
  outStream.close();
  
  // open the compressed stream for reading
  Path actualRecordFile = recordFile.suffix(".gz");
  InputStream in = 
    CompressionEmulationUtil
      .getPossiblyDecompressedInputStream(actualRecordFile, conf, 0);
  
  // get the compressed file size
  long compressedFileSize = lfs.listStatus(actualRecordFile)[0].getLen();
  
  GridmixRecord recordRead = new GridmixRecord();
  recordRead.readFields(new DataInputStream(in));
  
  assertEquals("Record size mismatch in a compressible GridmixRecord",
               dataSize, recordRead.getSize());
  assertTrue("Failed to generate a compressible GridmixRecord",
             recordRead.getSize() > compressedFileSize);
  
  // check if the record can generate data with the desired compression ratio
  float seenRatio = ((float)compressedFileSize)/dataSize;
  assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio), 
      CompressionEmulationUtil.standardizeCompressionRatio(seenRatio), 1.0D);
}
 
Developer ID: naver, Project: hadoop, Lines: 63, Source: TestCompressionEmulationUtils.java

Example 12: testExternalSubdir

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
public void testExternalSubdir() throws IOException {
  final String DATA = "This is the clob data!";
  final String FILENAME = "_lob/clobdata";

  try {
    doExternalTest(DATA, FILENAME);
  } finally {
    // remove dir we made.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    String tmpDir = System.getProperty("test.build.data", "/tmp/");
    Path lobDir = new Path(new Path(tmpDir), "_lob");
    fs.delete(lobDir, true);
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 16, Source: TestClobRef.java

Example 13: testFix

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * test {@code MapFile.Writer.testFix} method
 */
@Test
public void testFix() {
  final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
  int PAIR_SIZE = 20;
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
    writer = createWriter(INDEX_LESS_MAP_FILE, IntWritable.class, Text.class);
    for (int i = 0; i < PAIR_SIZE; i++)
      writer.append(new IntWritable(0), new Text("value"));
    writer.close();

    File indexFile = new File(".", "." + INDEX_LESS_MAP_FILE + "/index");
    boolean isDeleted = false;
    if (indexFile.exists())
      isDeleted = indexFile.delete();

    if (isDeleted)
      assertTrue("testFix error !!!",
          MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
  } catch (Exception ex) {
    fail("testFix error !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines: 31, Source: TestMapFile.java

Example 14: assertPermissions

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
private void assertPermissions(File f, int expected) throws IOException {
  FileSystem localfs = FileSystem.getLocal(new Configuration());
  FsPermission perms = localfs.getFileStatus(
    new Path(f.getAbsolutePath())).getPermission();
  assertEquals(expected, perms.toShort());
}
 
Developer ID: naver, Project: hadoop, Lines: 7, Source: TestNativeIO.java

Example 15: assertFirstSpecificNumber

import org.apache.hadoop.fs.FileSystem; // import the class this method depends on
/**
 * Assert that a directory contains a file with exactly one line
 * in it, containing the prescribed number 'val'.
 */
public void assertFirstSpecificNumber(String tableName, int val) {
  try {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path warehouse = new Path(BaseSqoopTestCase.LOCAL_WAREHOUSE_DIR);
    Path tableDir = new Path(warehouse, tableName);
    FileStatus [] stats = fs.listStatus(tableDir);
    String [] filePaths = new String[stats.length];
    for (int i = 0; i < stats.length; i++) {
      filePaths[i] = stats[i].getPath().toString();
    }

    // Read the first file that is not a hidden file.
    boolean foundVal = false;
    for (String filePath : filePaths) {
      String fileName = new Path(filePath).getName();
      if (fileName.startsWith("_") || fileName.startsWith(".")) {
        continue;
      }

      if (foundVal) {
        // Make sure we don't have two or more "real" files in the dir.
        fail("Got an extra data-containing file in this directory.");
      }

      BufferedReader r = new BufferedReader(
          new InputStreamReader(fs.open(new Path(filePath))));
      try {
        String s = r.readLine();
        if (null == s) {
          fail("Unexpected empty file " + filePath + ".");
        }
        assertEquals(val, (int) Integer.valueOf(s.trim()));

        String nextLine = r.readLine();
        if (nextLine != null) {
          fail("Expected only one result, but got another line: " + nextLine);
        }

        // Successfully got the value we were looking for.
        foundVal = true;
      } finally {
        r.close();
      }
    }
  } catch (IOException e) {
    fail("Got unexpected exception: " + StringUtils.stringifyException(e));
  }
}
 
Developer ID: aliyun, Project: aliyun-maxcompute-data-collectors, Lines: 53, Source: TestIncrementalImport.java


Note: The org.apache.hadoop.fs.FileSystem.getLocal examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Refer to each project's License for distribution and use; do not reproduce without permission.