

Java Path.suffix Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.Path.suffix, drawn from open-source projects. If you are unsure what Path.suffix does, how to call it, or where it is used in practice, the curated examples below should help. You can also explore other usage examples of the enclosing class, org.apache.hadoop.fs.Path.


Nine code examples of Path.suffix are shown below, ordered by popularity.
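Before working through the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what Path.suffix does: it appends the given string to the last path component and returns a new Path, leaving the original unchanged.

import org.apache.hadoop.fs.Path;

public class PathSuffixDemo {
  public static void main(String[] args) {
    Path original = new Path("/data/logs/part-00000");

    // suffix() appends the string to the final path component
    Path gz = original.suffix(".gz");
    Path copy = original.suffix(".copy");

    System.out.println(gz);       // /data/logs/part-00000.gz
    System.out.println(copy);     // /data/logs/part-00000.copy

    // Path is immutable, so the original is unchanged
    System.out.println(original); // /data/logs/part-00000
  }
}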

Example 1: getPossiblyCompressedOutputStream

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Returns a {@link OutputStream} for a file that might need 
 * compression.
 */
static OutputStream getPossiblyCompressedOutputStream(Path file, 
                                                      Configuration conf)
throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  JobConf jConf = new JobConf(conf);
  if (org.apache.hadoop.mapred.FileOutputFormat.getCompressOutput(jConf)) {
    // get the codec class
    Class<? extends CompressionCodec> codecClass =
      org.apache.hadoop.mapred.FileOutputFormat
                              .getOutputCompressorClass(jConf, 
                                                        GzipCodec.class);
    // get the codec implementation
    CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);

    // add the appropriate extension
    file = file.suffix(codec.getDefaultExtension());

    if (isCompressionEmulationEnabled(conf)) {
      FSDataOutputStream fileOut = fs.create(file, false);
      return new DataOutputStream(codec.createOutputStream(fileOut));
    }
  }
  return fs.create(file, false);
}
 
Developer: naver | Project: hadoop | Lines: 29 | Source: CompressionEmulationUtil.java

Example 2: testPossiblyCompressedDecompressedStreams

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Test 
 * {@link CompressionEmulationUtil#getPossiblyDecompressedInputStream(Path, 
 *                                   Configuration, long)}
 *  and
 *  {@link CompressionEmulationUtil#getPossiblyCompressedOutputStream(Path, 
 *                                    Configuration)}.
 */
@Test
public void testPossiblyCompressedDecompressedStreams() throws IOException {
  JobConf conf = new JobConf();
  FileSystem lfs = FileSystem.getLocal(conf);
  String inputLine = "Hi Hello!";

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);
  conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class, 
                CompressionCodec.class);

  // define the test's root temp directory
  Path rootTempDir =
      new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
          lfs.getUri(), lfs.getWorkingDirectory());

  Path tempDir =
    new Path(rootTempDir, "TestPossiblyCompressedDecompressedStreams");
  lfs.delete(tempDir, true);

  // create a compressed file
  Path compressedFile = new Path(tempDir, "test");
  OutputStream out = 
    CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile, 
                                                               conf);
  BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
  writer.write(inputLine);
  writer.close();
  
  // now read back the data from the compressed stream
  compressedFile = compressedFile.suffix(".gz");
  InputStream in = 
    CompressionEmulationUtil
      .getPossiblyDecompressedInputStream(compressedFile, conf, 0);
  BufferedReader reader = new BufferedReader(new InputStreamReader(in));
  String readLine = reader.readLine();
  assertEquals("Compression/Decompression error", inputLine, readLine);
  reader.close();
}
 
Developer: naver | Project: hadoop | Lines: 49 | Source: TestCompressionEmulationUtils.java

Example 3: writeRenameReadCompare

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
protected void writeRenameReadCompare(Path path, long len)
    throws IOException, NoSuchAlgorithmException {
  // If len > fs.s3n.multipart.uploads.block.size,
  // we'll use a multipart upload copy
  MessageDigest digest = MessageDigest.getInstance("MD5");
  OutputStream out = new BufferedOutputStream(
      new DigestOutputStream(fs.create(path, false), digest));
  for (long i = 0; i < len; i++) {
    out.write('Q');
  }
  out.flush();
  out.close();

  assertTrue("Exists", fs.exists(path));

  // Depending on if this file is over 5 GB or not,
  // rename will cause a multipart upload copy
  Path copyPath = path.suffix(".copy");
  fs.rename(path, copyPath);

  assertTrue("Copy exists", fs.exists(copyPath));

  // Download file from S3 and compare the digest against the original
  MessageDigest digest2 = MessageDigest.getInstance("MD5");
  InputStream in = new BufferedInputStream(
      new DigestInputStream(fs.open(copyPath), digest2));
  long copyLen = 0;
  while (in.read() != -1) {copyLen++;}
  in.close();

  assertEquals("Copy length matches original", len, copyLen);
  assertArrayEquals("Digests match", digest.digest(), digest2.digest());
}
 
Developer: naver | Project: hadoop | Lines: 34 | Source: TestJets3tNativeFileSystemStore.java

Example 4: getLogDirs

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK", justification=
    "We only release this lock when we set it. Updates to code that uses it should verify use " +
    "of the guard boolean.")
private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
  List<Path> logDirs = new ArrayList<Path>();
  boolean needReleaseLock = false;
  if (!this.services.isInitialized()) {
    // during master initialization, we could have multiple places splitting a same wal
    this.splitLogLock.lock();
    needReleaseLock = true;
  }
  try {
    for (ServerName serverName : serverNames) {
      Path logDir = new Path(this.rootdir,
          DefaultWALProvider.getWALDirectoryName(serverName.toString()));
      Path splitDir = logDir.suffix(DefaultWALProvider.SPLITTING_EXT);
      // Rename the directory so a rogue RS doesn't create more WALs
      if (fs.exists(logDir)) {
        if (!this.fs.rename(logDir, splitDir)) {
          throw new IOException("Failed fs.rename for log split: " + logDir);
        }
        logDir = splitDir;
        LOG.debug("Renamed region directory: " + splitDir);
      } else if (!fs.exists(splitDir)) {
        LOG.info("Log dir for server " + serverName + " does not exist");
        continue;
      }
      logDirs.add(splitDir);
    }
  } finally {
    if (needReleaseLock) {
      this.splitLogLock.unlock();
    }
  }
  return logDirs;
}
 
Developer: fengchen8086 | Project: ditb | Lines: 37 | Source: MasterFileSystem.java

Example 5: getTempPath

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@VisibleForTesting
static Path getTempPath(Path outPath, int fetcher) {
  return outPath.suffix(String.valueOf(fetcher));
}
 
Developer: naver | Project: hadoop | Lines: 5 | Source: OnDiskMapOutput.java
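For a sense of what this helper produces, here is a tiny standalone sketch; the output path and fetcher id are made-up values, and since getTempPath is package-private the sketch simply repeats the same suffix() call.

import org.apache.hadoop.fs.Path;

public class TempPathSketch {
  public static void main(String[] args) {
    // Hypothetical map-output path and fetcher id
    Path outPath = new Path("/local/output/attempt_0_m_000001_0/file.out");
    int fetcher = 3;

    // Same idiom as getTempPath(): append the fetcher id to the file name
    Path tempPath = outPath.suffix(String.valueOf(fetcher));
    System.out.println(tempPath); // .../file.out3
  }
}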

Example 6: copyMapOutput

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Retrieve the map output of a single map task
 * and send it to the merger.
 */
private boolean copyMapOutput(TaskAttemptID mapTaskId) throws IOException {
  // Figure out where the map task stored its output.
  Path mapOutputFileName = localMapFiles.get(mapTaskId).getOutputFile();
  Path indexFileName = mapOutputFileName.suffix(".index");

  // Read its index to determine the location of our split
  // and its size.
  SpillRecord sr = new SpillRecord(indexFileName, job);
  IndexRecord ir = sr.getIndex(reduce);

  long compressedLength = ir.partLength;
  long decompressedLength = ir.rawLength;

  compressedLength -= CryptoUtils.cryptoPadding(job);
  decompressedLength -= CryptoUtils.cryptoPadding(job);

  // Get the location for the map output - either in-memory or on-disk
  MapOutput<K, V> mapOutput = merger.reserve(mapTaskId, decompressedLength,
      id);

  // Check if we can shuffle *now* ...
  if (mapOutput == null) {
    LOG.info("fetcher#" + id + " - MergeManager returned Status.WAIT ...");
    return false;
  }

  // Go!
  LOG.info("localfetcher#" + id + " about to shuffle output of map " + 
           mapOutput.getMapId() + " decomp: " +
           decompressedLength + " len: " + compressedLength + " to " +
           mapOutput.getDescription());

  // now read the file, seek to the appropriate section, and send it.
  FileSystem localFs = FileSystem.getLocal(job).getRaw();
  FSDataInputStream inStream = localFs.open(mapOutputFileName);

  inStream = CryptoUtils.wrapIfNecessary(job, inStream);

  try {
    inStream.seek(ir.startOffset + CryptoUtils.cryptoPadding(job));
    mapOutput.shuffle(LOCALHOST, inStream, compressedLength, decompressedLength, metrics, reporter);
  } finally {
    try {
      inStream.close();
    } catch (IOException ioe) {
      LOG.warn("IOException closing inputstream from map output: "
          + ioe.toString());
    }
  }

  scheduler.copySucceeded(mapTaskId, LOCALHOST, compressedLength, 0, 0,
      mapOutput);
  return true; // successful fetch.
}
 
Developer: naver | Project: hadoop | Lines: 59 | Source: LocalFetcher.java

Example 7: testCompressibleGridmixRecord

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Test compressible {@link GridmixRecord}.
 */
@Test
public void testCompressibleGridmixRecord() throws IOException {
  JobConf conf = new JobConf();
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  
  FileSystem lfs = FileSystem.getLocal(conf);
  int dataSize = 1024 * 1024 * 10; // 10 MB
  float ratio = 0.357F;
  
  // define the test's root temp directory
  Path rootTempDir =
      new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
          lfs.getUri(), lfs.getWorkingDirectory());

  Path tempDir = new Path(rootTempDir, 
                          "TestPossiblyCompressibleGridmixRecord");
  lfs.delete(tempDir, true);
  
  // define a compressible GridmixRecord
  GridmixRecord record = new GridmixRecord(dataSize, 0);
  record.setCompressibility(true, ratio); // enable compression
  
  conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class, 
                CompressionCodec.class);
  org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
  
  // write the record to a file
  Path recordFile = new Path(tempDir, "record");
  OutputStream outStream = CompressionEmulationUtil
                             .getPossiblyCompressedOutputStream(recordFile, 
                                                                conf);    
  DataOutputStream out = new DataOutputStream(outStream);
  record.write(out);
  out.close();
  outStream.close();
  
  // open the compressed stream for reading
  Path actualRecordFile = recordFile.suffix(".gz");
  InputStream in = 
    CompressionEmulationUtil
      .getPossiblyDecompressedInputStream(actualRecordFile, conf, 0);
  
  // get the compressed file size
  long compressedFileSize = lfs.listStatus(actualRecordFile)[0].getLen();
  
  GridmixRecord recordRead = new GridmixRecord();
  recordRead.readFields(new DataInputStream(in));
  
  assertEquals("Record size mismatch in a compressible GridmixRecord",
               dataSize, recordRead.getSize());
  assertTrue("Failed to generate a compressible GridmixRecord",
             recordRead.getSize() > compressedFileSize);
  
  // check if the record can generate data with the desired compression ratio
  float seenRatio = ((float)compressedFileSize)/dataSize;
  assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio), 
      CompressionEmulationUtil.standardizeCompressionRatio(seenRatio), 1.0D);
}
 
Developer: naver | Project: hadoop | Lines: 63 | Source: TestCompressionEmulationUtils.java

Example 8: testFileQueueDecompression

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Test of {@link FileQueue} can identify compressed file and provide
 * readers to extract uncompressed data only if input-compression is enabled.
 */
@Test
public void testFileQueueDecompression() throws IOException {
  JobConf conf = new JobConf();
  FileSystem lfs = FileSystem.getLocal(conf);
  String inputLine = "Hi Hello!";
  
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
  org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(conf, 
                                              GzipCodec.class);

  // define the test's root temp directory
  Path rootTempDir =
      new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
          lfs.getUri(), lfs.getWorkingDirectory());

  Path tempDir = new Path(rootTempDir, "TestFileQueueDecompression");
  lfs.delete(tempDir, true);

  // create a compressed file
  Path compressedFile = new Path(tempDir, "test");
  OutputStream out = 
    CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile, 
                                                               conf);
  BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
  writer.write(inputLine);
  writer.close();
  
  compressedFile = compressedFile.suffix(".gz");
  // now read back the data from the compressed stream using FileQueue
  long fileSize = lfs.listStatus(compressedFile)[0].getLen();
  CombineFileSplit split = 
    new CombineFileSplit(new Path[] {compressedFile}, new long[] {fileSize});
  FileQueue queue = new FileQueue(split, conf);
  byte[] bytes = new byte[inputLine.getBytes().length];
  queue.read(bytes);
  queue.close();
  String readLine = new String(bytes);
  assertEquals("Compression/Decompression error", inputLine, readLine);
}
 
Developer: naver | Project: hadoop | Lines: 46 | Source: TestCompressionEmulationUtils.java

Example 9: testLogRollAfterSplitStart

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Tests the case where a RegionServer enters a GC pause,
 * comes back online after the master declared it dead and started to split.
 * Want log rolling after a master split to fail. See HBASE-2312.
 */
@Test (timeout=300000)
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = "testLogRollAfterSplitStart";
  Path thisTestsDir = new Path(HBASEDIR, DefaultWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, null, logName);

  try {
    // put some entries in an WAL
    TableName tableName =
        TableName.valueOf(this.getClass().getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    final WAL log = wals.getWAL(regioninfo.getEncodedNameAsBytes());
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

    final int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(htd, regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), mvcc), kvs, true);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((FSHLog) log).replaceWriter(((FSHLog)log).getOldPath(), null, null, null);

    /* code taken from MasterFileSystem.getLogDirs(), which is called from MasterFileSystem.splitLog()
     * handles RS shutdowns (as observed by the splitting process)
     */
    // rename the directory so a rogue RS doesn't create more WALs
    Path rsSplitDir = thisTestsDir.suffix(DefaultWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);

    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASEDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);

    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}
 
Developer: fengchen8086 | Project: ditb | Lines: 66 | Source: TestLogRollAbort.java


Note: The org.apache.hadoop.fs.Path.suffix method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; the source code remains the copyright of its original authors, and any distribution or use should follow the license of the corresponding project. Please do not reproduce this article without permission.