

Java Path.suffix Method Code Examples

This article compiles typical usage examples of the Java method org.apache.hadoop.fs.Path.suffix. If you are wondering what exactly Path.suffix does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.Path.


The sections below present 9 code examples of the Path.suffix method, sorted by popularity by default.
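
Before diving into the examples, here is a minimal sketch of what Path.suffix itself does: it returns a new Path whose final name component has the given string appended, which is how the examples below derive sibling names such as ".gz", ".index", or ".copy". The file paths used here are purely hypothetical, and the snippet assumes hadoop-common is on the classpath.

import org.apache.hadoop.fs.Path;

public class PathSuffixSketch {
  public static void main(String[] args) {
    // a hypothetical map-output file
    Path output = new Path("/tmp/job_0001/map_0.out");

    // suffix() appends the string to the last path component and
    // returns a new Path; the original Path object is not modified
    Path index = output.suffix(".index");  // /tmp/job_0001/map_0.out.index
    Path gzipped = output.suffix(".gz");   // /tmp/job_0001/map_0.out.gz

    System.out.println(index);
    System.out.println(gzipped);
  }
}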

Example 1: getPossiblyCompressedOutputStream

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Returns a {@link OutputStream} for a file that might need 
 * compression.
 */
static OutputStream getPossiblyCompressedOutputStream(Path file, 
                                                      Configuration conf)
throws IOException {
  FileSystem fs = file.getFileSystem(conf);
  JobConf jConf = new JobConf(conf);
  if (org.apache.hadoop.mapred.FileOutputFormat.getCompressOutput(jConf)) {
    // get the codec class
    Class<? extends CompressionCodec> codecClass =
      org.apache.hadoop.mapred.FileOutputFormat
                              .getOutputCompressorClass(jConf, 
                                                        GzipCodec.class);
    // get the codec implementation
    CompressionCodec codec = ReflectionUtils.newInstance(codecClass, conf);

    // add the appropriate extension
    file = file.suffix(codec.getDefaultExtension());

    if (isCompressionEmulationEnabled(conf)) {
      FSDataOutputStream fileOut = fs.create(file, false);
      return new DataOutputStream(codec.createOutputStream(fileOut));
    }
  }
  return fs.create(file, false);
}
 
Developer ID: naver, Project: hadoop, Lines: 29, Source: CompressionEmulationUtil.java

Example 2: testPossiblyCompressedDecompressedStreams

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Test 
 * {@link CompressionEmulationUtil#getPossiblyDecompressedInputStream(Path, 
 *                                   Configuration, long)}
 *  and
 *  {@link CompressionEmulationUtil#getPossiblyCompressedOutputStream(Path, 
 *                                    Configuration)}.
 */
@Test
public void testPossiblyCompressedDecompressedStreams() throws IOException {
  JobConf conf = new JobConf();
  FileSystem lfs = FileSystem.getLocal(conf);
  String inputLine = "Hi Hello!";

  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  conf.setBoolean(FileOutputFormat.COMPRESS, true);
  conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class, 
                CompressionCodec.class);

  // define the test's root temp directory
  Path rootTempDir =
      new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
          lfs.getUri(), lfs.getWorkingDirectory());

  Path tempDir =
    new Path(rootTempDir, "TestPossiblyCompressedDecompressedStreams");
  lfs.delete(tempDir, true);

  // create a compressed file
  Path compressedFile = new Path(tempDir, "test");
  OutputStream out = 
    CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile, 
                                                               conf);
  BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
  writer.write(inputLine);
  writer.close();
  
  // now read back the data from the compressed stream
  compressedFile = compressedFile.suffix(".gz");
  InputStream in = 
    CompressionEmulationUtil
      .getPossiblyDecompressedInputStream(compressedFile, conf, 0);
  BufferedReader reader = new BufferedReader(new InputStreamReader(in));
  String readLine = reader.readLine();
  assertEquals("Compression/Decompression error", inputLine, readLine);
  reader.close();
}
 
Developer ID: naver, Project: hadoop, Lines: 49, Source: TestCompressionEmulationUtils.java

Example 3: writeRenameReadCompare

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
protected void writeRenameReadCompare(Path path, long len)
    throws IOException, NoSuchAlgorithmException {
  // If len > fs.s3n.multipart.uploads.block.size,
  // we'll use a multipart upload copy
  MessageDigest digest = MessageDigest.getInstance("MD5");
  OutputStream out = new BufferedOutputStream(
      new DigestOutputStream(fs.create(path, false), digest));
  for (long i = 0; i < len; i++) {
    out.write('Q');
  }
  out.flush();
  out.close();

  assertTrue("Exists", fs.exists(path));

  // Depending on if this file is over 5 GB or not,
  // rename will cause a multipart upload copy
  Path copyPath = path.suffix(".copy");
  fs.rename(path, copyPath);

  assertTrue("Copy exists", fs.exists(copyPath));

  // Download file from S3 and compare the digest against the original
  MessageDigest digest2 = MessageDigest.getInstance("MD5");
  InputStream in = new BufferedInputStream(
      new DigestInputStream(fs.open(copyPath), digest2));
  long copyLen = 0;
  while (in.read() != -1) {copyLen++;}
  in.close();

  assertEquals("Copy length matches original", len, copyLen);
  assertArrayEquals("Digests match", digest.digest(), digest2.digest());
}
 
Developer ID: naver, Project: hadoop, Lines: 34, Source: TestJets3tNativeFileSystemStore.java

Example 4: getLogDirs

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK", justification=
    "We only release this lock when we set it. Updates to code that uses it should verify use " +
    "of the guard boolean.")
private List<Path> getLogDirs(final Set<ServerName> serverNames) throws IOException {
  List<Path> logDirs = new ArrayList<Path>();
  boolean needReleaseLock = false;
  if (!this.services.isInitialized()) {
    // during master initialization, we could have multiple places splitting the same WAL
    this.splitLogLock.lock();
    needReleaseLock = true;
  }
  try {
    for (ServerName serverName : serverNames) {
      Path logDir = new Path(this.rootdir,
          DefaultWALProvider.getWALDirectoryName(serverName.toString()));
      Path splitDir = logDir.suffix(DefaultWALProvider.SPLITTING_EXT);
      // Rename the directory so a rogue RS doesn't create more WALs
      if (fs.exists(logDir)) {
        if (!this.fs.rename(logDir, splitDir)) {
          throw new IOException("Failed fs.rename for log split: " + logDir);
        }
        logDir = splitDir;
        LOG.debug("Renamed region directory: " + splitDir);
      } else if (!fs.exists(splitDir)) {
        LOG.info("Log dir for server " + serverName + " does not exist");
        continue;
      }
      logDirs.add(splitDir);
    }
  } finally {
    if (needReleaseLock) {
      this.splitLogLock.unlock();
    }
  }
  return logDirs;
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 37, Source: MasterFileSystem.java

Example 5: getTempPath

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@VisibleForTesting
static Path getTempPath(Path outPath, int fetcher) {
  return outPath.suffix(String.valueOf(fetcher));
}
 
Developer ID: naver, Project: hadoop, Lines: 5, Source: OnDiskMapOutput.java

Example 6: copyMapOutput

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Retrieve the map output of a single map task
 * and send it to the merger.
 */
private boolean copyMapOutput(TaskAttemptID mapTaskId) throws IOException {
  // Figure out where the map task stored its output.
  Path mapOutputFileName = localMapFiles.get(mapTaskId).getOutputFile();
  Path indexFileName = mapOutputFileName.suffix(".index");

  // Read its index to determine the location of our split
  // and its size.
  SpillRecord sr = new SpillRecord(indexFileName, job);
  IndexRecord ir = sr.getIndex(reduce);

  long compressedLength = ir.partLength;
  long decompressedLength = ir.rawLength;

  compressedLength -= CryptoUtils.cryptoPadding(job);
  decompressedLength -= CryptoUtils.cryptoPadding(job);

  // Get the location for the map output - either in-memory or on-disk
  MapOutput<K, V> mapOutput = merger.reserve(mapTaskId, decompressedLength,
      id);

  // Check if we can shuffle *now* ...
  if (mapOutput == null) {
    LOG.info("fetcher#" + id + " - MergeManager returned Status.WAIT ...");
    return false;
  }

  // Go!
  LOG.info("localfetcher#" + id + " about to shuffle output of map " + 
           mapOutput.getMapId() + " decomp: " +
           decompressedLength + " len: " + compressedLength + " to " +
           mapOutput.getDescription());

  // now read the file, seek to the appropriate section, and send it.
  FileSystem localFs = FileSystem.getLocal(job).getRaw();
  FSDataInputStream inStream = localFs.open(mapOutputFileName);

  inStream = CryptoUtils.wrapIfNecessary(job, inStream);

  try {
    inStream.seek(ir.startOffset + CryptoUtils.cryptoPadding(job));
    mapOutput.shuffle(LOCALHOST, inStream, compressedLength, decompressedLength, metrics, reporter);
  } finally {
    try {
      inStream.close();
    } catch (IOException ioe) {
      LOG.warn("IOException closing inputstream from map output: "
          + ioe.toString());
    }
  }

  scheduler.copySucceeded(mapTaskId, LOCALHOST, compressedLength, 0, 0,
      mapOutput);
  return true; // successful fetch.
}
 
Developer ID: naver, Project: hadoop, Lines: 59, Source: LocalFetcher.java

Example 7: testCompressibleGridmixRecord

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Test compressible {@link GridmixRecord}.
 */
@Test
public void testCompressibleGridmixRecord() throws IOException {
  JobConf conf = new JobConf();
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  
  FileSystem lfs = FileSystem.getLocal(conf);
  int dataSize = 1024 * 1024 * 10; // 10 MB
  float ratio = 0.357F;
  
  // define the test's root temp directory
  Path rootTempDir =
      new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
          lfs.getUri(), lfs.getWorkingDirectory());

  Path tempDir = new Path(rootTempDir, 
                          "TestPossiblyCompressibleGridmixRecord");
  lfs.delete(tempDir, true);
  
  // define a compressible GridmixRecord
  GridmixRecord record = new GridmixRecord(dataSize, 0);
  record.setCompressibility(true, ratio); // enable compression
  
  conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class, 
                CompressionCodec.class);
  org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
  
  // write the record to a file
  Path recordFile = new Path(tempDir, "record");
  OutputStream outStream = CompressionEmulationUtil
                             .getPossiblyCompressedOutputStream(recordFile, 
                                                                conf);    
  DataOutputStream out = new DataOutputStream(outStream);
  record.write(out);
  out.close();
  outStream.close();
  
  // open the compressed stream for reading
  Path actualRecordFile = recordFile.suffix(".gz");
  InputStream in = 
    CompressionEmulationUtil
      .getPossiblyDecompressedInputStream(actualRecordFile, conf, 0);
  
  // get the compressed file size
  long compressedFileSize = lfs.listStatus(actualRecordFile)[0].getLen();
  
  GridmixRecord recordRead = new GridmixRecord();
  recordRead.readFields(new DataInputStream(in));
  
  assertEquals("Record size mismatch in a compressible GridmixRecord",
               dataSize, recordRead.getSize());
  assertTrue("Failed to generate a compressible GridmixRecord",
             recordRead.getSize() > compressedFileSize);
  
  // check if the record can generate data with the desired compression ratio
  float seenRatio = ((float)compressedFileSize)/dataSize;
  assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio), 
      CompressionEmulationUtil.standardizeCompressionRatio(seenRatio), 1.0D);
}
 
Developer ID: naver, Project: hadoop, Lines: 63, Source: TestCompressionEmulationUtils.java

Example 8: testFileQueueDecompression

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Test that {@link FileQueue} can identify a compressed file and provide
 * readers that extract uncompressed data only if input-compression is enabled.
 */
@Test
public void testFileQueueDecompression() throws IOException {
  JobConf conf = new JobConf();
  FileSystem lfs = FileSystem.getLocal(conf);
  String inputLine = "Hi Hello!";
  
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
  org.apache.hadoop.mapred.FileOutputFormat.setOutputCompressorClass(conf, 
                                              GzipCodec.class);

  // define the test's root temp directory
  Path rootTempDir =
      new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
          lfs.getUri(), lfs.getWorkingDirectory());

  Path tempDir = new Path(rootTempDir, "TestFileQueueDecompression");
  lfs.delete(tempDir, true);

  // create a compressed file
  Path compressedFile = new Path(tempDir, "test");
  OutputStream out = 
    CompressionEmulationUtil.getPossiblyCompressedOutputStream(compressedFile, 
                                                               conf);
  BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out));
  writer.write(inputLine);
  writer.close();
  
  compressedFile = compressedFile.suffix(".gz");
  // now read back the data from the compressed stream using FileQueue
  long fileSize = lfs.listStatus(compressedFile)[0].getLen();
  CombineFileSplit split = 
    new CombineFileSplit(new Path[] {compressedFile}, new long[] {fileSize});
  FileQueue queue = new FileQueue(split, conf);
  byte[] bytes = new byte[inputLine.getBytes().length];
  queue.read(bytes);
  queue.close();
  String readLine = new String(bytes);
  assertEquals("Compression/Decompression error", inputLine, readLine);
}
 
Developer ID: naver, Project: hadoop, Lines: 46, Source: TestCompressionEmulationUtils.java

Example 9: testLogRollAfterSplitStart

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * Tests the case where a RegionServer enters a GC pause,
 * comes back online after the master declared it dead and started to split.
 * A log roll attempted after the master has started splitting should fail. See HBASE-2312.
 */
@Test (timeout=300000)
public void testLogRollAfterSplitStart() throws IOException {
  LOG.info("Verify wal roll after split starts will fail.");
  String logName = "testLogRollAfterSplitStart";
  Path thisTestsDir = new Path(HBASEDIR, DefaultWALProvider.getWALDirectoryName(logName));
  final WALFactory wals = new WALFactory(conf, null, logName);

  try {
    // put some entries in an WAL
    TableName tableName =
        TableName.valueOf(this.getClass().getName());
    HRegionInfo regioninfo = new HRegionInfo(tableName,
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    final WAL log = wals.getWAL(regioninfo.getEncodedNameAsBytes());
    MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1);

    final int total = 20;
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName.getName(), tableName.getName()));
      HTableDescriptor htd = new HTableDescriptor(tableName);
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(htd, regioninfo, new WALKey(regioninfo.getEncodedNameAsBytes(), tableName,
          System.currentTimeMillis(), mvcc), kvs, true);
    }
    // Send the data to HDFS datanodes and close the HDFS writer
    log.sync();
    ((FSHLog) log).replaceWriter(((FSHLog)log).getOldPath(), null, null, null);

    /* code taken from MasterFileSystem.getLogDirs(), which is called from MasterFileSystem.splitLog()
     * handles RS shutdowns (as observed by the splitting process)
     */
    // rename the directory so a rogue RS doesn't create more WALs
    Path rsSplitDir = thisTestsDir.suffix(DefaultWALProvider.SPLITTING_EXT);
    if (!fs.rename(thisTestsDir, rsSplitDir)) {
      throw new IOException("Failed fs.rename for log split: " + thisTestsDir);
    }
    LOG.debug("Renamed region directory: " + rsSplitDir);

    LOG.debug("Processing the old log files.");
    WALSplitter.split(HBASEDIR, rsSplitDir, OLDLOGDIR, fs, conf, wals);

    LOG.debug("Trying to roll the WAL.");
    try {
      log.rollWriter();
      Assert.fail("rollWriter() did not throw any exception.");
    } catch (IOException ioe) {
      if (ioe.getCause() instanceof FileNotFoundException) {
        LOG.info("Got the expected exception: ", ioe.getCause());
      } else {
        Assert.fail("Unexpected exception: " + ioe);
      }
    }
  } finally {
    wals.close();
    if (fs.exists(thisTestsDir)) {
      fs.delete(thisTestsDir, true);
    }
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 66, Source: TestLogRollAbort.java


Note: The org.apache.hadoop.fs.Path.suffix method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.