

Java Path.toString Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.Path.toString. If you are wondering what exactly Path.toString does, how to use it, and what real-world calls look like, the curated examples below should help. You can also explore further usage of the enclosing class, org.apache.hadoop.fs.Path.


The following presents 15 code examples of Path.toString, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
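Before the examples, here is a minimal standalone sketch (not taken from the examples below; it assumes only a standard Hadoop client on the classpath) of what Path.toString returns:

import org.apache.hadoop.fs.Path;

public class PathToStringDemo {
  public static void main(String[] args) {
    // A Path built from a full URI keeps its scheme and authority.
    Path base = new Path("hdfs://namenode:8020/user/data");
    // A Path built from a parent and a child name is joined with "/".
    Path child = new Path(base, "part-00000");

    System.out.println(base.toString());  // hdfs://namenode:8020/user/data
    System.out.println(child.toString()); // hdfs://namenode:8020/user/data/part-00000
  }
}

As the examples below illustrate, toString() is mostly used to hand a Path to APIs that expect a plain String, such as java.io.File constructors or command-line argument arrays.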

Example 1: getReferredToFile

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
public static Path getReferredToFile(final Path p) {
  Matcher m = REF_NAME_PATTERN.matcher(p.getName());
  if (m == null || !m.matches()) {
    LOG.warn("Failed match of store file name " + p.toString());
    throw new IllegalArgumentException("Failed match of store file name " + p.toString());
  }

  // Other region name is suffix on the passed Reference file name
  String otherRegion = m.group(2);
  // Tabledir is up two directories from where Reference was written.
  Path tableDir = p.getParent().getParent().getParent();
  String nameStrippedOfSuffix = m.group(1);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "reference '" + p + "' to region=" + otherRegion + " hfile=" + nameStrippedOfSuffix);
  }

  // Build up new path with the referenced region in place of our current
  // region in the reference path. Also strip regionname suffix from name.
  return new Path(new Path(new Path(tableDir, otherRegion), p.getParent().getName()),
      nameStrippedOfSuffix);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 23, Source: StoreFileInfo.java
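A hypothetical usage sketch for the method above (the table, region, and file names are made up for illustration and would need to match REF_NAME_PATTERN in practice):

// A reference file is named <hfile>.<parent region> and lives under
// <tableDir>/<daughter region>/<family>/. Resolving it yields the
// underlying hfile in the parent region's family directory.
Path ref = new Path("/hbase/data/default/t1/daughterRegion/cf/abc123.parentRegion");
Path hfile = StoreFileInfo.getReferredToFile(ref);
// hfile: /hbase/data/default/t1/parentRegion/cf/abc123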

Example 2: processPathShowQuotas

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Test
public void processPathShowQuotas() throws Exception {
  Path path = new Path("mockfs:/test");

  when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
  PathData pathData = new PathData(path.toString(), conf);

  PrintStream out = mock(PrintStream.class);

  Count count = new Count();
  count.out = out;

  LinkedList<String> options = new LinkedList<String>();
  options.add("-q");
  options.add("dummy");
  count.processOptions(options);

  count.processPath(pathData);
  verify(out).println(BYTES + WITH_QUOTAS + path.toString());
  verifyNoMoreInteractions(out);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 22, Source: TestCount.java

Example 3: startInternal

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Override
protected void startInternal() throws Exception {
  Path storeRoot = createStorageDir();
  Options options = new Options();
  options.createIfMissing(false);
  options.logger(new LeveldbLogger());
  LOG.info("Using state database at " + storeRoot + " for recovery");
  File dbfile = new File(storeRoot.toString());
  try {
    db = JniDBFactory.factory.open(dbfile, options);
  } catch (NativeDB.DBException e) {
    if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
      LOG.info("Creating state database at " + dbfile);
      options.createIfMissing(true);
      try {
        db = JniDBFactory.factory.open(dbfile, options);
        // store version
        storeVersion();
      } catch (DBException dbErr) {
        throw new IOException(dbErr.getMessage(), dbErr);
      }
    } else {
      throw e;
    }
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 27, Source: LeveldbRMStateStore.java
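A note on the pattern above: the store first opens the database with createIfMissing(false), so the expected first-run case (no database yet) can be told apart from other open failures, and the schema version is written via storeVersion() only when the database is created for the first time.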

Example 4: testBulkOutputWithTsvImporterTextMapper

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Test
public void testBulkOutputWithTsvImporterTextMapper() throws Exception {
  String table = "test-" + UUID.randomUUID();
  String FAMILY = "FAM";
  Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table),"hfiles");
  // Prepare the arguments required for the test.
  String[] args =
      new String[] {
          "-D" + ImportTsv.MAPPER_CONF_KEY
              + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper",
          "-D" + ImportTsv.COLUMNS_CONF_KEY
              + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
          "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
          "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), table
          };
  String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n";
  doMROnTableTest(util, FAMILY, data, args, 4);
  util.deleteTable(table);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 20, Source: TestImportTSVWithVisibilityLabels.java

Example 5: removeResourceFromCacheFileSystem

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
private boolean removeResourceFromCacheFileSystem(Path path)
    throws IOException {
  // rename the directory to make the delete atomic
  Path renamedPath = new Path(path.toString() + RENAMED_SUFFIX);
  if (fs.rename(path, renamedPath)) {
    // the directory can be removed safely now
    // log the original path
    LOG.info("Deleting " + path.toString());
    return fs.delete(renamedPath, true);
  } else {
    // we were unable to remove it for some reason: it's best to leave
    // it at that
    LOG.error("We were not able to rename the directory to "
        + renamedPath.toString() + ". We will leave it intact.");
  }
  return false;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source: CleanerTask.java

Example 6: create

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * @param permission Currently ignored.
 */
@Override
public FSDataOutputStream create(Path file, FsPermission permission,
    boolean overwrite, int bufferSize,
    short replication, long blockSize, Progressable progress)
  throws IOException {

  INode inode = store.retrieveINode(makeAbsolute(file));
  if (inode != null) {
    if (overwrite) {
      delete(file, true);
    } else {
      throw new FileAlreadyExistsException("File already exists: " + file);
    }
  } else {
    Path parent = file.getParent();
    if (parent != null) {
      if (!mkdirs(parent)) {
        throw new IOException("Mkdirs failed to create " + parent.toString());
      }
    }      
  }
  return new FSDataOutputStream
      (new S3OutputStream(getConf(), store, makeAbsolute(file),
                          blockSize, progress, bufferSize),
       statistics);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 30, Source: S3FileSystem.java

Example 7: resolveLink

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Override
protected Path resolveLink(Path f) throws IOException {
  statistics.incrementReadOps(1);
  String target = dfs.getLinkTarget(getPathName(fixRelativePart(f)));
  if (target == null) {
    throw new FileNotFoundException("File does not exist: " + f.toString());
  }
  return new Path(target);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 10, Source: DistributedFileSystem.java

Example 8: ConcatOp

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
ConcatOp(DFSClient client, Path target, int numSrc) {
  super("concat", client);
  this.target = target.toString();
  this.srcs = new String[numSrc];
  this.srcPaths = new Path[numSrc];
  Path parent = target.getParent();
  for (int i = 0; i < numSrc; i++) {
    srcPaths[i] = new Path(parent, "srcfile" + i);
    srcs[i] = srcPaths[i].toString();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 12, Source: TestRetryCacheWithHA.java

Example 9: getTableInfoSequenceId

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * @param p Path to a <code>.tableinfo</code> file.
 * @return The current editid or 0 if none found.
 */
@VisibleForTesting static int getTableInfoSequenceId(final Path p) {
  if (p == null) return 0;
  Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName());
  if (!m.matches()) throw new IllegalArgumentException(p.toString());
  String suffix = m.group(2);
  if (suffix == null || suffix.length() <= 0) return 0;
  return Integer.parseInt(m.group(2));
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 13, Source: FSTableDescriptors.java
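A hypothetical call for illustration (the table directory is made up; the .tableinfo suffix format follows the regex logic above):

// A .tableinfo file name may carry the edit sequence id as a suffix:
// ".tableinfo.0000000003" -> 3, while a bare ".tableinfo" -> 0.
int seqId = FSTableDescriptors.getTableInfoSequenceId(
    new Path("/hbase/data/default/t1/.tableinfo.0000000003")); // returns 3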

Example 10: testMROnTableWithBulkload

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
@Test
public void testMROnTableWithBulkload() throws Exception {
  String tableName = "test-" + UUID.randomUUID();
  Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName), "hfiles");
  // Prepare the arguments required for the test.
  String[] args = new String[] {
      "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(),
      "-D" + ImportTsv.COLUMNS_CONF_KEY
          + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
      "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName };
  String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n";
  util.createTable(TableName.valueOf(tableName), FAMILY);
  doMROnTableTest(util, FAMILY, data, args, 1);
  util.deleteTable(tableName);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 16, Source: TestImportTSVWithVisibilityLabels.java

Example 11: testToString

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
public void testToString() throws IOException {
  out=new BufferedWriter(new FileWriter(CONFIG));
  startConfig();
  endConfig();
  Path fileResource = new Path(CONFIG);
  conf.addResource(fileResource);
  
  String expectedOutput = 
    "Configuration: core-default.xml, core-site.xml, " + 
    fileResource.toString();
  assertEquals(expectedOutput, conf.toString());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 13, Source: TestConfiguration.java

Example 12: map

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
/**
 * {@inheritDoc}
 */
protected void map(final Object key, final OrcStruct value, final Context context) throws IOException, InterruptedException {
    if (value != null && value.toString() != null && value.toString().isEmpty()) {
        return;
    }

    // Mapper sends data with parent directory path as keys to retain directory structure
    final FileSplit fileSplit = (FileSplit) context.getInputSplit();
    final Path filePath = fileSplit.getPath();
    final String parentFilePath = String.format("%s/", filePath.getParent().toString());
    log.debug("Parent file path {}", parentFilePath);

    if (!fileSizesMap.containsKey(filePath.toString())) {
        if (fileSystem == null) {
            final URI uri = URI.create(filePath.toString());
            fileSystem = FileSystem.get(uri, configuration);
        }
        final FileStatus[] listStatuses = fileSystem.listStatus(filePath);
        for (FileStatus fileStatus : listStatuses) {
            if (!fileStatus.isDirectory()) {
                fileSizesMap.put(fileStatus.getPath().toString(), fileStatus.getLen());
                log.info("Entry added to fileSizes Map {} {}", fileStatus.getPath().toString(), fileStatus.getLen());
            }
        }
    }

    final Text parentFilePathKey = new Text(parentFilePath);
    final Text filePathKey = new Text(filePath.toString());
    final OrcValue orcValue = new OrcValue();
    orcValue.value = value;


    final Long fileSize = fileSizesMap.get(filePath.toString());

    if (fileSize < threshold) {
        context.write(parentFilePathKey, orcValue);
    } else {
        context.write(filePathKey, orcValue);
    }
}
 
Developer ID: ExpediaInceCommercePlatform, Project: dataSqueeze, Lines of code: 43, Source: OrcCompactionMapper.java
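A note on the design: records from files smaller than threshold are keyed by their parent directory, so a downstream reducer can merge all the small files in a directory into one output, while records from files at or above the threshold keep their own path as key and pass through unmerged.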

Example 13: getOutputStreamWriter

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
private OutputStreamWriter getOutputStreamWriter(Path srcFilePath,
    String fileName) throws IOException, FileNotFoundException,
    UnsupportedEncodingException {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }
  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  return osw;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 15, Source: TestAggregatedLogFormat.java

Example 14: runTeraGen

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
private void runTeraGen(Configuration conf, Path sortInput)
    throws Exception {
  String[] genArgs = {NUM_ROWS, sortInput.toString()};

  // Run TeraGen
  assertEquals(ToolRunner.run(conf, new TeraGen(), genArgs), 0);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 8, Source: TestTeraSort.java

Example 15: configure

import org.apache.hadoop.fs.Path; // import the package/class this method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data in 2 files 
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 67, Source: TestKeyFieldBasedComparator.java


Note: the org.apache.hadoop.fs.Path.toString examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's License before redistributing or using the code, and please do not republish without permission.