

Java Path.toString Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.Path.toString. If you have been wondering what Path.toString does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.Path.


The sections below present 15 code examples of Path.toString, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the site recommend better Java code examples.
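As a warm-up before the examples, here is a minimal, self-contained sketch of what Path.toString returns; the path names below are made up purely for illustration:

import org.apache.hadoop.fs.Path;

public class PathToStringDemo {
  public static void main(String[] args) {
    // A fully qualified path keeps its scheme and authority in toString().
    Path qualified = new Path("hdfs://namenode:8020/user/data/part-00000");
    System.out.println(qualified.toString()); // hdfs://namenode:8020/user/data/part-00000

    // Path normalizes redundant slashes; a relative path stays relative.
    Path relative = new Path("dir//subdir/file.txt");
    System.out.println(relative.toString()); // dir/subdir/file.txt

    // toString() is the usual bridge to APIs that expect a plain String,
    // e.g. java.io.File or configuration values, as in the examples below.
    System.out.println(new Path(qualified, "child")); // .../part-00000/child
  }
}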

Example 1: getReferredToFile

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
public static Path getReferredToFile(final Path p) {
  Matcher m = REF_NAME_PATTERN.matcher(p.getName());
  if (m == null || !m.matches()) {
    LOG.warn("Failed match of store file name " + p.toString());
    throw new IllegalArgumentException("Failed match of store file name " + p.toString());
  }

  // Other region name is suffix on the passed Reference file name
  String otherRegion = m.group(2);
  // Tabledir is up two directories from where Reference was written.
  Path tableDir = p.getParent().getParent().getParent();
  String nameStrippedOfSuffix = m.group(1);
  if (LOG.isDebugEnabled()) {
    LOG.debug(
        "reference '" + p + "' to region=" + otherRegion + " hfile=" + nameStrippedOfSuffix);
  }

  // Build up new path with the referenced region in place of our current
  // region in the reference path. Also strip regionname suffix from name.
  return new Path(new Path(new Path(tableDir, otherRegion), p.getParent().getName()),
      nameStrippedOfSuffix);
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: StoreFileInfo.java
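To make the path arithmetic concrete, take a hypothetical Reference file /table/region-A/cf/hfile1.region-B: group(1) is hfile1, group(2) is region-B, tableDir resolves to /table, and the method returns /table/region-B/cf/hfile1, the store file in the referenced region.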

Example 2: processPathShowQuotas

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Test
public void processPathShowQuotas() throws Exception {
  Path path = new Path("mockfs:/test");

  when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
  PathData pathData = new PathData(path.toString(), conf);

  PrintStream out = mock(PrintStream.class);

  Count count = new Count();
  count.out = out;

  LinkedList<String> options = new LinkedList<String>();
  options.add("-q");
  options.add("dummy");
  count.processOptions(options);

  count.processPath(pathData);
  verify(out).println(BYTES + WITH_QUOTAS + path.toString());
  verifyNoMoreInteractions(out);
}
 
Developer: naver, Project: hadoop, Lines: 22, Source: TestCount.java

Example 3: startInternal

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
protected void startInternal() throws Exception {
  Path storeRoot = createStorageDir();
  Options options = new Options();
  options.createIfMissing(false);
  options.logger(new LeveldbLogger());
  LOG.info("Using state database at " + storeRoot + " for recovery");
  File dbfile = new File(storeRoot.toString());
  try {
    db = JniDBFactory.factory.open(dbfile, options);
  } catch (NativeDB.DBException e) {
    if (e.isNotFound() || e.getMessage().contains(" does not exist ")) {
      LOG.info("Creating state database at " + dbfile);
      options.createIfMissing(true);
      try {
        db = JniDBFactory.factory.open(dbfile, options);
        // store version
        storeVersion();
      } catch (DBException dbErr) {
        throw new IOException(dbErr.getMessage(), dbErr);
      }
    } else {
      throw e;
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 27, Source: LeveldbRMStateStore.java
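Note the open-or-create idiom here: the database is first opened with createIfMissing(false) so an existing state store is never silently recreated; only when leveldb reports that the database does not exist does the code enable creation, open it again, and persist a schema version for later compatibility checks.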

Example 4: testBulkOutputWithTsvImporterTextMapper

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Test
public void testBulkOutputWithTsvImporterTextMapper() throws Exception {
  String table = "test-" + UUID.randomUUID();
  String FAMILY = "FAM";
  Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table), "hfiles");
  // Prepare the arguments required for the test.
  String[] args =
      new String[] {
          "-D" + ImportTsv.MAPPER_CONF_KEY
              + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper",
          "-D" + ImportTsv.COLUMNS_CONF_KEY
              + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
          "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
          "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), table
          };
  String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n";
  doMROnTableTest(util, FAMILY, data, args, 4);
  util.deleteTable(table);
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: TestImportTSVWithVisibilityLabels.java

Example 5: removeResourceFromCacheFileSystem

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private boolean removeResourceFromCacheFileSystem(Path path)
    throws IOException {
  // rename the directory to make the delete atomic
  Path renamedPath = new Path(path.toString() + RENAMED_SUFFIX);
  if (fs.rename(path, renamedPath)) {
    // the directory can be removed safely now
    // log the original path
    LOG.info("Deleting " + path.toString());
    return fs.delete(renamedPath, true);
  } else {
    // we were unable to remove it for some reason: it's best to leave
    // it at that
    LOG.error("We were not able to rename the directory to "
        + renamedPath.toString() + ". We will leave it intact.");
  }
  return false;
}
 
Developer: naver, Project: hadoop, Lines: 18, Source: CleanerTask.java
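The rename-first step matters because a recursive delete is not atomic: a concurrent reader could otherwise observe a half-deleted directory. Below is a minimal sketch of the same pattern against the generic FileSystem API; the suffix and paths are hypothetical, not taken from the example above:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AtomicDeleteSketch {
  /** Renames dir aside so it vanishes atomically, then deletes the renamed copy. */
  static boolean deleteAtomically(FileSystem fs, Path dir) throws IOException {
    Path doomed = new Path(dir.toString() + ".to-delete"); // hypothetical suffix
    if (!fs.rename(dir, doomed)) {
      return false; // could not rename; leave the directory intact, as above
    }
    return fs.delete(doomed, true); // recursive delete of the renamed copy
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    System.out.println(deleteAtomically(fs, new Path("/tmp/cache-entry")));
  }
}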

Example 6: create

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * @param permission Currently ignored.
 */
@Override
public FSDataOutputStream create(Path file, FsPermission permission,
    boolean overwrite, int bufferSize,
    short replication, long blockSize, Progressable progress)
  throws IOException {

  INode inode = store.retrieveINode(makeAbsolute(file));
  if (inode != null) {
    if (overwrite) {
      delete(file, true);
    } else {
      throw new FileAlreadyExistsException("File already exists: " + file);
    }
  } else {
    Path parent = file.getParent();
    if (parent != null) {
      if (!mkdirs(parent)) {
        throw new IOException("Mkdirs failed to create " + parent.toString());
      }
    }      
  }
  return new FSDataOutputStream
      (new S3OutputStream(getConf(), store, makeAbsolute(file),
                          blockSize, progress, bufferSize),
       statistics);
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: S3FileSystem.java

Example 7: resolveLink

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Override
protected Path resolveLink(Path f) throws IOException {
  statistics.incrementReadOps(1);
  String target = dfs.getLinkTarget(getPathName(fixRelativePart(f)));
  if (target == null) {
    throw new FileNotFoundException("File does not exist: " + f.toString());
  }
  return new Path(target);
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: DistributedFileSystem.java

Example 8: ConcatOp

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
ConcatOp(DFSClient client, Path target, int numSrc) {
  super("concat", client);
  this.target = target.toString();
  this.srcs = new String[numSrc];
  this.srcPaths = new Path[numSrc];
  Path parent = target.getParent();
  for (int i = 0; i < numSrc; i++) {
    srcPaths[i] = new Path(parent, "srcfile" + i);
    srcs[i] = srcPaths[i].toString();
  }
}
 
Developer: naver, Project: hadoop, Lines: 12, Source: TestRetryCacheWithHA.java

Example 9: getTableInfoSequenceId

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * @param p Path to a <code>.tableinfo</code> file.
 * @return The current editid or 0 if none found.
 */
@VisibleForTesting static int getTableInfoSequenceId(final Path p) {
  if (p == null) return 0;
  Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName());
  if (!m.matches()) throw new IllegalArgumentException(p.toString());
  String suffix = m.group(2);
  if (suffix == null || suffix.length() <= 0) return 0;
  return Integer.parseInt(m.group(2));
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: FSTableDescriptors.java
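For instance, assuming HBase's .tableinfo.&lt;sequence id&gt; naming convention, a hypothetical file named .tableinfo.0000000003 would yield 3, while a bare .tableinfo with no captured suffix yields 0.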

Example 10: testMROnTableWithBulkload

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
@Test
public void testMROnTableWithBulkload() throws Exception {
  String tableName = "test-" + UUID.randomUUID();
  Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName), "hfiles");
  // Prepare the arguments required for the test.
  String[] args = new String[] {
      "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(),
      "-D" + ImportTsv.COLUMNS_CONF_KEY
          + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY",
      "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName };
  String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n";
  util.createTable(TableName.valueOf(tableName), FAMILY);
  doMROnTableTest(util, FAMILY, data, args, 1);
  util.deleteTable(tableName);
}
 
Developer: fengchen8086, Project: ditb, Lines: 16, Source: TestImportTSVWithVisibilityLabels.java

Example 11: testToString

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
public void testToString() throws IOException {
  out = new BufferedWriter(new FileWriter(CONFIG));
  startConfig();
  endConfig();
  Path fileResource = new Path(CONFIG);
  conf.addResource(fileResource);
  
  String expectedOutput = 
    "Configuration: core-default.xml, core-site.xml, " + 
    fileResource.toString();
  assertEquals(expectedOutput, conf.toString());
}
 
Developer: naver, Project: hadoop, Lines: 13, Source: TestConfiguration.java

Example 12: map

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
/**
 * {@inheritDoc}
 */
protected void map(final Object key, final OrcStruct value, final Context context) throws IOException, InterruptedException {
    if (value != null && value.toString() != null && value.toString().isEmpty()) {
        return;
    }

    // Mapper sends data with parent directory path as keys to retain directory structure
    final FileSplit fileSplit = (FileSplit) context.getInputSplit();
    final Path filePath = fileSplit.getPath();
    final String parentFilePath = String.format("%s/", filePath.getParent().toString());
    log.debug("Parent file path {}", parentFilePath);

    if (!fileSizesMap.containsKey(filePath.toString())) {
        if (fileSystem == null) {
            final URI uri = URI.create(filePath.toString());
            fileSystem = FileSystem.get(uri, configuration);
        }
        final FileStatus[] listStatuses = fileSystem.listStatus(filePath);
        for (FileStatus fileStatus : listStatuses) {
            if (!fileStatus.isDirectory()) {
                fileSizesMap.put(fileStatus.getPath().toString(), fileStatus.getLen());
                log.info("Entry added to fileSizes Map {} {}", fileStatus.getPath().toString(), fileStatus.getLen());
            }
        }
    }

    final Text parentFilePathKey = new Text(parentFilePath);
    final Text filePathKey = new Text(filePath.toString());
    final OrcValue orcValue = new OrcValue();
    orcValue.value = value;

    final Long fileSize = fileSizesMap.get(filePath.toString());

    if (fileSize < threshold) {
        context.write(parentFilePathKey, orcValue);
    } else {
        context.write(filePathKey, orcValue);
    }
}
 
Developer: ExpediaInceCommercePlatform, Project: dataSqueeze, Lines: 43, Source: OrcCompactionMapper.java
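The final branch is the heart of the compaction strategy: records from files smaller than the threshold are keyed by their parent directory, so one reducer can merge many small files per directory, while records from files already at or above the threshold keep their own path as the key and pass through unmerged.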

Example 13: getOutputStreamWriter

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private OutputStreamWriter getOutputStreamWriter(Path srcFilePath,
    String fileName) throws IOException, FileNotFoundException,
    UnsupportedEncodingException {
  File dir = new File(srcFilePath.toString());
  if (!dir.exists()) {
    if (!dir.mkdirs()) {
      throw new IOException("Unable to create directory : " + dir);
    }
  }
  File outputFile = new File(new File(srcFilePath.toString()), fileName);
  FileOutputStream os = new FileOutputStream(outputFile);
  OutputStreamWriter osw = new OutputStreamWriter(os, "UTF8");
  return osw;
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestAggregatedLogFormat.java

Example 14: runTeraGen

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
private void runTeraGen(Configuration conf, Path sortInput)
    throws Exception {
  String[] genArgs = {NUM_ROWS, sortInput.toString()};

  // Run TeraGen
  assertEquals(ToolRunner.run(conf, new TeraGen(), genArgs), 0);
}
 
Developer: naver, Project: hadoop, Lines: 8, Source: TestTeraSort.java

Example 15: configure

import org.apache.hadoop.fs.Path; // import the package/class the method depends on
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up input data: two lines written to a single input file
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Developer: naver, Project: hadoop, Lines: 67, Source: TestKeyFieldBasedComparator.java


Note: The org.apache.hadoop.fs.Path.toString examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Refer to each project's License before redistributing or using the code; do not reproduce without permission.