

Java FileSystem.makeQualified Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.makeQualified. If you are wondering what FileSystem.makeQualified does, how to use it, or where to find examples of it, the curated samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The following presents 15 code examples of FileSystem.makeQualified, sorted by popularity.
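
A quick orientation before the examples: makeQualified resolves a relative or scheme-less Path against the filesystem's URI and working directory, so the returned Path carries a full scheme and authority. A minimal sketch, not taken from any of the projects below (the file name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    // A relative path has no scheme or authority.
    Path relative = new Path("data/input.txt");
    // makeQualified resolves it against the filesystem URI and the
    // working directory, e.g. file:/home/user/data/input.txt locally.
    Path qualified = fs.makeQualified(relative);
    System.out.println(qualified);
  }
}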

Example 1: testMidKey

import org.apache.hadoop.fs.FileSystem; // import required by this example
@Test
@SuppressWarnings("deprecation")
public void testMidKey() throws Exception {
  // Write a mapfile of simple data: keys and values are IntWritables.
  Path dirName = new Path(TEST_DIR, "testMidKey.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);

  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
      IntWritable.class, IntWritable.class);
    writer.append(new IntWritable(1), new IntWritable(1));
    writer.close();
    // Now open a reader on the created mapfile and check midKey().
    reader = new MapFile.Reader(qualifiedDirName, conf);
    assertEquals(new IntWritable(1), reader.midKey());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 23, Source: TestMapFile.java

Example 2: testMidKeyEmpty

import org.apache.hadoop.fs.FileSystem; // import required by this example
@Test
@SuppressWarnings("deprecation")
public void testMidKeyEmpty() throws Exception {
  // Write a mapfile of simple data: keys and values are IntWritables.
  Path dirName = new Path(TEST_DIR, "testMidKeyEmpty.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);

  MapFile.Writer writer = new MapFile.Writer(conf, fs,
      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
  writer.close();
  // midKey() on an empty mapfile should return null.
  MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
  try {
    assertEquals(null, reader.midKey()); 
  } finally {
    reader.close();
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 20, Source: TestMapFile.java

Example 3: createSnapshot

import org.apache.hadoop.fs.FileSystem; // import required by this example
private Path createSnapshot() throws IOException {
  LOG.debug("Source table {}.{} has its data located at {}", sourceTable.getDbName(), sourceTable.getTableName(),
      sourceDataPath);

  FileSystem fileSystem = fileSystemFactory.get(sourceDataPath, sourceHiveConf);
  Path snapshotMetaDataPath = new Path(sourceDataPath, HdfsConstants.DOT_SNAPSHOT_DIR);
  Path resolvedLocation = sourceDataPath;
  if (fileSystem.exists(snapshotMetaDataPath)) {
    if (snapshotsDisabled) {
      LOG.info("Path {} can be snapshot, but feature has been disabled.", sourceDataPath);
    } else {
      LOG.debug("Creating source data snapshot: {}, {}", sourceDataPath, eventId);
      // fileSystem.createSnapshot does not return a fully qualified URI.
      resolvedLocation = fileSystem.makeQualified(fileSystem.createSnapshot(sourceDataPath, eventId));
      snapshotPath = resolvedLocation;
    }
  } else {
    LOG.debug("Snapshots not enabled on source location: {}", sourceDataPath);
  }
  return resolvedLocation;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 22, Source: HdfsSnapshotLocationManager.java

Example 4: isPublic

import org.apache.hadoop.fs.FileSystem; // import required by this example
/**
 * Returns a boolean to denote whether a cache file is visible to all (public)
 * or not
 *
 * @return true if the given path is visible to all, false otherwise
 */
@Private
public static boolean isPublic(FileSystem fs, Path current, FileStatus sStat,
    LoadingCache<Path,Future<FileStatus>> statCache) throws IOException {
  current = fs.makeQualified(current);
  //the leaf level file should be readable by others
  if (!checkPublicPermsForAll(fs, sStat, FsAction.READ_EXECUTE, FsAction.READ)) {
    return false;
  }

  if (Shell.WINDOWS && fs instanceof LocalFileSystem) {
    // Relax the requirement for public cache on LFS on Windows since default
    // permissions are "700" all the way up to the drive letter. In this
    // model, the only requirement for a user is to give EVERYONE group
    // permission on the file and the file will be considered public.
    // This code path is only hit when fs.default.name is file:/// (mainly
    // in tests).
    return true;
  }
  return ancestorsHaveExecutePermissions(fs, current.getParent(), statCache);
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: FSDownload.java
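
The last line of isPublic delegates to ancestorsHaveExecutePermissions, whose contract is that every directory from the file's parent up to the root must be executable by others. A simplified sketch of that walk, ignoring the stat cache (the method name and the omission of caching are my assumptions; the permission types are org.apache.hadoop.fs.permission.FsPermission and FsAction):

static boolean ancestorsExecutable(FileSystem fs, Path path) throws IOException {
  for (Path current = path; current != null; current = current.getParent()) {
    FsPermission perm = fs.getFileStatus(current).getPermission();
    // Others need EXECUTE on every ancestor directory to traverse it.
    if (!perm.getOtherAction().implies(FsAction.EXECUTE)) {
      return false;
    }
  }
  return true;
}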

Example 5: setUp

import org.apache.hadoop.fs.FileSystem; // import required by this example
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();
  
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");
  
  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();
  
  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); 
  
  FileSystem fs = dfsCluster.getFileSystem(); 
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: TestBinaryTokenFile.java

Example 6: checkOutputSpecs

import org.apache.hadoop.fs.FileSystem; // import required by this example
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);
    
    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);
    
    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: FileOutputFormat.java
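
checkOutputSpecs qualifies the output directory before calling setOutputPath because a relative, scheme-less path would otherwise be re-resolved against whichever filesystem later reads the configuration. A stand-alone sketch of that normalization step (the path is illustrative, and the configuration key shown is the one used by the mapreduce-v2 FileOutputFormat):

static void normalizeOutputDir(Configuration conf) throws IOException {
  Path outDir = new Path("job-output");        // relative, scheme-less (illustrative)
  FileSystem fs = outDir.getFileSystem(conf);
  Path normalized = fs.makeQualified(outDir);  // e.g. hdfs://namenode:8020/user/me/job-output
  // setOutputPath stores this fully qualified string form in the job conf.
  conf.set("mapreduce.output.fileoutputformat.outputdir", normalized.toString());
}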

Example 7: FileOutputCommitter

import org.apache.hadoop.fs.FileSystem; // import required by this example
/**
 * Create a file output committer
 * @param outputPath the job's output path, or null if you want the output
 * committer to act as a noop.
 * @param context the task's context
 * @throws IOException
 */
@Private
public FileOutputCommitter(Path outputPath, 
                           JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();
  algorithmVersion =
      conf.getInt(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
                  FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT);
  LOG.info("File Output Committer Algorithm version is " + algorithmVersion);
  if (algorithmVersion != 1 && algorithmVersion != 2) {
    throw new IOException("Only 1 or 2 algorithm version is supported");
  }
  if (outputPath != null) {
    FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
    this.outputPath = fs.makeQualified(outputPath);
  }
}
 
Developer: naver, Project: hadoop, Lines: 24, Source: FileOutputCommitter.java

Example 8: processGeneralOptions

import org.apache.hadoop.fs.FileSystem; // import required by this example
/**
 * Modify configuration according user-specified generic options
 * @param conf Configuration to be modified
 * @param line User-specified generic options
 */
private void processGeneralOptions(Configuration conf,
    CommandLine line) throws IOException {
  if (line.hasOption("fs")) {
    FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
  }

  if (line.hasOption("jt")) {
    String optionValue = line.getOptionValue("jt");
    if (optionValue.equalsIgnoreCase("local")) {
      conf.set("mapreduce.framework.name", optionValue);
    }

    conf.set("yarn.resourcemanager.address", optionValue, 
        "from -jt command line option");
  }
  if (line.hasOption("conf")) {
    String[] values = line.getOptionValues("conf");
    for(String value : values) {
      conf.addResource(new Path(value));
    }
  }

  if (line.hasOption('D')) {
    String[] property = line.getOptionValues('D');
    for(String prop : property) {
      String[] keyval = prop.split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1], "from command line");
      }
    }
  }

  if (line.hasOption("libjars")) {
    conf.set("tmpjars", 
             validateFiles(line.getOptionValue("libjars"), conf),
             "from -libjars command line option");
    //setting libjars in client classpath
    URL[] libjars = getLibJars(conf);
    if(libjars!=null && libjars.length>0) {
      conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
      Thread.currentThread().setContextClassLoader(
          new URLClassLoader(libjars, 
              Thread.currentThread().getContextClassLoader()));
    }
  }
  if (line.hasOption("files")) {
    conf.set("tmpfiles", 
             validateFiles(line.getOptionValue("files"), conf),
             "from -files command line option");
  }
  if (line.hasOption("archives")) {
    conf.set("tmparchives", 
              validateFiles(line.getOptionValue("archives"), conf),
              "from -archives command line option");
  }
  conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
  
  // tokensFile
  if(line.hasOption("tokenCacheFile")) {
    String fileName = line.getOptionValue("tokenCacheFile");
    // check if the local file exists
    FileSystem localFs = FileSystem.getLocal(conf);
    Path p = localFs.makeQualified(new Path(fileName));
    if (!localFs.exists(p)) {
        throw new FileNotFoundException("File "+fileName+" does not exist.");
    }
    if(LOG.isDebugEnabled()) {
      LOG.debug("setting conf tokensFile: " + fileName);
    }
    UserGroupInformation.getCurrentUser().addCredentials(
        Credentials.readTokenStorageFile(p, conf));
    conf.set("mapreduce.job.credentials.binary", p.toString(),
             "from -tokenCacheFile command line option");

  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 82, Source: GenericOptionsParser.java

Example 9: doMROnTableTest

import org.apache.hadoop.fs.FileSystem; // import required by this example
protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, String data,
    String[] args, int valueMultiplier) throws Exception {
  TableName table = TableName.valueOf(args[args.length - 1]);
  Configuration conf = new Configuration(util.getConfiguration());

  // populate input file
  FileSystem fs = FileSystem.get(conf);
  Path inputPath = fs.makeQualified(new Path(util
      .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat"));
  FSDataOutputStream op = fs.create(inputPath, true);
  op.write(Bytes.toBytes(data));
  op.close();
  LOG.debug(String.format("Wrote test data to file: %s", inputPath));

  if (conf.getBoolean(FORCE_COMBINER_CONF, true)) {
    LOG.debug("Forcing combiner.");
    conf.setInt("mapreduce.map.combine.minspills", 1);
  }

  // run the import
  List<String> argv = new ArrayList<String>(Arrays.asList(args));
  argv.add(inputPath.toString());
  Tool tool = new ImportTsv();
  LOG.debug("Running ImportTsv with arguments: " + argv);
  try {
    // Job will fail if observer rejects entries without TTL
    assertEquals(0, ToolRunner.run(conf, tool, argv.toArray(args)));
  } finally {
    // Clean up
    if (conf.getBoolean(DELETE_AFTER_LOAD_CONF, true)) {
      LOG.debug("Deleting test subdirectory");
      util.cleanupDataTestDirOnTestFS(table.getNameAsString());
    }
  }

  return tool;
}
 
Developer: fengchen8086, Project: ditb, Lines: 38, Source: TestImportTSVWithTTLs.java

Example 10: testMiniDFSCluster

import org.apache.hadoop.fs.FileSystem; // import required by this example
@Test public void testMiniDFSCluster() throws Exception {
  HBaseTestingUtility hbt = new HBaseTestingUtility();
  MiniDFSCluster cluster = hbt.startMiniDFSCluster(null);
  FileSystem dfs = cluster.getFileSystem();
  Path dir = new Path("dir");
  Path qualifiedDir = dfs.makeQualified(dir);
  LOG.info("dir=" + dir + ", qualifiedDir=" + qualifiedDir);
  assertFalse(dfs.exists(qualifiedDir));
  assertTrue(dfs.mkdirs(qualifiedDir));
  assertTrue(dfs.delete(qualifiedDir, true));
  hbt.shutdownMiniCluster();
}
 
Developer: fengchen8086, Project: ditb, Lines: 13, Source: TestHBaseTestingUtility.java

Example 11: joinFSPaths

import org.apache.hadoop.fs.FileSystem; // import required by this example
/**
 * @return the paths of the given {@link FileStatus}es joined into one comma-separated String
 * @see FileInputFormat#addInputPath(org.apache.hadoop.mapreduce.Job, Path)
 */
private static String joinFSPaths(FileSystem fs, FileStatus[] statuses) {
  StringBuilder joined = new StringBuilder();
  for (FileStatus status : statuses) {
    if (joined.length() > 0) {
      joined.append(',');
    }
    Path path = fs.makeQualified(status.getPath());
    joined.append(StringUtils.escapeString(path.toString()));
  }
  return joined.toString();
}
 
Developer: oncewang, Project: oryx2, Lines: 16, Source: BatchUpdateFunction.java
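
joinFSPaths is a private helper, but the calling pattern is easy to reproduce: list a directory, qualify each path so consumers see the full scheme and authority, and escape any commas embedded in the path before joining. A sketch under assumptions (the method name is hypothetical; StringUtils is org.apache.hadoop.util.StringUtils):

static String joinQualifiedPaths(FileSystem fs, Path dir) throws IOException {
  FileStatus[] statuses = fs.listStatus(dir);
  StringBuilder joined = new StringBuilder();
  for (FileStatus status : statuses) {
    if (joined.length() > 0) {
      joined.append(',');
    }
    // Qualify first, then escape any commas in the path text itself.
    joined.append(StringUtils.escapeString(fs.makeQualified(status.getPath()).toString()));
  }
  return joined.toString();
}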

Example 12: testGetClosest

import org.apache.hadoop.fs.FileSystem; // import required by this example
/**
 * Test getClosest feature.
 * 
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testGetClosest() throws Exception {
  // Write a mapfile of simple data: keys and values are Text.
  Path dirName = new Path(TEST_DIR, "testGetClosest.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);
  // Make an index entry for every third insertion.
  MapFile.Writer.setIndexInterval(conf, 3);
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
      Text.class, Text.class);
    // Assert that the index interval is 3
    assertEquals(3, writer.getIndexInterval());
    // Add entries up to 100 in intervals of ten.
    final int FIRST_KEY = 10;
    for (int i = FIRST_KEY; i < 100; i += 10) {
      String iStr = Integer.toString(i);
      Text t = new Text("00".substring(iStr.length()) + iStr);
      writer.append(t, t);
    }
    writer.close();
    // Now do getClosest on created mapfile.
    reader = new MapFile.Reader(qualifiedDirName, conf);
    Text key = new Text("55");
    Text value = new Text();
    Text closest = (Text) reader.getClosest(key, value);
    // Assert that closest after 55 is 60
    assertEquals(new Text("60"), closest);
    // Get closest that falls before the passed key: 50
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("50"), closest);
    // Test get closest when we pass explicit key
    final Text TWENTY = new Text("20");
    closest = (Text) reader.getClosest(TWENTY, value);
    assertEquals(TWENTY, closest);
    closest = (Text) reader.getClosest(TWENTY, value, true);
    assertEquals(TWENTY, closest);
    // Test what happens at boundaries. Assert if searching a key that is
    // less than first key in the mapfile, that the first key is returned.
    key = new Text("00");
    closest = (Text) reader.getClosest(key, value);
    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));

    // If we're looking for the first key before, and we pass in a key before
    // the first key in the file, we should get null
    closest = (Text) reader.getClosest(key, value, true);
    assertNull(closest);

    // Assert that null is returned if key is > last entry in mapfile.
    key = new Text("99");
    closest = (Text) reader.getClosest(key, value);
    assertNull(closest);

    // If we were looking for the key before, we should get the last key
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("90"), closest);
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
 
Developer: naver, Project: hadoop, Lines: 69, Source: TestMapFile.java

Example 13: testTokenCacheOption

import org.apache.hadoop.fs.FileSystem; // import required by this example
/**
 * Tests the -tokenCacheFile option.
 * @throws IOException
 */
public void testTokenCacheOption() throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);
  
  File tmpFile = new File(testDir, "tokenCacheFile");
  if(tmpFile.exists()) {
    tmpFile.delete();
  }
  String[] args = new String[2];
  // pass the -tokenCacheFile option
  args[0] = "-tokenCacheFile";
  args[1] = tmpFile.toURI().toString();
  
  // test non existing file
  Throwable th = null;
  try {
    new GenericOptionsParser(conf, args);
  } catch (Exception e) {
    th = e;
  }
  assertNotNull(th);
  assertTrue("FileNotFoundException is not thrown",
      th instanceof FileNotFoundException);
  
  // create file
  Path tmpPath = localFs.makeQualified(new Path(tmpFile.toString()));
  Token<?> token = new Token<AbstractDelegationTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(),
      new Text("token-kind"), new Text("token-service"));
  Credentials creds = new Credentials();
  creds.addToken(new Text("token-alias"), token);
  creds.writeTokenStorageFile(tmpPath, conf);

  new GenericOptionsParser(conf, args);
  String fileName = conf.get("mapreduce.job.credentials.binary");
  assertNotNull("files is null", fileName);
  assertEquals("files option does not match", tmpPath.toString(), fileName);
  
  Credentials ugiCreds =
      UserGroupInformation.getCurrentUser().getCredentials();
  assertEquals(1, ugiCreds.numberOfTokens());
  Token<?> ugiToken = ugiCreds.getToken(new Text("token-alias"));
  assertNotNull(ugiToken);
  assertEquals(token, ugiToken);
  
  localFs.delete(new Path(testDir.getAbsolutePath()), true);
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 51, Source: TestGenericOptionsParser.java
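
A side note on why the test qualifies the path against the local filesystem: a bare path string would otherwise resolve against the default filesystem from the configuration, which may be HDFS. A minimal sketch (the file name is illustrative):

FileSystem localFs = FileSystem.getLocal(conf);
// Pins the path to the local FS: file:/tmp/tokens.bin
Path tokens = localFs.makeQualified(new Path("/tmp/tokens.bin"));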

Example 14: getTestRootPath

import org.apache.hadoop.fs.FileSystem; // import required by this example
@Override
public Path getTestRootPath(FileSystem fSys, String pathString) {
  return fSys.makeQualified(new Path(TEST_ROOT_DIR, pathString));
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: TestNativeAzureFileSystemOperationsMocked.java

Example 15: compute

import org.apache.hadoop.fs.FileSystem; // import required by this example
/** Run a map/reduce job to compute Pi. */
private static void compute(int startDigit, int nDigits, int nMaps,
    String workingDir, Configuration conf, PrintStream out
    ) throws IOException {
  final String name = startDigit + "_" + nDigits;

  // set up the working directory
  out.println("Working Directory = " + workingDir);
  out.println();
  final FileSystem fs = FileSystem.get(conf);
  final Path dir = fs.makeQualified(new Path(workingDir));
  if (fs.exists(dir)) {
    throw new IOException("Working directory " + dir
        + " already exists.  Please remove it first.");
  } else if (!fs.mkdirs(dir)) {
    throw new IOException("Cannot create working directory " + dir);
  }

  out.println("Start Digit      = " + startDigit);
  out.println("Number of Digits = " + nDigits);
  out.println("Number of Maps   = " + nMaps);

  // setup a job
  final Job job = createJob(name, conf);
  final Path hexfile = new Path(dir, "pi_" + name + ".hex");
  FileOutputFormat.setOutputPath(job, new Path(dir, "out"));

  // setup custom properties
  job.getConfiguration().set(WORKING_DIR_PROPERTY, dir.toString());
  job.getConfiguration().set(HEX_FILE_PROPERTY, hexfile.toString());

  job.getConfiguration().setInt(DIGIT_START_PROPERTY, startDigit);
  job.getConfiguration().setInt(DIGIT_SIZE_PROPERTY, nDigits);
  job.getConfiguration().setInt(DIGIT_PARTS_PROPERTY, nMaps);

  // start a map/reduce job
  out.println("\nStarting Job ...");
  final long startTime = System.currentTimeMillis();
  try {
    if (!job.waitForCompletion(true)) {
      out.println("Job failed.");
      System.exit(1);
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  } finally {
    final double duration = (System.currentTimeMillis() - startTime)/1000.0;
    out.println("Duration is " + duration + " seconds.");
  }
  out.println("Output file: " + hexfile);
}
 
Developer: naver, Project: hadoop, Lines: 52, Source: BaileyBorweinPlouffe.java


Note: The org.apache.hadoop.fs.FileSystem.makeQualified examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets are drawn from community-contributed open-source projects, and copyright remains with the original authors; refer to each project's license before distributing or reusing the code. Do not reproduce without permission.